4 * Copyright (C) 1991, 1992, 2002 Linus Torvalds
8 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
10 * Removed a lot of unnecessary code and simplified things now that
11 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
13 * Speed up hash, lru, and free list operations. Use gfp() for allocating
14 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
16 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
18 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
21 #include <linux/kernel.h>
22 #include <linux/syscalls.h>
25 #include <linux/percpu.h>
26 #include <linux/slab.h>
27 #include <linux/capability.h>
28 #include <linux/blkdev.h>
29 #include <linux/file.h>
30 #include <linux/quotaops.h>
31 #include <linux/highmem.h>
32 #include <linux/module.h>
33 #include <linux/writeback.h>
34 #include <linux/hash.h>
35 #include <linux/suspend.h>
36 #include <linux/buffer_head.h>
37 #include <linux/task_io_accounting_ops.h>
38 #include <linux/bio.h>
39 #include <linux/notifier.h>
40 #include <linux/cpu.h>
41 #include <linux/bitops.h>
42 #include <linux/mpage.h>
43 #include <linux/bit_spinlock.h>
44 #include <linux/cleancache.h>
46 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
48 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
51 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
53 bh->b_end_io = handler;
54 bh->b_private = private;
56 EXPORT_SYMBOL(init_buffer);
58 static int sleep_on_buffer(void *word)
64 void __lock_buffer(struct buffer_head *bh)
66 wait_on_bit_lock(&bh->b_state, BH_Lock, sleep_on_buffer,
67 TASK_UNINTERRUPTIBLE);
69 EXPORT_SYMBOL(__lock_buffer);
71 void unlock_buffer(struct buffer_head *bh)
73 clear_bit_unlock(BH_Lock, &bh->b_state);
74 smp_mb__after_clear_bit();
75 wake_up_bit(&bh->b_state, BH_Lock);
77 EXPORT_SYMBOL(unlock_buffer);
80 * Block until a buffer comes unlocked. This doesn't stop it
81 * from becoming locked again - you have to lock it yourself
82 * if you want to preserve its state.
84 void __wait_on_buffer(struct buffer_head * bh)
86 wait_on_bit(&bh->b_state, BH_Lock, sleep_on_buffer, TASK_UNINTERRUPTIBLE);
88 EXPORT_SYMBOL(__wait_on_buffer);
91 __clear_page_buffers(struct page *page)
93 ClearPagePrivate(page);
94 set_page_private(page, 0);
95 page_cache_release(page);
99 static int quiet_error(struct buffer_head *bh)
101 if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
107 static void buffer_io_error(struct buffer_head *bh)
109 char b[BDEVNAME_SIZE];
110 printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
111 bdevname(bh->b_bdev, b),
112 (unsigned long long)bh->b_blocknr);
116 * End-of-IO handler helper function which does not touch the bh after unlocking it.
118 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
119 * a race there is benign: unlock_buffer() only uses the bh's address for
120 * hashing after unlocking the buffer, so it doesn't actually touch the bh
123 static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
126 set_buffer_uptodate(bh);
128 /* This happens, due to failed READA attempts. */
129 clear_buffer_uptodate(bh);
135 * Default synchronous end-of-IO handler.. Just mark it up-to-date and
136 * unlock the buffer. This is what ll_rw_block uses too.
138 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
140 __end_buffer_read_notouch(bh, uptodate);
143 EXPORT_SYMBOL(end_buffer_read_sync);
145 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
147 char b[BDEVNAME_SIZE];
150 set_buffer_uptodate(bh);
152 if (!quiet_error(bh)) {
154 printk(KERN_WARNING "lost page write due to "
156 bdevname(bh->b_bdev, b));
158 set_buffer_write_io_error(bh);
159 clear_buffer_uptodate(bh);
164 EXPORT_SYMBOL(end_buffer_write_sync);
167 * Various filesystems appear to want __find_get_block to be non-blocking.
168 * But it's the page lock which protects the buffers. To get around this,
169 * we get exclusion from try_to_free_buffers with the blockdev mapping's private_lock.
172 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
173 * may be quite high. This code could TryLock the page, and if that
174 * succeeds, there is no need to take private_lock. (But if
175 * private_lock is contended then so is mapping->tree_lock).
177 static struct buffer_head *
178 __find_get_block_slow(struct block_device *bdev, sector_t block)
180 struct inode *bd_inode = bdev->bd_inode;
181 struct address_space *bd_mapping = bd_inode->i_mapping;
182 struct buffer_head *ret = NULL;
184 struct buffer_head *bh;
185 struct buffer_head *head;
189 index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
190 page = find_get_page(bd_mapping, index);
194 spin_lock(&bd_mapping->private_lock);
195 if (!page_has_buffers(page))
197 head = page_buffers(page);
200 if (!buffer_mapped(bh))
202 else if (bh->b_blocknr == block) {
207 bh = bh->b_this_page;
208 } while (bh != head);
210 /* we might be here because some of the buffers on this page are
211 * not mapped. This is due to various races between
212 * file io on the block device and getblk. It gets dealt with
213 * elsewhere, don't buffer_error if we had some unmapped buffers
216 char b[BDEVNAME_SIZE];
218 printk("__find_get_block_slow() failed. "
219 "block=%llu, b_blocknr=%llu\n",
220 (unsigned long long)block,
221 (unsigned long long)bh->b_blocknr);
222 printk("b_state=0x%08lx, b_size=%zu\n",
223 bh->b_state, bh->b_size);
224 printk("device %s blocksize: %d\n", bdevname(bdev, b),
225 1 << bd_inode->i_blkbits);
228 spin_unlock(&bd_mapping->private_lock);
229 page_cache_release(page);
234 /* If invalidate_buffers() will trash dirty buffers, it means some kind
235 of fs corruption is going on. Trashing dirty data always imply losing
236 information that was supposed to be just stored on the physical layer
239 Thus invalidate_buffers in general usage is not allowed to trash
240 dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
241 be preserved. These buffers are simply skipped.
243 We also skip buffers which are still in use. For example this can
244 happen if a userspace program is reading the block device.
246 NOTE: In the case where the user removed a removable-media disk even if
247 there's still dirty data not synced on disk (due to a bug in the device driver
248 or to a user error), by not destroying the dirty buffers we could
249 generate corruption also on the next media inserted; thus a parameter is
250 necessary to handle this case in the safest way possible (trying
251 not to corrupt the newly inserted disk with the data belonging to
252 the old, now-corrupted disk). Also for the ramdisk the natural thing
253 to do in order to release the ramdisk memory is to destroy dirty buffers.
255 These are two special cases. Normal usage implies that the device driver
256 issues a sync on the device (without waiting for I/O completion) and
257 then an invalidate_buffers call that doesn't trash dirty buffers.
259 For handling cache coherency with the blkdev pagecache the 'update' case
260 has been introduced. It is needed to re-read from disk any pinned
261 buffer. NOTE: re-reading from disk is destructive so we can do it only
262 when we assume nobody is changing the buffercache under our I/O and when
263 we think the disk contains more recent information than the buffercache.
264 The update == 1 pass marks the buffers we need to update, the update == 2
265 pass does the actual I/O. */
266 void invalidate_bdev(struct block_device *bdev)
268 struct address_space *mapping = bdev->bd_inode->i_mapping;
270 if (mapping->nrpages == 0)
273 invalidate_bh_lrus();
274 lru_add_drain_all(); /* make sure all lru add caches are flushed */
275 invalidate_mapping_pages(mapping, 0, -1);
276 /* 99% of the time, we don't need to flush the cleancache on the bdev.
277 * But, for the strange corners, let's be cautious
279 cleancache_flush_inode(mapping);
281 EXPORT_SYMBOL(invalidate_bdev);
284 * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
286 static void free_more_memory(void)
291 wakeup_flusher_threads(1024, WB_REASON_FREE_MORE_MEM);
294 for_each_online_node(nid) {
295 (void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
296 gfp_zone(GFP_NOFS), NULL,
299 try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
305 * I/O completion handler for block_read_full_page() - pages
306 * which come unlocked at the end of I/O.
308 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
311 struct buffer_head *first;
312 struct buffer_head *tmp;
314 int page_uptodate = 1;
316 BUG_ON(!buffer_async_read(bh));
320 set_buffer_uptodate(bh);
322 clear_buffer_uptodate(bh);
323 if (!quiet_error(bh))
329 * Be _very_ careful from here on. Bad things can happen if
330 * two buffer heads end IO at almost the same time and both
331 * decide that the page is now completely done.
333 first = page_buffers(page);
334 local_irq_save(flags);
335 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
336 clear_buffer_async_read(bh);
340 if (!buffer_uptodate(tmp))
342 if (buffer_async_read(tmp)) {
343 BUG_ON(!buffer_locked(tmp));
346 tmp = tmp->b_this_page;
348 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
349 local_irq_restore(flags);
352 * If none of the buffers had errors and they are all
353 * uptodate then we can set the page uptodate.
355 if (page_uptodate && !PageError(page))
356 SetPageUptodate(page);
361 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
362 local_irq_restore(flags);
367 * Completion handler for block_write_full_page() - pages which are unlocked
368 * during I/O, and which have PageWriteback cleared upon I/O completion.
370 void end_buffer_async_write(struct buffer_head *bh, int uptodate)
372 char b[BDEVNAME_SIZE];
374 struct buffer_head *first;
375 struct buffer_head *tmp;
378 BUG_ON(!buffer_async_write(bh));
382 set_buffer_uptodate(bh);
384 if (!quiet_error(bh)) {
386 printk(KERN_WARNING "lost page write due to "
388 bdevname(bh->b_bdev, b));
390 set_bit(AS_EIO, &page->mapping->flags);
391 set_buffer_write_io_error(bh);
392 clear_buffer_uptodate(bh);
396 first = page_buffers(page);
397 local_irq_save(flags);
398 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
400 clear_buffer_async_write(bh);
402 tmp = bh->b_this_page;
404 if (buffer_async_write(tmp)) {
405 BUG_ON(!buffer_locked(tmp));
408 tmp = tmp->b_this_page;
410 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
411 local_irq_restore(flags);
412 end_page_writeback(page);
416 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
417 local_irq_restore(flags);
420 EXPORT_SYMBOL(end_buffer_async_write);
423 * If a page's buffers are under async read-in (end_buffer_async_read
424 * completion) then there is a possibility that another thread of
425 * control could lock one of the buffers after it has completed
426 * but while some of the other buffers have not completed. This
427 * locked buffer would confuse end_buffer_async_read() into not unlocking
428 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
429 * that this buffer is not under async I/O.
431 * The page comes unlocked when it has no locked buffer_async buffers left.
434 * PageLocked prevents anyone starting new async read I/O against any of the buffers.
437 * PageWriteback is used to prevent simultaneous writeout of the same page.
440 * PageLocked prevents anyone from starting writeback of a page which is
441 * under read I/O (PageWriteback is only ever set against a locked page).
443 static void mark_buffer_async_read(struct buffer_head *bh)
445 bh->b_end_io = end_buffer_async_read;
446 set_buffer_async_read(bh);
449 static void mark_buffer_async_write_endio(struct buffer_head *bh,
450 bh_end_io_t *handler)
452 bh->b_end_io = handler;
453 set_buffer_async_write(bh);
456 void mark_buffer_async_write(struct buffer_head *bh)
458 mark_buffer_async_write_endio(bh, end_buffer_async_write);
460 EXPORT_SYMBOL(mark_buffer_async_write);
464 * fs/buffer.c contains helper functions for buffer-backed address space's
465 * fsync functions. A common requirement for buffer-based filesystems is
466 * that certain data from the backing blockdev needs to be written out for
467 * a successful fsync(). For example, ext2 indirect blocks need to be
468 * written back and waited upon before fsync() returns.
470 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
471 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
472 * management of a list of dependent buffers at ->i_mapping->private_list.
474 * Locking is a little subtle: try_to_free_buffers() will remove buffers
475 * from their controlling inode's queue when they are being freed. But
476 * try_to_free_buffers() will be operating against the *blockdev* mapping
477 * at the time, not against the S_ISREG file which depends on those buffers.
478 * So the locking for private_list is via the private_lock in the address_space
479 * which backs the buffers. Which is different from the address_space
480 * against which the buffers are listed. So for a particular address_space,
481 * mapping->private_lock does *not* protect mapping->private_list! In fact,
482 * mapping->private_list will always be protected by the backing blockdev's ->private_lock.
485 * Which introduces a requirement: all buffers on an address_space's
486 * ->private_list must be from the same address_space: the blockdev's.
488 * address_spaces which do not place buffers at ->private_list via these
489 * utility functions are free to use private_lock and private_list for
490 * whatever they want. The only requirement is that list_empty(private_list)
491 * be true at clear_inode() time.
493 * FIXME: clear_inode should not call invalidate_inode_buffers(). The
494 * filesystems should do that. invalidate_inode_buffers() should just go
495 * BUG_ON(!list_empty).
497 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
498 * take an address_space, not an inode. And it should be called
499 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
502 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
503 * list if it is already on a list. Because if the buffer is on a list,
504 * it *must* already be on the right one. If not, the filesystem is being
505 * silly. This will save a ton of locking. But first we have to ensure
506 * that buffers are taken *off* the old inode's list when they are freed
507 * (presumably in truncate). That requires careful auditing of all
508 * filesystems (do it inside bforget()). It could also be done by bringing
513 * The buffer's backing address_space's private_lock must be held
515 static void __remove_assoc_queue(struct buffer_head *bh)
517 list_del_init(&bh->b_assoc_buffers);
518 WARN_ON(!bh->b_assoc_map);
519 if (buffer_write_io_error(bh))
520 set_bit(AS_EIO, &bh->b_assoc_map->flags);
521 bh->b_assoc_map = NULL;
524 int inode_has_buffers(struct inode *inode)
526 return !list_empty(&inode->i_data.private_list);
530 * osync is designed to support O_SYNC io. It waits synchronously for
531 * all already-submitted IO to complete, but does not queue any new
532 * writes to the disk.
534 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
535 * you dirty the buffers, and then use osync_inode_buffers to wait for
536 * completion. Any other dirty buffers which are not yet queued for
537 * write will not be flushed to disk by the osync.
539 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
541 struct buffer_head *bh;
547 list_for_each_prev(p, list) {
549 if (buffer_locked(bh)) {
553 if (!buffer_uptodate(bh))
564 static void do_thaw_one(struct super_block *sb, void *unused)
566 char b[BDEVNAME_SIZE];
567 while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
568 printk(KERN_WARNING "Emergency Thaw on %s\n",
569 bdevname(sb->s_bdev, b));
572 static void do_thaw_all(struct work_struct *work)
574 iterate_supers(do_thaw_one, NULL);
576 printk(KERN_WARNING "Emergency Thaw complete\n");
580 * emergency_thaw_all -- forcibly thaw every frozen filesystem
582 * Used for emergency unfreeze of all filesystems via SysRq
584 void emergency_thaw_all(void)
586 struct work_struct *work;
588 work = kmalloc(sizeof(*work), GFP_ATOMIC);
590 INIT_WORK(work, do_thaw_all);
596 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
597 * @mapping: the mapping which wants those buffers written
599 * Starts I/O against the buffers at mapping->private_list, and waits upon
602 * Basically, this is a convenience function for fsync().
603 * @mapping is a file or directory which needs those buffers to be written for
604 * a successful fsync().
606 int sync_mapping_buffers(struct address_space *mapping)
608 struct address_space *buffer_mapping = mapping->assoc_mapping;
610 if (buffer_mapping == NULL || list_empty(&mapping->private_list))
613 return fsync_buffers_list(&buffer_mapping->private_lock,
614 &mapping->private_list);
616 EXPORT_SYMBOL(sync_mapping_buffers);
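
/*
 * Illustrative sketch, not part of buffer.c: a minimal ->fsync() for a
 * buffer-backed filesystem built on sync_mapping_buffers() as described
 * above.  The function name and the exact split of work are assumptions
 * for illustration; real filesystems typically go through
 * generic_file_fsync() instead.
 */
static int example_fsync(struct file *file, loff_t start, loff_t end,
			 int datasync)
{
	struct inode *inode = file->f_mapping->host;
	int err;

	/* Flush dirty pagecache pages of the file itself first. */
	err = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (err)
		return err;

	/* Then write out and wait upon the "associated" metadata buffers. */
	return sync_mapping_buffers(inode->i_mapping);
}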
619 * Called when we've recently written block `bblock', and it is known that
620 * `bblock' was for a buffer_boundary() buffer. This means that the block at
621 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
622 * dirty, schedule it for IO. So that indirects merge nicely with their data.
624 void write_boundary_block(struct block_device *bdev,
625 sector_t bblock, unsigned blocksize)
627 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
629 if (buffer_dirty(bh))
630 ll_rw_block(WRITE, 1, &bh);
635 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
637 struct address_space *mapping = inode->i_mapping;
638 struct address_space *buffer_mapping = bh->b_page->mapping;
640 mark_buffer_dirty(bh);
641 if (!mapping->assoc_mapping) {
642 mapping->assoc_mapping = buffer_mapping;
644 BUG_ON(mapping->assoc_mapping != buffer_mapping);
646 if (!bh->b_assoc_map) {
647 spin_lock(&buffer_mapping->private_lock);
648 list_move_tail(&bh->b_assoc_buffers,
649 &mapping->private_list);
650 bh->b_assoc_map = mapping;
651 spin_unlock(&buffer_mapping->private_lock);
654 EXPORT_SYMBOL(mark_buffer_dirty_inode);
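
/*
 * Illustrative sketch, not part of buffer.c: how a filesystem typically
 * uses mark_buffer_dirty_inode() after modifying a metadata block that a
 * later fsync() of the file must also write out (cf. ext2 indirect blocks).
 * The helper name and the implied on-disk layout are assumptions.
 */
static void example_update_indirect(struct inode *inode,
				    struct buffer_head *ind_bh,
				    int slot, __le32 new_block)
{
	/* Update the in-memory copy of the indirect block... */
	((__le32 *)ind_bh->b_data)[slot] = new_block;
	/*
	 * ...and put the buffer on inode->i_mapping->private_list so that
	 * sync_mapping_buffers()/fsync_buffers_list() will find it.
	 */
	mark_buffer_dirty_inode(ind_bh, inode);
}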
657 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode dirty.
660 * If warn is true, then emit a warning if the page is not uptodate and has
661 * not been truncated.
663 static void __set_page_dirty(struct page *page,
664 struct address_space *mapping, int warn)
668 spin_lock_irqsave(&mapping->tree_lock, flags);
669 if (page->mapping) { /* Race with truncate? */
670 WARN_ON_ONCE(warn && !PageUptodate(page));
671 account_page_dirtied(page, mapping);
672 radix_tree_tag_set(&mapping->page_tree,
673 page_index(page), PAGECACHE_TAG_DIRTY);
675 spin_unlock_irqrestore(&mapping->tree_lock, flags);
676 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
680 * Add a page to the dirty page list.
682 * It is a sad fact of life that this function is called from several places
683 * deeply under spinlocking. It may not sleep.
685 * If the page has buffers, the uptodate buffers are set dirty, to preserve
686 * dirty-state coherency between the page and the buffers. If the page does
687 * not have buffers then when they are later attached they will all be set
690 * The buffers are dirtied before the page is dirtied. There's a small race
691 * window in which a writepage caller may see the page cleanness but not the
692 * buffer dirtiness. That's fine. If this code were to set the page dirty
693 * before the buffers, a concurrent writepage caller could clear the page dirty
694 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
695 * page on the dirty page list.
697 * We use private_lock to lock against try_to_free_buffers while using the
698 * page's buffer list. Also use this to protect against clean buffers being
699 * added to the page after it was set dirty.
701 * FIXME: may need to call ->reservepage here as well. That's rather up to the
702 * address_space though.
704 int __set_page_dirty_buffers(struct page *page)
707 struct address_space *mapping = page_mapping(page);
709 if (unlikely(!mapping))
710 return !TestSetPageDirty(page);
712 spin_lock(&mapping->private_lock);
713 if (page_has_buffers(page)) {
714 struct buffer_head *head = page_buffers(page);
715 struct buffer_head *bh = head;
718 set_buffer_dirty(bh);
719 bh = bh->b_this_page;
720 } while (bh != head);
722 newly_dirty = !TestSetPageDirty(page);
723 spin_unlock(&mapping->private_lock);
726 __set_page_dirty(page, mapping, 1);
729 EXPORT_SYMBOL(__set_page_dirty_buffers);
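
/*
 * Illustrative sketch, not part of buffer.c: filesystems whose pages carry
 * buffer_heads normally point ->set_page_dirty at this helper (or leave it
 * NULL and rely on the default in set_page_dirty()).  The structure name
 * below is an assumption for illustration.
 */
static const struct address_space_operations example_buffered_aops = {
	.set_page_dirty	= __set_page_dirty_buffers,
};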
732 * Write out and wait upon a list of buffers.
734 * We have conflicting pressures: we want to make sure that all
735 * initially dirty buffers get waited on, but that any subsequently
736 * dirtied buffers don't. After all, we don't want fsync to last
737 * forever if somebody is actively writing to the file.
739 * Do this in two main stages: first we copy dirty buffers to a
740 * temporary inode list, queueing the writes as we go. Then we clean
741 * up, waiting for those writes to complete.
743 * During this second stage, any subsequent updates to the file may end
744 * up refiling the buffer on the original inode's dirty list again, so
745 * there is a chance we will end up with a buffer queued for write but
746 * not yet completed on that list. So, as a final cleanup we go through
747 * the osync code to catch these locked, dirty buffers without requeuing
748 * any newly dirty buffers for write.
750 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
752 struct buffer_head *bh;
753 struct list_head tmp;
754 struct address_space *mapping;
756 struct blk_plug plug;
758 INIT_LIST_HEAD(&tmp);
759 blk_start_plug(&plug);
762 while (!list_empty(list)) {
763 bh = BH_ENTRY(list->next);
764 mapping = bh->b_assoc_map;
765 __remove_assoc_queue(bh);
766 /* Avoid race with mark_buffer_dirty_inode() which does
767 * a lockless check and we rely on seeing the dirty bit */
769 if (buffer_dirty(bh) || buffer_locked(bh)) {
770 list_add(&bh->b_assoc_buffers, &tmp);
771 bh->b_assoc_map = mapping;
772 if (buffer_dirty(bh)) {
776 * Ensure any pending I/O completes so that
777 * write_dirty_buffer() actually writes the
778 * current contents - it is a noop if I/O is
779 * still in flight on potentially older contents.
782 write_dirty_buffer(bh, WRITE_SYNC);
785 * Kick off IO for the previous mapping. Note
786 * that we will not run the very last mapping,
787 * wait_on_buffer() will do that for us
788 * through sync_buffer().
797 blk_finish_plug(&plug);
800 while (!list_empty(&tmp)) {
801 bh = BH_ENTRY(tmp.prev);
803 mapping = bh->b_assoc_map;
804 __remove_assoc_queue(bh);
805 /* Avoid race with mark_buffer_dirty_inode() which does
806 * a lockless check and we rely on seeing the dirty bit */
808 if (buffer_dirty(bh)) {
809 list_add(&bh->b_assoc_buffers,
810 &mapping->private_list);
811 bh->b_assoc_map = mapping;
815 if (!buffer_uptodate(bh))
822 err2 = osync_buffers_list(lock, list);
830 * Invalidate any and all dirty buffers on a given inode. We are
831 * probably unmounting the fs, but that doesn't mean we have already
832 * done a sync(). Just drop the buffers from the inode list.
834 * NOTE: we take the inode's blockdev's mapping's private_lock. Which
835 * assumes that all the buffers are against the blockdev. Not true
838 void invalidate_inode_buffers(struct inode *inode)
840 if (inode_has_buffers(inode)) {
841 struct address_space *mapping = &inode->i_data;
842 struct list_head *list = &mapping->private_list;
843 struct address_space *buffer_mapping = mapping->assoc_mapping;
845 spin_lock(&buffer_mapping->private_lock);
846 while (!list_empty(list))
847 __remove_assoc_queue(BH_ENTRY(list->next));
848 spin_unlock(&buffer_mapping->private_lock);
851 EXPORT_SYMBOL(invalidate_inode_buffers);
854 * Remove any clean buffers from the inode's buffer list. This is called
855 * when we're trying to free the inode itself. Those buffers can pin it.
857 * Returns true if all buffers were removed.
859 int remove_inode_buffers(struct inode *inode)
863 if (inode_has_buffers(inode)) {
864 struct address_space *mapping = &inode->i_data;
865 struct list_head *list = &mapping->private_list;
866 struct address_space *buffer_mapping = mapping->assoc_mapping;
868 spin_lock(&buffer_mapping->private_lock);
869 while (!list_empty(list)) {
870 struct buffer_head *bh = BH_ENTRY(list->next);
871 if (buffer_dirty(bh)) {
875 __remove_assoc_queue(bh);
877 spin_unlock(&buffer_mapping->private_lock);
883 * Create the appropriate buffers when given a page for a data area and
884 * the size of each buffer. Use the bh->b_this_page linked list to
885 * follow the buffers created. Return NULL if unable to create more
888 * The retry flag is used to differentiate async IO (paging, swapping)
889 * which may not fail from ordinary buffer allocations.
891 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
894 struct buffer_head *bh, *head;
900 while ((offset -= size) >= 0) {
901 bh = alloc_buffer_head(GFP_NOFS);
906 bh->b_this_page = head;
911 atomic_set(&bh->b_count, 0);
914 /* Link the buffer to its page */
915 set_bh_page(bh, page, offset);
917 init_buffer(bh, NULL, NULL);
921 * In case anything failed, we just free everything we got.
927 head = head->b_this_page;
928 free_buffer_head(bh);
933 * Return failure for non-async IO requests. Async IO requests
934 * are not allowed to fail, so we have to wait until buffer heads
935 * become available. But we don't want tasks sleeping with
936 * partially complete buffers, so all were released above.
941 /* We're _really_ low on memory. Now we just
942 * wait for old buffer heads to become free due to
943 * finishing IO. Since this is an async request and
944 * the reserve list is empty, we're sure there are
945 * async buffer heads in use.
950 EXPORT_SYMBOL_GPL(alloc_page_buffers);
953 link_dev_buffers(struct page *page, struct buffer_head *head)
955 struct buffer_head *bh, *tail;
960 bh = bh->b_this_page;
962 tail->b_this_page = head;
963 attach_page_buffers(page, head);
967 * Initialise the state of a blockdev page's buffers.
970 init_page_buffers(struct page *page, struct block_device *bdev,
971 sector_t block, int size)
973 struct buffer_head *head = page_buffers(page);
974 struct buffer_head *bh = head;
975 int uptodate = PageUptodate(page);
976 sector_t end_block = blkdev_max_block(I_BDEV(bdev->bd_inode));
979 if (!buffer_mapped(bh)) {
980 init_buffer(bh, NULL, NULL);
982 bh->b_blocknr = block;
984 set_buffer_uptodate(bh);
985 if (block < end_block)
986 set_buffer_mapped(bh);
989 bh = bh->b_this_page;
990 } while (bh != head);
993 * Caller needs to validate requested block against end of device.
999 * Create the page-cache page that contains the requested block.
1001 * This is used purely for blockdev mappings.
1004 grow_dev_page(struct block_device *bdev, sector_t block,
1005 pgoff_t index, int size, int sizebits)
1007 struct inode *inode = bdev->bd_inode;
1009 struct buffer_head *bh;
1011 int ret = 0; /* Will call free_more_memory() */
1013 page = find_or_create_page(inode->i_mapping, index,
1014 (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
1018 BUG_ON(!PageLocked(page));
1020 if (page_has_buffers(page)) {
1021 bh = page_buffers(page);
1022 if (bh->b_size == size) {
1023 end_block = init_page_buffers(page, bdev,
1024 index << sizebits, size);
1027 if (!try_to_free_buffers(page))
1032 * Allocate some buffers for this page
1034 bh = alloc_page_buffers(page, size, 0);
1039 * Link the page to the buffers and initialise them. Take the
1040 * lock to be atomic wrt __find_get_block(), which does not
1041 * run under the page lock.
1043 spin_lock(&inode->i_mapping->private_lock);
1044 link_dev_buffers(page, bh);
1045 end_block = init_page_buffers(page, bdev, index << sizebits, size);
1046 spin_unlock(&inode->i_mapping->private_lock);
1048 ret = (block < end_block) ? 1 : -ENXIO;
1051 page_cache_release(page);
1056 * Create buffers for the specified block device block's page. If
1057 * that page was dirty, the buffers are set dirty also.
1060 grow_buffers(struct block_device *bdev, sector_t block, int size)
1068 } while ((size << sizebits) < PAGE_SIZE);
1070 index = block >> sizebits;
1073 * Check for a block which wants to lie outside our maximum possible
1074 * pagecache index. (this comparison is done using sector_t types).
1076 if (unlikely(index != block >> sizebits)) {
1077 char b[BDEVNAME_SIZE];
1079 printk(KERN_ERR "%s: requested out-of-range block %llu for "
1081 __func__, (unsigned long long)block,
1086 /* Create a page with the proper size buffers.. */
1087 return grow_dev_page(bdev, block, index, size, sizebits);
1090 static struct buffer_head *
1091 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1093 /* Size must be multiple of hard sectorsize */
1094 if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
1095 (size < 512 || size > PAGE_SIZE))) {
1096 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1098 printk(KERN_ERR "logical block size: %d\n",
1099 bdev_logical_block_size(bdev));
1106 struct buffer_head *bh;
1109 bh = __find_get_block(bdev, block, size);
1113 ret = grow_buffers(bdev, block, size);
1122 * The relationship between dirty buffers and dirty pages:
1124 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1125 * the page is tagged dirty in its radix tree.
1127 * At all times, the dirtiness of the buffers represents the dirtiness of
1128 * subsections of the page. If the page has buffers, the page dirty bit is
1129 * merely a hint about the true dirty state.
1131 * When a page is set dirty in its entirety, all its buffers are marked dirty
1132 * (if the page has buffers).
1134 * When a buffer is marked dirty, its page is dirtied, but the page's other buffers are not.
1137 * Also. When blockdev buffers are explicitly read with bread(), they
1138 * individually become uptodate. But their backing page remains not
1139 * uptodate - even if all of its buffers are uptodate. A subsequent
1140 * block_read_full_page() against that page will discover all the uptodate
1141 * buffers, will set the page uptodate and will perform no I/O.
1145 * mark_buffer_dirty - mark a buffer_head as needing writeout
1146 * @bh: the buffer_head to mark dirty
1148 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1149 * backing page dirty, then tag the page as dirty in its address_space's radix
1150 * tree and then attach the address_space's inode to its superblock's dirty inode list.
1153 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
1154 * mapping->tree_lock and mapping->host->i_lock.
1156 void mark_buffer_dirty(struct buffer_head *bh)
1158 WARN_ON_ONCE(!buffer_uptodate(bh));
1161 * Very *carefully* optimize the it-is-already-dirty case.
1163 * Don't let the final "is it dirty" escape to before we
1164 * perhaps modified the buffer.
1166 if (buffer_dirty(bh)) {
1168 if (buffer_dirty(bh))
1172 if (!test_set_buffer_dirty(bh)) {
1173 struct page *page = bh->b_page;
1174 if (!TestSetPageDirty(page)) {
1175 struct address_space *mapping = page_mapping(page);
1177 __set_page_dirty(page, mapping, 0);
1181 EXPORT_SYMBOL(mark_buffer_dirty);
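
/*
 * Illustrative sketch, not part of buffer.c: the usual pattern for updating
 * a metadata block through the buffer cache - modify the data under the
 * buffer lock, then mark_buffer_dirty() so writeback (or an explicit
 * sync_dirty_buffer()) pushes it to disk.  The names and the on-disk
 * field are assumptions for illustration.
 */
static int example_update_super(struct super_block *sb,
				struct buffer_head *sb_bh, u32 new_state)
{
	lock_buffer(sb_bh);
	*(__le32 *)sb_bh->b_data = cpu_to_le32(new_state);
	unlock_buffer(sb_bh);

	mark_buffer_dirty(sb_bh);
	if (sb->s_flags & MS_SYNCHRONOUS)
		return sync_dirty_buffer(sb_bh);	/* write it out now */
	return 0;
}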
1184 * Decrement a buffer_head's reference count. If all buffers against a page
1185 * have zero reference count, are clean and unlocked, and if the page is clean
1186 * and unlocked then try_to_free_buffers() may strip the buffers from the page
1187 * in preparation for freeing it (sometimes, rarely, buffers are removed from
1188 * a page but it ends up not being freed, and buffers may later be reattached).
1190 void __brelse(struct buffer_head * buf)
1192 if (atomic_read(&buf->b_count)) {
1196 WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1198 EXPORT_SYMBOL(__brelse);
1201 * bforget() is like brelse(), except it discards any
1202 * potentially dirty data.
1204 void __bforget(struct buffer_head *bh)
1206 clear_buffer_dirty(bh);
1207 if (bh->b_assoc_map) {
1208 struct address_space *buffer_mapping = bh->b_page->mapping;
1210 spin_lock(&buffer_mapping->private_lock);
1211 list_del_init(&bh->b_assoc_buffers);
1212 bh->b_assoc_map = NULL;
1213 spin_unlock(&buffer_mapping->private_lock);
1217 EXPORT_SYMBOL(__bforget);
1219 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1222 if (buffer_uptodate(bh)) {
1227 bh->b_end_io = end_buffer_read_sync;
1228 submit_bh(READ, bh);
1230 if (buffer_uptodate(bh))
1238 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
1239 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
1240 * refcount elevated by one when they're in an LRU. A buffer can only appear
1241 * once in a particular CPU's LRU. A single buffer can be present in multiple
1242 * CPU's LRUs at the same time.
1244 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1245 * sb_find_get_block().
1247 * The LRUs themselves only need locking against invalidate_bh_lrus. We use
1248 * a local interrupt disable for that.
1251 #define BH_LRU_SIZE 8
1254 struct buffer_head *bhs[BH_LRU_SIZE];
1257 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1260 #define bh_lru_lock() local_irq_disable()
1261 #define bh_lru_unlock() local_irq_enable()
1263 #define bh_lru_lock() preempt_disable()
1264 #define bh_lru_unlock() preempt_enable()
1267 static inline void check_irqs_on(void)
1269 #ifdef irqs_disabled
1270 BUG_ON(irqs_disabled());
1275 * The LRU management algorithm is dopey-but-simple. Sorry.
1277 static void bh_lru_install(struct buffer_head *bh)
1279 struct buffer_head *evictee = NULL;
1283 if (__this_cpu_read(bh_lrus.bhs[0]) != bh) {
1284 struct buffer_head *bhs[BH_LRU_SIZE];
1290 for (in = 0; in < BH_LRU_SIZE; in++) {
1291 struct buffer_head *bh2 =
1292 __this_cpu_read(bh_lrus.bhs[in]);
1297 if (out >= BH_LRU_SIZE) {
1298 BUG_ON(evictee != NULL);
1305 while (out < BH_LRU_SIZE)
1307 memcpy(__this_cpu_ptr(&bh_lrus.bhs), bhs, sizeof(bhs));
1316 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
1318 static struct buffer_head *
1319 lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1321 struct buffer_head *ret = NULL;
1326 for (i = 0; i < BH_LRU_SIZE; i++) {
1327 struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);
1329 if (bh && bh->b_bdev == bdev &&
1330 bh->b_blocknr == block && bh->b_size == size) {
1333 __this_cpu_write(bh_lrus.bhs[i],
1334 __this_cpu_read(bh_lrus.bhs[i - 1]));
1337 __this_cpu_write(bh_lrus.bhs[0], bh);
1349 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
1350 * it in the LRU and mark it as accessed. If it is not present then return NULL.
1353 struct buffer_head *
1354 __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1356 struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1359 bh = __find_get_block_slow(bdev, block);
1367 EXPORT_SYMBOL(__find_get_block);
1370 * __getblk will locate (and, if necessary, create) the buffer_head
1371 * which corresponds to the passed block_device, block and size. The
1372 * returned buffer has its reference count incremented.
1374 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1375 * attempt is failing. FIXME, perhaps?
1377 struct buffer_head *
1378 __getblk(struct block_device *bdev, sector_t block, unsigned size)
1380 struct buffer_head *bh = __find_get_block(bdev, block, size);
1384 bh = __getblk_slow(bdev, block, size);
1387 EXPORT_SYMBOL(__getblk);
1390 * Do async read-ahead on a buffer..
1392 void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1394 struct buffer_head *bh = __getblk(bdev, block, size);
1396 ll_rw_block(READA, 1, &bh);
1400 EXPORT_SYMBOL(__breadahead);
1403 * __bread() - reads a specified block and returns the bh
1404 * @bdev: the block_device to read from
1405 * @block: number of block
1406 * @size: size (in bytes) to read
1408 * Reads a specified block, and returns buffer head that contains it.
1409 * It returns NULL if the block was unreadable.
1411 struct buffer_head *
1412 __bread(struct block_device *bdev, sector_t block, unsigned size)
1414 struct buffer_head *bh = __getblk(bdev, block, size);
1416 if (likely(bh) && !buffer_uptodate(bh))
1417 bh = __bread_slow(bh);
1420 EXPORT_SYMBOL(__bread);
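
/*
 * Illustrative sketch, not part of buffer.c: reading one metadata block via
 * __bread().  Filesystems usually call the sb_bread() wrapper, which passes
 * sb->s_bdev and sb->s_blocksize for them.  The copy destination and block
 * number are assumptions for illustration.
 */
static int example_read_block(struct super_block *sb, sector_t blocknr,
			      void *out)
{
	struct buffer_head *bh;

	bh = __bread(sb->s_bdev, blocknr, sb->s_blocksize);
	if (!bh)
		return -EIO;		/* block was unreadable */

	memcpy(out, bh->b_data, sb->s_blocksize);
	brelse(bh);			/* drop the reference __bread() took */
	return 0;
}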
1423 * invalidate_bh_lrus() is called rarely - but not only at unmount.
1424 * This doesn't race because it runs in each cpu either in irq
1425 * or with preempt disabled.
1427 static void invalidate_bh_lru(void *arg)
1429 struct bh_lru *b = &get_cpu_var(bh_lrus);
1432 for (i = 0; i < BH_LRU_SIZE; i++) {
1436 put_cpu_var(bh_lrus);
1439 void invalidate_bh_lrus(void)
1441 on_each_cpu(invalidate_bh_lru, NULL, 1);
1443 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
1445 void set_bh_page(struct buffer_head *bh,
1446 struct page *page, unsigned long offset)
1449 BUG_ON(offset >= PAGE_SIZE);
1450 if (PageHighMem(page))
1452 * This catches illegal uses and preserves the offset:
1454 bh->b_data = (char *)(0 + offset);
1456 bh->b_data = page_address(page) + offset;
1458 EXPORT_SYMBOL(set_bh_page);
1461 * Called when truncating a buffer on a page completely.
1463 static void discard_buffer(struct buffer_head * bh)
1466 clear_buffer_dirty(bh);
1468 clear_buffer_mapped(bh);
1469 clear_buffer_req(bh);
1470 clear_buffer_new(bh);
1471 clear_buffer_delay(bh);
1472 clear_buffer_unwritten(bh);
1477 * block_invalidatepage - invalidate part or all of a buffer-backed page
1479 * @page: the page which is affected
1480 * @offset: the index of the truncation point
1482 * block_invalidatepage() is called when all or part of the page has become
1483 * invalidated by a truncate operation.
1485 * block_invalidatepage() does not have to release all buffers, but it must
1486 * ensure that no dirty buffer is left outside @offset and that no I/O
1487 * is underway against any of the blocks which are outside the truncation
1488 * point. Because the caller is about to free (and possibly reuse) those blocks on-disk.
1491 void block_invalidatepage(struct page *page, unsigned long offset)
1493 struct buffer_head *head, *bh, *next;
1494 unsigned int curr_off = 0;
1496 BUG_ON(!PageLocked(page));
1497 if (!page_has_buffers(page))
1500 head = page_buffers(page);
1503 unsigned int next_off = curr_off + bh->b_size;
1504 next = bh->b_this_page;
1507 * is this block fully invalidated?
1509 if (offset <= curr_off)
1511 curr_off = next_off;
1513 } while (bh != head);
1516 * We release buffers only if the entire page is being invalidated.
1517 * The get_block cached value has been unconditionally invalidated,
1518 * so real IO is not possible anymore.
1521 try_to_release_page(page, 0);
1525 EXPORT_SYMBOL(block_invalidatepage);
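
/*
 * Illustrative sketch, not part of buffer.c: a filesystem with no special
 * per-page teardown needs can point ->invalidatepage straight at
 * block_invalidatepage(); journalling filesystems wrap it instead.  The
 * structure name is an assumption for illustration.
 */
static const struct address_space_operations example_invalidate_aops = {
	.invalidatepage	= block_invalidatepage,
};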
1528 * We attach and possibly dirty the buffers atomically wrt
1529 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
1530 * is already excluded via the page lock.
1532 void create_empty_buffers(struct page *page,
1533 unsigned long blocksize, unsigned long b_state)
1535 struct buffer_head *bh, *head, *tail;
1537 head = alloc_page_buffers(page, blocksize, 1);
1540 bh->b_state |= b_state;
1542 bh = bh->b_this_page;
1544 tail->b_this_page = head;
1546 spin_lock(&page->mapping->private_lock);
1547 if (PageUptodate(page) || PageDirty(page)) {
1550 if (PageDirty(page))
1551 set_buffer_dirty(bh);
1552 if (PageUptodate(page))
1553 set_buffer_uptodate(bh);
1554 bh = bh->b_this_page;
1555 } while (bh != head);
1557 attach_page_buffers(page, head);
1558 spin_unlock(&page->mapping->private_lock);
1560 EXPORT_SYMBOL(create_empty_buffers);
1563 * We are taking a block for data and we don't want any output from any
1564 * buffer-cache aliases starting from return from that function and
1565 * until the moment when something will explicitly mark the buffer
1566 * dirty (hopefully that will not happen until we will free that block ;-)
1567 * We don't even need to mark it not-uptodate - nobody can expect
1568 * anything from a newly allocated buffer anyway. We used to use
1569 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1570 * don't want to mark the alias unmapped, for example - it would confuse
1571 * anyone who might pick it with bread() afterwards...
1573 * Also.. Note that bforget() doesn't lock the buffer. So there can
1574 * be writeout I/O going on against recently-freed buffers. We don't
1575 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1576 * only if we really need to. That happens here.
1578 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1580 struct buffer_head *old_bh;
1584 old_bh = __find_get_block_slow(bdev, block);
1586 clear_buffer_dirty(old_bh);
1587 wait_on_buffer(old_bh);
1588 clear_buffer_req(old_bh);
1592 EXPORT_SYMBOL(unmap_underlying_metadata);
1595 * NOTE! All mapped/uptodate combinations are valid:
1597 * Mapped Uptodate Meaning
1599 * No No "unknown" - must do get_block()
1600 * No Yes "hole" - zero-filled
1601 * Yes No "allocated" - allocated on disk, not read in
1602 * Yes Yes "valid" - allocated and up-to-date in memory.
1604 * "Dirty" is valid only with the last case (mapped+uptodate).
1608 * While block_write_full_page is writing back the dirty buffers under
1609 * the page lock, whoever dirtied the buffers may decide to clean them
1610 * again at any time. We handle that by only looking at the buffer
1611 * state inside lock_buffer().
1613 * If block_write_full_page() is called for regular writeback
1614 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1615 * locked buffer. This only can happen if someone has written the buffer
1616 * directly, with submit_bh(). At the address_space level PageWriteback
1617 * prevents this contention from occurring.
1619 * If block_write_full_page() is called with wbc->sync_mode ==
1620 * WB_SYNC_ALL, the writes are posted using WRITE_SYNC; this
1621 * causes the writes to be flagged as synchronous writes.
1623 static int __block_write_full_page(struct inode *inode, struct page *page,
1624 get_block_t *get_block, struct writeback_control *wbc,
1625 bh_end_io_t *handler)
1629 sector_t last_block;
1630 struct buffer_head *bh, *head;
1631 const unsigned blocksize = 1 << inode->i_blkbits;
1632 int nr_underway = 0;
1633 int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
1634 WRITE_SYNC : WRITE);
1636 BUG_ON(!PageLocked(page));
1638 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1640 if (!page_has_buffers(page)) {
1641 create_empty_buffers(page, blocksize,
1642 (1 << BH_Dirty)|(1 << BH_Uptodate));
1646 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1647 * here, and the (potentially unmapped) buffers may become dirty at
1648 * any time. If a buffer becomes dirty here after we've inspected it
1649 * then we just miss that fact, and the page stays dirty.
1651 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1652 * handle that here by just cleaning them.
1655 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1656 head = page_buffers(page);
1660 * Get all the dirty buffers mapped to disk addresses and
1661 * handle any aliases from the underlying blockdev's mapping.
1664 if (block > last_block) {
1666 * mapped buffers outside i_size will occur, because
1667 * this page can be outside i_size when there is a
1668 * truncate in progress.
1671 * The buffer was zeroed by block_write_full_page()
1673 clear_buffer_dirty(bh);
1674 set_buffer_uptodate(bh);
1675 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1677 WARN_ON(bh->b_size != blocksize);
1678 err = get_block(inode, block, bh, 1);
1681 clear_buffer_delay(bh);
1682 if (buffer_new(bh)) {
1683 /* blockdev mappings never come here */
1684 clear_buffer_new(bh);
1685 unmap_underlying_metadata(bh->b_bdev,
1689 bh = bh->b_this_page;
1691 } while (bh != head);
1694 if (!buffer_mapped(bh))
1697 * If it's a fully non-blocking write attempt and we cannot
1698 * lock the buffer then redirty the page. Note that this can
1699 * potentially cause a busy-wait loop from writeback threads
1700 * and kswapd activity, but those code paths have their own
1701 * higher-level throttling.
1703 if (wbc->sync_mode != WB_SYNC_NONE) {
1705 } else if (!trylock_buffer(bh)) {
1706 redirty_page_for_writepage(wbc, page);
1709 if (test_clear_buffer_dirty(bh)) {
1710 mark_buffer_async_write_endio(bh, handler);
1714 } while ((bh = bh->b_this_page) != head);
1717 * The page and its buffers are protected by PageWriteback(), so we can
1718 * drop the bh refcounts early.
1720 BUG_ON(PageWriteback(page));
1721 set_page_writeback(page);
1724 struct buffer_head *next = bh->b_this_page;
1725 if (buffer_async_write(bh)) {
1726 submit_bh(write_op, bh);
1730 } while (bh != head);
1735 if (nr_underway == 0) {
1737 * The page was marked dirty, but the buffers were
1738 * clean. Someone wrote them back by hand with
1739 * ll_rw_block/submit_bh. A rare case.
1741 end_page_writeback(page);
1744 * The page and buffer_heads can be released at any time from
1752 * ENOSPC, or some other error. We may already have added some
1753 * blocks to the file, so we need to write these out to avoid
1754 * exposing stale data.
1755 * The page is currently locked and not marked for writeback
1758 /* Recovery: lock and submit the mapped buffers */
1760 if (buffer_mapped(bh) && buffer_dirty(bh) &&
1761 !buffer_delay(bh)) {
1763 mark_buffer_async_write_endio(bh, handler);
1766 * The buffer may have been set dirty during
1767 * attachment to a dirty page.
1769 clear_buffer_dirty(bh);
1771 } while ((bh = bh->b_this_page) != head);
1773 BUG_ON(PageWriteback(page));
1774 mapping_set_error(page->mapping, err);
1775 set_page_writeback(page);
1777 struct buffer_head *next = bh->b_this_page;
1778 if (buffer_async_write(bh)) {
1779 clear_buffer_dirty(bh);
1780 submit_bh(write_op, bh);
1784 } while (bh != head);
1790 * If a page has any new buffers, zero them out here, and mark them uptodate
1791 * and dirty so they'll be written out (in order to prevent uninitialised
1792 * block data from leaking). And clear the new bit.
1794 void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1796 unsigned int block_start, block_end;
1797 struct buffer_head *head, *bh;
1799 BUG_ON(!PageLocked(page));
1800 if (!page_has_buffers(page))
1803 bh = head = page_buffers(page);
1806 block_end = block_start + bh->b_size;
1808 if (buffer_new(bh)) {
1809 if (block_end > from && block_start < to) {
1810 if (!PageUptodate(page)) {
1811 unsigned start, size;
1813 start = max(from, block_start);
1814 size = min(to, block_end) - start;
1816 zero_user(page, start, size);
1817 set_buffer_uptodate(bh);
1820 clear_buffer_new(bh);
1821 mark_buffer_dirty(bh);
1825 block_start = block_end;
1826 bh = bh->b_this_page;
1827 } while (bh != head);
1829 EXPORT_SYMBOL(page_zero_new_buffers);
1831 int __block_write_begin(struct page *page, loff_t pos, unsigned len,
1832 get_block_t *get_block)
1834 unsigned from = pos & (PAGE_CACHE_SIZE - 1);
1835 unsigned to = from + len;
1836 struct inode *inode = page->mapping->host;
1837 unsigned block_start, block_end;
1840 unsigned blocksize, bbits;
1841 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1843 BUG_ON(!PageLocked(page));
1844 BUG_ON(from > PAGE_CACHE_SIZE);
1845 BUG_ON(to > PAGE_CACHE_SIZE);
1848 blocksize = 1 << inode->i_blkbits;
1849 if (!page_has_buffers(page))
1850 create_empty_buffers(page, blocksize, 0);
1851 head = page_buffers(page);
1853 bbits = inode->i_blkbits;
1854 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1856 for(bh = head, block_start = 0; bh != head || !block_start;
1857 block++, block_start=block_end, bh = bh->b_this_page) {
1858 block_end = block_start + blocksize;
1859 if (block_end <= from || block_start >= to) {
1860 if (PageUptodate(page)) {
1861 if (!buffer_uptodate(bh))
1862 set_buffer_uptodate(bh);
1867 clear_buffer_new(bh);
1868 if (!buffer_mapped(bh)) {
1869 WARN_ON(bh->b_size != blocksize);
1870 err = get_block(inode, block, bh, 1);
1873 if (buffer_new(bh)) {
1874 unmap_underlying_metadata(bh->b_bdev,
1876 if (PageUptodate(page)) {
1877 clear_buffer_new(bh);
1878 set_buffer_uptodate(bh);
1879 mark_buffer_dirty(bh);
1882 if (block_end > to || block_start < from)
1883 zero_user_segments(page,
1889 if (PageUptodate(page)) {
1890 if (!buffer_uptodate(bh))
1891 set_buffer_uptodate(bh);
1894 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1895 !buffer_unwritten(bh) &&
1896 (block_start < from || block_end > to)) {
1897 ll_rw_block(READ, 1, &bh);
1902 * If we issued read requests - let them complete.
1904 while(wait_bh > wait) {
1905 wait_on_buffer(*--wait_bh);
1906 if (!buffer_uptodate(*wait_bh))
1910 page_zero_new_buffers(page, from, to);
1913 EXPORT_SYMBOL(__block_write_begin);
1915 static int __block_commit_write(struct inode *inode, struct page *page,
1916 unsigned from, unsigned to)
1918 unsigned block_start, block_end;
1921 struct buffer_head *bh, *head;
1923 blocksize = 1 << inode->i_blkbits;
1925 for(bh = head = page_buffers(page), block_start = 0;
1926 bh != head || !block_start;
1927 block_start=block_end, bh = bh->b_this_page) {
1928 block_end = block_start + blocksize;
1929 if (block_end <= from || block_start >= to) {
1930 if (!buffer_uptodate(bh))
1933 set_buffer_uptodate(bh);
1934 mark_buffer_dirty(bh);
1936 clear_buffer_new(bh);
1940 * If this is a partial write which happened to make all buffers
1941 * uptodate then we can optimize away a bogus readpage() for
1942 * the next read(). Here we 'discover' whether the page went
1943 * uptodate as a result of this (potentially partial) write.
1946 SetPageUptodate(page);
1951 * block_write_begin takes care of the basic task of block allocation and
1952 * bringing partial write blocks uptodate first.
1954 * The filesystem needs to handle block truncation upon failure.
1956 int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
1957 unsigned flags, struct page **pagep, get_block_t *get_block)
1959 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
1963 page = grab_cache_page_write_begin(mapping, index, flags);
1967 status = __block_write_begin(page, pos, len, get_block);
1968 if (unlikely(status)) {
1970 page_cache_release(page);
1977 EXPORT_SYMBOL(block_write_begin);
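
/*
 * Illustrative sketch, not part of buffer.c: the typical shape of a
 * filesystem's ->write_begin built on block_write_begin().  The names
 * example_write_begin and example_get_block are assumptions;
 * example_get_block stands in for the filesystem's block-mapping routine,
 * assumed to be defined elsewhere.  On failure the filesystem must handle
 * block truncation itself, as the comment above notes.
 */
static int example_get_block(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh_result, int create);

static int example_write_begin(struct file *file,
			       struct address_space *mapping,
			       loff_t pos, unsigned len, unsigned flags,
			       struct page **pagep, void **fsdata)
{
	int ret;

	ret = block_write_begin(mapping, pos, len, flags, pagep,
				example_get_block);
	if (ret < 0) {
		/*
		 * Undo any blocks instantiated beyond the (unchanged)
		 * i_size here, e.g. via the filesystem's truncate helper.
		 */
	}
	return ret;
}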
1979 int block_write_end(struct file *file, struct address_space *mapping,
1980 loff_t pos, unsigned len, unsigned copied,
1981 struct page *page, void *fsdata)
1983 struct inode *inode = mapping->host;
1986 start = pos & (PAGE_CACHE_SIZE - 1);
1988 if (unlikely(copied < len)) {
1990 * The buffers that were written will now be uptodate, so we
1991 * don't have to worry about a readpage reading them and
1992 * overwriting a partial write. However if we have encountered
1993 * a short write and only partially written into a buffer, it
1994 * will not be marked uptodate, so a readpage might come in and
1995 * destroy our partial write.
1997 * Do the simplest thing, and just treat any short write to a
1998 * non uptodate page as a zero-length write, and force the
1999 * caller to redo the whole thing.
2001 if (!PageUptodate(page))
2004 page_zero_new_buffers(page, start+copied, start+len);
2006 flush_dcache_page(page);
2008 /* This could be a short (even 0-length) commit */
2009 __block_commit_write(inode, page, start, start+copied);
2013 EXPORT_SYMBOL(block_write_end);
2015 int generic_write_end(struct file *file, struct address_space *mapping,
2016 loff_t pos, unsigned len, unsigned copied,
2017 struct page *page, void *fsdata)
2019 struct inode *inode = mapping->host;
2020 int i_size_changed = 0;
2022 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2025 * No need to use i_size_read() here, the i_size
2026 * cannot change under us because we hold i_mutex.
2028 * But it's important to update i_size while still holding page lock:
2029 * page writeout could otherwise come in and zero beyond i_size.
2031 if (pos+copied > inode->i_size) {
2032 i_size_write(inode, pos+copied);
2037 page_cache_release(page);
2040 * Don't mark the inode dirty under page lock. First, it unnecessarily
2041 * makes the holding time of page lock longer. Second, it forces lock
2042 * ordering of page lock and transaction start for journaling filesystems.
2046 mark_inode_dirty(inode);
2050 EXPORT_SYMBOL(generic_write_end);
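
/*
 * Illustrative sketch, not part of buffer.c: generic_write_end() is meant
 * to be used directly as ->write_end by filesystems whose ->write_begin
 * goes through block_write_begin(), as in the hypothetical aops below
 * (example_write_begin refers to the sketch after block_write_begin()).
 */
static const struct address_space_operations example_write_aops = {
	.write_begin	= example_write_begin,
	.write_end	= generic_write_end,
};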
2053 * block_is_partially_uptodate checks whether buffers within a page are uptodate or not.
2056 * Returns true if all buffers which correspond to a file portion
2057 * we want to read are uptodate.
2059 int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
2062 struct inode *inode = page->mapping->host;
2063 unsigned block_start, block_end, blocksize;
2065 struct buffer_head *bh, *head;
2068 if (!page_has_buffers(page))
2071 blocksize = 1 << inode->i_blkbits;
2072 to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
2074 if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
2077 head = page_buffers(page);
2081 block_end = block_start + blocksize;
2082 if (block_end > from && block_start < to) {
2083 if (!buffer_uptodate(bh)) {
2087 if (block_end >= to)
2090 block_start = block_end;
2091 bh = bh->b_this_page;
2092 } while (bh != head);
2096 EXPORT_SYMBOL(block_is_partially_uptodate);
2099 * Generic "read page" function for block devices that have the normal
2100 * get_block functionality. This is most of the block device filesystems.
2101 * Reads the page asynchronously --- the unlock_buffer() and
2102 * set/clear_buffer_uptodate() functions propagate buffer state into the
2103 * page struct once IO has completed.
2105 int block_read_full_page(struct page *page, get_block_t *get_block)
2107 struct inode *inode = page->mapping->host;
2108 sector_t iblock, lblock;
2109 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2110 unsigned int blocksize;
2112 int fully_mapped = 1;
2114 BUG_ON(!PageLocked(page));
2115 blocksize = 1 << inode->i_blkbits;
2116 if (!page_has_buffers(page))
2117 create_empty_buffers(page, blocksize, 0);
2118 head = page_buffers(page);
2120 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2121 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2127 if (buffer_uptodate(bh))
2130 if (!buffer_mapped(bh)) {
2134 if (iblock < lblock) {
2135 WARN_ON(bh->b_size != blocksize);
2136 err = get_block(inode, iblock, bh, 0);
2140 if (!buffer_mapped(bh)) {
2141 zero_user(page, i * blocksize, blocksize);
2143 set_buffer_uptodate(bh);
2147 * get_block() might have updated the buffer
2150 if (buffer_uptodate(bh))
2154 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2157 SetPageMappedToDisk(page);
2161 * All buffers are uptodate - we can set the page uptodate
2162 * as well. But not if get_block() returned an error.
2164 if (!PageError(page))
2165 SetPageUptodate(page);
2170 /* Stage two: lock the buffers */
2171 for (i = 0; i < nr; i++) {
2174 mark_buffer_async_read(bh);
2178 * Stage 3: start the IO. Check for uptodateness
2179 * inside the buffer lock in case another process reading
2180 * the underlying blockdev brought it uptodate (the sct fix).
2182 for (i = 0; i < nr; i++) {
2184 if (buffer_uptodate(bh))
2185 end_buffer_async_read(bh, 1);
2187 submit_bh(READ, bh);
2191 EXPORT_SYMBOL(block_read_full_page);
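
/*
 * Illustrative sketch, not part of buffer.c: most block-device-backed
 * filesystems implement ->readpage as a one-line wrapper like this,
 * passing their own get_block routine (example_get_block is the
 * hypothetical mapping routine assumed above).
 */
static int example_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, example_get_block);
}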
2193 /* utility function for filesystems that need to do work on expanding
2194 * truncates. Uses filesystem pagecache writes to allow the filesystem to
2195 * deal with the hole.
2197 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2199 struct address_space *mapping = inode->i_mapping;
2204 err = inode_newsize_ok(inode, size);
2208 err = pagecache_write_begin(NULL, mapping, size, 0,
2209 AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2214 err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2220 EXPORT_SYMBOL(generic_cont_expand_simple);
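
/*
 * Illustrative sketch, not part of buffer.c: how a ->setattr size change
 * might use generic_cont_expand_simple() when the new size grows the file
 * (for filesystems that cannot represent holes, cf. FAT).  The helper name
 * and the rest of the setattr handling are assumptions for illustration.
 */
static int example_setattr_size(struct inode *inode, loff_t newsize)
{
	int err = 0;

	if (newsize > inode->i_size)
		/*
		 * Zero-fill and instantiate the region between the old and
		 * new EOF through the pagecache.
		 */
		err = generic_cont_expand_simple(inode, newsize);
	else
		truncate_setsize(inode, newsize);

	return err;
}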
2222 static int cont_expand_zero(struct file *file, struct address_space *mapping,
2223 loff_t pos, loff_t *bytes)
2225 struct inode *inode = mapping->host;
2226 unsigned blocksize = 1 << inode->i_blkbits;
2229 pgoff_t index, curidx;
2231 unsigned zerofrom, offset, len;
2234 index = pos >> PAGE_CACHE_SHIFT;
2235 offset = pos & ~PAGE_CACHE_MASK;
2237 while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
2238 zerofrom = curpos & ~PAGE_CACHE_MASK;
2239 if (zerofrom & (blocksize-1)) {
2240 *bytes |= (blocksize-1);
2243 len = PAGE_CACHE_SIZE - zerofrom;
2245 err = pagecache_write_begin(file, mapping, curpos, len,
2246 AOP_FLAG_UNINTERRUPTIBLE,
2250 zero_user(page, zerofrom, len);
2251 err = pagecache_write_end(file, mapping, curpos, len, len,
2258 balance_dirty_pages_ratelimited(mapping);
2261 /* page covers the boundary, find the boundary offset */
2262 if (index == curidx) {
2263 zerofrom = curpos & ~PAGE_CACHE_MASK;
2264 /* if we are expanding the file, the last block will be filled */
2265 if (offset <= zerofrom) {
2268 if (zerofrom & (blocksize-1)) {
2269 *bytes |= (blocksize-1);
2272 len = offset - zerofrom;
2274 err = pagecache_write_begin(file, mapping, curpos, len,
2275 AOP_FLAG_UNINTERRUPTIBLE,
2279 zero_user(page, zerofrom, len);
2280 err = pagecache_write_end(file, mapping, curpos, len, len,
2292 * For moronic filesystems that do not allow holes in files.
2293 * We may have to extend the file.
2295 int cont_write_begin(struct file *file, struct address_space *mapping,
2296 loff_t pos, unsigned len, unsigned flags,
2297 struct page **pagep, void **fsdata,
2298 get_block_t *get_block, loff_t *bytes)
2300 struct inode *inode = mapping->host;
2301 unsigned blocksize = 1 << inode->i_blkbits;
2305 err = cont_expand_zero(file, mapping, pos, bytes);
2309 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2310 if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2311 *bytes |= (blocksize-1);
2315 return block_write_begin(mapping, pos, len, flags, pagep, get_block);
2317 EXPORT_SYMBOL(cont_write_begin);
2319 int block_commit_write(struct page *page, unsigned from, unsigned to)
2321 struct inode *inode = page->mapping->host;
2322 __block_commit_write(inode, page, from, to);
2325 EXPORT_SYMBOL(block_commit_write);
2328 * block_page_mkwrite() is not allowed to change the file size as it gets
2329 * called from a page fault handler when a page is first dirtied. Hence we must
2330 * be careful to check for EOF conditions here. We set the page up correctly
2331 * for a written page which means we get ENOSPC checking when writing into
2332 * holes and correct delalloc and unwritten extent mapping on filesystems that
2333 * support these features.
2335 * We are not allowed to take the i_mutex here so we have to play games to
2336 * protect against truncate races as the page could now be beyond EOF. Because
2337 * truncate writes the inode size before removing pages, once we have the
2338 * page lock we can determine safely if the page is beyond EOF. If it is not
2339 * beyond EOF, then the page is guaranteed safe against truncation until we
2342 * Direct callers of this function should call vfs_check_frozen() so that the
2343 * page fault handler does not busy-loop until the fs is thawed.
2345 int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2346 get_block_t get_block)
2348 struct page *page = vmf->page;
2349 struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2355 size = i_size_read(inode);
2356 if ((page->mapping != inode->i_mapping) ||
2357 (page_offset(page) > size)) {
2358 /* We overload EFAULT to mean page got truncated */
2363 /* page is wholly or partially inside EOF */
2364 if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2365 end = size & ~PAGE_CACHE_MASK;
2367 end = PAGE_CACHE_SIZE;
2369 ret = __block_write_begin(page, 0, end, get_block);
2371 ret = block_commit_write(page, 0, end);
2373 if (unlikely(ret < 0))
2376 * Freezing in progress? We check after the page is marked dirty and
2377 * with page lock held so if the test here fails, we are sure freezing
2378 * code will wait during syncing until the page fault is done - at that
2379 * point page will be dirty and unlocked so freezing code will write it
2380 * and writeprotect it again.
2382 set_page_dirty(page);
2383 if (inode->i_sb->s_frozen != SB_UNFROZEN) {
2387 wait_on_page_writeback(page);
2393 EXPORT_SYMBOL(__block_page_mkwrite);
2395 int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2396 get_block_t get_block)
2399 struct super_block *sb = vma->vm_file->f_path.dentry->d_inode->i_sb;
2402 * This check is racy but catches the common case. The check in
2403 * __block_page_mkwrite() is reliable.
2405 vfs_check_frozen(sb, SB_FREEZE_WRITE);
2406 ret = __block_page_mkwrite(vma, vmf, get_block);
2407 return block_page_mkwrite_return(ret);
2409 EXPORT_SYMBOL(block_page_mkwrite);
2412 * nobh_write_begin()'s prereads are special: the buffer_heads are freed
2413 * immediately, while under the page lock. So it needs a special end_io
2414 * handler which does not touch the bh after unlocking it.
2416 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2418 __end_buffer_read_notouch(bh, uptodate);
2422 * Attach the singly-linked list of buffers created by nobh_write_begin, to
2423 * the page (converting it to a circular linked list and taking care of the page's dirty state).
2426 static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2428 struct buffer_head *bh;
2430 BUG_ON(!PageLocked(page));
2432 spin_lock(&page->mapping->private_lock);
2435 if (PageDirty(page))
2436 set_buffer_dirty(bh);
2437 if (!bh->b_this_page)
2438 bh->b_this_page = head;
2439 bh = bh->b_this_page;
2440 } while (bh != head);
2441 attach_page_buffers(page, head);
2442 spin_unlock(&page->mapping->private_lock);
2446 * On entry, the page is fully not uptodate.
2447 * On exit the page is fully uptodate in the areas outside (from,to).
2448 * The filesystem needs to handle block truncation upon failure.
2450 int nobh_write_begin(struct address_space *mapping,
2451 loff_t pos, unsigned len, unsigned flags,
2452 struct page **pagep, void **fsdata,
2453 get_block_t *get_block)
2455 struct inode *inode = mapping->host;
2456 const unsigned blkbits = inode->i_blkbits;
2457 const unsigned blocksize = 1 << blkbits;
2458 struct buffer_head *head, *bh;
2462 unsigned block_in_page;
2463 unsigned block_start, block_end;
2464 sector_t block_in_file;
2467 int is_mapped_to_disk = 1;
2469 index = pos >> PAGE_CACHE_SHIFT;
2470 from = pos & (PAGE_CACHE_SIZE - 1);
2473 page = grab_cache_page_write_begin(mapping, index, flags);
2479 if (page_has_buffers(page)) {
2480 ret = __block_write_begin(page, pos, len, get_block);
2486 if (PageMappedToDisk(page))
2490 * Allocate buffers so that we can keep track of state, and potentially
2491 * attach them to the page if an error occurs. In the common case of
2492 * no error, they will just be freed again without ever being attached
2493 * to the page (which is all OK, because we're under the page lock).
2495 * Be careful: the buffer linked list is a NULL terminated one, rather
2496 * than the circular one we're used to.
2498 head = alloc_page_buffers(page, blocksize, 0);
2504 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2507 * We loop across all blocks in the page, whether or not they are
2508 * part of the affected region. This is so we can discover if the
2509 * page is fully mapped-to-disk.
2511 for (block_start = 0, block_in_page = 0, bh = head;
2512 block_start < PAGE_CACHE_SIZE;
2513 block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
2516 block_end = block_start + blocksize;
2519 if (block_start >= to)
2521 ret = get_block(inode, block_in_file + block_in_page,
2525 if (!buffer_mapped(bh))
2526 is_mapped_to_disk = 0;
2528 unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2529 if (PageUptodate(page)) {
2530 set_buffer_uptodate(bh);
2533 if (buffer_new(bh) || !buffer_mapped(bh)) {
2534 zero_user_segments(page, block_start, from,
2538 if (buffer_uptodate(bh))
2539 continue; /* reiserfs does this */
2540 if (block_start < from || block_end > to) {
2542 bh->b_end_io = end_buffer_read_nobh;
2543 submit_bh(READ, bh);
2550 * The page is locked, so these buffers are protected from
2551 * any VM or truncate activity. Hence we don't need to care
2552 * for the buffer_head refcounts.
2554 for (bh = head; bh; bh = bh->b_this_page) {
2556 if (!buffer_uptodate(bh))
2563 if (is_mapped_to_disk)
2564 SetPageMappedToDisk(page);
2566 *fsdata = head; /* to be released by nobh_write_end */
2573 * Error recovery is a bit difficult. We need to zero out blocks that
2574 * were newly allocated, and dirty them to ensure they get written out.
2575 * Buffers need to be attached to the page at this point, otherwise
2576 * the handling of potential IO errors during writeout would be hard
2577 * (could try doing synchronous writeout, but what if that fails too?)
2579 attach_nobh_buffers(page, head);
2580 page_zero_new_buffers(page, from, to);
2584 page_cache_release(page);
2589 EXPORT_SYMBOL(nobh_write_begin);
2591 int nobh_write_end(struct file *file, struct address_space *mapping,
2592 loff_t pos, unsigned len, unsigned copied,
2593 struct page *page, void *fsdata)
2595 struct inode *inode = page->mapping->host;
2596 struct buffer_head *head = fsdata;
2597 struct buffer_head *bh;
2598 BUG_ON(fsdata != NULL && page_has_buffers(page));
2600 if (unlikely(copied < len) && head)
2601 attach_nobh_buffers(page, head);
2602 if (page_has_buffers(page))
2603 return generic_write_end(file, mapping, pos, len,
2604 copied, page, fsdata);
2606 SetPageUptodate(page);
2607 set_page_dirty(page);
2608 if (pos+copied > inode->i_size) {
2609 i_size_write(inode, pos+copied);
2610 mark_inode_dirty(inode);
2614 page_cache_release(page);
2618 head = head->b_this_page;
2619 free_buffer_head(bh);
2624 EXPORT_SYMBOL(nobh_write_end);
2627 * nobh_writepage() - based on block_write_full_page() except
2628 * that it tries to operate without attaching bufferheads to the page.
2631 int nobh_writepage(struct page *page, get_block_t *get_block,
2632 struct writeback_control *wbc)
2634 struct inode * const inode = page->mapping->host;
2635 loff_t i_size = i_size_read(inode);
2636 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2640 /* Is the page fully inside i_size? */
2641 if (page->index < end_index)
2644 /* Is the page fully outside i_size? (truncate in progress) */
2645 offset = i_size & (PAGE_CACHE_SIZE-1);
2646 if (page->index >= end_index+1 || !offset) {
2648 * The page may have dirty, unmapped buffers. For example,
2649 * they may have been added in ext3_writepage(). Make them
2650 * freeable here, so the page does not leak.
2653 /* Not really sure about this - do we need this ? */
2654 if (page->mapping->a_ops->invalidatepage)
2655 page->mapping->a_ops->invalidatepage(page, offset);
2658 return 0; /* don't care */
2662 * The page straddles i_size. It must be zeroed out on each and every
2663 * writepage invocation because it may be mmapped. "A file is mapped
2664 * in multiples of the page size. For a file that is not a multiple of
2665 * the page size, the remaining memory is zeroed when mapped, and
2666 * writes to that region are not written out to the file."
2668 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2670 ret = mpage_writepage(page, get_block, wbc);
2672 ret = __block_write_full_page(inode, page, get_block, wbc,
2673 end_buffer_async_write);
2676 EXPORT_SYMBOL(nobh_writepage);
2678 int nobh_truncate_page(struct address_space *mapping,
2679 loff_t from, get_block_t *get_block)
2681 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2682 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2685 unsigned length, pos;
2686 struct inode *inode = mapping->host;
2688 struct buffer_head map_bh;
2691 blocksize = 1 << inode->i_blkbits;
2692 length = offset & (blocksize - 1);
2694 /* Block boundary? Nothing to do */
2698 length = blocksize - length;
2699 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2701 page = grab_cache_page(mapping, index);
2706 if (page_has_buffers(page)) {
2709 page_cache_release(page);
2710 return block_truncate_page(mapping, from, get_block);
2713 /* Find the buffer that contains "offset" */
2715 while (offset >= pos) {
2720 map_bh.b_size = blocksize;
2722 err = get_block(inode, iblock, &map_bh, 0);
2725 /* unmapped? It's a hole - nothing to do */
2726 if (!buffer_mapped(&map_bh))
2729 /* Ok, it's mapped. Make sure it's up-to-date */
2730 if (!PageUptodate(page)) {
2731 err = mapping->a_ops->readpage(NULL, page);
2733 page_cache_release(page);
2737 if (!PageUptodate(page)) {
2741 if (page_has_buffers(page))
2744 zero_user(page, offset, length);
2745 set_page_dirty(page);
2750 page_cache_release(page);
2754 EXPORT_SYMBOL(nobh_truncate_page);
2756 int block_truncate_page(struct address_space *mapping,
2757 loff_t from, get_block_t *get_block)
2759 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2760 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2763 unsigned length, pos;
2764 struct inode *inode = mapping->host;
2766 struct buffer_head *bh;
2769 blocksize = 1 << inode->i_blkbits;
2770 length = offset & (blocksize - 1);
2772 /* Block boundary? Nothing to do */
2776 length = blocksize - length;
2777 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2779 page = grab_cache_page(mapping, index);
2784 if (!page_has_buffers(page))
2785 create_empty_buffers(page, blocksize, 0);
2787 /* Find the buffer that contains "offset" */
2788 bh = page_buffers(page);
2790 while (offset >= pos) {
2791 bh = bh->b_this_page;
2797 if (!buffer_mapped(bh)) {
2798 WARN_ON(bh->b_size != blocksize);
2799 err = get_block(inode, iblock, bh, 0);
2802 /* unmapped? It's a hole - nothing to do */
2803 if (!buffer_mapped(bh))
2807 /* Ok, it's mapped. Make sure it's up-to-date */
2808 if (PageUptodate(page))
2809 set_buffer_uptodate(bh);
2811 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2813 ll_rw_block(READ, 1, &bh);
2815 /* Uhhuh. Read error. Complain and punt. */
2816 if (!buffer_uptodate(bh))
2820 zero_user(page, offset, length);
2821 mark_buffer_dirty(bh);
2826 page_cache_release(page);
2830 EXPORT_SYMBOL(block_truncate_page);
2833 * The generic ->writepage function for buffer-backed address_spaces;
2834 * this form passes in the end_io handler used to finish the IO.
2836 int block_write_full_page_endio(struct page *page, get_block_t *get_block,
2837 struct writeback_control *wbc, bh_end_io_t *handler)
2839 struct inode * const inode = page->mapping->host;
2840 loff_t i_size = i_size_read(inode);
2841 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2844 /* Is the page fully inside i_size? */
2845 if (page->index < end_index)
2846 return __block_write_full_page(inode, page, get_block, wbc,
2849 /* Is the page fully outside i_size? (truncate in progress) */
2850 offset = i_size & (PAGE_CACHE_SIZE-1);
2851 if (page->index >= end_index+1 || !offset) {
2853 * The page may have dirty, unmapped buffers. For example,
2854 * they may have been added in ext3_writepage(). Make them
2855 * freeable here, so the page does not leak.
2857 do_invalidatepage(page, 0);
2859 return 0; /* don't care */
2863 * The page straddles i_size. It must be zeroed out on each and every
2864 * writepage invocation because it may be mmapped. "A file is mapped
2865 * in multiples of the page size. For a file that is not a multiple of
2866 * the page size, the remaining memory is zeroed when mapped, and
2867 * writes to that region are not written out to the file."
2869 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2870 return __block_write_full_page(inode, page, get_block, wbc, handler);
2872 EXPORT_SYMBOL(block_write_full_page_endio);
2875 * The generic ->writepage function for buffer-backed address_spaces
2877 int block_write_full_page(struct page *page, get_block_t *get_block,
2878 struct writeback_control *wbc)
2880 return block_write_full_page_endio(page, get_block, wbc,
2881 end_buffer_async_write);
2883 EXPORT_SYMBOL(block_write_full_page);
2885 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2886 get_block_t *get_block)
2888 struct buffer_head tmp;
2889 struct inode *inode = mapping->host;
2892 tmp.b_size = 1 << inode->i_blkbits;
2893 get_block(inode, block, &tmp, 0);
2894 return tmp.b_blocknr;
2896 EXPORT_SYMBOL(generic_block_bmap);
2898 static void end_bio_bh_io_sync(struct bio *bio, int err)
2900 struct buffer_head *bh = bio->bi_private;
2902 if (err == -EOPNOTSUPP) {
2903 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2906 if (unlikely(test_bit(BIO_QUIET, &bio->bi_flags)))
2907 set_bit(BH_Quiet, &bh->b_state);
2909 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2913 int submit_bh(int rw, struct buffer_head * bh)
2918 BUG_ON(!buffer_locked(bh));
2919 BUG_ON(!buffer_mapped(bh));
2920 BUG_ON(!bh->b_end_io);
2921 BUG_ON(buffer_delay(bh));
2922 BUG_ON(buffer_unwritten(bh));
2925 * Only clear out a write error when rewriting
2927 if (test_set_buffer_req(bh) && (rw & WRITE))
2928 clear_buffer_write_io_error(bh);
2931 * from here on down, it's all bio -- do the initial mapping,
2932 * submit_bio -> generic_make_request may further map this bio around
2934 bio = bio_alloc(GFP_NOIO, 1);
2936 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2937 bio->bi_bdev = bh->b_bdev;
2938 bio->bi_io_vec[0].bv_page = bh->b_page;
2939 bio->bi_io_vec[0].bv_len = bh->b_size;
2940 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2944 bio->bi_size = bh->b_size;
2946 bio->bi_end_io = end_bio_bh_io_sync;
2947 bio->bi_private = bh;
2950 submit_bio(rw, bio);
2952 if (bio_flagged(bio, BIO_EOPNOTSUPP))
2958 EXPORT_SYMBOL(submit_bh);
2961 * ll_rw_block: low-level access to block devices (DEPRECATED)
2962 * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
2963 * @nr: number of &struct buffer_heads in the array
2964 * @bhs: array of pointers to &struct buffer_head
2966 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2967 * requests an I/O operation on them, either a %READ or a %WRITE. The third
2968 * %READA option is described in the documentation for generic_make_request()
2969 * which ll_rw_block() calls.
2971 * This function drops any buffer that it cannot get a lock on (with the
2972 * BH_Lock state bit), any buffer that appears to be clean when doing a write
2973 * request, and any buffer that appears to be up-to-date when doing a read
2974 * request. Further it marks as clean buffers that are processed for
2975 * writing (the buffer cache won't assume that they are actually clean
2976 * until the buffer gets unlocked).
2978 * ll_rw_block sets b_end_io to a simple completion handler that marks
2979 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes any waiters.
2982 * All of the buffers must be for the same device, and must also be a
2983 * multiple of the current approved size for the device.
2985 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2989 for (i = 0; i < nr; i++) {
2990 struct buffer_head *bh = bhs[i];
2992 if (!trylock_buffer(bh))
2995 if (test_clear_buffer_dirty(bh)) {
2996 bh->b_end_io = end_buffer_write_sync;
2998 submit_bh(WRITE, bh);
3002 if (!buffer_uptodate(bh)) {
3003 bh->b_end_io = end_buffer_read_sync;
3012 EXPORT_SYMBOL(ll_rw_block);
3014 void write_dirty_buffer(struct buffer_head *bh, int rw)
3017 if (!test_clear_buffer_dirty(bh)) {
3021 bh->b_end_io = end_buffer_write_sync;
3025 EXPORT_SYMBOL(write_dirty_buffer);
3028 * For a data-integrity writeout, we need to wait upon any in-progress I/O
3029 * and then start new I/O and then wait upon it. The caller must have a ref on the buffer.
3032 int __sync_dirty_buffer(struct buffer_head *bh, int rw)
3036 WARN_ON(atomic_read(&bh->b_count) < 1);
3038 if (test_clear_buffer_dirty(bh)) {
3040 bh->b_end_io = end_buffer_write_sync;
3041 ret = submit_bh(rw, bh);
3043 if (!ret && !buffer_uptodate(bh))
3050 EXPORT_SYMBOL(__sync_dirty_buffer);
3052 int sync_dirty_buffer(struct buffer_head *bh)
3054 return __sync_dirty_buffer(bh, WRITE_SYNC);
3056 EXPORT_SYMBOL(sync_dirty_buffer);
3059 * try_to_free_buffers() checks if all the buffers on this particular page
3060 * are unused, and releases them if so.
3062 * Exclusion against try_to_free_buffers may be obtained by either
3063 * locking the page or by holding its mapping's private_lock.
3065 * If the page is dirty but all the buffers are clean then we need to
3066 * be sure to mark the page clean as well. This is because the page
3067 * may be against a block device, and a later reattachment of buffers
3068 * to a dirty page will set *all* buffers dirty. Which would corrupt
3069 * filesystem data on the same device.
3071 * The same applies to regular filesystem pages: if all the buffers are
3072 * clean then we set the page clean and proceed. To do that, we require
3073 * total exclusion from __set_page_dirty_buffers(). That is obtained with private_lock.
3076 * try_to_free_buffers() is non-blocking.
3078 static inline int buffer_busy(struct buffer_head *bh)
3080 return atomic_read(&bh->b_count) |
3081 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
3085 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
3087 struct buffer_head *head = page_buffers(page);
3088 struct buffer_head *bh;
3092 if (buffer_write_io_error(bh) && page->mapping)
3093 set_bit(AS_EIO, &page->mapping->flags);
3094 if (buffer_busy(bh))
3096 bh = bh->b_this_page;
3097 } while (bh != head);
3100 struct buffer_head *next = bh->b_this_page;
3102 if (bh->b_assoc_map)
3103 __remove_assoc_queue(bh);
3105 } while (bh != head);
3106 *buffers_to_free = head;
3107 __clear_page_buffers(page);
3113 int try_to_free_buffers(struct page *page)
3115 struct address_space * const mapping = page->mapping;
3116 struct buffer_head *buffers_to_free = NULL;
3119 BUG_ON(!PageLocked(page));
3120 if (PageWriteback(page))
3123 if (mapping == NULL) { /* can this still happen? */
3124 ret = drop_buffers(page, &buffers_to_free);
3128 spin_lock(&mapping->private_lock);
3129 ret = drop_buffers(page, &buffers_to_free);
3132 * If the filesystem writes its buffers by hand (eg ext3)
3133 * then we can have clean buffers against a dirty page. We
3134 * clean the page here; otherwise the VM will never notice
3135 * that the filesystem did any IO at all.
3137 * Also, during truncate, discard_buffer will have marked all
3138 * the page's buffers clean. We discover that here and clean the page also.
3141 * private_lock must be held over this entire operation in order
3142 * to synchronise against __set_page_dirty_buffers and prevent the
3143 * dirty bit from being lost.
3146 cancel_dirty_page(page, PAGE_CACHE_SIZE);
3147 spin_unlock(&mapping->private_lock);
3149 if (buffers_to_free) {
3150 struct buffer_head *bh = buffers_to_free;
3153 struct buffer_head *next = bh->b_this_page;
3154 free_buffer_head(bh);
3156 } while (bh != buffers_to_free);
3160 EXPORT_SYMBOL(try_to_free_buffers);
3163 * There are no bdflush tunables left. But distributions are
3164 * still running obsolete flush daemons, so we terminate them here.
3166 * Use of bdflush() is deprecated and will be removed in a future kernel.
3167 * The `flush-X' kernel threads fully replace bdflush daemons and this call.
3169 SYSCALL_DEFINE2(bdflush, int, func, long, data)
3171 static int msg_count;
3173 if (!capable(CAP_SYS_ADMIN))
3176 if (msg_count < 5) {
3179 "warning: process `%s' used the obsolete bdflush"
3180 " system call\n", current->comm);
3181 printk(KERN_INFO "Fix your initscripts?\n");
3190 * Buffer-head allocation
3192 static struct kmem_cache *bh_cachep;
3195 * Once the number of bh's in the machine exceeds this level, we start
3196 * stripping them in writeback.
3198 static int max_buffer_heads;
3200 int buffer_heads_over_limit;
3202 struct bh_accounting {
3203 int nr; /* Number of live bh's */
3204 int ratelimit; /* Limit cacheline bouncing */
3207 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3209 static void recalc_bh_state(void)
3214 if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
3216 __this_cpu_write(bh_accounting.ratelimit, 0);
3217 for_each_online_cpu(i)
3218 tot += per_cpu(bh_accounting, i).nr;
3219 buffer_heads_over_limit = (tot > max_buffer_heads);
3222 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3224 struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
3226 INIT_LIST_HEAD(&ret->b_assoc_buffers);
3228 __this_cpu_inc(bh_accounting.nr);
3234 EXPORT_SYMBOL(alloc_buffer_head);
3236 void free_buffer_head(struct buffer_head *bh)
3238 BUG_ON(!list_empty(&bh->b_assoc_buffers));
3239 kmem_cache_free(bh_cachep, bh);
3241 __this_cpu_dec(bh_accounting.nr);
3245 EXPORT_SYMBOL(free_buffer_head);
3247 static void buffer_exit_cpu(int cpu)
3250 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3252 for (i = 0; i < BH_LRU_SIZE; i++) {
3256 this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
3257 per_cpu(bh_accounting, cpu).nr = 0;
3260 static int buffer_cpu_notify(struct notifier_block *self,
3261 unsigned long action, void *hcpu)
3263 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
3264 buffer_exit_cpu((unsigned long)hcpu);
3269 * bh_uptodate_or_lock - Test whether the buffer is uptodate
3270 * @bh: struct buffer_head
3272 * Return true if the buffer is up-to-date and false,
3273 * with the buffer locked, if not.
3275 int bh_uptodate_or_lock(struct buffer_head *bh)
3277 if (!buffer_uptodate(bh)) {
3279 if (!buffer_uptodate(bh))
3285 EXPORT_SYMBOL(bh_uptodate_or_lock);
3288 * bh_submit_read - Submit a locked buffer for reading
3289 * @bh: struct buffer_head
3291 * Returns zero on success and -EIO on error.
3293 int bh_submit_read(struct buffer_head *bh)
3295 BUG_ON(!buffer_locked(bh));
3297 if (buffer_uptodate(bh)) {
3303 bh->b_end_io = end_buffer_read_sync;
3304 submit_bh(READ, bh);
3306 if (buffer_uptodate(bh))
3310 EXPORT_SYMBOL(bh_submit_read);
3312 void __init buffer_init(void)
3316 bh_cachep = kmem_cache_create("buffer_head",
3317 sizeof(struct buffer_head), 0,
3318 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3323 * Limit the bh occupancy to 10% of ZONE_NORMAL
3325 nrpages = (nr_free_buffer_pages() * 10) / 100;
3326 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3327 hotcpu_notifier(buffer_cpu_notify, 0);