4 * Copyright (C) 1991, 1992, 2002 Linus Torvalds
8 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
10 * Removed a lot of unnecessary code and simplified things now that
11 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
13 * Speed up hash, lru, and free list operations. Use gfp() for allocating
14 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
16 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
18 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
21 #include <linux/kernel.h>
22 #include <linux/syscalls.h>
25 #include <linux/percpu.h>
26 #include <linux/slab.h>
27 #include <linux/capability.h>
28 #include <linux/blkdev.h>
29 #include <linux/file.h>
30 #include <linux/quotaops.h>
31 #include <linux/highmem.h>
32 #include <linux/module.h>
33 #include <linux/writeback.h>
34 #include <linux/hash.h>
35 #include <linux/suspend.h>
36 #include <linux/buffer_head.h>
37 #include <linux/task_io_accounting_ops.h>
38 #include <linux/bio.h>
39 #include <linux/notifier.h>
40 #include <linux/cpu.h>
41 #include <linux/bitops.h>
42 #include <linux/mpage.h>
43 #include <linux/bit_spinlock.h>
44 #include <linux/cleancache.h>
46 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
48 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
51 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
53 bh->b_end_io = handler;
54 bh->b_private = private;
56 EXPORT_SYMBOL(init_buffer);
58 static int sleep_on_buffer(void *word)
64 void __lock_buffer(struct buffer_head *bh)
66 wait_on_bit_lock(&bh->b_state, BH_Lock, sleep_on_buffer,
67 TASK_UNINTERRUPTIBLE);
69 EXPORT_SYMBOL(__lock_buffer);
71 void unlock_buffer(struct buffer_head *bh)
73 clear_bit_unlock(BH_Lock, &bh->b_state);
74 smp_mb__after_clear_bit();
75 wake_up_bit(&bh->b_state, BH_Lock);
77 EXPORT_SYMBOL(unlock_buffer);
80 * Block until a buffer comes unlocked. This doesn't stop it
81 * from becoming locked again - you have to lock it yourself
82 * if you want to preserve its state.
84 void __wait_on_buffer(struct buffer_head * bh)
86 wait_on_bit(&bh->b_state, BH_Lock, sleep_on_buffer, TASK_UNINTERRUPTIBLE);
88 EXPORT_SYMBOL(__wait_on_buffer);
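/*
 * Illustrative sketch (not part of the original file): the usual pattern for
 * a caller that has queued a read with ll_rw_block() and needs the result
 * before going on.  wait_on_buffer() only waits for the lock to drop, so the
 * uptodate bit must still be re-checked afterwards:
 *
 *	ll_rw_block(READ, 1, &bh);
 *	wait_on_buffer(bh);
 *	if (!buffer_uptodate(bh))
 *		err = -EIO;
 */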
91 __clear_page_buffers(struct page *page)
93 ClearPagePrivate(page);
94 set_page_private(page, 0);
95 page_cache_release(page);
99 static int quiet_error(struct buffer_head *bh)
101 if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
107 static void buffer_io_error(struct buffer_head *bh)
109 char b[BDEVNAME_SIZE];
110 printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
111 bdevname(bh->b_bdev, b),
112 (unsigned long long)bh->b_blocknr);
116 * End-of-IO handler helper function which does not touch the bh after unlocking it.
118 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
119 * a race there is benign: unlock_buffer() only uses the bh's address for
120 * hashing after unlocking the buffer, so it doesn't actually touch the bh itself.
123 static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
126 set_buffer_uptodate(bh);
128 /* This happens, due to failed READA attempts. */
129 clear_buffer_uptodate(bh);
135 * Default synchronous end-of-IO handler.. Just mark it up-to-date and
136 * unlock the buffer. This is what ll_rw_block uses too.
138 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
140 __end_buffer_read_notouch(bh, uptodate);
143 EXPORT_SYMBOL(end_buffer_read_sync);
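/*
 * Illustrative sketch (not part of the original file): issuing a single
 * synchronous read by hand with this handler, much as __bread_slow() below
 * does.  The extra reference is taken because the completion handler drops
 * one when the I/O finishes:
 *
 *	get_bh(bh);
 *	bh->b_end_io = end_buffer_read_sync;
 *	submit_bh(READ, bh);
 *	wait_on_buffer(bh);
 *	if (!buffer_uptodate(bh))
 *		err = -EIO;
 */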
145 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
147 char b[BDEVNAME_SIZE];
150 set_buffer_uptodate(bh);
152 if (!quiet_error(bh)) {
154 printk(KERN_WARNING "lost page write due to "
156 bdevname(bh->b_bdev, b));
158 set_buffer_write_io_error(bh);
159 clear_buffer_uptodate(bh);
164 EXPORT_SYMBOL(end_buffer_write_sync);
167 * Various filesystems appear to want __find_get_block to be non-blocking.
168 * But it's the page lock which protects the buffers. To get around this,
169 * we get exclusion from try_to_free_buffers with the blockdev mapping's private_lock.
172 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
173 * may be quite high. This code could TryLock the page, and if that
174 * succeeds, there is no need to take private_lock. (But if
175 * private_lock is contended then so is mapping->tree_lock).
177 static struct buffer_head *
178 __find_get_block_slow(struct block_device *bdev, sector_t block)
180 struct inode *bd_inode = bdev->bd_inode;
181 struct address_space *bd_mapping = bd_inode->i_mapping;
182 struct buffer_head *ret = NULL;
184 struct buffer_head *bh;
185 struct buffer_head *head;
189 index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
190 page = find_get_page(bd_mapping, index);
194 spin_lock(&bd_mapping->private_lock);
195 if (!page_has_buffers(page))
197 head = page_buffers(page);
200 if (!buffer_mapped(bh))
202 else if (bh->b_blocknr == block) {
207 bh = bh->b_this_page;
208 } while (bh != head);
210 /* we might be here because some of the buffers on this page are
211 * not mapped. This is due to various races between
212 * file io on the block device and getblk. It gets dealt with
213 * elsewhere, don't buffer_error if we had some unmapped buffers
216 char b[BDEVNAME_SIZE];
218 printk("__find_get_block_slow() failed. "
219 "block=%llu, b_blocknr=%llu\n",
220 (unsigned long long)block,
221 (unsigned long long)bh->b_blocknr);
222 printk("b_state=0x%08lx, b_size=%zu\n",
223 bh->b_state, bh->b_size);
224 printk("device %s blocksize: %d\n", bdevname(bdev, b),
225 1 << bd_inode->i_blkbits);
228 spin_unlock(&bd_mapping->private_lock);
229 page_cache_release(page);
234 /* If invalidate_buffers() will trash dirty buffers, it means some kind
235 of fs corruption is going on. Trashing dirty data always imply losing
236 information that was supposed to be just stored on the physical layer
239 Thus invalidate_buffers in general usage is not allowed to trash
240 dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
241 be preserved. These buffers are simply skipped.
243 We also skip buffers which are still in use. For example this can
244 happen if a userspace program is reading the block device.
246 NOTE: in the case where the user removed a removable-media disk even if
247 there's still dirty data not synced to disk (due to a bug in the device driver
248 or to an error of the user), by not destroying the dirty buffers we could
249 generate corruption also on the next media inserted; thus a parameter is
250 necessary to handle this case in the safest way possible (trying
251 not to corrupt the newly inserted disk with the data belonging to
252 the old, now corrupted, disk). Also for a ramdisk the natural thing
253 to do in order to release the ramdisk memory is to destroy dirty buffers.
255 These are two special cases. Normal usage implies that the device driver
256 issues a sync on the device (without waiting for I/O completion) and
257 then an invalidate_buffers call that doesn't trash dirty buffers.
259 For handling cache coherency with the blkdev pagecache the 'update' case
260 has been introduced. It is needed to re-read from disk any pinned
261 buffer. NOTE: re-reading from disk is destructive so we can do it only
262 when we assume nobody is changing the buffercache under our I/O and when
263 we think the disk contains more recent information than the buffercache.
264 The update == 1 pass marks the buffers we need to update, the update == 2
265 pass does the actual I/O. */
266 void invalidate_bdev(struct block_device *bdev)
268 struct address_space *mapping = bdev->bd_inode->i_mapping;
270 if (mapping->nrpages == 0)
273 invalidate_bh_lrus();
274 lru_add_drain_all(); /* make sure all lru add caches are flushed */
275 invalidate_mapping_pages(mapping, 0, -1);
276 /* 99% of the time, we don't need to flush the cleancache on the bdev.
277 * But, for the strange corners, let's be cautious
279 cleancache_flush_inode(mapping);
281 EXPORT_SYMBOL(invalidate_bdev);
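/*
 * Illustrative sketch (not part of the original file): the "normal usage"
 * described in the comment above - sync the device first, then invalidate,
 * so that no dirty buffers need to be trashed:
 *
 *	sync_blockdev(bdev);
 *	invalidate_bdev(bdev);
 */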
284 * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
286 static void free_more_memory(void)
291 wakeup_flusher_threads(1024, WB_REASON_FREE_MORE_MEM);
294 for_each_online_node(nid) {
295 (void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
296 gfp_zone(GFP_NOFS), NULL,
299 try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
305 * I/O completion handler for block_read_full_page() - pages
306 * which come unlocked at the end of I/O.
308 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
311 struct buffer_head *first;
312 struct buffer_head *tmp;
314 int page_uptodate = 1;
316 BUG_ON(!buffer_async_read(bh));
320 set_buffer_uptodate(bh);
322 clear_buffer_uptodate(bh);
323 if (!quiet_error(bh))
329 * Be _very_ careful from here on. Bad things can happen if
330 * two buffer heads end IO at almost the same time and both
331 * decide that the page is now completely done.
333 first = page_buffers(page);
334 local_irq_save(flags);
335 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
336 clear_buffer_async_read(bh);
340 if (!buffer_uptodate(tmp))
342 if (buffer_async_read(tmp)) {
343 BUG_ON(!buffer_locked(tmp));
346 tmp = tmp->b_this_page;
348 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
349 local_irq_restore(flags);
352 * If none of the buffers had errors and they are all
353 * uptodate then we can set the page uptodate.
355 if (page_uptodate && !PageError(page))
356 SetPageUptodate(page);
361 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
362 local_irq_restore(flags);
367 * Completion handler for block_write_full_page() - pages which are unlocked
368 * during I/O, and which have PageWriteback cleared upon I/O completion.
370 void end_buffer_async_write(struct buffer_head *bh, int uptodate)
372 char b[BDEVNAME_SIZE];
374 struct buffer_head *first;
375 struct buffer_head *tmp;
378 BUG_ON(!buffer_async_write(bh));
382 set_buffer_uptodate(bh);
384 if (!quiet_error(bh)) {
386 printk(KERN_WARNING "lost page write due to "
388 bdevname(bh->b_bdev, b));
390 set_bit(AS_EIO, &page->mapping->flags);
391 set_buffer_write_io_error(bh);
392 clear_buffer_uptodate(bh);
396 first = page_buffers(page);
397 local_irq_save(flags);
398 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
400 clear_buffer_async_write(bh);
402 tmp = bh->b_this_page;
404 if (buffer_async_write(tmp)) {
405 BUG_ON(!buffer_locked(tmp));
408 tmp = tmp->b_this_page;
410 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
411 local_irq_restore(flags);
412 end_page_writeback(page);
416 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
417 local_irq_restore(flags);
420 EXPORT_SYMBOL(end_buffer_async_write);
423 * If a page's buffers are under async read-in (end_buffer_async_read
424 * completion) then there is a possibility that another thread of
425 * control could lock one of the buffers after it has completed
426 * but while some of the other buffers have not completed. This
427 * locked buffer would confuse end_buffer_async_read() into not unlocking
428 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
429 * that this buffer is not under async I/O.
431 * The page comes unlocked when it has no locked buffer_async buffers left.
434 * PageLocked prevents anyone from starting new async I/O reads against any of the page's buffers.
437 * PageWriteback is used to prevent simultaneous writeout of the same page.
440 * PageLocked prevents anyone from starting writeback of a page which is
441 * under read I/O (PageWriteback is only ever set against a locked page).
443 static void mark_buffer_async_read(struct buffer_head *bh)
445 bh->b_end_io = end_buffer_async_read;
446 set_buffer_async_read(bh);
449 static void mark_buffer_async_write_endio(struct buffer_head *bh,
450 bh_end_io_t *handler)
452 bh->b_end_io = handler;
453 set_buffer_async_write(bh);
456 void mark_buffer_async_write(struct buffer_head *bh)
458 mark_buffer_async_write_endio(bh, end_buffer_async_write);
460 EXPORT_SYMBOL(mark_buffer_async_write);
464 * fs/buffer.c contains helper functions for buffer-backed address space's
465 * fsync functions. A common requirement for buffer-based filesystems is
466 * that certain data from the backing blockdev needs to be written out for
467 * a successful fsync(). For example, ext2 indirect blocks need to be
468 * written back and waited upon before fsync() returns.
470 * The functions mark_buffer_dirty_inode(), sync_mapping_buffers(),
471 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
472 * management of a list of dependent buffers at ->i_mapping->private_list.
474 * Locking is a little subtle: try_to_free_buffers() will remove buffers
475 * from their controlling inode's queue when they are being freed. But
476 * try_to_free_buffers() will be operating against the *blockdev* mapping
477 * at the time, not against the S_ISREG file which depends on those buffers.
478 * So the locking for private_list is via the private_lock in the address_space
479 * which backs the buffers. Which is different from the address_space
480 * against which the buffers are listed. So for a particular address_space,
481 * mapping->private_lock does *not* protect mapping->private_list! In fact,
482 * mapping->private_list will always be protected by the backing blockdev's ->private_lock.
485 * Which introduces a requirement: all buffers on an address_space's
486 * ->private_list must be from the same address_space: the blockdev's.
488 * address_spaces which do not place buffers at ->private_list via these
489 * utility functions are free to use private_lock and private_list for
490 * whatever they want. The only requirement is that list_empty(private_list)
491 * be true at clear_inode() time.
493 * FIXME: clear_inode should not call invalidate_inode_buffers(). The
494 * filesystems should do that. invalidate_inode_buffers() should just go
495 * BUG_ON(!list_empty).
497 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
498 * take an address_space, not an inode. And it should be called
499 * mark_buffer_dirty_fsync() to clearly define why those buffers are being dirtied.
502 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
503 * list if it is already on a list. Because if the buffer is on a list,
504 * it *must* already be on the right one. If not, the filesystem is being
505 * silly. This will save a ton of locking. But first we have to ensure
506 * that buffers are taken *off* the old inode's list when they are freed
507 * (presumably in truncate). That requires careful auditing of all
508 * filesystems (do it inside bforget()). It could also be done by bringing b_inode back.
513 * The buffer's backing address_space's private_lock must be held
515 static void __remove_assoc_queue(struct buffer_head *bh)
517 list_del_init(&bh->b_assoc_buffers);
518 WARN_ON(!bh->b_assoc_map);
519 if (buffer_write_io_error(bh))
520 set_bit(AS_EIO, &bh->b_assoc_map->flags);
521 bh->b_assoc_map = NULL;
524 int inode_has_buffers(struct inode *inode)
526 return !list_empty(&inode->i_data.private_list);
530 * osync is designed to support O_SYNC io. It waits synchronously for
531 * all already-submitted IO to complete, but does not queue any new
532 * writes to the disk.
534 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
535 * you dirty the buffers, and then use osync_inode_buffers to wait for
536 * completion. Any other dirty buffers which are not yet queued for
537 * write will not be flushed to disk by the osync.
539 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
541 struct buffer_head *bh;
547 list_for_each_prev(p, list) {
549 if (buffer_locked(bh)) {
553 if (!buffer_uptodate(bh))
564 static void do_thaw_one(struct super_block *sb, void *unused)
566 char b[BDEVNAME_SIZE];
567 while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
568 printk(KERN_WARNING "Emergency Thaw on %s\n",
569 bdevname(sb->s_bdev, b));
572 static void do_thaw_all(struct work_struct *work)
574 iterate_supers(do_thaw_one, NULL);
576 printk(KERN_WARNING "Emergency Thaw complete\n");
580 * emergency_thaw_all -- forcibly thaw every frozen filesystem
582 * Used for emergency unfreeze of all filesystems via SysRq
584 void emergency_thaw_all(void)
586 struct work_struct *work;
588 work = kmalloc(sizeof(*work), GFP_ATOMIC);
590 INIT_WORK(work, do_thaw_all);
596 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
597 * @mapping: the mapping which wants those buffers written
599 * Starts I/O against the buffers at mapping->private_list, and waits upon that.
602 * Basically, this is a convenience function for fsync().
603 * @mapping is a file or directory which needs those buffers to be written for
604 * a successful fsync().
606 int sync_mapping_buffers(struct address_space *mapping)
608 struct address_space *buffer_mapping = mapping->assoc_mapping;
610 if (buffer_mapping == NULL || list_empty(&mapping->private_list))
613 return fsync_buffers_list(&buffer_mapping->private_lock,
614 &mapping->private_list);
616 EXPORT_SYMBOL(sync_mapping_buffers);
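/*
 * Illustrative sketch (not part of the original file): a minimal ->fsync()
 * for a buffer-backed filesystem, along the lines of what generic_file_fsync()
 * does.  examplefs_fsync is a hypothetical name:
 *
 *	static int examplefs_fsync(struct file *file, loff_t start,
 *				   loff_t end, int datasync)
 *	{
 *		struct inode *inode = file->f_mapping->host;
 *		int err, ret;
 *
 *		ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
 *		err = sync_mapping_buffers(inode->i_mapping);
 *		if (!ret)
 *			ret = err;
 *		return ret;
 *	}
 */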
619 * Called when we've recently written block `bblock', and it is known that
620 * `bblock' was for a buffer_boundary() buffer. This means that the block at
621 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
622 * dirty, schedule it for IO. So that indirects merge nicely with their data.
624 void write_boundary_block(struct block_device *bdev,
625 sector_t bblock, unsigned blocksize)
627 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
629 if (buffer_dirty(bh))
630 ll_rw_block(WRITE, 1, &bh);
635 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
637 struct address_space *mapping = inode->i_mapping;
638 struct address_space *buffer_mapping = bh->b_page->mapping;
640 mark_buffer_dirty(bh);
641 if (!mapping->assoc_mapping) {
642 mapping->assoc_mapping = buffer_mapping;
644 BUG_ON(mapping->assoc_mapping != buffer_mapping);
646 if (!bh->b_assoc_map) {
647 spin_lock(&buffer_mapping->private_lock);
648 list_move_tail(&bh->b_assoc_buffers,
649 &mapping->private_list);
650 bh->b_assoc_map = mapping;
651 spin_unlock(&buffer_mapping->private_lock);
654 EXPORT_SYMBOL(mark_buffer_dirty_inode);
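/*
 * Illustrative sketch (not part of the original file): a filesystem that has
 * just updated a metadata block on behalf of an inode ties the buffer to the
 * inode so that sync_mapping_buffers() finds it at fsync time.  The block
 * number and slot below are hypothetical:
 *
 *	bh = sb_bread(inode->i_sb, indirect_block);
 *	if (!bh)
 *		return -EIO;
 *	((__le32 *)bh->b_data)[slot] = cpu_to_le32(new_block);
 *	mark_buffer_dirty_inode(bh, inode);
 *	brelse(bh);
 */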
657 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode dirty.
660 * If warn is true, then emit a warning if the page is not uptodate and has
661 * not been truncated.
663 static void __set_page_dirty(struct page *page,
664 struct address_space *mapping, int warn)
666 spin_lock_irq(&mapping->tree_lock);
667 if (page->mapping) { /* Race with truncate? */
668 WARN_ON_ONCE(warn && !PageUptodate(page));
669 account_page_dirtied(page, mapping);
670 radix_tree_tag_set(&mapping->page_tree,
671 page_index(page), PAGECACHE_TAG_DIRTY);
673 spin_unlock_irq(&mapping->tree_lock);
674 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
678 * Add a page to the dirty page list.
680 * It is a sad fact of life that this function is called from several places
681 * deeply under spinlocking. It may not sleep.
683 * If the page has buffers, the uptodate buffers are set dirty, to preserve
684 * dirty-state coherency between the page and the buffers. If the page does
685 * not have buffers then when they are later attached they will all be set
688 * The buffers are dirtied before the page is dirtied. There's a small race
689 * window in which a writepage caller may see the page cleanness but not the
690 * buffer dirtiness. That's fine. If this code were to set the page dirty
691 * before the buffers, a concurrent writepage caller could clear the page dirty
692 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
693 * page on the dirty page list.
695 * We use private_lock to lock against try_to_free_buffers while using the
696 * page's buffer list. Also use this to protect against clean buffers being
697 * added to the page after it was set dirty.
699 * FIXME: may need to call ->reservepage here as well. That's rather up to the
700 * address_space though.
702 int __set_page_dirty_buffers(struct page *page)
705 struct address_space *mapping = page_mapping(page);
707 if (unlikely(!mapping))
708 return !TestSetPageDirty(page);
710 spin_lock(&mapping->private_lock);
711 if (page_has_buffers(page)) {
712 struct buffer_head *head = page_buffers(page);
713 struct buffer_head *bh = head;
716 set_buffer_dirty(bh);
717 bh = bh->b_this_page;
718 } while (bh != head);
720 newly_dirty = !TestSetPageDirty(page);
721 spin_unlock(&mapping->private_lock);
724 __set_page_dirty(page, mapping, 1);
727 EXPORT_SYMBOL(__set_page_dirty_buffers);
730 * Write out and wait upon a list of buffers.
732 * We have conflicting pressures: we want to make sure that all
733 * initially dirty buffers get waited on, but that any subsequently
734 * dirtied buffers don't. After all, we don't want fsync to last
735 * forever if somebody is actively writing to the file.
737 * Do this in two main stages: first we copy dirty buffers to a
738 * temporary inode list, queueing the writes as we go. Then we clean
739 * up, waiting for those writes to complete.
741 * During this second stage, any subsequent updates to the file may end
742 * up refiling the buffer on the original inode's dirty list again, so
743 * there is a chance we will end up with a buffer queued for write but
744 * not yet completed on that list. So, as a final cleanup we go through
745 * the osync code to catch these locked, dirty buffers without requeuing
746 * any newly dirty buffers for write.
748 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
750 struct buffer_head *bh;
751 struct list_head tmp;
752 struct address_space *mapping;
754 struct blk_plug plug;
756 INIT_LIST_HEAD(&tmp);
757 blk_start_plug(&plug);
760 while (!list_empty(list)) {
761 bh = BH_ENTRY(list->next);
762 mapping = bh->b_assoc_map;
763 __remove_assoc_queue(bh);
764 /* Avoid race with mark_buffer_dirty_inode() which does
765 * a lockless check and we rely on seeing the dirty bit */
767 if (buffer_dirty(bh) || buffer_locked(bh)) {
768 list_add(&bh->b_assoc_buffers, &tmp);
769 bh->b_assoc_map = mapping;
770 if (buffer_dirty(bh)) {
774 * Ensure any pending I/O completes so that
775 * write_dirty_buffer() actually writes the
776 * current contents - it is a noop if I/O is
777 * still in flight on potentially older
780 write_dirty_buffer(bh, WRITE_SYNC);
783 * Kick off IO for the previous mapping. Note
784 * that we will not run the very last mapping,
785 * wait_on_buffer() will do that for us
786 * through sync_buffer().
795 blk_finish_plug(&plug);
798 while (!list_empty(&tmp)) {
799 bh = BH_ENTRY(tmp.prev);
801 mapping = bh->b_assoc_map;
802 __remove_assoc_queue(bh);
803 /* Avoid race with mark_buffer_dirty_inode() which does
804 * a lockless check and we rely on seeing the dirty bit */
806 if (buffer_dirty(bh)) {
807 list_add(&bh->b_assoc_buffers,
808 &mapping->private_list);
809 bh->b_assoc_map = mapping;
813 if (!buffer_uptodate(bh))
820 err2 = osync_buffers_list(lock, list);
828 * Invalidate any and all dirty buffers on a given inode. We are
829 * probably unmounting the fs, but that doesn't mean we have already
830 * done a sync(). Just drop the buffers from the inode list.
832 * NOTE: we take the inode's blockdev's mapping's private_lock. Which
833 * assumes that all the buffers are against the blockdev. Not true for reiserfs.
836 void invalidate_inode_buffers(struct inode *inode)
838 if (inode_has_buffers(inode)) {
839 struct address_space *mapping = &inode->i_data;
840 struct list_head *list = &mapping->private_list;
841 struct address_space *buffer_mapping = mapping->assoc_mapping;
843 spin_lock(&buffer_mapping->private_lock);
844 while (!list_empty(list))
845 __remove_assoc_queue(BH_ENTRY(list->next));
846 spin_unlock(&buffer_mapping->private_lock);
849 EXPORT_SYMBOL(invalidate_inode_buffers);
852 * Remove any clean buffers from the inode's buffer list. This is called
853 * when we're trying to free the inode itself. Those buffers can pin it.
855 * Returns true if all buffers were removed.
857 int remove_inode_buffers(struct inode *inode)
861 if (inode_has_buffers(inode)) {
862 struct address_space *mapping = &inode->i_data;
863 struct list_head *list = &mapping->private_list;
864 struct address_space *buffer_mapping = mapping->assoc_mapping;
866 spin_lock(&buffer_mapping->private_lock);
867 while (!list_empty(list)) {
868 struct buffer_head *bh = BH_ENTRY(list->next);
869 if (buffer_dirty(bh)) {
873 __remove_assoc_queue(bh);
875 spin_unlock(&buffer_mapping->private_lock);
881 * Create the appropriate buffers when given a page for data area and
882 * the size of each buffer.. Use the bh->b_this_page linked list to
883 * follow the buffers created. Return NULL if unable to create more buffers.
886 * The retry flag is used to differentiate async IO (paging, swapping)
887 * which may not fail from ordinary buffer allocations.
889 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
892 struct buffer_head *bh, *head;
898 while ((offset -= size) >= 0) {
899 bh = alloc_buffer_head(GFP_NOFS);
904 bh->b_this_page = head;
909 atomic_set(&bh->b_count, 0);
912 /* Link the buffer to its page */
913 set_bh_page(bh, page, offset);
915 init_buffer(bh, NULL, NULL);
919 * In case anything failed, we just free everything we got.
925 head = head->b_this_page;
926 free_buffer_head(bh);
931 * Return failure for non-async IO requests. Async IO requests
932 * are not allowed to fail, so we have to wait until buffer heads
933 * become available. But we don't want tasks sleeping with
934 * partially complete buffers, so all were released above.
939 /* We're _really_ low on memory. Now we just
940 * wait for old buffer heads to become free due to
941 * finishing IO. Since this is an async request and
942 * the reserve list is empty, we're sure there are
943 * async buffer heads in use.
948 EXPORT_SYMBOL_GPL(alloc_page_buffers);
951 link_dev_buffers(struct page *page, struct buffer_head *head)
953 struct buffer_head *bh, *tail;
958 bh = bh->b_this_page;
960 tail->b_this_page = head;
961 attach_page_buffers(page, head);
965 * Initialise the state of a blockdev page's buffers.
968 init_page_buffers(struct page *page, struct block_device *bdev,
969 sector_t block, int size)
971 struct buffer_head *head = page_buffers(page);
972 struct buffer_head *bh = head;
973 int uptodate = PageUptodate(page);
974 sector_t end_block = blkdev_max_block(I_BDEV(bdev->bd_inode));
977 if (!buffer_mapped(bh)) {
978 init_buffer(bh, NULL, NULL);
980 bh->b_blocknr = block;
982 set_buffer_uptodate(bh);
983 if (block < end_block)
984 set_buffer_mapped(bh);
987 bh = bh->b_this_page;
988 } while (bh != head);
992 * Create the page-cache page that contains the requested block.
994 * This is used purely for blockdev mappings.
997 grow_dev_page(struct block_device *bdev, sector_t block,
998 pgoff_t index, int size)
1000 struct inode *inode = bdev->bd_inode;
1002 struct buffer_head *bh;
1004 page = find_or_create_page(inode->i_mapping, index,
1005 (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
1009 BUG_ON(!PageLocked(page));
1011 if (page_has_buffers(page)) {
1012 bh = page_buffers(page);
1013 if (bh->b_size == size) {
1014 init_page_buffers(page, bdev, block, size);
1017 if (!try_to_free_buffers(page))
1022 * Allocate some buffers for this page
1024 bh = alloc_page_buffers(page, size, 0);
1029 * Link the page to the buffers and initialise them. Take the
1030 * lock to be atomic wrt __find_get_block(), which does not
1031 * run under the page lock.
1033 spin_lock(&inode->i_mapping->private_lock);
1034 link_dev_buffers(page, bh);
1035 init_page_buffers(page, bdev, block, size);
1036 spin_unlock(&inode->i_mapping->private_lock);
1042 page_cache_release(page);
1047 * Create buffers for the specified block device block's page. If
1048 * that page was dirty, the buffers are set dirty also.
1051 grow_buffers(struct block_device *bdev, sector_t block, int size)
1060 } while ((size << sizebits) < PAGE_SIZE);
1062 index = block >> sizebits;
1065 * Check for a block which wants to lie outside our maximum possible
1066 * pagecache index. (this comparison is done using sector_t types).
1068 if (unlikely(index != block >> sizebits)) {
1069 char b[BDEVNAME_SIZE];
1071 printk(KERN_ERR "%s: requested out-of-range block %llu for "
1073 __func__, (unsigned long long)block,
1077 block = index << sizebits;
1078 /* Create a page with the proper size buffers.. */
1079 page = grow_dev_page(bdev, block, index, size);
1083 page_cache_release(page);
1087 static struct buffer_head *
1088 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1090 /* Size must be multiple of hard sectorsize */
1091 if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
1092 (size < 512 || size > PAGE_SIZE))) {
1093 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1095 printk(KERN_ERR "logical block size: %d\n",
1096 bdev_logical_block_size(bdev));
1103 struct buffer_head * bh;
1106 bh = __find_get_block(bdev, block, size);
1110 ret = grow_buffers(bdev, block, size);
1119 * The relationship between dirty buffers and dirty pages:
1121 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1122 * the page is tagged dirty in its radix tree.
1124 * At all times, the dirtiness of the buffers represents the dirtiness of
1125 * subsections of the page. If the page has buffers, the page dirty bit is
1126 * merely a hint about the true dirty state.
1128 * When a page is set dirty in its entirety, all its buffers are marked dirty
1129 * (if the page has buffers).
1131 * When a buffer is marked dirty, its page is dirtied, but the page's other buffers are not.
1134 * Also. When blockdev buffers are explicitly read with bread(), they
1135 * individually become uptodate. But their backing page remains not
1136 * uptodate - even if all of its buffers are uptodate. A subsequent
1137 * block_read_full_page() against that page will discover all the uptodate
1138 * buffers, will set the page uptodate and will perform no I/O.
1142 * mark_buffer_dirty - mark a buffer_head as needing writeout
1143 * @bh: the buffer_head to mark dirty
1145 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1146 * backing page dirty, then tag the page as dirty in its address_space's radix
1147 * tree and then attach the address_space's inode to its superblock's dirty inode list.
1150 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
1151 * mapping->tree_lock and mapping->host->i_lock.
1153 void mark_buffer_dirty(struct buffer_head *bh)
1155 WARN_ON_ONCE(!buffer_uptodate(bh));
1158 * Very *carefully* optimize the it-is-already-dirty case.
1160 * Don't let the final "is it dirty" escape to before we
1161 * perhaps modified the buffer.
1163 if (buffer_dirty(bh)) {
1165 if (buffer_dirty(bh))
1169 if (!test_set_buffer_dirty(bh)) {
1170 struct page *page = bh->b_page;
1171 if (!TestSetPageDirty(page)) {
1172 struct address_space *mapping = page_mapping(page);
1174 __set_page_dirty(page, mapping, 0);
1178 EXPORT_SYMBOL(mark_buffer_dirty);
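/*
 * Illustrative sketch (not part of the original file): the common pattern for
 * updating an on-disk structure such as a superblock copy; a caller that
 * needs the write on stable storage follows up with sync_dirty_buffer().
 * Names other than the buffer calls are hypothetical:
 *
 *	bh = sb_bread(sb, sb_block);
 *	if (!bh)
 *		return -EIO;
 *	memcpy(bh->b_data, raw_super, sizeof(*raw_super));
 *	mark_buffer_dirty(bh);
 *	if (wait)
 *		err = sync_dirty_buffer(bh);
 *	brelse(bh);
 */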
1181 * Decrement a buffer_head's reference count. If all buffers against a page
1182 * have zero reference count, are clean and unlocked, and if the page is clean
1183 * and unlocked then try_to_free_buffers() may strip the buffers from the page
1184 * in preparation for freeing it (sometimes, rarely, buffers are removed from
1185 * a page but it ends up not being freed, and buffers may later be reattached).
1187 void __brelse(struct buffer_head * buf)
1189 if (atomic_read(&buf->b_count)) {
1193 WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1195 EXPORT_SYMBOL(__brelse);
1198 * bforget() is like brelse(), except it discards any
1199 * potentially dirty data.
1201 void __bforget(struct buffer_head *bh)
1203 clear_buffer_dirty(bh);
1204 if (bh->b_assoc_map) {
1205 struct address_space *buffer_mapping = bh->b_page->mapping;
1207 spin_lock(&buffer_mapping->private_lock);
1208 list_del_init(&bh->b_assoc_buffers);
1209 bh->b_assoc_map = NULL;
1210 spin_unlock(&buffer_mapping->private_lock);
1214 EXPORT_SYMBOL(__bforget);
1216 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1219 if (buffer_uptodate(bh)) {
1224 bh->b_end_io = end_buffer_read_sync;
1225 submit_bh(READ, bh);
1227 if (buffer_uptodate(bh))
1235 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
1236 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
1237 * refcount elevated by one when they're in an LRU. A buffer can only appear
1238 * once in a particular CPU's LRU. A single buffer can be present in multiple
1239 * CPU's LRUs at the same time.
1241 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1242 * sb_find_get_block().
1244 * The LRUs themselves only need locking against invalidate_bh_lrus. We use
1245 * a local interrupt disable for that.
1248 #define BH_LRU_SIZE 8
1251 struct buffer_head *bhs[BH_LRU_SIZE];
1254 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1257 #define bh_lru_lock() local_irq_disable()
1258 #define bh_lru_unlock() local_irq_enable()
1260 #define bh_lru_lock() preempt_disable()
1261 #define bh_lru_unlock() preempt_enable()
1264 static inline void check_irqs_on(void)
1266 #ifdef irqs_disabled
1267 BUG_ON(irqs_disabled());
1272 * The LRU management algorithm is dopey-but-simple. Sorry.
1274 static void bh_lru_install(struct buffer_head *bh)
1276 struct buffer_head *evictee = NULL;
1280 if (__this_cpu_read(bh_lrus.bhs[0]) != bh) {
1281 struct buffer_head *bhs[BH_LRU_SIZE];
1287 for (in = 0; in < BH_LRU_SIZE; in++) {
1288 struct buffer_head *bh2 =
1289 __this_cpu_read(bh_lrus.bhs[in]);
1294 if (out >= BH_LRU_SIZE) {
1295 BUG_ON(evictee != NULL);
1302 while (out < BH_LRU_SIZE)
1304 memcpy(__this_cpu_ptr(&bh_lrus.bhs), bhs, sizeof(bhs));
1313 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
1315 static struct buffer_head *
1316 lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1318 struct buffer_head *ret = NULL;
1323 for (i = 0; i < BH_LRU_SIZE; i++) {
1324 struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);
1326 if (bh && bh->b_bdev == bdev &&
1327 bh->b_blocknr == block && bh->b_size == size) {
1330 __this_cpu_write(bh_lrus.bhs[i],
1331 __this_cpu_read(bh_lrus.bhs[i - 1]));
1334 __this_cpu_write(bh_lrus.bhs[0], bh);
1346 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
1347 * it in the LRU and mark it as accessed. If it is not present then return NULL.
1350 struct buffer_head *
1351 __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1353 struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1356 bh = __find_get_block_slow(bdev, block);
1364 EXPORT_SYMBOL(__find_get_block);
1367 * __getblk will locate (and, if necessary, create) the buffer_head
1368 * which corresponds to the passed block_device, block and size. The
1369 * returned buffer has its reference count incremented.
1371 * __getblk() cannot fail - it just keeps trying. If you pass it an
1372 * illegal block number, __getblk() will happily return a buffer_head
1373 * which represents the non-existent block. Very weird.
1375 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1376 * attempt is failing. FIXME, perhaps?
1378 struct buffer_head *
1379 __getblk(struct block_device *bdev, sector_t block, unsigned size)
1381 struct buffer_head *bh = __find_get_block(bdev, block, size);
1385 bh = __getblk_slow(bdev, block, size);
1388 EXPORT_SYMBOL(__getblk);
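/*
 * Illustrative sketch (not part of the original file): initialising a freshly
 * allocated block.  The old contents are irrelevant, so there is no need to
 * read the block first; __getblk() (or the sb_getblk() wrapper) is enough:
 *
 *	bh = __getblk(sb->s_bdev, blocknr, sb->s_blocksize);
 *	lock_buffer(bh);
 *	memset(bh->b_data, 0, bh->b_size);
 *	set_buffer_uptodate(bh);
 *	unlock_buffer(bh);
 *	mark_buffer_dirty(bh);
 *	brelse(bh);
 */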
1391 * Do async read-ahead on a buffer..
1393 void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1395 struct buffer_head *bh = __getblk(bdev, block, size);
1397 ll_rw_block(READA, 1, &bh);
1401 EXPORT_SYMBOL(__breadahead);
1404 * __bread() - reads a specified block and returns the bh
1405 * @bdev: the block_device to read from
1406 * @block: number of block
1407 * @size: size (in bytes) to read
1409 * Reads a specified block, and returns buffer head that contains it.
1410 * It returns NULL if the block was unreadable.
1412 struct buffer_head *
1413 __bread(struct block_device *bdev, sector_t block, unsigned size)
1415 struct buffer_head *bh = __getblk(bdev, block, size);
1417 if (likely(bh) && !buffer_uptodate(bh))
1418 bh = __bread_slow(bh);
1421 EXPORT_SYMBOL(__bread);
1424 * invalidate_bh_lrus() is called rarely - but not only at unmount.
1425 * This doesn't race because it runs in each cpu either in irq
1426 * or with preempt disabled.
1428 static void invalidate_bh_lru(void *arg)
1430 struct bh_lru *b = &get_cpu_var(bh_lrus);
1433 for (i = 0; i < BH_LRU_SIZE; i++) {
1437 put_cpu_var(bh_lrus);
1440 void invalidate_bh_lrus(void)
1442 on_each_cpu(invalidate_bh_lru, NULL, 1);
1444 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
1446 void set_bh_page(struct buffer_head *bh,
1447 struct page *page, unsigned long offset)
1450 BUG_ON(offset >= PAGE_SIZE);
1451 if (PageHighMem(page))
1453 * This catches illegal uses and preserves the offset:
1455 bh->b_data = (char *)(0 + offset);
1457 bh->b_data = page_address(page) + offset;
1459 EXPORT_SYMBOL(set_bh_page);
1462 * Called when truncating a buffer on a page completely.
1464 static void discard_buffer(struct buffer_head * bh)
1467 clear_buffer_dirty(bh);
1469 clear_buffer_mapped(bh);
1470 clear_buffer_req(bh);
1471 clear_buffer_new(bh);
1472 clear_buffer_delay(bh);
1473 clear_buffer_unwritten(bh);
1478 * block_invalidatepage - invalidate part or all of a buffer-backed page
1480 * @page: the page which is affected
1481 * @offset: the index of the truncation point
1483 * block_invalidatepage() is called when all or part of the page has become
1484 * invalidated by a truncate operation.
1486 * block_invalidatepage() does not have to release all buffers, but it must
1487 * ensure that no dirty buffer is left outside @offset and that no I/O
1488 * is underway against any of the blocks which are outside the truncation
1489 * point. Because the caller is about to free (and possibly reuse) those blocks on-disk.
1492 void block_invalidatepage(struct page *page, unsigned long offset)
1494 struct buffer_head *head, *bh, *next;
1495 unsigned int curr_off = 0;
1497 BUG_ON(!PageLocked(page));
1498 if (!page_has_buffers(page))
1501 head = page_buffers(page);
1504 unsigned int next_off = curr_off + bh->b_size;
1505 next = bh->b_this_page;
1508 * is this block fully invalidated?
1510 if (offset <= curr_off)
1512 curr_off = next_off;
1514 } while (bh != head);
1517 * We release buffers only if the entire page is being invalidated.
1518 * The get_block cached value has been unconditionally invalidated,
1519 * so real IO is not possible anymore.
1522 try_to_release_page(page, 0);
1526 EXPORT_SYMBOL(block_invalidatepage);
1529 * We attach and possibly dirty the buffers atomically wrt
1530 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
1531 * is already excluded via the page lock.
1533 void create_empty_buffers(struct page *page,
1534 unsigned long blocksize, unsigned long b_state)
1536 struct buffer_head *bh, *head, *tail;
1538 head = alloc_page_buffers(page, blocksize, 1);
1541 bh->b_state |= b_state;
1543 bh = bh->b_this_page;
1545 tail->b_this_page = head;
1547 spin_lock(&page->mapping->private_lock);
1548 if (PageUptodate(page) || PageDirty(page)) {
1551 if (PageDirty(page))
1552 set_buffer_dirty(bh);
1553 if (PageUptodate(page))
1554 set_buffer_uptodate(bh);
1555 bh = bh->b_this_page;
1556 } while (bh != head);
1558 attach_page_buffers(page, head);
1559 spin_unlock(&page->mapping->private_lock);
1561 EXPORT_SYMBOL(create_empty_buffers);
1564 * We are taking a block for data and we don't want any output from any
1565 * buffer-cache aliases starting from return from that function and
1566 * until the moment when something will explicitly mark the buffer
1567 * dirty (hopefully that will not happen until we free that block ;-)
1568 * We don't even need to mark it not-uptodate - nobody can expect
1569 * anything from a newly allocated buffer anyway. We used to use
1570 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1571 * don't want to mark the alias unmapped, for example - it would confuse
1572 * anyone who might pick it with bread() afterwards...
1574 * Also.. Note that bforget() doesn't lock the buffer. So there can
1575 * be writeout I/O going on against recently-freed buffers. We don't
1576 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1577 * only if we really need to. That happens here.
1579 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1581 struct buffer_head *old_bh;
1585 old_bh = __find_get_block_slow(bdev, block);
1587 clear_buffer_dirty(old_bh);
1588 wait_on_buffer(old_bh);
1589 clear_buffer_req(old_bh);
1593 EXPORT_SYMBOL(unmap_underlying_metadata);
1596 * NOTE! All mapped/uptodate combinations are valid:
1598 * Mapped Uptodate Meaning
1600 * No No "unknown" - must do get_block()
1601 * No Yes "hole" - zero-filled
1602 * Yes No "allocated" - allocated on disk, not read in
1603 * Yes Yes "valid" - allocated and up-to-date in memory.
1605 * "Dirty" is valid only with the last case (mapped+uptodate).
1609 * While block_write_full_page is writing back the dirty buffers under
1610 * the page lock, whoever dirtied the buffers may decide to clean them
1611 * again at any time. We handle that by only looking at the buffer
1612 * state inside lock_buffer().
1614 * If block_write_full_page() is called for regular writeback
1615 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1616 * locked buffer. This only can happen if someone has written the buffer
1617 * directly, with submit_bh(). At the address_space level PageWriteback
1618 * prevents this contention from occurring.
1620 * If block_write_full_page() is called with wbc->sync_mode ==
1621 * WB_SYNC_ALL, the writes are posted using WRITE_SYNC; this
1622 * causes the writes to be flagged as synchronous writes.
1624 static int __block_write_full_page(struct inode *inode, struct page *page,
1625 get_block_t *get_block, struct writeback_control *wbc,
1626 bh_end_io_t *handler)
1630 sector_t last_block;
1631 struct buffer_head *bh, *head;
1632 const unsigned blocksize = 1 << inode->i_blkbits;
1633 int nr_underway = 0;
1634 int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
1635 WRITE_SYNC : WRITE);
1637 BUG_ON(!PageLocked(page));
1639 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1641 if (!page_has_buffers(page)) {
1642 create_empty_buffers(page, blocksize,
1643 (1 << BH_Dirty)|(1 << BH_Uptodate));
1647 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1648 * here, and the (potentially unmapped) buffers may become dirty at
1649 * any time. If a buffer becomes dirty here after we've inspected it
1650 * then we just miss that fact, and the page stays dirty.
1652 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1653 * handle that here by just cleaning them.
1656 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1657 head = page_buffers(page);
1661 * Get all the dirty buffers mapped to disk addresses and
1662 * handle any aliases from the underlying blockdev's mapping.
1665 if (block > last_block) {
1667 * mapped buffers outside i_size will occur, because
1668 * this page can be outside i_size when there is a
1669 * truncate in progress.
1672 * The buffer was zeroed by block_write_full_page()
1674 clear_buffer_dirty(bh);
1675 set_buffer_uptodate(bh);
1676 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1678 WARN_ON(bh->b_size != blocksize);
1679 err = get_block(inode, block, bh, 1);
1682 clear_buffer_delay(bh);
1683 if (buffer_new(bh)) {
1684 /* blockdev mappings never come here */
1685 clear_buffer_new(bh);
1686 unmap_underlying_metadata(bh->b_bdev,
1690 bh = bh->b_this_page;
1692 } while (bh != head);
1695 if (!buffer_mapped(bh))
1698 * If it's a fully non-blocking write attempt and we cannot
1699 * lock the buffer then redirty the page. Note that this can
1700 * potentially cause a busy-wait loop from writeback threads
1701 * and kswapd activity, but those code paths have their own
1702 * higher-level throttling.
1704 if (wbc->sync_mode != WB_SYNC_NONE) {
1706 } else if (!trylock_buffer(bh)) {
1707 redirty_page_for_writepage(wbc, page);
1710 if (test_clear_buffer_dirty(bh)) {
1711 mark_buffer_async_write_endio(bh, handler);
1715 } while ((bh = bh->b_this_page) != head);
1718 * The page and its buffers are protected by PageWriteback(), so we can
1719 * drop the bh refcounts early.
1721 BUG_ON(PageWriteback(page));
1722 set_page_writeback(page);
1725 struct buffer_head *next = bh->b_this_page;
1726 if (buffer_async_write(bh)) {
1727 submit_bh(write_op, bh);
1731 } while (bh != head);
1736 if (nr_underway == 0) {
1738 * The page was marked dirty, but the buffers were
1739 * clean. Someone wrote them back by hand with
1740 * ll_rw_block/submit_bh. A rare case.
1742 end_page_writeback(page);
1745 * The page and buffer_heads can be released at any time from here on.
1753 * ENOSPC, or some other error. We may already have added some
1754 * blocks to the file, so we need to write these out to avoid
1755 * exposing stale data.
1756 * The page is currently locked and not marked for writeback
1759 /* Recovery: lock and submit the mapped buffers */
1761 if (buffer_mapped(bh) && buffer_dirty(bh) &&
1762 !buffer_delay(bh)) {
1764 mark_buffer_async_write_endio(bh, handler);
1767 * The buffer may have been set dirty during
1768 * attachment to a dirty page.
1770 clear_buffer_dirty(bh);
1772 } while ((bh = bh->b_this_page) != head);
1774 BUG_ON(PageWriteback(page));
1775 mapping_set_error(page->mapping, err);
1776 set_page_writeback(page);
1778 struct buffer_head *next = bh->b_this_page;
1779 if (buffer_async_write(bh)) {
1780 clear_buffer_dirty(bh);
1781 submit_bh(write_op, bh);
1785 } while (bh != head);
1791 * If a page has any new buffers, zero them out here, and mark them uptodate
1792 * and dirty so they'll be written out (in order to prevent uninitialised
1793 * block data from leaking). And clear the new bit.
1795 void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1797 unsigned int block_start, block_end;
1798 struct buffer_head *head, *bh;
1800 BUG_ON(!PageLocked(page));
1801 if (!page_has_buffers(page))
1804 bh = head = page_buffers(page);
1807 block_end = block_start + bh->b_size;
1809 if (buffer_new(bh)) {
1810 if (block_end > from && block_start < to) {
1811 if (!PageUptodate(page)) {
1812 unsigned start, size;
1814 start = max(from, block_start);
1815 size = min(to, block_end) - start;
1817 zero_user(page, start, size);
1818 set_buffer_uptodate(bh);
1821 clear_buffer_new(bh);
1822 mark_buffer_dirty(bh);
1826 block_start = block_end;
1827 bh = bh->b_this_page;
1828 } while (bh != head);
1830 EXPORT_SYMBOL(page_zero_new_buffers);
1832 int __block_write_begin(struct page *page, loff_t pos, unsigned len,
1833 get_block_t *get_block)
1835 unsigned from = pos & (PAGE_CACHE_SIZE - 1);
1836 unsigned to = from + len;
1837 struct inode *inode = page->mapping->host;
1838 unsigned block_start, block_end;
1841 unsigned blocksize, bbits;
1842 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1844 BUG_ON(!PageLocked(page));
1845 BUG_ON(from > PAGE_CACHE_SIZE);
1846 BUG_ON(to > PAGE_CACHE_SIZE);
1849 blocksize = 1 << inode->i_blkbits;
1850 if (!page_has_buffers(page))
1851 create_empty_buffers(page, blocksize, 0);
1852 head = page_buffers(page);
1854 bbits = inode->i_blkbits;
1855 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1857 for(bh = head, block_start = 0; bh != head || !block_start;
1858 block++, block_start=block_end, bh = bh->b_this_page) {
1859 block_end = block_start + blocksize;
1860 if (block_end <= from || block_start >= to) {
1861 if (PageUptodate(page)) {
1862 if (!buffer_uptodate(bh))
1863 set_buffer_uptodate(bh);
1868 clear_buffer_new(bh);
1869 if (!buffer_mapped(bh)) {
1870 WARN_ON(bh->b_size != blocksize);
1871 err = get_block(inode, block, bh, 1);
1874 if (buffer_new(bh)) {
1875 unmap_underlying_metadata(bh->b_bdev,
1877 if (PageUptodate(page)) {
1878 clear_buffer_new(bh);
1879 set_buffer_uptodate(bh);
1880 mark_buffer_dirty(bh);
1883 if (block_end > to || block_start < from)
1884 zero_user_segments(page,
1890 if (PageUptodate(page)) {
1891 if (!buffer_uptodate(bh))
1892 set_buffer_uptodate(bh);
1895 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1896 !buffer_unwritten(bh) &&
1897 (block_start < from || block_end > to)) {
1898 ll_rw_block(READ, 1, &bh);
1903 * If we issued read requests - let them complete.
1905 while(wait_bh > wait) {
1906 wait_on_buffer(*--wait_bh);
1907 if (!buffer_uptodate(*wait_bh))
1911 page_zero_new_buffers(page, from, to);
1914 EXPORT_SYMBOL(__block_write_begin);
1916 static int __block_commit_write(struct inode *inode, struct page *page,
1917 unsigned from, unsigned to)
1919 unsigned block_start, block_end;
1922 struct buffer_head *bh, *head;
1924 blocksize = 1 << inode->i_blkbits;
1926 for(bh = head = page_buffers(page), block_start = 0;
1927 bh != head || !block_start;
1928 block_start=block_end, bh = bh->b_this_page) {
1929 block_end = block_start + blocksize;
1930 if (block_end <= from || block_start >= to) {
1931 if (!buffer_uptodate(bh))
1934 set_buffer_uptodate(bh);
1935 mark_buffer_dirty(bh);
1937 clear_buffer_new(bh);
1941 * If this is a partial write which happened to make all buffers
1942 * uptodate then we can optimize away a bogus readpage() for
1943 * the next read(). Here we 'discover' whether the page went
1944 * uptodate as a result of this (potentially partial) write.
1947 SetPageUptodate(page);
1952 * block_write_begin takes care of the basic task of block allocation and
1953 * bringing partial write blocks uptodate first.
1955 * The filesystem needs to handle block truncation upon failure.
1957 int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
1958 unsigned flags, struct page **pagep, get_block_t *get_block)
1960 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
1964 page = grab_cache_page_write_begin(mapping, index, flags);
1968 status = __block_write_begin(page, pos, len, get_block);
1969 if (unlikely(status)) {
1971 page_cache_release(page);
1978 EXPORT_SYMBOL(block_write_begin);
1980 int block_write_end(struct file *file, struct address_space *mapping,
1981 loff_t pos, unsigned len, unsigned copied,
1982 struct page *page, void *fsdata)
1984 struct inode *inode = mapping->host;
1987 start = pos & (PAGE_CACHE_SIZE - 1);
1989 if (unlikely(copied < len)) {
1991 * The buffers that were written will now be uptodate, so we
1992 * don't have to worry about a readpage reading them and
1993 * overwriting a partial write. However if we have encountered
1994 * a short write and only partially written into a buffer, it
1995 * will not be marked uptodate, so a readpage might come in and
1996 * destroy our partial write.
1998 * Do the simplest thing, and just treat any short write to a
1999 * non uptodate page as a zero-length write, and force the
2000 * caller to redo the whole thing.
2002 if (!PageUptodate(page))
2005 page_zero_new_buffers(page, start+copied, start+len);
2007 flush_dcache_page(page);
2009 /* This could be a short (even 0-length) commit */
2010 __block_commit_write(inode, page, start, start+copied);
2014 EXPORT_SYMBOL(block_write_end);
2016 int generic_write_end(struct file *file, struct address_space *mapping,
2017 loff_t pos, unsigned len, unsigned copied,
2018 struct page *page, void *fsdata)
2020 struct inode *inode = mapping->host;
2021 int i_size_changed = 0;
2023 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2026 * No need to use i_size_read() here, the i_size
2027 * cannot change under us because we hold i_mutex.
2029 * But it's important to update i_size while still holding page lock:
2030 * page writeout could otherwise come in and zero beyond i_size.
2032 if (pos+copied > inode->i_size) {
2033 i_size_write(inode, pos+copied);
2038 page_cache_release(page);
2041 * Don't mark the inode dirty under page lock. First, it unnecessarily
2042 * makes the holding time of page lock longer. Second, it forces lock
2043 * ordering of page lock and transaction start for journaling filesystems.
2047 mark_inode_dirty(inode);
2051 EXPORT_SYMBOL(generic_write_end);
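/*
 * Illustrative sketch (not part of the original file): a buffer-backed
 * filesystem typically wires its ->write_begin to block_write_begin() with
 * its own get_block callback, and uses generic_write_end() directly as its
 * ->write_end.  examplefs_get_block is a hypothetical name:
 *
 *	static int examplefs_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, unsigned flags,
 *			struct page **pagep, void **fsdata)
 *	{
 *		return block_write_begin(mapping, pos, len, flags, pagep,
 *					 examplefs_get_block);
 *	}
 */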
2054 * block_is_partially_uptodate checks whether buffers within a page are uptodate or not.
2057 * Returns true if all buffers which correspond to a file portion
2058 * we want to read are uptodate.
2060 int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
2063 struct inode *inode = page->mapping->host;
2064 unsigned block_start, block_end, blocksize;
2066 struct buffer_head *bh, *head;
2069 if (!page_has_buffers(page))
2072 blocksize = 1 << inode->i_blkbits;
2073 to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
2075 if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
2078 head = page_buffers(page);
2082 block_end = block_start + blocksize;
2083 if (block_end > from && block_start < to) {
2084 if (!buffer_uptodate(bh)) {
2088 if (block_end >= to)
2091 block_start = block_end;
2092 bh = bh->b_this_page;
2093 } while (bh != head);
2097 EXPORT_SYMBOL(block_is_partially_uptodate);
2100 * Generic "read page" function for block devices that have the normal
2101 * get_block functionality. This is most of the block device filesystems.
2102 * Reads the page asynchronously --- the unlock_buffer() and
2103 * set/clear_buffer_uptodate() functions propagate buffer state into the
2104 * page struct once IO has completed.
2106 int block_read_full_page(struct page *page, get_block_t *get_block)
2108 struct inode *inode = page->mapping->host;
2109 sector_t iblock, lblock;
2110 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2111 unsigned int blocksize;
2113 int fully_mapped = 1;
2115 BUG_ON(!PageLocked(page));
2116 blocksize = 1 << inode->i_blkbits;
2117 if (!page_has_buffers(page))
2118 create_empty_buffers(page, blocksize, 0);
2119 head = page_buffers(page);
2121 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2122 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2128 if (buffer_uptodate(bh))
2131 if (!buffer_mapped(bh)) {
2135 if (iblock < lblock) {
2136 WARN_ON(bh->b_size != blocksize);
2137 err = get_block(inode, iblock, bh, 0);
2141 if (!buffer_mapped(bh)) {
2142 zero_user(page, i * blocksize, blocksize);
2144 set_buffer_uptodate(bh);
2148 * get_block() might have updated the buffer synchronously
2151 if (buffer_uptodate(bh))
2155 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2158 SetPageMappedToDisk(page);
2162 * All buffers are uptodate - we can set the page uptodate
2163 * as well. But not if get_block() returned an error.
2165 if (!PageError(page))
2166 SetPageUptodate(page);
2171 /* Stage two: lock the buffers */
2172 for (i = 0; i < nr; i++) {
2175 mark_buffer_async_read(bh);
2179 * Stage 3: start the IO. Check for uptodateness
2180 * inside the buffer lock in case another process reading
2181 * the underlying blockdev brought it uptodate (the sct fix).
2183 for (i = 0; i < nr; i++) {
2185 if (buffer_uptodate(bh))
2186 end_buffer_async_read(bh, 1);
2188 submit_bh(READ, bh);
2192 EXPORT_SYMBOL(block_read_full_page);
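/*
 * Illustrative sketch (not part of the original file): most buffer-backed
 * filesystems implement ->readpage as a thin wrapper around this helper.
 * examplefs_get_block is a hypothetical name:
 *
 *	static int examplefs_readpage(struct file *file, struct page *page)
 *	{
 *		return block_read_full_page(page, examplefs_get_block);
 *	}
 */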
2194 /* utility function for filesystems that need to do work on expanding
2195 * truncates. Uses filesystem pagecache writes to allow the filesystem to
2196 * deal with the hole.
2198 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2200 struct address_space *mapping = inode->i_mapping;
2205 err = inode_newsize_ok(inode, size);
2209 err = pagecache_write_begin(NULL, mapping, size, 0,
2210 AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2215 err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2221 EXPORT_SYMBOL(generic_cont_expand_simple);
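/*
 * Illustrative sketch (not part of the original file): a filesystem would
 * typically call this from its ->setattr() when the new size is larger than
 * the current one, before committing the size change:
 *
 *	if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size) {
 *		err = generic_cont_expand_simple(inode, attr->ia_size);
 *		if (err)
 *			return err;
 *	}
 */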
2223 static int cont_expand_zero(struct file *file, struct address_space *mapping,
2224 loff_t pos, loff_t *bytes)
2226 struct inode *inode = mapping->host;
2227 unsigned blocksize = 1 << inode->i_blkbits;
2230 pgoff_t index, curidx;
2232 unsigned zerofrom, offset, len;
2235 index = pos >> PAGE_CACHE_SHIFT;
2236 offset = pos & ~PAGE_CACHE_MASK;
2238 while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
2239 zerofrom = curpos & ~PAGE_CACHE_MASK;
2240 if (zerofrom & (blocksize-1)) {
2241 *bytes |= (blocksize-1);
2244 len = PAGE_CACHE_SIZE - zerofrom;
2246 err = pagecache_write_begin(file, mapping, curpos, len,
2247 AOP_FLAG_UNINTERRUPTIBLE,
2251 zero_user(page, zerofrom, len);
2252 err = pagecache_write_end(file, mapping, curpos, len, len,
2259 balance_dirty_pages_ratelimited(mapping);
2262 /* page covers the boundary, find the boundary offset */
2263 if (index == curidx) {
2264 zerofrom = curpos & ~PAGE_CACHE_MASK;
2265 /* if we are expanding the file, the last block will be filled */
2266 if (offset <= zerofrom) {
2269 if (zerofrom & (blocksize-1)) {
2270 *bytes |= (blocksize-1);
2273 len = offset - zerofrom;
2275 err = pagecache_write_begin(file, mapping, curpos, len,
2276 AOP_FLAG_UNINTERRUPTIBLE,
2280 zero_user(page, zerofrom, len);
2281 err = pagecache_write_end(file, mapping, curpos, len, len,
2293 * For moronic filesystems that do not allow holes in files.
2294 * We may have to extend the file.
2296 int cont_write_begin(struct file *file, struct address_space *mapping,
2297 loff_t pos, unsigned len, unsigned flags,
2298 struct page **pagep, void **fsdata,
2299 get_block_t *get_block, loff_t *bytes)
2301 struct inode *inode = mapping->host;
2302 unsigned blocksize = 1 << inode->i_blkbits;
2306 err = cont_expand_zero(file, mapping, pos, bytes);
2310 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2311 if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2312 *bytes |= (blocksize-1);
2316 return block_write_begin(mapping, pos, len, flags, pagep, get_block);
2318 EXPORT_SYMBOL(cont_write_begin);
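/*
 * Example (illustrative sketch, not built): ->write_begin for a filesystem
 * that cannot represent holes, in the style of FAT.  MYFS_I() and its
 * mmu_private field (tracking how far the file has been zeroed/allocated)
 * are hypothetical; myfs_get_block is the filesystem's get_block_t.
 */
#if 0
static int myfs_cont_write_begin(struct file *file,
			struct address_space *mapping, loff_t pos,
			unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	*pagep = NULL;
	/* Zero-fill from the current allocation boundary up to @pos first. */
	return cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				myfs_get_block,
				&MYFS_I(mapping->host)->mmu_private);
}
#endif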
2320 int block_commit_write(struct page *page, unsigned from, unsigned to)
2322 struct inode *inode = page->mapping->host;
2323 __block_commit_write(inode,page,from,to);
2326 EXPORT_SYMBOL(block_commit_write);
2329 * block_page_mkwrite() is not allowed to change the file size as it gets
2330 * called from a page fault handler when a page is first dirtied. Hence we must
2331 * be careful to check for EOF conditions here. We set the page up correctly
2332 * for a written page which means we get ENOSPC checking when writing into
2333 * holes and correct delalloc and unwritten extent mapping on filesystems that
2334 * support these features.
2336 * We are not allowed to take the i_mutex here so we have to play games to
2337 * protect against truncate races as the page could now be beyond EOF. Because
2338 * truncate writes the inode size before removing pages, once we have the
2339 * page lock we can determine safely if the page is beyond EOF. If it is not
2340 * beyond EOF, then the page is guaranteed safe against truncation until we
2343 * Direct callers of this function should call vfs_check_frozen() so that the
2344 * page fault handler does not busyloop until the fs is thawed.
2346 int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2347 get_block_t get_block)
2349 struct page *page = vmf->page;
2350 struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2356 size = i_size_read(inode);
2357 if ((page->mapping != inode->i_mapping) ||
2358 (page_offset(page) > size)) {
2359 /* We overload EFAULT to mean page got truncated */
2364 /* page is wholly or partially inside EOF */
2365 if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2366 end = size & ~PAGE_CACHE_MASK;
2368 end = PAGE_CACHE_SIZE;
2370 ret = __block_write_begin(page, 0, end, get_block);
2372 ret = block_commit_write(page, 0, end);
2374 if (unlikely(ret < 0))
2377 * Freezing in progress? We check after the page is marked dirty and
2378 * with the page lock held, so if the test here fails we know the freezing
2379 * code will wait during syncing until the page fault is done - at that
2380 * point the page will be dirty and unlocked, so the freezing code will
2381 * write it out and write-protect it again.
2383 set_page_dirty(page);
2384 if (inode->i_sb->s_frozen != SB_UNFROZEN) {
2388 wait_on_page_writeback(page);
2394 EXPORT_SYMBOL(__block_page_mkwrite);
2396 int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2397 get_block_t get_block)
2400 struct super_block *sb = vma->vm_file->f_path.dentry->d_inode->i_sb;
2403 * This check is racy but catches the common case. The check in
2404 * __block_page_mkwrite() is reliable.
2406 vfs_check_frozen(sb, SB_FREEZE_WRITE);
2407 ret = __block_page_mkwrite(vma, vmf, get_block);
2408 return block_page_mkwrite_return(ret);
2410 EXPORT_SYMBOL(block_page_mkwrite);
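/*
 * Example (illustrative sketch, not built): wiring block_page_mkwrite() up as
 * the ->page_mkwrite handler of a file's vm_operations_struct, so a write
 * fault allocates blocks (and reports ENOSPC) before the page is dirtied.
 * The myfs_* names are hypothetical.
 */
#if 0
static int myfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return block_page_mkwrite(vma, vmf, myfs_get_block);
}

static const struct vm_operations_struct myfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= myfs_page_mkwrite,
};
#endif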
2413 * nobh_write_begin()'s prereads are special: the buffer_heads are freed
2414 * immediately, while under the page lock. So it needs a special end_io
2415 * handler which does not touch the bh after unlocking it.
2417 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2419 __end_buffer_read_notouch(bh, uptodate);
2423 * Attach the singly-linked list of buffers created by nobh_write_begin to
2424 * the page (converting it to a circular linked list and taking care of page
2427 static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2429 struct buffer_head *bh;
2431 BUG_ON(!PageLocked(page));
2433 spin_lock(&page->mapping->private_lock);
2436 if (PageDirty(page))
2437 set_buffer_dirty(bh);
2438 if (!bh->b_this_page)
2439 bh->b_this_page = head;
2440 bh = bh->b_this_page;
2441 } while (bh != head);
2442 attach_page_buffers(page, head);
2443 spin_unlock(&page->mapping->private_lock);
2447 * On entry, the page is not uptodate at all.
2448 * On exit, the page is fully uptodate in the areas outside (from, to).
2449 * The filesystem needs to handle block truncation upon failure.
2451 int nobh_write_begin(struct address_space *mapping,
2452 loff_t pos, unsigned len, unsigned flags,
2453 struct page **pagep, void **fsdata,
2454 get_block_t *get_block)
2456 struct inode *inode = mapping->host;
2457 const unsigned blkbits = inode->i_blkbits;
2458 const unsigned blocksize = 1 << blkbits;
2459 struct buffer_head *head, *bh;
2463 unsigned block_in_page;
2464 unsigned block_start, block_end;
2465 sector_t block_in_file;
2468 int is_mapped_to_disk = 1;
2470 index = pos >> PAGE_CACHE_SHIFT;
2471 from = pos & (PAGE_CACHE_SIZE - 1);
2474 page = grab_cache_page_write_begin(mapping, index, flags);
2480 if (page_has_buffers(page)) {
2481 ret = __block_write_begin(page, pos, len, get_block);
2487 if (PageMappedToDisk(page))
2491 * Allocate buffers so that we can keep track of state, and potentially
2492 * attach them to the page if an error occurs. In the common case of
2493 * no error, they will just be freed again without ever being attached
2494 * to the page (which is all OK, because we're under the page lock).
2496 * Be careful: the buffer linked list is a NULL terminated one, rather
2497 * than the circular one we're used to.
2499 head = alloc_page_buffers(page, blocksize, 0);
2505 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2508 * We loop across all blocks in the page, whether or not they are
2509 * part of the affected region. This is so we can discover if the
2510 * page is fully mapped-to-disk.
2512 for (block_start = 0, block_in_page = 0, bh = head;
2513 block_start < PAGE_CACHE_SIZE;
2514 block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
2517 block_end = block_start + blocksize;
2520 if (block_start >= to)
2522 ret = get_block(inode, block_in_file + block_in_page,
2526 if (!buffer_mapped(bh))
2527 is_mapped_to_disk = 0;
2529 unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2530 if (PageUptodate(page)) {
2531 set_buffer_uptodate(bh);
2534 if (buffer_new(bh) || !buffer_mapped(bh)) {
2535 zero_user_segments(page, block_start, from,
2539 if (buffer_uptodate(bh))
2540 continue; /* reiserfs does this */
2541 if (block_start < from || block_end > to) {
2543 bh->b_end_io = end_buffer_read_nobh;
2544 submit_bh(READ, bh);
2551 * The page is locked, so these buffers are protected from
2552 * any VM or truncate activity. Hence we don't need to care
2553 * for the buffer_head refcounts.
2555 for (bh = head; bh; bh = bh->b_this_page) {
2557 if (!buffer_uptodate(bh))
2564 if (is_mapped_to_disk)
2565 SetPageMappedToDisk(page);
2567 *fsdata = head; /* to be released by nobh_write_end */
2574 * Error recovery is a bit difficult. We need to zero out blocks that
2575 * were newly allocated, and dirty them to ensure they get written out.
2576 * Buffers need to be attached to the page at this point, otherwise
2577 * the handling of potential IO errors during writeout would be hard
2578 * (could try doing synchronous writeout, but what if that fails too?)
2580 attach_nobh_buffers(page, head);
2581 page_zero_new_buffers(page, from, to);
2585 page_cache_release(page);
2590 EXPORT_SYMBOL(nobh_write_begin);
2592 int nobh_write_end(struct file *file, struct address_space *mapping,
2593 loff_t pos, unsigned len, unsigned copied,
2594 struct page *page, void *fsdata)
2596 struct inode *inode = page->mapping->host;
2597 struct buffer_head *head = fsdata;
2598 struct buffer_head *bh;
2599 BUG_ON(fsdata != NULL && page_has_buffers(page));
2601 if (unlikely(copied < len) && head)
2602 attach_nobh_buffers(page, head);
2603 if (page_has_buffers(page))
2604 return generic_write_end(file, mapping, pos, len,
2605 copied, page, fsdata);
2607 SetPageUptodate(page);
2608 set_page_dirty(page);
2609 if (pos+copied > inode->i_size) {
2610 i_size_write(inode, pos+copied);
2611 mark_inode_dirty(inode);
2615 page_cache_release(page);
2619 head = head->b_this_page;
2620 free_buffer_head(bh);
2625 EXPORT_SYMBOL(nobh_write_end);
2628 * nobh_writepage() - based on block_write_full_page() except
2629 * that it tries to operate without attaching buffer_heads to
2632 int nobh_writepage(struct page *page, get_block_t *get_block,
2633 struct writeback_control *wbc)
2635 struct inode * const inode = page->mapping->host;
2636 loff_t i_size = i_size_read(inode);
2637 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2641 /* Is the page fully inside i_size? */
2642 if (page->index < end_index)
2645 /* Is the page fully outside i_size? (truncate in progress) */
2646 offset = i_size & (PAGE_CACHE_SIZE-1);
2647 if (page->index >= end_index+1 || !offset) {
2649 * The page may have dirty, unmapped buffers. For example,
2650 * they may have been added in ext3_writepage(). Make them
2651 * freeable here, so the page does not leak.
2654 /* Not really sure about this - do we need this ? */
2655 if (page->mapping->a_ops->invalidatepage)
2656 page->mapping->a_ops->invalidatepage(page, offset);
2659 return 0; /* don't care */
2663 * The page straddles i_size. It must be zeroed out on each and every
2664 * writepage invocation because it may be mmapped. "A file is mapped
2665 * in multiples of the page size. For a file that is not a multiple of
2666 * the page size, the remaining memory is zeroed when mapped, and
2667 * writes to that region are not written out to the file."
2669 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2671 ret = mpage_writepage(page, get_block, wbc);
2673 ret = __block_write_full_page(inode, page, get_block, wbc,
2674 end_buffer_async_write);
2677 EXPORT_SYMBOL(nobh_writepage);
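/*
 * Example (illustrative sketch, not built): address_space_operations wiring
 * for a filesystem using the nobh variants above, in the style of ext2's
 * nobh mode.  The myfs_* names are hypothetical; myfs_readpage is the
 * block_read_full_page() wrapper sketched earlier.
 */
#if 0
static int myfs_nobh_write_begin(struct file *file,
			struct address_space *mapping, loff_t pos,
			unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return nobh_write_begin(mapping, pos, len, flags, pagep, fsdata,
				myfs_get_block);
}

static int myfs_nobh_writepage(struct page *page,
				struct writeback_control *wbc)
{
	return nobh_writepage(page, myfs_get_block, wbc);
}

static const struct address_space_operations myfs_nobh_aops = {
	.readpage	= myfs_readpage,
	.writepage	= myfs_nobh_writepage,
	.write_begin	= myfs_nobh_write_begin,
	.write_end	= nobh_write_end,	/* generic helper used directly */
};
#endif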
2679 int nobh_truncate_page(struct address_space *mapping,
2680 loff_t from, get_block_t *get_block)
2682 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2683 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2686 unsigned length, pos;
2687 struct inode *inode = mapping->host;
2689 struct buffer_head map_bh;
2692 blocksize = 1 << inode->i_blkbits;
2693 length = offset & (blocksize - 1);
2695 /* Block boundary? Nothing to do */
2699 length = blocksize - length;
2700 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2702 page = grab_cache_page(mapping, index);
2707 if (page_has_buffers(page)) {
2710 page_cache_release(page);
2711 return block_truncate_page(mapping, from, get_block);
2714 /* Find the buffer that contains "offset" */
2716 while (offset >= pos) {
2721 map_bh.b_size = blocksize;
2723 err = get_block(inode, iblock, &map_bh, 0);
2726 /* unmapped? It's a hole - nothing to do */
2727 if (!buffer_mapped(&map_bh))
2730 /* Ok, it's mapped. Make sure it's up-to-date */
2731 if (!PageUptodate(page)) {
2732 err = mapping->a_ops->readpage(NULL, page);
2734 page_cache_release(page);
2738 if (!PageUptodate(page)) {
2742 if (page_has_buffers(page))
2745 zero_user(page, offset, length);
2746 set_page_dirty(page);
2751 page_cache_release(page);
2755 EXPORT_SYMBOL(nobh_truncate_page);
2757 int block_truncate_page(struct address_space *mapping,
2758 loff_t from, get_block_t *get_block)
2760 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2761 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2764 unsigned length, pos;
2765 struct inode *inode = mapping->host;
2767 struct buffer_head *bh;
2770 blocksize = 1 << inode->i_blkbits;
2771 length = offset & (blocksize - 1);
2773 /* Block boundary? Nothing to do */
2777 length = blocksize - length;
2778 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2780 page = grab_cache_page(mapping, index);
2785 if (!page_has_buffers(page))
2786 create_empty_buffers(page, blocksize, 0);
2788 /* Find the buffer that contains "offset" */
2789 bh = page_buffers(page);
2791 while (offset >= pos) {
2792 bh = bh->b_this_page;
2798 if (!buffer_mapped(bh)) {
2799 WARN_ON(bh->b_size != blocksize);
2800 err = get_block(inode, iblock, bh, 0);
2803 /* unmapped? It's a hole - nothing to do */
2804 if (!buffer_mapped(bh))
2808 /* Ok, it's mapped. Make sure it's up-to-date */
2809 if (PageUptodate(page))
2810 set_buffer_uptodate(bh);
2812 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2814 ll_rw_block(READ, 1, &bh);
2816 /* Uhhuh. Read error. Complain and punt. */
2817 if (!buffer_uptodate(bh))
2821 zero_user(page, offset, length);
2822 mark_buffer_dirty(bh);
2827 page_cache_release(page);
2831 EXPORT_SYMBOL(block_truncate_page);
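/*
 * Example (illustrative sketch, not built): zeroing the partial tail block
 * when a filesystem shrinks an inode, before the now-unused blocks are freed.
 * The myfs_* names are hypothetical.
 */
#if 0
static int myfs_setsize(struct inode *inode, loff_t newsize)
{
	int err;

	/* Zero the part of the last block that lies beyond the new EOF. */
	err = block_truncate_page(inode->i_mapping, newsize, myfs_get_block);
	if (err)
		return err;
	truncate_setsize(inode, newsize);
	/* ...free the blocks beyond the new EOF here... */
	return 0;
}
#endif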
2834 * The generic ->writepage function for buffer-backed address_spaces
2835 * this form passes in the end_io handler used to finish the IO.
2837 int block_write_full_page_endio(struct page *page, get_block_t *get_block,
2838 struct writeback_control *wbc, bh_end_io_t *handler)
2840 struct inode * const inode = page->mapping->host;
2841 loff_t i_size = i_size_read(inode);
2842 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2845 /* Is the page fully inside i_size? */
2846 if (page->index < end_index)
2847 return __block_write_full_page(inode, page, get_block, wbc,
2850 /* Is the page fully outside i_size? (truncate in progress) */
2851 offset = i_size & (PAGE_CACHE_SIZE-1);
2852 if (page->index >= end_index+1 || !offset) {
2854 * The page may have dirty, unmapped buffers. For example,
2855 * they may have been added in ext3_writepage(). Make them
2856 * freeable here, so the page does not leak.
2858 do_invalidatepage(page, 0);
2860 return 0; /* don't care */
2864 * The page straddles i_size. It must be zeroed out on each and every
2865 * writepage invocation because it may be mmapped. "A file is mapped
2866 * in multiples of the page size. For a file that is not a multiple of
2867 * the page size, the remaining memory is zeroed when mapped, and
2868 * writes to that region are not written out to the file."
2870 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2871 return __block_write_full_page(inode, page, get_block, wbc, handler);
2873 EXPORT_SYMBOL(block_write_full_page_endio);
2876 * The generic ->writepage function for buffer-backed address_spaces
2878 int block_write_full_page(struct page *page, get_block_t *get_block,
2879 struct writeback_control *wbc)
2881 return block_write_full_page_endio(page, get_block, wbc,
2882 end_buffer_async_write);
2884 EXPORT_SYMBOL(block_write_full_page);
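/*
 * Example (illustrative sketch, not built): a conventional buffer_head based
 * address_space_operations set built from the generic helpers in this file.
 * The myfs_* names are hypothetical; myfs_write_begin would typically call
 * block_write_begin() with myfs_get_block.
 */
#if 0
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, myfs_get_block, wbc);
}

static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, myfs_get_block);
}

static const struct address_space_operations myfs_aops = {
	.readpage	= myfs_readpage,
	.writepage	= myfs_writepage,
	.write_begin	= myfs_write_begin,
	.write_end	= generic_write_end,
	.bmap		= myfs_bmap,
};
#endif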
2886 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2887 get_block_t *get_block)
2889 struct buffer_head tmp;
2890 struct inode *inode = mapping->host;
2893 tmp.b_size = 1 << inode->i_blkbits;
2894 get_block(inode, block, &tmp, 0);
2895 return tmp.b_blocknr;
2897 EXPORT_SYMBOL(generic_block_bmap);
2899 static void end_bio_bh_io_sync(struct bio *bio, int err)
2901 struct buffer_head *bh = bio->bi_private;
2903 if (err == -EOPNOTSUPP) {
2904 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2907 if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags)))
2908 set_bit(BH_Quiet, &bh->b_state);
2910 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2914 int submit_bh(int rw, struct buffer_head * bh)
2919 BUG_ON(!buffer_locked(bh));
2920 BUG_ON(!buffer_mapped(bh));
2921 BUG_ON(!bh->b_end_io);
2922 BUG_ON(buffer_delay(bh));
2923 BUG_ON(buffer_unwritten(bh));
2926 * Only clear out a write error when rewriting
2928 if (test_set_buffer_req(bh) && (rw & WRITE))
2929 clear_buffer_write_io_error(bh);
2932 * from here on down, it's all bio -- do the initial mapping,
2933 * submit_bio -> generic_make_request may further map this bio around
2935 bio = bio_alloc(GFP_NOIO, 1);
2937 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2938 bio->bi_bdev = bh->b_bdev;
2939 bio->bi_io_vec[0].bv_page = bh->b_page;
2940 bio->bi_io_vec[0].bv_len = bh->b_size;
2941 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2945 bio->bi_size = bh->b_size;
2947 bio->bi_end_io = end_bio_bh_io_sync;
2948 bio->bi_private = bh;
2951 submit_bio(rw, bio);
2953 if (bio_flagged(bio, BIO_EOPNOTSUPP))
2959 EXPORT_SYMBOL(submit_bh);
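/*
 * Example (illustrative sketch, not built): the usual pattern for reading a
 * single buffer synchronously with submit_bh(), as __bread() does
 * internally.  The caller is assumed to already hold a reference on @bh.
 */
#if 0
static int example_read_bh_sync(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}
	get_bh(bh);			/* reference for the completion handler */
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	return buffer_uptodate(bh) ? 0 : -EIO;
}
#endif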
2962 * ll_rw_block: low-level access to block devices (DEPRECATED)
2963 * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
2964 * @nr: number of &struct buffer_heads in the array
2965 * @bhs: array of pointers to &struct buffer_head
2967 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2968 * requests an I/O operation on them, either a %READ or a %WRITE. The third
2969 * %READA option is described in the documentation for generic_make_request()
2970 * which ll_rw_block() calls.
2972 * This function drops any buffer that it cannot get a lock on (with the
2973 * BH_Lock state bit), any buffer that appears to be clean when doing a write
2974 request, and any buffer that appears to be up-to-date when doing a read
2975 request. Further, it marks as clean any buffers that are processed for
2976 * writing (the buffer cache won't assume that they are actually clean
2977 * until the buffer gets unlocked).
2979 * ll_rw_block sets b_end_io to a simple completion handler that marks
2980 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2983 * All of the buffers must be for the same device, and their size must
2984 * be a multiple of the current approved size for the device.
2986 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2990 for (i = 0; i < nr; i++) {
2991 struct buffer_head *bh = bhs[i];
2993 if (!trylock_buffer(bh))
2996 if (test_clear_buffer_dirty(bh)) {
2997 bh->b_end_io = end_buffer_write_sync;
2999 submit_bh(WRITE, bh);
3003 if (!buffer_uptodate(bh)) {
3004 bh->b_end_io = end_buffer_read_sync;
3013 EXPORT_SYMBOL(ll_rw_block);
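/*
 * Example (illustrative sketch, not built): the common "kick off readahead on
 * a batch of metadata buffers, then read and wait only for the one we need"
 * pattern built on ll_rw_block(), similar to ext3/ext4 directory readahead.
 */
#if 0
static int example_read_with_readahead(struct buffer_head *needed,
					struct buffer_head *ra_bhs[], int nr_ra)
{
	/* Fire-and-forget readahead; busy or uptodate buffers are skipped. */
	ll_rw_block(READA, nr_ra, ra_bhs);

	if (!buffer_uptodate(needed)) {
		ll_rw_block(READ, 1, &needed);
		wait_on_buffer(needed);
		if (!buffer_uptodate(needed))
			return -EIO;
	}
	return 0;
}
#endif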
3015 void write_dirty_buffer(struct buffer_head *bh, int rw)
3018 if (!test_clear_buffer_dirty(bh)) {
3022 bh->b_end_io = end_buffer_write_sync;
3026 EXPORT_SYMBOL(write_dirty_buffer);
3029 * For a data-integrity writeout, we need to wait upon any in-progress I/O
3030 * and then start new I/O and then wait upon it. The caller must have a ref on
3033 int __sync_dirty_buffer(struct buffer_head *bh, int rw)
3037 WARN_ON(atomic_read(&bh->b_count) < 1);
3039 if (test_clear_buffer_dirty(bh)) {
3041 bh->b_end_io = end_buffer_write_sync;
3042 ret = submit_bh(rw, bh);
3044 if (!ret && !buffer_uptodate(bh))
3051 EXPORT_SYMBOL(__sync_dirty_buffer);
3053 int sync_dirty_buffer(struct buffer_head *bh)
3055 return __sync_dirty_buffer(bh, WRITE_SYNC);
3057 EXPORT_SYMBOL(sync_dirty_buffer);
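/*
 * Example (illustrative sketch, not built): synchronously writing a modified
 * metadata buffer and propagating any IO error, e.g. when updating an
 * on-disk superblock outside a journal.
 */
#if 0
static int example_flush_super(struct buffer_head *sb_bh)
{
	/* ...modify the on-disk superblock image in sb_bh->b_data... */
	mark_buffer_dirty(sb_bh);
	return sync_dirty_buffer(sb_bh);	/* 0 on success, -EIO on failure */
}
#endif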
3060 * try_to_free_buffers() checks if all the buffers on this particular page
3061 * are unused, and releases them if so.
3063 * Exclusion against try_to_free_buffers may be obtained by either
3064 * locking the page or by holding its mapping's private_lock.
3066 * If the page is dirty but all the buffers are clean then we need to
3067 * be sure to mark the page clean as well. This is because the page
3068 * may be against a block device, and a later reattachment of buffers
3069 * to a dirty page will set *all* buffers dirty, which would corrupt
3070 * filesystem data on the same device.
3072 * The same applies to regular filesystem pages: if all the buffers are
3073 * clean then we set the page clean and proceed. To do that, we require
3074 * total exclusion from __set_page_dirty_buffers(). That is obtained with
3077 * try_to_free_buffers() is non-blocking.
3079 static inline int buffer_busy(struct buffer_head *bh)
3081 return atomic_read(&bh->b_count) |
3082 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
3086 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
3088 struct buffer_head *head = page_buffers(page);
3089 struct buffer_head *bh;
3093 if (buffer_write_io_error(bh) && page->mapping)
3094 set_bit(AS_EIO, &page->mapping->flags);
3095 if (buffer_busy(bh))
3097 bh = bh->b_this_page;
3098 } while (bh != head);
3101 struct buffer_head *next = bh->b_this_page;
3103 if (bh->b_assoc_map)
3104 __remove_assoc_queue(bh);
3106 } while (bh != head);
3107 *buffers_to_free = head;
3108 __clear_page_buffers(page);
3114 int try_to_free_buffers(struct page *page)
3116 struct address_space * const mapping = page->mapping;
3117 struct buffer_head *buffers_to_free = NULL;
3120 BUG_ON(!PageLocked(page));
3121 if (PageWriteback(page))
3124 if (mapping == NULL) { /* can this still happen? */
3125 ret = drop_buffers(page, &buffers_to_free);
3129 spin_lock(&mapping->private_lock);
3130 ret = drop_buffers(page, &buffers_to_free);
3133 * If the filesystem writes its buffers by hand (eg ext3)
3134 * then we can have clean buffers against a dirty page. We
3135 * clean the page here; otherwise the VM will never notice
3136 * that the filesystem did any IO at all.
3138 * Also, during truncate, discard_buffer will have marked all
3139 * the page's buffers clean. We discover that here and clean
3142 * private_lock must be held over this entire operation in order
3143 * to synchronise against __set_page_dirty_buffers and prevent the
3144 * dirty bit from being lost.
3147 cancel_dirty_page(page, PAGE_CACHE_SIZE);
3148 spin_unlock(&mapping->private_lock);
3150 if (buffers_to_free) {
3151 struct buffer_head *bh = buffers_to_free;
3154 struct buffer_head *next = bh->b_this_page;
3155 free_buffer_head(bh);
3157 } while (bh != buffers_to_free);
3161 EXPORT_SYMBOL(try_to_free_buffers);
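/*
 * Example (illustrative sketch, not built): a minimal ->releasepage that
 * simply lets the buffer layer decide whether the page can be freed.
 * Filesystems with private per-page state (e.g. journalled buffers) need to
 * do more work before calling try_to_free_buffers().
 */
#if 0
static int example_releasepage(struct page *page, gfp_t gfp_mask)
{
	/* try_to_free_buffers() refuses safely if any buffer is still busy. */
	return try_to_free_buffers(page);
}
#endif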
3164 * There are no bdflush tunables left. But distributions are
3165 * still running obsolete flush daemons, so we terminate them here.
3167 * Use of bdflush() is deprecated and will be removed in a future kernel.
3168 * The `flush-X' kernel threads fully replace bdflush daemons and this call.
3170 SYSCALL_DEFINE2(bdflush, int, func, long, data)
3172 static int msg_count;
3174 if (!capable(CAP_SYS_ADMIN))
3177 if (msg_count < 5) {
3180 "warning: process `%s' used the obsolete bdflush"
3181 " system call\n", current->comm);
3182 printk(KERN_INFO "Fix your initscripts?\n");
3191 * Buffer-head allocation
3193 static struct kmem_cache *bh_cachep;
3196 * Once the number of bh's in the machine exceeds this level, we start
3197 * stripping them in writeback.
3199 static int max_buffer_heads;
3201 int buffer_heads_over_limit;
3203 struct bh_accounting {
3204 int nr; /* Number of live bh's */
3205 int ratelimit; /* Limit cacheline bouncing */
3208 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3210 static void recalc_bh_state(void)
3215 if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
3217 __this_cpu_write(bh_accounting.ratelimit, 0);
3218 for_each_online_cpu(i)
3219 tot += per_cpu(bh_accounting, i).nr;
3220 buffer_heads_over_limit = (tot > max_buffer_heads);
3223 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3225 struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
3227 INIT_LIST_HEAD(&ret->b_assoc_buffers);
3229 __this_cpu_inc(bh_accounting.nr);
3235 EXPORT_SYMBOL(alloc_buffer_head);
3237 void free_buffer_head(struct buffer_head *bh)
3239 BUG_ON(!list_empty(&bh->b_assoc_buffers));
3240 kmem_cache_free(bh_cachep, bh);
3242 __this_cpu_dec(bh_accounting.nr);
3246 EXPORT_SYMBOL(free_buffer_head);
3248 static void buffer_exit_cpu(int cpu)
3251 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3253 for (i = 0; i < BH_LRU_SIZE; i++) {
3257 this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
3258 per_cpu(bh_accounting, cpu).nr = 0;
3261 static int buffer_cpu_notify(struct notifier_block *self,
3262 unsigned long action, void *hcpu)
3264 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
3265 buffer_exit_cpu((unsigned long)hcpu);
3270 * bh_uptodate_or_lock - Test whether the buffer is uptodate
3271 * @bh: struct buffer_head
3273 * Returns true if the buffer is up-to-date, and false (with the
3274 * buffer locked) if not.
3276 int bh_uptodate_or_lock(struct buffer_head *bh)
3278 if (!buffer_uptodate(bh)) {
3280 if (!buffer_uptodate(bh))
3286 EXPORT_SYMBOL(bh_uptodate_or_lock);
3289 * bh_submit_read - Submit a locked buffer for reading
3290 * @bh: struct buffer_head
3292 * Returns zero on success and -EIO on error.
3294 int bh_submit_read(struct buffer_head *bh)
3296 BUG_ON(!buffer_locked(bh));
3298 if (buffer_uptodate(bh)) {
3304 bh->b_end_io = end_buffer_read_sync;
3305 submit_bh(READ, bh);
3307 if (buffer_uptodate(bh))
3311 EXPORT_SYMBOL(bh_submit_read);
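/*
 * Example (illustrative sketch, not built): the idiomatic pairing of the two
 * helpers above when walking on-disk metadata - skip the IO if the buffer is
 * already uptodate, otherwise read it using the lock that
 * bh_uptodate_or_lock() took for us.
 */
#if 0
static int example_read_metadata(struct buffer_head *bh)
{
	if (!bh_uptodate_or_lock(bh)) {
		/* bh is locked here; bh_submit_read() unlocks it when done. */
		int err = bh_submit_read(bh);
		if (err)
			return err;
	}
	/* ...parse bh->b_data... */
	return 0;
}
#endif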
3313 void __init buffer_init(void)
3317 bh_cachep = kmem_cache_create("buffer_head",
3318 sizeof(struct buffer_head), 0,
3319 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3324 * Limit the bh occupancy to 10% of ZONE_NORMAL
3326 nrpages = (nr_free_buffer_pages() * 10) / 100;
3327 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3328 hotcpu_notifier(buffer_cpu_notify, 0);