4 * Copyright (C) 1991, 1992, 2002 Linus Torvalds
8 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
10 * Removed a lot of unnecessary code and simplified things now that
11 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
13 * Speed up hash, lru, and free list operations. Use gfp() for allocating
14 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
16 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
18 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
21 #include <linux/kernel.h>
22 #include <linux/syscalls.h>
25 #include <linux/percpu.h>
26 #include <linux/slab.h>
27 #include <linux/capability.h>
28 #include <linux/blkdev.h>
29 #include <linux/file.h>
30 #include <linux/quotaops.h>
31 #include <linux/highmem.h>
32 #include <linux/module.h>
33 #include <linux/writeback.h>
34 #include <linux/hash.h>
35 #include <linux/suspend.h>
36 #include <linux/buffer_head.h>
37 #include <linux/task_io_accounting_ops.h>
38 #include <linux/bio.h>
39 #include <linux/notifier.h>
40 #include <linux/cpu.h>
41 #include <linux/bitops.h>
42 #include <linux/mpage.h>
43 #include <linux/bit_spinlock.h>
45 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
47 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
50 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
52 bh->b_end_io = handler;
53 bh->b_private = private;
55 EXPORT_SYMBOL(init_buffer);
57 static int sync_buffer(void *word)
59 struct block_device *bd;
60 struct buffer_head *bh
61 = container_of(word, struct buffer_head, b_state);
66 blk_run_address_space(bd->bd_inode->i_mapping);
71 void __lock_buffer(struct buffer_head *bh)
73 wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
74 TASK_UNINTERRUPTIBLE);
76 EXPORT_SYMBOL(__lock_buffer);
78 void unlock_buffer(struct buffer_head *bh)
80 clear_bit_unlock(BH_Lock, &bh->b_state);
81 smp_mb__after_clear_bit();
82 wake_up_bit(&bh->b_state, BH_Lock);
84 EXPORT_SYMBOL(unlock_buffer);
87 * Block until a buffer comes unlocked. This doesn't stop it
88 * from becoming locked again - you have to lock it yourself
89 * if you want to preserve its state.
91 void __wait_on_buffer(struct buffer_head * bh)
93 wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
95 EXPORT_SYMBOL(__wait_on_buffer);
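/*
 * Illustrative sketch (not part of the original file): the classic
 * submit-then-wait pattern that __lock_buffer()/__wait_on_buffer()
 * support.  "bh" is assumed to be a mapped buffer the caller already
 * holds a reference on; the helper reads it synchronously.
 */
static int example_read_buffer_sync(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}
	get_bh(bh);				/* reference for the completion handler */
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	wait_on_buffer(bh);			/* ends up in __wait_on_buffer() above */
	return buffer_uptodate(bh) ? 0 : -EIO;
}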
98 __clear_page_buffers(struct page *page)
100 ClearPagePrivate(page);
101 set_page_private(page, 0);
102 page_cache_release(page);
106 static int quiet_error(struct buffer_head *bh)
108 if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
114 static void buffer_io_error(struct buffer_head *bh)
116 char b[BDEVNAME_SIZE];
117 printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
118 bdevname(bh->b_bdev, b),
119 (unsigned long long)bh->b_blocknr);
123 * End-of-IO handler helper function which does not touch the bh after
125 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
126 * a race there is benign: unlock_buffer() only uses the bh's address for
127 * hashing after unlocking the buffer, so it doesn't actually touch the bh
130 static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
133 set_buffer_uptodate(bh);
135 /* This happens due to failed READA attempts. */
136 clear_buffer_uptodate(bh);
142 * Default synchronous end-of-IO handler. Just mark it up-to-date and
143 * unlock the buffer. This is what ll_rw_block uses too.
145 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
147 __end_buffer_read_notouch(bh, uptodate);
150 EXPORT_SYMBOL(end_buffer_read_sync);
152 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
154 char b[BDEVNAME_SIZE];
157 set_buffer_uptodate(bh);
159 if (!quiet_error(bh)) {
161 printk(KERN_WARNING "lost page write due to "
163 bdevname(bh->b_bdev, b));
165 set_buffer_write_io_error(bh);
166 clear_buffer_uptodate(bh);
171 EXPORT_SYMBOL(end_buffer_write_sync);
174 * Various filesystems appear to want __find_get_block to be non-blocking.
175 * But it's the page lock which protects the buffers. To get around this,
176 * we get exclusion from try_to_free_buffers with the blockdev mapping's
179 * Hack idea: for the blockdev mapping, private_lock contention
180 * may be quite high. This code could TryLock the page, and if that
181 * succeeds, there is no need to take private_lock. (But if
182 * private_lock is contended then so is mapping->tree_lock).
184 static struct buffer_head *
185 __find_get_block_slow(struct block_device *bdev, sector_t block)
187 struct inode *bd_inode = bdev->bd_inode;
188 struct address_space *bd_mapping = bd_inode->i_mapping;
189 struct buffer_head *ret = NULL;
191 struct buffer_head *bh;
192 struct buffer_head *head;
196 index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
197 page = find_get_page(bd_mapping, index);
201 spin_lock(&bd_mapping->private_lock);
202 if (!page_has_buffers(page))
204 head = page_buffers(page);
207 if (!buffer_mapped(bh))
209 else if (bh->b_blocknr == block) {
214 bh = bh->b_this_page;
215 } while (bh != head);
217 /* we might be here because some of the buffers on this page are
218 * not mapped. This is due to various races between
219 * file io on the block device and getblk. It gets dealt with
220 * elsewhere, don't buffer_error if we had some unmapped buffers
223 printk("__find_get_block_slow() failed. "
224 "block=%llu, b_blocknr=%llu\n",
225 (unsigned long long)block,
226 (unsigned long long)bh->b_blocknr);
227 printk("b_state=0x%08lx, b_size=%zu\n",
228 bh->b_state, bh->b_size);
229 printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
232 spin_unlock(&bd_mapping->private_lock);
233 page_cache_release(page);
238 /* If invalidate_buffers() will trash dirty buffers, it means some kind
239 of fs corruption is going on. Trashing dirty data always implies losing
240 information that was supposed to be just stored on the physical layer
243 Thus invalidate_buffers in general usage is not allowed to trash
244 dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
245 be preserved. These buffers are simply skipped.
247 We also skip buffers which are still in use. For example this can
248 happen if a userspace program is reading the block device.
250 NOTE: In the case where the user removed a removable-media disk even if
251 there's still dirty data not synced on disk (due to a bug in the device driver
252 or due to an error of the user), by not destroying the dirty buffers we could
253 generate corruption also on the next media inserted, thus a parameter is
254 necessary to handle this case in the most safe way possible (trying
255 to not corrupt also the new disk inserted with the data belonging to
256 the old now corrupted disk). Also for the ramdisk the natural thing
257 to do in order to release the ramdisk memory is to destroy dirty buffers.
259 These are two special cases. Normal usage implies that the device driver
260 issues a sync on the device (without waiting for I/O completion) and
261 then an invalidate_buffers call that doesn't trash dirty buffers.
263 For handling cache coherency with the blkdev pagecache the 'update' case
264 has been introduced. It is needed to re-read from disk any pinned
265 buffer. NOTE: re-reading from disk is destructive so we can do it only
266 when we assume nobody is changing the buffercache under our I/O and when
267 we think the disk contains more recent information than the buffercache.
268 The update == 1 pass marks the buffers we need to update, the update == 2
269 pass does the actual I/O. */
270 void invalidate_bdev(struct block_device *bdev)
272 struct address_space *mapping = bdev->bd_inode->i_mapping;
274 if (mapping->nrpages == 0)
277 invalidate_bh_lrus();
278 lru_add_drain_all(); /* make sure all lru add caches are flushed */
279 invalidate_mapping_pages(mapping, 0, -1);
281 EXPORT_SYMBOL(invalidate_bdev);
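/*
 * Illustrative sketch (not part of the original file): the media-change
 * pattern described in the comment above invalidate_bdev() - write dirty
 * data back to the old medium first, then invalidate the now-stale clean
 * cache.  Using sync_blockdev() here also waits for the I/O, which is
 * stricter than the comment strictly requires.
 */
static int example_media_change(struct block_device *bdev)
{
	int err;

	err = sync_blockdev(bdev);	/* flush dirty pagecache for the old medium */
	invalidate_bdev(bdev);		/* drop clean, unused cached pages */
	return err;
}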
284 * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
286 static void free_more_memory(void)
291 wakeup_flusher_threads(1024);
294 for_each_online_node(nid) {
295 (void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
296 gfp_zone(GFP_NOFS), NULL,
299 try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
305 * I/O completion handler for block_read_full_page() - pages
306 * which come unlocked at the end of I/O.
308 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
311 struct buffer_head *first;
312 struct buffer_head *tmp;
314 int page_uptodate = 1;
316 BUG_ON(!buffer_async_read(bh));
320 set_buffer_uptodate(bh);
322 clear_buffer_uptodate(bh);
323 if (!quiet_error(bh))
329 * Be _very_ careful from here on. Bad things can happen if
330 * two buffer heads end IO at almost the same time and both
331 * decide that the page is now completely done.
333 first = page_buffers(page);
334 local_irq_save(flags);
335 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
336 clear_buffer_async_read(bh);
340 if (!buffer_uptodate(tmp))
342 if (buffer_async_read(tmp)) {
343 BUG_ON(!buffer_locked(tmp));
346 tmp = tmp->b_this_page;
348 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
349 local_irq_restore(flags);
352 * If none of the buffers had errors and they are all
353 * uptodate then we can set the page uptodate.
355 if (page_uptodate && !PageError(page))
356 SetPageUptodate(page);
361 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
362 local_irq_restore(flags);
367 * Completion handler for block_write_full_page() - pages which are unlocked
368 * during I/O, and which have PageWriteback cleared upon I/O completion.
370 void end_buffer_async_write(struct buffer_head *bh, int uptodate)
372 char b[BDEVNAME_SIZE];
374 struct buffer_head *first;
375 struct buffer_head *tmp;
378 BUG_ON(!buffer_async_write(bh));
382 set_buffer_uptodate(bh);
384 if (!quiet_error(bh)) {
386 printk(KERN_WARNING "lost page write due to "
388 bdevname(bh->b_bdev, b));
390 set_bit(AS_EIO, &page->mapping->flags);
391 set_buffer_write_io_error(bh);
392 clear_buffer_uptodate(bh);
396 first = page_buffers(page);
397 local_irq_save(flags);
398 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
400 clear_buffer_async_write(bh);
402 tmp = bh->b_this_page;
404 if (buffer_async_write(tmp)) {
405 BUG_ON(!buffer_locked(tmp));
408 tmp = tmp->b_this_page;
410 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
411 local_irq_restore(flags);
412 end_page_writeback(page);
416 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
417 local_irq_restore(flags);
420 EXPORT_SYMBOL(end_buffer_async_write);
423 * If a page's buffers are under async read-in (end_buffer_async_read
424 * completion) then there is a possibility that another thread of
425 * control could lock one of the buffers after it has completed
426 * but while some of the other buffers have not completed. This
427 * locked buffer would confuse end_buffer_async_read() into not unlocking
428 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
429 * that this buffer is not under async I/O.
431 * The page comes unlocked when it has no locked buffer_async buffers
434 * PageLocked prevents anyone from starting new async I/O reads against any of
437 * PageWriteback is used to prevent simultaneous writeout of the same
440 * PageLocked prevents anyone from starting writeback of a page which is
441 * under read I/O (PageWriteback is only ever set against a locked page).
443 static void mark_buffer_async_read(struct buffer_head *bh)
445 bh->b_end_io = end_buffer_async_read;
446 set_buffer_async_read(bh);
449 static void mark_buffer_async_write_endio(struct buffer_head *bh,
450 bh_end_io_t *handler)
452 bh->b_end_io = handler;
453 set_buffer_async_write(bh);
456 void mark_buffer_async_write(struct buffer_head *bh)
458 mark_buffer_async_write_endio(bh, end_buffer_async_write);
460 EXPORT_SYMBOL(mark_buffer_async_write);
464 * fs/buffer.c contains helper functions for buffer-backed address space's
465 * fsync functions. A common requirement for buffer-based filesystems is
466 * that certain data from the backing blockdev needs to be written out for
467 * a successful fsync(). For example, ext2 indirect blocks need to be
468 * written back and waited upon before fsync() returns.
470 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
471 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
472 * management of a list of dependent buffers at ->i_mapping->private_list.
474 * Locking is a little subtle: try_to_free_buffers() will remove buffers
475 * from their controlling inode's queue when they are being freed. But
476 * try_to_free_buffers() will be operating against the *blockdev* mapping
477 * at the time, not against the S_ISREG file which depends on those buffers.
478 * So the locking for private_list is via the private_lock in the address_space
479 * which backs the buffers. Which is different from the address_space
480 * against which the buffers are listed. So for a particular address_space,
481 * mapping->private_lock does *not* protect mapping->private_list! In fact,
482 * mapping->private_list will always be protected by the backing blockdev's
485 * Which introduces a requirement: all buffers on an address_space's
486 * ->private_list must be from the same address_space: the blockdev's.
488 * address_spaces which do not place buffers at ->private_list via these
489 * utility functions are free to use private_lock and private_list for
490 * whatever they want. The only requirement is that list_empty(private_list)
491 * be true at clear_inode() time.
493 * FIXME: clear_inode should not call invalidate_inode_buffers(). The
494 * filesystems should do that. invalidate_inode_buffers() should just go
495 * BUG_ON(!list_empty).
497 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
498 * take an address_space, not an inode. And it should be called
499 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
502 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
503 * list if it is already on a list. Because if the buffer is on a list,
504 * it *must* already be on the right one. If not, the filesystem is being
505 * silly. This will save a ton of locking. But first we have to ensure
506 * that buffers are taken *off* the old inode's list when they are freed
507 * (presumably in truncate). That requires careful auditing of all
508 * filesystems (do it inside bforget()). It could also be done by bringing
513 * The buffer's backing address_space's private_lock must be held
515 static void __remove_assoc_queue(struct buffer_head *bh)
517 list_del_init(&bh->b_assoc_buffers);
518 WARN_ON(!bh->b_assoc_map);
519 if (buffer_write_io_error(bh))
520 set_bit(AS_EIO, &bh->b_assoc_map->flags);
521 bh->b_assoc_map = NULL;
524 int inode_has_buffers(struct inode *inode)
526 return !list_empty(&inode->i_data.private_list);
530 * osync is designed to support O_SYNC io. It waits synchronously for
531 * all already-submitted IO to complete, but does not queue any new
532 * writes to the disk.
534 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
535 * you dirty the buffers, and then use osync_inode_buffers to wait for
536 * completion. Any other dirty buffers which are not yet queued for
537 * write will not be flushed to disk by the osync.
539 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
541 struct buffer_head *bh;
547 list_for_each_prev(p, list) {
549 if (buffer_locked(bh)) {
553 if (!buffer_uptodate(bh))
564 static void do_thaw_one(struct super_block *sb, void *unused)
566 char b[BDEVNAME_SIZE];
567 while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
568 printk(KERN_WARNING "Emergency Thaw on %s\n",
569 bdevname(sb->s_bdev, b));
572 static void do_thaw_all(struct work_struct *work)
574 iterate_supers(do_thaw_one, NULL);
576 printk(KERN_WARNING "Emergency Thaw complete\n");
580 * emergency_thaw_all -- forcibly thaw every frozen filesystem
582 * Used for emergency unfreeze of all filesystems via SysRq
584 void emergency_thaw_all(void)
586 struct work_struct *work;
588 work = kmalloc(sizeof(*work), GFP_ATOMIC);
590 INIT_WORK(work, do_thaw_all);
596 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
597 * @mapping: the mapping which wants those buffers written
599 * Starts I/O against the buffers at mapping->private_list, and waits upon
602 * Basically, this is a convenience function for fsync().
603 * @mapping is a file or directory which needs those buffers to be written for
604 * a successful fsync().
606 int sync_mapping_buffers(struct address_space *mapping)
608 struct address_space *buffer_mapping = mapping->assoc_mapping;
610 if (buffer_mapping == NULL || list_empty(&mapping->private_list))
613 return fsync_buffers_list(&buffer_mapping->private_lock,
614 &mapping->private_list);
616 EXPORT_SYMBOL(sync_mapping_buffers);
619 * Called when we've recently written block `bblock', and it is known that
620 * `bblock' was for a buffer_boundary() buffer. This means that the block at
621 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
622 * dirty, schedule it for IO. So that indirects merge nicely with their data.
624 void write_boundary_block(struct block_device *bdev,
625 sector_t bblock, unsigned blocksize)
627 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
629 if (buffer_dirty(bh))
630 ll_rw_block(WRITE, 1, &bh);
635 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
637 struct address_space *mapping = inode->i_mapping;
638 struct address_space *buffer_mapping = bh->b_page->mapping;
640 mark_buffer_dirty(bh);
641 if (!mapping->assoc_mapping) {
642 mapping->assoc_mapping = buffer_mapping;
644 BUG_ON(mapping->assoc_mapping != buffer_mapping);
646 if (!bh->b_assoc_map) {
647 spin_lock(&buffer_mapping->private_lock);
648 list_move_tail(&bh->b_assoc_buffers,
649 &mapping->private_list);
650 bh->b_assoc_map = mapping;
651 spin_unlock(&buffer_mapping->private_lock);
654 EXPORT_SYMBOL(mark_buffer_dirty_inode);
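/*
 * Illustrative sketch (not part of the original file): how a buffer-backed
 * filesystem typically combines the two helpers above.  "examplefs" is
 * hypothetical; the pattern is to dirty a metadata buffer against the
 * owning inode, and later, at fsync() time, write out and wait on the
 * inode's ->private_list via sync_mapping_buffers().
 */
static int examplefs_dirty_and_sync_metadata(struct inode *inode,
					     struct buffer_head *bh)
{
	/* Queue bh on inode->i_mapping->private_list and mark it dirty. */
	mark_buffer_dirty_inode(bh, inode);

	/* What ->fsync() would do: write out and wait on those buffers. */
	return sync_mapping_buffers(inode->i_mapping);
}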
657 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
660 * If warn is true, then emit a warning if the page is not uptodate and has
661 * not been truncated.
663 static void __set_page_dirty(struct page *page,
664 struct address_space *mapping, int warn)
666 spin_lock_irq(&mapping->tree_lock);
667 if (page->mapping) { /* Race with truncate? */
668 WARN_ON_ONCE(warn && !PageUptodate(page));
669 account_page_dirtied(page, mapping);
670 radix_tree_tag_set(&mapping->page_tree,
671 page_index(page), PAGECACHE_TAG_DIRTY);
673 spin_unlock_irq(&mapping->tree_lock);
674 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
678 * Add a page to the dirty page list.
680 * It is a sad fact of life that this function is called from several places
681 * deeply under spinlocking. It may not sleep.
683 * If the page has buffers, the uptodate buffers are set dirty, to preserve
684 * dirty-state coherency between the page and the buffers. If the page does
685 * not have buffers then when they are later attached they will all be set
688 * The buffers are dirtied before the page is dirtied. There's a small race
689 * window in which a writepage caller may see the page cleanness but not the
690 * buffer dirtiness. That's fine. If this code were to set the page dirty
691 * before the buffers, a concurrent writepage caller could clear the page dirty
692 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
693 * page on the dirty page list.
695 * We use private_lock to lock against try_to_free_buffers while using the
696 * page's buffer list. Also use this to protect against clean buffers being
697 * added to the page after it was set dirty.
699 * FIXME: may need to call ->reservepage here as well. That's rather up to the
700 * address_space though.
702 int __set_page_dirty_buffers(struct page *page)
705 struct address_space *mapping = page_mapping(page);
707 if (unlikely(!mapping))
708 return !TestSetPageDirty(page);
710 spin_lock(&mapping->private_lock);
711 if (page_has_buffers(page)) {
712 struct buffer_head *head = page_buffers(page);
713 struct buffer_head *bh = head;
716 set_buffer_dirty(bh);
717 bh = bh->b_this_page;
718 } while (bh != head);
720 newly_dirty = !TestSetPageDirty(page);
721 spin_unlock(&mapping->private_lock);
724 __set_page_dirty(page, mapping, 1);
727 EXPORT_SYMBOL(__set_page_dirty_buffers);
730 * Write out and wait upon a list of buffers.
732 * We have conflicting pressures: we want to make sure that all
733 * initially dirty buffers get waited on, but that any subsequently
734 * dirtied buffers don't. After all, we don't want fsync to last
735 * forever if somebody is actively writing to the file.
737 * Do this in two main stages: first we copy dirty buffers to a
738 * temporary inode list, queueing the writes as we go. Then we clean
739 * up, waiting for those writes to complete.
741 * During this second stage, any subsequent updates to the file may end
742 * up refiling the buffer on the original inode's dirty list again, so
743 * there is a chance we will end up with a buffer queued for write but
744 * not yet completed on that list. So, as a final cleanup we go through
745 * the osync code to catch these locked, dirty buffers without requeuing
746 * any newly dirty buffers for write.
748 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
750 struct buffer_head *bh;
751 struct list_head tmp;
752 struct address_space *mapping, *prev_mapping = NULL;
755 INIT_LIST_HEAD(&tmp);
758 while (!list_empty(list)) {
759 bh = BH_ENTRY(list->next);
760 mapping = bh->b_assoc_map;
761 __remove_assoc_queue(bh);
762 /* Avoid race with mark_buffer_dirty_inode() which does
763 * a lockless check and we rely on seeing the dirty bit */
765 if (buffer_dirty(bh) || buffer_locked(bh)) {
766 list_add(&bh->b_assoc_buffers, &tmp);
767 bh->b_assoc_map = mapping;
768 if (buffer_dirty(bh)) {
772 * Ensure any pending I/O completes so that
773 * write_dirty_buffer() actually writes the
774 * current contents - it is a noop if I/O is
775 * still in flight on potentially older
778 write_dirty_buffer(bh, WRITE_SYNC_PLUG);
781 * Kick off IO for the previous mapping. Note
782 * that we will not run the very last mapping,
783 * wait_on_buffer() will do that for us
784 * through sync_buffer().
786 if (prev_mapping && prev_mapping != mapping)
787 blk_run_address_space(prev_mapping);
788 prev_mapping = mapping;
796 while (!list_empty(&tmp)) {
797 bh = BH_ENTRY(tmp.prev);
799 mapping = bh->b_assoc_map;
800 __remove_assoc_queue(bh);
801 /* Avoid race with mark_buffer_dirty_inode() which does
802 * a lockless check and we rely on seeing the dirty bit */
804 if (buffer_dirty(bh)) {
805 list_add(&bh->b_assoc_buffers,
806 &mapping->private_list);
807 bh->b_assoc_map = mapping;
811 if (!buffer_uptodate(bh))
818 err2 = osync_buffers_list(lock, list);
826 * Invalidate any and all dirty buffers on a given inode. We are
827 * probably unmounting the fs, but that doesn't mean we have already
828 * done a sync(). Just drop the buffers from the inode list.
830 * NOTE: we take the inode's blockdev's mapping's private_lock. Which
831 * assumes that all the buffers are against the blockdev. Not true
834 void invalidate_inode_buffers(struct inode *inode)
836 if (inode_has_buffers(inode)) {
837 struct address_space *mapping = &inode->i_data;
838 struct list_head *list = &mapping->private_list;
839 struct address_space *buffer_mapping = mapping->assoc_mapping;
841 spin_lock(&buffer_mapping->private_lock);
842 while (!list_empty(list))
843 __remove_assoc_queue(BH_ENTRY(list->next));
844 spin_unlock(&buffer_mapping->private_lock);
847 EXPORT_SYMBOL(invalidate_inode_buffers);
850 * Remove any clean buffers from the inode's buffer list. This is called
851 * when we're trying to free the inode itself. Those buffers can pin it.
853 * Returns true if all buffers were removed.
855 int remove_inode_buffers(struct inode *inode)
859 if (inode_has_buffers(inode)) {
860 struct address_space *mapping = &inode->i_data;
861 struct list_head *list = &mapping->private_list;
862 struct address_space *buffer_mapping = mapping->assoc_mapping;
864 spin_lock(&buffer_mapping->private_lock);
865 while (!list_empty(list)) {
866 struct buffer_head *bh = BH_ENTRY(list->next);
867 if (buffer_dirty(bh)) {
871 __remove_assoc_queue(bh);
873 spin_unlock(&buffer_mapping->private_lock);
879 * Create the appropriate buffers when given a page for the data area and
880 * the size of each buffer. Use the bh->b_this_page linked list to
881 * follow the buffers created. Return NULL if unable to create more
884 * The retry flag is used to differentiate async IO (paging, swapping)
885 * which may not fail from ordinary buffer allocations.
887 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
890 struct buffer_head *bh, *head;
896 while ((offset -= size) >= 0) {
897 bh = alloc_buffer_head(GFP_NOFS);
902 bh->b_this_page = head;
907 atomic_set(&bh->b_count, 0);
910 /* Link the buffer to its page */
911 set_bh_page(bh, page, offset);
913 init_buffer(bh, NULL, NULL);
917 * In case anything failed, we just free everything we got.
923 head = head->b_this_page;
924 free_buffer_head(bh);
929 * Return failure for non-async IO requests. Async IO requests
930 * are not allowed to fail, so we have to wait until buffer heads
931 * become available. But we don't want tasks sleeping with
932 * partially complete buffers, so all were released above.
937 /* We're _really_ low on memory. Now we just
938 * wait for old buffer heads to become free due to
939 * finishing IO. Since this is an async request and
940 * the reserve list is empty, we're sure there are
941 * async buffer heads in use.
946 EXPORT_SYMBOL_GPL(alloc_page_buffers);
949 link_dev_buffers(struct page *page, struct buffer_head *head)
951 struct buffer_head *bh, *tail;
956 bh = bh->b_this_page;
958 tail->b_this_page = head;
959 attach_page_buffers(page, head);
963 * Initialise the state of a blockdev page's buffers.
966 init_page_buffers(struct page *page, struct block_device *bdev,
967 sector_t block, int size)
969 struct buffer_head *head = page_buffers(page);
970 struct buffer_head *bh = head;
971 int uptodate = PageUptodate(page);
974 if (!buffer_mapped(bh)) {
975 init_buffer(bh, NULL, NULL);
977 bh->b_blocknr = block;
979 set_buffer_uptodate(bh);
980 set_buffer_mapped(bh);
983 bh = bh->b_this_page;
984 } while (bh != head);
988 * Create the page-cache page that contains the requested block.
990 * This is used purely for blockdev mappings.
993 grow_dev_page(struct block_device *bdev, sector_t block,
994 pgoff_t index, int size)
996 struct inode *inode = bdev->bd_inode;
998 struct buffer_head *bh;
1000 page = find_or_create_page(inode->i_mapping, index,
1001 (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
1005 BUG_ON(!PageLocked(page));
1007 if (page_has_buffers(page)) {
1008 bh = page_buffers(page);
1009 if (bh->b_size == size) {
1010 init_page_buffers(page, bdev, block, size);
1013 if (!try_to_free_buffers(page))
1018 * Allocate some buffers for this page
1020 bh = alloc_page_buffers(page, size, 0);
1025 * Link the page to the buffers and initialise them. Take the
1026 * lock to be atomic wrt __find_get_block(), which does not
1027 * run under the page lock.
1029 spin_lock(&inode->i_mapping->private_lock);
1030 link_dev_buffers(page, bh);
1031 init_page_buffers(page, bdev, block, size);
1032 spin_unlock(&inode->i_mapping->private_lock);
1038 page_cache_release(page);
1043 * Create buffers for the specified block device block's page. If
1044 * that page was dirty, the buffers are set dirty also.
1047 grow_buffers(struct block_device *bdev, sector_t block, int size)
1056 } while ((size << sizebits) < PAGE_SIZE);
1058 index = block >> sizebits;
1061 * Check for a block which wants to lie outside our maximum possible
1062 * pagecache index. (this comparison is done using sector_t types).
1064 if (unlikely(index != block >> sizebits)) {
1065 char b[BDEVNAME_SIZE];
1067 printk(KERN_ERR "%s: requested out-of-range block %llu for "
1069 __func__, (unsigned long long)block,
1073 block = index << sizebits;
1074 /* Create a page with the proper size buffers. */
1075 page = grow_dev_page(bdev, block, index, size);
1079 page_cache_release(page);
1083 static struct buffer_head *
1084 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1086 /* Size must be a multiple of the hard sector size */
1087 if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
1088 (size < 512 || size > PAGE_SIZE))) {
1089 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1091 printk(KERN_ERR "logical block size: %d\n",
1092 bdev_logical_block_size(bdev));
1099 struct buffer_head * bh;
1102 bh = __find_get_block(bdev, block, size);
1106 ret = grow_buffers(bdev, block, size);
1115 * The relationship between dirty buffers and dirty pages:
1117 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1118 * the page is tagged dirty in its radix tree.
1120 * At all times, the dirtiness of the buffers represents the dirtiness of
1121 * subsections of the page. If the page has buffers, the page dirty bit is
1122 * merely a hint about the true dirty state.
1124 * When a page is set dirty in its entirety, all its buffers are marked dirty
1125 * (if the page has buffers).
1127 * When a buffer is marked dirty, its page is dirtied, but the page's other
1130 * Also. When blockdev buffers are explicitly read with bread(), they
1131 * individually become uptodate. But their backing page remains not
1132 * uptodate - even if all of its buffers are uptodate. A subsequent
1133 * block_read_full_page() against that page will discover all the uptodate
1134 * buffers, will set the page uptodate and will perform no I/O.
1138 * mark_buffer_dirty - mark a buffer_head as needing writeout
1139 * @bh: the buffer_head to mark dirty
1141 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1142 * backing page dirty, then tag the page as dirty in its address_space's radix
1143 * tree and then attach the address_space's inode to its superblock's dirty
1146 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
1147 * mapping->tree_lock and the global inode_lock.
1149 void mark_buffer_dirty(struct buffer_head *bh)
1151 WARN_ON_ONCE(!buffer_uptodate(bh));
1154 * Very *carefully* optimize the it-is-already-dirty case.
1156 * Don't let the final "is it dirty" escape to before we
1157 * perhaps modified the buffer.
1159 if (buffer_dirty(bh)) {
1161 if (buffer_dirty(bh))
1165 if (!test_set_buffer_dirty(bh)) {
1166 struct page *page = bh->b_page;
1167 if (!TestSetPageDirty(page)) {
1168 struct address_space *mapping = page_mapping(page);
1170 __set_page_dirty(page, mapping, 0);
1174 EXPORT_SYMBOL(mark_buffer_dirty);
1177 * Decrement a buffer_head's reference count. If all buffers against a page
1178 * have zero reference count, are clean and unlocked, and if the page is clean
1179 * and unlocked then try_to_free_buffers() may strip the buffers from the page
1180 * in preparation for freeing it (sometimes, rarely, buffers are removed from
1181 * a page but it ends up not being freed, and buffers may later be reattached).
1183 void __brelse(struct buffer_head * buf)
1185 if (atomic_read(&buf->b_count)) {
1189 WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1191 EXPORT_SYMBOL(__brelse);
1194 * bforget() is like brelse(), except it discards any
1195 * potentially dirty data.
1197 void __bforget(struct buffer_head *bh)
1199 clear_buffer_dirty(bh);
1200 if (bh->b_assoc_map) {
1201 struct address_space *buffer_mapping = bh->b_page->mapping;
1203 spin_lock(&buffer_mapping->private_lock);
1204 list_del_init(&bh->b_assoc_buffers);
1205 bh->b_assoc_map = NULL;
1206 spin_unlock(&buffer_mapping->private_lock);
1210 EXPORT_SYMBOL(__bforget);
1212 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1215 if (buffer_uptodate(bh)) {
1220 bh->b_end_io = end_buffer_read_sync;
1221 submit_bh(READ, bh);
1223 if (buffer_uptodate(bh))
1231 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
1232 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
1233 * refcount elevated by one when they're in an LRU. A buffer can only appear
1234 * once in a particular CPU's LRU. A single buffer can be present in multiple
1235 * CPU's LRUs at the same time.
1237 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1238 * sb_find_get_block().
1240 * The LRUs themselves only need locking against invalidate_bh_lrus. We use
1241 * a local interrupt disable for that.
1244 #define BH_LRU_SIZE 8
1247 struct buffer_head *bhs[BH_LRU_SIZE];
1250 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1253 #define bh_lru_lock() local_irq_disable()
1254 #define bh_lru_unlock() local_irq_enable()
1256 #define bh_lru_lock() preempt_disable()
1257 #define bh_lru_unlock() preempt_enable()
1260 static inline void check_irqs_on(void)
1262 #ifdef irqs_disabled
1263 BUG_ON(irqs_disabled());
1268 * The LRU management algorithm is dopey-but-simple. Sorry.
1270 static void bh_lru_install(struct buffer_head *bh)
1272 struct buffer_head *evictee = NULL;
1276 if (__this_cpu_read(bh_lrus.bhs[0]) != bh) {
1277 struct buffer_head *bhs[BH_LRU_SIZE];
1283 for (in = 0; in < BH_LRU_SIZE; in++) {
1284 struct buffer_head *bh2 =
1285 __this_cpu_read(bh_lrus.bhs[in]);
1290 if (out >= BH_LRU_SIZE) {
1291 BUG_ON(evictee != NULL);
1298 while (out < BH_LRU_SIZE)
1300 memcpy(__this_cpu_ptr(&bh_lrus.bhs), bhs, sizeof(bhs));
1309 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
1311 static struct buffer_head *
1312 lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1314 struct buffer_head *ret = NULL;
1319 for (i = 0; i < BH_LRU_SIZE; i++) {
1320 struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);
1322 if (bh && bh->b_bdev == bdev &&
1323 bh->b_blocknr == block && bh->b_size == size) {
1326 __this_cpu_write(bh_lrus.bhs[i],
1327 __this_cpu_read(bh_lrus.bhs[i - 1]));
1330 __this_cpu_write(bh_lrus.bhs[0], bh);
1342 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
1343 * it in the LRU and mark it as accessed. If it is not present then return
1346 struct buffer_head *
1347 __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1349 struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1352 bh = __find_get_block_slow(bdev, block);
1360 EXPORT_SYMBOL(__find_get_block);
1363 * __getblk will locate (and, if necessary, create) the buffer_head
1364 * which corresponds to the passed block_device, block and size. The
1365 * returned buffer has its reference count incremented.
1367 * __getblk() cannot fail - it just keeps trying. If you pass it an
1368 * illegal block number, __getblk() will happily return a buffer_head
1369 * which represents the non-existent block. Very weird.
1371 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1372 * attempt is failing. FIXME, perhaps?
1374 struct buffer_head *
1375 __getblk(struct block_device *bdev, sector_t block, unsigned size)
1377 struct buffer_head *bh = __find_get_block(bdev, block, size);
1381 bh = __getblk_slow(bdev, block, size);
1384 EXPORT_SYMBOL(__getblk);
1387 * Do async read-ahead on a buffer.
1389 void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1391 struct buffer_head *bh = __getblk(bdev, block, size);
1393 ll_rw_block(READA, 1, &bh);
1397 EXPORT_SYMBOL(__breadahead);
1400 * __bread() - reads a specified block and returns the bh
1401 * @bdev: the block_device to read from
1402 * @block: number of block
1403 * @size: size (in bytes) to read
1405 * Reads a specified block, and returns buffer head that contains it.
1406 * It returns NULL if the block was unreadable.
1408 struct buffer_head *
1409 __bread(struct block_device *bdev, sector_t block, unsigned size)
1411 struct buffer_head *bh = __getblk(bdev, block, size);
1413 if (likely(bh) && !buffer_uptodate(bh))
1414 bh = __bread_slow(bh);
1417 EXPORT_SYMBOL(__bread);
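/*
 * Illustrative sketch (not part of the original file): the common
 * read-modify-dirty cycle for a metadata block, using only helpers from
 * this file.  "bdev", "blocknr" and "blocksize" are assumed to come from
 * a hypothetical caller; the "modification" here is just a memset.
 */
static int example_update_block(struct block_device *bdev, sector_t blocknr,
				unsigned blocksize)
{
	struct buffer_head *bh;

	bh = __bread(bdev, blocknr, blocksize);	/* read it in; NULL on I/O error */
	if (!bh)
		return -EIO;

	lock_buffer(bh);
	memset(bh->b_data, 0, bh->b_size);	/* modify the block's contents */
	unlock_buffer(bh);

	mark_buffer_dirty(bh);	/* dirties buffer, page, radix tree and inode */
	brelse(bh);		/* drop the reference returned by __bread() */
	return 0;
}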
1420 * invalidate_bh_lrus() is called rarely - but not only at unmount.
1421 * This doesn't race because it runs in each cpu either in irq
1422 * or with preempt disabled.
1424 static void invalidate_bh_lru(void *arg)
1426 struct bh_lru *b = &get_cpu_var(bh_lrus);
1429 for (i = 0; i < BH_LRU_SIZE; i++) {
1433 put_cpu_var(bh_lrus);
1436 void invalidate_bh_lrus(void)
1438 on_each_cpu(invalidate_bh_lru, NULL, 1);
1440 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
1442 void set_bh_page(struct buffer_head *bh,
1443 struct page *page, unsigned long offset)
1446 BUG_ON(offset >= PAGE_SIZE);
1447 if (PageHighMem(page))
1449 * This catches illegal uses and preserves the offset:
1451 bh->b_data = (char *)(0 + offset);
1453 bh->b_data = page_address(page) + offset;
1455 EXPORT_SYMBOL(set_bh_page);
1458 * Called when truncating a buffer on a page completely.
1460 static void discard_buffer(struct buffer_head * bh)
1463 clear_buffer_dirty(bh);
1465 clear_buffer_mapped(bh);
1466 clear_buffer_req(bh);
1467 clear_buffer_new(bh);
1468 clear_buffer_delay(bh);
1469 clear_buffer_unwritten(bh);
1474 * block_invalidatepage - invalidate part or all of a buffer-backed page
1476 * @page: the page which is affected
1477 * @offset: the index of the truncation point
1479 * block_invalidatepage() is called when all or part of the page has become
1480 * invalidated by a truncate operation.
1482 * block_invalidatepage() does not have to release all buffers, but it must
1483 * ensure that no dirty buffer is left outside @offset and that no I/O
1484 * is underway against any of the blocks which are outside the truncation
1485 * point. Because the caller is about to free (and possibly reuse) those
1488 void block_invalidatepage(struct page *page, unsigned long offset)
1490 struct buffer_head *head, *bh, *next;
1491 unsigned int curr_off = 0;
1493 BUG_ON(!PageLocked(page));
1494 if (!page_has_buffers(page))
1497 head = page_buffers(page);
1500 unsigned int next_off = curr_off + bh->b_size;
1501 next = bh->b_this_page;
1504 * is this block fully invalidated?
1506 if (offset <= curr_off)
1508 curr_off = next_off;
1510 } while (bh != head);
1513 * We release buffers only if the entire page is being invalidated.
1514 * The get_block cached value has been unconditionally invalidated,
1515 * so real IO is not possible anymore.
1518 try_to_release_page(page, 0);
1522 EXPORT_SYMBOL(block_invalidatepage);
1525 * We attach and possibly dirty the buffers atomically wrt
1526 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
1527 * is already excluded via the page lock.
1529 void create_empty_buffers(struct page *page,
1530 unsigned long blocksize, unsigned long b_state)
1532 struct buffer_head *bh, *head, *tail;
1534 head = alloc_page_buffers(page, blocksize, 1);
1537 bh->b_state |= b_state;
1539 bh = bh->b_this_page;
1541 tail->b_this_page = head;
1543 spin_lock(&page->mapping->private_lock);
1544 if (PageUptodate(page) || PageDirty(page)) {
1547 if (PageDirty(page))
1548 set_buffer_dirty(bh);
1549 if (PageUptodate(page))
1550 set_buffer_uptodate(bh);
1551 bh = bh->b_this_page;
1552 } while (bh != head);
1554 attach_page_buffers(page, head);
1555 spin_unlock(&page->mapping->private_lock);
1557 EXPORT_SYMBOL(create_empty_buffers);
1560 * We are taking a block for data and we don't want any output from any
1561 * buffer-cache aliases starting from return from that function and
1562 * until the moment when something will explicitly mark the buffer
1563 * dirty (hopefully that will not happen until we free that block ;-)
1564 * We don't even need to mark it not-uptodate - nobody can expect
1565 * anything from a newly allocated buffer anyway. We used to use
1566 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1567 * don't want to mark the alias unmapped, for example - it would confuse
1568 * anyone who might pick it with bread() afterwards...
1570 * Also.. Note that bforget() doesn't lock the buffer. So there can
1571 * be writeout I/O going on against recently-freed buffers. We don't
1572 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1573 * only if we really need to. That happens here.
1575 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1577 struct buffer_head *old_bh;
1581 old_bh = __find_get_block_slow(bdev, block);
1583 clear_buffer_dirty(old_bh);
1584 wait_on_buffer(old_bh);
1585 clear_buffer_req(old_bh);
1589 EXPORT_SYMBOL(unmap_underlying_metadata);
1592 * NOTE! All mapped/uptodate combinations are valid:
1594 *	Mapped	Uptodate	Meaning
1596 *	No	No		"unknown" - must do get_block()
1597 *	No	Yes		"hole" - zero-filled
1598 *	Yes	No		"allocated" - allocated on disk, not read in
1599 *	Yes	Yes		"valid" - allocated and up-to-date in memory.
1601 * "Dirty" is valid only with the last case (mapped+uptodate).
1605 * While block_write_full_page is writing back the dirty buffers under
1606 * the page lock, whoever dirtied the buffers may decide to clean them
1607 * again at any time. We handle that by only looking at the buffer
1608 * state inside lock_buffer().
1610 * If block_write_full_page() is called for regular writeback
1611 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1612 * locked buffer. This only can happen if someone has written the buffer
1613 * directly, with submit_bh(). At the address_space level PageWriteback
1614 * prevents this contention from occurring.
1616 * If block_write_full_page() is called with wbc->sync_mode ==
1617 * WB_SYNC_ALL, the writes are posted using WRITE_SYNC_PLUG; this
1618 * causes the writes to be flagged as synchronous writes, but the
1619 * block device queue will NOT be unplugged, since usually many pages
1620 * will be pushed out before the higher-level caller actually
1621 * waits for the writes to be completed. The various wait functions,
1622 * such as wait_on_writeback_range() will ultimately call sync_page()
1623 * which will ultimately call blk_run_backing_dev(), which will end up
1624 * unplugging the device queue.
1626 static int __block_write_full_page(struct inode *inode, struct page *page,
1627 get_block_t *get_block, struct writeback_control *wbc,
1628 bh_end_io_t *handler)
1632 sector_t last_block;
1633 struct buffer_head *bh, *head;
1634 const unsigned blocksize = 1 << inode->i_blkbits;
1635 int nr_underway = 0;
1636 int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
1637 WRITE_SYNC_PLUG : WRITE);
1639 BUG_ON(!PageLocked(page));
1641 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1643 if (!page_has_buffers(page)) {
1644 create_empty_buffers(page, blocksize,
1645 (1 << BH_Dirty)|(1 << BH_Uptodate));
1649 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1650 * here, and the (potentially unmapped) buffers may become dirty at
1651 * any time. If a buffer becomes dirty here after we've inspected it
1652 * then we just miss that fact, and the page stays dirty.
1654 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1655 * handle that here by just cleaning them.
1658 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1659 head = page_buffers(page);
1663 * Get all the dirty buffers mapped to disk addresses and
1664 * handle any aliases from the underlying blockdev's mapping.
1667 if (block > last_block) {
1669 * mapped buffers outside i_size will occur, because
1670 * this page can be outside i_size when there is a
1671 * truncate in progress.
1674 * The buffer was zeroed by block_write_full_page()
1676 clear_buffer_dirty(bh);
1677 set_buffer_uptodate(bh);
1678 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1680 WARN_ON(bh->b_size != blocksize);
1681 err = get_block(inode, block, bh, 1);
1684 clear_buffer_delay(bh);
1685 if (buffer_new(bh)) {
1686 /* blockdev mappings never come here */
1687 clear_buffer_new(bh);
1688 unmap_underlying_metadata(bh->b_bdev,
1692 bh = bh->b_this_page;
1694 } while (bh != head);
1697 if (!buffer_mapped(bh))
1700 * If it's a fully non-blocking write attempt and we cannot
1701 * lock the buffer then redirty the page. Note that this can
1702 * potentially cause a busy-wait loop from writeback threads
1703 * and kswapd activity, but those code paths have their own
1704 * higher-level throttling.
1706 if (wbc->sync_mode != WB_SYNC_NONE) {
1708 } else if (!trylock_buffer(bh)) {
1709 redirty_page_for_writepage(wbc, page);
1712 if (test_clear_buffer_dirty(bh)) {
1713 mark_buffer_async_write_endio(bh, handler);
1717 } while ((bh = bh->b_this_page) != head);
1720 * The page and its buffers are protected by PageWriteback(), so we can
1721 * drop the bh refcounts early.
1723 BUG_ON(PageWriteback(page));
1724 set_page_writeback(page);
1727 struct buffer_head *next = bh->b_this_page;
1728 if (buffer_async_write(bh)) {
1729 submit_bh(write_op, bh);
1733 } while (bh != head);
1738 if (nr_underway == 0) {
1740 * The page was marked dirty, but the buffers were
1741 * clean. Someone wrote them back by hand with
1742 * ll_rw_block/submit_bh. A rare case.
1744 end_page_writeback(page);
1747 * The page and buffer_heads can be released at any time from
1755 * ENOSPC, or some other error. We may already have added some
1756 * blocks to the file, so we need to write these out to avoid
1757 * exposing stale data.
1758 * The page is currently locked and not marked for writeback
1761 /* Recovery: lock and submit the mapped buffers */
1763 if (buffer_mapped(bh) && buffer_dirty(bh) &&
1764 !buffer_delay(bh)) {
1766 mark_buffer_async_write_endio(bh, handler);
1769 * The buffer may have been set dirty during
1770 * attachment to a dirty page.
1772 clear_buffer_dirty(bh);
1774 } while ((bh = bh->b_this_page) != head);
1776 BUG_ON(PageWriteback(page));
1777 mapping_set_error(page->mapping, err);
1778 set_page_writeback(page);
1780 struct buffer_head *next = bh->b_this_page;
1781 if (buffer_async_write(bh)) {
1782 clear_buffer_dirty(bh);
1783 submit_bh(write_op, bh);
1787 } while (bh != head);
1793 * If a page has any new buffers, zero them out here, and mark them uptodate
1794 * and dirty so they'll be written out (in order to prevent uninitialised
1795 * block data from leaking). And clear the new bit.
1797 void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1799 unsigned int block_start, block_end;
1800 struct buffer_head *head, *bh;
1802 BUG_ON(!PageLocked(page));
1803 if (!page_has_buffers(page))
1806 bh = head = page_buffers(page);
1809 block_end = block_start + bh->b_size;
1811 if (buffer_new(bh)) {
1812 if (block_end > from && block_start < to) {
1813 if (!PageUptodate(page)) {
1814 unsigned start, size;
1816 start = max(from, block_start);
1817 size = min(to, block_end) - start;
1819 zero_user(page, start, size);
1820 set_buffer_uptodate(bh);
1823 clear_buffer_new(bh);
1824 mark_buffer_dirty(bh);
1828 block_start = block_end;
1829 bh = bh->b_this_page;
1830 } while (bh != head);
1832 EXPORT_SYMBOL(page_zero_new_buffers);
1834 int __block_write_begin(struct page *page, loff_t pos, unsigned len,
1835 get_block_t *get_block)
1837 unsigned from = pos & (PAGE_CACHE_SIZE - 1);
1838 unsigned to = from + len;
1839 struct inode *inode = page->mapping->host;
1840 unsigned block_start, block_end;
1843 unsigned blocksize, bbits;
1844 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1846 BUG_ON(!PageLocked(page));
1847 BUG_ON(from > PAGE_CACHE_SIZE);
1848 BUG_ON(to > PAGE_CACHE_SIZE);
1851 blocksize = 1 << inode->i_blkbits;
1852 if (!page_has_buffers(page))
1853 create_empty_buffers(page, blocksize, 0);
1854 head = page_buffers(page);
1856 bbits = inode->i_blkbits;
1857 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1859 for(bh = head, block_start = 0; bh != head || !block_start;
1860 block++, block_start=block_end, bh = bh->b_this_page) {
1861 block_end = block_start + blocksize;
1862 if (block_end <= from || block_start >= to) {
1863 if (PageUptodate(page)) {
1864 if (!buffer_uptodate(bh))
1865 set_buffer_uptodate(bh);
1870 clear_buffer_new(bh);
1871 if (!buffer_mapped(bh)) {
1872 WARN_ON(bh->b_size != blocksize);
1873 err = get_block(inode, block, bh, 1);
1876 if (buffer_new(bh)) {
1877 unmap_underlying_metadata(bh->b_bdev,
1879 if (PageUptodate(page)) {
1880 clear_buffer_new(bh);
1881 set_buffer_uptodate(bh);
1882 mark_buffer_dirty(bh);
1885 if (block_end > to || block_start < from)
1886 zero_user_segments(page,
1892 if (PageUptodate(page)) {
1893 if (!buffer_uptodate(bh))
1894 set_buffer_uptodate(bh);
1897 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1898 !buffer_unwritten(bh) &&
1899 (block_start < from || block_end > to)) {
1900 ll_rw_block(READ, 1, &bh);
1905 * If we issued read requests - let them complete.
1907 while(wait_bh > wait) {
1908 wait_on_buffer(*--wait_bh);
1909 if (!buffer_uptodate(*wait_bh))
1912 if (unlikely(err)) {
1913 page_zero_new_buffers(page, from, to);
1914 ClearPageUptodate(page);
1918 EXPORT_SYMBOL(__block_write_begin);
1920 static int __block_commit_write(struct inode *inode, struct page *page,
1921 unsigned from, unsigned to)
1923 unsigned block_start, block_end;
1926 struct buffer_head *bh, *head;
1928 blocksize = 1 << inode->i_blkbits;
1930 for(bh = head = page_buffers(page), block_start = 0;
1931 bh != head || !block_start;
1932 block_start=block_end, bh = bh->b_this_page) {
1933 block_end = block_start + blocksize;
1934 if (block_end <= from || block_start >= to) {
1935 if (!buffer_uptodate(bh))
1938 set_buffer_uptodate(bh);
1939 mark_buffer_dirty(bh);
1941 clear_buffer_new(bh);
1945 * If this is a partial write which happened to make all buffers
1946 * uptodate then we can optimize away a bogus readpage() for
1947 * the next read(). Here we 'discover' whether the page went
1948 * uptodate as a result of this (potentially partial) write.
1951 SetPageUptodate(page);
1956 * block_write_begin takes care of the basic task of block allocation and
1957 * bringing partial write blocks uptodate first.
1959 * The filesystem needs to handle block truncation upon failure.
1961 int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
1962 unsigned flags, struct page **pagep, get_block_t *get_block)
1964 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
1968 page = grab_cache_page_write_begin(mapping, index, flags);
1972 status = __block_write_begin(page, pos, len, get_block);
1973 if (unlikely(status)) {
1975 page_cache_release(page);
1982 EXPORT_SYMBOL(block_write_begin);
1984 int block_write_end(struct file *file, struct address_space *mapping,
1985 loff_t pos, unsigned len, unsigned copied,
1986 struct page *page, void *fsdata)
1988 struct inode *inode = mapping->host;
1991 start = pos & (PAGE_CACHE_SIZE - 1);
1993 if (unlikely(copied < len)) {
1995 * The buffers that were written will now be uptodate, so we
1996 * don't have to worry about a readpage reading them and
1997 * overwriting a partial write. However if we have encountered
1998 * a short write and only partially written into a buffer, it
1999 * will not be marked uptodate, so a readpage might come in and
2000 * destroy our partial write.
2002 * Do the simplest thing, and just treat any short write to a
2003 * non uptodate page as a zero-length write, and force the
2004 * caller to redo the whole thing.
2006 if (!PageUptodate(page))
2009 page_zero_new_buffers(page, start+copied, start+len);
2011 flush_dcache_page(page);
2013 /* This could be a short (even 0-length) commit */
2014 __block_commit_write(inode, page, start, start+copied);
2018 EXPORT_SYMBOL(block_write_end);
2020 int generic_write_end(struct file *file, struct address_space *mapping,
2021 loff_t pos, unsigned len, unsigned copied,
2022 struct page *page, void *fsdata)
2024 struct inode *inode = mapping->host;
2025 int i_size_changed = 0;
2027 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2030 * No need to use i_size_read() here, the i_size
2031 * cannot change under us because we hold i_mutex.
2033 * But it's important to update i_size while still holding page lock:
2034 * page writeout could otherwise come in and zero beyond i_size.
2036 if (pos+copied > inode->i_size) {
2037 i_size_write(inode, pos+copied);
2042 page_cache_release(page);
2045 * Don't mark the inode dirty under page lock. First, it unnecessarily
2046 * makes the holding time of page lock longer. Second, it forces lock
2047 * ordering of page lock and transaction start for journaling
2051 mark_inode_dirty(inode);
2055 EXPORT_SYMBOL(generic_write_end);
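/*
 * Illustrative sketch (not part of the original file): a minimal
 * ->write_begin() built on block_write_begin(), for a hypothetical
 * "examplefs" whose (equally hypothetical) get_block maps file block N
 * straight to device block N.  Such a filesystem could use
 * generic_write_end() directly as its ->write_end().
 */
static int examplefs_get_block(struct inode *inode, sector_t iblock,
			       struct buffer_head *bh_result, int create)
{
	map_bh(bh_result, inode->i_sb, iblock);	/* trivial 1:1 mapping */
	return 0;
}

static int examplefs_write_begin(struct file *file,
				 struct address_space *mapping, loff_t pos,
				 unsigned len, unsigned flags,
				 struct page **pagep, void **fsdata)
{
	return block_write_begin(mapping, pos, len, flags, pagep,
				 examplefs_get_block);
}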
2058 * block_is_partially_uptodate checks whether buffers within a page are
2061 * Returns true if all buffers which correspond to a file portion
2062 * we want to read are uptodate.
2064 int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
2067 struct inode *inode = page->mapping->host;
2068 unsigned block_start, block_end, blocksize;
2070 struct buffer_head *bh, *head;
2073 if (!page_has_buffers(page))
2076 blocksize = 1 << inode->i_blkbits;
2077 to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
2079 if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
2082 head = page_buffers(page);
2086 block_end = block_start + blocksize;
2087 if (block_end > from && block_start < to) {
2088 if (!buffer_uptodate(bh)) {
2092 if (block_end >= to)
2095 block_start = block_end;
2096 bh = bh->b_this_page;
2097 } while (bh != head);
2101 EXPORT_SYMBOL(block_is_partially_uptodate);
2104 * Generic "read page" function for block devices that have the normal
2105 * get_block functionality. This is most of the block device filesystems.
2106 * Reads the page asynchronously --- the unlock_buffer() and
2107 * set/clear_buffer_uptodate() functions propagate buffer state into the
2108 * page struct once IO has completed.
2110 int block_read_full_page(struct page *page, get_block_t *get_block)
2112 struct inode *inode = page->mapping->host;
2113 sector_t iblock, lblock;
2114 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2115 unsigned int blocksize;
2117 int fully_mapped = 1;
2119 BUG_ON(!PageLocked(page));
2120 blocksize = 1 << inode->i_blkbits;
2121 if (!page_has_buffers(page))
2122 create_empty_buffers(page, blocksize, 0);
2123 head = page_buffers(page);
2125 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2126 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2132 if (buffer_uptodate(bh))
2135 if (!buffer_mapped(bh)) {
2139 if (iblock < lblock) {
2140 WARN_ON(bh->b_size != blocksize);
2141 err = get_block(inode, iblock, bh, 0);
2145 if (!buffer_mapped(bh)) {
2146 zero_user(page, i * blocksize, blocksize);
2148 set_buffer_uptodate(bh);
2152 * get_block() might have updated the buffer
2155 if (buffer_uptodate(bh))
2159 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2162 SetPageMappedToDisk(page);
2166 * All buffers are uptodate - we can set the page uptodate
2167 * as well. But not if get_block() returned an error.
2169 if (!PageError(page))
2170 SetPageUptodate(page);
2175 /* Stage two: lock the buffers */
2176 for (i = 0; i < nr; i++) {
2179 mark_buffer_async_read(bh);
2183 * Stage 3: start the IO. Check for uptodateness
2184 * inside the buffer lock in case another process reading
2185 * the underlying blockdev brought it uptodate (the sct fix).
2187 for (i = 0; i < nr; i++) {
2189 if (buffer_uptodate(bh))
2190 end_buffer_async_read(bh, 1);
2192 submit_bh(READ, bh);
2196 EXPORT_SYMBOL(block_read_full_page);
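/*
 * Illustrative sketch (not part of the original file): the matching
 * ->readpage() for the hypothetical "examplefs" above, forwarding to
 * block_read_full_page() with the examplefs_get_block() sketched after
 * generic_write_end().
 */
static int examplefs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, examplefs_get_block);
}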
2198 /* utility function for filesystems that need to do work on expanding
2199 * truncates. Uses filesystem pagecache writes to allow the filesystem to
2200 * deal with the hole.
2202 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2204 struct address_space *mapping = inode->i_mapping;
2209 err = inode_newsize_ok(inode, size);
2213 err = pagecache_write_begin(NULL, mapping, size, 0,
2214 AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2219 err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2225 EXPORT_SYMBOL(generic_cont_expand_simple);
2227 static int cont_expand_zero(struct file *file, struct address_space *mapping,
2228 loff_t pos, loff_t *bytes)
2230 struct inode *inode = mapping->host;
2231 unsigned blocksize = 1 << inode->i_blkbits;
2234 pgoff_t index, curidx;
2236 unsigned zerofrom, offset, len;
2239 index = pos >> PAGE_CACHE_SHIFT;
2240 offset = pos & ~PAGE_CACHE_MASK;
2242 while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
2243 zerofrom = curpos & ~PAGE_CACHE_MASK;
2244 if (zerofrom & (blocksize-1)) {
2245 *bytes |= (blocksize-1);
2248 len = PAGE_CACHE_SIZE - zerofrom;
2250 err = pagecache_write_begin(file, mapping, curpos, len,
2251 AOP_FLAG_UNINTERRUPTIBLE,
2255 zero_user(page, zerofrom, len);
2256 err = pagecache_write_end(file, mapping, curpos, len, len,
2263 balance_dirty_pages_ratelimited(mapping);
2266 /* page covers the boundary, find the boundary offset */
2267 if (index == curidx) {
2268 zerofrom = curpos & ~PAGE_CACHE_MASK;
2269 /* if we will expand the thing last block will be filled */
2270 if (offset <= zerofrom) {
2273 if (zerofrom & (blocksize-1)) {
2274 *bytes |= (blocksize-1);
2277 len = offset - zerofrom;
2279 err = pagecache_write_begin(file, mapping, curpos, len,
2280 AOP_FLAG_UNINTERRUPTIBLE,
2284 zero_user(page, zerofrom, len);
2285 err = pagecache_write_end(file, mapping, curpos, len, len,
2297 * For moronic filesystems that do not allow holes in files.
2298 * We may have to extend the file.
2300 int cont_write_begin(struct file *file, struct address_space *mapping,
2301 loff_t pos, unsigned len, unsigned flags,
2302 struct page **pagep, void **fsdata,
2303 get_block_t *get_block, loff_t *bytes)
2305 struct inode *inode = mapping->host;
2306 unsigned blocksize = 1 << inode->i_blkbits;
2310 err = cont_expand_zero(file, mapping, pos, bytes);
2314 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2315 if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2316 *bytes |= (blocksize-1);
2320 return block_write_begin(mapping, pos, len, flags, pagep, get_block);
2322 EXPORT_SYMBOL(cont_write_begin);
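/*
 * Illustrative sketch (editor's example, not part of the original file):
 * a hole-less filesystem wires cont_write_begin() into ->write_begin,
 * passing a pointer to its "bytes instantiated so far" marker so the gap
 * between the old tail and the new write position gets zero-filled.
 * "myfs_get_block" and "MYFS_I(...)->mmu_private" are hypothetical names.
 */
#if 0
static int myfs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				myfs_get_block,
				&MYFS_I(mapping->host)->mmu_private);
}
#endif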
2324 int block_commit_write(struct page *page, unsigned from, unsigned to)
2326 struct inode *inode = page->mapping->host;
2327 __block_commit_write(inode,page,from,to);
2330 EXPORT_SYMBOL(block_commit_write);
2333 * block_page_mkwrite() is not allowed to change the file size as it gets
2334 * called from a page fault handler when a page is first dirtied. Hence we must
2335 * be careful to check for EOF conditions here. We set the page up correctly
2336 * for a written page which means we get ENOSPC checking when writing into
2337 * holes and correct delalloc and unwritten extent mapping on filesystems that
2338 * support these features.
2340 * We are not allowed to take the i_mutex here so we have to play games to
2341 * protect against truncate races as the page could now be beyond EOF. Because
2342 * truncate writes the inode size before removing pages, once we have the
2343 * page lock we can determine safely if the page is beyond EOF. If it is not
2344 * beyond EOF, then the page is guaranteed safe against truncation until we unlock the page.
2348 block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2349 get_block_t get_block)
2351 struct page *page = vmf->page;
2352 struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2355 int ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
2358 size = i_size_read(inode);
2359 if ((page->mapping != inode->i_mapping) ||
2360 (page_offset(page) > size)) {
2361 /* page got truncated out from underneath us */
2366 /* page is wholly or partially inside EOF */
2367 if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2368 end = size & ~PAGE_CACHE_MASK;
2370 end = PAGE_CACHE_SIZE;
2372 ret = __block_write_begin(page, 0, end, get_block);
2374 ret = block_commit_write(page, 0, end);
2376 if (unlikely(ret)) {
2380 else /* -ENOSPC, -EIO, etc */
2381 ret = VM_FAULT_SIGBUS;
2383 ret = VM_FAULT_LOCKED;
2388 EXPORT_SYMBOL(block_page_mkwrite);
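/*
 * Illustrative sketch (editor's example, not part of the original file):
 * a filesystem that needs no journalling or locking of its own can use
 * block_page_mkwrite() directly from its ->page_mkwrite handler.
 * "myfs_page_mkwrite" and "myfs_get_block" are hypothetical names.
 */
#if 0
static int myfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	/* allocates blocks / reserves space for the faulting page */
	return block_page_mkwrite(vma, vmf, myfs_get_block);
}
#endif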
2391 * nobh_write_begin()'s prereads are special: the buffer_heads are freed
2392 * immediately, while under the page lock. So it needs a special end_io
2393 * handler which does not touch the bh after unlocking it.
2395 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2397 __end_buffer_read_notouch(bh, uptodate);
2401 * Attach the singly-linked list of buffers created by nobh_write_begin, to
2402 * the page (converting it to a circular list and taking care of page dirty races).
2405 static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2407 struct buffer_head *bh;
2409 BUG_ON(!PageLocked(page));
2411 spin_lock(&page->mapping->private_lock);
2414 if (PageDirty(page))
2415 set_buffer_dirty(bh);
2416 if (!bh->b_this_page)
2417 bh->b_this_page = head;
2418 bh = bh->b_this_page;
2419 } while (bh != head);
2420 attach_page_buffers(page, head);
2421 spin_unlock(&page->mapping->private_lock);
2425 * On entry, the page is not uptodate at all.
2426 * On exit, the page is fully uptodate in the areas outside (from, to).
2427 * The filesystem needs to handle block truncation upon failure.
2429 int nobh_write_begin(struct address_space *mapping,
2430 loff_t pos, unsigned len, unsigned flags,
2431 struct page **pagep, void **fsdata,
2432 get_block_t *get_block)
2434 struct inode *inode = mapping->host;
2435 const unsigned blkbits = inode->i_blkbits;
2436 const unsigned blocksize = 1 << blkbits;
2437 struct buffer_head *head, *bh;
2441 unsigned block_in_page;
2442 unsigned block_start, block_end;
2443 sector_t block_in_file;
2446 int is_mapped_to_disk = 1;
2448 index = pos >> PAGE_CACHE_SHIFT;
2449 from = pos & (PAGE_CACHE_SIZE - 1);
2452 page = grab_cache_page_write_begin(mapping, index, flags);
2458 if (page_has_buffers(page)) {
2459 ret = __block_write_begin(page, pos, len, get_block);
2465 if (PageMappedToDisk(page))
2469 * Allocate buffers so that we can keep track of state, and potentially
2470 * attach them to the page if an error occurs. In the common case of
2471 * no error, they will just be freed again without ever being attached
2472 * to the page (which is all OK, because we're under the page lock).
2474 * Be careful: the buffer linked list is a NULL terminated one, rather
2475 * than the circular one we're used to.
2477 head = alloc_page_buffers(page, blocksize, 0);
2483 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2486 * We loop across all blocks in the page, whether or not they are
2487 * part of the affected region. This is so we can discover if the
2488 * page is fully mapped-to-disk.
2490 for (block_start = 0, block_in_page = 0, bh = head;
2491 block_start < PAGE_CACHE_SIZE;
2492 block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
2495 block_end = block_start + blocksize;
2498 if (block_start >= to)
2500 ret = get_block(inode, block_in_file + block_in_page,
2504 if (!buffer_mapped(bh))
2505 is_mapped_to_disk = 0;
2507 unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2508 if (PageUptodate(page)) {
2509 set_buffer_uptodate(bh);
2512 if (buffer_new(bh) || !buffer_mapped(bh)) {
2513 zero_user_segments(page, block_start, from,
2517 if (buffer_uptodate(bh))
2518 continue; /* reiserfs does this */
2519 if (block_start < from || block_end > to) {
2521 bh->b_end_io = end_buffer_read_nobh;
2522 submit_bh(READ, bh);
2529 * The page is locked, so these buffers are protected from
2530 * any VM or truncate activity. Hence we don't need to care
2531 * for the buffer_head refcounts.
2533 for (bh = head; bh; bh = bh->b_this_page) {
2535 if (!buffer_uptodate(bh))
2542 if (is_mapped_to_disk)
2543 SetPageMappedToDisk(page);
2545 *fsdata = head; /* to be released by nobh_write_end */
2552 * Error recovery is a bit difficult. We need to zero out blocks that
2553 * were newly allocated, and dirty them to ensure they get written out.
2554 * Buffers need to be attached to the page at this point, otherwise
2555 * the handling of potential IO errors during writeout would be hard
2556 * (could try doing synchronous writeout, but what if that fails too?)
2558 attach_nobh_buffers(page, head);
2559 page_zero_new_buffers(page, from, to);
2563 page_cache_release(page);
2568 EXPORT_SYMBOL(nobh_write_begin);
2570 int nobh_write_end(struct file *file, struct address_space *mapping,
2571 loff_t pos, unsigned len, unsigned copied,
2572 struct page *page, void *fsdata)
2574 struct inode *inode = page->mapping->host;
2575 struct buffer_head *head = fsdata;
2576 struct buffer_head *bh;
2577 BUG_ON(fsdata != NULL && page_has_buffers(page));
2579 if (unlikely(copied < len) && head)
2580 attach_nobh_buffers(page, head);
2581 if (page_has_buffers(page))
2582 return generic_write_end(file, mapping, pos, len,
2583 copied, page, fsdata);
2585 SetPageUptodate(page);
2586 set_page_dirty(page);
2587 if (pos+copied > inode->i_size) {
2588 i_size_write(inode, pos+copied);
2589 mark_inode_dirty(inode);
2593 page_cache_release(page);
2597 head = head->b_this_page;
2598 free_buffer_head(bh);
2603 EXPORT_SYMBOL(nobh_write_end);
2606 * nobh_writepage() - based on block_write_full_page() except
2607 * that it tries to operate without attaching bufferheads to the page.
2610 int nobh_writepage(struct page *page, get_block_t *get_block,
2611 struct writeback_control *wbc)
2613 struct inode * const inode = page->mapping->host;
2614 loff_t i_size = i_size_read(inode);
2615 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2619 /* Is the page fully inside i_size? */
2620 if (page->index < end_index)
2623 /* Is the page fully outside i_size? (truncate in progress) */
2624 offset = i_size & (PAGE_CACHE_SIZE-1);
2625 if (page->index >= end_index+1 || !offset) {
2627 * The page may have dirty, unmapped buffers. For example,
2628 * they may have been added in ext3_writepage(). Make them
2629 * freeable here, so the page does not leak.
2632 /* Not really sure about this - do we need this ? */
2633 if (page->mapping->a_ops->invalidatepage)
2634 page->mapping->a_ops->invalidatepage(page, offset);
2637 return 0; /* don't care */
2641 * The page straddles i_size. It must be zeroed out on each and every
2642 * writepage invocation because it may be mmapped. "A file is mapped
2643 * in multiples of the page size. For a file that is not a multiple of
2644 * the page size, the remaining memory is zeroed when mapped, and
2645 * writes to that region are not written out to the file."
2647 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2649 ret = mpage_writepage(page, get_block, wbc);
2651 ret = __block_write_full_page(inode, page, get_block, wbc,
2652 end_buffer_async_write);
2655 EXPORT_SYMBOL(nobh_writepage);
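/*
 * Illustrative sketch (editor's example, not part of the original file):
 * how the nobh_* helpers plug into address_space_operations for a
 * filesystem that wants to avoid long-lived buffer_heads (ext2's "nobh"
 * mode does something similar).  All "myfs_*" names are hypothetical;
 * myfs_readpage is the block_read_full_page() wrapper sketched earlier.
 */
#if 0
static int myfs_nobh_writepage(struct page *page,
			struct writeback_control *wbc)
{
	return nobh_writepage(page, myfs_get_block, wbc);
}

static int myfs_nobh_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	int ret = nobh_write_begin(mapping, pos, len, flags, pagep, fsdata,
				   myfs_get_block);
	/*
	 * On failure a real filesystem must truncate any blocks it
	 * instantiated beyond i_size (see the nobh_write_begin comment).
	 */
	return ret;
}

static const struct address_space_operations myfs_nobh_aops = {
	.readpage	= myfs_readpage,
	.writepage	= myfs_nobh_writepage,
	.write_begin	= myfs_nobh_write_begin,
	.write_end	= nobh_write_end,
};
#endif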
2657 int nobh_truncate_page(struct address_space *mapping,
2658 loff_t from, get_block_t *get_block)
2660 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2661 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2664 unsigned length, pos;
2665 struct inode *inode = mapping->host;
2667 struct buffer_head map_bh;
2670 blocksize = 1 << inode->i_blkbits;
2671 length = offset & (blocksize - 1);
2673 /* Block boundary? Nothing to do */
2677 length = blocksize - length;
2678 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2680 page = grab_cache_page(mapping, index);
2685 if (page_has_buffers(page)) {
2688 page_cache_release(page);
2689 return block_truncate_page(mapping, from, get_block);
2692 /* Find the buffer that contains "offset" */
2694 while (offset >= pos) {
2699 map_bh.b_size = blocksize;
2701 err = get_block(inode, iblock, &map_bh, 0);
2704 /* unmapped? It's a hole - nothing to do */
2705 if (!buffer_mapped(&map_bh))
2708 /* Ok, it's mapped. Make sure it's up-to-date */
2709 if (!PageUptodate(page)) {
2710 err = mapping->a_ops->readpage(NULL, page);
2712 page_cache_release(page);
2716 if (!PageUptodate(page)) {
2720 if (page_has_buffers(page))
2723 zero_user(page, offset, length);
2724 set_page_dirty(page);
2729 page_cache_release(page);
2733 EXPORT_SYMBOL(nobh_truncate_page);
2735 int block_truncate_page(struct address_space *mapping,
2736 loff_t from, get_block_t *get_block)
2738 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2739 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2742 unsigned length, pos;
2743 struct inode *inode = mapping->host;
2745 struct buffer_head *bh;
2748 blocksize = 1 << inode->i_blkbits;
2749 length = offset & (blocksize - 1);
2751 /* Block boundary? Nothing to do */
2755 length = blocksize - length;
2756 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2758 page = grab_cache_page(mapping, index);
2763 if (!page_has_buffers(page))
2764 create_empty_buffers(page, blocksize, 0);
2766 /* Find the buffer that contains "offset" */
2767 bh = page_buffers(page);
2769 while (offset >= pos) {
2770 bh = bh->b_this_page;
2776 if (!buffer_mapped(bh)) {
2777 WARN_ON(bh->b_size != blocksize);
2778 err = get_block(inode, iblock, bh, 0);
2781 /* unmapped? It's a hole - nothing to do */
2782 if (!buffer_mapped(bh))
2786 /* Ok, it's mapped. Make sure it's up-to-date */
2787 if (PageUptodate(page))
2788 set_buffer_uptodate(bh);
2790 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2792 ll_rw_block(READ, 1, &bh);
2794 /* Uhhuh. Read error. Complain and punt. */
2795 if (!buffer_uptodate(bh))
2799 zero_user(page, offset, length);
2800 mark_buffer_dirty(bh);
2805 page_cache_release(page);
2809 EXPORT_SYMBOL(block_truncate_page);
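/*
 * Illustrative sketch (editor's example, not part of the original file):
 * a filesystem's truncate path typically zeroes the partial tail block
 * with block_truncate_page() before freeing the blocks past the new
 * size.  "myfs_truncate" and "myfs_get_block" are hypothetical names.
 */
#if 0
static void myfs_truncate(struct inode *inode)
{
	/* zero the part of the last block that lies beyond the new i_size */
	block_truncate_page(inode->i_mapping, inode->i_size, myfs_get_block);

	/* ...filesystem-specific freeing of blocks beyond i_size... */
}
#endif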
2812 * The generic ->writepage function for buffer-backed address_spaces.
2813 * This form passes in the end_io handler used to finish the IO.
2815 int block_write_full_page_endio(struct page *page, get_block_t *get_block,
2816 struct writeback_control *wbc, bh_end_io_t *handler)
2818 struct inode * const inode = page->mapping->host;
2819 loff_t i_size = i_size_read(inode);
2820 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2823 /* Is the page fully inside i_size? */
2824 if (page->index < end_index)
2825 return __block_write_full_page(inode, page, get_block, wbc,
2828 /* Is the page fully outside i_size? (truncate in progress) */
2829 offset = i_size & (PAGE_CACHE_SIZE-1);
2830 if (page->index >= end_index+1 || !offset) {
2832 * The page may have dirty, unmapped buffers. For example,
2833 * they may have been added in ext3_writepage(). Make them
2834 * freeable here, so the page does not leak.
2836 do_invalidatepage(page, 0);
2838 return 0; /* don't care */
2842 * The page straddles i_size. It must be zeroed out on each and every
2843 * writepage invocation because it may be mmapped. "A file is mapped
2844 * in multiples of the page size. For a file that is not a multiple of
2845 * the page size, the remaining memory is zeroed when mapped, and
2846 * writes to that region are not written out to the file."
2848 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2849 return __block_write_full_page(inode, page, get_block, wbc, handler);
2851 EXPORT_SYMBOL(block_write_full_page_endio);
2854 * The generic ->writepage function for buffer-backed address_spaces
2856 int block_write_full_page(struct page *page, get_block_t *get_block,
2857 struct writeback_control *wbc)
2859 return block_write_full_page_endio(page, get_block, wbc,
2860 end_buffer_async_write);
2862 EXPORT_SYMBOL(block_write_full_page);
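/*
 * Illustrative sketch (editor's example, not part of the original file):
 * the common ->writepage wrapper for a buffer_head based filesystem.
 * "myfs_writepage" and "myfs_get_block" are hypothetical names.
 */
#if 0
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, myfs_get_block, wbc);
}
#endif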
2864 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2865 get_block_t *get_block)
2867 struct buffer_head tmp;
2868 struct inode *inode = mapping->host;
2871 tmp.b_size = 1 << inode->i_blkbits;
2872 get_block(inode, block, &tmp, 0);
2873 return tmp.b_blocknr;
2875 EXPORT_SYMBOL(generic_block_bmap);
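/*
 * Illustrative sketch (editor's example, not part of the original file):
 * ->bmap (used by the FIBMAP ioctl, among others) can simply forward to
 * generic_block_bmap().  "myfs_bmap" and "myfs_get_block" are
 * hypothetical names.
 */
#if 0
static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, myfs_get_block);
}
#endif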
2877 static void end_bio_bh_io_sync(struct bio *bio, int err)
2879 struct buffer_head *bh = bio->bi_private;
2881 if (err == -EOPNOTSUPP) {
2882 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2885 if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags)))
2886 set_bit(BH_Quiet, &bh->b_state);
2888 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2892 int submit_bh(int rw, struct buffer_head * bh)
2897 BUG_ON(!buffer_locked(bh));
2898 BUG_ON(!buffer_mapped(bh));
2899 BUG_ON(!bh->b_end_io);
2900 BUG_ON(buffer_delay(bh));
2901 BUG_ON(buffer_unwritten(bh));
2904 * Only clear out a write error when rewriting
2906 if (test_set_buffer_req(bh) && (rw & WRITE))
2907 clear_buffer_write_io_error(bh);
2910 * from here on down, it's all bio -- do the initial mapping,
2911 * submit_bio -> generic_make_request may further map this bio around
2913 bio = bio_alloc(GFP_NOIO, 1);
2915 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2916 bio->bi_bdev = bh->b_bdev;
2917 bio->bi_io_vec[0].bv_page = bh->b_page;
2918 bio->bi_io_vec[0].bv_len = bh->b_size;
2919 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2923 bio->bi_size = bh->b_size;
2925 bio->bi_end_io = end_bio_bh_io_sync;
2926 bio->bi_private = bh;
2929 submit_bio(rw, bio);
2931 if (bio_flagged(bio, BIO_EOPNOTSUPP))
2937 EXPORT_SYMBOL(submit_bh);
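/*
 * Illustrative sketch (editor's example, not part of the original file):
 * the usual pattern for synchronously reading one buffer via submit_bh().
 * It mirrors what bh_submit_read() further down does; the extra reference
 * is taken because end_buffer_read_sync() drops one on completion.
 * "myfs_read_bh_sync" is a hypothetical name.
 */
#if 0
static int myfs_read_bh_sync(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}
	get_bh(bh);			/* dropped by end_buffer_read_sync() */
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	return buffer_uptodate(bh) ? 0 : -EIO;
}
#endif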
2940 * ll_rw_block: low-level access to block devices (DEPRECATED)
2941 * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
2942 * @nr: number of &struct buffer_heads in the array
2943 * @bhs: array of pointers to &struct buffer_head
2945 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2946 * requests an I/O operation on them, either a %READ or a %WRITE. The third
2947 * %READA option is described in the documentation for generic_make_request()
2948 * which ll_rw_block() calls.
2950 * This function drops any buffer that it cannot get a lock on (with the
2951 * BH_Lock state bit), any buffer that appears to be clean when doing a write
2952 * request, and any buffer that appears to be up-to-date when doing a read
2953 * request. Further, it marks as clean the buffers that are processed for
2954 * writing (the buffer cache won't assume that they are actually clean
2955 * until the buffer gets unlocked).
2957 * ll_rw_block sets b_end_io to a simple completion handler that marks
2958 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes any waiters.
2961 * All of the buffers must be for the same device, and must also be a
2962 * multiple of the current approved size for the device.
2964 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2968 for (i = 0; i < nr; i++) {
2969 struct buffer_head *bh = bhs[i];
2971 if (!trylock_buffer(bh))
2974 if (test_clear_buffer_dirty(bh)) {
2975 bh->b_end_io = end_buffer_write_sync;
2977 submit_bh(WRITE, bh);
2981 if (!buffer_uptodate(bh)) {
2982 bh->b_end_io = end_buffer_read_sync;
2991 EXPORT_SYMBOL(ll_rw_block);
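/*
 * Illustrative sketch (editor's example, not part of the original file):
 * batching reads of several (possibly already cached) buffers with
 * ll_rw_block() and then waiting for them all; buffers that are already
 * up to date are simply skipped by ll_rw_block().  "myfs_read_buffers"
 * is a hypothetical name.
 */
#if 0
static int myfs_read_buffers(struct buffer_head *bhs[], int nr)
{
	int i, err = 0;

	ll_rw_block(READ, nr, bhs);
	for (i = 0; i < nr; i++) {
		wait_on_buffer(bhs[i]);
		if (!buffer_uptodate(bhs[i]))
			err = -EIO;
	}
	return err;
}
#endif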
2993 void write_dirty_buffer(struct buffer_head *bh, int rw)
2996 if (!test_clear_buffer_dirty(bh)) {
3000 bh->b_end_io = end_buffer_write_sync;
3004 EXPORT_SYMBOL(write_dirty_buffer);
3007 * For a data-integrity writeout, we need to wait upon any in-progress I/O
3008 * and then start new I/O and then wait upon it. The caller must have a ref on the buffer.
3011 int __sync_dirty_buffer(struct buffer_head *bh, int rw)
3015 WARN_ON(atomic_read(&bh->b_count) < 1);
3017 if (test_clear_buffer_dirty(bh)) {
3019 bh->b_end_io = end_buffer_write_sync;
3020 ret = submit_bh(rw, bh);
3022 if (!ret && !buffer_uptodate(bh))
3029 EXPORT_SYMBOL(__sync_dirty_buffer);
3031 int sync_dirty_buffer(struct buffer_head *bh)
3033 return __sync_dirty_buffer(bh, WRITE_SYNC);
3035 EXPORT_SYMBOL(sync_dirty_buffer);
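/*
 * Illustrative sketch (editor's example, not part of the original file):
 * a metadata update that must be on disk before continuing: dirty the
 * buffer, then force and wait for the write with sync_dirty_buffer().
 * "myfs_commit_super" is a hypothetical name.
 */
#if 0
static int myfs_commit_super(struct buffer_head *sb_bh)
{
	/* ...modify the on-disk superblock image in sb_bh->b_data... */
	mark_buffer_dirty(sb_bh);
	return sync_dirty_buffer(sb_bh);	/* 0 on success, -EIO on error */
}
#endif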
3038 * try_to_free_buffers() checks if all the buffers on this particular page
3039 * are unused, and releases them if so.
3041 * Exclusion against try_to_free_buffers may be obtained by either
3042 * locking the page or by holding its mapping's private_lock.
3044 * If the page is dirty but all the buffers are clean then we need to
3045 * be sure to mark the page clean as well. This is because the page
3046 * may be against a block device, and a later reattachment of buffers
3047 * to a dirty page will set *all* buffers dirty, which would corrupt
3048 * filesystem data on the same device.
3050 * The same applies to regular filesystem pages: if all the buffers are
3051 * clean then we set the page clean and proceed. To do that, we require
3052 * total exclusion from __set_page_dirty_buffers(). That is obtained with the mapping's private_lock.
3055 * try_to_free_buffers() is non-blocking.
3057 static inline int buffer_busy(struct buffer_head *bh)
3059 return atomic_read(&bh->b_count) |
3060 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
3064 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
3066 struct buffer_head *head = page_buffers(page);
3067 struct buffer_head *bh;
3071 if (buffer_write_io_error(bh) && page->mapping)
3072 set_bit(AS_EIO, &page->mapping->flags);
3073 if (buffer_busy(bh))
3075 bh = bh->b_this_page;
3076 } while (bh != head);
3079 struct buffer_head *next = bh->b_this_page;
3081 if (bh->b_assoc_map)
3082 __remove_assoc_queue(bh);
3084 } while (bh != head);
3085 *buffers_to_free = head;
3086 __clear_page_buffers(page);
3092 int try_to_free_buffers(struct page *page)
3094 struct address_space * const mapping = page->mapping;
3095 struct buffer_head *buffers_to_free = NULL;
3098 BUG_ON(!PageLocked(page));
3099 if (PageWriteback(page))
3102 if (mapping == NULL) { /* can this still happen? */
3103 ret = drop_buffers(page, &buffers_to_free);
3107 spin_lock(&mapping->private_lock);
3108 ret = drop_buffers(page, &buffers_to_free);
3111 * If the filesystem writes its buffers by hand (eg ext3)
3112 * then we can have clean buffers against a dirty page. We
3113 * clean the page here; otherwise the VM will never notice
3114 * that the filesystem did any IO at all.
3116 * Also, during truncate, discard_buffer will have marked all
3117 * the page's buffers clean. We discover that here and clean the page as well.
3120 * private_lock must be held over this entire operation in order
3121 * to synchronise against __set_page_dirty_buffers and prevent the
3122 * dirty bit from being lost.
3125 cancel_dirty_page(page, PAGE_CACHE_SIZE);
3126 spin_unlock(&mapping->private_lock);
3128 if (buffers_to_free) {
3129 struct buffer_head *bh = buffers_to_free;
3132 struct buffer_head *next = bh->b_this_page;
3133 free_buffer_head(bh);
3135 } while (bh != buffers_to_free);
3139 EXPORT_SYMBOL(try_to_free_buffers);
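/*
 * Illustrative sketch (editor's example, not part of the original file):
 * a filesystem with no private page state can point ->releasepage at a
 * wrapper like this; the VM calls it when it wants the buffers detached
 * so the page can be freed.  "myfs_releasepage" is a hypothetical name.
 */
#if 0
static int myfs_releasepage(struct page *page, gfp_t gfp_mask)
{
	return try_to_free_buffers(page);
}
#endif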
3141 void block_sync_page(struct page *page)
3143 struct address_space *mapping;
3146 mapping = page_mapping(page);
3148 blk_run_backing_dev(mapping->backing_dev_info, page);
3150 EXPORT_SYMBOL(block_sync_page);
3153 * There are no bdflush tunables left. But distributions are
3154 * still running obsolete flush daemons, so we terminate them here.
3156 * Use of bdflush() is deprecated and will be removed in a future kernel.
3157 * The `flush-X' kernel threads fully replace bdflush daemons and this call.
3159 SYSCALL_DEFINE2(bdflush, int, func, long, data)
3161 static int msg_count;
3163 if (!capable(CAP_SYS_ADMIN))
3166 if (msg_count < 5) {
3169 "warning: process `%s' used the obsolete bdflush"
3170 " system call\n", current->comm);
3171 printk(KERN_INFO "Fix your initscripts?\n");
3180 * Buffer-head allocation
3182 static struct kmem_cache *bh_cachep;
3185 * Once the number of bh's in the machine exceeds this level, we start
3186 * stripping them in writeback.
3188 static int max_buffer_heads;
3190 int buffer_heads_over_limit;
3192 struct bh_accounting {
3193 int nr; /* Number of live bh's */
3194 int ratelimit; /* Limit cacheline bouncing */
3197 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3199 static void recalc_bh_state(void)
3204 if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
3206 __this_cpu_write(bh_accounting.ratelimit, 0);
3207 for_each_online_cpu(i)
3208 tot += per_cpu(bh_accounting, i).nr;
3209 buffer_heads_over_limit = (tot > max_buffer_heads);
3212 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3214 struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
3216 INIT_LIST_HEAD(&ret->b_assoc_buffers);
3218 __this_cpu_inc(bh_accounting.nr);
3224 EXPORT_SYMBOL(alloc_buffer_head);
3226 void free_buffer_head(struct buffer_head *bh)
3228 BUG_ON(!list_empty(&bh->b_assoc_buffers));
3229 kmem_cache_free(bh_cachep, bh);
3231 __this_cpu_dec(bh_accounting.nr);
3235 EXPORT_SYMBOL(free_buffer_head);
3237 static void buffer_exit_cpu(int cpu)
3240 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3242 for (i = 0; i < BH_LRU_SIZE; i++) {
3246 this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
3247 per_cpu(bh_accounting, cpu).nr = 0;
3250 static int buffer_cpu_notify(struct notifier_block *self,
3251 unsigned long action, void *hcpu)
3253 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
3254 buffer_exit_cpu((unsigned long)hcpu);
3259 * bh_uptodate_or_lock - Test whether the buffer is uptodate
3260 * @bh: struct buffer_head
3262 * Return true if the buffer is up-to-date; otherwise return false
3263 * with the buffer locked.
3265 int bh_uptodate_or_lock(struct buffer_head *bh)
3267 if (!buffer_uptodate(bh)) {
3269 if (!buffer_uptodate(bh))
3275 EXPORT_SYMBOL(bh_uptodate_or_lock);
3278 * bh_submit_read - Submit a locked buffer for reading
3279 * @bh: struct buffer_head
3281 * Returns zero on success and -EIO on error.
3283 int bh_submit_read(struct buffer_head *bh)
3285 BUG_ON(!buffer_locked(bh));
3287 if (buffer_uptodate(bh)) {
3293 bh->b_end_io = end_buffer_read_sync;
3294 submit_bh(READ, bh);
3296 if (buffer_uptodate(bh))
3300 EXPORT_SYMBOL(bh_submit_read);
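/*
 * Illustrative sketch (editor's example, not part of the original file):
 * bh_uptodate_or_lock() and bh_submit_read() combine into a compact
 * "make sure this buffer has been read in" idiom.  "myfs_ensure_uptodate"
 * is a hypothetical name.
 */
#if 0
static int myfs_ensure_uptodate(struct buffer_head *bh)
{
	if (bh_uptodate_or_lock(bh))
		return 0;		/* already up to date, left unlocked */
	/* buffer is now locked and not up to date: read it synchronously */
	return bh_submit_read(bh);	/* 0 on success, -EIO on error */
}
#endif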
3302 void __init buffer_init(void)
3306 bh_cachep = kmem_cache_create("buffer_head",
3307 sizeof(struct buffer_head), 0,
3308 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3313 * Limit the bh occupancy to 10% of ZONE_NORMAL
3315 nrpages = (nr_free_buffer_pages() * 10) / 100;
3316 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3317 hotcpu_notifier(buffer_cpu_notify, 0);