1 /*
2  *  linux/fs/buffer.c
3  *
4  *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
5  */
6
7 /*
8  * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
9  *
10  * Removed a lot of unnecessary code and simplified things now that
11  * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
12  *
13  * Speed up hash, lru, and free list operations.  Use gfp() for allocating
14  * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
15  *
16  * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
17  *
18  * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
19  */
20
21 #include <linux/kernel.h>
22 #include <linux/syscalls.h>
23 #include <linux/fs.h>
24 #include <linux/mm.h>
25 #include <linux/percpu.h>
26 #include <linux/slab.h>
27 #include <linux/capability.h>
28 #include <linux/blkdev.h>
29 #include <linux/file.h>
30 #include <linux/quotaops.h>
31 #include <linux/highmem.h>
32 #include <linux/module.h>
33 #include <linux/writeback.h>
34 #include <linux/hash.h>
35 #include <linux/suspend.h>
36 #include <linux/buffer_head.h>
37 #include <linux/task_io_accounting_ops.h>
38 #include <linux/bio.h>
39 #include <linux/notifier.h>
40 #include <linux/cpu.h>
41 #include <linux/bitops.h>
42 #include <linux/mpage.h>
43 #include <linux/bit_spinlock.h>
44 #include <linux/cleancache.h>
45
46 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
47
48 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
49
50 inline void
51 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
52 {
53         bh->b_end_io = handler;
54         bh->b_private = private;
55 }
56 EXPORT_SYMBOL(init_buffer);
57
58 static int sleep_on_buffer(void *word)
59 {
60         io_schedule();
61         return 0;
62 }
63
64 void __lock_buffer(struct buffer_head *bh)
65 {
66         wait_on_bit_lock(&bh->b_state, BH_Lock, sleep_on_buffer,
67                                                         TASK_UNINTERRUPTIBLE);
68 }
69 EXPORT_SYMBOL(__lock_buffer);
70
71 void unlock_buffer(struct buffer_head *bh)
72 {
73         clear_bit_unlock(BH_Lock, &bh->b_state);
74         smp_mb__after_clear_bit();
75         wake_up_bit(&bh->b_state, BH_Lock);
76 }
77 EXPORT_SYMBOL(unlock_buffer);
78
79 /*
80  * Block until a buffer comes unlocked.  This doesn't stop it
81  * from becoming locked again - you have to lock it yourself
82  * if you want to preserve its state.
83  */
84 void __wait_on_buffer(struct buffer_head * bh)
85 {
86         wait_on_bit(&bh->b_state, BH_Lock, sleep_on_buffer, TASK_UNINTERRUPTIBLE);
87 }
88 EXPORT_SYMBOL(__wait_on_buffer);
89
90 static void
91 __clear_page_buffers(struct page *page)
92 {
93         ClearPagePrivate(page);
94         set_page_private(page, 0);
95         page_cache_release(page);
96 }
97
98
99 static int quiet_error(struct buffer_head *bh)
100 {
101         if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
102                 return 0;
103         return 1;
104 }
105
106
107 static void buffer_io_error(struct buffer_head *bh)
108 {
109         char b[BDEVNAME_SIZE];
110         printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
111                         bdevname(bh->b_bdev, b),
112                         (unsigned long long)bh->b_blocknr);
113 }
114
115 /*
116  * End-of-IO handler helper function which does not touch the bh after
117  * unlocking it.
118  * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
119  * a race there is benign: unlock_buffer() only uses the bh's address for
120  * hashing after unlocking the buffer, so it doesn't actually touch the bh
121  * itself.
122  */
123 static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
124 {
125         if (uptodate) {
126                 set_buffer_uptodate(bh);
127         } else {
128                 /* This happens, due to failed READA attempts. */
129                 clear_buffer_uptodate(bh);
130         }
131         unlock_buffer(bh);
132 }
133
134 /*
135  * Default synchronous end-of-IO handler..  Just mark it up-to-date and
136  * unlock the buffer. This is what ll_rw_block uses too.
137  */
138 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
139 {
140         __end_buffer_read_notouch(bh, uptodate);
141         put_bh(bh);
142 }
143 EXPORT_SYMBOL(end_buffer_read_sync);
144
145 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
146 {
147         char b[BDEVNAME_SIZE];
148
149         if (uptodate) {
150                 set_buffer_uptodate(bh);
151         } else {
152                 if (!quiet_error(bh)) {
153                         buffer_io_error(bh);
154                         printk(KERN_WARNING "lost page write due to "
155                                         "I/O error on %s\n",
156                                        bdevname(bh->b_bdev, b));
157                 }
158                 set_buffer_write_io_error(bh);
159                 clear_buffer_uptodate(bh);
160         }
161         unlock_buffer(bh);
162         put_bh(bh);
163 }
164 EXPORT_SYMBOL(end_buffer_write_sync);
165
166 /*
167  * Various filesystems appear to want __find_get_block to be non-blocking.
168  * But it's the page lock which protects the buffers.  To get around this,
169  * we get exclusion from try_to_free_buffers with the blockdev mapping's
170  * private_lock.
171  *
172  * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
173  * may be quite high.  This code could TryLock the page, and if that
174  * succeeds, there is no need to take private_lock. (But if
175  * private_lock is contended then so is mapping->tree_lock).
176  */
177 static struct buffer_head *
178 __find_get_block_slow(struct block_device *bdev, sector_t block)
179 {
180         struct inode *bd_inode = bdev->bd_inode;
181         struct address_space *bd_mapping = bd_inode->i_mapping;
182         struct buffer_head *ret = NULL;
183         pgoff_t index;
184         struct buffer_head *bh;
185         struct buffer_head *head;
186         struct page *page;
187         int all_mapped = 1;
188
189         index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
190         page = find_get_page(bd_mapping, index);
191         if (!page)
192                 goto out;
193
194         spin_lock(&bd_mapping->private_lock);
195         if (!page_has_buffers(page))
196                 goto out_unlock;
197         head = page_buffers(page);
198         bh = head;
199         do {
200                 if (!buffer_mapped(bh))
201                         all_mapped = 0;
202                 else if (bh->b_blocknr == block) {
203                         ret = bh;
204                         get_bh(bh);
205                         goto out_unlock;
206                 }
207                 bh = bh->b_this_page;
208         } while (bh != head);
209
210         /* we might be here because some of the buffers on this page are
211          * not mapped.  This is due to various races between
212          * file io on the block device and getblk.  It gets dealt with
213          * elsewhere, don't buffer_error if we had some unmapped buffers
214          */
215         if (all_mapped) {
216                 char b[BDEVNAME_SIZE];
217
218                 printk("__find_get_block_slow() failed. "
219                         "block=%llu, b_blocknr=%llu\n",
220                         (unsigned long long)block,
221                         (unsigned long long)bh->b_blocknr);
222                 printk("b_state=0x%08lx, b_size=%zu\n",
223                         bh->b_state, bh->b_size);
224                 printk("device %s blocksize: %d\n", bdevname(bdev, b),
225                         1 << bd_inode->i_blkbits);
226         }
227 out_unlock:
228         spin_unlock(&bd_mapping->private_lock);
229         page_cache_release(page);
230 out:
231         return ret;
232 }
233
234 /* If invalidate_buffers() will trash dirty buffers, it means some kind
235    of fs corruption is going on. Trashing dirty data always imply losing
236    information that was supposed to be just stored on the physical layer
237    by the user.
238
239    Thus invalidate_buffers in general usage is not allowed to trash
240    dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
241    be preserved.  These buffers are simply skipped.
242   
243    We also skip buffers which are still in use.  For example this can
244    happen if a userspace program is reading the block device.
245
246    NOTE: In the case where the user removes a removable-media disk even though
247    there is still dirty data not synced to disk (due to a bug in the device driver
248    or to an error by the user), by not destroying the dirty buffers we could
249    generate corruption also on the next media inserted, thus a parameter is
250    necessary to handle this case in the most safe way possible (trying
251    to not corrupt also the new disk inserted with the data belonging to
252    the old now corrupted disk). Also for the ramdisk the natural thing
253    to do in order to release the ramdisk memory is to destroy dirty buffers.
254
255    These are two special cases. Normal usage implies that the device driver
256    issues a sync on the device (without waiting for I/O completion) and
257    then an invalidate_buffers call that doesn't trash dirty buffers.
258
259    For handling cache coherency with the blkdev pagecache the 'update' case
260    has been introduced. It is needed to re-read from disk any pinned
261    buffer. NOTE: re-reading from disk is destructive so we can do it only
262    when we assume nobody is changing the buffercache under our I/O and when
263    we think the disk contains more recent information than the buffercache.
264    The update == 1 pass marks the buffers we need to update, the update == 2
265    pass does the actual I/O. */
266 void invalidate_bdev(struct block_device *bdev)
267 {
268         struct address_space *mapping = bdev->bd_inode->i_mapping;
269
270         if (mapping->nrpages == 0)
271                 return;
272
273         invalidate_bh_lrus();
274         lru_add_drain_all();    /* make sure all lru add caches are flushed */
275         invalidate_mapping_pages(mapping, 0, -1);
276         /* 99% of the time, we don't need to flush the cleancache on the bdev.
277          * But, for the strange corners, let's be cautious
278          */
279         cleancache_flush_inode(mapping);
280 }
281 EXPORT_SYMBOL(invalidate_bdev);
282
283 /*
284  * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
285  */
286 static void free_more_memory(void)
287 {
288         struct zone *zone;
289         int nid;
290
291         wakeup_flusher_threads(1024, WB_REASON_FREE_MORE_MEM);
292         yield();
293
294         for_each_online_node(nid) {
295                 (void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
296                                                 gfp_zone(GFP_NOFS), NULL,
297                                                 &zone);
298                 if (zone)
299                         try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
300                                                 GFP_NOFS, NULL);
301         }
302 }
303
304 /*
305  * I/O completion handler for block_read_full_page() - pages
306  * which come unlocked at the end of I/O.
307  */
308 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
309 {
310         unsigned long flags;
311         struct buffer_head *first;
312         struct buffer_head *tmp;
313         struct page *page;
314         int page_uptodate = 1;
315
316         BUG_ON(!buffer_async_read(bh));
317
318         page = bh->b_page;
319         if (uptodate) {
320                 set_buffer_uptodate(bh);
321         } else {
322                 clear_buffer_uptodate(bh);
323                 if (!quiet_error(bh))
324                         buffer_io_error(bh);
325                 SetPageError(page);
326         }
327
328         /*
329          * Be _very_ careful from here on. Bad things can happen if
330          * two buffer heads end IO at almost the same time and both
331          * decide that the page is now completely done.
332          */
333         first = page_buffers(page);
334         local_irq_save(flags);
335         bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
336         clear_buffer_async_read(bh);
337         unlock_buffer(bh);
338         tmp = bh;
339         do {
340                 if (!buffer_uptodate(tmp))
341                         page_uptodate = 0;
342                 if (buffer_async_read(tmp)) {
343                         BUG_ON(!buffer_locked(tmp));
344                         goto still_busy;
345                 }
346                 tmp = tmp->b_this_page;
347         } while (tmp != bh);
348         bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
349         local_irq_restore(flags);
350
351         /*
352          * If none of the buffers had errors and they are all
353          * uptodate then we can set the page uptodate.
354          */
355         if (page_uptodate && !PageError(page))
356                 SetPageUptodate(page);
357         unlock_page(page);
358         return;
359
360 still_busy:
361         bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
362         local_irq_restore(flags);
363         return;
364 }
365
366 /*
367  * Completion handler for block_write_full_page() - pages which are unlocked
368  * during I/O, and which have PageWriteback cleared upon I/O completion.
369  */
370 void end_buffer_async_write(struct buffer_head *bh, int uptodate)
371 {
372         char b[BDEVNAME_SIZE];
373         unsigned long flags;
374         struct buffer_head *first;
375         struct buffer_head *tmp;
376         struct page *page;
377
378         BUG_ON(!buffer_async_write(bh));
379
380         page = bh->b_page;
381         if (uptodate) {
382                 set_buffer_uptodate(bh);
383         } else {
384                 if (!quiet_error(bh)) {
385                         buffer_io_error(bh);
386                         printk(KERN_WARNING "lost page write due to "
387                                         "I/O error on %s\n",
388                                bdevname(bh->b_bdev, b));
389                 }
390                 set_bit(AS_EIO, &page->mapping->flags);
391                 set_buffer_write_io_error(bh);
392                 clear_buffer_uptodate(bh);
393                 SetPageError(page);
394         }
395
396         first = page_buffers(page);
397         local_irq_save(flags);
398         bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
399
400         clear_buffer_async_write(bh);
401         unlock_buffer(bh);
402         tmp = bh->b_this_page;
403         while (tmp != bh) {
404                 if (buffer_async_write(tmp)) {
405                         BUG_ON(!buffer_locked(tmp));
406                         goto still_busy;
407                 }
408                 tmp = tmp->b_this_page;
409         }
410         bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
411         local_irq_restore(flags);
412         end_page_writeback(page);
413         return;
414
415 still_busy:
416         bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
417         local_irq_restore(flags);
418         return;
419 }
420 EXPORT_SYMBOL(end_buffer_async_write);
421
422 /*
423  * If a page's buffers are under async readin (end_buffer_async_read
424  * completion) then there is a possibility that another thread of
425  * control could lock one of the buffers after it has completed
426  * but while some of the other buffers have not completed.  This
427  * locked buffer would confuse end_buffer_async_read() into not unlocking
428  * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
429  * that this buffer is not under async I/O.
430  *
431  * The page comes unlocked when it has no locked buffer_async buffers
432  * left.
433  *
434  * PageLocked prevents anyone from starting new async I/O reads against
435  * any of the buffers.
436  *
437  * PageWriteback is used to prevent simultaneous writeout of the same
438  * page.
439  *
440  * PageLocked prevents anyone from starting writeback of a page which is
441  * under read I/O (PageWriteback is only ever set against a locked page).
442  */
443 static void mark_buffer_async_read(struct buffer_head *bh)
444 {
445         bh->b_end_io = end_buffer_async_read;
446         set_buffer_async_read(bh);
447 }
448
449 static void mark_buffer_async_write_endio(struct buffer_head *bh,
450                                           bh_end_io_t *handler)
451 {
452         bh->b_end_io = handler;
453         set_buffer_async_write(bh);
454 }
455
456 void mark_buffer_async_write(struct buffer_head *bh)
457 {
458         mark_buffer_async_write_endio(bh, end_buffer_async_write);
459 }
460 EXPORT_SYMBOL(mark_buffer_async_write);
461
462
463 /*
464  * fs/buffer.c contains helper functions for buffer-backed address space's
465  * fsync functions.  A common requirement for buffer-based filesystems is
466  * that certain data from the backing blockdev needs to be written out for
467  * a successful fsync().  For example, ext2 indirect blocks need to be
468  * written back and waited upon before fsync() returns.
469  *
470  * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
471  * inode_has_buffers() and invalidate_inode_buffers() are provided for the
472  * management of a list of dependent buffers at ->i_mapping->private_list.
473  *
474  * Locking is a little subtle: try_to_free_buffers() will remove buffers
475  * from their controlling inode's queue when they are being freed.  But
476  * try_to_free_buffers() will be operating against the *blockdev* mapping
477  * at the time, not against the S_ISREG file which depends on those buffers.
478  * So the locking for private_list is via the private_lock in the address_space
479  * which backs the buffers.  Which is different from the address_space 
480  * against which the buffers are listed.  So for a particular address_space,
481  * mapping->private_lock does *not* protect mapping->private_list!  In fact,
482  * mapping->private_list will always be protected by the backing blockdev's
483  * ->private_lock.
484  *
485  * Which introduces a requirement: all buffers on an address_space's
486  * ->private_list must be from the same address_space: the blockdev's.
487  *
488  * address_spaces which do not place buffers at ->private_list via these
489  * utility functions are free to use private_lock and private_list for
490  * whatever they want.  The only requirement is that list_empty(private_list)
491  * be true at clear_inode() time.
492  *
493  * FIXME: clear_inode should not call invalidate_inode_buffers().  The
494  * filesystems should do that.  invalidate_inode_buffers() should just go
495  * BUG_ON(!list_empty).
496  *
497  * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
498  * take an address_space, not an inode.  And it should be called
499  * mark_buffer_dirty_fsync() to clearly define why those buffers are being
500  * queued up.
501  *
502  * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
503  * list if it is already on a list.  Because if the buffer is on a list,
504  * it *must* already be on the right one.  If not, the filesystem is being
505  * silly.  This will save a ton of locking.  But first we have to ensure
506  * that buffers are taken *off* the old inode's list when they are freed
507  * (presumably in truncate).  That requires careful auditing of all
508  * filesystems (do it inside bforget()).  It could also be done by bringing
509  * b_inode back.
510  */
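/*
 * Illustrative sketch (not part of this file): one way a simple filesystem
 * might use the helpers described above.  The example_* names and the
 * on-disk layout are hypothetical; mark_buffer_dirty_inode() and
 * sync_mapping_buffers() are the real interfaces from this file.
 */
static void example_dirty_indirect(struct inode *inode, sector_t blocknr)
{
        struct buffer_head *bh = sb_bread(inode->i_sb, blocknr);

        if (!bh)
                return;
        /* ... modify the indirect block through bh->b_data ... */
        mark_buffer_dirty_inode(bh, inode);     /* queue on ->private_list */
        brelse(bh);
}

static int example_fsync(struct file *file, loff_t start, loff_t end,
                         int datasync)
{
        struct inode *inode = file->f_mapping->host;

        /* write out and wait upon the associated metadata buffers */
        return sync_mapping_buffers(inode->i_mapping);
}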
511
512 /*
513  * The buffer's backing address_space's private_lock must be held
514  */
515 static void __remove_assoc_queue(struct buffer_head *bh)
516 {
517         list_del_init(&bh->b_assoc_buffers);
518         WARN_ON(!bh->b_assoc_map);
519         if (buffer_write_io_error(bh))
520                 set_bit(AS_EIO, &bh->b_assoc_map->flags);
521         bh->b_assoc_map = NULL;
522 }
523
524 int inode_has_buffers(struct inode *inode)
525 {
526         return !list_empty(&inode->i_data.private_list);
527 }
528
529 /*
530  * osync is designed to support O_SYNC io.  It waits synchronously for
531  * all already-submitted IO to complete, but does not queue any new
532  * writes to the disk.
533  *
534  * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
535  * you dirty the buffers, and then use osync_inode_buffers to wait for
536  * completion.  Any other dirty buffers which are not yet queued for
537  * write will not be flushed to disk by the osync.
538  */
539 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
540 {
541         struct buffer_head *bh;
542         struct list_head *p;
543         int err = 0;
544
545         spin_lock(lock);
546 repeat:
547         list_for_each_prev(p, list) {
548                 bh = BH_ENTRY(p);
549                 if (buffer_locked(bh)) {
550                         get_bh(bh);
551                         spin_unlock(lock);
552                         wait_on_buffer(bh);
553                         if (!buffer_uptodate(bh))
554                                 err = -EIO;
555                         brelse(bh);
556                         spin_lock(lock);
557                         goto repeat;
558                 }
559         }
560         spin_unlock(lock);
561         return err;
562 }
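/*
 * Illustrative sketch (not part of this file): the osync model above in
 * miniature - start the write when the buffer is dirtied, wait for it
 * later.  The helper itself is hypothetical; ll_rw_block(),
 * wait_on_buffer() and buffer_uptodate() are the real interfaces.
 */
static int example_osync_one(struct buffer_head *bh)
{
        if (buffer_dirty(bh))
                ll_rw_block(WRITE, 1, &bh);     /* queue the write now */
        wait_on_buffer(bh);                     /* later: wait for completion */
        return buffer_uptodate(bh) ? 0 : -EIO;
}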
563
564 static void do_thaw_one(struct super_block *sb, void *unused)
565 {
566         char b[BDEVNAME_SIZE];
567         while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
568                 printk(KERN_WARNING "Emergency Thaw on %s\n",
569                        bdevname(sb->s_bdev, b));
570 }
571
572 static void do_thaw_all(struct work_struct *work)
573 {
574         iterate_supers(do_thaw_one, NULL);
575         kfree(work);
576         printk(KERN_WARNING "Emergency Thaw complete\n");
577 }
578
579 /**
580  * emergency_thaw_all -- forcibly thaw every frozen filesystem
581  *
582  * Used for emergency unfreeze of all filesystems via SysRq
583  */
584 void emergency_thaw_all(void)
585 {
586         struct work_struct *work;
587
588         work = kmalloc(sizeof(*work), GFP_ATOMIC);
589         if (work) {
590                 INIT_WORK(work, do_thaw_all);
591                 schedule_work(work);
592         }
593 }
594
595 /**
596  * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
597  * @mapping: the mapping which wants those buffers written
598  *
599  * Starts I/O against the buffers at mapping->private_list, and waits upon
600  * that I/O.
601  *
602  * Basically, this is a convenience function for fsync().
603  * @mapping is a file or directory which needs those buffers to be written for
604  * a successful fsync().
605  */
606 int sync_mapping_buffers(struct address_space *mapping)
607 {
608         struct address_space *buffer_mapping = mapping->assoc_mapping;
609
610         if (buffer_mapping == NULL || list_empty(&mapping->private_list))
611                 return 0;
612
613         return fsync_buffers_list(&buffer_mapping->private_lock,
614                                         &mapping->private_list);
615 }
616 EXPORT_SYMBOL(sync_mapping_buffers);
617
618 /*
619  * Called when we've recently written block `bblock', and it is known that
620  * `bblock' was for a buffer_boundary() buffer.  This means that the block at
621  * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
622  * dirty, schedule it for IO.  So that indirects merge nicely with their data.
623  */
624 void write_boundary_block(struct block_device *bdev,
625                         sector_t bblock, unsigned blocksize)
626 {
627         struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
628         if (bh) {
629                 if (buffer_dirty(bh))
630                         ll_rw_block(WRITE, 1, &bh);
631                 put_bh(bh);
632         }
633 }
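/*
 * Illustrative sketch (not part of this file): a writeback path can use
 * write_boundary_block() the way mpage-style writeout does - once a buffer
 * marked buffer_boundary() has been submitted, nudge the probably dirty
 * indirect block that follows it on disk.  The helper is hypothetical.
 */
static void example_after_submit(struct buffer_head *bh)
{
        if (buffer_boundary(bh))
                write_boundary_block(bh->b_bdev, bh->b_blocknr, bh->b_size);
}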
634
635 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
636 {
637         struct address_space *mapping = inode->i_mapping;
638         struct address_space *buffer_mapping = bh->b_page->mapping;
639
640         mark_buffer_dirty(bh);
641         if (!mapping->assoc_mapping) {
642                 mapping->assoc_mapping = buffer_mapping;
643         } else {
644                 BUG_ON(mapping->assoc_mapping != buffer_mapping);
645         }
646         if (!bh->b_assoc_map) {
647                 spin_lock(&buffer_mapping->private_lock);
648                 list_move_tail(&bh->b_assoc_buffers,
649                                 &mapping->private_list);
650                 bh->b_assoc_map = mapping;
651                 spin_unlock(&buffer_mapping->private_lock);
652         }
653 }
654 EXPORT_SYMBOL(mark_buffer_dirty_inode);
655
656 /*
657  * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
658  * dirty.
659  *
660  * If warn is true, then emit a warning if the page is not uptodate and has
661  * not been truncated.
662  */
663 static void __set_page_dirty(struct page *page,
664                 struct address_space *mapping, int warn)
665 {
666         spin_lock_irq(&mapping->tree_lock);
667         if (page->mapping) {    /* Race with truncate? */
668                 WARN_ON_ONCE(warn && !PageUptodate(page));
669                 account_page_dirtied(page, mapping);
670                 radix_tree_tag_set(&mapping->page_tree,
671                                 page_index(page), PAGECACHE_TAG_DIRTY);
672         }
673         spin_unlock_irq(&mapping->tree_lock);
674         __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
675 }
676
677 /*
678  * Add a page to the dirty page list.
679  *
680  * It is a sad fact of life that this function is called from several places
681  * deeply under spinlocking.  It may not sleep.
682  *
683  * If the page has buffers, the uptodate buffers are set dirty, to preserve
684  * dirty-state coherency between the page and the buffers.  If the page does
685  * not have buffers then when they are later attached they will all be set
686  * dirty.
687  *
688  * The buffers are dirtied before the page is dirtied.  There's a small race
689  * window in which a writepage caller may see the page cleanness but not the
690  * buffer dirtiness.  That's fine.  If this code were to set the page dirty
691  * before the buffers, a concurrent writepage caller could clear the page dirty
692  * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
693  * page on the dirty page list.
694  *
695  * We use private_lock to lock against try_to_free_buffers while using the
696  * page's buffer list.  Also use this to protect against clean buffers being
697  * added to the page after it was set dirty.
698  *
699  * FIXME: may need to call ->reservepage here as well.  That's rather up to the
700  * address_space though.
701  */
702 int __set_page_dirty_buffers(struct page *page)
703 {
704         int newly_dirty;
705         struct address_space *mapping = page_mapping(page);
706
707         if (unlikely(!mapping))
708                 return !TestSetPageDirty(page);
709
710         spin_lock(&mapping->private_lock);
711         if (page_has_buffers(page)) {
712                 struct buffer_head *head = page_buffers(page);
713                 struct buffer_head *bh = head;
714
715                 do {
716                         set_buffer_dirty(bh);
717                         bh = bh->b_this_page;
718                 } while (bh != head);
719         }
720         newly_dirty = !TestSetPageDirty(page);
721         spin_unlock(&mapping->private_lock);
722
723         if (newly_dirty)
724                 __set_page_dirty(page, mapping, 1);
725         return newly_dirty;
726 }
727 EXPORT_SYMBOL(__set_page_dirty_buffers);
728
729 /*
730  * Write out and wait upon a list of buffers.
731  *
732  * We have conflicting pressures: we want to make sure that all
733  * initially dirty buffers get waited on, but that any subsequently
734  * dirtied buffers don't.  After all, we don't want fsync to last
735  * forever if somebody is actively writing to the file.
736  *
737  * Do this in two main stages: first we copy dirty buffers to a
738  * temporary inode list, queueing the writes as we go.  Then we clean
739  * up, waiting for those writes to complete.
740  * 
741  * During this second stage, any subsequent updates to the file may end
742  * up refiling the buffer on the original inode's dirty list again, so
743  * there is a chance we will end up with a buffer queued for write but
744  * not yet completed on that list.  So, as a final cleanup we go through
745  * the osync code to catch these locked, dirty buffers without requeuing
746  * any newly dirty buffers for write.
747  */
748 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
749 {
750         struct buffer_head *bh;
751         struct list_head tmp;
752         struct address_space *mapping;
753         int err = 0, err2;
754         struct blk_plug plug;
755
756         INIT_LIST_HEAD(&tmp);
757         blk_start_plug(&plug);
758
759         spin_lock(lock);
760         while (!list_empty(list)) {
761                 bh = BH_ENTRY(list->next);
762                 mapping = bh->b_assoc_map;
763                 __remove_assoc_queue(bh);
764                 /* Avoid race with mark_buffer_dirty_inode() which does
765                  * a lockless check and we rely on seeing the dirty bit */
766                 smp_mb();
767                 if (buffer_dirty(bh) || buffer_locked(bh)) {
768                         list_add(&bh->b_assoc_buffers, &tmp);
769                         bh->b_assoc_map = mapping;
770                         if (buffer_dirty(bh)) {
771                                 get_bh(bh);
772                                 spin_unlock(lock);
773                                 /*
774                                  * Ensure any pending I/O completes so that
775                                  * write_dirty_buffer() actually writes the
776                                  * current contents - it is a noop if I/O is
777                                  * still in flight on potentially older
778                                  * contents.
779                                  */
780                                 write_dirty_buffer(bh, WRITE_SYNC);
781
782                                 /*
783                                  * Kick off IO for the previous mapping. Note
784                                  * that we will not run the very last mapping,
785                                  * wait_on_buffer() will do that for us
786                                  * through sync_buffer().
787                                  */
788                                 brelse(bh);
789                                 spin_lock(lock);
790                         }
791                 }
792         }
793
794         spin_unlock(lock);
795         blk_finish_plug(&plug);
796         spin_lock(lock);
797
798         while (!list_empty(&tmp)) {
799                 bh = BH_ENTRY(tmp.prev);
800                 get_bh(bh);
801                 mapping = bh->b_assoc_map;
802                 __remove_assoc_queue(bh);
803                 /* Avoid race with mark_buffer_dirty_inode() which does
804                  * a lockless check and we rely on seeing the dirty bit */
805                 smp_mb();
806                 if (buffer_dirty(bh)) {
807                         list_add(&bh->b_assoc_buffers,
808                                  &mapping->private_list);
809                         bh->b_assoc_map = mapping;
810                 }
811                 spin_unlock(lock);
812                 wait_on_buffer(bh);
813                 if (!buffer_uptodate(bh))
814                         err = -EIO;
815                 brelse(bh);
816                 spin_lock(lock);
817         }
818         
819         spin_unlock(lock);
820         err2 = osync_buffers_list(lock, list);
821         if (err)
822                 return err;
823         else
824                 return err2;
825 }
826
827 /*
828  * Invalidate any and all dirty buffers on a given inode.  We are
829  * probably unmounting the fs, but that doesn't mean we have already
830  * done a sync().  Just drop the buffers from the inode list.
831  *
832  * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
833  * assumes that all the buffers are against the blockdev.  Not true
834  * for reiserfs.
835  */
836 void invalidate_inode_buffers(struct inode *inode)
837 {
838         if (inode_has_buffers(inode)) {
839                 struct address_space *mapping = &inode->i_data;
840                 struct list_head *list = &mapping->private_list;
841                 struct address_space *buffer_mapping = mapping->assoc_mapping;
842
843                 spin_lock(&buffer_mapping->private_lock);
844                 while (!list_empty(list))
845                         __remove_assoc_queue(BH_ENTRY(list->next));
846                 spin_unlock(&buffer_mapping->private_lock);
847         }
848 }
849 EXPORT_SYMBOL(invalidate_inode_buffers);
850
851 /*
852  * Remove any clean buffers from the inode's buffer list.  This is called
853  * when we're trying to free the inode itself.  Those buffers can pin it.
854  *
855  * Returns true if all buffers were removed.
856  */
857 int remove_inode_buffers(struct inode *inode)
858 {
859         int ret = 1;
860
861         if (inode_has_buffers(inode)) {
862                 struct address_space *mapping = &inode->i_data;
863                 struct list_head *list = &mapping->private_list;
864                 struct address_space *buffer_mapping = mapping->assoc_mapping;
865
866                 spin_lock(&buffer_mapping->private_lock);
867                 while (!list_empty(list)) {
868                         struct buffer_head *bh = BH_ENTRY(list->next);
869                         if (buffer_dirty(bh)) {
870                                 ret = 0;
871                                 break;
872                         }
873                         __remove_assoc_queue(bh);
874                 }
875                 spin_unlock(&buffer_mapping->private_lock);
876         }
877         return ret;
878 }
879
880 /*
881  * Create the appropriate buffers when given a page for data area and
882  * the size of each buffer.  Use the bh->b_this_page linked list to
883  * follow the buffers created.  Return NULL if unable to create more
884  * buffers.
885  *
886  * The retry flag is used to differentiate async IO (paging, swapping)
887  * which may not fail from ordinary buffer allocations.
888  */
889 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
890                 int retry)
891 {
892         struct buffer_head *bh, *head;
893         long offset;
894
895 try_again:
896         head = NULL;
897         offset = PAGE_SIZE;
898         while ((offset -= size) >= 0) {
899                 bh = alloc_buffer_head(GFP_NOFS);
900                 if (!bh)
901                         goto no_grow;
902
903                 bh->b_bdev = NULL;
904                 bh->b_this_page = head;
905                 bh->b_blocknr = -1;
906                 head = bh;
907
908                 bh->b_state = 0;
909                 atomic_set(&bh->b_count, 0);
910                 bh->b_size = size;
911
912                 /* Link the buffer to its page */
913                 set_bh_page(bh, page, offset);
914
915                 init_buffer(bh, NULL, NULL);
916         }
917         return head;
918 /*
919  * In case anything failed, we just free everything we got.
920  */
921 no_grow:
922         if (head) {
923                 do {
924                         bh = head;
925                         head = head->b_this_page;
926                         free_buffer_head(bh);
927                 } while (head);
928         }
929
930         /*
931          * Return failure for non-async IO requests.  Async IO requests
932          * are not allowed to fail, so we have to wait until buffer heads
933          * become available.  But we don't want tasks sleeping with 
934          * partially complete buffers, so all were released above.
935          */
936         if (!retry)
937                 return NULL;
938
939         /* We're _really_ low on memory. Now we just
940          * wait for old buffer heads to become free due to
941          * finishing IO.  Since this is an async request and
942          * the reserve list is empty, we're sure there are 
943          * async buffer heads in use.
944          */
945         free_more_memory();
946         goto try_again;
947 }
948 EXPORT_SYMBOL_GPL(alloc_page_buffers);
949
950 static inline void
951 link_dev_buffers(struct page *page, struct buffer_head *head)
952 {
953         struct buffer_head *bh, *tail;
954
955         bh = head;
956         do {
957                 tail = bh;
958                 bh = bh->b_this_page;
959         } while (bh);
960         tail->b_this_page = head;
961         attach_page_buffers(page, head);
962 }
963
964 /*
965  * Initialise the state of a blockdev page's buffers.
966  */ 
967 static void
968 init_page_buffers(struct page *page, struct block_device *bdev,
969                         sector_t block, int size)
970 {
971         struct buffer_head *head = page_buffers(page);
972         struct buffer_head *bh = head;
973         int uptodate = PageUptodate(page);
974         sector_t end_block = blkdev_max_block(I_BDEV(bdev->bd_inode));
975
976         do {
977                 if (!buffer_mapped(bh)) {
978                         init_buffer(bh, NULL, NULL);
979                         bh->b_bdev = bdev;
980                         bh->b_blocknr = block;
981                         if (uptodate)
982                                 set_buffer_uptodate(bh);
983                         if (block < end_block)
984                                 set_buffer_mapped(bh);
985                 }
986                 block++;
987                 bh = bh->b_this_page;
988         } while (bh != head);
989 }
990
991 /*
992  * Create the page-cache page that contains the requested block.
993  *
994  * This is used purely for blockdev mappings.
995  */
996 static struct page *
997 grow_dev_page(struct block_device *bdev, sector_t block,
998                 pgoff_t index, int size)
999 {
1000         struct inode *inode = bdev->bd_inode;
1001         struct page *page;
1002         struct buffer_head *bh;
1003
1004         page = find_or_create_page(inode->i_mapping, index,
1005                 (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
1006         if (!page)
1007                 return NULL;
1008
1009         BUG_ON(!PageLocked(page));
1010
1011         if (page_has_buffers(page)) {
1012                 bh = page_buffers(page);
1013                 if (bh->b_size == size) {
1014                         init_page_buffers(page, bdev, block, size);
1015                         return page;
1016                 }
1017                 if (!try_to_free_buffers(page))
1018                         goto failed;
1019         }
1020
1021         /*
1022          * Allocate some buffers for this page
1023          */
1024         bh = alloc_page_buffers(page, size, 0);
1025         if (!bh)
1026                 goto failed;
1027
1028         /*
1029          * Link the page to the buffers and initialise them.  Take the
1030          * lock to be atomic wrt __find_get_block(), which does not
1031          * run under the page lock.
1032          */
1033         spin_lock(&inode->i_mapping->private_lock);
1034         link_dev_buffers(page, bh);
1035         init_page_buffers(page, bdev, block, size);
1036         spin_unlock(&inode->i_mapping->private_lock);
1037         return page;
1038
1039 failed:
1040         BUG();
1041         unlock_page(page);
1042         page_cache_release(page);
1043         return NULL;
1044 }
1045
1046 /*
1047  * Create buffers for the specified block device block's page.  If
1048  * that page was dirty, the buffers are set dirty also.
1049  */
1050 static int
1051 grow_buffers(struct block_device *bdev, sector_t block, int size)
1052 {
1053         struct page *page;
1054         pgoff_t index;
1055         int sizebits;
1056
1057         sizebits = -1;
1058         do {
1059                 sizebits++;
1060         } while ((size << sizebits) < PAGE_SIZE);
1061
1062         index = block >> sizebits;
1063
1064         /*
1065          * Check for a block which wants to lie outside our maximum possible
1066          * pagecache index.  (this comparison is done using sector_t types).
1067          */
1068         if (unlikely(index != block >> sizebits)) {
1069                 char b[BDEVNAME_SIZE];
1070
1071                 printk(KERN_ERR "%s: requested out-of-range block %llu for "
1072                         "device %s\n",
1073                         __func__, (unsigned long long)block,
1074                         bdevname(bdev, b));
1075                 return -EIO;
1076         }
1077         block = index << sizebits;
1078         /* Create a page with the proper size buffers.. */
1079         page = grow_dev_page(bdev, block, index, size);
1080         if (!page)
1081                 return 0;
1082         unlock_page(page);
1083         page_cache_release(page);
1084         return 1;
1085 }
1086
1087 static struct buffer_head *
1088 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1089 {
1090         int ret;
1091         struct buffer_head *bh;
1092
1093         /* Size must be multiple of hard sectorsize */
1094         if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
1095                         (size < 512 || size > PAGE_SIZE))) {
1096                 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1097                                         size);
1098                 printk(KERN_ERR "logical block size: %d\n",
1099                                         bdev_logical_block_size(bdev));
1100
1101                 dump_stack();
1102                 return NULL;
1103         }
1104
1105 retry:
1106         bh = __find_get_block(bdev, block, size);
1107         if (bh)
1108                 return bh;
1109
1110         ret = grow_buffers(bdev, block, size);
1111         if (ret == 0) {
1112                 free_more_memory();
1113                 goto retry;
1114         } else if (ret > 0) {
1115                 bh = __find_get_block(bdev, block, size);
1116                 if (bh)
1117                         return bh;
1118         }
1119         return NULL;
1120 }
1121
1122 /*
1123  * The relationship between dirty buffers and dirty pages:
1124  *
1125  * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1126  * the page is tagged dirty in its radix tree.
1127  *
1128  * At all times, the dirtiness of the buffers represents the dirtiness of
1129  * subsections of the page.  If the page has buffers, the page dirty bit is
1130  * merely a hint about the true dirty state.
1131  *
1132  * When a page is set dirty in its entirety, all its buffers are marked dirty
1133  * (if the page has buffers).
1134  *
1135  * When a buffer is marked dirty, its page is dirtied, but the page's other
1136  * buffers are not.
1137  *
1138  * Also.  When blockdev buffers are explicitly read with bread(), they
1139  * individually become uptodate.  But their backing page remains not
1140  * uptodate - even if all of its buffers are uptodate.  A subsequent
1141  * block_read_full_page() against that page will discover all the uptodate
1142  * buffers, will set the page uptodate and will perform no I/O.
1143  */
1144
1145 /**
1146  * mark_buffer_dirty - mark a buffer_head as needing writeout
1147  * @bh: the buffer_head to mark dirty
1148  *
1149  * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1150  * backing page dirty, then tag the page as dirty in its address_space's radix
1151  * tree and then attach the address_space's inode to its superblock's dirty
1152  * inode list.
1153  *
1154  * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
1155  * mapping->tree_lock and mapping->host->i_lock.
1156  */
1157 void mark_buffer_dirty(struct buffer_head *bh)
1158 {
1159         WARN_ON_ONCE(!buffer_uptodate(bh));
1160
1161         /*
1162          * Very *carefully* optimize the it-is-already-dirty case.
1163          *
1164          * Don't let the final "is it dirty" escape to before we
1165          * perhaps modified the buffer.
1166          */
1167         if (buffer_dirty(bh)) {
1168                 smp_mb();
1169                 if (buffer_dirty(bh))
1170                         return;
1171         }
1172
1173         if (!test_set_buffer_dirty(bh)) {
1174                 struct page *page = bh->b_page;
1175                 if (!TestSetPageDirty(page)) {
1176                         struct address_space *mapping = page_mapping(page);
1177                         if (mapping)
1178                                 __set_page_dirty(page, mapping, 0);
1179                 }
1180         }
1181 }
1182 EXPORT_SYMBOL(mark_buffer_dirty);
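/*
 * Illustrative sketch (not part of this file): the usual read-modify-dirty
 * cycle for a metadata block.  The helper and its arguments are
 * hypothetical (the caller must keep off + len within the block);
 * sb_bread(), mark_buffer_dirty() and brelse() are the real interfaces.
 */
static int example_update_block(struct super_block *sb, sector_t blocknr,
                                const void *src, size_t off, size_t len)
{
        struct buffer_head *bh = sb_bread(sb, blocknr);

        if (!bh)
                return -EIO;
        memcpy(bh->b_data + off, src, len);     /* modify the cached block */
        mark_buffer_dirty(bh);                  /* dirty bh, its page, its inode */
        brelse(bh);                             /* writeback picks it up later */
        return 0;
}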
1183
1184 /*
1185  * Decrement a buffer_head's reference count.  If all buffers against a page
1186  * have zero reference count, are clean and unlocked, and if the page is clean
1187  * and unlocked then try_to_free_buffers() may strip the buffers from the page
1188  * in preparation for freeing it (sometimes, rarely, buffers are removed from
1189  * a page but it ends up not being freed, and buffers may later be reattached).
1190  */
1191 void __brelse(struct buffer_head * buf)
1192 {
1193         if (atomic_read(&buf->b_count)) {
1194                 put_bh(buf);
1195                 return;
1196         }
1197         WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1198 }
1199 EXPORT_SYMBOL(__brelse);
1200
1201 /*
1202  * bforget() is like brelse(), except it discards any
1203  * potentially dirty data.
1204  */
1205 void __bforget(struct buffer_head *bh)
1206 {
1207         clear_buffer_dirty(bh);
1208         if (bh->b_assoc_map) {
1209                 struct address_space *buffer_mapping = bh->b_page->mapping;
1210
1211                 spin_lock(&buffer_mapping->private_lock);
1212                 list_del_init(&bh->b_assoc_buffers);
1213                 bh->b_assoc_map = NULL;
1214                 spin_unlock(&buffer_mapping->private_lock);
1215         }
1216         __brelse(bh);
1217 }
1218 EXPORT_SYMBOL(__bforget);
1219
1220 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1221 {
1222         lock_buffer(bh);
1223         if (buffer_uptodate(bh)) {
1224                 unlock_buffer(bh);
1225                 return bh;
1226         } else {
1227                 get_bh(bh);
1228                 bh->b_end_io = end_buffer_read_sync;
1229                 submit_bh(READ, bh);
1230                 wait_on_buffer(bh);
1231                 if (buffer_uptodate(bh))
1232                         return bh;
1233         }
1234         brelse(bh);
1235         return NULL;
1236 }
1237
1238 /*
1239  * Per-cpu buffer LRU implementation, to reduce the cost of __find_get_block().
1240  * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
1241  * refcount elevated by one when they're in an LRU.  A buffer can only appear
1242  * once in a particular CPU's LRU.  A single buffer can be present in multiple
1243  * CPU's LRUs at the same time.
1244  *
1245  * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1246  * sb_find_get_block().
1247  *
1248  * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
1249  * a local interrupt disable for that.
1250  */
1251
1252 #define BH_LRU_SIZE     8
1253
1254 struct bh_lru {
1255         struct buffer_head *bhs[BH_LRU_SIZE];
1256 };
1257
1258 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1259
1260 #ifdef CONFIG_SMP
1261 #define bh_lru_lock()   local_irq_disable()
1262 #define bh_lru_unlock() local_irq_enable()
1263 #else
1264 #define bh_lru_lock()   preempt_disable()
1265 #define bh_lru_unlock() preempt_enable()
1266 #endif
1267
1268 static inline void check_irqs_on(void)
1269 {
1270 #ifdef irqs_disabled
1271         BUG_ON(irqs_disabled());
1272 #endif
1273 }
1274
1275 /*
1276  * The LRU management algorithm is dopey-but-simple.  Sorry.
1277  */
1278 static void bh_lru_install(struct buffer_head *bh)
1279 {
1280         struct buffer_head *evictee = NULL;
1281
1282         check_irqs_on();
1283         bh_lru_lock();
1284         if (__this_cpu_read(bh_lrus.bhs[0]) != bh) {
1285                 struct buffer_head *bhs[BH_LRU_SIZE];
1286                 int in;
1287                 int out = 0;
1288
1289                 get_bh(bh);
1290                 bhs[out++] = bh;
1291                 for (in = 0; in < BH_LRU_SIZE; in++) {
1292                         struct buffer_head *bh2 =
1293                                 __this_cpu_read(bh_lrus.bhs[in]);
1294
1295                         if (bh2 == bh) {
1296                                 __brelse(bh2);
1297                         } else {
1298                                 if (out >= BH_LRU_SIZE) {
1299                                         BUG_ON(evictee != NULL);
1300                                         evictee = bh2;
1301                                 } else {
1302                                         bhs[out++] = bh2;
1303                                 }
1304                         }
1305                 }
1306                 while (out < BH_LRU_SIZE)
1307                         bhs[out++] = NULL;
1308                 memcpy(__this_cpu_ptr(&bh_lrus.bhs), bhs, sizeof(bhs));
1309         }
1310         bh_lru_unlock();
1311
1312         if (evictee)
1313                 __brelse(evictee);
1314 }
1315
1316 /*
1317  * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
1318  */
1319 static struct buffer_head *
1320 lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1321 {
1322         struct buffer_head *ret = NULL;
1323         unsigned int i;
1324
1325         check_irqs_on();
1326         bh_lru_lock();
1327         for (i = 0; i < BH_LRU_SIZE; i++) {
1328                 struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);
1329
1330                 if (bh && bh->b_bdev == bdev &&
1331                                 bh->b_blocknr == block && bh->b_size == size) {
1332                         if (i) {
1333                                 while (i) {
1334                                         __this_cpu_write(bh_lrus.bhs[i],
1335                                                 __this_cpu_read(bh_lrus.bhs[i - 1]));
1336                                         i--;
1337                                 }
1338                                 __this_cpu_write(bh_lrus.bhs[0], bh);
1339                         }
1340                         get_bh(bh);
1341                         ret = bh;
1342                         break;
1343                 }
1344         }
1345         bh_lru_unlock();
1346         return ret;
1347 }
1348
1349 /*
1350  * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
1351  * it in the LRU and mark it as accessed.  If it is not present then return
1352  * NULL
1353  */
1354 struct buffer_head *
1355 __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1356 {
1357         struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1358
1359         if (bh == NULL) {
1360                 bh = __find_get_block_slow(bdev, block);
1361                 if (bh)
1362                         bh_lru_install(bh);
1363         }
1364         if (bh)
1365                 touch_buffer(bh);
1366         return bh;
1367 }
1368 EXPORT_SYMBOL(__find_get_block);
1369
1370 /*
1371  * __getblk will locate (and, if necessary, create) the buffer_head
1372  * which corresponds to the passed block_device, block and size. The
1373  * returned buffer has its reference count incremented.
1374  *
1375  * __getblk() cannot fail - it just keeps trying.  If you pass it an
1376  * illegal block number, __getblk() will happily return a buffer_head
1377  * which represents the non-existent block.  Very weird.
1378  *
1379  * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1380  * attempt is failing.  FIXME, perhaps?
1381  */
1382 struct buffer_head *
1383 __getblk(struct block_device *bdev, sector_t block, unsigned size)
1384 {
1385         struct buffer_head *bh = __find_get_block(bdev, block, size);
1386
1387         might_sleep();
1388         if (bh == NULL)
1389                 bh = __getblk_slow(bdev, block, size);
1390         return bh;
1391 }
1392 EXPORT_SYMBOL(__getblk);
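/*
 * Illustrative sketch (not part of this file): initialising a block that
 * will be completely overwritten, so no read is needed first.  As noted
 * above, __getblk() does not fail for a sane size.  The helper is
 * hypothetical.
 */
static void example_init_block(struct block_device *bdev, sector_t blocknr,
                               unsigned size)
{
        struct buffer_head *bh = __getblk(bdev, blocknr, size);

        lock_buffer(bh);
        memset(bh->b_data, 0, bh->b_size);      /* new contents */
        set_buffer_uptodate(bh);                /* cache now holds what we want */
        unlock_buffer(bh);
        mark_buffer_dirty(bh);                  /* schedule for writeback */
        brelse(bh);
}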
1393
1394 /*
1395  * Do async read-ahead on a buffer..
1396  */
1397 void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1398 {
1399         struct buffer_head *bh = __getblk(bdev, block, size);
1400         if (likely(bh)) {
1401                 ll_rw_block(READA, 1, &bh);
1402                 brelse(bh);
1403         }
1404 }
1405 EXPORT_SYMBOL(__breadahead);
1406
1407 /**
1408  *  __bread() - reads a specified block and returns the bh
1409  *  @bdev: the block_device to read from
1410  *  @block: number of block
1411  *  @size: size (in bytes) to read
1412  * 
1413  *  Reads a specified block, and returns buffer head that contains it.
1414  *  It returns NULL if the block was unreadable.
1415  */
1416 struct buffer_head *
1417 __bread(struct block_device *bdev, sector_t block, unsigned size)
1418 {
1419         struct buffer_head *bh = __getblk(bdev, block, size);
1420
1421         if (likely(bh) && !buffer_uptodate(bh))
1422                 bh = __bread_slow(bh);
1423         return bh;
1424 }
1425 EXPORT_SYMBOL(__bread);
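/*
 * Illustrative sketch (not part of this file): a read-only lookup through
 * __bread(), which returns NULL when the block cannot be read.  The helper
 * and the "label" it copies out are hypothetical.
 */
static int example_read_label(struct block_device *bdev, sector_t blocknr,
                              unsigned size, char *label, size_t label_len)
{
        struct buffer_head *bh = __bread(bdev, blocknr, size);

        if (!bh)
                return -EIO;                    /* unreadable block */
        memcpy(label, bh->b_data, min_t(size_t, label_len, bh->b_size));
        brelse(bh);
        return 0;
}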
1426
1427 /*
1428  * invalidate_bh_lrus() is called rarely - but not only at unmount.
1429  * This doesn't race because it runs in each cpu either in irq
1430  * or with preempt disabled.
1431  */
1432 static void invalidate_bh_lru(void *arg)
1433 {
1434         struct bh_lru *b = &get_cpu_var(bh_lrus);
1435         int i;
1436
1437         for (i = 0; i < BH_LRU_SIZE; i++) {
1438                 brelse(b->bhs[i]);
1439                 b->bhs[i] = NULL;
1440         }
1441         put_cpu_var(bh_lrus);
1442 }
1443         
1444 void invalidate_bh_lrus(void)
1445 {
1446         on_each_cpu(invalidate_bh_lru, NULL, 1);
1447 }
1448 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
1449
1450 void set_bh_page(struct buffer_head *bh,
1451                 struct page *page, unsigned long offset)
1452 {
1453         bh->b_page = page;
1454         BUG_ON(offset >= PAGE_SIZE);
1455         if (PageHighMem(page))
1456                 /*
1457                  * This catches illegal uses and preserves the offset:
1458                  */
1459                 bh->b_data = (char *)(0 + offset);
1460         else
1461                 bh->b_data = page_address(page) + offset;
1462 }
1463 EXPORT_SYMBOL(set_bh_page);
1464
1465 /*
1466  * Called when truncating a buffer on a page completely.
1467  */
1468 static void discard_buffer(struct buffer_head * bh)
1469 {
1470         lock_buffer(bh);
1471         clear_buffer_dirty(bh);
1472         bh->b_bdev = NULL;
1473         clear_buffer_mapped(bh);
1474         clear_buffer_req(bh);
1475         clear_buffer_new(bh);
1476         clear_buffer_delay(bh);
1477         clear_buffer_unwritten(bh);
1478         unlock_buffer(bh);
1479 }
1480
1481 /**
1482  * block_invalidatepage - invalidate part or all of a buffer-backed page
1483  *
1484  * @page: the page which is affected
1485  * @offset: the index of the truncation point
1486  *
1487  * block_invalidatepage() is called when all or part of the page has become
1488  * invalidated by a truncate operation.
1489  *
1490  * block_invalidatepage() does not have to release all buffers, but it must
1491  * ensure that no dirty buffer is left outside @offset and that no I/O
1492  * is underway against any of the blocks which are outside the truncation
1493  * point.  Because the caller is about to free (and possibly reuse) those
1494  * blocks on-disk.
1495  */
1496 void block_invalidatepage(struct page *page, unsigned long offset)
1497 {
1498         struct buffer_head *head, *bh, *next;
1499         unsigned int curr_off = 0;
1500
1501         BUG_ON(!PageLocked(page));
1502         if (!page_has_buffers(page))
1503                 goto out;
1504
1505         head = page_buffers(page);
1506         bh = head;
1507         do {
1508                 unsigned int next_off = curr_off + bh->b_size;
1509                 next = bh->b_this_page;
1510
1511                 /*
1512                  * is this block fully invalidated?
1513                  */
1514                 if (offset <= curr_off)
1515                         discard_buffer(bh);
1516                 curr_off = next_off;
1517                 bh = next;
1518         } while (bh != head);
1519
1520         /*
1521          * We release buffers only if the entire page is being invalidated.
1522          * The get_block cached value has been unconditionally invalidated,
1523          * so real IO is not possible anymore.
1524          */
1525         if (offset == 0)
1526                 try_to_release_page(page, 0);
1527 out:
1528         return;
1529 }
1530 EXPORT_SYMBOL(block_invalidatepage);
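
/*
 * Editor's illustration (not part of the original file): block_invalidatepage()
 * has the same signature as the ->invalidatepage address_space operation in
 * this kernel, so a buffer-backed filesystem that needs nothing extra can
 * typically point the aop at it directly (journalling filesystems such as
 * ext3 supply their own instead).  The "examplefs_" name is hypothetical.
 */
#if 0	/* illustration only */
static const struct address_space_operations examplefs_aops = {
        /* ... readpage, writepage, write_begin, write_end ... */
        .invalidatepage = block_invalidatepage,
};
#endif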
1531
1532 /*
1533  * We attach and possibly dirty the buffers atomically wrt
1534  * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
1535  * is already excluded via the page lock.
1536  */
1537 void create_empty_buffers(struct page *page,
1538                         unsigned long blocksize, unsigned long b_state)
1539 {
1540         struct buffer_head *bh, *head, *tail;
1541
1542         head = alloc_page_buffers(page, blocksize, 1);
1543         bh = head;
1544         do {
1545                 bh->b_state |= b_state;
1546                 tail = bh;
1547                 bh = bh->b_this_page;
1548         } while (bh);
1549         tail->b_this_page = head;
1550
1551         spin_lock(&page->mapping->private_lock);
1552         if (PageUptodate(page) || PageDirty(page)) {
1553                 bh = head;
1554                 do {
1555                         if (PageDirty(page))
1556                                 set_buffer_dirty(bh);
1557                         if (PageUptodate(page))
1558                                 set_buffer_uptodate(bh);
1559                         bh = bh->b_this_page;
1560                 } while (bh != head);
1561         }
1562         attach_page_buffers(page, head);
1563         spin_unlock(&page->mapping->private_lock);
1564 }
1565 EXPORT_SYMBOL(create_empty_buffers);
1566
1567 /*
1568  * We are taking a block for data and we don't want any output from any
1569  * buffer-cache aliases starting from the return from this function and
1570  * until the moment when something explicitly marks the buffer
1571  * dirty (hopefully that will not happen until we free that block ;-)
1572  * We don't even need to mark it not-uptodate - nobody can expect
1573  * anything from a newly allocated buffer anyway. We used to use
1574  * unmap_buffer() for such invalidation, but that was wrong. We definitely
1575  * don't want to mark the alias unmapped, for example - it would confuse
1576  * anyone who might pick it up with bread() afterwards...
1577  *
1578  * Also..  Note that bforget() doesn't lock the buffer.  So there can
1579  * be writeout I/O going on against recently-freed buffers.  We don't
1580  * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1581  * only if we really need to.  That happens here.
1582  */
1583 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1584 {
1585         struct buffer_head *old_bh;
1586
1587         might_sleep();
1588
1589         old_bh = __find_get_block_slow(bdev, block);
1590         if (old_bh) {
1591                 clear_buffer_dirty(old_bh);
1592                 wait_on_buffer(old_bh);
1593                 clear_buffer_req(old_bh);
1594                 __brelse(old_bh);
1595         }
1596 }
1597 EXPORT_SYMBOL(unmap_underlying_metadata);
1598
1599 /*
1600  * NOTE! All mapped/uptodate combinations are valid:
1601  *
1602  *      Mapped  Uptodate        Meaning
1603  *
1604  *      No      No              "unknown" - must do get_block()
1605  *      No      Yes             "hole" - zero-filled
1606  *      Yes     No              "allocated" - allocated on disk, not read in
1607  *      Yes     Yes             "valid" - allocated and up-to-date in memory.
1608  *
1609  * "Dirty" is valid only with the last case (mapped+uptodate).
1610  */
1611
1612 /*
1613  * While block_write_full_page is writing back the dirty buffers under
1614  * the page lock, whoever dirtied the buffers may decide to clean them
1615  * again at any time.  We handle that by only looking at the buffer
1616  * state inside lock_buffer().
1617  *
1618  * If block_write_full_page() is called for regular writeback
1619  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1620  * locked buffer.   This can only happen if someone has written the buffer
1621  * directly, with submit_bh().  At the address_space level PageWriteback
1622  * prevents this contention from occurring.
1623  *
1624  * If block_write_full_page() is called with wbc->sync_mode ==
1625  * WB_SYNC_ALL, the writes are posted using WRITE_SYNC; this
1626  * causes the writes to be flagged as synchronous writes.
1627  */
1628 static int __block_write_full_page(struct inode *inode, struct page *page,
1629                         get_block_t *get_block, struct writeback_control *wbc,
1630                         bh_end_io_t *handler)
1631 {
1632         int err;
1633         sector_t block;
1634         sector_t last_block;
1635         struct buffer_head *bh, *head;
1636         const unsigned blocksize = 1 << inode->i_blkbits;
1637         int nr_underway = 0;
1638         int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
1639                         WRITE_SYNC : WRITE);
1640
1641         BUG_ON(!PageLocked(page));
1642
1643         last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1644
1645         if (!page_has_buffers(page)) {
1646                 create_empty_buffers(page, blocksize,
1647                                         (1 << BH_Dirty)|(1 << BH_Uptodate));
1648         }
1649
1650         /*
1651          * Be very careful.  We have no exclusion from __set_page_dirty_buffers
1652          * here, and the (potentially unmapped) buffers may become dirty at
1653          * any time.  If a buffer becomes dirty here after we've inspected it
1654          * then we just miss that fact, and the page stays dirty.
1655          *
1656          * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1657          * handle that here by just cleaning them.
1658          */
1659
1660         block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1661         head = page_buffers(page);
1662         bh = head;
1663
1664         /*
1665          * Get all the dirty buffers mapped to disk addresses and
1666          * handle any aliases from the underlying blockdev's mapping.
1667          */
1668         do {
1669                 if (block > last_block) {
1670                         /*
1671                          * mapped buffers outside i_size will occur, because
1672                          * this page can be outside i_size when there is a
1673                          * truncate in progress.
1674                          */
1675                         /*
1676                          * The buffer was zeroed by block_write_full_page()
1677                          */
1678                         clear_buffer_dirty(bh);
1679                         set_buffer_uptodate(bh);
1680                 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1681                            buffer_dirty(bh)) {
1682                         WARN_ON(bh->b_size != blocksize);
1683                         err = get_block(inode, block, bh, 1);
1684                         if (err)
1685                                 goto recover;
1686                         clear_buffer_delay(bh);
1687                         if (buffer_new(bh)) {
1688                                 /* blockdev mappings never come here */
1689                                 clear_buffer_new(bh);
1690                                 unmap_underlying_metadata(bh->b_bdev,
1691                                                         bh->b_blocknr);
1692                         }
1693                 }
1694                 bh = bh->b_this_page;
1695                 block++;
1696         } while (bh != head);
1697
1698         do {
1699                 if (!buffer_mapped(bh))
1700                         continue;
1701                 /*
1702                  * If it's a fully non-blocking write attempt and we cannot
1703                  * lock the buffer then redirty the page.  Note that this can
1704                  * potentially cause a busy-wait loop from writeback threads
1705                  * and kswapd activity, but those code paths have their own
1706                  * higher-level throttling.
1707                  */
1708                 if (wbc->sync_mode != WB_SYNC_NONE) {
1709                         lock_buffer(bh);
1710                 } else if (!trylock_buffer(bh)) {
1711                         redirty_page_for_writepage(wbc, page);
1712                         continue;
1713                 }
1714                 if (test_clear_buffer_dirty(bh)) {
1715                         mark_buffer_async_write_endio(bh, handler);
1716                 } else {
1717                         unlock_buffer(bh);
1718                 }
1719         } while ((bh = bh->b_this_page) != head);
1720
1721         /*
1722          * The page and its buffers are protected by PageWriteback(), so we can
1723          * drop the bh refcounts early.
1724          */
1725         BUG_ON(PageWriteback(page));
1726         set_page_writeback(page);
1727
1728         do {
1729                 struct buffer_head *next = bh->b_this_page;
1730                 if (buffer_async_write(bh)) {
1731                         submit_bh(write_op, bh);
1732                         nr_underway++;
1733                 }
1734                 bh = next;
1735         } while (bh != head);
1736         unlock_page(page);
1737
1738         err = 0;
1739 done:
1740         if (nr_underway == 0) {
1741                 /*
1742                  * The page was marked dirty, but the buffers were
1743                  * clean.  Someone wrote them back by hand with
1744                  * ll_rw_block/submit_bh.  A rare case.
1745                  */
1746                 end_page_writeback(page);
1747
1748                 /*
1749                  * The page and buffer_heads can be released at any time from
1750                  * here on.
1751                  */
1752         }
1753         return err;
1754
1755 recover:
1756         /*
1757          * ENOSPC, or some other error.  We may already have added some
1758          * blocks to the file, so we need to write these out to avoid
1759          * exposing stale data.
1760          * The page is currently locked and not marked for writeback
1761          */
1762         bh = head;
1763         /* Recovery: lock and submit the mapped buffers */
1764         do {
1765                 if (buffer_mapped(bh) && buffer_dirty(bh) &&
1766                     !buffer_delay(bh)) {
1767                         lock_buffer(bh);
1768                         mark_buffer_async_write_endio(bh, handler);
1769                 } else {
1770                         /*
1771                          * The buffer may have been set dirty during
1772                          * attachment to a dirty page.
1773                          */
1774                         clear_buffer_dirty(bh);
1775                 }
1776         } while ((bh = bh->b_this_page) != head);
1777         SetPageError(page);
1778         BUG_ON(PageWriteback(page));
1779         mapping_set_error(page->mapping, err);
1780         set_page_writeback(page);
1781         do {
1782                 struct buffer_head *next = bh->b_this_page;
1783                 if (buffer_async_write(bh)) {
1784                         clear_buffer_dirty(bh);
1785                         submit_bh(write_op, bh);
1786                         nr_underway++;
1787                 }
1788                 bh = next;
1789         } while (bh != head);
1790         unlock_page(page);
1791         goto done;
1792 }
1793
1794 /*
1795  * If a page has any new buffers, zero them out here, and mark them uptodate
1796  * and dirty so they'll be written out (in order to prevent uninitialised
1797  * block data from leaking). And clear the new bit.
1798  */
1799 void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1800 {
1801         unsigned int block_start, block_end;
1802         struct buffer_head *head, *bh;
1803
1804         BUG_ON(!PageLocked(page));
1805         if (!page_has_buffers(page))
1806                 return;
1807
1808         bh = head = page_buffers(page);
1809         block_start = 0;
1810         do {
1811                 block_end = block_start + bh->b_size;
1812
1813                 if (buffer_new(bh)) {
1814                         if (block_end > from && block_start < to) {
1815                                 if (!PageUptodate(page)) {
1816                                         unsigned start, size;
1817
1818                                         start = max(from, block_start);
1819                                         size = min(to, block_end) - start;
1820
1821                                         zero_user(page, start, size);
1822                                         set_buffer_uptodate(bh);
1823                                 }
1824
1825                                 clear_buffer_new(bh);
1826                                 mark_buffer_dirty(bh);
1827                         }
1828                 }
1829
1830                 block_start = block_end;
1831                 bh = bh->b_this_page;
1832         } while (bh != head);
1833 }
1834 EXPORT_SYMBOL(page_zero_new_buffers);
1835
1836 int __block_write_begin(struct page *page, loff_t pos, unsigned len,
1837                 get_block_t *get_block)
1838 {
1839         unsigned from = pos & (PAGE_CACHE_SIZE - 1);
1840         unsigned to = from + len;
1841         struct inode *inode = page->mapping->host;
1842         unsigned block_start, block_end;
1843         sector_t block;
1844         int err = 0;
1845         unsigned blocksize, bbits;
1846         struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1847
1848         BUG_ON(!PageLocked(page));
1849         BUG_ON(from > PAGE_CACHE_SIZE);
1850         BUG_ON(to > PAGE_CACHE_SIZE);
1851         BUG_ON(from > to);
1852
1853         blocksize = 1 << inode->i_blkbits;
1854         if (!page_has_buffers(page))
1855                 create_empty_buffers(page, blocksize, 0);
1856         head = page_buffers(page);
1857
1858         bbits = inode->i_blkbits;
1859         block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1860
1861         for(bh = head, block_start = 0; bh != head || !block_start;
1862             block++, block_start=block_end, bh = bh->b_this_page) {
1863                 block_end = block_start + blocksize;
1864                 if (block_end <= from || block_start >= to) {
1865                         if (PageUptodate(page)) {
1866                                 if (!buffer_uptodate(bh))
1867                                         set_buffer_uptodate(bh);
1868                         }
1869                         continue;
1870                 }
1871                 if (buffer_new(bh))
1872                         clear_buffer_new(bh);
1873                 if (!buffer_mapped(bh)) {
1874                         WARN_ON(bh->b_size != blocksize);
1875                         err = get_block(inode, block, bh, 1);
1876                         if (err)
1877                                 break;
1878                         if (buffer_new(bh)) {
1879                                 unmap_underlying_metadata(bh->b_bdev,
1880                                                         bh->b_blocknr);
1881                                 if (PageUptodate(page)) {
1882                                         clear_buffer_new(bh);
1883                                         set_buffer_uptodate(bh);
1884                                         mark_buffer_dirty(bh);
1885                                         continue;
1886                                 }
1887                                 if (block_end > to || block_start < from)
1888                                         zero_user_segments(page,
1889                                                 to, block_end,
1890                                                 block_start, from);
1891                                 continue;
1892                         }
1893                 }
1894                 if (PageUptodate(page)) {
1895                         if (!buffer_uptodate(bh))
1896                                 set_buffer_uptodate(bh);
1897                         continue; 
1898                 }
1899                 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1900                     !buffer_unwritten(bh) &&
1901                      (block_start < from || block_end > to)) {
1902                         ll_rw_block(READ, 1, &bh);
1903                         *wait_bh++=bh;
1904                 }
1905         }
1906         /*
1907          * If we issued read requests - let them complete.
1908          */
1909         while(wait_bh > wait) {
1910                 wait_on_buffer(*--wait_bh);
1911                 if (!buffer_uptodate(*wait_bh))
1912                         err = -EIO;
1913         }
1914         if (unlikely(err))
1915                 page_zero_new_buffers(page, from, to);
1916         return err;
1917 }
1918 EXPORT_SYMBOL(__block_write_begin);
1919
1920 static int __block_commit_write(struct inode *inode, struct page *page,
1921                 unsigned from, unsigned to)
1922 {
1923         unsigned block_start, block_end;
1924         int partial = 0;
1925         unsigned blocksize;
1926         struct buffer_head *bh, *head;
1927
1928         blocksize = 1 << inode->i_blkbits;
1929
1930         for(bh = head = page_buffers(page), block_start = 0;
1931             bh != head || !block_start;
1932             block_start=block_end, bh = bh->b_this_page) {
1933                 block_end = block_start + blocksize;
1934                 if (block_end <= from || block_start >= to) {
1935                         if (!buffer_uptodate(bh))
1936                                 partial = 1;
1937                 } else {
1938                         set_buffer_uptodate(bh);
1939                         mark_buffer_dirty(bh);
1940                 }
1941                 clear_buffer_new(bh);
1942         }
1943
1944         /*
1945          * If this is a partial write which happened to make all buffers
1946          * uptodate then we can optimize away a bogus readpage() for
1947          * the next read(). Here we 'discover' whether the page went
1948          * uptodate as a result of this (potentially partial) write.
1949          */
1950         if (!partial)
1951                 SetPageUptodate(page);
1952         return 0;
1953 }
1954
1955 /*
1956  * block_write_begin takes care of the basic task of block allocation and
1957  * bringing partial write blocks uptodate first.
1958  *
1959  * The filesystem needs to handle block truncation upon failure.
1960  */
1961 int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
1962                 unsigned flags, struct page **pagep, get_block_t *get_block)
1963 {
1964         pgoff_t index = pos >> PAGE_CACHE_SHIFT;
1965         struct page *page;
1966         int status;
1967
1968         page = grab_cache_page_write_begin(mapping, index, flags);
1969         if (!page)
1970                 return -ENOMEM;
1971
1972         status = __block_write_begin(page, pos, len, get_block);
1973         if (unlikely(status)) {
1974                 unlock_page(page);
1975                 page_cache_release(page);
1976                 page = NULL;
1977         }
1978
1979         *pagep = page;
1980         return status;
1981 }
1982 EXPORT_SYMBOL(block_write_begin);
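
/*
 * Editor's illustration (not part of the original file): the usual
 * ->write_begin wiring for a filesystem that relies on block_write_begin().
 * examplefs_get_block() stands for the filesystem's own get_block_t callback
 * and is hypothetical; as noted above, on failure the filesystem remains
 * responsible for truncating any blocks it instantiated beyond i_size.
 */
#if 0	/* illustration only */
static int examplefs_write_begin(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata)
{
        return block_write_begin(mapping, pos, len, flags, pagep,
                                 examplefs_get_block);
}
#endif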
1983
1984 int block_write_end(struct file *file, struct address_space *mapping,
1985                         loff_t pos, unsigned len, unsigned copied,
1986                         struct page *page, void *fsdata)
1987 {
1988         struct inode *inode = mapping->host;
1989         unsigned start;
1990
1991         start = pos & (PAGE_CACHE_SIZE - 1);
1992
1993         if (unlikely(copied < len)) {
1994                 /*
1995                  * The buffers that were written will now be uptodate, so we
1996                  * don't have to worry about a readpage reading them and
1997                  * overwriting a partial write. However if we have encountered
1998                  * a short write and only partially written into a buffer, it
1999                  * will not be marked uptodate, so a readpage might come in and
2000                  * destroy our partial write.
2001                  *
2002                  * Do the simplest thing, and just treat any short write to a
2003                  * non uptodate page as a zero-length write, and force the
2004                  * caller to redo the whole thing.
2005                  */
2006                 if (!PageUptodate(page))
2007                         copied = 0;
2008
2009                 page_zero_new_buffers(page, start+copied, start+len);
2010         }
2011         flush_dcache_page(page);
2012
2013         /* This could be a short (even 0-length) commit */
2014         __block_commit_write(inode, page, start, start+copied);
2015
2016         return copied;
2017 }
2018 EXPORT_SYMBOL(block_write_end);
2019
2020 int generic_write_end(struct file *file, struct address_space *mapping,
2021                         loff_t pos, unsigned len, unsigned copied,
2022                         struct page *page, void *fsdata)
2023 {
2024         struct inode *inode = mapping->host;
2025         int i_size_changed = 0;
2026
2027         copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2028
2029         /*
2030          * No need to use i_size_read() here, the i_size
2031          * cannot change under us because we hold i_mutex.
2032          *
2033          * But it's important to update i_size while still holding page lock:
2034          * page writeout could otherwise come in and zero beyond i_size.
2035          */
2036         if (pos+copied > inode->i_size) {
2037                 i_size_write(inode, pos+copied);
2038                 i_size_changed = 1;
2039         }
2040
2041         unlock_page(page);
2042         page_cache_release(page);
2043
2044         /*
2045          * Don't mark the inode dirty under page lock. First, it unnecessarily
2046          * makes the holding time of page lock longer. Second, it forces lock
2047          * ordering of page lock and transaction start for journaling
2048          * filesystems.
2049          */
2050         if (i_size_changed)
2051                 mark_inode_dirty(inode);
2052
2053         return copied;
2054 }
2055 EXPORT_SYMBOL(generic_write_end);
2056
2057 /*
2058  * block_is_partially_uptodate checks whether buffers within a page are
2059  * uptodate or not.
2060  *
2061  * Returns true if all buffers which correspond to a file portion
2062  * we want to read are uptodate.
2063  */
2064 int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
2065                                         unsigned long from)
2066 {
2067         struct inode *inode = page->mapping->host;
2068         unsigned block_start, block_end, blocksize;
2069         unsigned to;
2070         struct buffer_head *bh, *head;
2071         int ret = 1;
2072
2073         if (!page_has_buffers(page))
2074                 return 0;
2075
2076         blocksize = 1 << inode->i_blkbits;
2077         to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
2078         to = from + to;
2079         if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
2080                 return 0;
2081
2082         head = page_buffers(page);
2083         bh = head;
2084         block_start = 0;
2085         do {
2086                 block_end = block_start + blocksize;
2087                 if (block_end > from && block_start < to) {
2088                         if (!buffer_uptodate(bh)) {
2089                                 ret = 0;
2090                                 break;
2091                         }
2092                         if (block_end >= to)
2093                                 break;
2094                 }
2095                 block_start = block_end;
2096                 bh = bh->b_this_page;
2097         } while (bh != head);
2098
2099         return ret;
2100 }
2101 EXPORT_SYMBOL(block_is_partially_uptodate);
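
/*
 * Editor's illustration (not part of the original file): a filesystem that
 * keeps buffer heads on its pages can export this check through the
 * ->is_partially_uptodate address_space operation, which has the same
 * signature in this kernel.  The "examplefs_" name is hypothetical.
 */
#if 0	/* illustration only */
static const struct address_space_operations examplefs_file_aops = {
        /* ... readpage, writepage, write_begin, write_end ... */
        .is_partially_uptodate  = block_is_partially_uptodate,
};
#endif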
2102
2103 /*
2104  * Generic "read page" function for block devices that have the normal
2105  * get_block functionality. This is most of the block device filesystems.
2106  * Reads the page asynchronously --- the unlock_buffer() and
2107  * set/clear_buffer_uptodate() functions propagate buffer state into the
2108  * page struct once IO has completed.
2109  */
2110 int block_read_full_page(struct page *page, get_block_t *get_block)
2111 {
2112         struct inode *inode = page->mapping->host;
2113         sector_t iblock, lblock;
2114         struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2115         unsigned int blocksize;
2116         int nr, i;
2117         int fully_mapped = 1;
2118
2119         BUG_ON(!PageLocked(page));
2120         blocksize = 1 << inode->i_blkbits;
2121         if (!page_has_buffers(page))
2122                 create_empty_buffers(page, blocksize, 0);
2123         head = page_buffers(page);
2124
2125         iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2126         lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2127         bh = head;
2128         nr = 0;
2129         i = 0;
2130
2131         do {
2132                 if (buffer_uptodate(bh))
2133                         continue;
2134
2135                 if (!buffer_mapped(bh)) {
2136                         int err = 0;
2137
2138                         fully_mapped = 0;
2139                         if (iblock < lblock) {
2140                                 WARN_ON(bh->b_size != blocksize);
2141                                 err = get_block(inode, iblock, bh, 0);
2142                                 if (err)
2143                                         SetPageError(page);
2144                         }
2145                         if (!buffer_mapped(bh)) {
2146                                 zero_user(page, i * blocksize, blocksize);
2147                                 if (!err)
2148                                         set_buffer_uptodate(bh);
2149                                 continue;
2150                         }
2151                         /*
2152                          * get_block() might have updated the buffer
2153                          * synchronously
2154                          */
2155                         if (buffer_uptodate(bh))
2156                                 continue;
2157                 }
2158                 arr[nr++] = bh;
2159         } while (i++, iblock++, (bh = bh->b_this_page) != head);
2160
2161         if (fully_mapped)
2162                 SetPageMappedToDisk(page);
2163
2164         if (!nr) {
2165                 /*
2166                  * All buffers are uptodate - we can set the page uptodate
2167                  * as well. But not if get_block() returned an error.
2168                  */
2169                 if (!PageError(page))
2170                         SetPageUptodate(page);
2171                 unlock_page(page);
2172                 return 0;
2173         }
2174
2175         /* Stage two: lock the buffers */
2176         for (i = 0; i < nr; i++) {
2177                 bh = arr[i];
2178                 lock_buffer(bh);
2179                 mark_buffer_async_read(bh);
2180         }
2181
2182         /*
2183          * Stage 3: start the IO.  Check for uptodateness
2184          * inside the buffer lock in case another process reading
2185          * the underlying blockdev brought it uptodate (the sct fix).
2186          */
2187         for (i = 0; i < nr; i++) {
2188                 bh = arr[i];
2189                 if (buffer_uptodate(bh))
2190                         end_buffer_async_read(bh, 1);
2191                 else
2192                         submit_bh(READ, bh);
2193         }
2194         return 0;
2195 }
2196 EXPORT_SYMBOL(block_read_full_page);
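
/*
 * Editor's illustration (not part of the original file): the typical
 * ->readpage wrapper for a get_block-based filesystem.  The "examplefs_"
 * names are hypothetical; examplefs_get_block() is the filesystem's own
 * get_block_t callback.
 */
#if 0	/* illustration only */
static int examplefs_readpage(struct file *file, struct page *page)
{
        return block_read_full_page(page, examplefs_get_block);
}
#endif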
2197
2198 /* utility function for filesystems that need to do work on expanding
2199  * truncates.  Uses filesystem pagecache writes to allow the filesystem to
2200  * deal with the hole.  
2201  */
2202 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2203 {
2204         struct address_space *mapping = inode->i_mapping;
2205         struct page *page;
2206         void *fsdata;
2207         int err;
2208
2209         err = inode_newsize_ok(inode, size);
2210         if (err)
2211                 goto out;
2212
2213         err = pagecache_write_begin(NULL, mapping, size, 0,
2214                                 AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2215                                 &page, &fsdata);
2216         if (err)
2217                 goto out;
2218
2219         err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2220         BUG_ON(err > 0);
2221
2222 out:
2223         return err;
2224 }
2225 EXPORT_SYMBOL(generic_cont_expand_simple);
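
/*
 * Editor's illustration (not part of the original file): a sketch of how a
 * filesystem that cannot represent holes might grow a file (for example from
 * its ->setattr path) before updating i_size.  The surrounding setattr logic
 * is omitted and the "examplefs_" name is hypothetical.
 */
#if 0	/* illustration only */
static int examplefs_grow(struct inode *inode, loff_t new_size)
{
        if (new_size <= i_size_read(inode))
                return 0;
        /* a zero-length pagecache write at the new EOF instantiates blocks */
        return generic_cont_expand_simple(inode, new_size);
}
#endif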
2226
2227 static int cont_expand_zero(struct file *file, struct address_space *mapping,
2228                             loff_t pos, loff_t *bytes)
2229 {
2230         struct inode *inode = mapping->host;
2231         unsigned blocksize = 1 << inode->i_blkbits;
2232         struct page *page;
2233         void *fsdata;
2234         pgoff_t index, curidx;
2235         loff_t curpos;
2236         unsigned zerofrom, offset, len;
2237         int err = 0;
2238
2239         index = pos >> PAGE_CACHE_SHIFT;
2240         offset = pos & ~PAGE_CACHE_MASK;
2241
2242         while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
2243                 zerofrom = curpos & ~PAGE_CACHE_MASK;
2244                 if (zerofrom & (blocksize-1)) {
2245                         *bytes |= (blocksize-1);
2246                         (*bytes)++;
2247                 }
2248                 len = PAGE_CACHE_SIZE - zerofrom;
2249
2250                 err = pagecache_write_begin(file, mapping, curpos, len,
2251                                                 AOP_FLAG_UNINTERRUPTIBLE,
2252                                                 &page, &fsdata);
2253                 if (err)
2254                         goto out;
2255                 zero_user(page, zerofrom, len);
2256                 err = pagecache_write_end(file, mapping, curpos, len, len,
2257                                                 page, fsdata);
2258                 if (err < 0)
2259                         goto out;
2260                 BUG_ON(err != len);
2261                 err = 0;
2262
2263                 balance_dirty_pages_ratelimited(mapping);
2264         }
2265
2266         /* page covers the boundary, find the boundary offset */
2267         if (index == curidx) {
2268                 zerofrom = curpos & ~PAGE_CACHE_MASK;
2269                 /* if we are expanding the file, the last block will be filled */
2270                 if (offset <= zerofrom) {
2271                         goto out;
2272                 }
2273                 if (zerofrom & (blocksize-1)) {
2274                         *bytes |= (blocksize-1);
2275                         (*bytes)++;
2276                 }
2277                 len = offset - zerofrom;
2278
2279                 err = pagecache_write_begin(file, mapping, curpos, len,
2280                                                 AOP_FLAG_UNINTERRUPTIBLE,
2281                                                 &page, &fsdata);
2282                 if (err)
2283                         goto out;
2284                 zero_user(page, zerofrom, len);
2285                 err = pagecache_write_end(file, mapping, curpos, len, len,
2286                                                 page, fsdata);
2287                 if (err < 0)
2288                         goto out;
2289                 BUG_ON(err != len);
2290                 err = 0;
2291         }
2292 out:
2293         return err;
2294 }
2295
2296 /*
2297  * For moronic filesystems that do not allow holes in files.
2298  * We may have to extend the file.
2299  */
2300 int cont_write_begin(struct file *file, struct address_space *mapping,
2301                         loff_t pos, unsigned len, unsigned flags,
2302                         struct page **pagep, void **fsdata,
2303                         get_block_t *get_block, loff_t *bytes)
2304 {
2305         struct inode *inode = mapping->host;
2306         unsigned blocksize = 1 << inode->i_blkbits;
2307         unsigned zerofrom;
2308         int err;
2309
2310         err = cont_expand_zero(file, mapping, pos, bytes);
2311         if (err)
2312                 return err;
2313
2314         zerofrom = *bytes & ~PAGE_CACHE_MASK;
2315         if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2316                 *bytes |= (blocksize-1);
2317                 (*bytes)++;
2318         }
2319
2320         return block_write_begin(mapping, pos, len, flags, pagep, get_block);
2321 }
2322 EXPORT_SYMBOL(cont_write_begin);
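
/*
 * Editor's illustration (not part of the original file): how a hole-less
 * filesystem might route its ->write_begin through cont_write_begin().  The
 * final argument points at a per-inode loff_t recording how far the file has
 * been initialised on disk; EXAMPLEFS_I(inode)->mmu_private and
 * examplefs_get_block() are hypothetical names for that state and for the
 * filesystem's get_block_t callback.
 */
#if 0	/* illustration only */
static int examplefs_cont_write_begin(struct file *file,
                        struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata)
{
        return cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
                                examplefs_get_block,
                                &EXAMPLEFS_I(mapping->host)->mmu_private);
}
#endif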
2323
2324 int block_commit_write(struct page *page, unsigned from, unsigned to)
2325 {
2326         struct inode *inode = page->mapping->host;
2327         __block_commit_write(inode,page,from,to);
2328         return 0;
2329 }
2330 EXPORT_SYMBOL(block_commit_write);
2331
2332 /*
2333  * block_page_mkwrite() is not allowed to change the file size as it gets
2334  * called from a page fault handler when a page is first dirtied. Hence we must
2335  * be careful to check for EOF conditions here. We set the page up correctly
2336  * for a written page which means we get ENOSPC checking when writing into
2337  * holes and correct delalloc and unwritten extent mapping on filesystems that
2338  * support these features.
2339  *
2340  * We are not allowed to take the i_mutex here so we have to play games to
2341  * protect against truncate races as the page could now be beyond EOF.  Because
2342  * truncate writes the inode size before removing pages, once we have the
2343  * page lock we can determine safely if the page is beyond EOF. If it is not
2344  * beyond EOF, then the page is guaranteed safe against truncation until we
2345  * unlock the page.
2346  *
2347  * Direct callers of this function should call vfs_check_frozen() so that page
2348  * fault does not busyloop until the fs is thawed.
2349  */
2350 int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2351                          get_block_t get_block)
2352 {
2353         struct page *page = vmf->page;
2354         struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2355         unsigned long end;
2356         loff_t size;
2357         int ret;
2358
2359         lock_page(page);
2360         size = i_size_read(inode);
2361         if ((page->mapping != inode->i_mapping) ||
2362             (page_offset(page) > size)) {
2363                 /* We overload EFAULT to mean page got truncated */
2364                 ret = -EFAULT;
2365                 goto out_unlock;
2366         }
2367
2368         /* page is wholly or partially inside EOF */
2369         if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2370                 end = size & ~PAGE_CACHE_MASK;
2371         else
2372                 end = PAGE_CACHE_SIZE;
2373
2374         ret = __block_write_begin(page, 0, end, get_block);
2375         if (!ret)
2376                 ret = block_commit_write(page, 0, end);
2377
2378         if (unlikely(ret < 0))
2379                 goto out_unlock;
2380         /*
2381          * Freezing in progress? We check after the page is marked dirty and
2382          * with the page lock held, so if the test here fails, we are sure the
2383          * freezing code will wait during syncing until the page fault is done - at
2384          * that point the page will be dirty and unlocked, so the freezing code
2385          * will write it out and write-protect it again.
2386          */
2387         set_page_dirty(page);
2388         if (inode->i_sb->s_frozen != SB_UNFROZEN) {
2389                 ret = -EAGAIN;
2390                 goto out_unlock;
2391         }
2392         wait_on_page_writeback(page);
2393         return 0;
2394 out_unlock:
2395         unlock_page(page);
2396         return ret;
2397 }
2398 EXPORT_SYMBOL(__block_page_mkwrite);
2399
2400 int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2401                    get_block_t get_block)
2402 {
2403         int ret;
2404         struct super_block *sb = vma->vm_file->f_path.dentry->d_inode->i_sb;
2405
2406         /*
2407          * This check is racy but catches the common case. The check in
2408          * __block_page_mkwrite() is reliable.
2409          */
2410         vfs_check_frozen(sb, SB_FREEZE_WRITE);
2411         ret = __block_page_mkwrite(vma, vmf, get_block);
2412         return block_page_mkwrite_return(ret);
2413 }
2414 EXPORT_SYMBOL(block_page_mkwrite);
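
/*
 * Editor's illustration (not part of the original file): the usual
 * ->page_mkwrite wiring for a buffer-backed filesystem.  The "examplefs_"
 * names are hypothetical; block_page_mkwrite() already converts the result
 * into the VM_FAULT_* codes the fault path expects.
 */
#if 0	/* illustration only */
static int examplefs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return block_page_mkwrite(vma, vmf, examplefs_get_block);
}

static const struct vm_operations_struct examplefs_file_vm_ops = {
        .fault          = filemap_fault,
        .page_mkwrite   = examplefs_page_mkwrite,
};
#endif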
2415
2416 /*
2417  * nobh_write_begin()'s prereads are special: the buffer_heads are freed
2418  * immediately, while under the page lock.  So it needs a special end_io
2419  * handler which does not touch the bh after unlocking it.
2420  */
2421 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2422 {
2423         __end_buffer_read_notouch(bh, uptodate);
2424 }
2425
2426 /*
2427  * Attach the singly-linked list of buffers created by nobh_write_begin, to
2428  * the page (converting it to circular linked list and taking care of page
2429  * dirty races).
2430  */
2431 static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2432 {
2433         struct buffer_head *bh;
2434
2435         BUG_ON(!PageLocked(page));
2436
2437         spin_lock(&page->mapping->private_lock);
2438         bh = head;
2439         do {
2440                 if (PageDirty(page))
2441                         set_buffer_dirty(bh);
2442                 if (!bh->b_this_page)
2443                         bh->b_this_page = head;
2444                 bh = bh->b_this_page;
2445         } while (bh != head);
2446         attach_page_buffers(page, head);
2447         spin_unlock(&page->mapping->private_lock);
2448 }
2449
2450 /*
2451  * On entry, the page is fully not uptodate.
2452  * On exit the page is fully uptodate in the areas outside (from,to)
2453  * The filesystem needs to handle block truncation upon failure.
2454  */
2455 int nobh_write_begin(struct address_space *mapping,
2456                         loff_t pos, unsigned len, unsigned flags,
2457                         struct page **pagep, void **fsdata,
2458                         get_block_t *get_block)
2459 {
2460         struct inode *inode = mapping->host;
2461         const unsigned blkbits = inode->i_blkbits;
2462         const unsigned blocksize = 1 << blkbits;
2463         struct buffer_head *head, *bh;
2464         struct page *page;
2465         pgoff_t index;
2466         unsigned from, to;
2467         unsigned block_in_page;
2468         unsigned block_start, block_end;
2469         sector_t block_in_file;
2470         int nr_reads = 0;
2471         int ret = 0;
2472         int is_mapped_to_disk = 1;
2473
2474         index = pos >> PAGE_CACHE_SHIFT;
2475         from = pos & (PAGE_CACHE_SIZE - 1);
2476         to = from + len;
2477
2478         page = grab_cache_page_write_begin(mapping, index, flags);
2479         if (!page)
2480                 return -ENOMEM;
2481         *pagep = page;
2482         *fsdata = NULL;
2483
2484         if (page_has_buffers(page)) {
2485                 ret = __block_write_begin(page, pos, len, get_block);
2486                 if (unlikely(ret))
2487                         goto out_release;
2488                 return ret;
2489         }
2490
2491         if (PageMappedToDisk(page))
2492                 return 0;
2493
2494         /*
2495          * Allocate buffers so that we can keep track of state, and potentially
2496          * attach them to the page if an error occurs. In the common case of
2497          * no error, they will just be freed again without ever being attached
2498          * to the page (which is all OK, because we're under the page lock).
2499          *
2500          * Be careful: the buffer linked list is a NULL terminated one, rather
2501          * than the circular one we're used to.
2502          */
2503         head = alloc_page_buffers(page, blocksize, 0);
2504         if (!head) {
2505                 ret = -ENOMEM;
2506                 goto out_release;
2507         }
2508
2509         block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2510
2511         /*
2512          * We loop across all blocks in the page, whether or not they are
2513          * part of the affected region.  This is so we can discover if the
2514          * page is fully mapped-to-disk.
2515          */
2516         for (block_start = 0, block_in_page = 0, bh = head;
2517                   block_start < PAGE_CACHE_SIZE;
2518                   block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
2519                 int create;
2520
2521                 block_end = block_start + blocksize;
2522                 bh->b_state = 0;
2523                 create = 1;
2524                 if (block_start >= to)
2525                         create = 0;
2526                 ret = get_block(inode, block_in_file + block_in_page,
2527                                         bh, create);
2528                 if (ret)
2529                         goto failed;
2530                 if (!buffer_mapped(bh))
2531                         is_mapped_to_disk = 0;
2532                 if (buffer_new(bh))
2533                         unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2534                 if (PageUptodate(page)) {
2535                         set_buffer_uptodate(bh);
2536                         continue;
2537                 }
2538                 if (buffer_new(bh) || !buffer_mapped(bh)) {
2539                         zero_user_segments(page, block_start, from,
2540                                                         to, block_end);
2541                         continue;
2542                 }
2543                 if (buffer_uptodate(bh))
2544                         continue;       /* reiserfs does this */
2545                 if (block_start < from || block_end > to) {
2546                         lock_buffer(bh);
2547                         bh->b_end_io = end_buffer_read_nobh;
2548                         submit_bh(READ, bh);
2549                         nr_reads++;
2550                 }
2551         }
2552
2553         if (nr_reads) {
2554                 /*
2555                  * The page is locked, so these buffers are protected from
2556                  * any VM or truncate activity.  Hence we don't need to care
2557                  * for the buffer_head refcounts.
2558                  */
2559                 for (bh = head; bh; bh = bh->b_this_page) {
2560                         wait_on_buffer(bh);
2561                         if (!buffer_uptodate(bh))
2562                                 ret = -EIO;
2563                 }
2564                 if (ret)
2565                         goto failed;
2566         }
2567
2568         if (is_mapped_to_disk)
2569                 SetPageMappedToDisk(page);
2570
2571         *fsdata = head; /* to be released by nobh_write_end */
2572
2573         return 0;
2574
2575 failed:
2576         BUG_ON(!ret);
2577         /*
2578          * Error recovery is a bit difficult. We need to zero out blocks that
2579          * were newly allocated, and dirty them to ensure they get written out.
2580          * Buffers need to be attached to the page at this point, otherwise
2581          * the handling of potential IO errors during writeout would be hard
2582          * (could try doing synchronous writeout, but what if that fails too?)
2583          */
2584         attach_nobh_buffers(page, head);
2585         page_zero_new_buffers(page, from, to);
2586
2587 out_release:
2588         unlock_page(page);
2589         page_cache_release(page);
2590         *pagep = NULL;
2591
2592         return ret;
2593 }
2594 EXPORT_SYMBOL(nobh_write_begin);
2595
2596 int nobh_write_end(struct file *file, struct address_space *mapping,
2597                         loff_t pos, unsigned len, unsigned copied,
2598                         struct page *page, void *fsdata)
2599 {
2600         struct inode *inode = page->mapping->host;
2601         struct buffer_head *head = fsdata;
2602         struct buffer_head *bh;
2603         BUG_ON(fsdata != NULL && page_has_buffers(page));
2604
2605         if (unlikely(copied < len) && head)
2606                 attach_nobh_buffers(page, head);
2607         if (page_has_buffers(page))
2608                 return generic_write_end(file, mapping, pos, len,
2609                                         copied, page, fsdata);
2610
2611         SetPageUptodate(page);
2612         set_page_dirty(page);
2613         if (pos+copied > inode->i_size) {
2614                 i_size_write(inode, pos+copied);
2615                 mark_inode_dirty(inode);
2616         }
2617
2618         unlock_page(page);
2619         page_cache_release(page);
2620
2621         while (head) {
2622                 bh = head;
2623                 head = head->b_this_page;
2624                 free_buffer_head(bh);
2625         }
2626
2627         return copied;
2628 }
2629 EXPORT_SYMBOL(nobh_write_end);
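
/*
 * Editor's illustration (not part of the original file): a filesystem opting
 * into the nobh path typically pairs a thin ->write_begin wrapper around
 * nobh_write_begin() with nobh_write_end() as its ->write_end (ext2's "nobh"
 * mount option works this way).  The "examplefs_" names are hypothetical.
 */
#if 0	/* illustration only */
static int examplefs_nobh_write_begin(struct file *file,
                        struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata)
{
        return nobh_write_begin(mapping, pos, len, flags, pagep, fsdata,
                                examplefs_get_block);
}

static const struct address_space_operations examplefs_nobh_aops = {
        .write_begin    = examplefs_nobh_write_begin,
        .write_end      = nobh_write_end,
};
#endif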
2630
2631 /*
2632  * nobh_writepage() - based on block_write_full_page() except
2633  * that it tries to operate without attaching bufferheads to
2634  * the page.
2635  */
2636 int nobh_writepage(struct page *page, get_block_t *get_block,
2637                         struct writeback_control *wbc)
2638 {
2639         struct inode * const inode = page->mapping->host;
2640         loff_t i_size = i_size_read(inode);
2641         const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2642         unsigned offset;
2643         int ret;
2644
2645         /* Is the page fully inside i_size? */
2646         if (page->index < end_index)
2647                 goto out;
2648
2649         /* Is the page fully outside i_size? (truncate in progress) */
2650         offset = i_size & (PAGE_CACHE_SIZE-1);
2651         if (page->index >= end_index+1 || !offset) {
2652                 /*
2653                  * The page may have dirty, unmapped buffers.  For example,
2654                  * they may have been added in ext3_writepage().  Make them
2655                  * freeable here, so the page does not leak.
2656                  */
2657 #if 0
2658                 /* Not really sure about this  - do we need this ? */
2659                 if (page->mapping->a_ops->invalidatepage)
2660                         page->mapping->a_ops->invalidatepage(page, offset);
2661 #endif
2662                 unlock_page(page);
2663                 return 0; /* don't care */
2664         }
2665
2666         /*
2667          * The page straddles i_size.  It must be zeroed out on each and every
2668          * writepage invocation because it may be mmapped.  "A file is mapped
2669          * in multiples of the page size.  For a file that is not a multiple of
2670          * the  page size, the remaining memory is zeroed when mapped, and
2671          * writes to that region are not written out to the file."
2672          */
2673         zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2674 out:
2675         ret = mpage_writepage(page, get_block, wbc);
2676         if (ret == -EAGAIN)
2677                 ret = __block_write_full_page(inode, page, get_block, wbc,
2678                                               end_buffer_async_write);
2679         return ret;
2680 }
2681 EXPORT_SYMBOL(nobh_writepage);
2682
2683 int nobh_truncate_page(struct address_space *mapping,
2684                         loff_t from, get_block_t *get_block)
2685 {
2686         pgoff_t index = from >> PAGE_CACHE_SHIFT;
2687         unsigned offset = from & (PAGE_CACHE_SIZE-1);
2688         unsigned blocksize;
2689         sector_t iblock;
2690         unsigned length, pos;
2691         struct inode *inode = mapping->host;
2692         struct page *page;
2693         struct buffer_head map_bh;
2694         int err;
2695
2696         blocksize = 1 << inode->i_blkbits;
2697         length = offset & (blocksize - 1);
2698
2699         /* Block boundary? Nothing to do */
2700         if (!length)
2701                 return 0;
2702
2703         length = blocksize - length;
2704         iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2705
2706         page = grab_cache_page(mapping, index);
2707         err = -ENOMEM;
2708         if (!page)
2709                 goto out;
2710
2711         if (page_has_buffers(page)) {
2712 has_buffers:
2713                 unlock_page(page);
2714                 page_cache_release(page);
2715                 return block_truncate_page(mapping, from, get_block);
2716         }
2717
2718         /* Find the buffer that contains "offset" */
2719         pos = blocksize;
2720         while (offset >= pos) {
2721                 iblock++;
2722                 pos += blocksize;
2723         }
2724
2725         map_bh.b_size = blocksize;
2726         map_bh.b_state = 0;
2727         err = get_block(inode, iblock, &map_bh, 0);
2728         if (err)
2729                 goto unlock;
2730         /* unmapped? It's a hole - nothing to do */
2731         if (!buffer_mapped(&map_bh))
2732                 goto unlock;
2733
2734         /* Ok, it's mapped. Make sure it's up-to-date */
2735         if (!PageUptodate(page)) {
2736                 err = mapping->a_ops->readpage(NULL, page);
2737                 if (err) {
2738                         page_cache_release(page);
2739                         goto out;
2740                 }
2741                 lock_page(page);
2742                 if (!PageUptodate(page)) {
2743                         err = -EIO;
2744                         goto unlock;
2745                 }
2746                 if (page_has_buffers(page))
2747                         goto has_buffers;
2748         }
2749         zero_user(page, offset, length);
2750         set_page_dirty(page);
2751         err = 0;
2752
2753 unlock:
2754         unlock_page(page);
2755         page_cache_release(page);
2756 out:
2757         return err;
2758 }
2759 EXPORT_SYMBOL(nobh_truncate_page);
2760
2761 int block_truncate_page(struct address_space *mapping,
2762                         loff_t from, get_block_t *get_block)
2763 {
2764         pgoff_t index = from >> PAGE_CACHE_SHIFT;
2765         unsigned offset = from & (PAGE_CACHE_SIZE-1);
2766         unsigned blocksize;
2767         sector_t iblock;
2768         unsigned length, pos;
2769         struct inode *inode = mapping->host;
2770         struct page *page;
2771         struct buffer_head *bh;
2772         int err;
2773
2774         blocksize = 1 << inode->i_blkbits;
2775         length = offset & (blocksize - 1);
2776
2777         /* Block boundary? Nothing to do */
2778         if (!length)
2779                 return 0;
2780
2781         length = blocksize - length;
2782         iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2783         
2784         page = grab_cache_page(mapping, index);
2785         err = -ENOMEM;
2786         if (!page)
2787                 goto out;
2788
2789         if (!page_has_buffers(page))
2790                 create_empty_buffers(page, blocksize, 0);
2791
2792         /* Find the buffer that contains "offset" */
2793         bh = page_buffers(page);
2794         pos = blocksize;
2795         while (offset >= pos) {
2796                 bh = bh->b_this_page;
2797                 iblock++;
2798                 pos += blocksize;
2799         }
2800
2801         err = 0;
2802         if (!buffer_mapped(bh)) {
2803                 WARN_ON(bh->b_size != blocksize);
2804                 err = get_block(inode, iblock, bh, 0);
2805                 if (err)
2806                         goto unlock;
2807                 /* unmapped? It's a hole - nothing to do */
2808                 if (!buffer_mapped(bh))
2809                         goto unlock;
2810         }
2811
2812         /* Ok, it's mapped. Make sure it's up-to-date */
2813         if (PageUptodate(page))
2814                 set_buffer_uptodate(bh);
2815
2816         if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2817                 err = -EIO;
2818                 ll_rw_block(READ, 1, &bh);
2819                 wait_on_buffer(bh);
2820                 /* Uhhuh. Read error. Complain and punt. */
2821                 if (!buffer_uptodate(bh))
2822                         goto unlock;
2823         }
2824
2825         zero_user(page, offset, length);
2826         mark_buffer_dirty(bh);
2827         err = 0;
2828
2829 unlock:
2830         unlock_page(page);
2831         page_cache_release(page);
2832 out:
2833         return err;
2834 }
2835 EXPORT_SYMBOL(block_truncate_page);
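
/*
 * Editor's illustration (not part of the original file): a sketch of the
 * truncate-time call that zeroes the tail of the last block so stale data in
 * a partially used block is not exposed.  The "examplefs_" names are
 * hypothetical; examplefs_get_block() is the filesystem's get_block_t callback.
 */
#if 0	/* illustration only */
static void examplefs_truncate(struct inode *inode)
{
        /* zero from the new EOF to the end of that block, if not aligned */
        block_truncate_page(inode->i_mapping, inode->i_size,
                            examplefs_get_block);

        /* ... then free the on-disk blocks beyond the new EOF ... */
}
#endif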
2836
2837 /*
2838  * The generic ->writepage function for buffer-backed address_spaces;
2839  * this form passes in the end_io handler used to finish the IO.
2840  */
2841 int block_write_full_page_endio(struct page *page, get_block_t *get_block,
2842                         struct writeback_control *wbc, bh_end_io_t *handler)
2843 {
2844         struct inode * const inode = page->mapping->host;
2845         loff_t i_size = i_size_read(inode);
2846         const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2847         unsigned offset;
2848
2849         /* Is the page fully inside i_size? */
2850         if (page->index < end_index)
2851                 return __block_write_full_page(inode, page, get_block, wbc,
2852                                                handler);
2853
2854         /* Is the page fully outside i_size? (truncate in progress) */
2855         offset = i_size & (PAGE_CACHE_SIZE-1);
2856         if (page->index >= end_index+1 || !offset) {
2857                 /*
2858                  * The page may have dirty, unmapped buffers.  For example,
2859                  * they may have been added in ext3_writepage().  Make them
2860                  * freeable here, so the page does not leak.
2861                  */
2862                 do_invalidatepage(page, 0);
2863                 unlock_page(page);
2864                 return 0; /* don't care */
2865         }
2866
2867         /*
2868          * The page straddles i_size.  It must be zeroed out on each and every
2869          * writepage invocation because it may be mmapped.  "A file is mapped
2870          * in multiples of the page size.  For a file that is not a multiple of
2871          * the  page size, the remaining memory is zeroed when mapped, and
2872          * writes to that region are not written out to the file."
2873          */
2874         zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2875         return __block_write_full_page(inode, page, get_block, wbc, handler);
2876 }
2877 EXPORT_SYMBOL(block_write_full_page_endio);
2878
2879 /*
2880  * The generic ->writepage function for buffer-backed address_spaces
2881  */
2882 int block_write_full_page(struct page *page, get_block_t *get_block,
2883                         struct writeback_control *wbc)
2884 {
2885         return block_write_full_page_endio(page, get_block, wbc,
2886                                            end_buffer_async_write);
2887 }
2888 EXPORT_SYMBOL(block_write_full_page);
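
/*
 * Editor's illustration (not part of the original file): the typical
 * ->writepage wrapper for a get_block-based filesystem.  The "examplefs_"
 * names are hypothetical.
 */
#if 0	/* illustration only */
static int examplefs_writepage(struct page *page, struct writeback_control *wbc)
{
        return block_write_full_page(page, examplefs_get_block, wbc);
}
#endif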
2889
2890 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2891                             get_block_t *get_block)
2892 {
2893         struct buffer_head tmp;
2894         struct inode *inode = mapping->host;
2895         tmp.b_state = 0;
2896         tmp.b_blocknr = 0;
2897         tmp.b_size = 1 << inode->i_blkbits;
2898         get_block(inode, block, &tmp, 0);
2899         return tmp.b_blocknr;
2900 }
2901 EXPORT_SYMBOL(generic_block_bmap);
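
/*
 * Editor's illustration (not part of the original file): the usual ->bmap
 * wrapper, which lets userspace (e.g. the FIBMAP ioctl) translate a file
 * block number into a device block number.  examplefs_get_block() is the
 * filesystem's hypothetical get_block_t callback.
 */
#if 0	/* illustration only */
static sector_t examplefs_bmap(struct address_space *mapping, sector_t block)
{
        return generic_block_bmap(mapping, block, examplefs_get_block);
}
#endif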
2902
2903 static void end_bio_bh_io_sync(struct bio *bio, int err)
2904 {
2905         struct buffer_head *bh = bio->bi_private;
2906
2907         if (err == -EOPNOTSUPP) {
2908                 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2909         }
2910
2911         if (unlikely(test_bit(BIO_QUIET, &bio->bi_flags)))
2912                 set_bit(BH_Quiet, &bh->b_state);
2913
2914         bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2915         bio_put(bio);
2916 }
2917
2918 int submit_bh(int rw, struct buffer_head * bh)
2919 {
2920         struct bio *bio;
2921         int ret = 0;
2922
2923         BUG_ON(!buffer_locked(bh));
2924         BUG_ON(!buffer_mapped(bh));
2925         BUG_ON(!bh->b_end_io);
2926         BUG_ON(buffer_delay(bh));
2927         BUG_ON(buffer_unwritten(bh));
2928
2929         /*
2930          * Only clear out a write error when rewriting
2931          */
2932         if (test_set_buffer_req(bh) && (rw & WRITE))
2933                 clear_buffer_write_io_error(bh);
2934
2935         /*
2936          * from here on down, it's all bio -- do the initial mapping,
2937          * submit_bio -> generic_make_request may further map this bio around
2938          */
2939         bio = bio_alloc(GFP_NOIO, 1);
2940
2941         bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2942         bio->bi_bdev = bh->b_bdev;
2943         bio->bi_io_vec[0].bv_page = bh->b_page;
2944         bio->bi_io_vec[0].bv_len = bh->b_size;
2945         bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2946
2947         bio->bi_vcnt = 1;
2948         bio->bi_idx = 0;
2949         bio->bi_size = bh->b_size;
2950
2951         bio->bi_end_io = end_bio_bh_io_sync;
2952         bio->bi_private = bh;
2953
2954         bio_get(bio);
2955         submit_bio(rw, bio);
2956
2957         if (bio_flagged(bio, BIO_EOPNOTSUPP))
2958                 ret = -EOPNOTSUPP;
2959
2960         bio_put(bio);
2961         return ret;
2962 }
2963 EXPORT_SYMBOL(submit_bh);
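
/*
 * A hedged sketch of the canonical synchronous read through submit_bh():
 * lock the buffer, take an extra reference for the end_io handler, submit,
 * then wait.  This mirrors what bh_submit_read() later in this file does;
 * "example_read_bh" is a hypothetical helper and the buffer is assumed to
 * be mapped already.
 */
static int example_read_bh(struct buffer_head *bh)
{
        lock_buffer(bh);
        if (buffer_uptodate(bh)) {
                unlock_buffer(bh);
                return 0;
        }
        get_bh(bh);
        bh->b_end_io = end_buffer_read_sync;
        submit_bh(READ, bh);
        wait_on_buffer(bh);
        return buffer_uptodate(bh) ? 0 : -EIO;
}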
2964
2965 /**
2966  * ll_rw_block: low-level access to block devices (DEPRECATED)
2967  * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
2968  * @nr: number of &struct buffer_heads in the array
2969  * @bhs: array of pointers to &struct buffer_head
2970  *
2971  * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2972  * requests an I/O operation on them, either a %READ or a %WRITE.  The third
2973  * %READA option is described in the documentation for generic_make_request()
2974  * which ll_rw_block() calls.
2975  *
2976  * This function drops any buffer that it cannot get a lock on (with the
2977  * BH_Lock state bit), any buffer that appears to be clean when doing a write
2978  * request, and any buffer that appears to be up-to-date when doing a read
2979  * request.  Further, it marks as clean the buffers that are processed for
2980  * writing (the buffer cache won't assume that they are actually clean
2981  * until the buffer gets unlocked).
2982  *
2983  * ll_rw_block sets b_end_io to a simple completion handler that marks
2984  * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2985  * any waiters.
2986  *
2987  * All of the buffers must be for the same device, and must also be a
2988  * multiple of the current approved size for the device.
2989  */
2990 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2991 {
2992         int i;
2993
2994         for (i = 0; i < nr; i++) {
2995                 struct buffer_head *bh = bhs[i];
2996
2997                 if (!trylock_buffer(bh))
2998                         continue;
2999                 if (rw == WRITE) {
3000                         if (test_clear_buffer_dirty(bh)) {
3001                                 bh->b_end_io = end_buffer_write_sync;
3002                                 get_bh(bh);
3003                                 submit_bh(WRITE, bh);
3004                                 continue;
3005                         }
3006                 } else {
3007                         if (!buffer_uptodate(bh)) {
3008                                 bh->b_end_io = end_buffer_read_sync;
3009                                 get_bh(bh);
3010                                 submit_bh(rw, bh);
3011                                 continue;
3012                         }
3013                 }
3014                 unlock_buffer(bh);
3015         }
3016 }
3017 EXPORT_SYMBOL(ll_rw_block);
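
/*
 * A minimal usage sketch for ll_rw_block(): start reads on a batch of
 * metadata buffers and then wait for each one.  Because ll_rw_block()
 * silently skips buffers it cannot lock (see the comment above), callers
 * that need the data must re-check buffer_uptodate() afterwards.
 * "example_read_batch" is a hypothetical helper, not kernel API.
 */
static int example_read_batch(struct buffer_head *bhs[], int nr)
{
        int i, err = 0;

        ll_rw_block(READ, nr, bhs);
        for (i = 0; i < nr; i++) {
                wait_on_buffer(bhs[i]);
                if (!buffer_uptodate(bhs[i]))
                        err = -EIO;
        }
        return err;
}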
3018
3019 void write_dirty_buffer(struct buffer_head *bh, int rw)
3020 {
3021         lock_buffer(bh);
3022         if (!test_clear_buffer_dirty(bh)) {
3023                 unlock_buffer(bh);
3024                 return;
3025         }
3026         bh->b_end_io = end_buffer_write_sync;
3027         get_bh(bh);
3028         submit_bh(rw, bh);
3029 }
3030 EXPORT_SYMBOL(write_dirty_buffer);
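
/*
 * A hedged sketch of the asynchronous flush pattern write_dirty_buffer()
 * enables: kick the write off now, let other work proceed, and only wait
 * (and check for errors, as __sync_dirty_buffer() below does) when the
 * result is actually needed.  "example_flush_bh" is a hypothetical helper;
 * the caller is assumed to hold a reference on the buffer_head.
 */
static int example_flush_bh(struct buffer_head *bh)
{
        write_dirty_buffer(bh, WRITE);
        /* ... other work can proceed while the write is in flight ... */
        wait_on_buffer(bh);
        return buffer_uptodate(bh) ? 0 : -EIO;
}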
3031
3032 /*
3033  * For a data-integrity writeout, we need to wait upon any in-progress I/O
3034  * and then start new I/O and then wait upon it.  The caller must have a ref on
3035  * the buffer_head.
3036  */
3037 int __sync_dirty_buffer(struct buffer_head *bh, int rw)
3038 {
3039         int ret = 0;
3040
3041         WARN_ON(atomic_read(&bh->b_count) < 1);
3042         lock_buffer(bh);
3043         if (test_clear_buffer_dirty(bh)) {
3044                 get_bh(bh);
3045                 bh->b_end_io = end_buffer_write_sync;
3046                 ret = submit_bh(rw, bh);
3047                 wait_on_buffer(bh);
3048                 if (!ret && !buffer_uptodate(bh))
3049                         ret = -EIO;
3050         } else {
3051                 unlock_buffer(bh);
3052         }
3053         return ret;
3054 }
3055 EXPORT_SYMBOL(__sync_dirty_buffer);
3056
3057 int sync_dirty_buffer(struct buffer_head *bh)
3058 {
3059         return __sync_dirty_buffer(bh, WRITE_SYNC);
3060 }
3061 EXPORT_SYMBOL(sync_dirty_buffer);
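
/*
 * A minimal sketch of the data-integrity path: dirty a (hypothetical)
 * metadata buffer and force it to stable storage with sync_dirty_buffer(),
 * which waits for completion and returns -EIO on failure.  The caller is
 * assumed to hold a reference on the buffer_head, as required above.
 */
static int example_update_super(struct buffer_head *sb_bh)
{
        /* ... modify the on-disk structure in sb_bh->b_data ... */
        mark_buffer_dirty(sb_bh);
        return sync_dirty_buffer(sb_bh);
}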
3062
3063 /*
3064  * try_to_free_buffers() checks if all the buffers on this particular page
3065  * are unused, and releases them if so.
3066  *
3067  * Exclusion against try_to_free_buffers may be obtained by either
3068  * locking the page or by holding its mapping's private_lock.
3069  *
3070  * If the page is dirty but all the buffers are clean then we need to
3071  * be sure to mark the page clean as well.  This is because the page
3072  * may be against a block device, and a later reattachment of buffers
3073  * to a dirty page will set *all* buffers dirty.  Which would corrupt
3074  * filesystem data on the same device.
3075  *
3076  * The same applies to regular filesystem pages: if all the buffers are
3077  * clean then we set the page clean and proceed.  To do that, we require
3078  * total exclusion from __set_page_dirty_buffers().  That is obtained with
3079  * private_lock.
3080  *
3081  * try_to_free_buffers() is non-blocking.
3082  */
3083 static inline int buffer_busy(struct buffer_head *bh)
3084 {
3085         return atomic_read(&bh->b_count) |
3086                 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
3087 }
3088
3089 static int
3090 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
3091 {
3092         struct buffer_head *head = page_buffers(page);
3093         struct buffer_head *bh;
3094
3095         bh = head;
3096         do {
3097                 if (buffer_write_io_error(bh) && page->mapping)
3098                         set_bit(AS_EIO, &page->mapping->flags);
3099                 if (buffer_busy(bh))
3100                         goto failed;
3101                 bh = bh->b_this_page;
3102         } while (bh != head);
3103
3104         do {
3105                 struct buffer_head *next = bh->b_this_page;
3106
3107                 if (bh->b_assoc_map)
3108                         __remove_assoc_queue(bh);
3109                 bh = next;
3110         } while (bh != head);
3111         *buffers_to_free = head;
3112         __clear_page_buffers(page);
3113         return 1;
3114 failed:
3115         return 0;
3116 }
3117
3118 int try_to_free_buffers(struct page *page)
3119 {
3120         struct address_space * const mapping = page->mapping;
3121         struct buffer_head *buffers_to_free = NULL;
3122         int ret = 0;
3123
3124         BUG_ON(!PageLocked(page));
3125         if (PageWriteback(page))
3126                 return 0;
3127
3128         if (mapping == NULL) {          /* can this still happen? */
3129                 ret = drop_buffers(page, &buffers_to_free);
3130                 goto out;
3131         }
3132
3133         spin_lock(&mapping->private_lock);
3134         ret = drop_buffers(page, &buffers_to_free);
3135
3136         /*
3137          * If the filesystem writes its buffers by hand (eg ext3)
3138          * then we can have clean buffers against a dirty page.  We
3139          * clean the page here; otherwise the VM will never notice
3140          * that the filesystem did any IO at all.
3141          *
3142          * Also, during truncate, discard_buffer will have marked all
3143          * the page's buffers clean.  We discover that here and clean
3144          * the page also.
3145          *
3146          * private_lock must be held over this entire operation in order
3147          * to synchronise against __set_page_dirty_buffers and prevent the
3148          * dirty bit from being lost.
3149          */
3150         if (ret)
3151                 cancel_dirty_page(page, PAGE_CACHE_SIZE);
3152         spin_unlock(&mapping->private_lock);
3153 out:
3154         if (buffers_to_free) {
3155                 struct buffer_head *bh = buffers_to_free;
3156
3157                 do {
3158                         struct buffer_head *next = bh->b_this_page;
3159                         free_buffer_head(bh);
3160                         bh = next;
3161                 } while (bh != buffers_to_free);
3162         }
3163         return ret;
3164 }
3165 EXPORT_SYMBOL(try_to_free_buffers);
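
/*
 * A hedged sketch of the usual entry point into try_to_free_buffers(): a
 * simple buffer-backed filesystem can point its ->releasepage method
 * straight at it ("myfs_releasepage" is a hypothetical name).  Journalling
 * filesystems typically interpose their own checks first.
 */
static int myfs_releasepage(struct page *page, gfp_t gfp_mask)
{
        return try_to_free_buffers(page);
}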
3166
3167 /*
3168  * There are no bdflush tunables left.  But distributions are
3169  * still running obsolete flush daemons, so we terminate them here.
3170  *
3171  * Use of bdflush() is deprecated and will be removed in a future kernel.
3172  * The `flush-X' kernel threads fully replace bdflush daemons and this call.
3173  */
3174 SYSCALL_DEFINE2(bdflush, int, func, long, data)
3175 {
3176         static int msg_count;
3177
3178         if (!capable(CAP_SYS_ADMIN))
3179                 return -EPERM;
3180
3181         if (msg_count < 5) {
3182                 msg_count++;
3183                 printk(KERN_INFO
3184                         "warning: process `%s' used the obsolete bdflush"
3185                         " system call\n", current->comm);
3186                 printk(KERN_INFO "Fix your initscripts?\n");
3187         }
3188
3189         if (func == 1)
3190                 do_exit(0);
3191         return 0;
3192 }
3193
3194 /*
3195  * Buffer-head allocation
3196  */
3197 static struct kmem_cache *bh_cachep;
3198
3199 /*
3200  * Once the number of bh's in the machine exceeds this level, we start
3201  * stripping them in writeback.
3202  */
3203 static int max_buffer_heads;
3204
3205 int buffer_heads_over_limit;
3206
3207 struct bh_accounting {
3208         int nr;                 /* Number of live bh's */
3209         int ratelimit;          /* Limit cacheline bouncing */
3210 };
3211
3212 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3213
3214 static void recalc_bh_state(void)
3215 {
3216         int i;
3217         int tot = 0;
3218
3219         if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
3220                 return;
3221         __this_cpu_write(bh_accounting.ratelimit, 0);
3222         for_each_online_cpu(i)
3223                 tot += per_cpu(bh_accounting, i).nr;
3224         buffer_heads_over_limit = (tot > max_buffer_heads);
3225 }
3226
3227 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3228 {
3229         struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
3230         if (ret) {
3231                 INIT_LIST_HEAD(&ret->b_assoc_buffers);
3232                 preempt_disable();
3233                 __this_cpu_inc(bh_accounting.nr);
3234                 recalc_bh_state();
3235                 preempt_enable();
3236         }
3237         return ret;
3238 }
3239 EXPORT_SYMBOL(alloc_buffer_head);
3240
3241 void free_buffer_head(struct buffer_head *bh)
3242 {
3243         BUG_ON(!list_empty(&bh->b_assoc_buffers));
3244         kmem_cache_free(bh_cachep, bh);
3245         preempt_disable();
3246         __this_cpu_dec(bh_accounting.nr);
3247         recalc_bh_state();
3248         preempt_enable();
3249 }
3250 EXPORT_SYMBOL(free_buffer_head);
3251
3252 static void buffer_exit_cpu(int cpu)
3253 {
3254         int i;
3255         struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3256
3257         for (i = 0; i < BH_LRU_SIZE; i++) {
3258                 brelse(b->bhs[i]);
3259                 b->bhs[i] = NULL;
3260         }
3261         this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
3262         per_cpu(bh_accounting, cpu).nr = 0;
3263 }
3264
3265 static int buffer_cpu_notify(struct notifier_block *self,
3266                               unsigned long action, void *hcpu)
3267 {
3268         if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
3269                 buffer_exit_cpu((unsigned long)hcpu);
3270         return NOTIFY_OK;
3271 }
3272
3273 /**
3274  * bh_uptodate_or_lock - Test whether the buffer is uptodate
3275  * @bh: struct buffer_head
3276  *
3277  * Return true if the buffer is up-to-date; otherwise return false
3278  * with the buffer left locked.
3279  */
3280 int bh_uptodate_or_lock(struct buffer_head *bh)
3281 {
3282         if (!buffer_uptodate(bh)) {
3283                 lock_buffer(bh);
3284                 if (!buffer_uptodate(bh))
3285                         return 0;
3286                 unlock_buffer(bh);
3287         }
3288         return 1;
3289 }
3290 EXPORT_SYMBOL(bh_uptodate_or_lock);
3291
3292 /**
3293  * bh_submit_read - Submit a locked buffer for reading
3294  * @bh: struct buffer_head
3295  *
3296  * Returns zero on success and -EIO on error.
3297  */
3298 int bh_submit_read(struct buffer_head *bh)
3299 {
3300         BUG_ON(!buffer_locked(bh));
3301
3302         if (buffer_uptodate(bh)) {
3303                 unlock_buffer(bh);
3304                 return 0;
3305         }
3306
3307         get_bh(bh);
3308         bh->b_end_io = end_buffer_read_sync;
3309         submit_bh(READ, bh);
3310         wait_on_buffer(bh);
3311         if (buffer_uptodate(bh))
3312                 return 0;
3313         return -EIO;
3314 }
3315 EXPORT_SYMBOL(bh_submit_read);
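
/*
 * A minimal sketch combining the two helpers above, in the way filesystems
 * commonly read metadata: skip the I/O entirely when the buffer is already
 * up to date, otherwise submit the read on the now-locked buffer and
 * propagate any error.  "example_read_metadata" is a hypothetical helper.
 */
static int example_read_metadata(struct buffer_head *bh)
{
        if (bh_uptodate_or_lock(bh))
                return 0;
        return bh_submit_read(bh);
}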
3316
3317 void __init buffer_init(void)
3318 {
3319         int nrpages;
3320
3321         bh_cachep = kmem_cache_create("buffer_head",
3322                         sizeof(struct buffer_head), 0,
3323                                 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3324                                 SLAB_MEM_SPREAD),
3325                                 NULL);
3326
3327         /*
3328          * Limit the bh occupancy to 10% of ZONE_NORMAL
3329          */
3330         nrpages = (nr_free_buffer_pages() * 10) / 100;
3331         max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3332         hotcpu_notifier(buffer_cpu_notify, 0);
3333 }
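
/*
 * A hedged, worked example of the sizing above (the figures are illustrative
 * assumptions, not measurements): with 4 KB pages, roughly 100 bytes per
 * struct buffer_head and 1,000,000 free buffer pages, nrpages becomes
 * 100,000 and max_buffer_heads is about 100,000 * (4096 / 100), i.e. around
 * 4,000,000 buffer heads before writeback starts stripping them.
 */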