/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */

#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>
#include <linux/cleancache.h>

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

inline void
init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
        bh->b_end_io = handler;
        bh->b_private = private;
}
EXPORT_SYMBOL(init_buffer);

static int sleep_on_buffer(void *word)
{
        io_schedule();
        return 0;
}

void __lock_buffer(struct buffer_head *bh)
{
        wait_on_bit_lock(&bh->b_state, BH_Lock, sleep_on_buffer,
                                                        TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void unlock_buffer(struct buffer_head *bh)
{
        clear_bit_unlock(BH_Lock, &bh->b_state);
        smp_mb__after_clear_bit();
        wake_up_bit(&bh->b_state, BH_Lock);
}
EXPORT_SYMBOL(unlock_buffer);

/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
        wait_on_bit(&bh->b_state, BH_Lock, sleep_on_buffer, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__wait_on_buffer);
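
/*
 * Illustrative sketch (not from the original file): a caller that wants
 * to modify a buffer must take the buffer lock rather than merely wait,
 * since __wait_on_buffer() alone does not stop the buffer from being
 * locked again.  Assuming the caller already holds a reference on bh:
 *
 *	lock_buffer(bh);
 *	if (!buffer_uptodate(bh)) {
 *		... bring the buffer contents up to date ...
 *		set_buffer_uptodate(bh);
 *	}
 *	unlock_buffer(bh);
 */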

static void
__clear_page_buffers(struct page *page)
{
        ClearPagePrivate(page);
        set_page_private(page, 0);
        page_cache_release(page);
}


static int quiet_error(struct buffer_head *bh)
{
        if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
                return 0;
        return 1;
}


static void buffer_io_error(struct buffer_head *bh)
{
        char b[BDEVNAME_SIZE];
        printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
                        bdevname(bh->b_bdev, b),
                        (unsigned long long)bh->b_blocknr);
}

/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                /* This happens, due to failed READA attempts. */
                clear_buffer_uptodate(bh);
        }
        unlock_buffer(bh);
}

/*
 * Default synchronous end-of-IO handler..  Just mark it up-to-date and
 * unlock the buffer. This is what ll_rw_block uses too.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
        __end_buffer_read_notouch(bh, uptodate);
        put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_read_sync);

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
        char b[BDEVNAME_SIZE];

        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                if (!quiet_error(bh)) {
                        buffer_io_error(bh);
                        printk(KERN_WARNING "lost page write due to "
                                        "I/O error on %s\n",
                                       bdevname(bh->b_bdev, b));
                }
                set_buffer_write_io_error(bh);
                clear_buffer_uptodate(bh);
        }
        unlock_buffer(bh);
        put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_write_sync);

/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * private_lock.
 *
 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take private_lock. (But if
 * private_lock is contended then so is mapping->tree_lock).
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
        struct inode *bd_inode = bdev->bd_inode;
        struct address_space *bd_mapping = bd_inode->i_mapping;
        struct buffer_head *ret = NULL;
        pgoff_t index;
        struct buffer_head *bh;
        struct buffer_head *head;
        struct page *page;
        int all_mapped = 1;

        index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
        page = find_get_page(bd_mapping, index);
        if (!page)
                goto out;

        spin_lock(&bd_mapping->private_lock);
        if (!page_has_buffers(page))
                goto out_unlock;
        head = page_buffers(page);
        bh = head;
        do {
                if (!buffer_mapped(bh))
                        all_mapped = 0;
                else if (bh->b_blocknr == block) {
                        ret = bh;
                        get_bh(bh);
                        goto out_unlock;
                }
                bh = bh->b_this_page;
        } while (bh != head);

        /* we might be here because some of the buffers on this page are
         * not mapped.  This is due to various races between
         * file io on the block device and getblk.  It gets dealt with
         * elsewhere, don't buffer_error if we had some unmapped buffers
         */
        if (all_mapped) {
                char b[BDEVNAME_SIZE];

                printk("__find_get_block_slow() failed. "
                        "block=%llu, b_blocknr=%llu\n",
                        (unsigned long long)block,
                        (unsigned long long)bh->b_blocknr);
                printk("b_state=0x%08lx, b_size=%zu\n",
                        bh->b_state, bh->b_size);
                printk("device %s blocksize: %d\n", bdevname(bdev, b),
                        1 << bd_inode->i_blkbits);
        }
out_unlock:
        spin_unlock(&bd_mapping->private_lock);
        page_cache_release(page);
out:
        return ret;
}

/* If invalidate_buffers() will trash dirty buffers, it means some kind
   of fs corruption is going on. Trashing dirty data always implies losing
   information that was supposed to be just stored on the physical layer
   by the user.

   Thus invalidate_buffers in general usage is not allowed to trash
   dirty buffers. For example, ioctl(BLKFLSBUF) expects dirty data to
   be preserved.  These buffers are simply skipped.

   We also skip buffers which are still in use.  For example this can
   happen if a userspace program is reading the block device.

   NOTE: if the user removes a removable-media disk while there is still
   dirty data not synced to disk (due to a bug in the device driver or to
   an error of the user), then by not destroying the dirty buffers we
   could generate corruption also on the next media inserted. Thus a
   parameter is necessary to handle this case in the safest way possible
   (trying not to corrupt the newly inserted disk with data belonging to
   the old, now corrupted, disk). Also for the ramdisk, the natural thing
   to do in order to release the ramdisk memory is to destroy its dirty
   buffers.

   Those are two special cases. Normal usage implies that the device
   driver issues a sync on the device (without waiting for I/O
   completion) and then an invalidate_buffers call that doesn't trash
   dirty buffers.

   For handling cache coherency with the blkdev pagecache the 'update'
   case has been introduced. It is needed to re-read from disk any pinned
   buffer. NOTE: re-reading from disk is destructive so we can do it only
   when we assume nobody is changing the buffercache under our I/O and when
   we think the disk contains more recent information than the buffercache.
   The update == 1 pass marks the buffers we need to update, the update == 2
   pass does the actual I/O. */
void invalidate_bdev(struct block_device *bdev)
{
        struct address_space *mapping = bdev->bd_inode->i_mapping;

        if (mapping->nrpages == 0)
                return;

        invalidate_bh_lrus();
        lru_add_drain_all();    /* make sure all lru add caches are flushed */
        invalidate_mapping_pages(mapping, 0, -1);
        /* 99% of the time, we don't need to flush the cleancache on the bdev.
         * But, for the strange corners, lets be cautious
         */
        cleancache_flush_inode(mapping);
}
EXPORT_SYMBOL(invalidate_bdev);
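
/*
 * Illustrative sketch (an assumption, not part of the original file):
 * the "normal usage" described above is a non-destructive sync followed
 * by an invalidate, e.g. for a driver about to release a device:
 *
 *	sync_blockdev(bdev);	writes out the dirty pagecache first
 *	invalidate_bdev(bdev);	then drops the now-clean pages
 */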

/*
 * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
 */
static void free_more_memory(void)
{
        struct zone *zone;
        int nid;

        wakeup_flusher_threads(1024, WB_REASON_FREE_MORE_MEM);
        yield();

        for_each_online_node(nid) {
                (void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
                                                gfp_zone(GFP_NOFS), NULL,
                                                &zone);
                if (zone)
                        try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
                                                GFP_NOFS, NULL);
        }
}

/*
 * I/O completion handler for block_read_full_page() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
        unsigned long flags;
        struct buffer_head *first;
        struct buffer_head *tmp;
        struct page *page;
        int page_uptodate = 1;

        BUG_ON(!buffer_async_read(bh));

        page = bh->b_page;
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                clear_buffer_uptodate(bh);
                if (!quiet_error(bh))
                        buffer_io_error(bh);
                SetPageError(page);
        }

        /*
         * Be _very_ careful from here on. Bad things can happen if
         * two buffer heads end IO at almost the same time and both
         * decide that the page is now completely done.
         */
        first = page_buffers(page);
        local_irq_save(flags);
        bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
        clear_buffer_async_read(bh);
        unlock_buffer(bh);
        tmp = bh;
        do {
                if (!buffer_uptodate(tmp))
                        page_uptodate = 0;
                if (buffer_async_read(tmp)) {
                        BUG_ON(!buffer_locked(tmp));
                        goto still_busy;
                }
                tmp = tmp->b_this_page;
        } while (tmp != bh);
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);

        /*
         * If none of the buffers had errors and they are all
         * uptodate then we can set the page uptodate.
         */
        if (page_uptodate && !PageError(page))
                SetPageUptodate(page);
        unlock_page(page);
        return;

still_busy:
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);
        return;
}

/*
 * Completion handler for block_write_full_page() - pages which are unlocked
 * during I/O, and which have PageWriteback cleared upon I/O completion.
 */
void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
        char b[BDEVNAME_SIZE];
        unsigned long flags;
        struct buffer_head *first;
        struct buffer_head *tmp;
        struct page *page;

        BUG_ON(!buffer_async_write(bh));

        page = bh->b_page;
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                if (!quiet_error(bh)) {
                        buffer_io_error(bh);
                        printk(KERN_WARNING "lost page write due to "
                                        "I/O error on %s\n",
                               bdevname(bh->b_bdev, b));
                }
                set_bit(AS_EIO, &page->mapping->flags);
                set_buffer_write_io_error(bh);
                clear_buffer_uptodate(bh);
                SetPageError(page);
        }

        first = page_buffers(page);
        local_irq_save(flags);
        bit_spin_lock(BH_Uptodate_Lock, &first->b_state);

        clear_buffer_async_write(bh);
        unlock_buffer(bh);
        tmp = bh->b_this_page;
        while (tmp != bh) {
                if (buffer_async_write(tmp)) {
                        BUG_ON(!buffer_locked(tmp));
                        goto still_busy;
                }
                tmp = tmp->b_this_page;
        }
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);
        end_page_writeback(page);
        return;

still_busy:
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);
        return;
}
EXPORT_SYMBOL(end_buffer_async_write);

/*
 * If a page's buffers are under async read-in (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone from starting new async I/O reads against
 * any of the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
        bh->b_end_io = end_buffer_async_read;
        set_buffer_async_read(bh);
}

static void mark_buffer_async_write_endio(struct buffer_head *bh,
                                          bh_end_io_t *handler)
{
        bh->b_end_io = handler;
        set_buffer_async_write(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
        mark_buffer_async_write_endio(bh, end_buffer_async_write);
}
EXPORT_SYMBOL(mark_buffer_async_write);


/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for private_list is via the private_lock in the address_space
 * which backs the buffers.  Which is different from the address_space
 * against which the buffers are listed.  So for a particular address_space,
 * mapping->private_lock does *not* protect mapping->private_list!  In fact,
 * mapping->private_list will always be protected by the backing blockdev's
 * ->private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->private_list via these
 * utility functions are free to use private_lock and private_list for
 * whatever they want.  The only requirement is that list_empty(private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */

/*
 * The buffer's backing address_space's private_lock must be held
 */
static void __remove_assoc_queue(struct buffer_head *bh)
{
        list_del_init(&bh->b_assoc_buffers);
        WARN_ON(!bh->b_assoc_map);
        if (buffer_write_io_error(bh))
                set_bit(AS_EIO, &bh->b_assoc_map->flags);
        bh->b_assoc_map = NULL;
}

int inode_has_buffers(struct inode *inode)
{
        return !list_empty(&inode->i_data.private_list);
}

/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
 * you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
        struct buffer_head *bh;
        struct list_head *p;
        int err = 0;

        spin_lock(lock);
repeat:
        list_for_each_prev(p, list) {
                bh = BH_ENTRY(p);
                if (buffer_locked(bh)) {
                        get_bh(bh);
                        spin_unlock(lock);
                        wait_on_buffer(bh);
                        if (!buffer_uptodate(bh))
                                err = -EIO;
                        brelse(bh);
                        spin_lock(lock);
                        goto repeat;
                }
        }
        spin_unlock(lock);
        return err;
}
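
/*
 * Sketch of the O_SYNC pattern described above (illustrative only, with
 * a hypothetical buffer bh on the list protected by 'lock'): queue each
 * write as the buffer is dirtied, then use the osync pass to wait:
 *
 *	mark_buffer_dirty(bh);
 *	ll_rw_block(WRITE, 1, &bh);	queue the write immediately
 *	...
 *	err = osync_buffers_list(lock, list);	wait for completion
 */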

static void do_thaw_one(struct super_block *sb, void *unused)
{
        char b[BDEVNAME_SIZE];
        while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
                printk(KERN_WARNING "Emergency Thaw on %s\n",
                       bdevname(sb->s_bdev, b));
}

static void do_thaw_all(struct work_struct *work)
{
        iterate_supers(do_thaw_one, NULL);
        kfree(work);
        printk(KERN_WARNING "Emergency Thaw complete\n");
}

/**
 * emergency_thaw_all -- forcibly thaw every frozen filesystem
 *
 * Used for emergency unfreeze of all filesystems via SysRq
 */
void emergency_thaw_all(void)
{
        struct work_struct *work;

        work = kmalloc(sizeof(*work), GFP_ATOMIC);
        if (work) {
                INIT_WORK(work, do_thaw_all);
                schedule_work(work);
        }
}

/**
 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
        struct address_space *buffer_mapping = mapping->assoc_mapping;

        if (buffer_mapping == NULL || list_empty(&mapping->private_list))
                return 0;

        return fsync_buffers_list(&buffer_mapping->private_lock,
                                        &mapping->private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);
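
/*
 * Illustrative sketch (an assumption, not part of the original file):
 * a simple buffer-backed filesystem can build its ->fsync() on this
 * helper, roughly as generic_file_fsync() does:
 *
 *	static int example_fsync(struct file *file, loff_t start,
 *				 loff_t end, int datasync)
 *	{
 *		struct inode *inode = file->f_mapping->host;
 *
 *		return sync_mapping_buffers(inode->i_mapping);
 *	}
 */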

/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
                        sector_t bblock, unsigned blocksize)
{
        struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
        if (bh) {
                if (buffer_dirty(bh))
                        ll_rw_block(WRITE, 1, &bh);
                put_bh(bh);
        }
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
        struct address_space *mapping = inode->i_mapping;
        struct address_space *buffer_mapping = bh->b_page->mapping;

        mark_buffer_dirty(bh);
        if (!mapping->assoc_mapping) {
                mapping->assoc_mapping = buffer_mapping;
        } else {
                BUG_ON(mapping->assoc_mapping != buffer_mapping);
        }
        if (!bh->b_assoc_map) {
                spin_lock(&buffer_mapping->private_lock);
                list_move_tail(&bh->b_assoc_buffers,
                                &mapping->private_list);
                bh->b_assoc_map = mapping;
                spin_unlock(&buffer_mapping->private_lock);
        }
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);
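
/*
 * Illustrative sketch (an assumption): a filesystem that has just
 * modified a metadata block on behalf of 'inode' attaches it to the
 * inode's associated-buffer list so a later fsync() writes it out
 * ('metadata_block' is hypothetical):
 *
 *	bh = sb_bread(sb, metadata_block);
 *	... modify bh->b_data ...
 *	mark_buffer_dirty_inode(bh, inode);
 *	brelse(bh);
 */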

/*
 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
 * dirty.
 *
 * If warn is true, then emit a warning if the page is not uptodate and has
 * not been truncated.
 */
static void __set_page_dirty(struct page *page,
                struct address_space *mapping, int warn)
{
        unsigned long flags;

        spin_lock_irqsave(&mapping->tree_lock, flags);
        if (page->mapping) {    /* Race with truncate? */
                WARN_ON_ONCE(warn && !PageUptodate(page));
                account_page_dirtied(page, mapping);
                radix_tree_tag_set(&mapping->page_tree,
                                page_index(page), PAGECACHE_TAG_DIRTY);
        }
        spin_unlock_irqrestore(&mapping->tree_lock, flags);
        __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
}

/*
 * Add a page to the dirty page list.
 *
 * It is a sad fact of life that this function is called from several places
 * deeply under spinlocking.  It may not sleep.
 *
 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 * dirty-state coherency between the page and the buffers.  If the page does
 * not have buffers then when they are later attached they will all be set
 * dirty.
 *
 * The buffers are dirtied before the page is dirtied.  There's a small race
 * window in which a writepage caller may see the page cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the page dirty
 * before the buffers, a concurrent writepage caller could clear the page dirty
 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * page on the dirty page list.
 *
 * We use private_lock to lock against try_to_free_buffers while using the
 * page's buffer list.  Also use this to protect against clean buffers being
 * added to the page after it was set dirty.
 *
 * FIXME: may need to call ->reservepage here as well.  That's rather up to the
 * address_space though.
 */
int __set_page_dirty_buffers(struct page *page)
{
        int newly_dirty;
        struct address_space *mapping = page_mapping(page);

        if (unlikely(!mapping))
                return !TestSetPageDirty(page);

        spin_lock(&mapping->private_lock);
        if (page_has_buffers(page)) {
                struct buffer_head *head = page_buffers(page);
                struct buffer_head *bh = head;

                do {
                        set_buffer_dirty(bh);
                        bh = bh->b_this_page;
                } while (bh != head);
        }
        newly_dirty = !TestSetPageDirty(page);
        spin_unlock(&mapping->private_lock);

        if (newly_dirty)
                __set_page_dirty(page, mapping, 1);
        return newly_dirty;
}
EXPORT_SYMBOL(__set_page_dirty_buffers);

/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
        struct buffer_head *bh;
        struct list_head tmp;
        struct address_space *mapping;
        int err = 0, err2;
        struct blk_plug plug;

        INIT_LIST_HEAD(&tmp);
        blk_start_plug(&plug);

        spin_lock(lock);
        while (!list_empty(list)) {
                bh = BH_ENTRY(list->next);
                mapping = bh->b_assoc_map;
                __remove_assoc_queue(bh);
                /* Avoid race with mark_buffer_dirty_inode() which does
                 * a lockless check and we rely on seeing the dirty bit */
                smp_mb();
                if (buffer_dirty(bh) || buffer_locked(bh)) {
                        list_add(&bh->b_assoc_buffers, &tmp);
                        bh->b_assoc_map = mapping;
                        if (buffer_dirty(bh)) {
                                get_bh(bh);
                                spin_unlock(lock);
                                /*
                                 * Ensure any pending I/O completes so that
                                 * write_dirty_buffer() actually writes the
                                 * current contents - it is a noop if I/O is
                                 * still in flight on potentially older
                                 * contents.
                                 */
                                write_dirty_buffer(bh, WRITE_SYNC);

                                /*
                                 * Kick off IO for the previous mapping. Note
                                 * that we will not run the very last mapping,
                                 * wait_on_buffer() will do that for us
                                 * through sync_buffer().
                                 */
                                brelse(bh);
                                spin_lock(lock);
                        }
                }
        }

        spin_unlock(lock);
        blk_finish_plug(&plug);
        spin_lock(lock);

        while (!list_empty(&tmp)) {
                bh = BH_ENTRY(tmp.prev);
                get_bh(bh);
                mapping = bh->b_assoc_map;
                __remove_assoc_queue(bh);
                /* Avoid race with mark_buffer_dirty_inode() which does
                 * a lockless check and we rely on seeing the dirty bit */
                smp_mb();
                if (buffer_dirty(bh)) {
                        list_add(&bh->b_assoc_buffers,
                                 &mapping->private_list);
                        bh->b_assoc_map = mapping;
                }
                spin_unlock(lock);
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh))
                        err = -EIO;
                brelse(bh);
                spin_lock(lock);
        }

        spin_unlock(lock);
        err2 = osync_buffers_list(lock, list);
        if (err)
                return err;
        else
                return err2;
}

/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
 * assumes that all the buffers are against the blockdev.  Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
{
        if (inode_has_buffers(inode)) {
                struct address_space *mapping = &inode->i_data;
                struct list_head *list = &mapping->private_list;
                struct address_space *buffer_mapping = mapping->assoc_mapping;

                spin_lock(&buffer_mapping->private_lock);
                while (!list_empty(list))
                        __remove_assoc_queue(BH_ENTRY(list->next));
                spin_unlock(&buffer_mapping->private_lock);
        }
}
EXPORT_SYMBOL(invalidate_inode_buffers);

/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
        int ret = 1;

        if (inode_has_buffers(inode)) {
                struct address_space *mapping = &inode->i_data;
                struct list_head *list = &mapping->private_list;
                struct address_space *buffer_mapping = mapping->assoc_mapping;

                spin_lock(&buffer_mapping->private_lock);
                while (!list_empty(list)) {
                        struct buffer_head *bh = BH_ENTRY(list->next);
                        if (buffer_dirty(bh)) {
                                ret = 0;
                                break;
                        }
                        __remove_assoc_queue(bh);
                }
                spin_unlock(&buffer_mapping->private_lock);
        }
        return ret;
}

/*
 * Create the appropriate buffers when given a page for data area and
 * the size of each buffer.. Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping),
 * which may not fail, from ordinary buffer allocations.
 */
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
                int retry)
{
        struct buffer_head *bh, *head;
        long offset;

try_again:
        head = NULL;
        offset = PAGE_SIZE;
        while ((offset -= size) >= 0) {
                bh = alloc_buffer_head(GFP_NOFS);
                if (!bh)
                        goto no_grow;

                bh->b_bdev = NULL;
                bh->b_this_page = head;
                bh->b_blocknr = -1;
                head = bh;

                bh->b_state = 0;
                atomic_set(&bh->b_count, 0);
                bh->b_size = size;

                /* Link the buffer to its page */
                set_bh_page(bh, page, offset);

                init_buffer(bh, NULL, NULL);
        }
        return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
        if (head) {
                do {
                        bh = head;
                        head = head->b_this_page;
                        free_buffer_head(bh);
                } while (head);
        }

        /*
         * Return failure for non-async IO requests.  Async IO requests
         * are not allowed to fail, so we have to wait until buffer heads
         * become available.  But we don't want tasks sleeping with
         * partially complete buffers, so all were released above.
         */
        if (!retry)
                return NULL;

        /* We're _really_ low on memory. Now we just
         * wait for old buffer heads to become free due to
         * finishing IO.  Since this is an async request and
         * the reserve list is empty, we're sure there are
         * async buffer heads in use.
         */
        free_more_memory();
        goto try_again;
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);
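
/*
 * Illustrative sketch (an assumption): a caller holding a locked page
 * typically allocates the ring and then links it, much as
 * create_empty_buffers() below does:
 *
 *	head = alloc_page_buffers(page, blocksize, 1);	1 == may retry
 *	... close the b_this_page ring and attach_page_buffers() ...
 */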

static inline void
link_dev_buffers(struct page *page, struct buffer_head *head)
{
        struct buffer_head *bh, *tail;

        bh = head;
        do {
                tail = bh;
                bh = bh->b_this_page;
        } while (bh);
        tail->b_this_page = head;
        attach_page_buffers(page, head);
}

/*
 * Initialise the state of a blockdev page's buffers.
 */
static sector_t
init_page_buffers(struct page *page, struct block_device *bdev,
                        sector_t block, int size)
{
        struct buffer_head *head = page_buffers(page);
        struct buffer_head *bh = head;
        int uptodate = PageUptodate(page);
        sector_t end_block = blkdev_max_block(I_BDEV(bdev->bd_inode));

        do {
                if (!buffer_mapped(bh)) {
                        init_buffer(bh, NULL, NULL);
                        bh->b_bdev = bdev;
                        bh->b_blocknr = block;
                        if (uptodate)
                                set_buffer_uptodate(bh);
                        if (block < end_block)
                                set_buffer_mapped(bh);
                }
                block++;
                bh = bh->b_this_page;
        } while (bh != head);

        /*
         * Caller needs to validate requested block against end of device.
         */
        return end_block;
}

/*
 * Create the page-cache page that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 */
static int
grow_dev_page(struct block_device *bdev, sector_t block,
              pgoff_t index, int size, int sizebits, gfp_t gfp)
{
        struct inode *inode = bdev->bd_inode;
        struct page *page;
        struct buffer_head *bh;
        sector_t end_block;
        int ret = 0;            /* Will call free_more_memory() */

        page = find_or_create_page(inode->i_mapping, index,
                (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS) | gfp);
        if (!page)
                return ret;

        BUG_ON(!PageLocked(page));

        if (page_has_buffers(page)) {
                bh = page_buffers(page);
                if (bh->b_size == size) {
                        end_block = init_page_buffers(page, bdev,
                                                (sector_t)index << sizebits,
                                                size);
                        goto done;
                }
                if (!try_to_free_buffers(page))
                        goto failed;
        }

        /*
         * Allocate some buffers for this page
         */
        bh = alloc_page_buffers(page, size, 0);
        if (!bh)
                goto failed;

        /*
         * Link the page to the buffers and initialise them.  Take the
         * lock to be atomic wrt __find_get_block(), which does not
         * run under the page lock.
         */
        spin_lock(&inode->i_mapping->private_lock);
        link_dev_buffers(page, bh);
        end_block = init_page_buffers(page, bdev, (sector_t)index << sizebits,
                        size);
        spin_unlock(&inode->i_mapping->private_lock);
done:
        ret = (block < end_block) ? 1 : -ENXIO;
failed:
        unlock_page(page);
        page_cache_release(page);
        return ret;
}

/*
 * Create buffers for the specified block device block's page.  If
 * that page was dirty, the buffers are set dirty also.
 */
static int
grow_buffers(struct block_device *bdev, sector_t block, int size, gfp_t gfp)
{
        pgoff_t index;
        int sizebits;

        sizebits = -1;
        do {
                sizebits++;
        } while ((size << sizebits) < PAGE_SIZE);

        index = block >> sizebits;

        /*
         * Check for a block which wants to lie outside our maximum possible
         * pagecache index.  (this comparison is done using sector_t types).
         */
        if (unlikely(index != block >> sizebits)) {
                char b[BDEVNAME_SIZE];

                printk(KERN_ERR "%s: requested out-of-range block %llu for "
                        "device %s\n",
                        __func__, (unsigned long long)block,
                        bdevname(bdev, b));
                return -EIO;
        }

        /* Create a page with the proper size buffers.. */
        return grow_dev_page(bdev, block, index, size, sizebits, gfp);
}

struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block,
             unsigned size, gfp_t gfp)
{
        /* Size must be multiple of hard sectorsize */
        if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
                        (size < 512 || size > PAGE_SIZE))) {
                printk(KERN_ERR "getblk(): invalid block size %d requested\n",
                                        size);
                printk(KERN_ERR "logical block size: %d\n",
                                        bdev_logical_block_size(bdev));

                dump_stack();
                return NULL;
        }

        for (;;) {
                struct buffer_head *bh;
                int ret;

                bh = __find_get_block(bdev, block, size);
                if (bh)
                        return bh;

                ret = grow_buffers(bdev, block, size, gfp);
                if (ret < 0)
                        return NULL;
                if (ret == 0)
                        free_more_memory();
        }
}
EXPORT_SYMBOL(__getblk_slow);

/*
 * The relationship between dirty buffers and dirty pages:
 *
 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
 * the page is tagged dirty in its radix tree.
 *
 * At all times, the dirtiness of the buffers represents the dirtiness of
 * subsections of the page.  If the page has buffers, the page dirty bit is
 * merely a hint about the true dirty state.
 *
 * When a page is set dirty in its entirety, all its buffers are marked dirty
 * (if the page has buffers).
 *
 * When a buffer is marked dirty, its page is dirtied, but the page's other
 * buffers are not.
 *
 * Also.  When blockdev buffers are explicitly read with bread(), they
 * individually become uptodate.  But their backing page remains not
 * uptodate - even if all of its buffers are uptodate.  A subsequent
 * block_read_full_page() against that page will discover all the uptodate
 * buffers, will set the page uptodate and will perform no I/O.
 */

/**
 * mark_buffer_dirty - mark a buffer_head as needing writeout
 * @bh: the buffer_head to mark dirty
 *
 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
 * backing page dirty, then tag the page as dirty in its address_space's radix
 * tree and then attach the address_space's inode to its superblock's dirty
 * inode list.
 *
 * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
 * mapping->tree_lock and mapping->host->i_lock.
 */
void mark_buffer_dirty(struct buffer_head *bh)
{
        WARN_ON_ONCE(!buffer_uptodate(bh));

        /*
         * Very *carefully* optimize the it-is-already-dirty case.
         *
         * Don't let the final "is it dirty" escape to before we
         * perhaps modified the buffer.
         */
        if (buffer_dirty(bh)) {
                smp_mb();
                if (buffer_dirty(bh))
                        return;
        }

        if (!test_set_buffer_dirty(bh)) {
                struct page *page = bh->b_page;
                if (!TestSetPageDirty(page)) {
                        struct address_space *mapping = page_mapping(page);
                        if (mapping)
                                __set_page_dirty(page, mapping, 0);
                }
        }
}
EXPORT_SYMBOL(mark_buffer_dirty);
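
/*
 * Illustrative sketch (an assumption): the canonical modify-then-dirty
 * sequence for a metadata buffer ('block' is hypothetical):
 *
 *	bh = sb_bread(sb, block);
 *	lock_buffer(bh);
 *	... modify bh->b_data ...
 *	unlock_buffer(bh);
 *	mark_buffer_dirty(bh);	buffer, page and inode all end up dirty
 *	brelse(bh);
 */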

/*
 * Decrement a buffer_head's reference count.  If all buffers against a page
 * have zero reference count, are clean and unlocked, and if the page is clean
 * and unlocked then try_to_free_buffers() may strip the buffers from the page
 * in preparation for freeing it (sometimes, rarely, buffers are removed from
 * a page but it ends up not being freed, and buffers may later be reattached).
 */
void __brelse(struct buffer_head * buf)
{
        if (atomic_read(&buf->b_count)) {
                put_bh(buf);
                return;
        }
        WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
}
EXPORT_SYMBOL(__brelse);

/*
 * bforget() is like brelse(), except it discards any
 * potentially dirty data.
 */
void __bforget(struct buffer_head *bh)
{
        clear_buffer_dirty(bh);
        if (bh->b_assoc_map) {
                struct address_space *buffer_mapping = bh->b_page->mapping;

                spin_lock(&buffer_mapping->private_lock);
                list_del_init(&bh->b_assoc_buffers);
                bh->b_assoc_map = NULL;
                spin_unlock(&buffer_mapping->private_lock);
        }
        __brelse(bh);
}
EXPORT_SYMBOL(__bforget);

static struct buffer_head *__bread_slow(struct buffer_head *bh)
{
        lock_buffer(bh);
        if (buffer_uptodate(bh)) {
                unlock_buffer(bh);
                return bh;
        } else {
                get_bh(bh);
                bh->b_end_io = end_buffer_read_sync;
                submit_bh(READ, bh);
                wait_on_buffer(bh);
                if (buffer_uptodate(bh))
                        return bh;
        }
        brelse(bh);
        return NULL;
}

/*
 * Per-cpu buffer LRU implementation.  To reduce the cost of __find_get_block().
 * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
 * refcount elevated by one when they're in an LRU.  A buffer can only appear
 * once in a particular CPU's LRU.  A single buffer can be present in multiple
 * CPU's LRUs at the same time.
 *
 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
 * sb_find_get_block().
 *
 * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
 * a local interrupt disable for that.
 */

#define BH_LRU_SIZE     8

struct bh_lru {
        struct buffer_head *bhs[BH_LRU_SIZE];
};

static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};

#ifdef CONFIG_SMP
#define bh_lru_lock()   local_irq_disable()
#define bh_lru_unlock() local_irq_enable()
#else
#define bh_lru_lock()   preempt_disable()
#define bh_lru_unlock() preempt_enable()
#endif

static inline void check_irqs_on(void)
{
#ifdef irqs_disabled
        BUG_ON(irqs_disabled());
#endif
}

/*
 * The LRU management algorithm is dopey-but-simple.  Sorry.
 */
static void bh_lru_install(struct buffer_head *bh)
{
        struct buffer_head *evictee = NULL;

        check_irqs_on();
        bh_lru_lock();
        if (__this_cpu_read(bh_lrus.bhs[0]) != bh) {
                struct buffer_head *bhs[BH_LRU_SIZE];
                int in;
                int out = 0;

                get_bh(bh);
                bhs[out++] = bh;
                for (in = 0; in < BH_LRU_SIZE; in++) {
                        struct buffer_head *bh2 =
                                __this_cpu_read(bh_lrus.bhs[in]);

                        if (bh2 == bh) {
                                __brelse(bh2);
                        } else {
                                if (out >= BH_LRU_SIZE) {
                                        BUG_ON(evictee != NULL);
                                        evictee = bh2;
                                } else {
                                        bhs[out++] = bh2;
                                }
                        }
                }
                while (out < BH_LRU_SIZE)
                        bhs[out++] = NULL;
                memcpy(__this_cpu_ptr(&bh_lrus.bhs), bhs, sizeof(bhs));
        }
        bh_lru_unlock();

        if (evictee)
                __brelse(evictee);
}

/*
 * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
 */
static struct buffer_head *
lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
{
        struct buffer_head *ret = NULL;
        unsigned int i;

        check_irqs_on();
        bh_lru_lock();
        for (i = 0; i < BH_LRU_SIZE; i++) {
                struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);

                if (bh && bh->b_bdev == bdev &&
                                bh->b_blocknr == block && bh->b_size == size) {
                        if (i) {
                                while (i) {
                                        __this_cpu_write(bh_lrus.bhs[i],
                                                __this_cpu_read(bh_lrus.bhs[i - 1]));
                                        i--;
                                }
                                __this_cpu_write(bh_lrus.bhs[0], bh);
                        }
                        get_bh(bh);
                        ret = bh;
                        break;
                }
        }
        bh_lru_unlock();
        return ret;
}

/*
 * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
 * it in the LRU and mark it as accessed.  If it is not present then return
 * NULL
 */
struct buffer_head *
__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
{
        struct buffer_head *bh = lookup_bh_lru(bdev, block, size);

        if (bh == NULL) {
                bh = __find_get_block_slow(bdev, block);
                if (bh)
                        bh_lru_install(bh);
        }
        if (bh)
                touch_buffer(bh);
        return bh;
}
EXPORT_SYMBOL(__find_get_block);

/*
 * __getblk_gfp() will locate (and, if necessary, create) the buffer_head
 * which corresponds to the passed block_device, block and size. The
 * returned buffer has its reference count incremented.
 *
 * __getblk_gfp() will lock up the machine if grow_dev_page's
 * try_to_free_buffers() attempt is failing.  FIXME, perhaps?
 */
struct buffer_head *
__getblk_gfp(struct block_device *bdev, sector_t block,
             unsigned size, gfp_t gfp)
{
        struct buffer_head *bh = __find_get_block(bdev, block, size);

        might_sleep();
        if (bh == NULL)
                bh = __getblk_slow(bdev, block, size, gfp);
        return bh;
}
EXPORT_SYMBOL(__getblk_gfp);

/*
 * Do async read-ahead on a buffer..
 */
void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
{
        struct buffer_head *bh = __getblk(bdev, block, size);
        if (likely(bh)) {
                ll_rw_block(READA, 1, &bh);
                brelse(bh);
        }
}
EXPORT_SYMBOL(__breadahead);

/**
 *  __bread_gfp() - reads a specified block and returns the bh
 *  @bdev: the block_device to read from
 *  @block: number of block
 *  @size: size (in bytes) to read
 *  @gfp: page allocation flag
 *
 *  Reads a specified block, and returns buffer head that contains it.
 *  If @gfp is zero, the page cache is allocated from the non-movable
 *  area so that it does not hinder page migration.
 *  It returns NULL if the block was unreadable.
 */
struct buffer_head *
__bread_gfp(struct block_device *bdev, sector_t block,
                   unsigned size, gfp_t gfp)
{
        struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp);

        if (likely(bh) && !buffer_uptodate(bh))
                bh = __bread_slow(bh);
        return bh;
}
EXPORT_SYMBOL(__bread_gfp);
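
/*
 * Illustrative sketch (an assumption): typical use via the sb_bread()
 * wrapper - read a block synchronously, use it, drop the reference:
 *
 *	struct buffer_head *bh = sb_bread(sb, block);
 *
 *	if (!bh)
 *		return -EIO;	the block was unreadable
 *	... use bh->b_data ...
 *	brelse(bh);
 */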
1430
1431 /*
1432  * invalidate_bh_lrus() is called rarely - but not only at unmount.
1433  * This doesn't race because it runs in each cpu either in irq
1434  * or with preempt disabled.
1435  */
1436 static void invalidate_bh_lru(void *arg)
1437 {
1438         struct bh_lru *b = &get_cpu_var(bh_lrus);
1439         int i;
1440
1441         for (i = 0; i < BH_LRU_SIZE; i++) {
1442                 brelse(b->bhs[i]);
1443                 b->bhs[i] = NULL;
1444         }
1445         put_cpu_var(bh_lrus);
1446 }
1447         
1448 void invalidate_bh_lrus(void)
1449 {
1450         on_each_cpu(invalidate_bh_lru, NULL, 1);
1451 }
1452 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
1453
1454 void set_bh_page(struct buffer_head *bh,
1455                 struct page *page, unsigned long offset)
1456 {
1457         bh->b_page = page;
1458         BUG_ON(offset >= PAGE_SIZE);
1459         if (PageHighMem(page))
1460                 /*
1461                  * This catches illegal uses and preserves the offset:
1462                  */
1463                 bh->b_data = (char *)(0 + offset);
1464         else
1465                 bh->b_data = page_address(page) + offset;
1466 }
1467 EXPORT_SYMBOL(set_bh_page);
1468
1469 /*
1470  * Called when truncating a buffer on a page completely.
1471  */
1472 static void discard_buffer(struct buffer_head * bh)
1473 {
1474         lock_buffer(bh);
1475         clear_buffer_dirty(bh);
1476         bh->b_bdev = NULL;
1477         clear_buffer_mapped(bh);
1478         clear_buffer_req(bh);
1479         clear_buffer_new(bh);
1480         clear_buffer_delay(bh);
1481         clear_buffer_unwritten(bh);
1482         unlock_buffer(bh);
1483 }
1484
1485 /**
1486  * block_invalidatepage - invalidate part or all of a buffer-backed page
1487  *
1488  * @page: the page which is affected
1489  * @offset: the index of the truncation point
1490  *
1491  * block_invalidatepage() is called when all or part of the page has become
1492  * invalidated by a truncate operation.
1493  *
1494  * block_invalidatepage() does not have to release all buffers, but it must
1495  * ensure that no dirty buffer is left outside @offset and that no I/O
1496  * is underway against any of the blocks which are outside the truncation
1497  * point, because the caller is about to free (and possibly reuse) those
1498  * blocks on-disk.
1499  */
1500 void block_invalidatepage(struct page *page, unsigned long offset)
1501 {
1502         struct buffer_head *head, *bh, *next;
1503         unsigned int curr_off = 0;
1504
1505         BUG_ON(!PageLocked(page));
1506         if (!page_has_buffers(page))
1507                 goto out;
1508
1509         head = page_buffers(page);
1510         bh = head;
1511         do {
1512                 unsigned int next_off = curr_off + bh->b_size;
1513                 next = bh->b_this_page;
1514
1515                 /*
1516                  * is this block fully invalidated?
1517                  */
1518                 if (offset <= curr_off)
1519                         discard_buffer(bh);
1520                 curr_off = next_off;
1521                 bh = next;
1522         } while (bh != head);
1523
1524         /*
1525          * We release buffers only if the entire page is being invalidated.
1526          * The get_block cached value has been unconditionally invalidated,
1527          * so real IO is not possible anymore.
1528          */
1529         if (offset == 0)
1530                 try_to_release_page(page, 0);
1531 out:
1532         return;
1533 }
1534 EXPORT_SYMBOL(block_invalidatepage);
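
/*
 * Example (hypothetical): a buffer-backed filesystem normally just plugs
 * block_invalidatepage() into its address_space_operations; truncation
 * then reaches it through do_invalidatepage().
 */
#if 0	/* sketch only, not compiled */
static const struct address_space_operations myfs_aops_fragment = {
	.invalidatepage	= block_invalidatepage,
	/* .readpage, .writepage, .write_begin, ... */
};
#endif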
1535
1536 /*
1537  * We attach and possibly dirty the buffers atomically wrt
1538  * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
1539  * is already excluded via the page lock.
1540  */
1541 void create_empty_buffers(struct page *page,
1542                         unsigned long blocksize, unsigned long b_state)
1543 {
1544         struct buffer_head *bh, *head, *tail;
1545
1546         head = alloc_page_buffers(page, blocksize, 1);
1547         bh = head;
1548         do {
1549                 bh->b_state |= b_state;
1550                 tail = bh;
1551                 bh = bh->b_this_page;
1552         } while (bh);
1553         tail->b_this_page = head;
1554
1555         spin_lock(&page->mapping->private_lock);
1556         if (PageUptodate(page) || PageDirty(page)) {
1557                 bh = head;
1558                 do {
1559                         if (PageDirty(page))
1560                                 set_buffer_dirty(bh);
1561                         if (PageUptodate(page))
1562                                 set_buffer_uptodate(bh);
1563                         bh = bh->b_this_page;
1564                 } while (bh != head);
1565         }
1566         attach_page_buffers(page, head);
1567         spin_unlock(&page->mapping->private_lock);
1568 }
1569 EXPORT_SYMBOL(create_empty_buffers);
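
/*
 * Example (hypothetical): the usual pattern in this file - make sure the
 * page has buffers, then walk the circular list they form.
 */
#if 0	/* sketch only, not compiled */
static void my_for_each_bh(struct page *page, struct inode *inode)
{
	struct buffer_head *bh, *head;

	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << inode->i_blkbits, 0);
	head = bh = page_buffers(page);
	do {
		/* inspect or update bh here */
		bh = bh->b_this_page;
	} while (bh != head);
}
#endif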
1570
1571 /*
1572  * We are taking a block for data and we don't want any output from any
1573  * buffer-cache aliases from the moment this function returns until
1574  * the moment when something explicitly marks the buffer
1575  * dirty (hopefully that will not happen until we free that block ;-)
1576  * We don't even need to mark it not-uptodate - nobody can expect
1577  * anything from a newly allocated buffer anyway. We used to use
1578  * unmap_buffer() for such invalidation, but that was wrong. We definitely
1579  * don't want to mark the alias unmapped, for example - it would confuse
1580  * anyone who might pick it with bread() afterwards...
1581  *
1582  * Also..  Note that bforget() doesn't lock the buffer.  So there can
1583  * be writeout I/O going on against recently-freed buffers.  We don't
1584  * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1585  * only if we really need to.  That happens here.
1586  */
1587 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1588 {
1589         struct buffer_head *old_bh;
1590
1591         might_sleep();
1592
1593         old_bh = __find_get_block_slow(bdev, block);
1594         if (old_bh) {
1595                 clear_buffer_dirty(old_bh);
1596                 wait_on_buffer(old_bh);
1597                 clear_buffer_req(old_bh);
1598                 __brelse(old_bh);
1599         }
1600 }
1601 EXPORT_SYMBOL(unmap_underlying_metadata);
1602
1603 /*
1604  * NOTE! All mapped/uptodate combinations are valid:
1605  *
1606  *      Mapped  Uptodate        Meaning
1607  *
1608  *      No      No              "unknown" - must do get_block()
1609  *      No      Yes             "hole" - zero-filled
1610  *      Yes     No              "allocated" - allocated on disk, not read in
1611  *      Yes     Yes             "valid" - allocated and up-to-date in memory.
1612  *
1613  * "Dirty" is valid only with the last case (mapped+uptodate).
1614  */
1615
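/*
 * Example (hypothetical): the four states in the table above, as code
 * would distinguish them.
 */
#if 0	/* sketch only, not compiled */
static const char *my_bh_state_name(struct buffer_head *bh)
{
	if (!buffer_mapped(bh))
		return buffer_uptodate(bh) ? "hole" : "unknown";
	return buffer_uptodate(bh) ? "valid" : "allocated";
}
#endif
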
1616 /*
1617  * While block_write_full_page is writing back the dirty buffers under
1618  * the page lock, whoever dirtied the buffers may decide to clean them
1619  * again at any time.  We handle that by only looking at the buffer
1620  * state inside lock_buffer().
1621  *
1622  * If block_write_full_page() is called for regular writeback
1623  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1624  * locked buffer.   This can only happen if someone has written the buffer
1625  * directly, with submit_bh().  At the address_space level PageWriteback
1626  * prevents this contention from occurring.
1627  *
1628  * If block_write_full_page() is called with wbc->sync_mode ==
1629  * WB_SYNC_ALL, the writes are posted using WRITE_SYNC; this
1630  * causes the writes to be flagged as synchronous writes.
1631  */
1632 static int __block_write_full_page(struct inode *inode, struct page *page,
1633                         get_block_t *get_block, struct writeback_control *wbc,
1634                         bh_end_io_t *handler)
1635 {
1636         int err;
1637         sector_t block;
1638         sector_t last_block;
1639         struct buffer_head *bh, *head;
1640         const unsigned blocksize = 1 << inode->i_blkbits;
1641         int nr_underway = 0;
1642         int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
1643                         WRITE_SYNC : WRITE);
1644
1645         BUG_ON(!PageLocked(page));
1646
1647         last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1648
1649         if (!page_has_buffers(page)) {
1650                 create_empty_buffers(page, blocksize,
1651                                         (1 << BH_Dirty)|(1 << BH_Uptodate));
1652         }
1653
1654         /*
1655          * Be very careful.  We have no exclusion from __set_page_dirty_buffers
1656          * here, and the (potentially unmapped) buffers may become dirty at
1657          * any time.  If a buffer becomes dirty here after we've inspected it
1658          * then we just miss that fact, and the page stays dirty.
1659          *
1660          * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1661          * handle that here by just cleaning them.
1662          */
1663
1664         block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1665         head = page_buffers(page);
1666         bh = head;
1667
1668         /*
1669          * Get all the dirty buffers mapped to disk addresses and
1670          * handle any aliases from the underlying blockdev's mapping.
1671          */
1672         do {
1673                 if (block > last_block) {
1674                         /*
1675                          * mapped buffers outside i_size will occur, because
1676                          * this page can be outside i_size when there is a
1677                          * truncate in progress.
1678                          */
1679                         /*
1680                          * The buffer was zeroed by block_write_full_page()
1681                          */
1682                         clear_buffer_dirty(bh);
1683                         set_buffer_uptodate(bh);
1684                 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1685                            buffer_dirty(bh)) {
1686                         WARN_ON(bh->b_size != blocksize);
1687                         err = get_block(inode, block, bh, 1);
1688                         if (err)
1689                                 goto recover;
1690                         clear_buffer_delay(bh);
1691                         if (buffer_new(bh)) {
1692                                 /* blockdev mappings never come here */
1693                                 clear_buffer_new(bh);
1694                                 unmap_underlying_metadata(bh->b_bdev,
1695                                                         bh->b_blocknr);
1696                         }
1697                 }
1698                 bh = bh->b_this_page;
1699                 block++;
1700         } while (bh != head);
1701
1702         do {
1703                 if (!buffer_mapped(bh))
1704                         continue;
1705                 /*
1706                  * If it's a fully non-blocking write attempt and we cannot
1707                  * lock the buffer then redirty the page.  Note that this can
1708                  * potentially cause a busy-wait loop from writeback threads
1709                  * and kswapd activity, but those code paths have their own
1710                  * higher-level throttling.
1711                  */
1712                 if (wbc->sync_mode != WB_SYNC_NONE) {
1713                         lock_buffer(bh);
1714                 } else if (!trylock_buffer(bh)) {
1715                         redirty_page_for_writepage(wbc, page);
1716                         continue;
1717                 }
1718                 if (test_clear_buffer_dirty(bh)) {
1719                         mark_buffer_async_write_endio(bh, handler);
1720                 } else {
1721                         unlock_buffer(bh);
1722                 }
1723         } while ((bh = bh->b_this_page) != head);
1724
1725         /*
1726          * The page and its buffers are protected by PageWriteback(), so we can
1727          * drop the bh refcounts early.
1728          */
1729         BUG_ON(PageWriteback(page));
1730         set_page_writeback(page);
1731
1732         do {
1733                 struct buffer_head *next = bh->b_this_page;
1734                 if (buffer_async_write(bh)) {
1735                         submit_bh(write_op, bh);
1736                         nr_underway++;
1737                 }
1738                 bh = next;
1739         } while (bh != head);
1740         unlock_page(page);
1741
1742         err = 0;
1743 done:
1744         if (nr_underway == 0) {
1745                 /*
1746                  * The page was marked dirty, but the buffers were
1747                  * clean.  Someone wrote them back by hand with
1748                  * ll_rw_block/submit_bh.  A rare case.
1749                  */
1750                 end_page_writeback(page);
1751
1752                 /*
1753                  * The page and buffer_heads can be released at any time from
1754                  * here on.
1755                  */
1756         }
1757         return err;
1758
1759 recover:
1760         /*
1761          * ENOSPC, or some other error.  We may already have added some
1762          * blocks to the file, so we need to write these out to avoid
1763          * exposing stale data.
1764          * The page is currently locked and not marked for writeback
1765          */
1766         bh = head;
1767         /* Recovery: lock and submit the mapped buffers */
1768         do {
1769                 if (buffer_mapped(bh) && buffer_dirty(bh) &&
1770                     !buffer_delay(bh)) {
1771                         lock_buffer(bh);
1772                         mark_buffer_async_write_endio(bh, handler);
1773                 } else {
1774                         /*
1775                          * The buffer may have been set dirty during
1776                          * attachment to a dirty page.
1777                          */
1778                         clear_buffer_dirty(bh);
1779                 }
1780         } while ((bh = bh->b_this_page) != head);
1781         SetPageError(page);
1782         BUG_ON(PageWriteback(page));
1783         mapping_set_error(page->mapping, err);
1784         set_page_writeback(page);
1785         do {
1786                 struct buffer_head *next = bh->b_this_page;
1787                 if (buffer_async_write(bh)) {
1788                         clear_buffer_dirty(bh);
1789                         submit_bh(write_op, bh);
1790                         nr_underway++;
1791                 }
1792                 bh = next;
1793         } while (bh != head);
1794         unlock_page(page);
1795         goto done;
1796 }
1797
1798 /*
1799  * If a page has any new buffers, zero them out here, and mark them uptodate
1800  * and dirty so they'll be written out (in order to prevent uninitialised
1801  * block data from leaking). And clear the new bit.
1802  */
1803 void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1804 {
1805         unsigned int block_start, block_end;
1806         struct buffer_head *head, *bh;
1807
1808         BUG_ON(!PageLocked(page));
1809         if (!page_has_buffers(page))
1810                 return;
1811
1812         bh = head = page_buffers(page);
1813         block_start = 0;
1814         do {
1815                 block_end = block_start + bh->b_size;
1816
1817                 if (buffer_new(bh)) {
1818                         if (block_end > from && block_start < to) {
1819                                 if (!PageUptodate(page)) {
1820                                         unsigned start, size;
1821
1822                                         start = max(from, block_start);
1823                                         size = min(to, block_end) - start;
1824
1825                                         zero_user(page, start, size);
1826                                         set_buffer_uptodate(bh);
1827                                 }
1828
1829                                 clear_buffer_new(bh);
1830                                 mark_buffer_dirty(bh);
1831                         }
1832                 }
1833
1834                 block_start = block_end;
1835                 bh = bh->b_this_page;
1836         } while (bh != head);
1837 }
1838 EXPORT_SYMBOL(page_zero_new_buffers);
1839
1840 int __block_write_begin(struct page *page, loff_t pos, unsigned len,
1841                 get_block_t *get_block)
1842 {
1843         unsigned from = pos & (PAGE_CACHE_SIZE - 1);
1844         unsigned to = from + len;
1845         struct inode *inode = page->mapping->host;
1846         unsigned block_start, block_end;
1847         sector_t block;
1848         int err = 0;
1849         unsigned blocksize, bbits;
1850         struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1851
1852         BUG_ON(!PageLocked(page));
1853         BUG_ON(from > PAGE_CACHE_SIZE);
1854         BUG_ON(to > PAGE_CACHE_SIZE);
1855         BUG_ON(from > to);
1856
1857         blocksize = 1 << inode->i_blkbits;
1858         if (!page_has_buffers(page))
1859                 create_empty_buffers(page, blocksize, 0);
1860         head = page_buffers(page);
1861
1862         bbits = inode->i_blkbits;
1863         block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1864
1865         for(bh = head, block_start = 0; bh != head || !block_start;
1866             block++, block_start=block_end, bh = bh->b_this_page) {
1867                 block_end = block_start + blocksize;
1868                 if (block_end <= from || block_start >= to) {
1869                         if (PageUptodate(page)) {
1870                                 if (!buffer_uptodate(bh))
1871                                         set_buffer_uptodate(bh);
1872                         }
1873                         continue;
1874                 }
1875                 if (buffer_new(bh))
1876                         clear_buffer_new(bh);
1877                 if (!buffer_mapped(bh)) {
1878                         WARN_ON(bh->b_size != blocksize);
1879                         err = get_block(inode, block, bh, 1);
1880                         if (err)
1881                                 break;
1882                         if (buffer_new(bh)) {
1883                                 unmap_underlying_metadata(bh->b_bdev,
1884                                                         bh->b_blocknr);
1885                                 if (PageUptodate(page)) {
1886                                         clear_buffer_new(bh);
1887                                         set_buffer_uptodate(bh);
1888                                         mark_buffer_dirty(bh);
1889                                         continue;
1890                                 }
1891                                 if (block_end > to || block_start < from)
1892                                         zero_user_segments(page,
1893                                                 to, block_end,
1894                                                 block_start, from);
1895                                 continue;
1896                         }
1897                 }
1898                 if (PageUptodate(page)) {
1899                         if (!buffer_uptodate(bh))
1900                                 set_buffer_uptodate(bh);
1901                         continue; 
1902                 }
1903                 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1904                     !buffer_unwritten(bh) &&
1905                      (block_start < from || block_end > to)) {
1906                         ll_rw_block(READ, 1, &bh);
1907                         *wait_bh++=bh;
1908                 }
1909         }
1910         /*
1911          * If we issued read requests - let them complete.
1912          */
1913         while(wait_bh > wait) {
1914                 wait_on_buffer(*--wait_bh);
1915                 if (!buffer_uptodate(*wait_bh))
1916                         err = -EIO;
1917         }
1918         if (unlikely(err))
1919                 page_zero_new_buffers(page, from, to);
1920         return err;
1921 }
1922 EXPORT_SYMBOL(__block_write_begin);
1923
1924 static int __block_commit_write(struct inode *inode, struct page *page,
1925                 unsigned from, unsigned to)
1926 {
1927         unsigned block_start, block_end;
1928         int partial = 0;
1929         unsigned blocksize;
1930         struct buffer_head *bh, *head;
1931
1932         blocksize = 1 << inode->i_blkbits;
1933
1934         for(bh = head = page_buffers(page), block_start = 0;
1935             bh != head || !block_start;
1936             block_start=block_end, bh = bh->b_this_page) {
1937                 block_end = block_start + blocksize;
1938                 if (block_end <= from || block_start >= to) {
1939                         if (!buffer_uptodate(bh))
1940                                 partial = 1;
1941                 } else {
1942                         set_buffer_uptodate(bh);
1943                         mark_buffer_dirty(bh);
1944                 }
1945                 clear_buffer_new(bh);
1946         }
1947
1948         /*
1949          * If this is a partial write which happened to make all buffers
1950          * uptodate then we can optimize away a bogus readpage() for
1951          * the next read(). Here we 'discover' whether the page went
1952          * uptodate as a result of this (potentially partial) write.
1953          */
1954         if (!partial)
1955                 SetPageUptodate(page);
1956         return 0;
1957 }
1958
1959 /*
1960  * block_write_begin takes care of the basic task of block allocation and
1961  * bringing partial write blocks uptodate first.
1962  *
1963  * The filesystem needs to handle block truncation upon failure.
1964  */
1965 int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
1966                 unsigned flags, struct page **pagep, get_block_t *get_block)
1967 {
1968         pgoff_t index = pos >> PAGE_CACHE_SHIFT;
1969         struct page *page;
1970         int status;
1971
1972         page = grab_cache_page_write_begin(mapping, index, flags);
1973         if (!page)
1974                 return -ENOMEM;
1975
1976         status = __block_write_begin(page, pos, len, get_block);
1977         if (unlikely(status)) {
1978                 unlock_page(page);
1979                 page_cache_release(page);
1980                 page = NULL;
1981         }
1982
1983         *pagep = page;
1984         return status;
1985 }
1986 EXPORT_SYMBOL(block_write_begin);
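
/*
 * Example (hypothetical): the common calling pattern - the filesystem
 * supplies its own get_block routine and lets block_write_begin() and
 * generic_write_end() do the rest.  myfs_get_block is an assumed,
 * filesystem-provided mapping function.
 */
#if 0	/* sketch only, not compiled */
static int myfs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	int ret;

	ret = block_write_begin(mapping, pos, len, flags, pagep,
				myfs_get_block);
	if (unlikely(ret)) {
		/* per the note above: truncate blocks allocated past i_size */
	}
	return ret;
}

static const struct address_space_operations myfs_aops = {
	.write_begin	= myfs_write_begin,
	.write_end	= generic_write_end,
};
#endif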
1987
1988 int block_write_end(struct file *file, struct address_space *mapping,
1989                         loff_t pos, unsigned len, unsigned copied,
1990                         struct page *page, void *fsdata)
1991 {
1992         struct inode *inode = mapping->host;
1993         unsigned start;
1994
1995         start = pos & (PAGE_CACHE_SIZE - 1);
1996
1997         if (unlikely(copied < len)) {
1998                 /*
1999                  * The buffers that were written will now be uptodate, so we
2000                  * don't have to worry about a readpage reading them and
2001                  * overwriting a partial write. However if we have encountered
2002                  * a short write and only partially written into a buffer, it
2003                  * will not be marked uptodate, so a readpage might come in and
2004                  * destroy our partial write.
2005                  *
2006                  * Do the simplest thing, and just treat any short write to a
2007                  * non uptodate page as a zero-length write, and force the
2008                  * caller to redo the whole thing.
2009                  */
2010                 if (!PageUptodate(page))
2011                         copied = 0;
2012
2013                 page_zero_new_buffers(page, start+copied, start+len);
2014         }
2015         flush_dcache_page(page);
2016
2017         /* This could be a short (even 0-length) commit */
2018         __block_commit_write(inode, page, start, start+copied);
2019
2020         return copied;
2021 }
2022 EXPORT_SYMBOL(block_write_end);
2023
2024 int generic_write_end(struct file *file, struct address_space *mapping,
2025                         loff_t pos, unsigned len, unsigned copied,
2026                         struct page *page, void *fsdata)
2027 {
2028         struct inode *inode = mapping->host;
2029         loff_t old_size = inode->i_size;
2030         int i_size_changed = 0;
2031
2032         copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2033
2034         /*
2035          * No need to use i_size_read() here, the i_size
2036          * cannot change under us because we hold i_mutex.
2037          *
2038          * But it's important to update i_size while still holding page lock:
2039          * page writeout could otherwise come in and zero beyond i_size.
2040          */
2041         if (pos+copied > inode->i_size) {
2042                 i_size_write(inode, pos+copied);
2043                 i_size_changed = 1;
2044         }
2045
2046         unlock_page(page);
2047         page_cache_release(page);
2048
2049         if (old_size < pos)
2050                 pagecache_isize_extended(inode, old_size, pos);
2051         /*
2052          * Don't mark the inode dirty under page lock. First, it unnecessarily
2053          * makes the holding time of page lock longer. Second, it forces lock
2054          * ordering of page lock and transaction start for journaling
2055          * filesystems.
2056          */
2057         if (i_size_changed)
2058                 mark_inode_dirty(inode);
2059
2060         return copied;
2061 }
2062 EXPORT_SYMBOL(generic_write_end);
2063
2064 /*
2065  * block_is_partially_uptodate checks whether buffers within a page are
2066  * uptodate or not.
2067  *
2068  * Returns true if all buffers which correspond to a file portion
2069  * we want to read are uptodate.
2070  */
2071 int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
2072                                         unsigned long from)
2073 {
2074         struct inode *inode = page->mapping->host;
2075         unsigned block_start, block_end, blocksize;
2076         unsigned to;
2077         struct buffer_head *bh, *head;
2078         int ret = 1;
2079
2080         if (!page_has_buffers(page))
2081                 return 0;
2082
2083         blocksize = 1 << inode->i_blkbits;
2084         to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
2085         to = from + to;
2086         if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
2087                 return 0;
2088
2089         head = page_buffers(page);
2090         bh = head;
2091         block_start = 0;
2092         do {
2093                 block_end = block_start + blocksize;
2094                 if (block_end > from && block_start < to) {
2095                         if (!buffer_uptodate(bh)) {
2096                                 ret = 0;
2097                                 break;
2098                         }
2099                         if (block_end >= to)
2100                                 break;
2101                 }
2102                 block_start = block_end;
2103                 bh = bh->b_this_page;
2104         } while (bh != head);
2105
2106         return ret;
2107 }
2108 EXPORT_SYMBOL(block_is_partially_uptodate);
2109
2110 /*
2111  * Generic "read page" function for block devices that have the normal
2112  * get_block functionality. This covers most block device filesystems.
2113  * Reads the page asynchronously --- the unlock_buffer() and
2114  * set/clear_buffer_uptodate() functions propagate buffer state into the
2115  * page struct once IO has completed.
2116  */
2117 int block_read_full_page(struct page *page, get_block_t *get_block)
2118 {
2119         struct inode *inode = page->mapping->host;
2120         sector_t iblock, lblock;
2121         struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2122         unsigned int blocksize;
2123         int nr, i;
2124         int fully_mapped = 1;
2125
2126         BUG_ON(!PageLocked(page));
2127         blocksize = 1 << inode->i_blkbits;
2128         if (!page_has_buffers(page))
2129                 create_empty_buffers(page, blocksize, 0);
2130         head = page_buffers(page);
2131
2132         iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2133         lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2134         bh = head;
2135         nr = 0;
2136         i = 0;
2137
2138         do {
2139                 if (buffer_uptodate(bh))
2140                         continue;
2141
2142                 if (!buffer_mapped(bh)) {
2143                         int err = 0;
2144
2145                         fully_mapped = 0;
2146                         if (iblock < lblock) {
2147                                 WARN_ON(bh->b_size != blocksize);
2148                                 err = get_block(inode, iblock, bh, 0);
2149                                 if (err)
2150                                         SetPageError(page);
2151                         }
2152                         if (!buffer_mapped(bh)) {
2153                                 zero_user(page, i * blocksize, blocksize);
2154                                 if (!err)
2155                                         set_buffer_uptodate(bh);
2156                                 continue;
2157                         }
2158                         /*
2159                          * get_block() might have updated the buffer
2160                          * synchronously
2161                          */
2162                         if (buffer_uptodate(bh))
2163                                 continue;
2164                 }
2165                 arr[nr++] = bh;
2166         } while (i++, iblock++, (bh = bh->b_this_page) != head);
2167
2168         if (fully_mapped)
2169                 SetPageMappedToDisk(page);
2170
2171         if (!nr) {
2172                 /*
2173                  * All buffers are uptodate - we can set the page uptodate
2174                  * as well. But not if get_block() returned an error.
2175                  */
2176                 if (!PageError(page))
2177                         SetPageUptodate(page);
2178                 unlock_page(page);
2179                 return 0;
2180         }
2181
2182         /* Stage two: lock the buffers */
2183         for (i = 0; i < nr; i++) {
2184                 bh = arr[i];
2185                 lock_buffer(bh);
2186                 mark_buffer_async_read(bh);
2187         }
2188
2189         /*
2190          * Stage 3: start the IO.  Check for uptodateness
2191          * inside the buffer lock in case another process reading
2192          * the underlying blockdev brought it uptodate (the sct fix).
2193          */
2194         for (i = 0; i < nr; i++) {
2195                 bh = arr[i];
2196                 if (buffer_uptodate(bh))
2197                         end_buffer_async_read(bh, 1);
2198                 else
2199                         submit_bh(READ, bh);
2200         }
2201         return 0;
2202 }
2203 EXPORT_SYMBOL(block_read_full_page);
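
/*
 * Example (hypothetical): most buffer-backed filesystems implement
 * ->readpage as a one-line wrapper around block_read_full_page(),
 * with myfs_get_block again standing in for the fs mapping routine.
 */
#if 0	/* sketch only, not compiled */
static int myfs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, myfs_get_block);
}
#endif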
2204
2205 /* utility function for filesystems that need to do work on expanding
2206  * truncates.  Uses filesystem pagecache writes to allow the filesystem to
2207  * deal with the hole.  
2208  */
2209 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2210 {
2211         struct address_space *mapping = inode->i_mapping;
2212         struct page *page;
2213         void *fsdata;
2214         int err;
2215
2216         err = inode_newsize_ok(inode, size);
2217         if (err)
2218                 goto out;
2219
2220         err = pagecache_write_begin(NULL, mapping, size, 0,
2221                                 AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2222                                 &page, &fsdata);
2223         if (err)
2224                 goto out;
2225
2226         err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2227         BUG_ON(err > 0);
2228
2229 out:
2230         return err;
2231 }
2232 EXPORT_SYMBOL(generic_cont_expand_simple);
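
/*
 * Example (hypothetical): growing a file from a ->setattr-style path by
 * instantiating the new EOF page through the pagecache.
 */
#if 0	/* sketch only, not compiled */
static int myfs_setattr_size(struct inode *inode, loff_t newsize)
{
	if (newsize > inode->i_size)	/* expanding: deal with the hole */
		return generic_cont_expand_simple(inode, newsize);
	return 0;
}
#endif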
2233
2234 static int cont_expand_zero(struct file *file, struct address_space *mapping,
2235                             loff_t pos, loff_t *bytes)
2236 {
2237         struct inode *inode = mapping->host;
2238         unsigned blocksize = 1 << inode->i_blkbits;
2239         struct page *page;
2240         void *fsdata;
2241         pgoff_t index, curidx;
2242         loff_t curpos;
2243         unsigned zerofrom, offset, len;
2244         int err = 0;
2245
2246         index = pos >> PAGE_CACHE_SHIFT;
2247         offset = pos & ~PAGE_CACHE_MASK;
2248
2249         while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
2250                 zerofrom = curpos & ~PAGE_CACHE_MASK;
2251                 if (zerofrom & (blocksize-1)) {
2252                         *bytes |= (blocksize-1);
2253                         (*bytes)++;
2254                 }
2255                 len = PAGE_CACHE_SIZE - zerofrom;
2256
2257                 err = pagecache_write_begin(file, mapping, curpos, len,
2258                                                 AOP_FLAG_UNINTERRUPTIBLE,
2259                                                 &page, &fsdata);
2260                 if (err)
2261                         goto out;
2262                 zero_user(page, zerofrom, len);
2263                 err = pagecache_write_end(file, mapping, curpos, len, len,
2264                                                 page, fsdata);
2265                 if (err < 0)
2266                         goto out;
2267                 BUG_ON(err != len);
2268                 err = 0;
2269
2270                 balance_dirty_pages_ratelimited(mapping);
2271
2272                 if (unlikely(fatal_signal_pending(current))) {
2273                         err = -EINTR;
2274                         goto out;
2275                 }
2276         }
2277
2278         /* page covers the boundary, find the boundary offset */
2279         if (index == curidx) {
2280                 zerofrom = curpos & ~PAGE_CACHE_MASK;
2281                 /* if we are expanding the file, the last block will be filled */
2282                 if (offset <= zerofrom) {
2283                         goto out;
2284                 }
2285                 if (zerofrom & (blocksize-1)) {
2286                         *bytes |= (blocksize-1);
2287                         (*bytes)++;
2288                 }
2289                 len = offset - zerofrom;
2290
2291                 err = pagecache_write_begin(file, mapping, curpos, len,
2292                                                 AOP_FLAG_UNINTERRUPTIBLE,
2293                                                 &page, &fsdata);
2294                 if (err)
2295                         goto out;
2296                 zero_user(page, zerofrom, len);
2297                 err = pagecache_write_end(file, mapping, curpos, len, len,
2298                                                 page, fsdata);
2299                 if (err < 0)
2300                         goto out;
2301                 BUG_ON(err != len);
2302                 err = 0;
2303         }
2304 out:
2305         return err;
2306 }
2307
2308 /*
2309  * For moronic filesystems that do not allow holes in files.
2310  * We may have to extend the file.
2311  */
2312 int cont_write_begin(struct file *file, struct address_space *mapping,
2313                         loff_t pos, unsigned len, unsigned flags,
2314                         struct page **pagep, void **fsdata,
2315                         get_block_t *get_block, loff_t *bytes)
2316 {
2317         struct inode *inode = mapping->host;
2318         unsigned blocksize = 1 << inode->i_blkbits;
2319         unsigned zerofrom;
2320         int err;
2321
2322         err = cont_expand_zero(file, mapping, pos, bytes);
2323         if (err)
2324                 return err;
2325
2326         zerofrom = *bytes & ~PAGE_CACHE_MASK;
2327         if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2328                 *bytes |= (blocksize-1);
2329                 (*bytes)++;
2330         }
2331
2332         return block_write_begin(mapping, pos, len, flags, pagep, get_block);
2333 }
2334 EXPORT_SYMBOL(cont_write_begin);
2335
2336 int block_commit_write(struct page *page, unsigned from, unsigned to)
2337 {
2338         struct inode *inode = page->mapping->host;
2339         __block_commit_write(inode,page,from,to);
2340         return 0;
2341 }
2342 EXPORT_SYMBOL(block_commit_write);
2343
2344 /*
2345  * block_page_mkwrite() is not allowed to change the file size as it gets
2346  * called from a page fault handler when a page is first dirtied. Hence we must
2347  * be careful to check for EOF conditions here. We set the page up correctly
2348  * for a written page which means we get ENOSPC checking when writing into
2349  * holes and correct delalloc and unwritten extent mapping on filesystems that
2350  * support these features.
2351  *
2352  * We are not allowed to take the i_mutex here so we have to play games to
2353  * protect against truncate races as the page could now be beyond EOF.  Because
2354  * truncate writes the inode size before removing pages, once we have the
2355  * page lock we can determine safely if the page is beyond EOF. If it is not
2356  * beyond EOF, then the page is guaranteed safe against truncation until we
2357  * unlock the page.
2358  *
2359  * Direct callers of this function should call vfs_check_frozen() so that page
2360  * fault does not busyloop until the fs is thawed.
2361  */
2362 int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2363                          get_block_t get_block)
2364 {
2365         struct page *page = vmf->page;
2366         struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2367         unsigned long end;
2368         loff_t size;
2369         int ret;
2370
2371         lock_page(page);
2372         size = i_size_read(inode);
2373         if ((page->mapping != inode->i_mapping) ||
2374             (page_offset(page) > size)) {
2375                 /* We overload EFAULT to mean page got truncated */
2376                 ret = -EFAULT;
2377                 goto out_unlock;
2378         }
2379
2380         /* page is wholly or partially inside EOF */
2381         if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2382                 end = size & ~PAGE_CACHE_MASK;
2383         else
2384                 end = PAGE_CACHE_SIZE;
2385
2386         ret = __block_write_begin(page, 0, end, get_block);
2387         if (!ret)
2388                 ret = block_commit_write(page, 0, end);
2389
2390         if (unlikely(ret < 0))
2391                 goto out_unlock;
2392         /*
2393          * Freezing in progress? We check after the page is marked dirty and
2394          * with page lock held so if the test here fails, we are sure freezing
2395          * code will wait during syncing until the page fault is done - at that
2396          * point page will be dirty and unlocked so freezing code will write it
2397          * and writeprotect it again.
2398          */
2399         set_page_dirty(page);
2400         if (inode->i_sb->s_frozen != SB_UNFROZEN) {
2401                 ret = -EAGAIN;
2402                 goto out_unlock;
2403         }
2404         wait_on_page_writeback(page);
2405         return 0;
2406 out_unlock:
2407         unlock_page(page);
2408         return ret;
2409 }
2410 EXPORT_SYMBOL(__block_page_mkwrite);
2411
2412 int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2413                    get_block_t get_block)
2414 {
2415         int ret;
2416         struct super_block *sb = vma->vm_file->f_path.dentry->d_inode->i_sb;
2417
2418         /*
2419          * This check is racy but catches the common case. The check in
2420          * __block_page_mkwrite() is reliable.
2421          */
2422         vfs_check_frozen(sb, SB_FREEZE_WRITE);
2423         ret = __block_page_mkwrite(vma, vmf, get_block);
2424         return block_page_mkwrite_return(ret);
2425 }
2426 EXPORT_SYMBOL(block_page_mkwrite);
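
/*
 * Example (hypothetical): wiring block_page_mkwrite() into a vma's
 * vm_operations so the first write fault on a mapped page goes through
 * the ENOSPC and extent-mapping checks described above.
 */
#if 0	/* sketch only, not compiled */
static int myfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return block_page_mkwrite(vma, vmf, myfs_get_block);
}

static const struct vm_operations_struct myfs_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= myfs_page_mkwrite,
};
#endif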
2427
2428 /*
2429  * nobh_write_begin()'s prereads are special: the buffer_heads are freed
2430  * immediately, while under the page lock.  So it needs a special end_io
2431  * handler which does not touch the bh after unlocking it.
2432  */
2433 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2434 {
2435         __end_buffer_read_notouch(bh, uptodate);
2436 }
2437
2438 /*
2439  * Attach the singly-linked list of buffers created by nobh_write_begin, to
2440  * the page (converting it to circular linked list and taking care of page
2441  * dirty races).
2442  */
2443 static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2444 {
2445         struct buffer_head *bh;
2446
2447         BUG_ON(!PageLocked(page));
2448
2449         spin_lock(&page->mapping->private_lock);
2450         bh = head;
2451         do {
2452                 if (PageDirty(page))
2453                         set_buffer_dirty(bh);
2454                 if (!bh->b_this_page)
2455                         bh->b_this_page = head;
2456                 bh = bh->b_this_page;
2457         } while (bh != head);
2458         attach_page_buffers(page, head);
2459         spin_unlock(&page->mapping->private_lock);
2460 }
2461
2462 /*
2463  * On entry, the page is not uptodate at all.
2464  * On exit the page is fully uptodate in the areas outside (from,to).
2465  * The filesystem needs to handle block truncation upon failure.
2466  */
2467 int nobh_write_begin(struct address_space *mapping,
2468                         loff_t pos, unsigned len, unsigned flags,
2469                         struct page **pagep, void **fsdata,
2470                         get_block_t *get_block)
2471 {
2472         struct inode *inode = mapping->host;
2473         const unsigned blkbits = inode->i_blkbits;
2474         const unsigned blocksize = 1 << blkbits;
2475         struct buffer_head *head, *bh;
2476         struct page *page;
2477         pgoff_t index;
2478         unsigned from, to;
2479         unsigned block_in_page;
2480         unsigned block_start, block_end;
2481         sector_t block_in_file;
2482         int nr_reads = 0;
2483         int ret = 0;
2484         int is_mapped_to_disk = 1;
2485
2486         index = pos >> PAGE_CACHE_SHIFT;
2487         from = pos & (PAGE_CACHE_SIZE - 1);
2488         to = from + len;
2489
2490         page = grab_cache_page_write_begin(mapping, index, flags);
2491         if (!page)
2492                 return -ENOMEM;
2493         *pagep = page;
2494         *fsdata = NULL;
2495
2496         if (page_has_buffers(page)) {
2497                 ret = __block_write_begin(page, pos, len, get_block);
2498                 if (unlikely(ret))
2499                         goto out_release;
2500                 return ret;
2501         }
2502
2503         if (PageMappedToDisk(page))
2504                 return 0;
2505
2506         /*
2507          * Allocate buffers so that we can keep track of state, and potentially
2508          * attach them to the page if an error occurs. In the common case of
2509          * no error, they will just be freed again without ever being attached
2510          * to the page (which is all OK, because we're under the page lock).
2511          *
2512          * Be careful: the buffer linked list is a NULL terminated one, rather
2513          * than the circular one we're used to.
2514          */
2515         head = alloc_page_buffers(page, blocksize, 0);
2516         if (!head) {
2517                 ret = -ENOMEM;
2518                 goto out_release;
2519         }
2520
2521         block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2522
2523         /*
2524          * We loop across all blocks in the page, whether or not they are
2525          * part of the affected region.  This is so we can discover if the
2526          * page is fully mapped-to-disk.
2527          */
2528         for (block_start = 0, block_in_page = 0, bh = head;
2529                   block_start < PAGE_CACHE_SIZE;
2530                   block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
2531                 int create;
2532
2533                 block_end = block_start + blocksize;
2534                 bh->b_state = 0;
2535                 create = 1;
2536                 if (block_start >= to)
2537                         create = 0;
2538                 ret = get_block(inode, block_in_file + block_in_page,
2539                                         bh, create);
2540                 if (ret)
2541                         goto failed;
2542                 if (!buffer_mapped(bh))
2543                         is_mapped_to_disk = 0;
2544                 if (buffer_new(bh))
2545                         unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2546                 if (PageUptodate(page)) {
2547                         set_buffer_uptodate(bh);
2548                         continue;
2549                 }
2550                 if (buffer_new(bh) || !buffer_mapped(bh)) {
2551                         zero_user_segments(page, block_start, from,
2552                                                         to, block_end);
2553                         continue;
2554                 }
2555                 if (buffer_uptodate(bh))
2556                         continue;       /* reiserfs does this */
2557                 if (block_start < from || block_end > to) {
2558                         lock_buffer(bh);
2559                         bh->b_end_io = end_buffer_read_nobh;
2560                         submit_bh(READ, bh);
2561                         nr_reads++;
2562                 }
2563         }
2564
2565         if (nr_reads) {
2566                 /*
2567                  * The page is locked, so these buffers are protected from
2568                  * any VM or truncate activity.  Hence we don't need to care
2569                  * for the buffer_head refcounts.
2570                  */
2571                 for (bh = head; bh; bh = bh->b_this_page) {
2572                         wait_on_buffer(bh);
2573                         if (!buffer_uptodate(bh))
2574                                 ret = -EIO;
2575                 }
2576                 if (ret)
2577                         goto failed;
2578         }
2579
2580         if (is_mapped_to_disk)
2581                 SetPageMappedToDisk(page);
2582
2583         *fsdata = head; /* to be released by nobh_write_end */
2584
2585         return 0;
2586
2587 failed:
2588         BUG_ON(!ret);
2589         /*
2590          * Error recovery is a bit difficult. We need to zero out blocks that
2591          * were newly allocated, and dirty them to ensure they get written out.
2592          * Buffers need to be attached to the page at this point, otherwise
2593          * the handling of potential IO errors during writeout would be hard
2594          * (could try doing synchronous writeout, but what if that fails too?)
2595          */
2596         attach_nobh_buffers(page, head);
2597         page_zero_new_buffers(page, from, to);
2598
2599 out_release:
2600         unlock_page(page);
2601         page_cache_release(page);
2602         *pagep = NULL;
2603
2604         return ret;
2605 }
2606 EXPORT_SYMBOL(nobh_write_begin);
2607
2608 int nobh_write_end(struct file *file, struct address_space *mapping,
2609                         loff_t pos, unsigned len, unsigned copied,
2610                         struct page *page, void *fsdata)
2611 {
2612         struct inode *inode = page->mapping->host;
2613         struct buffer_head *head = fsdata;
2614         struct buffer_head *bh;
2615         BUG_ON(fsdata != NULL && page_has_buffers(page));
2616
2617         if (unlikely(copied < len) && head)
2618                 attach_nobh_buffers(page, head);
2619         if (page_has_buffers(page))
2620                 return generic_write_end(file, mapping, pos, len,
2621                                         copied, page, fsdata);
2622
2623         SetPageUptodate(page);
2624         set_page_dirty(page);
2625         if (pos+copied > inode->i_size) {
2626                 i_size_write(inode, pos+copied);
2627                 mark_inode_dirty(inode);
2628         }
2629
2630         unlock_page(page);
2631         page_cache_release(page);
2632
2633         while (head) {
2634                 bh = head;
2635                 head = head->b_this_page;
2636                 free_buffer_head(bh);
2637         }
2638
2639         return copied;
2640 }
2641 EXPORT_SYMBOL(nobh_write_end);
2642
2643 /*
2644  * nobh_writepage() - based on block_write_full_page() except
2645  * that it tries to operate without attaching bufferheads to
2646  * the page.
2647  */
2648 int nobh_writepage(struct page *page, get_block_t *get_block,
2649                         struct writeback_control *wbc)
2650 {
2651         struct inode * const inode = page->mapping->host;
2652         loff_t i_size = i_size_read(inode);
2653         const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2654         unsigned offset;
2655         int ret;
2656
2657         /* Is the page fully inside i_size? */
2658         if (page->index < end_index)
2659                 goto out;
2660
2661         /* Is the page fully outside i_size? (truncate in progress) */
2662         offset = i_size & (PAGE_CACHE_SIZE-1);
2663         if (page->index >= end_index+1 || !offset) {
2664                 /*
2665                  * The page may have dirty, unmapped buffers.  For example,
2666                  * they may have been added in ext3_writepage().  Make them
2667                  * freeable here, so the page does not leak.
2668                  */
2669 #if 0
2670                 /* Not really sure about this - do we need this? */
2671                 if (page->mapping->a_ops->invalidatepage)
2672                         page->mapping->a_ops->invalidatepage(page, offset);
2673 #endif
2674                 unlock_page(page);
2675                 return 0; /* don't care */
2676         }
2677
2678         /*
2679          * The page straddles i_size.  It must be zeroed out on each and every
2680          * writepage invocation because it may be mmapped.  "A file is mapped
2681          * in multiples of the page size.  For a file that is not a multiple of
2682          * the  page size, the remaining memory is zeroed when mapped, and
2683          * writes to that region are not written out to the file."
2684          */
2685         zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2686 out:
2687         ret = mpage_writepage(page, get_block, wbc);
2688         if (ret == -EAGAIN)
2689                 ret = __block_write_full_page(inode, page, get_block, wbc,
2690                                               end_buffer_async_write);
2691         return ret;
2692 }
2693 EXPORT_SYMBOL(nobh_writepage);
2694
2695 int nobh_truncate_page(struct address_space *mapping,
2696                         loff_t from, get_block_t *get_block)
2697 {
2698         pgoff_t index = from >> PAGE_CACHE_SHIFT;
2699         unsigned offset = from & (PAGE_CACHE_SIZE-1);
2700         unsigned blocksize;
2701         sector_t iblock;
2702         unsigned length, pos;
2703         struct inode *inode = mapping->host;
2704         struct page *page;
2705         struct buffer_head map_bh;
2706         int err;
2707
2708         blocksize = 1 << inode->i_blkbits;
2709         length = offset & (blocksize - 1);
2710
2711         /* Block boundary? Nothing to do */
2712         if (!length)
2713                 return 0;
2714
2715         length = blocksize - length;
2716         iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2717
2718         page = grab_cache_page(mapping, index);
2719         err = -ENOMEM;
2720         if (!page)
2721                 goto out;
2722
2723         if (page_has_buffers(page)) {
2724 has_buffers:
2725                 unlock_page(page);
2726                 page_cache_release(page);
2727                 return block_truncate_page(mapping, from, get_block);
2728         }
2729
2730         /* Find the buffer that contains "offset" */
2731         pos = blocksize;
2732         while (offset >= pos) {
2733                 iblock++;
2734                 pos += blocksize;
2735         }
2736
2737         map_bh.b_size = blocksize;
2738         map_bh.b_state = 0;
2739         err = get_block(inode, iblock, &map_bh, 0);
2740         if (err)
2741                 goto unlock;
2742         /* unmapped? It's a hole - nothing to do */
2743         if (!buffer_mapped(&map_bh))
2744                 goto unlock;
2745
2746         /* Ok, it's mapped. Make sure it's up-to-date */
2747         if (!PageUptodate(page)) {
2748                 err = mapping->a_ops->readpage(NULL, page);
2749                 if (err) {
2750                         page_cache_release(page);
2751                         goto out;
2752                 }
2753                 lock_page(page);
2754                 if (!PageUptodate(page)) {
2755                         err = -EIO;
2756                         goto unlock;
2757                 }
2758                 if (page_has_buffers(page))
2759                         goto has_buffers;
2760         }
2761         zero_user(page, offset, length);
2762         set_page_dirty(page);
2763         err = 0;
2764
2765 unlock:
2766         unlock_page(page);
2767         page_cache_release(page);
2768 out:
2769         return err;
2770 }
2771 EXPORT_SYMBOL(nobh_truncate_page);
2772
2773 int block_truncate_page(struct address_space *mapping,
2774                         loff_t from, get_block_t *get_block)
2775 {
2776         pgoff_t index = from >> PAGE_CACHE_SHIFT;
2777         unsigned offset = from & (PAGE_CACHE_SIZE-1);
2778         unsigned blocksize;
2779         sector_t iblock;
2780         unsigned length, pos;
2781         struct inode *inode = mapping->host;
2782         struct page *page;
2783         struct buffer_head *bh;
2784         int err;
2785
2786         blocksize = 1 << inode->i_blkbits;
2787         length = offset & (blocksize - 1);
2788
2789         /* Block boundary? Nothing to do */
2790         if (!length)
2791                 return 0;
2792
2793         length = blocksize - length;
2794         iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2795         
2796         page = grab_cache_page(mapping, index);
2797         err = -ENOMEM;
2798         if (!page)
2799                 goto out;
2800
2801         if (!page_has_buffers(page))
2802                 create_empty_buffers(page, blocksize, 0);
2803
2804         /* Find the buffer that contains "offset" */
2805         bh = page_buffers(page);
2806         pos = blocksize;
2807         while (offset >= pos) {
2808                 bh = bh->b_this_page;
2809                 iblock++;
2810                 pos += blocksize;
2811         }
2812
2813         err = 0;
2814         if (!buffer_mapped(bh)) {
2815                 WARN_ON(bh->b_size != blocksize);
2816                 err = get_block(inode, iblock, bh, 0);
2817                 if (err)
2818                         goto unlock;
2819                 /* unmapped? It's a hole - nothing to do */
2820                 if (!buffer_mapped(bh))
2821                         goto unlock;
2822         }
2823
2824         /* Ok, it's mapped. Make sure it's up-to-date */
2825         if (PageUptodate(page))
2826                 set_buffer_uptodate(bh);
2827
2828         if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2829                 err = -EIO;
2830                 ll_rw_block(READ, 1, &bh);
2831                 wait_on_buffer(bh);
2832                 /* Uhhuh. Read error. Complain and punt. */
2833                 if (!buffer_uptodate(bh))
2834                         goto unlock;
2835         }
2836
2837         zero_user(page, offset, length);
2838         mark_buffer_dirty(bh);
2839         err = 0;
2840
2841 unlock:
2842         unlock_page(page);
2843         page_cache_release(page);
2844 out:
2845         return err;
2846 }
2847 EXPORT_SYMBOL(block_truncate_page);
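
/*
 * Example (hypothetical): zeroing the tail of the last partial block when
 * shrinking a file, before the filesystem frees the blocks past the new
 * size.
 */
#if 0	/* sketch only, not compiled */
static int myfs_shrink(struct inode *inode, loff_t newsize)
{
	int err = block_truncate_page(inode->i_mapping, newsize,
				      myfs_get_block);
	if (err)
		return err;
	/* filesystem-specific freeing of blocks past newsize goes here */
	return 0;
}
#endif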
2848
2849 /*
2850  * The generic ->writepage function for buffer-backed address_spaces
2851  * this form passes in the end_io handler used to finish the IO.
2852  */
2853 int block_write_full_page_endio(struct page *page, get_block_t *get_block,
2854                         struct writeback_control *wbc, bh_end_io_t *handler)
2855 {
2856         struct inode * const inode = page->mapping->host;
2857         loff_t i_size = i_size_read(inode);
2858         const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2859         unsigned offset;
2860
2861         /* Is the page fully inside i_size? */
2862         if (page->index < end_index)
2863                 return __block_write_full_page(inode, page, get_block, wbc,
2864                                                handler);
2865
2866         /* Is the page fully outside i_size? (truncate in progress) */
2867         offset = i_size & (PAGE_CACHE_SIZE-1);
2868         if (page->index >= end_index+1 || !offset) {
2869                 /*
2870                  * The page may have dirty, unmapped buffers.  For example,
2871                  * they may have been added in ext3_writepage().  Make them
2872                  * freeable here, so the page does not leak.
2873                  */
2874                 do_invalidatepage(page, 0);
2875                 unlock_page(page);
2876                 return 0; /* don't care */
2877         }
2878
2879         /*
2880          * The page straddles i_size.  It must be zeroed out on each and every
2881          * writepage invocation because it may be mmapped.  "A file is mapped
2882          * in multiples of the page size.  For a file that is not a multiple of
2883          * the  page size, the remaining memory is zeroed when mapped, and
2884          * writes to that region are not written out to the file."
2885          */
2886         zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2887         return __block_write_full_page(inode, page, get_block, wbc, handler);
2888 }
2889 EXPORT_SYMBOL(block_write_full_page_endio);
2890
2891 /*
2892  * The generic ->writepage function for buffer-backed address_spaces
2893  */
2894 int block_write_full_page(struct page *page, get_block_t *get_block,
2895                         struct writeback_control *wbc)
2896 {
2897         return block_write_full_page_endio(page, get_block, wbc,
2898                                            end_buffer_async_write);
2899 }
2900 EXPORT_SYMBOL(block_write_full_page);
2901
2902 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2903                             get_block_t *get_block)
2904 {
2905         struct buffer_head tmp;
2906         struct inode *inode = mapping->host;
2907         tmp.b_state = 0;
2908         tmp.b_blocknr = 0;
2909         tmp.b_size = 1 << inode->i_blkbits;
2910         get_block(inode, block, &tmp, 0);
2911         return tmp.b_blocknr;
2912 }
2913 EXPORT_SYMBOL(generic_block_bmap);
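/*
 * Usage sketch (illustrative only): the FIBMAP ioctl reaches a
 * filesystem through ->bmap, which can usually be implemented
 * directly on top of generic_block_bmap().  "myfs_get_block" is
 * hypothetical.
 */
#if 0	/* example only */
static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
        return generic_block_bmap(mapping, block, myfs_get_block);
}
#endif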
2914
2915 static void end_bio_bh_io_sync(struct bio *bio, int err)
2916 {
2917         struct buffer_head *bh = bio->bi_private;
2918
2919         if (err == -EOPNOTSUPP) {
2920                 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2921         }
2922
2923         if (unlikely(test_bit(BIO_QUIET, &bio->bi_flags)))
2924                 set_bit(BH_Quiet, &bh->b_state);
2925
2926         bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2927         bio_put(bio);
2928 }
2929
2930 int submit_bh(int rw, struct buffer_head * bh)
2931 {
2932         struct bio *bio;
2933         int ret = 0;
2934
2935         BUG_ON(!buffer_locked(bh));
2936         BUG_ON(!buffer_mapped(bh));
2937         BUG_ON(!bh->b_end_io);
2938         BUG_ON(buffer_delay(bh));
2939         BUG_ON(buffer_unwritten(bh));
2940
2941         /*
2942          * Only clear out a write error when rewriting
2943          */
2944         if (test_set_buffer_req(bh) && (rw & WRITE))
2945                 clear_buffer_write_io_error(bh);
2946
2947         /*
2948          * from here on down, it's all bio -- do the initial mapping,
2949          * submit_bio -> generic_make_request may further map this bio around
2950          */
2951         bio = bio_alloc(GFP_NOIO, 1);
2952
2953         bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2954         bio->bi_bdev = bh->b_bdev;
2955         bio->bi_io_vec[0].bv_page = bh->b_page;
2956         bio->bi_io_vec[0].bv_len = bh->b_size;
2957         bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2958
2959         bio->bi_vcnt = 1;
2960         bio->bi_idx = 0;
2961         bio->bi_size = bh->b_size;
2962
2963         bio->bi_end_io = end_bio_bh_io_sync;
2964         bio->bi_private = bh;
2965
2966         bio_get(bio);
2967         submit_bio(rw, bio);
2968
2969         if (bio_flagged(bio, BIO_EOPNOTSUPP))
2970                 ret = -EOPNOTSUPP;
2971
2972         bio_put(bio);
2973         return ret;
2974 }
2975 EXPORT_SYMBOL(submit_bh);
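/*
 * Usage sketch (illustrative only): submit_bh() requires a locked,
 * mapped buffer with b_end_io set.  A synchronous read built on it
 * looks roughly like this ("myfs_read_buffer_sync" is hypothetical):
 */
#if 0	/* example only */
static int myfs_read_buffer_sync(struct buffer_head *bh)
{
        lock_buffer(bh);
        if (buffer_uptodate(bh)) {
                unlock_buffer(bh);
                return 0;
        }
        get_bh(bh);                     /* dropped by end_buffer_read_sync */
        bh->b_end_io = end_buffer_read_sync;
        submit_bh(READ, bh);
        wait_on_buffer(bh);
        return buffer_uptodate(bh) ? 0 : -EIO;
}
#endif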
2976
2977 /**
2978  * ll_rw_block: low-level access to block devices (DEPRECATED)
2979  * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
2980  * @nr: number of &struct buffer_heads in the array
2981  * @bhs: array of pointers to &struct buffer_head
2982  *
2983  * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2984  * requests an I/O operation on them, either a %READ or a %WRITE.  The third
2985  * %READA option is described in the documentation for generic_make_request()
2986  * which ll_rw_block() calls.
2987  *
2988  * This function drops any buffer that it cannot get a lock on (with the
2989  * BH_Lock state bit), any buffer that appears to be clean when doing a write
2990  * request, and any buffer that appears to be up-to-date when doing a read
2991  * request.  Further it marks as clean those buffers that are processed for
2992  * writing (the buffer cache won't assume that they are actually clean
2993  * until the buffer gets unlocked).
2994  *
2995  * ll_rw_block sets b_end_io to a simple completion handler that marks
2996  * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2997  * any waiters.
2998  *
2999  * All of the buffers must be for the same device, and their sizes must
3000  * be a multiple of the currently approved block size for the device.
3001  */
3002 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
3003 {
3004         int i;
3005
3006         for (i = 0; i < nr; i++) {
3007                 struct buffer_head *bh = bhs[i];
3008
3009                 if (!trylock_buffer(bh))
3010                         continue;
3011                 if (rw == WRITE) {
3012                         if (test_clear_buffer_dirty(bh)) {
3013                                 bh->b_end_io = end_buffer_write_sync;
3014                                 get_bh(bh);
3015                                 submit_bh(WRITE, bh);
3016                                 continue;
3017                         }
3018                 } else {
3019                         if (!buffer_uptodate(bh)) {
3020                                 bh->b_end_io = end_buffer_read_sync;
3021                                 get_bh(bh);
3022                                 submit_bh(rw, bh);
3023                                 continue;
3024                         }
3025                 }
3026                 unlock_buffer(bh);
3027         }
3028 }
3029 EXPORT_SYMBOL(ll_rw_block);
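/*
 * Usage sketch (illustrative only): a common pattern is to kick off
 * readahead on a batch of buffers and then wait only for the one
 * needed immediately.  "myfs_read_with_readahead" is hypothetical.
 */
#if 0	/* example only */
static int myfs_read_with_readahead(struct buffer_head *bhs[], int nr)
{
        if (nr > 1)                     /* opportunistic readahead */
                ll_rw_block(READA, nr - 1, bhs + 1);
        ll_rw_block(READ, 1, &bhs[0]);  /* the block we need right now */
        wait_on_buffer(bhs[0]);
        return buffer_uptodate(bhs[0]) ? 0 : -EIO;
}
#endif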
3030
3031 void write_dirty_buffer(struct buffer_head *bh, int rw)
3032 {
3033         lock_buffer(bh);
3034         if (!test_clear_buffer_dirty(bh)) {
3035                 unlock_buffer(bh);
3036                 return;
3037         }
3038         bh->b_end_io = end_buffer_write_sync;
3039         get_bh(bh);
3040         submit_bh(rw, bh);
3041 }
3042 EXPORT_SYMBOL(write_dirty_buffer);
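/*
 * Usage sketch (illustrative only): write_dirty_buffer() starts the
 * write but does not wait for it, so a caller that needs completion
 * must wait and check the result itself.  "myfs_flush_buffer" is
 * hypothetical.
 */
#if 0	/* example only */
static int myfs_flush_buffer(struct buffer_head *bh)
{
        write_dirty_buffer(bh, WRITE);  /* unlocks immediately if clean */
        wait_on_buffer(bh);
        return buffer_uptodate(bh) ? 0 : -EIO;
}
#endif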
3043
3044 /*
3045  * For a data-integrity writeout, we need to wait upon any in-progress I/O
3046  * and then start new I/O and then wait upon it.  The caller must have a ref on
3047  * the buffer_head.
3048  */
3049 int __sync_dirty_buffer(struct buffer_head *bh, int rw)
3050 {
3051         int ret = 0;
3052
3053         WARN_ON(atomic_read(&bh->b_count) < 1);
3054         lock_buffer(bh);
3055         if (test_clear_buffer_dirty(bh)) {
3056                 get_bh(bh);
3057                 bh->b_end_io = end_buffer_write_sync;
3058                 ret = submit_bh(rw, bh);
3059                 wait_on_buffer(bh);
3060                 if (!ret && !buffer_uptodate(bh))
3061                         ret = -EIO;
3062         } else {
3063                 unlock_buffer(bh);
3064         }
3065         return ret;
3066 }
3067 EXPORT_SYMBOL(__sync_dirty_buffer);
3068
3069 int sync_dirty_buffer(struct buffer_head *bh)
3070 {
3071         return __sync_dirty_buffer(bh, WRITE_SYNC);
3072 }
3073 EXPORT_SYMBOL(sync_dirty_buffer);
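/*
 * Usage sketch (illustrative only): a filesystem that must get a
 * modified metadata buffer onto stable storage before proceeding can
 * pair mark_buffer_dirty() with sync_dirty_buffer().
 * "myfs_update_super" is hypothetical.
 */
#if 0	/* example only */
static int myfs_update_super(struct buffer_head *bh, const void *data,
                             unsigned int off, unsigned int len)
{
        memcpy(bh->b_data + off, data, len);
        mark_buffer_dirty(bh);
        /* waits for any in-flight I/O, then writes the buffer and waits */
        return sync_dirty_buffer(bh);
}
#endif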
3074
3075 /*
3076  * try_to_free_buffers() checks if all the buffers on this particular page
3077  * are unused, and releases them if so.
3078  *
3079  * Exclusion against try_to_free_buffers may be obtained by either
3080  * locking the page or by holding its mapping's private_lock.
3081  *
3082  * If the page is dirty but all the buffers are clean then we need to
3083  * be sure to mark the page clean as well.  This is because the page
3084  * may be against a block device, and a later reattachment of buffers
3085  * to a dirty page will set *all* buffers dirty.  Which would corrupt
3086  * filesystem data on the same device.
3087  *
3088  * The same applies to regular filesystem pages: if all the buffers are
3089  * clean then we set the page clean and proceed.  To do that, we require
3090  * total exclusion from __set_page_dirty_buffers().  That is obtained with
3091  * private_lock.
3092  *
3093  * try_to_free_buffers() is non-blocking.
3094  */
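/* Nonzero if the buffer is referenced, dirty or locked */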
3095 static inline int buffer_busy(struct buffer_head *bh)
3096 {
3097         return atomic_read(&bh->b_count) |
3098                 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
3099 }
3100
3101 static int
3102 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
3103 {
3104         struct buffer_head *head = page_buffers(page);
3105         struct buffer_head *bh;
3106
3107         bh = head;
3108         do {
3109                 if (buffer_write_io_error(bh) && page->mapping)
3110                         set_bit(AS_EIO, &page->mapping->flags);
3111                 if (buffer_busy(bh))
3112                         goto failed;
3113                 bh = bh->b_this_page;
3114         } while (bh != head);
3115
3116         do {
3117                 struct buffer_head *next = bh->b_this_page;
3118
3119                 if (bh->b_assoc_map)
3120                         __remove_assoc_queue(bh);
3121                 bh = next;
3122         } while (bh != head);
3123         *buffers_to_free = head;
3124         __clear_page_buffers(page);
3125         return 1;
3126 failed:
3127         return 0;
3128 }
3129
3130 int try_to_free_buffers(struct page *page)
3131 {
3132         struct address_space * const mapping = page->mapping;
3133         struct buffer_head *buffers_to_free = NULL;
3134         int ret = 0;
3135
3136         BUG_ON(!PageLocked(page));
3137         if (PageWriteback(page))
3138                 return 0;
3139
3140         if (mapping == NULL) {          /* can this still happen? */
3141                 ret = drop_buffers(page, &buffers_to_free);
3142                 goto out;
3143         }
3144
3145         spin_lock(&mapping->private_lock);
3146         ret = drop_buffers(page, &buffers_to_free);
3147
3148         /*
3149          * If the filesystem writes its buffers by hand (eg ext3)
3150          * then we can have clean buffers against a dirty page.  We
3151          * clean the page here; otherwise the VM will never notice
3152          * that the filesystem did any IO at all.
3153          *
3154          * Also, during truncate, discard_buffer will have marked all
3155          * the page's buffers clean.  We discover that here and clean
3156          * the page also.
3157          *
3158          * private_lock must be held over this entire operation in order
3159          * to synchronise against __set_page_dirty_buffers and prevent the
3160          * dirty bit from being lost.
3161          */
3162         if (ret)
3163                 cancel_dirty_page(page, PAGE_CACHE_SIZE);
3164         spin_unlock(&mapping->private_lock);
3165 out:
3166         if (buffers_to_free) {
3167                 struct buffer_head *bh = buffers_to_free;
3168
3169                 do {
3170                         struct buffer_head *next = bh->b_this_page;
3171                         free_buffer_head(bh);
3172                         bh = next;
3173                 } while (bh != buffers_to_free);
3174         }
3175         return ret;
3176 }
3177 EXPORT_SYMBOL(try_to_free_buffers);
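/*
 * Usage sketch (illustrative only): filesystems without special
 * buffer bookkeeping can implement ->releasepage as a trivial
 * wrapper.  "myfs_releasepage" is hypothetical.
 */
#if 0	/* example only */
static int myfs_releasepage(struct page *page, gfp_t gfp_mask)
{
        /* frees the page's buffer_heads iff none of them are busy */
        return try_to_free_buffers(page);
}
#endif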
3178
3179 /*
3180  * There are no bdflush tunables left.  But distributions are
3181  * still running obsolete flush daemons, so we terminate them here.
3182  *
3183  * Use of bdflush() is deprecated and will be removed in a future kernel.
3184  * The `flush-X' kernel threads fully replace bdflush daemons and this call.
3185  */
3186 SYSCALL_DEFINE2(bdflush, int, func, long, data)
3187 {
3188         static int msg_count;
3189
3190         if (!capable(CAP_SYS_ADMIN))
3191                 return -EPERM;
3192
3193         if (msg_count < 5) {
3194                 msg_count++;
3195                 printk(KERN_INFO
3196                         "warning: process `%s' used the obsolete bdflush"
3197                         " system call\n", current->comm);
3198                 printk(KERN_INFO "Fix your initscripts?\n");
3199         }
3200
3201         if (func == 1)
3202                 do_exit(0);
3203         return 0;
3204 }
3205
3206 /*
3207  * Buffer-head allocation
3208  */
3209 static struct kmem_cache *bh_cachep;
3210
3211 /*
3212  * Once the number of bh's in the machine exceeds this level, we start
3213  * stripping them in writeback.
3214  */
3215 static int max_buffer_heads;
3216
3217 int buffer_heads_over_limit;
3218
3219 struct bh_accounting {
3220         int nr;                 /* Number of live bh's */
3221         int ratelimit;          /* Limit cacheline bouncing */
3222 };
3223
3224 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3225
3226 static void recalc_bh_state(void)
3227 {
3228         int i;
3229         int tot = 0;
3230
3231         if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
3232                 return;
3233         __this_cpu_write(bh_accounting.ratelimit, 0);
3234         for_each_online_cpu(i)
3235                 tot += per_cpu(bh_accounting, i).nr;
3236         buffer_heads_over_limit = (tot > max_buffer_heads);
3237 }
3238
3239 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3240 {
3241         struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
3242         if (ret) {
3243                 INIT_LIST_HEAD(&ret->b_assoc_buffers);
3244                 preempt_disable();
3245                 __this_cpu_inc(bh_accounting.nr);
3246                 recalc_bh_state();
3247                 preempt_enable();
3248         }
3249         return ret;
3250 }
3251 EXPORT_SYMBOL(alloc_buffer_head);
3252
3253 void free_buffer_head(struct buffer_head *bh)
3254 {
3255         BUG_ON(!list_empty(&bh->b_assoc_buffers));
3256         kmem_cache_free(bh_cachep, bh);
3257         preempt_disable();
3258         __this_cpu_dec(bh_accounting.nr);
3259         recalc_bh_state();
3260         preempt_enable();
3261 }
3262 EXPORT_SYMBOL(free_buffer_head);
3263
3264 static void buffer_exit_cpu(int cpu)
3265 {
3266         int i;
3267         struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3268
3269         for (i = 0; i < BH_LRU_SIZE; i++) {
3270                 brelse(b->bhs[i]);
3271                 b->bhs[i] = NULL;
3272         }
3273         this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
3274         per_cpu(bh_accounting, cpu).nr = 0;
3275 }
3276
3277 static int buffer_cpu_notify(struct notifier_block *self,
3278                               unsigned long action, void *hcpu)
3279 {
3280         if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
3281                 buffer_exit_cpu((unsigned long)hcpu);
3282         return NOTIFY_OK;
3283 }
3284
3285 /**
3286  * bh_uptodate_or_lock - Test whether the buffer is uptodate
3287  * @bh: struct buffer_head
3288  *
3289  * Returns 1 if the buffer is up-to-date; otherwise returns 0
3290  * with the buffer locked.
3291  */
3292 int bh_uptodate_or_lock(struct buffer_head *bh)
3293 {
3294         if (!buffer_uptodate(bh)) {
3295                 lock_buffer(bh);
3296                 if (!buffer_uptodate(bh))
3297                         return 0;
3298                 unlock_buffer(bh);
3299         }
3300         return 1;
3301 }
3302 EXPORT_SYMBOL(bh_uptodate_or_lock);
3303
3304 /**
3305  * bh_submit_read - Submit a locked buffer for reading
3306  * @bh: struct buffer_head
3307  *
3308  * The buffer must be locked.  Returns zero on success or -EIO on error.
3309  */
3310 int bh_submit_read(struct buffer_head *bh)
3311 {
3312         BUG_ON(!buffer_locked(bh));
3313
3314         if (buffer_uptodate(bh)) {
3315                 unlock_buffer(bh);
3316                 return 0;
3317         }
3318
3319         get_bh(bh);
3320         bh->b_end_io = end_buffer_read_sync;
3321         submit_bh(READ, bh);
3322         wait_on_buffer(bh);
3323         if (buffer_uptodate(bh))
3324                 return 0;
3325         return -EIO;
3326 }
3327 EXPORT_SYMBOL(bh_submit_read);
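/*
 * Usage sketch (illustrative only): bh_uptodate_or_lock() and
 * bh_submit_read() are meant to be used together, giving a cheap
 * fast path when the buffer is already up-to-date.
 * "myfs_ensure_uptodate" is hypothetical.
 */
#if 0	/* example only */
static int myfs_ensure_uptodate(struct buffer_head *bh)
{
        if (bh_uptodate_or_lock(bh))
                return 0;               /* already up-to-date */
        /* bh is locked here; read it in and wait for completion */
        return bh_submit_read(bh);
}
#endif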
3328
3329 void __init buffer_init(void)
3330 {
3331         int nrpages;
3332
3333         bh_cachep = kmem_cache_create("buffer_head",
3334                         sizeof(struct buffer_head), 0,
3335                                 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3336                                 SLAB_MEM_SPREAD),
3337                                 NULL);
3338
3339         /*
3340          * Limit the bh occupancy to 10% of ZONE_NORMAL
3341          */
3342         nrpages = (nr_free_buffer_pages() * 10) / 100;
3343         max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3344         hotcpu_notifier(buffer_cpu_notify, 0);
3345 }