1 /*
2  *  linux/fs/buffer.c
3  *
4  *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
5  */
6
7 /*
8  * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
9  *
10  * Removed a lot of unnecessary code and simplified things now that
11  * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
12  *
13  * Speed up hash, lru, and free list operations.  Use gfp() for allocating
14  * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
15  *
16  * Added 32k buffer block sizes - these are required on older ARM systems. - RMK
17  *
18  * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
19  */
20
21 #include <linux/kernel.h>
22 #include <linux/syscalls.h>
23 #include <linux/fs.h>
24 #include <linux/mm.h>
25 #include <linux/percpu.h>
26 #include <linux/slab.h>
27 #include <linux/capability.h>
28 #include <linux/blkdev.h>
29 #include <linux/file.h>
30 #include <linux/quotaops.h>
31 #include <linux/highmem.h>
32 #include <linux/module.h>
33 #include <linux/writeback.h>
34 #include <linux/hash.h>
35 #include <linux/suspend.h>
36 #include <linux/buffer_head.h>
37 #include <linux/task_io_accounting_ops.h>
38 #include <linux/bio.h>
39 #include <linux/notifier.h>
40 #include <linux/cpu.h>
41 #include <linux/bitops.h>
42 #include <linux/mpage.h>
43 #include <linux/bit_spinlock.h>
44
45 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
46
47 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
48
49 inline void
50 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
51 {
52         bh->b_end_io = handler;
53         bh->b_private = private;
54 }
55 EXPORT_SYMBOL(init_buffer);
56
57 static int sync_buffer(void *word)
58 {
59         struct block_device *bd;
60         struct buffer_head *bh
61                 = container_of(word, struct buffer_head, b_state);
62
63         smp_mb();
64         bd = bh->b_bdev;
65         if (bd)
66                 blk_run_address_space(bd->bd_inode->i_mapping);
67         io_schedule();
68         return 0;
69 }
70
71 void __lock_buffer(struct buffer_head *bh)
72 {
73         wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
74                                                         TASK_UNINTERRUPTIBLE);
75 }
76 EXPORT_SYMBOL(__lock_buffer);
77
78 void unlock_buffer(struct buffer_head *bh)
79 {
80         clear_bit_unlock(BH_Lock, &bh->b_state);
81         smp_mb__after_clear_bit();
82         wake_up_bit(&bh->b_state, BH_Lock);
83 }
84 EXPORT_SYMBOL(unlock_buffer);
85
86 /*
87  * Block until a buffer comes unlocked.  This doesn't stop it
88  * from becoming locked again - you have to lock it yourself
89  * if you want to preserve its state.
90  */
91 void __wait_on_buffer(struct buffer_head * bh)
92 {
93         wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
94 }
95 EXPORT_SYMBOL(__wait_on_buffer);
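
/*
 * Example (a minimal sketch): waiting alone does not keep the buffer
 * unlocked, so a caller that needs the buffer contents to stay stable must
 * take the lock itself rather than merely wait for it to drop:
 *
 *	lock_buffer(bh);	- sleeps in __lock_buffer() if contended
 *	...examine or modify bh->b_data...
 *	unlock_buffer(bh);	- wakes anyone sleeping in __wait_on_buffer()
 */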
96
97 static void
98 __clear_page_buffers(struct page *page)
99 {
100         ClearPagePrivate(page);
101         set_page_private(page, 0);
102         page_cache_release(page);
103 }
104
105
106 static int quiet_error(struct buffer_head *bh)
107 {
108         if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
109                 return 0;
110         return 1;
111 }
112
113
114 static void buffer_io_error(struct buffer_head *bh)
115 {
116         char b[BDEVNAME_SIZE];
117         printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
118                         bdevname(bh->b_bdev, b),
119                         (unsigned long long)bh->b_blocknr);
120 }
121
122 /*
123  * End-of-IO handler helper function which does not touch the bh after
124  * unlocking it.
125  * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
126  * a race there is benign: unlock_buffer() only uses the bh's address for
127  * hashing after unlocking the buffer, so it doesn't actually touch the bh
128  * itself.
129  */
130 static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
131 {
132         if (uptodate) {
133                 set_buffer_uptodate(bh);
134         } else {
135                 /* This happens, due to failed READA attempts. */
136                 clear_buffer_uptodate(bh);
137         }
138         unlock_buffer(bh);
139 }
140
141 /*
142  * Default synchronous end-of-IO handler..  Just mark it up-to-date and
143  * unlock the buffer. This is what ll_rw_block uses too.
144  */
145 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
146 {
147         __end_buffer_read_notouch(bh, uptodate);
148         put_bh(bh);
149 }
150 EXPORT_SYMBOL(end_buffer_read_sync);
151
152 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
153 {
154         char b[BDEVNAME_SIZE];
155
156         if (uptodate) {
157                 set_buffer_uptodate(bh);
158         } else {
159                 if (!quiet_error(bh)) {
160                         buffer_io_error(bh);
161                         printk(KERN_WARNING "lost page write due to "
162                                         "I/O error on %s\n",
163                                        bdevname(bh->b_bdev, b));
164                 }
165                 set_buffer_write_io_error(bh);
166                 clear_buffer_uptodate(bh);
167         }
168         unlock_buffer(bh);
169         put_bh(bh);
170 }
171 EXPORT_SYMBOL(end_buffer_write_sync);
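
/*
 * Sketch of a synchronous write of a single dirty buffer using this
 * completion handler (roughly the pattern sync_dirty_buffer() follows;
 * error handling is elided here):
 *
 *	lock_buffer(bh);
 *	if (test_clear_buffer_dirty(bh)) {
 *		get_bh(bh);
 *		bh->b_end_io = end_buffer_write_sync;
 *		submit_bh(WRITE, bh);
 *		wait_on_buffer(bh);
 *	} else {
 *		unlock_buffer(bh);
 *	}
 */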
172
173 /*
174  * Various filesystems appear to want __find_get_block to be non-blocking.
175  * But it's the page lock which protects the buffers.  To get around this,
176  * we get exclusion from try_to_free_buffers with the blockdev mapping's
177  * private_lock.
178  *
179  * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
180  * may be quite high.  This code could TryLock the page, and if that
181  * succeeds, there is no need to take private_lock. (But if
182  * private_lock is contended then so is mapping->tree_lock).
183  */
184 static struct buffer_head *
185 __find_get_block_slow(struct block_device *bdev, sector_t block)
186 {
187         struct inode *bd_inode = bdev->bd_inode;
188         struct address_space *bd_mapping = bd_inode->i_mapping;
189         struct buffer_head *ret = NULL;
190         pgoff_t index;
191         struct buffer_head *bh;
192         struct buffer_head *head;
193         struct page *page;
194         int all_mapped = 1;
195
196         index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
197         page = find_get_page(bd_mapping, index);
198         if (!page)
199                 goto out;
200
201         spin_lock(&bd_mapping->private_lock);
202         if (!page_has_buffers(page))
203                 goto out_unlock;
204         head = page_buffers(page);
205         bh = head;
206         do {
207                 if (!buffer_mapped(bh))
208                         all_mapped = 0;
209                 else if (bh->b_blocknr == block) {
210                         ret = bh;
211                         get_bh(bh);
212                         goto out_unlock;
213                 }
214                 bh = bh->b_this_page;
215         } while (bh != head);
216
217         /* we might be here because some of the buffers on this page are
218          * not mapped.  This is due to various races between
219          * file io on the block device and getblk.  It gets dealt with
220          * elsewhere, don't buffer_error if we had some unmapped buffers
221          */
222         if (all_mapped) {
223                 printk("__find_get_block_slow() failed. "
224                         "block=%llu, b_blocknr=%llu\n",
225                         (unsigned long long)block,
226                         (unsigned long long)bh->b_blocknr);
227                 printk("b_state=0x%08lx, b_size=%zu\n",
228                         bh->b_state, bh->b_size);
229                 printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
230         }
231 out_unlock:
232         spin_unlock(&bd_mapping->private_lock);
233         page_cache_release(page);
234 out:
235         return ret;
236 }
237
238 /* If invalidate_buffers() trashes dirty buffers, it means some kind
239    of fs corruption is going on. Trashing dirty data always implies losing
240    information that was supposed to be just stored on the physical layer
241    by the user.
242
243    Thus invalidate_buffers in general usage is not allowed to trash
244    dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
245    be preserved.  These buffers are simply skipped.
246
247    We also skip buffers which are still in use.  For example this can
248    happen if a userspace program is reading the block device.
249
250    NOTE: In the case where the user removed a removable-media disk, even if
251    there's still dirty data not synced on disk (due to a bug in the device
252    driver or due to an error of the user), by not destroying the dirty buffers
253    we could generate corruption also on the next media inserted; thus a
254    parameter is necessary to handle this case in the safest way possible
255    (trying not to corrupt the newly inserted disk with data belonging to
256    the old, now corrupted, disk). Also, for a ramdisk the natural thing
257    to do in order to release the ramdisk memory is to destroy dirty buffers.
258
259    These are two special cases. Normal usage implies that the device driver
260    issues a sync on the device (without waiting for I/O completion) and
261    then an invalidate_buffers call that doesn't trash dirty buffers.
262
263    For handling cache coherency with the blkdev pagecache the 'update' case
264    has been introduced. It is needed to re-read from disk any pinned
265    buffer. NOTE: re-reading from disk is destructive so we can do it only
266    when we assume nobody is changing the buffercache under our I/O and when
267    we think the disk contains more recent information than the buffercache.
268    The update == 1 pass marks the buffers we need to update, the update == 2
269    pass does the actual I/O. */
270 void invalidate_bdev(struct block_device *bdev)
271 {
272         struct address_space *mapping = bdev->bd_inode->i_mapping;
273
274         if (mapping->nrpages == 0)
275                 return;
276
277         invalidate_bh_lrus();
278         lru_add_drain_all();    /* make sure all lru add caches are flushed */
279         invalidate_mapping_pages(mapping, 0, -1);
280 }
281 EXPORT_SYMBOL(invalidate_bdev);
282
283 /*
284  * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
285  */
286 static void free_more_memory(void)
287 {
288         struct zone *zone;
289         int nid;
290
291         wakeup_flusher_threads(1024);
292         yield();
293
294         for_each_online_node(nid) {
295                 (void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
296                                                 gfp_zone(GFP_NOFS), NULL,
297                                                 &zone);
298                 if (zone)
299                         try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
300                                                 GFP_NOFS, NULL);
301         }
302 }
303
304 /*
305  * I/O completion handler for block_read_full_page() - pages
306  * which come unlocked at the end of I/O.
307  */
308 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
309 {
310         unsigned long flags;
311         struct buffer_head *first;
312         struct buffer_head *tmp;
313         struct page *page;
314         int page_uptodate = 1;
315
316         BUG_ON(!buffer_async_read(bh));
317
318         page = bh->b_page;
319         if (uptodate) {
320                 set_buffer_uptodate(bh);
321         } else {
322                 clear_buffer_uptodate(bh);
323                 if (!quiet_error(bh))
324                         buffer_io_error(bh);
325                 SetPageError(page);
326         }
327
328         /*
329          * Be _very_ careful from here on. Bad things can happen if
330          * two buffer heads end IO at almost the same time and both
331          * decide that the page is now completely done.
332          */
333         first = page_buffers(page);
334         local_irq_save(flags);
335         bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
336         clear_buffer_async_read(bh);
337         unlock_buffer(bh);
338         tmp = bh;
339         do {
340                 if (!buffer_uptodate(tmp))
341                         page_uptodate = 0;
342                 if (buffer_async_read(tmp)) {
343                         BUG_ON(!buffer_locked(tmp));
344                         goto still_busy;
345                 }
346                 tmp = tmp->b_this_page;
347         } while (tmp != bh);
348         bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
349         local_irq_restore(flags);
350
351         /*
352          * If none of the buffers had errors and they are all
353          * uptodate then we can set the page uptodate.
354          */
355         if (page_uptodate && !PageError(page))
356                 SetPageUptodate(page);
357         unlock_page(page);
358         return;
359
360 still_busy:
361         bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
362         local_irq_restore(flags);
363         return;
364 }
365
366 /*
367  * Completion handler for block_write_full_page() - pages which are unlocked
368  * during I/O, and which have PageWriteback cleared upon I/O completion.
369  */
370 void end_buffer_async_write(struct buffer_head *bh, int uptodate)
371 {
372         char b[BDEVNAME_SIZE];
373         unsigned long flags;
374         struct buffer_head *first;
375         struct buffer_head *tmp;
376         struct page *page;
377
378         BUG_ON(!buffer_async_write(bh));
379
380         page = bh->b_page;
381         if (uptodate) {
382                 set_buffer_uptodate(bh);
383         } else {
384                 if (!quiet_error(bh)) {
385                         buffer_io_error(bh);
386                         printk(KERN_WARNING "lost page write due to "
387                                         "I/O error on %s\n",
388                                bdevname(bh->b_bdev, b));
389                 }
390                 set_bit(AS_EIO, &page->mapping->flags);
391                 set_buffer_write_io_error(bh);
392                 clear_buffer_uptodate(bh);
393                 SetPageError(page);
394         }
395
396         first = page_buffers(page);
397         local_irq_save(flags);
398         bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
399
400         clear_buffer_async_write(bh);
401         unlock_buffer(bh);
402         tmp = bh->b_this_page;
403         while (tmp != bh) {
404                 if (buffer_async_write(tmp)) {
405                         BUG_ON(!buffer_locked(tmp));
406                         goto still_busy;
407                 }
408                 tmp = tmp->b_this_page;
409         }
410         bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
411         local_irq_restore(flags);
412         end_page_writeback(page);
413         return;
414
415 still_busy:
416         bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
417         local_irq_restore(flags);
418         return;
419 }
420 EXPORT_SYMBOL(end_buffer_async_write);
421
422 /*
423  * If a page's buffers are under async read-in (end_buffer_async_read
424  * completion) then there is a possibility that another thread of
425  * control could lock one of the buffers after it has completed
426  * but while some of the other buffers have not completed.  This
427  * locked buffer would confuse end_buffer_async_read() into not unlocking
428  * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
429  * that this buffer is not under async I/O.
430  *
431  * The page comes unlocked when it has no locked buffer_async buffers
432  * left.
433  *
434  * PageLocked prevents anyone from starting new async I/O against any of
435  * the buffers.
436  *
437  * PageWriteback is used to prevent simultaneous writeout of the same
438  * page.
439  *
440  * PageLocked prevents anyone from starting writeback of a page which is
441  * under read I/O (PageWriteback is only ever set against a locked page).
442  */
443 static void mark_buffer_async_read(struct buffer_head *bh)
444 {
445         bh->b_end_io = end_buffer_async_read;
446         set_buffer_async_read(bh);
447 }
448
449 static void mark_buffer_async_write_endio(struct buffer_head *bh,
450                                           bh_end_io_t *handler)
451 {
452         bh->b_end_io = handler;
453         set_buffer_async_write(bh);
454 }
455
456 void mark_buffer_async_write(struct buffer_head *bh)
457 {
458         mark_buffer_async_write_endio(bh, end_buffer_async_write);
459 }
460 EXPORT_SYMBOL(mark_buffer_async_write);
461
462
463 /*
464  * fs/buffer.c contains helper functions for buffer-backed address_spaces'
465  * fsync functions.  A common requirement for buffer-based filesystems is
466  * that certain data from the backing blockdev needs to be written out for
467  * a successful fsync().  For example, ext2 indirect blocks need to be
468  * written back and waited upon before fsync() returns.
469  *
470  * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
471  * inode_has_buffers() and invalidate_inode_buffers() are provided for the
472  * management of a list of dependent buffers at ->i_mapping->private_list.
473  *
474  * Locking is a little subtle: try_to_free_buffers() will remove buffers
475  * from their controlling inode's queue when they are being freed.  But
476  * try_to_free_buffers() will be operating against the *blockdev* mapping
477  * at the time, not against the S_ISREG file which depends on those buffers.
478  * So the locking for private_list is via the private_lock in the address_space
479  * which backs the buffers.  Which is different from the address_space 
480  * against which the buffers are listed.  So for a particular address_space,
481  * mapping->private_lock does *not* protect mapping->private_list!  In fact,
482  * mapping->private_list will always be protected by the backing blockdev's
483  * ->private_lock.
484  *
485  * Which introduces a requirement: all buffers on an address_space's
486  * ->private_list must be from the same address_space: the blockdev's.
487  *
488  * address_spaces which do not place buffers at ->private_list via these
489  * utility functions are free to use private_lock and private_list for
490  * whatever they want.  The only requirement is that list_empty(private_list)
491  * be true at clear_inode() time.
492  *
493  * FIXME: clear_inode should not call invalidate_inode_buffers().  The
494  * filesystems should do that.  invalidate_inode_buffers() should just go
495  * BUG_ON(!list_empty).
496  *
497  * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
498  * take an address_space, not an inode.  And it should be called
499  * mark_buffer_dirty_fsync() to clearly define why those buffers are being
500  * queued up.
501  *
502  * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
503  * list if it is already on a list.  Because if the buffer is on a list,
504  * it *must* already be on the right one.  If not, the filesystem is being
505  * silly.  This will save a ton of locking.  But first we have to ensure
506  * that buffers are taken *off* the old inode's list when they are freed
507  * (presumably in truncate).  That requires careful auditing of all
508  * filesystems (do it inside bforget()).  It could also be done by bringing
509  * b_inode back.
510  */
511
512 /*
513  * The buffer's backing address_space's private_lock must be held
514  */
515 static void __remove_assoc_queue(struct buffer_head *bh)
516 {
517         list_del_init(&bh->b_assoc_buffers);
518         WARN_ON(!bh->b_assoc_map);
519         if (buffer_write_io_error(bh))
520                 set_bit(AS_EIO, &bh->b_assoc_map->flags);
521         bh->b_assoc_map = NULL;
522 }
523
524 int inode_has_buffers(struct inode *inode)
525 {
526         return !list_empty(&inode->i_data.private_list);
527 }
528
529 /*
530  * osync is designed to support O_SYNC io.  It waits synchronously for
531  * all already-submitted IO to complete, but does not queue any new
532  * writes to the disk.
533  *
534  * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
535  * you dirty the buffers, and then use osync_inode_buffers to wait for
536  * completion.  Any other dirty buffers which are not yet queued for
537  * write will not be flushed to disk by the osync.
538  */
539 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
540 {
541         struct buffer_head *bh;
542         struct list_head *p;
543         int err = 0;
544
545         spin_lock(lock);
546 repeat:
547         list_for_each_prev(p, list) {
548                 bh = BH_ENTRY(p);
549                 if (buffer_locked(bh)) {
550                         get_bh(bh);
551                         spin_unlock(lock);
552                         wait_on_buffer(bh);
553                         if (!buffer_uptodate(bh))
554                                 err = -EIO;
555                         brelse(bh);
556                         spin_lock(lock);
557                         goto repeat;
558                 }
559         }
560         spin_unlock(lock);
561         return err;
562 }
563
564 static void do_thaw_one(struct super_block *sb, void *unused)
565 {
566         char b[BDEVNAME_SIZE];
567         while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
568                 printk(KERN_WARNING "Emergency Thaw on %s\n",
569                        bdevname(sb->s_bdev, b));
570 }
571
572 static void do_thaw_all(struct work_struct *work)
573 {
574         iterate_supers(do_thaw_one, NULL);
575         kfree(work);
576         printk(KERN_WARNING "Emergency Thaw complete\n");
577 }
578
579 /**
580  * emergency_thaw_all -- forcibly thaw every frozen filesystem
581  *
582  * Used for emergency unfreeze of all filesystems via SysRq
583  */
584 void emergency_thaw_all(void)
585 {
586         struct work_struct *work;
587
588         work = kmalloc(sizeof(*work), GFP_ATOMIC);
589         if (work) {
590                 INIT_WORK(work, do_thaw_all);
591                 schedule_work(work);
592         }
593 }
594
595 /**
596  * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
597  * @mapping: the mapping which wants those buffers written
598  *
599  * Starts I/O against the buffers at mapping->private_list, and waits upon
600  * that I/O.
601  *
602  * Basically, this is a convenience function for fsync().
603  * @mapping is a file or directory which needs those buffers to be written for
604  * a successful fsync().
605  */
606 int sync_mapping_buffers(struct address_space *mapping)
607 {
608         struct address_space *buffer_mapping = mapping->assoc_mapping;
609
610         if (buffer_mapping == NULL || list_empty(&mapping->private_list))
611                 return 0;
612
613         return fsync_buffers_list(&buffer_mapping->private_lock,
614                                         &mapping->private_list);
615 }
616 EXPORT_SYMBOL(sync_mapping_buffers);
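
/*
 * Usage sketch (the function name below is illustrative): a simple
 * filesystem fsync typically writes and waits on the data pages first,
 * then calls sync_mapping_buffers() to flush whatever metadata buffers
 * were queued on ->private_list via mark_buffer_dirty_inode():
 *
 *	static int example_fsync(struct file *file, int datasync)
 *	{
 *		struct inode *inode = file->f_mapping->host;
 *		int err, ret;
 *
 *		ret = filemap_write_and_wait(inode->i_mapping);
 *		err = sync_mapping_buffers(inode->i_mapping);
 *		if (!ret)
 *			ret = err;
 *		return ret;
 *	}
 */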
617
618 /*
619  * Called when we've recently written block `bblock', and it is known that
620  * `bblock' was for a buffer_boundary() buffer.  This means that the block at
621  * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
622  * dirty, schedule it for IO.  So that indirects merge nicely with their data.
623  */
624 void write_boundary_block(struct block_device *bdev,
625                         sector_t bblock, unsigned blocksize)
626 {
627         struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
628         if (bh) {
629                 if (buffer_dirty(bh))
630                         ll_rw_block(WRITE, 1, &bh);
631                 put_bh(bh);
632         }
633 }
634
635 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
636 {
637         struct address_space *mapping = inode->i_mapping;
638         struct address_space *buffer_mapping = bh->b_page->mapping;
639
640         mark_buffer_dirty(bh);
641         if (!mapping->assoc_mapping) {
642                 mapping->assoc_mapping = buffer_mapping;
643         } else {
644                 BUG_ON(mapping->assoc_mapping != buffer_mapping);
645         }
646         if (!bh->b_assoc_map) {
647                 spin_lock(&buffer_mapping->private_lock);
648                 list_move_tail(&bh->b_assoc_buffers,
649                                 &mapping->private_list);
650                 bh->b_assoc_map = mapping;
651                 spin_unlock(&buffer_mapping->private_lock);
652         }
653 }
654 EXPORT_SYMBOL(mark_buffer_dirty_inode);
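
/*
 * Usage sketch (the block number and inode are illustrative): a filesystem
 * which has just updated an indirect block for @inode queues it on the
 * inode's ->private_list so that a later sync_mapping_buffers() in fsync()
 * will write and wait on it:
 *
 *	bh = sb_getblk(inode->i_sb, indirect_blocknr);
 *	...update the block pointers in bh->b_data...
 *	mark_buffer_dirty_inode(bh, inode);
 *	brelse(bh);
 */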
655
656 /*
657  * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
658  * dirty.
659  *
660  * If warn is true, then emit a warning if the page is not uptodate and has
661  * not been truncated.
662  */
663 static void __set_page_dirty(struct page *page,
664                 struct address_space *mapping, int warn)
665 {
666         spin_lock_irq(&mapping->tree_lock);
667         if (page->mapping) {    /* Race with truncate? */
668                 WARN_ON_ONCE(warn && !PageUptodate(page));
669                 account_page_dirtied(page, mapping);
670                 radix_tree_tag_set(&mapping->page_tree,
671                                 page_index(page), PAGECACHE_TAG_DIRTY);
672         }
673         spin_unlock_irq(&mapping->tree_lock);
674         __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
675 }
676
677 /*
678  * Add a page to the dirty page list.
679  *
680  * It is a sad fact of life that this function is called from several places
681  * deeply under spinlocking.  It may not sleep.
682  *
683  * If the page has buffers, the uptodate buffers are set dirty, to preserve
684  * dirty-state coherency between the page and the buffers.  If the page does
685  * not have buffers then when they are later attached they will all be set
686  * dirty.
687  *
688  * The buffers are dirtied before the page is dirtied.  There's a small race
689  * window in which a writepage caller may see the page cleanness but not the
690  * buffer dirtiness.  That's fine.  If this code were to set the page dirty
691  * before the buffers, a concurrent writepage caller could clear the page dirty
692  * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
693  * page on the dirty page list.
694  *
695  * We use private_lock to lock against try_to_free_buffers while using the
696  * page's buffer list.  Also use this to protect against clean buffers being
697  * added to the page after it was set dirty.
698  *
699  * FIXME: may need to call ->reservepage here as well.  That's rather up to the
700  * address_space though.
701  */
702 int __set_page_dirty_buffers(struct page *page)
703 {
704         int newly_dirty;
705         struct address_space *mapping = page_mapping(page);
706
707         if (unlikely(!mapping))
708                 return !TestSetPageDirty(page);
709
710         spin_lock(&mapping->private_lock);
711         if (page_has_buffers(page)) {
712                 struct buffer_head *head = page_buffers(page);
713                 struct buffer_head *bh = head;
714
715                 do {
716                         set_buffer_dirty(bh);
717                         bh = bh->b_this_page;
718                 } while (bh != head);
719         }
720         newly_dirty = !TestSetPageDirty(page);
721         spin_unlock(&mapping->private_lock);
722
723         if (newly_dirty)
724                 __set_page_dirty(page, mapping, 1);
725         return newly_dirty;
726 }
727 EXPORT_SYMBOL(__set_page_dirty_buffers);
728
729 /*
730  * Write out and wait upon a list of buffers.
731  *
732  * We have conflicting pressures: we want to make sure that all
733  * initially dirty buffers get waited on, but that any subsequently
734  * dirtied buffers don't.  After all, we don't want fsync to last
735  * forever if somebody is actively writing to the file.
736  *
737  * Do this in two main stages: first we copy dirty buffers to a
738  * temporary inode list, queueing the writes as we go.  Then we clean
739  * up, waiting for those writes to complete.
740  * 
741  * During this second stage, any subsequent updates to the file may end
742  * up refiling the buffer on the original inode's dirty list again, so
743  * there is a chance we will end up with a buffer queued for write but
744  * not yet completed on that list.  So, as a final cleanup we go through
745  * the osync code to catch these locked, dirty buffers without requeuing
746  * any newly dirty buffers for write.
747  */
748 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
749 {
750         struct buffer_head *bh;
751         struct list_head tmp;
752         struct address_space *mapping, *prev_mapping = NULL;
753         int err = 0, err2;
754
755         INIT_LIST_HEAD(&tmp);
756
757         spin_lock(lock);
758         while (!list_empty(list)) {
759                 bh = BH_ENTRY(list->next);
760                 mapping = bh->b_assoc_map;
761                 __remove_assoc_queue(bh);
762                 /* Avoid race with mark_buffer_dirty_inode() which does
763                  * a lockless check and we rely on seeing the dirty bit */
764                 smp_mb();
765                 if (buffer_dirty(bh) || buffer_locked(bh)) {
766                         list_add(&bh->b_assoc_buffers, &tmp);
767                         bh->b_assoc_map = mapping;
768                         if (buffer_dirty(bh)) {
769                                 get_bh(bh);
770                                 spin_unlock(lock);
771                                 /*
772                                  * Ensure any pending I/O completes so that
773                                  * write_dirty_buffer() actually writes the
774                                  * current contents - it is a noop if I/O is
775                                  * still in flight on potentially older
776                                  * contents.
777                                  */
778                                 write_dirty_buffer(bh, WRITE_SYNC_PLUG);
779
780                                 /*
781                                  * Kick off IO for the previous mapping. Note
782                                  * that we will not run the very last mapping,
783                                  * wait_on_buffer() will do that for us
784                                  * through sync_buffer().
785                                  */
786                                 if (prev_mapping && prev_mapping != mapping)
787                                         blk_run_address_space(prev_mapping);
788                                 prev_mapping = mapping;
789
790                                 brelse(bh);
791                                 spin_lock(lock);
792                         }
793                 }
794         }
795
796         while (!list_empty(&tmp)) {
797                 bh = BH_ENTRY(tmp.prev);
798                 get_bh(bh);
799                 mapping = bh->b_assoc_map;
800                 __remove_assoc_queue(bh);
801                 /* Avoid race with mark_buffer_dirty_inode() which does
802                  * a lockless check and we rely on seeing the dirty bit */
803                 smp_mb();
804                 if (buffer_dirty(bh)) {
805                         list_add(&bh->b_assoc_buffers,
806                                  &mapping->private_list);
807                         bh->b_assoc_map = mapping;
808                 }
809                 spin_unlock(lock);
810                 wait_on_buffer(bh);
811                 if (!buffer_uptodate(bh))
812                         err = -EIO;
813                 brelse(bh);
814                 spin_lock(lock);
815         }
816         
817         spin_unlock(lock);
818         err2 = osync_buffers_list(lock, list);
819         if (err)
820                 return err;
821         else
822                 return err2;
823 }
824
825 /*
826  * Invalidate any and all dirty buffers on a given inode.  We are
827  * probably unmounting the fs, but that doesn't mean we have already
828  * done a sync().  Just drop the buffers from the inode list.
829  *
830  * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
831  * assumes that all the buffers are against the blockdev.  Not true
832  * for reiserfs.
833  */
834 void invalidate_inode_buffers(struct inode *inode)
835 {
836         if (inode_has_buffers(inode)) {
837                 struct address_space *mapping = &inode->i_data;
838                 struct list_head *list = &mapping->private_list;
839                 struct address_space *buffer_mapping = mapping->assoc_mapping;
840
841                 spin_lock(&buffer_mapping->private_lock);
842                 while (!list_empty(list))
843                         __remove_assoc_queue(BH_ENTRY(list->next));
844                 spin_unlock(&buffer_mapping->private_lock);
845         }
846 }
847 EXPORT_SYMBOL(invalidate_inode_buffers);
848
849 /*
850  * Remove any clean buffers from the inode's buffer list.  This is called
851  * when we're trying to free the inode itself.  Those buffers can pin it.
852  *
853  * Returns true if all buffers were removed.
854  */
855 int remove_inode_buffers(struct inode *inode)
856 {
857         int ret = 1;
858
859         if (inode_has_buffers(inode)) {
860                 struct address_space *mapping = &inode->i_data;
861                 struct list_head *list = &mapping->private_list;
862                 struct address_space *buffer_mapping = mapping->assoc_mapping;
863
864                 spin_lock(&buffer_mapping->private_lock);
865                 while (!list_empty(list)) {
866                         struct buffer_head *bh = BH_ENTRY(list->next);
867                         if (buffer_dirty(bh)) {
868                                 ret = 0;
869                                 break;
870                         }
871                         __remove_assoc_queue(bh);
872                 }
873                 spin_unlock(&buffer_mapping->private_lock);
874         }
875         return ret;
876 }
877
878 /*
879  * Create the appropriate buffers when given a page for data area and
880  * the size of each buffer.. Use the bh->b_this_page linked list to
881  * follow the buffers created.  Return NULL if unable to create more
882  * buffers.
883  *
884  * The retry flag is used to differentiate async IO (paging, swapping)
885  * which may not fail from ordinary buffer allocations.
886  */
887 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
888                 int retry)
889 {
890         struct buffer_head *bh, *head;
891         long offset;
892
893 try_again:
894         head = NULL;
895         offset = PAGE_SIZE;
896         while ((offset -= size) >= 0) {
897                 bh = alloc_buffer_head(GFP_NOFS);
898                 if (!bh)
899                         goto no_grow;
900
901                 bh->b_bdev = NULL;
902                 bh->b_this_page = head;
903                 bh->b_blocknr = -1;
904                 head = bh;
905
906                 bh->b_state = 0;
907                 atomic_set(&bh->b_count, 0);
908                 bh->b_size = size;
909
910                 /* Link the buffer to its page */
911                 set_bh_page(bh, page, offset);
912
913                 init_buffer(bh, NULL, NULL);
914         }
915         return head;
916 /*
917  * In case anything failed, we just free everything we got.
918  */
919 no_grow:
920         if (head) {
921                 do {
922                         bh = head;
923                         head = head->b_this_page;
924                         free_buffer_head(bh);
925                 } while (head);
926         }
927
928         /*
929          * Return failure for non-async IO requests.  Async IO requests
930          * are not allowed to fail, so we have to wait until buffer heads
931          * become available.  But we don't want tasks sleeping with 
932          * partially complete buffers, so all were released above.
933          */
934         if (!retry)
935                 return NULL;
936
937         /* We're _really_ low on memory. Now we just
938          * wait for old buffer heads to become free due to
939          * finishing IO.  Since this is an async request and
940          * the reserve list is empty, we're sure there are 
941          * async buffer heads in use.
942          */
943         free_more_memory();
944         goto try_again;
945 }
946 EXPORT_SYMBOL_GPL(alloc_page_buffers);
947
948 static inline void
949 link_dev_buffers(struct page *page, struct buffer_head *head)
950 {
951         struct buffer_head *bh, *tail;
952
953         bh = head;
954         do {
955                 tail = bh;
956                 bh = bh->b_this_page;
957         } while (bh);
958         tail->b_this_page = head;
959         attach_page_buffers(page, head);
960 }
961
962 /*
963  * Initialise the state of a blockdev page's buffers.
964  */ 
965 static void
966 init_page_buffers(struct page *page, struct block_device *bdev,
967                         sector_t block, int size)
968 {
969         struct buffer_head *head = page_buffers(page);
970         struct buffer_head *bh = head;
971         int uptodate = PageUptodate(page);
972
973         do {
974                 if (!buffer_mapped(bh)) {
975                         init_buffer(bh, NULL, NULL);
976                         bh->b_bdev = bdev;
977                         bh->b_blocknr = block;
978                         if (uptodate)
979                                 set_buffer_uptodate(bh);
980                         set_buffer_mapped(bh);
981                 }
982                 block++;
983                 bh = bh->b_this_page;
984         } while (bh != head);
985 }
986
987 /*
988  * Create the page-cache page that contains the requested block.
989  *
990  * This is used purely for blockdev mappings.
991  */
992 static struct page *
993 grow_dev_page(struct block_device *bdev, sector_t block,
994                 pgoff_t index, int size)
995 {
996         struct inode *inode = bdev->bd_inode;
997         struct page *page;
998         struct buffer_head *bh;
999
1000         page = find_or_create_page(inode->i_mapping, index,
1001                 (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
1002         if (!page)
1003                 return NULL;
1004
1005         BUG_ON(!PageLocked(page));
1006
1007         if (page_has_buffers(page)) {
1008                 bh = page_buffers(page);
1009                 if (bh->b_size == size) {
1010                         init_page_buffers(page, bdev, block, size);
1011                         return page;
1012                 }
1013                 if (!try_to_free_buffers(page))
1014                         goto failed;
1015         }
1016
1017         /*
1018          * Allocate some buffers for this page
1019          */
1020         bh = alloc_page_buffers(page, size, 0);
1021         if (!bh)
1022                 goto failed;
1023
1024         /*
1025          * Link the page to the buffers and initialise them.  Take the
1026          * lock to be atomic wrt __find_get_block(), which does not
1027          * run under the page lock.
1028          */
1029         spin_lock(&inode->i_mapping->private_lock);
1030         link_dev_buffers(page, bh);
1031         init_page_buffers(page, bdev, block, size);
1032         spin_unlock(&inode->i_mapping->private_lock);
1033         return page;
1034
1035 failed:
1036         BUG();
1037         unlock_page(page);
1038         page_cache_release(page);
1039         return NULL;
1040 }
1041
1042 /*
1043  * Create buffers for the specified block device block's page.  If
1044  * that page was dirty, the buffers are set dirty also.
1045  */
1046 static int
1047 grow_buffers(struct block_device *bdev, sector_t block, int size)
1048 {
1049         struct page *page;
1050         pgoff_t index;
1051         int sizebits;
1052
1053         sizebits = -1;
1054         do {
1055                 sizebits++;
1056         } while ((size << sizebits) < PAGE_SIZE);
1057
1058         index = block >> sizebits;
1059
1060         /*
1061          * Check for a block which wants to lie outside our maximum possible
1062          * pagecache index.  (this comparison is done using sector_t types).
1063          */
1064         if (unlikely(index != block >> sizebits)) {
1065                 char b[BDEVNAME_SIZE];
1066
1067                 printk(KERN_ERR "%s: requested out-of-range block %llu for "
1068                         "device %s\n",
1069                         __func__, (unsigned long long)block,
1070                         bdevname(bdev, b));
1071                 return -EIO;
1072         }
1073         block = index << sizebits;
1074         /* Create a page with the proper size buffers.. */
1075         page = grow_dev_page(bdev, block, index, size);
1076         if (!page)
1077                 return 0;
1078         unlock_page(page);
1079         page_cache_release(page);
1080         return 1;
1081 }
1082
1083 static struct buffer_head *
1084 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1085 {
1086         /* Size must be multiple of hard sectorsize */
1087         if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
1088                         (size < 512 || size > PAGE_SIZE))) {
1089                 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1090                                         size);
1091                 printk(KERN_ERR "logical block size: %d\n",
1092                                         bdev_logical_block_size(bdev));
1093
1094                 dump_stack();
1095                 return NULL;
1096         }
1097
1098         for (;;) {
1099                 struct buffer_head * bh;
1100                 int ret;
1101
1102                 bh = __find_get_block(bdev, block, size);
1103                 if (bh)
1104                         return bh;
1105
1106                 ret = grow_buffers(bdev, block, size);
1107                 if (ret < 0)
1108                         return NULL;
1109                 if (ret == 0)
1110                         free_more_memory();
1111         }
1112 }
1113
1114 /*
1115  * The relationship between dirty buffers and dirty pages:
1116  *
1117  * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1118  * the page is tagged dirty in its radix tree.
1119  *
1120  * At all times, the dirtiness of the buffers represents the dirtiness of
1121  * subsections of the page.  If the page has buffers, the page dirty bit is
1122  * merely a hint about the true dirty state.
1123  *
1124  * When a page is set dirty in its entirety, all its buffers are marked dirty
1125  * (if the page has buffers).
1126  *
1127  * When a buffer is marked dirty, its page is dirtied, but the page's other
1128  * buffers are not.
1129  *
1130  * Also.  When blockdev buffers are explicitly read with bread(), they
1131  * individually become uptodate.  But their backing page remains not
1132  * uptodate - even if all of its buffers are uptodate.  A subsequent
1133  * block_read_full_page() against that page will discover all the uptodate
1134  * buffers, will set the page uptodate and will perform no I/O.
1135  */
1136
1137 /**
1138  * mark_buffer_dirty - mark a buffer_head as needing writeout
1139  * @bh: the buffer_head to mark dirty
1140  *
1141  * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1142  * backing page dirty, then tag the page as dirty in its address_space's radix
1143  * tree and then attach the address_space's inode to its superblock's dirty
1144  * inode list.
1145  *
1146  * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
1147  * mapping->tree_lock and the global inode_lock.
1148  */
1149 void mark_buffer_dirty(struct buffer_head *bh)
1150 {
1151         WARN_ON_ONCE(!buffer_uptodate(bh));
1152
1153         /*
1154          * Very *carefully* optimize the it-is-already-dirty case.
1155          *
1156          * Don't let the final "is it dirty" escape to before we
1157          * perhaps modified the buffer.
1158          */
1159         if (buffer_dirty(bh)) {
1160                 smp_mb();
1161                 if (buffer_dirty(bh))
1162                         return;
1163         }
1164
1165         if (!test_set_buffer_dirty(bh)) {
1166                 struct page *page = bh->b_page;
1167                 if (!TestSetPageDirty(page)) {
1168                         struct address_space *mapping = page_mapping(page);
1169                         if (mapping)
1170                                 __set_page_dirty(page, mapping, 0);
1171                 }
1172         }
1173 }
1174 EXPORT_SYMBOL(mark_buffer_dirty);
1175
1176 /*
1177  * Decrement a buffer_head's reference count.  If all buffers against a page
1178  * have zero reference count, are clean and unlocked, and if the page is clean
1179  * and unlocked then try_to_free_buffers() may strip the buffers from the page
1180  * in preparation for freeing it (sometimes, rarely, buffers are removed from
1181  * a page but it ends up not being freed, and buffers may later be reattached).
1182  */
1183 void __brelse(struct buffer_head * buf)
1184 {
1185         if (atomic_read(&buf->b_count)) {
1186                 put_bh(buf);
1187                 return;
1188         }
1189         WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1190 }
1191 EXPORT_SYMBOL(__brelse);
1192
1193 /*
1194  * bforget() is like brelse(), except it discards any
1195  * potentially dirty data.
1196  */
1197 void __bforget(struct buffer_head *bh)
1198 {
1199         clear_buffer_dirty(bh);
1200         if (bh->b_assoc_map) {
1201                 struct address_space *buffer_mapping = bh->b_page->mapping;
1202
1203                 spin_lock(&buffer_mapping->private_lock);
1204                 list_del_init(&bh->b_assoc_buffers);
1205                 bh->b_assoc_map = NULL;
1206                 spin_unlock(&buffer_mapping->private_lock);
1207         }
1208         __brelse(bh);
1209 }
1210 EXPORT_SYMBOL(__bforget);
1211
1212 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1213 {
1214         lock_buffer(bh);
1215         if (buffer_uptodate(bh)) {
1216                 unlock_buffer(bh);
1217                 return bh;
1218         } else {
1219                 get_bh(bh);
1220                 bh->b_end_io = end_buffer_read_sync;
1221                 submit_bh(READ, bh);
1222                 wait_on_buffer(bh);
1223                 if (buffer_uptodate(bh))
1224                         return bh;
1225         }
1226         brelse(bh);
1227         return NULL;
1228 }
1229
1230 /*
1231  * Per-cpu buffer LRU implementation, used to reduce the cost of __find_get_block().
1232  * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
1233  * refcount elevated by one when they're in an LRU.  A buffer can only appear
1234  * once in a particular CPU's LRU.  A single buffer can be present in multiple
1235  * CPU's LRUs at the same time.
1236  *
1237  * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1238  * sb_find_get_block().
1239  *
1240  * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
1241  * a local interrupt disable for that.
1242  */
1243
1244 #define BH_LRU_SIZE     8
1245
1246 struct bh_lru {
1247         struct buffer_head *bhs[BH_LRU_SIZE];
1248 };
1249
1250 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1251
1252 #ifdef CONFIG_SMP
1253 #define bh_lru_lock()   local_irq_disable()
1254 #define bh_lru_unlock() local_irq_enable()
1255 #else
1256 #define bh_lru_lock()   preempt_disable()
1257 #define bh_lru_unlock() preempt_enable()
1258 #endif
1259
1260 static inline void check_irqs_on(void)
1261 {
1262 #ifdef irqs_disabled
1263         BUG_ON(irqs_disabled());
1264 #endif
1265 }
1266
1267 /*
1268  * The LRU management algorithm is dopey-but-simple.  Sorry.
1269  */
1270 static void bh_lru_install(struct buffer_head *bh)
1271 {
1272         struct buffer_head *evictee = NULL;
1273
1274         check_irqs_on();
1275         bh_lru_lock();
1276         if (__this_cpu_read(bh_lrus.bhs[0]) != bh) {
1277                 struct buffer_head *bhs[BH_LRU_SIZE];
1278                 int in;
1279                 int out = 0;
1280
1281                 get_bh(bh);
1282                 bhs[out++] = bh;
1283                 for (in = 0; in < BH_LRU_SIZE; in++) {
1284                         struct buffer_head *bh2 =
1285                                 __this_cpu_read(bh_lrus.bhs[in]);
1286
1287                         if (bh2 == bh) {
1288                                 __brelse(bh2);
1289                         } else {
1290                                 if (out >= BH_LRU_SIZE) {
1291                                         BUG_ON(evictee != NULL);
1292                                         evictee = bh2;
1293                                 } else {
1294                                         bhs[out++] = bh2;
1295                                 }
1296                         }
1297                 }
1298                 while (out < BH_LRU_SIZE)
1299                         bhs[out++] = NULL;
1300                 memcpy(__this_cpu_ptr(&bh_lrus.bhs), bhs, sizeof(bhs));
1301         }
1302         bh_lru_unlock();
1303
1304         if (evictee)
1305                 __brelse(evictee);
1306 }
1307
1308 /*
1309  * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
1310  */
1311 static struct buffer_head *
1312 lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1313 {
1314         struct buffer_head *ret = NULL;
1315         unsigned int i;
1316
1317         check_irqs_on();
1318         bh_lru_lock();
1319         for (i = 0; i < BH_LRU_SIZE; i++) {
1320                 struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);
1321
1322                 if (bh && bh->b_bdev == bdev &&
1323                                 bh->b_blocknr == block && bh->b_size == size) {
1324                         if (i) {
1325                                 while (i) {
1326                                         __this_cpu_write(bh_lrus.bhs[i],
1327                                                 __this_cpu_read(bh_lrus.bhs[i - 1]));
1328                                         i--;
1329                                 }
1330                                 __this_cpu_write(bh_lrus.bhs[0], bh);
1331                         }
1332                         get_bh(bh);
1333                         ret = bh;
1334                         break;
1335                 }
1336         }
1337         bh_lru_unlock();
1338         return ret;
1339 }
1340
1341 /*
1342  * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
1343  * it in the LRU and mark it as accessed.  If it is not present then return
1344  * NULL
1345  */
1346 struct buffer_head *
1347 __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1348 {
1349         struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1350
1351         if (bh == NULL) {
1352                 bh = __find_get_block_slow(bdev, block);
1353                 if (bh)
1354                         bh_lru_install(bh);
1355         }
1356         if (bh)
1357                 touch_buffer(bh);
1358         return bh;
1359 }
1360 EXPORT_SYMBOL(__find_get_block);
1361
1362 /*
1363  * __getblk will locate (and, if necessary, create) the buffer_head
1364  * which corresponds to the passed block_device, block and size. The
1365  * returned buffer has its reference count incremented.
1366  *
1367  * __getblk() cannot fail - it just keeps trying.  If you pass it an
1368  * illegal block number, __getblk() will happily return a buffer_head
1369  * which represents the non-existent block.  Very weird.
1370  *
1371  * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1372  * attempt is failing.  FIXME, perhaps?
1373  */
1374 struct buffer_head *
1375 __getblk(struct block_device *bdev, sector_t block, unsigned size)
1376 {
1377         struct buffer_head *bh = __find_get_block(bdev, block, size);
1378
1379         might_sleep();
1380         if (bh == NULL)
1381                 bh = __getblk_slow(bdev, block, size);
1382         return bh;
1383 }
1384 EXPORT_SYMBOL(__getblk);
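
/*
 * Usage sketch (hedged): a typical way a filesystem initialises a freshly
 * allocated metadata block via __getblk()/sb_getblk() - no read is needed
 * because the on-disk contents are about to be overwritten:
 *
 *	bh = sb_getblk(sb, new_block);
 *	lock_buffer(bh);
 *	memset(bh->b_data, 0, sb->s_blocksize);
 *	set_buffer_uptodate(bh);
 *	unlock_buffer(bh);
 *	mark_buffer_dirty(bh);
 *	brelse(bh);
 */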
1385
1386 /*
1387  * Do async read-ahead on a buffer..
1388  */
1389 void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1390 {
1391         struct buffer_head *bh = __getblk(bdev, block, size);
1392         if (likely(bh)) {
1393                 ll_rw_block(READA, 1, &bh);
1394                 brelse(bh);
1395         }
1396 }
1397 EXPORT_SYMBOL(__breadahead);
1398
1399 /**
1400  *  __bread() - reads a specified block and returns the bh
1401  *  @bdev: the block_device to read from
1402  *  @block: number of block
1403  *  @size: size (in bytes) to read
1404  * 
1405  *  Reads a specified block, and returns buffer head that contains it.
1406  *  It returns NULL if the block was unreadable.
1407  */
1408 struct buffer_head *
1409 __bread(struct block_device *bdev, sector_t block, unsigned size)
1410 {
1411         struct buffer_head *bh = __getblk(bdev, block, size);
1412
1413         if (likely(bh) && !buffer_uptodate(bh))
1414                 bh = __bread_slow(bh);
1415         return bh;
1416 }
1417 EXPORT_SYMBOL(__bread);
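
/*
 * Usage sketch: filesystems normally reach this through sb_bread(), which
 * passes sb->s_bdev and sb->s_blocksize.  The NULL return must be checked -
 * it means the block could not be read:
 *
 *	bh = sb_bread(sb, block);
 *	if (!bh)
 *		return -EIO;
 *	...interpret bh->b_data...
 *	brelse(bh);
 */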
1418
1419 /*
1420  * invalidate_bh_lrus() is called rarely - but not only at unmount.
1421  * This doesn't race because it runs in each cpu either in irq
1422  * or with preempt disabled.
1423  */
1424 static void invalidate_bh_lru(void *arg)
1425 {
1426         struct bh_lru *b = &get_cpu_var(bh_lrus);
1427         int i;
1428
1429         for (i = 0; i < BH_LRU_SIZE; i++) {
1430                 brelse(b->bhs[i]);
1431                 b->bhs[i] = NULL;
1432         }
1433         put_cpu_var(bh_lrus);
1434 }
1435         
1436 void invalidate_bh_lrus(void)
1437 {
1438         on_each_cpu(invalidate_bh_lru, NULL, 1);
1439 }
1440 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
1441
1442 void set_bh_page(struct buffer_head *bh,
1443                 struct page *page, unsigned long offset)
1444 {
1445         bh->b_page = page;
1446         BUG_ON(offset >= PAGE_SIZE);
1447         if (PageHighMem(page))
1448                 /*
1449                  * This catches illegal uses and preserves the offset:
1450                  */
1451                 bh->b_data = (char *)(0 + offset);
1452         else
1453                 bh->b_data = page_address(page) + offset;
1454 }
1455 EXPORT_SYMBOL(set_bh_page);
1456
1457 /*
1458  * Called when truncating a buffer on a page completely.
1459  */
1460 static void discard_buffer(struct buffer_head * bh)
1461 {
1462         lock_buffer(bh);
1463         clear_buffer_dirty(bh);
1464         bh->b_bdev = NULL;
1465         clear_buffer_mapped(bh);
1466         clear_buffer_req(bh);
1467         clear_buffer_new(bh);
1468         clear_buffer_delay(bh);
1469         clear_buffer_unwritten(bh);
1470         unlock_buffer(bh);
1471 }
1472
1473 /**
1474  * block_invalidatepage - invalidate part or all of a buffer-backed page
1475  *
1476  * @page: the page which is affected
1477  * @offset: the index of the truncation point
1478  *
1479  * block_invalidatepage() is called when all or part of the page has become
1480  * invalidated by a truncate operation.
1481  *
1482  * block_invalidatepage() does not have to release all buffers, but it must
1483  * ensure that no dirty buffer is left outside @offset and that no I/O
1484  * is underway against any of the blocks which are outside the truncation
1485  * point.  Because the caller is about to free (and possibly reuse) those
1486  * blocks on-disk.
1487  */
1488 void block_invalidatepage(struct page *page, unsigned long offset)
1489 {
1490         struct buffer_head *head, *bh, *next;
1491         unsigned int curr_off = 0;
1492
1493         BUG_ON(!PageLocked(page));
1494         if (!page_has_buffers(page))
1495                 goto out;
1496
1497         head = page_buffers(page);
1498         bh = head;
1499         do {
1500                 unsigned int next_off = curr_off + bh->b_size;
1501                 next = bh->b_this_page;
1502
1503                 /*
1504                  * is this block fully invalidated?
1505                  */
1506                 if (offset <= curr_off)
1507                         discard_buffer(bh);
1508                 curr_off = next_off;
1509                 bh = next;
1510         } while (bh != head);
1511
1512         /*
1513          * We release buffers only if the entire page is being invalidated.
1514          * The get_block cached value has been unconditionally invalidated,
1515          * so real IO is not possible anymore.
1516          */
1517         if (offset == 0)
1518                 try_to_release_page(page, 0);
1519 out:
1520         return;
1521 }
1522 EXPORT_SYMBOL(block_invalidatepage);
1523
1524 /*
1525  * We attach and possibly dirty the buffers atomically wrt
1526  * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
1527  * is already excluded via the page lock.
1528  */
1529 void create_empty_buffers(struct page *page,
1530                         unsigned long blocksize, unsigned long b_state)
1531 {
1532         struct buffer_head *bh, *head, *tail;
1533
1534         head = alloc_page_buffers(page, blocksize, 1);
1535         bh = head;
1536         do {
1537                 bh->b_state |= b_state;
1538                 tail = bh;
1539                 bh = bh->b_this_page;
1540         } while (bh);
1541         tail->b_this_page = head;
1542
1543         spin_lock(&page->mapping->private_lock);
1544         if (PageUptodate(page) || PageDirty(page)) {
1545                 bh = head;
1546                 do {
1547                         if (PageDirty(page))
1548                                 set_buffer_dirty(bh);
1549                         if (PageUptodate(page))
1550                                 set_buffer_uptodate(bh);
1551                         bh = bh->b_this_page;
1552                 } while (bh != head);
1553         }
1554         attach_page_buffers(page, head);
1555         spin_unlock(&page->mapping->private_lock);
1556 }
1557 EXPORT_SYMBOL(create_empty_buffers);
1558
1559 /*
1560  * We are taking a block for data and we don't want any output from any
1561  * buffer-cache aliases starting from the moment this function returns
1562  * until the moment when something explicitly marks the buffer
1563  * dirty (hopefully that will not happen until we free that block ;-)
1564  * We don't even need to mark it not-uptodate - nobody can expect
1565  * anything from a newly allocated buffer anyway. We used to use
1566  * unmap_buffer() for such invalidation, but that was wrong. We definitely
1567  * don't want to mark the alias unmapped, for example - it would confuse
1568  * anyone who might pick it up with bread() afterwards...
1569  *
1570  * Also..  Note that bforget() doesn't lock the buffer.  So there can
1571  * be writeout I/O going on against recently-freed buffers.  We don't
1572  * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1573  * only if we really need to.  That happens here.
1574  */
1575 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1576 {
1577         struct buffer_head *old_bh;
1578
1579         might_sleep();
1580
1581         old_bh = __find_get_block_slow(bdev, block);
1582         if (old_bh) {
1583                 clear_buffer_dirty(old_bh);
1584                 wait_on_buffer(old_bh);
1585                 clear_buffer_req(old_bh);
1586                 __brelse(old_bh);
1587         }
1588 }
1589 EXPORT_SYMBOL(unmap_underlying_metadata);
1590
1591 /*
1592  * NOTE! All mapped/uptodate combinations are valid:
1593  *
1594  *      Mapped  Uptodate        Meaning
1595  *
1596  *      No      No              "unknown" - must do get_block()
1597  *      No      Yes             "hole" - zero-filled
1598  *      Yes     No              "allocated" - allocated on disk, not read in
1599  *      Yes     Yes             "valid" - allocated and up-to-date in memory.
1600  *
1601  * "Dirty" is valid only with the last case (mapped+uptodate).
1602  */
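
/*
 * Illustrative sketch: acting on the four mapped/uptodate states tabulated
 * above before touching b_data.  example_classify_buffer() is hypothetical
 * and only demonstrates the state checks.
 */
static inline int example_classify_buffer(struct buffer_head *bh)
{
	if (!buffer_mapped(bh))
		return buffer_uptodate(bh) ? 1 /* hole */ : 0 /* unknown */;
	return buffer_uptodate(bh) ? 3 /* valid */ : 2 /* allocated, not read */;
}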
1603
1604 /*
1605  * While block_write_full_page is writing back the dirty buffers under
1606  * the page lock, whoever dirtied the buffers may decide to clean them
1607  * again at any time.  We handle that by only looking at the buffer
1608  * state inside lock_buffer().
1609  *
1610  * If block_write_full_page() is called for regular writeback
1611  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1612  * locked buffer.   This can only happen if someone has written the buffer
1613  * directly, with submit_bh().  At the address_space level PageWriteback
1614  * prevents this contention from occurring.
1615  *
1616  * If block_write_full_page() is called with wbc->sync_mode ==
1617  * WB_SYNC_ALL, the writes are posted using WRITE_SYNC_PLUG; this
1618  * causes the writes to be flagged as synchronous writes, but the
1619  * block device queue will NOT be unplugged, since usually many pages
1620  * will be pushed out before the higher-level caller actually
1621  * waits for the writes to be completed.  The various wait functions,
1622  * such as wait_on_writeback_range() will ultimately call sync_page()
1623  * which will ultimately call blk_run_backing_dev(), which will end up
1624  * unplugging the device queue.
1625  */
1626 static int __block_write_full_page(struct inode *inode, struct page *page,
1627                         get_block_t *get_block, struct writeback_control *wbc,
1628                         bh_end_io_t *handler)
1629 {
1630         int err;
1631         sector_t block;
1632         sector_t last_block;
1633         struct buffer_head *bh, *head;
1634         const unsigned blocksize = 1 << inode->i_blkbits;
1635         int nr_underway = 0;
1636         int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
1637                         WRITE_SYNC_PLUG : WRITE);
1638
1639         BUG_ON(!PageLocked(page));
1640
1641         last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1642
1643         if (!page_has_buffers(page)) {
1644                 create_empty_buffers(page, blocksize,
1645                                         (1 << BH_Dirty)|(1 << BH_Uptodate));
1646         }
1647
1648         /*
1649          * Be very careful.  We have no exclusion from __set_page_dirty_buffers
1650          * here, and the (potentially unmapped) buffers may become dirty at
1651          * any time.  If a buffer becomes dirty here after we've inspected it
1652          * then we just miss that fact, and the page stays dirty.
1653          *
1654          * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1655          * handle that here by just cleaning them.
1656          */
1657
1658         block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1659         head = page_buffers(page);
1660         bh = head;
1661
1662         /*
1663          * Get all the dirty buffers mapped to disk addresses and
1664          * handle any aliases from the underlying blockdev's mapping.
1665          */
1666         do {
1667                 if (block > last_block) {
1668                         /*
1669                          * mapped buffers outside i_size will occur, because
1670                          * this page can be outside i_size when there is a
1671                          * truncate in progress.
1672                          */
1673                         /*
1674                          * The buffer was zeroed by block_write_full_page()
1675                          */
1676                         clear_buffer_dirty(bh);
1677                         set_buffer_uptodate(bh);
1678                 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1679                            buffer_dirty(bh)) {
1680                         WARN_ON(bh->b_size != blocksize);
1681                         err = get_block(inode, block, bh, 1);
1682                         if (err)
1683                                 goto recover;
1684                         clear_buffer_delay(bh);
1685                         if (buffer_new(bh)) {
1686                                 /* blockdev mappings never come here */
1687                                 clear_buffer_new(bh);
1688                                 unmap_underlying_metadata(bh->b_bdev,
1689                                                         bh->b_blocknr);
1690                         }
1691                 }
1692                 bh = bh->b_this_page;
1693                 block++;
1694         } while (bh != head);
1695
1696         do {
1697                 if (!buffer_mapped(bh))
1698                         continue;
1699                 /*
1700                  * If it's a fully non-blocking write attempt and we cannot
1701                  * lock the buffer then redirty the page.  Note that this can
1702                  * potentially cause a busy-wait loop from writeback threads
1703                  * and kswapd activity, but those code paths have their own
1704                  * higher-level throttling.
1705                  */
1706                 if (wbc->sync_mode != WB_SYNC_NONE) {
1707                         lock_buffer(bh);
1708                 } else if (!trylock_buffer(bh)) {
1709                         redirty_page_for_writepage(wbc, page);
1710                         continue;
1711                 }
1712                 if (test_clear_buffer_dirty(bh)) {
1713                         mark_buffer_async_write_endio(bh, handler);
1714                 } else {
1715                         unlock_buffer(bh);
1716                 }
1717         } while ((bh = bh->b_this_page) != head);
1718
1719         /*
1720          * The page and its buffers are protected by PageWriteback(), so we can
1721          * drop the bh refcounts early.
1722          */
1723         BUG_ON(PageWriteback(page));
1724         set_page_writeback(page);
1725
1726         do {
1727                 struct buffer_head *next = bh->b_this_page;
1728                 if (buffer_async_write(bh)) {
1729                         submit_bh(write_op, bh);
1730                         nr_underway++;
1731                 }
1732                 bh = next;
1733         } while (bh != head);
1734         unlock_page(page);
1735
1736         err = 0;
1737 done:
1738         if (nr_underway == 0) {
1739                 /*
1740                  * The page was marked dirty, but the buffers were
1741                  * clean.  Someone wrote them back by hand with
1742                  * ll_rw_block/submit_bh.  A rare case.
1743                  */
1744                 end_page_writeback(page);
1745
1746                 /*
1747                  * The page and buffer_heads can be released at any time from
1748                  * here on.
1749                  */
1750         }
1751         return err;
1752
1753 recover:
1754         /*
1755          * ENOSPC, or some other error.  We may already have added some
1756          * blocks to the file, so we need to write these out to avoid
1757          * exposing stale data.
1758          * The page is currently locked and not marked for writeback
1759          */
1760         bh = head;
1761         /* Recovery: lock and submit the mapped buffers */
1762         do {
1763                 if (buffer_mapped(bh) && buffer_dirty(bh) &&
1764                     !buffer_delay(bh)) {
1765                         lock_buffer(bh);
1766                         mark_buffer_async_write_endio(bh, handler);
1767                 } else {
1768                         /*
1769                          * The buffer may have been set dirty during
1770                          * attachment to a dirty page.
1771                          */
1772                         clear_buffer_dirty(bh);
1773                 }
1774         } while ((bh = bh->b_this_page) != head);
1775         SetPageError(page);
1776         BUG_ON(PageWriteback(page));
1777         mapping_set_error(page->mapping, err);
1778         set_page_writeback(page);
1779         do {
1780                 struct buffer_head *next = bh->b_this_page;
1781                 if (buffer_async_write(bh)) {
1782                         clear_buffer_dirty(bh);
1783                         submit_bh(write_op, bh);
1784                         nr_underway++;
1785                 }
1786                 bh = next;
1787         } while (bh != head);
1788         unlock_page(page);
1789         goto done;
1790 }
1791
1792 /*
1793  * If a page has any new buffers, zero them out here, and mark them uptodate
1794  * and dirty so they'll be written out (in order to prevent uninitialised
1795  * block data from leaking), and clear the new bit.
1796  */
1797 void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1798 {
1799         unsigned int block_start, block_end;
1800         struct buffer_head *head, *bh;
1801
1802         BUG_ON(!PageLocked(page));
1803         if (!page_has_buffers(page))
1804                 return;
1805
1806         bh = head = page_buffers(page);
1807         block_start = 0;
1808         do {
1809                 block_end = block_start + bh->b_size;
1810
1811                 if (buffer_new(bh)) {
1812                         if (block_end > from && block_start < to) {
1813                                 if (!PageUptodate(page)) {
1814                                         unsigned start, size;
1815
1816                                         start = max(from, block_start);
1817                                         size = min(to, block_end) - start;
1818
1819                                         zero_user(page, start, size);
1820                                         set_buffer_uptodate(bh);
1821                                 }
1822
1823                                 clear_buffer_new(bh);
1824                                 mark_buffer_dirty(bh);
1825                         }
1826                 }
1827
1828                 block_start = block_end;
1829                 bh = bh->b_this_page;
1830         } while (bh != head);
1831 }
1832 EXPORT_SYMBOL(page_zero_new_buffers);
1833
1834 int __block_write_begin(struct page *page, loff_t pos, unsigned len,
1835                 get_block_t *get_block)
1836 {
1837         unsigned from = pos & (PAGE_CACHE_SIZE - 1);
1838         unsigned to = from + len;
1839         struct inode *inode = page->mapping->host;
1840         unsigned block_start, block_end;
1841         sector_t block;
1842         int err = 0;
1843         unsigned blocksize, bbits;
1844         struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1845
1846         BUG_ON(!PageLocked(page));
1847         BUG_ON(from > PAGE_CACHE_SIZE);
1848         BUG_ON(to > PAGE_CACHE_SIZE);
1849         BUG_ON(from > to);
1850
1851         blocksize = 1 << inode->i_blkbits;
1852         if (!page_has_buffers(page))
1853                 create_empty_buffers(page, blocksize, 0);
1854         head = page_buffers(page);
1855
1856         bbits = inode->i_blkbits;
1857         block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1858
1859         for(bh = head, block_start = 0; bh != head || !block_start;
1860             block++, block_start=block_end, bh = bh->b_this_page) {
1861                 block_end = block_start + blocksize;
1862                 if (block_end <= from || block_start >= to) {
1863                         if (PageUptodate(page)) {
1864                                 if (!buffer_uptodate(bh))
1865                                         set_buffer_uptodate(bh);
1866                         }
1867                         continue;
1868                 }
1869                 if (buffer_new(bh))
1870                         clear_buffer_new(bh);
1871                 if (!buffer_mapped(bh)) {
1872                         WARN_ON(bh->b_size != blocksize);
1873                         err = get_block(inode, block, bh, 1);
1874                         if (err)
1875                                 break;
1876                         if (buffer_new(bh)) {
1877                                 unmap_underlying_metadata(bh->b_bdev,
1878                                                         bh->b_blocknr);
1879                                 if (PageUptodate(page)) {
1880                                         clear_buffer_new(bh);
1881                                         set_buffer_uptodate(bh);
1882                                         mark_buffer_dirty(bh);
1883                                         continue;
1884                                 }
1885                                 if (block_end > to || block_start < from)
1886                                         zero_user_segments(page,
1887                                                 to, block_end,
1888                                                 block_start, from);
1889                                 continue;
1890                         }
1891                 }
1892                 if (PageUptodate(page)) {
1893                         if (!buffer_uptodate(bh))
1894                                 set_buffer_uptodate(bh);
1895                         continue; 
1896                 }
1897                 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1898                     !buffer_unwritten(bh) &&
1899                      (block_start < from || block_end > to)) {
1900                         ll_rw_block(READ, 1, &bh);
1901                         *wait_bh++=bh;
1902                 }
1903         }
1904         /*
1905          * If we issued read requests - let them complete.
1906          */
1907         while(wait_bh > wait) {
1908                 wait_on_buffer(*--wait_bh);
1909                 if (!buffer_uptodate(*wait_bh))
1910                         err = -EIO;
1911         }
1912         if (unlikely(err)) {
1913                 page_zero_new_buffers(page, from, to);
1914                 ClearPageUptodate(page);
1915         }
1916         return err;
1917 }
1918 EXPORT_SYMBOL(__block_write_begin);
1919
1920 static int __block_commit_write(struct inode *inode, struct page *page,
1921                 unsigned from, unsigned to)
1922 {
1923         unsigned block_start, block_end;
1924         int partial = 0;
1925         unsigned blocksize;
1926         struct buffer_head *bh, *head;
1927
1928         blocksize = 1 << inode->i_blkbits;
1929
1930         for(bh = head = page_buffers(page), block_start = 0;
1931             bh != head || !block_start;
1932             block_start=block_end, bh = bh->b_this_page) {
1933                 block_end = block_start + blocksize;
1934                 if (block_end <= from || block_start >= to) {
1935                         if (!buffer_uptodate(bh))
1936                                 partial = 1;
1937                 } else {
1938                         set_buffer_uptodate(bh);
1939                         mark_buffer_dirty(bh);
1940                 }
1941                 clear_buffer_new(bh);
1942         }
1943
1944         /*
1945          * If this is a partial write which happened to make all buffers
1946          * uptodate then we can optimize away a bogus readpage() for
1947          * the next read(). Here we 'discover' whether the page went
1948          * uptodate as a result of this (potentially partial) write.
1949          */
1950         if (!partial)
1951                 SetPageUptodate(page);
1952         return 0;
1953 }
1954
1955 /*
1956  * block_write_begin takes care of the basic task of block allocation and
1957  * bringing partial write blocks uptodate first.
1958  *
1959  * The filesystem needs to handle block truncation upon failure.
1960  */
1961 int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
1962                 unsigned flags, struct page **pagep, get_block_t *get_block)
1963 {
1964         pgoff_t index = pos >> PAGE_CACHE_SHIFT;
1965         struct page *page;
1966         int status;
1967
1968         page = grab_cache_page_write_begin(mapping, index, flags);
1969         if (!page)
1970                 return -ENOMEM;
1971
1972         status = __block_write_begin(page, pos, len, get_block);
1973         if (unlikely(status)) {
1974                 unlock_page(page);
1975                 page_cache_release(page);
1976                 page = NULL;
1977         }
1978
1979         *pagep = page;
1980         return status;
1981 }
1982 EXPORT_SYMBOL(block_write_begin);
1983
1984 int block_write_end(struct file *file, struct address_space *mapping,
1985                         loff_t pos, unsigned len, unsigned copied,
1986                         struct page *page, void *fsdata)
1987 {
1988         struct inode *inode = mapping->host;
1989         unsigned start;
1990
1991         start = pos & (PAGE_CACHE_SIZE - 1);
1992
1993         if (unlikely(copied < len)) {
1994                 /*
1995                  * The buffers that were written will now be uptodate, so we
1996                  * don't have to worry about a readpage reading them and
1997                  * overwriting a partial write. However if we have encountered
1998                  * a short write and only partially written into a buffer, it
1999                  * will not be marked uptodate, so a readpage might come in and
2000                  * destroy our partial write.
2001                  *
2002                  * Do the simplest thing, and just treat any short write to a
2003                  * non uptodate page as a zero-length write, and force the
2004                  * caller to redo the whole thing.
2005                  */
2006                 if (!PageUptodate(page))
2007                         copied = 0;
2008
2009                 page_zero_new_buffers(page, start+copied, start+len);
2010         }
2011         flush_dcache_page(page);
2012
2013         /* This could be a short (even 0-length) commit */
2014         __block_commit_write(inode, page, start, start+copied);
2015
2016         return copied;
2017 }
2018 EXPORT_SYMBOL(block_write_end);
2019
2020 int generic_write_end(struct file *file, struct address_space *mapping,
2021                         loff_t pos, unsigned len, unsigned copied,
2022                         struct page *page, void *fsdata)
2023 {
2024         struct inode *inode = mapping->host;
2025         int i_size_changed = 0;
2026
2027         copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2028
2029         /*
2030          * No need to use i_size_read() here, the i_size
2031          * cannot change under us because we hold i_mutex.
2032          *
2033          * But it's important to update i_size while still holding page lock:
2034          * page writeout could otherwise come in and zero beyond i_size.
2035          */
2036         if (pos+copied > inode->i_size) {
2037                 i_size_write(inode, pos+copied);
2038                 i_size_changed = 1;
2039         }
2040
2041         unlock_page(page);
2042         page_cache_release(page);
2043
2044         /*
2045          * Don't mark the inode dirty under page lock. First, it unnecessarily
2046          * makes the holding time of page lock longer. Second, it forces lock
2047          * ordering of page lock and transaction start for journaling
2048          * filesystems.
2049          */
2050         if (i_size_changed)
2051                 mark_inode_dirty(inode);
2052
2053         return copied;
2054 }
2055 EXPORT_SYMBOL(generic_write_end);
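
/*
 * Illustrative sketch: how a filesystem's ->write_begin/->write_end
 * address_space operations are typically wired through the helpers above.
 * myfs_get_block is a hypothetical block-mapping callback (not a real
 * kernel symbol; the later sketches reuse it), and ->write_end can usually
 * be generic_write_end() directly.
 */
extern int myfs_get_block(struct inode *inode, sector_t iblock,
			  struct buffer_head *bh_result, int create);

static int myfs_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	return block_write_begin(mapping, pos, len, flags, pagep,
				 myfs_get_block);
}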
2056
2057 /*
2058  * block_is_partially_uptodate checks whether buffers within a page are
2059  * uptodate or not.
2060  *
2061  * Returns true if all buffers which correspond to the file portion
2062  * we want to read are uptodate.
2063  */
2064 int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
2065                                         unsigned long from)
2066 {
2067         struct inode *inode = page->mapping->host;
2068         unsigned block_start, block_end, blocksize;
2069         unsigned to;
2070         struct buffer_head *bh, *head;
2071         int ret = 1;
2072
2073         if (!page_has_buffers(page))
2074                 return 0;
2075
2076         blocksize = 1 << inode->i_blkbits;
2077         to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
2078         to = from + to;
2079         if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
2080                 return 0;
2081
2082         head = page_buffers(page);
2083         bh = head;
2084         block_start = 0;
2085         do {
2086                 block_end = block_start + blocksize;
2087                 if (block_end > from && block_start < to) {
2088                         if (!buffer_uptodate(bh)) {
2089                                 ret = 0;
2090                                 break;
2091                         }
2092                         if (block_end >= to)
2093                                 break;
2094                 }
2095                 block_start = block_end;
2096                 bh = bh->b_this_page;
2097         } while (bh != head);
2098
2099         return ret;
2100 }
2101 EXPORT_SYMBOL(block_is_partially_uptodate);
2102
2103 /*
2104  * Generic "read page" function for block devices that have the normal
2105  * get_block functionality, which covers most block device filesystems.
2106  * Reads the page asynchronously --- the unlock_buffer() and
2107  * set/clear_buffer_uptodate() functions propagate buffer state into the
2108  * page struct once IO has completed.
2109  */
2110 int block_read_full_page(struct page *page, get_block_t *get_block)
2111 {
2112         struct inode *inode = page->mapping->host;
2113         sector_t iblock, lblock;
2114         struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2115         unsigned int blocksize;
2116         int nr, i;
2117         int fully_mapped = 1;
2118
2119         BUG_ON(!PageLocked(page));
2120         blocksize = 1 << inode->i_blkbits;
2121         if (!page_has_buffers(page))
2122                 create_empty_buffers(page, blocksize, 0);
2123         head = page_buffers(page);
2124
2125         iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2126         lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2127         bh = head;
2128         nr = 0;
2129         i = 0;
2130
2131         do {
2132                 if (buffer_uptodate(bh))
2133                         continue;
2134
2135                 if (!buffer_mapped(bh)) {
2136                         int err = 0;
2137
2138                         fully_mapped = 0;
2139                         if (iblock < lblock) {
2140                                 WARN_ON(bh->b_size != blocksize);
2141                                 err = get_block(inode, iblock, bh, 0);
2142                                 if (err)
2143                                         SetPageError(page);
2144                         }
2145                         if (!buffer_mapped(bh)) {
2146                                 zero_user(page, i * blocksize, blocksize);
2147                                 if (!err)
2148                                         set_buffer_uptodate(bh);
2149                                 continue;
2150                         }
2151                         /*
2152                          * get_block() might have updated the buffer
2153                          * synchronously
2154                          */
2155                         if (buffer_uptodate(bh))
2156                                 continue;
2157                 }
2158                 arr[nr++] = bh;
2159         } while (i++, iblock++, (bh = bh->b_this_page) != head);
2160
2161         if (fully_mapped)
2162                 SetPageMappedToDisk(page);
2163
2164         if (!nr) {
2165                 /*
2166                  * All buffers are uptodate - we can set the page uptodate
2167                  * as well. But not if get_block() returned an error.
2168                  */
2169                 if (!PageError(page))
2170                         SetPageUptodate(page);
2171                 unlock_page(page);
2172                 return 0;
2173         }
2174
2175         /* Stage two: lock the buffers */
2176         for (i = 0; i < nr; i++) {
2177                 bh = arr[i];
2178                 lock_buffer(bh);
2179                 mark_buffer_async_read(bh);
2180         }
2181
2182         /*
2183          * Stage 3: start the IO.  Check for uptodateness
2184          * inside the buffer lock in case another process reading
2185          * the underlying blockdev brought it uptodate (the sct fix).
2186          */
2187         for (i = 0; i < nr; i++) {
2188                 bh = arr[i];
2189                 if (buffer_uptodate(bh))
2190                         end_buffer_async_read(bh, 1);
2191                 else
2192                         submit_bh(READ, bh);
2193         }
2194         return 0;
2195 }
2196 EXPORT_SYMBOL(block_read_full_page);
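
/*
 * Illustrative sketch: a typical ->readpage built on block_read_full_page(),
 * again using the hypothetical myfs_get_block callback declared in the
 * sketch after generic_write_end().
 */
static int myfs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, myfs_get_block);
}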
2197
2198 /* utility function for filesystems that need to do work on expanding
2199  * truncates.  Uses filesystem pagecache writes to allow the filesystem to
2200  * deal with the hole.  
2201  */
2202 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2203 {
2204         struct address_space *mapping = inode->i_mapping;
2205         struct page *page;
2206         void *fsdata;
2207         int err;
2208
2209         err = inode_newsize_ok(inode, size);
2210         if (err)
2211                 goto out;
2212
2213         err = pagecache_write_begin(NULL, mapping, size, 0,
2214                                 AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2215                                 &page, &fsdata);
2216         if (err)
2217                 goto out;
2218
2219         err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2220         BUG_ON(err > 0);
2221
2222 out:
2223         return err;
2224 }
2225 EXPORT_SYMBOL(generic_cont_expand_simple);
2226
2227 static int cont_expand_zero(struct file *file, struct address_space *mapping,
2228                             loff_t pos, loff_t *bytes)
2229 {
2230         struct inode *inode = mapping->host;
2231         unsigned blocksize = 1 << inode->i_blkbits;
2232         struct page *page;
2233         void *fsdata;
2234         pgoff_t index, curidx;
2235         loff_t curpos;
2236         unsigned zerofrom, offset, len;
2237         int err = 0;
2238
2239         index = pos >> PAGE_CACHE_SHIFT;
2240         offset = pos & ~PAGE_CACHE_MASK;
2241
2242         while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
2243                 zerofrom = curpos & ~PAGE_CACHE_MASK;
2244                 if (zerofrom & (blocksize-1)) {
2245                         *bytes |= (blocksize-1);
2246                         (*bytes)++;
2247                 }
2248                 len = PAGE_CACHE_SIZE - zerofrom;
2249
2250                 err = pagecache_write_begin(file, mapping, curpos, len,
2251                                                 AOP_FLAG_UNINTERRUPTIBLE,
2252                                                 &page, &fsdata);
2253                 if (err)
2254                         goto out;
2255                 zero_user(page, zerofrom, len);
2256                 err = pagecache_write_end(file, mapping, curpos, len, len,
2257                                                 page, fsdata);
2258                 if (err < 0)
2259                         goto out;
2260                 BUG_ON(err != len);
2261                 err = 0;
2262
2263                 balance_dirty_pages_ratelimited(mapping);
2264         }
2265
2266         /* page covers the boundary, find the boundary offset */
2267         if (index == curidx) {
2268                 zerofrom = curpos & ~PAGE_CACHE_MASK;
2269                 /* if we are expanding the file, the last block will be filled */
2270                 if (offset <= zerofrom) {
2271                         goto out;
2272                 }
2273                 if (zerofrom & (blocksize-1)) {
2274                         *bytes |= (blocksize-1);
2275                         (*bytes)++;
2276                 }
2277                 len = offset - zerofrom;
2278
2279                 err = pagecache_write_begin(file, mapping, curpos, len,
2280                                                 AOP_FLAG_UNINTERRUPTIBLE,
2281                                                 &page, &fsdata);
2282                 if (err)
2283                         goto out;
2284                 zero_user(page, zerofrom, len);
2285                 err = pagecache_write_end(file, mapping, curpos, len, len,
2286                                                 page, fsdata);
2287                 if (err < 0)
2288                         goto out;
2289                 BUG_ON(err != len);
2290                 err = 0;
2291         }
2292 out:
2293         return err;
2294 }
2295
2296 /*
2297  * For moronic filesystems that do not allow holes in files.
2298  * We may have to extend the file.
2299  */
2300 int cont_write_begin(struct file *file, struct address_space *mapping,
2301                         loff_t pos, unsigned len, unsigned flags,
2302                         struct page **pagep, void **fsdata,
2303                         get_block_t *get_block, loff_t *bytes)
2304 {
2305         struct inode *inode = mapping->host;
2306         unsigned blocksize = 1 << inode->i_blkbits;
2307         unsigned zerofrom;
2308         int err;
2309
2310         err = cont_expand_zero(file, mapping, pos, bytes);
2311         if (err)
2312                 return err;
2313
2314         zerofrom = *bytes & ~PAGE_CACHE_MASK;
2315         if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2316                 *bytes |= (blocksize-1);
2317                 (*bytes)++;
2318         }
2319
2320         return block_write_begin(mapping, pos, len, flags, pagep, get_block);
2321 }
2322 EXPORT_SYMBOL(cont_write_begin);
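
/*
 * Illustrative sketch: ->write_begin for a filesystem that cannot represent
 * holes.  The final argument must point at the fs-private count of bytes
 * already allocated/zeroed on disk (FAT keeps this in mmu_private);
 * myfs_allocated_size() is a hypothetical accessor returning that field's
 * address, and myfs_get_block is the hypothetical callback from the earlier
 * sketch.
 */
extern loff_t *myfs_allocated_size(struct inode *inode);

static int myfs_cont_write_begin(struct file *file,
			struct address_space *mapping, loff_t pos,
			unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				myfs_get_block,
				myfs_allocated_size(mapping->host));
}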
2323
2324 int block_commit_write(struct page *page, unsigned from, unsigned to)
2325 {
2326         struct inode *inode = page->mapping->host;
2327         __block_commit_write(inode,page,from,to);
2328         return 0;
2329 }
2330 EXPORT_SYMBOL(block_commit_write);
2331
2332 /*
2333  * block_page_mkwrite() is not allowed to change the file size as it gets
2334  * called from a page fault handler when a page is first dirtied. Hence we must
2335  * be careful to check for EOF conditions here. We set the page up correctly
2336  * for a written page which means we get ENOSPC checking when writing into
2337  * holes and correct delalloc and unwritten extent mapping on filesystems that
2338  * support these features.
2339  *
2340  * We are not allowed to take the i_mutex here so we have to play games to
2341  * protect against truncate races as the page could now be beyond EOF.  Because
2342  * truncate writes the inode size before removing pages, once we have the
2343  * page lock we can determine safely if the page is beyond EOF. If it is not
2344  * beyond EOF, then the page is guaranteed safe against truncation until we
2345  * unlock the page.
2346  */
2347 int
2348 block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2349                    get_block_t get_block)
2350 {
2351         struct page *page = vmf->page;
2352         struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2353         unsigned long end;
2354         loff_t size;
2355         int ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
2356
2357         lock_page(page);
2358         size = i_size_read(inode);
2359         if ((page->mapping != inode->i_mapping) ||
2360             (page_offset(page) > size)) {
2361                 /* page got truncated out from underneath us */
2362                 unlock_page(page);
2363                 goto out;
2364         }
2365
2366         /* page is wholly or partially inside EOF */
2367         if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2368                 end = size & ~PAGE_CACHE_MASK;
2369         else
2370                 end = PAGE_CACHE_SIZE;
2371
2372         ret = __block_write_begin(page, 0, end, get_block);
2373         if (!ret)
2374                 ret = block_commit_write(page, 0, end);
2375
2376         if (unlikely(ret)) {
2377                 unlock_page(page);
2378                 if (ret == -ENOMEM)
2379                         ret = VM_FAULT_OOM;
2380                 else /* -ENOSPC, -EIO, etc */
2381                         ret = VM_FAULT_SIGBUS;
2382         } else
2383                 ret = VM_FAULT_LOCKED;
2384
2385 out:
2386         return ret;
2387 }
2388 EXPORT_SYMBOL(block_page_mkwrite);
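
/*
 * Illustrative sketch: a minimal ->page_mkwrite handler that simply defers
 * to block_page_mkwrite() with the hypothetical myfs_get_block callback.
 */
static int myfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return block_page_mkwrite(vma, vmf, myfs_get_block);
}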
2389
2390 /*
2391  * nobh_write_begin()'s prereads are special: the buffer_heads are freed
2392  * immediately, while under the page lock.  So it needs a special end_io
2393  * handler which does not touch the bh after unlocking it.
2394  */
2395 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2396 {
2397         __end_buffer_read_notouch(bh, uptodate);
2398 }
2399
2400 /*
2401  * Attach the singly-linked list of buffers created by nobh_write_begin to
2402  * the page (converting it to a circular linked list and taking care of page
2403  * dirty races).
2404  */
2405 static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2406 {
2407         struct buffer_head *bh;
2408
2409         BUG_ON(!PageLocked(page));
2410
2411         spin_lock(&page->mapping->private_lock);
2412         bh = head;
2413         do {
2414                 if (PageDirty(page))
2415                         set_buffer_dirty(bh);
2416                 if (!bh->b_this_page)
2417                         bh->b_this_page = head;
2418                 bh = bh->b_this_page;
2419         } while (bh != head);
2420         attach_page_buffers(page, head);
2421         spin_unlock(&page->mapping->private_lock);
2422 }
2423
2424 /*
2425  * On entry, the page is not uptodate at all.
2426  * On exit, the page is fully uptodate in the areas outside (from,to).
2427  * The filesystem needs to handle block truncation upon failure.
2428  */
2429 int nobh_write_begin(struct address_space *mapping,
2430                         loff_t pos, unsigned len, unsigned flags,
2431                         struct page **pagep, void **fsdata,
2432                         get_block_t *get_block)
2433 {
2434         struct inode *inode = mapping->host;
2435         const unsigned blkbits = inode->i_blkbits;
2436         const unsigned blocksize = 1 << blkbits;
2437         struct buffer_head *head, *bh;
2438         struct page *page;
2439         pgoff_t index;
2440         unsigned from, to;
2441         unsigned block_in_page;
2442         unsigned block_start, block_end;
2443         sector_t block_in_file;
2444         int nr_reads = 0;
2445         int ret = 0;
2446         int is_mapped_to_disk = 1;
2447
2448         index = pos >> PAGE_CACHE_SHIFT;
2449         from = pos & (PAGE_CACHE_SIZE - 1);
2450         to = from + len;
2451
2452         page = grab_cache_page_write_begin(mapping, index, flags);
2453         if (!page)
2454                 return -ENOMEM;
2455         *pagep = page;
2456         *fsdata = NULL;
2457
2458         if (page_has_buffers(page)) {
2459                 ret = __block_write_begin(page, pos, len, get_block);
2460                 if (unlikely(ret))
2461                         goto out_release;
2462                 return ret;
2463         }
2464
2465         if (PageMappedToDisk(page))
2466                 return 0;
2467
2468         /*
2469          * Allocate buffers so that we can keep track of state, and potentially
2470          * attach them to the page if an error occurs. In the common case of
2471          * no error, they will just be freed again without ever being attached
2472          * to the page (which is all OK, because we're under the page lock).
2473          *
2474          * Be careful: the buffer linked list is a NULL terminated one, rather
2475          * than the circular one we're used to.
2476          */
2477         head = alloc_page_buffers(page, blocksize, 0);
2478         if (!head) {
2479                 ret = -ENOMEM;
2480                 goto out_release;
2481         }
2482
2483         block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2484
2485         /*
2486          * We loop across all blocks in the page, whether or not they are
2487          * part of the affected region.  This is so we can discover if the
2488          * page is fully mapped-to-disk.
2489          */
2490         for (block_start = 0, block_in_page = 0, bh = head;
2491                   block_start < PAGE_CACHE_SIZE;
2492                   block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
2493                 int create;
2494
2495                 block_end = block_start + blocksize;
2496                 bh->b_state = 0;
2497                 create = 1;
2498                 if (block_start >= to)
2499                         create = 0;
2500                 ret = get_block(inode, block_in_file + block_in_page,
2501                                         bh, create);
2502                 if (ret)
2503                         goto failed;
2504                 if (!buffer_mapped(bh))
2505                         is_mapped_to_disk = 0;
2506                 if (buffer_new(bh))
2507                         unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2508                 if (PageUptodate(page)) {
2509                         set_buffer_uptodate(bh);
2510                         continue;
2511                 }
2512                 if (buffer_new(bh) || !buffer_mapped(bh)) {
2513                         zero_user_segments(page, block_start, from,
2514                                                         to, block_end);
2515                         continue;
2516                 }
2517                 if (buffer_uptodate(bh))
2518                         continue;       /* reiserfs does this */
2519                 if (block_start < from || block_end > to) {
2520                         lock_buffer(bh);
2521                         bh->b_end_io = end_buffer_read_nobh;
2522                         submit_bh(READ, bh);
2523                         nr_reads++;
2524                 }
2525         }
2526
2527         if (nr_reads) {
2528                 /*
2529                  * The page is locked, so these buffers are protected from
2530                  * any VM or truncate activity.  Hence we don't need to care
2531                  * for the buffer_head refcounts.
2532                  */
2533                 for (bh = head; bh; bh = bh->b_this_page) {
2534                         wait_on_buffer(bh);
2535                         if (!buffer_uptodate(bh))
2536                                 ret = -EIO;
2537                 }
2538                 if (ret)
2539                         goto failed;
2540         }
2541
2542         if (is_mapped_to_disk)
2543                 SetPageMappedToDisk(page);
2544
2545         *fsdata = head; /* to be released by nobh_write_end */
2546
2547         return 0;
2548
2549 failed:
2550         BUG_ON(!ret);
2551         /*
2552          * Error recovery is a bit difficult. We need to zero out blocks that
2553          * were newly allocated, and dirty them to ensure they get written out.
2554          * Buffers need to be attached to the page at this point, otherwise
2555          * the handling of potential IO errors during writeout would be hard
2556          * (could try doing synchronous writeout, but what if that fails too?)
2557          */
2558         attach_nobh_buffers(page, head);
2559         page_zero_new_buffers(page, from, to);
2560
2561 out_release:
2562         unlock_page(page);
2563         page_cache_release(page);
2564         *pagep = NULL;
2565
2566         return ret;
2567 }
2568 EXPORT_SYMBOL(nobh_write_begin);
2569
2570 int nobh_write_end(struct file *file, struct address_space *mapping,
2571                         loff_t pos, unsigned len, unsigned copied,
2572                         struct page *page, void *fsdata)
2573 {
2574         struct inode *inode = page->mapping->host;
2575         struct buffer_head *head = fsdata;
2576         struct buffer_head *bh;
2577         BUG_ON(fsdata != NULL && page_has_buffers(page));
2578
2579         if (unlikely(copied < len) && head)
2580                 attach_nobh_buffers(page, head);
2581         if (page_has_buffers(page))
2582                 return generic_write_end(file, mapping, pos, len,
2583                                         copied, page, fsdata);
2584
2585         SetPageUptodate(page);
2586         set_page_dirty(page);
2587         if (pos+copied > inode->i_size) {
2588                 i_size_write(inode, pos+copied);
2589                 mark_inode_dirty(inode);
2590         }
2591
2592         unlock_page(page);
2593         page_cache_release(page);
2594
2595         while (head) {
2596                 bh = head;
2597                 head = head->b_this_page;
2598                 free_buffer_head(bh);
2599         }
2600
2601         return copied;
2602 }
2603 EXPORT_SYMBOL(nobh_write_end);
2604
2605 /*
2606  * nobh_writepage() - based on block_write_full_page() except
2607  * that it tries to operate without attaching bufferheads to
2608  * the page.
2609  */
2610 int nobh_writepage(struct page *page, get_block_t *get_block,
2611                         struct writeback_control *wbc)
2612 {
2613         struct inode * const inode = page->mapping->host;
2614         loff_t i_size = i_size_read(inode);
2615         const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2616         unsigned offset;
2617         int ret;
2618
2619         /* Is the page fully inside i_size? */
2620         if (page->index < end_index)
2621                 goto out;
2622
2623         /* Is the page fully outside i_size? (truncate in progress) */
2624         offset = i_size & (PAGE_CACHE_SIZE-1);
2625         if (page->index >= end_index+1 || !offset) {
2626                 /*
2627                  * The page may have dirty, unmapped buffers.  For example,
2628                  * they may have been added in ext3_writepage().  Make them
2629                  * freeable here, so the page does not leak.
2630                  */
2631 #if 0
2632                 /* Not really sure about this  - do we need this ? */
2633                 if (page->mapping->a_ops->invalidatepage)
2634                         page->mapping->a_ops->invalidatepage(page, offset);
2635 #endif
2636                 unlock_page(page);
2637                 return 0; /* don't care */
2638         }
2639
2640         /*
2641          * The page straddles i_size.  It must be zeroed out on each and every
2642          * writepage invocation because it may be mmapped.  "A file is mapped
2643          * in multiples of the page size.  For a file that is not a multiple of
2644          * the  page size, the remaining memory is zeroed when mapped, and
2645          * writes to that region are not written out to the file."
2646          */
2647         zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2648 out:
2649         ret = mpage_writepage(page, get_block, wbc);
2650         if (ret == -EAGAIN)
2651                 ret = __block_write_full_page(inode, page, get_block, wbc,
2652                                               end_buffer_async_write);
2653         return ret;
2654 }
2655 EXPORT_SYMBOL(nobh_writepage);
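
/*
 * Illustrative note: the nobh_* helpers above are normally wired together as
 * a set in a filesystem's address_space operations - ->write_begin calling
 * nobh_write_begin(), ->write_end set to nobh_write_end, and ->writepage
 * calling nobh_writepage() - all with the same hypothetical myfs_get_block
 * callback used in the earlier sketches.
 */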
2656
2657 int nobh_truncate_page(struct address_space *mapping,
2658                         loff_t from, get_block_t *get_block)
2659 {
2660         pgoff_t index = from >> PAGE_CACHE_SHIFT;
2661         unsigned offset = from & (PAGE_CACHE_SIZE-1);
2662         unsigned blocksize;
2663         sector_t iblock;
2664         unsigned length, pos;
2665         struct inode *inode = mapping->host;
2666         struct page *page;
2667         struct buffer_head map_bh;
2668         int err;
2669
2670         blocksize = 1 << inode->i_blkbits;
2671         length = offset & (blocksize - 1);
2672
2673         /* Block boundary? Nothing to do */
2674         if (!length)
2675                 return 0;
2676
2677         length = blocksize - length;
2678         iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2679
2680         page = grab_cache_page(mapping, index);
2681         err = -ENOMEM;
2682         if (!page)
2683                 goto out;
2684
2685         if (page_has_buffers(page)) {
2686 has_buffers:
2687                 unlock_page(page);
2688                 page_cache_release(page);
2689                 return block_truncate_page(mapping, from, get_block);
2690         }
2691
2692         /* Find the buffer that contains "offset" */
2693         pos = blocksize;
2694         while (offset >= pos) {
2695                 iblock++;
2696                 pos += blocksize;
2697         }
2698
2699         map_bh.b_size = blocksize;
2700         map_bh.b_state = 0;
2701         err = get_block(inode, iblock, &map_bh, 0);
2702         if (err)
2703                 goto unlock;
2704         /* unmapped? It's a hole - nothing to do */
2705         if (!buffer_mapped(&map_bh))
2706                 goto unlock;
2707
2708         /* Ok, it's mapped. Make sure it's up-to-date */
2709         if (!PageUptodate(page)) {
2710                 err = mapping->a_ops->readpage(NULL, page);
2711                 if (err) {
2712                         page_cache_release(page);
2713                         goto out;
2714                 }
2715                 lock_page(page);
2716                 if (!PageUptodate(page)) {
2717                         err = -EIO;
2718                         goto unlock;
2719                 }
2720                 if (page_has_buffers(page))
2721                         goto has_buffers;
2722         }
2723         zero_user(page, offset, length);
2724         set_page_dirty(page);
2725         err = 0;
2726
2727 unlock:
2728         unlock_page(page);
2729         page_cache_release(page);
2730 out:
2731         return err;
2732 }
2733 EXPORT_SYMBOL(nobh_truncate_page);
2734
2735 int block_truncate_page(struct address_space *mapping,
2736                         loff_t from, get_block_t *get_block)
2737 {
2738         pgoff_t index = from >> PAGE_CACHE_SHIFT;
2739         unsigned offset = from & (PAGE_CACHE_SIZE-1);
2740         unsigned blocksize;
2741         sector_t iblock;
2742         unsigned length, pos;
2743         struct inode *inode = mapping->host;
2744         struct page *page;
2745         struct buffer_head *bh;
2746         int err;
2747
2748         blocksize = 1 << inode->i_blkbits;
2749         length = offset & (blocksize - 1);
2750
2751         /* Block boundary? Nothing to do */
2752         if (!length)
2753                 return 0;
2754
2755         length = blocksize - length;
2756         iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2757         
2758         page = grab_cache_page(mapping, index);
2759         err = -ENOMEM;
2760         if (!page)
2761                 goto out;
2762
2763         if (!page_has_buffers(page))
2764                 create_empty_buffers(page, blocksize, 0);
2765
2766         /* Find the buffer that contains "offset" */
2767         bh = page_buffers(page);
2768         pos = blocksize;
2769         while (offset >= pos) {
2770                 bh = bh->b_this_page;
2771                 iblock++;
2772                 pos += blocksize;
2773         }
2774
2775         err = 0;
2776         if (!buffer_mapped(bh)) {
2777                 WARN_ON(bh->b_size != blocksize);
2778                 err = get_block(inode, iblock, bh, 0);
2779                 if (err)
2780                         goto unlock;
2781                 /* unmapped? It's a hole - nothing to do */
2782                 if (!buffer_mapped(bh))
2783                         goto unlock;
2784         }
2785
2786         /* Ok, it's mapped. Make sure it's up-to-date */
2787         if (PageUptodate(page))
2788                 set_buffer_uptodate(bh);
2789
2790         if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2791                 err = -EIO;
2792                 ll_rw_block(READ, 1, &bh);
2793                 wait_on_buffer(bh);
2794                 /* Uhhuh. Read error. Complain and punt. */
2795                 if (!buffer_uptodate(bh))
2796                         goto unlock;
2797         }
2798
2799         zero_user(page, offset, length);
2800         mark_buffer_dirty(bh);
2801         err = 0;
2802
2803 unlock:
2804         unlock_page(page);
2805         page_cache_release(page);
2806 out:
2807         return err;
2808 }
2809 EXPORT_SYMBOL(block_truncate_page);
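
/*
 * Illustrative sketch: zeroing the tail of the last partial block when a
 * file is shrunk, as a truncate path would do before freeing blocks beyond
 * the new size.  myfs_zero_partial_block() is hypothetical and uses the
 * myfs_get_block callback from the earlier sketch.
 */
static int myfs_zero_partial_block(struct inode *inode, loff_t newsize)
{
	return block_truncate_page(inode->i_mapping, newsize, myfs_get_block);
}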
2810
2811 /*
2812  * The generic ->writepage function for buffer-backed address_spaces
2813  * this form passes in the end_io handler used to finish the IO.
2814  */
2815 int block_write_full_page_endio(struct page *page, get_block_t *get_block,
2816                         struct writeback_control *wbc, bh_end_io_t *handler)
2817 {
2818         struct inode * const inode = page->mapping->host;
2819         loff_t i_size = i_size_read(inode);
2820         const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2821         unsigned offset;
2822
2823         /* Is the page fully inside i_size? */
2824         if (page->index < end_index)
2825                 return __block_write_full_page(inode, page, get_block, wbc,
2826                                                handler);
2827
2828         /* Is the page fully outside i_size? (truncate in progress) */
2829         offset = i_size & (PAGE_CACHE_SIZE-1);
2830         if (page->index >= end_index+1 || !offset) {
2831                 /*
2832                  * The page may have dirty, unmapped buffers.  For example,
2833                  * they may have been added in ext3_writepage().  Make them
2834                  * freeable here, so the page does not leak.
2835                  */
2836                 do_invalidatepage(page, 0);
2837                 unlock_page(page);
2838                 return 0; /* don't care */
2839         }
2840
2841         /*
2842          * The page straddles i_size.  It must be zeroed out on each and every
2843          * writepage invocation because it may be mmapped.  "A file is mapped
2844          * in multiples of the page size.  For a file that is not a multiple of
2845          * the  page size, the remaining memory is zeroed when mapped, and
2846          * writes to that region are not written out to the file."
2847          */
2848         zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2849         return __block_write_full_page(inode, page, get_block, wbc, handler);
2850 }
2851 EXPORT_SYMBOL(block_write_full_page_endio);
2852
2853 /*
2854  * The generic ->writepage function for buffer-backed address_spaces
2855  */
2856 int block_write_full_page(struct page *page, get_block_t *get_block,
2857                         struct writeback_control *wbc)
2858 {
2859         return block_write_full_page_endio(page, get_block, wbc,
2860                                            end_buffer_async_write);
2861 }
2862 EXPORT_SYMBOL(block_write_full_page);
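
/*
 * Illustrative sketch: a typical ->writepage that defers to
 * block_write_full_page() with the hypothetical myfs_get_block callback.
 */
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, myfs_get_block, wbc);
}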
2863
2864 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2865                             get_block_t *get_block)
2866 {
2867         struct buffer_head tmp;
2868         struct inode *inode = mapping->host;
2869         tmp.b_state = 0;
2870         tmp.b_blocknr = 0;
2871         tmp.b_size = 1 << inode->i_blkbits;
2872         get_block(inode, block, &tmp, 0);
2873         return tmp.b_blocknr;
2874 }
2875 EXPORT_SYMBOL(generic_block_bmap);
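
/*
 * Illustrative sketch: a ->bmap method that maps a file-relative block
 * number to a device block via generic_block_bmap(), again with the
 * hypothetical myfs_get_block callback.
 */
static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, myfs_get_block);
}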
2876
2877 static void end_bio_bh_io_sync(struct bio *bio, int err)
2878 {
2879         struct buffer_head *bh = bio->bi_private;
2880
2881         if (err == -EOPNOTSUPP) {
2882                 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2883         }
2884
2885         if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags)))
2886                 set_bit(BH_Quiet, &bh->b_state);
2887
2888         bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2889         bio_put(bio);
2890 }
2891
2892 int submit_bh(int rw, struct buffer_head * bh)
2893 {
2894         struct bio *bio;
2895         int ret = 0;
2896
2897         BUG_ON(!buffer_locked(bh));
2898         BUG_ON(!buffer_mapped(bh));
2899         BUG_ON(!bh->b_end_io);
2900         BUG_ON(buffer_delay(bh));
2901         BUG_ON(buffer_unwritten(bh));
2902
2903         /*
2904          * Only clear out a write error when rewriting
2905          */
2906         if (test_set_buffer_req(bh) && (rw & WRITE))
2907                 clear_buffer_write_io_error(bh);
2908
2909         /*
2910          * from here on down, it's all bio -- do the initial mapping,
2911          * submit_bio -> generic_make_request may further map this bio around
2912          */
2913         bio = bio_alloc(GFP_NOIO, 1);
2914
2915         bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2916         bio->bi_bdev = bh->b_bdev;
2917         bio->bi_io_vec[0].bv_page = bh->b_page;
2918         bio->bi_io_vec[0].bv_len = bh->b_size;
2919         bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2920
2921         bio->bi_vcnt = 1;
2922         bio->bi_idx = 0;
2923         bio->bi_size = bh->b_size;
2924
2925         bio->bi_end_io = end_bio_bh_io_sync;
2926         bio->bi_private = bh;
2927
2928         bio_get(bio);
2929         submit_bio(rw, bio);
2930
2931         if (bio_flagged(bio, BIO_EOPNOTSUPP))
2932                 ret = -EOPNOTSUPP;
2933
2934         bio_put(bio);
2935         return ret;
2936 }
2937 EXPORT_SYMBOL(submit_bh);
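
/*
 * Illustrative sketch, not part of the original file: the canonical
 * calling convention for submit_bh().  The caller locks the buffer,
 * installs a completion handler, takes an extra reference for the I/O,
 * submits, and (for a synchronous caller) waits for completion.  This
 * mirrors what bh_submit_read() and friends below do.
 */
static int example_read_bh_sync(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}
	get_bh(bh);
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	return buffer_uptodate(bh) ? 0 : -EIO;
}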
2938
2939 /**
2940  * ll_rw_block: low-level access to block devices (DEPRECATED)
2941  * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
2942  * @nr: number of &struct buffer_heads in the array
2943  * @bhs: array of pointers to &struct buffer_head
2944  *
2945  * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2946  * requests an I/O operation on them, either a %READ or a %WRITE.  The third
2947  * %READA option is described in the documentation for generic_make_request()
2948  * which ll_rw_block() calls.
2949  *
2950  * This function drops any buffer that it cannot get a lock on (with the
2951  * BH_Lock state bit), any buffer that appears to be clean when doing a write
2952  * request, and any buffer that appears to be up-to-date when doing a read
2953  * request.  Further it marks as clean buffers that are processed for
2954  * writing (the buffer cache won't assume that they are actually clean
2955  * until the buffer gets unlocked).
2956  *
2957  * ll_rw_block sets b_end_io to a simple completion handler that marks
2958  * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2959  * any waiters.
2960  *
2961  * All of the buffers must be for the same device, and must also be a
2962  * multiple of the current approved size for the device.
2963  */
2964 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2965 {
2966         int i;
2967
2968         for (i = 0; i < nr; i++) {
2969                 struct buffer_head *bh = bhs[i];
2970
2971                 if (!trylock_buffer(bh))
2972                         continue;
2973                 if (rw == WRITE) {
2974                         if (test_clear_buffer_dirty(bh)) {
2975                                 bh->b_end_io = end_buffer_write_sync;
2976                                 get_bh(bh);
2977                                 submit_bh(WRITE, bh);
2978                                 continue;
2979                         }
2980                 } else {
2981                         if (!buffer_uptodate(bh)) {
2982                                 bh->b_end_io = end_buffer_read_sync;
2983                                 get_bh(bh);
2984                                 submit_bh(rw, bh);
2985                                 continue;
2986                         }
2987                 }
2988                 unlock_buffer(bh);
2989         }
2990 }
2991 EXPORT_SYMBOL(ll_rw_block);
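
/*
 * Illustrative sketch, not part of the original file: a typical use of
 * ll_rw_block() is to kick off reads on a whole batch of buffers at
 * once and then wait for (and check) each one the caller actually
 * needs.
 */
static int example_read_batch(struct buffer_head *bhs[], int nr)
{
	int i;

	ll_rw_block(READ, nr, bhs);
	for (i = 0; i < nr; i++) {
		wait_on_buffer(bhs[i]);
		if (!buffer_uptodate(bhs[i]))
			return -EIO;
	}
	return 0;
}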
2992
2993 void write_dirty_buffer(struct buffer_head *bh, int rw)
2994 {
2995         lock_buffer(bh);
2996         if (!test_clear_buffer_dirty(bh)) {
2997                 unlock_buffer(bh);
2998                 return;
2999         }
3000         bh->b_end_io = end_buffer_write_sync;
3001         get_bh(bh);
3002         submit_bh(rw, bh);
3003 }
3004 EXPORT_SYMBOL(write_dirty_buffer);
3005
3006 /*
3007  * For a data-integrity writeout, we need to wait upon any in-progress I/O
3008  * and then start new I/O and then wait upon it.  The caller must have a ref on
3009  * the buffer_head.
3010  */
3011 int __sync_dirty_buffer(struct buffer_head *bh, int rw)
3012 {
3013         int ret = 0;
3014
3015         WARN_ON(atomic_read(&bh->b_count) < 1);
3016         lock_buffer(bh);
3017         if (test_clear_buffer_dirty(bh)) {
3018                 get_bh(bh);
3019                 bh->b_end_io = end_buffer_write_sync;
3020                 ret = submit_bh(rw, bh);
3021                 wait_on_buffer(bh);
3022                 if (!ret && !buffer_uptodate(bh))
3023                         ret = -EIO;
3024         } else {
3025                 unlock_buffer(bh);
3026         }
3027         return ret;
3028 }
3029 EXPORT_SYMBOL(__sync_dirty_buffer);
3030
3031 int sync_dirty_buffer(struct buffer_head *bh)
3032 {
3033         return __sync_dirty_buffer(bh, WRITE_SYNC);
3034 }
3035 EXPORT_SYMBOL(sync_dirty_buffer);
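
/*
 * Illustrative sketch, not part of the original file: a filesystem that
 * has modified a metadata buffer and must have it on stable storage
 * before proceeding would dirty the buffer and then call
 * sync_dirty_buffer(), checking the return value for write errors.
 */
static int example_commit_metadata(struct buffer_head *bh)
{
	mark_buffer_dirty(bh);
	return sync_dirty_buffer(bh);	/* 0 on success, -EIO on failure */
}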
3036
3037 /*
3038  * try_to_free_buffers() checks if all the buffers on this particular page
3039  * are unused, and releases them if so.
3040  *
3041  * Exclusion against try_to_free_buffers may be obtained by either
3042  * locking the page or by holding its mapping's private_lock.
3043  *
3044  * If the page is dirty but all the buffers are clean then we need to
3045  * be sure to mark the page clean as well.  This is because the page
3046  * may be against a block device, and a later reattachment of buffers
3047  * to a dirty page will set *all* buffers dirty.  Which would corrupt
3048  * filesystem data on the same device.
3049  *
3050  * The same applies to regular filesystem pages: if all the buffers are
3051  * clean then we set the page clean and proceed.  To do that, we require
3052  * total exclusion from __set_page_dirty_buffers().  That is obtained with
3053  * private_lock.
3054  *
3055  * try_to_free_buffers() is non-blocking.
3056  */
3057 static inline int buffer_busy(struct buffer_head *bh)
3058 {
3059         return atomic_read(&bh->b_count) |
3060                 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
3061 }
3062
3063 static int
3064 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
3065 {
3066         struct buffer_head *head = page_buffers(page);
3067         struct buffer_head *bh;
3068
3069         bh = head;
3070         do {
3071                 if (buffer_write_io_error(bh) && page->mapping)
3072                         set_bit(AS_EIO, &page->mapping->flags);
3073                 if (buffer_busy(bh))
3074                         goto failed;
3075                 bh = bh->b_this_page;
3076         } while (bh != head);
3077
3078         do {
3079                 struct buffer_head *next = bh->b_this_page;
3080
3081                 if (bh->b_assoc_map)
3082                         __remove_assoc_queue(bh);
3083                 bh = next;
3084         } while (bh != head);
3085         *buffers_to_free = head;
3086         __clear_page_buffers(page);
3087         return 1;
3088 failed:
3089         return 0;
3090 }
3091
3092 int try_to_free_buffers(struct page *page)
3093 {
3094         struct address_space * const mapping = page->mapping;
3095         struct buffer_head *buffers_to_free = NULL;
3096         int ret = 0;
3097
3098         BUG_ON(!PageLocked(page));
3099         if (PageWriteback(page))
3100                 return 0;
3101
3102         if (mapping == NULL) {          /* can this still happen? */
3103                 ret = drop_buffers(page, &buffers_to_free);
3104                 goto out;
3105         }
3106
3107         spin_lock(&mapping->private_lock);
3108         ret = drop_buffers(page, &buffers_to_free);
3109
3110         /*
3111          * If the filesystem writes its buffers by hand (eg ext3)
3112          * then we can have clean buffers against a dirty page.  We
3113          * clean the page here; otherwise the VM will never notice
3114          * that the filesystem did any IO at all.
3115          *
3116          * Also, during truncate, discard_buffer will have marked all
3117          * the page's buffers clean.  We discover that here and clean
3118          * the page also.
3119          *
3120          * private_lock must be held over this entire operation in order
3121          * to synchronise against __set_page_dirty_buffers and prevent the
3122          * dirty bit from being lost.
3123          */
3124         if (ret)
3125                 cancel_dirty_page(page, PAGE_CACHE_SIZE);
3126         spin_unlock(&mapping->private_lock);
3127 out:
3128         if (buffers_to_free) {
3129                 struct buffer_head *bh = buffers_to_free;
3130
3131                 do {
3132                         struct buffer_head *next = bh->b_this_page;
3133                         free_buffer_head(bh);
3134                         bh = next;
3135                 } while (bh != buffers_to_free);
3136         }
3137         return ret;
3138 }
3139 EXPORT_SYMBOL(try_to_free_buffers);
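
/*
 * Illustrative sketch, not part of the original file: a filesystem with
 * no journalling hooks can implement ->releasepage as a thin wrapper
 * around try_to_free_buffers() (the gfp mask is not needed here).
 */
static int example_releasepage(struct page *page, gfp_t gfp_mask)
{
	if (PagePrivate(page))
		return try_to_free_buffers(page);
	return 1;
}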
3140
3141 void block_sync_page(struct page *page)
3142 {
3143         struct address_space *mapping;
3144
3145         smp_mb();
3146         mapping = page_mapping(page);
3147         if (mapping)
3148                 blk_run_backing_dev(mapping->backing_dev_info, page);
3149 }
3150 EXPORT_SYMBOL(block_sync_page);
3151
3152 /*
3153  * There are no bdflush tunables left.  But distributions are
3154  * still running obsolete flush daemons, so we terminate them here.
3155  *
3156  * Use of bdflush() is deprecated and will be removed in a future kernel.
3157  * The `flush-X' kernel threads fully replace bdflush daemons and this call.
3158  */
3159 SYSCALL_DEFINE2(bdflush, int, func, long, data)
3160 {
3161         static int msg_count;
3162
3163         if (!capable(CAP_SYS_ADMIN))
3164                 return -EPERM;
3165
3166         if (msg_count < 5) {
3167                 msg_count++;
3168                 printk(KERN_INFO
3169                         "warning: process `%s' used the obsolete bdflush"
3170                         " system call\n", current->comm);
3171                 printk(KERN_INFO "Fix your initscripts?\n");
3172         }
3173
3174         if (func == 1)
3175                 do_exit(0);
3176         return 0;
3177 }
3178
3179 /*
3180  * Buffer-head allocation
3181  */
3182 static struct kmem_cache *bh_cachep;
3183
3184 /*
3185  * Once the number of bh's in the machine exceeds this level, we start
3186  * stripping them in writeback.
3187  */
3188 static int max_buffer_heads;
3189
3190 int buffer_heads_over_limit;
3191
3192 struct bh_accounting {
3193         int nr;                 /* Number of live bh's */
3194         int ratelimit;          /* Limit cacheline bouncing */
3195 };
3196
3197 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3198
3199 static void recalc_bh_state(void)
3200 {
3201         int i;
3202         int tot = 0;
3203
3204         if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
3205                 return;
3206         __this_cpu_write(bh_accounting.ratelimit, 0);
3207         for_each_online_cpu(i)
3208                 tot += per_cpu(bh_accounting, i).nr;
3209         buffer_heads_over_limit = (tot > max_buffer_heads);
3210 }
3211
3212 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3213 {
3214         struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
3215         if (ret) {
3216                 INIT_LIST_HEAD(&ret->b_assoc_buffers);
3217                 preempt_disable();
3218                 __this_cpu_inc(bh_accounting.nr);
3219                 recalc_bh_state();
3220                 preempt_enable();
3221         }
3222         return ret;
3223 }
3224 EXPORT_SYMBOL(alloc_buffer_head);
3225
3226 void free_buffer_head(struct buffer_head *bh)
3227 {
3228         BUG_ON(!list_empty(&bh->b_assoc_buffers));
3229         kmem_cache_free(bh_cachep, bh);
3230         preempt_disable();
3231         __this_cpu_dec(bh_accounting.nr);
3232         recalc_bh_state();
3233         preempt_enable();
3234 }
3235 EXPORT_SYMBOL(free_buffer_head);
3236
3237 static void buffer_exit_cpu(int cpu)
3238 {
3239         int i;
3240         struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3241
3242         for (i = 0; i < BH_LRU_SIZE; i++) {
3243                 brelse(b->bhs[i]);
3244                 b->bhs[i] = NULL;
3245         }
3246         this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
3247         per_cpu(bh_accounting, cpu).nr = 0;
3248 }
3249
3250 static int buffer_cpu_notify(struct notifier_block *self,
3251                               unsigned long action, void *hcpu)
3252 {
3253         if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
3254                 buffer_exit_cpu((unsigned long)hcpu);
3255         return NOTIFY_OK;
3256 }
3257
3258 /**
3259  * bh_uptodate_or_lock - Test whether the buffer is uptodate
3260  * @bh: struct buffer_head
3261  *
3262  * Return true if the buffer is up-to-date; otherwise return
3263  * false with the buffer locked.
3264  */
3265 int bh_uptodate_or_lock(struct buffer_head *bh)
3266 {
3267         if (!buffer_uptodate(bh)) {
3268                 lock_buffer(bh);
3269                 if (!buffer_uptodate(bh))
3270                         return 0;
3271                 unlock_buffer(bh);
3272         }
3273         return 1;
3274 }
3275 EXPORT_SYMBOL(bh_uptodate_or_lock);
3276
3277 /**
3278  * bh_submit_read - Submit a locked buffer for reading
3279  * @bh: struct buffer_head
3280  *
3281  * Returns zero on success and -EIO on error.
3282  */
3283 int bh_submit_read(struct buffer_head *bh)
3284 {
3285         BUG_ON(!buffer_locked(bh));
3286
3287         if (buffer_uptodate(bh)) {
3288                 unlock_buffer(bh);
3289                 return 0;
3290         }
3291
3292         get_bh(bh);
3293         bh->b_end_io = end_buffer_read_sync;
3294         submit_bh(READ, bh);
3295         wait_on_buffer(bh);
3296         if (buffer_uptodate(bh))
3297                 return 0;
3298         return -EIO;
3299 }
3300 EXPORT_SYMBOL(bh_submit_read);
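
/*
 * Illustrative sketch, not part of the original file: the two helpers
 * above are meant to be used together - test (and possibly lock) the
 * buffer first, and only issue a read if it was not already up to date.
 */
static int example_read_if_needed(struct buffer_head *bh)
{
	if (bh_uptodate_or_lock(bh))
		return 0;		/* already up to date */
	return bh_submit_read(bh);	/* 0 on success, -EIO on error */
}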
3301
3302 void __init buffer_init(void)
3303 {
3304         int nrpages;
3305
3306         bh_cachep = kmem_cache_create("buffer_head",
3307                         sizeof(struct buffer_head), 0,
3308                                 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3309                                 SLAB_MEM_SPREAD),
3310                                 NULL);
3311
3312         /*
3313          * Limit the bh occupancy to 10% of ZONE_NORMAL
3314          */
3315         nrpages = (nr_free_buffer_pages() * 10) / 100;
3316         max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3317         hotcpu_notifier(buffer_cpu_notify, 0);
3318 }
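
/*
 * Worked example (added for exposition; the figures are assumptions):
 * with 4KB pages and a buffer_head of roughly 100 bytes, each page
 * holds about 40 buffer heads.  A machine with 1,000,000 free buffer
 * pages would then get nrpages = 100,000 and max_buffer_heads of
 * roughly 4,000,000, beyond which writeback starts stripping buffer
 * heads.
 */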