/*
 *      linux/mm/filemap.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't /have/ to use this:
 * the NFS filesystem used to do this differently, for example)
 */
#include <linux/export.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/aio.h>
#include <linux/capability.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/hash.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/cpuset.h>
#include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
#include <linux/memcontrol.h>
#include <linux/cleancache.h>
#include "internal.h"

/*
 * FIXME: remove all knowledge of the buffer layer from the core VM
 */
#include <linux/buffer_head.h> /* for try_to_free_buffers */

#include <asm/mman.h>

/*
 * Shared mappings implemented 30.11.1994. It's not fully working yet,
 * though.
 *
 * Shared mappings now work. 15.8.1995  Bruno.
 *
 * finished 'unifying' the page and buffer cache and SMP-threaded the
 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
 *
 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
 */

/*
 * Lock ordering:
 *
 *  ->i_mmap_mutex              (truncate_pagecache)
 *    ->private_lock            (__free_pte->__set_page_dirty_buffers)
 *      ->swap_lock             (exclusive_swap_page, others)
 *        ->mapping->tree_lock
 *
 *  ->i_mutex
 *    ->i_mmap_mutex            (truncate->unmap_mapping_range)
 *
 *  ->mmap_sem
 *    ->i_mmap_mutex
 *      ->page_table_lock or pte_lock   (various, mainly in memory.c)
 *        ->mapping->tree_lock  (arch-dependent flush_dcache_mmap_lock)
 *
 *  ->mmap_sem
 *    ->lock_page               (access_process_vm)
 *
 *  ->i_mutex                   (generic_file_buffered_write)
 *    ->mmap_sem                (fault_in_pages_readable->do_page_fault)
 *
 *  bdi->wb.list_lock
 *    sb_lock                   (fs/fs-writeback.c)
 *    ->mapping->tree_lock      (__sync_single_inode)
 *
 *  ->i_mmap_mutex
 *    ->anon_vma.lock           (vma_adjust)
 *
 *  ->anon_vma.lock
 *    ->page_table_lock or pte_lock     (anon_vma_prepare and various)
 *
 *  ->page_table_lock or pte_lock
 *    ->swap_lock               (try_to_unmap_one)
 *    ->private_lock            (try_to_unmap_one)
 *    ->tree_lock               (try_to_unmap_one)
 *    ->zone.lru_lock           (follow_page->mark_page_accessed)
 *    ->zone.lru_lock           (check_pte_range->isolate_lru_page)
 *    ->private_lock            (page_remove_rmap->set_page_dirty)
 *    ->tree_lock               (page_remove_rmap->set_page_dirty)
 *    bdi.wb->list_lock         (page_remove_rmap->set_page_dirty)
 *    ->inode->i_lock           (page_remove_rmap->set_page_dirty)
 *    bdi.wb->list_lock         (zap_pte_range->set_page_dirty)
 *    ->inode->i_lock           (zap_pte_range->set_page_dirty)
 *    ->private_lock            (zap_pte_range->__set_page_dirty_buffers)
 *
 *  (code doesn't rely on that order, so you could switch it around)
 *  ->tasklist_lock             (memory_failure, collect_procs_ao)
 *    ->i_mmap_mutex
 */

/*
 * Delete a page from the page cache and free it. Caller has to make
 * sure the page is locked and that nobody else uses it - or that usage
 * is safe.  The caller must hold the mapping's tree_lock.
 */
void __delete_from_page_cache(struct page *page)
{
        struct address_space *mapping = page->mapping;

        /*
         * if we're uptodate, flush out into the cleancache, otherwise
         * invalidate any existing cleancache entries.  We can't leave
         * stale data around in the cleancache once our page is gone
         */
        if (PageUptodate(page) && PageMappedToDisk(page))
                cleancache_put_page(page);
        else
                cleancache_flush_page(mapping, page);

        radix_tree_delete(&mapping->page_tree, page->index);
        page->mapping = NULL;
        /* Leave page->index set: truncation lookup relies upon it */
        mapping->nrpages--;
        __dec_zone_page_state(page, NR_FILE_PAGES);
        if (PageSwapBacked(page))
                __dec_zone_page_state(page, NR_SHMEM);
        BUG_ON(page_mapped(page));

        /*
         * Some filesystems seem to re-dirty the page even after
         * the VM has canceled the dirty bit (eg ext3 journaling).
         *
         * Fix it up by doing a final dirty accounting check after
         * having removed the page entirely.
         */
        if (PageDirty(page) && mapping_cap_account_dirty(mapping)) {
                dec_zone_page_state(page, NR_FILE_DIRTY);
                dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
        }
}

/**
 * delete_from_page_cache - delete page from page cache
 * @page: the page which the kernel is trying to remove from page cache
 *
 * This must be called only on pages that have been verified to be in the page
 * cache and locked.  It will never put the page into the free list, the caller
 * has a reference on the page.
 */
void delete_from_page_cache(struct page *page)
{
        struct address_space *mapping = page->mapping;
        void (*freepage)(struct page *);

        BUG_ON(!PageLocked(page));

        freepage = mapping->a_ops->freepage;
        spin_lock_irq(&mapping->tree_lock);
        __delete_from_page_cache(page);
        spin_unlock_irq(&mapping->tree_lock);
        mem_cgroup_uncharge_cache_page(page);

        if (freepage)
                freepage(page);
        page_cache_release(page);
}
EXPORT_SYMBOL(delete_from_page_cache);
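
/*
 * Illustrative sketch (added for exposition, not part of the original
 * file): the expected calling pattern for delete_from_page_cache().
 * The caller locks the page and holds its own reference, which still
 * pins the page after it has been removed from the pagecache.
 */
static void __maybe_unused example_remove_from_cache(struct page *page)
{
        lock_page(page);                /* required by delete_from_page_cache() */
        delete_from_page_cache(page);   /* drops the pagecache reference */
        unlock_page(page);
        page_cache_release(page);       /* drop the caller's own reference */
}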

static int sleep_on_page(void *word)
{
        io_schedule();
        return 0;
}

static int sleep_on_page_killable(void *word)
{
        sleep_on_page(word);
        return fatal_signal_pending(current) ? -EINTR : 0;
}

/**
 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
 * @mapping:    address space structure to write
 * @start:      offset in bytes where the range starts
 * @end:        offset in bytes where the range ends (inclusive)
 * @sync_mode:  enable synchronous operation
 *
 * Start writeback against all of a mapping's dirty pages that lie
 * within the byte offsets <start, end> inclusive.
 *
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 * opposed to a regular memory cleansing writeback.  The difference between
 * these two operations is that if a dirty page/buffer is encountered, it must
 * be waited upon, and not just skipped over.
 */
int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
                                loff_t end, int sync_mode)
{
        int ret;
        struct writeback_control wbc = {
                .sync_mode = sync_mode,
                .nr_to_write = LONG_MAX,
                .range_start = start,
                .range_end = end,
        };

        if (!mapping_cap_writeback_dirty(mapping))
                return 0;

        ret = do_writepages(mapping, &wbc);
        return ret;
}

static inline int __filemap_fdatawrite(struct address_space *mapping,
        int sync_mode)
{
        return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
}

int filemap_fdatawrite(struct address_space *mapping)
{
        return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite);

int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
                                loff_t end)
{
        return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite_range);

/**
 * filemap_flush - mostly a non-blocking flush
 * @mapping:    target address_space
 *
 * This is a mostly non-blocking flush.  Not suitable for data-integrity
 * purposes - I/O may not be started against all dirty pages.
 */
int filemap_flush(struct address_space *mapping)
{
        return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
}
EXPORT_SYMBOL(filemap_flush);

/**
 * filemap_fdatawait_range - wait for writeback to complete
 * @mapping:            address space structure to wait for
 * @start_byte:         offset in bytes where the range starts
 * @end_byte:           offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the given address space
 * in the given range and wait for all of them.
 */
int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
                            loff_t end_byte)
{
        pgoff_t index = start_byte >> PAGE_CACHE_SHIFT;
        pgoff_t end = end_byte >> PAGE_CACHE_SHIFT;
        struct pagevec pvec;
        int nr_pages;
        int ret = 0;

        if (end_byte < start_byte)
                return 0;

        pagevec_init(&pvec, 0);
        while ((index <= end) &&
                        (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
                        PAGECACHE_TAG_WRITEBACK,
                        min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
                unsigned i;

                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];

                        /* until radix tree lookup accepts end_index */
                        if (page->index > end)
                                continue;

                        wait_on_page_writeback(page);
                        if (TestClearPageError(page))
                                ret = -EIO;
                }
                pagevec_release(&pvec);
                cond_resched();
        }

        /* Check for outstanding write errors */
        if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
                ret = -ENOSPC;
        if (test_and_clear_bit(AS_EIO, &mapping->flags))
                ret = -EIO;

        return ret;
}
EXPORT_SYMBOL(filemap_fdatawait_range);
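
/*
 * Illustrative sketch (added for exposition): waiting for writeback on
 * a single page-sized range.  The "index" parameter is an assumption
 * for the example.
 */
static int __maybe_unused example_wait_one_page(struct address_space *mapping,
                                                pgoff_t index)
{
        loff_t pos = (loff_t)index << PAGE_CACHE_SHIFT;

        return filemap_fdatawait_range(mapping, pos,
                                       pos + PAGE_CACHE_SIZE - 1);
}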

/**
 * filemap_fdatawait - wait for all under-writeback pages to complete
 * @mapping: address space structure to wait for
 *
 * Walk the list of under-writeback pages of the given address space
 * and wait for all of them.
 */
int filemap_fdatawait(struct address_space *mapping)
{
        loff_t i_size = i_size_read(mapping->host);

        if (i_size == 0)
                return 0;

        return filemap_fdatawait_range(mapping, 0, i_size - 1);
}
EXPORT_SYMBOL(filemap_fdatawait);

int filemap_write_and_wait(struct address_space *mapping)
{
        int err = 0;

        if (mapping->nrpages) {
                err = filemap_fdatawrite(mapping);
                /*
                 * Even if the above returned an error, the pages may have
                 * been partially written (e.g. -ENOSPC), so we wait for
                 * completion anyway.  But -EIO is a special case: it may
                 * indicate that the worst (e.g. a bug) has happened, so we
                 * avoid waiting for it.
                 */
                if (err != -EIO) {
                        int err2 = filemap_fdatawait(mapping);
                        if (!err)
                                err = err2;
                }
        }
        return err;
}
EXPORT_SYMBOL(filemap_write_and_wait);

/**
 * filemap_write_and_wait_range - write out & wait on a file range
 * @mapping:    the address_space for the pages
 * @lstart:     offset in bytes where the range starts
 * @lend:       offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that `lend' is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 */
int filemap_write_and_wait_range(struct address_space *mapping,
                                 loff_t lstart, loff_t lend)
{
        int err = 0;

        if (mapping->nrpages) {
                err = __filemap_fdatawrite_range(mapping, lstart, lend,
                                                 WB_SYNC_ALL);
                /* See comment of filemap_write_and_wait() */
                if (err != -EIO) {
                        int err2 = filemap_fdatawait_range(mapping,
                                                lstart, lend);
                        if (!err)
                                err = err2;
                }
        }
        return err;
}
EXPORT_SYMBOL(filemap_write_and_wait_range);
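
/*
 * Illustrative sketch (added for exposition): a typical fsync-style
 * caller writing out and waiting on a byte range of one file.  The
 * "file", "pos" and "count" parameters are assumptions for the example.
 */
static int __maybe_unused example_sync_file_range(struct file *file,
                                                  loff_t pos, size_t count)
{
        return filemap_write_and_wait_range(file->f_mapping,
                                            pos, pos + count - 1);
}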

/**
 * replace_page_cache_page - replace a pagecache page with a new one
 * @old:        page to be replaced
 * @new:        page to replace with
 * @gfp_mask:   allocation mode
 *
 * This function replaces a page in the pagecache with a new one.  On
 * success it acquires the pagecache reference for the new page and
 * drops it for the old page.  Both the old and new pages must be
 * locked.  This function does not add the new page to the LRU, the
 * caller must do that.
 *
 * The remove + add is atomic.  The only way this function can fail is
 * memory allocation failure.
 */
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
{
        int error;
        struct mem_cgroup *memcg = NULL;

        VM_BUG_ON(!PageLocked(old));
        VM_BUG_ON(!PageLocked(new));
        VM_BUG_ON(new->mapping);

        /*
         * This is not page migration, but prepare_migration and
         * end_migration do enough work for charge replacement.
         *
         * In the longer term we probably want a specialized function
         * for moving the charge from old to new in a more efficient
         * manner.
         */
        error = mem_cgroup_prepare_migration(old, new, &memcg, gfp_mask);
        if (error)
                return error;

        error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
        if (!error) {
                struct address_space *mapping = old->mapping;
                void (*freepage)(struct page *);

                pgoff_t offset = old->index;
                freepage = mapping->a_ops->freepage;

                page_cache_get(new);
                new->mapping = mapping;
                new->index = offset;

                spin_lock_irq(&mapping->tree_lock);
                __delete_from_page_cache(old);
                error = radix_tree_insert(&mapping->page_tree, offset, new);
                BUG_ON(error);
                mapping->nrpages++;
                __inc_zone_page_state(new, NR_FILE_PAGES);
                if (PageSwapBacked(new))
                        __inc_zone_page_state(new, NR_SHMEM);
                spin_unlock_irq(&mapping->tree_lock);
                radix_tree_preload_end();
                if (freepage)
                        freepage(old);
                page_cache_release(old);
                mem_cgroup_end_migration(memcg, old, new, true);
        } else {
                mem_cgroup_end_migration(memcg, old, new, false);
        }

        return error;
}
EXPORT_SYMBOL_GPL(replace_page_cache_page);
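
/*
 * Illustrative sketch (added for exposition): replacing a pagecache
 * page and then adding the new page to the LRU, which the function
 * above deliberately leaves to the caller.  Both pages are assumed to
 * be locked already.
 */
static int __maybe_unused example_replace_page(struct page *old,
                                               struct page *new)
{
        int error = replace_page_cache_page(old, new, GFP_KERNEL);

        if (!error)
                lru_cache_add_file(new);        /* LRU insertion is on us */
        return error;
}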

/**
 * add_to_page_cache_locked - add a locked page to the pagecache
 * @page:       page to add
 * @mapping:    the page's address_space
 * @offset:     page index
 * @gfp_mask:   page allocation mode
 *
 * This function is used to add a page to the pagecache. It must be locked.
 * This function does not add the page to the LRU.  The caller must do that.
 */
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
                pgoff_t offset, gfp_t gfp_mask)
{
        int error;

        VM_BUG_ON(!PageLocked(page));
        VM_BUG_ON(PageSwapBacked(page));

        error = mem_cgroup_cache_charge(page, current->mm,
                                        gfp_mask & GFP_RECLAIM_MASK);
        if (error)
                goto out;

        error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
        if (error == 0) {
                page_cache_get(page);
                page->mapping = mapping;
                page->index = offset;

                spin_lock_irq(&mapping->tree_lock);
                error = radix_tree_insert(&mapping->page_tree, offset, page);
                if (likely(!error)) {
                        mapping->nrpages++;
                        __inc_zone_page_state(page, NR_FILE_PAGES);
                        spin_unlock_irq(&mapping->tree_lock);
                } else {
                        page->mapping = NULL;
                        /* Leave page->index set: truncation relies upon it */
                        spin_unlock_irq(&mapping->tree_lock);
                        mem_cgroup_uncharge_cache_page(page);
                        page_cache_release(page);
                }
                radix_tree_preload_end();
        } else
                mem_cgroup_uncharge_cache_page(page);
out:
        return error;
}
EXPORT_SYMBOL(add_to_page_cache_locked);

int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
                                pgoff_t offset, gfp_t gfp_mask)
{
        int ret;

        ret = add_to_page_cache(page, mapping, offset, gfp_mask);
        if (ret == 0)
                lru_cache_add_file(page);
        return ret;
}
EXPORT_SYMBOL_GPL(add_to_page_cache_lru);
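
/*
 * Illustrative sketch (added for exposition): the usual
 * allocate-and-insert pattern, as do_generic_file_read() below uses on
 * a cache miss.  On success the page is locked, in the pagecache and on
 * the LRU; -EEXIST means somebody else inserted one first.
 */
static __maybe_unused struct page *
example_add_new_page(struct address_space *mapping, pgoff_t index)
{
        struct page *page = page_cache_alloc_cold(mapping);

        if (page && add_to_page_cache_lru(page, mapping, index, GFP_KERNEL)) {
                page_cache_release(page);
                page = NULL;
        }
        return page;
}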

#ifdef CONFIG_NUMA
struct page *__page_cache_alloc(gfp_t gfp)
{
        int n;
        struct page *page;

        if (cpuset_do_page_mem_spread()) {
                get_mems_allowed();
                n = cpuset_mem_spread_node();
                page = alloc_pages_exact_node(n, gfp, 0);
                put_mems_allowed();
                return page;
        }
        return alloc_pages(gfp, 0);
}
EXPORT_SYMBOL(__page_cache_alloc);
#endif

/*
 * In order to wait for pages to become available there must be
 * waitqueues associated with pages. By using a hash table of
 * waitqueues where the bucket discipline is to maintain all
 * waiters on the same queue and wake all when any of the pages
 * become available, and for the woken contexts to check to be
 * sure the appropriate page became available, this saves space
 * at a cost of "thundering herd" phenomena during rare hash
 * collisions.
 */
static wait_queue_head_t *page_waitqueue(struct page *page)
{
        const struct zone *zone = page_zone(page);

        return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
}

static inline void wake_up_page(struct page *page, int bit)
{
        __wake_up_bit(page_waitqueue(page), &page->flags, bit);
}

void wait_on_page_bit(struct page *page, int bit_nr)
{
        DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

        if (test_bit(bit_nr, &page->flags))
                __wait_on_bit(page_waitqueue(page), &wait, sleep_on_page,
                                                        TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_on_page_bit);

int wait_on_page_bit_killable(struct page *page, int bit_nr)
{
        DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

        if (!test_bit(bit_nr, &page->flags))
                return 0;

        return __wait_on_bit(page_waitqueue(page), &wait,
                             sleep_on_page_killable, TASK_KILLABLE);
}

/**
 * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
 * @page: Page defining the wait queue of interest
 * @waiter: Waiter to add to the queue
 *
 * Add an arbitrary @waiter to the wait queue for the nominated @page.
 */
void add_page_wait_queue(struct page *page, wait_queue_t *waiter)
{
        wait_queue_head_t *q = page_waitqueue(page);
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);
        __add_wait_queue(q, waiter);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL_GPL(add_page_wait_queue);

/**
 * unlock_page - unlock a locked page
 * @page: the page
 *
 * Unlocks the page and wakes up sleepers in wait_on_page_locked().
 * Also wakes sleepers in wait_on_page_writeback() because the wakeup
 * mechanism between PageLocked pages and PageWriteback pages is shared.
 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
 *
 * The mb is necessary to enforce ordering between the clear_bit and the read
 * of the waitqueue (to avoid SMP races with a parallel wait_on_page_locked()).
 */
void unlock_page(struct page *page)
{
        VM_BUG_ON(!PageLocked(page));
        clear_bit_unlock(PG_locked, &page->flags);
        smp_mb__after_clear_bit();
        wake_up_page(page, PG_locked);
}
EXPORT_SYMBOL(unlock_page);
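
/*
 * Illustrative sketch (added for exposition): the canonical pairing.
 * lock_page() - a pagemap.h wrapper around __lock_page() below - sleeps
 * until the lock is acquired; unlock_page() wakes any waiters.
 */
static void __maybe_unused example_with_page_locked(struct page *page)
{
        lock_page(page);
        /* ... page->mapping and the page contents are stable here ... */
        unlock_page(page);
}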

/**
 * end_page_writeback - end writeback against a page
 * @page: the page
 */
void end_page_writeback(struct page *page)
{
        if (TestClearPageReclaim(page))
                rotate_reclaimable_page(page);

        if (!test_clear_page_writeback(page))
                BUG();

        smp_mb__after_clear_bit();
        wake_up_page(page, PG_writeback);
}
EXPORT_SYMBOL(end_page_writeback);

/**
 * __lock_page - get a lock on the page, assuming we need to sleep to get it
 * @page: the page to lock
 */
void __lock_page(struct page *page)
{
        DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);

        __wait_on_bit_lock(page_waitqueue(page), &wait, sleep_on_page,
                                                        TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_page);

int __lock_page_killable(struct page *page)
{
        DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);

        return __wait_on_bit_lock(page_waitqueue(page), &wait,
                                        sleep_on_page_killable, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__lock_page_killable);

int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
                         unsigned int flags)
{
        if (flags & FAULT_FLAG_ALLOW_RETRY) {
                /*
                 * CAUTION! In this case, mmap_sem is not released
                 * even though we return 0.
                 */
                if (flags & FAULT_FLAG_RETRY_NOWAIT)
                        return 0;

                up_read(&mm->mmap_sem);
                if (flags & FAULT_FLAG_KILLABLE)
                        wait_on_page_locked_killable(page);
                else
                        wait_on_page_locked(page);
                return 0;
        } else {
                if (flags & FAULT_FLAG_KILLABLE) {
                        int ret;

                        ret = __lock_page_killable(page);
                        if (ret) {
                                up_read(&mm->mmap_sem);
                                return 0;
                        }
                } else
                        __lock_page(page);
                return 1;
        }
}

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Is there a pagecache struct page at the given (mapping, offset) tuple?
 * If yes, increment its refcount and return it; if no, return NULL.
 */
struct page *find_get_page(struct address_space *mapping, pgoff_t offset)
{
        void **pagep;
        struct page *page;

        rcu_read_lock();
repeat:
        page = NULL;
        pagep = radix_tree_lookup_slot(&mapping->page_tree, offset);
        if (pagep) {
                page = radix_tree_deref_slot(pagep);
                if (unlikely(!page))
                        goto out;
                if (radix_tree_exception(page)) {
                        if (radix_tree_deref_retry(page))
                                goto repeat;
                        /*
                         * Otherwise, shmem/tmpfs must be storing a swap entry
                         * here as an exceptional entry: so return it without
                         * attempting to raise page count.
                         */
                        goto out;
                }
                if (!page_cache_get_speculative(page))
                        goto repeat;

                /*
                 * Has the page moved?
                 * This is part of the lockless pagecache protocol. See
                 * include/linux/pagemap.h for details.
                 */
                if (unlikely(page != *pagep)) {
                        page_cache_release(page);
                        goto repeat;
                }
        }
out:
        rcu_read_unlock();

        return page;
}
EXPORT_SYMBOL(find_get_page);
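
/*
 * Illustrative sketch (added for exposition): a typical lookup.  The
 * returned page carries a reference the caller must drop, and on a
 * shmem/tmpfs mapping the return value can be an exceptional entry, so
 * it is checked before the page is touched.
 */
static int __maybe_unused example_page_is_uptodate(struct address_space *mapping,
                                                   pgoff_t offset)
{
        struct page *page = find_get_page(mapping, offset);
        int ret = 0;

        if (page && !radix_tree_exception(page)) {
                ret = PageUptodate(page);
                page_cache_release(page);       /* drop the lookup reference */
        }
        return ret;
}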

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Locates the desired pagecache page, locks it, increments its reference
 * count and returns its address.
 *
 * Returns NULL if the page was not present. find_lock_page() may sleep.
 */
struct page *find_lock_page(struct address_space *mapping, pgoff_t offset)
{
        struct page *page;

repeat:
        page = find_get_page(mapping, offset);
        if (page && !radix_tree_exception(page)) {
                lock_page(page);
                /* Has the page been truncated? */
                if (unlikely(page->mapping != mapping)) {
                        unlock_page(page);
                        page_cache_release(page);
                        goto repeat;
                }
                VM_BUG_ON(page->index != offset);
        }
        return page;
}
EXPORT_SYMBOL(find_lock_page);

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Locates a page in the pagecache.  If the page is not present, a new page
 * is allocated using @gfp_mask and is added to the pagecache and to the VM's
 * LRU list.  The returned page is locked and has its reference count
 * incremented.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an atomic
 * allocation!
 *
 * find_or_create_page() returns the desired page's address, or NULL on
 * memory exhaustion.
 */
struct page *find_or_create_page(struct address_space *mapping,
                pgoff_t index, gfp_t gfp_mask)
{
        struct page *page;
        int err;
repeat:
        page = find_lock_page(mapping, index);
        if (!page) {
                page = __page_cache_alloc(gfp_mask);
                if (!page)
                        return NULL;
                /*
                 * We want a regular kernel memory (not highmem or DMA etc)
                 * allocation for the radix tree nodes, but we need to honour
                 * the context-specific requirements the caller has asked for.
                 * GFP_RECLAIM_MASK collects those requirements.
                 */
                err = add_to_page_cache_lru(page, mapping, index,
                        (gfp_mask & GFP_RECLAIM_MASK));
                if (unlikely(err)) {
                        page_cache_release(page);
                        page = NULL;
                        if (err == -EEXIST)
                                goto repeat;
                }
        }
        return page;
}
EXPORT_SYMBOL(find_or_create_page);
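
/*
 * Illustrative sketch (added for exposition): grabbing a page that is
 * created on demand.  The page comes back locked with an elevated
 * refcount, so the caller unlocks and releases it when done.
 */
static void __maybe_unused example_touch_page(struct address_space *mapping,
                                              pgoff_t index)
{
        struct page *page = find_or_create_page(mapping, index,
                                                mapping_gfp_mask(mapping));

        if (!page)
                return;         /* memory exhaustion */
        /* ... operate on the locked page ... */
        unlock_page(page);
        page_cache_release(page);
}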

/**
 * find_get_pages - gang pagecache lookup
 * @mapping:    The address_space to search
 * @start:      The starting page index
 * @nr_pages:   The maximum number of pages
 * @pages:      Where the resulting pages are placed
 *
 * find_get_pages() will search for and return a group of up to
 * @nr_pages pages in the mapping.  The pages are placed at @pages.
 * find_get_pages() takes a reference against the returned pages.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * find_get_pages() returns the number of pages which were found.
 */
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
                            unsigned int nr_pages, struct page **pages)
{
        unsigned int i;
        unsigned int ret;
        unsigned int nr_found, nr_skip;

        rcu_read_lock();
restart:
        nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
                                (void ***)pages, NULL, start, nr_pages);
        ret = 0;
        nr_skip = 0;
        for (i = 0; i < nr_found; i++) {
                struct page *page;
repeat:
                page = radix_tree_deref_slot((void **)pages[i]);
                if (unlikely(!page))
                        continue;

                if (radix_tree_exception(page)) {
                        if (radix_tree_deref_retry(page)) {
                                /*
                                 * Transient condition which can only trigger
                                 * when entry at index 0 moves out of or back
                                 * to root: none yet gotten, safe to restart.
                                 */
                                WARN_ON(start | i);
                                goto restart;
                        }
                        /*
                         * Otherwise, shmem/tmpfs must be storing a swap entry
                         * here as an exceptional entry: so skip over it -
                         * we only reach this from invalidate_mapping_pages().
                         */
                        nr_skip++;
                        continue;
                }

                if (!page_cache_get_speculative(page))
                        goto repeat;

                /* Has the page moved? */
                if (unlikely(page != *((void **)pages[i]))) {
                        page_cache_release(page);
                        goto repeat;
                }

                pages[ret] = page;
                ret++;
        }

        /*
         * If all entries were removed before we could secure them,
         * try again, because callers stop trying once 0 is returned.
         */
        if (unlikely(!ret && nr_found > nr_skip))
                goto restart;
        rcu_read_unlock();
        return ret;
}

/**
 * find_get_pages_contig - gang contiguous pagecache lookup
 * @mapping:    The address_space to search
 * @index:      The starting page index
 * @nr_pages:   The maximum number of pages
 * @pages:      Where the resulting pages are placed
 *
 * find_get_pages_contig() works exactly like find_get_pages(), except
 * that the returned number of pages are guaranteed to be contiguous.
 *
 * find_get_pages_contig() returns the number of pages which were found.
 */
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
                               unsigned int nr_pages, struct page **pages)
{
        unsigned int i;
        unsigned int ret;
        unsigned int nr_found;

        rcu_read_lock();
restart:
        nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
                                (void ***)pages, NULL, index, nr_pages);
        ret = 0;
        for (i = 0; i < nr_found; i++) {
                struct page *page;
repeat:
                page = radix_tree_deref_slot((void **)pages[i]);
                if (unlikely(!page))
                        continue;

                if (radix_tree_exception(page)) {
                        if (radix_tree_deref_retry(page)) {
                                /*
                                 * Transient condition which can only trigger
                                 * when entry at index 0 moves out of or back
                                 * to root: none yet gotten, safe to restart.
                                 */
                                goto restart;
                        }
                        /*
                         * Otherwise, shmem/tmpfs must be storing a swap entry
                         * here as an exceptional entry: so stop looking for
                         * contiguous pages.
                         */
                        break;
                }

                if (!page_cache_get_speculative(page))
                        goto repeat;

                /* Has the page moved? */
                if (unlikely(page != *((void **)pages[i]))) {
                        page_cache_release(page);
                        goto repeat;
                }

                /*
                 * Must check mapping and index after taking the ref.
                 * Otherwise we can get both false positives and false
                 * negatives, which is just confusing to the caller.
                 */
                if (page->mapping == NULL || page->index != index) {
                        page_cache_release(page);
                        break;
                }

                pages[ret] = page;
                ret++;
                index++;
        }
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL(find_get_pages_contig);

/**
 * find_get_pages_tag - find and return pages that match @tag
 * @mapping:    the address_space to search
 * @index:      the starting page index
 * @tag:        the tag index
 * @nr_pages:   the maximum number of pages
 * @pages:      where the resulting pages are placed
 *
 * Like find_get_pages, except we only return pages which are tagged with
 * @tag.  We update @index to index the next page for the traversal.
 */
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
                        int tag, unsigned int nr_pages, struct page **pages)
{
        unsigned int i;
        unsigned int ret;
        unsigned int nr_found;

        rcu_read_lock();
restart:
        nr_found = radix_tree_gang_lookup_tag_slot(&mapping->page_tree,
                                (void ***)pages, *index, nr_pages, tag);
        ret = 0;
        for (i = 0; i < nr_found; i++) {
                struct page *page;
repeat:
                page = radix_tree_deref_slot((void **)pages[i]);
                if (unlikely(!page))
                        continue;

                if (radix_tree_exception(page)) {
                        if (radix_tree_deref_retry(page)) {
                                /*
                                 * Transient condition which can only trigger
                                 * when entry at index 0 moves out of or back
                                 * to root: none yet gotten, safe to restart.
                                 */
                                goto restart;
                        }
                        /*
                         * This function is never used on a shmem/tmpfs
                         * mapping, so a swap entry won't be found here.
                         */
                        BUG();
                }

                if (!page_cache_get_speculative(page))
                        goto repeat;

                /* Has the page moved? */
                if (unlikely(page != *((void **)pages[i]))) {
                        page_cache_release(page);
                        goto repeat;
                }

                pages[ret] = page;
                ret++;
        }

        /*
         * If all entries were removed before we could secure them,
         * try again, because callers stop trying once 0 is returned.
         */
        if (unlikely(!ret && nr_found))
                goto restart;
        rcu_read_unlock();

        if (ret)
                *index = pages[ret - 1]->index + 1;

        return ret;
}
EXPORT_SYMBOL(find_get_pages_tag);
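
/*
 * Illustrative sketch (added for exposition): walking every dirty page
 * of a mapping with the tagged gang lookup.  filemap_fdatawait_range()
 * above does the same dance for PAGECACHE_TAG_WRITEBACK via the
 * pagevec_lookup_tag() wrapper.
 */
static void __maybe_unused example_walk_dirty_pages(struct address_space *mapping)
{
        struct page *pages[PAGEVEC_SIZE];
        pgoff_t index = 0;
        unsigned int i, nr;

        while ((nr = find_get_pages_tag(mapping, &index, PAGECACHE_TAG_DIRTY,
                                        PAGEVEC_SIZE, pages)) != 0) {
                for (i = 0; i < nr; i++) {
                        /* ... inspect pages[i] ... */
                        page_cache_release(pages[i]);   /* drop gang reference */
                }
                cond_resched();
        }
}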

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
struct page *
grab_cache_page_nowait(struct address_space *mapping, pgoff_t index)
{
        struct page *page = find_get_page(mapping, index);

        if (page) {
                if (trylock_page(page))
                        return page;
                page_cache_release(page);
                return NULL;
        }
        page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~__GFP_FS);
        if (page && add_to_page_cache_lru(page, mapping, index, GFP_NOFS)) {
                page_cache_release(page);
                page = NULL;
        }
        return page;
}
EXPORT_SYMBOL(grab_cache_page_nowait);

/*
 * CD/DVDs are error prone. When a medium error occurs, the driver may fail
 * a _large_ part of the i/o request. Imagine the worst scenario:
 *
 *      ---R__________________________________________B__________
 *         ^ reading here                             ^ bad block (assume 4k)
 *
 * read(R) => miss => readahead(R...B) => media error => frustrating retries
 * => failing the whole request => read(R) => read(R+1) =>
 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
 * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
 *
 * It is going insane. Fix it by quickly scaling down the readahead size.
 */
static void shrink_readahead_size_eio(struct file *filp,
                                        struct file_ra_state *ra)
{
        ra->ra_pages /= 4;
}

/**
 * do_generic_file_read - generic file read routine
 * @filp:       the file to read
 * @ppos:       current file position
 * @desc:       read_descriptor
 * @actor:      read method
 *
 * This is a generic file read routine, and uses the
 * mapping->a_ops->readpage() function for the actual low-level stuff.
 *
 * This is really ugly. But the gotos actually try to clarify some
 * of the logic when it comes to error handling etc.
 */
static void do_generic_file_read(struct file *filp, loff_t *ppos,
                read_descriptor_t *desc, read_actor_t actor)
{
        struct address_space *mapping = filp->f_mapping;
        struct inode *inode = mapping->host;
        struct file_ra_state *ra = &filp->f_ra;
        pgoff_t index;
        pgoff_t last_index;
        pgoff_t prev_index;
        unsigned long offset;      /* offset into pagecache page */
        unsigned int prev_offset;
        int error;

        index = *ppos >> PAGE_CACHE_SHIFT;
        prev_index = ra->prev_pos >> PAGE_CACHE_SHIFT;
        prev_offset = ra->prev_pos & (PAGE_CACHE_SIZE-1);
        last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
        offset = *ppos & ~PAGE_CACHE_MASK;

        for (;;) {
                struct page *page;
                pgoff_t end_index;
                loff_t isize;
                unsigned long nr, ret;

                cond_resched();
find_page:
                page = find_get_page(mapping, index);
                if (!page) {
                        page_cache_sync_readahead(mapping,
                                        ra, filp,
                                        index, last_index - index);
                        page = find_get_page(mapping, index);
                        if (unlikely(page == NULL))
                                goto no_cached_page;
                }
                if (PageReadahead(page)) {
                        page_cache_async_readahead(mapping,
                                        ra, filp, page,
                                        index, last_index - index);
                }
                if (!PageUptodate(page)) {
                        if (inode->i_blkbits == PAGE_CACHE_SHIFT ||
                                        !mapping->a_ops->is_partially_uptodate)
                                goto page_not_up_to_date;
                        if (!trylock_page(page))
                                goto page_not_up_to_date;
                        /* Did it get truncated before we got the lock? */
                        if (!page->mapping)
                                goto page_not_up_to_date_locked;
                        if (!mapping->a_ops->is_partially_uptodate(page,
                                                                desc, offset))
                                goto page_not_up_to_date_locked;
                        unlock_page(page);
                }
page_ok:
                /*
                 * i_size must be checked after we know the page is Uptodate.
                 *
                 * Checking i_size after the uptodate check allows us to
                 * calculate the correct value for "nr", which means the
                 * zero-filled part of the page is not copied back to
                 * userspace (unless another truncate extends the file -
                 * this is desired though).
                 */

                isize = i_size_read(inode);
                end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
                if (unlikely(!isize || index > end_index)) {
                        page_cache_release(page);
                        goto out;
                }

                /* nr is the maximum number of bytes to copy from this page */
                nr = PAGE_CACHE_SIZE;
                if (index == end_index) {
                        nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
                        if (nr <= offset) {
                                page_cache_release(page);
                                goto out;
                        }
                }
                nr = nr - offset;

                /* If users can be writing to this page using arbitrary
                 * virtual addresses, take care about potential aliasing
                 * before reading the page on the kernel side.
                 */
                if (mapping_writably_mapped(mapping))
                        flush_dcache_page(page);

                /*
                 * When a sequential read accesses a page several times,
                 * only mark it as accessed the first time.
                 */
                if (prev_index != index || offset != prev_offset)
                        mark_page_accessed(page);
                prev_index = index;

                /*
                 * Ok, we have the page, and it's up-to-date, so
                 * now we can copy it to user space...
                 *
                 * The actor routine returns how many bytes were actually used..
                 * NOTE! This may not be the same as how much of a user buffer
                 * we filled up (we may be padding etc), so we can only update
                 * "pos" here (the actor routine has to update the user buffer
                 * pointers and the remaining count).
                 */
                ret = actor(desc, page, offset, nr);
                offset += ret;
                index += offset >> PAGE_CACHE_SHIFT;
                offset &= ~PAGE_CACHE_MASK;
                prev_offset = offset;

                page_cache_release(page);
                if (ret == nr && desc->count)
                        continue;
                goto out;

page_not_up_to_date:
                /* Get exclusive access to the page ... */
                error = lock_page_killable(page);
                if (unlikely(error))
                        goto readpage_error;

page_not_up_to_date_locked:
                /* Did it get truncated before we got the lock? */
                if (!page->mapping) {
                        unlock_page(page);
                        page_cache_release(page);
                        continue;
                }

                /* Did somebody else fill it already? */
                if (PageUptodate(page)) {
                        unlock_page(page);
                        goto page_ok;
                }

readpage:
                /*
                 * A previous I/O error may have been due to temporary
                 * failures, eg. multipath errors.
                 * PG_error will be set again if readpage fails.
                 */
                ClearPageError(page);
                /* Start the actual read. The read will unlock the page. */
                error = mapping->a_ops->readpage(filp, page);

                if (unlikely(error)) {
                        if (error == AOP_TRUNCATED_PAGE) {
                                page_cache_release(page);
                                goto find_page;
                        }
                        goto readpage_error;
                }

                if (!PageUptodate(page)) {
                        error = lock_page_killable(page);
                        if (unlikely(error))
                                goto readpage_error;
                        if (!PageUptodate(page)) {
                                if (page->mapping == NULL) {
                                        /*
                                         * invalidate_mapping_pages got it
                                         */
                                        unlock_page(page);
                                        page_cache_release(page);
                                        goto find_page;
                                }
                                unlock_page(page);
                                shrink_readahead_size_eio(filp, ra);
                                error = -EIO;
                                goto readpage_error;
                        }
                        unlock_page(page);
                }

                goto page_ok;

readpage_error:
                /* UHHUH! A synchronous read error occurred. Report it */
                desc->error = error;
                page_cache_release(page);
                goto out;

no_cached_page:
                /*
                 * Ok, it wasn't cached, so we need to create a new
                 * page..
                 */
                page = page_cache_alloc_cold(mapping);
                if (!page) {
                        desc->error = -ENOMEM;
                        goto out;
                }
                error = add_to_page_cache_lru(page, mapping,
                                                index, GFP_KERNEL);
                if (error) {
                        page_cache_release(page);
                        if (error == -EEXIST)
                                goto find_page;
                        desc->error = error;
                        goto out;
                }
                goto readpage;
        }

out:
        ra->prev_pos = prev_index;
        ra->prev_pos <<= PAGE_CACHE_SHIFT;
        ra->prev_pos |= prev_offset;

        *ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset;
        file_accessed(filp);
}

int file_read_actor(read_descriptor_t *desc, struct page *page,
                        unsigned long offset, unsigned long size)
{
        char *kaddr;
        unsigned long left, count = desc->count;

        if (size > count)
                size = count;

        /*
         * Faults on the destination of a read are common, so do it before
         * taking the kmap.
         */
        if (!fault_in_pages_writeable(desc->arg.buf, size)) {
                kaddr = kmap_atomic(page, KM_USER0);
                left = __copy_to_user_inatomic(desc->arg.buf,
                                                kaddr + offset, size);
                kunmap_atomic(kaddr, KM_USER0);
                if (left == 0)
                        goto success;
        }

        /* Do it the slow way */
        kaddr = kmap(page);
        left = __copy_to_user(desc->arg.buf, kaddr + offset, size);
        kunmap(page);

        if (left) {
                size -= left;
                desc->error = -EFAULT;
        }
success:
        desc->count = count - size;
        desc->written += size;
        desc->arg.buf += size;
        return size;
}

/*
 * Performs necessary checks before doing a write
 * @iov:        io vector request
 * @nr_segs:    number of segments in the iovec
 * @count:      number of bytes to write
 * @access_flags: type of access: %VERIFY_READ or %VERIFY_WRITE
 *
 * Adjust the number of segments and amount of bytes to write (nr_segs should
 * be properly initialized first). Returns an appropriate error code that the
 * caller should return, or zero if the write should be allowed.
 */
int generic_segment_checks(const struct iovec *iov,
                        unsigned long *nr_segs, size_t *count, int access_flags)
{
        unsigned long   seg;
        size_t cnt = 0;
        for (seg = 0; seg < *nr_segs; seg++) {
                const struct iovec *iv = &iov[seg];

                /*
                 * If any segment has a negative length, or the cumulative
                 * length ever wraps negative then return -EINVAL.
                 */
                cnt += iv->iov_len;
                if (unlikely((ssize_t)(cnt|iv->iov_len) < 0))
                        return -EINVAL;
                if (access_ok(access_flags, iv->iov_base, iv->iov_len))
                        continue;
                if (seg == 0)
                        return -EFAULT;
                *nr_segs = seg;
                cnt -= iv->iov_len;     /* This segment is no good */
                break;
        }
        *count = cnt;
        return 0;
}
EXPORT_SYMBOL(generic_segment_checks);
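
/*
 * Illustrative sketch (added for exposition): validating an iovec the
 * way the read path below does before touching any data; VERIFY_WRITE
 * because a read(2) writes into the user buffers.
 */
static ssize_t __maybe_unused example_check_segments(const struct iovec *iov,
                                                     unsigned long nr_segs)
{
        size_t count = 0;
        int err = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);

        return err ? err : (ssize_t)count;      /* total usable bytes */
}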
1397
1398 /**
1399  * generic_file_aio_read - generic filesystem read routine
1400  * @iocb:       kernel I/O control block
1401  * @iov:        io vector request
1402  * @nr_segs:    number of segments in the iovec
1403  * @pos:        current file position
1404  *
1405  * This is the "read()" routine for all filesystems
1406  * that can use the page cache directly.
1407  */
1408 ssize_t
1409 generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
1410                 unsigned long nr_segs, loff_t pos)
1411 {
1412         struct file *filp = iocb->ki_filp;
1413         ssize_t retval;
1414         unsigned long seg = 0;
1415         size_t count;
1416         loff_t *ppos = &iocb->ki_pos;
1417         struct blk_plug plug;
1418
1419         count = 0;
1420         retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
1421         if (retval)
1422                 return retval;
1423
1424         blk_start_plug(&plug);
1425
1426         /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
1427         if (filp->f_flags & O_DIRECT) {
1428                 loff_t size;
1429                 struct address_space *mapping;
1430                 struct inode *inode;
1431
1432                 mapping = filp->f_mapping;
1433                 inode = mapping->host;
1434                 if (!count)
1435                         goto out; /* skip atime */
1436                 size = i_size_read(inode);
1437                 if (pos < size) {
1438                         retval = filemap_write_and_wait_range(mapping, pos,
1439                                         pos + iov_length(iov, nr_segs) - 1);
1440                         if (!retval) {
1441                                 retval = mapping->a_ops->direct_IO(READ, iocb,
1442                                                         iov, pos, nr_segs);
1443                         }
1444                         if (retval > 0) {
1445                                 *ppos = pos + retval;
1446                                 count -= retval;
1447                         }
1448
1449                         /*
1450                          * Btrfs can have a short DIO read if we encounter
1451                          * compressed extents, so if there was an error, or if
1452                          * we've already read everything we wanted to, or if
1453                          * there was a short read because we hit EOF, go ahead
1454                          * and return.  Otherwise fall through to buffered I/O
1455                          * for the rest of the read.
1456                          */
1457                         if (retval < 0 || !count || *ppos >= size) {
1458                                 file_accessed(filp);
1459                                 goto out;
1460                         }
1461                 }
1462         }
1463
1464         count = retval;
1465         for (seg = 0; seg < nr_segs; seg++) {
1466                 read_descriptor_t desc;
1467                 loff_t offset = 0;
1468
1469                 /*
1470                  * If we did a short DIO read we need to skip the section of the
1471                  * iov that we've already read data into.
1472                  */
1473                 if (count) {
1474                         if (count > iov[seg].iov_len) {
1475                                 count -= iov[seg].iov_len;
1476                                 continue;
1477                         }
1478                         offset = count;
1479                         count = 0;
1480                 }
1481
1482                 desc.written = 0;
1483                 desc.arg.buf = iov[seg].iov_base + offset;
1484                 desc.count = iov[seg].iov_len - offset;
1485                 if (desc.count == 0)
1486                         continue;
1487                 desc.error = 0;
1488                 do_generic_file_read(filp, ppos, &desc, file_read_actor);
1489                 retval += desc.written;
1490                 if (desc.error) {
1491                         retval = retval ?: desc.error;
1492                         break;
1493                 }
1494                 if (desc.count > 0)
1495                         break;
1496         }
1497 out:
1498         blk_finish_plug(&plug);
1499         return retval;
1500 }
1501 EXPORT_SYMBOL(generic_file_aio_read);
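
/*
 * Example (sketch): most disk filesystems wire this routine directly
 * into their file_operations, pairing it with do_sync_read() so that
 * plain read(2) is serviced through the same path; the structure below
 * is illustrative and mirrors what e.g. ext2 does.
 */
static const struct file_operations example_file_ops = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .aio_read       = generic_file_aio_read,
        .mmap           = generic_file_mmap,
};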
1502
1503 static ssize_t
1504 do_readahead(struct address_space *mapping, struct file *filp,
1505              pgoff_t index, unsigned long nr)
1506 {
1507         if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
1508                 return -EINVAL;
1509
1510         force_page_cache_readahead(mapping, filp, index, nr);
1511         return 0;
1512 }
1513
1514 SYSCALL_DEFINE(readahead)(int fd, loff_t offset, size_t count)
1515 {
1516         ssize_t ret;
1517         struct file *file;
1518
1519         ret = -EBADF;
1520         file = fget(fd);
1521         if (file) {
1522                 if (file->f_mode & FMODE_READ) {
1523                         struct address_space *mapping = file->f_mapping;
1524                         pgoff_t start = offset >> PAGE_CACHE_SHIFT;
1525                         pgoff_t end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
1526                         unsigned long len = end - start + 1;
1527                         ret = do_readahead(mapping, file, start, len);
1528                 }
1529                 fput(file);
1530         }
1531         return ret;
1532 }
1533 #ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
1534 asmlinkage long SyS_readahead(long fd, loff_t offset, long count)
1535 {
1536         return SYSC_readahead((int) fd, offset, (size_t) count);
1537 }
1538 SYSCALL_ALIAS(sys_readahead, SyS_readahead);
1539 #endif
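
/*
 * Example (userspace sketch): glibc exposes the syscall above as
 * readahead(2) when _GNU_SOURCE is defined.  A streaming reader might
 * prime the page cache before sequentially consuming a file:
 *
 *      #define _GNU_SOURCE
 *      #include <fcntl.h>
 *
 *      fd = open(path, O_RDONLY);
 *      if (fd >= 0)
 *              readahead(fd, 0, file_size);
 *
 * The offset/count pair is rounded out to whole pages by the shift
 * arithmetic in the syscall body above.
 */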
1540
1541 #ifdef CONFIG_MMU
1542 /**
1543  * page_cache_read - adds requested page to the page cache if not already there
1544  * @file:       file to read
1545  * @offset:     page index
1546  *
1547  * This adds the requested page to the page cache if it isn't already there,
1548  * and schedules an I/O to read in its contents from disk.
1549  */
1550 static int page_cache_read(struct file *file, pgoff_t offset)
1551 {
1552         struct address_space *mapping = file->f_mapping;
1553         struct page *page;
1554         int ret;
1555
1556         do {
1557                 page = page_cache_alloc_cold(mapping);
1558                 if (!page)
1559                         return -ENOMEM;
1560
1561                 ret = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL);
1562                 if (ret == 0)
1563                         ret = mapping->a_ops->readpage(file, page);
1564                 else if (ret == -EEXIST)
1565                         ret = 0; /* losing race to add is OK */
1566
1567                 page_cache_release(page);
1568
1569         } while (ret == AOP_TRUNCATED_PAGE);
1570
1571         return ret;
1572 }
1573
1574 #define MMAP_LOTSAMISS  (100)
1575
1576 /*
1577  * Synchronous readahead happens when we don't even find
1578  * a page in the page cache at all.
1579  */
1580 static void do_sync_mmap_readahead(struct vm_area_struct *vma,
1581                                    struct file_ra_state *ra,
1582                                    struct file *file,
1583                                    pgoff_t offset)
1584 {
1585         unsigned long ra_pages;
1586         struct address_space *mapping = file->f_mapping;
1587
1588         /* If we don't want any read-ahead, don't bother */
1589         if (VM_RandomReadHint(vma))
1590                 return;
1591         if (!ra->ra_pages)
1592                 return;
1593
1594         if (VM_SequentialReadHint(vma)) {
1595                 page_cache_sync_readahead(mapping, ra, file, offset,
1596                                           ra->ra_pages);
1597                 return;
1598         }
1599
1600         /* Avoid banging the cache line if not needed */
1601         if (ra->mmap_miss < MMAP_LOTSAMISS * 10)
1602                 ra->mmap_miss++;
1603
1604         /*
1605          * Do we miss much more than hit in this file? If so,
1606          * stop bothering with read-ahead. It will only hurt.
1607          */
1608         if (ra->mmap_miss > MMAP_LOTSAMISS)
1609                 return;
1610
1611         /*
1612          * mmap read-around
1613          */
1614         ra_pages = max_sane_readahead(ra->ra_pages);
1615         ra->start = max_t(long, 0, offset - ra_pages / 2);
1616         ra->size = ra_pages;
1617         ra->async_size = ra_pages / 4;
1618         ra_submit(ra, mapping, file);
1619 }
1620
1621 /*
1622  * Asynchronous readahead happens when we find the page and PG_readahead,
1623  * so we want to possibly extend the readahead further.
1624  */
1625 static void do_async_mmap_readahead(struct vm_area_struct *vma,
1626                                     struct file_ra_state *ra,
1627                                     struct file *file,
1628                                     struct page *page,
1629                                     pgoff_t offset)
1630 {
1631         struct address_space *mapping = file->f_mapping;
1632
1633         /* If we don't want any read-ahead, don't bother */
1634         if (VM_RandomReadHint(vma))
1635                 return;
1636         if (ra->mmap_miss > 0)
1637                 ra->mmap_miss--;
1638         if (PageReadahead(page))
1639                 page_cache_async_readahead(mapping, ra, file,
1640                                            page, offset, ra->ra_pages);
1641 }
1642
1643 /**
1644  * filemap_fault - read in file data for page fault handling
1645  * @vma:        vma in which the fault was taken
1646  * @vmf:        struct vm_fault containing details of the fault
1647  *
1648  * filemap_fault() is invoked via the vma operations vector for a
1649  * mapped memory region to read in file data during a page fault.
1650  *
1651  * The gotos are kind of ugly, but this streamlines the normal case of having
1652  * it in the page cache, and handles the special cases reasonably without
1653  * having a lot of duplicated code.
1654  */
1655 int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1656 {
1657         int error;
1658         struct file *file = vma->vm_file;
1659         struct address_space *mapping = file->f_mapping;
1660         struct file_ra_state *ra = &file->f_ra;
1661         struct inode *inode = mapping->host;
1662         pgoff_t offset = vmf->pgoff;
1663         struct page *page;
1664         pgoff_t size;
1665         int ret = 0;
1666
1667         size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1668         if (offset >= size)
1669                 return VM_FAULT_SIGBUS;
1670
1671         /*
1672          * Do we have something in the page cache already?
1673          */
1674         page = find_get_page(mapping, offset);
1675         if (likely(page)) {
1676                 /*
1677                  * We found the page, so try async readahead before
1678                  * waiting for the lock.
1679                  */
1680                 do_async_mmap_readahead(vma, ra, file, page, offset);
1681         } else {
1682                 /* No page in the page cache at all */
1683                 do_sync_mmap_readahead(vma, ra, file, offset);
1684                 count_vm_event(PGMAJFAULT);
1685                 mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
1686                 ret = VM_FAULT_MAJOR;
1687 retry_find:
1688                 page = find_get_page(mapping, offset);
1689                 if (!page)
1690                         goto no_cached_page;
1691         }
1692
1693         if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
1694                 page_cache_release(page);
1695                 return ret | VM_FAULT_RETRY;
1696         }
1697
1698         /* Did it get truncated? */
1699         if (unlikely(page->mapping != mapping)) {
1700                 unlock_page(page);
1701                 put_page(page);
1702                 goto retry_find;
1703         }
1704         VM_BUG_ON(page->index != offset);
1705
1706         /*
1707          * We have a locked page in the page cache, now we need to check
1708          * that it's up-to-date. If not, it is going to be due to an error.
1709          */
1710         if (unlikely(!PageUptodate(page)))
1711                 goto page_not_uptodate;
1712
1713         /*
1714          * Found the page and have a reference on it.
1715          * We must recheck i_size under page lock.
1716          */
1717         size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1718         if (unlikely(offset >= size)) {
1719                 unlock_page(page);
1720                 page_cache_release(page);
1721                 return VM_FAULT_SIGBUS;
1722         }
1723
1724         vmf->page = page;
1725         return ret | VM_FAULT_LOCKED;
1726
1727 no_cached_page:
1728         /*
1729          * We're only likely to ever get here if MADV_RANDOM is in
1730          * effect.
1731          */
1732         error = page_cache_read(file, offset);
1733
1734         /*
1735          * The page we want has now been added to the page cache.
1736          * In the unlikely event that someone removed it in the
1737          * meantime, we'll just come back here and read it again.
1738          */
1739         if (error >= 0)
1740                 goto retry_find;
1741
1742         /*
1743          * An error return from page_cache_read can result if the
1744          * system is low on memory, or a problem occurs while trying
1745          * to schedule I/O.
1746          */
1747         if (error == -ENOMEM)
1748                 return VM_FAULT_OOM;
1749         return VM_FAULT_SIGBUS;
1750
1751 page_not_uptodate:
1752         /*
1753          * Umm, take care of errors if the page isn't up-to-date.
1754          * Try to re-read it _once_. We do this synchronously,
1755          * because there really aren't any performance issues here
1756          * and we need to check for errors.
1757          */
1758         ClearPageError(page);
1759         error = mapping->a_ops->readpage(file, page);
1760         if (!error) {
1761                 wait_on_page_locked(page);
1762                 if (!PageUptodate(page))
1763                         error = -EIO;
1764         }
1765         page_cache_release(page);
1766
1767         if (!error || error == AOP_TRUNCATED_PAGE)
1768                 goto retry_find;
1769
1770         /* Things didn't work out. Return SIGBUS to tell the mm layer. */
1771         shrink_readahead_size_eio(file, ra);
1772         return VM_FAULT_SIGBUS;
1773 }
1774 EXPORT_SYMBOL(filemap_fault);
1775
1776 const struct vm_operations_struct generic_file_vm_ops = {
1777         .fault          = filemap_fault,
1778 };
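
/*
 * Example (sketch): filesystems that must intercept writes to shared
 * mappings can still reuse filemap_fault() for the read side, much as
 * ext4 pairs it with its own ->page_mkwrite.  The handler named below
 * is hypothetical.
 */
static const struct vm_operations_struct example_file_vm_ops = {
        .fault          = filemap_fault,
        /* .page_mkwrite = example_page_mkwrite, */
};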
1779
1780 /* This is used for a general mmap of a disk file */
1781
1782 int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
1783 {
1784         struct address_space *mapping = file->f_mapping;
1785
1786         if (!mapping->a_ops->readpage)
1787                 return -ENOEXEC;
1788         file_accessed(file);
1789         vma->vm_ops = &generic_file_vm_ops;
1790         vma->vm_flags |= VM_CAN_NONLINEAR;
1791         return 0;
1792 }
1793
1794 /*
1795  * This is for filesystems which do not implement ->writepage.
1796  */
1797 int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
1798 {
1799         if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
1800                 return -EINVAL;
1801         return generic_file_mmap(file, vma);
1802 }
1803 #else
1804 int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
1805 {
1806         return -ENOSYS;
1807 }
1808 int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma)
1809 {
1810         return -ENOSYS;
1811 }
1812 #endif /* CONFIG_MMU */
1813
1814 EXPORT_SYMBOL(generic_file_mmap);
1815 EXPORT_SYMBOL(generic_file_readonly_mmap);
1816
1817 static struct page *__read_cache_page(struct address_space *mapping,
1818                                 pgoff_t index,
1819                                 int (*filler)(void *, struct page *),
1820                                 void *data,
1821                                 gfp_t gfp)
1822 {
1823         struct page *page;
1824         int err;
1825 repeat:
1826         page = find_get_page(mapping, index);
1827         if (!page) {
1828                 page = __page_cache_alloc(gfp | __GFP_COLD);
1829                 if (!page)
1830                         return ERR_PTR(-ENOMEM);
1831                 err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
1832                 if (unlikely(err)) {
1833                         page_cache_release(page);
1834                         if (err == -EEXIST)
1835                                 goto repeat;
1836                         /* Presumably ENOMEM for radix tree node */
1837                         return ERR_PTR(err);
1838                 }
1839                 err = filler(data, page);
1840                 if (err < 0) {
1841                         page_cache_release(page);
1842                         page = ERR_PTR(err);
1843                 }
1844         }
1845         return page;
1846 }
1847
1848 static struct page *do_read_cache_page(struct address_space *mapping,
1849                                 pgoff_t index,
1850                                 int (*filler)(void *, struct page *),
1851                                 void *data,
1852                                 gfp_t gfp)
1853
1854 {
1855         struct page *page;
1856         int err;
1857
1858 retry:
1859         page = __read_cache_page(mapping, index, filler, data, gfp);
1860         if (IS_ERR(page))
1861                 return page;
1862         if (PageUptodate(page))
1863                 goto out;
1864
1865         lock_page(page);
1866         if (!page->mapping) {
1867                 unlock_page(page);
1868                 page_cache_release(page);
1869                 goto retry;
1870         }
1871         if (PageUptodate(page)) {
1872                 unlock_page(page);
1873                 goto out;
1874         }
1875         err = filler(data, page);
1876         if (err < 0) {
1877                 page_cache_release(page);
1878                 return ERR_PTR(err);
1879         }
1880 out:
1881         mark_page_accessed(page);
1882         return page;
1883 }
1884
1885 /**
1886  * read_cache_page_async - read into page cache, fill it if needed
1887  * @mapping:    the page's address_space
1888  * @index:      the page index
1889  * @filler:     function to perform the read
1890  * @data:       first arg to filler(data, page) function, often left as NULL
1891  *
1892  * Same as read_cache_page, but don't wait for page to become unlocked
1893  * after submitting it to the filler.
1894  *
1895  * Read into the page cache. If a page already exists, and PageUptodate() is
1896  * not set, try to fill the page but don't wait for it to become unlocked.
1897  *
1898  * If the page does not get brought uptodate, return -EIO.
1899  */
1900 struct page *read_cache_page_async(struct address_space *mapping,
1901                                 pgoff_t index,
1902                                 int (*filler)(void *, struct page *),
1903                                 void *data)
1904 {
1905         return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping));
1906 }
1907 EXPORT_SYMBOL(read_cache_page_async);
1908
1909 static struct page *wait_on_page_read(struct page *page)
1910 {
1911         if (!IS_ERR(page)) {
1912                 wait_on_page_locked(page);
1913                 if (!PageUptodate(page)) {
1914                         page_cache_release(page);
1915                         page = ERR_PTR(-EIO);
1916                 }
1917         }
1918         return page;
1919 }
1920
1921 /**
1922  * read_cache_page_gfp - read into page cache, using specified page allocation flags.
1923  * @mapping:    the page's address_space
1924  * @index:      the page index
1925  * @gfp:        the page allocator flags to use if allocating
1926  *
1927  * This is the same as "read_mapping_page(mapping, index, NULL)", but with
1928  * any new page allocations done using the specified allocation flags. Note
1929  * that the radix tree operations will still use GFP_KERNEL, so you can't
1930  * expect to do this atomically or anything like that, but you can pass in
1931  * other page requirements.
1932  *
1933  * If the page does not get brought uptodate, return -EIO.
1934  */
1935 struct page *read_cache_page_gfp(struct address_space *mapping,
1936                                 pgoff_t index,
1937                                 gfp_t gfp)
1938 {
1939         filler_t *filler = (filler_t *)mapping->a_ops->readpage;
1940
1941         return wait_on_page_read(do_read_cache_page(mapping, index, filler, NULL, gfp));
1942 }
1943 EXPORT_SYMBOL(read_cache_page_gfp);
1944
1945 /**
1946  * read_cache_page - read into page cache, fill it if needed
1947  * @mapping:    the page's address_space
1948  * @index:      the page index
1949  * @filler:     function to perform the read
1950  * @data:       first arg to filler(data, page) function, often left as NULL
1951  *
1952  * Read into the page cache. If a page already exists, and PageUptodate() is
1953  * not set, try to fill the page then wait for it to become unlocked.
1954  *
1955  * If the page does not get brought uptodate, return -EIO.
1956  */
1957 struct page *read_cache_page(struct address_space *mapping,
1958                                 pgoff_t index,
1959                                 int (*filler)(void *, struct page *),
1960                                 void *data)
1961 {
1962         return wait_on_page_read(read_cache_page_async(mapping, index, filler, data));
1963 }
1964 EXPORT_SYMBOL(read_cache_page);
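
/*
 * Example (sketch): callers usually go through the read_mapping_page()
 * wrapper in pagemap.h, which supplies the mapping's ->readpage as the
 * filler.  Simple filesystems read directory/metadata pages this way
 * (compare ext2_get_page()):
 */
static struct page *example_get_meta_page(struct address_space *mapping,
                                          pgoff_t n)
{
        struct page *page = read_mapping_page(mapping, n, NULL);

        if (!IS_ERR(page))
                kmap(page);     /* caller kunmaps and releases the page */
        return page;
}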
1965
1966 /*
1967  * The logic we want is
1968  *
1969  *      if suid or (sgid and xgrp)
1970  *              remove privs
1971  */
1972 int should_remove_suid(struct dentry *dentry)
1973 {
1974         mode_t mode = dentry->d_inode->i_mode;
1975         int kill = 0;
1976
1977         /* suid always must be killed */
1978         if (unlikely(mode & S_ISUID))
1979                 kill = ATTR_KILL_SUID;
1980
1981         /*
1982          * sgid without any exec bits is just a mandatory locking mark; leave
1983          * it alone.  If some exec bits are set, it's a real sgid; kill it.
1984          */
1985         if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
1986                 kill |= ATTR_KILL_SGID;
1987
1988         if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
1989                 return kill;
1990
1991         return 0;
1992 }
1993 EXPORT_SYMBOL(should_remove_suid);
1994
1995 static int __remove_suid(struct dentry *dentry, int kill)
1996 {
1997         struct iattr newattrs;
1998
1999         newattrs.ia_valid = ATTR_FORCE | kill;
2000         return notify_change(dentry, &newattrs);
2001 }
2002
2003 int file_remove_suid(struct file *file)
2004 {
2005         struct dentry *dentry = file->f_path.dentry;
2006         struct inode *inode = dentry->d_inode;
2007         int killsuid;
2008         int killpriv;
2009         int error = 0;
2010
2011         /* Fast path for nothing security related */
2012         if (IS_NOSEC(inode))
2013                 return 0;
2014
2015         killsuid = should_remove_suid(dentry);
2016         killpriv = security_inode_need_killpriv(dentry);
2017
2018         if (killpriv < 0)
2019                 return killpriv;
2020         if (killpriv)
2021                 error = security_inode_killpriv(dentry);
2022         if (!error && killsuid)
2023                 error = __remove_suid(dentry, killsuid);
2024         if (!error && (inode->i_sb->s_flags & MS_NOSEC))
2025                 inode->i_flags |= S_NOSEC;
2026
2027         return error;
2028 }
2029 EXPORT_SYMBOL(file_remove_suid);
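
/*
 * Worked example: for a mode 04755 (setuid) regular file written by a
 * task without CAP_FSETID, should_remove_suid() returns ATTR_KILL_SUID
 * and file_remove_suid() clears the bit via notify_change().  For a
 * mode 02644 file (setgid without group-exec, i.e. the mandatory
 * locking mark) it returns 0 and the bit survives the write.
 */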
2030
2031 static size_t __iovec_copy_from_user_inatomic(char *vaddr,
2032                         const struct iovec *iov, size_t base, size_t bytes)
2033 {
2034         size_t copied = 0, left = 0;
2035
2036         while (bytes) {
2037                 char __user *buf = iov->iov_base + base;
2038                 int copy = min(bytes, iov->iov_len - base);
2039
2040                 base = 0;
2041                 left = __copy_from_user_inatomic(vaddr, buf, copy);
2042                 copied += copy;
2043                 bytes -= copy;
2044                 vaddr += copy;
2045                 iov++;
2046
2047                 if (unlikely(left))
2048                         break;
2049         }
2050         return copied - left;
2051 }
2052
2053 /*
2054  * Copy as much as we can into the page and return the number of bytes which
2055  * were successfully copied.  If a fault is encountered then the copy stops
2056  * short and only the bytes copied before the fault are reported.
2057  */
2058 size_t iov_iter_copy_from_user_atomic(struct page *page,
2059                 struct iov_iter *i, unsigned long offset, size_t bytes)
2060 {
2061         char *kaddr;
2062         size_t copied;
2063
2064         BUG_ON(!in_atomic());
2065         kaddr = kmap_atomic(page, KM_USER0);
2066         if (likely(i->nr_segs == 1)) {
2067                 int left;
2068                 char __user *buf = i->iov->iov_base + i->iov_offset;
2069                 left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
2070                 copied = bytes - left;
2071         } else {
2072                 copied = __iovec_copy_from_user_inatomic(kaddr + offset,
2073                                                 i->iov, i->iov_offset, bytes);
2074         }
2075         kunmap_atomic(kaddr, KM_USER0);
2076
2077         return copied;
2078 }
2079 EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
2080
2081 /*
2082  * This has the same side effects and return value as
2083  * iov_iter_copy_from_user_atomic().
2084  * The difference is that it attempts to resolve faults.
2085  * Page must not be locked.
2086  */
2087 size_t iov_iter_copy_from_user(struct page *page,
2088                 struct iov_iter *i, unsigned long offset, size_t bytes)
2089 {
2090         char *kaddr;
2091         size_t copied;
2092
2093         kaddr = kmap(page);
2094         if (likely(i->nr_segs == 1)) {
2095                 int left;
2096                 char __user *buf = i->iov->iov_base + i->iov_offset;
2097                 left = __copy_from_user(kaddr + offset, buf, bytes);
2098                 copied = bytes - left;
2099         } else {
2100                 copied = __iovec_copy_from_user_inatomic(kaddr + offset,
2101                                                 i->iov, i->iov_offset, bytes);
2102         }
2103         kunmap(page);
2104         return copied;
2105 }
2106 EXPORT_SYMBOL(iov_iter_copy_from_user);
2107
2108 void iov_iter_advance(struct iov_iter *i, size_t bytes)
2109 {
2110         BUG_ON(i->count < bytes);
2111
2112         if (likely(i->nr_segs == 1)) {
2113                 i->iov_offset += bytes;
2114                 i->count -= bytes;
2115         } else {
2116                 const struct iovec *iov = i->iov;
2117                 size_t base = i->iov_offset;
2118                 unsigned long nr_segs = i->nr_segs;
2119
2120                 /*
2121                  * The !iov->iov_len check ensures we skip over unlikely
2122                  * zero-length segments (without overrunning the iovec).
2123                  */
2124                 while (bytes || unlikely(i->count && !iov->iov_len)) {
2125                         int copy;
2126
2127                         copy = min(bytes, iov->iov_len - base);
2128                         BUG_ON(!i->count || i->count < copy);
2129                         i->count -= copy;
2130                         bytes -= copy;
2131                         base += copy;
2132                         if (iov->iov_len == base) {
2133                                 iov++;
2134                                 nr_segs--;
2135                                 base = 0;
2136                         }
2137                 }
2138                 i->iov = iov;
2139                 i->iov_offset = base;
2140                 i->nr_segs = nr_segs;
2141         }
2142 }
2143 EXPORT_SYMBOL(iov_iter_advance);
2144
2145 /*
2146  * Fault in the first iovec of the given iov_iter, to a maximum length
2147  * of bytes. Returns 0 on success, or non-zero if the memory could not be
2148  * accessed (i.e. because it is an invalid address).
2149  *
2150  * writev-intensive code may want this to prefault several iovecs -- that
2151  * would be possible (callers must not rely on the fact that _only_ the
2152  * first iovec will be faulted with the current implementation).
2153  */
2154 int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
2155 {
2156         char __user *buf = i->iov->iov_base + i->iov_offset;
2157         bytes = min(bytes, i->iov->iov_len - i->iov_offset);
2158         return fault_in_pages_readable(buf, bytes);
2159 }
2160 EXPORT_SYMBOL(iov_iter_fault_in_readable);
2161
2162 /*
2163  * Return the count of just the current iov_iter segment.
2164  */
2165 size_t iov_iter_single_seg_count(struct iov_iter *i)
2166 {
2167         const struct iovec *iov = i->iov;
2168         if (i->nr_segs == 1)
2169                 return i->count;
2170         else
2171                 return min(i->count, iov->iov_len - i->iov_offset);
2172 }
2173 EXPORT_SYMBOL(iov_iter_single_seg_count);
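
/*
 * Example (illustrative sketch): the iov_iter helpers above are meant
 * to be driven in a loop like the one in generic_perform_write() below.
 * This trimmed copy loop reuses a single page supplied by a
 * hypothetical caller and omits the single-segment retry.
 */
static size_t example_drain_iter(struct page *page, struct iov_iter *i)
{
        size_t total = 0;

        while (iov_iter_count(i)) {
                size_t bytes = min_t(size_t, PAGE_CACHE_SIZE,
                                     iov_iter_count(i));
                size_t copied;

                /* prefault so the atomic copy below has a chance */
                if (iov_iter_fault_in_readable(i, bytes))
                        break;

                pagefault_disable();
                copied = iov_iter_copy_from_user_atomic(page, i, 0, bytes);
                pagefault_enable();

                iov_iter_advance(i, copied);
                total += copied;
                if (!copied)    /* faulted mid-iovec; real callers retry */
                        break;
        }
        return total;
}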
2174
2175 /*
2176  * Performs necessary checks before doing a write
2177  *
2178  * May adjust the writing position or the amount of bytes to write.
2179  * Returns an appropriate error code that the caller should return,
2180  * or zero in case the write should be allowed.
2181  */
2182 inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk)
2183 {
2184         struct inode *inode = file->f_mapping->host;
2185         unsigned long limit = rlimit(RLIMIT_FSIZE);
2186
2187         if (unlikely(*pos < 0))
2188                 return -EINVAL;
2189
2190         if (!isblk) {
2191                 /* FIXME: this is for backwards compatibility with 2.4 */
2192                 if (file->f_flags & O_APPEND)
2193                         *pos = i_size_read(inode);
2194
2195                 if (limit != RLIM_INFINITY) {
2196                         if (*pos >= limit) {
2197                                 send_sig(SIGXFSZ, current, 0);
2198                                 return -EFBIG;
2199                         }
2200                         if (*count > limit - (typeof(limit))*pos) {
2201                                 *count = limit - (typeof(limit))*pos;
2202                         }
2203                 }
2204         }
2205
2206         /*
2207          * LFS rule
2208          */
2209         if (unlikely(*pos + *count > MAX_NON_LFS &&
2210                                 !(file->f_flags & O_LARGEFILE))) {
2211                 if (*pos >= MAX_NON_LFS) {
2212                         return -EFBIG;
2213                 }
2214                 if (*count > MAX_NON_LFS - (unsigned long)*pos) {
2215                         *count = MAX_NON_LFS - (unsigned long)*pos;
2216                 }
2217         }
2218
2219         /*
2220          * Are we about to exceed the fs block limit?
2221          *
2222          * If we have written data it becomes a short write.  If we have
2223          * exceeded without writing data we send a signal and return EFBIG.
2224          * Linus' frestrict idea will clean these up nicely.
2225          */
2226         if (likely(!isblk)) {
2227                 if (unlikely(*pos >= inode->i_sb->s_maxbytes)) {
2228                         if (*count || *pos > inode->i_sb->s_maxbytes) {
2229                                 return -EFBIG;
2230                         }
2231                         /* zero-length writes at ->s_maxbytes are OK */
2232                 }
2233
2234                 if (unlikely(*pos + *count > inode->i_sb->s_maxbytes))
2235                         *count = inode->i_sb->s_maxbytes - *pos;
2236         } else {
2237 #ifdef CONFIG_BLOCK
2238                 loff_t isize;
2239                 if (bdev_read_only(I_BDEV(inode)))
2240                         return -EPERM;
2241                 isize = i_size_read(inode);
2242                 if (*pos >= isize) {
2243                         if (*count || *pos > isize)
2244                                 return -ENOSPC;
2245                 }
2246
2247                 if (*pos + *count > isize)
2248                         *count = isize - *pos;
2249 #else
2250                 return -EPERM;
2251 #endif
2252         }
2253         return 0;
2254 }
2255 EXPORT_SYMBOL(generic_write_checks);
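
/*
 * Worked example of the rlimit clamping above: with RLIMIT_FSIZE set
 * to 1 MiB, a 16 KiB write at offset 1 MiB - 4 KiB is trimmed to a
 * 4 KiB short write, while the same write starting at offset 1 MiB
 * raises SIGXFSZ and fails with -EFBIG.
 */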
2256
2257 int pagecache_write_begin(struct file *file, struct address_space *mapping,
2258                                 loff_t pos, unsigned len, unsigned flags,
2259                                 struct page **pagep, void **fsdata)
2260 {
2261         const struct address_space_operations *aops = mapping->a_ops;
2262
2263         return aops->write_begin(file, mapping, pos, len, flags,
2264                                                         pagep, fsdata);
2265 }
2266 EXPORT_SYMBOL(pagecache_write_begin);
2267
2268 int pagecache_write_end(struct file *file, struct address_space *mapping,
2269                                 loff_t pos, unsigned len, unsigned copied,
2270                                 struct page *page, void *fsdata)
2271 {
2272         const struct address_space_operations *aops = mapping->a_ops;
2273
2274         mark_page_accessed(page);
2275         return aops->write_end(file, mapping, pos, len, copied, page, fsdata);
2276 }
2277 EXPORT_SYMBOL(pagecache_write_end);
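
/*
 * Example (sketch): a kernel-internal writer can drive the
 * ->write_begin/->write_end protocol through these wrappers; it is the
 * same cycle generic_perform_write() runs below.  This assumes the
 * range fits inside one pagecache page and trims error handling.
 */
static int example_write_one_chunk(struct file *file, loff_t pos,
                                   const char *src, unsigned len)
{
        struct address_space *mapping = file->f_mapping;
        unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
        struct page *page;
        void *fsdata;
        int err;

        err = pagecache_write_begin(file, mapping, pos, len,
                                    AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata);
        if (err)
                return err;

        memcpy(kmap(page) + offset, src, len);
        kunmap(page);
        flush_dcache_page(page);

        /* returns bytes accepted (normally len) or a negative error */
        return pagecache_write_end(file, mapping, pos, len, len,
                                   page, fsdata);
}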
2278
2279 ssize_t
2280 generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
2281                 unsigned long *nr_segs, loff_t pos, loff_t *ppos,
2282                 size_t count, size_t ocount)
2283 {
2284         struct file     *file = iocb->ki_filp;
2285         struct address_space *mapping = file->f_mapping;
2286         struct inode    *inode = mapping->host;
2287         ssize_t         written;
2288         size_t          write_len;
2289         pgoff_t         end;
2290
2291         if (count != ocount)
2292                 *nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count);
2293
2294         write_len = iov_length(iov, *nr_segs);
2295         end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT;
2296
2297         written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1);
2298         if (written)
2299                 goto out;
2300
2301         /*
2302          * After a write we want buffered reads to be sure to go to disk to get
2303          * the new data.  We invalidate clean cached pages from the region we're
2304          * about to write.  We do this *before* the write so that we can return
2305          * without clobbering -EIOCBQUEUED from ->direct_IO().
2306          */
2307         if (mapping->nrpages) {
2308                 written = invalidate_inode_pages2_range(mapping,
2309                                         pos >> PAGE_CACHE_SHIFT, end);
2310                 /*
2311                  * If a page cannot be invalidated, return 0 to fall back
2312                  * to buffered write.
2313                  */
2314                 if (written) {
2315                         if (written == -EBUSY)
2316                                 return 0;
2317                         goto out;
2318                 }
2319         }
2320
2321         written = mapping->a_ops->direct_IO(WRITE, iocb, iov, pos, *nr_segs);
2322
2323         /*
2324          * Finally, try again to invalidate clean pages which might have been
2325          * cached by non-direct readahead, or faulted in by get_user_pages()
2326          * if the source of the write was an mmap'ed region of the file
2327          * we're writing.  Either one is a pretty crazy thing to do,
2328          * so we don't support it 100%.  If this invalidation
2329          * fails, tough, the write still worked...
2330          */
2331         if (mapping->nrpages) {
2332                 invalidate_inode_pages2_range(mapping,
2333                                               pos >> PAGE_CACHE_SHIFT, end);
2334         }
2335
2336         if (written > 0) {
2337                 pos += written;
2338                 if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
2339                         i_size_write(inode, pos);
2340                         mark_inode_dirty(inode);
2341                 }
2342                 *ppos = pos;
2343         }
2344 out:
2345         return written;
2346 }
2347 EXPORT_SYMBOL(generic_file_direct_write);
2348
2349 /*
2350  * Find or create a page at the given pagecache position. Return the locked
2351  * page. This function is specifically for buffered writes.
2352  */
2353 struct page *grab_cache_page_write_begin(struct address_space *mapping,
2354                                         pgoff_t index, unsigned flags)
2355 {
2356         int status;
2357         struct page *page;
2358         gfp_t gfp_notmask = 0;
2359         if (flags & AOP_FLAG_NOFS)
2360                 gfp_notmask = __GFP_FS;
2361 repeat:
2362         page = find_lock_page(mapping, index);
2363         if (page)
2364                 goto found;
2365
2366         page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~gfp_notmask);
2367         if (!page)
2368                 return NULL;
2369         status = add_to_page_cache_lru(page, mapping, index,
2370                                                 GFP_KERNEL & ~gfp_notmask);
2371         if (unlikely(status)) {
2372                 page_cache_release(page);
2373                 if (status == -EEXIST)
2374                         goto repeat;
2375                 return NULL;
2376         }
2377 found:
2378         wait_on_page_writeback(page);
2379         return page;
2380 }
2381 EXPORT_SYMBOL(grab_cache_page_write_begin);
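
/*
 * Example (sketch): a minimal ->write_begin built on this helper,
 * essentially what simple_write_begin() in fs/libfs.c does for
 * pagecache-only filesystems (zeroing of partially written new pages
 * is omitted here).
 */
static int example_write_begin(struct file *file,
                               struct address_space *mapping,
                               loff_t pos, unsigned len, unsigned flags,
                               struct page **pagep, void **fsdata)
{
        struct page *page;
        pgoff_t index = pos >> PAGE_CACHE_SHIFT;

        page = grab_cache_page_write_begin(mapping, index, flags);
        if (!page)
                return -ENOMEM;

        *pagep = page;
        return 0;
}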
2382
2383 static ssize_t generic_perform_write(struct file *file,
2384                                 struct iov_iter *i, loff_t pos)
2385 {
2386         struct address_space *mapping = file->f_mapping;
2387         const struct address_space_operations *a_ops = mapping->a_ops;
2388         long status = 0;
2389         ssize_t written = 0;
2390         unsigned int flags = 0;
2391
2392         /*
2393          * Copies from kernel address space cannot fail (NFSD is a big user).
2394          */
2395         if (segment_eq(get_fs(), KERNEL_DS))
2396                 flags |= AOP_FLAG_UNINTERRUPTIBLE;
2397
2398         do {
2399                 struct page *page;
2400                 unsigned long offset;   /* Offset into pagecache page */
2401                 unsigned long bytes;    /* Bytes to write to page */
2402                 size_t copied;          /* Bytes copied from user */
2403                 void *fsdata;
2404
2405                 offset = (pos & (PAGE_CACHE_SIZE - 1));
2406                 bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
2407                                                 iov_iter_count(i));
2408
2409 again:
2410                 /*
2411                  * Bring in the user page that we will copy from _first_.
2412                  * Otherwise there's a nasty deadlock on copying from the
2413                  * same page as we're writing to, without it being marked
2414                  * up-to-date.
2415                  *
2416                  * Not only is this an optimisation, but it is also required
2417                  * to check that the address is actually valid, when atomic
2418                  * usercopies are used, below.
2419                  */
2420                 if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
2421                         status = -EFAULT;
2422                         break;
2423                 }
2424
2425                 status = a_ops->write_begin(file, mapping, pos, bytes, flags,
2426                                                 &page, &fsdata);
2427                 if (unlikely(status))
2428                         break;
2429
2430                 if (mapping_writably_mapped(mapping))
2431                         flush_dcache_page(page);
2432
2433                 pagefault_disable();
2434                 copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
2435                 pagefault_enable();
2436                 flush_dcache_page(page);
2437
2438                 mark_page_accessed(page);
2439                 status = a_ops->write_end(file, mapping, pos, bytes, copied,
2440                                                 page, fsdata);
2441                 if (unlikely(status < 0))
2442                         break;
2443                 copied = status;
2444
2445                 cond_resched();
2446
2447                 iov_iter_advance(i, copied);
2448                 if (unlikely(copied == 0)) {
2449                         /*
2450                          * If we were unable to copy any data at all, we must
2451                          * fall back to a single segment length write.
2452                          *
2453                          * If we didn't fallback here, we could livelock
2454                          * because not all segments in the iov can be copied at
2455                          * once without a pagefault.
2456                          */
2457                         bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
2458                                                 iov_iter_single_seg_count(i));
2459                         goto again;
2460                 }
2461                 pos += copied;
2462                 written += copied;
2463
2464                 balance_dirty_pages_ratelimited(mapping);
2465                 if (fatal_signal_pending(current)) {
2466                         status = -EINTR;
2467                         break;
2468                 }
2469         } while (iov_iter_count(i));
2470
2471         return written ? written : status;
2472 }
2473
2474 ssize_t
2475 generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
2476                 unsigned long nr_segs, loff_t pos, loff_t *ppos,
2477                 size_t count, ssize_t written)
2478 {
2479         struct file *file = iocb->ki_filp;
2480         ssize_t status;
2481         struct iov_iter i;
2482
2483         iov_iter_init(&i, iov, nr_segs, count, written);
2484         status = generic_perform_write(file, &i, pos);
2485
2486         if (likely(status >= 0)) {
2487                 written += status;
2488                 *ppos = pos + status;
2489         }
2490
2491         return written ? written : status;
2492 }
2493 EXPORT_SYMBOL(generic_file_buffered_write);
2494
2495 /**
2496  * __generic_file_aio_write - write data to a file
2497  * @iocb:       IO state structure (file, offset, etc.)
2498  * @iov:        vector with data to write
2499  * @nr_segs:    number of segments in the vector
2500  * @ppos:       position where to write
2501  *
2502  * This function does all the work needed for actually writing data to a
2503  * file. It does all basic checks, removes SUID from the file, updates
2504  * modification times and calls proper subroutines depending on whether we
2505  * do direct IO or a standard buffered write.
2506  *
2507  * It expects i_mutex to be grabbed unless we work on a block device or similar
2508  * object which does not need locking at all.
2509  *
2510  * This function does *not* take care of syncing data in case of O_SYNC write.
2511  * A caller has to handle it. This is mainly due to the fact that we want to
2512  * avoid syncing under i_mutex.
2513  */
2514 ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
2515                                  unsigned long nr_segs, loff_t *ppos)
2516 {
2517         struct file *file = iocb->ki_filp;
2518         struct address_space * mapping = file->f_mapping;
2519         size_t ocount;          /* original count */
2520         size_t count;           /* after file limit checks */
2521         struct inode    *inode = mapping->host;
2522         loff_t          pos;
2523         ssize_t         written;
2524         ssize_t         err;
2525
2526         ocount = 0;
2527         err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
2528         if (err)
2529                 return err;
2530
2531         count = ocount;
2532         pos = *ppos;
2533
2534         vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
2535
2536         /* We can write back this queue in page reclaim */
2537         current->backing_dev_info = mapping->backing_dev_info;
2538         written = 0;
2539
2540         err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
2541         if (err)
2542                 goto out;
2543
2544         if (count == 0)
2545                 goto out;
2546
2547         err = file_remove_suid(file);
2548         if (err)
2549                 goto out;
2550
2551         file_update_time(file);
2552
2553         /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
2554         if (unlikely(file->f_flags & O_DIRECT)) {
2555                 loff_t endbyte;
2556                 ssize_t written_buffered;
2557
2558                 written = generic_file_direct_write(iocb, iov, &nr_segs, pos,
2559                                                         ppos, count, ocount);
2560                 if (written < 0 || written == count)
2561                         goto out;
2562                 /*
2563                  * direct-io write to a hole: fall through to buffered I/O
2564                  * for completing the rest of the request.
2565                  */
2566                 pos += written;
2567                 count -= written;
2568                 written_buffered = generic_file_buffered_write(iocb, iov,
2569                                                 nr_segs, pos, ppos, count,
2570                                                 written);
2571                 /*
2572                  * If generic_file_buffered_write() returned a synchronous error
2573                  * then we want to return the number of bytes which were
2574                  * direct-written, or the error code if that was zero.  Note
2575                  * that this differs from normal direct-io semantics, which
2576                  * will return -EFOO even if some bytes were written.
2577                  */
2578                 if (written_buffered < 0) {
2579                         err = written_buffered;
2580                         goto out;
2581                 }
2582
2583                 /*
2584                  * We need to ensure that the page cache pages are written to
2585                  * disk and invalidated to preserve the expected O_DIRECT
2586                  * semantics.
2587                  */
2588                 endbyte = pos + written_buffered - written - 1;
2589                 err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
2590                 if (err == 0) {
2591                         written = written_buffered;
2592                         invalidate_mapping_pages(mapping,
2593                                                  pos >> PAGE_CACHE_SHIFT,
2594                                                  endbyte >> PAGE_CACHE_SHIFT);
2595                 } else {
2596                         /*
2597                          * We don't know how much we wrote, so just return
2598                          * the number of bytes which were direct-written
2599                          */
2600                 }
2601         } else {
2602                 written = generic_file_buffered_write(iocb, iov, nr_segs,
2603                                 pos, ppos, count, written);
2604         }
2605 out:
2606         current->backing_dev_info = NULL;
2607         return written ? written : err;
2608 }
2609 EXPORT_SYMBOL(__generic_file_aio_write);
2610
2611 /**
2612  * generic_file_aio_write - write data to a file
2613  * @iocb:       IO state structure
2614  * @iov:        vector with data to write
2615  * @nr_segs:    number of segments in the vector
2616  * @pos:        position in file where to write
2617  *
2618  * This is a wrapper around __generic_file_aio_write() to be used by most
2619  * filesystems. It takes care of syncing the file in case of an O_SYNC
2620  * write and acquires i_mutex as needed.
2621  */
2622 ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
2623                 unsigned long nr_segs, loff_t pos)
2624 {
2625         struct file *file = iocb->ki_filp;
2626         struct inode *inode = file->f_mapping->host;
2627         struct blk_plug plug;
2628         ssize_t ret;
2629
2630         BUG_ON(iocb->ki_pos != pos);
2631
2632         mutex_lock(&inode->i_mutex);
2633         blk_start_plug(&plug);
2634         ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
2635         mutex_unlock(&inode->i_mutex);
2636
2637         if (ret > 0 || ret == -EIOCBQUEUED) {
2638                 ssize_t err;
2639
2640                 err = generic_write_sync(file, pos, ret);
2641                 if (err < 0 && ret > 0)
2642                         ret = err;
2643         }
2644         blk_finish_plug(&plug);
2645         return ret;
2646 }
2647 EXPORT_SYMBOL(generic_file_aio_write);
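
/*
 * Example: most filesystems expose this as their write path by pairing
 * it with the synchronous wrapper, e.g.
 *
 *      .write          = do_sync_write,
 *      .aio_write      = generic_file_aio_write,
 *
 * do_sync_write() builds a kiocb and waits for it, so plain write(2)
 * ends up here as well.
 */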
2648
2649 /**
2650  * try_to_release_page() - release old fs-specific metadata on a page
2651  *
2652  * @page: the page which the kernel is trying to free
2653  * @gfp_mask: memory allocation flags (and I/O mode)
2654  *
2655  * The address_space is asked to try to release any data held against the
2656  * page (presumably at page->private).  If the release was successful, return `1'.
2657  * Otherwise return zero.
2658  *
2659  * This may also be called if PG_fscache is set on a page, indicating that the
2660  * page is known to the local caching routines.
2661  *
2662  * The @gfp_mask argument specifies whether I/O may be performed to release
2663  * this page (__GFP_IO), and whether the call may block (__GFP_WAIT & __GFP_FS).
2664  *
2665  */
2666 int try_to_release_page(struct page *page, gfp_t gfp_mask)
2667 {
2668         struct address_space * const mapping = page->mapping;
2669
2670         BUG_ON(!PageLocked(page));
2671         if (PageWriteback(page))
2672                 return 0;
2673
2674         if (mapping && mapping->a_ops->releasepage)
2675                 return mapping->a_ops->releasepage(page, gfp_mask);
2676         return try_to_free_buffers(page);
2677 }
2678
2679 EXPORT_SYMBOL(try_to_release_page);
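
/*
 * Example: simple block-backed filesystems leave ->releasepage unset
 * and rely on the try_to_free_buffers() fallback above, while
 * journalling filesystems (e.g. ext3) supply their own ->releasepage
 * in their address_space_operations so they can refuse to free a page
 * whose buffers are still tied to a transaction.
 */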