shmem: fix faulting into a hole, not taking i_mutex
[pandora-kernel.git] mm/shmem.c
1 /*
2  * Resizable virtual memory filesystem for Linux.
3  *
4  * Copyright (C) 2000 Linus Torvalds.
5  *               2000 Transmeta Corp.
6  *               2000-2001 Christoph Rohland
7  *               2000-2001 SAP AG
8  *               2002 Red Hat Inc.
9  * Copyright (C) 2002-2011 Hugh Dickins.
10  * Copyright (C) 2011 Google Inc.
11  * Copyright (C) 2002-2005 VERITAS Software Corporation.
12  * Copyright (C) 2004 Andi Kleen, SuSE Labs
13  *
14  * Extended attribute support for tmpfs:
15  * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
16  * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
17  *
18  * tiny-shmem:
19  * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
20  *
21  * This file is released under the GPL.
22  */
23
24 #include <linux/fs.h>
25 #include <linux/init.h>
26 #include <linux/vfs.h>
27 #include <linux/mount.h>
28 #include <linux/pagemap.h>
29 #include <linux/file.h>
30 #include <linux/mm.h>
31 #include <linux/export.h>
32 #include <linux/swap.h>
33
34 static struct vfsmount *shm_mnt;
35
36 #ifdef CONFIG_SHMEM
37 /*
38  * This virtual memory filesystem is heavily based on the ramfs. It
39  * extends ramfs by the ability to use swap and honor resource limits
40  * which makes it a completely usable filesystem.
41  */
42
43 #include <linux/xattr.h>
44 #include <linux/exportfs.h>
45 #include <linux/posix_acl.h>
46 #include <linux/generic_acl.h>
47 #include <linux/mman.h>
48 #include <linux/string.h>
49 #include <linux/slab.h>
50 #include <linux/backing-dev.h>
51 #include <linux/shmem_fs.h>
52 #include <linux/writeback.h>
53 #include <linux/blkdev.h>
54 #include <linux/pagevec.h>
55 #include <linux/percpu_counter.h>
56 #include <linux/splice.h>
57 #include <linux/security.h>
58 #include <linux/swapops.h>
59 #include <linux/mempolicy.h>
60 #include <linux/namei.h>
61 #include <linux/ctype.h>
62 #include <linux/migrate.h>
63 #include <linux/highmem.h>
64 #include <linux/seq_file.h>
65 #include <linux/magic.h>
66
67 #include <asm/uaccess.h>
68 #include <asm/pgtable.h>
69
70 #define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
71 #define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)
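/*
 * Illustration, assuming the common 4kB PAGE_CACHE_SIZE: BLOCKS_PER_PAGE
 * is 8, so each allocated page adds 8 to i_blocks (counted in 512-byte
 * units), and VM_ACCT(5000) is 2, i.e. sizes are accounted in whole pages.
 */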
72
73 /* Pretend that each entry is of this size in directory's i_size */
74 #define BOGO_DIRENT_SIZE 20
75
76 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
77 #define SHORT_SYMLINK_LEN 128
78
79 /*
80  * vmtruncate_range() communicates with shmem_fault via
81  * inode->i_private (with i_mutex making sure that it has only one user at
82  * a time): we would prefer not to enlarge the shmem inode just for that.
83  */
84 struct shmem_falloc {
85         wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
86         pgoff_t start;          /* start of range currently being fallocated */
87         pgoff_t next;           /* the next page offset to be fallocated */
88 };
89
90 struct shmem_xattr {
91         struct list_head list;  /* anchored by shmem_inode_info->xattr_list */
92         char *name;             /* xattr name */
93         size_t size;
94         char value[0];
95 };
96
97 /* Flag allocation requirements to shmem_getpage */
98 enum sgp_type {
99         SGP_READ,       /* don't exceed i_size, don't allocate page */
100         SGP_CACHE,      /* don't exceed i_size, may allocate page */
101         SGP_DIRTY,      /* like SGP_CACHE, but set new page dirty */
102         SGP_WRITE,      /* may exceed i_size, may allocate page */
103 };
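/*
 * Usage in this file: shmem_fault() and splice read ask for SGP_CACHE,
 * read(2) uses SGP_READ (or SGP_DIRTY when a stacking filesystem reads
 * with KERNEL_DS), shmem_write_begin() uses SGP_WRITE, and the partial
 * page zeroed by shmem_truncate_range() is looked up with SGP_READ, so
 * a hole is never instantiated just to be zeroed.
 */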
104
105 #ifdef CONFIG_TMPFS
106 static unsigned long shmem_default_max_blocks(void)
107 {
108         return totalram_pages / 2;
109 }
110
111 static unsigned long shmem_default_max_inodes(void)
112 {
113         return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
114 }
115 #endif
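/*
 * Rough illustration: with 2GB of RAM and 4kB pages (totalram_pages of
 * about 524288), an unsized tmpfs mount defaults to about 1GB of blocks;
 * inodes likewise default to at most half of RAM, but never more than
 * the lowmem pages, since each inode costs unswappable lowmem.
 */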
116
117 static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
118         struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type);
119
120 static inline int shmem_getpage(struct inode *inode, pgoff_t index,
121         struct page **pagep, enum sgp_type sgp, int *fault_type)
122 {
123         return shmem_getpage_gfp(inode, index, pagep, sgp,
124                         mapping_gfp_mask(inode->i_mapping), fault_type);
125 }
126
127 static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
128 {
129         return sb->s_fs_info;
130 }
131
132 /*
133  * shmem_file_setup pre-accounts the whole fixed size of a VM object,
134  * for shared memory and for shared anonymous (/dev/zero) mappings
135  * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
136  * consistent with the pre-accounting of private mappings ...
137  */
138 static inline int shmem_acct_size(unsigned long flags, loff_t size)
139 {
140         return (flags & VM_NORESERVE) ?
141                 0 : security_vm_enough_memory_kern(VM_ACCT(size));
142 }
143
144 static inline void shmem_unacct_size(unsigned long flags, loff_t size)
145 {
146         if (!(flags & VM_NORESERVE))
147                 vm_unacct_memory(VM_ACCT(size));
148 }
149
150 /*
151  * ... whereas tmpfs objects are accounted incrementally as
152  * pages are allocated, in order to allow huge sparse files.
153  * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
154  * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
155  */
156 static inline int shmem_acct_block(unsigned long flags)
157 {
158         return (flags & VM_NORESERVE) ?
159                 security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE)) : 0;
160 }
161
162 static inline void shmem_unacct_blocks(unsigned long flags, long pages)
163 {
164         if (flags & VM_NORESERVE)
165                 vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
166 }
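/*
 * Concrete contrast, assuming 4kB pages: a 1MB shared-anonymous object
 * without VM_NORESERVE is charged VM_ACCT(1MB) = 256 pages once, up
 * front; a VM_NORESERVE (tmpfs) object is charged nothing at setup, and
 * shmem_acct_block() charges one page each time shmem_getpage_gfp()
 * allocates one, so a huge sparse file pays only for pages it touches.
 */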
167
168 static const struct super_operations shmem_ops;
169 static const struct address_space_operations shmem_aops;
170 static const struct file_operations shmem_file_operations;
171 static const struct inode_operations shmem_inode_operations;
172 static const struct inode_operations shmem_dir_inode_operations;
173 static const struct inode_operations shmem_special_inode_operations;
174 static const struct vm_operations_struct shmem_vm_ops;
175
176 static struct backing_dev_info shmem_backing_dev_info  __read_mostly = {
177         .ra_pages       = 0,    /* No readahead */
178         .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
179 };
180
181 static LIST_HEAD(shmem_swaplist);
182 static DEFINE_MUTEX(shmem_swaplist_mutex);
183
184 static int shmem_reserve_inode(struct super_block *sb)
185 {
186         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
187         if (sbinfo->max_inodes) {
188                 spin_lock(&sbinfo->stat_lock);
189                 if (!sbinfo->free_inodes) {
190                         spin_unlock(&sbinfo->stat_lock);
191                         return -ENOSPC;
192                 }
193                 sbinfo->free_inodes--;
194                 spin_unlock(&sbinfo->stat_lock);
195         }
196         return 0;
197 }
198
199 static void shmem_free_inode(struct super_block *sb)
200 {
201         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
202         if (sbinfo->max_inodes) {
203                 spin_lock(&sbinfo->stat_lock);
204                 sbinfo->free_inodes++;
205                 spin_unlock(&sbinfo->stat_lock);
206         }
207 }
208
209 /**
210  * shmem_recalc_inode - recalculate the block usage of an inode
211  * @inode: inode to recalc
212  *
213  * We have to calculate the free blocks since the mm can drop
214  * undirtied hole pages behind our back.
215  *
216  * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
217  * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
218  *
219  * It has to be called with the spinlock held.
220  */
221 static void shmem_recalc_inode(struct inode *inode)
222 {
223         struct shmem_inode_info *info = SHMEM_I(inode);
224         long freed;
225
226         freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
227         if (freed > 0) {
228                 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
229                 if (sbinfo->max_blocks)
230                         percpu_counter_add(&sbinfo->used_blocks, -freed);
231                 info->alloced -= freed;
232                 inode->i_blocks -= freed * BLOCKS_PER_PAGE;
233                 shmem_unacct_blocks(info->flags, freed);
234         }
235 }
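/*
 * Example: if info->alloced is 10 while nrpages is 5 and info->swapped
 * is 2, then 3 undirtied hole pages were reclaimed behind our back;
 * those 3 are handed back to used_blocks, i_blocks and the memory
 * accounting.
 */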
236
237 /*
238  * Replace item expected in radix tree by a new item, while holding tree lock.
239  */
240 static int shmem_radix_tree_replace(struct address_space *mapping,
241                         pgoff_t index, void *expected, void *replacement)
242 {
243         void **pslot;
244         void *item = NULL;
245
246         VM_BUG_ON(!expected);
247         pslot = radix_tree_lookup_slot(&mapping->page_tree, index);
248         if (pslot)
249                 item = radix_tree_deref_slot_protected(pslot,
250                                                         &mapping->tree_lock);
251         if (item != expected)
252                 return -ENOENT;
253         if (replacement)
254                 radix_tree_replace_slot(pslot, replacement);
255         else
256                 radix_tree_delete(&mapping->page_tree, index);
257         return 0;
258 }
259
260 /*
261  * Like add_to_page_cache_locked, but error if expected item has gone.
262  */
263 static int shmem_add_to_page_cache(struct page *page,
264                                    struct address_space *mapping,
265                                    pgoff_t index, gfp_t gfp, void *expected)
266 {
267         int error = 0;
268
269         VM_BUG_ON(!PageLocked(page));
270         VM_BUG_ON(!PageSwapBacked(page));
271
272         if (!expected)
273                 error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
274         if (!error) {
275                 page_cache_get(page);
276                 page->mapping = mapping;
277                 page->index = index;
278
279                 spin_lock_irq(&mapping->tree_lock);
280                 if (!expected)
281                         error = radix_tree_insert(&mapping->page_tree,
282                                                         index, page);
283                 else
284                         error = shmem_radix_tree_replace(mapping, index,
285                                                         expected, page);
286                 if (!error) {
287                         mapping->nrpages++;
288                         __inc_zone_page_state(page, NR_FILE_PAGES);
289                         __inc_zone_page_state(page, NR_SHMEM);
290                         spin_unlock_irq(&mapping->tree_lock);
291                 } else {
292                         page->mapping = NULL;
293                         spin_unlock_irq(&mapping->tree_lock);
294                         page_cache_release(page);
295                 }
296                 if (!expected)
297                         radix_tree_preload_end();
298         }
299         if (error)
300                 mem_cgroup_uncharge_cache_page(page);
301         return error;
302 }
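/*
 * Note on @expected: NULL means a fresh insert, so the radix tree node
 * must be preloaded; a swap entry (as passed by the swap-in paths) means
 * that entry already holds the slot, so no preload is needed and the
 * call fails with -ENOENT if the entry has meanwhile changed.  On any
 * failure the caller's mem_cgroup charge is undone here.
 */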
303
304 /*
305  * Like delete_from_page_cache, but substitutes swap for page.
306  */
307 static void shmem_delete_from_page_cache(struct page *page, void *radswap)
308 {
309         struct address_space *mapping = page->mapping;
310         int error;
311
312         spin_lock_irq(&mapping->tree_lock);
313         error = shmem_radix_tree_replace(mapping, page->index, page, radswap);
314         page->mapping = NULL;
315         mapping->nrpages--;
316         __dec_zone_page_state(page, NR_FILE_PAGES);
317         __dec_zone_page_state(page, NR_SHMEM);
318         spin_unlock_irq(&mapping->tree_lock);
319         page_cache_release(page);
320         BUG_ON(error);
321 }
322
323 /*
324  * Like find_get_pages, but collecting swap entries as well as pages.
325  */
326 static unsigned shmem_find_get_pages_and_swap(struct address_space *mapping,
327                                         pgoff_t start, unsigned int nr_pages,
328                                         struct page **pages, pgoff_t *indices)
329 {
330         unsigned int i;
331         unsigned int ret;
332         unsigned int nr_found;
333
334         rcu_read_lock();
335 restart:
336         nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
337                                 (void ***)pages, indices, start, nr_pages);
338         ret = 0;
339         for (i = 0; i < nr_found; i++) {
340                 struct page *page;
341 repeat:
342                 page = radix_tree_deref_slot((void **)pages[i]);
343                 if (unlikely(!page))
344                         continue;
345                 if (radix_tree_exception(page)) {
346                         if (radix_tree_deref_retry(page))
347                                 goto restart;
348                         /*
349                          * Otherwise, we must be storing a swap entry
350                          * here as an exceptional entry: so return it
351                          * without attempting to raise page count.
352                          */
353                         goto export;
354                 }
355                 if (!page_cache_get_speculative(page))
356                         goto repeat;
357
358                 /* Has the page moved? */
359                 if (unlikely(page != *((void **)pages[i]))) {
360                         page_cache_release(page);
361                         goto repeat;
362                 }
363 export:
364                 indices[ret] = indices[i];
365                 pages[ret] = page;
366                 ret++;
367         }
368         if (unlikely(!ret && nr_found))
369                 goto restart;
370         rcu_read_unlock();
371         return ret;
372 }
373
374 /*
375  * Remove swap entry from radix tree, free the swap and its page cache.
376  */
377 static int shmem_free_swap(struct address_space *mapping,
378                            pgoff_t index, void *radswap)
379 {
380         int error;
381
382         spin_lock_irq(&mapping->tree_lock);
383         error = shmem_radix_tree_replace(mapping, index, radswap, NULL);
384         spin_unlock_irq(&mapping->tree_lock);
385         if (!error)
386                 free_swap_and_cache(radix_to_swp_entry(radswap));
387         return error;
388 }
389
390 /*
391  * Pagevec may contain swap entries, so shuffle up pages before releasing.
392  */
393 static void shmem_deswap_pagevec(struct pagevec *pvec)
394 {
395         int i, j;
396
397         for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
398                 struct page *page = pvec->pages[i];
399                 if (!radix_tree_exceptional_entry(page))
400                         pvec->pages[j++] = page;
401         }
402         pvec->nr = j;
403 }
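/*
 * Example: a pagevec holding { page A, swap entry, page B } is packed
 * down to { page A, page B } with nr = 2, so that pagevec_release()
 * only ever sees real pages.
 */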
404
405 /*
406  * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
407  */
408 void shmem_unlock_mapping(struct address_space *mapping)
409 {
410         struct pagevec pvec;
411         pgoff_t indices[PAGEVEC_SIZE];
412         pgoff_t index = 0;
413
414         pagevec_init(&pvec, 0);
415         /*
416          * Minor point, but we might as well stop if someone else SHM_LOCKs it.
417          */
418         while (!mapping_unevictable(mapping)) {
419                 /*
420                  * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
421                  * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
422                  */
423                 pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
424                                         PAGEVEC_SIZE, pvec.pages, indices);
425                 if (!pvec.nr)
426                         break;
427                 index = indices[pvec.nr - 1] + 1;
428                 shmem_deswap_pagevec(&pvec);
429                 check_move_unevictable_pages(pvec.pages, pvec.nr);
430                 pagevec_release(&pvec);
431                 cond_resched();
432         }
433 }
434
435 /*
436  * Remove range of pages and swap entries from radix tree, and free them.
437  */
438 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
439 {
440         struct address_space *mapping = inode->i_mapping;
441         struct shmem_inode_info *info = SHMEM_I(inode);
442         pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
443         unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
444         pgoff_t end = (lend >> PAGE_CACHE_SHIFT);
445         struct pagevec pvec;
446         pgoff_t indices[PAGEVEC_SIZE];
447         long nr_swaps_freed = 0;
448         pgoff_t index;
449         int i;
450
451         BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
452
453         pagevec_init(&pvec, 0);
454         index = start;
455         while (index <= end) {
456                 pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
457                         min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
458                                                         pvec.pages, indices);
459                 if (!pvec.nr)
460                         break;
461                 mem_cgroup_uncharge_start();
462                 for (i = 0; i < pagevec_count(&pvec); i++) {
463                         struct page *page = pvec.pages[i];
464
465                         index = indices[i];
466                         if (index > end)
467                                 break;
468
469                         if (radix_tree_exceptional_entry(page)) {
470                                 nr_swaps_freed += !shmem_free_swap(mapping,
471                                                                 index, page);
472                                 continue;
473                         }
474
475                         if (!trylock_page(page))
476                                 continue;
477                         if (page->mapping == mapping) {
478                                 VM_BUG_ON(PageWriteback(page));
479                                 truncate_inode_page(mapping, page);
480                         }
481                         unlock_page(page);
482                 }
483                 shmem_deswap_pagevec(&pvec);
484                 pagevec_release(&pvec);
485                 mem_cgroup_uncharge_end();
486                 cond_resched();
487                 index++;
488         }
489
490         if (partial) {
491                 struct page *page = NULL;
492                 shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
493                 if (page) {
494                         zero_user_segment(page, partial, PAGE_CACHE_SIZE);
495                         set_page_dirty(page);
496                         unlock_page(page);
497                         page_cache_release(page);
498                 }
499         }
500
501         index = start;
502         for ( ; ; ) {
503                 cond_resched();
504                 pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
505                         min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
506                                                         pvec.pages, indices);
507                 if (!pvec.nr) {
508                         if (index == start)
509                                 break;
510                         index = start;
511                         continue;
512                 }
513                 if (index == start && indices[0] > end) {
514                         shmem_deswap_pagevec(&pvec);
515                         pagevec_release(&pvec);
516                         break;
517                 }
518                 mem_cgroup_uncharge_start();
519                 for (i = 0; i < pagevec_count(&pvec); i++) {
520                         struct page *page = pvec.pages[i];
521
522                         index = indices[i];
523                         if (index > end)
524                                 break;
525
526                         if (radix_tree_exceptional_entry(page)) {
527                                 nr_swaps_freed += !shmem_free_swap(mapping,
528                                                                 index, page);
529                                 continue;
530                         }
531
532                         lock_page(page);
533                         if (page->mapping == mapping) {
534                                 VM_BUG_ON(PageWriteback(page));
535                                 truncate_inode_page(mapping, page);
536                         }
537                         unlock_page(page);
538                 }
539                 shmem_deswap_pagevec(&pvec);
540                 pagevec_release(&pvec);
541                 mem_cgroup_uncharge_end();
542                 index++;
543         }
544
545         spin_lock(&info->lock);
546         info->swapped -= nr_swaps_freed;
547         shmem_recalc_inode(inode);
548         spin_unlock(&info->lock);
549
550         inode->i_ctime = inode->i_mtime = CURRENT_TIME;
551 }
552 EXPORT_SYMBOL_GPL(shmem_truncate_range);
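/*
 * Worked example, assuming 4kB pages: shmem_truncate_range(inode, 1000,
 * 8191) gives start = 1, partial = 1000, end = 1; the two passes drop
 * whatever page or swap entry sits at index 1, and the partial step
 * zeroes bytes 1000..4095 of page 0 in place and marks it dirty.
 * shmem_setattr() and shmem_evict_inode() pass lend = -1 to truncate
 * everything from lstart onwards.
 */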
553
554 static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
555 {
556         struct inode *inode = dentry->d_inode;
557         int error;
558
559         error = inode_change_ok(inode, attr);
560         if (error)
561                 return error;
562
563         if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
564                 loff_t oldsize = inode->i_size;
565                 loff_t newsize = attr->ia_size;
566
567                 if (newsize != oldsize) {
568                         i_size_write(inode, newsize);
569                         inode->i_ctime = inode->i_mtime = CURRENT_TIME;
570                 }
571                 if (newsize < oldsize) {
572                         loff_t holebegin = round_up(newsize, PAGE_SIZE);
573                         unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
574                         shmem_truncate_range(inode, newsize, (loff_t)-1);
575                         /* unmap again to remove racily COWed private pages */
576                         unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
577                 }
578         }
579
580         setattr_copy(inode, attr);
581 #ifdef CONFIG_TMPFS_POSIX_ACL
582         if (attr->ia_valid & ATTR_MODE)
583                 error = generic_acl_chmod(inode);
584 #endif
585         return error;
586 }
587
588 static void shmem_evict_inode(struct inode *inode)
589 {
590         struct shmem_inode_info *info = SHMEM_I(inode);
591         struct shmem_xattr *xattr, *nxattr;
592
593         if (inode->i_mapping->a_ops == &shmem_aops) {
594                 shmem_unacct_size(info->flags, inode->i_size);
595                 inode->i_size = 0;
596                 shmem_truncate_range(inode, 0, (loff_t)-1);
597                 if (!list_empty(&info->swaplist)) {
598                         mutex_lock(&shmem_swaplist_mutex);
599                         list_del_init(&info->swaplist);
600                         mutex_unlock(&shmem_swaplist_mutex);
601                 }
602         } else
603                 kfree(info->symlink);
604
605         list_for_each_entry_safe(xattr, nxattr, &info->xattr_list, list) {
606                 kfree(xattr->name);
607                 kfree(xattr);
608         }
609         WARN_ON(inode->i_blocks);
610         shmem_free_inode(inode->i_sb);
611         end_writeback(inode);
612 }
613
614 /*
615  * If swap found in inode, free it and move page from swapcache to filecache.
616  */
617 static int shmem_unuse_inode(struct shmem_inode_info *info,
618                              swp_entry_t swap, struct page *page)
619 {
620         struct address_space *mapping = info->vfs_inode.i_mapping;
621         void *radswap;
622         pgoff_t index;
623         int error;
624
625         radswap = swp_to_radix_entry(swap);
626         index = radix_tree_locate_item(&mapping->page_tree, radswap);
627         if (index == -1)
628                 return 0;
629
630         /*
631          * Move _head_ to start search for next from here.
632          * But be careful: shmem_evict_inode checks list_empty without taking
633          * mutex, and there's an instant in list_move_tail when info->swaplist
634          * would appear empty, if it were the only one on shmem_swaplist.
635          */
636         if (shmem_swaplist.next != &info->swaplist)
637                 list_move_tail(&shmem_swaplist, &info->swaplist);
638
639         /*
640          * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
641          * but also to hold up shmem_evict_inode(): so inode cannot be freed
642          * beneath us (pagelock doesn't help until the page is in pagecache).
643          */
644         error = shmem_add_to_page_cache(page, mapping, index,
645                                                 GFP_NOWAIT, radswap);
646         /* which does mem_cgroup_uncharge_cache_page on error */
647
648         if (error != -ENOMEM) {
649                 /*
650                  * Truncation and eviction use free_swap_and_cache(), which
651                  * only does trylock page: if we raced, best clean up here.
652                  */
653                 delete_from_swap_cache(page);
654                 set_page_dirty(page);
655                 if (!error) {
656                         spin_lock(&info->lock);
657                         info->swapped--;
658                         spin_unlock(&info->lock);
659                         swap_free(swap);
660                 }
661                 error = 1;      /* not an error, but entry was found */
662         }
663         return error;
664 }
665
666 /*
667  * Search through swapped inodes to find and replace swap by page.
668  */
669 int shmem_unuse(swp_entry_t swap, struct page *page)
670 {
671         struct list_head *this, *next;
672         struct shmem_inode_info *info;
673         int found = 0;
674         int error;
675
676         /*
677          * Charge page using GFP_KERNEL while we can wait, before taking
678          * the shmem_swaplist_mutex which might hold up shmem_writepage().
679          * Charged back to the user (not to caller) when swap account is used.
680          */
681         error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
682         if (error)
683                 goto out;
684         /* No radix_tree_preload: swap entry keeps a place for page in tree */
685
686         mutex_lock(&shmem_swaplist_mutex);
687         list_for_each_safe(this, next, &shmem_swaplist) {
688                 info = list_entry(this, struct shmem_inode_info, swaplist);
689                 if (info->swapped)
690                         found = shmem_unuse_inode(info, swap, page);
691                 else
692                         list_del_init(&info->swaplist);
693                 cond_resched();
694                 if (found)
695                         break;
696         }
697         mutex_unlock(&shmem_swaplist_mutex);
698
699         if (!found)
700                 mem_cgroup_uncharge_cache_page(page);
701         if (found < 0)
702                 error = found;
703 out:
704         unlock_page(page);
705         page_cache_release(page);
706         return error;
707 }
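/*
 * This is the swapoff path: the page comes in locked from the swapcache,
 * is charged before taking the swaplist mutex, and each swapped shmem
 * inode is asked in turn whether its radix tree holds this swap entry;
 * the first match gets the page re-inserted in place of the entry and
 * the swap slot freed.  The page is unlocked and released either way.
 */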
708
709 /*
710  * Move the page from the page cache to the swap cache.
711  */
712 static int shmem_writepage(struct page *page, struct writeback_control *wbc)
713 {
714         struct shmem_inode_info *info;
715         struct address_space *mapping;
716         struct inode *inode;
717         swp_entry_t swap;
718         pgoff_t index;
719
720         BUG_ON(!PageLocked(page));
721         mapping = page->mapping;
722         index = page->index;
723         inode = mapping->host;
724         info = SHMEM_I(inode);
725         if (info->flags & VM_LOCKED)
726                 goto redirty;
727         if (!total_swap_pages)
728                 goto redirty;
729
730         /*
731          * shmem_backing_dev_info's capabilities prevent regular writeback or
732          * sync from ever calling shmem_writepage; but a stacking filesystem
733          * might use ->writepage of its underlying filesystem, in which case
734          * tmpfs should write out to swap only in response to memory pressure,
735          * and not for the writeback threads or sync.
736          */
737         if (!wbc->for_reclaim) {
738                 WARN_ON_ONCE(1);        /* Still happens? Tell us about it! */
739                 goto redirty;
740         }
741         swap = get_swap_page();
742         if (!swap.val)
743                 goto redirty;
744
745         /*
746          * Add inode to shmem_unuse()'s list of swapped-out inodes,
747          * if it's not already there.  Do it now before the page is
748          * moved to swap cache, when its pagelock no longer protects
749          * the inode from eviction.  But don't unlock the mutex until
750          * we've incremented swapped, because shmem_unuse_inode() will
751          * prune a !swapped inode from the swaplist under this mutex.
752          */
753         mutex_lock(&shmem_swaplist_mutex);
754         if (list_empty(&info->swaplist))
755                 list_add_tail(&info->swaplist, &shmem_swaplist);
756
757         if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
758                 swap_shmem_alloc(swap);
759                 shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
760
761                 spin_lock(&info->lock);
762                 info->swapped++;
763                 shmem_recalc_inode(inode);
764                 spin_unlock(&info->lock);
765
766                 mutex_unlock(&shmem_swaplist_mutex);
767                 BUG_ON(page_mapped(page));
768                 swap_writepage(page, wbc);
769                 return 0;
770         }
771
772         mutex_unlock(&shmem_swaplist_mutex);
773         swapcache_free(swap, NULL);
774 redirty:
775         set_page_dirty(page);
776         if (wbc->for_reclaim)
777                 return AOP_WRITEPAGE_ACTIVATE;  /* Return with page locked */
778         unlock_page(page);
779         return 0;
780 }
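/*
 * On the successful path above: the inode is put on shmem_swaplist (if
 * not already there) while the page lock still pins it in pagecache, the
 * page is added to the swapcache, its radix tree slot is replaced by the
 * swap entry, info->swapped is bumped, and swap_writepage() then does
 * the actual I/O and unlocks the page.  On failure the swap slot is
 * given back and the page is simply redirtied.
 */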
781
782 #ifdef CONFIG_NUMA
783 #ifdef CONFIG_TMPFS
784 static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
785 {
786         char buffer[64];
787
788         if (!mpol || mpol->mode == MPOL_DEFAULT)
789                 return;         /* show nothing */
790
791         mpol_to_str(buffer, sizeof(buffer), mpol, 1);
792
793         seq_printf(seq, ",mpol=%s", buffer);
794 }
795
796 static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
797 {
798         struct mempolicy *mpol = NULL;
799         if (sbinfo->mpol) {
800                 spin_lock(&sbinfo->stat_lock);  /* prevent replace/use races */
801                 mpol = sbinfo->mpol;
802                 mpol_get(mpol);
803                 spin_unlock(&sbinfo->stat_lock);
804         }
805         return mpol;
806 }
807 #endif /* CONFIG_TMPFS */
808
809 static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
810                         struct shmem_inode_info *info, pgoff_t index)
811 {
812         struct vm_area_struct pvma;
813         struct page *page;
814
815         /* Create a pseudo vma that just contains the policy */
816         pvma.vm_start = 0;
817         pvma.vm_pgoff = index;
818         pvma.vm_ops = NULL;
819         pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);
820
821         page = swapin_readahead(swap, gfp, &pvma, 0);
822
823         /* Drop reference taken by mpol_shared_policy_lookup() */
824         mpol_cond_put(pvma.vm_policy);
825
826         return page;
827 }
828
829 static struct page *shmem_alloc_page(gfp_t gfp,
830                         struct shmem_inode_info *info, pgoff_t index)
831 {
832         struct vm_area_struct pvma;
833         struct page *page;
834
835         /* Create a pseudo vma that just contains the policy */
836         pvma.vm_start = 0;
837         pvma.vm_pgoff = index;
838         pvma.vm_ops = NULL;
839         pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);
840
841         page = alloc_page_vma(gfp, &pvma, 0);
842
843         /* Drop reference taken by mpol_shared_policy_lookup() */
844         mpol_cond_put(pvma.vm_policy);
845
846         return page;
847 }
848 #else /* !CONFIG_NUMA */
849 #ifdef CONFIG_TMPFS
850 static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
851 {
852 }
853 #endif /* CONFIG_TMPFS */
854
855 static inline struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
856                         struct shmem_inode_info *info, pgoff_t index)
857 {
858         return swapin_readahead(swap, gfp, NULL, 0);
859 }
860
861 static inline struct page *shmem_alloc_page(gfp_t gfp,
862                         struct shmem_inode_info *info, pgoff_t index)
863 {
864         return alloc_page(gfp);
865 }
866 #endif /* CONFIG_NUMA */
867
868 #if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
869 static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
870 {
871         return NULL;
872 }
873 #endif
874
875 /*
876  * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
877  *
878  * If we allocate a new one we do not mark it dirty. That's up to the
879  * vm. If we swap it in we mark it dirty, since we also free the swap
880  * entry: a page cannot live in both the swap and page cache.
881  */
882 static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
883         struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type)
884 {
885         struct address_space *mapping = inode->i_mapping;
886         struct shmem_inode_info *info;
887         struct shmem_sb_info *sbinfo;
888         struct page *page;
889         swp_entry_t swap;
890         int error;
891         int once = 0;
892
893         if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT))
894                 return -EFBIG;
895 repeat:
896         swap.val = 0;
897         page = find_lock_page(mapping, index);
898         if (radix_tree_exceptional_entry(page)) {
899                 swap = radix_to_swp_entry(page);
900                 page = NULL;
901         }
902
903         if (sgp != SGP_WRITE &&
904             ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
905                 error = -EINVAL;
906                 goto failed;
907         }
908
909         if (page || (sgp == SGP_READ && !swap.val)) {
910                 /*
911                  * Once we can get the page lock, it must be uptodate:
912                  * if there were an error in reading back from swap,
913                  * the page would not be inserted into the filecache.
914                  */
915                 BUG_ON(page && !PageUptodate(page));
916                 *pagep = page;
917                 return 0;
918         }
919
920         /*
921          * Fast cache lookup did not find it:
922          * bring it back from swap or allocate.
923          */
924         info = SHMEM_I(inode);
925         sbinfo = SHMEM_SB(inode->i_sb);
926
927         if (swap.val) {
928                 /* Look it up and read it in.. */
929                 page = lookup_swap_cache(swap);
930                 if (!page) {
931                         /* here we actually do the io */
932                         if (fault_type)
933                                 *fault_type |= VM_FAULT_MAJOR;
934                         page = shmem_swapin(swap, gfp, info, index);
935                         if (!page) {
936                                 error = -ENOMEM;
937                                 goto failed;
938                         }
939                 }
940
941                 /* We have to do this with page locked to prevent races */
942                 lock_page(page);
943                 if (!PageUptodate(page)) {
944                         error = -EIO;
945                         goto failed;
946                 }
947                 wait_on_page_writeback(page);
948
949                 /* Someone may have already done it for us */
950                 if (page->mapping) {
951                         if (page->mapping == mapping &&
952                             page->index == index)
953                                 goto done;
954                         error = -EEXIST;
955                         goto failed;
956                 }
957
958                 error = mem_cgroup_cache_charge(page, current->mm,
959                                                 gfp & GFP_RECLAIM_MASK);
960                 if (!error)
961                         error = shmem_add_to_page_cache(page, mapping, index,
962                                                 gfp, swp_to_radix_entry(swap));
963                 if (error)
964                         goto failed;
965
966                 spin_lock(&info->lock);
967                 info->swapped--;
968                 shmem_recalc_inode(inode);
969                 spin_unlock(&info->lock);
970
971                 delete_from_swap_cache(page);
972                 set_page_dirty(page);
973                 swap_free(swap);
974
975         } else {
976                 if (shmem_acct_block(info->flags)) {
977                         error = -ENOSPC;
978                         goto failed;
979                 }
980                 if (sbinfo->max_blocks) {
981                         if (percpu_counter_compare(&sbinfo->used_blocks,
982                                                 sbinfo->max_blocks) >= 0) {
983                                 error = -ENOSPC;
984                                 goto unacct;
985                         }
986                         percpu_counter_inc(&sbinfo->used_blocks);
987                 }
988
989                 page = shmem_alloc_page(gfp, info, index);
990                 if (!page) {
991                         error = -ENOMEM;
992                         goto decused;
993                 }
994
995                 SetPageSwapBacked(page);
996                 __set_page_locked(page);
997                 error = mem_cgroup_cache_charge(page, current->mm,
998                                                 gfp & GFP_RECLAIM_MASK);
999                 if (!error)
1000                         error = shmem_add_to_page_cache(page, mapping, index,
1001                                                 gfp, NULL);
1002                 if (error)
1003                         goto decused;
1004                 lru_cache_add_anon(page);
1005
1006                 spin_lock(&info->lock);
1007                 info->alloced++;
1008                 inode->i_blocks += BLOCKS_PER_PAGE;
1009                 shmem_recalc_inode(inode);
1010                 spin_unlock(&info->lock);
1011
1012                 clear_highpage(page);
1013                 flush_dcache_page(page);
1014                 SetPageUptodate(page);
1015                 if (sgp == SGP_DIRTY)
1016                         set_page_dirty(page);
1017         }
1018 done:
1019         /* Perhaps the file has been truncated since we checked */
1020         if (sgp != SGP_WRITE &&
1021             ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
1022                 error = -EINVAL;
1023                 goto trunc;
1024         }
1025         *pagep = page;
1026         return 0;
1027
1028         /*
1029          * Error recovery.
1030          */
1031 trunc:
1032         ClearPageDirty(page);
1033         delete_from_page_cache(page);
1034         spin_lock(&info->lock);
1035         info->alloced--;
1036         inode->i_blocks -= BLOCKS_PER_PAGE;
1037         spin_unlock(&info->lock);
1038 decused:
1039         if (sbinfo->max_blocks)
1040                 percpu_counter_add(&sbinfo->used_blocks, -1);
1041 unacct:
1042         shmem_unacct_blocks(info->flags, 1);
1043 failed:
1044         if (swap.val && error != -EINVAL) {
1045                 struct page *test = find_get_page(mapping, index);
1046                 if (test && !radix_tree_exceptional_entry(test))
1047                         page_cache_release(test);
1048                 /* Have another try if the entry has changed */
1049                 if (test != swp_to_radix_entry(swap))
1050                         error = -EEXIST;
1051         }
1052         if (page) {
1053                 unlock_page(page);
1054                 page_cache_release(page);
1055         }
1056         if (error == -ENOSPC && !once++) {
1057                 info = SHMEM_I(inode);
1058                 spin_lock(&info->lock);
1059                 shmem_recalc_inode(inode);
1060                 spin_unlock(&info->lock);
1061                 goto repeat;
1062         }
1063         if (error == -EEXIST)
1064                 goto repeat;
1065         return error;
1066 }
1067
1068 static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1069 {
1070         struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
1071         int error;
1072         int ret = VM_FAULT_LOCKED;
1073
1074         /*
1075          * Trinity finds that probing a hole which tmpfs is punching can
1076          * prevent the hole-punch from ever completing: which in turn
1077          * locks writers out with its hold on i_mutex.  So refrain from
1078          * faulting pages into the hole while it's being punched.  Although
1079          * shmem_truncate_range() does remove the additions, it may be unable to
1080          * keep up, as each new page needs its own unmap_mapping_range() call,
1081          * and the i_mmap tree grows ever slower to scan if new vmas are added.
1082          *
1083          * It does not matter if we sometimes reach this check just before the
1084          * hole-punch begins, so that one fault then races with the punch:
1085          * we just need to make racing faults a rare case.
1086          *
1087          * The implementation below would be much simpler if we just used a
1088          * standard mutex or completion: but we cannot take i_mutex in fault,
1089          * and bloating every shmem inode for this unlikely case would be sad.
1090          */
1091         if (unlikely(inode->i_private)) {
1092                 struct shmem_falloc *shmem_falloc;
1093
1094                 spin_lock(&inode->i_lock);
1095                 shmem_falloc = inode->i_private;
1096                 if (shmem_falloc &&
1097                     vmf->pgoff >= shmem_falloc->start &&
1098                     vmf->pgoff < shmem_falloc->next) {
1099                         wait_queue_head_t *shmem_falloc_waitq;
1100                         DEFINE_WAIT(shmem_fault_wait);
1101
1102                         ret = VM_FAULT_NOPAGE;
1103                         if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
1104                            !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
1105                                 /* It's polite to up mmap_sem if we can */
1106                                 up_read(&vma->vm_mm->mmap_sem);
1107                                 ret = VM_FAULT_RETRY;
1108                         }
1109
1110                         shmem_falloc_waitq = shmem_falloc->waitq;
1111                         prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
1112                                         TASK_UNINTERRUPTIBLE);
1113                         spin_unlock(&inode->i_lock);
1114                         schedule();
1115
1116                         /*
1117                          * shmem_falloc_waitq points into the vmtruncate_range()
1118                          * stack of the hole-punching task: shmem_falloc_waitq
1119                          * is usually invalid by the time we reach here, but
1120                          * finish_wait() does not dereference it in that case;
1121                          * though i_lock needed lest racing with wake_up_all().
1122                          */
1123                         spin_lock(&inode->i_lock);
1124                         finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
1125                         spin_unlock(&inode->i_lock);
1126                         return ret;
1127                 }
1128                 spin_unlock(&inode->i_lock);
1129         }
1130
1131         error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
1132         if (error)
1133                 return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
1134
1135         if (ret & VM_FAULT_MAJOR) {
1136                 count_vm_event(PGMAJFAULT);
1137                 mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
1138         }
1139         return ret;
1140 }
1141
1142 int vmtruncate_range(struct inode *inode, loff_t lstart, loff_t lend)
1143 {
1144         /*
1145          * If the underlying filesystem is not going to provide
1146          * a way to truncate a range of blocks (punch a hole) -
1147          * we should return failure right now.
1148          * Only CONFIG_SHMEM shmem.c ever supported i_op->truncate_range().
1149          */
1150         if (inode->i_op->truncate_range != shmem_truncate_range)
1151                 return -ENOSYS;
1152
1153         mutex_lock(&inode->i_mutex);
1154         {
1155                 struct shmem_falloc shmem_falloc;
1156                 struct address_space *mapping = inode->i_mapping;
1157                 loff_t unmap_start = round_up(lstart, PAGE_SIZE);
1158                 loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
1159                 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
1160
1161                 shmem_falloc.waitq = &shmem_falloc_waitq;
1162                 shmem_falloc.start = unmap_start >> PAGE_SHIFT;
1163                 shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
1164                 spin_lock(&inode->i_lock);
1165                 inode->i_private = &shmem_falloc;
1166                 spin_unlock(&inode->i_lock);
1167
1168                 if ((u64)unmap_end > (u64)unmap_start)
1169                         unmap_mapping_range(mapping, unmap_start,
1170                                             1 + unmap_end - unmap_start, 0);
1171                 shmem_truncate_range(inode, lstart, lend);
1172                 /* No need to unmap again: hole-punching leaves COWed pages */
1173
1174                 spin_lock(&inode->i_lock);
1175                 inode->i_private = NULL;
1176                 wake_up_all(&shmem_falloc_waitq);
1177                 spin_unlock(&inode->i_lock);
1178         }
1179         mutex_unlock(&inode->i_mutex);
1180         return 0;
1181 }
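/*
 * Taken together with shmem_fault() above, this is the handshake that
 * the "faulting into a hole" fix relies on: the punching task publishes
 * an on-stack shmem_falloc (wait queue plus [start, next) range) in
 * inode->i_private under i_lock, unmaps and truncates the hole, then
 * clears i_private and wake_up_all()s the queue.  A fault landing in
 * that range sleeps on the puncher's wait queue (releasing mmap_sem and
 * returning VM_FAULT_RETRY when allowed) instead of re-instantiating
 * pages faster than the punch can remove them.
 */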
1182
1183 #ifdef CONFIG_NUMA
1184 static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
1185 {
1186         struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
1187         return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
1188 }
1189
1190 static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
1191                                           unsigned long addr)
1192 {
1193         struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
1194         pgoff_t index;
1195
1196         index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
1197         return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
1198 }
1199 #endif
1200
1201 int shmem_lock(struct file *file, int lock, struct user_struct *user)
1202 {
1203         struct inode *inode = file->f_path.dentry->d_inode;
1204         struct shmem_inode_info *info = SHMEM_I(inode);
1205         int retval = -ENOMEM;
1206
1207         spin_lock(&info->lock);
1208         if (lock && !(info->flags & VM_LOCKED)) {
1209                 if (!user_shm_lock(inode->i_size, user))
1210                         goto out_nomem;
1211                 info->flags |= VM_LOCKED;
1212                 mapping_set_unevictable(file->f_mapping);
1213         }
1214         if (!lock && (info->flags & VM_LOCKED) && user) {
1215                 user_shm_unlock(inode->i_size, user);
1216                 info->flags &= ~VM_LOCKED;
1217                 mapping_clear_unevictable(file->f_mapping);
1218         }
1219         retval = 0;
1220
1221 out_nomem:
1222         spin_unlock(&info->lock);
1223         return retval;
1224 }
1225
1226 static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
1227 {
1228         file_accessed(file);
1229         vma->vm_ops = &shmem_vm_ops;
1230         vma->vm_flags |= VM_CAN_NONLINEAR;
1231         return 0;
1232 }
1233
1234 static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
1235                                      int mode, dev_t dev, unsigned long flags)
1236 {
1237         struct inode *inode;
1238         struct shmem_inode_info *info;
1239         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
1240
1241         if (shmem_reserve_inode(sb))
1242                 return NULL;
1243
1244         inode = new_inode(sb);
1245         if (inode) {
1246                 inode->i_ino = get_next_ino();
1247                 inode_init_owner(inode, dir, mode);
1248                 inode->i_blocks = 0;
1249                 inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
1250                 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
1251                 inode->i_generation = get_seconds();
1252                 info = SHMEM_I(inode);
1253                 memset(info, 0, (char *)inode - (char *)info);
1254                 spin_lock_init(&info->lock);
1255                 info->flags = flags & VM_NORESERVE;
1256                 INIT_LIST_HEAD(&info->swaplist);
1257                 INIT_LIST_HEAD(&info->xattr_list);
1258                 cache_no_acl(inode);
1259
1260                 switch (mode & S_IFMT) {
1261                 default:
1262                         inode->i_op = &shmem_special_inode_operations;
1263                         init_special_inode(inode, mode, dev);
1264                         break;
1265                 case S_IFREG:
1266                         inode->i_mapping->a_ops = &shmem_aops;
1267                         inode->i_op = &shmem_inode_operations;
1268                         inode->i_fop = &shmem_file_operations;
1269                         mpol_shared_policy_init(&info->policy,
1270                                                  shmem_get_sbmpol(sbinfo));
1271                         break;
1272                 case S_IFDIR:
1273                         inc_nlink(inode);
1274                         /* Some things misbehave if size == 0 on a directory */
1275                         inode->i_size = 2 * BOGO_DIRENT_SIZE;
1276                         inode->i_op = &shmem_dir_inode_operations;
1277                         inode->i_fop = &simple_dir_operations;
1278                         break;
1279                 case S_IFLNK:
1280                         /*
1281                          * Must not load anything in the rbtree,
1282                          * mpol_free_shared_policy will not be called.
1283                          */
1284                         mpol_shared_policy_init(&info->policy, NULL);
1285                         break;
1286                 }
1287         } else
1288                 shmem_free_inode(sb);
1289         return inode;
1290 }
1291
1292 #ifdef CONFIG_TMPFS
1293 static const struct inode_operations shmem_symlink_inode_operations;
1294 static const struct inode_operations shmem_short_symlink_operations;
1295
1296 static int
1297 shmem_write_begin(struct file *file, struct address_space *mapping,
1298                         loff_t pos, unsigned len, unsigned flags,
1299                         struct page **pagep, void **fsdata)
1300 {
1301         struct inode *inode = mapping->host;
1302         pgoff_t index = pos >> PAGE_CACHE_SHIFT;
1303         return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
1304 }
1305
1306 static int
1307 shmem_write_end(struct file *file, struct address_space *mapping,
1308                         loff_t pos, unsigned len, unsigned copied,
1309                         struct page *page, void *fsdata)
1310 {
1311         struct inode *inode = mapping->host;
1312
1313         if (pos + copied > inode->i_size)
1314                 i_size_write(inode, pos + copied);
1315
1316         set_page_dirty(page);
1317         unlock_page(page);
1318         page_cache_release(page);
1319
1320         return copied;
1321 }
1322
1323 static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
1324 {
1325         struct inode *inode = filp->f_path.dentry->d_inode;
1326         struct address_space *mapping = inode->i_mapping;
1327         pgoff_t index;
1328         unsigned long offset;
1329         enum sgp_type sgp = SGP_READ;
1330
1331         /*
1332          * Might this read be for a stacking filesystem?  Then when reading
1333          * holes of a sparse file, we actually need to allocate those pages,
1334          * and even mark them dirty, so it cannot exceed the max_blocks limit.
1335          */
1336         if (segment_eq(get_fs(), KERNEL_DS))
1337                 sgp = SGP_DIRTY;
1338
1339         index = *ppos >> PAGE_CACHE_SHIFT;
1340         offset = *ppos & ~PAGE_CACHE_MASK;
1341
1342         for (;;) {
1343                 struct page *page = NULL;
1344                 pgoff_t end_index;
1345                 unsigned long nr, ret;
1346                 loff_t i_size = i_size_read(inode);
1347
1348                 end_index = i_size >> PAGE_CACHE_SHIFT;
1349                 if (index > end_index)
1350                         break;
1351                 if (index == end_index) {
1352                         nr = i_size & ~PAGE_CACHE_MASK;
1353                         if (nr <= offset)
1354                                 break;
1355                 }
1356
1357                 desc->error = shmem_getpage(inode, index, &page, sgp, NULL);
1358                 if (desc->error) {
1359                         if (desc->error == -EINVAL)
1360                                 desc->error = 0;
1361                         break;
1362                 }
1363                 if (page)
1364                         unlock_page(page);
1365
1366                 /*
1367                  * We must evaluate after, since reads (unlike writes)
1368                  * are called without i_mutex protection against truncate
1369                  */
1370                 nr = PAGE_CACHE_SIZE;
1371                 i_size = i_size_read(inode);
1372                 end_index = i_size >> PAGE_CACHE_SHIFT;
1373                 if (index == end_index) {
1374                         nr = i_size & ~PAGE_CACHE_MASK;
1375                         if (nr <= offset) {
1376                                 if (page)
1377                                         page_cache_release(page);
1378                                 break;
1379                         }
1380                 }
1381                 nr -= offset;
1382
1383                 if (page) {
1384                         /*
1385                          * If users can be writing to this page using arbitrary
1386                          * virtual addresses, take care about potential aliasing
1387                          * before reading the page on the kernel side.
1388                          */
1389                         if (mapping_writably_mapped(mapping))
1390                                 flush_dcache_page(page);
1391                         /*
1392                          * Mark the page accessed if we read the beginning.
1393                          */
1394                         if (!offset)
1395                                 mark_page_accessed(page);
1396                 } else {
1397                         page = ZERO_PAGE(0);
1398                         page_cache_get(page);
1399                 }
1400
1401                 /*
1402                  * Ok, we have the page, and it's up-to-date, so
1403                  * now we can copy it to user space...
1404                  *
1405                  * The actor routine returns how many bytes were actually used..
1406                  * NOTE! This may not be the same as how much of a user buffer
1407                  * we filled up (we may be padding etc), so we can only update
1408                  * "pos" here (the actor routine has to update the user buffer
1409                  * pointers and the remaining count).
1410                  */
1411                 ret = actor(desc, page, offset, nr);
1412                 offset += ret;
1413                 index += offset >> PAGE_CACHE_SHIFT;
1414                 offset &= ~PAGE_CACHE_MASK;
1415
1416                 page_cache_release(page);
1417                 if (ret != nr || !desc->count)
1418                         break;
1419
1420                 cond_resched();
1421         }
1422
1423         *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
1424         file_accessed(filp);
1425 }
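
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the minimal shape of an actor, per the contract described inside
 * do_shmem_file_read() above.  It copies up to @size bytes from @page at
 * @offset into the user buffer, advances the descriptor, and returns how
 * many bytes it consumed; file_read_actor() in mm/filemap.c is the real
 * actor passed in below.
 */
static int example_read_actor(read_descriptor_t *desc, struct page *page,
                              unsigned long offset, unsigned long size)
{
        unsigned long left;
        char *kaddr;

        if (size > desc->count)
                size = desc->count;

        kaddr = kmap(page);
        left = __copy_to_user(desc->arg.buf, kaddr + offset, size);
        kunmap(page);

        if (left) {
                size -= left;   /* partial copy: report only what reached the user */
                desc->error = -EFAULT;
        }
        desc->count -= size;
        desc->written += size;
        desc->arg.buf += size;
        return size;
}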
1426
1427 static ssize_t shmem_file_aio_read(struct kiocb *iocb,
1428                 const struct iovec *iov, unsigned long nr_segs, loff_t pos)
1429 {
1430         struct file *filp = iocb->ki_filp;
1431         ssize_t retval;
1432         unsigned long seg;
1433         size_t count;
1434         loff_t *ppos = &iocb->ki_pos;
1435
1436         retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
1437         if (retval)
1438                 return retval;
1439
1440         for (seg = 0; seg < nr_segs; seg++) {
1441                 read_descriptor_t desc;
1442
1443                 desc.written = 0;
1444                 desc.arg.buf = iov[seg].iov_base;
1445                 desc.count = iov[seg].iov_len;
1446                 if (desc.count == 0)
1447                         continue;
1448                 desc.error = 0;
1449                 do_shmem_file_read(filp, ppos, &desc, file_read_actor);
1450                 retval += desc.written;
1451                 if (desc.error) {
1452                         retval = retval ?: desc.error;
1453                         break;
1454                 }
1455                 if (desc.count > 0)
1456                         break;
1457         }
1458         return retval;
1459 }
1460
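/*
 * Splice data from a tmpfs file into a pipe: gather the pages already in
 * the page cache with find_get_pages_contig(), fill any holes through
 * shmem_getpage() (which may bring pages back from swap), then hand the
 * resulting page/offset/length segments to splice_to_pipe().  i_size is
 * re-checked for each page so a concurrent truncate is not overrun.
 */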
1461 static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
1462                                 struct pipe_inode_info *pipe, size_t len,
1463                                 unsigned int flags)
1464 {
1465         struct address_space *mapping = in->f_mapping;
1466         struct inode *inode = mapping->host;
1467         unsigned int loff, nr_pages, req_pages;
1468         struct page *pages[PIPE_DEF_BUFFERS];
1469         struct partial_page partial[PIPE_DEF_BUFFERS];
1470         struct page *page;
1471         pgoff_t index, end_index;
1472         loff_t isize, left;
1473         int error, page_nr;
1474         struct splice_pipe_desc spd = {
1475                 .pages = pages,
1476                 .partial = partial,
1477                 .nr_pages_max = PIPE_DEF_BUFFERS,
1478                 .flags = flags,
1479                 .ops = &page_cache_pipe_buf_ops,
1480                 .spd_release = spd_release_page,
1481         };
1482
1483         isize = i_size_read(inode);
1484         if (unlikely(*ppos >= isize))
1485                 return 0;
1486
1487         left = isize - *ppos;
1488         if (unlikely(left < len))
1489                 len = left;
1490
1491         if (splice_grow_spd(pipe, &spd))
1492                 return -ENOMEM;
1493
1494         index = *ppos >> PAGE_CACHE_SHIFT;
1495         loff = *ppos & ~PAGE_CACHE_MASK;
1496         req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1497         nr_pages = min(req_pages, pipe->buffers);
1498
1499         spd.nr_pages = find_get_pages_contig(mapping, index,
1500                                                 nr_pages, spd.pages);
1501         index += spd.nr_pages;
1502         error = 0;
1503
1504         while (spd.nr_pages < nr_pages) {
1505                 error = shmem_getpage(inode, index, &page, SGP_CACHE, NULL);
1506                 if (error)
1507                         break;
1508                 unlock_page(page);
1509                 spd.pages[spd.nr_pages++] = page;
1510                 index++;
1511         }
1512
1513         index = *ppos >> PAGE_CACHE_SHIFT;
1514         nr_pages = spd.nr_pages;
1515         spd.nr_pages = 0;
1516
1517         for (page_nr = 0; page_nr < nr_pages; page_nr++) {
1518                 unsigned int this_len;
1519
1520                 if (!len)
1521                         break;
1522
1523                 this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
1524                 page = spd.pages[page_nr];
1525
1526                 if (!PageUptodate(page) || page->mapping != mapping) {
1527                         error = shmem_getpage(inode, index, &page,
1528                                                         SGP_CACHE, NULL);
1529                         if (error)
1530                                 break;
1531                         unlock_page(page);
1532                         page_cache_release(spd.pages[page_nr]);
1533                         spd.pages[page_nr] = page;
1534                 }
1535
1536                 isize = i_size_read(inode);
1537                 end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
1538                 if (unlikely(!isize || index > end_index))
1539                         break;
1540
1541                 if (end_index == index) {
1542                         unsigned int plen;
1543
1544                         plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
1545                         if (plen <= loff)
1546                                 break;
1547
1548                         this_len = min(this_len, plen - loff);
1549                         len = this_len;
1550                 }
1551
1552                 spd.partial[page_nr].offset = loff;
1553                 spd.partial[page_nr].len = this_len;
1554                 len -= this_len;
1555                 loff = 0;
1556                 spd.nr_pages++;
1557                 index++;
1558         }
1559
1560         while (page_nr < nr_pages)
1561                 page_cache_release(spd.pages[page_nr++]);
1562
1563         if (spd.nr_pages)
1564                 error = splice_to_pipe(pipe, &spd);
1565
1566         splice_shrink_spd(&spd);
1567
1568         if (error > 0) {
1569                 *ppos += error;
1570                 file_accessed(in);
1571         }
1572         return error;
1573 }
1574
1575 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
1576 {
1577         struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
1578
1579         buf->f_type = TMPFS_MAGIC;
1580         buf->f_bsize = PAGE_CACHE_SIZE;
1581         buf->f_namelen = NAME_MAX;
1582         if (sbinfo->max_blocks) {
1583                 buf->f_blocks = sbinfo->max_blocks;
1584                 buf->f_bavail =
1585                 buf->f_bfree  = sbinfo->max_blocks -
1586                                 percpu_counter_sum(&sbinfo->used_blocks);
1587         }
1588         if (sbinfo->max_inodes) {
1589                 buf->f_files = sbinfo->max_inodes;
1590                 buf->f_ffree = sbinfo->free_inodes;
1591         }
1592         /* else leave those fields 0 like simple_statfs */
1593         return 0;
1594 }
1595
1596 /*
1597  * File creation. Allocate an inode, and we're done.
1598  */
1599 static int
1600 shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
1601 {
1602         struct inode *inode;
1603         int error = -ENOSPC;
1604
1605         inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
1606         if (inode) {
1607                 error = security_inode_init_security(inode, dir,
1608                                                      &dentry->d_name,
1609                                                      NULL, NULL);
1610                 if (error) {
1611                         if (error != -EOPNOTSUPP) {
1612                                 iput(inode);
1613                                 return error;
1614                         }
1615                 }
1616 #ifdef CONFIG_TMPFS_POSIX_ACL
1617                 error = generic_acl_init(inode, dir);
1618                 if (error) {
1619                         iput(inode);
1620                         return error;
1621                 }
1622 #else
1623                 error = 0;
1624 #endif
1625                 dir->i_size += BOGO_DIRENT_SIZE;
1626                 dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1627                 d_instantiate(dentry, inode);
1628                 dget(dentry); /* Extra count - pin the dentry in core */
1629         }
1630         return error;
1631 }
1632
1633 static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
1634 {
1635         int error;
1636
1637         if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
1638                 return error;
1639         inc_nlink(dir);
1640         return 0;
1641 }
1642
1643 static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
1644                 struct nameidata *nd)
1645 {
1646         return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
1647 }
1648
1649 /*
1650  * Link a file.
1651  */
1652 static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
1653 {
1654         struct inode *inode = old_dentry->d_inode;
1655         int ret;
1656
1657         /*
1658          * No ordinary (disk based) filesystem counts links as inodes;
1659          * but each new link needs a new dentry, pinning lowmem, and
1660          * tmpfs dentries cannot be pruned until they are unlinked.
1661          */
1662         ret = shmem_reserve_inode(inode->i_sb);
1663         if (ret)
1664                 goto out;
1665
1666         dir->i_size += BOGO_DIRENT_SIZE;
1667         inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1668         inc_nlink(inode);
1669         ihold(inode);   /* New dentry reference */
1670         dget(dentry);           /* Extra pinning count for the created dentry */
1671         d_instantiate(dentry, inode);
1672 out:
1673         return ret;
1674 }
1675
1676 static int shmem_unlink(struct inode *dir, struct dentry *dentry)
1677 {
1678         struct inode *inode = dentry->d_inode;
1679
1680         if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
1681                 shmem_free_inode(inode->i_sb);
1682
1683         dir->i_size -= BOGO_DIRENT_SIZE;
1684         inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1685         drop_nlink(inode);
1686         dput(dentry);   /* Undo the count from "create" - this does all the work */
1687         return 0;
1688 }
1689
1690 static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
1691 {
1692         if (!simple_empty(dentry))
1693                 return -ENOTEMPTY;
1694
1695         drop_nlink(dentry->d_inode);
1696         drop_nlink(dir);
1697         return shmem_unlink(dir, dentry);
1698 }
1699
1700 /*
1701  * The VFS layer already does all the dentry stuff for rename;
1702  * we just have to decrement the usage count for the target if
1703  * it exists so that the VFS layer correctly frees it when it
1704  * gets overwritten.
1705  */
1706 static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
1707 {
1708         struct inode *inode = old_dentry->d_inode;
1709         int they_are_dirs = S_ISDIR(inode->i_mode);
1710
1711         if (!simple_empty(new_dentry))
1712                 return -ENOTEMPTY;
1713
1714         if (new_dentry->d_inode) {
1715                 (void) shmem_unlink(new_dir, new_dentry);
1716                 if (they_are_dirs)
1717                         drop_nlink(old_dir);
1718         } else if (they_are_dirs) {
1719                 drop_nlink(old_dir);
1720                 inc_nlink(new_dir);
1721         }
1722
1723         old_dir->i_size -= BOGO_DIRENT_SIZE;
1724         new_dir->i_size += BOGO_DIRENT_SIZE;
1725         old_dir->i_ctime = old_dir->i_mtime =
1726         new_dir->i_ctime = new_dir->i_mtime =
1727         inode->i_ctime = CURRENT_TIME;
1728         return 0;
1729 }
1730
1731 static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
1732 {
1733         int error;
1734         int len;
1735         struct inode *inode;
1736         struct page *page;
1737         char *kaddr;
1738         struct shmem_inode_info *info;
1739
1740         len = strlen(symname) + 1;
1741         if (len > PAGE_CACHE_SIZE)
1742                 return -ENAMETOOLONG;
1743
1744         inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
1745         if (!inode)
1746                 return -ENOSPC;
1747
1748         error = security_inode_init_security(inode, dir, &dentry->d_name,
1749                                              NULL, NULL);
1750         if (error) {
1751                 if (error != -EOPNOTSUPP) {
1752                         iput(inode);
1753                         return error;
1754                 }
1755                 error = 0;
1756         }
1757
1758         info = SHMEM_I(inode);
1759         inode->i_size = len-1;
1760         if (len <= SHORT_SYMLINK_LEN) {
1761                 info->symlink = kmemdup(symname, len, GFP_KERNEL);
1762                 if (!info->symlink) {
1763                         iput(inode);
1764                         return -ENOMEM;
1765                 }
1766                 inode->i_op = &shmem_short_symlink_operations;
1767         } else {
1768                 error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
1769                 if (error) {
1770                         iput(inode);
1771                         return error;
1772                 }
1773                 inode->i_mapping->a_ops = &shmem_aops;
1774                 inode->i_op = &shmem_symlink_inode_operations;
1775                 kaddr = kmap_atomic(page, KM_USER0);
1776                 memcpy(kaddr, symname, len);
1777                 kunmap_atomic(kaddr, KM_USER0);
1778                 set_page_dirty(page);
1779                 unlock_page(page);
1780                 page_cache_release(page);
1781         }
1782         dir->i_size += BOGO_DIRENT_SIZE;
1783         dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1784         d_instantiate(dentry, inode);
1785         dget(dentry);
1786         return 0;
1787 }
1788
1789 static void *shmem_follow_short_symlink(struct dentry *dentry, struct nameidata *nd)
1790 {
1791         nd_set_link(nd, SHMEM_I(dentry->d_inode)->symlink);
1792         return NULL;
1793 }
1794
1795 static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
1796 {
1797         struct page *page = NULL;
1798         int error = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
1799         nd_set_link(nd, error ? ERR_PTR(error) : kmap(page));
1800         if (page)
1801                 unlock_page(page);
1802         return page;
1803 }
1804
1805 static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
1806 {
1807         if (!IS_ERR(nd_get_link(nd))) {
1808                 struct page *page = cookie;
1809                 kunmap(page);
1810                 mark_page_accessed(page);
1811                 page_cache_release(page);
1812         }
1813 }
1814
1815 #ifdef CONFIG_TMPFS_XATTR
1816 /*
1817  * Superblocks without xattr inode operations may get some security.* xattr
1818  * support from the LSM "for free". As soon as we have any other xattrs
1819  * like ACLs, we also need to implement the security.* handlers at
1820  * filesystem level, though.
1821  */
1822
1823 static int shmem_xattr_get(struct dentry *dentry, const char *name,
1824                            void *buffer, size_t size)
1825 {
1826         struct shmem_inode_info *info;
1827         struct shmem_xattr *xattr;
1828         int ret = -ENODATA;
1829
1830         info = SHMEM_I(dentry->d_inode);
1831
1832         spin_lock(&info->lock);
1833         list_for_each_entry(xattr, &info->xattr_list, list) {
1834                 if (strcmp(name, xattr->name))
1835                         continue;
1836
1837                 ret = xattr->size;
1838                 if (buffer) {
1839                         if (size < xattr->size)
1840                                 ret = -ERANGE;
1841                         else
1842                                 memcpy(buffer, xattr->value, xattr->size);
1843                 }
1844                 break;
1845         }
1846         spin_unlock(&info->lock);
1847         return ret;
1848 }
1849
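/*
 * Add, replace or (when @value is NULL) remove an xattr on the in-memory
 * list protected by info->lock.  XATTR_CREATE fails with -EEXIST if the
 * name already exists; XATTR_REPLACE fails with -ENODATA if it does not.
 * Any replaced or unused entry is freed after the lock is dropped.
 */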
1850 static int shmem_xattr_set(struct dentry *dentry, const char *name,
1851                            const void *value, size_t size, int flags)
1852 {
1853         struct inode *inode = dentry->d_inode;
1854         struct shmem_inode_info *info = SHMEM_I(inode);
1855         struct shmem_xattr *xattr;
1856         struct shmem_xattr *new_xattr = NULL;
1857         size_t len;
1858         int err = 0;
1859
1860         /* value == NULL means remove */
1861         if (value) {
1862                 /* wrap around? */
1863                 len = sizeof(*new_xattr) + size;
1864                 if (len <= sizeof(*new_xattr))
1865                         return -ENOMEM;
1866
1867                 new_xattr = kmalloc(len, GFP_KERNEL);
1868                 if (!new_xattr)
1869                         return -ENOMEM;
1870
1871                 new_xattr->name = kstrdup(name, GFP_KERNEL);
1872                 if (!new_xattr->name) {
1873                         kfree(new_xattr);
1874                         return -ENOMEM;
1875                 }
1876
1877                 new_xattr->size = size;
1878                 memcpy(new_xattr->value, value, size);
1879         }
1880
1881         spin_lock(&info->lock);
1882         list_for_each_entry(xattr, &info->xattr_list, list) {
1883                 if (!strcmp(name, xattr->name)) {
1884                         if (flags & XATTR_CREATE) {
1885                                 xattr = new_xattr;
1886                                 err = -EEXIST;
1887                         } else if (new_xattr) {
1888                                 list_replace(&xattr->list, &new_xattr->list);
1889                         } else {
1890                                 list_del(&xattr->list);
1891                         }
1892                         goto out;
1893                 }
1894         }
1895         if (flags & XATTR_REPLACE) {
1896                 xattr = new_xattr;
1897                 err = -ENODATA;
1898         } else {
1899                 list_add(&new_xattr->list, &info->xattr_list);
1900                 xattr = NULL;
1901         }
1902 out:
1903         spin_unlock(&info->lock);
1904         if (xattr)
1905                 kfree(xattr->name);
1906         kfree(xattr);
1907         return err;
1908 }
1909
1910 static const struct xattr_handler *shmem_xattr_handlers[] = {
1911 #ifdef CONFIG_TMPFS_POSIX_ACL
1912         &generic_acl_access_handler,
1913         &generic_acl_default_handler,
1914 #endif
1915         NULL
1916 };
1917
1918 static int shmem_xattr_validate(const char *name)
1919 {
1920         struct { const char *prefix; size_t len; } arr[] = {
1921                 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
1922                 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
1923         };
1924         int i;
1925
1926         for (i = 0; i < ARRAY_SIZE(arr); i++) {
1927                 size_t preflen = arr[i].len;
1928                 if (strncmp(name, arr[i].prefix, preflen) == 0) {
1929                         if (!name[preflen])
1930                                 return -EINVAL;
1931                         return 0;
1932                 }
1933         }
1934         return -EOPNOTSUPP;
1935 }
1936
1937 static ssize_t shmem_getxattr(struct dentry *dentry, const char *name,
1938                               void *buffer, size_t size)
1939 {
1940         int err;
1941
1942         /*
1943          * If this is a request for a synthetic attribute in the system.*
1944          * namespace, use the generic infrastructure to resolve a handler
1945          * for it via sb->s_xattr.
1946          */
1947         if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
1948                 return generic_getxattr(dentry, name, buffer, size);
1949
1950         err = shmem_xattr_validate(name);
1951         if (err)
1952                 return err;
1953
1954         return shmem_xattr_get(dentry, name, buffer, size);
1955 }
1956
1957 static int shmem_setxattr(struct dentry *dentry, const char *name,
1958                           const void *value, size_t size, int flags)
1959 {
1960         int err;
1961
1962         /*
1963          * If this is a request for a synthetic attribute in the system.*
1964          * namespace, use the generic infrastructure to resolve a handler
1965          * for it via sb->s_xattr.
1966          */
1967         if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
1968                 return generic_setxattr(dentry, name, value, size, flags);
1969
1970         err = shmem_xattr_validate(name);
1971         if (err)
1972                 return err;
1973
1974         if (size == 0)
1975                 value = "";  /* empty EA, do not remove */
1976
1977         return shmem_xattr_set(dentry, name, value, size, flags);
1978
1979 }
1980
1981 static int shmem_removexattr(struct dentry *dentry, const char *name)
1982 {
1983         int err;
1984
1985         /*
1986          * If this is a request for a synthetic attribute in the system.*
1987          * namespace use the generic infrastructure to resolve a handler
1988          * namespace, use the generic infrastructure to resolve a handler
1989          */
1990         if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
1991                 return generic_removexattr(dentry, name);
1992
1993         err = shmem_xattr_validate(name);
1994         if (err)
1995                 return err;
1996
1997         return shmem_xattr_set(dentry, name, NULL, 0, XATTR_REPLACE);
1998 }
1999
2000 static bool xattr_is_trusted(const char *name)
2001 {
2002         return !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN);
2003 }
2004
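/*
 * List the names of all extended attributes as consecutive NUL-terminated
 * strings.  "trusted." names are hidden from callers without
 * CAP_SYS_ADMIN; a NULL buffer just reports the size needed, and a buffer
 * that is too small yields -ERANGE.
 */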
2005 static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
2006 {
2007         bool trusted = capable(CAP_SYS_ADMIN);
2008         struct shmem_xattr *xattr;
2009         struct shmem_inode_info *info;
2010         size_t used = 0;
2011
2012         info = SHMEM_I(dentry->d_inode);
2013
2014         spin_lock(&info->lock);
2015         list_for_each_entry(xattr, &info->xattr_list, list) {
2016                 size_t len;
2017
2018                 /* skip "trusted." attributes for unprivileged callers */
2019                 if (!trusted && xattr_is_trusted(xattr->name))
2020                         continue;
2021
2022                 len = strlen(xattr->name) + 1;
2023                 used += len;
2024                 if (buffer) {
2025                         if (size < used) {
2026                                 used = -ERANGE;
2027                                 break;
2028                         }
2029                         memcpy(buffer, xattr->name, len);
2030                         buffer += len;
2031                 }
2032         }
2033         spin_unlock(&info->lock);
2034
2035         return used;
2036 }
2037 #endif /* CONFIG_TMPFS_XATTR */
2038
2039 static const struct inode_operations shmem_short_symlink_operations = {
2040         .readlink       = generic_readlink,
2041         .follow_link    = shmem_follow_short_symlink,
2042 #ifdef CONFIG_TMPFS_XATTR
2043         .setxattr       = shmem_setxattr,
2044         .getxattr       = shmem_getxattr,
2045         .listxattr      = shmem_listxattr,
2046         .removexattr    = shmem_removexattr,
2047 #endif
2048 };
2049
2050 static const struct inode_operations shmem_symlink_inode_operations = {
2051         .readlink       = generic_readlink,
2052         .follow_link    = shmem_follow_link,
2053         .put_link       = shmem_put_link,
2054 #ifdef CONFIG_TMPFS_XATTR
2055         .setxattr       = shmem_setxattr,
2056         .getxattr       = shmem_getxattr,
2057         .listxattr      = shmem_listxattr,
2058         .removexattr    = shmem_removexattr,
2059 #endif
2060 };
2061
2062 static struct dentry *shmem_get_parent(struct dentry *child)
2063 {
2064         return ERR_PTR(-ESTALE);
2065 }
2066
2067 static int shmem_match(struct inode *ino, void *vfh)
2068 {
2069         __u32 *fh = vfh;
2070         __u64 inum = fh[2];
2071         inum = (inum << 32) | fh[1];
2072         return ino->i_ino == inum && fh[0] == ino->i_generation;
2073 }
2074
2075 static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
2076                 struct fid *fid, int fh_len, int fh_type)
2077 {
2078         struct inode *inode;
2079         struct dentry *dentry = NULL;
2080         u64 inum;
2081
2082         if (fh_len < 3)
2083                 return NULL;
2084
2085         inum = fid->raw[2];
2086         inum = (inum << 32) | fid->raw[1];
2087
2088         inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
2089                         shmem_match, fid->raw);
2090         if (inode) {
2091                 dentry = d_find_alias(inode);
2092                 iput(inode);
2093         }
2094
2095         return dentry;
2096 }
2097
2098 static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
2099                                 int connectable)
2100 {
2101         struct inode *inode = dentry->d_inode;
2102
2103         if (*len < 3) {
2104                 *len = 3;
2105                 return 255;
2106         }
2107
2108         if (inode_unhashed(inode)) {
2109                 /* Unfortunately insert_inode_hash is not idempotent,
2110                  * so as we hash inodes here rather than at creation
2111                  * time, we need a lock to ensure we only try
2112                  * to do it once.
2113                  */
2114                 static DEFINE_SPINLOCK(lock);
2115                 spin_lock(&lock);
2116                 if (inode_unhashed(inode))
2117                         __insert_inode_hash(inode,
2118                                             inode->i_ino + inode->i_generation);
2119                 spin_unlock(&lock);
2120         }
2121
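        /*
         * Three-word handle: the inode's generation, then its inode number
         * split into low and high 32-bit halves, matching the decode in
         * shmem_match() and shmem_fh_to_dentry() above.
         */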
2122         fh[0] = inode->i_generation;
2123         fh[1] = inode->i_ino;
2124         fh[2] = ((__u64)inode->i_ino) >> 32;
2125
2126         *len = 3;
2127         return 1;
2128 }
2129
2130 static const struct export_operations shmem_export_ops = {
2131         .get_parent     = shmem_get_parent,
2132         .encode_fh      = shmem_encode_fh,
2133         .fh_to_dentry   = shmem_fh_to_dentry,
2134 };
2135
2136 static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
2137                                bool remount)
2138 {
2139         char *this_char, *value, *rest;
2140
2141         while (options != NULL) {
2142                 this_char = options;
2143                 for (;;) {
2144                         /*
2145                          * NUL-terminate this option: unfortunately,
2146                          * mount options form a comma-separated list,
2147                          * but mpol's nodelist may also contain commas.
2148                          */
2149                         options = strchr(options, ',');
2150                         if (options == NULL)
2151                                 break;
2152                         options++;
2153                         if (!isdigit(*options)) {
2154                                 options[-1] = '\0';
2155                                 break;
2156                         }
2157                 }
2158                 if (!*this_char)
2159                         continue;
2160                 if ((value = strchr(this_char,'=')) != NULL) {
2161                         *value++ = 0;
2162                 } else {
2163                         printk(KERN_ERR
2164                             "tmpfs: No value for mount option '%s'\n",
2165                             this_char);
2166                         return 1;
2167                 }
2168
2169                 if (!strcmp(this_char,"size")) {
2170                         unsigned long long size;
2171                         size = memparse(value,&rest);
2172                         if (*rest == '%') {
2173                                 size <<= PAGE_SHIFT;
2174                                 size *= totalram_pages;
2175                                 do_div(size, 100);
2176                                 rest++;
2177                         }
2178                         if (*rest)
2179                                 goto bad_val;
2180                         sbinfo->max_blocks =
2181                                 DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
2182                 } else if (!strcmp(this_char,"nr_blocks")) {
2183                         sbinfo->max_blocks = memparse(value, &rest);
2184                         if (*rest)
2185                                 goto bad_val;
2186                 } else if (!strcmp(this_char,"nr_inodes")) {
2187                         sbinfo->max_inodes = memparse(value, &rest);
2188                         if (*rest)
2189                                 goto bad_val;
2190                 } else if (!strcmp(this_char,"mode")) {
2191                         if (remount)
2192                                 continue;
2193                         sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
2194                         if (*rest)
2195                                 goto bad_val;
2196                 } else if (!strcmp(this_char,"uid")) {
2197                         if (remount)
2198                                 continue;
2199                         sbinfo->uid = simple_strtoul(value, &rest, 0);
2200                         if (*rest)
2201                                 goto bad_val;
2202                 } else if (!strcmp(this_char,"gid")) {
2203                         if (remount)
2204                                 continue;
2205                         sbinfo->gid = simple_strtoul(value, &rest, 0);
2206                         if (*rest)
2207                                 goto bad_val;
2208                 } else if (!strcmp(this_char,"mpol")) {
2209                         if (mpol_parse_str(value, &sbinfo->mpol, 1))
2210                                 goto bad_val;
2211                 } else {
2212                         printk(KERN_ERR "tmpfs: Bad mount option %s\n",
2213                                this_char);
2214                         return 1;
2215                 }
2216         }
2217         return 0;
2218
2219 bad_val:
2220         printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
2221                value, this_char);
2222         return 1;
2223
2224 }
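
/*
 * Editor's note (illustrative): typical option strings handled above are
 * "size=50%,nr_inodes=10240,mode=1777,uid=1000,gid=1000" or, with a NUMA
 * policy whose nodelist itself contains commas, "mpol=bind:0,2,size=1g".
 * The isdigit() check in the loop above is what keeps "0,2" attached to
 * the mpol value instead of splitting it at the comma.
 */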
2225
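/*
 * Remount: parse the new options into a scratch copy and check them
 * against current usage under stat_lock before committing; see the
 * comment below for why switching between limited and unlimited modes
 * is restricted.
 */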
2226 static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
2227 {
2228         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2229         struct shmem_sb_info config = *sbinfo;
2230         unsigned long inodes;
2231         int error = -EINVAL;
2232
2233         config.mpol = NULL;
2234         if (shmem_parse_options(data, &config, true))
2235                 return error;
2236
2237         spin_lock(&sbinfo->stat_lock);
2238         inodes = sbinfo->max_inodes - sbinfo->free_inodes;
2239         if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0)
2240                 goto out;
2241         if (config.max_inodes < inodes)
2242                 goto out;
2243         /*
2244          * Those tests disallow limited->unlimited while any are in use;
2245          * but we must separately disallow unlimited->limited, because
2246          * in that case we have no record of how much is already in use.
2247          */
2248         if (config.max_blocks && !sbinfo->max_blocks)
2249                 goto out;
2250         if (config.max_inodes && !sbinfo->max_inodes)
2251                 goto out;
2252
2253         error = 0;
2254         sbinfo->max_blocks  = config.max_blocks;
2255         sbinfo->max_inodes  = config.max_inodes;
2256         sbinfo->free_inodes = config.max_inodes - inodes;
2257
2258         /*
2259          * Preserve previous mempolicy unless mpol remount option was specified.
2260          */
2261         if (config.mpol) {
2262                 mpol_put(sbinfo->mpol);
2263                 sbinfo->mpol = config.mpol;     /* transfers initial ref */
2264         }
2265 out:
2266         spin_unlock(&sbinfo->stat_lock);
2267         return error;
2268 }
2269
2270 static int shmem_show_options(struct seq_file *seq, struct vfsmount *vfs)
2271 {
2272         struct shmem_sb_info *sbinfo = SHMEM_SB(vfs->mnt_sb);
2273
2274         if (sbinfo->max_blocks != shmem_default_max_blocks())
2275                 seq_printf(seq, ",size=%luk",
2276                         sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10));
2277         if (sbinfo->max_inodes != shmem_default_max_inodes())
2278                 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
2279         if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
2280                 seq_printf(seq, ",mode=%03o", sbinfo->mode);
2281         if (sbinfo->uid != 0)
2282                 seq_printf(seq, ",uid=%u", sbinfo->uid);
2283         if (sbinfo->gid != 0)
2284                 seq_printf(seq, ",gid=%u", sbinfo->gid);
2285         shmem_show_mpol(seq, sbinfo->mpol);
2286         return 0;
2287 }
2288 #endif /* CONFIG_TMPFS */
2289
2290 static void shmem_put_super(struct super_block *sb)
2291 {
2292         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2293
2294         percpu_counter_destroy(&sbinfo->used_blocks);
2295         kfree(sbinfo);
2296         sb->s_fs_info = NULL;
2297 }
2298
2299 int shmem_fill_super(struct super_block *sb, void *data, int silent)
2300 {
2301         struct inode *inode;
2302         struct dentry *root;
2303         struct shmem_sb_info *sbinfo;
2304         int err = -ENOMEM;
2305
2306         /* Round up to L1_CACHE_BYTES to resist false sharing */
2307         sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
2308                                 L1_CACHE_BYTES), GFP_KERNEL);
2309         if (!sbinfo)
2310                 return -ENOMEM;
2311
2312         sbinfo->mode = S_IRWXUGO | S_ISVTX;
2313         sbinfo->uid = current_fsuid();
2314         sbinfo->gid = current_fsgid();
2315         sb->s_fs_info = sbinfo;
2316
2317 #ifdef CONFIG_TMPFS
2318         /*
2319          * By default we only allow half of the physical RAM per
2320          * tmpfs instance, limiting inodes to one per page of lowmem;
2321          * but the internal instance is left unlimited.
2322          */
2323         if (!(sb->s_flags & MS_NOUSER)) {
2324                 sbinfo->max_blocks = shmem_default_max_blocks();
2325                 sbinfo->max_inodes = shmem_default_max_inodes();
2326                 if (shmem_parse_options(data, sbinfo, false)) {
2327                         err = -EINVAL;
2328                         goto failed;
2329                 }
2330         }
2331         sb->s_export_op = &shmem_export_ops;
2332 #else
2333         sb->s_flags |= MS_NOUSER;
2334 #endif
2335
2336         spin_lock_init(&sbinfo->stat_lock);
2337         if (percpu_counter_init(&sbinfo->used_blocks, 0))
2338                 goto failed;
2339         sbinfo->free_inodes = sbinfo->max_inodes;
2340
2341         sb->s_maxbytes = MAX_LFS_FILESIZE;
2342         sb->s_blocksize = PAGE_CACHE_SIZE;
2343         sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
2344         sb->s_magic = TMPFS_MAGIC;
2345         sb->s_op = &shmem_ops;
2346         sb->s_time_gran = 1;
2347 #ifdef CONFIG_TMPFS_XATTR
2348         sb->s_xattr = shmem_xattr_handlers;
2349 #endif
2350 #ifdef CONFIG_TMPFS_POSIX_ACL
2351         sb->s_flags |= MS_POSIXACL;
2352 #endif
2353
2354         inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
2355         if (!inode)
2356                 goto failed;
2357         inode->i_uid = sbinfo->uid;
2358         inode->i_gid = sbinfo->gid;
2359         root = d_alloc_root(inode);
2360         if (!root)
2361                 goto failed_iput;
2362         sb->s_root = root;
2363         return 0;
2364
2365 failed_iput:
2366         iput(inode);
2367 failed:
2368         shmem_put_super(sb);
2369         return err;
2370 }
2371
2372 static struct kmem_cache *shmem_inode_cachep;
2373
2374 static struct inode *shmem_alloc_inode(struct super_block *sb)
2375 {
2376         struct shmem_inode_info *info;
2377         info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
2378         if (!info)
2379                 return NULL;
2380         return &info->vfs_inode;
2381 }
2382
2383 static void shmem_destroy_callback(struct rcu_head *head)
2384 {
2385         struct inode *inode = container_of(head, struct inode, i_rcu);
2386         INIT_LIST_HEAD(&inode->i_dentry);
2387         kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
2388 }
2389
2390 static void shmem_destroy_inode(struct inode *inode)
2391 {
2392         if ((inode->i_mode & S_IFMT) == S_IFREG)
2393                 mpol_free_shared_policy(&SHMEM_I(inode)->policy);
2394         call_rcu(&inode->i_rcu, shmem_destroy_callback);
2395 }
2396
2397 static void shmem_init_inode(void *foo)
2398 {
2399         struct shmem_inode_info *info = foo;
2400         inode_init_once(&info->vfs_inode);
2401 }
2402
2403 static int shmem_init_inodecache(void)
2404 {
2405         shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
2406                                 sizeof(struct shmem_inode_info),
2407                                 0, SLAB_PANIC, shmem_init_inode);
2408         return 0;
2409 }
2410
2411 static void shmem_destroy_inodecache(void)
2412 {
2413         kmem_cache_destroy(shmem_inode_cachep);
2414 }
2415
2416 static const struct address_space_operations shmem_aops = {
2417         .writepage      = shmem_writepage,
2418         .set_page_dirty = __set_page_dirty_no_writeback,
2419 #ifdef CONFIG_TMPFS
2420         .write_begin    = shmem_write_begin,
2421         .write_end      = shmem_write_end,
2422 #endif
2423         .migratepage    = migrate_page,
2424         .error_remove_page = generic_error_remove_page,
2425 };
2426
2427 static const struct file_operations shmem_file_operations = {
2428         .mmap           = shmem_mmap,
2429 #ifdef CONFIG_TMPFS
2430         .llseek         = generic_file_llseek,
2431         .read           = do_sync_read,
2432         .write          = do_sync_write,
2433         .aio_read       = shmem_file_aio_read,
2434         .aio_write      = generic_file_aio_write,
2435         .fsync          = noop_fsync,
2436         .splice_read    = shmem_file_splice_read,
2437         .splice_write   = generic_file_splice_write,
2438 #endif
2439 };
2440
2441 static const struct inode_operations shmem_inode_operations = {
2442         .setattr        = shmem_setattr,
2443         .truncate_range = shmem_truncate_range,
2444 #ifdef CONFIG_TMPFS_XATTR
2445         .setxattr       = shmem_setxattr,
2446         .getxattr       = shmem_getxattr,
2447         .listxattr      = shmem_listxattr,
2448         .removexattr    = shmem_removexattr,
2449 #endif
2450 };
2451
2452 static const struct inode_operations shmem_dir_inode_operations = {
2453 #ifdef CONFIG_TMPFS
2454         .create         = shmem_create,
2455         .lookup         = simple_lookup,
2456         .link           = shmem_link,
2457         .unlink         = shmem_unlink,
2458         .symlink        = shmem_symlink,
2459         .mkdir          = shmem_mkdir,
2460         .rmdir          = shmem_rmdir,
2461         .mknod          = shmem_mknod,
2462         .rename         = shmem_rename,
2463 #endif
2464 #ifdef CONFIG_TMPFS_XATTR
2465         .setxattr       = shmem_setxattr,
2466         .getxattr       = shmem_getxattr,
2467         .listxattr      = shmem_listxattr,
2468         .removexattr    = shmem_removexattr,
2469 #endif
2470 #ifdef CONFIG_TMPFS_POSIX_ACL
2471         .setattr        = shmem_setattr,
2472 #endif
2473 };
2474
2475 static const struct inode_operations shmem_special_inode_operations = {
2476 #ifdef CONFIG_TMPFS_XATTR
2477         .setxattr       = shmem_setxattr,
2478         .getxattr       = shmem_getxattr,
2479         .listxattr      = shmem_listxattr,
2480         .removexattr    = shmem_removexattr,
2481 #endif
2482 #ifdef CONFIG_TMPFS_POSIX_ACL
2483         .setattr        = shmem_setattr,
2484 #endif
2485 };
2486
2487 static const struct super_operations shmem_ops = {
2488         .alloc_inode    = shmem_alloc_inode,
2489         .destroy_inode  = shmem_destroy_inode,
2490 #ifdef CONFIG_TMPFS
2491         .statfs         = shmem_statfs,
2492         .remount_fs     = shmem_remount_fs,
2493         .show_options   = shmem_show_options,
2494 #endif
2495         .evict_inode    = shmem_evict_inode,
2496         .drop_inode     = generic_delete_inode,
2497         .put_super      = shmem_put_super,
2498 };
2499
2500 static const struct vm_operations_struct shmem_vm_ops = {
2501         .fault          = shmem_fault,
2502 #ifdef CONFIG_NUMA
2503         .set_policy     = shmem_set_policy,
2504         .get_policy     = shmem_get_policy,
2505 #endif
2506 };
2507
2508 static struct dentry *shmem_mount(struct file_system_type *fs_type,
2509         int flags, const char *dev_name, void *data)
2510 {
2511         return mount_nodev(fs_type, flags, data, shmem_fill_super);
2512 }
2513
2514 static struct file_system_type shmem_fs_type = {
2515         .owner          = THIS_MODULE,
2516         .name           = "tmpfs",
2517         .mount          = shmem_mount,
2518         .kill_sb        = kill_litter_super,
2519 };
2520
2521 int __init shmem_init(void)
2522 {
2523         int error;
2524
2525         error = bdi_init(&shmem_backing_dev_info);
2526         if (error)
2527                 goto out4;
2528
2529         error = shmem_init_inodecache();
2530         if (error)
2531                 goto out3;
2532
2533         error = register_filesystem(&shmem_fs_type);
2534         if (error) {
2535                 printk(KERN_ERR "Could not register tmpfs\n");
2536                 goto out2;
2537         }
2538
2539         shm_mnt = vfs_kern_mount(&shmem_fs_type, MS_NOUSER,
2540                                  shmem_fs_type.name, NULL);
2541         if (IS_ERR(shm_mnt)) {
2542                 error = PTR_ERR(shm_mnt);
2543                 printk(KERN_ERR "Could not kern_mount tmpfs\n");
2544                 goto out1;
2545         }
2546         return 0;
2547
2548 out1:
2549         unregister_filesystem(&shmem_fs_type);
2550 out2:
2551         shmem_destroy_inodecache();
2552 out3:
2553         bdi_destroy(&shmem_backing_dev_info);
2554 out4:
2555         shm_mnt = ERR_PTR(error);
2556         return error;
2557 }
2558
2559 #else /* !CONFIG_SHMEM */
2560
2561 /*
2562  * tiny-shmem: simple shmemfs and tmpfs using ramfs code
2563  *
2564  * This is intended for small systems where the benefits of the full
2565  * shmem code (swap-backed and resource-limited) are outweighed by
2566  * its complexity. On systems without swap this code should be
2567  * effectively equivalent, but much lighter weight.
2568  */
2569
2570 #include <linux/ramfs.h>
2571
2572 static struct file_system_type shmem_fs_type = {
2573         .name           = "tmpfs",
2574         .mount          = ramfs_mount,
2575         .kill_sb        = kill_litter_super,
2576 };
2577
2578 int __init shmem_init(void)
2579 {
2580         BUG_ON(register_filesystem(&shmem_fs_type) != 0);
2581
2582         shm_mnt = kern_mount(&shmem_fs_type);
2583         BUG_ON(IS_ERR(shm_mnt));
2584
2585         return 0;
2586 }
2587
2588 int shmem_unuse(swp_entry_t swap, struct page *page)
2589 {
2590         return 0;
2591 }
2592
2593 int shmem_lock(struct file *file, int lock, struct user_struct *user)
2594 {
2595         return 0;
2596 }
2597
2598 void shmem_unlock_mapping(struct address_space *mapping)
2599 {
2600 }
2601
2602 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
2603 {
2604         truncate_inode_pages_range(inode->i_mapping, lstart, lend);
2605 }
2606 EXPORT_SYMBOL_GPL(shmem_truncate_range);
2607
2608 int vmtruncate_range(struct inode *inode, loff_t lstart, loff_t lend)
2609 {
2610         /* Only CONFIG_SHMEM shmem.c ever supported i_op->truncate_range(). */
2611         return -ENOSYS;
2612 }
2613
2614 #define shmem_vm_ops                            generic_file_vm_ops
2615 #define shmem_file_operations                   ramfs_file_operations
2616 #define shmem_get_inode(sb, dir, mode, dev, flags)      ramfs_get_inode(sb, dir, mode, dev)
2617 #define shmem_acct_size(flags, size)            0
2618 #define shmem_unacct_size(flags, size)          do {} while (0)
2619
2620 #endif /* CONFIG_SHMEM */
2621
2622 /* common code */
2623
2624 /**
2625  * shmem_file_setup - get an unlinked file living in tmpfs
2626  * @name: name for dentry (to be seen in /proc/<pid>/maps)
2627  * @size: size to be set for the file
2628  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
2629  */
2630 struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
2631 {
2632         int error;
2633         struct file *file;
2634         struct inode *inode;
2635         struct path path;
2636         struct dentry *root;
2637         struct qstr this;
2638
2639         if (IS_ERR(shm_mnt))
2640                 return (void *)shm_mnt;
2641
2642         if (size < 0 || size > MAX_LFS_FILESIZE)
2643                 return ERR_PTR(-EINVAL);
2644
2645         if (shmem_acct_size(flags, size))
2646                 return ERR_PTR(-ENOMEM);
2647
2648         error = -ENOMEM;
2649         this.name = name;
2650         this.len = strlen(name);
2651         this.hash = 0; /* will go */
2652         root = shm_mnt->mnt_root;
2653         path.dentry = d_alloc(root, &this);
2654         if (!path.dentry)
2655                 goto put_memory;
2656         path.mnt = mntget(shm_mnt);
2657
2658         error = -ENOSPC;
2659         inode = shmem_get_inode(root->d_sb, NULL, S_IFREG | S_IRWXUGO, 0, flags);
2660         if (!inode)
2661                 goto put_dentry;
2662
2663         d_instantiate(path.dentry, inode);
2664         inode->i_size = size;
2665         clear_nlink(inode);     /* It is unlinked */
2666 #ifndef CONFIG_MMU
2667         error = ramfs_nommu_expand_for_mapping(inode, size);
2668         if (error)
2669                 goto put_dentry;
2670 #endif
2671
2672         error = -ENFILE;
2673         file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
2674                   &shmem_file_operations);
2675         if (!file)
2676                 goto put_dentry;
2677
2678         return file;
2679
2680 put_dentry:
2681         path_put(&path);
2682 put_memory:
2683         shmem_unacct_size(flags, size);
2684         return ERR_PTR(error);
2685 }
2686 EXPORT_SYMBOL_GPL(shmem_file_setup);
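
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a typical in-kernel caller.  The file is unlinked, so dropping the last
 * reference with fput() releases the object and its pages.
 */
static int shmem_file_setup_example(void)
{
        struct file *file;

        file = shmem_file_setup("example", 1024 * 1024, VM_NORESERVE);
        if (IS_ERR(file))
                return PTR_ERR(file);

        /* ... read or write through file->f_mapping ... */

        fput(file);
        return 0;
}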
2687
2688 /**
2689  * shmem_zero_setup - setup a shared anonymous mapping
2690  * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
2691  */
2692 int shmem_zero_setup(struct vm_area_struct *vma)
2693 {
2694         struct file *file;
2695         loff_t size = vma->vm_end - vma->vm_start;
2696
2697         file = shmem_file_setup("dev/zero", size, vma->vm_flags);
2698         if (IS_ERR(file))
2699                 return PTR_ERR(file);
2700
2701         if (vma->vm_file)
2702                 fput(vma->vm_file);
2703         vma->vm_file = file;
2704         vma->vm_ops = &shmem_vm_ops;
2705         vma->vm_flags |= VM_CAN_NONLINEAR;
2706         return 0;
2707 }
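
/*
 * Illustrative sketch (editor's addition): how a ->mmap() handler can hand
 * a shared mapping over to tmpfs, as the /dev/zero MAP_SHARED path does;
 * private mappings are left to ordinary anonymous memory.
 */
static int example_mmap_shared_zero(struct file *file,
                                    struct vm_area_struct *vma)
{
        if (vma->vm_flags & VM_SHARED)
                return shmem_zero_setup(vma);
        return 0;
}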
2708
2709 /**
2710  * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
2711  * @mapping:    the page's address_space
2712  * @index:      the page index
2713  * @gfp:        the page allocator flags to use if allocating
2714  *
2715  * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
2716  * with any new page allocations done using the specified allocation flags.
2717  * But read_cache_page_gfp() uses the ->readpage() method, which does not
2718  * suit tmpfs, since it may have pages in swapcache, and needs to find those
2719  * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
2720  *
2721  * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
2722  * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
2723  */
2724 struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
2725                                          pgoff_t index, gfp_t gfp)
2726 {
2727 #ifdef CONFIG_SHMEM
2728         struct inode *inode = mapping->host;
2729         struct page *page;
2730         int error;
2731
2732         BUG_ON(mapping->a_ops != &shmem_aops);
2733         error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE, gfp, NULL);
2734         if (error)
2735                 page = ERR_PTR(error);
2736         else
2737                 unlock_page(page);
2738         return page;
2739 #else
2740         /*
2741          * The tiny !SHMEM case uses ramfs without swap.
2742          */
2743         return read_cache_page_gfp(mapping, index, gfp);
2744 #endif
2745 }
2746 EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
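
/*
 * Illustrative sketch (editor's addition): a caller in the style of the
 * i915 usage mentioned above, asking for a page while letting allocation
 * fail quietly instead of retrying or warning.  The returned page carries
 * a reference which the caller must drop with page_cache_release().
 */
static struct page *example_get_shmem_page(struct address_space *mapping,
                                           pgoff_t index)
{
        gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;

        return shmem_read_mapping_page_gfp(mapping, index, gfp);
}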