/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *               2000 Transmeta Corp.
 *               2000-2001 Christoph Rohland
 *               2000-2001 SAP AG
 *               2002 Red Hat Inc.
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/aio.h>

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on ramfs. It
 * extends ramfs with the ability to use swap and honor resource limits,
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <uapi/linux/memfd.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>

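/* i_blocks is counted in 512-byte units, hence PAGE_CACHE_SIZE/512 blocks per page */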
#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* A symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128

/*
 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
 * inode->i_private (with i_mutex making sure that it has only one user at
 * a time): we would prefer not to enlarge the shmem inode just for that.
 */
struct shmem_falloc {
        wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
        pgoff_t start;          /* start of range currently being fallocated */
        pgoff_t next;           /* the next page offset to be fallocated */
        pgoff_t nr_falloced;    /* how many new pages have been fallocated */
        pgoff_t nr_unswapped;   /* how often writepage refused to swap out */
};

/* Flag allocation requirements to shmem_getpage */
enum sgp_type {
        SGP_READ,       /* don't exceed i_size, don't allocate page */
        SGP_CACHE,      /* don't exceed i_size, may allocate page */
        SGP_DIRTY,      /* like SGP_CACHE, but set new page dirty */
        SGP_WRITE,      /* may exceed i_size, may allocate !Uptodate page */
        SGP_FALLOC,     /* like SGP_WRITE, but make existing page Uptodate */
};

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
        return totalram_pages / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
        return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}
#endif
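
/*
 * Worked example of the defaults above, assuming 4K pages: with 1 GiB of
 * RAM, totalram_pages is 262144, so a tmpfs mount defaults to 131072
 * blocks (512 MiB) and at most 131072 inodes (fewer on highmem boxes,
 * where lowmem limits the inode count).
 */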

static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
static int shmem_replace_page(struct page **pagep, gfp_t gfp,
                                struct shmem_inode_info *info, pgoff_t index);
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
        struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type);

static inline int shmem_getpage(struct inode *inode, pgoff_t index,
        struct page **pagep, enum sgp_type sgp, int *fault_type)
{
        return shmem_getpage_gfp(inode, index, pagep, sgp,
                        mapping_gfp_mask(inode->i_mapping), fault_type);
}
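
/*
 * Typical use, as in shmem_fault() below:
 *
 *        error = shmem_getpage(inode, index, &page, SGP_CACHE, &fault_type);
 *
 * On success the page is returned locked and with a reference held.
 */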

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
        return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
        return (flags & VM_NORESERVE) ?
                0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
        if (!(flags & VM_NORESERVE))
                vm_unacct_memory(VM_ACCT(size));
}

static inline int shmem_reacct_size(unsigned long flags,
                loff_t oldsize, loff_t newsize)
{
        if (!(flags & VM_NORESERVE)) {
                if (VM_ACCT(newsize) > VM_ACCT(oldsize))
                        return security_vm_enough_memory_mm(current->mm,
                                        VM_ACCT(newsize) - VM_ACCT(oldsize));
                else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
                        vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
        }
        return 0;
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
        return (flags & VM_NORESERVE) ?
                security_vm_enough_memory_mm(current->mm, VM_ACCT(PAGE_CACHE_SIZE)) : 0;
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
        if (flags & VM_NORESERVE)
                vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}
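
/*
 * Example of the VM_ACCT arithmetic above, assuming 4K pages: growing a
 * file from 5000 to 20000 bytes makes shmem_reacct_size() charge
 * VM_ACCT(20000) - VM_ACCT(5000) = 5 - 2 = 3 pages against overcommit.
 */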

static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

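/*
 * Reserve one inode against sbinfo->free_inodes; a max_inodes limit of
 * zero means the inode count is unlimited and reservation always succeeds.
 */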
static int shmem_reserve_inode(struct super_block *sb)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
        if (sbinfo->max_inodes) {
                spin_lock(&sbinfo->stat_lock);
                if (!sbinfo->free_inodes) {
                        spin_unlock(&sbinfo->stat_lock);
                        return -ENOSPC;
                }
                sbinfo->free_inodes--;
                spin_unlock(&sbinfo->stat_lock);
        }
        return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
        if (sbinfo->max_inodes) {
                spin_lock(&sbinfo->stat_lock);
                sbinfo->free_inodes++;
                spin_unlock(&sbinfo->stat_lock);
        }
}

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        long freed;

        freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
        if (freed > 0) {
                struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
                if (sbinfo->max_blocks)
                        percpu_counter_add(&sbinfo->used_blocks, -freed);
                info->alloced -= freed;
                inode->i_blocks -= freed * BLOCKS_PER_PAGE;
                shmem_unacct_blocks(info->flags, freed);
        }
}

/*
 * Replace item expected in radix tree by a new item, while holding tree lock.
 */
static int shmem_radix_tree_replace(struct address_space *mapping,
                        pgoff_t index, void *expected, void *replacement)
{
        void **pslot;
        void *item;

        VM_BUG_ON(!expected);
        VM_BUG_ON(!replacement);
        pslot = radix_tree_lookup_slot(&mapping->page_tree, index);
        if (!pslot)
                return -ENOENT;
        item = radix_tree_deref_slot_protected(pslot, &mapping->tree_lock);
        if (item != expected)
                return -ENOENT;
        radix_tree_replace_slot(pslot, replacement);
        return 0;
}

/*
 * Sometimes, before we decide whether to proceed or to fail, we must check
 * that an entry was not already brought back from swap by a racing thread.
 *
 * Checking page is not enough: by the time a SwapCache page is locked, it
 * might be reused, and again be SwapCache, using the same swap as before.
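 *
 * Note: swap entries are stored in the radix tree as 'exceptional' entries:
 * swp_to_radix_entry() shifts the swp_entry_t value up and sets the low
 * exceptional bit, so such an entry can never be confused with a page
 * pointer, and radix_to_swp_entry() undoes the encoding.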
 */
static bool shmem_confirm_swap(struct address_space *mapping,
                               pgoff_t index, swp_entry_t swap)
{
        void *item;

        rcu_read_lock();
        item = radix_tree_lookup(&mapping->page_tree, index);
        rcu_read_unlock();
        return item == swp_to_radix_entry(swap);
}

/*
 * Like add_to_page_cache_locked, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct page *page,
                                   struct address_space *mapping,
                                   pgoff_t index, void *expected)
{
        int error;

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

        page_cache_get(page);
        page->mapping = mapping;
        page->index = index;

        spin_lock_irq(&mapping->tree_lock);
        if (!expected)
                error = radix_tree_insert(&mapping->page_tree, index, page);
        else
                error = shmem_radix_tree_replace(mapping, index, expected,
                                                                 page);
        if (!error) {
                mapping->nrpages++;
                __inc_zone_page_state(page, NR_FILE_PAGES);
                __inc_zone_page_state(page, NR_SHMEM);
                spin_unlock_irq(&mapping->tree_lock);
        } else {
                page->mapping = NULL;
                spin_unlock_irq(&mapping->tree_lock);
                page_cache_release(page);
        }
        return error;
}

/*
 * Like delete_from_page_cache, but substitutes swap for page.
 */
static void shmem_delete_from_page_cache(struct page *page, void *radswap)
{
        struct address_space *mapping = page->mapping;
        int error;

        spin_lock_irq(&mapping->tree_lock);
        error = shmem_radix_tree_replace(mapping, page->index, page, radswap);
        page->mapping = NULL;
        mapping->nrpages--;
        __dec_zone_page_state(page, NR_FILE_PAGES);
        __dec_zone_page_state(page, NR_SHMEM);
        spin_unlock_irq(&mapping->tree_lock);
        page_cache_release(page);
        BUG_ON(error);
}

/*
 * Remove swap entry from radix tree, free the swap and its page cache.
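 * Returns 0 if the expected entry was found and freed, or -ENOENT if it
 * had already been removed or replaced (e.g. by a racing swapin).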
 */
static int shmem_free_swap(struct address_space *mapping,
                           pgoff_t index, void *radswap)
{
        void *old;

        spin_lock_irq(&mapping->tree_lock);
        old = radix_tree_delete_item(&mapping->page_tree, index, radswap);
        spin_unlock_irq(&mapping->tree_lock);
        if (old != radswap)
                return -ENOENT;
        free_swap_and_cache(radix_to_swp_entry(radswap));
        return 0;
}

/*
 * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
 */
void shmem_unlock_mapping(struct address_space *mapping)
{
        struct pagevec pvec;
        pgoff_t indices[PAGEVEC_SIZE];
        pgoff_t index = 0;

        pagevec_init(&pvec, 0);
        /*
         * Minor point, but we might as well stop if someone else SHM_LOCKs it.
         */
        while (!mapping_unevictable(mapping)) {
                /*
                 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
                 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
                 */
                pvec.nr = find_get_entries(mapping, index,
                                           PAGEVEC_SIZE, pvec.pages, indices);
                if (!pvec.nr)
                        break;
                index = indices[pvec.nr - 1] + 1;
                pagevec_remove_exceptionals(&pvec);
                check_move_unevictable_pages(pvec.pages, pvec.nr);
                pagevec_release(&pvec);
                cond_resched();
        }
}

/*
 * Remove range of pages and swap entries from radix tree, and free them.
 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
 */
static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                                                                 bool unfalloc)
{
        struct address_space *mapping = inode->i_mapping;
        struct shmem_inode_info *info = SHMEM_I(inode);
        pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        pgoff_t end = (lend + 1) >> PAGE_CACHE_SHIFT;
        unsigned int partial_start = lstart & (PAGE_CACHE_SIZE - 1);
        unsigned int partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1);
        struct pagevec pvec;
        pgoff_t indices[PAGEVEC_SIZE];
        long nr_swaps_freed = 0;
        pgoff_t index;
        int i;

        if (lend == -1)
                end = -1;       /* unsigned, so actually very big */

        pagevec_init(&pvec, 0);
        index = start;
        while (index < end) {
                pvec.nr = find_get_entries(mapping, index,
                        min(end - index, (pgoff_t)PAGEVEC_SIZE),
                        pvec.pages, indices);
                if (!pvec.nr)
                        break;
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];

                        index = indices[i];
                        if (index >= end)
                                break;

                        if (radix_tree_exceptional_entry(page)) {
                                if (unfalloc)
                                        continue;
                                nr_swaps_freed += !shmem_free_swap(mapping,
                                                                index, page);
                                continue;
                        }

                        if (!trylock_page(page))
                                continue;
                        if (!unfalloc || !PageUptodate(page)) {
                                if (page->mapping == mapping) {
                                        VM_BUG_ON_PAGE(PageWriteback(page), page);
                                        truncate_inode_page(mapping, page);
                                }
                        }
                        unlock_page(page);
                }
                pagevec_remove_exceptionals(&pvec);
                pagevec_release(&pvec);
                cond_resched();
                index++;
        }

        if (partial_start) {
                struct page *page = NULL;
                shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
                if (page) {
                        unsigned int top = PAGE_CACHE_SIZE;
                        if (start > end) {
                                top = partial_end;
                                partial_end = 0;
                        }
                        zero_user_segment(page, partial_start, top);
                        set_page_dirty(page);
                        unlock_page(page);
                        page_cache_release(page);
                }
        }
        if (partial_end) {
                struct page *page = NULL;
                shmem_getpage(inode, end, &page, SGP_READ, NULL);
                if (page) {
                        zero_user_segment(page, 0, partial_end);
                        set_page_dirty(page);
                        unlock_page(page);
                        page_cache_release(page);
                }
        }
        if (start >= end)
                return;

        index = start;
        while (index < end) {
                cond_resched();

                pvec.nr = find_get_entries(mapping, index,
                                min(end - index, (pgoff_t)PAGEVEC_SIZE),
                                pvec.pages, indices);
                if (!pvec.nr) {
                        /* If all gone or hole-punch or unfalloc, we're done */
                        if (index == start || end != -1)
                                break;
                        /* But if truncating, restart to make sure all gone */
                        index = start;
                        continue;
                }
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];

                        index = indices[i];
                        if (index >= end)
                                break;

                        if (radix_tree_exceptional_entry(page)) {
                                if (unfalloc)
                                        continue;
                                if (shmem_free_swap(mapping, index, page)) {
                                        /* Swap was replaced by page: retry */
                                        index--;
                                        break;
                                }
                                nr_swaps_freed++;
                                continue;
                        }

                        lock_page(page);
                        if (!unfalloc || !PageUptodate(page)) {
                                if (page->mapping == mapping) {
                                        VM_BUG_ON_PAGE(PageWriteback(page), page);
                                        truncate_inode_page(mapping, page);
                                } else {
                                        /* Page was replaced by swap: retry */
                                        unlock_page(page);
                                        index--;
                                        break;
                                }
                        }
                        unlock_page(page);
                }
                pagevec_remove_exceptionals(&pvec);
                pagevec_release(&pvec);
                index++;
        }

        spin_lock(&info->lock);
        info->swapped -= nr_swaps_freed;
        shmem_recalc_inode(inode);
        spin_unlock(&info->lock);
}

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
        shmem_undo_range(inode, lstart, lend, false);
        inode->i_ctime = inode->i_mtime = CURRENT_TIME;
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);
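
/*
 * Illustrative use, as in shmem_evict_inode() below: truncating away all
 * of a file's data is spelled
 *
 *        shmem_truncate_range(inode, 0, (loff_t)-1);
 *
 * where lend == -1 means "to the end of the file".
 */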

static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
{
        struct inode *inode = dentry->d_inode;
        struct shmem_inode_info *info = SHMEM_I(inode);
        int error;

        error = inode_change_ok(inode, attr);
        if (error)
                return error;

        if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
                loff_t oldsize = inode->i_size;
                loff_t newsize = attr->ia_size;

                /* protected by i_mutex */
                if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
                    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
                        return -EPERM;

                if (newsize != oldsize) {
                        error = shmem_reacct_size(SHMEM_I(inode)->flags,
                                        oldsize, newsize);
                        if (error)
                                return error;
                        i_size_write(inode, newsize);
                        inode->i_ctime = inode->i_mtime = CURRENT_TIME;
                }
                if (newsize < oldsize) {
                        loff_t holebegin = round_up(newsize, PAGE_SIZE);
                        unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
                        shmem_truncate_range(inode, newsize, (loff_t)-1);
                        /* unmap again to remove racily COWed private pages */
                        unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
                }
        }

        setattr_copy(inode, attr);
        if (attr->ia_valid & ATTR_MODE)
                error = posix_acl_chmod(inode, inode->i_mode);
        return error;
}

static void shmem_evict_inode(struct inode *inode)
{
        struct shmem_inode_info *info = SHMEM_I(inode);

        if (inode->i_mapping->a_ops == &shmem_aops) {
                shmem_unacct_size(info->flags, inode->i_size);
                inode->i_size = 0;
                shmem_truncate_range(inode, 0, (loff_t)-1);
                if (!list_empty(&info->swaplist)) {
                        mutex_lock(&shmem_swaplist_mutex);
                        list_del_init(&info->swaplist);
                        mutex_unlock(&shmem_swaplist_mutex);
                }
        } else
                kfree(info->symlink);

        simple_xattrs_free(&info->xattrs);
        WARN_ON(inode->i_blocks);
        shmem_free_inode(inode->i_sb);
        clear_inode(inode);
}

/*
 * If swap found in inode, free it and move page from swapcache to filecache.
 */
static int shmem_unuse_inode(struct shmem_inode_info *info,
                             swp_entry_t swap, struct page **pagep)
{
        struct address_space *mapping = info->vfs_inode.i_mapping;
        void *radswap;
        pgoff_t index;
        gfp_t gfp;
        int error = 0;

        radswap = swp_to_radix_entry(swap);
        index = radix_tree_locate_item(&mapping->page_tree, radswap);
        if (index == -1)
                return -EAGAIN; /* tell shmem_unuse we found nothing */

        /*
         * Move _head_ to start search for next from here.
         * But be careful: shmem_evict_inode checks list_empty without taking
         * mutex, and there's an instant in list_move_tail when info->swaplist
         * would appear empty, if it were the only one on shmem_swaplist.
         */
        if (shmem_swaplist.next != &info->swaplist)
                list_move_tail(&shmem_swaplist, &info->swaplist);

        gfp = mapping_gfp_mask(mapping);
        if (shmem_should_replace_page(*pagep, gfp)) {
                mutex_unlock(&shmem_swaplist_mutex);
                error = shmem_replace_page(pagep, gfp, info, index);
                mutex_lock(&shmem_swaplist_mutex);
                /*
                 * We needed to drop mutex to make that restrictive page
                 * allocation, but the inode might have been freed while we
                 * dropped it: although a racing shmem_evict_inode() cannot
                 * complete without emptying the radix_tree, our page lock
                 * on this swapcache page is not enough to prevent that -
                 * free_swap_and_cache() of our swap entry will only
                 * trylock_page(), removing swap from radix_tree whatever.
                 *
                 * We must not proceed to shmem_add_to_page_cache() if the
                 * inode has been freed, but of course we cannot rely on
                 * inode or mapping or info to check that.  However, we can
                 * safely check if our swap entry is still in use (and here
                 * it can't have got reused for another page): if it's still
                 * in use, then the inode cannot have been freed yet, and we
                 * can safely proceed (if it's no longer in use, that tells
                 * nothing about the inode, but we don't need to unuse swap).
                 */
                if (!page_swapcount(*pagep))
                        error = -ENOENT;
        }

        /*
         * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
         * but also to hold up shmem_evict_inode(): so inode cannot be freed
         * beneath us (pagelock doesn't help until the page is in pagecache).
         */
        if (!error)
                error = shmem_add_to_page_cache(*pagep, mapping, index,
                                                radswap);
        if (error != -ENOMEM) {
                /*
                 * Truncation and eviction use free_swap_and_cache(), which
                 * only does trylock page: if we raced, best clean up here.
                 */
                delete_from_swap_cache(*pagep);
                set_page_dirty(*pagep);
                if (!error) {
                        spin_lock(&info->lock);
                        info->swapped--;
                        spin_unlock(&info->lock);
                        swap_free(swap);
                }
        }
        return error;
}

/*
 * Search through swapped inodes to find and replace swap by page.
 */
int shmem_unuse(swp_entry_t swap, struct page *page)
{
        struct list_head *this, *next;
        struct shmem_inode_info *info;
        struct mem_cgroup *memcg;
        int error = 0;

        /*
         * There's a faint possibility that swap page was replaced before
         * caller locked it: caller will come back later with the right page.
         */
        if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val))
                goto out;

        /*
         * Charge page using GFP_KERNEL while we can wait, before taking
         * the shmem_swaplist_mutex which might hold up shmem_writepage().
         * Charged back to the user (not to caller) when swap account is used.
         */
        error = mem_cgroup_try_charge(page, current->mm, GFP_KERNEL, &memcg);
        if (error)
                goto out;
        /* No radix_tree_preload: swap entry keeps a place for page in tree */
        error = -EAGAIN;

        mutex_lock(&shmem_swaplist_mutex);
        list_for_each_safe(this, next, &shmem_swaplist) {
                info = list_entry(this, struct shmem_inode_info, swaplist);
                if (info->swapped)
                        error = shmem_unuse_inode(info, swap, &page);
                else
                        list_del_init(&info->swaplist);
                cond_resched();
                if (error != -EAGAIN)
                        break;
                /* found nothing in this: move on to search the next */
        }
        mutex_unlock(&shmem_swaplist_mutex);

        if (error) {
                if (error != -ENOMEM)
                        error = 0;
                mem_cgroup_cancel_charge(page, memcg);
        } else
                mem_cgroup_commit_charge(page, memcg, true);
out:
        unlock_page(page);
        page_cache_release(page);
        return error;
}

/*
 * Move the page from the page cache to the swap cache.
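 * Called by the VM when reclaiming the page (wbc->for_reclaim): on success
 * the page has been moved into the swap cache and swap I/O started;
 * otherwise the page is redirtied and left to be retried later.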
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
        struct shmem_inode_info *info;
        struct address_space *mapping;
        struct inode *inode;
        swp_entry_t swap;
        pgoff_t index;

        BUG_ON(!PageLocked(page));
        mapping = page->mapping;
        index = page->index;
        inode = mapping->host;
        info = SHMEM_I(inode);
        if (info->flags & VM_LOCKED)
                goto redirty;
        if (!total_swap_pages)
                goto redirty;

        /*
         * Our capabilities prevent regular writeback or sync from ever calling
         * shmem_writepage; but a stacking filesystem might use ->writepage of
         * its underlying filesystem, in which case tmpfs should write out to
         * swap only in response to memory pressure, and not for the writeback
         * threads or sync.
         */
        if (!wbc->for_reclaim) {
                WARN_ON_ONCE(1);        /* Still happens? Tell us about it! */
                goto redirty;
        }

        /*
         * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
         * value into swapfile.c, the only way we can correctly account for a
         * fallocated page arriving here is now to initialize it and write it.
         *
         * That's okay for a page already fallocated earlier, but if we have
         * not yet completed the fallocation, then (a) we want to keep track
         * of this page in case we have to undo it, and (b) it may not be a
         * good idea to continue anyway, once we're pushing into swap.  So
         * reactivate the page, and let shmem_fallocate() quit when too many.
         */
        if (!PageUptodate(page)) {
                if (inode->i_private) {
                        struct shmem_falloc *shmem_falloc;
                        spin_lock(&inode->i_lock);
                        shmem_falloc = inode->i_private;
                        if (shmem_falloc &&
                            !shmem_falloc->waitq &&
                            index >= shmem_falloc->start &&
                            index < shmem_falloc->next)
                                shmem_falloc->nr_unswapped++;
                        else
                                shmem_falloc = NULL;
                        spin_unlock(&inode->i_lock);
                        if (shmem_falloc)
                                goto redirty;
                }
                clear_highpage(page);
                flush_dcache_page(page);
                SetPageUptodate(page);
        }

        swap = get_swap_page();
        if (!swap.val)
                goto redirty;

        /*
         * Add inode to shmem_unuse()'s list of swapped-out inodes,
         * if it's not already there.  Do it now before the page is
         * moved to swap cache, when its pagelock no longer protects
         * the inode from eviction.  But don't unlock the mutex until
         * we've incremented swapped, because shmem_unuse_inode() will
         * prune a !swapped inode from the swaplist under this mutex.
         */
        mutex_lock(&shmem_swaplist_mutex);
        if (list_empty(&info->swaplist))
                list_add_tail(&info->swaplist, &shmem_swaplist);

        if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
                swap_shmem_alloc(swap);
                shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));

                spin_lock(&info->lock);
                info->swapped++;
                shmem_recalc_inode(inode);
                spin_unlock(&info->lock);

                mutex_unlock(&shmem_swaplist_mutex);
                BUG_ON(page_mapped(page));
                swap_writepage(page, wbc);
                return 0;
        }

        mutex_unlock(&shmem_swaplist_mutex);
        swapcache_free(swap);
redirty:
        set_page_dirty(page);
        if (wbc->for_reclaim)
                return AOP_WRITEPAGE_ACTIVATE;  /* Return with page locked */
        unlock_page(page);
        return 0;
}

#ifdef CONFIG_NUMA
#ifdef CONFIG_TMPFS
static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
        char buffer[64];

        if (!mpol || mpol->mode == MPOL_DEFAULT)
                return;         /* show nothing */

        mpol_to_str(buffer, sizeof(buffer), mpol);

        seq_printf(seq, ",mpol=%s", buffer);
}

static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
        struct mempolicy *mpol = NULL;
        if (sbinfo->mpol) {
                spin_lock(&sbinfo->stat_lock);  /* prevent replace/use races */
                mpol = sbinfo->mpol;
                mpol_get(mpol);
                spin_unlock(&sbinfo->stat_lock);
        }
        return mpol;
}
#endif /* CONFIG_TMPFS */

static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
                        struct shmem_inode_info *info, pgoff_t index)
{
        struct vm_area_struct pvma;
        struct page *page;

        /* Create a pseudo vma that just contains the policy */
        pvma.vm_start = 0;
        /* Bias interleave by inode number to distribute better across nodes */
        pvma.vm_pgoff = index + info->vfs_inode.i_ino;
        pvma.vm_ops = NULL;
        pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);

        page = swapin_readahead(swap, gfp, &pvma, 0);

        /* Drop reference taken by mpol_shared_policy_lookup() */
        mpol_cond_put(pvma.vm_policy);

        return page;
}

static struct page *shmem_alloc_page(gfp_t gfp,
                        struct shmem_inode_info *info, pgoff_t index)
{
        struct vm_area_struct pvma;
        struct page *page;

        /* Create a pseudo vma that just contains the policy */
        pvma.vm_start = 0;
        /* Bias interleave by inode number to distribute better across nodes */
        pvma.vm_pgoff = index + info->vfs_inode.i_ino;
        pvma.vm_ops = NULL;
        pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);

        page = alloc_page_vma(gfp, &pvma, 0);

        /* Drop reference taken by mpol_shared_policy_lookup() */
        mpol_cond_put(pvma.vm_policy);

        return page;
}
#else /* !CONFIG_NUMA */
#ifdef CONFIG_TMPFS
static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
}
#endif /* CONFIG_TMPFS */

static inline struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
                        struct shmem_inode_info *info, pgoff_t index)
{
        return swapin_readahead(swap, gfp, NULL, 0);
}

static inline struct page *shmem_alloc_page(gfp_t gfp,
                        struct shmem_inode_info *info, pgoff_t index)
{
        return alloc_page(gfp);
}
#endif /* CONFIG_NUMA */

#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
        return NULL;
}
#endif

/*
 * When a page is moved from swapcache to shmem filecache (either by the
 * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
 * shmem_unuse_inode()), it may have been read in earlier from swap, in
 * ignorance of the mapping it belongs to.  If that mapping has special
 * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
 * we may need to copy to a suitable page before moving to filecache.
 *
 * In a future release, this may well be extended to respect cpuset and
 * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
 * but for now it is a simple matter of zone.
 */
static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
{
        return page_zonenum(page) > gfp_zone(gfp);
}

static int shmem_replace_page(struct page **pagep, gfp_t gfp,
                                struct shmem_inode_info *info, pgoff_t index)
{
        struct page *oldpage, *newpage;
        struct address_space *swap_mapping;
        pgoff_t swap_index;
        int error;

        oldpage = *pagep;
        swap_index = page_private(oldpage);
        swap_mapping = page_mapping(oldpage);

        /*
         * We have arrived here because our zones are constrained, so don't
         * limit chance of success by further cpuset and node constraints.
         */
        gfp &= ~GFP_CONSTRAINT_MASK;
        newpage = shmem_alloc_page(gfp, info, index);
        if (!newpage)
                return -ENOMEM;

        page_cache_get(newpage);
        copy_highpage(newpage, oldpage);
        flush_dcache_page(newpage);

        __set_page_locked(newpage);
        SetPageUptodate(newpage);
        SetPageSwapBacked(newpage);
        set_page_private(newpage, swap_index);
        SetPageSwapCache(newpage);

        /*
         * Our caller will very soon move newpage out of swapcache, but it's
         * a nice clean interface for us to replace oldpage by newpage there.
         */
        spin_lock_irq(&swap_mapping->tree_lock);
        error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage,
                                                                   newpage);
        if (!error) {
                __inc_zone_page_state(newpage, NR_FILE_PAGES);
                __dec_zone_page_state(oldpage, NR_FILE_PAGES);
        }
        spin_unlock_irq(&swap_mapping->tree_lock);

        if (unlikely(error)) {
                /*
                 * Is this possible?  I think not, now that our callers check
                 * both PageSwapCache and page_private after getting page lock;
                 * but be defensive.  Reverse old to newpage for clear and free.
                 */
                oldpage = newpage;
        } else {
                mem_cgroup_migrate(oldpage, newpage, true);
                lru_cache_add_anon(newpage);
                *pagep = newpage;
        }

        ClearPageSwapCache(oldpage);
        set_page_private(oldpage, 0);

        unlock_page(oldpage);
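        /*
         * Deliberate double release: the page being discarded holds two
         * references, one from its allocation/caller and one for the swap
         * cache slot, and both are dropped here.
         */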
        page_cache_release(oldpage);
        page_cache_release(oldpage);
        return error;
}

/*
 * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty, since we also free the swap
 * entry: a page cannot live in both the swap cache and the page cache.
 */
1032 static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
1033         struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type)
1034 {
1035         struct address_space *mapping = inode->i_mapping;
1036         struct shmem_inode_info *info;
1037         struct shmem_sb_info *sbinfo;
1038         struct mem_cgroup *memcg;
1039         struct page *page;
1040         swp_entry_t swap;
1041         int error;
1042         int once = 0;
1043         int alloced = 0;
1044
1045         if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT))
1046                 return -EFBIG;
1047 repeat:
1048         swap.val = 0;
1049         page = find_lock_entry(mapping, index);
1050         if (radix_tree_exceptional_entry(page)) {
1051                 swap = radix_to_swp_entry(page);
1052                 page = NULL;
1053         }
1054
1055         if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
1056             ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
1057                 error = -EINVAL;
1058                 goto failed;
1059         }
1060
1061         if (page && sgp == SGP_WRITE)
1062                 mark_page_accessed(page);
1063
1064         /* fallocated page? */
1065         if (page && !PageUptodate(page)) {
1066                 if (sgp != SGP_READ)
1067                         goto clear;
1068                 unlock_page(page);
1069                 page_cache_release(page);
1070                 page = NULL;
1071         }
1072         if (page || (sgp == SGP_READ && !swap.val)) {
1073                 *pagep = page;
1074                 return 0;
1075         }
1076
1077         /*
1078          * Fast cache lookup did not find it:
1079          * bring it back from swap or allocate.
1080          */
1081         info = SHMEM_I(inode);
1082         sbinfo = SHMEM_SB(inode->i_sb);
1083
1084         if (swap.val) {
1085                 /* Look it up and read it in.. */
1086                 page = lookup_swap_cache(swap);
1087                 if (!page) {
1088                         /* here we actually do the io */
1089                         if (fault_type)
1090                                 *fault_type |= VM_FAULT_MAJOR;
1091                         page = shmem_swapin(swap, gfp, info, index);
1092                         if (!page) {
1093                                 error = -ENOMEM;
1094                                 goto failed;
1095                         }
1096                 }
1097
1098                 /* We have to do this with page locked to prevent races */
1099                 lock_page(page);
1100                 if (!PageSwapCache(page) || page_private(page) != swap.val ||
1101                     !shmem_confirm_swap(mapping, index, swap)) {
1102                         error = -EEXIST;        /* try again */
1103                         goto unlock;
1104                 }
1105                 if (!PageUptodate(page)) {
1106                         error = -EIO;
1107                         goto failed;
1108                 }
1109                 wait_on_page_writeback(page);
1110
1111                 if (shmem_should_replace_page(page, gfp)) {
1112                         error = shmem_replace_page(&page, gfp, info, index);
1113                         if (error)
1114                                 goto failed;
1115                 }
1116
1117                 error = mem_cgroup_try_charge(page, current->mm, gfp, &memcg);
1118                 if (!error) {
1119                         error = shmem_add_to_page_cache(page, mapping, index,
1120                                                 swp_to_radix_entry(swap));
1121                         /*
1122                          * We already confirmed swap under page lock, and make
1123                          * no memory allocation here, so usually no possibility
1124                          * of error; but free_swap_and_cache() only trylocks a
1125                          * page, so it is just possible that the entry has been
1126                          * truncated or holepunched since swap was confirmed.
1127                          * shmem_undo_range() will have done some of the
1128                          * unaccounting, now delete_from_swap_cache() will do
1129                          * the rest.
1130                          * Reset swap.val? No, leave it so "failed" goes back to
1131                          * "repeat": reading a hole and writing should succeed.
1132                          */
1133                         if (error) {
1134                                 mem_cgroup_cancel_charge(page, memcg);
1135                                 delete_from_swap_cache(page);
1136                         }
1137                 }
1138                 if (error)
1139                         goto failed;
1140
1141                 mem_cgroup_commit_charge(page, memcg, true);
1142
1143                 spin_lock(&info->lock);
1144                 info->swapped--;
1145                 shmem_recalc_inode(inode);
1146                 spin_unlock(&info->lock);
1147
1148                 if (sgp == SGP_WRITE)
1149                         mark_page_accessed(page);
1150
1151                 delete_from_swap_cache(page);
1152                 set_page_dirty(page);
1153                 swap_free(swap);
1154
1155         } else {
1156                 if (shmem_acct_block(info->flags)) {
1157                         error = -ENOSPC;
1158                         goto failed;
1159                 }
1160                 if (sbinfo->max_blocks) {
1161                         if (percpu_counter_compare(&sbinfo->used_blocks,
1162                                                 sbinfo->max_blocks) >= 0) {
1163                                 error = -ENOSPC;
1164                                 goto unacct;
1165                         }
1166                         percpu_counter_inc(&sbinfo->used_blocks);
1167                 }
1168
1169                 page = shmem_alloc_page(gfp, info, index);
1170                 if (!page) {
1171                         error = -ENOMEM;
1172                         goto decused;
1173                 }
1174
1175                 __SetPageSwapBacked(page);
1176                 __set_page_locked(page);
1177                 if (sgp == SGP_WRITE)
1178                         __SetPageReferenced(page);
1179
1180                 error = mem_cgroup_try_charge(page, current->mm, gfp, &memcg);
1181                 if (error)
1182                         goto decused;
1183                 error = radix_tree_maybe_preload(gfp & GFP_RECLAIM_MASK);
1184                 if (!error) {
1185                         error = shmem_add_to_page_cache(page, mapping, index,
1186                                                         NULL);
1187                         radix_tree_preload_end();
1188                 }
1189                 if (error) {
1190                         mem_cgroup_cancel_charge(page, memcg);
1191                         goto decused;
1192                 }
1193                 mem_cgroup_commit_charge(page, memcg, false);
1194                 lru_cache_add_anon(page);
1195
1196                 spin_lock(&info->lock);
1197                 info->alloced++;
1198                 inode->i_blocks += BLOCKS_PER_PAGE;
1199                 shmem_recalc_inode(inode);
1200                 spin_unlock(&info->lock);
1201                 alloced = true;
1202
1203                 /*
1204                  * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
1205                  */
1206                 if (sgp == SGP_FALLOC)
1207                         sgp = SGP_WRITE;
1208 clear:
1209                 /*
1210                  * Let SGP_WRITE caller clear ends if write does not fill page;
1211                  * but SGP_FALLOC on a page fallocated earlier must initialize
1212                  * it now, lest undo on failure cancel our earlier guarantee.
1213                  */
1214                 if (sgp != SGP_WRITE) {
1215                         clear_highpage(page);
1216                         flush_dcache_page(page);
1217                         SetPageUptodate(page);
1218                 }
1219                 if (sgp == SGP_DIRTY)
1220                         set_page_dirty(page);
1221         }
1222
1223         /* Perhaps the file has been truncated since we checked */
1224         if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
1225             ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
1226                 error = -EINVAL;
1227                 if (alloced)
1228                         goto trunc;
1229                 else
1230                         goto failed;
1231         }
1232         *pagep = page;
1233         return 0;
1234
1235         /*
1236          * Error recovery.
1237          */
1238 trunc:
1239         info = SHMEM_I(inode);
1240         ClearPageDirty(page);
1241         delete_from_page_cache(page);
1242         spin_lock(&info->lock);
1243         info->alloced--;
1244         inode->i_blocks -= BLOCKS_PER_PAGE;
1245         spin_unlock(&info->lock);
1246 decused:
1247         sbinfo = SHMEM_SB(inode->i_sb);
1248         if (sbinfo->max_blocks)
1249                 percpu_counter_add(&sbinfo->used_blocks, -1);
1250 unacct:
1251         shmem_unacct_blocks(info->flags, 1);
1252 failed:
1253         if (swap.val && error != -EINVAL &&
1254             !shmem_confirm_swap(mapping, index, swap))
1255                 error = -EEXIST;
1256 unlock:
1257         if (page) {
1258                 unlock_page(page);
1259                 page_cache_release(page);
1260         }
1261         if (error == -ENOSPC && !once++) {
1262                 info = SHMEM_I(inode);
1263                 spin_lock(&info->lock);
1264                 shmem_recalc_inode(inode);
1265                 spin_unlock(&info->lock);
1266                 goto repeat;
1267         }
1268         if (error == -EEXIST)   /* from above or from radix_tree_insert */
1269                 goto repeat;
1270         return error;
1271 }
1272
1273 static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1274 {
1275         struct inode *inode = file_inode(vma->vm_file);
1276         int error;
1277         int ret = VM_FAULT_LOCKED;
1278
1279         /*
1280          * Trinity finds that probing a hole which tmpfs is punching can
1281          * prevent the hole-punch from ever completing: which in turn
1282          * locks writers out with its hold on i_mutex.  So refrain from
1283          * faulting pages into the hole while it's being punched.  Although
1284          * shmem_undo_range() does remove the additions, it may be unable to
1285          * keep up, as each new page needs its own unmap_mapping_range() call,
1286          * and the i_mmap tree grows ever slower to scan if new vmas are added.
1287          *
1288          * It does not matter if we sometimes reach this check just before the
1289          * hole-punch begins, so that one fault then races with the punch:
1290          * we just need to make racing faults a rare case.
1291          *
1292          * The implementation below would be much simpler if we just used a
1293          * standard mutex or completion: but we cannot take i_mutex in fault,
1294          * and bloating every shmem inode for this unlikely case would be sad.
1295          */
1296         if (unlikely(inode->i_private)) {
1297                 struct shmem_falloc *shmem_falloc;
1298
1299                 spin_lock(&inode->i_lock);
1300                 shmem_falloc = inode->i_private;
1301                 if (shmem_falloc &&
1302                     shmem_falloc->waitq &&
1303                     vmf->pgoff >= shmem_falloc->start &&
1304                     vmf->pgoff < shmem_falloc->next) {
1305                         wait_queue_head_t *shmem_falloc_waitq;
1306                         DEFINE_WAIT(shmem_fault_wait);
1307
1308                         ret = VM_FAULT_NOPAGE;
1309                         if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
1310                            !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
1311                                 /* It's polite to up mmap_sem if we can */
1312                                 up_read(&vma->vm_mm->mmap_sem);
1313                                 ret = VM_FAULT_RETRY;
1314                         }
1315
1316                         shmem_falloc_waitq = shmem_falloc->waitq;
1317                         prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
1318                                         TASK_UNINTERRUPTIBLE);
1319                         spin_unlock(&inode->i_lock);
1320                         schedule();
1321
1322                         /*
1323                          * shmem_falloc_waitq points into the shmem_fallocate()
1324                          * stack of the hole-punching task: shmem_falloc_waitq
1325                          * is usually invalid by the time we reach here, but
1326                          * finish_wait() does not dereference it in that case;
1327                          * though i_lock needed lest racing with wake_up_all().
1328                          */
1329                         spin_lock(&inode->i_lock);
1330                         finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
1331                         spin_unlock(&inode->i_lock);
1332                         return ret;
1333                 }
1334                 spin_unlock(&inode->i_lock);
1335         }
1336
1337         error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
1338         if (error)
1339                 return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
1340
1341         if (ret & VM_FAULT_MAJOR) {
1342                 count_vm_event(PGMAJFAULT);
1343                 mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
1344         }
1345         return ret;
1346 }
1347
1348 #ifdef CONFIG_NUMA
1349 static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
1350 {
1351         struct inode *inode = file_inode(vma->vm_file);
1352         return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
1353 }
1354
1355 static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
1356                                           unsigned long addr)
1357 {
1358         struct inode *inode = file_inode(vma->vm_file);
1359         pgoff_t index;
1360
1361         index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
1362         return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
1363 }
1364 #endif
1365
1366 int shmem_lock(struct file *file, int lock, struct user_struct *user)
1367 {
1368         struct inode *inode = file_inode(file);
1369         struct shmem_inode_info *info = SHMEM_I(inode);
1370         int retval = -ENOMEM;
1371
1372         spin_lock(&info->lock);
1373         if (lock && !(info->flags & VM_LOCKED)) {
1374                 if (!user_shm_lock(inode->i_size, user))
1375                         goto out_nomem;
1376                 info->flags |= VM_LOCKED;
1377                 mapping_set_unevictable(file->f_mapping);
1378         }
1379         if (!lock && (info->flags & VM_LOCKED) && user) {
1380                 user_shm_unlock(inode->i_size, user);
1381                 info->flags &= ~VM_LOCKED;
1382                 mapping_clear_unevictable(file->f_mapping);
1383         }
1384         retval = 0;
1385
1386 out_nomem:
1387         spin_unlock(&info->lock);
1388         return retval;
1389 }
1390
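/*
 * Illustrative sketch, not part of the original file: shmem_lock() is
 * reached from the SysV IPC layer, e.g. when user space locks a segment
 * with shmctl(2).  A minimal caller (all names below are standard libc,
 * nothing here is defined in shmem.c):
 *
 *	#include <sys/ipc.h>
 *	#include <sys/shm.h>
 *
 *	int shmid = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	shmctl(shmid, SHM_LOCK, NULL);		// mapping becomes unevictable
 *	shmctl(shmid, SHM_UNLOCK, NULL);	// undone, pages evictable again
 *
 * user_shm_lock() above charges the locked size against RLIMIT_MEMLOCK,
 * so SHM_LOCK can fail with ENOMEM once that rlimit is exhausted.
 */
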
1391 static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
1392 {
1393         file_accessed(file);
1394         vma->vm_ops = &shmem_vm_ops;
1395         return 0;
1396 }
1397
1398 static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
1399                                      umode_t mode, dev_t dev, unsigned long flags)
1400 {
1401         struct inode *inode;
1402         struct shmem_inode_info *info;
1403         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
1404
1405         if (shmem_reserve_inode(sb))
1406                 return NULL;
1407
1408         inode = new_inode(sb);
1409         if (inode) {
1410                 inode->i_ino = get_next_ino();
1411                 inode_init_owner(inode, dir, mode);
1412                 inode->i_blocks = 0;
1413                 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
1414                 inode->i_generation = get_seconds();
1415                 info = SHMEM_I(inode);
1416                 memset(info, 0, (char *)inode - (char *)info);
1417                 spin_lock_init(&info->lock);
1418                 info->seals = F_SEAL_SEAL;
1419                 info->flags = flags & VM_NORESERVE;
1420                 INIT_LIST_HEAD(&info->swaplist);
1421                 simple_xattrs_init(&info->xattrs);
1422                 cache_no_acl(inode);
1423
1424                 switch (mode & S_IFMT) {
1425                 default:
1426                         inode->i_op = &shmem_special_inode_operations;
1427                         init_special_inode(inode, mode, dev);
1428                         break;
1429                 case S_IFREG:
1430                         inode->i_mapping->a_ops = &shmem_aops;
1431                         inode->i_op = &shmem_inode_operations;
1432                         inode->i_fop = &shmem_file_operations;
1433                         mpol_shared_policy_init(&info->policy,
1434                                                  shmem_get_sbmpol(sbinfo));
1435                         break;
1436                 case S_IFDIR:
1437                         inc_nlink(inode);
1438                         /* Some things misbehave if size == 0 on a directory */
1439                         inode->i_size = 2 * BOGO_DIRENT_SIZE;
1440                         inode->i_op = &shmem_dir_inode_operations;
1441                         inode->i_fop = &simple_dir_operations;
1442                         break;
1443                 case S_IFLNK:
1444                         /*
1445                          * Must not load anything into the rbtree:
1446                          * mpol_free_shared_policy will not be called.
1447                          */
1448                         mpol_shared_policy_init(&info->policy, NULL);
1449                         break;
1450                 }
1451         } else
1452                 shmem_free_inode(sb);
1453         return inode;
1454 }
1455
1456 bool shmem_mapping(struct address_space *mapping)
1457 {
1458         return mapping->host->i_sb->s_op == &shmem_ops;
1459 }
1460
1461 #ifdef CONFIG_TMPFS
1462 static const struct inode_operations shmem_symlink_inode_operations;
1463 static const struct inode_operations shmem_short_symlink_operations;
1464
1465 #ifdef CONFIG_TMPFS_XATTR
1466 static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
1467 #else
1468 #define shmem_initxattrs NULL
1469 #endif
1470
1471 static int
1472 shmem_write_begin(struct file *file, struct address_space *mapping,
1473                         loff_t pos, unsigned len, unsigned flags,
1474                         struct page **pagep, void **fsdata)
1475 {
1476         struct inode *inode = mapping->host;
1477         struct shmem_inode_info *info = SHMEM_I(inode);
1478         pgoff_t index = pos >> PAGE_CACHE_SHIFT;
1479
1480         /* i_mutex is held by caller */
1481         if (unlikely(info->seals)) {
1482                 if (info->seals & F_SEAL_WRITE)
1483                         return -EPERM;
1484                 if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
1485                         return -EPERM;
1486         }
1487
1488         return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
1489 }
1490
1491 static int
1492 shmem_write_end(struct file *file, struct address_space *mapping,
1493                         loff_t pos, unsigned len, unsigned copied,
1494                         struct page *page, void *fsdata)
1495 {
1496         struct inode *inode = mapping->host;
1497
1498         if (pos + copied > inode->i_size)
1499                 i_size_write(inode, pos + copied);
1500
1501         if (!PageUptodate(page)) {
1502                 if (copied < PAGE_CACHE_SIZE) {
1503                         unsigned from = pos & (PAGE_CACHE_SIZE - 1);
1504                         zero_user_segments(page, 0, from,
1505                                         from + copied, PAGE_CACHE_SIZE);
1506                 }
1507                 SetPageUptodate(page);
1508         }
1509         set_page_dirty(page);
1510         unlock_page(page);
1511         page_cache_release(page);
1512
1513         return copied;
1514 }
1515
1516 static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
1517 {
1518         struct file *file = iocb->ki_filp;
1519         struct inode *inode = file_inode(file);
1520         struct address_space *mapping = inode->i_mapping;
1521         pgoff_t index;
1522         unsigned long offset;
1523         enum sgp_type sgp = SGP_READ;
1524         int error = 0;
1525         ssize_t retval = 0;
1526         loff_t *ppos = &iocb->ki_pos;
1527
1528         /*
1529          * Might this read be for a stacking filesystem?  Then when reading
1530          * holes of a sparse file, we actually need to allocate those pages,
1531          * and even mark them dirty, so such a read stays subject to the max_blocks limit.
1532          */
1533         if (!iter_is_iovec(to))
1534                 sgp = SGP_DIRTY;
1535
1536         index = *ppos >> PAGE_CACHE_SHIFT;
1537         offset = *ppos & ~PAGE_CACHE_MASK;
1538
1539         for (;;) {
1540                 struct page *page = NULL;
1541                 pgoff_t end_index;
1542                 unsigned long nr, ret;
1543                 loff_t i_size = i_size_read(inode);
1544
1545                 end_index = i_size >> PAGE_CACHE_SHIFT;
1546                 if (index > end_index)
1547                         break;
1548                 if (index == end_index) {
1549                         nr = i_size & ~PAGE_CACHE_MASK;
1550                         if (nr <= offset)
1551                                 break;
1552                 }
1553
1554                 error = shmem_getpage(inode, index, &page, sgp, NULL);
1555                 if (error) {
1556                         if (error == -EINVAL)
1557                                 error = 0;
1558                         break;
1559                 }
1560                 if (page)
1561                         unlock_page(page);
1562
1563                 /*
1564                  * We must evaluate after, since reads (unlike writes)
1565                  * are called without i_mutex protection against truncate
1566                  */
1567                 nr = PAGE_CACHE_SIZE;
1568                 i_size = i_size_read(inode);
1569                 end_index = i_size >> PAGE_CACHE_SHIFT;
1570                 if (index == end_index) {
1571                         nr = i_size & ~PAGE_CACHE_MASK;
1572                         if (nr <= offset) {
1573                                 if (page)
1574                                         page_cache_release(page);
1575                                 break;
1576                         }
1577                 }
1578                 nr -= offset;
1579
1580                 if (page) {
1581                         /*
1582                          * If users can be writing to this page using arbitrary
1583                          * virtual addresses, take care about potential aliasing
1584                          * before reading the page on the kernel side.
1585                          */
1586                         if (mapping_writably_mapped(mapping))
1587                                 flush_dcache_page(page);
1588                         /*
1589                          * Mark the page accessed if we read the beginning.
1590                          */
1591                         if (!offset)
1592                                 mark_page_accessed(page);
1593                 } else {
1594                         page = ZERO_PAGE(0);
1595                         page_cache_get(page);
1596                 }
1597
1598                 /*
1599                  * Ok, we have the page, and it's up-to-date, so
1600                  * now we can copy it to user space...
1601                  */
1602                 ret = copy_page_to_iter(page, offset, nr, to);
1603                 retval += ret;
1604                 offset += ret;
1605                 index += offset >> PAGE_CACHE_SHIFT;
1606                 offset &= ~PAGE_CACHE_MASK;
1607
1608                 page_cache_release(page);
1609                 if (!iov_iter_count(to))
1610                         break;
1611                 if (ret < nr) {
1612                         error = -EFAULT;
1613                         break;
1614                 }
1615                 cond_resched();
1616         }
1617
1618         *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
1619         file_accessed(file);
1620         return retval ? retval : error;
1621 }
1622
1623 static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
1624                                 struct pipe_inode_info *pipe, size_t len,
1625                                 unsigned int flags)
1626 {
1627         struct address_space *mapping = in->f_mapping;
1628         struct inode *inode = mapping->host;
1629         unsigned int loff, nr_pages, req_pages;
1630         struct page *pages[PIPE_DEF_BUFFERS];
1631         struct partial_page partial[PIPE_DEF_BUFFERS];
1632         struct page *page;
1633         pgoff_t index, end_index;
1634         loff_t isize, left;
1635         int error, page_nr;
1636         struct splice_pipe_desc spd = {
1637                 .pages = pages,
1638                 .partial = partial,
1639                 .nr_pages_max = PIPE_DEF_BUFFERS,
1640                 .flags = flags,
1641                 .ops = &page_cache_pipe_buf_ops,
1642                 .spd_release = spd_release_page,
1643         };
1644
1645         isize = i_size_read(inode);
1646         if (unlikely(*ppos >= isize))
1647                 return 0;
1648
1649         left = isize - *ppos;
1650         if (unlikely(left < len))
1651                 len = left;
1652
1653         if (splice_grow_spd(pipe, &spd))
1654                 return -ENOMEM;
1655
1656         index = *ppos >> PAGE_CACHE_SHIFT;
1657         loff = *ppos & ~PAGE_CACHE_MASK;
1658         req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1659         nr_pages = min(req_pages, spd.nr_pages_max);
1660
1661         spd.nr_pages = find_get_pages_contig(mapping, index,
1662                                                 nr_pages, spd.pages);
1663         index += spd.nr_pages;
1664         error = 0;
1665
1666         while (spd.nr_pages < nr_pages) {
1667                 error = shmem_getpage(inode, index, &page, SGP_CACHE, NULL);
1668                 if (error)
1669                         break;
1670                 unlock_page(page);
1671                 spd.pages[spd.nr_pages++] = page;
1672                 index++;
1673         }
1674
1675         index = *ppos >> PAGE_CACHE_SHIFT;
1676         nr_pages = spd.nr_pages;
1677         spd.nr_pages = 0;
1678
1679         for (page_nr = 0; page_nr < nr_pages; page_nr++) {
1680                 unsigned int this_len;
1681
1682                 if (!len)
1683                         break;
1684
1685                 this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
1686                 page = spd.pages[page_nr];
1687
1688                 if (!PageUptodate(page) || page->mapping != mapping) {
1689                         error = shmem_getpage(inode, index, &page,
1690                                                         SGP_CACHE, NULL);
1691                         if (error)
1692                                 break;
1693                         unlock_page(page);
1694                         page_cache_release(spd.pages[page_nr]);
1695                         spd.pages[page_nr] = page;
1696                 }
1697
1698                 isize = i_size_read(inode);
1699                 end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
1700                 if (unlikely(!isize || index > end_index))
1701                         break;
1702
1703                 if (end_index == index) {
1704                         unsigned int plen;
1705
1706                         plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
1707                         if (plen <= loff)
1708                                 break;
1709
1710                         this_len = min(this_len, plen - loff);
1711                         len = this_len;
1712                 }
1713
1714                 spd.partial[page_nr].offset = loff;
1715                 spd.partial[page_nr].len = this_len;
1716                 len -= this_len;
1717                 loff = 0;
1718                 spd.nr_pages++;
1719                 index++;
1720         }
1721
1722         while (page_nr < nr_pages)
1723                 page_cache_release(spd.pages[page_nr++]);
1724
1725         if (spd.nr_pages)
1726                 error = splice_to_pipe(pipe, &spd);
1727
1728         splice_shrink_spd(&spd);
1729
1730         if (error > 0) {
1731                 *ppos += error;
1732                 file_accessed(in);
1733         }
1734         return error;
1735 }
1736
1737 /*
1738  * llseek SEEK_DATA or SEEK_HOLE through the radix_tree.
1739  */
1740 static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
1741                                     pgoff_t index, pgoff_t end, int whence)
1742 {
1743         struct page *page;
1744         struct pagevec pvec;
1745         pgoff_t indices[PAGEVEC_SIZE];
1746         bool done = false;
1747         int i;
1748
1749         pagevec_init(&pvec, 0);
1750         pvec.nr = 1;            /* start small: we may be there already */
1751         while (!done) {
1752                 pvec.nr = find_get_entries(mapping, index,
1753                                         pvec.nr, pvec.pages, indices);
1754                 if (!pvec.nr) {
1755                         if (whence == SEEK_DATA)
1756                                 index = end;
1757                         break;
1758                 }
1759                 for (i = 0; i < pvec.nr; i++, index++) {
1760                         if (index < indices[i]) {
1761                                 if (whence == SEEK_HOLE) {
1762                                         done = true;
1763                                         break;
1764                                 }
1765                                 index = indices[i];
1766                         }
1767                         page = pvec.pages[i];
1768                         if (page && !radix_tree_exceptional_entry(page)) {
1769                                 if (!PageUptodate(page))
1770                                         page = NULL;
1771                         }
1772                         if (index >= end ||
1773                             (page && whence == SEEK_DATA) ||
1774                             (!page && whence == SEEK_HOLE)) {
1775                                 done = true;
1776                                 break;
1777                         }
1778                 }
1779                 pagevec_remove_exceptionals(&pvec);
1780                 pagevec_release(&pvec);
1781                 pvec.nr = PAGEVEC_SIZE;
1782                 cond_resched();
1783         }
1784         return index;
1785 }
1786
1787 static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
1788 {
1789         struct address_space *mapping = file->f_mapping;
1790         struct inode *inode = mapping->host;
1791         pgoff_t start, end;
1792         loff_t new_offset;
1793
1794         if (whence != SEEK_DATA && whence != SEEK_HOLE)
1795                 return generic_file_llseek_size(file, offset, whence,
1796                                         MAX_LFS_FILESIZE, i_size_read(inode));
1797         mutex_lock(&inode->i_mutex);
1798         /* We're holding i_mutex so we can access i_size directly */
1799
1800         if (offset < 0)
1801                 offset = -EINVAL;
1802         else if (offset >= inode->i_size)
1803                 offset = -ENXIO;
1804         else {
1805                 start = offset >> PAGE_CACHE_SHIFT;
1806                 end = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1807                 new_offset = shmem_seek_hole_data(mapping, start, end, whence);
1808                 new_offset <<= PAGE_CACHE_SHIFT;
1809                 if (new_offset > offset) {
1810                         if (new_offset < inode->i_size)
1811                                 offset = new_offset;
1812                         else if (whence == SEEK_DATA)
1813                                 offset = -ENXIO;
1814                         else
1815                                 offset = inode->i_size;
1816                 }
1817         }
1818
1819         if (offset >= 0)
1820                 offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
1821         mutex_unlock(&inode->i_mutex);
1822         return offset;
1823 }
1824
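/*
 * Illustrative sketch, not part of the original file: the SEEK_DATA and
 * SEEK_HOLE handling above is driven by lseek(2).  Assuming fd refers to
 * an 8192-byte tmpfs file whose only data sits in its second page:
 *
 *	#define _GNU_SOURCE	// for SEEK_DATA/SEEK_HOLE
 *	#include <unistd.h>
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);		// 4096: first page with data
 *	off_t hole = lseek(fd, data, SEEK_HOLE);	// 8192: i_size, hole at EOF
 *
 * Results are page-granular, since shmem_seek_hole_data() walks the
 * radix_tree one page index at a time.
 */
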
1825 /*
1826  * We need a tag: a new tag would expand every radix_tree_node by 8 bytes,
1827  * so reuse a tag which we firmly believe is never set or cleared on shmem.
1828  */
1829 #define SHMEM_TAG_PINNED        PAGECACHE_TAG_TOWRITE
1830 #define LAST_SCAN               4       /* about 150ms max */
1831
1832 static void shmem_tag_pins(struct address_space *mapping)
1833 {
1834         struct radix_tree_iter iter;
1835         void **slot;
1836         pgoff_t start;
1837         struct page *page;
1838
1839         lru_add_drain();
1840         start = 0;
1841         rcu_read_lock();
1842
1843 restart:
1844         radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
1845                 page = radix_tree_deref_slot(slot);
1846                 if (!page || radix_tree_exception(page)) {
1847                         if (radix_tree_deref_retry(page))
1848                                 goto restart;
1849                 } else if (page_count(page) - page_mapcount(page) > 1) {
1850                         spin_lock_irq(&mapping->tree_lock);
1851                         radix_tree_tag_set(&mapping->page_tree, iter.index,
1852                                            SHMEM_TAG_PINNED);
1853                         spin_unlock_irq(&mapping->tree_lock);
1854                 }
1855
1856                 if (need_resched()) {
1857                         cond_resched_rcu();
1858                         start = iter.index + 1;
1859                         goto restart;
1860                 }
1861         }
1862         rcu_read_unlock();
1863 }
1864
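/*
 * Worked example of the pin test above (illustrative numbers): the page
 * cache itself holds one reference, and each user mapping contributes to
 * both page_count() and page_mapcount().  So for a page with the cache
 * reference, two mmap() users and one get_user_pages() pin:
 *
 *	page_count()    = 1 + 2 + 1 = 4
 *	page_mapcount() = 2
 *	4 - 2 = 2 > 1		=> tagged SHMEM_TAG_PINNED
 *
 * Without the extra pin the difference is exactly 1, and the page is
 * left untagged.
 */
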
1865 /*
1866  * Setting SEAL_WRITE requires us to verify there's no pending writer. However,
1867  * via get_user_pages(), drivers might have some pending I/O without any active
1868  * user-space mappings (e.g., direct-IO, AIO). Therefore, we look at all pages
1869  * and see whether they have an elevated ref-count. If so, we tag them and wait
1870  * for those extra references to be dropped.
1871  * To avoid races, the caller must guarantee that no new user will acquire
1872  * writable references to those pages.
1873  */
1874 static int shmem_wait_for_pins(struct address_space *mapping)
1875 {
1876         struct radix_tree_iter iter;
1877         void **slot;
1878         pgoff_t start;
1879         struct page *page;
1880         int error, scan;
1881
1882         shmem_tag_pins(mapping);
1883
1884         error = 0;
1885         for (scan = 0; scan <= LAST_SCAN; scan++) {
1886                 if (!radix_tree_tagged(&mapping->page_tree, SHMEM_TAG_PINNED))
1887                         break;
1888
1889                 if (!scan)
1890                         lru_add_drain_all();
1891                 else if (schedule_timeout_killable((HZ << scan) / 200))
1892                         scan = LAST_SCAN;
1893
1894                 start = 0;
1895                 rcu_read_lock();
1896 restart:
1897                 radix_tree_for_each_tagged(slot, &mapping->page_tree, &iter,
1898                                            start, SHMEM_TAG_PINNED) {
1899
1900                         page = radix_tree_deref_slot(slot);
1901                         if (radix_tree_exception(page)) {
1902                                 if (radix_tree_deref_retry(page))
1903                                         goto restart;
1904
1905                                 page = NULL;
1906                         }
1907
1908                         if (page &&
1909                             page_count(page) - page_mapcount(page) != 1) {
1910                                 if (scan < LAST_SCAN)
1911                                         goto continue_resched;
1912
1913                                 /*
1914                                  * On the last scan, we clean up all those tags
1915                                  * we inserted; but make a note that we still
1916                                  * found pages pinned.
1917                                  */
1918                                 error = -EBUSY;
1919                         }
1920
1921                         spin_lock_irq(&mapping->tree_lock);
1922                         radix_tree_tag_clear(&mapping->page_tree,
1923                                              iter.index, SHMEM_TAG_PINNED);
1924                         spin_unlock_irq(&mapping->tree_lock);
1925 continue_resched:
1926                         if (need_resched()) {
1927                                 cond_resched_rcu();
1928                                 start = iter.index + 1;
1929                                 goto restart;
1930                         }
1931                 }
1932                 rcu_read_unlock();
1933         }
1934
1935         return error;
1936 }
1937
1938 #define F_ALL_SEALS (F_SEAL_SEAL | \
1939                      F_SEAL_SHRINK | \
1940                      F_SEAL_GROW | \
1941                      F_SEAL_WRITE)
1942
1943 int shmem_add_seals(struct file *file, unsigned int seals)
1944 {
1945         struct inode *inode = file_inode(file);
1946         struct shmem_inode_info *info = SHMEM_I(inode);
1947         int error;
1948
1949         /*
1950          * SEALING
1951          * Sealing allows multiple parties to share a shmem-file but restrict
1952          * access to a specific subset of file operations. Seals can only be
1953          * added, but never removed. This way, mutually untrusted parties can
1954          * share common memory regions with a well-defined policy. A malicious
1955          * peer can thus never perform unwanted operations on a shared object.
1956          *
1957          * Seals are only supported on special shmem-files and always affect
1958          * the whole underlying inode. Once a seal is set, it may prevent some
1959          * kinds of access to the file. Currently, the following seals are
1960          * defined:
1961          *   SEAL_SEAL: Prevent further seals from being set on this file
1962          *   SEAL_SHRINK: Prevent the file from shrinking
1963          *   SEAL_GROW: Prevent the file from growing
1964          *   SEAL_WRITE: Prevent write access to the file
1965          *
1966          * As we don't require any trust relationship between two parties, we
1967          * must prevent seals from being removed. Therefore, sealing a file
1968          * only adds a given set of seals to the file, it never touches
1969          * existing seals. Furthermore, the "setting seals"-operation can be
1970          * sealed itself, which basically prevents any further seal from being
1971          * added.
1972          *
1973          * Semantics of sealing are only defined on volatile files. Only
1974          * anonymous shmem files support sealing. More importantly, seals are
1975          * never written to disk. Therefore, there's no plan to support it on
1976          * other file types.
1977          */
1978
1979         if (file->f_op != &shmem_file_operations)
1980                 return -EINVAL;
1981         if (!(file->f_mode & FMODE_WRITE))
1982                 return -EPERM;
1983         if (seals & ~(unsigned int)F_ALL_SEALS)
1984                 return -EINVAL;
1985
1986         mutex_lock(&inode->i_mutex);
1987
1988         if (info->seals & F_SEAL_SEAL) {
1989                 error = -EPERM;
1990                 goto unlock;
1991         }
1992
1993         if ((seals & F_SEAL_WRITE) && !(info->seals & F_SEAL_WRITE)) {
1994                 error = mapping_deny_writable(file->f_mapping);
1995                 if (error)
1996                         goto unlock;
1997
1998                 error = shmem_wait_for_pins(file->f_mapping);
1999                 if (error) {
2000                         mapping_allow_writable(file->f_mapping);
2001                         goto unlock;
2002                 }
2003         }
2004
2005         info->seals |= seals;
2006         error = 0;
2007
2008 unlock:
2009         mutex_unlock(&inode->i_mutex);
2010         return error;
2011 }
2012 EXPORT_SYMBOL_GPL(shmem_add_seals);
2013
2014 int shmem_get_seals(struct file *file)
2015 {
2016         if (file->f_op != &shmem_file_operations)
2017                 return -EINVAL;
2018
2019         return SHMEM_I(file_inode(file))->seals;
2020 }
2021 EXPORT_SYMBOL_GPL(shmem_get_seals);
2022
2023 long shmem_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
2024 {
2025         long error;
2026
2027         switch (cmd) {
2028         case F_ADD_SEALS:
2029                 /* disallow values with any of the upper 32 bits set */
2030                 if (arg > UINT_MAX)
2031                         return -EINVAL;
2032
2033                 error = shmem_add_seals(file, arg);
2034                 break;
2035         case F_GET_SEALS:
2036                 error = shmem_get_seals(file);
2037                 break;
2038         default:
2039                 error = -EINVAL;
2040                 break;
2041         }
2042
2043         return error;
2044 }
2045
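/*
 * Illustrative sketch, not part of the original file: the commands handled
 * above are issued from user space roughly as follows.  This assumes
 * headers new enough for F_ADD_SEALS and MFD_ALLOW_SEALING; glibc of this
 * era has no memfd_create() wrapper, hence the raw syscall():
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/memfd.h>
 *
 *	int fd = syscall(__NR_memfd_create, "buf", MFD_ALLOW_SEALING);
 *	ftruncate(fd, 4096);
 *	// ... fill the buffer, then freeze its size and contents:
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW | F_SEAL_WRITE);
 *	int seals = fcntl(fd, F_GET_SEALS);	// reports the seals just added
 *
 * Adding F_SEAL_SEAL as well makes any further F_ADD_SEALS call fail with
 * EPERM, per the F_SEAL_SEAL check in shmem_add_seals().
 */
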
2046 static long shmem_fallocate(struct file *file, int mode, loff_t offset,
2047                                                          loff_t len)
2048 {
2049         struct inode *inode = file_inode(file);
2050         struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
2051         struct shmem_inode_info *info = SHMEM_I(inode);
2052         struct shmem_falloc shmem_falloc;
2053         pgoff_t start, index, end;
2054         int error;
2055
2056         if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2057                 return -EOPNOTSUPP;
2058
2059         mutex_lock(&inode->i_mutex);
2060
2061         if (mode & FALLOC_FL_PUNCH_HOLE) {
2062                 struct address_space *mapping = file->f_mapping;
2063                 loff_t unmap_start = round_up(offset, PAGE_SIZE);
2064                 loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
2065                 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
2066
2067                 /* protected by i_mutex */
2068                 if (info->seals & F_SEAL_WRITE) {
2069                         error = -EPERM;
2070                         goto out;
2071                 }
2072
2073                 shmem_falloc.waitq = &shmem_falloc_waitq;
2074                 shmem_falloc.start = unmap_start >> PAGE_SHIFT;
2075                 shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
2076                 spin_lock(&inode->i_lock);
2077                 inode->i_private = &shmem_falloc;
2078                 spin_unlock(&inode->i_lock);
2079
2080                 if ((u64)unmap_end > (u64)unmap_start)
2081                         unmap_mapping_range(mapping, unmap_start,
2082                                             1 + unmap_end - unmap_start, 0);
2083                 shmem_truncate_range(inode, offset, offset + len - 1);
2084                 /* No need to unmap again: hole-punching leaves COWed pages */
2085
2086                 spin_lock(&inode->i_lock);
2087                 inode->i_private = NULL;
2088                 wake_up_all(&shmem_falloc_waitq);
2089                 spin_unlock(&inode->i_lock);
2090                 error = 0;
2091                 goto out;
2092         }
2093
2094         /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
2095         error = inode_newsize_ok(inode, offset + len);
2096         if (error)
2097                 goto out;
2098
2099         if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
2100                 error = -EPERM;
2101                 goto out;
2102         }
2103
2104         start = offset >> PAGE_CACHE_SHIFT;
2105         end = (offset + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
2106         /* Try to avoid a swapstorm if len is impossible to satisfy */
2107         if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
2108                 error = -ENOSPC;
2109                 goto out;
2110         }
2111
2112         shmem_falloc.waitq = NULL;
2113         shmem_falloc.start = start;
2114         shmem_falloc.next  = start;
2115         shmem_falloc.nr_falloced = 0;
2116         shmem_falloc.nr_unswapped = 0;
2117         spin_lock(&inode->i_lock);
2118         inode->i_private = &shmem_falloc;
2119         spin_unlock(&inode->i_lock);
2120
2121         for (index = start; index < end; index++) {
2122                 struct page *page;
2123
2124                 /*
2125                  * Good, the fallocate(2) manpage permits EINTR: we may have
2126                  * been interrupted because we are using up too much memory.
2127                  */
2128                 if (signal_pending(current))
2129                         error = -EINTR;
2130                 else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
2131                         error = -ENOMEM;
2132                 else
2133                         error = shmem_getpage(inode, index, &page, SGP_FALLOC,
2134                                                                         NULL);
2135                 if (error) {
2136                         /* Remove the !PageUptodate pages we added */
2137                         shmem_undo_range(inode,
2138                                 (loff_t)start << PAGE_CACHE_SHIFT,
2139                                 (loff_t)index << PAGE_CACHE_SHIFT, true);
2140                         goto undone;
2141                 }
2142
2143                 /*
2144                  * Inform shmem_writepage() how far we have reached.
2145                  * No need for lock or barrier: we have the page lock.
2146                  */
2147                 shmem_falloc.next++;
2148                 if (!PageUptodate(page))
2149                         shmem_falloc.nr_falloced++;
2150
2151                 /*
2152                  * If !PageUptodate, leave it that way so that freeable pages
2153                  * can be recognized if we need to rollback on error later.
2154                  * But set_page_dirty so that memory pressure will swap rather
2155                  * than free the pages we are allocating (and SGP_CACHE pages
2156                  * might still be clean: we now need to mark those dirty too).
2157                  */
2158                 set_page_dirty(page);
2159                 unlock_page(page);
2160                 page_cache_release(page);
2161                 cond_resched();
2162         }
2163
2164         if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
2165                 i_size_write(inode, offset + len);
2166         inode->i_ctime = CURRENT_TIME;
2167 undone:
2168         spin_lock(&inode->i_lock);
2169         inode->i_private = NULL;
2170         spin_unlock(&inode->i_lock);
2171 out:
2172         mutex_unlock(&inode->i_mutex);
2173         return error;
2174 }
2175
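/*
 * Illustrative sketch, not part of the original file: both branches of
 * shmem_fallocate() map directly onto fallocate(2).  Note that the VFS
 * requires FALLOC_FL_PUNCH_HOLE to be paired with FALLOC_FL_KEEP_SIZE:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>	// fallocate(), FALLOC_FL_*
 *
 *	// Free the pages backing bytes [64K, 128K) without changing i_size:
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  65536, 65536);
 *
 *	// Preallocate 1MB, charged against max_blocks immediately:
 *	fallocate(fd, 0, 0, 1048576);
 *
 * Faults racing with the hole punch wait on shmem_falloc_waitq, as handled
 * in shmem_fault() earlier in this file.
 */
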
2176 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
2177 {
2178         struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
2179
2180         buf->f_type = TMPFS_MAGIC;
2181         buf->f_bsize = PAGE_CACHE_SIZE;
2182         buf->f_namelen = NAME_MAX;
2183         if (sbinfo->max_blocks) {
2184                 buf->f_blocks = sbinfo->max_blocks;
2185                 buf->f_bavail =
2186                 buf->f_bfree  = sbinfo->max_blocks -
2187                                 percpu_counter_sum(&sbinfo->used_blocks);
2188         }
2189         if (sbinfo->max_inodes) {
2190                 buf->f_files = sbinfo->max_inodes;
2191                 buf->f_ffree = sbinfo->free_inodes;
2192         }
2193         /* else leave those fields 0 like simple_statfs */
2194         return 0;
2195 }
2196
2197 /*
2198  * File creation. Allocate an inode, and we're done.
2199  */
2200 static int
2201 shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
2202 {
2203         struct inode *inode;
2204         int error = -ENOSPC;
2205
2206         inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
2207         if (inode) {
2208                 error = simple_acl_create(dir, inode);
2209                 if (error)
2210                         goto out_iput;
2211                 error = security_inode_init_security(inode, dir,
2212                                                      &dentry->d_name,
2213                                                      shmem_initxattrs, NULL);
2214                 if (error && error != -EOPNOTSUPP)
2215                         goto out_iput;
2216
2217                 error = 0;
2218                 dir->i_size += BOGO_DIRENT_SIZE;
2219                 dir->i_ctime = dir->i_mtime = CURRENT_TIME;
2220                 d_instantiate(dentry, inode);
2221                 dget(dentry); /* Extra count - pin the dentry in core */
2222         }
2223         return error;
2224 out_iput:
2225         iput(inode);
2226         return error;
2227 }
2228
2229 static int
2230 shmem_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
2231 {
2232         struct inode *inode;
2233         int error = -ENOSPC;
2234
2235         inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE);
2236         if (inode) {
2237                 error = security_inode_init_security(inode, dir,
2238                                                      NULL,
2239                                                      shmem_initxattrs, NULL);
2240                 if (error && error != -EOPNOTSUPP)
2241                         goto out_iput;
2242                 error = simple_acl_create(dir, inode);
2243                 if (error)
2244                         goto out_iput;
2245                 d_tmpfile(dentry, inode);
2246         }
2247         return error;
2248 out_iput:
2249         iput(inode);
2250         return error;
2251 }
2252
2253 static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
2254 {
2255         int error;
2256
2257         if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
2258                 return error;
2259         inc_nlink(dir);
2260         return 0;
2261 }
2262
2263 static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode,
2264                 bool excl)
2265 {
2266         return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
2267 }
2268
2269 /*
2270  * Link a file.
2271  */
2272 static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
2273 {
2274         struct inode *inode = old_dentry->d_inode;
2275         int ret;
2276
2277         /*
2278          * No ordinary (disk based) filesystem counts links as inodes;
2279          * but each new link needs a new dentry, pinning lowmem, and
2280          * tmpfs dentries cannot be pruned until they are unlinked.
2281          */
2282         ret = shmem_reserve_inode(inode->i_sb);
2283         if (ret)
2284                 goto out;
2285
2286         dir->i_size += BOGO_DIRENT_SIZE;
2287         inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
2288         inc_nlink(inode);
2289         ihold(inode);   /* New dentry reference */
2290         dget(dentry);           /* Extra pinning count for the created dentry */
2291         d_instantiate(dentry, inode);
2292 out:
2293         return ret;
2294 }
2295
2296 static int shmem_unlink(struct inode *dir, struct dentry *dentry)
2297 {
2298         struct inode *inode = dentry->d_inode;
2299
2300         if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
2301                 shmem_free_inode(inode->i_sb);
2302
2303         dir->i_size -= BOGO_DIRENT_SIZE;
2304         inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
2305         drop_nlink(inode);
2306         dput(dentry);   /* Undo the count from "create" - this does all the work */
2307         return 0;
2308 }
2309
2310 static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
2311 {
2312         if (!simple_empty(dentry))
2313                 return -ENOTEMPTY;
2314
2315         drop_nlink(dentry->d_inode);
2316         drop_nlink(dir);
2317         return shmem_unlink(dir, dentry);
2318 }
2319
2320 static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
2321 {
2322         bool old_is_dir = d_is_dir(old_dentry);
2323         bool new_is_dir = d_is_dir(new_dentry);
2324
2325         if (old_dir != new_dir && old_is_dir != new_is_dir) {
2326                 if (old_is_dir) {
2327                         drop_nlink(old_dir);
2328                         inc_nlink(new_dir);
2329                 } else {
2330                         drop_nlink(new_dir);
2331                         inc_nlink(old_dir);
2332                 }
2333         }
2334         old_dir->i_ctime = old_dir->i_mtime =
2335         new_dir->i_ctime = new_dir->i_mtime =
2336         old_dentry->d_inode->i_ctime =
2337         new_dentry->d_inode->i_ctime = CURRENT_TIME;
2338
2339         return 0;
2340 }
2341
2342 static int shmem_whiteout(struct inode *old_dir, struct dentry *old_dentry)
2343 {
2344         struct dentry *whiteout;
2345         int error;
2346
2347         whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
2348         if (!whiteout)
2349                 return -ENOMEM;
2350
2351         error = shmem_mknod(old_dir, whiteout,
2352                             S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
2353         dput(whiteout);
2354         if (error)
2355                 return error;
2356
2357         /*
2358          * Cheat and hash the whiteout while the old dentry is still in
2359          * place, instead of playing games with FS_RENAME_DOES_D_MOVE.
2360          *
2361          * d_lookup() will consistently find one of them at this point;
2362          * exactly which one it finds does not matter.
2363          */
2364         d_rehash(whiteout);
2365         return 0;
2366 }
2367
2368 /*
2369  * The VFS layer already does all the dentry stuff for rename;
2370  * we just have to decrement the usage count for the target if
2371  * it exists, so that the VFS layer correctly frees it when it
2372  * gets overwritten.
2373  */
2374 static int shmem_rename2(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags)
2375 {
2376         struct inode *inode = old_dentry->d_inode;
2377         int they_are_dirs = S_ISDIR(inode->i_mode);
2378
2379         if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
2380                 return -EINVAL;
2381
2382         if (flags & RENAME_EXCHANGE)
2383                 return shmem_exchange(old_dir, old_dentry, new_dir, new_dentry);
2384
2385         if (!simple_empty(new_dentry))
2386                 return -ENOTEMPTY;
2387
2388         if (flags & RENAME_WHITEOUT) {
2389                 int error;
2390
2391                 error = shmem_whiteout(old_dir, old_dentry);
2392                 if (error)
2393                         return error;
2394         }
2395
2396         if (new_dentry->d_inode) {
2397                 (void) shmem_unlink(new_dir, new_dentry);
2398                 if (they_are_dirs) {
2399                         drop_nlink(new_dentry->d_inode);
2400                         drop_nlink(old_dir);
2401                 }
2402         } else if (they_are_dirs) {
2403                 drop_nlink(old_dir);
2404                 inc_nlink(new_dir);
2405         }
2406
2407         old_dir->i_size -= BOGO_DIRENT_SIZE;
2408         new_dir->i_size += BOGO_DIRENT_SIZE;
2409         old_dir->i_ctime = old_dir->i_mtime =
2410         new_dir->i_ctime = new_dir->i_mtime =
2411         inode->i_ctime = CURRENT_TIME;
2412         return 0;
2413 }
2414
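/*
 * Illustrative sketch, not part of the original file: the flags accepted
 * above correspond to renameat2(2); glibc of this era may lack a wrapper,
 * hence the raw syscall():
 *
 *	#include <fcntl.h>	// AT_FDCWD
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/fs.h>	// RENAME_EXCHANGE, RENAME_WHITEOUT
 *
 *	// Atomically swap two entries (shmem_exchange() above):
 *	syscall(SYS_renameat2, AT_FDCWD, "a", AT_FDCWD, "b", RENAME_EXCHANGE);
 *
 *	// Move "a" away and leave a whiteout behind (shmem_whiteout() above):
 *	syscall(SYS_renameat2, AT_FDCWD, "a", AT_FDCWD, "c", RENAME_WHITEOUT);
 */
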
2415 static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
2416 {
2417         int error;
2418         int len;
2419         struct inode *inode;
2420         struct page *page;
2421         char *kaddr;
2422         struct shmem_inode_info *info;
2423
2424         len = strlen(symname) + 1;
2425         if (len > PAGE_CACHE_SIZE)
2426                 return -ENAMETOOLONG;
2427
2428         inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
2429         if (!inode)
2430                 return -ENOSPC;
2431
2432         error = security_inode_init_security(inode, dir, &dentry->d_name,
2433                                              shmem_initxattrs, NULL);
2434         if (error) {
2435                 if (error != -EOPNOTSUPP) {
2436                         iput(inode);
2437                         return error;
2438                 }
2439                 error = 0;
2440         }
2441
2442         info = SHMEM_I(inode);
2443         inode->i_size = len-1;
2444         if (len <= SHORT_SYMLINK_LEN) {
2445                 info->symlink = kmemdup(symname, len, GFP_KERNEL);
2446                 if (!info->symlink) {
2447                         iput(inode);
2448                         return -ENOMEM;
2449                 }
2450                 inode->i_op = &shmem_short_symlink_operations;
2451         } else {
2452                 error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
2453                 if (error) {
2454                         iput(inode);
2455                         return error;
2456                 }
2457                 inode->i_mapping->a_ops = &shmem_aops;
2458                 inode->i_op = &shmem_symlink_inode_operations;
2459                 kaddr = kmap_atomic(page);
2460                 memcpy(kaddr, symname, len);
2461                 kunmap_atomic(kaddr);
2462                 SetPageUptodate(page);
2463                 set_page_dirty(page);
2464                 unlock_page(page);
2465                 page_cache_release(page);
2466         }
2467         dir->i_size += BOGO_DIRENT_SIZE;
2468         dir->i_ctime = dir->i_mtime = CURRENT_TIME;
2469         d_instantiate(dentry, inode);
2470         dget(dentry);
2471         return 0;
2472 }
2473
2474 static void *shmem_follow_short_symlink(struct dentry *dentry, struct nameidata *nd)
2475 {
2476         nd_set_link(nd, SHMEM_I(dentry->d_inode)->symlink);
2477         return NULL;
2478 }
2479
2480 static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
2481 {
2482         struct page *page = NULL;
2483         int error = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
2484         nd_set_link(nd, error ? ERR_PTR(error) : kmap(page));
2485         if (page)
2486                 unlock_page(page);
2487         return page;
2488 }
2489
2490 static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
2491 {
2492         if (!IS_ERR(nd_get_link(nd))) {
2493                 struct page *page = cookie;
2494                 kunmap(page);
2495                 mark_page_accessed(page);
2496                 page_cache_release(page);
2497         }
2498 }
2499
2500 #ifdef CONFIG_TMPFS_XATTR
2501 /*
2502  * Superblocks without xattr inode operations may get some security.* xattr
2503  * support from the LSM "for free". As soon as we have any other xattrs
2504  * like ACLs, we also need to implement the security.* handlers at
2505  * filesystem level, though.
2506  */
2507
2508 /*
2509  * Callback for security_inode_init_security() for acquiring xattrs.
2510  */
2511 static int shmem_initxattrs(struct inode *inode,
2512                             const struct xattr *xattr_array,
2513                             void *fs_info)
2514 {
2515         struct shmem_inode_info *info = SHMEM_I(inode);
2516         const struct xattr *xattr;
2517         struct simple_xattr *new_xattr;
2518         size_t len;
2519
2520         for (xattr = xattr_array; xattr->name != NULL; xattr++) {
2521                 new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
2522                 if (!new_xattr)
2523                         return -ENOMEM;
2524
2525                 len = strlen(xattr->name) + 1;
2526                 new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
2527                                           GFP_KERNEL);
2528                 if (!new_xattr->name) {
2529                         kfree(new_xattr);
2530                         return -ENOMEM;
2531                 }
2532
2533                 memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
2534                        XATTR_SECURITY_PREFIX_LEN);
2535                 memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
2536                        xattr->name, len);
2537
2538                 simple_xattr_list_add(&info->xattrs, new_xattr);
2539         }
2540
2541         return 0;
2542 }
2543
2544 static const struct xattr_handler *shmem_xattr_handlers[] = {
2545 #ifdef CONFIG_TMPFS_POSIX_ACL
2546         &posix_acl_access_xattr_handler,
2547         &posix_acl_default_xattr_handler,
2548 #endif
2549         NULL
2550 };
2551
2552 static int shmem_xattr_validate(const char *name)
2553 {
2554         struct { const char *prefix; size_t len; } arr[] = {
2555                 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
2556                 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
2557         };
2558         int i;
2559
2560         for (i = 0; i < ARRAY_SIZE(arr); i++) {
2561                 size_t preflen = arr[i].len;
2562                 if (strncmp(name, arr[i].prefix, preflen) == 0) {
2563                         if (!name[preflen])
2564                                 return -EINVAL;
2565                         return 0;
2566                 }
2567         }
2568         return -EOPNOTSUPP;
2569 }
2570
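/*
 * Illustrative outcomes of the validation above (not part of the original
 * file):
 *
 *	"security.selinux"		-> 0		(handled via simple_xattr_*)
 *	"trusted.overlay.opaque"	-> 0
 *	"trusted."			-> -EINVAL	(prefix with empty remainder)
 *	"user.comment"			-> -EOPNOTSUPP	(namespace not supported here)
 */
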
2571 static ssize_t shmem_getxattr(struct dentry *dentry, const char *name,
2572                               void *buffer, size_t size)
2573 {
2574         struct shmem_inode_info *info = SHMEM_I(dentry->d_inode);
2575         int err;
2576
2577         /*
2578          * If this is a request for a synthetic attribute in the system.*
2579          * namespace use the generic infrastructure to resolve a handler
2580          * for it via sb->s_xattr.
2581          */
2582         if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
2583                 return generic_getxattr(dentry, name, buffer, size);
2584
2585         err = shmem_xattr_validate(name);
2586         if (err)
2587                 return err;
2588
2589         return simple_xattr_get(&info->xattrs, name, buffer, size);
2590 }
2591
2592 static int shmem_setxattr(struct dentry *dentry, const char *name,
2593                           const void *value, size_t size, int flags)
2594 {
2595         struct shmem_inode_info *info = SHMEM_I(dentry->d_inode);
2596         int err;
2597
2598         /*
2599          * If this is a request for a synthetic attribute in the system.*
2600          * namespace use the generic infrastructure to resolve a handler
2601          * for it via sb->s_xattr.
2602          */
2603         if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
2604                 return generic_setxattr(dentry, name, value, size, flags);
2605
2606         err = shmem_xattr_validate(name);
2607         if (err)
2608                 return err;
2609
2610         return simple_xattr_set(&info->xattrs, name, value, size, flags);
2611 }
2612
2613 static int shmem_removexattr(struct dentry *dentry, const char *name)
2614 {
2615         struct shmem_inode_info *info = SHMEM_I(dentry->d_inode);
2616         int err;
2617
2618         /*
2619          * If this is a request for a synthetic attribute in the system.*
2620          * namespace use the generic infrastructure to resolve a handler
2621          * for it via sb->s_xattr.
2622          */
2623         if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
2624                 return generic_removexattr(dentry, name);
2625
2626         err = shmem_xattr_validate(name);
2627         if (err)
2628                 return err;
2629
2630         return simple_xattr_remove(&info->xattrs, name);
2631 }
2632
2633 static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
2634 {
2635         struct shmem_inode_info *info = SHMEM_I(dentry->d_inode);
2636         return simple_xattr_list(&info->xattrs, buffer, size);
2637 }
2638 #endif /* CONFIG_TMPFS_XATTR */
2639
2640 static const struct inode_operations shmem_short_symlink_operations = {
2641         .readlink       = generic_readlink,
2642         .follow_link    = shmem_follow_short_symlink,
2643 #ifdef CONFIG_TMPFS_XATTR
2644         .setxattr       = shmem_setxattr,
2645         .getxattr       = shmem_getxattr,
2646         .listxattr      = shmem_listxattr,
2647         .removexattr    = shmem_removexattr,
2648 #endif
2649 };
2650
2651 static const struct inode_operations shmem_symlink_inode_operations = {
2652         .readlink       = generic_readlink,
2653         .follow_link    = shmem_follow_link,
2654         .put_link       = shmem_put_link,
2655 #ifdef CONFIG_TMPFS_XATTR
2656         .setxattr       = shmem_setxattr,
2657         .getxattr       = shmem_getxattr,
2658         .listxattr      = shmem_listxattr,
2659         .removexattr    = shmem_removexattr,
2660 #endif
2661 };
2662
2663 static struct dentry *shmem_get_parent(struct dentry *child)
2664 {
2665         return ERR_PTR(-ESTALE);
2666 }
2667
2668 static int shmem_match(struct inode *ino, void *vfh)
2669 {
2670         __u32 *fh = vfh;
2671         __u64 inum = fh[2];
2672         inum = (inum << 32) | fh[1];
2673         return ino->i_ino == inum && fh[0] == ino->i_generation;
2674 }
2675
2676 static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
2677                 struct fid *fid, int fh_len, int fh_type)
2678 {
2679         struct inode *inode;
2680         struct dentry *dentry = NULL;
2681         u64 inum;
2682
2683         if (fh_len < 3)
2684                 return NULL;
2685
2686         inum = fid->raw[2];
2687         inum = (inum << 32) | fid->raw[1];
2688
2689         inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
2690                         shmem_match, fid->raw);
2691         if (inode) {
2692                 dentry = d_find_alias(inode);
2693                 iput(inode);
2694         }
2695
2696         return dentry;
2697 }
2698
2699 static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
2700                                 struct inode *parent)
2701 {
2702         if (*len < 3) {
2703                 *len = 3;
2704                 return FILEID_INVALID;
2705         }
2706
2707         if (inode_unhashed(inode)) {
2708                 /* Unfortunately insert_inode_hash is not idempotent,
2709                  * so as we hash inodes here rather than at creation
2710                  * time, we need a lock to ensure we only try
2711                  * to do it once
2712                  */
2713                 static DEFINE_SPINLOCK(lock);
2714                 spin_lock(&lock);
2715                 if (inode_unhashed(inode))
2716                         __insert_inode_hash(inode,
2717                                             inode->i_ino + inode->i_generation);
2718                 spin_unlock(&lock);
2719         }
2720
2721         fh[0] = inode->i_generation;
2722         fh[1] = inode->i_ino;
2723         fh[2] = ((__u64)inode->i_ino) >> 32;
2724
2725         *len = 3;
2726         return 1;
2727 }
2728
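/*
 * Illustrative layout of the file handle produced above, three __u32 words
 * (not part of the original file):
 *
 *	fh[0] = i_generation;
 *	fh[1] = i_ino & 0xffffffff;	// low word
 *	fh[2] = (__u64)i_ino >> 32;	// high word, 0 for 32-bit inode numbers
 *
 * shmem_fh_to_dentry() reassembles inum = (fh[2] << 32) | fh[1] and looks
 * the inode up under hash inum + fh[0], the same i_ino + i_generation value
 * that shmem_encode_fh() hashed it under.
 */
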
2729 static const struct export_operations shmem_export_ops = {
2730         .get_parent     = shmem_get_parent,
2731         .encode_fh      = shmem_encode_fh,
2732         .fh_to_dentry   = shmem_fh_to_dentry,
2733 };
2734
2735 static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
2736                                bool remount)
2737 {
2738         char *this_char, *value, *rest;
2739         struct mempolicy *mpol = NULL;
2740         uid_t uid;
2741         gid_t gid;
2742
2743         while (options != NULL) {
2744                 this_char = options;
2745                 for (;;) {
2746                         /*
2747                          * NUL-terminate this option: unfortunately,
2748                          * mount options form a comma-separated list,
2749                          * but mpol's nodelist may also contain commas.
2750                          */
2751                         options = strchr(options, ',');
2752                         if (options == NULL)
2753                                 break;
2754                         options++;
2755                         if (!isdigit(*options)) {
2756                                 options[-1] = '\0';
2757                                 break;
2758                         }
2759                 }
2760                 if (!*this_char)
2761                         continue;
2762                 if ((value = strchr(this_char,'=')) != NULL) {
2763                         *value++ = 0;
2764                 } else {
2765                         printk(KERN_ERR
2766                             "tmpfs: No value for mount option '%s'\n",
2767                             this_char);
2768                         goto error;
2769                 }
2770
2771                 if (!strcmp(this_char,"size")) {
2772                         unsigned long long size;
2773                         size = memparse(value,&rest);
2774                         if (*rest == '%') {
2775                                 size <<= PAGE_SHIFT;
2776                                 size *= totalram_pages;
2777                                 do_div(size, 100);
2778                                 rest++;
2779                         }
2780                         if (*rest)
2781                                 goto bad_val;
2782                         sbinfo->max_blocks =
2783                                 DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
2784                 } else if (!strcmp(this_char,"nr_blocks")) {
2785                         sbinfo->max_blocks = memparse(value, &rest);
2786                         if (*rest)
2787                                 goto bad_val;
2788                 } else if (!strcmp(this_char,"nr_inodes")) {
2789                         sbinfo->max_inodes = memparse(value, &rest);
2790                         if (*rest)
2791                                 goto bad_val;
2792                 } else if (!strcmp(this_char,"mode")) {
2793                         if (remount)
2794                                 continue;
2795                         sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
2796                         if (*rest)
2797                                 goto bad_val;
2798                 } else if (!strcmp(this_char,"uid")) {
2799                         if (remount)
2800                                 continue;
2801                         uid = simple_strtoul(value, &rest, 0);
2802                         if (*rest)
2803                                 goto bad_val;
2804                         sbinfo->uid = make_kuid(current_user_ns(), uid);
2805                         if (!uid_valid(sbinfo->uid))
2806                                 goto bad_val;
2807                 } else if (!strcmp(this_char,"gid")) {
2808                         if (remount)
2809                                 continue;
2810                         gid = simple_strtoul(value, &rest, 0);
2811                         if (*rest)
2812                                 goto bad_val;
2813                         sbinfo->gid = make_kgid(current_user_ns(), gid);
2814                         if (!gid_valid(sbinfo->gid))
2815                                 goto bad_val;
2816                 } else if (!strcmp(this_char,"mpol")) {
2817                         mpol_put(mpol);
2818                         mpol = NULL;
2819                         if (mpol_parse_str(value, &mpol))
2820                                 goto bad_val;
2821                 } else {
2822                         printk(KERN_ERR "tmpfs: Bad mount option %s\n",
2823                                this_char);
2824                         goto error;
2825                 }
2826         }
2827         sbinfo->mpol = mpol;
2828         return 0;
2829
2830 bad_val:
2831         printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
2832                value, this_char);
2833 error:
2834         mpol_put(mpol);
2835         return 1;
2836
2837 }
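
/*
 * Illustrative sketch, not part of the original file: how an option
 * string reaches shmem_parse_options() above.  The comma scanner keeps
 * a digit that follows a comma inside an mpol nodelist, so "0,2" below
 * stays attached to "mpol=bind" while ",size=10M" starts a new option.
 * Parsing is destructive (options are NUL-terminated in place), hence
 * the writable array.  Guarded out because it is an example only.
 */
#if 0
static void shmem_parse_options_example(struct shmem_sb_info *sbinfo)
{
	char opts[] = "mpol=bind:0,2,size=10M,nr_inodes=100";

	if (shmem_parse_options(opts, sbinfo, false))
		printk(KERN_ERR "tmpfs example: bad mount options\n");
}
#endif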
2838
2839 static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
2840 {
2841         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2842         struct shmem_sb_info config = *sbinfo;
2843         unsigned long inodes;
2844         int error = -EINVAL;
2845
2846         config.mpol = NULL;
2847         if (shmem_parse_options(data, &config, true))
2848                 return error;
2849
2850         spin_lock(&sbinfo->stat_lock);
2851         inodes = sbinfo->max_inodes - sbinfo->free_inodes;
2852         if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0)
2853                 goto out;
2854         if (config.max_inodes < inodes)
2855                 goto out;
2856         /*
2857          * Those tests disallow limited->unlimited while any are in use;
2858          * but we must separately disallow unlimited->limited, because
2859          * in that case we have no record of how much is already in use.
2860          */
2861         if (config.max_blocks && !sbinfo->max_blocks)
2862                 goto out;
2863         if (config.max_inodes && !sbinfo->max_inodes)
2864                 goto out;
2865
2866         error = 0;
2867         sbinfo->max_blocks  = config.max_blocks;
2868         sbinfo->max_inodes  = config.max_inodes;
2869         sbinfo->free_inodes = config.max_inodes - inodes;
2870
2871         /*
2872          * Preserve previous mempolicy unless mpol remount option was specified.
2873          */
2874         if (config.mpol) {
2875                 mpol_put(sbinfo->mpol);
2876                 sbinfo->mpol = config.mpol;     /* transfers initial ref */
2877         }
2878 out:
2879         spin_unlock(&sbinfo->stat_lock);
2880         return error;
2881 }
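
/*
 * Illustrative userspace sketch, not part of the original file: the
 * remount path above is what services "mount -o remount,size=2G".
 * Shrinking below current usage fails, as does switching between
 * limited and unlimited while blocks or inodes are in use.  The
 * /dev/shm mount point is an assumption of this example.
 */
#if 0
#include <sys/mount.h>

int tmpfs_remount_example(void)
{
	return mount("none", "/dev/shm", "tmpfs", MS_REMOUNT, "size=2G");
}
#endif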
2882
2883 static int shmem_show_options(struct seq_file *seq, struct dentry *root)
2884 {
2885         struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);
2886
2887         if (sbinfo->max_blocks != shmem_default_max_blocks())
2888                 seq_printf(seq, ",size=%luk",
2889                         sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10));
2890         if (sbinfo->max_inodes != shmem_default_max_inodes())
2891                 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
2892         if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
2893                 seq_printf(seq, ",mode=%03ho", sbinfo->mode);
2894         if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
2895                 seq_printf(seq, ",uid=%u",
2896                                 from_kuid_munged(&init_user_ns, sbinfo->uid));
2897         if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
2898                 seq_printf(seq, ",gid=%u",
2899                                 from_kgid_munged(&init_user_ns, sbinfo->gid));
2900         shmem_show_mpol(seq, sbinfo->mpol);
2901         return 0;
2902 }
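
/*
 * For reference, not part of the original file: with non-default
 * settings, the function above contributes options such as
 * ",size=1048576k,nr_inodes=100000,mode=700,uid=1000,gid=1000" to the
 * /proc/mounts line; values still at their defaults are suppressed.
 */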
2903
2904 #define MFD_NAME_PREFIX "memfd:"
2905 #define MFD_NAME_PREFIX_LEN (sizeof(MFD_NAME_PREFIX) - 1)
2906 #define MFD_NAME_MAX_LEN (NAME_MAX - MFD_NAME_PREFIX_LEN)
2907
2908 #define MFD_ALL_FLAGS (MFD_CLOEXEC | MFD_ALLOW_SEALING)
2909
2910 SYSCALL_DEFINE2(memfd_create,
2911                 const char __user *, uname,
2912                 unsigned int, flags)
2913 {
2914         struct shmem_inode_info *info;
2915         struct file *file;
2916         int fd, error;
2917         char *name;
2918         long len;
2919
2920         if (flags & ~(unsigned int)MFD_ALL_FLAGS)
2921                 return -EINVAL;
2922
2923         /* length includes terminating zero */
2924         len = strnlen_user(uname, MFD_NAME_MAX_LEN + 1);
2925         if (len <= 0)
2926                 return -EFAULT;
2927         if (len > MFD_NAME_MAX_LEN + 1)
2928                 return -EINVAL;
2929
2930         name = kmalloc(len + MFD_NAME_PREFIX_LEN, GFP_TEMPORARY);
2931         if (!name)
2932                 return -ENOMEM;
2933
2934         strcpy(name, MFD_NAME_PREFIX);
2935         if (copy_from_user(&name[MFD_NAME_PREFIX_LEN], uname, len)) {
2936                 error = -EFAULT;
2937                 goto err_name;
2938         }
2939
2940         /* terminating-zero may have changed after strnlen_user() returned */
2941         if (name[len + MFD_NAME_PREFIX_LEN - 1]) {
2942                 error = -EFAULT;
2943                 goto err_name;
2944         }
2945
2946         fd = get_unused_fd_flags((flags & MFD_CLOEXEC) ? O_CLOEXEC : 0);
2947         if (fd < 0) {
2948                 error = fd;
2949                 goto err_name;
2950         }
2951
2952         file = shmem_file_setup(name, 0, VM_NORESERVE);
2953         if (IS_ERR(file)) {
2954                 error = PTR_ERR(file);
2955                 goto err_fd;
2956         }
2957         info = SHMEM_I(file_inode(file));
2958         file->f_mode |= FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE;
2959         file->f_flags |= O_RDWR | O_LARGEFILE;
2960         if (flags & MFD_ALLOW_SEALING)
2961                 info->seals &= ~F_SEAL_SEAL;
2962
2963         fd_install(fd, file);
2964         kfree(name);
2965         return fd;
2966
2967 err_fd:
2968         put_unused_fd(fd);
2969 err_name:
2970         kfree(name);
2971         return error;
2972 }
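
/*
 * Illustrative userspace sketch, not part of the original file: the
 * syscall above as a consumer sees it.  Since no libc wrapper existed
 * when memfd_create() was introduced, the example goes through
 * syscall(2); the sealing fcntl() only succeeds because
 * MFD_ALLOW_SEALING cleared F_SEAL_SEAL.  The header mix is a guess
 * at a typical userspace build.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/fcntl.h>
#include <linux/memfd.h>

int memfd_example(void)
{
	int fd = syscall(__NR_memfd_create, "example",
			 MFD_CLOEXEC | MFD_ALLOW_SEALING);

	if (fd < 0)
		return -1;
	ftruncate(fd, 4096);	/* size the backing object */
	fcntl(fd, F_ADD_SEALS, F_SEAL_GROW | F_SEAL_SHRINK);
	return fd;		/* appears as "memfd:example" in /proc/<pid>/maps */
}
#endif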
2973
2974 #endif /* CONFIG_TMPFS */
2975
2976 static void shmem_put_super(struct super_block *sb)
2977 {
2978         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2979
2980         percpu_counter_destroy(&sbinfo->used_blocks);
2981         mpol_put(sbinfo->mpol);
2982         kfree(sbinfo);
2983         sb->s_fs_info = NULL;
2984 }
2985
2986 int shmem_fill_super(struct super_block *sb, void *data, int silent)
2987 {
2988         struct inode *inode;
2989         struct shmem_sb_info *sbinfo;
2990         int err = -ENOMEM;
2991
2992         /* Round up to L1_CACHE_BYTES to resist false sharing */
2993         sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
2994                                 L1_CACHE_BYTES), GFP_KERNEL);
2995         if (!sbinfo)
2996                 return -ENOMEM;
2997
2998         sbinfo->mode = S_IRWXUGO | S_ISVTX;
2999         sbinfo->uid = current_fsuid();
3000         sbinfo->gid = current_fsgid();
3001         sb->s_fs_info = sbinfo;
3002
3003 #ifdef CONFIG_TMPFS
3004         /*
3005          * By default we only allow half of the physical RAM per
3006          * tmpfs instance, limiting inodes to one per page of lowmem;
3007          * but the internal instance is left unlimited.
3008          */
3009         if (!(sb->s_flags & MS_KERNMOUNT)) {
3010                 sbinfo->max_blocks = shmem_default_max_blocks();
3011                 sbinfo->max_inodes = shmem_default_max_inodes();
3012                 if (shmem_parse_options(data, sbinfo, false)) {
3013                         err = -EINVAL;
3014                         goto failed;
3015                 }
3016         } else {
3017                 sb->s_flags |= MS_NOUSER;
3018         }
3019         sb->s_export_op = &shmem_export_ops;
3020         sb->s_flags |= MS_NOSEC;
3021 #else
3022         sb->s_flags |= MS_NOUSER;
3023 #endif
3024
3025         spin_lock_init(&sbinfo->stat_lock);
3026         if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
3027                 goto failed;
3028         sbinfo->free_inodes = sbinfo->max_inodes;
3029
3030         sb->s_maxbytes = MAX_LFS_FILESIZE;
3031         sb->s_blocksize = PAGE_CACHE_SIZE;
3032         sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
3033         sb->s_magic = TMPFS_MAGIC;
3034         sb->s_op = &shmem_ops;
3035         sb->s_time_gran = 1;
3036 #ifdef CONFIG_TMPFS_XATTR
3037         sb->s_xattr = shmem_xattr_handlers;
3038 #endif
3039 #ifdef CONFIG_TMPFS_POSIX_ACL
3040         sb->s_flags |= MS_POSIXACL;
3041 #endif
3042
3043         inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
3044         if (!inode)
3045                 goto failed;
3046         inode->i_uid = sbinfo->uid;
3047         inode->i_gid = sbinfo->gid;
3048         sb->s_root = d_make_root(inode);
3049         if (!sb->s_root)
3050                 goto failed;
3051         return 0;
3052
3053 failed:
3054         shmem_put_super(sb);
3055         return err;
3056 }
3057
3058 static struct kmem_cache *shmem_inode_cachep;
3059
3060 static struct inode *shmem_alloc_inode(struct super_block *sb)
3061 {
3062         struct shmem_inode_info *info;
3063         info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
3064         if (!info)
3065                 return NULL;
3066         return &info->vfs_inode;
3067 }
3068
3069 static void shmem_destroy_callback(struct rcu_head *head)
3070 {
3071         struct inode *inode = container_of(head, struct inode, i_rcu);
3072         kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
3073 }
3074
3075 static void shmem_destroy_inode(struct inode *inode)
3076 {
3077         if (S_ISREG(inode->i_mode))
3078                 mpol_free_shared_policy(&SHMEM_I(inode)->policy);
3079         call_rcu(&inode->i_rcu, shmem_destroy_callback);
3080 }
3081
3082 static void shmem_init_inode(void *foo)
3083 {
3084         struct shmem_inode_info *info = foo;
3085         inode_init_once(&info->vfs_inode);
3086 }
3087
3088 static int shmem_init_inodecache(void)
3089 {
3090         shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
3091                                 sizeof(struct shmem_inode_info),
3092                                 0, SLAB_PANIC, shmem_init_inode);
3093         return 0;
3094 }
3095
3096 static void shmem_destroy_inodecache(void)
3097 {
3098         kmem_cache_destroy(shmem_inode_cachep);
3099 }
3100
3101 static const struct address_space_operations shmem_aops = {
3102         .writepage      = shmem_writepage,
3103         .set_page_dirty = __set_page_dirty_no_writeback,
3104 #ifdef CONFIG_TMPFS
3105         .write_begin    = shmem_write_begin,
3106         .write_end      = shmem_write_end,
3107 #endif
3108 #ifdef CONFIG_MIGRATION
3109         .migratepage    = migrate_page,
3110 #endif
3111         .error_remove_page = generic_error_remove_page,
3112 };
3113
3114 static const struct file_operations shmem_file_operations = {
3115         .mmap           = shmem_mmap,
3116 #ifdef CONFIG_TMPFS
3117         .llseek         = shmem_file_llseek,
3118         .read           = new_sync_read,
3119         .write          = new_sync_write,
3120         .read_iter      = shmem_file_read_iter,
3121         .write_iter     = generic_file_write_iter,
3122         .fsync          = noop_fsync,
3123         .splice_read    = shmem_file_splice_read,
3124         .splice_write   = iter_file_splice_write,
3125         .fallocate      = shmem_fallocate,
3126 #endif
3127 };
3128
3129 static const struct inode_operations shmem_inode_operations = {
3130         .setattr        = shmem_setattr,
3131 #ifdef CONFIG_TMPFS_XATTR
3132         .setxattr       = shmem_setxattr,
3133         .getxattr       = shmem_getxattr,
3134         .listxattr      = shmem_listxattr,
3135         .removexattr    = shmem_removexattr,
3136         .set_acl        = simple_set_acl,
3137 #endif
3138 };
3139
3140 static const struct inode_operations shmem_dir_inode_operations = {
3141 #ifdef CONFIG_TMPFS
3142         .create         = shmem_create,
3143         .lookup         = simple_lookup,
3144         .link           = shmem_link,
3145         .unlink         = shmem_unlink,
3146         .symlink        = shmem_symlink,
3147         .mkdir          = shmem_mkdir,
3148         .rmdir          = shmem_rmdir,
3149         .mknod          = shmem_mknod,
3150         .rename2        = shmem_rename2,
3151         .tmpfile        = shmem_tmpfile,
3152 #endif
3153 #ifdef CONFIG_TMPFS_XATTR
3154         .setxattr       = shmem_setxattr,
3155         .getxattr       = shmem_getxattr,
3156         .listxattr      = shmem_listxattr,
3157         .removexattr    = shmem_removexattr,
3158 #endif
3159 #ifdef CONFIG_TMPFS_POSIX_ACL
3160         .setattr        = shmem_setattr,
3161         .set_acl        = simple_set_acl,
3162 #endif
3163 };
3164
3165 static const struct inode_operations shmem_special_inode_operations = {
3166 #ifdef CONFIG_TMPFS_XATTR
3167         .setxattr       = shmem_setxattr,
3168         .getxattr       = shmem_getxattr,
3169         .listxattr      = shmem_listxattr,
3170         .removexattr    = shmem_removexattr,
3171 #endif
3172 #ifdef CONFIG_TMPFS_POSIX_ACL
3173         .setattr        = shmem_setattr,
3174         .set_acl        = simple_set_acl,
3175 #endif
3176 };
3177
3178 static const struct super_operations shmem_ops = {
3179         .alloc_inode    = shmem_alloc_inode,
3180         .destroy_inode  = shmem_destroy_inode,
3181 #ifdef CONFIG_TMPFS
3182         .statfs         = shmem_statfs,
3183         .remount_fs     = shmem_remount_fs,
3184         .show_options   = shmem_show_options,
3185 #endif
3186         .evict_inode    = shmem_evict_inode,
3187         .drop_inode     = generic_delete_inode,
3188         .put_super      = shmem_put_super,
3189 };
3190
3191 static const struct vm_operations_struct shmem_vm_ops = {
3192         .fault          = shmem_fault,
3193         .map_pages      = filemap_map_pages,
3194 #ifdef CONFIG_NUMA
3195         .set_policy     = shmem_set_policy,
3196         .get_policy     = shmem_get_policy,
3197 #endif
3198 };
3199
3200 static struct dentry *shmem_mount(struct file_system_type *fs_type,
3201         int flags, const char *dev_name, void *data)
3202 {
3203         return mount_nodev(fs_type, flags, data, shmem_fill_super);
3204 }
3205
3206 static struct file_system_type shmem_fs_type = {
3207         .owner          = THIS_MODULE,
3208         .name           = "tmpfs",
3209         .mount          = shmem_mount,
3210         .kill_sb        = kill_litter_super,
3211         .fs_flags       = FS_USERNS_MOUNT,
3212 };
3213
3214 int __init shmem_init(void)
3215 {
3216         int error;
3217
3218         /* If rootfs called this, don't re-init */
3219         if (shmem_inode_cachep)
3220                 return 0;
3221
3222         error = shmem_init_inodecache();
3223         if (error)
3224                 goto out3;
3225
3226         error = register_filesystem(&shmem_fs_type);
3227         if (error) {
3228                 printk(KERN_ERR "Could not register tmpfs\n");
3229                 goto out2;
3230         }
3231
3232         shm_mnt = kern_mount(&shmem_fs_type);
3233         if (IS_ERR(shm_mnt)) {
3234                 error = PTR_ERR(shm_mnt);
3235                 printk(KERN_ERR "Could not kern_mount tmpfs\n");
3236                 goto out1;
3237         }
3238         return 0;
3239
3240 out1:
3241         unregister_filesystem(&shmem_fs_type);
3242 out2:
3243         shmem_destroy_inodecache();
3244 out3:
3245         shm_mnt = ERR_PTR(error);
3246         return error;
3247 }
3248
3249 #else /* !CONFIG_SHMEM */
3250
3251 /*
3252  * tiny-shmem: simple shmemfs and tmpfs using ramfs code
3253  *
3254  * This is intended for small systems where the benefits of the full
3255  * shmem code (swap-backed and resource-limited) are outweighed by
3256  * their complexity. On systems without swap this code should be
3257  * effectively equivalent, but much lighter weight.
3258  */
3259
3260 static struct file_system_type shmem_fs_type = {
3261         .name           = "tmpfs",
3262         .mount          = ramfs_mount,
3263         .kill_sb        = kill_litter_super,
3264         .fs_flags       = FS_USERNS_MOUNT,
3265 };
3266
3267 int __init shmem_init(void)
3268 {
3269         BUG_ON(register_filesystem(&shmem_fs_type) != 0);
3270
3271         shm_mnt = kern_mount(&shmem_fs_type);
3272         BUG_ON(IS_ERR(shm_mnt));
3273
3274         return 0;
3275 }
3276
3277 int shmem_unuse(swp_entry_t swap, struct page *page)
3278 {
3279         return 0;
3280 }
3281
3282 int shmem_lock(struct file *file, int lock, struct user_struct *user)
3283 {
3284         return 0;
3285 }
3286
3287 void shmem_unlock_mapping(struct address_space *mapping)
3288 {
3289 }
3290
3291 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
3292 {
3293         truncate_inode_pages_range(inode->i_mapping, lstart, lend);
3294 }
3295 EXPORT_SYMBOL_GPL(shmem_truncate_range);
3296
3297 #define shmem_vm_ops                            generic_file_vm_ops
3298 #define shmem_file_operations                   ramfs_file_operations
3299 #define shmem_get_inode(sb, dir, mode, dev, flags)      ramfs_get_inode(sb, dir, mode, dev)
3300 #define shmem_acct_size(flags, size)            0
3301 #define shmem_unacct_size(flags, size)          do {} while (0)
3302
3303 #endif /* CONFIG_SHMEM */
3304
3305 /* common code */
3306
3307 static struct dentry_operations anon_ops = {
3308         .d_dname = simple_dname
3309 };
3310
3311 static struct file *__shmem_file_setup(const char *name, loff_t size,
3312                                        unsigned long flags, unsigned int i_flags)
3313 {
3314         struct file *res;
3315         struct inode *inode;
3316         struct path path;
3317         struct super_block *sb;
3318         struct qstr this;
3319
3320         if (IS_ERR(shm_mnt))
3321                 return ERR_CAST(shm_mnt);
3322
3323         if (size < 0 || size > MAX_LFS_FILESIZE)
3324                 return ERR_PTR(-EINVAL);
3325
3326         if (shmem_acct_size(flags, size))
3327                 return ERR_PTR(-ENOMEM);
3328
3329         res = ERR_PTR(-ENOMEM);
3330         this.name = name;
3331         this.len = strlen(name);
3332         this.hash = 0; /* will go */
3333         sb = shm_mnt->mnt_sb;
3334         path.mnt = mntget(shm_mnt);
3335         path.dentry = d_alloc_pseudo(sb, &this);
3336         if (!path.dentry)
3337                 goto put_memory;
3338         d_set_d_op(path.dentry, &anon_ops);
3339
3340         res = ERR_PTR(-ENOSPC);
3341         inode = shmem_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0, flags);
3342         if (!inode)
3343                 goto put_memory;
3344
3345         inode->i_flags |= i_flags;
3346         d_instantiate(path.dentry, inode);
3347         inode->i_size = size;
3348         clear_nlink(inode);     /* It is unlinked */
3349         res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
3350         if (IS_ERR(res))
3351                 goto put_path;
3352
3353         res = alloc_file(&path, FMODE_WRITE | FMODE_READ,
3354                   &shmem_file_operations);
3355         if (IS_ERR(res))
3356                 goto put_path;
3357
3358         return res;
3359
3360 put_memory:
3361         shmem_unacct_size(flags, size);
3362 put_path:
3363         path_put(&path);
3364         return res;
3365 }
3366
3367 /**
3368  * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
3369  *      kernel internal.  There will be NO LSM permission checks against the
3370  *      underlying inode, so users of this interface must do LSM checks at a
3371  *      higher layer.  The one user is the big_key implementation, where LSM
3372  *      checks are provided at the key level rather than the inode level.
3373  * @name: name for dentry (to be seen in /proc/<pid>/maps)
3374  * @size: size to be set for the file
3375  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
3376  */
3377 struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
3378 {
3379         return __shmem_file_setup(name, size, flags, S_PRIVATE);
3380 }
3381
3382 /**
3383  * shmem_file_setup - get an unlinked file living in tmpfs
3384  * @name: name for dentry (to be seen in /proc/<pid>/maps)
3385  * @size: size to be set for the file
3386  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
3387  */
3388 struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
3389 {
3390         return __shmem_file_setup(name, size, flags, 0);
3391 }
3392 EXPORT_SYMBOL_GPL(shmem_file_setup);
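
/*
 * Illustrative sketch, not part of the original file: typical in-kernel
 * use of the exported helper above.  The file is already unlinked, so
 * dropping the final reference with fput() releases the backing store.
 */
#if 0
static struct file *shmem_file_setup_example(void)
{
	struct file *file;

	file = shmem_file_setup("example", 1024 * 1024, VM_NORESERVE);
	if (IS_ERR(file))
		return file;
	/* ... access file->f_mapping, mmap it into a vma, etc ... */
	return file;	/* the caller eventually does fput(file) */
}
#endif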
3393
3394 /**
3395  * shmem_zero_setup - setup a shared anonymous mapping
3396  * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
3397  */
3398 int shmem_zero_setup(struct vm_area_struct *vma)
3399 {
3400         struct file *file;
3401         loff_t size = vma->vm_end - vma->vm_start;
3402
3403         file = shmem_file_setup("dev/zero", size, vma->vm_flags);
3404         if (IS_ERR(file))
3405                 return PTR_ERR(file);
3406
3407         if (vma->vm_file)
3408                 fput(vma->vm_file);
3409         vma->vm_file = file;
3410         vma->vm_ops = &shmem_vm_ops;
3411         return 0;
3412 }
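
/*
 * Illustrative userspace sketch, not part of the original file: a
 * MAP_SHARED | MAP_ANONYMOUS mapping is the usual route into the
 * helper above, with mmap_region() calling shmem_zero_setup() to back
 * the vma with an unlinked tmpfs object.
 */
#if 0
#include <sys/mman.h>

void *shared_anon_example(size_t len)
{
	return mmap(NULL, len, PROT_READ | PROT_WRITE,
		    MAP_SHARED | MAP_ANONYMOUS, -1, 0);
}
#endif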
3413
3414 /**
3415  * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
3416  * @mapping:    the page's address_space
3417  * @index:      the page index
3418  * @gfp:        the page allocator flags to use if allocating
3419  *
3420  * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
3421  * with any new page allocations done using the specified allocation flags.
3422  * But read_cache_page_gfp() uses the ->readpage() method, which does not
3423  * suit tmpfs: it may have pages in swapcache, and needs to find those for
3424  * itself.  The drivers/gpu/drm i915 and ttm drivers rely upon this support.
3425  *
3426  * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
3427  * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
3428  */
3429 struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
3430                                          pgoff_t index, gfp_t gfp)
3431 {
3432 #ifdef CONFIG_SHMEM
3433         struct inode *inode = mapping->host;
3434         struct page *page;
3435         int error;
3436
3437         BUG_ON(mapping->a_ops != &shmem_aops);
3438         error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE, gfp, NULL);
3439         if (error)
3440                 page = ERR_PTR(error);
3441         else
3442                 unlock_page(page);
3443         return page;
3444 #else
3445         /*
3446          * The tiny !SHMEM case uses ramfs without swap
3447          */
3448         return read_cache_page_gfp(mapping, index, gfp);
3449 #endif
3450 }
3451 EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
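
/*
 * Illustrative sketch, not part of the original file: how a driver in
 * the i915/ttm mould might use the helper above with relaxed
 * allocation flags, as the comment preceding it describes.
 */
#if 0
static int shmem_gfp_example(struct address_space *mapping, pgoff_t index)
{
	gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;
	struct page *page = shmem_read_mapping_page_gfp(mapping, index, gfp);

	if (IS_ERR(page))
		return PTR_ERR(page);
	/* ... copy from or map the page ... */
	page_cache_release(page);	/* drop the reference when done */
	return 0;
}
#endif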