mm/shmem.c (pandora-kernel.git)
1 /*
2  * Resizable virtual memory filesystem for Linux.
3  *
4  * Copyright (C) 2000 Linus Torvalds.
5  *               2000 Transmeta Corp.
6  *               2000-2001 Christoph Rohland
7  *               2000-2001 SAP AG
8  *               2002 Red Hat Inc.
9  * Copyright (C) 2002-2011 Hugh Dickins.
10  * Copyright (C) 2011 Google Inc.
11  * Copyright (C) 2002-2005 VERITAS Software Corporation.
12  * Copyright (C) 2004 Andi Kleen, SuSE Labs
13  *
14  * Extended attribute support for tmpfs:
15  * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
16  * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
17  *
18  * tiny-shmem:
19  * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
20  *
21  * This file is released under the GPL.
22  */
23
24 #include <linux/fs.h>
25 #include <linux/init.h>
26 #include <linux/vfs.h>
27 #include <linux/mount.h>
28 #include <linux/pagemap.h>
29 #include <linux/file.h>
30 #include <linux/mm.h>
31 #include <linux/export.h>
32 #include <linux/swap.h>
33
34 static struct vfsmount *shm_mnt;
35
36 #ifdef CONFIG_SHMEM
37 /*
38  * This virtual memory filesystem is heavily based on the ramfs. It
39  * extends ramfs with the ability to use swap and honor resource limits,
40  * which makes it a completely usable filesystem.
41  */
42
43 #include <linux/xattr.h>
44 #include <linux/exportfs.h>
45 #include <linux/posix_acl.h>
46 #include <linux/generic_acl.h>
47 #include <linux/mman.h>
48 #include <linux/string.h>
49 #include <linux/slab.h>
50 #include <linux/backing-dev.h>
51 #include <linux/shmem_fs.h>
52 #include <linux/writeback.h>
53 #include <linux/blkdev.h>
54 #include <linux/pagevec.h>
55 #include <linux/percpu_counter.h>
56 #include <linux/splice.h>
57 #include <linux/security.h>
58 #include <linux/swapops.h>
59 #include <linux/mempolicy.h>
60 #include <linux/namei.h>
61 #include <linux/ctype.h>
62 #include <linux/migrate.h>
63 #include <linux/highmem.h>
64 #include <linux/seq_file.h>
65 #include <linux/magic.h>
66 #include <linux/syscalls.h>
67 #include <linux/fcntl.h>
68 #include <uapi/linux/memfd.h>
69
70 #include <asm/uaccess.h>
71 #include <asm/pgtable.h>
72
73 #define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
74 #define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)
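/*
 * Editorial illustration (not part of the original source), assuming the
 * common case of PAGE_CACHE_SIZE == PAGE_SIZE == 4096:
 *   BLOCKS_PER_PAGE == 8, since inode->i_blocks counts 512-byte units;
 *   VM_ACCT(10000) == 3, since 10000 bytes round up to three whole pages
 *   for memory-commit accounting.
 */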
75
76 /* Pretend that each entry is of this size in directory's i_size */
77 #define BOGO_DIRENT_SIZE 20
78
79 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
80 #define SHORT_SYMLINK_LEN 128
81
82 /*
83  * vmtruncate_range() communicates with shmem_fault via
84  * inode->i_private (with i_mutex making sure that it has only one user at
85  * a time): we would prefer not to enlarge the shmem inode just for that.
86  */
87 struct shmem_falloc {
88         wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
89         pgoff_t start;          /* start of range currently being fallocated */
90         pgoff_t next;           /* the next page offset to be fallocated */
91 };
92
93 struct shmem_xattr {
94         struct list_head list;  /* anchored by shmem_inode_info->xattr_list */
95         char *name;             /* xattr name */
96         size_t size;
97         char value[0];
98 };
99
100 /* Flag allocation requirements to shmem_getpage */
101 enum sgp_type {
102         SGP_READ,       /* don't exceed i_size, don't allocate page */
103         SGP_CACHE,      /* don't exceed i_size, may allocate page */
104         SGP_DIRTY,      /* like SGP_CACHE, but set new page dirty */
105         SGP_WRITE,      /* may exceed i_size, may allocate page */
106 };
107
108 #ifdef CONFIG_TMPFS
109 static unsigned long shmem_default_max_blocks(void)
110 {
111         return totalram_pages / 2;
112 }
113
114 static unsigned long shmem_default_max_inodes(void)
115 {
116         return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
117 }
118 #endif
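/*
 * Editorial note (not part of the original source): with no mount options,
 * a tmpfs instance may therefore grow to at most half of physical RAM, and
 * its inode count is capped by the smaller of lowmem pages and half of RAM.
 * Both limits can be overridden at mount time with the standard tmpfs
 * options, e.g. (illustrative command):
 *   mount -t tmpfs -o size=512m,nr_inodes=100k tmpfs /mnt
 */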
119
120 static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
121         struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type);
122
123 static inline int shmem_getpage(struct inode *inode, pgoff_t index,
124         struct page **pagep, enum sgp_type sgp, int *fault_type)
125 {
126         return shmem_getpage_gfp(inode, index, pagep, sgp,
127                         mapping_gfp_mask(inode->i_mapping), fault_type);
128 }
129
130 static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
131 {
132         return sb->s_fs_info;
133 }
134
135 /*
136  * shmem_file_setup pre-accounts the whole fixed size of a VM object,
137  * for shared memory and for shared anonymous (/dev/zero) mappings
138  * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
139  * consistent with the pre-accounting of private mappings ...
140  */
141 static inline int shmem_acct_size(unsigned long flags, loff_t size)
142 {
143         return (flags & VM_NORESERVE) ?
144                 0 : security_vm_enough_memory_kern(VM_ACCT(size));
145 }
146
147 static inline void shmem_unacct_size(unsigned long flags, loff_t size)
148 {
149         if (!(flags & VM_NORESERVE))
150                 vm_unacct_memory(VM_ACCT(size));
151 }
152
153 /*
154  * ... whereas tmpfs objects are accounted incrementally as
155  * pages are allocated, in order to allow huge sparse files.
156  * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
157  * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
158  */
159 static inline int shmem_acct_block(unsigned long flags)
160 {
161         return (flags & VM_NORESERVE) ?
162                 security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE)) : 0;
163 }
164
165 static inline void shmem_unacct_blocks(unsigned long flags, long pages)
166 {
167         if (flags & VM_NORESERVE)
168                 vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
169 }
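/*
 * Editorial illustration (not part of the original source), assuming 4K
 * pages: a 1MB SysV shm segment or shared /dev/zero mapping is charged
 * VM_ACCT(1MB) == 256 pages up front by shmem_acct_size(), whereas a 1MB
 * sparse tmpfs file (created with VM_NORESERVE) is charged one page at a
 * time by shmem_acct_block() only as pages are actually allocated.
 */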
170
171 static const struct super_operations shmem_ops;
172 static const struct address_space_operations shmem_aops;
173 static const struct file_operations shmem_file_operations;
174 static const struct inode_operations shmem_inode_operations;
175 static const struct inode_operations shmem_dir_inode_operations;
176 static const struct inode_operations shmem_special_inode_operations;
177 static const struct vm_operations_struct shmem_vm_ops;
178
179 static struct backing_dev_info shmem_backing_dev_info  __read_mostly = {
180         .ra_pages       = 0,    /* No readahead */
181         .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
182 };
183
184 static LIST_HEAD(shmem_swaplist);
185 static DEFINE_MUTEX(shmem_swaplist_mutex);
186
187 static int shmem_reserve_inode(struct super_block *sb)
188 {
189         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
190         if (sbinfo->max_inodes) {
191                 spin_lock(&sbinfo->stat_lock);
192                 if (!sbinfo->free_inodes) {
193                         spin_unlock(&sbinfo->stat_lock);
194                         return -ENOSPC;
195                 }
196                 sbinfo->free_inodes--;
197                 spin_unlock(&sbinfo->stat_lock);
198         }
199         return 0;
200 }
201
202 static void shmem_free_inode(struct super_block *sb)
203 {
204         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
205         if (sbinfo->max_inodes) {
206                 spin_lock(&sbinfo->stat_lock);
207                 sbinfo->free_inodes++;
208                 spin_unlock(&sbinfo->stat_lock);
209         }
210 }
211
212 /**
213  * shmem_recalc_inode - recalculate the block usage of an inode
214  * @inode: inode to recalc
215  *
216  * We have to calculate the free blocks since the mm can drop
217  * undirtied hole pages behind our back.
218  *
219  * But normally, info->alloced == inode->i_mapping->nrpages + info->swapped,
220  * so the mm has freed info->alloced - (inode->i_mapping->nrpages + info->swapped).
221  *
222  * It has to be called with the spinlock held.
223  */
224 static void shmem_recalc_inode(struct inode *inode)
225 {
226         struct shmem_inode_info *info = SHMEM_I(inode);
227         long freed;
228
229         freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
230         if (freed > 0) {
231                 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
232                 if (sbinfo->max_blocks)
233                         percpu_counter_add(&sbinfo->used_blocks, -freed);
234                 info->alloced -= freed;
235                 inode->i_blocks -= freed * BLOCKS_PER_PAGE;
236                 shmem_unacct_blocks(info->flags, freed);
237         }
238 }
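/*
 * Editorial illustration of shmem_recalc_inode() (not part of the original
 * source), with made-up numbers: if info->alloced == 8 while nrpages == 3
 * and info->swapped == 2, then freed == 3 undirtied hole pages were dropped
 * behind our back; used_blocks and info->alloced are reduced by 3, and
 * inode->i_blocks by 3 * BLOCKS_PER_PAGE.
 */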
239
240 /*
241  * Replace item expected in radix tree by a new item, while holding tree lock.
242  */
243 static int shmem_radix_tree_replace(struct address_space *mapping,
244                         pgoff_t index, void *expected, void *replacement)
245 {
246         void **pslot;
247         void *item = NULL;
248
249         VM_BUG_ON(!expected);
250         pslot = radix_tree_lookup_slot(&mapping->page_tree, index);
251         if (pslot)
252                 item = radix_tree_deref_slot_protected(pslot,
253                                                         &mapping->tree_lock);
254         if (item != expected)
255                 return -ENOENT;
256         if (replacement)
257                 radix_tree_replace_slot(pslot, replacement);
258         else
259                 radix_tree_delete(&mapping->page_tree, index);
260         return 0;
261 }
262
263 /*
264  * Like add_to_page_cache_locked, but error if expected item has gone.
265  */
266 static int shmem_add_to_page_cache(struct page *page,
267                                    struct address_space *mapping,
268                                    pgoff_t index, gfp_t gfp, void *expected)
269 {
270         int error = 0;
271
272         VM_BUG_ON(!PageLocked(page));
273         VM_BUG_ON(!PageSwapBacked(page));
274
275         if (!expected)
276                 error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
277         if (!error) {
278                 page_cache_get(page);
279                 page->mapping = mapping;
280                 page->index = index;
281
282                 spin_lock_irq(&mapping->tree_lock);
283                 if (!expected)
284                         error = radix_tree_insert(&mapping->page_tree,
285                                                         index, page);
286                 else
287                         error = shmem_radix_tree_replace(mapping, index,
288                                                         expected, page);
289                 if (!error) {
290                         mapping->nrpages++;
291                         __inc_zone_page_state(page, NR_FILE_PAGES);
292                         __inc_zone_page_state(page, NR_SHMEM);
293                         spin_unlock_irq(&mapping->tree_lock);
294                 } else {
295                         page->mapping = NULL;
296                         spin_unlock_irq(&mapping->tree_lock);
297                         page_cache_release(page);
298                 }
299                 if (!expected)
300                         radix_tree_preload_end();
301         }
302         if (error)
303                 mem_cgroup_uncharge_cache_page(page);
304         return error;
305 }
306
307 /*
308  * Like delete_from_page_cache, but substitutes swap for page.
309  */
310 static void shmem_delete_from_page_cache(struct page *page, void *radswap)
311 {
312         struct address_space *mapping = page->mapping;
313         int error;
314
315         spin_lock_irq(&mapping->tree_lock);
316         error = shmem_radix_tree_replace(mapping, page->index, page, radswap);
317         page->mapping = NULL;
318         mapping->nrpages--;
319         __dec_zone_page_state(page, NR_FILE_PAGES);
320         __dec_zone_page_state(page, NR_SHMEM);
321         spin_unlock_irq(&mapping->tree_lock);
322         page_cache_release(page);
323         BUG_ON(error);
324 }
325
326 /*
327  * Like find_get_pages, but collecting swap entries as well as pages.
328  */
329 static unsigned shmem_find_get_pages_and_swap(struct address_space *mapping,
330                                         pgoff_t start, unsigned int nr_pages,
331                                         struct page **pages, pgoff_t *indices)
332 {
333         unsigned int i;
334         unsigned int ret;
335         unsigned int nr_found;
336
337         rcu_read_lock();
338 restart:
339         nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
340                                 (void ***)pages, indices, start, nr_pages);
341         ret = 0;
342         for (i = 0; i < nr_found; i++) {
343                 struct page *page;
344 repeat:
345                 page = radix_tree_deref_slot((void **)pages[i]);
346                 if (unlikely(!page))
347                         continue;
348                 if (radix_tree_exception(page)) {
349                         if (radix_tree_deref_retry(page))
350                                 goto restart;
351                         /*
352                          * Otherwise, we must be storing a swap entry
353                          * here as an exceptional entry: so return it
354                          * without attempting to raise page count.
355                          */
356                         goto export;
357                 }
358                 if (!page_cache_get_speculative(page))
359                         goto repeat;
360
361                 /* Has the page moved? */
362                 if (unlikely(page != *((void **)pages[i]))) {
363                         page_cache_release(page);
364                         goto repeat;
365                 }
366 export:
367                 indices[ret] = indices[i];
368                 pages[ret] = page;
369                 ret++;
370         }
371         if (unlikely(!ret && nr_found))
372                 goto restart;
373         rcu_read_unlock();
374         return ret;
375 }
376
377 /*
378  * Remove swap entry from radix tree, free the swap and its page cache.
379  */
380 static int shmem_free_swap(struct address_space *mapping,
381                            pgoff_t index, void *radswap)
382 {
383         int error;
384
385         spin_lock_irq(&mapping->tree_lock);
386         error = shmem_radix_tree_replace(mapping, index, radswap, NULL);
387         spin_unlock_irq(&mapping->tree_lock);
388         if (!error)
389                 free_swap_and_cache(radix_to_swp_entry(radswap));
390         return error;
391 }
392
393 /*
394  * Pagevec may contain swap entries, so shuffle up pages before releasing.
395  */
396 static void shmem_deswap_pagevec(struct pagevec *pvec)
397 {
398         int i, j;
399
400         for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
401                 struct page *page = pvec->pages[i];
402                 if (!radix_tree_exceptional_entry(page))
403                         pvec->pages[j++] = page;
404         }
405         pvec->nr = j;
406 }
407
408 /*
409  * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
410  */
411 void shmem_unlock_mapping(struct address_space *mapping)
412 {
413         struct pagevec pvec;
414         pgoff_t indices[PAGEVEC_SIZE];
415         pgoff_t index = 0;
416
417         pagevec_init(&pvec, 0);
418         /*
419          * Minor point, but we might as well stop if someone else SHM_LOCKs it.
420          */
421         while (!mapping_unevictable(mapping)) {
422                 /*
423                  * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
424                  * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
425                  */
426                 pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
427                                         PAGEVEC_SIZE, pvec.pages, indices);
428                 if (!pvec.nr)
429                         break;
430                 index = indices[pvec.nr - 1] + 1;
431                 shmem_deswap_pagevec(&pvec);
432                 check_move_unevictable_pages(pvec.pages, pvec.nr);
433                 pagevec_release(&pvec);
434                 cond_resched();
435         }
436 }
437
438 /*
439  * Remove range of pages and swap entries from radix tree, and free them.
440  */
441 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
442 {
443         struct address_space *mapping = inode->i_mapping;
444         struct shmem_inode_info *info = SHMEM_I(inode);
445         pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
446         unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
447         pgoff_t end = (lend >> PAGE_CACHE_SHIFT);
448         struct pagevec pvec;
449         pgoff_t indices[PAGEVEC_SIZE];
450         long nr_swaps_freed = 0;
451         pgoff_t index;
452         int i;
453
454         BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
455
456         pagevec_init(&pvec, 0);
457         index = start;
458         while (index <= end) {
459                 pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
460                         min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
461                                                         pvec.pages, indices);
462                 if (!pvec.nr)
463                         break;
464                 mem_cgroup_uncharge_start();
465                 for (i = 0; i < pagevec_count(&pvec); i++) {
466                         struct page *page = pvec.pages[i];
467
468                         index = indices[i];
469                         if (index > end)
470                                 break;
471
472                         if (radix_tree_exceptional_entry(page)) {
473                                 nr_swaps_freed += !shmem_free_swap(mapping,
474                                                                 index, page);
475                                 continue;
476                         }
477
478                         if (!trylock_page(page))
479                                 continue;
480                         if (page->mapping == mapping) {
481                                 VM_BUG_ON(PageWriteback(page));
482                                 truncate_inode_page(mapping, page);
483                         }
484                         unlock_page(page);
485                 }
486                 shmem_deswap_pagevec(&pvec);
487                 pagevec_release(&pvec);
488                 mem_cgroup_uncharge_end();
489                 cond_resched();
490                 index++;
491         }
492
493         if (partial) {
494                 struct page *page = NULL;
495                 shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
496                 if (page) {
497                         zero_user_segment(page, partial, PAGE_CACHE_SIZE);
498                         set_page_dirty(page);
499                         unlock_page(page);
500                         page_cache_release(page);
501                 }
502         }
503
504         index = start;
505         while (index <= end) {
506                 cond_resched();
507                 pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
508                         min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
509                                                         pvec.pages, indices);
510                 if (!pvec.nr) {
511                         /* If all gone or hole-punch, we're done */
512                         if (index == start || end != -1)
513                                 break;
514                         /* But if truncating, restart to make sure all gone */
515                         index = start;
516                         continue;
517                 }
518                 mem_cgroup_uncharge_start();
519                 for (i = 0; i < pagevec_count(&pvec); i++) {
520                         struct page *page = pvec.pages[i];
521
522                         index = indices[i];
523                         if (index > end)
524                                 break;
525
526                         if (radix_tree_exceptional_entry(page)) {
527                                 if (shmem_free_swap(mapping, index, page)) {
528                                         /* Swap was replaced by page: retry */
529                                         index--;
530                                         break;
531                                 }
532                                 nr_swaps_freed++;
533                                 continue;
534                         }
535
536                         lock_page(page);
537                         if (page->mapping == mapping) {
538                                 VM_BUG_ON(PageWriteback(page));
539                                 truncate_inode_page(mapping, page);
540                         } else {
541                                 /* Page was replaced by swap: retry */
542                                 unlock_page(page);
543                                 index--;
544                                 break;
545                         }
546                         unlock_page(page);
547                 }
548                 shmem_deswap_pagevec(&pvec);
549                 pagevec_release(&pvec);
550                 mem_cgroup_uncharge_end();
551                 index++;
552         }
553
554         spin_lock(&info->lock);
555         info->swapped -= nr_swaps_freed;
556         shmem_recalc_inode(inode);
557         spin_unlock(&info->lock);
558
559         inode->i_ctime = inode->i_mtime = CURRENT_TIME;
560 }
561 EXPORT_SYMBOL_GPL(shmem_truncate_range);
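/*
 * Editorial note (not part of the original source): the two callers in this
 * file illustrate both uses of shmem_truncate_range().  shmem_setattr()
 * truncates to a new size with shmem_truncate_range(inode, newsize,
 * (loff_t)-1), while vmtruncate_range() punches a hole by passing an
 * explicit range whose lend is the last byte of a page, as the BUG_ON
 * above requires.
 */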
562
563 static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
564 {
565         struct inode *inode = dentry->d_inode;
566         struct shmem_inode_info *info = SHMEM_I(inode);
567         int error;
568
569         error = setattr_prepare(dentry, attr);
570         if (error)
571                 return error;
572
573         if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
574                 loff_t oldsize = inode->i_size;
575                 loff_t newsize = attr->ia_size;
576
577                 /* protected by i_mutex */
578                 if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
579                     (newsize > oldsize && (info->seals & F_SEAL_GROW)))
580                         return -EPERM;
581
582                 if (newsize != oldsize) {
583                         i_size_write(inode, newsize);
584                         inode->i_ctime = inode->i_mtime = CURRENT_TIME;
585                 }
586                 if (newsize < oldsize) {
587                         loff_t holebegin = round_up(newsize, PAGE_SIZE);
588                         unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
589                         shmem_truncate_range(inode, newsize, (loff_t)-1);
590                         /* unmap again to remove racily COWed private pages */
591                         unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
592                 }
593         }
594
595         setattr_copy(inode, attr);
596 #ifdef CONFIG_TMPFS_POSIX_ACL
597         if (attr->ia_valid & ATTR_MODE)
598                 error = generic_acl_chmod(inode);
599 #endif
600         return error;
601 }
602
603 static void shmem_evict_inode(struct inode *inode)
604 {
605         struct shmem_inode_info *info = SHMEM_I(inode);
606         struct shmem_xattr *xattr, *nxattr;
607
608         if (inode->i_mapping->a_ops == &shmem_aops) {
609                 shmem_unacct_size(info->flags, inode->i_size);
610                 inode->i_size = 0;
611                 shmem_truncate_range(inode, 0, (loff_t)-1);
612                 if (!list_empty(&info->swaplist)) {
613                         mutex_lock(&shmem_swaplist_mutex);
614                         list_del_init(&info->swaplist);
615                         mutex_unlock(&shmem_swaplist_mutex);
616                 }
617         } else
618                 kfree(info->symlink);
619
620         list_for_each_entry_safe(xattr, nxattr, &info->xattr_list, list) {
621                 kfree(xattr->name);
622                 kfree(xattr);
623         }
624         WARN_ON(inode->i_blocks);
625         shmem_free_inode(inode->i_sb);
626         end_writeback(inode);
627 }
628
629 /*
630  * If swap found in inode, free it and move page from swapcache to filecache.
631  */
632 static int shmem_unuse_inode(struct shmem_inode_info *info,
633                              swp_entry_t swap, struct page *page)
634 {
635         struct address_space *mapping = info->vfs_inode.i_mapping;
636         void *radswap;
637         pgoff_t index;
638         int error;
639
640         radswap = swp_to_radix_entry(swap);
641         index = radix_tree_locate_item(&mapping->page_tree, radswap);
642         if (index == -1)
643                 return 0;
644
645         /*
646          * Move _head_ to start search for next from here.
647          * But be careful: shmem_evict_inode checks list_empty without taking
648          * mutex, and there's an instant in list_move_tail when info->swaplist
649          * would appear empty, if it were the only one on shmem_swaplist.
650          */
651         if (shmem_swaplist.next != &info->swaplist)
652                 list_move_tail(&shmem_swaplist, &info->swaplist);
653
654         /*
655          * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
656          * but also to hold up shmem_evict_inode(): so inode cannot be freed
657          * beneath us (pagelock doesn't help until the page is in pagecache).
658          */
659         error = shmem_add_to_page_cache(page, mapping, index,
660                                                 GFP_NOWAIT, radswap);
661         /* which does mem_cgroup_uncharge_cache_page on error */
662
663         if (error != -ENOMEM) {
664                 /*
665                  * Truncation and eviction use free_swap_and_cache(), which
666                  * only does trylock page: if we raced, best clean up here.
667                  */
668                 delete_from_swap_cache(page);
669                 set_page_dirty(page);
670                 if (!error) {
671                         spin_lock(&info->lock);
672                         info->swapped--;
673                         spin_unlock(&info->lock);
674                         swap_free(swap);
675                 }
676                 error = 1;      /* not an error, but entry was found */
677         }
678         return error;
679 }
680
681 /*
682  * Search through swapped inodes to find and replace swap by page.
683  */
684 int shmem_unuse(swp_entry_t swap, struct page *page)
685 {
686         struct list_head *this, *next;
687         struct shmem_inode_info *info;
688         int found = 0;
689         int error;
690
691         /*
692          * Charge page using GFP_KERNEL while we can wait, before taking
693          * the shmem_swaplist_mutex which might hold up shmem_writepage().
694          * Charged back to the user (not to caller) when swap account is used.
695          */
696         error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
697         if (error)
698                 goto out;
699         /* No radix_tree_preload: swap entry keeps a place for page in tree */
700
701         mutex_lock(&shmem_swaplist_mutex);
702         list_for_each_safe(this, next, &shmem_swaplist) {
703                 info = list_entry(this, struct shmem_inode_info, swaplist);
704                 if (info->swapped)
705                         found = shmem_unuse_inode(info, swap, page);
706                 else
707                         list_del_init(&info->swaplist);
708                 cond_resched();
709                 if (found)
710                         break;
711         }
712         mutex_unlock(&shmem_swaplist_mutex);
713
714         if (!found)
715                 mem_cgroup_uncharge_cache_page(page);
716         if (found < 0)
717                 error = found;
718 out:
719         unlock_page(page);
720         page_cache_release(page);
721         return error;
722 }
723
724 /*
725  * Move the page from the page cache to the swap cache.
726  */
727 static int shmem_writepage(struct page *page, struct writeback_control *wbc)
728 {
729         struct shmem_inode_info *info;
730         struct address_space *mapping;
731         struct inode *inode;
732         swp_entry_t swap;
733         pgoff_t index;
734
735         BUG_ON(!PageLocked(page));
736         mapping = page->mapping;
737         index = page->index;
738         inode = mapping->host;
739         info = SHMEM_I(inode);
740         if (info->flags & VM_LOCKED)
741                 goto redirty;
742         if (!total_swap_pages)
743                 goto redirty;
744
745         /*
746          * shmem_backing_dev_info's capabilities prevent regular writeback or
747          * sync from ever calling shmem_writepage; but a stacking filesystem
748          * might use ->writepage of its underlying filesystem, in which case
749          * tmpfs should write out to swap only in response to memory pressure,
750          * and not for the writeback threads or sync.
751          */
752         if (!wbc->for_reclaim) {
753                 WARN_ON_ONCE(1);        /* Still happens? Tell us about it! */
754                 goto redirty;
755         }
756         swap = get_swap_page();
757         if (!swap.val)
758                 goto redirty;
759
760         /*
761          * Add inode to shmem_unuse()'s list of swapped-out inodes,
762          * if it's not already there.  Do it now before the page is
763          * moved to swap cache, when its pagelock no longer protects
764          * the inode from eviction.  But don't unlock the mutex until
765          * we've incremented swapped, because shmem_unuse_inode() will
766          * prune a !swapped inode from the swaplist under this mutex.
767          */
768         mutex_lock(&shmem_swaplist_mutex);
769         if (list_empty(&info->swaplist))
770                 list_add_tail(&info->swaplist, &shmem_swaplist);
771
772         if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
773                 swap_shmem_alloc(swap);
774                 shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
775
776                 spin_lock(&info->lock);
777                 info->swapped++;
778                 shmem_recalc_inode(inode);
779                 spin_unlock(&info->lock);
780
781                 mutex_unlock(&shmem_swaplist_mutex);
782                 BUG_ON(page_mapped(page));
783                 swap_writepage(page, wbc);
784                 return 0;
785         }
786
787         mutex_unlock(&shmem_swaplist_mutex);
788         swapcache_free(swap, NULL);
789 redirty:
790         set_page_dirty(page);
791         if (wbc->for_reclaim)
792                 return AOP_WRITEPAGE_ACTIVATE;  /* Return with page locked */
793         unlock_page(page);
794         return 0;
795 }
796
797 #ifdef CONFIG_NUMA
798 #ifdef CONFIG_TMPFS
799 static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
800 {
801         char buffer[64];
802
803         if (!mpol || mpol->mode == MPOL_DEFAULT)
804                 return;         /* show nothing */
805
806         mpol_to_str(buffer, sizeof(buffer), mpol, 1);
807
808         seq_printf(seq, ",mpol=%s", buffer);
809 }
810
811 static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
812 {
813         struct mempolicy *mpol = NULL;
814         if (sbinfo->mpol) {
815                 spin_lock(&sbinfo->stat_lock);  /* prevent replace/use races */
816                 mpol = sbinfo->mpol;
817                 mpol_get(mpol);
818                 spin_unlock(&sbinfo->stat_lock);
819         }
820         return mpol;
821 }
822 #endif /* CONFIG_TMPFS */
823
824 static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
825                         struct shmem_inode_info *info, pgoff_t index)
826 {
827         struct vm_area_struct pvma;
828         struct page *page;
829
830         /* Create a pseudo vma that just contains the policy */
831         pvma.vm_start = 0;
832         pvma.vm_pgoff = index;
833         pvma.vm_ops = NULL;
834         pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);
835
836         page = swapin_readahead(swap, gfp, &pvma, 0);
837
838         /* Drop reference taken by mpol_shared_policy_lookup() */
839         mpol_cond_put(pvma.vm_policy);
840
841         return page;
842 }
843
844 static struct page *shmem_alloc_page(gfp_t gfp,
845                         struct shmem_inode_info *info, pgoff_t index)
846 {
847         struct vm_area_struct pvma;
848         struct page *page;
849
850         /* Create a pseudo vma that just contains the policy */
851         pvma.vm_start = 0;
852         pvma.vm_pgoff = index;
853         pvma.vm_ops = NULL;
854         pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);
855
856         page = alloc_page_vma(gfp, &pvma, 0);
857
858         /* Drop reference taken by mpol_shared_policy_lookup() */
859         mpol_cond_put(pvma.vm_policy);
860
861         return page;
862 }
863 #else /* !CONFIG_NUMA */
864 #ifdef CONFIG_TMPFS
865 static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
866 {
867 }
868 #endif /* CONFIG_TMPFS */
869
870 static inline struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
871                         struct shmem_inode_info *info, pgoff_t index)
872 {
873         return swapin_readahead(swap, gfp, NULL, 0);
874 }
875
876 static inline struct page *shmem_alloc_page(gfp_t gfp,
877                         struct shmem_inode_info *info, pgoff_t index)
878 {
879         return alloc_page(gfp);
880 }
881 #endif /* CONFIG_NUMA */
882
883 #if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
884 static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
885 {
886         return NULL;
887 }
888 #endif
889
890 /*
891  * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
892  *
893  * If we allocate a new one we do not mark it dirty: that's up to the
894  * vm. If we swap it in we mark it dirty, since we also free the swap
895  * entry: a page cannot live in both the swap cache and the page cache.
896  */
897 static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
898         struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type)
899 {
900         struct address_space *mapping = inode->i_mapping;
901         struct shmem_inode_info *info;
902         struct shmem_sb_info *sbinfo;
903         struct page *page;
904         swp_entry_t swap;
905         int error;
906         int once = 0;
907
908         if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT))
909                 return -EFBIG;
910 repeat:
911         swap.val = 0;
912         page = find_lock_page(mapping, index);
913         if (radix_tree_exceptional_entry(page)) {
914                 swap = radix_to_swp_entry(page);
915                 page = NULL;
916         }
917
918         if (sgp != SGP_WRITE &&
919             ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
920                 error = -EINVAL;
921                 goto failed;
922         }
923
924         if (page || (sgp == SGP_READ && !swap.val)) {
925                 /*
926                  * Once we can get the page lock, it must be uptodate:
927                  * if there were an error in reading back from swap,
928                  * the page would not be inserted into the filecache.
929                  */
930                 BUG_ON(page && !PageUptodate(page));
931                 *pagep = page;
932                 return 0;
933         }
934
935         /*
936          * Fast cache lookup did not find it:
937          * bring it back from swap or allocate.
938          */
939         info = SHMEM_I(inode);
940         sbinfo = SHMEM_SB(inode->i_sb);
941
942         if (swap.val) {
943                 /* Look it up and read it in.. */
944                 page = lookup_swap_cache(swap);
945                 if (!page) {
946                         /* here we actually do the io */
947                         if (fault_type)
948                                 *fault_type |= VM_FAULT_MAJOR;
949                         page = shmem_swapin(swap, gfp, info, index);
950                         if (!page) {
951                                 error = -ENOMEM;
952                                 goto failed;
953                         }
954                 }
955
956                 /* We have to do this with page locked to prevent races */
957                 lock_page(page);
958                 if (!PageUptodate(page)) {
959                         error = -EIO;
960                         goto failed;
961                 }
962                 wait_on_page_writeback(page);
963
964                 /* Someone may have already done it for us */
965                 if (page->mapping) {
966                         if (page->mapping == mapping &&
967                             page->index == index)
968                                 goto done;
969                         error = -EEXIST;
970                         goto failed;
971                 }
972
973                 error = mem_cgroup_cache_charge(page, current->mm,
974                                                 gfp & GFP_RECLAIM_MASK);
975                 if (!error)
976                         error = shmem_add_to_page_cache(page, mapping, index,
977                                                 gfp, swp_to_radix_entry(swap));
978                 if (error)
979                         goto failed;
980
981                 spin_lock(&info->lock);
982                 info->swapped--;
983                 shmem_recalc_inode(inode);
984                 spin_unlock(&info->lock);
985
986                 delete_from_swap_cache(page);
987                 set_page_dirty(page);
988                 swap_free(swap);
989
990         } else {
991                 if (shmem_acct_block(info->flags)) {
992                         error = -ENOSPC;
993                         goto failed;
994                 }
995                 if (sbinfo->max_blocks) {
996                         if (percpu_counter_compare(&sbinfo->used_blocks,
997                                                 sbinfo->max_blocks) >= 0) {
998                                 error = -ENOSPC;
999                                 goto unacct;
1000                         }
1001                         percpu_counter_inc(&sbinfo->used_blocks);
1002                 }
1003
1004                 page = shmem_alloc_page(gfp, info, index);
1005                 if (!page) {
1006                         error = -ENOMEM;
1007                         goto decused;
1008                 }
1009
1010                 SetPageSwapBacked(page);
1011                 __set_page_locked(page);
1012                 error = mem_cgroup_cache_charge(page, current->mm,
1013                                                 gfp & GFP_RECLAIM_MASK);
1014                 if (!error)
1015                         error = shmem_add_to_page_cache(page, mapping, index,
1016                                                 gfp, NULL);
1017                 if (error)
1018                         goto decused;
1019                 lru_cache_add_anon(page);
1020
1021                 spin_lock(&info->lock);
1022                 info->alloced++;
1023                 inode->i_blocks += BLOCKS_PER_PAGE;
1024                 shmem_recalc_inode(inode);
1025                 spin_unlock(&info->lock);
1026
1027                 clear_highpage(page);
1028                 flush_dcache_page(page);
1029                 SetPageUptodate(page);
1030                 if (sgp == SGP_DIRTY)
1031                         set_page_dirty(page);
1032         }
1033 done:
1034         /* Perhaps the file has been truncated since we checked */
1035         if (sgp != SGP_WRITE &&
1036             ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
1037                 error = -EINVAL;
1038                 goto trunc;
1039         }
1040         *pagep = page;
1041         return 0;
1042
1043         /*
1044          * Error recovery.
1045          */
1046 trunc:
1047         ClearPageDirty(page);
1048         delete_from_page_cache(page);
1049         spin_lock(&info->lock);
1050         info->alloced--;
1051         inode->i_blocks -= BLOCKS_PER_PAGE;
1052         spin_unlock(&info->lock);
1053 decused:
1054         if (sbinfo->max_blocks)
1055                 percpu_counter_add(&sbinfo->used_blocks, -1);
1056 unacct:
1057         shmem_unacct_blocks(info->flags, 1);
1058 failed:
1059         if (swap.val && error != -EINVAL) {
1060                 struct page *test = find_get_page(mapping, index);
1061                 if (test && !radix_tree_exceptional_entry(test))
1062                         page_cache_release(test);
1063                 /* Have another try if the entry has changed */
1064                 if (test != swp_to_radix_entry(swap))
1065                         error = -EEXIST;
1066         }
1067         if (page) {
1068                 unlock_page(page);
1069                 page_cache_release(page);
1070         }
1071         if (error == -ENOSPC && !once++) {
1072                 info = SHMEM_I(inode);
1073                 spin_lock(&info->lock);
1074                 shmem_recalc_inode(inode);
1075                 spin_unlock(&info->lock);
1076                 goto repeat;
1077         }
1078         if (error == -EEXIST)
1079                 goto repeat;
1080         return error;
1081 }
1082
1083 static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1084 {
1085         struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
1086         int error;
1087         int ret = VM_FAULT_LOCKED;
1088
1089         /*
1090          * Trinity finds that probing a hole which tmpfs is punching can
1091          * prevent the hole-punch from ever completing: which in turn
1092          * locks writers out with its hold on i_mutex.  So refrain from
1093          * faulting pages into the hole while it's being punched.  Although
1094          * shmem_truncate_range() does remove the additions, it may be unable to
1095          * keep up, as each new page needs its own unmap_mapping_range() call,
1096          * and the i_mmap tree grows ever slower to scan if new vmas are added.
1097          *
1098          * It does not matter if we sometimes reach this check just before the
1099          * hole-punch begins, so that one fault then races with the punch:
1100          * we just need to make racing faults a rare case.
1101          *
1102          * The implementation below would be much simpler if we just used a
1103          * standard mutex or completion: but we cannot take i_mutex in fault,
1104          * and bloating every shmem inode for this unlikely case would be sad.
1105          */
1106         if (unlikely(inode->i_private)) {
1107                 struct shmem_falloc *shmem_falloc;
1108
1109                 spin_lock(&inode->i_lock);
1110                 shmem_falloc = inode->i_private;
1111                 if (shmem_falloc &&
1112                     vmf->pgoff >= shmem_falloc->start &&
1113                     vmf->pgoff < shmem_falloc->next) {
1114                         wait_queue_head_t *shmem_falloc_waitq;
1115                         DEFINE_WAIT(shmem_fault_wait);
1116
1117                         ret = VM_FAULT_NOPAGE;
1118                         if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
1119                            !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
1120                                 /* It's polite to up mmap_sem if we can */
1121                                 up_read(&vma->vm_mm->mmap_sem);
1122                                 ret = VM_FAULT_RETRY;
1123                         }
1124
1125                         shmem_falloc_waitq = shmem_falloc->waitq;
1126                         prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
1127                                         TASK_UNINTERRUPTIBLE);
1128                         spin_unlock(&inode->i_lock);
1129                         schedule();
1130
1131                         /*
1132                          * shmem_falloc_waitq points into the vmtruncate_range()
1133                          * stack of the hole-punching task: shmem_falloc_waitq
1134                          * is usually invalid by the time we reach here, but
1135                          * finish_wait() does not dereference it in that case;
1136                          * though i_lock needed lest racing with wake_up_all().
1137                          */
1138                         spin_lock(&inode->i_lock);
1139                         finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
1140                         spin_unlock(&inode->i_lock);
1141                         return ret;
1142                 }
1143                 spin_unlock(&inode->i_lock);
1144         }
1145
1146         error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
1147         if (error)
1148                 return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
1149
1150         if (ret & VM_FAULT_MAJOR) {
1151                 count_vm_event(PGMAJFAULT);
1152                 mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
1153         }
1154         return ret;
1155 }
1156
1157 int vmtruncate_range(struct inode *inode, loff_t lstart, loff_t lend)
1158 {
1159         /*
1160          * If the underlying filesystem is not going to provide
1161          * a way to truncate a range of blocks (punch a hole) -
1162          * we should return failure right now.
1163          * Only CONFIG_SHMEM shmem.c ever supported i_op->truncate_range().
1164          */
1165         if (inode->i_op->truncate_range != shmem_truncate_range)
1166                 return -ENOSYS;
1167
1168         mutex_lock(&inode->i_mutex);
1169         {
1170                 struct shmem_falloc shmem_falloc;
1171                 struct address_space *mapping = inode->i_mapping;
1172                 loff_t unmap_start = round_up(lstart, PAGE_SIZE);
1173                 loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
1174                 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
1175
1176                 shmem_falloc.waitq = &shmem_falloc_waitq;
1177                 shmem_falloc.start = unmap_start >> PAGE_SHIFT;
1178                 shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
1179                 spin_lock(&inode->i_lock);
1180                 inode->i_private = &shmem_falloc;
1181                 spin_unlock(&inode->i_lock);
1182
1183                 if ((u64)unmap_end > (u64)unmap_start)
1184                         unmap_mapping_range(mapping, unmap_start,
1185                                             1 + unmap_end - unmap_start, 0);
1186                 shmem_truncate_range(inode, lstart, lend);
1187                 /* No need to unmap again: hole-punching leaves COWed pages */
1188
1189                 spin_lock(&inode->i_lock);
1190                 inode->i_private = NULL;
1191                 wake_up_all(&shmem_falloc_waitq);
1192                 spin_unlock(&inode->i_lock);
1193         }
1194         mutex_unlock(&inode->i_mutex);
1195         return 0;
1196 }
1197 EXPORT_SYMBOL_GPL(vmtruncate_range);
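/*
 * Editorial illustration (an assumption, not taken from this file): in
 * kernels of this vintage the usual userspace route into vmtruncate_range()
 * is madvise(MADV_REMOVE) on a mapped tmpfs file, e.g.
 *   madvise(addr, length, MADV_REMOVE);
 * which punches a hole, freeing the pages and swap backing that range
 * without changing i_size.
 */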
1198
1199 #ifdef CONFIG_NUMA
1200 static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
1201 {
1202         struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
1203         return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
1204 }
1205
1206 static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
1207                                           unsigned long addr)
1208 {
1209         struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
1210         pgoff_t index;
1211
1212         index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
1213         return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
1214 }
1215 #endif
1216
1217 int shmem_lock(struct file *file, int lock, struct user_struct *user)
1218 {
1219         struct inode *inode = file->f_path.dentry->d_inode;
1220         struct shmem_inode_info *info = SHMEM_I(inode);
1221         int retval = -ENOMEM;
1222
1223         spin_lock(&info->lock);
1224         if (lock && !(info->flags & VM_LOCKED)) {
1225                 if (!user_shm_lock(inode->i_size, user))
1226                         goto out_nomem;
1227                 info->flags |= VM_LOCKED;
1228                 mapping_set_unevictable(file->f_mapping);
1229         }
1230         if (!lock && (info->flags & VM_LOCKED) && user) {
1231                 user_shm_unlock(inode->i_size, user);
1232                 info->flags &= ~VM_LOCKED;
1233                 mapping_clear_unevictable(file->f_mapping);
1234         }
1235         retval = 0;
1236
1237 out_nomem:
1238         spin_unlock(&info->lock);
1239         return retval;
1240 }
1241
1242 static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
1243 {
1244         file_accessed(file);
1245         vma->vm_ops = &shmem_vm_ops;
1246         vma->vm_flags |= VM_CAN_NONLINEAR;
1247         return 0;
1248 }
1249
1250 static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
1251                                      int mode, dev_t dev, unsigned long flags)
1252 {
1253         struct inode *inode;
1254         struct shmem_inode_info *info;
1255         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
1256
1257         if (shmem_reserve_inode(sb))
1258                 return NULL;
1259
1260         inode = new_inode(sb);
1261         if (inode) {
1262                 inode->i_ino = get_next_ino();
1263                 inode_init_owner(inode, dir, mode);
1264                 inode->i_blocks = 0;
1265                 inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
1266                 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
1267                 inode->i_generation = get_seconds();
1268                 info = SHMEM_I(inode);
1269                 memset(info, 0, (char *)inode - (char *)info);
1270                 spin_lock_init(&info->lock);
1271                 info->seals = F_SEAL_SEAL;
1272                 info->flags = flags & VM_NORESERVE;
1273                 INIT_LIST_HEAD(&info->swaplist);
1274                 INIT_LIST_HEAD(&info->xattr_list);
1275                 cache_no_acl(inode);
1276
1277                 switch (mode & S_IFMT) {
1278                 default:
1279                         inode->i_op = &shmem_special_inode_operations;
1280                         init_special_inode(inode, mode, dev);
1281                         break;
1282                 case S_IFREG:
1283                         inode->i_mapping->a_ops = &shmem_aops;
1284                         inode->i_op = &shmem_inode_operations;
1285                         inode->i_fop = &shmem_file_operations;
1286                         mpol_shared_policy_init(&info->policy,
1287                                                  shmem_get_sbmpol(sbinfo));
1288                         break;
1289                 case S_IFDIR:
1290                         inc_nlink(inode);
1291                         /* Some things misbehave if size == 0 on a directory */
1292                         inode->i_size = 2 * BOGO_DIRENT_SIZE;
1293                         inode->i_op = &shmem_dir_inode_operations;
1294                         inode->i_fop = &simple_dir_operations;
1295                         break;
1296                 case S_IFLNK:
1297                         /*
1298                          * Must not load anything in the rbtree,
1299                          * mpol_free_shared_policy will not be called.
1300                          */
1301                         mpol_shared_policy_init(&info->policy, NULL);
1302                         break;
1303                 }
1304         } else
1305                 shmem_free_inode(sb);
1306         return inode;
1307 }
1308
1309 #ifdef CONFIG_TMPFS
1310 static const struct inode_operations shmem_symlink_inode_operations;
1311 static const struct inode_operations shmem_short_symlink_operations;
1312
1313 static int
1314 shmem_write_begin(struct file *file, struct address_space *mapping,
1315                         loff_t pos, unsigned len, unsigned flags,
1316                         struct page **pagep, void **fsdata)
1317 {
1318         struct inode *inode = mapping->host;
1319         struct shmem_inode_info *info = SHMEM_I(inode);
1320         pgoff_t index = pos >> PAGE_CACHE_SHIFT;
1321
1322         /* i_mutex is held by caller */
1323         if (unlikely(info->seals)) {
1324                 if (info->seals & F_SEAL_WRITE)
1325                         return -EPERM;
1326                 if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
1327                         return -EPERM;
1328         }
1329
1330         return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
1331 }
1332
1333 static int
1334 shmem_write_end(struct file *file, struct address_space *mapping,
1335                         loff_t pos, unsigned len, unsigned copied,
1336                         struct page *page, void *fsdata)
1337 {
1338         struct inode *inode = mapping->host;
1339
1340         if (pos + copied > inode->i_size)
1341                 i_size_write(inode, pos + copied);
1342
1343         set_page_dirty(page);
1344         unlock_page(page);
1345         page_cache_release(page);
1346
1347         return copied;
1348 }
1349
1350 static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
1351 {
1352         struct inode *inode = filp->f_path.dentry->d_inode;
1353         struct address_space *mapping = inode->i_mapping;
1354         pgoff_t index;
1355         unsigned long offset;
1356         enum sgp_type sgp = SGP_READ;
1357
1358         /*
1359          * Might this read be for a stacking filesystem?  Then when reading
1360          * holes of a sparse file, we actually need to allocate those pages,
1361          * and even mark them dirty, so it cannot exceed the max_blocks limit.
1362          */
1363         if (segment_eq(get_fs(), KERNEL_DS))
1364                 sgp = SGP_DIRTY;
1365
1366         index = *ppos >> PAGE_CACHE_SHIFT;
1367         offset = *ppos & ~PAGE_CACHE_MASK;
1368
1369         for (;;) {
1370                 struct page *page = NULL;
1371                 pgoff_t end_index;
1372                 unsigned long nr, ret;
1373                 loff_t i_size = i_size_read(inode);
1374
1375                 end_index = i_size >> PAGE_CACHE_SHIFT;
1376                 if (index > end_index)
1377                         break;
1378                 if (index == end_index) {
1379                         nr = i_size & ~PAGE_CACHE_MASK;
1380                         if (nr <= offset)
1381                                 break;
1382                 }
1383
1384                 desc->error = shmem_getpage(inode, index, &page, sgp, NULL);
1385                 if (desc->error) {
1386                         if (desc->error == -EINVAL)
1387                                 desc->error = 0;
1388                         break;
1389                 }
1390                 if (page)
1391                         unlock_page(page);
1392
1393                 /*
1394                  * We must evaluate after, since reads (unlike writes)
1395                  * are called without i_mutex protection against truncate
1396                  */
1397                 nr = PAGE_CACHE_SIZE;
1398                 i_size = i_size_read(inode);
1399                 end_index = i_size >> PAGE_CACHE_SHIFT;
1400                 if (index == end_index) {
1401                         nr = i_size & ~PAGE_CACHE_MASK;
1402                         if (nr <= offset) {
1403                                 if (page)
1404                                         page_cache_release(page);
1405                                 break;
1406                         }
1407                 }
1408                 nr -= offset;
1409
1410                 if (page) {
1411                         /*
1412                          * If users can be writing to this page using arbitrary
1413                          * virtual addresses, take care about potential aliasing
1414                          * before reading the page on the kernel side.
1415                          */
1416                         if (mapping_writably_mapped(mapping))
1417                                 flush_dcache_page(page);
1418                         /*
1419                          * Mark the page accessed if we read the beginning.
1420                          */
1421                         if (!offset)
1422                                 mark_page_accessed(page);
1423                 } else {
1424                         page = ZERO_PAGE(0);
1425                         page_cache_get(page);
1426                 }
1427
1428                 /*
1429                  * Ok, we have the page, and it's up-to-date, so
1430                  * now we can copy it to user space...
1431                  *
1432                  * The actor routine returns how many bytes were actually used..
1433                  * NOTE! This may not be the same as how much of a user buffer
1434                  * we filled up (we may be padding etc), so we can only update
1435                  * "pos" here (the actor routine has to update the user buffer
1436                  * pointers and the remaining count).
1437                  */
1438                 ret = actor(desc, page, offset, nr);
1439                 offset += ret;
1440                 index += offset >> PAGE_CACHE_SHIFT;
1441                 offset &= ~PAGE_CACHE_MASK;
1442
1443                 page_cache_release(page);
1444                 if (ret != nr || !desc->count)
1445                         break;
1446
1447                 cond_resched();
1448         }
1449
1450         *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
1451         file_accessed(filp);
1452 }
1453
1454 static ssize_t shmem_file_aio_read(struct kiocb *iocb,
1455                 const struct iovec *iov, unsigned long nr_segs, loff_t pos)
1456 {
1457         struct file *filp = iocb->ki_filp;
1458         ssize_t retval;
1459         unsigned long seg;
1460         size_t count;
1461         loff_t *ppos = &iocb->ki_pos;
1462
1463         retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
1464         if (retval)
1465                 return retval;
1466
1467         for (seg = 0; seg < nr_segs; seg++) {
1468                 read_descriptor_t desc;
1469
1470                 desc.written = 0;
1471                 desc.arg.buf = iov[seg].iov_base;
1472                 desc.count = iov[seg].iov_len;
1473                 if (desc.count == 0)
1474                         continue;
1475                 desc.error = 0;
1476                 do_shmem_file_read(filp, ppos, &desc, file_read_actor);
1477                 retval += desc.written;
1478                 if (desc.error) {
1479                         retval = retval ?: desc.error;
1480                         break;
1481                 }
1482                 if (desc.count > 0)
1483                         break;
1484         }
1485         return retval;
1486 }
1487
1488 static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
1489                                 struct pipe_inode_info *pipe, size_t len,
1490                                 unsigned int flags)
1491 {
1492         struct address_space *mapping = in->f_mapping;
1493         struct inode *inode = mapping->host;
1494         unsigned int loff, nr_pages, req_pages;
1495         struct page *pages[PIPE_DEF_BUFFERS];
1496         struct partial_page partial[PIPE_DEF_BUFFERS];
1497         struct page *page;
1498         pgoff_t index, end_index;
1499         loff_t isize, left;
1500         int error, page_nr;
1501         struct splice_pipe_desc spd = {
1502                 .pages = pages,
1503                 .partial = partial,
1504                 .nr_pages_max = PIPE_DEF_BUFFERS,
1505                 .flags = flags,
1506                 .ops = &page_cache_pipe_buf_ops,
1507                 .spd_release = spd_release_page,
1508         };
1509
1510         isize = i_size_read(inode);
1511         if (unlikely(*ppos >= isize))
1512                 return 0;
1513
1514         left = isize - *ppos;
1515         if (unlikely(left < len))
1516                 len = left;
1517
1518         if (splice_grow_spd(pipe, &spd))
1519                 return -ENOMEM;
1520
1521         index = *ppos >> PAGE_CACHE_SHIFT;
1522         loff = *ppos & ~PAGE_CACHE_MASK;
1523         req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1524         nr_pages = min(req_pages, pipe->buffers);
1525
1526         spd.nr_pages = find_get_pages_contig(mapping, index,
1527                                                 nr_pages, spd.pages);
1528         index += spd.nr_pages;
1529         error = 0;
1530
1531         while (spd.nr_pages < nr_pages) {
1532                 error = shmem_getpage(inode, index, &page, SGP_CACHE, NULL);
1533                 if (error)
1534                         break;
1535                 unlock_page(page);
1536                 spd.pages[spd.nr_pages++] = page;
1537                 index++;
1538         }
1539
1540         index = *ppos >> PAGE_CACHE_SHIFT;
1541         nr_pages = spd.nr_pages;
1542         spd.nr_pages = 0;
1543
1544         for (page_nr = 0; page_nr < nr_pages; page_nr++) {
1545                 unsigned int this_len;
1546
1547                 if (!len)
1548                         break;
1549
1550                 this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
1551                 page = spd.pages[page_nr];
1552
1553                 if (!PageUptodate(page) || page->mapping != mapping) {
1554                         error = shmem_getpage(inode, index, &page,
1555                                                         SGP_CACHE, NULL);
1556                         if (error)
1557                                 break;
1558                         unlock_page(page);
1559                         page_cache_release(spd.pages[page_nr]);
1560                         spd.pages[page_nr] = page;
1561                 }
1562
1563                 isize = i_size_read(inode);
1564                 end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
1565                 if (unlikely(!isize || index > end_index))
1566                         break;
1567
1568                 if (end_index == index) {
1569                         unsigned int plen;
1570
1571                         plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
1572                         if (plen <= loff)
1573                                 break;
1574
1575                         this_len = min(this_len, plen - loff);
1576                         len = this_len;
1577                 }
1578
1579                 spd.partial[page_nr].offset = loff;
1580                 spd.partial[page_nr].len = this_len;
1581                 len -= this_len;
1582                 loff = 0;
1583                 spd.nr_pages++;
1584                 index++;
1585         }
1586
1587         while (page_nr < nr_pages)
1588                 page_cache_release(spd.pages[page_nr++]);
1589
1590         if (spd.nr_pages)
1591                 error = splice_to_pipe(pipe, &spd);
1592
1593         splice_shrink_spd(&spd);
1594
1595         if (error > 0) {
1596                 *ppos += error;
1597                 file_accessed(in);
1598         }
1599         return error;
1600 }
1601
1602 static int shmem_wait_for_pins(struct address_space *mapping)
1603 {
1604         return 0;
1605 }
1606
1607 #define F_ALL_SEALS (F_SEAL_SEAL | \
1608                      F_SEAL_SHRINK | \
1609                      F_SEAL_GROW | \
1610                      F_SEAL_WRITE)
1611
1612 int shmem_add_seals(struct file *file, unsigned int seals)
1613 {
1614         struct inode *inode = file_inode(file);
1615         struct shmem_inode_info *info = SHMEM_I(inode);
1616         int error;
1617
1618         /*
1619          * SEALING
1620          * Sealing allows multiple parties to share a shmem-file but restrict
1621          * access to a specific subset of file operations. Seals can only be
1622          * added, but never removed. This way, mutually untrusted parties can
1623          * share common memory regions with a well-defined policy. A malicious
1624          * peer can thus never perform unwanted operations on a shared object.
1625          *
1626          * Seals are only supported on special shmem-files and always affect
1627          * the whole underlying inode. Once a seal is set, it may prevent some
1628          * kinds of access to the file. Currently, the following seals are
1629          * defined:
1630          *   SEAL_SEAL: Prevent further seals from being set on this file
1631          *   SEAL_SHRINK: Prevent the file from shrinking
1632          *   SEAL_GROW: Prevent the file from growing
1633          *   SEAL_WRITE: Prevent write access to the file
1634          *
1635          * As we don't require any trust relationship between two parties, we
1636          * must prevent seals from being removed. Therefore, sealing a file
1637          * only adds a given set of seals to the file, it never touches
1638          * existing seals. Furthermore, the "setting seals"-operation can be
1639          * sealed itself, which basically prevents any further seal from being
1640          * added.
1641          *
1642          * Semantics of sealing are only defined on volatile files. Only
1643          * anonymous shmem files support sealing. More importantly, seals are
1644          * never written to disk. Therefore, there's no plan to support it on
1645          * other file types.
1646          */
1647
1648         if (file->f_op != &shmem_file_operations)
1649                 return -EINVAL;
1650         if (!(file->f_mode & FMODE_WRITE))
1651                 return -EPERM;
1652         if (seals & ~(unsigned int)F_ALL_SEALS)
1653                 return -EINVAL;
1654
1655         mutex_lock(&inode->i_mutex);
1656
1657         if (info->seals & F_SEAL_SEAL) {
1658                 error = -EPERM;
1659                 goto unlock;
1660         }
1661
1662         if ((seals & F_SEAL_WRITE) && !(info->seals & F_SEAL_WRITE)) {
1663                 error = mapping_deny_writable(file->f_mapping);
1664                 if (error)
1665                         goto unlock;
1666
1667                 error = shmem_wait_for_pins(file->f_mapping);
1668                 if (error) {
1669                         mapping_allow_writable(file->f_mapping);
1670                         goto unlock;
1671                 }
1672         }
1673
1674         info->seals |= seals;
1675         error = 0;
1676
1677 unlock:
1678         mutex_unlock(&inode->i_mutex);
1679         return error;
1680 }
1681 EXPORT_SYMBOL_GPL(shmem_add_seals);
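/*
 * A minimal userspace sketch of the sealing API described above, assuming
 * the C library headers expose __NR_memfd_create, the MFD_* flags and the
 * F_*SEALS fcntl constants for this kernel (older headers may need them
 * defined by hand); the name "sealing-demo" and the 4096-byte size are
 * illustrative only:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/memfd.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		int fd = syscall(__NR_memfd_create, "sealing-demo",
 *				 MFD_CLOEXEC | MFD_ALLOW_SEALING);
 *		if (fd < 0 || ftruncate(fd, 4096) < 0)
 *			return 1;
 *		// fill the region here, then freeze size and contents,
 *		// and finally forbid any further seals
 *		if (fcntl(fd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW |
 *					   F_SEAL_WRITE | F_SEAL_SEAL) < 0)
 *			return 1;
 *		printf("seals now: 0x%x\n", (unsigned)fcntl(fd, F_GET_SEALS));
 *		return 0;
 *	}
 *
 * Once F_SEAL_WRITE is set, a peer receiving this fd can still map and read
 * it, but writes and size changes fail; and F_SEAL_SEAL prevents the seal
 * set from ever being extended again.
 */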
1682
1683 int shmem_get_seals(struct file *file)
1684 {
1685         if (file->f_op != &shmem_file_operations)
1686                 return -EINVAL;
1687
1688         return SHMEM_I(file_inode(file))->seals;
1689 }
1690 EXPORT_SYMBOL_GPL(shmem_get_seals);
1691
1692 long shmem_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
1693 {
1694         long error;
1695
1696         switch (cmd) {
1697         case F_ADD_SEALS:
1698                 /* disallow upper 32bit */
1699                 if (arg > UINT_MAX)
1700                         return -EINVAL;
1701
1702                 error = shmem_add_seals(file, arg);
1703                 break;
1704         case F_GET_SEALS:
1705                 error = shmem_get_seals(file);
1706                 break;
1707         default:
1708                 error = -EINVAL;
1709                 break;
1710         }
1711
1712         return error;
1713 }
1714
1715 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
1716 {
1717         struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
1718
1719         buf->f_type = TMPFS_MAGIC;
1720         buf->f_bsize = PAGE_CACHE_SIZE;
1721         buf->f_namelen = NAME_MAX;
1722         if (sbinfo->max_blocks) {
1723                 buf->f_blocks = sbinfo->max_blocks;
1724                 buf->f_bavail =
1725                 buf->f_bfree  = sbinfo->max_blocks -
1726                                 percpu_counter_sum(&sbinfo->used_blocks);
1727         }
1728         if (sbinfo->max_inodes) {
1729                 buf->f_files = sbinfo->max_inodes;
1730                 buf->f_ffree = sbinfo->free_inodes;
1731         }
1732         /* else leave those fields 0 like simple_statfs */
1733         return 0;
1734 }
1735
1736 /*
1737  * File creation. Allocate an inode, and we're done..
1738  */
1739 static int
1740 shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
1741 {
1742         struct inode *inode;
1743         int error = -ENOSPC;
1744
1745         inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
1746         if (inode) {
1747                 error = security_inode_init_security(inode, dir,
1748                                                      &dentry->d_name,
1749                                                      NULL, NULL);
1750                 if (error) {
1751                         if (error != -EOPNOTSUPP) {
1752                                 iput(inode);
1753                                 return error;
1754                         }
1755                 }
1756 #ifdef CONFIG_TMPFS_POSIX_ACL
1757                 error = generic_acl_init(inode, dir);
1758                 if (error) {
1759                         iput(inode);
1760                         return error;
1761                 }
1762 #else
1763                 error = 0;
1764 #endif
1765                 dir->i_size += BOGO_DIRENT_SIZE;
1766                 dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1767                 d_instantiate(dentry, inode);
1768                 dget(dentry); /* Extra count - pin the dentry in core */
1769         }
1770         return error;
1771 }
1772
1773 static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
1774 {
1775         int error;
1776
1777         if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
1778                 return error;
1779         inc_nlink(dir);
1780         return 0;
1781 }
1782
1783 static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
1784                 struct nameidata *nd)
1785 {
1786         return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
1787 }
1788
1789 /*
1790  * Link a file..
1791  */
1792 static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
1793 {
1794         struct inode *inode = old_dentry->d_inode;
1795         int ret;
1796
1797         /*
1798          * No ordinary (disk based) filesystem counts links as inodes;
1799          * but each new link needs a new dentry, pinning lowmem, and
1800          * tmpfs dentries cannot be pruned until they are unlinked.
1801          */
1802         ret = shmem_reserve_inode(inode->i_sb);
1803         if (ret)
1804                 goto out;
1805
1806         dir->i_size += BOGO_DIRENT_SIZE;
1807         inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1808         inc_nlink(inode);
1809         ihold(inode);   /* New dentry reference */
1810         dget(dentry);           /* Extra pinning count for the created dentry */
1811         d_instantiate(dentry, inode);
1812 out:
1813         return ret;
1814 }
1815
1816 static int shmem_unlink(struct inode *dir, struct dentry *dentry)
1817 {
1818         struct inode *inode = dentry->d_inode;
1819
1820         if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
1821                 shmem_free_inode(inode->i_sb);
1822
1823         dir->i_size -= BOGO_DIRENT_SIZE;
1824         inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1825         drop_nlink(inode);
1826         dput(dentry);   /* Undo the count from "create" - this does all the work */
1827         return 0;
1828 }
1829
1830 static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
1831 {
1832         if (!simple_empty(dentry))
1833                 return -ENOTEMPTY;
1834
1835         drop_nlink(dentry->d_inode);
1836         drop_nlink(dir);
1837         return shmem_unlink(dir, dentry);
1838 }
1839
1840 /*
1841  * The VFS layer already does all the dentry stuff for rename;
1842  * we just have to decrement the usage count for the target if
1843  * it exists, so that the VFS layer correctly frees it when it
1844  * gets overwritten.
1845  */
1846 static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
1847 {
1848         struct inode *inode = old_dentry->d_inode;
1849         int they_are_dirs = S_ISDIR(inode->i_mode);
1850
1851         if (!simple_empty(new_dentry))
1852                 return -ENOTEMPTY;
1853
1854         if (new_dentry->d_inode) {
1855                 (void) shmem_unlink(new_dir, new_dentry);
1856                 if (they_are_dirs) {
1857                         drop_nlink(new_dentry->d_inode);
1858                         drop_nlink(old_dir);
1859                 }
1860         } else if (they_are_dirs) {
1861                 drop_nlink(old_dir);
1862                 inc_nlink(new_dir);
1863         }
1864
1865         old_dir->i_size -= BOGO_DIRENT_SIZE;
1866         new_dir->i_size += BOGO_DIRENT_SIZE;
1867         old_dir->i_ctime = old_dir->i_mtime =
1868         new_dir->i_ctime = new_dir->i_mtime =
1869         inode->i_ctime = CURRENT_TIME;
1870         return 0;
1871 }
1872
1873 static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
1874 {
1875         int error;
1876         int len;
1877         struct inode *inode;
1878         struct page *page;
1879         char *kaddr;
1880         struct shmem_inode_info *info;
1881
1882         len = strlen(symname) + 1;
1883         if (len > PAGE_CACHE_SIZE)
1884                 return -ENAMETOOLONG;
1885
1886         inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
1887         if (!inode)
1888                 return -ENOSPC;
1889
1890         error = security_inode_init_security(inode, dir, &dentry->d_name,
1891                                              NULL, NULL);
1892         if (error) {
1893                 if (error != -EOPNOTSUPP) {
1894                         iput(inode);
1895                         return error;
1896                 }
1897                 error = 0;
1898         }
1899
1900         info = SHMEM_I(inode);
1901         inode->i_size = len-1;
1902         if (len <= SHORT_SYMLINK_LEN) {
1903                 info->symlink = kmemdup(symname, len, GFP_KERNEL);
1904                 if (!info->symlink) {
1905                         iput(inode);
1906                         return -ENOMEM;
1907                 }
1908                 inode->i_op = &shmem_short_symlink_operations;
1909         } else {
1910                 error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
1911                 if (error) {
1912                         iput(inode);
1913                         return error;
1914                 }
1915                 inode->i_mapping->a_ops = &shmem_aops;
1916                 inode->i_op = &shmem_symlink_inode_operations;
1917                 kaddr = kmap_atomic(page, KM_USER0);
1918                 memcpy(kaddr, symname, len);
1919                 kunmap_atomic(kaddr, KM_USER0);
1920                 set_page_dirty(page);
1921                 unlock_page(page);
1922                 page_cache_release(page);
1923         }
1924         dir->i_size += BOGO_DIRENT_SIZE;
1925         dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1926         d_instantiate(dentry, inode);
1927         dget(dentry);
1928         return 0;
1929 }
1930
1931 static void *shmem_follow_short_symlink(struct dentry *dentry, struct nameidata *nd)
1932 {
1933         nd_set_link(nd, SHMEM_I(dentry->d_inode)->symlink);
1934         return NULL;
1935 }
1936
1937 static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
1938 {
1939         struct page *page = NULL;
1940         int error = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
1941         nd_set_link(nd, error ? ERR_PTR(error) : kmap(page));
1942         if (page)
1943                 unlock_page(page);
1944         return page;
1945 }
1946
1947 static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
1948 {
1949         if (!IS_ERR(nd_get_link(nd))) {
1950                 struct page *page = cookie;
1951                 kunmap(page);
1952                 mark_page_accessed(page);
1953                 page_cache_release(page);
1954         }
1955 }
1956
1957 #ifdef CONFIG_TMPFS_XATTR
1958 /*
1959  * Superblocks without xattr inode operations may get some security.* xattr
1960  * support from the LSM "for free". As soon as we have any other xattrs
1961  * like ACLs, we also need to implement the security.* handlers at
1962  * filesystem level, though.
1963  */
1964
1965 static int shmem_xattr_get(struct dentry *dentry, const char *name,
1966                            void *buffer, size_t size)
1967 {
1968         struct shmem_inode_info *info;
1969         struct shmem_xattr *xattr;
1970         int ret = -ENODATA;
1971
1972         info = SHMEM_I(dentry->d_inode);
1973
1974         spin_lock(&info->lock);
1975         list_for_each_entry(xattr, &info->xattr_list, list) {
1976                 if (strcmp(name, xattr->name))
1977                         continue;
1978
1979                 ret = xattr->size;
1980                 if (buffer) {
1981                         if (size < xattr->size)
1982                                 ret = -ERANGE;
1983                         else
1984                                 memcpy(buffer, xattr->value, xattr->size);
1985                 }
1986                 break;
1987         }
1988         spin_unlock(&info->lock);
1989         return ret;
1990 }
1991
1992 static int shmem_xattr_set(struct dentry *dentry, const char *name,
1993                            const void *value, size_t size, int flags)
1994 {
1995         struct inode *inode = dentry->d_inode;
1996         struct shmem_inode_info *info = SHMEM_I(inode);
1997         struct shmem_xattr *xattr;
1998         struct shmem_xattr *new_xattr = NULL;
1999         size_t len;
2000         int err = 0;
2001
2002         /* value == NULL means remove */
2003         if (value) {
2004                 /* wrap around? */
2005                 len = sizeof(*new_xattr) + size;
2006                 if (len <= sizeof(*new_xattr))
2007                         return -ENOMEM;
2008
2009                 new_xattr = kmalloc(len, GFP_KERNEL);
2010                 if (!new_xattr)
2011                         return -ENOMEM;
2012
2013                 new_xattr->name = kstrdup(name, GFP_KERNEL);
2014                 if (!new_xattr->name) {
2015                         kfree(new_xattr);
2016                         return -ENOMEM;
2017                 }
2018
2019                 new_xattr->size = size;
2020                 memcpy(new_xattr->value, value, size);
2021         }
2022
2023         spin_lock(&info->lock);
2024         list_for_each_entry(xattr, &info->xattr_list, list) {
2025                 if (!strcmp(name, xattr->name)) {
2026                         if (flags & XATTR_CREATE) {
2027                                 xattr = new_xattr;
2028                                 err = -EEXIST;
2029                         } else if (new_xattr) {
2030                                 list_replace(&xattr->list, &new_xattr->list);
2031                         } else {
2032                                 list_del(&xattr->list);
2033                         }
2034                         goto out;
2035                 }
2036         }
2037         if (flags & XATTR_REPLACE) {
2038                 xattr = new_xattr;
2039                 err = -ENODATA;
2040         } else {
2041                 list_add(&new_xattr->list, &info->xattr_list);
2042                 xattr = NULL;
2043         }
2044 out:
2045         spin_unlock(&info->lock);
2046         if (xattr)
2047                 kfree(xattr->name);
2048         kfree(xattr);
2049         return err;
2050 }
2051
2052 static const struct xattr_handler *shmem_xattr_handlers[] = {
2053 #ifdef CONFIG_TMPFS_POSIX_ACL
2054         &generic_acl_access_handler,
2055         &generic_acl_default_handler,
2056 #endif
2057         NULL
2058 };
2059
2060 static int shmem_xattr_validate(const char *name)
2061 {
2062         struct { const char *prefix; size_t len; } arr[] = {
2063                 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
2064                 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
2065         };
2066         int i;
2067
2068         for (i = 0; i < ARRAY_SIZE(arr); i++) {
2069                 size_t preflen = arr[i].len;
2070                 if (strncmp(name, arr[i].prefix, preflen) == 0) {
2071                         if (!name[preflen])
2072                                 return -EINVAL;
2073                         return 0;
2074                 }
2075         }
2076         return -EOPNOTSUPP;
2077 }
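/*
 * Together with the handlers in this file, this means: "system.*" names
 * (such as system.posix_acl_access) are routed to the generic sb->s_xattr
 * handlers, "security.*" and "trusted.*" names are kept in the in-memory
 * xattr_list via shmem_xattr_get()/shmem_xattr_set(), and everything else,
 * notably "user.*", is refused with -EOPNOTSUPP here; a bare prefix with
 * nothing after it gets -EINVAL.  So, for example, "setfattr -n user.demo
 * -v 1 FILE" fails on tmpfs with "Operation not supported", while ACLs and
 * (for sufficiently privileged callers) security.* and trusted.* attributes
 * are accepted.
 */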
2078
2079 static ssize_t shmem_getxattr(struct dentry *dentry, const char *name,
2080                               void *buffer, size_t size)
2081 {
2082         int err;
2083
2084         /*
2085          * If this is a request for a synthetic attribute in the system.*
2086          * namespace use the generic infrastructure to resolve a handler
2087          * for it via sb->s_xattr.
2088          */
2089         if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
2090                 return generic_getxattr(dentry, name, buffer, size);
2091
2092         err = shmem_xattr_validate(name);
2093         if (err)
2094                 return err;
2095
2096         return shmem_xattr_get(dentry, name, buffer, size);
2097 }
2098
2099 static int shmem_setxattr(struct dentry *dentry, const char *name,
2100                           const void *value, size_t size, int flags)
2101 {
2102         int err;
2103
2104         /*
2105          * If this is a request for a synthetic attribute in the system.*
2106          * namespace use the generic infrastructure to resolve a handler
2107          * for it via sb->s_xattr.
2108          */
2109         if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
2110                 return generic_setxattr(dentry, name, value, size, flags);
2111
2112         err = shmem_xattr_validate(name);
2113         if (err)
2114                 return err;
2115
2116         if (size == 0)
2117                 value = "";  /* empty EA, do not remove */
2118
2119         return shmem_xattr_set(dentry, name, value, size, flags);
2120
2121 }
2122
2123 static int shmem_removexattr(struct dentry *dentry, const char *name)
2124 {
2125         int err;
2126
2127         /*
2128          * If this is a request for a synthetic attribute in the system.*
2129          * namespace use the generic infrastructure to resolve a handler
2130          * for it via sb->s_xattr.
2131          */
2132         if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
2133                 return generic_removexattr(dentry, name);
2134
2135         err = shmem_xattr_validate(name);
2136         if (err)
2137                 return err;
2138
2139         return shmem_xattr_set(dentry, name, NULL, 0, XATTR_REPLACE);
2140 }
2141
2142 static bool xattr_is_trusted(const char *name)
2143 {
2144         return !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN);
2145 }
2146
2147 static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
2148 {
2149         bool trusted = capable(CAP_SYS_ADMIN);
2150         struct shmem_xattr *xattr;
2151         struct shmem_inode_info *info;
2152         size_t used = 0;
2153
2154         info = SHMEM_I(dentry->d_inode);
2155
2156         spin_lock(&info->lock);
2157         list_for_each_entry(xattr, &info->xattr_list, list) {
2158                 size_t len;
2159
2160                 /* skip "trusted." attributes for unprivileged callers */
2161                 if (!trusted && xattr_is_trusted(xattr->name))
2162                         continue;
2163
2164                 len = strlen(xattr->name) + 1;
2165                 used += len;
2166                 if (buffer) {
2167                         if (size < used) {
2168                                 used = -ERANGE;
2169                                 break;
2170                         }
2171                         memcpy(buffer, xattr->name, len);
2172                         buffer += len;
2173                 }
2174         }
2175         spin_unlock(&info->lock);
2176
2177         return used;
2178 }
2179 #endif /* CONFIG_TMPFS_XATTR */
2180
2181 static const struct inode_operations shmem_short_symlink_operations = {
2182         .readlink       = generic_readlink,
2183         .follow_link    = shmem_follow_short_symlink,
2184 #ifdef CONFIG_TMPFS_XATTR
2185         .setxattr       = shmem_setxattr,
2186         .getxattr       = shmem_getxattr,
2187         .listxattr      = shmem_listxattr,
2188         .removexattr    = shmem_removexattr,
2189 #endif
2190 };
2191
2192 static const struct inode_operations shmem_symlink_inode_operations = {
2193         .readlink       = generic_readlink,
2194         .follow_link    = shmem_follow_link,
2195         .put_link       = shmem_put_link,
2196 #ifdef CONFIG_TMPFS_XATTR
2197         .setxattr       = shmem_setxattr,
2198         .getxattr       = shmem_getxattr,
2199         .listxattr      = shmem_listxattr,
2200         .removexattr    = shmem_removexattr,
2201 #endif
2202 };
2203
2204 static struct dentry *shmem_get_parent(struct dentry *child)
2205 {
2206         return ERR_PTR(-ESTALE);
2207 }
2208
2209 static int shmem_match(struct inode *ino, void *vfh)
2210 {
2211         __u32 *fh = vfh;
2212         __u64 inum = fh[2];
2213         inum = (inum << 32) | fh[1];
2214         return ino->i_ino == inum && fh[0] == ino->i_generation;
2215 }
2216
2217 static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
2218                 struct fid *fid, int fh_len, int fh_type)
2219 {
2220         struct inode *inode;
2221         struct dentry *dentry = NULL;
2222         u64 inum;
2223
2224         if (fh_len < 3)
2225                 return NULL;
2226
2227         inum = fid->raw[2];
2228         inum = (inum << 32) | fid->raw[1];
2229
2230         inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
2231                         shmem_match, fid->raw);
2232         if (inode) {
2233                 dentry = d_find_alias(inode);
2234                 iput(inode);
2235         }
2236
2237         return dentry;
2238 }
2239
2240 static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
2241                                 int connectable)
2242 {
2243         struct inode *inode = dentry->d_inode;
2244
2245         if (*len < 3) {
2246                 *len = 3;
2247                 return 255;
2248         }
2249
2250         if (inode_unhashed(inode)) {
2251                 /* Unfortunately insert_inode_hash is not idempotent,
2252                  * so as we hash inodes here rather than at creation
2253                  * time, we need a lock to ensure we only try
2254                  * to do it once
2255                  */
2256                 static DEFINE_SPINLOCK(lock);
2257                 spin_lock(&lock);
2258                 if (inode_unhashed(inode))
2259                         __insert_inode_hash(inode,
2260                                             inode->i_ino + inode->i_generation);
2261                 spin_unlock(&lock);
2262         }
2263
2264         fh[0] = inode->i_generation;
2265         fh[1] = inode->i_ino;
2266         fh[2] = ((__u64)inode->i_ino) >> 32;
2267
2268         *len = 3;
2269         return 1;
2270 }
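/*
 * In short, the export file handle used above is three 32-bit words:
 * fh[0] = i_generation, fh[1] = low 32 bits of i_ino, fh[2] = high 32 bits
 * of i_ino.  shmem_fh_to_dentry() rebuilds the 64-bit inode number from
 * fh[1] and fh[2] and looks the inode up with ilookup5(), using
 * i_ino + i_generation as the hash value to match the __insert_inode_hash()
 * call made here when the handle was first encoded.
 */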
2271
2272 static const struct export_operations shmem_export_ops = {
2273         .get_parent     = shmem_get_parent,
2274         .encode_fh      = shmem_encode_fh,
2275         .fh_to_dentry   = shmem_fh_to_dentry,
2276 };
2277
2278 static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
2279                                bool remount)
2280 {
2281         char *this_char, *value, *rest;
2282
2283         while (options != NULL) {
2284                 this_char = options;
2285                 for (;;) {
2286                         /*
2287                          * NUL-terminate this option: unfortunately,
2288                          * mount options form a comma-separated list,
2289                          * but mpol's nodelist may also contain commas.
2290                          */
2291                         options = strchr(options, ',');
2292                         if (options == NULL)
2293                                 break;
2294                         options++;
2295                         if (!isdigit(*options)) {
2296                                 options[-1] = '\0';
2297                                 break;
2298                         }
2299                 }
2300                 if (!*this_char)
2301                         continue;
2302                 if ((value = strchr(this_char,'=')) != NULL) {
2303                         *value++ = 0;
2304                 } else {
2305                         printk(KERN_ERR
2306                             "tmpfs: No value for mount option '%s'\n",
2307                             this_char);
2308                         return 1;
2309                 }
2310
2311                 if (!strcmp(this_char,"size")) {
2312                         unsigned long long size;
2313                         size = memparse(value,&rest);
2314                         if (*rest == '%') {
2315                                 size <<= PAGE_SHIFT;
2316                                 size *= totalram_pages;
2317                                 do_div(size, 100);
2318                                 rest++;
2319                         }
2320                         if (*rest)
2321                                 goto bad_val;
2322                         sbinfo->max_blocks =
2323                                 DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
2324                 } else if (!strcmp(this_char,"nr_blocks")) {
2325                         sbinfo->max_blocks = memparse(value, &rest);
2326                         if (*rest)
2327                                 goto bad_val;
2328                 } else if (!strcmp(this_char,"nr_inodes")) {
2329                         sbinfo->max_inodes = memparse(value, &rest);
2330                         if (*rest)
2331                                 goto bad_val;
2332                 } else if (!strcmp(this_char,"mode")) {
2333                         if (remount)
2334                                 continue;
2335                         sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
2336                         if (*rest)
2337                                 goto bad_val;
2338                 } else if (!strcmp(this_char,"uid")) {
2339                         if (remount)
2340                                 continue;
2341                         sbinfo->uid = simple_strtoul(value, &rest, 0);
2342                         if (*rest)
2343                                 goto bad_val;
2344                 } else if (!strcmp(this_char,"gid")) {
2345                         if (remount)
2346                                 continue;
2347                         sbinfo->gid = simple_strtoul(value, &rest, 0);
2348                         if (*rest)
2349                                 goto bad_val;
2350                 } else if (!strcmp(this_char,"mpol")) {
2351                         if (mpol_parse_str(value, &sbinfo->mpol, 1))
2352                                 goto bad_val;
2353                 } else {
2354                         printk(KERN_ERR "tmpfs: Bad mount option %s\n",
2355                                this_char);
2356                         return 1;
2357                 }
2358         }
2359         return 0;
2360
2361 bad_val:
2362         printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
2363                value, this_char);
2364         return 1;
2365
2366 }
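/*
 * For illustration, option strings this parser accepts (the values are
 * arbitrary examples):
 *
 *	size=512m,nr_inodes=10k,mode=1777
 *	size=50%,uid=1000,gid=1000
 *	mpol=interleave:0-1,size=1g
 *
 * "size" goes through memparse() (so k/m/g suffixes work) or, with a
 * trailing '%', is taken as a percentage of total RAM; "mode" is parsed as
 * octal; and an mpol nodelist may itself contain commas, which is why the
 * loop above only ends an option at a comma that is not followed by a digit.
 */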
2367
2368 static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
2369 {
2370         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2371         struct shmem_sb_info config = *sbinfo;
2372         unsigned long inodes;
2373         int error = -EINVAL;
2374
2375         config.mpol = NULL;
2376         if (shmem_parse_options(data, &config, true))
2377                 return error;
2378
2379         spin_lock(&sbinfo->stat_lock);
2380         inodes = sbinfo->max_inodes - sbinfo->free_inodes;
2381         if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0)
2382                 goto out;
2383         if (config.max_inodes < inodes)
2384                 goto out;
2385         /*
2386          * Those tests disallow limited->unlimited while any are in use;
2387          * but we must separately disallow unlimited->limited, because
2388          * in that case we have no record of how much is already in use.
2389          */
2390         if (config.max_blocks && !sbinfo->max_blocks)
2391                 goto out;
2392         if (config.max_inodes && !sbinfo->max_inodes)
2393                 goto out;
2394
2395         error = 0;
2396         sbinfo->max_blocks  = config.max_blocks;
2397         sbinfo->max_inodes  = config.max_inodes;
2398         sbinfo->free_inodes = config.max_inodes - inodes;
2399
2400         /*
2401          * Preserve previous mempolicy unless mpol remount option was specified.
2402          */
2403         if (config.mpol) {
2404                 mpol_put(sbinfo->mpol);
2405                 sbinfo->mpol = config.mpol;     /* transfers initial ref */
2406         }
2407 out:
2408         spin_unlock(&sbinfo->stat_lock);
2409         return error;
2410 }
2411
2412 static int shmem_show_options(struct seq_file *seq, struct vfsmount *vfs)
2413 {
2414         struct shmem_sb_info *sbinfo = SHMEM_SB(vfs->mnt_sb);
2415
2416         if (sbinfo->max_blocks != shmem_default_max_blocks())
2417                 seq_printf(seq, ",size=%luk",
2418                         sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10));
2419         if (sbinfo->max_inodes != shmem_default_max_inodes())
2420                 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
2421         if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
2422                 seq_printf(seq, ",mode=%03o", sbinfo->mode);
2423         if (sbinfo->uid != 0)
2424                 seq_printf(seq, ",uid=%u", sbinfo->uid);
2425         if (sbinfo->gid != 0)
2426                 seq_printf(seq, ",gid=%u", sbinfo->gid);
2427         shmem_show_mpol(seq, sbinfo->mpol);
2428         return 0;
2429 }
2430
2431 #define MFD_NAME_PREFIX "memfd:"
2432 #define MFD_NAME_PREFIX_LEN (sizeof(MFD_NAME_PREFIX) - 1)
2433 #define MFD_NAME_MAX_LEN (NAME_MAX - MFD_NAME_PREFIX_LEN)
2434
2435 #define MFD_ALL_FLAGS (MFD_CLOEXEC | MFD_ALLOW_SEALING)
2436
2437 SYSCALL_DEFINE2(memfd_create,
2438                 const char __user *, uname,
2439                 unsigned int, flags)
2440 {
2441         struct shmem_inode_info *info;
2442         struct file *file;
2443         int fd, error;
2444         char *name;
2445         long len;
2446
2447         if (flags & ~(unsigned int)MFD_ALL_FLAGS)
2448                 return -EINVAL;
2449
2450         /* length includes terminating zero */
2451         len = strnlen_user(uname, MFD_NAME_MAX_LEN + 1);
2452         if (len <= 0)
2453                 return -EFAULT;
2454         if (len > MFD_NAME_MAX_LEN + 1)
2455                 return -EINVAL;
2456
2457         name = kmalloc(len + MFD_NAME_PREFIX_LEN, GFP_TEMPORARY);
2458         if (!name)
2459                 return -ENOMEM;
2460
2461         strcpy(name, MFD_NAME_PREFIX);
2462         if (copy_from_user(&name[MFD_NAME_PREFIX_LEN], uname, len)) {
2463                 error = -EFAULT;
2464                 goto err_name;
2465         }
2466
2467         /* terminating-zero may have changed after strnlen_user() returned */
2468         if (name[len + MFD_NAME_PREFIX_LEN - 1]) {
2469                 error = -EFAULT;
2470                 goto err_name;
2471         }
2472
2473         fd = get_unused_fd_flags((flags & MFD_CLOEXEC) ? O_CLOEXEC : 0);
2474         if (fd < 0) {
2475                 error = fd;
2476                 goto err_name;
2477         }
2478
2479         file = shmem_file_setup(name, 0, VM_NORESERVE);
2480         if (IS_ERR(file)) {
2481                 error = PTR_ERR(file);
2482                 goto err_fd;
2483         }
2484         info = SHMEM_I(file_inode(file));
2485         file->f_mode |= FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE;
2486         file->f_flags |= O_RDWR | O_LARGEFILE;
2487         if (flags & MFD_ALLOW_SEALING)
2488                 info->seals &= ~F_SEAL_SEAL;
2489
2490         fd_install(fd, file);
2491         kfree(name);
2492         return fd;
2493
2494 err_fd:
2495         put_unused_fd(fd);
2496 err_name:
2497         kfree(name);
2498         return error;
2499 }
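/*
 * A note on the syscall above: the user-supplied name is only a debugging
 * aid -- it is prefixed with "memfd:" and becomes the dentry name, showing
 * up e.g. as "/memfd:foo (deleted)" in /proc/<pid>/maps, so it may be at
 * most NAME_MAX - 6 characters long.  MFD_CLOEXEC simply turns into
 * O_CLOEXEC on the returned fd, and MFD_ALLOW_SEALING clears F_SEAL_SEAL on
 * the new inode so that seals can be added later with fcntl(F_ADD_SEALS);
 * without it the file starts out permanently sealed against sealing.
 */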
2500
2501 #endif /* CONFIG_TMPFS */
2502
2503 static void shmem_put_super(struct super_block *sb)
2504 {
2505         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2506
2507         percpu_counter_destroy(&sbinfo->used_blocks);
2508         kfree(sbinfo);
2509         sb->s_fs_info = NULL;
2510 }
2511
2512 int shmem_fill_super(struct super_block *sb, void *data, int silent)
2513 {
2514         struct inode *inode;
2515         struct dentry *root;
2516         struct shmem_sb_info *sbinfo;
2517         int err = -ENOMEM;
2518
2519         /* Round up to L1_CACHE_BYTES to resist false sharing */
2520         sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
2521                                 L1_CACHE_BYTES), GFP_KERNEL);
2522         if (!sbinfo)
2523                 return -ENOMEM;
2524
2525         sbinfo->mode = S_IRWXUGO | S_ISVTX;
2526         sbinfo->uid = current_fsuid();
2527         sbinfo->gid = current_fsgid();
2528         sb->s_fs_info = sbinfo;
2529
2530 #ifdef CONFIG_TMPFS
2531         /*
2532          * By default we only allow half of the physical RAM per
2533          * tmpfs instance, limiting inodes to one per page of lowmem;
2534          * but the internal instance is left unlimited.
2535          */
2536         if (!(sb->s_flags & MS_NOUSER)) {
2537                 sbinfo->max_blocks = shmem_default_max_blocks();
2538                 sbinfo->max_inodes = shmem_default_max_inodes();
2539                 if (shmem_parse_options(data, sbinfo, false)) {
2540                         err = -EINVAL;
2541                         goto failed;
2542                 }
2543         }
2544         sb->s_export_op = &shmem_export_ops;
2545 #else
2546         sb->s_flags |= MS_NOUSER;
2547 #endif
2548
2549         spin_lock_init(&sbinfo->stat_lock);
2550         if (percpu_counter_init(&sbinfo->used_blocks, 0))
2551                 goto failed;
2552         sbinfo->free_inodes = sbinfo->max_inodes;
2553
2554         sb->s_maxbytes = MAX_LFS_FILESIZE;
2555         sb->s_blocksize = PAGE_CACHE_SIZE;
2556         sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
2557         sb->s_magic = TMPFS_MAGIC;
2558         sb->s_op = &shmem_ops;
2559         sb->s_time_gran = 1;
2560 #ifdef CONFIG_TMPFS_XATTR
2561         sb->s_xattr = shmem_xattr_handlers;
2562 #endif
2563 #ifdef CONFIG_TMPFS_POSIX_ACL
2564         sb->s_flags |= MS_POSIXACL;
2565 #endif
2566
2567         inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
2568         if (!inode)
2569                 goto failed;
2570         inode->i_uid = sbinfo->uid;
2571         inode->i_gid = sbinfo->gid;
2572         root = d_alloc_root(inode);
2573         if (!root)
2574                 goto failed_iput;
2575         sb->s_root = root;
2576         return 0;
2577
2578 failed_iput:
2579         iput(inode);
2580 failed:
2581         shmem_put_super(sb);
2582         return err;
2583 }
2584
2585 static struct kmem_cache *shmem_inode_cachep;
2586
2587 static struct inode *shmem_alloc_inode(struct super_block *sb)
2588 {
2589         struct shmem_inode_info *info;
2590         info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
2591         if (!info)
2592                 return NULL;
2593         return &info->vfs_inode;
2594 }
2595
2596 static void shmem_destroy_callback(struct rcu_head *head)
2597 {
2598         struct inode *inode = container_of(head, struct inode, i_rcu);
2599         INIT_LIST_HEAD(&inode->i_dentry);
2600         kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
2601 }
2602
2603 static void shmem_destroy_inode(struct inode *inode)
2604 {
2605         if ((inode->i_mode & S_IFMT) == S_IFREG)
2606                 mpol_free_shared_policy(&SHMEM_I(inode)->policy);
2607         call_rcu(&inode->i_rcu, shmem_destroy_callback);
2608 }
2609
2610 static void shmem_init_inode(void *foo)
2611 {
2612         struct shmem_inode_info *info = foo;
2613         inode_init_once(&info->vfs_inode);
2614 }
2615
2616 static int shmem_init_inodecache(void)
2617 {
2618         shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
2619                                 sizeof(struct shmem_inode_info),
2620                                 0, SLAB_PANIC, shmem_init_inode);
2621         return 0;
2622 }
2623
2624 static void shmem_destroy_inodecache(void)
2625 {
2626         kmem_cache_destroy(shmem_inode_cachep);
2627 }
2628
2629 static const struct address_space_operations shmem_aops = {
2630         .writepage      = shmem_writepage,
2631         .set_page_dirty = __set_page_dirty_no_writeback,
2632 #ifdef CONFIG_TMPFS
2633         .write_begin    = shmem_write_begin,
2634         .write_end      = shmem_write_end,
2635 #endif
2636         .migratepage    = migrate_page,
2637         .error_remove_page = generic_error_remove_page,
2638 };
2639
2640 static const struct file_operations shmem_file_operations = {
2641         .mmap           = shmem_mmap,
2642 #ifdef CONFIG_TMPFS
2643         .llseek         = generic_file_llseek,
2644         .read           = do_sync_read,
2645         .write          = do_sync_write,
2646         .aio_read       = shmem_file_aio_read,
2647         .aio_write      = generic_file_aio_write,
2648         .fsync          = noop_fsync,
2649         .splice_read    = shmem_file_splice_read,
2650         .splice_write   = generic_file_splice_write,
2651 #endif
2652 };
2653
2654 static const struct inode_operations shmem_inode_operations = {
2655         .setattr        = shmem_setattr,
2656         .truncate_range = shmem_truncate_range,
2657 #ifdef CONFIG_TMPFS_XATTR
2658         .setxattr       = shmem_setxattr,
2659         .getxattr       = shmem_getxattr,
2660         .listxattr      = shmem_listxattr,
2661         .removexattr    = shmem_removexattr,
2662 #endif
2663 };
2664
2665 static const struct inode_operations shmem_dir_inode_operations = {
2666 #ifdef CONFIG_TMPFS
2667         .create         = shmem_create,
2668         .lookup         = simple_lookup,
2669         .link           = shmem_link,
2670         .unlink         = shmem_unlink,
2671         .symlink        = shmem_symlink,
2672         .mkdir          = shmem_mkdir,
2673         .rmdir          = shmem_rmdir,
2674         .mknod          = shmem_mknod,
2675         .rename         = shmem_rename,
2676 #endif
2677 #ifdef CONFIG_TMPFS_XATTR
2678         .setxattr       = shmem_setxattr,
2679         .getxattr       = shmem_getxattr,
2680         .listxattr      = shmem_listxattr,
2681         .removexattr    = shmem_removexattr,
2682 #endif
2683 #ifdef CONFIG_TMPFS_POSIX_ACL
2684         .setattr        = shmem_setattr,
2685 #endif
2686 };
2687
2688 static const struct inode_operations shmem_special_inode_operations = {
2689 #ifdef CONFIG_TMPFS_XATTR
2690         .setxattr       = shmem_setxattr,
2691         .getxattr       = shmem_getxattr,
2692         .listxattr      = shmem_listxattr,
2693         .removexattr    = shmem_removexattr,
2694 #endif
2695 #ifdef CONFIG_TMPFS_POSIX_ACL
2696         .setattr        = shmem_setattr,
2697 #endif
2698 };
2699
2700 static const struct super_operations shmem_ops = {
2701         .alloc_inode    = shmem_alloc_inode,
2702         .destroy_inode  = shmem_destroy_inode,
2703 #ifdef CONFIG_TMPFS
2704         .statfs         = shmem_statfs,
2705         .remount_fs     = shmem_remount_fs,
2706         .show_options   = shmem_show_options,
2707 #endif
2708         .evict_inode    = shmem_evict_inode,
2709         .drop_inode     = generic_delete_inode,
2710         .put_super      = shmem_put_super,
2711 };
2712
2713 static const struct vm_operations_struct shmem_vm_ops = {
2714         .fault          = shmem_fault,
2715 #ifdef CONFIG_NUMA
2716         .set_policy     = shmem_set_policy,
2717         .get_policy     = shmem_get_policy,
2718 #endif
2719 };
2720
2721 static struct dentry *shmem_mount(struct file_system_type *fs_type,
2722         int flags, const char *dev_name, void *data)
2723 {
2724         return mount_nodev(fs_type, flags, data, shmem_fill_super);
2725 }
2726
2727 static struct file_system_type shmem_fs_type = {
2728         .owner          = THIS_MODULE,
2729         .name           = "tmpfs",
2730         .mount          = shmem_mount,
2731         .kill_sb        = kill_litter_super,
2732 };
2733
2734 int __init shmem_init(void)
2735 {
2736         int error;
2737
2738         error = bdi_init(&shmem_backing_dev_info);
2739         if (error)
2740                 goto out4;
2741
2742         error = shmem_init_inodecache();
2743         if (error)
2744                 goto out3;
2745
2746         error = register_filesystem(&shmem_fs_type);
2747         if (error) {
2748                 printk(KERN_ERR "Could not register tmpfs\n");
2749                 goto out2;
2750         }
2751
2752         shm_mnt = vfs_kern_mount(&shmem_fs_type, MS_NOUSER,
2753                                  shmem_fs_type.name, NULL);
2754         if (IS_ERR(shm_mnt)) {
2755                 error = PTR_ERR(shm_mnt);
2756                 printk(KERN_ERR "Could not kern_mount tmpfs\n");
2757                 goto out1;
2758         }
2759         return 0;
2760
2761 out1:
2762         unregister_filesystem(&shmem_fs_type);
2763 out2:
2764         shmem_destroy_inodecache();
2765 out3:
2766         bdi_destroy(&shmem_backing_dev_info);
2767 out4:
2768         shm_mnt = ERR_PTR(error);
2769         return error;
2770 }
2771
2772 #else /* !CONFIG_SHMEM */
2773
2774 /*
2775  * tiny-shmem: simple shmemfs and tmpfs using ramfs code
2776  *
2777  * This is intended for small systems where the benefits of the full
2778  * shmem code (swap-backed and resource-limited) are outweighed by
2779  * its complexity. On systems without swap this code should be
2780  * effectively equivalent, but much lighter weight.
2781  */
2782
2783 #include <linux/ramfs.h>
2784
2785 static struct file_system_type shmem_fs_type = {
2786         .name           = "tmpfs",
2787         .mount          = ramfs_mount,
2788         .kill_sb        = kill_litter_super,
2789 };
2790
2791 int __init shmem_init(void)
2792 {
2793         BUG_ON(register_filesystem(&shmem_fs_type) != 0);
2794
2795         shm_mnt = kern_mount(&shmem_fs_type);
2796         BUG_ON(IS_ERR(shm_mnt));
2797
2798         return 0;
2799 }
2800
2801 int shmem_unuse(swp_entry_t swap, struct page *page)
2802 {
2803         return 0;
2804 }
2805
2806 int shmem_lock(struct file *file, int lock, struct user_struct *user)
2807 {
2808         return 0;
2809 }
2810
2811 void shmem_unlock_mapping(struct address_space *mapping)
2812 {
2813 }
2814
2815 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
2816 {
2817         truncate_inode_pages_range(inode->i_mapping, lstart, lend);
2818 }
2819 EXPORT_SYMBOL_GPL(shmem_truncate_range);
2820
2821 int vmtruncate_range(struct inode *inode, loff_t lstart, loff_t lend)
2822 {
2823         /* Only CONFIG_SHMEM shmem.c ever supported i_op->truncate_range(). */
2824         return -ENOSYS;
2825 }
2826
2827 #define shmem_vm_ops                            generic_file_vm_ops
2828 #define shmem_file_operations                   ramfs_file_operations
2829 #define shmem_get_inode(sb, dir, mode, dev, flags)      ramfs_get_inode(sb, dir, mode, dev)
2830 #define shmem_acct_size(flags, size)            0
2831 #define shmem_unacct_size(flags, size)          do {} while (0)
2832
2833 #endif /* CONFIG_SHMEM */
2834
2835 /* common code */
2836
2837 /**
2838  * shmem_file_setup - get an unlinked file living in tmpfs
2839  * @name: name for dentry (to be seen in /proc/<pid>/maps)
2840  * @size: size to be set for the file
2841  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
2842  */
2843 struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
2844 {
2845         int error;
2846         struct file *file;
2847         struct inode *inode;
2848         struct path path;
2849         struct dentry *root;
2850         struct qstr this;
2851
2852         if (IS_ERR(shm_mnt))
2853                 return (void *)shm_mnt;
2854
2855         if (size < 0 || size > MAX_LFS_FILESIZE)
2856                 return ERR_PTR(-EINVAL);
2857
2858         if (shmem_acct_size(flags, size))
2859                 return ERR_PTR(-ENOMEM);
2860
2861         error = -ENOMEM;
2862         this.name = name;
2863         this.len = strlen(name);
2864         this.hash = 0; /* will go */
2865         root = shm_mnt->mnt_root;
2866         path.dentry = d_alloc(root, &this);
2867         if (!path.dentry)
2868                 goto put_memory;
2869         path.mnt = mntget(shm_mnt);
2870
2871         error = -ENOSPC;
2872         inode = shmem_get_inode(root->d_sb, NULL, S_IFREG | S_IRWXUGO, 0, flags);
2873         if (!inode)
2874                 goto put_dentry;
2875
2876         d_instantiate(path.dentry, inode);
2877         inode->i_size = size;
2878         clear_nlink(inode);     /* It is unlinked */
2879 #ifndef CONFIG_MMU
2880         error = ramfs_nommu_expand_for_mapping(inode, size);
2881         if (error)
2882                 goto put_dentry;
2883 #endif
2884
2885         error = -ENFILE;
2886         file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
2887                   &shmem_file_operations);
2888         if (!file)
2889                 goto put_dentry;
2890
2891         return file;
2892
2893 put_dentry:
2894         path_put(&path);
2895 put_memory:
2896         shmem_unacct_size(flags, size);
2897         return ERR_PTR(error);
2898 }
2899 EXPORT_SYMBOL_GPL(shmem_file_setup);
2900
2901 /**
2902  * shmem_zero_setup - setup a shared anonymous mapping
2903  * @vma: the vma to be mmapped, as prepared by do_mmap_pgoff
2904  */
2905 int shmem_zero_setup(struct vm_area_struct *vma)
2906 {
2907         struct file *file;
2908         loff_t size = vma->vm_end - vma->vm_start;
2909
2910         file = shmem_file_setup("dev/zero", size, vma->vm_flags);
2911         if (IS_ERR(file))
2912                 return PTR_ERR(file);
2913
2914         if (vma->vm_file)
2915                 fput(vma->vm_file);
2916         vma->vm_file = file;
2917         vma->vm_ops = &shmem_vm_ops;
2918         vma->vm_flags |= VM_CAN_NONLINEAR;
2919         return 0;
2920 }
2921 EXPORT_SYMBOL_GPL(shmem_zero_setup);
2922
2923 /**
2924  * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
2925  * @mapping:    the page's address_space
2926  * @index:      the page index
2927  * @gfp:        the page allocator flags to use if allocating
2928  *
2929  * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
2930  * with any new page allocations done using the specified allocation flags.
2931  * But read_cache_page_gfp() uses the ->readpage() method, which does not
2932  * suit tmpfs, since it may have pages in swapcache, and needs to find those
2933  * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
2934  *
2935  * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
2936  * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
2937  */
2938 struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
2939                                          pgoff_t index, gfp_t gfp)
2940 {
2941 #ifdef CONFIG_SHMEM
2942         struct inode *inode = mapping->host;
2943         struct page *page;
2944         int error;
2945
2946         BUG_ON(mapping->a_ops != &shmem_aops);
2947         error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE, gfp, NULL);
2948         if (error)
2949                 page = ERR_PTR(error);
2950         else
2951                 unlock_page(page);
2952         return page;
2953 #else
2954         /*
2955          * The tiny !SHMEM case uses ramfs without swap
2956          */
2957         return read_cache_page_gfp(mapping, index, gfp);
2958 #endif
2959 }
2960 EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
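/*
 * A usage sketch along the lines of the i915/ttm callers mentioned above
 * (obj_mapping and index are placeholders, not names from this file):
 *
 *	struct address_space *obj_mapping = inode->i_mapping;
 *	gfp_t gfp = mapping_gfp_mask(obj_mapping) |
 *		    __GFP_NORETRY | __GFP_NOWARN;
 *	struct page *page;
 *
 *	page = shmem_read_mapping_page_gfp(obj_mapping, index, gfp);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	... use or pin the page, then drop the reference ...
 *	page_cache_release(page);
 */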