/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *               2000 Transmeta Corp.
 *               2000-2001 Christoph Rohland
 *               2000-2001 SAP AG
 *               2002 Red Hat Inc.
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits,
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/generic_acl.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>

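/*
 * inode->i_blocks is counted in 512-byte sectors: each page cached here
 * accounts for PAGE_CACHE_SIZE/512 of them.
 */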
#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128

struct shmem_xattr {
        struct list_head list;  /* anchored by shmem_inode_info->xattr_list */
        char *name;             /* xattr name */
        size_t size;
        char value[0];
};

/* Flag allocation requirements to shmem_getpage */
enum sgp_type {
        SGP_READ,       /* don't exceed i_size, don't allocate page */
        SGP_CACHE,      /* don't exceed i_size, may allocate page */
        SGP_DIRTY,      /* like SGP_CACHE, but set new page dirty */
        SGP_WRITE,      /* may exceed i_size, may allocate page */
};

#ifdef CONFIG_TMPFS
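/*
 * Default mount limits: at most half of RAM for data pages, and no more
 * inodes than lowmem pages (nor than half of RAM).  For example, with
 * 4GiB of RAM and 4KiB pages, "size" defaults to 2GiB and "nr_inodes"
 * to min(lowmem pages, 524288).
 */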
static unsigned long shmem_default_max_blocks(void)
{
        return totalram_pages / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
        return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}
#endif

static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
        struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type);

static inline int shmem_getpage(struct inode *inode, pgoff_t index,
        struct page **pagep, enum sgp_type sgp, int *fault_type)
{
        return shmem_getpage_gfp(inode, index, pagep, sgp,
                        mapping_gfp_mask(inode->i_mapping), fault_type);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
        return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
        return (flags & VM_NORESERVE) ?
                0 : security_vm_enough_memory_kern(VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
        if (!(flags & VM_NORESERVE))
                vm_unacct_memory(VM_ACCT(size));
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
        return (flags & VM_NORESERVE) ?
                security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE)) : 0;
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
        if (flags & VM_NORESERVE)
                vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}

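/*
 * Example of the two schemes: a 3-page shared /dev/zero mapping (without
 * MAP_NORESERVE) is charged VM_ACCT(3 << PAGE_SHIFT) = 3 pages up front
 * by shmem_acct_size(), whereas a sparse tmpfs file is charged one page
 * at a time by shmem_acct_block(), only as pages are instantiated.
 */
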
static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info  __read_mostly = {
        .ra_pages       = 0,    /* No readahead */
        .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

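/*
 * sbinfo->free_inodes, protected by stat_lock, enforces the "nr_inodes"
 * mount option; max_inodes == 0 means no limit.  Note that each hard
 * link also consumes a reservation (see shmem_link() below).
 */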
static int shmem_reserve_inode(struct super_block *sb)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
        if (sbinfo->max_inodes) {
                spin_lock(&sbinfo->stat_lock);
                if (!sbinfo->free_inodes) {
                        spin_unlock(&sbinfo->stat_lock);
                        return -ENOSPC;
                }
                sbinfo->free_inodes--;
                spin_unlock(&sbinfo->stat_lock);
        }
        return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
        if (sbinfo->max_inodes) {
                spin_lock(&sbinfo->stat_lock);
                sbinfo->free_inodes++;
                spin_unlock(&sbinfo->stat_lock);
        }
}

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        long freed;

        freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
        if (freed > 0) {
                struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
                if (sbinfo->max_blocks)
                        percpu_counter_add(&sbinfo->used_blocks, -freed);
                info->alloced -= freed;
                inode->i_blocks -= freed * BLOCKS_PER_PAGE;
                shmem_unacct_blocks(info->flags, freed);
        }
}

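/*
 * When a page is swapped out, its slot in the mapping's radix tree is
 * not emptied but filled with a swap entry encoded as an exceptional
 * entry (swp_to_radix_entry/radix_to_swp_entry): the lookups below must
 * therefore be prepared to find either a page or such an entry.
 */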
/*
 * Replace item expected in radix tree by a new item, while holding tree lock.
 */
static int shmem_radix_tree_replace(struct address_space *mapping,
                        pgoff_t index, void *expected, void *replacement)
{
        void **pslot;
        void *item = NULL;

        VM_BUG_ON(!expected);
        pslot = radix_tree_lookup_slot(&mapping->page_tree, index);
        if (pslot)
                item = radix_tree_deref_slot_protected(pslot,
                                                        &mapping->tree_lock);
        if (item != expected)
                return -ENOENT;
        if (replacement)
                radix_tree_replace_slot(pslot, replacement);
        else
                radix_tree_delete(&mapping->page_tree, index);
        return 0;
}

/*
 * Like add_to_page_cache_locked, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct page *page,
                                   struct address_space *mapping,
                                   pgoff_t index, gfp_t gfp, void *expected)
{
        int error = 0;

        VM_BUG_ON(!PageLocked(page));
        VM_BUG_ON(!PageSwapBacked(page));

        if (!expected)
                error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
        if (!error) {
                page_cache_get(page);
                page->mapping = mapping;
                page->index = index;

                spin_lock_irq(&mapping->tree_lock);
                if (!expected)
                        error = radix_tree_insert(&mapping->page_tree,
                                                        index, page);
                else
                        error = shmem_radix_tree_replace(mapping, index,
                                                        expected, page);
                if (!error) {
                        mapping->nrpages++;
                        __inc_zone_page_state(page, NR_FILE_PAGES);
                        __inc_zone_page_state(page, NR_SHMEM);
                        spin_unlock_irq(&mapping->tree_lock);
                } else {
                        page->mapping = NULL;
                        spin_unlock_irq(&mapping->tree_lock);
                        page_cache_release(page);
                }
                if (!expected)
                        radix_tree_preload_end();
        }
        if (error)
                mem_cgroup_uncharge_cache_page(page);
        return error;
}

/*
 * Like delete_from_page_cache, but substitutes swap for page.
 */
static void shmem_delete_from_page_cache(struct page *page, void *radswap)
{
        struct address_space *mapping = page->mapping;
        int error;

        spin_lock_irq(&mapping->tree_lock);
        error = shmem_radix_tree_replace(mapping, page->index, page, radswap);
        page->mapping = NULL;
        mapping->nrpages--;
        __dec_zone_page_state(page, NR_FILE_PAGES);
        __dec_zone_page_state(page, NR_SHMEM);
        spin_unlock_irq(&mapping->tree_lock);
        page_cache_release(page);
        BUG_ON(error);
}

/*
 * Like find_get_pages, but collecting swap entries as well as pages.
 */
static unsigned shmem_find_get_pages_and_swap(struct address_space *mapping,
                                        pgoff_t start, unsigned int nr_pages,
                                        struct page **pages, pgoff_t *indices)
{
        unsigned int i;
        unsigned int ret;
        unsigned int nr_found;

        rcu_read_lock();
restart:
        nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
                                (void ***)pages, indices, start, nr_pages);
        ret = 0;
        for (i = 0; i < nr_found; i++) {
                struct page *page;
repeat:
                page = radix_tree_deref_slot((void **)pages[i]);
                if (unlikely(!page))
                        continue;
                if (radix_tree_exception(page)) {
                        if (radix_tree_deref_retry(page))
                                goto restart;
                        /*
                         * Otherwise, we must be storing a swap entry
                         * here as an exceptional entry: so return it
                         * without attempting to raise page count.
                         */
                        goto export;
                }
                if (!page_cache_get_speculative(page))
                        goto repeat;

                /* Has the page moved? */
                if (unlikely(page != *((void **)pages[i]))) {
                        page_cache_release(page);
                        goto repeat;
                }
export:
                indices[ret] = indices[i];
                pages[ret] = page;
                ret++;
        }
        if (unlikely(!ret && nr_found))
                goto restart;
        rcu_read_unlock();
        return ret;
}

/*
 * Remove swap entry from radix tree, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
                           pgoff_t index, void *radswap)
{
        int error;

        spin_lock_irq(&mapping->tree_lock);
        error = shmem_radix_tree_replace(mapping, index, radswap, NULL);
        spin_unlock_irq(&mapping->tree_lock);
        if (!error)
                free_swap_and_cache(radix_to_swp_entry(radswap));
        return error;
}

/*
 * Pagevec may contain swap entries, so shuffle up pages before releasing.
 */
static void shmem_deswap_pagevec(struct pagevec *pvec)
{
        int i, j;

        for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
                struct page *page = pvec->pages[i];
                if (!radix_tree_exceptional_entry(page))
                        pvec->pages[j++] = page;
        }
        pvec->nr = j;
}

/*
 * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
 */
void shmem_unlock_mapping(struct address_space *mapping)
{
        struct pagevec pvec;
        pgoff_t indices[PAGEVEC_SIZE];
        pgoff_t index = 0;

        pagevec_init(&pvec, 0);
        /*
         * Minor point, but we might as well stop if someone else SHM_LOCKs it.
         */
        while (!mapping_unevictable(mapping)) {
                /*
                 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
                 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
                 */
                pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
                                        PAGEVEC_SIZE, pvec.pages, indices);
                if (!pvec.nr)
                        break;
                index = indices[pvec.nr - 1] + 1;
                shmem_deswap_pagevec(&pvec);
                check_move_unevictable_pages(pvec.pages, pvec.nr);
                pagevec_release(&pvec);
                cond_resched();
        }
}

/*
 * Remove range of pages and swap entries from radix tree, and free them.
 */
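/*
 * Note: lend is the inclusive last byte of the range, and must lie at
 * the end of a page (see the BUG_ON below); pass lend == (loff_t)-1 to
 * truncate everything from lstart onwards, as shmem_setattr() and
 * shmem_evict_inode() do.
 */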
void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
        struct address_space *mapping = inode->i_mapping;
        struct shmem_inode_info *info = SHMEM_I(inode);
        pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
        pgoff_t end = (lend >> PAGE_CACHE_SHIFT);
        struct pagevec pvec;
        pgoff_t indices[PAGEVEC_SIZE];
        long nr_swaps_freed = 0;
        pgoff_t index;
        int i;

        BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));

        pagevec_init(&pvec, 0);
        index = start;
        while (index <= end) {
                pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
                        min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
                                                        pvec.pages, indices);
                if (!pvec.nr)
                        break;
                mem_cgroup_uncharge_start();
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];

                        index = indices[i];
                        if (index > end)
                                break;

                        if (radix_tree_exceptional_entry(page)) {
                                nr_swaps_freed += !shmem_free_swap(mapping,
                                                                index, page);
                                continue;
                        }

                        if (!trylock_page(page))
                                continue;
                        if (page->mapping == mapping) {
                                VM_BUG_ON(PageWriteback(page));
                                truncate_inode_page(mapping, page);
                        }
                        unlock_page(page);
                }
                shmem_deswap_pagevec(&pvec);
                pagevec_release(&pvec);
                mem_cgroup_uncharge_end();
                cond_resched();
                index++;
        }

        if (partial) {
                struct page *page = NULL;
                shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
                if (page) {
                        zero_user_segment(page, partial, PAGE_CACHE_SIZE);
                        set_page_dirty(page);
                        unlock_page(page);
                        page_cache_release(page);
                }
        }

        index = start;
        for ( ; ; ) {
                cond_resched();
                pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
                        min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
                                                        pvec.pages, indices);
                if (!pvec.nr) {
                        if (index == start)
                                break;
                        index = start;
                        continue;
                }
                if (index == start && indices[0] > end) {
                        shmem_deswap_pagevec(&pvec);
                        pagevec_release(&pvec);
                        break;
                }
                mem_cgroup_uncharge_start();
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];

                        index = indices[i];
                        if (index > end)
                                break;

                        if (radix_tree_exceptional_entry(page)) {
                                nr_swaps_freed += !shmem_free_swap(mapping,
                                                                index, page);
                                continue;
                        }

                        lock_page(page);
                        if (page->mapping == mapping) {
                                VM_BUG_ON(PageWriteback(page));
                                truncate_inode_page(mapping, page);
                        }
                        unlock_page(page);
                }
                shmem_deswap_pagevec(&pvec);
                pagevec_release(&pvec);
                mem_cgroup_uncharge_end();
                index++;
        }

        spin_lock(&info->lock);
        info->swapped -= nr_swaps_freed;
        shmem_recalc_inode(inode);
        spin_unlock(&info->lock);

        inode->i_ctime = inode->i_mtime = CURRENT_TIME;
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
{
        struct inode *inode = dentry->d_inode;
        int error;

        error = inode_change_ok(inode, attr);
        if (error)
                return error;

        if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
                loff_t oldsize = inode->i_size;
                loff_t newsize = attr->ia_size;

                if (newsize != oldsize) {
                        i_size_write(inode, newsize);
                        inode->i_ctime = inode->i_mtime = CURRENT_TIME;
                }
                if (newsize < oldsize) {
                        loff_t holebegin = round_up(newsize, PAGE_SIZE);
                        unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
                        shmem_truncate_range(inode, newsize, (loff_t)-1);
                        /* unmap again to remove racily COWed private pages */
                        unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
                }
        }

        setattr_copy(inode, attr);
#ifdef CONFIG_TMPFS_POSIX_ACL
        if (attr->ia_valid & ATTR_MODE)
                error = generic_acl_chmod(inode);
#endif
        return error;
}

static void shmem_evict_inode(struct inode *inode)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct shmem_xattr *xattr, *nxattr;

        if (inode->i_mapping->a_ops == &shmem_aops) {
                shmem_unacct_size(info->flags, inode->i_size);
                inode->i_size = 0;
                shmem_truncate_range(inode, 0, (loff_t)-1);
                if (!list_empty(&info->swaplist)) {
                        mutex_lock(&shmem_swaplist_mutex);
                        list_del_init(&info->swaplist);
                        mutex_unlock(&shmem_swaplist_mutex);
                }
        } else
                kfree(info->symlink);

        list_for_each_entry_safe(xattr, nxattr, &info->xattr_list, list) {
                kfree(xattr->name);
                kfree(xattr);
        }
        BUG_ON(inode->i_blocks);
        shmem_free_inode(inode->i_sb);
        end_writeback(inode);
}

/*
 * If swap found in inode, free it and move page from swapcache to filecache.
 */
static int shmem_unuse_inode(struct shmem_inode_info *info,
                             swp_entry_t swap, struct page *page)
{
        struct address_space *mapping = info->vfs_inode.i_mapping;
        void *radswap;
        pgoff_t index;
        int error;

        radswap = swp_to_radix_entry(swap);
        index = radix_tree_locate_item(&mapping->page_tree, radswap);
        if (index == -1)
                return 0;

        /*
         * Move _head_ to start search for next from here.
         * But be careful: shmem_evict_inode checks list_empty without taking
         * mutex, and there's an instant in list_move_tail when info->swaplist
         * would appear empty, if it were the only one on shmem_swaplist.
         */
        if (shmem_swaplist.next != &info->swaplist)
                list_move_tail(&shmem_swaplist, &info->swaplist);

        /*
         * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
         * but also to hold up shmem_evict_inode(): so inode cannot be freed
         * beneath us (pagelock doesn't help until the page is in pagecache).
         */
        error = shmem_add_to_page_cache(page, mapping, index,
                                                GFP_NOWAIT, radswap);
        /* which does mem_cgroup_uncharge_cache_page on error */

        if (error != -ENOMEM) {
                /*
                 * Truncation and eviction use free_swap_and_cache(), which
                 * only does trylock page: if we raced, best clean up here.
                 */
                delete_from_swap_cache(page);
                set_page_dirty(page);
                if (!error) {
                        spin_lock(&info->lock);
                        info->swapped--;
                        spin_unlock(&info->lock);
                        swap_free(swap);
                }
                error = 1;      /* not an error, but entry was found */
        }
        return error;
}

/*
 * Search through swapped inodes to find and replace swap by page.
 */
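/*
 * shmem_unuse() is called by swapoff's try_to_unuse(), with the page
 * locked and held in swapcache: on success the swap entry has been
 * replaced by the page in the owning shmem mapping's page cache.
 */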
int shmem_unuse(swp_entry_t swap, struct page *page)
{
        struct list_head *this, *next;
        struct shmem_inode_info *info;
        int found = 0;
        int error;

        /*
         * Charge page using GFP_KERNEL while we can wait, before taking
         * the shmem_swaplist_mutex which might hold up shmem_writepage().
         * Charged back to the user (not to caller) when swap account is used.
         */
        error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
        if (error)
                goto out;
        /* No radix_tree_preload: swap entry keeps a place for page in tree */

        mutex_lock(&shmem_swaplist_mutex);
        list_for_each_safe(this, next, &shmem_swaplist) {
                info = list_entry(this, struct shmem_inode_info, swaplist);
                if (info->swapped)
                        found = shmem_unuse_inode(info, swap, page);
                else
                        list_del_init(&info->swaplist);
                cond_resched();
                if (found)
                        break;
        }
        mutex_unlock(&shmem_swaplist_mutex);

        if (!found)
                mem_cgroup_uncharge_cache_page(page);
        if (found < 0)
                error = found;
out:
        unlock_page(page);
        page_cache_release(page);
        return error;
}

/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
        struct shmem_inode_info *info;
        struct address_space *mapping;
        struct inode *inode;
        swp_entry_t swap;
        pgoff_t index;

        BUG_ON(!PageLocked(page));
        mapping = page->mapping;
        index = page->index;
        inode = mapping->host;
        info = SHMEM_I(inode);
        if (info->flags & VM_LOCKED)
                goto redirty;
        if (!total_swap_pages)
                goto redirty;

        /*
         * shmem_backing_dev_info's capabilities prevent regular writeback or
         * sync from ever calling shmem_writepage; but a stacking filesystem
         * might use ->writepage of its underlying filesystem, in which case
         * tmpfs should write out to swap only in response to memory pressure,
         * and not for the writeback threads or sync.
         */
        if (!wbc->for_reclaim) {
                WARN_ON_ONCE(1);        /* Still happens? Tell us about it! */
                goto redirty;
        }
        swap = get_swap_page();
        if (!swap.val)
                goto redirty;

        /*
         * Add inode to shmem_unuse()'s list of swapped-out inodes,
         * if it's not already there.  Do it now before the page is
         * moved to swap cache, when its pagelock no longer protects
         * the inode from eviction.  But don't unlock the mutex until
         * we've incremented swapped, because shmem_unuse_inode() will
         * prune a !swapped inode from the swaplist under this mutex.
         */
        mutex_lock(&shmem_swaplist_mutex);
        if (list_empty(&info->swaplist))
                list_add_tail(&info->swaplist, &shmem_swaplist);

        if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
                swap_shmem_alloc(swap);
                shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));

                spin_lock(&info->lock);
                info->swapped++;
                shmem_recalc_inode(inode);
                spin_unlock(&info->lock);

                mutex_unlock(&shmem_swaplist_mutex);
                BUG_ON(page_mapped(page));
                swap_writepage(page, wbc);
                return 0;
        }

        mutex_unlock(&shmem_swaplist_mutex);
        swapcache_free(swap, NULL);
redirty:
        set_page_dirty(page);
        if (wbc->for_reclaim)
                return AOP_WRITEPAGE_ACTIVATE;  /* Return with page locked */
        unlock_page(page);
        return 0;
}

#ifdef CONFIG_NUMA
#ifdef CONFIG_TMPFS
static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
        char buffer[64];

        if (!mpol || mpol->mode == MPOL_DEFAULT)
                return;         /* show nothing */

        mpol_to_str(buffer, sizeof(buffer), mpol, 1);

        seq_printf(seq, ",mpol=%s", buffer);
}

static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
        struct mempolicy *mpol = NULL;
        if (sbinfo->mpol) {
                spin_lock(&sbinfo->stat_lock);  /* prevent replace/use races */
                mpol = sbinfo->mpol;
                mpol_get(mpol);
                spin_unlock(&sbinfo->stat_lock);
        }
        return mpol;
}
#endif /* CONFIG_TMPFS */

static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
                        struct shmem_inode_info *info, pgoff_t index)
{
        struct mempolicy mpol, *spol;
        struct vm_area_struct pvma;

        spol = mpol_cond_copy(&mpol,
                        mpol_shared_policy_lookup(&info->policy, index));

        /* Create a pseudo vma that just contains the policy */
        pvma.vm_start = 0;
        pvma.vm_pgoff = index;
        pvma.vm_ops = NULL;
        pvma.vm_policy = spol;
        return swapin_readahead(swap, gfp, &pvma, 0);
}

static struct page *shmem_alloc_page(gfp_t gfp,
                        struct shmem_inode_info *info, pgoff_t index)
{
        struct vm_area_struct pvma;

        /* Create a pseudo vma that just contains the policy */
        pvma.vm_start = 0;
        pvma.vm_pgoff = index;
        pvma.vm_ops = NULL;
        pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);

        /*
         * alloc_page_vma() will drop the shared policy reference
         */
        return alloc_page_vma(gfp, &pvma, 0);
}
#else /* !CONFIG_NUMA */
#ifdef CONFIG_TMPFS
static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
}
#endif /* CONFIG_TMPFS */

static inline struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
                        struct shmem_inode_info *info, pgoff_t index)
{
        return swapin_readahead(swap, gfp, NULL, 0);
}

static inline struct page *shmem_alloc_page(gfp_t gfp,
                        struct shmem_inode_info *info, pgoff_t index)
{
        return alloc_page(gfp);
}
#endif /* CONFIG_NUMA */

#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
        return NULL;
}
#endif

/*
 * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty, since we also free the swap
 * entry: a page cannot live in both the swap and page cache.
 */
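/*
 * The error unwinding at the bottom runs in reverse order of setup:
 * "trunc" undoes the page-cache insertion, "decused" the used_blocks
 * increment, "unacct" the block accounting; "failed" then decides
 * whether a race over the swap entry is worth another pass at "repeat".
 */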
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
        struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type)
{
        struct address_space *mapping = inode->i_mapping;
        struct shmem_inode_info *info;
        struct shmem_sb_info *sbinfo;
        struct page *page;
        swp_entry_t swap;
        int error;
        int once = 0;

        if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT))
                return -EFBIG;
repeat:
        swap.val = 0;
        page = find_lock_page(mapping, index);
        if (radix_tree_exceptional_entry(page)) {
                swap = radix_to_swp_entry(page);
                page = NULL;
        }

        if (sgp != SGP_WRITE &&
            ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
                error = -EINVAL;
                goto failed;
        }

        if (page || (sgp == SGP_READ && !swap.val)) {
                /*
                 * Once we can get the page lock, it must be uptodate:
                 * if there were an error in reading back from swap,
                 * the page would not be inserted into the filecache.
                 */
                BUG_ON(page && !PageUptodate(page));
                *pagep = page;
                return 0;
        }

        /*
         * Fast cache lookup did not find it:
         * bring it back from swap or allocate.
         */
        info = SHMEM_I(inode);
        sbinfo = SHMEM_SB(inode->i_sb);

        if (swap.val) {
                /* Look it up and read it in.. */
                page = lookup_swap_cache(swap);
                if (!page) {
                        /* here we actually do the io */
                        if (fault_type)
                                *fault_type |= VM_FAULT_MAJOR;
                        page = shmem_swapin(swap, gfp, info, index);
                        if (!page) {
                                error = -ENOMEM;
                                goto failed;
                        }
                }

                /* We have to do this with page locked to prevent races */
                lock_page(page);
                if (!PageUptodate(page)) {
                        error = -EIO;
                        goto failed;
                }
                wait_on_page_writeback(page);

                /* Someone may have already done it for us */
                if (page->mapping) {
                        if (page->mapping == mapping &&
                            page->index == index)
                                goto done;
                        error = -EEXIST;
                        goto failed;
                }

                error = mem_cgroup_cache_charge(page, current->mm,
                                                gfp & GFP_RECLAIM_MASK);
                if (!error)
                        error = shmem_add_to_page_cache(page, mapping, index,
                                                gfp, swp_to_radix_entry(swap));
                if (error)
                        goto failed;

                spin_lock(&info->lock);
                info->swapped--;
                shmem_recalc_inode(inode);
                spin_unlock(&info->lock);

                delete_from_swap_cache(page);
                set_page_dirty(page);
                swap_free(swap);

        } else {
                if (shmem_acct_block(info->flags)) {
                        error = -ENOSPC;
                        goto failed;
                }
                if (sbinfo->max_blocks) {
                        if (percpu_counter_compare(&sbinfo->used_blocks,
                                                sbinfo->max_blocks) >= 0) {
                                error = -ENOSPC;
                                goto unacct;
                        }
                        percpu_counter_inc(&sbinfo->used_blocks);
                }

                page = shmem_alloc_page(gfp, info, index);
                if (!page) {
                        error = -ENOMEM;
                        goto decused;
                }

                SetPageSwapBacked(page);
                __set_page_locked(page);
                error = mem_cgroup_cache_charge(page, current->mm,
                                                gfp & GFP_RECLAIM_MASK);
                if (!error)
                        error = shmem_add_to_page_cache(page, mapping, index,
                                                gfp, NULL);
                if (error)
                        goto decused;
                lru_cache_add_anon(page);

                spin_lock(&info->lock);
                info->alloced++;
                inode->i_blocks += BLOCKS_PER_PAGE;
                shmem_recalc_inode(inode);
                spin_unlock(&info->lock);

                clear_highpage(page);
                flush_dcache_page(page);
                SetPageUptodate(page);
                if (sgp == SGP_DIRTY)
                        set_page_dirty(page);
        }
done:
        /* Perhaps the file has been truncated since we checked */
        if (sgp != SGP_WRITE &&
            ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
                error = -EINVAL;
                goto trunc;
        }
        *pagep = page;
        return 0;

        /*
         * Error recovery.
         */
trunc:
        ClearPageDirty(page);
        delete_from_page_cache(page);
        spin_lock(&info->lock);
        info->alloced--;
        inode->i_blocks -= BLOCKS_PER_PAGE;
        spin_unlock(&info->lock);
decused:
        if (sbinfo->max_blocks)
                percpu_counter_add(&sbinfo->used_blocks, -1);
unacct:
        shmem_unacct_blocks(info->flags, 1);
failed:
        if (swap.val && error != -EINVAL) {
                struct page *test = find_get_page(mapping, index);
                if (test && !radix_tree_exceptional_entry(test))
                        page_cache_release(test);
                /* Have another try if the entry has changed */
                if (test != swp_to_radix_entry(swap))
                        error = -EEXIST;
        }
        if (page) {
                unlock_page(page);
                page_cache_release(page);
        }
        if (error == -ENOSPC && !once++) {
                info = SHMEM_I(inode);
                spin_lock(&info->lock);
                shmem_recalc_inode(inode);
                spin_unlock(&info->lock);
                goto repeat;
        }
        if (error == -EEXIST)
                goto repeat;
        return error;
}

static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
        int error;
        int ret = VM_FAULT_LOCKED;

        error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
        if (error)
                return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);

        if (ret & VM_FAULT_MAJOR) {
                count_vm_event(PGMAJFAULT);
                mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
        }
        return ret;
}

#ifdef CONFIG_NUMA
static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
{
        struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
        return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
}

static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
                                          unsigned long addr)
{
        struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
        pgoff_t index;

        index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
        return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
}
#endif

int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
        struct inode *inode = file->f_path.dentry->d_inode;
        struct shmem_inode_info *info = SHMEM_I(inode);
        int retval = -ENOMEM;

        spin_lock(&info->lock);
        if (lock && !(info->flags & VM_LOCKED)) {
                if (!user_shm_lock(inode->i_size, user))
                        goto out_nomem;
                info->flags |= VM_LOCKED;
                mapping_set_unevictable(file->f_mapping);
        }
        if (!lock && (info->flags & VM_LOCKED) && user) {
                user_shm_unlock(inode->i_size, user);
                info->flags &= ~VM_LOCKED;
                mapping_clear_unevictable(file->f_mapping);
        }
        retval = 0;

out_nomem:
        spin_unlock(&info->lock);
        return retval;
}

static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
        file_accessed(file);
        vma->vm_ops = &shmem_vm_ops;
        vma->vm_flags |= VM_CAN_NONLINEAR;
        return 0;
}

static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
                                     int mode, dev_t dev, unsigned long flags)
{
        struct inode *inode;
        struct shmem_inode_info *info;
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

        if (shmem_reserve_inode(sb))
                return NULL;

        inode = new_inode(sb);
        if (inode) {
                inode->i_ino = get_next_ino();
                inode_init_owner(inode, dir, mode);
                inode->i_blocks = 0;
                inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
                inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
                inode->i_generation = get_seconds();
                info = SHMEM_I(inode);
                /* Zero the shmem-private head of info, up to the embedded vfs_inode */
                memset(info, 0, (char *)inode - (char *)info);
                spin_lock_init(&info->lock);
                info->flags = flags & VM_NORESERVE;
                INIT_LIST_HEAD(&info->swaplist);
                INIT_LIST_HEAD(&info->xattr_list);
                cache_no_acl(inode);

                switch (mode & S_IFMT) {
                default:
                        inode->i_op = &shmem_special_inode_operations;
                        init_special_inode(inode, mode, dev);
                        break;
                case S_IFREG:
                        inode->i_mapping->a_ops = &shmem_aops;
                        inode->i_op = &shmem_inode_operations;
                        inode->i_fop = &shmem_file_operations;
                        mpol_shared_policy_init(&info->policy,
                                                 shmem_get_sbmpol(sbinfo));
                        break;
                case S_IFDIR:
                        inc_nlink(inode);
                        /* Some things misbehave if size == 0 on a directory */
                        inode->i_size = 2 * BOGO_DIRENT_SIZE;
                        inode->i_op = &shmem_dir_inode_operations;
                        inode->i_fop = &simple_dir_operations;
                        break;
                case S_IFLNK:
                        /*
                         * Must not load anything in the rbtree,
                         * mpol_free_shared_policy will not be called.
                         */
                        mpol_shared_policy_init(&info->policy, NULL);
                        break;
                }
        } else
                shmem_free_inode(sb);
        return inode;
}

#ifdef CONFIG_TMPFS
static const struct inode_operations shmem_symlink_inode_operations;
static const struct inode_operations shmem_short_symlink_operations;

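/*
 * A minimal write_begin/write_end pair: shmem_getpage() hands back the
 * page locked, and write_end dirties it, updates i_size and drops the
 * lock and reference; there is no on-disk metadata to prepare or commit.
 */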
static int
shmem_write_begin(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata)
{
        struct inode *inode = mapping->host;
        pgoff_t index = pos >> PAGE_CACHE_SHIFT;
        return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
}

static int
shmem_write_end(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned copied,
                        struct page *page, void *fsdata)
{
        struct inode *inode = mapping->host;

        if (pos + copied > inode->i_size)
                i_size_write(inode, pos + copied);

        set_page_dirty(page);
        unlock_page(page);
        page_cache_release(page);

        return copied;
}

static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
{
        struct inode *inode = filp->f_path.dentry->d_inode;
        struct address_space *mapping = inode->i_mapping;
        pgoff_t index;
        unsigned long offset;
        enum sgp_type sgp = SGP_READ;

        /*
         * Might this read be for a stacking filesystem?  Then when reading
         * holes of a sparse file, we actually need to allocate those pages,
         * and even mark them dirty, so it cannot exceed the max_blocks limit.
         */
        if (segment_eq(get_fs(), KERNEL_DS))
                sgp = SGP_DIRTY;

        index = *ppos >> PAGE_CACHE_SHIFT;
        offset = *ppos & ~PAGE_CACHE_MASK;

        for (;;) {
                struct page *page = NULL;
                pgoff_t end_index;
                unsigned long nr, ret;
                loff_t i_size = i_size_read(inode);

                end_index = i_size >> PAGE_CACHE_SHIFT;
                if (index > end_index)
                        break;
                if (index == end_index) {
                        nr = i_size & ~PAGE_CACHE_MASK;
                        if (nr <= offset)
                                break;
                }

                desc->error = shmem_getpage(inode, index, &page, sgp, NULL);
                if (desc->error) {
                        if (desc->error == -EINVAL)
                                desc->error = 0;
                        break;
                }
                if (page)
                        unlock_page(page);

                /*
                 * We must evaluate after, since reads (unlike writes)
                 * are called without i_mutex protection against truncate
                 */
                nr = PAGE_CACHE_SIZE;
                i_size = i_size_read(inode);
                end_index = i_size >> PAGE_CACHE_SHIFT;
                if (index == end_index) {
                        nr = i_size & ~PAGE_CACHE_MASK;
                        if (nr <= offset) {
                                if (page)
                                        page_cache_release(page);
                                break;
                        }
                }
                nr -= offset;

                if (page) {
                        /*
                         * If users can be writing to this page using arbitrary
                         * virtual addresses, take care about potential aliasing
                         * before reading the page on the kernel side.
                         */
                        if (mapping_writably_mapped(mapping))
                                flush_dcache_page(page);
                        /*
                         * Mark the page accessed if we read the beginning.
                         */
                        if (!offset)
                                mark_page_accessed(page);
                } else {
                        page = ZERO_PAGE(0);
                        page_cache_get(page);
                }

                /*
                 * Ok, we have the page, and it's up-to-date, so
                 * now we can copy it to user space...
                 *
                 * The actor routine returns how many bytes were actually used..
                 * NOTE! This may not be the same as how much of a user buffer
                 * we filled up (we may be padding etc), so we can only update
                 * "pos" here (the actor routine has to update the user buffer
                 * pointers and the remaining count).
                 */
                ret = actor(desc, page, offset, nr);
                offset += ret;
                index += offset >> PAGE_CACHE_SHIFT;
                offset &= ~PAGE_CACHE_MASK;

                page_cache_release(page);
                if (ret != nr || !desc->count)
                        break;

                cond_resched();
        }

        *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
        file_accessed(filp);
}

static ssize_t shmem_file_aio_read(struct kiocb *iocb,
                const struct iovec *iov, unsigned long nr_segs, loff_t pos)
{
        struct file *filp = iocb->ki_filp;
        ssize_t retval;
        unsigned long seg;
        size_t count;
        loff_t *ppos = &iocb->ki_pos;

        retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
        if (retval)
                return retval;

        for (seg = 0; seg < nr_segs; seg++) {
                read_descriptor_t desc;

                desc.written = 0;
                desc.arg.buf = iov[seg].iov_base;
                desc.count = iov[seg].iov_len;
                if (desc.count == 0)
                        continue;
                desc.error = 0;
                do_shmem_file_read(filp, ppos, &desc, file_read_actor);
                retval += desc.written;
                if (desc.error) {
                        retval = retval ?: desc.error;
                        break;
                }
                if (desc.count > 0)
                        break;
        }
        return retval;
}

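/*
 * tmpfs clones the generic splice_read rather than using it directly,
 * presumably so that holes and swapped-out pages can be brought in via
 * shmem_getpage() instead of requiring a ->readpage method.
 */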
1346 static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
1347                                 struct pipe_inode_info *pipe, size_t len,
1348                                 unsigned int flags)
1349 {
1350         struct address_space *mapping = in->f_mapping;
1351         struct inode *inode = mapping->host;
1352         unsigned int loff, nr_pages, req_pages;
1353         struct page *pages[PIPE_DEF_BUFFERS];
1354         struct partial_page partial[PIPE_DEF_BUFFERS];
1355         struct page *page;
1356         pgoff_t index, end_index;
1357         loff_t isize, left;
1358         int error, page_nr;
1359         struct splice_pipe_desc spd = {
1360                 .pages = pages,
1361                 .partial = partial,
1362                 .flags = flags,
1363                 .ops = &page_cache_pipe_buf_ops,
1364                 .spd_release = spd_release_page,
1365         };
1366
1367         isize = i_size_read(inode);
1368         if (unlikely(*ppos >= isize))
1369                 return 0;
1370
1371         left = isize - *ppos;
1372         if (unlikely(left < len))
1373                 len = left;
1374
1375         if (splice_grow_spd(pipe, &spd))
1376                 return -ENOMEM;
1377
1378         index = *ppos >> PAGE_CACHE_SHIFT;
1379         loff = *ppos & ~PAGE_CACHE_MASK;
1380         req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1381         nr_pages = min(req_pages, pipe->buffers);
1382
1383         spd.nr_pages = find_get_pages_contig(mapping, index,
1384                                                 nr_pages, spd.pages);
1385         index += spd.nr_pages;
1386         error = 0;
1387
1388         while (spd.nr_pages < nr_pages) {
1389                 error = shmem_getpage(inode, index, &page, SGP_CACHE, NULL);
1390                 if (error)
1391                         break;
1392                 unlock_page(page);
1393                 spd.pages[spd.nr_pages++] = page;
1394                 index++;
1395         }
1396
1397         index = *ppos >> PAGE_CACHE_SHIFT;
1398         nr_pages = spd.nr_pages;
1399         spd.nr_pages = 0;
1400
1401         for (page_nr = 0; page_nr < nr_pages; page_nr++) {
1402                 unsigned int this_len;
1403
1404                 if (!len)
1405                         break;
1406
1407                 this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
1408                 page = spd.pages[page_nr];
1409
1410                 if (!PageUptodate(page) || page->mapping != mapping) {
1411                         error = shmem_getpage(inode, index, &page,
1412                                                         SGP_CACHE, NULL);
1413                         if (error)
1414                                 break;
1415                         unlock_page(page);
1416                         page_cache_release(spd.pages[page_nr]);
1417                         spd.pages[page_nr] = page;
1418                 }
1419
1420                 isize = i_size_read(inode);
1421                 end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
1422                 if (unlikely(!isize || index > end_index))
1423                         break;
1424
1425                 if (end_index == index) {
1426                         unsigned int plen;
1427
1428                         plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
1429                         if (plen <= loff)
1430                                 break;
1431
1432                         this_len = min(this_len, plen - loff);
1433                         len = this_len;
1434                 }
1435
1436                 spd.partial[page_nr].offset = loff;
1437                 spd.partial[page_nr].len = this_len;
1438                 len -= this_len;
1439                 loff = 0;
1440                 spd.nr_pages++;
1441                 index++;
1442         }
1443
1444         while (page_nr < nr_pages)
1445                 page_cache_release(spd.pages[page_nr++]);
1446
1447         if (spd.nr_pages)
1448                 error = splice_to_pipe(pipe, &spd);
1449
1450         splice_shrink_spd(pipe, &spd);
1451
1452         if (error > 0) {
1453                 *ppos += error;
1454                 file_accessed(in);
1455         }
1456         return error;
1457 }
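/*
 * Note on the splice path above: pages already resident are gathered in
 * one batch by find_get_pages_contig(); any holes are then filled by
 * shmem_getpage(SGP_CACHE), which can bring pages back from swap.  The
 * second loop only fills in the (offset, len) pairs in spd.partial[],
 * revalidating each page and rechecking i_size in case of a concurrent
 * truncate.
 */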
1458
1459 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
1460 {
1461         struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
1462
1463         buf->f_type = TMPFS_MAGIC;
1464         buf->f_bsize = PAGE_CACHE_SIZE;
1465         buf->f_namelen = NAME_MAX;
1466         if (sbinfo->max_blocks) {
1467                 buf->f_blocks = sbinfo->max_blocks;
1468                 buf->f_bavail =
1469                 buf->f_bfree  = sbinfo->max_blocks -
1470                                 percpu_counter_sum(&sbinfo->used_blocks);
1471         }
1472         if (sbinfo->max_inodes) {
1473                 buf->f_files = sbinfo->max_inodes;
1474                 buf->f_ffree = sbinfo->free_inodes;
1475         }
1476         /* else leave those fields 0 like simple_statfs */
1477         return 0;
1478 }
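/*
 * Note: the exact (and slower) percpu_counter_sum() is used here rather
 * than an approximate read, so df(1) reports a precise free-block count;
 * statfs should be a cold enough path for that to be an acceptable cost.
 */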
1479
1480 /*
1481  * File creation. Allocate an inode, and we're done.
1482  */
1483 static int
1484 shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
1485 {
1486         struct inode *inode;
1487         int error = -ENOSPC;
1488
1489         inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
1490         if (inode) {
1491                 error = security_inode_init_security(inode, dir,
1492                                                      &dentry->d_name,
1493                                                      NULL, NULL);
1494                 if (error) {
1495                         if (error != -EOPNOTSUPP) {
1496                                 iput(inode);
1497                                 return error;
1498                         }
1499                 }
1500 #ifdef CONFIG_TMPFS_POSIX_ACL
1501                 error = generic_acl_init(inode, dir);
1502                 if (error) {
1503                         iput(inode);
1504                         return error;
1505                 }
1506 #else
1507                 error = 0;
1508 #endif
1509                 dir->i_size += BOGO_DIRENT_SIZE;
1510                 dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1511                 d_instantiate(dentry, inode);
1512                 dget(dentry); /* Extra count - pin the dentry in core */
1513         }
1514         return error;
1515 }
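/*
 * Note: -EOPNOTSUPP from security_inode_init_security() only means the
 * active LSM attaches no xattr to new inodes, so it is deliberately
 * ignored above rather than failing the create.
 */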
1516
1517 static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
1518 {
1519         int error;
1520
1521         if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
1522                 return error;
1523         inc_nlink(dir);
1524         return 0;
1525 }
1526
1527 static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
1528                 struct nameidata *nd)
1529 {
1530         return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
1531 }
1532
1533 /*
1534  * Link a file.
1535  */
1536 static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
1537 {
1538         struct inode *inode = old_dentry->d_inode;
1539         int ret;
1540
1541         /*
1542          * No ordinary (disk based) filesystem counts links as inodes;
1543          * but each new link needs a new dentry, pinning lowmem, and
1544          * tmpfs dentries cannot be pruned until they are unlinked.
1545          */
1546         ret = shmem_reserve_inode(inode->i_sb);
1547         if (ret)
1548                 goto out;
1549
1550         dir->i_size += BOGO_DIRENT_SIZE;
1551         inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1552         inc_nlink(inode);
1553         ihold(inode);   /* New dentry reference */
1554         dget(dentry);           /* Extra pinning count for the created dentry */
1555         d_instantiate(dentry, inode);
1556 out:
1557         return ret;
1558 }
1559
1560 static int shmem_unlink(struct inode *dir, struct dentry *dentry)
1561 {
1562         struct inode *inode = dentry->d_inode;
1563
1564         if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
1565                 shmem_free_inode(inode->i_sb);
1566
1567         dir->i_size -= BOGO_DIRENT_SIZE;
1568         inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1569         drop_nlink(inode);
1570         dput(dentry);   /* Undo the count from "create" - this does all the work */
1571         return 0;
1572 }
1573
1574 static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
1575 {
1576         if (!simple_empty(dentry))
1577                 return -ENOTEMPTY;
1578
1579         drop_nlink(dentry->d_inode);
1580         drop_nlink(dir);
1581         return shmem_unlink(dir, dentry);
1582 }
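/*
 * shmem_rmdir() drops two link counts by hand: the victim's "." self
 * reference and the parent's link from "..".  The remaining link, the
 * name itself, is dropped by shmem_unlink() like any other entry.
 */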
1583
1584 /*
1585  * The VFS layer already does all the dentry stuff for rename;
1586  * we just have to decrement the usage count for the target if
1587  * it exists, so that the VFS layer correctly frees it when it
1588  * gets overwritten.
1589  */
1590 static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
1591 {
1592         struct inode *inode = old_dentry->d_inode;
1593         int they_are_dirs = S_ISDIR(inode->i_mode);
1594
1595         if (!simple_empty(new_dentry))
1596                 return -ENOTEMPTY;
1597
1598         if (new_dentry->d_inode) {
1599                 (void) shmem_unlink(new_dir, new_dentry);
1600                 if (they_are_dirs)
1601                         drop_nlink(old_dir);
1602         } else if (they_are_dirs) {
1603                 drop_nlink(old_dir);
1604                 inc_nlink(new_dir);
1605         }
1606
1607         old_dir->i_size -= BOGO_DIRENT_SIZE;
1608         new_dir->i_size += BOGO_DIRENT_SIZE;
1609         old_dir->i_ctime = old_dir->i_mtime =
1610         new_dir->i_ctime = new_dir->i_mtime =
1611         inode->i_ctime = CURRENT_TIME;
1612         return 0;
1613 }
1614
1615 static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
1616 {
1617         int error;
1618         int len;
1619         struct inode *inode;
1620         struct page *page;
1621         char *kaddr;
1622         struct shmem_inode_info *info;
1623
1624         len = strlen(symname) + 1;
1625         if (len > PAGE_CACHE_SIZE)
1626                 return -ENAMETOOLONG;
1627
1628         inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
1629         if (!inode)
1630                 return -ENOSPC;
1631
1632         error = security_inode_init_security(inode, dir, &dentry->d_name,
1633                                              NULL, NULL);
1634         if (error) {
1635                 if (error != -EOPNOTSUPP) {
1636                         iput(inode);
1637                         return error;
1638                 }
1639                 error = 0;
1640         }
1641
1642         info = SHMEM_I(inode);
1643         inode->i_size = len-1;
1644         if (len <= SHORT_SYMLINK_LEN) {
1645                 info->symlink = kmemdup(symname, len, GFP_KERNEL);
1646                 if (!info->symlink) {
1647                         iput(inode);
1648                         return -ENOMEM;
1649                 }
1650                 inode->i_op = &shmem_short_symlink_operations;
1651         } else {
1652                 error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
1653                 if (error) {
1654                         iput(inode);
1655                         return error;
1656                 }
1657                 inode->i_mapping->a_ops = &shmem_aops;
1658                 inode->i_op = &shmem_symlink_inode_operations;
1659                 kaddr = kmap_atomic(page, KM_USER0);
1660                 memcpy(kaddr, symname, len);
1661                 kunmap_atomic(kaddr, KM_USER0);
1662                 set_page_dirty(page);
1663                 unlock_page(page);
1664                 page_cache_release(page);
1665         }
1666         dir->i_size += BOGO_DIRENT_SIZE;
1667         dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1668         d_instantiate(dentry, inode);
1669         dget(dentry);
1670         return 0;
1671 }
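/*
 * Symlink targets of up to SHORT_SYMLINK_LEN bytes stay kmalloc'ed in
 * info->symlink and are resolved without touching the page cache;
 * longer targets live in page 0 of the inode's mapping and therefore
 * remain swappable like ordinary tmpfs data.
 */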
1672
1673 static void *shmem_follow_short_symlink(struct dentry *dentry, struct nameidata *nd)
1674 {
1675         nd_set_link(nd, SHMEM_I(dentry->d_inode)->symlink);
1676         return NULL;
1677 }
1678
1679 static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
1680 {
1681         struct page *page = NULL;
1682         int error = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
1683         nd_set_link(nd, error ? ERR_PTR(error) : kmap(page));
1684         if (page)
1685                 unlock_page(page);
1686         return page;
1687 }
1688
1689 static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
1690 {
1691         if (!IS_ERR(nd_get_link(nd))) {
1692                 struct page *page = cookie;
1693                 kunmap(page);
1694                 mark_page_accessed(page);
1695                 page_cache_release(page);
1696         }
1697 }
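/*
 * For long symlinks, shmem_follow_link() hands the kmap'ed page back as
 * the cookie, so shmem_put_link() can kunmap and release it once the
 * VFS has finished walking the link body.
 */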
1698
1699 #ifdef CONFIG_TMPFS_XATTR
1700 /*
1701  * Superblocks without xattr inode operations may get some security.* xattr
1702  * support from the LSM "for free". As soon as we have any other xattrs
1703  * like ACLs, we also need to implement the security.* handlers at
1704  * filesystem level, though.
1705  */
1706
1707 static int shmem_xattr_get(struct dentry *dentry, const char *name,
1708                            void *buffer, size_t size)
1709 {
1710         struct shmem_inode_info *info;
1711         struct shmem_xattr *xattr;
1712         int ret = -ENODATA;
1713
1714         info = SHMEM_I(dentry->d_inode);
1715
1716         spin_lock(&info->lock);
1717         list_for_each_entry(xattr, &info->xattr_list, list) {
1718                 if (strcmp(name, xattr->name))
1719                         continue;
1720
1721                 ret = xattr->size;
1722                 if (buffer) {
1723                         if (size < xattr->size)
1724                                 ret = -ERANGE;
1725                         else
1726                                 memcpy(buffer, xattr->value, xattr->size);
1727                 }
1728                 break;
1729         }
1730         spin_unlock(&info->lock);
1731         return ret;
1732 }
1733
1734 static int shmem_xattr_set(struct dentry *dentry, const char *name,
1735                            const void *value, size_t size, int flags)
1736 {
1737         struct inode *inode = dentry->d_inode;
1738         struct shmem_inode_info *info = SHMEM_I(inode);
1739         struct shmem_xattr *xattr;
1740         struct shmem_xattr *new_xattr = NULL;
1741         size_t len;
1742         int err = 0;
1743
1744         /* value == NULL means remove */
1745         if (value) {
1746                 /* reject size_t wrap around of sizeof(*new_xattr) + size */
1747                 len = sizeof(*new_xattr) + size;
1748                 if (len <= sizeof(*new_xattr))
1749                         return -ENOMEM;
1750
1751                 new_xattr = kmalloc(len, GFP_KERNEL);
1752                 if (!new_xattr)
1753                         return -ENOMEM;
1754
1755                 new_xattr->name = kstrdup(name, GFP_KERNEL);
1756                 if (!new_xattr->name) {
1757                         kfree(new_xattr);
1758                         return -ENOMEM;
1759                 }
1760
1761                 new_xattr->size = size;
1762                 memcpy(new_xattr->value, value, size);
1763         }
1764
1765         spin_lock(&info->lock);
1766         list_for_each_entry(xattr, &info->xattr_list, list) {
1767                 if (!strcmp(name, xattr->name)) {
1768                         if (flags & XATTR_CREATE) {
1769                                 xattr = new_xattr;
1770                                 err = -EEXIST;
1771                         } else if (new_xattr) {
1772                                 list_replace(&xattr->list, &new_xattr->list);
1773                         } else {
1774                                 list_del(&xattr->list);
1775                         }
1776                         goto out;
1777                 }
1778         }
1779         if (flags & XATTR_REPLACE) {
1780                 xattr = new_xattr;
1781                 err = -ENODATA;
1782         } else {
1783                 list_add(&new_xattr->list, &info->xattr_list);
1784                 xattr = NULL;
1785         }
1786 out:
1787         spin_unlock(&info->lock);
1788         if (xattr)
1789                 kfree(xattr->name);
1790         kfree(xattr);
1791         return err;
1792 }
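/*
 * shmem_xattr_set() allocates the replacement node before taking
 * info->lock, so the list walk never sleeps.  Removal is the
 * value == NULL case; its only caller, shmem_removexattr(), always
 * passes XATTR_REPLACE, which is why new_xattr cannot be NULL on the
 * list_add() path above.
 */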
1793
1794 static const struct xattr_handler *shmem_xattr_handlers[] = {
1795 #ifdef CONFIG_TMPFS_POSIX_ACL
1796         &generic_acl_access_handler,
1797         &generic_acl_default_handler,
1798 #endif
1799         NULL
1800 };
1801
1802 static int shmem_xattr_validate(const char *name)
1803 {
1804         struct { const char *prefix; size_t len; } arr[] = {
1805                 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
1806                 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
1807         };
1808         int i;
1809
1810         for (i = 0; i < ARRAY_SIZE(arr); i++) {
1811                 size_t preflen = arr[i].len;
1812                 if (strncmp(name, arr[i].prefix, preflen) == 0) {
1813                         if (!name[preflen])
1814                                 return -EINVAL;
1815                         return 0;
1816                 }
1817         }
1818         return -EOPNOTSUPP;
1819 }
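/*
 * Only "security.*" and "trusted.*" names reach the list-based store:
 * "system.*" is diverted to the sb->s_xattr handlers (ACLs) before
 * validation, and anything else, notably "user.*", gets -EOPNOTSUPP.
 */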
1820
1821 static ssize_t shmem_getxattr(struct dentry *dentry, const char *name,
1822                               void *buffer, size_t size)
1823 {
1824         int err;
1825
1826         /*
1827          * If this is a request for a synthetic attribute in the system.*
1828          * namespace use the generic infrastructure to resolve a handler
1829          * for it via sb->s_xattr.
1830          */
1831         if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
1832                 return generic_getxattr(dentry, name, buffer, size);
1833
1834         err = shmem_xattr_validate(name);
1835         if (err)
1836                 return err;
1837
1838         return shmem_xattr_get(dentry, name, buffer, size);
1839 }
1840
1841 static int shmem_setxattr(struct dentry *dentry, const char *name,
1842                           const void *value, size_t size, int flags)
1843 {
1844         int err;
1845
1846         /*
1847          * If this is a request for a synthetic attribute in the system.*
1848          * namespace use the generic infrastructure to resolve a handler
1849          * for it via sb->s_xattr.
1850          */
1851         if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
1852                 return generic_setxattr(dentry, name, value, size, flags);
1853
1854         err = shmem_xattr_validate(name);
1855         if (err)
1856                 return err;
1857
1858         if (size == 0)
1859                 value = "";  /* empty EA, do not remove */
1860
1861         return shmem_xattr_set(dentry, name, value, size, flags);
1862
1863 }
1864
1865 static int shmem_removexattr(struct dentry *dentry, const char *name)
1866 {
1867         int err;
1868
1869         /*
1870          * If this is a request for a synthetic attribute in the system.*
1871          * namespace use the generic infrastructure to resolve a handler
1872          * for it via sb->s_xattr.
1873          */
1874         if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
1875                 return generic_removexattr(dentry, name);
1876
1877         err = shmem_xattr_validate(name);
1878         if (err)
1879                 return err;
1880
1881         return shmem_xattr_set(dentry, name, NULL, 0, XATTR_REPLACE);
1882 }
1883
1884 static bool xattr_is_trusted(const char *name)
1885 {
1886         return !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN);
1887 }
1888
1889 static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
1890 {
1891         bool trusted = capable(CAP_SYS_ADMIN);
1892         struct shmem_xattr *xattr;
1893         struct shmem_inode_info *info;
1894         size_t used = 0;
1895
1896         info = SHMEM_I(dentry->d_inode);
1897
1898         spin_lock(&info->lock);
1899         list_for_each_entry(xattr, &info->xattr_list, list) {
1900                 size_t len;
1901
1902                 /* skip "trusted." attributes for unprivileged callers */
1903                 if (!trusted && xattr_is_trusted(xattr->name))
1904                         continue;
1905
1906                 len = strlen(xattr->name) + 1;
1907                 used += len;
1908                 if (buffer) {
1909                         if (size < used) {
1910                                 used = -ERANGE;
1911                                 break;
1912                         }
1913                         memcpy(buffer, xattr->name, len);
1914                         buffer += len;
1915                 }
1916         }
1917         spin_unlock(&info->lock);
1918
1919         return used;
1920 }
1921 #endif /* CONFIG_TMPFS_XATTR */
1922
1923 static const struct inode_operations shmem_short_symlink_operations = {
1924         .readlink       = generic_readlink,
1925         .follow_link    = shmem_follow_short_symlink,
1926 #ifdef CONFIG_TMPFS_XATTR
1927         .setxattr       = shmem_setxattr,
1928         .getxattr       = shmem_getxattr,
1929         .listxattr      = shmem_listxattr,
1930         .removexattr    = shmem_removexattr,
1931 #endif
1932 };
1933
1934 static const struct inode_operations shmem_symlink_inode_operations = {
1935         .readlink       = generic_readlink,
1936         .follow_link    = shmem_follow_link,
1937         .put_link       = shmem_put_link,
1938 #ifdef CONFIG_TMPFS_XATTR
1939         .setxattr       = shmem_setxattr,
1940         .getxattr       = shmem_getxattr,
1941         .listxattr      = shmem_listxattr,
1942         .removexattr    = shmem_removexattr,
1943 #endif
1944 };
1945
1946 static struct dentry *shmem_get_parent(struct dentry *child)
1947 {
1948         return ERR_PTR(-ESTALE);
1949 }
1950
1951 static int shmem_match(struct inode *ino, void *vfh)
1952 {
1953         __u32 *fh = vfh;
1954         __u64 inum = fh[2];
1955         inum = (inum << 32) | fh[1];
1956         return ino->i_ino == inum && fh[0] == ino->i_generation;
1957 }
1958
1959 static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
1960                 struct fid *fid, int fh_len, int fh_type)
1961 {
1962         struct inode *inode;
1963         struct dentry *dentry = NULL;
1964         u64 inum = fid->raw[2];
1965         inum = (inum << 32) | fid->raw[1];
1966
1967         if (fh_len < 3)
1968                 return NULL;
1969
1970         inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
1971                         shmem_match, fid->raw);
1972         if (inode) {
1973                 dentry = d_find_alias(inode);
1974                 iput(inode);
1975         }
1976
1977         return dentry;
1978 }
1979
1980 static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
1981                                 int connectable)
1982 {
1983         struct inode *inode = dentry->d_inode;
1984
1985         if (*len < 3) {
1986                 *len = 3;
1987                 return 255;
1988         }
1989
1990         if (inode_unhashed(inode)) {
1991                 /* Unfortunately insert_inode_hash is not idempotent,
1992                  * so as we hash inodes here rather than at creation
1993                  * time, we need a lock to ensure we only try
1994                  * to do it once.
1995                  */
1996                 static DEFINE_SPINLOCK(lock);
1997                 spin_lock(&lock);
1998                 if (inode_unhashed(inode))
1999                         __insert_inode_hash(inode,
2000                                             inode->i_ino + inode->i_generation);
2001                 spin_unlock(&lock);
2002         }
2003
2004         fh[0] = inode->i_generation;
2005         fh[1] = inode->i_ino;
2006         fh[2] = ((__u64)inode->i_ino) >> 32;
2007
2008         *len = 3;
2009         return 1;
2010 }
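/*
 * File handle layout: fh[0] is i_generation, fh[1] the low and fh[2]
 * the high 32 bits of i_ino.  Inodes are hashed lazily here, at
 * i_ino + i_generation, the same key shmem_fh_to_dentry() feeds to
 * ilookup5() when decoding; the 255 return above is the exportfs
 * convention for "handle buffer too small".
 */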
2011
2012 static const struct export_operations shmem_export_ops = {
2013         .get_parent     = shmem_get_parent,
2014         .encode_fh      = shmem_encode_fh,
2015         .fh_to_dentry   = shmem_fh_to_dentry,
2016 };
2017
2018 static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
2019                                bool remount)
2020 {
2021         char *this_char, *value, *rest;
2022
2023         while (options != NULL) {
2024                 this_char = options;
2025                 for (;;) {
2026                         /*
2027                          * NUL-terminate this option: unfortunately,
2028                          * mount options form a comma-separated list,
2029                          * but mpol's nodelist may also contain commas.
2030                          */
2031                         options = strchr(options, ',');
2032                         if (options == NULL)
2033                                 break;
2034                         options++;
2035                         if (!isdigit(*options)) {
2036                                 options[-1] = '\0';
2037                                 break;
2038                         }
2039                 }
2040                 if (!*this_char)
2041                         continue;
2042                 if ((value = strchr(this_char,'=')) != NULL) {
2043                         *value++ = 0;
2044                 } else {
2045                         printk(KERN_ERR
2046                             "tmpfs: No value for mount option '%s'\n",
2047                             this_char);
2048                         return 1;
2049                 }
2050
2051                 if (!strcmp(this_char,"size")) {
2052                         unsigned long long size;
2053                         size = memparse(value,&rest);
2054                         if (*rest == '%') {
2055                                 size <<= PAGE_SHIFT;
2056                                 size *= totalram_pages;
2057                                 do_div(size, 100);
2058                                 rest++;
2059                         }
2060                         if (*rest)
2061                                 goto bad_val;
2062                         sbinfo->max_blocks =
2063                                 DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
2064                 } else if (!strcmp(this_char,"nr_blocks")) {
2065                         sbinfo->max_blocks = memparse(value, &rest);
2066                         if (*rest)
2067                                 goto bad_val;
2068                 } else if (!strcmp(this_char,"nr_inodes")) {
2069                         sbinfo->max_inodes = memparse(value, &rest);
2070                         if (*rest)
2071                                 goto bad_val;
2072                 } else if (!strcmp(this_char,"mode")) {
2073                         if (remount)
2074                                 continue;
2075                         sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
2076                         if (*rest)
2077                                 goto bad_val;
2078                 } else if (!strcmp(this_char,"uid")) {
2079                         if (remount)
2080                                 continue;
2081                         sbinfo->uid = simple_strtoul(value, &rest, 0);
2082                         if (*rest)
2083                                 goto bad_val;
2084                 } else if (!strcmp(this_char,"gid")) {
2085                         if (remount)
2086                                 continue;
2087                         sbinfo->gid = simple_strtoul(value, &rest, 0);
2088                         if (*rest)
2089                                 goto bad_val;
2090                 } else if (!strcmp(this_char,"mpol")) {
2091                         if (mpol_parse_str(value, &sbinfo->mpol, 1))
2092                                 goto bad_val;
2093                 } else {
2094                         printk(KERN_ERR "tmpfs: Bad mount option %s\n",
2095                                this_char);
2096                         return 1;
2097                 }
2098         }
2099         return 0;
2100
2101 bad_val:
2102         printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
2103                value, this_char);
2104         return 1;
2105
2106 }
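/*
 * An illustrative mount line (values hypothetical) exercising the
 * options parsed above:
 *
 *	mount -t tmpfs -o size=50%,nr_inodes=10240,mode=1777,uid=1000 tmpfs /mnt
 *
 * "size" goes through memparse(), so k/m/g suffixes work as well, and
 * a trailing '%' is taken as a percentage of totalram_pages.
 */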
2107
2108 static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
2109 {
2110         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2111         struct shmem_sb_info config = *sbinfo;
2112         unsigned long inodes;
2113         int error = -EINVAL;
2114
2115         if (shmem_parse_options(data, &config, true))
2116                 return error;
2117
2118         spin_lock(&sbinfo->stat_lock);
2119         inodes = sbinfo->max_inodes - sbinfo->free_inodes;
2120         if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0)
2121                 goto out;
2122         if (config.max_inodes < inodes)
2123                 goto out;
2124         /*
2125          * Those tests disallow limited->unlimited while any are in use;
2126          * but we must separately disallow unlimited->limited, because
2127          * in that case we have no record of how much is already in use.
2128          */
2129         if (config.max_blocks && !sbinfo->max_blocks)
2130                 goto out;
2131         if (config.max_inodes && !sbinfo->max_inodes)
2132                 goto out;
2133
2134         error = 0;
2135         sbinfo->max_blocks  = config.max_blocks;
2136         sbinfo->max_inodes  = config.max_inodes;
2137         sbinfo->free_inodes = config.max_inodes - inodes;
2138
2139         mpol_put(sbinfo->mpol);
2140         sbinfo->mpol        = config.mpol;      /* transfers initial ref */
2141 out:
2142         spin_unlock(&sbinfo->stat_lock);
2143         return error;
2144 }
2145
2146 static int shmem_show_options(struct seq_file *seq, struct vfsmount *vfs)
2147 {
2148         struct shmem_sb_info *sbinfo = SHMEM_SB(vfs->mnt_sb);
2149
2150         if (sbinfo->max_blocks != shmem_default_max_blocks())
2151                 seq_printf(seq, ",size=%luk",
2152                         sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10));
2153         if (sbinfo->max_inodes != shmem_default_max_inodes())
2154                 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
2155         if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
2156                 seq_printf(seq, ",mode=%03o", sbinfo->mode);
2157         if (sbinfo->uid != 0)
2158                 seq_printf(seq, ",uid=%u", sbinfo->uid);
2159         if (sbinfo->gid != 0)
2160                 seq_printf(seq, ",gid=%u", sbinfo->gid);
2161         shmem_show_mpol(seq, sbinfo->mpol);
2162         return 0;
2163 }
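/*
 * shmem_show_options() emits only options which differ from the mount
 * defaults, keeping the /proc/mounts line for a plain tmpfs minimal.
 */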
2164 #endif /* CONFIG_TMPFS */
2165
2166 static void shmem_put_super(struct super_block *sb)
2167 {
2168         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2169
2170         percpu_counter_destroy(&sbinfo->used_blocks);
2171         kfree(sbinfo);
2172         sb->s_fs_info = NULL;
2173 }
2174
2175 int shmem_fill_super(struct super_block *sb, void *data, int silent)
2176 {
2177         struct inode *inode;
2178         struct dentry *root;
2179         struct shmem_sb_info *sbinfo;
2180         int err = -ENOMEM;
2181
2182         /* Round up to L1_CACHE_BYTES to resist false sharing */
2183         sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
2184                                 L1_CACHE_BYTES), GFP_KERNEL);
2185         if (!sbinfo)
2186                 return -ENOMEM;
2187
2188         sbinfo->mode = S_IRWXUGO | S_ISVTX;
2189         sbinfo->uid = current_fsuid();
2190         sbinfo->gid = current_fsgid();
2191         sb->s_fs_info = sbinfo;
2192
2193 #ifdef CONFIG_TMPFS
2194         /*
2195          * By default we only allow half of the physical RAM per
2196          * tmpfs instance, limiting inodes to one per page of lowmem;
2197          * but the internal instance is left unlimited.
2198          */
2199         if (!(sb->s_flags & MS_NOUSER)) {
2200                 sbinfo->max_blocks = shmem_default_max_blocks();
2201                 sbinfo->max_inodes = shmem_default_max_inodes();
2202                 if (shmem_parse_options(data, sbinfo, false)) {
2203                         err = -EINVAL;
2204                         goto failed;
2205                 }
2206         }
2207         sb->s_export_op = &shmem_export_ops;
2208 #else
2209         sb->s_flags |= MS_NOUSER;
2210 #endif
2211
2212         spin_lock_init(&sbinfo->stat_lock);
2213         if (percpu_counter_init(&sbinfo->used_blocks, 0))
2214                 goto failed;
2215         sbinfo->free_inodes = sbinfo->max_inodes;
2216
2217         sb->s_maxbytes = MAX_LFS_FILESIZE;
2218         sb->s_blocksize = PAGE_CACHE_SIZE;
2219         sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
2220         sb->s_magic = TMPFS_MAGIC;
2221         sb->s_op = &shmem_ops;
2222         sb->s_time_gran = 1;
2223 #ifdef CONFIG_TMPFS_XATTR
2224         sb->s_xattr = shmem_xattr_handlers;
2225 #endif
2226 #ifdef CONFIG_TMPFS_POSIX_ACL
2227         sb->s_flags |= MS_POSIXACL;
2228 #endif
2229
2230         inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
2231         if (!inode)
2232                 goto failed;
2233         inode->i_uid = sbinfo->uid;
2234         inode->i_gid = sbinfo->gid;
2235         root = d_alloc_root(inode);
2236         if (!root)
2237                 goto failed_iput;
2238         sb->s_root = root;
2239         return 0;
2240
2241 failed_iput:
2242         iput(inode);
2243 failed:
2244         shmem_put_super(sb);
2245         return err;
2246 }
2247
2248 static struct kmem_cache *shmem_inode_cachep;
2249
2250 static struct inode *shmem_alloc_inode(struct super_block *sb)
2251 {
2252         struct shmem_inode_info *info;
2253         info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
2254         if (!info)
2255                 return NULL;
2256         return &info->vfs_inode;
2257 }
2258
2259 static void shmem_destroy_callback(struct rcu_head *head)
2260 {
2261         struct inode *inode = container_of(head, struct inode, i_rcu);
2262         INIT_LIST_HEAD(&inode->i_dentry);
2263         kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
2264 }
2265
2266 static void shmem_destroy_inode(struct inode *inode)
2267 {
2268         if ((inode->i_mode & S_IFMT) == S_IFREG)
2269                 mpol_free_shared_policy(&SHMEM_I(inode)->policy);
2270         call_rcu(&inode->i_rcu, shmem_destroy_callback);
2271 }
2272
2273 static void shmem_init_inode(void *foo)
2274 {
2275         struct shmem_inode_info *info = foo;
2276         inode_init_once(&info->vfs_inode);
2277 }
2278
2279 static int shmem_init_inodecache(void)
2280 {
2281         shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
2282                                 sizeof(struct shmem_inode_info),
2283                                 0, SLAB_PANIC, shmem_init_inode);
2284         return 0;
2285 }
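/*
 * SLAB_PANIC makes kmem_cache_create() panic rather than return NULL,
 * which is why shmem_init_inodecache() can unconditionally return 0.
 */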
2286
2287 static void shmem_destroy_inodecache(void)
2288 {
2289         kmem_cache_destroy(shmem_inode_cachep);
2290 }
2291
2292 static const struct address_space_operations shmem_aops = {
2293         .writepage      = shmem_writepage,
2294         .set_page_dirty = __set_page_dirty_no_writeback,
2295 #ifdef CONFIG_TMPFS
2296         .write_begin    = shmem_write_begin,
2297         .write_end      = shmem_write_end,
2298 #endif
2299         .migratepage    = migrate_page,
2300         .error_remove_page = generic_error_remove_page,
2301 };
2302
2303 static const struct file_operations shmem_file_operations = {
2304         .mmap           = shmem_mmap,
2305 #ifdef CONFIG_TMPFS
2306         .llseek         = generic_file_llseek,
2307         .read           = do_sync_read,
2308         .write          = do_sync_write,
2309         .aio_read       = shmem_file_aio_read,
2310         .aio_write      = generic_file_aio_write,
2311         .fsync          = noop_fsync,
2312         .splice_read    = shmem_file_splice_read,
2313         .splice_write   = generic_file_splice_write,
2314 #endif
2315 };
2316
2317 static const struct inode_operations shmem_inode_operations = {
2318         .setattr        = shmem_setattr,
2319         .truncate_range = shmem_truncate_range,
2320 #ifdef CONFIG_TMPFS_XATTR
2321         .setxattr       = shmem_setxattr,
2322         .getxattr       = shmem_getxattr,
2323         .listxattr      = shmem_listxattr,
2324         .removexattr    = shmem_removexattr,
2325 #endif
2326 };
2327
2328 static const struct inode_operations shmem_dir_inode_operations = {
2329 #ifdef CONFIG_TMPFS
2330         .create         = shmem_create,
2331         .lookup         = simple_lookup,
2332         .link           = shmem_link,
2333         .unlink         = shmem_unlink,
2334         .symlink        = shmem_symlink,
2335         .mkdir          = shmem_mkdir,
2336         .rmdir          = shmem_rmdir,
2337         .mknod          = shmem_mknod,
2338         .rename         = shmem_rename,
2339 #endif
2340 #ifdef CONFIG_TMPFS_XATTR
2341         .setxattr       = shmem_setxattr,
2342         .getxattr       = shmem_getxattr,
2343         .listxattr      = shmem_listxattr,
2344         .removexattr    = shmem_removexattr,
2345 #endif
2346 #ifdef CONFIG_TMPFS_POSIX_ACL
2347         .setattr        = shmem_setattr,
2348 #endif
2349 };
2350
2351 static const struct inode_operations shmem_special_inode_operations = {
2352 #ifdef CONFIG_TMPFS_XATTR
2353         .setxattr       = shmem_setxattr,
2354         .getxattr       = shmem_getxattr,
2355         .listxattr      = shmem_listxattr,
2356         .removexattr    = shmem_removexattr,
2357 #endif
2358 #ifdef CONFIG_TMPFS_POSIX_ACL
2359         .setattr        = shmem_setattr,
2360 #endif
2361 };
2362
2363 static const struct super_operations shmem_ops = {
2364         .alloc_inode    = shmem_alloc_inode,
2365         .destroy_inode  = shmem_destroy_inode,
2366 #ifdef CONFIG_TMPFS
2367         .statfs         = shmem_statfs,
2368         .remount_fs     = shmem_remount_fs,
2369         .show_options   = shmem_show_options,
2370 #endif
2371         .evict_inode    = shmem_evict_inode,
2372         .drop_inode     = generic_delete_inode,
2373         .put_super      = shmem_put_super,
2374 };
2375
2376 static const struct vm_operations_struct shmem_vm_ops = {
2377         .fault          = shmem_fault,
2378 #ifdef CONFIG_NUMA
2379         .set_policy     = shmem_set_policy,
2380         .get_policy     = shmem_get_policy,
2381 #endif
2382 };
2383
2384 static struct dentry *shmem_mount(struct file_system_type *fs_type,
2385         int flags, const char *dev_name, void *data)
2386 {
2387         return mount_nodev(fs_type, flags, data, shmem_fill_super);
2388 }
2389
2390 static struct file_system_type shmem_fs_type = {
2391         .owner          = THIS_MODULE,
2392         .name           = "tmpfs",
2393         .mount          = shmem_mount,
2394         .kill_sb        = kill_litter_super,
2395 };
2396
2397 int __init shmem_init(void)
2398 {
2399         int error;
2400
2401         error = bdi_init(&shmem_backing_dev_info);
2402         if (error)
2403                 goto out4;
2404
2405         error = shmem_init_inodecache();
2406         if (error)
2407                 goto out3;
2408
2409         error = register_filesystem(&shmem_fs_type);
2410         if (error) {
2411                 printk(KERN_ERR "Could not register tmpfs\n");
2412                 goto out2;
2413         }
2414
2415         shm_mnt = vfs_kern_mount(&shmem_fs_type, MS_NOUSER,
2416                                  shmem_fs_type.name, NULL);
2417         if (IS_ERR(shm_mnt)) {
2418                 error = PTR_ERR(shm_mnt);
2419                 printk(KERN_ERR "Could not kern_mount tmpfs\n");
2420                 goto out1;
2421         }
2422         return 0;
2423
2424 out1:
2425         unregister_filesystem(&shmem_fs_type);
2426 out2:
2427         shmem_destroy_inodecache();
2428 out3:
2429         bdi_destroy(&shmem_backing_dev_info);
2430 out4:
2431         shm_mnt = ERR_PTR(error);
2432         return error;
2433 }
2434
2435 #else /* !CONFIG_SHMEM */
2436
2437 /*
2438  * tiny-shmem: simple shmemfs and tmpfs using ramfs code
2439  *
2440  * This is intended for small systems where the benefits of the full
2441  * shmem code (swap-backed and resource-limited) are outweighed by
2442  * its complexity. On systems without swap this code should be
2443  * effectively equivalent, but much lighter weight.
2444  */
2445
2446 #include <linux/ramfs.h>
2447
2448 static struct file_system_type shmem_fs_type = {
2449         .name           = "tmpfs",
2450         .mount          = ramfs_mount,
2451         .kill_sb        = kill_litter_super,
2452 };
2453
2454 int __init shmem_init(void)
2455 {
2456         BUG_ON(register_filesystem(&shmem_fs_type) != 0);
2457
2458         shm_mnt = kern_mount(&shmem_fs_type);
2459         BUG_ON(IS_ERR(shm_mnt));
2460
2461         return 0;
2462 }
2463
2464 int shmem_unuse(swp_entry_t swap, struct page *page)
2465 {
2466         return 0;
2467 }
2468
2469 int shmem_lock(struct file *file, int lock, struct user_struct *user)
2470 {
2471         return 0;
2472 }
2473
2474 void shmem_unlock_mapping(struct address_space *mapping)
2475 {
2476 }
2477
2478 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
2479 {
2480         truncate_inode_pages_range(inode->i_mapping, lstart, lend);
2481 }
2482 EXPORT_SYMBOL_GPL(shmem_truncate_range);
2483
2484 #define shmem_vm_ops                            generic_file_vm_ops
2485 #define shmem_file_operations                   ramfs_file_operations
2486 #define shmem_get_inode(sb, dir, mode, dev, flags)      ramfs_get_inode(sb, dir, mode, dev)
2487 #define shmem_acct_size(flags, size)            0
2488 #define shmem_unacct_size(flags, size)          do {} while (0)
2489
2490 #endif /* CONFIG_SHMEM */
2491
2492 /* common code */
2493
2494 /**
2495  * shmem_file_setup - get an unlinked file living in tmpfs
2496  * @name: name for dentry (to be seen in /proc/<pid>/maps)
2497  * @size: size to be set for the file
2498  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
2499  */
2500 struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
2501 {
2502         int error;
2503         struct file *file;
2504         struct inode *inode;
2505         struct path path;
2506         struct dentry *root;
2507         struct qstr this;
2508
2509         if (IS_ERR(shm_mnt))
2510                 return (void *)shm_mnt;
2511
2512         if (size < 0 || size > MAX_LFS_FILESIZE)
2513                 return ERR_PTR(-EINVAL);
2514
2515         if (shmem_acct_size(flags, size))
2516                 return ERR_PTR(-ENOMEM);
2517
2518         error = -ENOMEM;
2519         this.name = name;
2520         this.len = strlen(name);
2521         this.hash = 0; /* will go */
2522         root = shm_mnt->mnt_root;
2523         path.dentry = d_alloc(root, &this);
2524         if (!path.dentry)
2525                 goto put_memory;
2526         path.mnt = mntget(shm_mnt);
2527
2528         error = -ENOSPC;
2529         inode = shmem_get_inode(root->d_sb, NULL, S_IFREG | S_IRWXUGO, 0, flags);
2530         if (!inode)
2531                 goto put_dentry;
2532
2533         d_instantiate(path.dentry, inode);
2534         inode->i_size = size;
2535         clear_nlink(inode);     /* It is unlinked */
2536 #ifndef CONFIG_MMU
2537         error = ramfs_nommu_expand_for_mapping(inode, size);
2538         if (error)
2539                 goto put_dentry;
2540 #endif
2541
2542         error = -ENFILE;
2543         file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
2544                   &shmem_file_operations);
2545         if (!file)
2546                 goto put_dentry;
2547
2548         return file;
2549
2550 put_dentry:
2551         path_put(&path);
2552 put_memory:
2553         shmem_unacct_size(flags, size);
2554         return ERR_PTR(error);
2555 }
2556 EXPORT_SYMBOL_GPL(shmem_file_setup);
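/*
 * Illustrative call (name and size hypothetical): a driver wanting an
 * unlinked, swap-backed object could do
 *
 *	struct file *filp = shmem_file_setup("dev/myobj", 4096, VM_NORESERVE);
 *	if (IS_ERR(filp))
 *		return PTR_ERR(filp);
 *
 * The name appears only in /proc/<pid>/maps; no directory entry is
 * created, and the final fput() releases the inode.
 */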
2557
2558 /**
2559  * shmem_zero_setup - setup a shared anonymous mapping
2560  * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
2561  */
2562 int shmem_zero_setup(struct vm_area_struct *vma)
2563 {
2564         struct file *file;
2565         loff_t size = vma->vm_end - vma->vm_start;
2566
2567         file = shmem_file_setup("dev/zero", size, vma->vm_flags);
2568         if (IS_ERR(file))
2569                 return PTR_ERR(file);
2570
2571         if (vma->vm_file)
2572                 fput(vma->vm_file);
2573         vma->vm_file = file;
2574         vma->vm_ops = &shmem_vm_ops;
2575         vma->vm_flags |= VM_CAN_NONLINEAR;
2576         return 0;
2577 }
2578 EXPORT_SYMBOL_GPL(shmem_zero_setup);
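/*
 * shmem_zero_setup() is what backs MAP_SHARED | MAP_ANONYMOUS mappings:
 * each call creates its own unlinked tmpfs file named "dev/zero", so
 * unrelated mappings never share pages.
 */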
2579
2580 /**
2581  * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
2582  * @mapping:    the page's address_space
2583  * @index:      the page index
2584  * @gfp:        the page allocator flags to use if allocating
2585  *
2586  * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
2587  * with any new page allocations done using the specified allocation flags.
2588  * But read_cache_page_gfp() uses the ->readpage() method, which does not
2589  * suit tmpfs, since it may have pages in swapcache, and needs to find those
2590  * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
2591  *
2592  * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
2593  * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
2594  */
2595 struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
2596                                          pgoff_t index, gfp_t gfp)
2597 {
2598 #ifdef CONFIG_SHMEM
2599         struct inode *inode = mapping->host;
2600         struct page *page;
2601         int error;
2602
2603         BUG_ON(mapping->a_ops != &shmem_aops);
2604         error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE, gfp, NULL);
2605         if (error)
2606                 page = ERR_PTR(error);
2607         else
2608                 unlock_page(page);
2609         return page;
2610 #else
2611         /*
2612          * The tiny !SHMEM case uses ramfs without swap
2613          */
2614         return read_cache_page_gfp(mapping, index, gfp);
2615 #endif
2616 }
2617 EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
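/*
 * Illustrative call matching the i915 usage described above (the gfp
 * flags are an example, not a requirement):
 *
 *	page = shmem_read_mapping_page_gfp(mapping, index,
 *			mapping_gfp_mask(mapping) |
 *			__GFP_NORETRY | __GFP_NOWARN);
 *
 * On success the page comes back uptodate and unlocked, with a
 * reference held; on failure an ERR_PTR is returned.
 */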