tmpfs: refine shmem_file_splice_read
pandora-kernel.git: mm/shmem.c
/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *               2000 Transmeta Corp.
 *               2000-2001 Christoph Rohland
 *               2000-2001 SAP AG
 *               2002 Red Hat Inc.
 * Copyright (C) 2002-2005 Hugh Dickins.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/percpu_counter.h>
#include <linux/swap.h>

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/generic_acl.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>

#include <asm/uaccess.h>
#include <asm/div64.h>
#include <asm/pgtable.h>

/*
 * The maximum size of a shmem/tmpfs file is limited by the maximum size of
 * its triple-indirect swap vector - see illustration at shmem_swp_entry().
 *
 * With 4kB page size, maximum file size is just over 2TB on a 32-bit kernel,
 * but one eighth of that on a 64-bit kernel.  With 8kB page size, maximum
 * file size is just over 4TB on a 64-bit kernel, but 16TB on a 32-bit kernel,
 * MAX_LFS_FILESIZE being then more restrictive than swap vector layout.
 *
 * We use / and * instead of shifts in the definitions below, so that the swap
 * vector can be tested with small even values (e.g. 20) for ENTRIES_PER_PAGE.
 */
#define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
#define ENTRIES_PER_PAGEPAGE ((unsigned long long)ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)

#define SHMSWP_MAX_INDEX (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
#define SHMSWP_MAX_BYTES (SHMSWP_MAX_INDEX << PAGE_CACHE_SHIFT)

#define SHMEM_MAX_BYTES  min_t(unsigned long long, SHMSWP_MAX_BYTES, MAX_LFS_FILESIZE)
#define SHMEM_MAX_INDEX  ((unsigned long)((SHMEM_MAX_BYTES+1) >> PAGE_CACHE_SHIFT))

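/*
 * Illustrative arithmetic, for reference only (nothing here is used by
 * the code): with 4kB pages on a 32-bit kernel, sizeof(unsigned long)
 * is 4, so ENTRIES_PER_PAGE = 4096/4 = 1024 and ENTRIES_PER_PAGEPAGE =
 * 1024*1024.  Taking SHMEM_NR_DIRECT as 16 (as in the layout example
 * at shmem_swp_entry() below), SHMSWP_MAX_INDEX = 16 +
 * (1024*1024/2)*1025 = 537395216 pages, i.e. just over 2TB as stated
 * above.  On 64-bit, 8-byte swap entries halve ENTRIES_PER_PAGE,
 * giving roughly one eighth of that.
 */
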
#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* info->flags needs VM_flags to handle pagein/truncate races efficiently */
#define SHMEM_PAGEIN     VM_READ
#define SHMEM_TRUNCATE   VM_WRITE

/* Definition to limit shmem_truncate's steps between cond_rescheds */
#define LATENCY_LIMIT    64

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

struct shmem_xattr {
        struct list_head list;  /* anchored by shmem_inode_info->xattr_list */
        char *name;             /* xattr name */
        size_t size;
        char value[0];
};

/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
enum sgp_type {
        SGP_READ,       /* don't exceed i_size, don't allocate page */
        SGP_CACHE,      /* don't exceed i_size, may allocate page */
        SGP_DIRTY,      /* like SGP_CACHE, but set new page dirty */
        SGP_WRITE,      /* may exceed i_size, may allocate page */
};

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
        return totalram_pages / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
        return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}
#endif

static int shmem_getpage(struct inode *inode, unsigned long idx,
                         struct page **pagep, enum sgp_type sgp, int *type);

static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
{
        /*
         * The above definition of ENTRIES_PER_PAGE, and the use of
         * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
         * might be reconsidered if it ever diverges from PAGE_SIZE.
         *
         * Mobility flags are masked out as swap vectors cannot move
         */
        return alloc_pages((gfp_mask & ~GFP_MOVABLE_MASK) | __GFP_ZERO,
                                PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static inline void shmem_dir_free(struct page *page)
{
        __free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static struct page **shmem_dir_map(struct page *page)
{
        return (struct page **)kmap_atomic(page, KM_USER0);
}

static inline void shmem_dir_unmap(struct page **dir)
{
        kunmap_atomic(dir, KM_USER0);
}

static swp_entry_t *shmem_swp_map(struct page *page)
{
        return (swp_entry_t *)kmap_atomic(page, KM_USER1);
}

static inline void shmem_swp_balance_unmap(void)
{
        /*
         * When passing a pointer to an i_direct entry, to code which
         * also handles indirect entries and so will shmem_swp_unmap,
         * we must arrange for the preempt count to remain in balance.
         * What kmap_atomic of a lowmem page does depends on config
         * and architecture, so pretend to kmap_atomic some lowmem page.
         */
        (void) kmap_atomic(ZERO_PAGE(0), KM_USER1);
}

static inline void shmem_swp_unmap(swp_entry_t *entry)
{
        kunmap_atomic(entry, KM_USER1);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
        return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
        return (flags & VM_NORESERVE) ?
                0 : security_vm_enough_memory_kern(VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
        if (!(flags & VM_NORESERVE))
                vm_unacct_memory(VM_ACCT(size));
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
        return (flags & VM_NORESERVE) ?
                security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE)) : 0;
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
        if (flags & VM_NORESERVE)
                vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}

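/*
 * A concrete illustration of the split above: a 16MB shared anonymous
 * mapping is charged its full 16MB of commit up front, at
 * shmem_file_setup() time, via shmem_acct_size(); whereas a 16MB
 * sparse tmpfs file with a single page written is charged for just
 * that one page, block by block, via shmem_acct_block().
 */
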
static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info  __read_mostly = {
        .ra_pages       = 0,    /* No readahead */
        .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

static void shmem_free_blocks(struct inode *inode, long pages)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
        if (sbinfo->max_blocks) {
                percpu_counter_add(&sbinfo->used_blocks, -pages);
                inode->i_blocks -= pages*BLOCKS_PER_PAGE;
        }
}

static int shmem_reserve_inode(struct super_block *sb)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
        if (sbinfo->max_inodes) {
                spin_lock(&sbinfo->stat_lock);
                if (!sbinfo->free_inodes) {
                        spin_unlock(&sbinfo->stat_lock);
                        return -ENOSPC;
                }
                sbinfo->free_inodes--;
                spin_unlock(&sbinfo->stat_lock);
        }
        return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
        if (sbinfo->max_inodes) {
                spin_lock(&sbinfo->stat_lock);
                sbinfo->free_inodes++;
                spin_unlock(&sbinfo->stat_lock);
        }
}

/**
 * shmem_recalc_inode - recalculate the size of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        long freed;

        freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
        if (freed > 0) {
                info->alloced -= freed;
                shmem_unacct_blocks(info->flags, freed);
                shmem_free_blocks(inode, freed);
        }
}

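/*
 * For example (illustrative numbers only): if info->alloced is 10
 * while the inode now has 6 pages in pagecache and 2 swapped out,
 * reclaim must have dropped 2 clean hole pages behind our back;
 * alloced falls to 8 and those 2 blocks go back to the accounting
 * above.
 */
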
/**
 * shmem_swp_entry - find the swap vector position in the info structure
 * @info:  info structure for the inode
 * @index: index of the page to find
 * @page:  optional page to add to the structure. Has to be preset to
 *         all zeros
 *
 * If there is no space allocated yet it will return NULL when
 * page is NULL, else it will use the page for the needed block,
 * setting it to NULL on return to indicate that it has been used.
 *
 * The swap vector is organized the following way:
 *
 * There are SHMEM_NR_DIRECT entries directly stored in the
 * shmem_inode_info structure. So small files do not need an additional
 * allocation.
 *
 * For pages with index > SHMEM_NR_DIRECT there is the pointer
 * i_indirect which points to a page which holds in the first half
 * doubly indirect blocks, in the second half triple indirect blocks:
 *
 * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the
 * following layout (for SHMEM_NR_DIRECT == 16):
 *
 * i_indirect -> dir --> 16-19
 *            |      +-> 20-23
 *            |
 *            +-->dir2 --> 24-27
 *            |        +-> 28-31
 *            |        +-> 32-35
 *            |        +-> 36-39
 *            |
 *            +-->dir3 --> 40-43
 *                     +-> 44-47
 *                     +-> 48-51
 *                     +-> 52-55
 */
static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page)
{
        unsigned long offset;
        struct page **dir;
        struct page *subdir;

        if (index < SHMEM_NR_DIRECT) {
                shmem_swp_balance_unmap();
                return info->i_direct+index;
        }
        if (!info->i_indirect) {
                if (page) {
                        info->i_indirect = *page;
                        *page = NULL;
                }
                return NULL;                    /* need another page */
        }

        index -= SHMEM_NR_DIRECT;
        offset = index % ENTRIES_PER_PAGE;
        index /= ENTRIES_PER_PAGE;
        dir = shmem_dir_map(info->i_indirect);

        if (index >= ENTRIES_PER_PAGE/2) {
                index -= ENTRIES_PER_PAGE/2;
                dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE;
                index %= ENTRIES_PER_PAGE;
                subdir = *dir;
                if (!subdir) {
                        if (page) {
                                *dir = *page;
                                *page = NULL;
                        }
                        shmem_dir_unmap(dir);
                        return NULL;            /* need another page */
                }
                shmem_dir_unmap(dir);
                dir = shmem_dir_map(subdir);
        }

        dir += index;
        subdir = *dir;
        if (!subdir) {
                if (!page || !(subdir = *page)) {
                        shmem_dir_unmap(dir);
                        return NULL;            /* need a page */
                }
                *dir = subdir;
                *page = NULL;
        }
        shmem_dir_unmap(dir);
        return shmem_swp_map(subdir) + offset;
}

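/*
 * Worked example of the lookup above (illustrative; assumes the usual
 * 32-bit values ENTRIES_PER_PAGE == 1024, SHMEM_NR_DIRECT == 16): for
 * index 5000, index becomes 5000 - 16 = 4984, offset = 4984 % 1024 =
 * 888, index = 4984 / 1024 = 4.  Since 4 < ENTRIES_PER_PAGE/2 this is
 * a doubly indirect page: take the subdir page at slot 4 of
 * i_indirect's first half, and return the swp_entry_t at offset 888
 * within it.
 */
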
static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value)
{
        long incdec = value? 1: -1;

        entry->val = value;
        info->swapped += incdec;
        if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT) {
                struct page *page = kmap_atomic_to_page(entry);
                set_page_private(page, page_private(page) + incdec);
        }
}

/**
 * shmem_swp_alloc - get the position of the swap entry for the page.
 * @info:       info structure for the inode
 * @index:      index of the page to find
 * @sgp:        check and recheck i_size? skip allocation?
 *
 * If the entry does not exist, allocate it.
 */
static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp)
{
        struct inode *inode = &info->vfs_inode;
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
        struct page *page = NULL;
        swp_entry_t *entry;

        if (sgp != SGP_WRITE &&
            ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
                return ERR_PTR(-EINVAL);

        while (!(entry = shmem_swp_entry(info, index, &page))) {
                if (sgp == SGP_READ)
                        return shmem_swp_map(ZERO_PAGE(0));
                /*
                 * Test used_blocks against one less than max_blocks, since
                 * we have 1 data page (and perhaps indirect index pages) yet
                 * to allocate: it would be a waste to allocate an index page
                 * if we cannot allocate the data.
                 */
                if (sbinfo->max_blocks) {
                        if (percpu_counter_compare(&sbinfo->used_blocks,
                                                sbinfo->max_blocks - 1) >= 0)
                                return ERR_PTR(-ENOSPC);
                        percpu_counter_inc(&sbinfo->used_blocks);
                        inode->i_blocks += BLOCKS_PER_PAGE;
                }

                spin_unlock(&info->lock);
                page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping));
                spin_lock(&info->lock);

                if (!page) {
                        shmem_free_blocks(inode, 1);
                        return ERR_PTR(-ENOMEM);
                }
                if (sgp != SGP_WRITE &&
                    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
                        entry = ERR_PTR(-EINVAL);
                        break;
                }
                if (info->next_index <= index)
                        info->next_index = index + 1;
        }
        if (page) {
                /* another task gave its page, or truncated the file */
                shmem_free_blocks(inode, 1);
                shmem_dir_free(page);
        }
        if (info->next_index <= index && !IS_ERR(entry))
                info->next_index = index + 1;
        return entry;
}

/**
 * shmem_free_swp - free some swap entries in a directory
 * @dir:        pointer to the directory
 * @edir:       pointer after last entry of the directory
 * @punch_lock: pointer to spinlock when needed for the holepunch case
 */
static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir,
                                                spinlock_t *punch_lock)
{
        spinlock_t *punch_unlock = NULL;
        swp_entry_t *ptr;
        int freed = 0;

        for (ptr = dir; ptr < edir; ptr++) {
                if (ptr->val) {
                        if (unlikely(punch_lock)) {
                                punch_unlock = punch_lock;
                                punch_lock = NULL;
                                spin_lock(punch_unlock);
                                if (!ptr->val)
                                        continue;
                        }
                        free_swap_and_cache(*ptr);
                        *ptr = (swp_entry_t){0};
                        freed++;
                }
        }
        if (punch_unlock)
                spin_unlock(punch_unlock);
        return freed;
}

static int shmem_map_and_free_swp(struct page *subdir, int offset,
                int limit, struct page ***dir, spinlock_t *punch_lock)
{
        swp_entry_t *ptr;
        int freed = 0;

        ptr = shmem_swp_map(subdir);
        for (; offset < limit; offset += LATENCY_LIMIT) {
                int size = limit - offset;
                if (size > LATENCY_LIMIT)
                        size = LATENCY_LIMIT;
                freed += shmem_free_swp(ptr+offset, ptr+offset+size,
                                                        punch_lock);
                if (need_resched()) {
                        shmem_swp_unmap(ptr);
                        if (*dir) {
                                shmem_dir_unmap(*dir);
                                *dir = NULL;
                        }
                        cond_resched();
                        ptr = shmem_swp_map(subdir);
                }
        }
        shmem_swp_unmap(ptr);
        return freed;
}

static void shmem_free_pages(struct list_head *next)
{
        struct page *page;
        int freed = 0;

        do {
                page = container_of(next, struct page, lru);
                next = next->next;
                shmem_dir_free(page);
                freed++;
                if (freed >= LATENCY_LIMIT) {
                        cond_resched();
                        freed = 0;
                }
        } while (next);
}

void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        unsigned long idx;
        unsigned long size;
        unsigned long limit;
        unsigned long stage;
        unsigned long diroff;
        struct page **dir;
        struct page *topdir;
        struct page *middir;
        struct page *subdir;
        swp_entry_t *ptr;
        LIST_HEAD(pages_to_free);
        long nr_pages_to_free = 0;
        long nr_swaps_freed = 0;
        int offset;
        int freed;
        int punch_hole;
        spinlock_t *needs_lock;
        spinlock_t *punch_lock;
        unsigned long upper_limit;

        truncate_inode_pages_range(inode->i_mapping, start, end);

        inode->i_ctime = inode->i_mtime = CURRENT_TIME;
        idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        if (idx >= info->next_index)
                return;

        spin_lock(&info->lock);
        info->flags |= SHMEM_TRUNCATE;
        if (likely(end == (loff_t) -1)) {
                limit = info->next_index;
                upper_limit = SHMEM_MAX_INDEX;
                info->next_index = idx;
                needs_lock = NULL;
                punch_hole = 0;
        } else {
                if (end + 1 >= inode->i_size) { /* we may free a little more */
                        limit = (inode->i_size + PAGE_CACHE_SIZE - 1) >>
                                                        PAGE_CACHE_SHIFT;
                        upper_limit = SHMEM_MAX_INDEX;
                } else {
                        limit = (end + 1) >> PAGE_CACHE_SHIFT;
                        upper_limit = limit;
                }
                needs_lock = &info->lock;
                punch_hole = 1;
        }

        topdir = info->i_indirect;
        if (topdir && idx <= SHMEM_NR_DIRECT && !punch_hole) {
                info->i_indirect = NULL;
                nr_pages_to_free++;
                list_add(&topdir->lru, &pages_to_free);
        }
        spin_unlock(&info->lock);

        if (info->swapped && idx < SHMEM_NR_DIRECT) {
                ptr = info->i_direct;
                size = limit;
                if (size > SHMEM_NR_DIRECT)
                        size = SHMEM_NR_DIRECT;
                nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size, needs_lock);
        }

        /*
         * If there are no indirect blocks or we are punching a hole
         * below indirect blocks, nothing to be done.
         */
        if (!topdir || limit <= SHMEM_NR_DIRECT)
                goto done2;

        /*
         * The truncation case has already dropped info->lock, and we're safe
         * because i_size and next_index have already been lowered, preventing
         * access beyond.  But in the punch_hole case, we still need to take
         * the lock when updating the swap directory, because there might be
         * racing accesses by shmem_getpage(SGP_CACHE), shmem_unuse_inode or
         * shmem_writepage.  However, whenever we find we can remove a whole
         * directory page (not at the misaligned start or end of the range),
         * we first NULLify its pointer in the level above, and then have no
         * need to take the lock when updating its contents: needs_lock and
         * punch_lock (either pointing to info->lock or NULL) manage this.
         */

        upper_limit -= SHMEM_NR_DIRECT;
        limit -= SHMEM_NR_DIRECT;
        idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0;
        offset = idx % ENTRIES_PER_PAGE;
        idx -= offset;

        dir = shmem_dir_map(topdir);
        stage = ENTRIES_PER_PAGEPAGE/2;
        if (idx < ENTRIES_PER_PAGEPAGE/2) {
                middir = topdir;
                diroff = idx/ENTRIES_PER_PAGE;
        } else {
                dir += ENTRIES_PER_PAGE/2;
                dir += (idx - ENTRIES_PER_PAGEPAGE/2)/ENTRIES_PER_PAGEPAGE;
                while (stage <= idx)
                        stage += ENTRIES_PER_PAGEPAGE;
                middir = *dir;
                if (*dir) {
                        diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) %
                                ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE;
                        if (!diroff && !offset && upper_limit >= stage) {
                                if (needs_lock) {
                                        spin_lock(needs_lock);
                                        *dir = NULL;
                                        spin_unlock(needs_lock);
                                        needs_lock = NULL;
                                } else
                                        *dir = NULL;
                                nr_pages_to_free++;
                                list_add(&middir->lru, &pages_to_free);
                        }
                        shmem_dir_unmap(dir);
                        dir = shmem_dir_map(middir);
                } else {
                        diroff = 0;
                        offset = 0;
                        idx = stage;
                }
        }

        for (; idx < limit; idx += ENTRIES_PER_PAGE, diroff++) {
                if (unlikely(idx == stage)) {
                        shmem_dir_unmap(dir);
                        dir = shmem_dir_map(topdir) +
                            ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
                        while (!*dir) {
                                dir++;
                                idx += ENTRIES_PER_PAGEPAGE;
                                if (idx >= limit)
                                        goto done1;
                        }
                        stage = idx + ENTRIES_PER_PAGEPAGE;
                        middir = *dir;
                        if (punch_hole)
                                needs_lock = &info->lock;
                        if (upper_limit >= stage) {
                                if (needs_lock) {
                                        spin_lock(needs_lock);
                                        *dir = NULL;
                                        spin_unlock(needs_lock);
                                        needs_lock = NULL;
                                } else
                                        *dir = NULL;
                                nr_pages_to_free++;
                                list_add(&middir->lru, &pages_to_free);
                        }
                        shmem_dir_unmap(dir);
                        cond_resched();
                        dir = shmem_dir_map(middir);
                        diroff = 0;
                }
                punch_lock = needs_lock;
                subdir = dir[diroff];
                if (subdir && !offset && upper_limit-idx >= ENTRIES_PER_PAGE) {
                        if (needs_lock) {
                                spin_lock(needs_lock);
                                dir[diroff] = NULL;
                                spin_unlock(needs_lock);
                                punch_lock = NULL;
                        } else
                                dir[diroff] = NULL;
                        nr_pages_to_free++;
                        list_add(&subdir->lru, &pages_to_free);
                }
                if (subdir && page_private(subdir) /* has swap entries */) {
                        size = limit - idx;
                        if (size > ENTRIES_PER_PAGE)
                                size = ENTRIES_PER_PAGE;
                        freed = shmem_map_and_free_swp(subdir,
                                        offset, size, &dir, punch_lock);
                        if (!dir)
                                dir = shmem_dir_map(middir);
                        nr_swaps_freed += freed;
                        if (offset || punch_lock) {
                                spin_lock(&info->lock);
                                set_page_private(subdir,
                                        page_private(subdir) - freed);
                                spin_unlock(&info->lock);
                        } else
                                BUG_ON(page_private(subdir) != freed);
                }
                offset = 0;
        }
done1:
        shmem_dir_unmap(dir);
done2:
        if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
                /*
                 * Call truncate_inode_pages again: racing shmem_unuse_inode
                 * may have swizzled a page in from swap since
                 * truncate_pagecache or generic_delete_inode did it, before we
                 * lowered next_index.  Also, though shmem_getpage checks
                 * i_size before adding to cache, no recheck after: so fix the
                 * narrow window there too.
                 */
                truncate_inode_pages_range(inode->i_mapping, start, end);
        }

        spin_lock(&info->lock);
        info->flags &= ~SHMEM_TRUNCATE;
        info->swapped -= nr_swaps_freed;
        if (nr_pages_to_free)
                shmem_free_blocks(inode, nr_pages_to_free);
        shmem_recalc_inode(inode);
        spin_unlock(&info->lock);

        /*
         * Empty swap vector directory pages to be freed?
         */
        if (!list_empty(&pages_to_free)) {
                pages_to_free.prev->next = NULL;
                shmem_free_pages(pages_to_free.next);
        }
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
{
        struct inode *inode = dentry->d_inode;
        int error;

        error = inode_change_ok(inode, attr);
        if (error)
                return error;

        if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
                loff_t oldsize = inode->i_size;
                loff_t newsize = attr->ia_size;
                struct page *page = NULL;

                if (newsize < oldsize) {
                        /*
                         * If truncating down to a partial page, then
                         * if that page is already allocated, hold it
                         * in memory until the truncation is over, so
                         * truncate_partial_page cannot miss it were
                         * it assigned to swap.
                         */
                        if (newsize & (PAGE_CACHE_SIZE-1)) {
                                (void) shmem_getpage(inode,
                                        newsize >> PAGE_CACHE_SHIFT,
                                                &page, SGP_READ, NULL);
                                if (page)
                                        unlock_page(page);
                        }
                        /*
                         * Reset SHMEM_PAGEIN flag so that shmem_truncate can
                         * detect if any pages might have been added to cache
                         * after truncate_inode_pages.  But we needn't bother
                         * if it's being fully truncated to zero-length: the
                         * nrpages check is efficient enough in that case.
                         */
                        if (newsize) {
                                struct shmem_inode_info *info = SHMEM_I(inode);
                                spin_lock(&info->lock);
                                info->flags &= ~SHMEM_PAGEIN;
                                spin_unlock(&info->lock);
                        }
                }
                if (newsize != oldsize) {
                        i_size_write(inode, newsize);
                        inode->i_ctime = inode->i_mtime = CURRENT_TIME;
                }
                if (newsize < oldsize) {
                        loff_t holebegin = round_up(newsize, PAGE_SIZE);
                        unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
                        shmem_truncate_range(inode, newsize, (loff_t)-1);
                        /* unmap again to remove racily COWed private pages */
                        unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
                }
                if (page)
                        page_cache_release(page);
        }

        setattr_copy(inode, attr);
#ifdef CONFIG_TMPFS_POSIX_ACL
        if (attr->ia_valid & ATTR_MODE)
                error = generic_acl_chmod(inode);
#endif
        return error;
}

static void shmem_evict_inode(struct inode *inode)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct shmem_xattr *xattr, *nxattr;

        if (inode->i_mapping->a_ops == &shmem_aops) {
                shmem_unacct_size(info->flags, inode->i_size);
                inode->i_size = 0;
                shmem_truncate_range(inode, 0, (loff_t)-1);
                if (!list_empty(&info->swaplist)) {
                        mutex_lock(&shmem_swaplist_mutex);
                        list_del_init(&info->swaplist);
                        mutex_unlock(&shmem_swaplist_mutex);
                }
        }

        list_for_each_entry_safe(xattr, nxattr, &info->xattr_list, list) {
                kfree(xattr->name);
                kfree(xattr);
        }
        BUG_ON(inode->i_blocks);
        shmem_free_inode(inode->i_sb);
        end_writeback(inode);
}

static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir)
{
        swp_entry_t *ptr;

        for (ptr = dir; ptr < edir; ptr++) {
                if (ptr->val == entry.val)
                        return ptr - dir;
        }
        return -1;
}

static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
{
        struct address_space *mapping;
        unsigned long idx;
        unsigned long size;
        unsigned long limit;
        unsigned long stage;
        struct page **dir;
        struct page *subdir;
        swp_entry_t *ptr;
        int offset;
        int error;

        idx = 0;
        ptr = info->i_direct;
        spin_lock(&info->lock);
        if (!info->swapped) {
                list_del_init(&info->swaplist);
                goto lost2;
        }
        limit = info->next_index;
        size = limit;
        if (size > SHMEM_NR_DIRECT)
                size = SHMEM_NR_DIRECT;
        offset = shmem_find_swp(entry, ptr, ptr+size);
        if (offset >= 0) {
                shmem_swp_balance_unmap();
                goto found;
        }
        if (!info->i_indirect)
                goto lost2;

        dir = shmem_dir_map(info->i_indirect);
        stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2;

        for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
                if (unlikely(idx == stage)) {
                        shmem_dir_unmap(dir-1);
                        if (cond_resched_lock(&info->lock)) {
                                /* check it has not been truncated */
                                if (limit > info->next_index) {
                                        limit = info->next_index;
                                        if (idx >= limit)
                                                goto lost2;
                                }
                        }
                        dir = shmem_dir_map(info->i_indirect) +
                            ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
                        while (!*dir) {
                                dir++;
                                idx += ENTRIES_PER_PAGEPAGE;
                                if (idx >= limit)
                                        goto lost1;
                        }
                        stage = idx + ENTRIES_PER_PAGEPAGE;
                        subdir = *dir;
                        shmem_dir_unmap(dir);
                        dir = shmem_dir_map(subdir);
                }
                subdir = *dir;
                if (subdir && page_private(subdir)) {
                        ptr = shmem_swp_map(subdir);
                        size = limit - idx;
                        if (size > ENTRIES_PER_PAGE)
                                size = ENTRIES_PER_PAGE;
                        offset = shmem_find_swp(entry, ptr, ptr+size);
                        shmem_swp_unmap(ptr);
                        if (offset >= 0) {
                                shmem_dir_unmap(dir);
                                ptr = shmem_swp_map(subdir);
                                goto found;
                        }
                }
        }
lost1:
        shmem_dir_unmap(dir-1);
lost2:
        spin_unlock(&info->lock);
        return 0;
found:
        idx += offset;
        ptr += offset;

        /*
         * Move _head_ to start search for next from here.
         * But be careful: shmem_evict_inode checks list_empty without taking
         * mutex, and there's an instant in list_move_tail when info->swaplist
         * would appear empty, if it were the only one on shmem_swaplist.  We
         * could avoid doing it if inode NULL; or use this minor optimization.
         */
        if (shmem_swaplist.next != &info->swaplist)
                list_move_tail(&shmem_swaplist, &info->swaplist);

        /*
         * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
         * but also to hold up shmem_evict_inode(): so inode cannot be freed
         * beneath us (pagelock doesn't help until the page is in pagecache).
         */
        mapping = info->vfs_inode.i_mapping;
        error = add_to_page_cache_locked(page, mapping, idx, GFP_NOWAIT);
        /* which does mem_cgroup_uncharge_cache_page on error */

        if (error == -EEXIST) {
                struct page *filepage = find_get_page(mapping, idx);
                error = 1;
                if (filepage) {
                        /*
                         * There might be a more uptodate page coming down
                         * from a stacked writepage: forget our swappage if so.
                         */
                        if (PageUptodate(filepage))
                                error = 0;
                        page_cache_release(filepage);
                }
        }
        if (!error) {
                delete_from_swap_cache(page);
                set_page_dirty(page);
                info->flags |= SHMEM_PAGEIN;
                shmem_swp_set(info, ptr, 0);
                swap_free(entry);
                error = 1;      /* not an error, but entry was found */
        }
        shmem_swp_unmap(ptr);
        spin_unlock(&info->lock);
        return error;
}

/*
 * shmem_unuse() searches for a swapped-out shmem page matching @entry,
 * and moves it back from swap into pagecache.
 */
int shmem_unuse(swp_entry_t entry, struct page *page)
{
        struct list_head *p, *next;
        struct shmem_inode_info *info;
        int found = 0;
        int error;

        /*
         * Charge page using GFP_KERNEL while we can wait, before taking
         * the shmem_swaplist_mutex which might hold up shmem_writepage().
         * Charged back to the user (not to caller) when swap account is used.
         * add_to_page_cache() will be called with GFP_NOWAIT.
         */
        error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
        if (error)
                goto out;
        /*
         * Try to preload while we can wait, to not make a habit of
         * draining atomic reserves; but don't latch on to this cpu,
         * it's okay if sometimes we get rescheduled after this.
         */
        error = radix_tree_preload(GFP_KERNEL);
        if (error)
                goto uncharge;
        radix_tree_preload_end();

        mutex_lock(&shmem_swaplist_mutex);
        list_for_each_safe(p, next, &shmem_swaplist) {
                info = list_entry(p, struct shmem_inode_info, swaplist);
                found = shmem_unuse_inode(info, entry, page);
                cond_resched();
                if (found)
                        break;
        }
        mutex_unlock(&shmem_swaplist_mutex);

uncharge:
        if (!found)
                mem_cgroup_uncharge_cache_page(page);
        if (found < 0)
                error = found;
out:
        unlock_page(page);
        page_cache_release(page);
        return error;
}

/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
        struct shmem_inode_info *info;
        swp_entry_t *entry, swap;
        struct address_space *mapping;
        unsigned long index;
        struct inode *inode;

        BUG_ON(!PageLocked(page));
        mapping = page->mapping;
        index = page->index;
        inode = mapping->host;
        info = SHMEM_I(inode);
        if (info->flags & VM_LOCKED)
                goto redirty;
        if (!total_swap_pages)
                goto redirty;

        /*
         * shmem_backing_dev_info's capabilities prevent regular writeback or
         * sync from ever calling shmem_writepage; but a stacking filesystem
         * may use the ->writepage of its underlying filesystem, in which case
         * tmpfs should write out to swap only in response to memory pressure,
         * and not for the writeback threads or sync.  However, in those cases,
         * we do still want to check if there's a redundant swappage to be
         * discarded.
         */
        if (wbc->for_reclaim)
                swap = get_swap_page();
        else
                swap.val = 0;

        /*
         * Add inode to shmem_unuse()'s list of swapped-out inodes,
         * if it's not already there.  Do it now because we cannot take
         * mutex while holding spinlock, and must do so before the page
         * is moved to swap cache, when its pagelock no longer protects
         * the inode from eviction.  But don't unlock the mutex until
         * we've taken the spinlock, because shmem_unuse_inode() will
         * prune a !swapped inode from the swaplist under both locks.
         */
        if (swap.val) {
                mutex_lock(&shmem_swaplist_mutex);
                if (list_empty(&info->swaplist))
                        list_add_tail(&info->swaplist, &shmem_swaplist);
        }

        spin_lock(&info->lock);
        if (swap.val)
                mutex_unlock(&shmem_swaplist_mutex);

        if (index >= info->next_index) {
                BUG_ON(!(info->flags & SHMEM_TRUNCATE));
                goto unlock;
        }
        entry = shmem_swp_entry(info, index, NULL);
        if (entry->val) {
                /*
                 * The more uptodate page coming down from a stacked
                 * writepage should replace our old swappage.
                 */
                free_swap_and_cache(*entry);
                shmem_swp_set(info, entry, 0);
        }
        shmem_recalc_inode(inode);

        if (swap.val && add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
                delete_from_page_cache(page);
                shmem_swp_set(info, entry, swap.val);
                shmem_swp_unmap(entry);
                swap_shmem_alloc(swap);
                spin_unlock(&info->lock);
                BUG_ON(page_mapped(page));
                swap_writepage(page, wbc);
                return 0;
        }

        shmem_swp_unmap(entry);
unlock:
        spin_unlock(&info->lock);
        /*
         * add_to_swap_cache() doesn't return -EEXIST, so we can safely
         * clear SWAP_HAS_CACHE flag.
         */
        swapcache_free(swap, NULL);
redirty:
        set_page_dirty(page);
        if (wbc->for_reclaim)
                return AOP_WRITEPAGE_ACTIVATE;  /* Return with page locked */
        unlock_page(page);
        return 0;
}

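/*
 * To summarize the paths above: under reclaim we try to allocate swap,
 * record the entry in the swap vector and hand the page to
 * swap_writepage(); on any failure, or when entered without
 * wbc->for_reclaim, we fall through to redirty the page, returning it
 * locked (AOP_WRITEPAGE_ACTIVATE) to the reclaim path, or unlocked
 * otherwise.
 */
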
#ifdef CONFIG_NUMA
#ifdef CONFIG_TMPFS
static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
        char buffer[64];

        if (!mpol || mpol->mode == MPOL_DEFAULT)
                return;         /* show nothing */

        mpol_to_str(buffer, sizeof(buffer), mpol, 1);

        seq_printf(seq, ",mpol=%s", buffer);
}

static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
        struct mempolicy *mpol = NULL;
        if (sbinfo->mpol) {
                spin_lock(&sbinfo->stat_lock);  /* prevent replace/use races */
                mpol = sbinfo->mpol;
                mpol_get(mpol);
                spin_unlock(&sbinfo->stat_lock);
        }
        return mpol;
}
#endif /* CONFIG_TMPFS */

static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
                        struct shmem_inode_info *info, unsigned long idx)
{
        struct mempolicy mpol, *spol;
        struct vm_area_struct pvma;
        struct page *page;

        spol = mpol_cond_copy(&mpol,
                                mpol_shared_policy_lookup(&info->policy, idx));

        /* Create a pseudo vma that just contains the policy */
        pvma.vm_start = 0;
        pvma.vm_pgoff = idx;
        pvma.vm_ops = NULL;
        pvma.vm_policy = spol;
        page = swapin_readahead(entry, gfp, &pvma, 0);
        return page;
}

static struct page *shmem_alloc_page(gfp_t gfp,
                        struct shmem_inode_info *info, unsigned long idx)
{
        struct vm_area_struct pvma;

        /* Create a pseudo vma that just contains the policy */
        pvma.vm_start = 0;
        pvma.vm_pgoff = idx;
        pvma.vm_ops = NULL;
        pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);

        /*
         * alloc_page_vma() will drop the shared policy reference
         */
        return alloc_page_vma(gfp, &pvma, 0);
}
#else /* !CONFIG_NUMA */
#ifdef CONFIG_TMPFS
static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *p)
{
}
#endif /* CONFIG_TMPFS */

static inline struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
                        struct shmem_inode_info *info, unsigned long idx)
{
        return swapin_readahead(entry, gfp, NULL, 0);
}

static inline struct page *shmem_alloc_page(gfp_t gfp,
                        struct shmem_inode_info *info, unsigned long idx)
{
        return alloc_page(gfp);
}
#endif /* CONFIG_NUMA */

#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
        return NULL;
}
#endif

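/*
 * Note how the stubs above keep callers config-independent:
 * shmem_swapin() and shmem_alloc_page() always exist, with the
 * pseudo-vma mempolicy handling compiled in only under CONFIG_NUMA,
 * and shmem_get_sbmpol() returns NULL whenever there is no NUMA tmpfs
 * mount policy to honour.
 */
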
/*
 * shmem_getpage - either get the page from swap or allocate a new one
 *
 * If we allocate a new one we do not mark it dirty: that's up to the
 * vm.  If we swap it in we mark it dirty, since we also free the swap
 * entry: a page cannot live in both the swap cache and the page cache.
 */
1234 static int shmem_getpage(struct inode *inode, unsigned long idx,
1235                         struct page **pagep, enum sgp_type sgp, int *type)
1236 {
1237         struct address_space *mapping = inode->i_mapping;
1238         struct shmem_inode_info *info = SHMEM_I(inode);
1239         struct shmem_sb_info *sbinfo;
1240         struct page *filepage = *pagep;
1241         struct page *swappage;
1242         struct page *prealloc_page = NULL;
1243         swp_entry_t *entry;
1244         swp_entry_t swap;
1245         gfp_t gfp;
1246         int error;
1247
1248         if (idx >= SHMEM_MAX_INDEX)
1249                 return -EFBIG;
1250
1251         if (type)
1252                 *type = 0;
1253
1254         /*
1255          * Normally, filepage is NULL on entry, and either found
1256          * uptodate immediately, or allocated and zeroed, or read
1257          * in under swappage, which is then assigned to filepage.
1258          * But shmem_readpage (required for splice) passes in a locked
1259          * filepage, which may be found not uptodate by other callers
1260          * too, and may need to be copied from the swappage read in.
1261          */
1262 repeat:
1263         if (!filepage)
1264                 filepage = find_lock_page(mapping, idx);
1265         if (filepage && PageUptodate(filepage))
1266                 goto done;
1267         gfp = mapping_gfp_mask(mapping);
1268         if (!filepage) {
1269                 /*
1270                  * Try to preload while we can wait, to not make a habit of
1271                  * draining atomic reserves; but don't latch on to this cpu.
1272                  */
1273                 error = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
1274                 if (error)
1275                         goto failed;
1276                 radix_tree_preload_end();
1277                 if (sgp != SGP_READ && !prealloc_page) {
1278                         /* We don't care if this fails */
1279                         prealloc_page = shmem_alloc_page(gfp, info, idx);
1280                         if (prealloc_page) {
1281                                 if (mem_cgroup_cache_charge(prealloc_page,
1282                                                 current->mm, GFP_KERNEL)) {
1283                                         page_cache_release(prealloc_page);
1284                                         prealloc_page = NULL;
1285                                 }
1286                         }
1287                 }
1288         }
1289         error = 0;
1290
1291         spin_lock(&info->lock);
1292         shmem_recalc_inode(inode);
1293         entry = shmem_swp_alloc(info, idx, sgp);
1294         if (IS_ERR(entry)) {
1295                 spin_unlock(&info->lock);
1296                 error = PTR_ERR(entry);
1297                 goto failed;
1298         }
1299         swap = *entry;
1300
1301         if (swap.val) {
1302                 /* Look it up and read it in.. */
1303                 swappage = lookup_swap_cache(swap);
1304                 if (!swappage) {
1305                         shmem_swp_unmap(entry);
1306                         spin_unlock(&info->lock);
1307                         /* here we actually do the io */
1308                         if (type)
1309                                 *type |= VM_FAULT_MAJOR;
1310                         swappage = shmem_swapin(swap, gfp, info, idx);
1311                         if (!swappage) {
1312                                 spin_lock(&info->lock);
1313                                 entry = shmem_swp_alloc(info, idx, sgp);
1314                                 if (IS_ERR(entry))
1315                                         error = PTR_ERR(entry);
1316                                 else {
1317                                         if (entry->val == swap.val)
1318                                                 error = -ENOMEM;
1319                                         shmem_swp_unmap(entry);
1320                                 }
1321                                 spin_unlock(&info->lock);
1322                                 if (error)
1323                                         goto failed;
1324                                 goto repeat;
1325                         }
1326                         wait_on_page_locked(swappage);
1327                         page_cache_release(swappage);
1328                         goto repeat;
1329                 }
1330
1331                 /* We have to do this with page locked to prevent races */
1332                 if (!trylock_page(swappage)) {
1333                         shmem_swp_unmap(entry);
1334                         spin_unlock(&info->lock);
1335                         wait_on_page_locked(swappage);
1336                         page_cache_release(swappage);
1337                         goto repeat;
1338                 }
1339                 if (PageWriteback(swappage)) {
1340                         shmem_swp_unmap(entry);
1341                         spin_unlock(&info->lock);
1342                         wait_on_page_writeback(swappage);
1343                         unlock_page(swappage);
1344                         page_cache_release(swappage);
1345                         goto repeat;
1346                 }
1347                 if (!PageUptodate(swappage)) {
1348                         shmem_swp_unmap(entry);
1349                         spin_unlock(&info->lock);
1350                         unlock_page(swappage);
1351                         page_cache_release(swappage);
1352                         error = -EIO;
1353                         goto failed;
1354                 }
1355
1356                 if (filepage) {
1357                         shmem_swp_set(info, entry, 0);
1358                         shmem_swp_unmap(entry);
1359                         delete_from_swap_cache(swappage);
1360                         spin_unlock(&info->lock);
1361                         copy_highpage(filepage, swappage);
1362                         unlock_page(swappage);
1363                         page_cache_release(swappage);
1364                         flush_dcache_page(filepage);
1365                         SetPageUptodate(filepage);
1366                         set_page_dirty(filepage);
1367                         swap_free(swap);
1368                 } else if (!(error = add_to_page_cache_locked(swappage, mapping,
1369                                         idx, GFP_NOWAIT))) {
1370                         info->flags |= SHMEM_PAGEIN;
1371                         shmem_swp_set(info, entry, 0);
1372                         shmem_swp_unmap(entry);
1373                         delete_from_swap_cache(swappage);
1374                         spin_unlock(&info->lock);
1375                         filepage = swappage;
1376                         set_page_dirty(filepage);
1377                         swap_free(swap);
1378                 } else {
1379                         shmem_swp_unmap(entry);
1380                         spin_unlock(&info->lock);
1381                         if (error == -ENOMEM) {
1382                                 /*
1383                                  * Reclaim from the proper memory cgroup,
1384                                  * and call the memcg OOM handler if needed.
1385                                  */
1386                                 error = mem_cgroup_shmem_charge_fallback(
1387                                                                 swappage,
1388                                                                 current->mm,
1389                                                                 gfp);
1390                                 if (error) {
1391                                         unlock_page(swappage);
1392                                         page_cache_release(swappage);
1393                                         goto failed;
1394                                 }
1395                         }
1396                         unlock_page(swappage);
1397                         page_cache_release(swappage);
1398                         goto repeat;
1399                 }
1400         } else if (sgp == SGP_READ && !filepage) {
1401                 shmem_swp_unmap(entry);
1402                 filepage = find_get_page(mapping, idx);
1403                 if (filepage &&
1404                     (!PageUptodate(filepage) || !trylock_page(filepage))) {
1405                         spin_unlock(&info->lock);
1406                         wait_on_page_locked(filepage);
1407                         page_cache_release(filepage);
1408                         filepage = NULL;
1409                         goto repeat;
1410                 }
1411                 spin_unlock(&info->lock);
1412         } else {
1413                 shmem_swp_unmap(entry);
1414                 sbinfo = SHMEM_SB(inode->i_sb);
1415                 if (sbinfo->max_blocks) {
1416                         if (percpu_counter_compare(&sbinfo->used_blocks,
1417                                                 sbinfo->max_blocks) >= 0 ||
1418                             shmem_acct_block(info->flags))
1419                                 goto nospace;
1420                         percpu_counter_inc(&sbinfo->used_blocks);
1421                         inode->i_blocks += BLOCKS_PER_PAGE;
1422                 } else if (shmem_acct_block(info->flags))
1423                         goto nospace;
1424
1425                 if (!filepage) {
1426                         int ret;
1427
1428                         if (!prealloc_page) {
1429                                 spin_unlock(&info->lock);
1430                                 filepage = shmem_alloc_page(gfp, info, idx);
1431                                 if (!filepage) {
1432                                         spin_lock(&info->lock);
1433                                         shmem_unacct_blocks(info->flags, 1);
1434                                         shmem_free_blocks(inode, 1);
1435                                         spin_unlock(&info->lock);
1436                                         error = -ENOMEM;
1437                                         goto failed;
1438                                 }
1439                                 SetPageSwapBacked(filepage);
1440
1441                                 /*
1442                                  * Precharge the page while we can still wait;
1443                                  * compensate the accounting afterwards.
1444                                  */
1445                                 error = mem_cgroup_cache_charge(filepage,
1446                                         current->mm, GFP_KERNEL);
1447                                 if (error) {
1448                                         page_cache_release(filepage);
1449                                         spin_lock(&info->lock);
1450                                         shmem_unacct_blocks(info->flags, 1);
1451                                         shmem_free_blocks(inode, 1);
1452                                         spin_unlock(&info->lock);
1453                                         filepage = NULL;
1454                                         goto failed;
1455                                 }
1456
1457                                 spin_lock(&info->lock);
1458                         } else {
1459                                 filepage = prealloc_page;
1460                                 prealloc_page = NULL;
1461                                 SetPageSwapBacked(filepage);
1462                         }
1463
1464                         entry = shmem_swp_alloc(info, idx, sgp);
1465                         if (IS_ERR(entry))
1466                                 error = PTR_ERR(entry);
1467                         else {
1468                                 swap = *entry;
1469                                 shmem_swp_unmap(entry);
1470                         }
1471                         ret = error || swap.val;
1472                         if (ret)
1473                                 mem_cgroup_uncharge_cache_page(filepage);
1474                         else
1475                                 ret = add_to_page_cache_lru(filepage, mapping,
1476                                                 idx, GFP_NOWAIT);
1477                         /*
1478                          * On add_to_page_cache_lru() failure, the uncharge
1479                          * is done automatically.
1480                          */
1481                         if (ret) {
1482                                 shmem_unacct_blocks(info->flags, 1);
1483                                 shmem_free_blocks(inode, 1);
1484                                 spin_unlock(&info->lock);
1485                                 page_cache_release(filepage);
1486                                 filepage = NULL;
1487                                 if (error)
1488                                         goto failed;
1489                                 goto repeat;
1490                         }
1491                         info->flags |= SHMEM_PAGEIN;
1492                 }
1493
1494                 info->alloced++;
1495                 spin_unlock(&info->lock);
1496                 clear_highpage(filepage);
1497                 flush_dcache_page(filepage);
1498                 SetPageUptodate(filepage);
1499                 if (sgp == SGP_DIRTY)
1500                         set_page_dirty(filepage);
1501         }
1502 done:
1503         *pagep = filepage;
1504         error = 0;
1505         goto out;
1506
1507 nospace:
1508         /*
1509          * Perhaps the page was brought in from swap between find_lock_page
1510          * and taking info->lock?  We allow for that at add_to_page_cache_lru,
1511          * but must also avoid reporting a spurious ENOSPC while working on a
1512          * full tmpfs.  (When filepage has been passed in to shmem_getpage, it
1513          * is already in page cache, which prevents this race from occurring.)
1514          */
1515         if (!filepage) {
1516                 struct page *page = find_get_page(mapping, idx);
1517                 if (page) {
1518                         spin_unlock(&info->lock);
1519                         page_cache_release(page);
1520                         goto repeat;
1521                 }
1522         }
1523         spin_unlock(&info->lock);
1524         error = -ENOSPC;
1525 failed:
1526         if (*pagep != filepage) {
1527                 unlock_page(filepage);
1528                 page_cache_release(filepage);
1529         }
1530 out:
1531         if (prealloc_page) {
1532                 mem_cgroup_uncharge_cache_page(prealloc_page);
1533                 page_cache_release(prealloc_page);
1534         }
1535         return error;
1536 }
1537
1538 static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1539 {
1540         struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
1541         int error;
1542         int ret;
1543
1544         if (((loff_t)vmf->pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
1545                 return VM_FAULT_SIGBUS;
1546
1547         error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
1548         if (error)
1549                 return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
1550         if (ret & VM_FAULT_MAJOR) {
1551                 count_vm_event(PGMAJFAULT);
1552                 mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
1553         }
1554         return ret | VM_FAULT_LOCKED;
1555 }
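
/*
 * Illustrative sketch (hypothetical userspace usage, error checks
 * omitted): shmem_fault() above runs the first time a mapped tmpfs
 * page is touched, assuming /dev/shm is a tmpfs mount:
 *
 *	int fd = open("/dev/shm/example", O_RDWR | O_CREAT, 0600);
 *	ftruncate(fd, 4096);
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *	p[0] = 1;	first touch faults the page in via shmem_fault()
 *
 * Touching beyond i_size yields SIGBUS, matching the size check above.
 */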
1556
1557 #ifdef CONFIG_NUMA
1558 static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
1559 {
1560         struct inode *i = vma->vm_file->f_path.dentry->d_inode;
1561         return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new);
1562 }
1563
1564 static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
1565                                           unsigned long addr)
1566 {
1567         struct inode *i = vma->vm_file->f_path.dentry->d_inode;
1568         unsigned long idx;
1569
1570         idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
1571         return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx);
1572 }
1573 #endif
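
/*
 * Illustrative sketch (hypothetical address and values): the NUMA hooks
 * above are reached when userspace applies a memory policy to a mapped
 * tmpfs region, e.g. via mbind(2):
 *
 *	unsigned long nodemask = 1;	node 0 only
 *	mbind(addr, len, MPOL_BIND, &nodemask, 2, 0);
 *
 * which stores the policy through shmem_set_policy(), so later faults
 * consult shmem_get_policy() for the per-range shared policy.
 */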
1574
1575 int shmem_lock(struct file *file, int lock, struct user_struct *user)
1576 {
1577         struct inode *inode = file->f_path.dentry->d_inode;
1578         struct shmem_inode_info *info = SHMEM_I(inode);
1579         int retval = -ENOMEM;
1580
1581         spin_lock(&info->lock);
1582         if (lock && !(info->flags & VM_LOCKED)) {
1583                 if (!user_shm_lock(inode->i_size, user))
1584                         goto out_nomem;
1585                 info->flags |= VM_LOCKED;
1586                 mapping_set_unevictable(file->f_mapping);
1587         }
1588         if (!lock && (info->flags & VM_LOCKED) && user) {
1589                 user_shm_unlock(inode->i_size, user);
1590                 info->flags &= ~VM_LOCKED;
1591                 mapping_clear_unevictable(file->f_mapping);
1592                 scan_mapping_unevictable_pages(file->f_mapping);
1593         }
1594         retval = 0;
1595
1596 out_nomem:
1597         spin_unlock(&info->lock);
1598         return retval;
1599 }
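
/*
 * Illustrative sketch (hypothetical usage): shmem_lock() above is the
 * backend for locking SysV shared memory in core, e.g.
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	shmctl(id, SHM_LOCK, NULL);
 *
 * which marks the segment's pages unevictable, subject to the caller's
 * RLIMIT_MEMLOCK accounting in user_shm_lock() above.
 */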
1600
1601 static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
1602 {
1603         file_accessed(file);
1604         vma->vm_ops = &shmem_vm_ops;
1605         vma->vm_flags |= VM_CAN_NONLINEAR;
1606         return 0;
1607 }
1608
1609 static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
1610                                      int mode, dev_t dev, unsigned long flags)
1611 {
1612         struct inode *inode;
1613         struct shmem_inode_info *info;
1614         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
1615
1616         if (shmem_reserve_inode(sb))
1617                 return NULL;
1618
1619         inode = new_inode(sb);
1620         if (inode) {
1621                 inode->i_ino = get_next_ino();
1622                 inode_init_owner(inode, dir, mode);
1623                 inode->i_blocks = 0;
1624                 inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
1625                 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
1626                 inode->i_generation = get_seconds();
1627                 info = SHMEM_I(inode);
1628                 memset(info, 0, (char *)inode - (char *)info); /* zero fields before the embedded vfs_inode */
1629                 spin_lock_init(&info->lock);
1630                 info->flags = flags & VM_NORESERVE;
1631                 INIT_LIST_HEAD(&info->swaplist);
1632                 INIT_LIST_HEAD(&info->xattr_list);
1633                 cache_no_acl(inode);
1634
1635                 switch (mode & S_IFMT) {
1636                 default:
1637                         inode->i_op = &shmem_special_inode_operations;
1638                         init_special_inode(inode, mode, dev);
1639                         break;
1640                 case S_IFREG:
1641                         inode->i_mapping->a_ops = &shmem_aops;
1642                         inode->i_op = &shmem_inode_operations;
1643                         inode->i_fop = &shmem_file_operations;
1644                         mpol_shared_policy_init(&info->policy,
1645                                                  shmem_get_sbmpol(sbinfo));
1646                         break;
1647                 case S_IFDIR:
1648                         inc_nlink(inode);
1649                         /* Some things misbehave if size == 0 on a directory */
1650                         inode->i_size = 2 * BOGO_DIRENT_SIZE;
1651                         inode->i_op = &shmem_dir_inode_operations;
1652                         inode->i_fop = &simple_dir_operations;
1653                         break;
1654                 case S_IFLNK:
1655                         /*
1656                          * Must not load anything in the rbtree,
1657                          * mpol_free_shared_policy will not be called.
1658                          */
1659                         mpol_shared_policy_init(&info->policy, NULL);
1660                         break;
1661                 }
1662         } else
1663                 shmem_free_inode(sb);
1664         return inode;
1665 }
1666
1667 #ifdef CONFIG_TMPFS
1668 static const struct inode_operations shmem_symlink_inode_operations;
1669 static const struct inode_operations shmem_symlink_inline_operations;
1670
1671 /*
1672  * Normally tmpfs avoids the use of shmem_readpage and shmem_write_begin;
1673  * but providing them allows a tmpfs file to be used for splice and sendfile,
1674  * and to sit below the loop driver, in the generic fashion that many filesystems support.
1675  */
1676 static int shmem_readpage(struct file *file, struct page *page)
1677 {
1678         struct inode *inode = page->mapping->host;
1679         int error = shmem_getpage(inode, page->index, &page, SGP_CACHE, NULL);
1680         unlock_page(page);
1681         return error;
1682 }
1683
1684 static int
1685 shmem_write_begin(struct file *file, struct address_space *mapping,
1686                         loff_t pos, unsigned len, unsigned flags,
1687                         struct page **pagep, void **fsdata)
1688 {
1689         struct inode *inode = mapping->host;
1690         pgoff_t index = pos >> PAGE_CACHE_SHIFT;
1691         *pagep = NULL;
1692         return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
1693 }
1694
1695 static int
1696 shmem_write_end(struct file *file, struct address_space *mapping,
1697                         loff_t pos, unsigned len, unsigned copied,
1698                         struct page *page, void *fsdata)
1699 {
1700         struct inode *inode = mapping->host;
1701
1702         if (pos + copied > inode->i_size)
1703                 i_size_write(inode, pos + copied);
1704
1705         set_page_dirty(page);
1706         unlock_page(page);
1707         page_cache_release(page);
1708
1709         return copied;
1710 }
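
/*
 * Illustrative sketch (simplified; error handling omitted): the generic
 * buffered write path, generic_file_aio_write() in this kernel, pairs
 * the two helpers above roughly as
 *
 *	status = shmem_write_begin(file, mapping, pos, len, 0, &page, &fsdata);
 *	copied = iov_iter_copy_from_user_atomic(page, i, offset, len);
 *	status = shmem_write_end(file, mapping, pos, len, copied, page, fsdata);
 *
 * write_end unlocks and releases the page, and extends i_size when the
 * copy went past the old end of file.
 */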
1711
1712 static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
1713 {
1714         struct inode *inode = filp->f_path.dentry->d_inode;
1715         struct address_space *mapping = inode->i_mapping;
1716         unsigned long index, offset;
1717         enum sgp_type sgp = SGP_READ;
1718
1719         /*
1720          * Might this read be for a stacking filesystem?  Then when reading
1721          * holes of a sparse file, we actually need to allocate those pages,
1722          * and even mark them dirty, so such reads are subject to the max_blocks limit.
1723          */
1724         if (segment_eq(get_fs(), KERNEL_DS))
1725                 sgp = SGP_DIRTY;
1726
1727         index = *ppos >> PAGE_CACHE_SHIFT;
1728         offset = *ppos & ~PAGE_CACHE_MASK;
1729
1730         for (;;) {
1731                 struct page *page = NULL;
1732                 unsigned long end_index, nr, ret;
1733                 loff_t i_size = i_size_read(inode);
1734
1735                 end_index = i_size >> PAGE_CACHE_SHIFT;
1736                 if (index > end_index)
1737                         break;
1738                 if (index == end_index) {
1739                         nr = i_size & ~PAGE_CACHE_MASK;
1740                         if (nr <= offset)
1741                                 break;
1742                 }
1743
1744                 desc->error = shmem_getpage(inode, index, &page, sgp, NULL);
1745                 if (desc->error) {
1746                         if (desc->error == -EINVAL)
1747                                 desc->error = 0;
1748                         break;
1749                 }
1750                 if (page)
1751                         unlock_page(page);
1752
1753                 /*
1754                  * We must re-evaluate i_size afterwards, since reads (unlike
1755                  * writes) are called without i_mutex protection against truncate.
1756                  */
1757                 nr = PAGE_CACHE_SIZE;
1758                 i_size = i_size_read(inode);
1759                 end_index = i_size >> PAGE_CACHE_SHIFT;
1760                 if (index == end_index) {
1761                         nr = i_size & ~PAGE_CACHE_MASK;
1762                         if (nr <= offset) {
1763                                 if (page)
1764                                         page_cache_release(page);
1765                                 break;
1766                         }
1767                 }
1768                 nr -= offset;
1769
1770                 if (page) {
1771                         /*
1772                          * If users can be writing to this page using arbitrary
1773                          * virtual addresses, take care about potential aliasing
1774                          * before reading the page on the kernel side.
1775                          */
1776                         if (mapping_writably_mapped(mapping))
1777                                 flush_dcache_page(page);
1778                         /*
1779                          * Mark the page accessed if we read the beginning.
1780                          */
1781                         if (!offset)
1782                                 mark_page_accessed(page);
1783                 } else {
1784                         page = ZERO_PAGE(0);
1785                         page_cache_get(page);
1786                 }
1787
1788                 /*
1789                  * Ok, we have the page, and it's up-to-date, so
1790                  * now we can copy it to user space...
1791                  *
1792                  * The actor routine returns how many bytes were actually used.
1793                  * NOTE! This may not be the same as how much of a user buffer
1794                  * we filled up (we may be padding etc), so we can only update
1795                  * "pos" here (the actor routine has to update the user buffer
1796                  * pointers and the remaining count); see the sketch below.
1797                  */
1798                 ret = actor(desc, page, offset, nr);
1799                 offset += ret;
1800                 index += offset >> PAGE_CACHE_SHIFT;
1801                 offset &= ~PAGE_CACHE_MASK;
1802
1803                 page_cache_release(page);
1804                 if (ret != nr || !desc->count)
1805                         break;
1806
1807                 cond_resched();
1808         }
1809
1810         *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
1811         file_accessed(filp);
1812 }
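
/*
 * Sketch of the actor protocol used above (simplified from the generic
 * file_read_actor() in mm/filemap.c; kmap and fault handling omitted):
 *
 *	left = __copy_to_user(desc->arg.buf, kaddr + offset, size);
 *	copied = size - left;
 *	desc->count -= copied;
 *	desc->written += copied;
 *	desc->arg.buf += copied;
 *	return copied;
 *
 * A short return (ret != nr) makes the read loop above stop early.
 */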
1813
1814 static ssize_t shmem_file_aio_read(struct kiocb *iocb,
1815                 const struct iovec *iov, unsigned long nr_segs, loff_t pos)
1816 {
1817         struct file *filp = iocb->ki_filp;
1818         ssize_t retval;
1819         unsigned long seg;
1820         size_t count;
1821         loff_t *ppos = &iocb->ki_pos;
1822
1823         retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
1824         if (retval)
1825                 return retval;
1826
1827         for (seg = 0; seg < nr_segs; seg++) {
1828                 read_descriptor_t desc;
1829
1830                 desc.written = 0;
1831                 desc.arg.buf = iov[seg].iov_base;
1832                 desc.count = iov[seg].iov_len;
1833                 if (desc.count == 0)
1834                         continue;
1835                 desc.error = 0;
1836                 do_shmem_file_read(filp, ppos, &desc, file_read_actor);
1837                 retval += desc.written;
1838                 if (desc.error) {
1839                         retval = retval ?: desc.error;
1840                         break;
1841                 }
1842                 if (desc.count > 0)
1843                         break;
1844         }
1845         return retval;
1846 }
1847
1848 static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
1849                                 struct pipe_inode_info *pipe, size_t len,
1850                                 unsigned int flags)
1851 {
1852         struct address_space *mapping = in->f_mapping;
1853         struct inode *inode = mapping->host;
1854         unsigned int loff, nr_pages, req_pages;
1855         struct page *pages[PIPE_DEF_BUFFERS];
1856         struct partial_page partial[PIPE_DEF_BUFFERS];
1857         struct page *page;
1858         pgoff_t index, end_index;
1859         loff_t isize, left;
1860         int error, page_nr;
1861         struct splice_pipe_desc spd = {
1862                 .pages = pages,
1863                 .partial = partial,
1864                 .flags = flags,
1865                 .ops = &page_cache_pipe_buf_ops,
1866                 .spd_release = spd_release_page,
1867         };
1868
1869         isize = i_size_read(inode);
1870         if (unlikely(*ppos >= isize))
1871                 return 0;
1872
1873         left = isize - *ppos;
1874         if (unlikely(left < len))
1875                 len = left;
1876
1877         if (splice_grow_spd(pipe, &spd))
1878                 return -ENOMEM;
1879
1880         index = *ppos >> PAGE_CACHE_SHIFT;
1881         loff = *ppos & ~PAGE_CACHE_MASK;
1882         req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1883         nr_pages = min(req_pages, pipe->buffers);
1884
1885         spd.nr_pages = find_get_pages_contig(mapping, index,
1886                                                 nr_pages, spd.pages);
1887         index += spd.nr_pages;
1888         error = 0;
1889
1890         while (spd.nr_pages < nr_pages) {
1891                 page = NULL;
1892                 error = shmem_getpage(inode, index, &page, SGP_CACHE, NULL);
1893                 if (error)
1894                         break;
1895                 unlock_page(page);
1896                 spd.pages[spd.nr_pages++] = page;
1897                 index++;
1898         }
1899
1900         index = *ppos >> PAGE_CACHE_SHIFT;
1901         nr_pages = spd.nr_pages;
1902         spd.nr_pages = 0;
1903
1904         for (page_nr = 0; page_nr < nr_pages; page_nr++) {
1905                 unsigned int this_len;
1906
1907                 if (!len)
1908                         break;
1909
1910                 this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
1911                 page = spd.pages[page_nr];
1912
1913                 if (!PageUptodate(page) || page->mapping != mapping) {
1914                         page = NULL;
1915                         error = shmem_getpage(inode, index, &page,
1916                                                         SGP_CACHE, NULL);
1917                         if (error)
1918                                 break;
1919                         unlock_page(page);
1920                         page_cache_release(spd.pages[page_nr]);
1921                         spd.pages[page_nr] = page;
1922                 }
1923
1924                 isize = i_size_read(inode);
1925                 end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
1926                 if (unlikely(!isize || index > end_index))
1927                         break;
1928
1929                 if (end_index == index) {
1930                         unsigned int plen;
1931
1932                         plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
1933                         if (plen <= loff)
1934                                 break;
1935
1936                         this_len = min(this_len, plen - loff);
1937                         len = this_len;
1938                 }
1939
1940                 spd.partial[page_nr].offset = loff;
1941                 spd.partial[page_nr].len = this_len;
1942                 len -= this_len;
1943                 loff = 0;
1944                 spd.nr_pages++;
1945                 index++;
1946         }
1947
1948         while (page_nr < nr_pages)
1949                 page_cache_release(spd.pages[page_nr++]);
1950
1951         if (spd.nr_pages)
1952                 error = splice_to_pipe(pipe, &spd);
1953
1954         splice_shrink_spd(pipe, &spd);
1955
1956         if (error > 0) {
1957                 *ppos += error;
1958                 file_accessed(in);
1959         }
1960         return error;
1961 }
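
/*
 * Illustrative sketch (hypothetical userspace usage): the splice path
 * above moves tmpfs pages into a pipe without copying, e.g.
 *
 *	int pfd[2];
 *	loff_t off = 0;
 *	pipe(pfd);
 *	splice(fd, &off, pfd[1], NULL, 65536, SPLICE_F_MOVE);
 *
 * where fd refers to a tmpfs file.  Pages already resident are reused
 * directly from the page cache; the rest are brought in through
 * shmem_getpage() as in the loops above.
 */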
1962
1963 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
1964 {
1965         struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
1966
1967         buf->f_type = TMPFS_MAGIC;
1968         buf->f_bsize = PAGE_CACHE_SIZE;
1969         buf->f_namelen = NAME_MAX;
1970         if (sbinfo->max_blocks) {
1971                 buf->f_blocks = sbinfo->max_blocks;
1972                 buf->f_bavail = buf->f_bfree =
1973                                 sbinfo->max_blocks - percpu_counter_sum(&sbinfo->used_blocks);
1974         }
1975         if (sbinfo->max_inodes) {
1976                 buf->f_files = sbinfo->max_inodes;
1977                 buf->f_ffree = sbinfo->free_inodes;
1978         }
1979         /* else leave those fields 0 like simple_statfs */
1980         return 0;
1981 }
1982
1983 /*
1984  * File creation. Allocate an inode, and we're done.
1985  */
1986 static int
1987 shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
1988 {
1989         struct inode *inode;
1990         int error = -ENOSPC;
1991
1992         inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
1993         if (inode) {
1994                 error = security_inode_init_security(inode, dir,
1995                                                      &dentry->d_name, NULL,
1996                                                      NULL, NULL);
1997                 if (error) {
1998                         if (error != -EOPNOTSUPP) {
1999                                 iput(inode);
2000                                 return error;
2001                         }
2002                 }
2003 #ifdef CONFIG_TMPFS_POSIX_ACL
2004                 error = generic_acl_init(inode, dir);
2005                 if (error) {
2006                         iput(inode);
2007                         return error;
2008                 }
2009 #else
2010                 error = 0;
2011 #endif
2012                 dir->i_size += BOGO_DIRENT_SIZE;
2013                 dir->i_ctime = dir->i_mtime = CURRENT_TIME;
2014                 d_instantiate(dentry, inode);
2015                 dget(dentry); /* Extra count - pin the dentry in core */
2016         }
2017         return error;
2018 }
2019
2020 static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
2021 {
2022         int error;
2023
2024         if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
2025                 return error;
2026         inc_nlink(dir);
2027         return 0;
2028 }
2029
2030 static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
2031                 struct nameidata *nd)
2032 {
2033         return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
2034 }
2035
2036 /*
2037  * Link a file.
2038  */
2039 static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
2040 {
2041         struct inode *inode = old_dentry->d_inode;
2042         int ret;
2043
2044         /*
2045          * No ordinary (disk based) filesystem counts links as inodes;
2046          * but each new link needs a new dentry, pinning lowmem, and
2047          * tmpfs dentries cannot be pruned until they are unlinked.
2048          */
2049         ret = shmem_reserve_inode(inode->i_sb);
2050         if (ret)
2051                 goto out;
2052
2053         dir->i_size += BOGO_DIRENT_SIZE;
2054         inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
2055         inc_nlink(inode);
2056         ihold(inode);   /* New dentry reference */
2057         dget(dentry);           /* Extra pinning count for the created dentry */
2058         d_instantiate(dentry, inode);
2059 out:
2060         return ret;
2061 }
2062
2063 static int shmem_unlink(struct inode *dir, struct dentry *dentry)
2064 {
2065         struct inode *inode = dentry->d_inode;
2066
2067         if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
2068                 shmem_free_inode(inode->i_sb);
2069
2070         dir->i_size -= BOGO_DIRENT_SIZE;
2071         inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
2072         drop_nlink(inode);
2073         dput(dentry);   /* Undo the count from "create" - this does all the work */
2074         return 0;
2075 }
2076
2077 static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
2078 {
2079         if (!simple_empty(dentry))
2080                 return -ENOTEMPTY;
2081
2082         drop_nlink(dentry->d_inode);
2083         drop_nlink(dir);
2084         return shmem_unlink(dir, dentry);
2085 }
2086
2087 /*
2088  * The VFS layer already does all the dentry stuff for rename;
2089  * we just have to decrement the usage count for the target if
2090  * it exists, so that the VFS layer correctly frees it when it
2091  * gets overwritten.
2092  */
2093 static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
2094 {
2095         struct inode *inode = old_dentry->d_inode;
2096         int they_are_dirs = S_ISDIR(inode->i_mode);
2097
2098         if (!simple_empty(new_dentry))
2099                 return -ENOTEMPTY;
2100
2101         if (new_dentry->d_inode) {
2102                 (void) shmem_unlink(new_dir, new_dentry);
2103                 if (they_are_dirs)
2104                         drop_nlink(old_dir);
2105         } else if (they_are_dirs) {
2106                 drop_nlink(old_dir);
2107                 inc_nlink(new_dir);
2108         }
2109
2110         old_dir->i_size -= BOGO_DIRENT_SIZE;
2111         new_dir->i_size += BOGO_DIRENT_SIZE;
2112         old_dir->i_ctime = old_dir->i_mtime =
2113         new_dir->i_ctime = new_dir->i_mtime =
2114         inode->i_ctime = CURRENT_TIME;
2115         return 0;
2116 }
2117
2118 static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
2119 {
2120         int error;
2121         int len;
2122         struct inode *inode;
2123         struct page *page = NULL;
2124         char *kaddr;
2125         struct shmem_inode_info *info;
2126
2127         len = strlen(symname) + 1;
2128         if (len > PAGE_CACHE_SIZE)
2129                 return -ENAMETOOLONG;
2130
2131         inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
2132         if (!inode)
2133                 return -ENOSPC;
2134
2135         error = security_inode_init_security(inode, dir, &dentry->d_name, NULL,
2136                                              NULL, NULL);
2137         if (error) {
2138                 if (error != -EOPNOTSUPP) {
2139                         iput(inode);
2140                         return error;
2141                 }
2142                 error = 0;
2143         }
2144
2145         info = SHMEM_I(inode);
2146         inode->i_size = len-1;
2147         if (len <= SHMEM_SYMLINK_INLINE_LEN) {
2148                 /* do it inline */
2149                 memcpy(info->inline_symlink, symname, len);
2150                 inode->i_op = &shmem_symlink_inline_operations;
2151         } else {
2152                 error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
2153                 if (error) {
2154                         iput(inode);
2155                         return error;
2156                 }
2157                 inode->i_mapping->a_ops = &shmem_aops;
2158                 inode->i_op = &shmem_symlink_inode_operations;
2159                 kaddr = kmap_atomic(page, KM_USER0);
2160                 memcpy(kaddr, symname, len);
2161                 kunmap_atomic(kaddr, KM_USER0);
2162                 set_page_dirty(page);
2163                 unlock_page(page);
2164                 page_cache_release(page);
2165         }
2166         dir->i_size += BOGO_DIRENT_SIZE;
2167         dir->i_ctime = dir->i_mtime = CURRENT_TIME;
2168         d_instantiate(dentry, inode);
2169         dget(dentry);
2170         return 0;
2171 }
2172
2173 static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
2174 {
2175         nd_set_link(nd, SHMEM_I(dentry->d_inode)->inline_symlink);
2176         return NULL;
2177 }
2178
2179 static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
2180 {
2181         struct page *page = NULL;
2182         int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
2183         nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
2184         if (page)
2185                 unlock_page(page);
2186         return page;
2187 }
2188
2189 static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
2190 {
2191         if (!IS_ERR(nd_get_link(nd))) {
2192                 struct page *page = cookie;
2193                 kunmap(page);
2194                 mark_page_accessed(page);
2195                 page_cache_release(page);
2196         }
2197 }
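
/*
 * Sketch of the follow_link/put_link pairing above: for a long tmpfs
 * symlink the VFS walk does, roughly,
 *
 *	cookie = shmem_follow_link(dentry, nd);		kmaps the page
 *	...the walk uses nd_get_link(nd)...
 *	shmem_put_link(dentry, nd, cookie);		kunmap and release
 *
 * Inline symlinks return a NULL cookie and define no put_link, since
 * nothing was mapped.
 */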
2198
2199 #ifdef CONFIG_TMPFS_XATTR
2200 /*
2201  * Superblocks without xattr inode operations may get some security.* xattr
2202  * support from the LSM "for free". As soon as we have any other xattrs
2203  * like ACLs, we also need to implement the security.* handlers at
2204  * filesystem level, though.
2205  */
2206
2207 static int shmem_xattr_get(struct dentry *dentry, const char *name,
2208                            void *buffer, size_t size)
2209 {
2210         struct shmem_inode_info *info;
2211         struct shmem_xattr *xattr;
2212         int ret = -ENODATA;
2213
2214         info = SHMEM_I(dentry->d_inode);
2215
2216         spin_lock(&info->lock);
2217         list_for_each_entry(xattr, &info->xattr_list, list) {
2218                 if (strcmp(name, xattr->name))
2219                         continue;
2220
2221                 ret = xattr->size;
2222                 if (buffer) {
2223                         if (size < xattr->size)
2224                                 ret = -ERANGE;
2225                         else
2226                                 memcpy(buffer, xattr->value, xattr->size);
2227                 }
2228                 break;
2229         }
2230         spin_unlock(&info->lock);
2231         return ret;
2232 }
2233
2234 static int shmem_xattr_set(struct dentry *dentry, const char *name,
2235                            const void *value, size_t size, int flags)
2236 {
2237         struct inode *inode = dentry->d_inode;
2238         struct shmem_inode_info *info = SHMEM_I(inode);
2239         struct shmem_xattr *xattr;
2240         struct shmem_xattr *new_xattr = NULL;
2241         size_t len;
2242         int err = 0;
2243
2244         /* value == NULL means remove */
2245         if (value) {
2246                 /* wrap around? */
2247                 len = sizeof(*new_xattr) + size;
2248                 if (len <= sizeof(*new_xattr))
2249                         return -ENOMEM;
2250
2251                 new_xattr = kmalloc(len, GFP_KERNEL);
2252                 if (!new_xattr)
2253                         return -ENOMEM;
2254
2255                 new_xattr->name = kstrdup(name, GFP_KERNEL);
2256                 if (!new_xattr->name) {
2257                         kfree(new_xattr);
2258                         return -ENOMEM;
2259                 }
2260
2261                 new_xattr->size = size;
2262                 memcpy(new_xattr->value, value, size);
2263         }
2264
2265         spin_lock(&info->lock);
2266         list_for_each_entry(xattr, &info->xattr_list, list) {
2267                 if (!strcmp(name, xattr->name)) {
2268                         if (flags & XATTR_CREATE) {
2269                                 xattr = new_xattr;
2270                                 err = -EEXIST;
2271                         } else if (new_xattr) {
2272                                 list_replace(&xattr->list, &new_xattr->list);
2273                         } else {
2274                                 list_del(&xattr->list);
2275                         }
2276                         goto out;
2277                 }
2278         }
2279         if (flags & XATTR_REPLACE) {
2280                 xattr = new_xattr;
2281                 err = -ENODATA;
2282         } else {
2283                 list_add(&new_xattr->list, &info->xattr_list);
2284                 xattr = NULL;
2285         }
2286 out:
2287         spin_unlock(&info->lock);
2288         if (xattr)
2289                 kfree(xattr->name);
2290         kfree(xattr);
2291         return err;
2292 }
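
/*
 * Illustrative sketch (hypothetical path and value; trusted.* needs
 * CAP_SYS_ADMIN): the list-based xattr store above is driven by the
 * usual syscalls, e.g.
 *
 *	setxattr("/dev/shm/file", "trusted.note", "v1", 2, XATTR_CREATE);
 *	getxattr("/dev/shm/file", "trusted.note", buf, sizeof(buf));
 *
 * XATTR_CREATE fails with EEXIST if the name already exists, and
 * XATTR_REPLACE fails with ENODATA if it does not, matching the flag
 * handling above.
 */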
2293
2294
2295 static const struct xattr_handler *shmem_xattr_handlers[] = {
2296 #ifdef CONFIG_TMPFS_POSIX_ACL
2297         &generic_acl_access_handler,
2298         &generic_acl_default_handler,
2299 #endif
2300         NULL
2301 };
2302
2303 static int shmem_xattr_validate(const char *name)
2304 {
2305         struct { const char *prefix; size_t len; } arr[] = {
2306                 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
2307                 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
2308         };
2309         int i;
2310
2311         for (i = 0; i < ARRAY_SIZE(arr); i++) {
2312                 size_t preflen = arr[i].len;
2313                 if (strncmp(name, arr[i].prefix, preflen) == 0) {
2314                         if (!name[preflen])
2315                                 return -EINVAL;
2316                         return 0;
2317                 }
2318         }
2319         return -EOPNOTSUPP;
2320 }
2321
2322 static ssize_t shmem_getxattr(struct dentry *dentry, const char *name,
2323                               void *buffer, size_t size)
2324 {
2325         int err;
2326
2327         /*
2328          * If this is a request for a synthetic attribute in the system.*
2329          * namespace use the generic infrastructure to resolve a handler
2330          * for it via sb->s_xattr.
2331          */
2332         if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
2333                 return generic_getxattr(dentry, name, buffer, size);
2334
2335         err = shmem_xattr_validate(name);
2336         if (err)
2337                 return err;
2338
2339         return shmem_xattr_get(dentry, name, buffer, size);
2340 }
2341
2342 static int shmem_setxattr(struct dentry *dentry, const char *name,
2343                           const void *value, size_t size, int flags)
2344 {
2345         int err;
2346
2347         /*
2348          * If this is a request for a synthetic attribute in the system.*
2349          * namespace use the generic infrastructure to resolve a handler
2350          * for it via sb->s_xattr.
2351          */
2352         if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
2353                 return generic_setxattr(dentry, name, value, size, flags);
2354
2355         err = shmem_xattr_validate(name);
2356         if (err)
2357                 return err;
2358
2359         if (size == 0)
2360                 value = "";  /* empty EA, do not remove */
2361
2362         return shmem_xattr_set(dentry, name, value, size, flags);
2363
2364 }
2365
2366 static int shmem_removexattr(struct dentry *dentry, const char *name)
2367 {
2368         int err;
2369
2370         /*
2371          * If this is a request for a synthetic attribute in the system.*
2372          * namespace use the generic infrastructure to resolve a handler
2373          * for it via sb->s_xattr.
2374          */
2375         if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
2376                 return generic_removexattr(dentry, name);
2377
2378         err = shmem_xattr_validate(name);
2379         if (err)
2380                 return err;
2381
2382         return shmem_xattr_set(dentry, name, NULL, 0, XATTR_REPLACE);
2383 }
2384
2385 static bool xattr_is_trusted(const char *name)
2386 {
2387         return !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN);
2388 }
2389
2390 static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
2391 {
2392         bool trusted = capable(CAP_SYS_ADMIN);
2393         struct shmem_xattr *xattr;
2394         struct shmem_inode_info *info;
2395         size_t used = 0;
2396
2397         info = SHMEM_I(dentry->d_inode);
2398
2399         spin_lock(&info->lock);
2400         list_for_each_entry(xattr, &info->xattr_list, list) {
2401                 size_t len;
2402
2403                 /* skip "trusted." attributes for unprivileged callers */
2404                 if (!trusted && xattr_is_trusted(xattr->name))
2405                         continue;
2406
2407                 len = strlen(xattr->name) + 1;
2408                 used += len;
2409                 if (buffer) {
2410                         if (size < used) {
2411                                 used = -ERANGE;
2412                                 break;
2413                         }
2414                         memcpy(buffer, xattr->name, len);
2415                         buffer += len;
2416                 }
2417         }
2418         spin_unlock(&info->lock);
2419
2420         return used;
2421 }
2422 #endif /* CONFIG_TMPFS_XATTR */
2423
2424 static const struct inode_operations shmem_symlink_inline_operations = {
2425         .readlink       = generic_readlink,
2426         .follow_link    = shmem_follow_link_inline,
2427 #ifdef CONFIG_TMPFS_XATTR
2428         .setxattr       = shmem_setxattr,
2429         .getxattr       = shmem_getxattr,
2430         .listxattr      = shmem_listxattr,
2431         .removexattr    = shmem_removexattr,
2432 #endif
2433 };
2434
2435 static const struct inode_operations shmem_symlink_inode_operations = {
2436         .readlink       = generic_readlink,
2437         .follow_link    = shmem_follow_link,
2438         .put_link       = shmem_put_link,
2439 #ifdef CONFIG_TMPFS_XATTR
2440         .setxattr       = shmem_setxattr,
2441         .getxattr       = shmem_getxattr,
2442         .listxattr      = shmem_listxattr,
2443         .removexattr    = shmem_removexattr,
2444 #endif
2445 };
2446
2447 static struct dentry *shmem_get_parent(struct dentry *child)
2448 {
2449         return ERR_PTR(-ESTALE);
2450 }
2451
2452 static int shmem_match(struct inode *ino, void *vfh)
2453 {
2454         __u32 *fh = vfh;
2455         __u64 inum = fh[2];
2456         inum = (inum << 32) | fh[1];
2457         return ino->i_ino == inum && fh[0] == ino->i_generation;
2458 }
2459
2460 static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
2461                 struct fid *fid, int fh_len, int fh_type)
2462 {
2463         struct inode *inode;
2464         struct dentry *dentry = NULL;
2465         u64 inum = fid->raw[2];
2466         inum = (inum << 32) | fid->raw[1];
2467
2468         if (fh_len < 3)
2469                 return NULL;
2470
2471         inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
2472                         shmem_match, fid->raw);
2473         if (inode) {
2474                 dentry = d_find_alias(inode);
2475                 iput(inode);
2476         }
2477
2478         return dentry;
2479 }
2480
2481 static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
2482                                 int connectable)
2483 {
2484         struct inode *inode = dentry->d_inode;
2485
2486         if (*len < 3) {
2487                 *len = 3;
2488                 return 255;
2489         }
2490
2491         if (inode_unhashed(inode)) {
2492                 /* Unfortunately insert_inode_hash is not idempotent,
2493                  * so as we hash inodes here rather than at creation
2494                  * time, we need a lock to ensure we only try
2495                  * to do it once.
2496                  */
2497                 static DEFINE_SPINLOCK(lock);
2498                 spin_lock(&lock);
2499                 if (inode_unhashed(inode))
2500                         __insert_inode_hash(inode,
2501                                             inode->i_ino + inode->i_generation);
2502                 spin_unlock(&lock);
2503         }
2504
2505         fh[0] = inode->i_generation;
2506         fh[1] = inode->i_ino;
2507         fh[2] = ((__u64)inode->i_ino) >> 32;
2508
2509         *len = 3;
2510         return 1;
2511 }
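
/*
 * Resulting handle layout (as encoded above):
 *
 *	fh[0] = i_generation
 *	fh[1] = low 32 bits of i_ino
 *	fh[2] = high 32 bits of i_ino
 *
 * so shmem_fh_to_dentry() and shmem_match() reassemble the inode number
 * as ((u64)fh[2] << 32) | fh[1] and check fh[0] against i_generation.
 */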
2512
2513 static const struct export_operations shmem_export_ops = {
2514         .get_parent     = shmem_get_parent,
2515         .encode_fh      = shmem_encode_fh,
2516         .fh_to_dentry   = shmem_fh_to_dentry,
2517 };
2518
2519 static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
2520                                bool remount)
2521 {
2522         char *this_char, *value, *rest;
2523
2524         while (options != NULL) {
2525                 this_char = options;
2526                 for (;;) {
2527                         /*
2528                          * NUL-terminate this option: unfortunately,
2529                          * mount options form a comma-separated list,
2530                          * but mpol's nodelist may also contain commas.
2531                          */
2532                         options = strchr(options, ',');
2533                         if (options == NULL)
2534                                 break;
2535                         options++;
2536                         if (!isdigit(*options)) {
2537                                 options[-1] = '\0';
2538                                 break;
2539                         }
2540                 }
2541                 if (!*this_char)
2542                         continue;
2543                 if ((value = strchr(this_char,'=')) != NULL) {
2544                         *value++ = 0;
2545                 } else {
2546                         printk(KERN_ERR
2547                             "tmpfs: No value for mount option '%s'\n",
2548                             this_char);
2549                         return 1;
2550                 }
2551
2552                 if (!strcmp(this_char,"size")) {
2553                         unsigned long long size;
2554                         size = memparse(value,&rest);
2555                         if (*rest == '%') {
2556                                 size <<= PAGE_SHIFT;
2557                                 size *= totalram_pages;
2558                                 do_div(size, 100);
2559                                 rest++;
2560                         }
2561                         if (*rest)
2562                                 goto bad_val;
2563                         sbinfo->max_blocks =
2564                                 DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
2565                 } else if (!strcmp(this_char,"nr_blocks")) {
2566                         sbinfo->max_blocks = memparse(value, &rest);
2567                         if (*rest)
2568                                 goto bad_val;
2569                 } else if (!strcmp(this_char,"nr_inodes")) {
2570                         sbinfo->max_inodes = memparse(value, &rest);
2571                         if (*rest)
2572                                 goto bad_val;
2573                 } else if (!strcmp(this_char,"mode")) {
2574                         if (remount)
2575                                 continue;
2576                         sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
2577                         if (*rest)
2578                                 goto bad_val;
2579                 } else if (!strcmp(this_char,"uid")) {
2580                         if (remount)
2581                                 continue;
2582                         sbinfo->uid = simple_strtoul(value, &rest, 0);
2583                         if (*rest)
2584                                 goto bad_val;
2585                 } else if (!strcmp(this_char,"gid")) {
2586                         if (remount)
2587                                 continue;
2588                         sbinfo->gid = simple_strtoul(value, &rest, 0);
2589                         if (*rest)
2590                                 goto bad_val;
2591                 } else if (!strcmp(this_char,"mpol")) {
2592                         if (mpol_parse_str(value, &sbinfo->mpol, 1))
2593                                 goto bad_val;
2594                 } else {
2595                         printk(KERN_ERR "tmpfs: Bad mount option %s\n",
2596                                this_char);
2597                         return 1;
2598                 }
2599         }
2600         return 0;
2601
2602 bad_val:
2603         printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
2604                value, this_char);
2605         return 1;
2606
2607 }
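
/*
 * Illustrative mounts (hypothetical mount points) accepted by the
 * parser above:
 *
 *	mount -t tmpfs -o size=512m,nr_inodes=10k,mode=1777 tmpfs /mnt
 *	mount -t tmpfs -o size=50%,mpol=interleave:0-3 tmpfs /mnt
 *
 * "size" takes k/m/g suffixes via memparse(), or a trailing '%' of
 * totalram_pages; "mpol" may embed commas in its nodelist, which is why
 * the splitting loop above refuses to break before a digit.
 */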
2608
2609 static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
2610 {
2611         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2612         struct shmem_sb_info config = *sbinfo;
2613         unsigned long inodes;
2614         int error = -EINVAL;
2615
2616         if (shmem_parse_options(data, &config, true))
2617                 return error;
2618
2619         spin_lock(&sbinfo->stat_lock);
2620         inodes = sbinfo->max_inodes - sbinfo->free_inodes;
2621         if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0)
2622                 goto out;
2623         if (config.max_inodes < inodes)
2624                 goto out;
2625         /*
2626          * Those tests also disallow limited->unlimited while any are in
2627          * use, so i_blocks will always be zero when max_blocks is zero;
2628          * but we must separately disallow unlimited->limited, because
2629          * in that case we have no record of how much is already in use.
2630          */
2631         if (config.max_blocks && !sbinfo->max_blocks)
2632                 goto out;
2633         if (config.max_inodes && !sbinfo->max_inodes)
2634                 goto out;
2635
2636         error = 0;
2637         sbinfo->max_blocks  = config.max_blocks;
2638         sbinfo->max_inodes  = config.max_inodes;
2639         sbinfo->free_inodes = config.max_inodes - inodes;
2640
2641         mpol_put(sbinfo->mpol);
2642         sbinfo->mpol        = config.mpol;      /* transfers initial ref */
2643 out:
2644         spin_unlock(&sbinfo->stat_lock);
2645         return error;
2646 }
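
/*
 * Illustrative sketch (hypothetical mount point): limits are adjusted
 * on a live mount via remount, which funnels into the checks above:
 *
 *	mount -o remount,size=1g /mnt
 *
 * Shrinking below current usage, or switching an unlimited instance to
 * a limited one, fails with EINVAL as explained above.
 */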
2647
2648 static int shmem_show_options(struct seq_file *seq, struct vfsmount *vfs)
2649 {
2650         struct shmem_sb_info *sbinfo = SHMEM_SB(vfs->mnt_sb);
2651
2652         if (sbinfo->max_blocks != shmem_default_max_blocks())
2653                 seq_printf(seq, ",size=%luk",
2654                         sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10));
2655         if (sbinfo->max_inodes != shmem_default_max_inodes())
2656                 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
2657         if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
2658                 seq_printf(seq, ",mode=%03o", sbinfo->mode);
2659         if (sbinfo->uid != 0)
2660                 seq_printf(seq, ",uid=%u", sbinfo->uid);
2661         if (sbinfo->gid != 0)
2662                 seq_printf(seq, ",gid=%u", sbinfo->gid);
2663         shmem_show_mpol(seq, sbinfo->mpol);
2664         return 0;
2665 }
2666 #endif /* CONFIG_TMPFS */
2667
2668 static void shmem_put_super(struct super_block *sb)
2669 {
2670         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2671
2672         percpu_counter_destroy(&sbinfo->used_blocks);
2673         kfree(sbinfo);
2674         sb->s_fs_info = NULL;
2675 }
2676
2677 int shmem_fill_super(struct super_block *sb, void *data, int silent)
2678 {
2679         struct inode *inode;
2680         struct dentry *root;
2681         struct shmem_sb_info *sbinfo;
2682         int err = -ENOMEM;
2683
2684         /* Round up to L1_CACHE_BYTES to resist false sharing */
2685         sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
2686                                 L1_CACHE_BYTES), GFP_KERNEL);
2687         if (!sbinfo)
2688                 return -ENOMEM;
2689
2690         sbinfo->mode = S_IRWXUGO | S_ISVTX;
2691         sbinfo->uid = current_fsuid();
2692         sbinfo->gid = current_fsgid();
2693         sb->s_fs_info = sbinfo;
2694
2695 #ifdef CONFIG_TMPFS
2696         /*
2697          * By default we only allow half of the physical RAM per
2698          * tmpfs instance, limiting inodes to one per page of lowmem;
2699          * but the internal instance is left unlimited.
2700          */
2701         if (!(sb->s_flags & MS_NOUSER)) {
2702                 sbinfo->max_blocks = shmem_default_max_blocks();
2703                 sbinfo->max_inodes = shmem_default_max_inodes();
2704                 if (shmem_parse_options(data, sbinfo, false)) {
2705                         err = -EINVAL;
2706                         goto failed;
2707                 }
2708         }
2709         sb->s_export_op = &shmem_export_ops;
2710 #else
2711         sb->s_flags |= MS_NOUSER;
2712 #endif
2713
2714         spin_lock_init(&sbinfo->stat_lock);
2715         if (percpu_counter_init(&sbinfo->used_blocks, 0))
2716                 goto failed;
2717         sbinfo->free_inodes = sbinfo->max_inodes;
2718
2719         sb->s_maxbytes = SHMEM_MAX_BYTES;
2720         sb->s_blocksize = PAGE_CACHE_SIZE;
2721         sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
2722         sb->s_magic = TMPFS_MAGIC;
2723         sb->s_op = &shmem_ops;
2724         sb->s_time_gran = 1;
2725 #ifdef CONFIG_TMPFS_XATTR
2726         sb->s_xattr = shmem_xattr_handlers;
2727 #endif
2728 #ifdef CONFIG_TMPFS_POSIX_ACL
2729         sb->s_flags |= MS_POSIXACL;
2730 #endif
2731
2732         inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
2733         if (!inode)
2734                 goto failed;
2735         inode->i_uid = sbinfo->uid;
2736         inode->i_gid = sbinfo->gid;
2737         root = d_alloc_root(inode);
2738         if (!root)
2739                 goto failed_iput;
2740         sb->s_root = root;
2741         return 0;
2742
2743 failed_iput:
2744         iput(inode);
2745 failed:
2746         shmem_put_super(sb);
2747         return err;
2748 }
2749
2750 static struct kmem_cache *shmem_inode_cachep;
2751
2752 static struct inode *shmem_alloc_inode(struct super_block *sb)
2753 {
2754         struct shmem_inode_info *p;
2755         p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
2756         if (!p)
2757                 return NULL;
2758         return &p->vfs_inode;
2759 }
2760
2761 static void shmem_i_callback(struct rcu_head *head)
2762 {
2763         struct inode *inode = container_of(head, struct inode, i_rcu);
2764         INIT_LIST_HEAD(&inode->i_dentry);
2765         kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
2766 }
2767
2768 static void shmem_destroy_inode(struct inode *inode)
2769 {
2770         if ((inode->i_mode & S_IFMT) == S_IFREG) {
2771                 /* only struct inode is valid if it's an inline symlink */
2772                 mpol_free_shared_policy(&SHMEM_I(inode)->policy);
2773         }
2774         call_rcu(&inode->i_rcu, shmem_i_callback);
2775 }
2776
2777 static void init_once(void *foo)
2778 {
2779         struct shmem_inode_info *p = (struct shmem_inode_info *) foo;
2780
2781         inode_init_once(&p->vfs_inode);
2782 }
2783
2784 static int init_inodecache(void)
2785 {
2786         shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
2787                                 sizeof(struct shmem_inode_info),
2788                                 0, SLAB_PANIC, init_once);
2789         return 0;
2790 }
2791
2792 static void destroy_inodecache(void)
2793 {
2794         kmem_cache_destroy(shmem_inode_cachep);
2795 }
2796
2797 static const struct address_space_operations shmem_aops = {
2798         .writepage      = shmem_writepage,
2799         .set_page_dirty = __set_page_dirty_no_writeback,
2800 #ifdef CONFIG_TMPFS
2801         .readpage       = shmem_readpage,
2802         .write_begin    = shmem_write_begin,
2803         .write_end      = shmem_write_end,
2804 #endif
2805         .migratepage    = migrate_page,
2806         .error_remove_page = generic_error_remove_page,
2807 };
2808
2809 static const struct file_operations shmem_file_operations = {
2810         .mmap           = shmem_mmap,
2811 #ifdef CONFIG_TMPFS
2812         .llseek         = generic_file_llseek,
2813         .read           = do_sync_read,
2814         .write          = do_sync_write,
2815         .aio_read       = shmem_file_aio_read,
2816         .aio_write      = generic_file_aio_write,
2817         .fsync          = noop_fsync,
2818         .splice_read    = shmem_file_splice_read,
2819         .splice_write   = generic_file_splice_write,
2820 #endif
2821 };
2822
2823 static const struct inode_operations shmem_inode_operations = {
2824         .setattr        = shmem_setattr,
2825         .truncate_range = shmem_truncate_range,
2826 #ifdef CONFIG_TMPFS_XATTR
2827         .setxattr       = shmem_setxattr,
2828         .getxattr       = shmem_getxattr,
2829         .listxattr      = shmem_listxattr,
2830         .removexattr    = shmem_removexattr,
2831 #endif
2832 #ifdef CONFIG_TMPFS_POSIX_ACL
2833         .check_acl      = generic_check_acl,
2834 #endif
2836 };
2837
2838 static const struct inode_operations shmem_dir_inode_operations = {
2839 #ifdef CONFIG_TMPFS
2840         .create         = shmem_create,
2841         .lookup         = simple_lookup,
2842         .link           = shmem_link,
2843         .unlink         = shmem_unlink,
2844         .symlink        = shmem_symlink,
2845         .mkdir          = shmem_mkdir,
2846         .rmdir          = shmem_rmdir,
2847         .mknod          = shmem_mknod,
2848         .rename         = shmem_rename,
2849 #endif
2850 #ifdef CONFIG_TMPFS_XATTR
2851         .setxattr       = shmem_setxattr,
2852         .getxattr       = shmem_getxattr,
2853         .listxattr      = shmem_listxattr,
2854         .removexattr    = shmem_removexattr,
2855 #endif
2856 #ifdef CONFIG_TMPFS_POSIX_ACL
2857         .setattr        = shmem_setattr,
2858         .check_acl      = generic_check_acl,
2859 #endif
2860 };
2861
2862 static const struct inode_operations shmem_special_inode_operations = {
2863 #ifdef CONFIG_TMPFS_XATTR
2864         .setxattr       = shmem_setxattr,
2865         .getxattr       = shmem_getxattr,
2866         .listxattr      = shmem_listxattr,
2867         .removexattr    = shmem_removexattr,
2868 #endif
2869 #ifdef CONFIG_TMPFS_POSIX_ACL
2870         .setattr        = shmem_setattr,
2871         .check_acl      = generic_check_acl,
2872 #endif
2873 };
2874
2875 static const struct super_operations shmem_ops = {
2876         .alloc_inode    = shmem_alloc_inode,
2877         .destroy_inode  = shmem_destroy_inode,
2878 #ifdef CONFIG_TMPFS
2879         .statfs         = shmem_statfs,
2880         .remount_fs     = shmem_remount_fs,
2881         .show_options   = shmem_show_options,
2882 #endif
2883         .evict_inode    = shmem_evict_inode,
2884         .drop_inode     = generic_delete_inode,
2885         .put_super      = shmem_put_super,
2886 };
2887
2888 static const struct vm_operations_struct shmem_vm_ops = {
2889         .fault          = shmem_fault,
2890 #ifdef CONFIG_NUMA
2891         .set_policy     = shmem_set_policy,
2892         .get_policy     = shmem_get_policy,
2893 #endif
2894 };
2895
2897 static struct dentry *shmem_mount(struct file_system_type *fs_type,
2898         int flags, const char *dev_name, void *data)
2899 {
2900         return mount_nodev(fs_type, flags, data, shmem_fill_super);
2901 }
2902
2903 static struct file_system_type tmpfs_fs_type = {
2904         .owner          = THIS_MODULE,
2905         .name           = "tmpfs",
2906         .mount          = shmem_mount,
2907         .kill_sb        = kill_litter_super,
2908 };
2909
2910 int __init init_tmpfs(void)
2911 {
2912         int error;
2913
2914         error = bdi_init(&shmem_backing_dev_info);
2915         if (error)
2916                 goto out4;
2917
2918         error = init_inodecache();
2919         if (error)
2920                 goto out3;
2921
2922         error = register_filesystem(&tmpfs_fs_type);
2923         if (error) {
2924                 printk(KERN_ERR "Could not register tmpfs\n");
2925                 goto out2;
2926         }
2927
2928         shm_mnt = vfs_kern_mount(&tmpfs_fs_type, MS_NOUSER,
2929                                 tmpfs_fs_type.name, NULL);
2930         if (IS_ERR(shm_mnt)) {
2931                 error = PTR_ERR(shm_mnt);
2932                 printk(KERN_ERR "Could not kern_mount tmpfs\n");
2933                 goto out1;
2934         }
2935         return 0;
2936
2937 out1:
2938         unregister_filesystem(&tmpfs_fs_type);
2939 out2:
2940         destroy_inodecache();
2941 out3:
2942         bdi_destroy(&shmem_backing_dev_info);
2943 out4:
2944         shm_mnt = ERR_PTR(error);
2945         return error;
2946 }
2947
2948 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
2949 /**
2950  * mem_cgroup_get_shmem_target - find a page or entry assigned to the shmem file
2951  * @inode: the inode to be searched
2952  * @pgoff: the offset to be searched
2953  * @pagep: where to store the found page, if any
2954  * @ent: where to store the found swap entry, if any
2955  *
2956  * If a page is found, its refcount is incremented.  The caller is
2957  * responsible for dropping that reference.
2958  */
2959 void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff,
2960                                         struct page **pagep, swp_entry_t *ent)
2961 {
2962         swp_entry_t entry = { .val = 0 }, *ptr;
2963         struct page *page = NULL;
2964         struct shmem_inode_info *info = SHMEM_I(inode);
2965
2966         if (((loff_t)pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
2967                 goto out;
2968
2969         spin_lock(&info->lock);
2970         ptr = shmem_swp_entry(info, pgoff, NULL);
2971 #ifdef CONFIG_SWAP
2972         if (ptr && ptr->val) {
2973                 entry.val = ptr->val;
2974                 page = find_get_page(&swapper_space, entry.val);
2975         } else
2976 #endif
2977                 page = find_get_page(inode->i_mapping, pgoff);
2978         if (ptr)
2979                 shmem_swp_unmap(ptr);
2980         spin_unlock(&info->lock);
2981 out:
2982         *pagep = page;
2983         *ent = entry;
2984 }
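/*
 * Example usage (an illustrative sketch, not part of the original file;
 * the helper name is hypothetical): a memcg-side caller probing where a
 * shmem page currently lives.  If a page is returned, the reference taken
 * by find_get_page() above must be dropped by the caller.
 */
static void __maybe_unused shmem_target_example(struct inode *inode,
                                                pgoff_t pgoff)
{
        struct page *page;
        swp_entry_t ent;

        mem_cgroup_get_shmem_target(inode, pgoff, &page, &ent);
        if (page) {
                /* resident (or swapcache) page: use it, then drop the ref */
                page_cache_release(page);
        } else if (ent.val) {
                /* not in memory: ent identifies the swap slot holding it */
        }
}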
2985 #endif
2986
2987 #else /* !CONFIG_SHMEM */
2988
2989 /*
2990  * tiny-shmem: simple shmemfs and tmpfs using ramfs code
2991  *
2992  * This is intended for small systems where the benefits of the full
2993  * shmem code (swap-backed and resource-limited) are outweighed by
2994  * its complexity. On systems without swap this code should be
2995  * effectively equivalent, but much lighter weight.
2996  */
2997
2998 #include <linux/ramfs.h>
2999
3000 static struct file_system_type tmpfs_fs_type = {
3001         .name           = "tmpfs",
3002         .mount          = ramfs_mount,
3003         .kill_sb        = kill_litter_super,
3004 };
3005
3006 int __init init_tmpfs(void)
3007 {
3008         BUG_ON(register_filesystem(&tmpfs_fs_type) != 0);
3009
3010         shm_mnt = kern_mount(&tmpfs_fs_type);
3011         BUG_ON(IS_ERR(shm_mnt));
3012
3013         return 0;
3014 }
3015
3016 int shmem_unuse(swp_entry_t entry, struct page *page)
3017 {
3018         return 0;
3019 }
3020
3021 int shmem_lock(struct file *file, int lock, struct user_struct *user)
3022 {
3023         return 0;
3024 }
3025
3026 void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
3027 {
3028         truncate_inode_pages_range(inode->i_mapping, start, end);
3029 }
3030 EXPORT_SYMBOL_GPL(shmem_truncate_range);
3031
3032 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
3033 /**
3034  * mem_cgroup_get_shmem_target - find a page or entry assigned to the shmem file
3035  * @inode: the inode to be searched
3036  * @pgoff: the offset to be searched
3037  * @pagep: where to store the found page, if any
3038  * @ent: where to store the found swap entry, if any
3039  *
3040  * If a page is found, its refcount is incremented.  The caller is
3041  * responsible for dropping that reference.
3042  */
3043 void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff,
3044                                         struct page **pagep, swp_entry_t *ent)
3045 {
3046         struct page *page = NULL;
3047
3048         if (((loff_t)pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
3049                 goto out;
3050         page = find_get_page(inode->i_mapping, pgoff);
3051 out:
3052         *pagep = page;
3053         *ent = (swp_entry_t){ .val = 0 };
3054 }
3055 #endif
3056
3057 #define shmem_vm_ops                            generic_file_vm_ops
3058 #define shmem_file_operations                   ramfs_file_operations
3059 #define shmem_get_inode(sb, dir, mode, dev, flags)      ramfs_get_inode(sb, dir, mode, dev)
3060 #define shmem_acct_size(flags, size)            0
3061 #define shmem_unacct_size(flags, size)          do {} while (0)
3062 #define SHMEM_MAX_BYTES                         MAX_LFS_FILESIZE
3063
3064 #endif /* CONFIG_SHMEM */
3065
3066 /* common code */
3067
3068 /**
3069  * shmem_file_setup - get an unlinked file living in tmpfs
3070  * @name: name for dentry (to be seen in /proc/<pid>/maps)
3071  * @size: size to be set for the file
3072  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
3073  */
3074 struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
3075 {
3076         int error;
3077         struct file *file;
3078         struct inode *inode;
3079         struct path path;
3080         struct dentry *root;
3081         struct qstr this;
3082
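        /* shm_mnt may hold an ERR_PTR from init_tmpfs(): return that error */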
3083         if (IS_ERR(shm_mnt))
3084                 return (void *)shm_mnt;
3085
3086         if (size < 0 || size > SHMEM_MAX_BYTES)
3087                 return ERR_PTR(-EINVAL);
3088
3089         if (shmem_acct_size(flags, size))
3090                 return ERR_PTR(-ENOMEM);
3091
3092         error = -ENOMEM;
3093         this.name = name;
3094         this.len = strlen(name);
3095         this.hash = 0; /* unused: this dentry is never hashed */
3096         root = shm_mnt->mnt_root;
3097         path.dentry = d_alloc(root, &this);
3098         if (!path.dentry)
3099                 goto put_memory;
3100         path.mnt = mntget(shm_mnt);
3101
3102         error = -ENOSPC;
3103         inode = shmem_get_inode(root->d_sb, NULL, S_IFREG | S_IRWXUGO, 0, flags);
3104         if (!inode)
3105                 goto put_dentry;
3106
3107         d_instantiate(path.dentry, inode);
3108         inode->i_size = size;
3109         inode->i_nlink = 0;     /* It is unlinked */
3110 #ifndef CONFIG_MMU
3111         error = ramfs_nommu_expand_for_mapping(inode, size);
3112         if (error)
3113                 goto put_dentry;
3114 #endif
3115
3116         error = -ENFILE;
3117         file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
3118                   &shmem_file_operations);
3119         if (!file)
3120                 goto put_dentry;
3121
3122         return file;
3123
3124 put_dentry:
3125         path_put(&path);
3126 put_memory:
3127         shmem_unacct_size(flags, size);
3128         return ERR_PTR(error);
3129 }
3130 EXPORT_SYMBOL_GPL(shmem_file_setup);
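/*
 * Example usage (an illustrative sketch, not part of the original file;
 * the helper name is hypothetical): create a one-megabyte unlinked tmpfs
 * file.  shmem_file_setup() returns an ERR_PTR on failure, and since the
 * file was never linked anywhere, fput() on the last reference frees it.
 */
static int __maybe_unused shmem_file_setup_example(void)
{
        struct file *file;

        file = shmem_file_setup("example", 1024 * 1024, VM_NORESERVE);
        if (IS_ERR(file))
                return PTR_ERR(file);
        /* ... access the contents through file->f_mapping ... */
        fput(file);
        return 0;
}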
3131
3132 /**
3133  * shmem_zero_setup - setup a shared anonymous mapping
3134  * @vma: the vma to be mmapped, as prepared by do_mmap_pgoff
3135  */
3136 int shmem_zero_setup(struct vm_area_struct *vma)
3137 {
3138         struct file *file;
3139         loff_t size = vma->vm_end - vma->vm_start;
3140
3141         file = shmem_file_setup("dev/zero", size, vma->vm_flags);
3142         if (IS_ERR(file))
3143                 return PTR_ERR(file);
3144
3145         if (vma->vm_file)
3146                 fput(vma->vm_file);
3147         vma->vm_file = file;
3148         vma->vm_ops = &shmem_vm_ops;
3149         vma->vm_flags |= VM_CAN_NONLINEAR;
3150         return 0;
3151 }
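/*
 * Example (an illustrative sketch, simplified from the mmap path; the
 * helper name is hypothetical): this is roughly how a shared anonymous
 * mapping acquires tmpfs backing, so that its pages can be shared across
 * fork() and written out to swap.
 */
static int __maybe_unused map_shared_anon_example(struct vm_area_struct *vma)
{
        if (vma->vm_flags & VM_SHARED)
                return shmem_zero_setup(vma);   /* back the vma with tmpfs */
        return 0;
}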
3152
3153 /**
3154  * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
3155  * @mapping:    the page's address_space
3156  * @index:      the page index
3157  * @gfp:        the page allocator flags to use if allocating
3158  *
3159  * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
3160  * with any new page allocations done using the specified allocation flags.
3161  * But read_cache_page_gfp() uses the ->readpage() method, which does not
3162  * suit tmpfs: tmpfs may have pages in swapcache and must find those for
3163  * itself; nevertheless the drivers/gpu/drm i915 and ttm drivers rely on it.
3164  *
3165  * Provide a stub for those callers to start using now, then later
3166  * flesh it out to call shmem_getpage() with the additional gfp mask, when
3167  * shmem_file_splice_read() is added and shmem_readpage() is removed.
3168  */
3169 struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
3170                                          pgoff_t index, gfp_t gfp)
3171 {
3172         return read_cache_page_gfp(mapping, index, gfp);
3173 }
3174 EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
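/*
 * Example usage (an illustrative sketch modelled on the GPU drivers the
 * comment above mentions; the helper name and gfp choice are assumptions):
 * read one object page, letting any new allocation obey a restricted mask.
 */
static struct page * __maybe_unused
shmem_read_page_example(struct address_space *mapping, pgoff_t index)
{
        struct page *page;

        page = shmem_read_mapping_page_gfp(mapping, index,
                                mapping_gfp_mask(mapping) | __GFP_NORETRY);
        /* on failure this is an ERR_PTR; on success the page holds a
         * reference which the caller drops with page_cache_release() */
        return page;
}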