/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000-2001 Christoph Rohland
 * Copyright (C) 2002-2005 Hugh Dickins.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/percpu_counter.h>
#include <linux/swap.h>

static struct vfsmount *shm_mnt;
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */
#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/generic_acl.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>

#include <asm/uaccess.h>
#include <asm/div64.h>
#include <asm/pgtable.h>
/*
 * The maximum size of a shmem/tmpfs file is limited by the maximum size of
 * its triple-indirect swap vector - see illustration at shmem_swp_entry().
 *
 * With 4kB page size, maximum file size is just over 2TB on a 32-bit kernel,
 * but one eighth of that on a 64-bit kernel.  With 8kB page size, maximum
 * file size is just over 4TB on a 64-bit kernel, but 16TB on a 32-bit kernel,
 * MAX_LFS_FILESIZE being then more restrictive than swap vector layout.
 *
 * We use / and * instead of shifts in the definitions below, so that the swap
 * vector can be tested with small even values (e.g. 20) for ENTRIES_PER_PAGE.
 */
#define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
#define ENTRIES_PER_PAGEPAGE ((unsigned long long) ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)

#define SHMSWP_MAX_INDEX (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
#define SHMSWP_MAX_BYTES (SHMSWP_MAX_INDEX << PAGE_CACHE_SHIFT)

#define SHMEM_MAX_BYTES  min_t(unsigned long long, SHMSWP_MAX_BYTES, MAX_LFS_FILESIZE)
#define SHMEM_MAX_INDEX  ((unsigned long)((SHMEM_MAX_BYTES+1) >> PAGE_CACHE_SHIFT))

#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)
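
/*
 * Illustrative sketch (not from the kernel tree): the capacity arithmetic
 * above, reproduced as a standalone userspace program.  It assumes a 4kB
 * page size and SHMEM_NR_DIRECT == 16; the host's sizeof(unsigned long)
 * decides whether the result matches the 32-bit or the 64-bit figure quoted
 * in the comment above.  Disabled here; compile it separately to run it.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long long page_size = 4096;	/* assumed PAGE_CACHE_SIZE */
	unsigned long long epp = page_size / sizeof(unsigned long);
	unsigned long long eppp = epp * epp;
	unsigned long long nr_direct = 16;	/* assumed SHMEM_NR_DIRECT */
	unsigned long long max_index = nr_direct + (eppp / 2) * (epp + 1);

	/* 8-byte longs: epp = 512, max size ~ 256GB; 4-byte longs: ~ 2TB */
	printf("entries per index page: %llu\n", epp);
	printf("max file size: %llu bytes\n", max_index * page_size);
	return 0;
}
#endif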
/* info->flags needs VM_flags to handle pagein/truncate races efficiently */
#define SHMEM_PAGEIN	 VM_READ
#define SHMEM_TRUNCATE	 VM_WRITE

/* Definition to limit shmem_truncate's steps between cond_rescheds */
#define LATENCY_LIMIT	 64

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20
struct shmem_xattr {
	struct list_head list;	/* anchored by shmem_inode_info->xattr_list */
	char *name;		/* xattr name */
	size_t size;
	char value[0];
};
/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
enum sgp_type {
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_DIRTY,	/* like SGP_CACHE, but set new page dirty */
	SGP_WRITE,	/* may exceed i_size, may allocate page */
};
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type);

static inline int shmem_getpage(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, int *fault_type)
{
	return shmem_getpage_gfp(inode, index, pagep, sgp,
			mapping_gfp_mask(inode->i_mapping), fault_type);
}
static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
{
	/*
	 * The above definition of ENTRIES_PER_PAGE, and the use of
	 * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
	 * might be reconsidered if it ever diverges from PAGE_SIZE.
	 *
	 * Mobility flags are masked out as swap vectors cannot move
	 */
	return alloc_pages((gfp_mask & ~GFP_MOVABLE_MASK) | __GFP_ZERO,
				PAGE_CACHE_SHIFT-PAGE_SHIFT);
}
static inline void shmem_dir_free(struct page *page)
{
	__free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static struct page **shmem_dir_map(struct page *page)
{
	return (struct page **)kmap_atomic(page, KM_USER0);
}

static inline void shmem_dir_unmap(struct page **dir)
{
	kunmap_atomic(dir, KM_USER0);
}

static swp_entry_t *shmem_swp_map(struct page *page)
{
	return (swp_entry_t *)kmap_atomic(page, KM_USER1);
}

static inline void shmem_swp_balance_unmap(void)
{
	/*
	 * When passing a pointer to an i_direct entry, to code which
	 * also handles indirect entries and so will shmem_swp_unmap,
	 * we must arrange for the preempt count to remain in balance.
	 * What kmap_atomic of a lowmem page does depends on config
	 * and architecture, so pretend to kmap_atomic some lowmem page.
	 */
	(void) kmap_atomic(ZERO_PAGE(0), KM_USER1);
}

static inline void shmem_swp_unmap(swp_entry_t *entry)
{
	kunmap_atomic(entry, KM_USER1);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}
/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_kern(VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}
/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
	return (flags & VM_NORESERVE) ?
		security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE)) : 0;
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}
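
/*
 * Illustrative userspace sketch (not from the kernel tree) of the
 * incremental accounting described above: a huge sparse tmpfs file costs
 * nothing until pages are touched, which is visible through st_blocks.
 * Assumes /dev/shm is a mounted tmpfs; error checking omitted for brevity.
 * Disabled here; compile it separately to run it.
 */
#if 0
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/stat.h>

int main(void)
{
	int fd = open("/dev/shm/sparse-demo", O_RDWR | O_CREAT | O_TRUNC, 0600);
	struct stat st;

	ftruncate(fd, 1ULL << 30);	/* 1GB logical size, no pages yet */
	fstat(fd, &st);
	printf("after ftruncate: %lld blocks\n", (long long)st.st_blocks);

	pwrite(fd, "x", 1, 0);		/* touch one page: now it is accounted */
	fstat(fd, &st);
	printf("after 1-byte write: %lld blocks\n", (long long)st.st_blocks);

	unlink("/dev/shm/sparse-demo");
	close(fd);
	return 0;
}
#endif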
static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info  __read_mostly = {
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);
static void shmem_free_blocks(struct inode *inode, long pages)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	if (sbinfo->max_blocks) {
		percpu_counter_add(&sbinfo->used_blocks, -pages);
		inode->i_blocks -= pages*BLOCKS_PER_PAGE;
	}
}
static int shmem_reserve_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}
	return 0;
}
static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}
/**
 * shmem_recalc_inode - recalculate the size of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		info->alloced -= freed;
		shmem_unacct_blocks(info->flags, freed);
		shmem_free_blocks(inode, freed);
	}
}
/**
 * shmem_swp_entry - find the swap vector position in the info structure
 * @info:  info structure for the inode
 * @index: index of the page to find
 * @page:  optional page to add to the structure. Has to be preset to
 *         NULL.
 *
 * If there is no space allocated yet it will return NULL when
 * page is NULL, else it will use the page for the needed block,
 * setting it to NULL on return to indicate that it has been used.
 *
 * The swap vector is organized the following way:
 *
 * There are SHMEM_NR_DIRECT entries directly stored in the
 * shmem_inode_info structure. So small files do not need an additional
 * allocation.
 *
 * For pages with index > SHMEM_NR_DIRECT there is the pointer
 * i_indirect which points to a page which holds in the first half
 * doubly indirect blocks, in the second half triple indirect blocks:
 *
 * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the
 * following layout (for SHMEM_NR_DIRECT == 16):
 *
 * i_indirect -> dir --> 16-19
 */
static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page)
{
	unsigned long offset;
	struct page **dir;
	struct page *subdir;

	if (index < SHMEM_NR_DIRECT) {
		shmem_swp_balance_unmap();
		return info->i_direct+index;
	}
	if (!info->i_indirect) {
		if (page) {
			info->i_indirect = *page;
			*page = NULL;
		}
		return NULL;			/* need another page */
	}

	index -= SHMEM_NR_DIRECT;
	offset = index % ENTRIES_PER_PAGE;
	index /= ENTRIES_PER_PAGE;
	dir = shmem_dir_map(info->i_indirect);

	if (index >= ENTRIES_PER_PAGE/2) {
		index -= ENTRIES_PER_PAGE/2;
		dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE;
		index %= ENTRIES_PER_PAGE;
		subdir = *dir;
		if (!subdir) {
			if (page) {
				*dir = *page;
				*page = NULL;
			}
			shmem_dir_unmap(dir);
			return NULL;		/* need another page */
		}
		shmem_dir_unmap(dir);
		dir = shmem_dir_map(subdir);
	}

	dir += index;
	subdir = *dir;
	if (!subdir) {
		if (!page || !(subdir = *page)) {
			shmem_dir_unmap(dir);
			return NULL;		/* need a page */
		}
		*dir = subdir;
		*page = NULL;
	}
	shmem_dir_unmap(dir);
	return shmem_swp_map(subdir) + offset;
}
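
/*
 * Illustrative userspace sketch (not from the kernel tree) of the index
 * decomposition performed by shmem_swp_entry() above, using the artificial
 * ENTRIES_PER_PAGE = 4 and SHMEM_NR_DIRECT = 16 from the layout comment.
 * It shows only the arithmetic, not the locking or page allocation.
 * Disabled here; compile it separately to run it.
 */
#if 0
#include <stdio.h>

#define NR_DIRECT 16
#define EPP	  4	/* toy entries-per-page */

static void locate(unsigned long index)
{
	unsigned long offset, mid;

	if (index < NR_DIRECT) {
		printf("%3lu: i_direct[%lu]\n", index, index);
		return;
	}
	index -= NR_DIRECT;
	offset = index % EPP;	/* slot within the final swap-entry page */
	index /= EPP;
	if (index < EPP / 2) {
		/* first half of i_indirect: doubly indirect */
		printf("i_indirect[%lu] -> entry[%lu]\n", index, offset);
	} else {
		/* second half of i_indirect: triple indirect */
		index -= EPP / 2;
		mid = index % EPP;
		printf("i_indirect[%lu] -> mid[%lu] -> entry[%lu]\n",
		       EPP / 2 + index / EPP, mid, offset);
	}
}

int main(void)
{
	unsigned long i;

	for (i = 0; i < 64; i += 7)
		locate(i);
	return 0;
}
#endif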
static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value)
{
	long incdec = value? 1: -1;

	entry->val = value;
	info->swapped += incdec;
	if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT) {
		struct page *page = kmap_atomic_to_page(entry);
		set_page_private(page, page_private(page) + incdec);
	}
}
/**
 * shmem_swp_alloc - get the position of the swap entry for the page.
 * @info:	info structure for the inode
 * @index:	index of the page to find
 * @sgp:	check and recheck i_size? skip allocation?
 * @gfp:	gfp mask to use for any page allocation
 *
 * If the entry does not exist, allocate it.
 */
static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info,
			unsigned long index, enum sgp_type sgp, gfp_t gfp)
{
	struct inode *inode = &info->vfs_inode;
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	struct page *page = NULL;
	swp_entry_t *entry;

	if (sgp != SGP_WRITE &&
	    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		return ERR_PTR(-EINVAL);

	while (!(entry = shmem_swp_entry(info, index, &page))) {
		if (sgp == SGP_READ)
			return shmem_swp_map(ZERO_PAGE(0));
		/*
		 * Test used_blocks against 1 less max_blocks, since we have 1 data
		 * page (and perhaps indirect index pages) yet to allocate:
		 * a waste to allocate index if we cannot allocate data.
		 */
		if (sbinfo->max_blocks) {
			if (percpu_counter_compare(&sbinfo->used_blocks,
						sbinfo->max_blocks - 1) >= 0)
				return ERR_PTR(-ENOSPC);
			percpu_counter_inc(&sbinfo->used_blocks);
			inode->i_blocks += BLOCKS_PER_PAGE;
		}

		spin_unlock(&info->lock);
		page = shmem_dir_alloc(gfp);
		spin_lock(&info->lock);
		if (!page) {
			shmem_free_blocks(inode, 1);
			return ERR_PTR(-ENOMEM);
		}
		if (sgp != SGP_WRITE &&
		    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
			entry = ERR_PTR(-EINVAL);
			break;
		}
		if (info->next_index <= index)
			info->next_index = index + 1;
	}
	if (page) {
		/* another task gave its page, or truncated the file */
		shmem_free_blocks(inode, 1);
		shmem_dir_free(page);
	}
	if (info->next_index <= index && !IS_ERR(entry))
		info->next_index = index + 1;
	return entry;
}
/**
 * shmem_free_swp - free some swap entries in a directory
 * @dir:        pointer to the directory
 * @edir:       pointer after last entry of the directory
 * @punch_lock: pointer to spinlock when needed for the holepunch case
 */
static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir,
						spinlock_t *punch_lock)
{
	spinlock_t *punch_unlock = NULL;
	swp_entry_t *ptr;
	int freed = 0;

	for (ptr = dir; ptr < edir; ptr++) {
		if (ptr->val) {
			if (unlikely(punch_lock)) {
				punch_unlock = punch_lock;
				punch_lock = NULL;
				spin_lock(punch_unlock);
				if (!ptr->val)
					continue;
			}
			free_swap_and_cache(*ptr);
			*ptr = (swp_entry_t){0};
			freed++;
		}
	}
	if (punch_unlock)
		spin_unlock(punch_unlock);
	return freed;
}
static int shmem_map_and_free_swp(struct page *subdir, int offset,
		int limit, struct page ***dir, spinlock_t *punch_lock)
{
	swp_entry_t *ptr;
	int freed = 0;

	ptr = shmem_swp_map(subdir);
	for (; offset < limit; offset += LATENCY_LIMIT) {
		int size = limit - offset;
		if (size > LATENCY_LIMIT)
			size = LATENCY_LIMIT;
		freed += shmem_free_swp(ptr+offset, ptr+offset+size,
							punch_lock);
		if (need_resched()) {
			shmem_swp_unmap(ptr);
			if (*dir) {
				shmem_dir_unmap(*dir);
				*dir = NULL;
			}
			cond_resched();
			ptr = shmem_swp_map(subdir);
		}
	}
	shmem_swp_unmap(ptr);
	return freed;
}
static void shmem_free_pages(struct list_head *next)
{
	struct page *page;
	int freed = 0;

	do {
		page = container_of(next, struct page, lru);
		next = next->next;
		shmem_dir_free(page);
		freed++;
		if (freed >= LATENCY_LIMIT) {
			cond_resched();
			freed = 0;
		}
	} while (next);
}
void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long idx;
	unsigned long size;
	unsigned long limit;
	unsigned long stage;
	unsigned long diroff;
	struct page **dir;
	struct page *topdir;
	struct page *middir;
	struct page *subdir;
	swp_entry_t *ptr;
	LIST_HEAD(pages_to_free);
	long nr_pages_to_free = 0;
	long nr_swaps_freed = 0;
	int offset;
	int freed;
	int punch_hole;
	spinlock_t *needs_lock;
	spinlock_t *punch_lock;
	unsigned long upper_limit;
	truncate_inode_pages_range(inode->i_mapping, start, end);

	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
	idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (idx >= info->next_index)
		return;

	spin_lock(&info->lock);
	info->flags |= SHMEM_TRUNCATE;
	if (likely(end == (loff_t) -1)) {
		limit = info->next_index;
		upper_limit = SHMEM_MAX_INDEX;
		info->next_index = idx;
		needs_lock = NULL;
		punch_hole = 0;
	} else {
		if (end + 1 >= inode->i_size) {	/* we may free a little more */
			limit = (inode->i_size + PAGE_CACHE_SIZE - 1) >>
							PAGE_CACHE_SHIFT;
			upper_limit = SHMEM_MAX_INDEX;
		} else {
			limit = (end + 1) >> PAGE_CACHE_SHIFT;
			upper_limit = limit;
		}
		needs_lock = &info->lock;
		punch_hole = 1;
	}

	topdir = info->i_indirect;
	if (topdir && idx <= SHMEM_NR_DIRECT && !punch_hole) {
		info->i_indirect = NULL;
		nr_pages_to_free++;
		list_add(&topdir->lru, &pages_to_free);
	}
	spin_unlock(&info->lock);

	if (info->swapped && idx < SHMEM_NR_DIRECT) {
		ptr = info->i_direct;
		size = limit;
		if (size > SHMEM_NR_DIRECT)
			size = SHMEM_NR_DIRECT;
		nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size, needs_lock);
	}

	/*
	 * If there are no indirect blocks or we are punching a hole
	 * below indirect blocks, nothing to be done.
	 */
	if (!topdir || limit <= SHMEM_NR_DIRECT)
		goto done2;
	/*
	 * The truncation case has already dropped info->lock, and we're safe
	 * because i_size and next_index have already been lowered, preventing
	 * access beyond.  But in the punch_hole case, we still need to take
	 * the lock when updating the swap directory, because there might be
	 * racing accesses by shmem_getpage(SGP_CACHE), shmem_unuse_inode or
	 * shmem_writepage.  However, whenever we find we can remove a whole
	 * directory page (not at the misaligned start or end of the range),
	 * we first NULLify its pointer in the level above, and then have no
	 * need to take the lock when updating its contents: needs_lock and
	 * punch_lock (either pointing to info->lock or NULL) manage this.
	 */
	upper_limit -= SHMEM_NR_DIRECT;
	limit -= SHMEM_NR_DIRECT;
	idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0;
	offset = idx % ENTRIES_PER_PAGE;

	dir = shmem_dir_map(topdir);
	stage = ENTRIES_PER_PAGEPAGE/2;
	if (idx < ENTRIES_PER_PAGEPAGE/2) {
		middir = topdir;
		diroff = idx/ENTRIES_PER_PAGE;
	} else {
		dir += ENTRIES_PER_PAGE/2;
		dir += (idx - ENTRIES_PER_PAGEPAGE/2)/ENTRIES_PER_PAGEPAGE;
		while (stage <= idx)
			stage += ENTRIES_PER_PAGEPAGE;
		middir = *dir;
		if (*dir) {
			diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) %
				ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE;
			if (!diroff && !offset && upper_limit >= stage) {
				if (needs_lock) {
					spin_lock(needs_lock);
					*dir = NULL;
					spin_unlock(needs_lock);
					needs_lock = NULL;
				} else
					*dir = NULL;
				nr_pages_to_free++;
				list_add(&middir->lru, &pages_to_free);
			}
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(middir);
		} else {
			diroff = 0;
			offset = 0;
			idx = stage;
		}
	}

	for (; idx < limit; idx += ENTRIES_PER_PAGE, diroff++) {
		if (unlikely(idx == stage)) {
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(topdir) +
			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
			while (!*dir) {
				dir++;
				idx += ENTRIES_PER_PAGEPAGE;
				if (idx >= limit)
					goto done1;
			}
			stage = idx + ENTRIES_PER_PAGEPAGE;
			middir = *dir;
			if (punch_hole)
				needs_lock = &info->lock;
			if (upper_limit >= stage) {
				if (needs_lock) {
					spin_lock(needs_lock);
					*dir = NULL;
					spin_unlock(needs_lock);
					needs_lock = NULL;
				} else
					*dir = NULL;
				nr_pages_to_free++;
				list_add(&middir->lru, &pages_to_free);
			}
			shmem_dir_unmap(dir);
			cond_resched();
			dir = shmem_dir_map(middir);
			diroff = 0;
		}
		punch_lock = needs_lock;
		subdir = dir[diroff];
		if (subdir && !offset && upper_limit-idx >= ENTRIES_PER_PAGE) {
			if (needs_lock) {
				spin_lock(needs_lock);
				dir[diroff] = NULL;
				spin_unlock(needs_lock);
				punch_lock = NULL;
			} else
				dir[diroff] = NULL;
			nr_pages_to_free++;
			list_add(&subdir->lru, &pages_to_free);
		}
		if (subdir && page_private(subdir) /* has swap entries */) {
			size = limit - idx;
			if (size > ENTRIES_PER_PAGE)
				size = ENTRIES_PER_PAGE;
			freed = shmem_map_and_free_swp(subdir,
					offset, size, &dir, punch_lock);
			if (!dir)
				dir = shmem_dir_map(middir);
			nr_swaps_freed += freed;
			if (offset || punch_lock) {
				spin_lock(&info->lock);
				set_page_private(subdir,
					page_private(subdir) - freed);
				spin_unlock(&info->lock);
			} else
				BUG_ON(page_private(subdir) != freed);
		}
		offset = 0;
	}
done1:
	shmem_dir_unmap(dir);
done2:
	if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
		/*
		 * Call truncate_inode_pages again: racing shmem_unuse_inode
		 * may have swizzled a page in from swap since
		 * truncate_pagecache or generic_delete_inode did it, before we
		 * lowered next_index.  Also, though shmem_getpage checks
		 * i_size before adding to cache, no recheck after: so fix the
		 * narrow window there too.
		 */
		truncate_inode_pages_range(inode->i_mapping, start, end);
	}
	spin_lock(&info->lock);
	info->flags &= ~SHMEM_TRUNCATE;
	info->swapped -= nr_swaps_freed;
	if (nr_pages_to_free)
		shmem_free_blocks(inode, nr_pages_to_free);
	shmem_recalc_inode(inode);
	spin_unlock(&info->lock);

	/*
	 * Empty swap vector directory pages to be freed?
	 */
	if (!list_empty(&pages_to_free)) {
		pages_to_free.prev->next = NULL;
		shmem_free_pages(pages_to_free.next);
	}
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);
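
/*
 * Illustrative userspace sketch (not from the kernel tree): on kernels of
 * this era, madvise(MADV_REMOVE) on a shared tmpfs mapping punches a hole,
 * which is what ends up reaching shmem_truncate_range() for tmpfs.
 * Assumes /dev/shm is a mounted tmpfs; error checking omitted for brevity.
 * Disabled here; compile it separately to run it.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 1 << 20, i;
	int fd = open("/dev/shm/hole-demo", O_RDWR | O_CREAT | O_TRUNC, 0600);
	char *p;

	ftruncate(fd, len);
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	for (i = 0; i < len; i += 4096)
		p[i] = 1;				/* allocate every page */
	madvise(p + 4096, 8 * 4096, MADV_REMOVE);	/* punch an 8-page hole */
	munmap(p, len);
	unlink("/dev/shm/hole-demo");
	close(fd);
	return 0;
}
#endif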
static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;
		struct page *page = NULL;

		if (newsize < oldsize) {
			/*
			 * If truncating down to a partial page, then
			 * if that page is already allocated, hold it
			 * in memory until the truncation is over, so
			 * truncate_partial_page cannot miss it were
			 * it assigned to swap.
			 */
			if (newsize & (PAGE_CACHE_SIZE-1)) {
				(void) shmem_getpage(inode,
					newsize >> PAGE_CACHE_SHIFT,
						&page, SGP_READ, NULL);
			}
			/*
			 * Reset SHMEM_PAGEIN flag so that shmem_truncate can
			 * detect if any pages might have been added to cache
			 * after truncate_inode_pages.  But we needn't bother
			 * if it's being fully truncated to zero-length: the
			 * nrpages check is efficient enough in that case.
			 */
			if (newsize) {
				struct shmem_inode_info *info = SHMEM_I(inode);
				spin_lock(&info->lock);
				info->flags &= ~SHMEM_PAGEIN;
				spin_unlock(&info->lock);
			}
		}
		if (newsize != oldsize) {
			i_size_write(inode, newsize);
			inode->i_ctime = inode->i_mtime = CURRENT_TIME;
		}
		if (newsize < oldsize) {
			loff_t holebegin = round_up(newsize, PAGE_SIZE);
			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
			shmem_truncate_range(inode, newsize, (loff_t)-1);
			/* unmap again to remove racily COWed private pages */
			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
		}
		if (page)
			page_cache_release(page);
	}

	setattr_copy(inode, attr);
#ifdef CONFIG_TMPFS_POSIX_ACL
	if (attr->ia_valid & ATTR_MODE)
		error = generic_acl_chmod(inode);
#endif
	return error;
}
static void shmem_evict_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_xattr *xattr, *nxattr;

	if (inode->i_mapping->a_ops == &shmem_aops) {
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate_range(inode, 0, (loff_t)-1);
		if (!list_empty(&info->swaplist)) {
			mutex_lock(&shmem_swaplist_mutex);
			list_del_init(&info->swaplist);
			mutex_unlock(&shmem_swaplist_mutex);
		}
	}

	list_for_each_entry_safe(xattr, nxattr, &info->xattr_list, list) {
		kfree(xattr->name);
		kfree(xattr);
	}
	BUG_ON(inode->i_blocks);
	shmem_free_inode(inode->i_sb);
	end_writeback(inode);
}
static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir)
{
	swp_entry_t *ptr;

	for (ptr = dir; ptr < edir; ptr++) {
		if (ptr->val == entry.val)
			return ptr - dir;
	}
	return -1;
}
static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
{
	struct address_space *mapping;
	unsigned long idx;
	unsigned long size;
	unsigned long limit;
	unsigned long stage;
	struct page **dir;
	struct page *subdir;
	swp_entry_t *ptr;
	int offset;
	int error;

	idx = 0;
	ptr = info->i_direct;
	spin_lock(&info->lock);
	if (!info->swapped) {
		list_del_init(&info->swaplist);
		goto lost2;
	}
	limit = info->next_index;
	size = limit;
	if (size > SHMEM_NR_DIRECT)
		size = SHMEM_NR_DIRECT;
	offset = shmem_find_swp(entry, ptr, ptr+size);
	if (offset >= 0) {
		shmem_swp_balance_unmap();
		goto found;
	}
	if (!info->i_indirect)
		goto lost2;

	dir = shmem_dir_map(info->i_indirect);
	stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2;

	for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
		if (unlikely(idx == stage)) {
			shmem_dir_unmap(dir-1);
			if (cond_resched_lock(&info->lock)) {
				/* check it has not been truncated */
				if (limit > info->next_index) {
					limit = info->next_index;
					if (idx >= limit)
						goto lost2;
				}
			}
			dir = shmem_dir_map(info->i_indirect) +
			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
			while (!*dir) {
				dir++;
				idx += ENTRIES_PER_PAGEPAGE;
				if (idx >= limit)
					goto lost1;
			}
			stage = idx + ENTRIES_PER_PAGEPAGE;
			subdir = *dir;
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(subdir);
		}
		subdir = *dir;
		if (subdir && page_private(subdir)) {
			ptr = shmem_swp_map(subdir);
			size = limit - idx;
			if (size > ENTRIES_PER_PAGE)
				size = ENTRIES_PER_PAGE;
			offset = shmem_find_swp(entry, ptr, ptr+size);
			shmem_swp_unmap(ptr);
			if (offset >= 0) {
				shmem_dir_unmap(dir);
				ptr = shmem_swp_map(subdir);
				goto found;
			}
		}
	}
lost1:
	shmem_dir_unmap(dir-1);
lost2:
	spin_unlock(&info->lock);
	return 0;
found:
	idx += offset;
	ptr += offset;

	/*
	 * Move _head_ to start search for next from here.
	 * But be careful: shmem_evict_inode checks list_empty without taking
	 * mutex, and there's an instant in list_move_tail when info->swaplist
	 * would appear empty, if it were the only one on shmem_swaplist.  We
	 * could avoid doing it if inode NULL; or use this minor optimization.
	 */
	if (shmem_swaplist.next != &info->swaplist)
		list_move_tail(&shmem_swaplist, &info->swaplist);

	/*
	 * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
	 * but also to hold up shmem_evict_inode(): so inode cannot be freed
	 * beneath us (pagelock doesn't help until the page is in pagecache).
	 */
	mapping = info->vfs_inode.i_mapping;
	error = add_to_page_cache_locked(page, mapping, idx, GFP_NOWAIT);
	/* which does mem_cgroup_uncharge_cache_page on error */

	if (error == -EEXIST) {
		struct page *filepage = find_get_page(mapping, idx);
		error = 1;
		if (filepage) {
			/*
			 * There might be a more uptodate page coming down
			 * from a stacked writepage: forget our swappage if so.
			 */
			if (PageUptodate(filepage))
				error = 0;
			page_cache_release(filepage);
		}
	}
	if (!error) {
		delete_from_swap_cache(page);
		set_page_dirty(page);
		info->flags |= SHMEM_PAGEIN;
		shmem_swp_set(info, ptr, 0);
		swap_free(entry);
		error = 1;	/* not an error, but entry was found */
	}
	shmem_swp_unmap(ptr);
	spin_unlock(&info->lock);
	return error;
}
/*
 * shmem_unuse() searches for a possibly swapped-out shmem page.
 */
int shmem_unuse(swp_entry_t entry, struct page *page)
{
	struct list_head *p, *next;
	struct shmem_inode_info *info;
	int found = 0;
	int error;

	/*
	 * Charge page using GFP_KERNEL while we can wait, before taking
	 * the shmem_swaplist_mutex which might hold up shmem_writepage().
	 * Charged back to the user (not to caller) when swap account is used.
	 * add_to_page_cache() will be called with GFP_NOWAIT.
	 */
	error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
	if (error)
		goto out;
	/*
	 * Try to preload while we can wait, to not make a habit of
	 * draining atomic reserves; but don't latch on to this cpu,
	 * it's okay if sometimes we get rescheduled after this.
	 */
	error = radix_tree_preload(GFP_KERNEL);
	if (error)
		goto uncharge;
	radix_tree_preload_end();

	mutex_lock(&shmem_swaplist_mutex);
	list_for_each_safe(p, next, &shmem_swaplist) {
		info = list_entry(p, struct shmem_inode_info, swaplist);
		found = shmem_unuse_inode(info, entry, page);
		cond_resched();
		if (found)
			break;
	}
	mutex_unlock(&shmem_swaplist_mutex);

uncharge:
	if (!found)
		mem_cgroup_uncharge_cache_page(page);
out:
	unlock_page(page);
	page_cache_release(page);
	return error;
}
/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct shmem_inode_info *info;
	swp_entry_t *entry, swap;
	struct address_space *mapping;
	unsigned long index;
	struct inode *inode;

	BUG_ON(!PageLocked(page));
	mapping = page->mapping;
	index = page->index;
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)
		goto redirty;
	if (!total_swap_pages)
		goto redirty;

	/*
	 * shmem_backing_dev_info's capabilities prevent regular writeback or
	 * sync from ever calling shmem_writepage; but a stacking filesystem
	 * may use the ->writepage of its underlying filesystem, in which case
	 * tmpfs should write out to swap only in response to memory pressure,
	 * and not for the writeback threads or sync.  However, in those cases,
	 * we do still want to check if there's a redundant swappage to be
	 * discarded.
	 */
	if (wbc->for_reclaim)
		swap = get_swap_page();
	else
		swap.val = 0;

	/*
	 * Add inode to shmem_unuse()'s list of swapped-out inodes,
	 * if it's not already there.  Do it now because we cannot take
	 * mutex while holding spinlock, and must do so before the page
	 * is moved to swap cache, when its pagelock no longer protects
	 * the inode from eviction.  But don't unlock the mutex until
	 * we've taken the spinlock, because shmem_unuse_inode() will
	 * prune a !swapped inode from the swaplist under both locks.
	 */
	if (swap.val) {
		mutex_lock(&shmem_swaplist_mutex);
		if (list_empty(&info->swaplist))
			list_add_tail(&info->swaplist, &shmem_swaplist);
	}

	spin_lock(&info->lock);
	if (swap.val)
		mutex_unlock(&shmem_swaplist_mutex);

	if (index >= info->next_index) {
		BUG_ON(!(info->flags & SHMEM_TRUNCATE));
		goto unlock;
	}
	entry = shmem_swp_entry(info, index, NULL);
	if (entry->val) {
		/*
		 * The more uptodate page coming down from a stacked
		 * writepage should replace our old swappage.
		 */
		free_swap_and_cache(*entry);
		shmem_swp_set(info, entry, 0);
	}
	shmem_recalc_inode(inode);

	if (swap.val && add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
		delete_from_page_cache(page);
		shmem_swp_set(info, entry, swap.val);
		shmem_swp_unmap(entry);
		swap_shmem_alloc(swap);
		spin_unlock(&info->lock);
		BUG_ON(page_mapped(page));
		swap_writepage(page, wbc);
		return 0;
	}

	shmem_swp_unmap(entry);
unlock:
	spin_unlock(&info->lock);
	/*
	 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
	 * clear SWAP_HAS_CACHE flag.
	 */
	swapcache_free(swap, NULL);
redirty:
	set_page_dirty(page);
	if (wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;	/* Return with page locked */
	unlock_page(page);
	return 0;
}
#ifdef CONFIG_NUMA
#ifdef CONFIG_TMPFS
static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
	char buffer[64];

	if (!mpol || mpol->mode == MPOL_DEFAULT)
		return;		/* show nothing */

	mpol_to_str(buffer, sizeof(buffer), mpol, 1);

	seq_printf(seq, ",mpol=%s", buffer);
}

static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	struct mempolicy *mpol = NULL;
	if (sbinfo->mpol) {
		spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
		mpol = sbinfo->mpol;
		mpol_get(mpol);
		spin_unlock(&sbinfo->stat_lock);
	}
	return mpol;
}
#endif /* CONFIG_TMPFS */
static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	struct mempolicy mpol, *spol;
	struct vm_area_struct pvma;
	struct page *page;

	spol = mpol_cond_copy(&mpol,
				mpol_shared_policy_lookup(&info->policy, idx));

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	pvma.vm_pgoff = idx;
	pvma.vm_ops = NULL;
	pvma.vm_policy = spol;
	page = swapin_readahead(entry, gfp, &pvma, 0);
	return page;
}

static struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	struct vm_area_struct pvma;

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	pvma.vm_pgoff = idx;
	pvma.vm_ops = NULL;
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);

	/*
	 * alloc_page_vma() will drop the shared policy reference
	 */
	return alloc_page_vma(gfp, &pvma, 0);
}
#else /* !CONFIG_NUMA */
#ifdef CONFIG_TMPFS
static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *p)
{
}
#endif /* CONFIG_TMPFS */

static inline struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	return swapin_readahead(entry, gfp, NULL, 0);
}

static inline struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	return alloc_page(gfp);
}
#endif /* CONFIG_NUMA */

#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	return NULL;
}
#endif
/*
 * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty since we also free the swap
 * entry since a page cannot live in both the swap and page cache
 */
static int shmem_getpage_gfp(struct inode *inode, pgoff_t idx,
	struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo;
	struct page *page;
	struct page *prealloc_page = NULL;
	swp_entry_t *entry;
	swp_entry_t swap;
	int error;
	int ret;

	if (idx >= SHMEM_MAX_INDEX)
		return -EFBIG;
repeat:
	page = find_lock_page(mapping, idx);
	if (page) {
		/*
		 * Once we can get the page lock, it must be uptodate:
		 * if there were an error in reading back from swap,
		 * the page would not be inserted into the filecache.
		 */
		BUG_ON(!PageUptodate(page));
		goto done;
	}

	/*
	 * Try to preload while we can wait, to not make a habit of
	 * draining atomic reserves; but don't latch on to this cpu.
	 */
	error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
	if (error)
		goto out;
	radix_tree_preload_end();

	if (sgp != SGP_READ && !prealloc_page) {
		prealloc_page = shmem_alloc_page(gfp, info, idx);
		if (prealloc_page) {
			SetPageSwapBacked(prealloc_page);
			if (mem_cgroup_cache_charge(prealloc_page,
					current->mm, GFP_KERNEL)) {
				page_cache_release(prealloc_page);
				prealloc_page = NULL;
			}
		}
	}

	spin_lock(&info->lock);
	shmem_recalc_inode(inode);
	entry = shmem_swp_alloc(info, idx, sgp, gfp);
	if (IS_ERR(entry)) {
		spin_unlock(&info->lock);
		error = PTR_ERR(entry);
		goto out;
	}
	swap = *entry;

	if (swap.val) {
		/* Look it up and read it in.. */
		page = lookup_swap_cache(swap);
		if (!page) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			/* here we actually do the io */
			if (fault_type)
				*fault_type |= VM_FAULT_MAJOR;
			page = shmem_swapin(swap, gfp, info, idx);
			if (!page) {
				spin_lock(&info->lock);
				entry = shmem_swp_alloc(info, idx, sgp, gfp);
				if (IS_ERR(entry))
					error = PTR_ERR(entry);
				else {
					if (entry->val == swap.val)
						error = -ENOMEM;
					shmem_swp_unmap(entry);
				}
				spin_unlock(&info->lock);
				if (error)
					goto out;
				goto repeat;
			}
			wait_on_page_locked(page);
			page_cache_release(page);
			goto repeat;
		}

		/* We have to do this with page locked to prevent races */
		if (!trylock_page(page)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			wait_on_page_locked(page);
			page_cache_release(page);
			goto repeat;
		}
		if (PageWriteback(page)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			wait_on_page_writeback(page);
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}
		if (!PageUptodate(page)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			unlock_page(page);
			page_cache_release(page);
			error = -EIO;
			goto out;
		}

		error = add_to_page_cache_locked(page, mapping,
						 idx, GFP_NOWAIT);
		if (error) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			if (error == -ENOMEM) {
				/*
				 * reclaim from proper memory cgroup and
				 * call memcg's OOM if needed.
				 */
				error = mem_cgroup_shmem_charge_fallback(
						page, current->mm, gfp);
				if (error) {
					unlock_page(page);
					page_cache_release(page);
					goto out;
				}
			}
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}

		info->flags |= SHMEM_PAGEIN;
		shmem_swp_set(info, entry, 0);
		shmem_swp_unmap(entry);
		delete_from_swap_cache(page);
		spin_unlock(&info->lock);
		set_page_dirty(page);
		swap_free(swap);

	} else if (sgp == SGP_READ) {
		shmem_swp_unmap(entry);
		page = find_get_page(mapping, idx);
		if (page && !trylock_page(page)) {
			spin_unlock(&info->lock);
			wait_on_page_locked(page);
			page_cache_release(page);
			goto repeat;
		}
		spin_unlock(&info->lock);
	} else if (prealloc_page) {
		shmem_swp_unmap(entry);
		sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_blocks) {
			if (percpu_counter_compare(&sbinfo->used_blocks,
						sbinfo->max_blocks) >= 0 ||
			    shmem_acct_block(info->flags))
				goto nospace;
			percpu_counter_inc(&sbinfo->used_blocks);
			inode->i_blocks += BLOCKS_PER_PAGE;
		} else if (shmem_acct_block(info->flags))
			goto nospace;

		page = prealloc_page;
		prealloc_page = NULL;

		entry = shmem_swp_alloc(info, idx, sgp, gfp);
		if (IS_ERR(entry))
			error = PTR_ERR(entry);
		else {
			swap = *entry;
			shmem_swp_unmap(entry);
		}
		ret = error || swap.val;
		if (ret)
			mem_cgroup_uncharge_cache_page(page);
		else
			ret = add_to_page_cache_lru(page, mapping,
						idx, GFP_NOWAIT);
		/*
		 * At add_to_page_cache_lru() failure,
		 * uncharge will be done automatically.
		 */
		if (ret) {
			shmem_unacct_blocks(info->flags, 1);
			shmem_free_blocks(inode, 1);
			spin_unlock(&info->lock);
			page_cache_release(page);
			if (error)
				goto out;
			goto repeat;
		}

		info->flags |= SHMEM_PAGEIN;
		info->alloced++;
		spin_unlock(&info->lock);
		clear_highpage(page);
		flush_dcache_page(page);
		SetPageUptodate(page);
		if (sgp == SGP_DIRTY)
			set_page_dirty(page);

	} else {
		spin_unlock(&info->lock);
		error = -ENOMEM;
		goto out;
	}
done:
	*pagep = page;
	error = 0;
out:
	if (prealloc_page) {
		mem_cgroup_uncharge_cache_page(prealloc_page);
		page_cache_release(prealloc_page);
	}
	return error;

nospace:
	/*
	 * Perhaps the page was brought in from swap between find_lock_page
	 * and taking info->lock?  We allow for that at add_to_page_cache_lru,
	 * but must also avoid reporting a spurious ENOSPC while working on a
	 * full tmpfs.
	 */
	page = find_get_page(mapping, idx);
	spin_unlock(&info->lock);
	if (page) {
		page_cache_release(page);
		goto repeat;
	}
	error = -ENOSPC;
	goto out;
}
static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	int error;
	int ret = VM_FAULT_LOCKED;

	if (((loff_t)vmf->pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		return VM_FAULT_SIGBUS;

	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
	if (error)
		return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);

	if (ret & VM_FAULT_MAJOR) {
		count_vm_event(PGMAJFAULT);
		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
	}
	return ret;
}
#ifdef CONFIG_NUMA
static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct inode *i = vma->vm_file->f_path.dentry->d_inode;
	return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new);
}

static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
					  unsigned long addr)
{
	struct inode *i = vma->vm_file->f_path.dentry->d_inode;
	unsigned long idx;

	idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx);
}
#endif
int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);
	int retval = -ENOMEM;

	spin_lock(&info->lock);
	if (lock && !(info->flags & VM_LOCKED)) {
		if (!user_shm_lock(inode->i_size, user))
			goto out_nomem;
		info->flags |= VM_LOCKED;
		mapping_set_unevictable(file->f_mapping);
	}
	if (!lock && (info->flags & VM_LOCKED) && user) {
		user_shm_unlock(inode->i_size, user);
		info->flags &= ~VM_LOCKED;
		mapping_clear_unevictable(file->f_mapping);
		scan_mapping_unevictable_pages(file->f_mapping);
	}
	retval = 0;

out_nomem:
	spin_unlock(&info->lock);
	return retval;
}
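
/*
 * Illustrative userspace sketch (not from the kernel tree): SHM_LOCK on a
 * SysV shared memory segment is one path that reaches shmem_lock() above,
 * marking the segment's pages unevictable.  It typically needs
 * CAP_IPC_LOCK or a sufficient RLIMIT_MEMLOCK.  Disabled here; compile it
 * separately to run it.
 */
#if 0
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
	int id = shmget(IPC_PRIVATE, 1 << 20, IPC_CREAT | 0600);

	if (shmctl(id, SHM_LOCK, NULL) != 0)
		perror("SHM_LOCK");	/* fails if the memlock limit is hit */
	shmctl(id, SHM_UNLOCK, NULL);
	shmctl(id, IPC_RMID, NULL);
	return 0;
}
#endif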
static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &shmem_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;
	return 0;
}
static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
				     int mode, dev_t dev, unsigned long flags)
{
	struct inode *inode;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (shmem_reserve_inode(sb))
		return NULL;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);
		inode->i_blocks = 0;
		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_generation = get_seconds();
		info = SHMEM_I(inode);
		memset(info, 0, (char *)inode - (char *)info);
		spin_lock_init(&info->lock);
		info->flags = flags & VM_NORESERVE;
		INIT_LIST_HEAD(&info->swaplist);
		INIT_LIST_HEAD(&info->xattr_list);
		cache_no_acl(inode);

		switch (mode & S_IFMT) {
		default:
			inode->i_op = &shmem_special_inode_operations;
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_mapping->a_ops = &shmem_aops;
			inode->i_op = &shmem_inode_operations;
			inode->i_fop = &shmem_file_operations;
			mpol_shared_policy_init(&info->policy,
						 shmem_get_sbmpol(sbinfo));
			break;
		case S_IFDIR:
			inc_nlink(inode);
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * BOGO_DIRENT_SIZE;
			inode->i_op = &shmem_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
			break;
		case S_IFLNK:
			/*
			 * Must not load anything in the rbtree,
			 * mpol_free_shared_policy will not be called.
			 */
			mpol_shared_policy_init(&info->policy, NULL);
			break;
		}
	} else
		shmem_free_inode(sb);
	return inode;
}
static const struct inode_operations shmem_symlink_inode_operations;
static const struct inode_operations shmem_symlink_inline_operations;

static int
shmem_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
}
static int
shmem_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;

	if (pos + copied > inode->i_size)
		i_size_write(inode, pos + copied);

	set_page_dirty(page);
	unlock_page(page);
	page_cache_release(page);

	return copied;
}
static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct address_space *mapping = inode->i_mapping;
	unsigned long index, offset;
	enum sgp_type sgp = SGP_READ;

	/*
	 * Might this read be for a stacking filesystem?  Then when reading
	 * holes of a sparse file, we actually need to allocate those pages,
	 * and even mark them dirty, so it cannot exceed the max_blocks limit.
	 */
	if (segment_eq(get_fs(), KERNEL_DS))
		sgp = SGP_DIRTY;

	index = *ppos >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page = NULL;
		unsigned long end_index, nr, ret;
		loff_t i_size = i_size_read(inode);

		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset)
				break;
		}

		desc->error = shmem_getpage(inode, index, &page, sgp, NULL);
		if (desc->error) {
			if (desc->error == -EINVAL)
				desc->error = 0;
			break;
		}
		if (page)
			unlock_page(page);

		/*
		 * We must evaluate after, since reads (unlike writes)
		 * are called without i_mutex protection against truncate
		 */
		nr = PAGE_CACHE_SIZE;
		i_size = i_size_read(inode);
		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset) {
				if (page)
					page_cache_release(page);
				break;
			}
		}
		nr -= offset;

		if (page) {
			/*
			 * If users can be writing to this page using arbitrary
			 * virtual addresses, take care about potential aliasing
			 * before reading the page on the kernel side.
			 */
			if (mapping_writably_mapped(mapping))
				flush_dcache_page(page);
			/*
			 * Mark the page accessed if we read the beginning.
			 */
			if (!offset)
				mark_page_accessed(page);
		} else {
			page = ZERO_PAGE(0);
			page_cache_get(page);
		}

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		page_cache_release(page);
		if (ret != nr || !desc->count)
			break;

		cond_resched();
	}

	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(filp);
}
static ssize_t shmem_file_aio_read(struct kiocb *iocb,
		const struct iovec *iov, unsigned long nr_segs, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	ssize_t retval;
	unsigned long seg;
	size_t count;
	loff_t *ppos = &iocb->ki_pos;

	retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
	if (retval)
		return retval;

	for (seg = 0; seg < nr_segs; seg++) {
		read_descriptor_t desc;

		desc.written = 0;
		desc.arg.buf = iov[seg].iov_base;
		desc.count = iov[seg].iov_len;
		if (desc.count == 0)
			continue;
		desc.error = 0;
		do_shmem_file_read(filp, ppos, &desc, file_read_actor);
		retval += desc.written;
		if (desc.error) {
			retval = retval ?: desc.error;
			break;
		}
	}
	return retval;
}
static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
				struct pipe_inode_info *pipe, size_t len,
				unsigned int flags)
{
	struct address_space *mapping = in->f_mapping;
	struct inode *inode = mapping->host;
	unsigned int loff, nr_pages, req_pages;
	struct page *pages[PIPE_DEF_BUFFERS];
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct page *page;
	pgoff_t index, end_index;
	loff_t isize, left;
	int error, page_nr;
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.flags = flags,
		.ops = &page_cache_pipe_buf_ops,
		.spd_release = spd_release_page,
	};

	isize = i_size_read(inode);
	if (unlikely(*ppos >= isize))
		return 0;

	left = isize - *ppos;
	if (unlikely(left < len))
		len = left;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	index = *ppos >> PAGE_CACHE_SHIFT;
	loff = *ppos & ~PAGE_CACHE_MASK;
	req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	nr_pages = min(req_pages, pipe->buffers);

	spd.nr_pages = find_get_pages_contig(mapping, index,
						nr_pages, spd.pages);
	index += spd.nr_pages;
	error = 0;

	while (spd.nr_pages < nr_pages) {
		error = shmem_getpage(inode, index, &page, SGP_CACHE, NULL);
		if (error)
			break;
		unlock_page(page);
		spd.pages[spd.nr_pages++] = page;
		index++;
	}

	index = *ppos >> PAGE_CACHE_SHIFT;
	nr_pages = spd.nr_pages;
	spd.nr_pages = 0;

	for (page_nr = 0; page_nr < nr_pages; page_nr++) {
		unsigned int this_len;

		if (!len)
			break;

		this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
		page = spd.pages[page_nr];

		if (!PageUptodate(page) || page->mapping != mapping) {
			error = shmem_getpage(inode, index, &page,
							SGP_CACHE, NULL);
			if (error)
				break;
			unlock_page(page);
			page_cache_release(spd.pages[page_nr]);
			spd.pages[page_nr] = page;
		}

		isize = i_size_read(inode);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index))
			break;

		if (end_index == index) {
			unsigned int plen;

			plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (plen <= loff)
				break;

			this_len = min(this_len, plen - loff);
			len = this_len;
		}

		spd.partial[page_nr].offset = loff;
		spd.partial[page_nr].len = this_len;
		len -= this_len;
		loff = 0;
		spd.nr_pages++;
		index++;
	}

	while (page_nr < nr_pages)
		page_cache_release(spd.pages[page_nr++]);

	if (spd.nr_pages)
		error = splice_to_pipe(pipe, &spd);

	splice_shrink_spd(pipe, &spd);

	if (error > 0) {
		*ppos += error;
		file_accessed(in);
	}
	return error;
}
static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);

	buf->f_type = TMPFS_MAGIC;
	buf->f_bsize = PAGE_CACHE_SIZE;
	buf->f_namelen = NAME_MAX;
	if (sbinfo->max_blocks) {
		buf->f_blocks = sbinfo->max_blocks;
		buf->f_bavail = buf->f_bfree =
				sbinfo->max_blocks - percpu_counter_sum(&sbinfo->used_blocks);
	}
	if (sbinfo->max_inodes) {
		buf->f_files = sbinfo->max_inodes;
		buf->f_ffree = sbinfo->free_inodes;
	}
	/* else leave those fields 0 like simple_statfs */
	return 0;
}
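
/*
 * Illustrative userspace sketch (not from the kernel tree): reading the
 * limits reported by shmem_statfs() above via statfs(2).  Assumes /dev/shm
 * is a mounted tmpfs.  Disabled here; compile it separately to run it.
 */
#if 0
#include <stdio.h>
#include <sys/vfs.h>

int main(void)
{
	struct statfs buf;

	if (statfs("/dev/shm", &buf) == 0)
		printf("bsize=%ld blocks=%ld bfree=%ld files=%ld ffree=%ld\n",
		       (long)buf.f_bsize, (long)buf.f_blocks,
		       (long)buf.f_bfree, (long)buf.f_files,
		       (long)buf.f_ffree);
	return 0;
}
#endif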
/*
 * File creation. Allocate an inode, and we're done..
 */
static int
shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
	if (inode) {
		error = security_inode_init_security(inode, dir,
						     &dentry->d_name, NULL,
						     NULL, NULL);
		if (error) {
			if (error != -EOPNOTSUPP) {
				iput(inode);
				return error;
			}
		}
#ifdef CONFIG_TMPFS_POSIX_ACL
		error = generic_acl_init(inode, dir);
		if (error) {
			iput(inode);
			return error;
		}
#else
		error = 0;
#endif
		dir->i_size += BOGO_DIRENT_SIZE;
		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
		d_instantiate(dentry, inode);
		dget(dentry);	/* Extra count - pin the dentry in core */
	}
	return error;
}
static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	int error;

	if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
		return error;
	inc_nlink(dir);
	return 0;
}

static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
		struct nameidata *nd)
{
	return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
}
static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int ret;

	/*
	 * No ordinary (disk based) filesystem counts links as inodes;
	 * but each new link needs a new dentry, pinning lowmem, and
	 * tmpfs dentries cannot be pruned until they are unlinked.
	 */
	ret = shmem_reserve_inode(inode->i_sb);
	if (ret)
		goto out;

	dir->i_size += BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	inc_nlink(inode);
	ihold(inode);	/* New dentry reference */
	dget(dentry);	/* Extra pinning count for the created dentry */
	d_instantiate(dentry, inode);
out:
	return ret;
}
static int shmem_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
		shmem_free_inode(inode->i_sb);

	dir->i_size -= BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	drop_nlink(inode);
	dput(dentry);	/* Undo the count from "create" - this does all the work */
	return 0;
}

static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
{
	if (!simple_empty(dentry))
		return -ENOTEMPTY;

	drop_nlink(dentry->d_inode);
	drop_nlink(dir);
	return shmem_unlink(dir, dentry);
}
/*
 * The VFS layer already does all the dentry stuff for rename,
 * we just have to decrement the usage count for the target if
 * it exists so that the VFS layer correctly frees it when it
 * gets overwritten.
 */
static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int they_are_dirs = S_ISDIR(inode->i_mode);

	if (!simple_empty(new_dentry))
		return -ENOTEMPTY;

	if (new_dentry->d_inode) {
		(void) shmem_unlink(new_dir, new_dentry);
		if (they_are_dirs)
			drop_nlink(old_dir);
	} else if (they_are_dirs) {
		drop_nlink(old_dir);
		inc_nlink(new_dir);
	}

	old_dir->i_size -= BOGO_DIRENT_SIZE;
	new_dir->i_size += BOGO_DIRENT_SIZE;
	old_dir->i_ctime = old_dir->i_mtime =
	new_dir->i_ctime = new_dir->i_mtime =
	inode->i_ctime = CURRENT_TIME;
	return 0;
}
static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
	int error;
	int len;
	struct inode *inode;
	struct page *page = NULL;
	char *kaddr;
	struct shmem_inode_info *info;

	len = strlen(symname) + 1;
	if (len > PAGE_CACHE_SIZE)
		return -ENAMETOOLONG;

	inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
	if (!inode)
		return -ENOSPC;

	error = security_inode_init_security(inode, dir, &dentry->d_name, NULL,
					     NULL, NULL);
	if (error) {
		if (error != -EOPNOTSUPP) {
			iput(inode);
			return error;
		}
		error = 0;
	}

	info = SHMEM_I(inode);
	inode->i_size = len-1;
	if (len <= SHMEM_SYMLINK_INLINE_LEN) {
		/* do not use vmtruncate: it would adjust next_index */
		memcpy(info->inline_symlink, symname, len);
		inode->i_op = &shmem_symlink_inline_operations;
	} else {
		error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
		if (error) {
			iput(inode);
			return error;
		}
		inode->i_mapping->a_ops = &shmem_aops;
		inode->i_op = &shmem_symlink_inode_operations;
		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(kaddr, symname, len);
		kunmap_atomic(kaddr, KM_USER0);
		set_page_dirty(page);
		unlock_page(page);
		page_cache_release(page);
	}
	dir->i_size += BOGO_DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
}
static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
{
	nd_set_link(nd, SHMEM_I(dentry->d_inode)->inline_symlink);
	return NULL;
}

static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct page *page = NULL;
	int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
	nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
	if (page)
		unlock_page(page);
	return page;
}

static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
{
	if (!IS_ERR(nd_get_link(nd))) {
		struct page *page = cookie;
		kunmap(page);
		mark_page_accessed(page);
		page_cache_release(page);
	}
}
#ifdef CONFIG_TMPFS_XATTR
/*
 * Superblocks without xattr inode operations may get some security.* xattr
 * support from the LSM "for free". As soon as we have any other xattrs
 * like ACLs, we also need to implement the security.* handlers at
 * filesystem level, though.
 */
static int shmem_xattr_get(struct dentry *dentry, const char *name,
			   void *buffer, size_t size)
{
	struct shmem_inode_info *info;
	struct shmem_xattr *xattr;
	int ret = -ENODATA;

	info = SHMEM_I(dentry->d_inode);

	spin_lock(&info->lock);
	list_for_each_entry(xattr, &info->xattr_list, list) {
		if (strcmp(name, xattr->name))
			continue;

		ret = xattr->size;
		if (buffer) {
			if (size < xattr->size)
				ret = -ERANGE;
			else
				memcpy(buffer, xattr->value, xattr->size);
		}
		break;
	}
	spin_unlock(&info->lock);
	return ret;
}
static int shmem_xattr_set(struct dentry *dentry, const char *name,
			   const void *value, size_t size, int flags)
{
	struct inode *inode = dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_xattr *xattr;
	struct shmem_xattr *new_xattr = NULL;
	size_t len;
	int err = 0;

	/* value == NULL means remove */
	if (value) {
		/* wrap around? */
		len = sizeof(*new_xattr) + size;
		if (len <= sizeof(*new_xattr))
			return -ENOMEM;

		new_xattr = kmalloc(len, GFP_KERNEL);
		if (!new_xattr)
			return -ENOMEM;

		new_xattr->name = kstrdup(name, GFP_KERNEL);
		if (!new_xattr->name) {
			kfree(new_xattr);
			return -ENOMEM;
		}

		new_xattr->size = size;
		memcpy(new_xattr->value, value, size);
	}

	spin_lock(&info->lock);
	list_for_each_entry(xattr, &info->xattr_list, list) {
		if (!strcmp(name, xattr->name)) {
			if (flags & XATTR_CREATE) {
				xattr = new_xattr;
				err = -EEXIST;
			} else if (new_xattr) {
				list_replace(&xattr->list, &new_xattr->list);
			} else {
				list_del(&xattr->list);
			}
			goto out;
		}
	}

	if (flags & XATTR_REPLACE) {
		xattr = new_xattr;
		err = -ENODATA;
	} else {
		list_add(&new_xattr->list, &info->xattr_list);
		xattr = NULL;
	}
out:
	spin_unlock(&info->lock);
	if (xattr)
		kfree(xattr->name);
	kfree(xattr);
	return err;
}
static const struct xattr_handler *shmem_xattr_handlers[] = {
#ifdef CONFIG_TMPFS_POSIX_ACL
	&generic_acl_access_handler,
	&generic_acl_default_handler,
#endif
	NULL
};
static int shmem_xattr_validate(const char *name)
{
	struct { const char *prefix; size_t len; } arr[] = {
		{ XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
		{ XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(arr); i++) {
		size_t preflen = arr[i].len;
		if (strncmp(name, arr[i].prefix, preflen) == 0) {
			if (!name[preflen])
				return -EINVAL;
			return 0;
		}
	}
	return -EOPNOTSUPP;
}
static ssize_t shmem_getxattr(struct dentry *dentry, const char *name,
			      void *buffer, size_t size)
{
	int err;

	/*
	 * If this is a request for a synthetic attribute in the system.*
	 * namespace use the generic infrastructure to resolve a handler
	 * for it via sb->s_xattr.
	 */
	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
		return generic_getxattr(dentry, name, buffer, size);

	err = shmem_xattr_validate(name);
	if (err)
		return err;

	return shmem_xattr_get(dentry, name, buffer, size);
}
static int shmem_setxattr(struct dentry *dentry, const char *name,
			  const void *value, size_t size, int flags)
{
	int err;

	/*
	 * If this is a request for a synthetic attribute in the system.*
	 * namespace use the generic infrastructure to resolve a handler
	 * for it via sb->s_xattr.
	 */
	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
		return generic_setxattr(dentry, name, value, size, flags);

	err = shmem_xattr_validate(name);
	if (err)
		return err;

	if (size == 0)
		value = "";	/* empty EA, do not remove */

	return shmem_xattr_set(dentry, name, value, size, flags);
}
static int shmem_removexattr(struct dentry *dentry, const char *name)
{
	int err;

	/*
	 * If this is a request for a synthetic attribute in the system.*
	 * namespace use the generic infrastructure to resolve a handler
	 * for it via sb->s_xattr.
	 */
	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
		return generic_removexattr(dentry, name);

	err = shmem_xattr_validate(name);
	if (err)
		return err;

	return shmem_xattr_set(dentry, name, NULL, 0, XATTR_REPLACE);
}
static bool xattr_is_trusted(const char *name)
{
	return !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN);
}

static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
	bool trusted = capable(CAP_SYS_ADMIN);
	struct shmem_xattr *xattr;
	struct shmem_inode_info *info;
	size_t used = 0;

	info = SHMEM_I(dentry->d_inode);

	spin_lock(&info->lock);
	list_for_each_entry(xattr, &info->xattr_list, list) {
		size_t len;

		/* skip "trusted." attributes for unprivileged callers */
		if (!trusted && xattr_is_trusted(xattr->name))
			continue;

		len = strlen(xattr->name) + 1;
		used += len;
		if (buffer) {
			if (size < used) {
				used = -ERANGE;
				break;
			}
			memcpy(buffer, xattr->name, len);
			buffer += len;
		}
	}
	spin_unlock(&info->lock);

	return used;
}
#endif /* CONFIG_TMPFS_XATTR */
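
/*
 * Illustrative userspace sketch (not from the kernel tree): exercising the
 * tmpfs xattr handlers above.  Per shmem_xattr_validate(), only the
 * security.* and trusted.* namespaces are accepted here (trusted.* needs
 * CAP_SYS_ADMIN); system.* goes through the generic handlers.  Assumes a
 * file on tmpfs.  Disabled here; compile it separately to run it.
 */
#if 0
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/xattr.h>

int main(void)
{
	char value[64];
	ssize_t n;
	int fd = open("/dev/shm/xattr-demo", O_RDWR | O_CREAT, 0600);

	close(fd);
	if (setxattr("/dev/shm/xattr-demo", "trusted.demo", "hello", 5,
		     XATTR_CREATE))
		perror("setxattr");	/* EPERM without CAP_SYS_ADMIN */
	n = getxattr("/dev/shm/xattr-demo", "trusted.demo", value,
		     sizeof(value));
	if (n >= 0)
		printf("got %zd bytes\n", n);
	unlink("/dev/shm/xattr-demo");
	return 0;
}
#endif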
static const struct inode_operations shmem_symlink_inline_operations = {
	.readlink	= generic_readlink,
	.follow_link	= shmem_follow_link_inline,
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
};

static const struct inode_operations shmem_symlink_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= shmem_follow_link,
	.put_link	= shmem_put_link,
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
};
static struct dentry *shmem_get_parent(struct dentry *child)
{
	return ERR_PTR(-ESTALE);
}

static int shmem_match(struct inode *ino, void *vfh)
{
	__u32 *fh = vfh;
	__u64 inum = fh[2];
	inum = (inum << 32) | fh[1];
	return ino->i_ino == inum && fh[0] == ino->i_generation;
}

static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
		struct fid *fid, int fh_len, int fh_type)
{
	struct inode *inode;
	struct dentry *dentry = NULL;
	u64 inum = fid->raw[2];
	inum = (inum << 32) | fid->raw[1];

	if (fh_len < 3)
		return NULL;

	inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
			shmem_match, fid->raw);
	if (inode) {
		dentry = d_find_alias(inode);
		iput(inode);
	}

	return dentry;
}
static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
				int connectable)
{
	struct inode *inode = dentry->d_inode;

	if (*len < 3) {
		*len = 3;
		return 255;
	}

	if (inode_unhashed(inode)) {
		/* Unfortunately insert_inode_hash is not idempotent,
		 * so as we hash inodes here rather than at creation
		 * time, we need a lock to ensure we only try
		 * to do it once
		 */
		static DEFINE_SPINLOCK(lock);
		spin_lock(&lock);
		if (inode_unhashed(inode))
			__insert_inode_hash(inode,
					    inode->i_ino + inode->i_generation);
		spin_unlock(&lock);
	}

	fh[0] = inode->i_generation;
	fh[1] = inode->i_ino;
	fh[2] = ((__u64)inode->i_ino) >> 32;

	*len = 3;
	return 1;
}
static const struct export_operations shmem_export_ops = {
	.get_parent	= shmem_get_parent,
	.encode_fh	= shmem_encode_fh,
	.fh_to_dentry	= shmem_fh_to_dentry,
};
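
/*
 * Illustrative userspace sketch (not from the kernel tree) of the file
 * handle layout used by shmem_encode_fh() and shmem_match() above:
 * fh[0] holds the generation, fh[1] and fh[2] split the 64-bit inode
 * number.  Disabled here; compile it separately to run it.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t ino = 0x123456789abcULL;	/* arbitrary example inode */
	uint32_t generation = 42;
	uint32_t fh[3];
	uint64_t inum;

	/* encode, as shmem_encode_fh() does */
	fh[0] = generation;
	fh[1] = (uint32_t)ino;
	fh[2] = (uint32_t)(ino >> 32);

	/* decode, as shmem_match() rebuilds it */
	inum = fh[2];
	inum = (inum << 32) | fh[1];
	printf("match: %d\n", inum == ino && fh[0] == generation);
	return 0;
}
#endif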
2448 static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
2451 char *this_char, *value, *rest;
2453 while (options != NULL) {
2454 this_char = options;
2457 * NUL-terminate this option: unfortunately,
2458 * mount options form a comma-separated list,
2459 * but mpol's nodelist may also contain commas.
2461 options = strchr(options, ',');
2462 if (options == NULL)
2465 if (!isdigit(*options)) {
2472 if ((value = strchr(this_char,'=')) != NULL) {
2476 "tmpfs: No value for mount option '%s'\n",
2481 if (!strcmp(this_char,"size")) {
2482 unsigned long long size;
2483 size = memparse(value,&rest);
2485 size <<= PAGE_SHIFT;	/* '%' suffix: size is a percentage of total RAM */
2486 size *= totalram_pages;
2492 sbinfo->max_blocks =
2493 DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
2494 } else if (!strcmp(this_char,"nr_blocks")) {
2495 sbinfo->max_blocks = memparse(value, &rest);
2498 } else if (!strcmp(this_char,"nr_inodes")) {
2499 sbinfo->max_inodes = memparse(value, &rest);
2502 } else if (!strcmp(this_char,"mode")) {
2505 sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
2508 } else if (!strcmp(this_char,"uid")) {
2511 sbinfo->uid = simple_strtoul(value, &rest, 0);
2514 } else if (!strcmp(this_char,"gid")) {
2517 sbinfo->gid = simple_strtoul(value, &rest, 0);
2520 } else if (!strcmp(this_char,"mpol")) {
2521 if (mpol_parse_str(value, &sbinfo->mpol, 1))
2524 printk(KERN_ERR "tmpfs: Bad mount option %s\n",
2532 printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
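/*
 * Illustrative usage, not part of the original file: the options parsed
 * above arrive verbatim from mount(8), e.g.
 *
 *	mount -t tmpfs -o size=50%,nr_blocks=4096,nr_inodes=1k,mode=700 tmpfs /mnt
 *
 * "size" and the nr_* options go through memparse(), so k/m/g suffixes
 * work; a '%' suffix on "size" scales by total RAM; "mode" is octal and
 * "uid"/"gid" are plain numbers.
 */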
2538 static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
2540 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2541 struct shmem_sb_info config = *sbinfo;
2542 unsigned long inodes;
2543 int error = -EINVAL;
2545 if (shmem_parse_options(data, &config, true))
2548 spin_lock(&sbinfo->stat_lock);
2549 inodes = sbinfo->max_inodes - sbinfo->free_inodes;
2550 if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0)
2552 if (config.max_inodes < inodes)
2555 * Those tests also disallow limited->unlimited while any are in
2556 * use, so i_blocks will always be zero when max_blocks is zero;
2557 * but we must separately disallow unlimited->limited, because
2558 * in that case we have no record of how much is already in use.
2560 if (config.max_blocks && !sbinfo->max_blocks)
2562 if (config.max_inodes && !sbinfo->max_inodes)
2566 sbinfo->max_blocks = config.max_blocks;
2567 sbinfo->max_inodes = config.max_inodes;
2568 sbinfo->free_inodes = config.max_inodes - inodes;
2570 mpol_put(sbinfo->mpol);
2571 sbinfo->mpol = config.mpol; /* transfers initial ref */
2573 spin_unlock(&sbinfo->stat_lock);
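/*
 * Illustrative consequence of the checks above, not part of the original
 * file: a remount may tighten or relax finite limits, but cannot cross
 * the limited/unlimited boundary when that would lose information, e.g.
 *
 *	mount -o remount,size=256m /mnt   - ok while current usage fits
 *	mount -o remount,size=0 /mnt      - rejected while blocks are in use
 *	remounting unlimited -> limited   - always rejected: no usage record
 */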
2577 static int shmem_show_options(struct seq_file *seq, struct vfsmount *vfs)
2579 struct shmem_sb_info *sbinfo = SHMEM_SB(vfs->mnt_sb);
2581 if (sbinfo->max_blocks != shmem_default_max_blocks())
2582 seq_printf(seq, ",size=%luk",
2583 sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10));
2584 if (sbinfo->max_inodes != shmem_default_max_inodes())
2585 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
2586 if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
2587 seq_printf(seq, ",mode=%03o", sbinfo->mode);
2588 if (sbinfo->uid != 0)
2589 seq_printf(seq, ",uid=%u", sbinfo->uid);
2590 if (sbinfo->gid != 0)
2591 seq_printf(seq, ",gid=%u", sbinfo->gid);
2592 shmem_show_mpol(seq, sbinfo->mpol);
2595 #endif /* CONFIG_TMPFS */
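/*
 * Illustrative output, not part of the original file: with the
 * seq_printf() formats above, a fully customized mount appears in
 * /proc/mounts along the lines of
 *
 *	tmpfs /mnt tmpfs rw,size=1024k,nr_inodes=1024,mode=700,uid=1000,gid=1000 0 0
 *
 * whereas options still at their defaults are omitted.
 */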
2597 static void shmem_put_super(struct super_block *sb)
2599 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2601 percpu_counter_destroy(&sbinfo->used_blocks);
2603 sb->s_fs_info = NULL;
2606 int shmem_fill_super(struct super_block *sb, void *data, int silent)
2608 struct inode *inode;
2609 struct dentry *root;
2610 struct shmem_sb_info *sbinfo;
2613 /* Round up to L1_CACHE_BYTES to resist false sharing */
2614 sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
2615 L1_CACHE_BYTES), GFP_KERNEL);
2619 sbinfo->mode = S_IRWXUGO | S_ISVTX;
2620 sbinfo->uid = current_fsuid();
2621 sbinfo->gid = current_fsgid();
2622 sb->s_fs_info = sbinfo;
2626 * By default we only allow half of the physical ram per
2627 * tmpfs instance, limiting inodes to one per page of lowmem;
2628 * but the internal instance is left unlimited.
2630 if (!(sb->s_flags & MS_NOUSER)) {
2631 sbinfo->max_blocks = shmem_default_max_blocks();
2632 sbinfo->max_inodes = shmem_default_max_inodes();
2633 if (shmem_parse_options(data, sbinfo, false)) {
2638 sb->s_export_op = &shmem_export_ops;
2640 sb->s_flags |= MS_NOUSER;
2643 spin_lock_init(&sbinfo->stat_lock);
2644 if (percpu_counter_init(&sbinfo->used_blocks, 0))
2646 sbinfo->free_inodes = sbinfo->max_inodes;
2648 sb->s_maxbytes = SHMEM_MAX_BYTES;
2649 sb->s_blocksize = PAGE_CACHE_SIZE;
2650 sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
2651 sb->s_magic = TMPFS_MAGIC;
2652 sb->s_op = &shmem_ops;
2653 sb->s_time_gran = 1;
2654 #ifdef CONFIG_TMPFS_XATTR
2655 sb->s_xattr = shmem_xattr_handlers;
2657 #ifdef CONFIG_TMPFS_POSIX_ACL
2658 sb->s_flags |= MS_POSIXACL;
2661 inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
2664 inode->i_uid = sbinfo->uid;
2665 inode->i_gid = sbinfo->gid;
2666 root = d_alloc_root(inode);
2675 shmem_put_super(sb);
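/*
 * Sketch, not part of the original file, of how the defaults described
 * in the comment above ("half of the physical ram ... one inode per page
 * of lowmem") are conventionally computed; the real helpers are defined
 * earlier in this file under CONFIG_TMPFS.
 */
static inline unsigned long example_default_max_blocks(void)
{
	return totalram_pages / 2;
}

static inline unsigned long example_default_max_inodes(void)
{
	return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}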
2679 static struct kmem_cache *shmem_inode_cachep;
2681 static struct inode *shmem_alloc_inode(struct super_block *sb)
2683 struct shmem_inode_info *p;
2684 p = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
2687 return &p->vfs_inode;
2690 static void shmem_i_callback(struct rcu_head *head)
2692 struct inode *inode = container_of(head, struct inode, i_rcu);
2693 INIT_LIST_HEAD(&inode->i_dentry);
2694 kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
2697 static void shmem_destroy_inode(struct inode *inode)
2699 if ((inode->i_mode & S_IFMT) == S_IFREG) {
2700 /* only struct inode is valid if it's an inline symlink */
2701 mpol_free_shared_policy(&SHMEM_I(inode)->policy);
2703 call_rcu(&inode->i_rcu, shmem_i_callback);
2706 static void init_once(void *foo)
2708 struct shmem_inode_info *p = (struct shmem_inode_info *) foo;
2710 inode_init_once(&p->vfs_inode);
2713 static int init_inodecache(void)
2715 shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
2716 sizeof(struct shmem_inode_info),
2717 0, SLAB_PANIC, init_once);
2721 static void destroy_inodecache(void)
2723 kmem_cache_destroy(shmem_inode_cachep);
2726 static const struct address_space_operations shmem_aops = {
2727 .writepage = shmem_writepage,
2728 .set_page_dirty = __set_page_dirty_no_writeback,
2730 .write_begin = shmem_write_begin,
2731 .write_end = shmem_write_end,
2733 .migratepage = migrate_page,
2734 .error_remove_page = generic_error_remove_page,
2737 static const struct file_operations shmem_file_operations = {
2740 .llseek = generic_file_llseek,
2741 .read = do_sync_read,
2742 .write = do_sync_write,
2743 .aio_read = shmem_file_aio_read,
2744 .aio_write = generic_file_aio_write,
2745 .fsync = noop_fsync,
2746 .splice_read = shmem_file_splice_read,
2747 .splice_write = generic_file_splice_write,
2751 static const struct inode_operations shmem_inode_operations = {
2752 .setattr = shmem_setattr,
2753 .truncate_range = shmem_truncate_range,
2754 #ifdef CONFIG_TMPFS_XATTR
2755 .setxattr = shmem_setxattr,
2756 .getxattr = shmem_getxattr,
2757 .listxattr = shmem_listxattr,
2758 .removexattr = shmem_removexattr,
2760 #ifdef CONFIG_TMPFS_POSIX_ACL
2761 .check_acl = generic_check_acl,
2766 static const struct inode_operations shmem_dir_inode_operations = {
2768 .create = shmem_create,
2769 .lookup = simple_lookup,
2771 .unlink = shmem_unlink,
2772 .symlink = shmem_symlink,
2773 .mkdir = shmem_mkdir,
2774 .rmdir = shmem_rmdir,
2775 .mknod = shmem_mknod,
2776 .rename = shmem_rename,
2778 #ifdef CONFIG_TMPFS_XATTR
2779 .setxattr = shmem_setxattr,
2780 .getxattr = shmem_getxattr,
2781 .listxattr = shmem_listxattr,
2782 .removexattr = shmem_removexattr,
2784 #ifdef CONFIG_TMPFS_POSIX_ACL
2785 .setattr = shmem_setattr,
2786 .check_acl = generic_check_acl,
2790 static const struct inode_operations shmem_special_inode_operations = {
2791 #ifdef CONFIG_TMPFS_XATTR
2792 .setxattr = shmem_setxattr,
2793 .getxattr = shmem_getxattr,
2794 .listxattr = shmem_listxattr,
2795 .removexattr = shmem_removexattr,
2797 #ifdef CONFIG_TMPFS_POSIX_ACL
2798 .setattr = shmem_setattr,
2799 .check_acl = generic_check_acl,
2803 static const struct super_operations shmem_ops = {
2804 .alloc_inode = shmem_alloc_inode,
2805 .destroy_inode = shmem_destroy_inode,
2807 .statfs = shmem_statfs,
2808 .remount_fs = shmem_remount_fs,
2809 .show_options = shmem_show_options,
2811 .evict_inode = shmem_evict_inode,
2812 .drop_inode = generic_delete_inode,
2813 .put_super = shmem_put_super,
2816 static const struct vm_operations_struct shmem_vm_ops = {
2817 .fault = shmem_fault,
2819 .set_policy = shmem_set_policy,
2820 .get_policy = shmem_get_policy,
2825 static struct dentry *shmem_mount(struct file_system_type *fs_type,
2826 int flags, const char *dev_name, void *data)
2828 return mount_nodev(fs_type, flags, data, shmem_fill_super);
2831 static struct file_system_type tmpfs_fs_type = {
2832 .owner = THIS_MODULE,
2834 .mount = shmem_mount,
2835 .kill_sb = kill_litter_super,
2838 int __init init_tmpfs(void)
2842 error = bdi_init(&shmem_backing_dev_info);
2846 error = init_inodecache();
2850 error = register_filesystem(&tmpfs_fs_type);
2852 printk(KERN_ERR "Could not register tmpfs\n");
2856 shm_mnt = vfs_kern_mount(&tmpfs_fs_type, MS_NOUSER,
2857 tmpfs_fs_type.name, NULL);
2858 if (IS_ERR(shm_mnt)) {
2859 error = PTR_ERR(shm_mnt);
2860 printk(KERN_ERR "Could not kern_mount tmpfs\n");
2866 unregister_filesystem(&tmpfs_fs_type);
2868 destroy_inodecache();
2870 bdi_destroy(&shmem_backing_dev_info);
2872 shm_mnt = ERR_PTR(error);
2876 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
2878 * mem_cgroup_get_shmem_target - find a page or entry assigned to the shmem file
2879 * @inode: the inode to be searched
2880 * @pgoff: the offset to be searched
2881 * @pagep: the pointer for the found page to be stored
2882 * @ent: the pointer for the found swap entry to be stored
2884 * If a page is found, its refcount is incremented; the caller must drop that reference.
2887 void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff,
2888 struct page **pagep, swp_entry_t *ent)
2890 swp_entry_t entry = { .val = 0 }, *ptr;
2891 struct page *page = NULL;
2892 struct shmem_inode_info *info = SHMEM_I(inode);
2894 if ((pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
2897 spin_lock(&info->lock);
2898 ptr = shmem_swp_entry(info, pgoff, NULL);
2900 if (ptr && ptr->val) {
2901 entry.val = ptr->val;
2902 page = find_get_page(&swapper_space, entry.val);
2905 page = find_get_page(inode->i_mapping, pgoff);
2907 shmem_swp_unmap(ptr);
2908 spin_unlock(&info->lock);
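/*
 * Illustrative caller sketch, not part of the original file (names are
 * hypothetical): on return, *pagep is set if a page could be pinned
 * (page cache or swapcache) and ent->val is set if the offset lives in
 * swap; a swapcache hit sets both.
 */
static void example_probe_shmem_target(struct inode *inode, pgoff_t pgoff)
{
	struct page *page;
	swp_entry_t ent;

	mem_cgroup_get_shmem_target(inode, pgoff, &page, &ent);
	if (page)
		page_cache_release(page);	/* drop the reference taken for us */
	if (ent.val) {
		/* offset is (also) backed by the swap slot in ent */
	}
}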
2915 #else /* !CONFIG_SHMEM */
2918 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
2920 * This is intended for small systems where the benefits of the full
2921 * shmem code (swap-backed and resource-limited) are outweighed by
2922 * its complexity. On systems without swap this code should be
2923 * effectively equivalent, but much lighter weight.
2926 #include <linux/ramfs.h>
2928 static struct file_system_type tmpfs_fs_type = {
2930 .mount = ramfs_mount,
2931 .kill_sb = kill_litter_super,
2934 int __init init_tmpfs(void)
2936 BUG_ON(register_filesystem(&tmpfs_fs_type) != 0);
2938 shm_mnt = kern_mount(&tmpfs_fs_type);
2939 BUG_ON(IS_ERR(shm_mnt));
2944 int shmem_unuse(swp_entry_t entry, struct page *page)
2949 int shmem_lock(struct file *file, int lock, struct user_struct *user)
2954 void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
2956 truncate_inode_pages_range(inode->i_mapping, start, end);
2958 EXPORT_SYMBOL_GPL(shmem_truncate_range);
2960 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
2962 * mem_cgroup_get_shmem_target - find a page or entry assigned to the shmem file
2963 * @inode: the inode to be searched
2964 * @pgoff: the offset to be searched
2965 * @pagep: the pointer for the found page to be stored
2966 * @ent: the pointer for the found swap entry to be stored
2968 * If a page is found, its refcount is incremented; the caller must drop that reference.
2971 void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff,
2972 struct page **pagep, swp_entry_t *ent)
2974 struct page *page = NULL;
2976 if ((pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
2978 page = find_get_page(inode->i_mapping, pgoff);
2981 *ent = (swp_entry_t){ .val = 0 };
2985 #define shmem_vm_ops generic_file_vm_ops
2986 #define shmem_file_operations ramfs_file_operations
2987 #define shmem_get_inode(sb, dir, mode, dev, flags) ramfs_get_inode(sb, dir, mode, dev)
2988 #define shmem_acct_size(flags, size) 0
2989 #define shmem_unacct_size(flags, size) do {} while (0)
2990 #define SHMEM_MAX_BYTES MAX_LFS_FILESIZE
2992 #endif /* CONFIG_SHMEM */
2997 * shmem_file_setup - get an unlinked file living in tmpfs
2998 * @name: name for dentry (to be seen in /proc/<pid>/maps)
2999 * @size: size to be set for the file
3000 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
3002 struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
3006 struct inode *inode;
3008 struct dentry *root;
3011 if (IS_ERR(shm_mnt))
3012 return (void *)shm_mnt;	/* propagate the ERR_PTR as our return value */
3014 if (size < 0 || size > SHMEM_MAX_BYTES)
3015 return ERR_PTR(-EINVAL);
3017 if (shmem_acct_size(flags, size))
3018 return ERR_PTR(-ENOMEM);
3022 this.len = strlen(name);
3023 this.hash = 0; /* will go */
3024 root = shm_mnt->mnt_root;
3025 path.dentry = d_alloc(root, &this);
3028 path.mnt = mntget(shm_mnt);
3031 inode = shmem_get_inode(root->d_sb, NULL, S_IFREG | S_IRWXUGO, 0, flags);
3035 d_instantiate(path.dentry, inode);
3036 inode->i_size = size;
3037 inode->i_nlink = 0; /* It is unlinked */
3039 error = ramfs_nommu_expand_for_mapping(inode, size);
3045 file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
3046 &shmem_file_operations);
3055 shmem_unacct_size(flags, size);
3056 return ERR_PTR(error);
3058 EXPORT_SYMBOL_GPL(shmem_file_setup);
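/*
 * Illustrative usage sketch, not part of the original file: an in-kernel
 * user creates an unlinked tmpfs file and drops it with fput() when done;
 * the name is only ever visible in /proc/<pid>/maps.
 */
static int example_use_shmem_file(void)
{
	struct file *file;

	file = shmem_file_setup("example", 1024 * 1024, VM_NORESERVE);
	if (IS_ERR(file))
		return PTR_ERR(file);

	/* ... mmap the file or read/write through it ... */

	fput(file);
	return 0;
}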
3061 * shmem_zero_setup - setup a shared anonymous mapping
3062 * @vma: the vma to be mmapped, as prepared by do_mmap_pgoff
3064 int shmem_zero_setup(struct vm_area_struct *vma)
3067 loff_t size = vma->vm_end - vma->vm_start;
3069 file = shmem_file_setup("dev/zero", size, vma->vm_flags);
3071 return PTR_ERR(file);
3075 vma->vm_file = file;
3076 vma->vm_ops = &shmem_vm_ops;
3077 vma->vm_flags |= VM_CAN_NONLINEAR;
3082 * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
3083 * @mapping: the page's address_space
3084 * @index: the page index
3085 * @gfp: the page allocator flags to use if allocating
3087 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
3088 * with any new page allocations done using the specified allocation flags.
3089 * But read_cache_page_gfp() uses the ->readpage() method: which does not
3090 * suit tmpfs, since it may have pages in swapcache, and needs to find those
3091 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
3093 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
3094 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
3096 struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
3097 pgoff_t index, gfp_t gfp)
3100 struct inode *inode = mapping->host;
3104 BUG_ON(mapping->a_ops != &shmem_aops);
3105 error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE, gfp, NULL);
3107 page = ERR_PTR(error);
3113 * The tiny !SHMEM case uses ramfs without swap
3115 return read_cache_page_gfp(mapping, index, gfp);
3118 EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
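/*
 * Illustrative usage sketch, not part of the original file, in the style
 * of the i915 usage mentioned above: read a page from a shmem-backed
 * mapping while telling the allocator not to retry or warn, so a failure
 * comes back to the caller instead of invoking the OOM killer.
 */
static struct page *example_read_shmem_page(struct address_space *mapping,
					    pgoff_t index)
{
	gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;

	return shmem_read_mapping_page_gfp(mapping, index, gfp);
}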