/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000-2001 Christoph Rohland
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/swap.h>

static struct vfsmount *shm_mnt;

/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */
#ifdef CONFIG_SHMEM
#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/generic_acl.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128

/*
 * vmtruncate_range() communicates with shmem_fault via
 * inode->i_private (with i_mutex making sure that it has only one user at
 * a time): we would prefer not to enlarge the shmem inode just for that.
 */
struct shmem_falloc {
	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
	pgoff_t start;		/* start of range currently being fallocated */
	pgoff_t next;		/* the next page offset to be fallocated */
};

struct shmem_xattr {
	struct list_head list;	/* anchored by shmem_inode_info->xattr_list */
	char *name;		/* xattr name */
	size_t size;
	char value[0];
};

/* Flag allocation requirements to shmem_getpage */
enum sgp_type {
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_DIRTY,	/* like SGP_CACHE, but set new page dirty */
	SGP_WRITE,	/* may exceed i_size, may allocate page */
};

static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}

static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type);

static inline int shmem_getpage(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, int *fault_type)
{
	return shmem_getpage_gfp(inode, index, pagep, sgp,
			mapping_gfp_mask(inode->i_mapping), fault_type);
}
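/*
 * Illustrative note (added commentary, not part of the original source):
 * a read-only lookup that must not instantiate pages beyond i_size is
 * requested as, e.g.
 *
 *	struct page *page = NULL;
 *	error = shmem_getpage(inode, index, &page, SGP_READ, NULL);
 *
 * as do_shmem_file_read() does below, while write paths pass SGP_WRITE
 * so a page may be allocated beyond i_size before the size is updated.
 */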
static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_kern(VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
	return (flags & VM_NORESERVE) ?
		security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE)) : 0;
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}
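/*
 * Added commentary (not from the original source): the two schemes are
 * complementary.  An object created without VM_NORESERVE charges its whole
 * size up front via shmem_acct_size(), so shmem_acct_block() then charges
 * nothing per page; a VM_NORESERVE object skips the up-front charge and
 * instead pays per page allocated.  A sketch of the per-page pairing, as
 * used by shmem_getpage_gfp() below:
 *
 *	if (shmem_acct_block(info->flags))	-- fail with -ENOSPC
 *	...
 *	shmem_unacct_blocks(info->flags, 1);	-- undo on the error path
 */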
static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info __read_mostly = {
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

static int shmem_reserve_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}
	return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}
/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * Normally info->alloced == inode->i_mapping->nrpages + info->swapped,
 * so the amount the mm freed is
 * info->alloced - (inode->i_mapping->nrpages + info->swapped).
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_blocks)
			percpu_counter_add(&sbinfo->used_blocks, -freed);
		info->alloced -= freed;
		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
		shmem_unacct_blocks(info->flags, freed);
	}
}
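/*
 * Added commentary (not from the original source): callers hold
 * info->lock around shmem_recalc_inode(), as the truncate and writepage
 * paths below do:
 *
 *	spin_lock(&info->lock);
 *	shmem_recalc_inode(inode);
 *	spin_unlock(&info->lock);
 */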
/*
 * Replace item expected in radix tree by a new item, while holding tree lock.
 */
static int shmem_radix_tree_replace(struct address_space *mapping,
			pgoff_t index, void *expected, void *replacement)
{
	void **pslot;
	void *item = NULL;

	VM_BUG_ON(!expected);
	pslot = radix_tree_lookup_slot(&mapping->page_tree, index);
	if (pslot)
		item = radix_tree_deref_slot_protected(pslot,
							&mapping->tree_lock);
	if (item != expected)
		return -ENOENT;
	if (replacement)
		radix_tree_replace_slot(pslot, replacement);
	else
		radix_tree_delete(&mapping->page_tree, index);
	return 0;
}
/*
 * Like add_to_page_cache_locked, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct page *page,
				   struct address_space *mapping,
				   pgoff_t index, gfp_t gfp, void *expected)
{
	int error = 0;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageSwapBacked(page));

	if (!expected)
		error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
	if (!error) {
		page_cache_get(page);
		page->mapping = mapping;
		page->index = index;

		spin_lock_irq(&mapping->tree_lock);
		if (!expected)
			error = radix_tree_insert(&mapping->page_tree,
							index, page);
		else
			error = shmem_radix_tree_replace(mapping, index,
							 expected, page);
		if (!error) {
			mapping->nrpages++;
			__inc_zone_page_state(page, NR_FILE_PAGES);
			__inc_zone_page_state(page, NR_SHMEM);
			spin_unlock_irq(&mapping->tree_lock);
		} else {
			page->mapping = NULL;
			spin_unlock_irq(&mapping->tree_lock);
			page_cache_release(page);
		}
		if (!expected)
			radix_tree_preload_end();
	}
	if (error)
		mem_cgroup_uncharge_cache_page(page);
	return error;
}
/*
 * Like delete_from_page_cache, but substitutes swap for page.
 */
static void shmem_delete_from_page_cache(struct page *page, void *radswap)
{
	struct address_space *mapping = page->mapping;
	int error;

	spin_lock_irq(&mapping->tree_lock);
	error = shmem_radix_tree_replace(mapping, page->index, page, radswap);
	page->mapping = NULL;
	mapping->nrpages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	__dec_zone_page_state(page, NR_SHMEM);
	spin_unlock_irq(&mapping->tree_lock);
	page_cache_release(page);
	BUG_ON(error);
}
/*
 * Like find_get_pages, but collecting swap entries as well as pages.
 */
static unsigned shmem_find_get_pages_and_swap(struct address_space *mapping,
					pgoff_t start, unsigned int nr_pages,
					struct page **pages, pgoff_t *indices)
{
	unsigned int i;
	unsigned int ret;
	unsigned int nr_found;

	rcu_read_lock();
restart:
	nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
				(void ***)pages, indices, start, nr_pages);
	ret = 0;
	for (i = 0; i < nr_found; i++) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot((void **)pages[i]);
		if (unlikely(!page))
			continue;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page))
				goto restart;
			/*
			 * Otherwise, we must be storing a swap entry
			 * here as an exceptional entry: so return it
			 * without attempting to raise page count.
			 */
			goto export;
		}
		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *((void **)pages[i]))) {
			page_cache_release(page);
			goto repeat;
		}
export:
		indices[ret] = indices[i];
		pages[ret] = page;
		ret++;
	}
	if (unlikely(!ret && nr_found))
		goto restart;
	rcu_read_unlock();
	return ret;
}
/*
 * Remove swap entry from radix tree, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
			   pgoff_t index, void *radswap)
{
	int error;

	spin_lock_irq(&mapping->tree_lock);
	error = shmem_radix_tree_replace(mapping, index, radswap, NULL);
	spin_unlock_irq(&mapping->tree_lock);
	if (!error)
		free_swap_and_cache(radix_to_swp_entry(radswap));
	return error;
}
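/*
 * Added commentary (not from the original source): swap entries live in
 * the radix tree as "exceptional" entries, converted by the helpers in
 * <linux/swapops.h>, roughly:
 *
 *	void *radswap = swp_to_radix_entry(swap);	-- encode
 *	swp_entry_t swap = radix_to_swp_entry(radswap);	-- decode
 *
 * radix_tree_exceptional_entry() is what distinguishes them from real
 * struct page pointers in lookups like the ones above.
 */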
/*
 * Pagevec may contain swap entries, so shuffle up pages before releasing.
 */
static void shmem_deswap_pagevec(struct pagevec *pvec)
{
	int i, j;

	for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		if (!radix_tree_exceptional_entry(page))
			pvec->pages[j++] = page;
	}
	pvec->nr = j;
}

/*
 * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
 */
void shmem_unlock_mapping(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	pgoff_t index = 0;

	pagevec_init(&pvec, 0);
	/*
	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
	 */
	while (!mapping_unevictable(mapping)) {
		/*
		 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
		 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
		 */
		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
					PAGEVEC_SIZE, pvec.pages, indices);
		if (!pvec.nr)
			break;
		index = indices[pvec.nr - 1] + 1;
		shmem_deswap_pagevec(&pvec);
		check_move_unevictable_pages(pvec.pages, pvec.nr);
		pagevec_release(&pvec);
		cond_resched();
	}
}
/*
 * Remove range of pages and swap entries from radix tree, and free them.
 */
void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
	pgoff_t end = (lend >> PAGE_CACHE_SHIFT);
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	long nr_swaps_freed = 0;
	pgoff_t index;
	int i;

	BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));

	pagevec_init(&pvec, 0);
	index = start;
	while (index <= end) {
		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
							pvec.pages, indices);
		if (!pvec.nr)
			break;
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index > end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, page);
				continue;
			}

			if (!trylock_page(page))
				continue;
			if (page->mapping == mapping) {
				VM_BUG_ON(PageWriteback(page));
				truncate_inode_page(mapping, page);
			}
			unlock_page(page);
		}
		shmem_deswap_pagevec(&pvec);
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}

	if (partial) {
		struct page *page = NULL;
		shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
		if (page) {
			zero_user_segment(page, partial, PAGE_CACHE_SIZE);
			set_page_dirty(page);
			unlock_page(page);
			page_cache_release(page);
		}
	}

	index = start;
	for ( ; ; ) {
		cond_resched();
		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
							pvec.pages, indices);
		if (!pvec.nr) {
			if (index == start)
				break;
			index = start;
			continue;
		}
		if (index == start && indices[0] > end) {
			shmem_deswap_pagevec(&pvec);
			pagevec_release(&pvec);
			break;
		}
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index > end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, page);
				continue;
			}

			lock_page(page);
			if (page->mapping == mapping) {
				VM_BUG_ON(PageWriteback(page));
				truncate_inode_page(mapping, page);
			}
			unlock_page(page);
		}
		shmem_deswap_pagevec(&pvec);
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		index++;
	}

	spin_lock(&info->lock);
	info->swapped -= nr_swaps_freed;
	shmem_recalc_inode(inode);
	spin_unlock(&info->lock);

	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);
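/*
 * Usage sketch (added commentary, not from the original source): both
 * arguments are byte offsets and lend is inclusive, so truncating a whole
 * file and punching out one page look like
 *
 *	shmem_truncate_range(inode, 0, (loff_t)-1);
 *	shmem_truncate_range(inode, offset, offset + PAGE_CACHE_SIZE - 1);
 *
 * The BUG_ON above enforces that lend + 1 is page-aligned.
 */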
static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		if (newsize != oldsize) {
			i_size_write(inode, newsize);
			inode->i_ctime = inode->i_mtime = CURRENT_TIME;
		}
		if (newsize < oldsize) {
			loff_t holebegin = round_up(newsize, PAGE_SIZE);
			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
			shmem_truncate_range(inode, newsize, (loff_t)-1);
			/* unmap again to remove racily COWed private pages */
			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
		}
	}

	setattr_copy(inode, attr);
#ifdef CONFIG_TMPFS_POSIX_ACL
	if (attr->ia_valid & ATTR_MODE)
		error = generic_acl_chmod(inode);
#endif
	return error;
}

static void shmem_evict_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_xattr *xattr, *nxattr;

	if (inode->i_mapping->a_ops == &shmem_aops) {
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate_range(inode, 0, (loff_t)-1);
		if (!list_empty(&info->swaplist)) {
			mutex_lock(&shmem_swaplist_mutex);
			list_del_init(&info->swaplist);
			mutex_unlock(&shmem_swaplist_mutex);
		}
	} else
		kfree(info->symlink);

	list_for_each_entry_safe(xattr, nxattr, &info->xattr_list, list) {
		kfree(xattr->name);
		kfree(xattr);
	}
	WARN_ON(inode->i_blocks);
	shmem_free_inode(inode->i_sb);
	end_writeback(inode);
}
/*
 * If swap found in inode, free it and move page from swapcache to filecache.
 */
static int shmem_unuse_inode(struct shmem_inode_info *info,
			     swp_entry_t swap, struct page *page)
{
	struct address_space *mapping = info->vfs_inode.i_mapping;
	void *radswap;
	pgoff_t index;
	int error;

	radswap = swp_to_radix_entry(swap);
	index = radix_tree_locate_item(&mapping->page_tree, radswap);
	if (index == -1)
		return 0;

	/*
	 * Move _head_ to start search for next from here.
	 * But be careful: shmem_evict_inode checks list_empty without taking
	 * mutex, and there's an instant in list_move_tail when info->swaplist
	 * would appear empty, if it were the only one on shmem_swaplist.
	 */
	if (shmem_swaplist.next != &info->swaplist)
		list_move_tail(&shmem_swaplist, &info->swaplist);

	/*
	 * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
	 * but also to hold up shmem_evict_inode(): so inode cannot be freed
	 * beneath us (pagelock doesn't help until the page is in pagecache).
	 */
	error = shmem_add_to_page_cache(page, mapping, index,
						GFP_NOWAIT, radswap);
	/* which does mem_cgroup_uncharge_cache_page on error */

	if (error != -ENOMEM) {
		/*
		 * Truncation and eviction use free_swap_and_cache(), which
		 * only does trylock page: if we raced, best clean up here.
		 */
		delete_from_swap_cache(page);
		set_page_dirty(page);
		if (!error) {
			spin_lock(&info->lock);
			info->swapped--;
			spin_unlock(&info->lock);
			swap_free(swap);
		}
		error = 1;	/* not an error, but entry was found */
	}
	return error;
}

/*
 * Search through swapped inodes to find and replace swap by page.
 */
int shmem_unuse(swp_entry_t swap, struct page *page)
{
	struct list_head *this, *next;
	struct shmem_inode_info *info;
	int found = 0;
	int error;

	/*
	 * Charge page using GFP_KERNEL while we can wait, before taking
	 * the shmem_swaplist_mutex which might hold up shmem_writepage().
	 * Charged back to the user (not to caller) when swap account is used.
	 */
	error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
	if (error)
		goto out;
	/* No radix_tree_preload: swap entry keeps a place for page in tree */

	mutex_lock(&shmem_swaplist_mutex);
	list_for_each_safe(this, next, &shmem_swaplist) {
		info = list_entry(this, struct shmem_inode_info, swaplist);
		if (info->swapped)
			found = shmem_unuse_inode(info, swap, page);
		else
			list_del_init(&info->swaplist);
		cond_resched();
		if (found)
			break;
	}
	mutex_unlock(&shmem_swaplist_mutex);

	if (!found)
		mem_cgroup_uncharge_cache_page(page);
	if (found < 0)
		error = found;
out:
	unlock_page(page);
	page_cache_release(page);
	return error;
}
/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct shmem_inode_info *info;
	struct address_space *mapping;
	struct inode *inode;
	swp_entry_t swap;

	BUG_ON(!PageLocked(page));
	mapping = page->mapping;
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)
		goto redirty;
	if (!total_swap_pages)
		goto redirty;

	/*
	 * shmem_backing_dev_info's capabilities prevent regular writeback or
	 * sync from ever calling shmem_writepage; but a stacking filesystem
	 * might use ->writepage of its underlying filesystem, in which case
	 * tmpfs should write out to swap only in response to memory pressure,
	 * and not for the writeback threads or sync.
	 */
	if (!wbc->for_reclaim) {
		WARN_ON_ONCE(1);	/* Still happens? Tell us about it! */
		goto redirty;
	}
	swap = get_swap_page();
	if (!swap.val)
		goto redirty;

	/*
	 * Add inode to shmem_unuse()'s list of swapped-out inodes,
	 * if it's not already there.  Do it now before the page is
	 * moved to swap cache, when its pagelock no longer protects
	 * the inode from eviction.  But don't unlock the mutex until
	 * we've incremented swapped, because shmem_unuse_inode() will
	 * prune a !swapped inode from the swaplist under this mutex.
	 */
	mutex_lock(&shmem_swaplist_mutex);
	if (list_empty(&info->swaplist))
		list_add_tail(&info->swaplist, &shmem_swaplist);

	if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
		swap_shmem_alloc(swap);
		shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));

		spin_lock(&info->lock);
		info->swapped++;
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);

		mutex_unlock(&shmem_swaplist_mutex);
		BUG_ON(page_mapped(page));
		swap_writepage(page, wbc);
		return 0;
	}

	mutex_unlock(&shmem_swaplist_mutex);
	swapcache_free(swap, NULL);
redirty:
	set_page_dirty(page);
	if (wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;	/* Return with page locked */
	unlock_page(page);
	return 0;
}
#ifdef CONFIG_NUMA
#ifdef CONFIG_TMPFS
static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
	char buffer[64];

	if (!mpol || mpol->mode == MPOL_DEFAULT)
		return;		/* show nothing */

	mpol_to_str(buffer, sizeof(buffer), mpol, 1);

	seq_printf(seq, ",mpol=%s", buffer);
}

static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	struct mempolicy *mpol = NULL;
	if (sbinfo->mpol) {
		spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
		mpol = sbinfo->mpol;
		mpol_get(mpol);
		spin_unlock(&sbinfo->stat_lock);
	}
	return mpol;
}
#endif /* CONFIG_TMPFS */

static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct vm_area_struct pvma;
	struct page *page;

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	pvma.vm_pgoff = index;
	pvma.vm_ops = NULL;
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);

	page = swapin_readahead(swap, gfp, &pvma, 0);

	/* Drop reference taken by mpol_shared_policy_lookup() */
	mpol_cond_put(pvma.vm_policy);

	return page;
}

static struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct vm_area_struct pvma;
	struct page *page;

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	pvma.vm_pgoff = index;
	pvma.vm_ops = NULL;
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);

	page = alloc_page_vma(gfp, &pvma, 0);

	/* Drop reference taken by mpol_shared_policy_lookup() */
	mpol_cond_put(pvma.vm_policy);

	return page;
}
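/*
 * Added commentary (not from the original source): the on-stack pseudo
 * vma above exists only so that alloc_page_vma()/swapin_readahead() can
 * apply the inode's shared NUMA policy at this index; it is never linked
 * into any mm, so only the fields set above are ever read.
 */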
#else /* !CONFIG_NUMA */
#ifdef CONFIG_TMPFS
static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
}
#endif /* CONFIG_TMPFS */

static inline struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	return swapin_readahead(swap, gfp, NULL, 0);
}

static inline struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	return alloc_page(gfp);
}
#endif /* CONFIG_NUMA */

#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	return NULL;
}
#endif
/*
 * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty, since we also free the swap
 * entry: a page cannot live in both the swap and page cache.
 */
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo;
	struct page *page;
	swp_entry_t swap;
	int error;
	int once = 0;

	if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT))
		return -EFBIG;
repeat:
	swap.val = 0;
	page = find_lock_page(mapping, index);
	if (radix_tree_exceptional_entry(page)) {
		swap = radix_to_swp_entry(page);
		page = NULL;
	}

	if (sgp != SGP_WRITE &&
	    ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
		error = -EINVAL;
		goto failed;
	}

	if (page || (sgp == SGP_READ && !swap.val)) {
		/*
		 * Once we can get the page lock, it must be uptodate:
		 * if there were an error in reading back from swap,
		 * the page would not be inserted into the filecache.
		 */
		BUG_ON(page && !PageUptodate(page));
		*pagep = page;
		return 0;
	}

	/*
	 * Fast cache lookup did not find it:
	 * bring it back from swap or allocate.
	 */
	info = SHMEM_I(inode);
	sbinfo = SHMEM_SB(inode->i_sb);

	if (swap.val) {
		/* Look it up and read it in.. */
		page = lookup_swap_cache(swap);
		if (!page) {
			/* here we actually do the io */
			if (fault_type)
				*fault_type |= VM_FAULT_MAJOR;
			page = shmem_swapin(swap, gfp, info, index);
			if (!page) {
				error = -ENOMEM;
				goto failed;
			}
		}

		/* We have to do this with page locked to prevent races */
		lock_page(page);
		if (!PageUptodate(page)) {
			error = -EIO;
			goto failed;
		}
		wait_on_page_writeback(page);

		/* Someone may have already done it for us */
		if (page->mapping) {
			if (page->mapping == mapping &&
			    page->index == index)
				goto done;
			error = -EEXIST;
			goto failed;
		}

		error = mem_cgroup_cache_charge(page, current->mm,
						gfp & GFP_RECLAIM_MASK);
		if (!error)
			error = shmem_add_to_page_cache(page, mapping, index,
						gfp, swp_to_radix_entry(swap));
		if (error)
			goto failed;

		spin_lock(&info->lock);
		info->swapped--;
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);

		delete_from_swap_cache(page);
		set_page_dirty(page);
		swap_free(swap);

	} else {
		if (shmem_acct_block(info->flags)) {
			error = -ENOSPC;
			goto failed;
		}
		if (sbinfo->max_blocks) {
			if (percpu_counter_compare(&sbinfo->used_blocks,
						sbinfo->max_blocks) >= 0) {
				error = -ENOSPC;
				goto unacct;
			}
			percpu_counter_inc(&sbinfo->used_blocks);
		}

		page = shmem_alloc_page(gfp, info, index);
		if (!page) {
			error = -ENOMEM;
			goto decused;
		}

		SetPageSwapBacked(page);
		__set_page_locked(page);
		error = mem_cgroup_cache_charge(page, current->mm,
						gfp & GFP_RECLAIM_MASK);
		if (!error)
			error = shmem_add_to_page_cache(page, mapping, index,
						gfp, NULL);
		if (error)
			goto decused;
		lru_cache_add_anon(page);

		spin_lock(&info->lock);
		info->alloced++;
		inode->i_blocks += BLOCKS_PER_PAGE;
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);

		clear_highpage(page);
		flush_dcache_page(page);
		SetPageUptodate(page);
		if (sgp == SGP_DIRTY)
			set_page_dirty(page);
	}
done:
	/* Perhaps the file has been truncated since we checked */
	if (sgp != SGP_WRITE &&
	    ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
		error = -EINVAL;
		goto trunc;
	}
	*pagep = page;
	return 0;

	/*
	 * Error recovery.
	 */
trunc:
	ClearPageDirty(page);
	delete_from_page_cache(page);
	spin_lock(&info->lock);
	info->alloced--;
	inode->i_blocks -= BLOCKS_PER_PAGE;
	spin_unlock(&info->lock);
decused:
	if (sbinfo->max_blocks)
		percpu_counter_add(&sbinfo->used_blocks, -1);
unacct:
	shmem_unacct_blocks(info->flags, 1);
failed:
	if (swap.val && error != -EINVAL) {
		struct page *test = find_get_page(mapping, index);
		if (test && !radix_tree_exceptional_entry(test))
			page_cache_release(test);
		/* Have another try if the entry has changed */
		if (test != swp_to_radix_entry(swap))
			error = -EEXIST;
	}
	if (page) {
		unlock_page(page);
		page_cache_release(page);
	}
	if (error == -ENOSPC && !once++) {
		info = SHMEM_I(inode);
		spin_lock(&info->lock);
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);
		goto repeat;
	}
	if (error == -EEXIST)
		goto repeat;
	return error;
}
1063 if (error == -EEXIST)
1068 static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1070 struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
1072 int ret = VM_FAULT_LOCKED;
1075 * Trinity finds that probing a hole which tmpfs is punching can
1076 * prevent the hole-punch from ever completing: which in turn
1077 * locks writers out with its hold on i_mutex. So refrain from
1078 * faulting pages into the hole while it's being punched. Although
1079 * shmem_truncate_range() does remove the additions, it may be unable to
1080 * keep up, as each new page needs its own unmap_mapping_range() call,
1081 * and the i_mmap tree grows ever slower to scan if new vmas are added.
1083 * It does not matter if we sometimes reach this check just before the
1084 * hole-punch begins, so that one fault then races with the punch:
1085 * we just need to make racing faults a rare case.
1087 * The implementation below would be much simpler if we just used a
1088 * standard mutex or completion: but we cannot take i_mutex in fault,
1089 * and bloating every shmem inode for this unlikely case would be sad.
1091 if (unlikely(inode->i_private)) {
1092 struct shmem_falloc *shmem_falloc;
1094 spin_lock(&inode->i_lock);
1095 shmem_falloc = inode->i_private;
1097 vmf->pgoff >= shmem_falloc->start &&
1098 vmf->pgoff < shmem_falloc->next) {
1099 wait_queue_head_t *shmem_falloc_waitq;
1100 DEFINE_WAIT(shmem_fault_wait);
1102 ret = VM_FAULT_NOPAGE;
1103 if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
1104 !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
1105 /* It's polite to up mmap_sem if we can */
1106 up_read(&vma->vm_mm->mmap_sem);
1107 ret = VM_FAULT_RETRY;
1110 shmem_falloc_waitq = shmem_falloc->waitq;
1111 prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
1112 TASK_UNINTERRUPTIBLE);
1113 spin_unlock(&inode->i_lock);
1117 * shmem_falloc_waitq points into the vmtruncate_range()
1118 * stack of the hole-punching task: shmem_falloc_waitq
1119 * is usually invalid by the time we reach here, but
1120 * finish_wait() does not dereference it in that case;
1121 * though i_lock needed lest racing with wake_up_all().
1123 spin_lock(&inode->i_lock);
1124 finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
1125 spin_unlock(&inode->i_lock);
1128 spin_unlock(&inode->i_lock);
1131 error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
1133 return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
1135 if (ret & VM_FAULT_MAJOR) {
1136 count_vm_event(PGMAJFAULT);
1137 mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
int vmtruncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	/*
	 * If the underlying filesystem is not going to provide
	 * a way to truncate a range of blocks (punch a hole) -
	 * we should return failure right now.
	 * Only CONFIG_SHMEM shmem.c ever supported i_op->truncate_range().
	 */
	if (inode->i_op->truncate_range != shmem_truncate_range)
		return -ENOSYS;

	mutex_lock(&inode->i_mutex);
	{
		struct shmem_falloc shmem_falloc;
		struct address_space *mapping = inode->i_mapping;
		loff_t unmap_start = round_up(lstart, PAGE_SIZE);
		loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);

		shmem_falloc.waitq = &shmem_falloc_waitq;
		shmem_falloc.start = unmap_start >> PAGE_SHIFT;
		shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
		spin_lock(&inode->i_lock);
		inode->i_private = &shmem_falloc;
		spin_unlock(&inode->i_lock);

		if ((u64)unmap_end > (u64)unmap_start)
			unmap_mapping_range(mapping, unmap_start,
					    1 + unmap_end - unmap_start, 0);
		shmem_truncate_range(inode, lstart, lend);
		/* No need to unmap again: hole-punching leaves COWed pages */

		spin_lock(&inode->i_lock);
		inode->i_private = NULL;
		wake_up_all(&shmem_falloc_waitq);
		spin_unlock(&inode->i_lock);
	}
	mutex_unlock(&inode->i_mutex);
	return 0;
}
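/*
 * Added commentary (not from the original source): in this kernel,
 * hole-punching reaches vmtruncate_range() via madvise(MADV_REMOVE) on a
 * tmpfs mapping, roughly:
 *
 *	madvise(addr, length, MADV_REMOVE);
 *
 * with addr/length translated by madvise_remove() into the lstart/lend
 * byte range seen above.
 */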
#ifdef CONFIG_NUMA
static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
}

static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
					  unsigned long addr)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	pgoff_t index;

	index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
}
#endif

int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);
	int retval = -ENOMEM;

	spin_lock(&info->lock);
	if (lock && !(info->flags & VM_LOCKED)) {
		if (!user_shm_lock(inode->i_size, user))
			goto out_nomem;
		info->flags |= VM_LOCKED;
		mapping_set_unevictable(file->f_mapping);
	}
	if (!lock && (info->flags & VM_LOCKED) && user) {
		user_shm_unlock(inode->i_size, user);
		info->flags &= ~VM_LOCKED;
		mapping_clear_unevictable(file->f_mapping);
	}
	retval = 0;

out_nomem:
	spin_unlock(&info->lock);
	return retval;
}
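/*
 * Added commentary (not from the original source): SysV IPC is the main
 * caller here; SHM_LOCK and SHM_UNLOCK on a segment end up as roughly
 *
 *	shmem_lock(shp->shm_file, 1, user);	-- SHM_LOCK
 *	shmem_lock(shp->shm_file, 0, user);	-- SHM_UNLOCK
 *
 * with shmem_unlock_mapping() above restoring the pages to their LRU
 * lists after SHM_UNLOCK.
 */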
static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &shmem_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;
	return 0;
}

static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
				     int mode, dev_t dev, unsigned long flags)
{
	struct inode *inode;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (shmem_reserve_inode(sb))
		return NULL;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);
		inode->i_blocks = 0;
		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_generation = get_seconds();
		info = SHMEM_I(inode);
		memset(info, 0, (char *)inode - (char *)info);
		spin_lock_init(&info->lock);
		info->flags = flags & VM_NORESERVE;
		INIT_LIST_HEAD(&info->swaplist);
		INIT_LIST_HEAD(&info->xattr_list);
		cache_no_acl(inode);

		switch (mode & S_IFMT) {
		default:
			inode->i_op = &shmem_special_inode_operations;
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_mapping->a_ops = &shmem_aops;
			inode->i_op = &shmem_inode_operations;
			inode->i_fop = &shmem_file_operations;
			mpol_shared_policy_init(&info->policy,
						 shmem_get_sbmpol(sbinfo));
			break;
		case S_IFDIR:
			inc_nlink(inode);
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * BOGO_DIRENT_SIZE;
			inode->i_op = &shmem_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
			break;
		case S_IFLNK:
			/*
			 * Must not load anything in the rbtree,
			 * mpol_free_shared_policy will not be called.
			 */
			mpol_shared_policy_init(&info->policy, NULL);
			break;
		}
	} else
		shmem_free_inode(sb);
	return inode;
}
static const struct inode_operations shmem_symlink_inode_operations;
static const struct inode_operations shmem_short_symlink_operations;

static int
shmem_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
}

static int
shmem_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;

	if (pos + copied > inode->i_size)
		i_size_write(inode, pos + copied);

	set_page_dirty(page);
	unlock_page(page);
	page_cache_release(page);

	return copied;
}
static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index;
	unsigned long offset;
	enum sgp_type sgp = SGP_READ;

	/*
	 * Might this read be for a stacking filesystem?  Then when reading
	 * holes of a sparse file, we actually need to allocate those pages,
	 * and even mark them dirty, so it cannot exceed the max_blocks limit.
	 */
	if (segment_eq(get_fs(), KERNEL_DS))
		sgp = SGP_DIRTY;

	index = *ppos >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page = NULL;
		pgoff_t end_index;
		unsigned long nr, ret;
		loff_t i_size = i_size_read(inode);

		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset)
				break;
		}

		desc->error = shmem_getpage(inode, index, &page, sgp, NULL);
		if (desc->error) {
			if (desc->error == -EINVAL)
				desc->error = 0;
			break;
		}
		if (page)
			unlock_page(page);

		/*
		 * We must evaluate after, since reads (unlike writes)
		 * are called without i_mutex protection against truncate
		 */
		nr = PAGE_CACHE_SIZE;
		i_size = i_size_read(inode);
		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset) {
				if (page)
					page_cache_release(page);
				break;
			}
		}
		nr -= offset;

		if (page) {
			/*
			 * If users can be writing to this page using arbitrary
			 * virtual addresses, take care about potential aliasing
			 * before reading the page on the kernel side.
			 */
			if (mapping_writably_mapped(mapping))
				flush_dcache_page(page);
			/*
			 * Mark the page accessed if we read the beginning.
			 */
			if (!offset)
				mark_page_accessed(page);
		} else {
			page = ZERO_PAGE(0);
			page_cache_get(page);
		}

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		page_cache_release(page);
		if (ret != nr || !desc->count)
			break;

		cond_resched();
	}

	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(filp);
}
static ssize_t shmem_file_aio_read(struct kiocb *iocb,
		const struct iovec *iov, unsigned long nr_segs, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	ssize_t retval;
	unsigned long seg;
	size_t count;
	loff_t *ppos = &iocb->ki_pos;

	retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
	if (retval)
		return retval;

	for (seg = 0; seg < nr_segs; seg++) {
		read_descriptor_t desc;

		desc.written = 0;
		desc.arg.buf = iov[seg].iov_base;
		desc.count = iov[seg].iov_len;
		if (desc.count == 0)
			continue;
		desc.error = 0;
		do_shmem_file_read(filp, ppos, &desc, file_read_actor);
		retval += desc.written;
		if (desc.error) {
			retval = retval ?: desc.error;
			break;
		}
		if (desc.count > 0)
			break;
	}
	return retval;
}

static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
				struct pipe_inode_info *pipe, size_t len,
				unsigned int flags)
{
	struct address_space *mapping = in->f_mapping;
	struct inode *inode = mapping->host;
	unsigned int loff, nr_pages, req_pages;
	struct page *pages[PIPE_DEF_BUFFERS];
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct page *page;
	pgoff_t index, end_index;
	loff_t isize, left;
	int error, page_nr;
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.nr_pages_max = PIPE_DEF_BUFFERS,
		.flags = flags,
		.ops = &page_cache_pipe_buf_ops,
		.spd_release = spd_release_page,
	};

	isize = i_size_read(inode);
	if (unlikely(*ppos >= isize))
		return 0;

	left = isize - *ppos;
	if (unlikely(left < len))
		len = left;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	index = *ppos >> PAGE_CACHE_SHIFT;
	loff = *ppos & ~PAGE_CACHE_MASK;
	req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	nr_pages = min(req_pages, pipe->buffers);

	spd.nr_pages = find_get_pages_contig(mapping, index,
						nr_pages, spd.pages);
	index += spd.nr_pages;
	error = 0;

	while (spd.nr_pages < nr_pages) {
		error = shmem_getpage(inode, index, &page, SGP_CACHE, NULL);
		if (error)
			break;
		unlock_page(page);
		spd.pages[spd.nr_pages++] = page;
		index++;
	}

	index = *ppos >> PAGE_CACHE_SHIFT;
	nr_pages = spd.nr_pages;
	spd.nr_pages = 0;

	for (page_nr = 0; page_nr < nr_pages; page_nr++) {
		unsigned int this_len;

		if (!len)
			break;

		this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
		page = spd.pages[page_nr];

		if (!PageUptodate(page) || page->mapping != mapping) {
			error = shmem_getpage(inode, index, &page,
							SGP_CACHE, NULL);
			if (error)
				break;
			unlock_page(page);
			page_cache_release(spd.pages[page_nr]);
			spd.pages[page_nr] = page;
		}

		isize = i_size_read(inode);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index))
			break;

		if (end_index == index) {
			unsigned int plen;

			plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (plen <= loff)
				break;

			this_len = min(this_len, plen - loff);
			len = this_len;
		}

		spd.partial[page_nr].offset = loff;
		spd.partial[page_nr].len = this_len;
		len -= this_len;
		loff = 0;
		spd.nr_pages++;
		index++;
	}

	while (page_nr < nr_pages)
		page_cache_release(spd.pages[page_nr++]);

	if (spd.nr_pages)
		error = splice_to_pipe(pipe, &spd);

	splice_shrink_spd(&spd);

	if (error > 0) {
		*ppos += error;
		file_accessed(in);
	}
	return error;
}
static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);

	buf->f_type = TMPFS_MAGIC;
	buf->f_bsize = PAGE_CACHE_SIZE;
	buf->f_namelen = NAME_MAX;
	if (sbinfo->max_blocks) {
		buf->f_blocks = sbinfo->max_blocks;
		buf->f_bavail =
		buf->f_bfree  = sbinfo->max_blocks -
				percpu_counter_sum(&sbinfo->used_blocks);
	}
	if (sbinfo->max_inodes) {
		buf->f_files = sbinfo->max_inodes;
		buf->f_ffree = sbinfo->free_inodes;
	}
	/* else leave those fields 0 like simple_statfs */
	return 0;
}

/*
 * File creation. Allocate an inode, and we're done..
 */
static int
shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
	if (inode) {
		error = security_inode_init_security(inode, dir,
						     &dentry->d_name,
						     NULL, NULL);
		if (error) {
			if (error != -EOPNOTSUPP) {
				iput(inode);
				return error;
			}
		}
#ifdef CONFIG_TMPFS_POSIX_ACL
		error = generic_acl_init(inode, dir);
		if (error) {
			iput(inode);
			return error;
		}
#else
		error = 0;
#endif
		dir->i_size += BOGO_DIRENT_SIZE;
		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
		d_instantiate(dentry, inode);
		dget(dentry); /* Extra count - pin the dentry in core */
	}
	return error;
}
static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	int error;

	if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
		return error;
	inc_nlink(dir);
	return 0;
}

static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
		struct nameidata *nd)
{
	return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
}

static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int ret;

	/*
	 * No ordinary (disk based) filesystem counts links as inodes;
	 * but each new link needs a new dentry, pinning lowmem, and
	 * tmpfs dentries cannot be pruned until they are unlinked.
	 */
	ret = shmem_reserve_inode(inode->i_sb);
	if (ret)
		goto out;

	dir->i_size += BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	inc_nlink(inode);
	ihold(inode);	/* New dentry reference */
	dget(dentry);	/* Extra pinning count for the created dentry */
	d_instantiate(dentry, inode);
out:
	return ret;
}

static int shmem_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
		shmem_free_inode(inode->i_sb);

	dir->i_size -= BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	drop_nlink(inode);
	dput(dentry);	/* Undo the count from "create" - this does all the work */
	return 0;
}

static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
{
	if (!simple_empty(dentry))
		return -ENOTEMPTY;

	drop_nlink(dentry->d_inode);
	drop_nlink(dir);
	return shmem_unlink(dir, dentry);
}
/*
 * The VFS layer already does all the dentry stuff for rename,
 * we just have to decrement the usage count for the target if
 * it exists so that the VFS layer correctly frees it when it
 * is removed.
 */
static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int they_are_dirs = S_ISDIR(inode->i_mode);

	if (!simple_empty(new_dentry))
		return -ENOTEMPTY;

	if (new_dentry->d_inode) {
		(void) shmem_unlink(new_dir, new_dentry);
		if (they_are_dirs)
			drop_nlink(old_dir);
	} else if (they_are_dirs) {
		drop_nlink(old_dir);
		inc_nlink(new_dir);
	}

	old_dir->i_size -= BOGO_DIRENT_SIZE;
	new_dir->i_size += BOGO_DIRENT_SIZE;
	old_dir->i_ctime = old_dir->i_mtime =
	new_dir->i_ctime = new_dir->i_mtime =
	inode->i_ctime = CURRENT_TIME;
	return 0;
}
static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
	int error;
	int len;
	struct inode *inode;
	struct page *page;
	char *kaddr;
	struct shmem_inode_info *info;

	len = strlen(symname) + 1;
	if (len > PAGE_CACHE_SIZE)
		return -ENAMETOOLONG;

	inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
	if (!inode)
		return -ENOSPC;

	error = security_inode_init_security(inode, dir, &dentry->d_name,
					     NULL, NULL);
	if (error) {
		if (error != -EOPNOTSUPP) {
			iput(inode);
			return error;
		}
		error = 0;
	}

	info = SHMEM_I(inode);
	inode->i_size = len-1;
	if (len <= SHORT_SYMLINK_LEN) {
		info->symlink = kmemdup(symname, len, GFP_KERNEL);
		if (!info->symlink) {
			iput(inode);
			return -ENOMEM;
		}
		inode->i_op = &shmem_short_symlink_operations;
	} else {
		error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
		if (error) {
			iput(inode);
			return error;
		}
		inode->i_mapping->a_ops = &shmem_aops;
		inode->i_op = &shmem_symlink_inode_operations;
		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(kaddr, symname, len);
		kunmap_atomic(kaddr, KM_USER0);
		set_page_dirty(page);
		unlock_page(page);
		page_cache_release(page);
	}
	dir->i_size += BOGO_DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
}

static void *shmem_follow_short_symlink(struct dentry *dentry, struct nameidata *nd)
{
	nd_set_link(nd, SHMEM_I(dentry->d_inode)->symlink);
	return NULL;
}

static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct page *page = NULL;
	int error = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
	nd_set_link(nd, error ? ERR_PTR(error) : kmap(page));
	if (page)
		unlock_page(page);
	return page;
}

static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
{
	if (!IS_ERR(nd_get_link(nd))) {
		struct page *page = cookie;
		kunmap(page);
		mark_page_accessed(page);
		page_cache_release(page);
	}
}
#ifdef CONFIG_TMPFS_XATTR
/*
 * Superblocks without xattr inode operations may get some security.* xattr
 * support from the LSM "for free". As soon as we have any other xattrs
 * like ACLs, we also need to implement the security.* handlers at
 * filesystem level, though.
 */

static int shmem_xattr_get(struct dentry *dentry, const char *name,
			   void *buffer, size_t size)
{
	struct shmem_inode_info *info;
	struct shmem_xattr *xattr;
	int ret = -ENODATA;

	info = SHMEM_I(dentry->d_inode);

	spin_lock(&info->lock);
	list_for_each_entry(xattr, &info->xattr_list, list) {
		if (strcmp(name, xattr->name))
			continue;

		ret = xattr->size;
		if (buffer) {
			if (size < xattr->size)
				ret = -ERANGE;
			else
				memcpy(buffer, xattr->value, xattr->size);
		}
		break;
	}
	spin_unlock(&info->lock);
	return ret;
}

static int shmem_xattr_set(struct dentry *dentry, const char *name,
			   const void *value, size_t size, int flags)
{
	struct inode *inode = dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_xattr *xattr;
	struct shmem_xattr *new_xattr = NULL;
	size_t len;
	int err = 0;

	/* value == NULL means remove */
	if (value) {
		/* wrap around? */
		len = sizeof(*new_xattr) + size;
		if (len <= sizeof(*new_xattr))
			return -ENOMEM;

		new_xattr = kmalloc(len, GFP_KERNEL);
		if (!new_xattr)
			return -ENOMEM;

		new_xattr->name = kstrdup(name, GFP_KERNEL);
		if (!new_xattr->name) {
			kfree(new_xattr);
			return -ENOMEM;
		}

		new_xattr->size = size;
		memcpy(new_xattr->value, value, size);
	}

	spin_lock(&info->lock);
	list_for_each_entry(xattr, &info->xattr_list, list) {
		if (!strcmp(name, xattr->name)) {
			if (flags & XATTR_CREATE) {
				xattr = new_xattr;
				err = -EEXIST;
			} else if (new_xattr) {
				list_replace(&xattr->list, &new_xattr->list);
			} else {
				list_del(&xattr->list);
			}
			goto out;
		}
	}
	if (flags & XATTR_REPLACE) {
		xattr = new_xattr;
		err = -ENODATA;
	} else {
		list_add(&new_xattr->list, &info->xattr_list);
		xattr = NULL;
	}
out:
	spin_unlock(&info->lock);
	if (xattr)
		kfree(xattr->name);
	kfree(xattr);
	return err;
}

static const struct xattr_handler *shmem_xattr_handlers[] = {
#ifdef CONFIG_TMPFS_POSIX_ACL
	&generic_acl_access_handler,
	&generic_acl_default_handler,
#endif
	NULL
};

static int shmem_xattr_validate(const char *name)
{
	struct { const char *prefix; size_t len; } arr[] = {
		{ XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
		{ XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(arr); i++) {
		size_t preflen = arr[i].len;
		if (strncmp(name, arr[i].prefix, preflen) == 0) {
			if (!name[preflen])
				return -EINVAL;
			return 0;
		}
	}
	return -EOPNOTSUPP;
}

static ssize_t shmem_getxattr(struct dentry *dentry, const char *name,
			      void *buffer, size_t size)
{
	int err;

	/*
	 * If this is a request for a synthetic attribute in the system.*
	 * namespace use the generic infrastructure to resolve a handler
	 * for it via sb->s_xattr.
	 */
	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
		return generic_getxattr(dentry, name, buffer, size);

	err = shmem_xattr_validate(name);
	if (err)
		return err;

	return shmem_xattr_get(dentry, name, buffer, size);
}

static int shmem_setxattr(struct dentry *dentry, const char *name,
			  const void *value, size_t size, int flags)
{
	int err;

	/*
	 * If this is a request for a synthetic attribute in the system.*
	 * namespace use the generic infrastructure to resolve a handler
	 * for it via sb->s_xattr.
	 */
	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
		return generic_setxattr(dentry, name, value, size, flags);

	err = shmem_xattr_validate(name);
	if (err)
		return err;

	if (size == 0)
		value = "";  /* empty EA, do not remove */

	return shmem_xattr_set(dentry, name, value, size, flags);
}

static int shmem_removexattr(struct dentry *dentry, const char *name)
{
	int err;

	/*
	 * If this is a request for a synthetic attribute in the system.*
	 * namespace use the generic infrastructure to resolve a handler
	 * for it via sb->s_xattr.
	 */
	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
		return generic_removexattr(dentry, name);

	err = shmem_xattr_validate(name);
	if (err)
		return err;

	return shmem_xattr_set(dentry, name, NULL, 0, XATTR_REPLACE);
}

static bool xattr_is_trusted(const char *name)
{
	return !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN);
}

static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
	bool trusted = capable(CAP_SYS_ADMIN);
	struct shmem_xattr *xattr;
	struct shmem_inode_info *info;
	size_t used = 0;

	info = SHMEM_I(dentry->d_inode);

	spin_lock(&info->lock);
	list_for_each_entry(xattr, &info->xattr_list, list) {
		size_t len;

		/* skip "trusted." attributes for unprivileged callers */
		if (!trusted && xattr_is_trusted(xattr->name))
			continue;

		len = strlen(xattr->name) + 1;
		used += len;
		if (buffer) {
			if (size < used) {
				used = -ERANGE;
				break;
			}
			memcpy(buffer, xattr->name, len);
			buffer += len;
		}
	}
	spin_unlock(&info->lock);

	return used;
}
#endif /* CONFIG_TMPFS_XATTR */
static const struct inode_operations shmem_short_symlink_operations = {
	.readlink	= generic_readlink,
	.follow_link	= shmem_follow_short_symlink,
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
};

static const struct inode_operations shmem_symlink_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= shmem_follow_link,
	.put_link	= shmem_put_link,
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
};

static struct dentry *shmem_get_parent(struct dentry *child)
{
	return ERR_PTR(-ESTALE);
}

static int shmem_match(struct inode *ino, void *vfh)
{
	__u32 *fh = vfh;
	__u64 inum = fh[2];
	inum = (inum << 32) | fh[1];
	return ino->i_ino == inum && fh[0] == ino->i_generation;
}

static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
		struct fid *fid, int fh_len, int fh_type)
{
	struct inode *inode;
	struct dentry *dentry = NULL;
	u64 inum;

	if (fh_len < 3)
		return NULL;

	inum = fid->raw[2];
	inum = (inum << 32) | fid->raw[1];

	inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
			shmem_match, fid->raw);
	if (inode) {
		dentry = d_find_alias(inode);
		iput(inode);
	}

	return dentry;
}
static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
				int connectable)
{
	struct inode *inode = dentry->d_inode;

	if (*len < 3) {
		*len = 3;
		return 255;
	}

	if (inode_unhashed(inode)) {
		/* Unfortunately insert_inode_hash is not idempotent,
		 * so as we hash inodes here rather than at creation
		 * time, we need a lock to ensure we only try
		 * to do it once
		 */
		static DEFINE_SPINLOCK(lock);
		spin_lock(&lock);
		if (inode_unhashed(inode))
			__insert_inode_hash(inode,
					    inode->i_ino + inode->i_generation);
		spin_unlock(&lock);
	}

	fh[0] = inode->i_generation;
	fh[1] = inode->i_ino;
	fh[2] = ((__u64)inode->i_ino) >> 32;

	*len = 3;
	return 1;
}

static const struct export_operations shmem_export_ops = {
	.get_parent	= shmem_get_parent,
	.encode_fh	= shmem_encode_fh,
	.fh_to_dentry	= shmem_fh_to_dentry,
};
static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
			       bool remount)
{
	char *this_char, *value, *rest;

	while (options != NULL) {
		this_char = options;
		for (;;) {
			/*
			 * NUL-terminate this option: unfortunately,
			 * mount options form a comma-separated list,
			 * but mpol's nodelist may also contain commas.
			 */
			options = strchr(options, ',');
			if (options == NULL)
				break;
			options++;
			if (!isdigit(*options)) {
				options[-1] = '\0';
				break;
			}
		}
		if (!*this_char)
			continue;
		if ((value = strchr(this_char,'=')) != NULL) {
			*value++ = 0;
		} else {
			printk(KERN_ERR
			    "tmpfs: No value for mount option '%s'\n",
			    this_char);
			return 1;
		}

		if (!strcmp(this_char,"size")) {
			unsigned long long size;
			size = memparse(value,&rest);
			if (*rest == '%') {
				size <<= PAGE_SHIFT;
				size *= totalram_pages;
				do_div(size, 100);
				rest++;
			}
			if (*rest)
				goto bad_val;
			sbinfo->max_blocks =
				DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
		} else if (!strcmp(this_char,"nr_blocks")) {
			sbinfo->max_blocks = memparse(value, &rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"nr_inodes")) {
			sbinfo->max_inodes = memparse(value, &rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"mode")) {
			if (remount)
				continue;
			sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"uid")) {
			if (remount)
				continue;
			sbinfo->uid = simple_strtoul(value, &rest, 0);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"gid")) {
			if (remount)
				continue;
			sbinfo->gid = simple_strtoul(value, &rest, 0);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"mpol")) {
			if (mpol_parse_str(value, &sbinfo->mpol, 1))
				goto bad_val;
		} else {
			printk(KERN_ERR "tmpfs: Bad mount option %s\n",
			       this_char);
			return 1;
		}
	}
	return 0;

bad_val:
	printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
	       value, this_char);
	return 1;
}
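/*
 * Added commentary (not from the original source): the options parsed
 * above correspond to a mount line such as
 *
 *	mount -t tmpfs -o size=50%,nr_inodes=10240,mode=1777 tmpfs /mnt
 *
 * where a trailing '%' on size scales by totalram_pages, as handled in
 * the "size" branch above.
 */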
static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	struct shmem_sb_info config = *sbinfo;
	unsigned long inodes;
	int error = -EINVAL;

	config.mpol = NULL;
	if (shmem_parse_options(data, &config, true))
		return error;

	spin_lock(&sbinfo->stat_lock);
	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
	if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0)
		goto out;
	if (config.max_inodes < inodes)
		goto out;
	/*
	 * Those tests disallow limited->unlimited while any are in use;
	 * but we must separately disallow unlimited->limited, because
	 * in that case we have no record of how much is already in use.
	 */
	if (config.max_blocks && !sbinfo->max_blocks)
		goto out;
	if (config.max_inodes && !sbinfo->max_inodes)
		goto out;

	error = 0;
	sbinfo->max_blocks  = config.max_blocks;
	sbinfo->max_inodes  = config.max_inodes;
	sbinfo->free_inodes = config.max_inodes - inodes;

	/*
	 * Preserve previous mempolicy unless mpol remount option was specified.
	 */
	if (config.mpol) {
		mpol_put(sbinfo->mpol);
		sbinfo->mpol = config.mpol;	/* transfers initial ref */
	}
out:
	spin_unlock(&sbinfo->stat_lock);
	return error;
}

static int shmem_show_options(struct seq_file *seq, struct vfsmount *vfs)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(vfs->mnt_sb);

	if (sbinfo->max_blocks != shmem_default_max_blocks())
		seq_printf(seq, ",size=%luk",
			sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10));
	if (sbinfo->max_inodes != shmem_default_max_inodes())
		seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
	if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
		seq_printf(seq, ",mode=%03o", sbinfo->mode);
	if (sbinfo->uid != 0)
		seq_printf(seq, ",uid=%u", sbinfo->uid);
	if (sbinfo->gid != 0)
		seq_printf(seq, ",gid=%u", sbinfo->gid);
	shmem_show_mpol(seq, sbinfo->mpol);
	return 0;
}
#endif /* CONFIG_TMPFS */

static void shmem_put_super(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	percpu_counter_destroy(&sbinfo->used_blocks);
	kfree(sbinfo);
	sb->s_fs_info = NULL;
}

int shmem_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;
	struct dentry *root;
	struct shmem_sb_info *sbinfo;
	int err = -ENOMEM;

	/* Round up to L1_CACHE_BYTES to resist false sharing */
	sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
				L1_CACHE_BYTES), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;

	sbinfo->mode = S_IRWXUGO | S_ISVTX;
	sbinfo->uid = current_fsuid();
	sbinfo->gid = current_fsgid();
	sb->s_fs_info = sbinfo;
#ifdef CONFIG_TMPFS
	/*
	 * By default we only allow half of the physical ram per
	 * tmpfs instance, limiting inodes to one per page of lowmem;
	 * but the internal instance is left unlimited.
	 */
	if (!(sb->s_flags & MS_NOUSER)) {
		sbinfo->max_blocks = shmem_default_max_blocks();
		sbinfo->max_inodes = shmem_default_max_inodes();
		if (shmem_parse_options(data, sbinfo, false)) {
			err = -EINVAL;
			goto failed;
		}
	}
	sb->s_export_op = &shmem_export_ops;
#else
	sb->s_flags |= MS_NOUSER;
#endif

	spin_lock_init(&sbinfo->stat_lock);
	if (percpu_counter_init(&sbinfo->used_blocks, 0))
		goto failed;
	sbinfo->free_inodes = sbinfo->max_inodes;

	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = TMPFS_MAGIC;
	sb->s_op = &shmem_ops;
	sb->s_time_gran = 1;
#ifdef CONFIG_TMPFS_XATTR
	sb->s_xattr = shmem_xattr_handlers;
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	sb->s_flags |= MS_POSIXACL;
#endif

	inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
	if (!inode)
		goto failed;
	inode->i_uid = sbinfo->uid;
	inode->i_gid = sbinfo->gid;
	root = d_alloc_root(inode);
	if (!root)
		goto failed_iput;
	sb->s_root = root;
	return 0;

failed_iput:
	iput(inode);
failed:
	shmem_put_super(sb);
	return err;
}
static struct kmem_cache *shmem_inode_cachep;

static struct inode *shmem_alloc_inode(struct super_block *sb)
{
	struct shmem_inode_info *info;
	info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
	if (!info)
		return NULL;
	return &info->vfs_inode;
}

static void shmem_destroy_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	INIT_LIST_HEAD(&inode->i_dentry);
	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
}

static void shmem_destroy_inode(struct inode *inode)
{
	if ((inode->i_mode & S_IFMT) == S_IFREG)
		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
	call_rcu(&inode->i_rcu, shmem_destroy_callback);
}

static void shmem_init_inode(void *foo)
{
	struct shmem_inode_info *info = foo;
	inode_init_once(&info->vfs_inode);
}

static int shmem_init_inodecache(void)
{
	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
				sizeof(struct shmem_inode_info),
				0, SLAB_PANIC, shmem_init_inode);
	return 0;
}

static void shmem_destroy_inodecache(void)
{
	kmem_cache_destroy(shmem_inode_cachep);
}

static const struct address_space_operations shmem_aops = {
	.writepage	= shmem_writepage,
	.set_page_dirty	= __set_page_dirty_no_writeback,
#ifdef CONFIG_TMPFS
	.write_begin	= shmem_write_begin,
	.write_end	= shmem_write_end,
#endif
	.migratepage	= migrate_page,
	.error_remove_page = generic_error_remove_page,
};

static const struct file_operations shmem_file_operations = {
	.mmap		= shmem_mmap,
#ifdef CONFIG_TMPFS
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= shmem_file_aio_read,
	.aio_write	= generic_file_aio_write,
	.fsync		= noop_fsync,
	.splice_read	= shmem_file_splice_read,
	.splice_write	= generic_file_splice_write,
#endif
};

static const struct inode_operations shmem_inode_operations = {
	.setattr	= shmem_setattr,
	.truncate_range	= shmem_truncate_range,
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
};

static const struct inode_operations shmem_dir_inode_operations = {
#ifdef CONFIG_TMPFS
	.create		= shmem_create,
	.lookup		= simple_lookup,
	.link		= shmem_link,
	.unlink		= shmem_unlink,
	.symlink	= shmem_symlink,
	.mkdir		= shmem_mkdir,
	.rmdir		= shmem_rmdir,
	.mknod		= shmem_mknod,
	.rename		= shmem_rename,
#endif
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_setattr,
#endif
};

static const struct inode_operations shmem_special_inode_operations = {
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_setattr,
#endif
};

static const struct super_operations shmem_ops = {
	.alloc_inode	= shmem_alloc_inode,
	.destroy_inode	= shmem_destroy_inode,
#ifdef CONFIG_TMPFS
	.statfs		= shmem_statfs,
	.remount_fs	= shmem_remount_fs,
	.show_options	= shmem_show_options,
#endif
	.evict_inode	= shmem_evict_inode,
	.drop_inode	= generic_delete_inode,
	.put_super	= shmem_put_super,
};

static const struct vm_operations_struct shmem_vm_ops = {
	.fault		= shmem_fault,
#ifdef CONFIG_NUMA
	.set_policy     = shmem_set_policy,
	.get_policy     = shmem_get_policy,
#endif
};

static struct dentry *shmem_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_nodev(fs_type, flags, data, shmem_fill_super);
}

static struct file_system_type shmem_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "tmpfs",
	.mount		= shmem_mount,
	.kill_sb	= kill_litter_super,
};
int __init shmem_init(void)
{
	int error;

	error = bdi_init(&shmem_backing_dev_info);
	if (error)
		goto out4;

	error = shmem_init_inodecache();
	if (error)
		goto out3;

	error = register_filesystem(&shmem_fs_type);
	if (error) {
		printk(KERN_ERR "Could not register tmpfs\n");
		goto out2;
	}

	shm_mnt = vfs_kern_mount(&shmem_fs_type, MS_NOUSER,
				 shmem_fs_type.name, NULL);
	if (IS_ERR(shm_mnt)) {
		error = PTR_ERR(shm_mnt);
		printk(KERN_ERR "Could not kern_mount tmpfs\n");
		goto out1;
	}
	return 0;

out1:
	unregister_filesystem(&shmem_fs_type);
out2:
	shmem_destroy_inodecache();
out3:
	bdi_destroy(&shmem_backing_dev_info);
out4:
	shm_mnt = ERR_PTR(error);
	return error;
}
#else /* !CONFIG_SHMEM */

/*
 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
 *
 * This is intended for small systems where the benefits of the full
 * shmem code (swap-backed and resource-limited) are outweighed by
 * their complexity. On systems without swap this code should be
 * effectively equivalent, but much lighter weight.
 */

#include <linux/ramfs.h>

static struct file_system_type shmem_fs_type = {
	.name		= "tmpfs",
	.mount		= ramfs_mount,
	.kill_sb	= kill_litter_super,
};

int __init shmem_init(void)
{
	BUG_ON(register_filesystem(&shmem_fs_type) != 0);

	shm_mnt = kern_mount(&shmem_fs_type);
	BUG_ON(IS_ERR(shm_mnt));

	return 0;
}

int shmem_unuse(swp_entry_t swap, struct page *page)
{
	return 0;
}

int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	return 0;
}

void shmem_unlock_mapping(struct address_space *mapping)
{
}

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

int vmtruncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	/* Only CONFIG_SHMEM shmem.c ever supported i_op->truncate_range(). */
	return -ENOSYS;
}

#define shmem_vm_ops				generic_file_vm_ops
#define shmem_file_operations			ramfs_file_operations
#define shmem_get_inode(sb, dir, mode, dev, flags)	ramfs_get_inode(sb, dir, mode, dev)
#define shmem_acct_size(flags, size)		0
#define shmem_unacct_size(flags, size)		do {} while (0)

#endif /* CONFIG_SHMEM */
/**
 * shmem_file_setup - get an unlinked file living in tmpfs
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
{
	int error;
	struct file *file;
	struct inode *inode;
	struct path path;
	struct dentry *root;
	struct qstr this;

	if (IS_ERR(shm_mnt))
		return (void *)shm_mnt;

	if (size < 0 || size > MAX_LFS_FILESIZE)
		return ERR_PTR(-EINVAL);

	if (shmem_acct_size(flags, size))
		return ERR_PTR(-ENOMEM);

	error = -ENOMEM;
	this.name = name;
	this.len = strlen(name);
	this.hash = 0; /* will go */
	root = shm_mnt->mnt_root;
	path.dentry = d_alloc(root, &this);
	if (!path.dentry)
		goto put_memory;
	path.mnt = mntget(shm_mnt);

	error = -ENOSPC;
	inode = shmem_get_inode(root->d_sb, NULL, S_IFREG | S_IRWXUGO, 0, flags);
	if (!inode)
		goto put_dentry;

	d_instantiate(path.dentry, inode);
	inode->i_size = size;
	clear_nlink(inode);	/* It is unlinked */
#ifndef CONFIG_MMU
	error = ramfs_nommu_expand_for_mapping(inode, size);
	if (error)
		goto put_dentry;
#endif

	error = -ENFILE;
	file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
		  &shmem_file_operations);
	if (!file)
		goto put_dentry;

	return file;

put_dentry:
	path_put(&path);
put_memory:
	shmem_unacct_size(flags, size);
	return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(shmem_file_setup);
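/*
 * Usage sketch (added commentary, not from the original source):
 * shmem_zero_setup() below is the canonical caller, e.g.
 *
 *	file = shmem_file_setup("dev/zero", size, vma->vm_flags);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *
 * yielding an unlinked tmpfs file, already sized and pre-accounted
 * unless VM_NORESERVE was passed.
 */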
/**
 * shmem_zero_setup - setup a shared anonymous mapping
 * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
 */
int shmem_zero_setup(struct vm_area_struct *vma)
{
	struct file *file;
	loff_t size = vma->vm_end - vma->vm_start;

	file = shmem_file_setup("dev/zero", size, vma->vm_flags);
	if (IS_ERR(file))
		return PTR_ERR(file);

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = file;
	vma->vm_ops = &shmem_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;
	return 0;
}

/**
 * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
 * @mapping:	the page's address_space
 * @index:	the page index
 * @gfp:	the page allocator flags to use if allocating
 *
 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
 * with any new page allocations done using the specified allocation flags.
 * But read_cache_page_gfp() uses the ->readpage() method: which does not
 * suit tmpfs, since it may have pages in swapcache, and needs to find those
 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
 *
 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
 */
struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
					 pgoff_t index, gfp_t gfp)
{
#ifdef CONFIG_SHMEM
	struct inode *inode = mapping->host;
	struct page *page;
	int error;

	BUG_ON(mapping->a_ops != &shmem_aops);
	error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE, gfp, NULL);
	if (error)
		page = ERR_PTR(error);
	else
		unlock_page(page);
	return page;
#else
	/*
	 * The tiny !SHMEM case uses ramfs without swap
	 */
	return read_cache_page_gfp(mapping, index, gfp);
#endif
}
EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
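/*
 * Usage sketch (added commentary, not from the original source): a GPU
 * driver reading object pages through the swap-aware path might do
 *
 *	gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;
 *	struct page *page = shmem_read_mapping_page_gfp(mapping, index, gfp);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *
 * as described for i915_gem_object_get_pages_gtt() in the comment above.
 */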