diff --git a/mm/shmem.c b/mm/shmem.c
index 70da7a0..b2a35eb 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -175,12 +175,12 @@ static inline void shmem_unacct_blocks(unsigned long flags, long pages)
                vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
 }
 
-static struct super_operations shmem_ops;
+static const struct super_operations shmem_ops;
 static const struct address_space_operations shmem_aops;
 static const struct file_operations shmem_file_operations;
-static struct inode_operations shmem_inode_operations;
-static struct inode_operations shmem_dir_inode_operations;
-static struct inode_operations shmem_special_inode_operations;
+static const struct inode_operations shmem_inode_operations;
+static const struct inode_operations shmem_dir_inode_operations;
+static const struct inode_operations shmem_special_inode_operations;
 static struct vm_operations_struct shmem_vm_ops;
 
 static struct backing_dev_info shmem_backing_dev_info  __read_mostly = {
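
A note on the constifications scattered through this patch: a const operations
table is emitted into the kernel's read-only data section, so a stray write
through a corrupted pointer faults immediately instead of silently redirecting
method calls. A minimal sketch of the idiom (example_iops is illustrative, not
part of this patch):

        /* Sketch: a const method table lands in .rodata, so its
         * function pointers cannot be corrupted at runtime. */
        static const struct inode_operations example_iops = {
                .truncate       = shmem_truncate,
                .setattr        = shmem_notify_change,
        };

shmem_vm_ops stays non-const, presumably because vm_area_struct.vm_ops was not
yet a pointer-to-const in this era.
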
@@ -402,26 +402,38 @@ static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long
 /*
  * shmem_free_swp - free some swap entries in a directory
  *
- * @dir:   pointer to the directory
- * @edir:  pointer after last entry of the directory
+ * @dir:        pointer to the directory
+ * @edir:       pointer after last entry of the directory
+ * @punch_lock: pointer to spinlock when needed for the holepunch case
  */
-static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir)
+static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir,
+                                               spinlock_t *punch_lock)
 {
+       spinlock_t *punch_unlock = NULL;
        swp_entry_t *ptr;
        int freed = 0;
 
        for (ptr = dir; ptr < edir; ptr++) {
                if (ptr->val) {
+                       if (unlikely(punch_lock)) {
+                               punch_unlock = punch_lock;
+                               punch_lock = NULL;
+                               spin_lock(punch_unlock);
+                               if (!ptr->val)
+                                       continue;
+                       }
                        free_swap_and_cache(*ptr);
                        *ptr = (swp_entry_t){0};
                        freed++;
                }
        }
+       if (punch_unlock)
+               spin_unlock(punch_unlock);
        return freed;
 }
 
-static int shmem_map_and_free_swp(struct page *subdir,
-               int offset, int limit, struct page ***dir)
+static int shmem_map_and_free_swp(struct page *subdir, int offset,
+               int limit, struct page ***dir, spinlock_t *punch_lock)
 {
        swp_entry_t *ptr;
        int freed = 0;
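
The punch_lock handoff in shmem_free_swp() is easiest to see in isolation: the
spinlock is taken lazily on the first nonzero entry, that entry is rechecked
under the lock, and the lock is then held for the remainder of the scan. A
standalone sketch of the same idiom, with hypothetical names (scan_and_clear
is not kernel code):

        /* Sketch of the take-lock-once pattern used by shmem_free_swp():
         * acquire the lock only if we find work, recheck the racing
         * entry under the lock, keep the lock until the scan ends. */
        static int scan_and_clear(unsigned long *p, unsigned long *end,
                                  spinlock_t *lock)
        {
                spinlock_t *held = NULL;
                int cleared = 0;

                for (; p < end; p++) {
                        if (!*p)
                                continue;
                        if (lock) {             /* first nonzero entry */
                                held = lock;
                                lock = NULL;
                                spin_lock(held);
                                if (!*p)        /* lost the race */
                                        continue;
                        }
                        *p = 0;
                        cleared++;
                }
                if (held)
                        spin_unlock(held);
                return cleared;
        }
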
@@ -431,7 +443,8 @@ static int shmem_map_and_free_swp(struct page *subdir,
                int size = limit - offset;
                if (size > LATENCY_LIMIT)
                        size = LATENCY_LIMIT;
-               freed += shmem_free_swp(ptr+offset, ptr+offset+size);
+               freed += shmem_free_swp(ptr+offset, ptr+offset+size,
+                                                       punch_lock);
                if (need_resched()) {
                        shmem_swp_unmap(ptr);
                        if (*dir) {
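
shmem_map_and_free_swp() merely threads the new punch_lock through to each
LATENCY_LIMIT-sized batch; the batching shape itself is unchanged. Stripped of
the kmap bookkeeping, the loop is roughly this (do_batch() is a stand-in for
the shmem_free_swp() call):

        while (offset < limit) {
                int size = limit - offset;
                if (size > LATENCY_LIMIT)       /* bound work per pass */
                        size = LATENCY_LIMIT;
                freed += do_batch(offset, size);
                offset += size;
                cond_resched();                 /* scheduling point */
        }
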
@@ -481,7 +494,10 @@ static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
        long nr_swaps_freed = 0;
        int offset;
        int freed;
-       int punch_hole = 0;
+       int punch_hole;
+       spinlock_t *needs_lock;
+       spinlock_t *punch_lock;
+       unsigned long upper_limit;
 
        inode->i_ctime = inode->i_mtime = CURRENT_TIME;
        idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
@@ -492,11 +508,20 @@ static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
        info->flags |= SHMEM_TRUNCATE;
        if (likely(end == (loff_t) -1)) {
                limit = info->next_index;
+               upper_limit = SHMEM_MAX_INDEX;
                info->next_index = idx;
+               needs_lock = NULL;
+               punch_hole = 0;
        } else {
-               limit = (end + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-               if (limit > info->next_index)
-                       limit = info->next_index;
+               if (end + 1 >= inode->i_size) { /* we may free a little more */
+                       limit = (inode->i_size + PAGE_CACHE_SIZE - 1) >>
+                                                       PAGE_CACHE_SHIFT;
+                       upper_limit = SHMEM_MAX_INDEX;
+               } else {
+                       limit = (end + 1) >> PAGE_CACHE_SHIFT;
+                       upper_limit = limit;
+               }
+               needs_lock = &info->lock;
                punch_hole = 1;
        }
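
A worked example of the new limit/upper_limit split, with 4KB pages
(PAGE_CACHE_SHIFT == 12): punching start == 0x3000, end == 0x5fff in a 64KB
file gives idx == 3; end + 1 == 0x6000 is below i_size, so limit ==
upper_limit == 6 and only pages 3..5, wholly inside the hole, may be freed.
If end + 1 reaches i_size, everything past the hole is dead anyway: limit
rounds i_size up and upper_limit becomes SHMEM_MAX_INDEX, preserving the old
truncation behaviour of freeing "a little more". The previous code applied
the truncation-style limit to holes too, which is what let a punch free pages
beyond the requested range.
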
 
@@ -513,17 +538,30 @@ static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
                size = limit;
                if (size > SHMEM_NR_DIRECT)
                        size = SHMEM_NR_DIRECT;
-               nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size);
+               nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size, needs_lock);
        }
 
        /*
         * If there are no indirect blocks or we are punching a hole
         * below indirect blocks, nothing to be done.
         */
-       if (!topdir || (punch_hole && (limit <= SHMEM_NR_DIRECT)))
+       if (!topdir || limit <= SHMEM_NR_DIRECT)
                goto done2;
 
-       BUG_ON(limit <= SHMEM_NR_DIRECT);
+       /*
+        * The truncation case has already dropped info->lock, and we're safe
+        * because i_size and next_index have already been lowered, preventing
+        * access beyond.  But in the punch_hole case, we still need to take
+        * the lock when updating the swap directory, because there might be
+        * racing accesses by shmem_getpage(SGP_CACHE), shmem_unuse_inode or
+        * shmem_writepage.  However, whenever we find we can remove a whole
+        * directory page (not at the misaligned start or end of the range),
+        * we first NULLify its pointer in the level above, and then have no
+        * need to take the lock when updating its contents: needs_lock and
+        * punch_lock (either pointing to info->lock or NULL) manage this.
+        */
+
+       upper_limit -= SHMEM_NR_DIRECT;
        limit -= SHMEM_NR_DIRECT;
        idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0;
        offset = idx % ENTRIES_PER_PAGE;
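
For the arithmetic above, recall the tmpfs swap-vector layout of this era:
the first SHMEM_NR_DIRECT (16) entries live directly in the inode's info
structure, and everything beyond is reached through pages of ENTRIES_PER_PAGE
pointers (PAGE_CACHE_SIZE / sizeof(unsigned long), i.e. 1024 with 32-bit longs
and 4KB pages). So idx == 20, say, becomes idx == 4 after the subtraction,
with offset == 4 into the first indirect page; upper_limit is lowered by the
same SHMEM_NR_DIRECT so the two remain comparable.
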
@@ -543,8 +581,14 @@ static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
                if (*dir) {
                        diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) %
                                ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE;
-                       if (!diroff && !offset) {
-                               *dir = NULL;
+                       if (!diroff && !offset && upper_limit >= stage) {
+                               if (needs_lock) {
+                                       spin_lock(needs_lock);
+                                       *dir = NULL;
+                                       spin_unlock(needs_lock);
+                                       needs_lock = NULL;
+                               } else
+                                       *dir = NULL;
                                nr_pages_to_free++;
                                list_add(&middir->lru, &pages_to_free);
                        }
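
The upper_limit >= stage test is the heart of the overshoot fix, and is
easiest to check with numbers (32-bit, 4KB pages, so ENTRIES_PER_PAGEPAGE ==
1024*1024): a doubly-indirect block spans indices [stage -
ENTRIES_PER_PAGEPAGE, stage), so it may be detached wholesale only when
stage <= upper_limit. With upper_limit == 1500000, for instance, the block
ending at stage == 1048576 may be freed outright, while the next (stage ==
2097152) overshoots and must instead be walked entry by entry. When punching,
the detach itself happens under needs_lock so that a racing shmem_getpage(),
shmem_unuse_inode() or shmem_writepage() cannot install an entry in a page we
are about to free; once the page is unreachable, its contents are freed
lock-free.
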
@@ -570,39 +614,55 @@ static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
                        }
                        stage = idx + ENTRIES_PER_PAGEPAGE;
                        middir = *dir;
-                       *dir = NULL;
-                       nr_pages_to_free++;
-                       list_add(&middir->lru, &pages_to_free);
+                       if (punch_hole)
+                               needs_lock = &info->lock;
+                       if (upper_limit >= stage) {
+                               if (needs_lock) {
+                                       spin_lock(needs_lock);
+                                       *dir = NULL;
+                                       spin_unlock(needs_lock);
+                                       needs_lock = NULL;
+                               } else
+                                       *dir = NULL;
+                               nr_pages_to_free++;
+                               list_add(&middir->lru, &pages_to_free);
+                       }
                        shmem_dir_unmap(dir);
                        cond_resched();
                        dir = shmem_dir_map(middir);
                        diroff = 0;
                }
+               punch_lock = needs_lock;
                subdir = dir[diroff];
-               if (subdir && page_private(subdir)) {
+               if (subdir && !offset && upper_limit-idx >= ENTRIES_PER_PAGE) {
+                       if (needs_lock) {
+                               spin_lock(needs_lock);
+                               dir[diroff] = NULL;
+                               spin_unlock(needs_lock);
+                               punch_lock = NULL;
+                       } else
+                               dir[diroff] = NULL;
+                       nr_pages_to_free++;
+                       list_add(&subdir->lru, &pages_to_free);
+               }
+               if (subdir && page_private(subdir) /* has swap entries */) {
                        size = limit - idx;
                        if (size > ENTRIES_PER_PAGE)
                                size = ENTRIES_PER_PAGE;
                        freed = shmem_map_and_free_swp(subdir,
-                                               offset, size, &dir);
+                                       offset, size, &dir, punch_lock);
                        if (!dir)
                                dir = shmem_dir_map(middir);
                        nr_swaps_freed += freed;
-                       if (offset)
+                       if (offset || punch_lock) {
                                spin_lock(&info->lock);
-                       set_page_private(subdir, page_private(subdir) - freed);
-                       if (offset)
+                               set_page_private(subdir,
+                                       page_private(subdir) - freed);
                                spin_unlock(&info->lock);
-                       if (!punch_hole)
-                               BUG_ON(page_private(subdir) > offset);
-               }
-               if (offset)
-                       offset = 0;
-               else if (subdir && !page_private(subdir)) {
-                       dir[diroff] = NULL;
-                       nr_pages_to_free++;
-                       list_add(&subdir->lru, &pages_to_free);
+                       } else
+                               BUG_ON(page_private(subdir) != freed);
                }
+               offset = 0;
        }
 done1:
        shmem_dir_unmap(dir);
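
The rewritten tail of the loop also explains the new BUG_ON:
page_private(subdir) counts the swap entries held in that subdir page.
Whenever entries were freed with locking in play (offset != 0 at a misaligned
start, or punch_lock still set), a racing shmem_writepage() may be adding
entries concurrently, so the count must be adjusted under info->lock. Only
when the page was detached first, and was therefore ours alone from start to
finish, must freed account for every entry it held, hence
BUG_ON(page_private(subdir) != freed) replacing the old weaker check.
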
@@ -614,8 +674,16 @@ done2:
                 * generic_delete_inode did it, before we lowered next_index.
                 * Also, though shmem_getpage checks i_size before adding to
                 * cache, no recheck after: so fix the narrow window there too.
+                *
+                * Recalling truncate_inode_pages_range and unmap_mapping_range
+                * every time for punch_hole (which never got a chance to clear
+                * SHMEM_PAGEIN at the start of vmtruncate_range) is expensive,
+                * yet hardly ever necessary: try to optimize them out later.
                 */
                truncate_inode_pages_range(inode->i_mapping, start, end);
+               if (punch_hole)
+                       unmap_mapping_range(inode->i_mapping, start,
+                                                       end - start, 1);
        }
 
        spin_lock(&info->lock);
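
On the done2 additions: vmtruncate_range() already unmapped the range before
calling in here, but because i_size is unchanged for a punch, a concurrent
fault can legitimately map a page back into the hole while the tree is being
pruned. Repeating the unmap after the prune closes that window. The last
argument is even_cows; roughly:

        /* Zap ptes over the hole again in case a racing fault
         * reinstated a page; even_cows == 1 zaps private COW
         * copies of the range as well. */
        unmap_mapping_range(inode->i_mapping, start, end - start, 1);
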
@@ -1228,7 +1296,8 @@ failed:
        return error;
 }
 
-struct page *shmem_nopage(struct vm_area_struct *vma, unsigned long address, int *type)
+static struct page *shmem_nopage(struct vm_area_struct *vma,
+                                unsigned long address, int *type)
 {
        struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
        struct page *page = NULL;
@@ -1335,7 +1404,7 @@ out_nomem:
        return retval;
 }
 
-int shmem_mmap(struct file *file, struct vm_area_struct *vma)
+static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
 {
        file_accessed(file);
        vma->vm_ops = &shmem_vm_ops;
@@ -1410,8 +1479,8 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
 }
 
 #ifdef CONFIG_TMPFS
-static struct inode_operations shmem_symlink_inode_operations;
-static struct inode_operations shmem_symlink_inline_operations;
+static const struct inode_operations shmem_symlink_inode_operations;
+static const struct inode_operations shmem_symlink_inline_operations;
 
 /*
  * Normally tmpfs makes no use of shmem_prepare_write, but it
@@ -1904,12 +1973,12 @@ static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *co
        }
 }
 
-static struct inode_operations shmem_symlink_inline_operations = {
+static const struct inode_operations shmem_symlink_inline_operations = {
        .readlink       = generic_readlink,
        .follow_link    = shmem_follow_link_inline,
 };
 
-static struct inode_operations shmem_symlink_inode_operations = {
+static const struct inode_operations shmem_symlink_inode_operations = {
        .truncate       = shmem_truncate,
        .readlink       = generic_readlink,
        .follow_link    = shmem_follow_link,
@@ -2316,7 +2385,7 @@ static void destroy_inodecache(void)
 
 static const struct address_space_operations shmem_aops = {
        .writepage      = shmem_writepage,
-       .set_page_dirty = __set_page_dirty_nobuffers,
+       .set_page_dirty = __set_page_dirty_no_writeback,
 #ifdef CONFIG_TMPFS
        .prepare_write  = shmem_prepare_write,
        .commit_write   = simple_commit_write,
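
On the set_page_dirty switch: tmpfs pages have no backing device, so the
radix-tree dirty tagging and dirty-memory accounting done by
__set_page_dirty_nobuffers (under tree_lock) are pure overhead for a
filesystem that never writes back. The replacement helper in
mm/page-writeback.c of this era is roughly just the page flag (paraphrased,
not verbatim):

        static int set_dirty_no_writeback(struct page *page)
        {
                if (!PageDirty(page))
                        SetPageDirty(page);     /* no tag, no accounting */
                return 0;
        }
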
@@ -2335,7 +2404,7 @@ static const struct file_operations shmem_file_operations = {
 #endif
 };
 
-static struct inode_operations shmem_inode_operations = {
+static const struct inode_operations shmem_inode_operations = {
        .truncate       = shmem_truncate,
        .setattr        = shmem_notify_change,
        .truncate_range = shmem_truncate_range,
@@ -2349,7 +2418,7 @@ static struct inode_operations shmem_inode_operations = {
 
 };
 
-static struct inode_operations shmem_dir_inode_operations = {
+static const struct inode_operations shmem_dir_inode_operations = {
 #ifdef CONFIG_TMPFS
        .create         = shmem_create,
        .lookup         = simple_lookup,
@@ -2371,7 +2440,7 @@ static struct inode_operations shmem_dir_inode_operations = {
 #endif
 };
 
-static struct inode_operations shmem_special_inode_operations = {
+static const struct inode_operations shmem_special_inode_operations = {
 #ifdef CONFIG_TMPFS_POSIX_ACL
        .setattr        = shmem_notify_change,
        .setxattr       = generic_setxattr,
@@ -2382,7 +2451,7 @@ static struct inode_operations shmem_special_inode_operations = {
 #endif
 };
 
-static struct super_operations shmem_ops = {
+static const struct super_operations shmem_ops = {
        .alloc_inode    = shmem_alloc_inode,
        .destroy_inode  = shmem_destroy_inode,
 #ifdef CONFIG_TMPFS