diff --git a/mm/shmem.c b/mm/shmem.c
index 45b9acb..139b8ad 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -28,7 +28,7 @@
 #include <linux/pagemap.h>
 #include <linux/file.h>
 #include <linux/mm.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/swap.h>
 
 static struct vfsmount *shm_mnt;
@@ -379,7 +379,7 @@ static int shmem_free_swap(struct address_space *mapping,
 /*
  * Pagevec may contain swap entries, so shuffle up pages before releasing.
  */
-static void shmem_pagevec_release(struct pagevec *pvec)
+static void shmem_deswap_pagevec(struct pagevec *pvec)
 {
        int i, j;
 
@@ -389,7 +389,36 @@ static void shmem_pagevec_release(struct pagevec *pvec)
                        pvec->pages[j++] = page;
        }
        pvec->nr = j;
-       pagevec_release(pvec);
+}
+
+/*
+ * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
+ */
+void shmem_unlock_mapping(struct address_space *mapping)
+{
+       struct pagevec pvec;
+       pgoff_t indices[PAGEVEC_SIZE];
+       pgoff_t index = 0;
+
+       pagevec_init(&pvec, 0);
+       /*
+        * Minor point, but we might as well stop if someone else SHM_LOCKs it.
+        */
+       while (!mapping_unevictable(mapping)) {
+               /*
+                * Avoid pagevec_lookup(): find_get_pages() returns 0 (as if it
+                * had finished) when it hits a row of PAGEVEC_SIZE swap entries.
+                */
+               pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
+                                       PAGEVEC_SIZE, pvec.pages, indices);
+               if (!pvec.nr)
+                       break;
+               index = indices[pvec.nr - 1] + 1;
+               shmem_deswap_pagevec(&pvec);
+               check_move_unevictable_pages(pvec.pages, pvec.nr);
+               pagevec_release(&pvec);
+               cond_resched();
+       }
 }
 
 /*
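
The shmem_unlock_mapping() helper added above serves SysV IPC's SHM_UNLOCK:
pages stranded on the unevictable LRU while the segment was locked are found
in batches (skipping swap entries via shmem_deswap_pagevec()) and moved back
to their evictable lists, with cond_resched() keeping the scan preemptible.
The matching call is removed from shmem_lock() further down, so the rescue
now happens from the IPC layer. A minimal userspace sketch of the syscall
path that ends up here (a hypothetical demo; SHM_LOCK may need CAP_IPC_LOCK
or a sufficient RLIMIT_MEMLOCK):

#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
	/* Create and touch a 1 MiB SysV shared memory segment. */
	int id = shmget(IPC_PRIVATE, 1 << 20, IPC_CREAT | 0600);
	if (id < 0) {
		perror("shmget");
		return 1;
	}
	char *p = shmat(id, NULL, 0);
	if (p == (char *)-1) {
		perror("shmat");
		return 1;
	}
	memset(p, 0xaa, 1 << 20);	/* fault pages into the page cache */

	/* SHM_LOCK marks the whole mapping unevictable... */
	if (shmctl(id, SHM_LOCK, NULL) < 0)
		perror("shmctl(SHM_LOCK)");

	/* ...and SHM_UNLOCK is what now reaches shmem_unlock_mapping()
	 * to put the pages back on their evictable lists. */
	if (shmctl(id, SHM_UNLOCK, NULL) < 0)
		perror("shmctl(SHM_UNLOCK)");

	shmdt(p);
	shmctl(id, IPC_RMID, NULL);
	return 0;
}
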
@@ -440,7 +469,8 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
                        }
                        unlock_page(page);
                }
-               shmem_pagevec_release(&pvec);
+               shmem_deswap_pagevec(&pvec);
+               pagevec_release(&pvec);
                mem_cgroup_uncharge_end();
                cond_resched();
                index++;
@@ -470,7 +500,8 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
                        continue;
                }
                if (index == start && indices[0] > end) {
-                       shmem_pagevec_release(&pvec);
+                       shmem_deswap_pagevec(&pvec);
+                       pagevec_release(&pvec);
                        break;
                }
                mem_cgroup_uncharge_start();
@@ -494,7 +525,8 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
                        }
                        unlock_page(page);
                }
-               shmem_pagevec_release(&pvec);
+               shmem_deswap_pagevec(&pvec);
+               pagevec_release(&pvec);
                mem_cgroup_uncharge_end();
                index++;
        }
@@ -563,7 +595,7 @@ static void shmem_evict_inode(struct inode *inode)
                kfree(xattr->name);
                kfree(xattr);
        }
-       BUG_ON(inode->i_blocks);
+       WARN_ON(inode->i_blocks);
        shmem_free_inode(inode->i_sb);
        end_writeback(inode);
 }
@@ -766,24 +798,28 @@ static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
 static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
                        struct shmem_inode_info *info, pgoff_t index)
 {
-       struct mempolicy mpol, *spol;
        struct vm_area_struct pvma;
-
-       spol = mpol_cond_copy(&mpol,
-                       mpol_shared_policy_lookup(&info->policy, index));
+       struct page *page;
 
        /* Create a pseudo vma that just contains the policy */
        pvma.vm_start = 0;
        pvma.vm_pgoff = index;
        pvma.vm_ops = NULL;
-       pvma.vm_policy = spol;
-       return swapin_readahead(swap, gfp, &pvma, 0);
+       pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);
+
+       page = swapin_readahead(swap, gfp, &pvma, 0);
+
+       /* Drop reference taken by mpol_shared_policy_lookup() */
+       mpol_cond_put(pvma.vm_policy);
+
+       return page;
 }
 
 static struct page *shmem_alloc_page(gfp_t gfp,
                        struct shmem_inode_info *info, pgoff_t index)
 {
        struct vm_area_struct pvma;
+       struct page *page;
 
        /* Create a pseudo vma that just contains the policy */
        pvma.vm_start = 0;
@@ -791,10 +827,12 @@ static struct page *shmem_alloc_page(gfp_t gfp,
        pvma.vm_ops = NULL;
        pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);
 
-       /*
-        * alloc_page_vma() will drop the shared policy reference
-        */
-       return alloc_page_vma(gfp, &pvma, 0);
+       page = alloc_page_vma(gfp, &pvma, 0);
+
+       /* Drop reference taken by mpol_shared_policy_lookup() */
+       mpol_cond_put(pvma.vm_policy);
+
+       return page;
 }
 #else /* !CONFIG_NUMA */
 #ifdef CONFIG_TMPFS
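
Both NUMA hunks above replace the old mpol_cond_copy()-onto-the-stack dance
with a simpler contract: mpol_shared_policy_lookup() hands back the shared
policy with a reference held (or NULL, meaning the default policy), and
mpol_cond_put() drops that reference only when one was actually taken. A toy
userspace analogy of that lookup/cond_put pairing (all names below are
invented for illustration, not kernel API):

#include <assert.h>
#include <stdio.h>

struct policy {
	int refcount;
};

static struct policy shared_policy = { .refcount = 1 };

/* Analogue of mpol_shared_policy_lookup(): returns NULL for "use the
 * default", otherwise returns the object with a reference taken. */
static struct policy *policy_lookup(int have_shared)
{
	if (!have_shared)
		return NULL;
	shared_policy.refcount++;
	return &shared_policy;
}

/* Analogue of mpol_cond_put(): drop the reference only if lookup took one. */
static void policy_cond_put(struct policy *pol)
{
	if (pol)
		pol->refcount--;
}

int main(void)
{
	struct policy *pol = policy_lookup(1);
	/* ... an allocation would be steered by pol here ... */
	policy_cond_put(pol);
	assert(shared_policy.refcount == 1);	/* balanced again */
	printf("refcount balanced at %d\n", shared_policy.refcount);
	return 0;
}
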
@@ -1068,13 +1106,6 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
                user_shm_unlock(inode->i_size, user);
                info->flags &= ~VM_LOCKED;
                mapping_clear_unevictable(file->f_mapping);
-               /*
-                * Ensure that a racing putback_lru_page() can see
-                * the pages of this mapping are evictable when we
-                * skip them due to !PageLRU during the scan.
-                */
-               smp_mb__after_clear_bit();
-               scan_mapping_unevictable_pages(file->f_mapping);
        }
        retval = 0;
 
@@ -1334,6 +1365,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
        struct splice_pipe_desc spd = {
                .pages = pages,
                .partial = partial,
+               .nr_pages_max = PIPE_DEF_BUFFERS,
                .flags = flags,
                .ops = &page_cache_pipe_buf_ops,
                .spd_release = spd_release_page,
@@ -1422,7 +1454,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
        if (spd.nr_pages)
                error = splice_to_pipe(pipe, &spd);
 
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
 
        if (error > 0) {
                *ppos += error;
@@ -1936,12 +1968,14 @@ static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
 {
        struct inode *inode;
        struct dentry *dentry = NULL;
-       u64 inum = fid->raw[2];
-       inum = (inum << 32) | fid->raw[1];
+       u64 inum;
 
        if (fh_len < 3)
                return NULL;
 
+       inum = fid->raw[2];
+       inum = (inum << 32) | fid->raw[1];
+
        inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
                        shmem_match, fid->raw);
        if (inode) {
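
The fh_to_dentry change is purely an ordering fix: fid->raw[1] and
fid->raw[2] are now read only after the fh_len < 3 check, so a truncated
file handle can no longer cause an out-of-bounds read. The handles decoded
here are visible from userspace; a quick way to inspect what tmpfs hands
out (name_to_handle_at(2) is Linux-specific, available since 2.6.39):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	struct file_handle *fh;
	int mount_id;
	unsigned int i;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <path-on-tmpfs>\n", argv[0]);
		return 1;
	}
	fh = malloc(sizeof(*fh) + MAX_HANDLE_SZ);
	if (!fh)
		return 1;
	fh->handle_bytes = MAX_HANDLE_SZ;

	if (name_to_handle_at(AT_FDCWD, argv[1], fh, &mount_id, 0) < 0) {
		perror("name_to_handle_at");
		return 1;
	}
	/* shmem encodes the inode number and generation in three 32-bit
	 * words, hence the fh_len < 3 rejection above. */
	printf("type %d, %u bytes:", fh->handle_type, fh->handle_bytes);
	for (i = 0; i < fh->handle_bytes; i++)
		printf(" %02x", fh->f_handle[i]);
	printf("\n");
	free(fh);
	return 0;
}
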
@@ -2446,6 +2480,10 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
        return 0;
 }
 
+void shmem_unlock_mapping(struct address_space *mapping)
+{
+}
+
 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
 {
        truncate_inode_pages_range(inode->i_mapping, lstart, lend);
@@ -2546,6 +2584,7 @@ int shmem_zero_setup(struct vm_area_struct *vma)
        vma->vm_flags |= VM_CAN_NONLINEAR;
        return 0;
 }
+EXPORT_SYMBOL_GPL(shmem_zero_setup);
 
 /**
  * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
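
The added EXPORT_SYMBOL_GPL makes shmem_zero_setup() callable from modules
(presumably for a modular user in this tree, such as a DRM driver needing
shmem-backed buffers). shmem_zero_setup() is what mmap() uses to give shared
anonymous mappings a shmem backing object, which is easy to observe from
userspace:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	/* MAP_SHARED | MAP_ANONYMOUS mappings are backed by a shmem
	 * object, set up in the kernel via shmem_zero_setup(). */
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	if (fork() == 0) {
		strcpy(p, "hello via shmem");	/* child writes */
		_exit(0);
	}
	wait(NULL);
	printf("parent reads: %s\n", p);	/* same shmem pages */
	munmap(p, 4096);
	return 0;
}
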