Merge master.kernel.org:/pub/scm/linux/kernel/git/lethal/sh64-2.6
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 8c199f5..b4fd0d7 100644
@@ -174,6 +174,8 @@ __xip_unmap (struct address_space * mapping,
        unsigned long address;
        pte_t *pte;
        pte_t pteval;
+       spinlock_t *ptl;
+       struct page *page;
 
        spin_lock(&mapping->i_mmap_lock);
        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
@@ -181,19 +183,17 @@ __xip_unmap (struct address_space * mapping,
                address = vma->vm_start +
                        ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
                BUG_ON(address < vma->vm_start || address >= vma->vm_end);
-               /*
-                * We need the page_table_lock to protect us from page faults,
-                * munmap, fork, etc...
-                */
-               pte = page_check_address(ZERO_PAGE(address), mm,
-                                        address);
-               if (!IS_ERR(pte)) {
+               page = ZERO_PAGE(address);
+               pte = page_check_address(page, mm, address, &ptl);
+               if (pte) {
                        /* Nuke the page table entry. */
                        flush_cache_page(vma, address, pte_pfn(*pte));
                        pteval = ptep_clear_flush(vma, address, pte);
+                       page_remove_rmap(page);
+                       dec_mm_counter(mm, file_rss);
                        BUG_ON(pte_dirty(pteval));
-                       pte_unmap(pte);
-                       spin_unlock(&mm->page_table_lock);
+                       pte_unmap_unlock(pte, ptl);
+                       page_cache_release(page);
                }
        }
        spin_unlock(&mapping->i_mmap_lock);
@@ -228,7 +228,7 @@ xip_file_nopage(struct vm_area_struct * area,
 
        page = mapping->a_ops->get_xip_page(mapping, pgoff*(PAGE_SIZE/512), 0);
        if (!IS_ERR(page)) {
-               return page;
+               goto out;
        }
        if (PTR_ERR(page) != -ENODATA)
                return NULL;
@@ -249,6 +249,8 @@ xip_file_nopage(struct vm_area_struct * area,
                page = ZERO_PAGE(address);
        }
 
+out:
+       page_cache_get(page);
        return page;
 }
 
@@ -271,7 +273,7 @@ __xip_file_write(struct file *filp, const char __user *buf,
                  size_t count, loff_t pos, loff_t *ppos)
 {
        struct address_space * mapping = filp->f_mapping;
-       struct address_space_operations *a_ops = mapping->a_ops;
+       const struct address_space_operations *a_ops = mapping->a_ops;
        struct inode    *inode = mapping->host;
        long            status = 0;
        struct page     *page;
@@ -336,7 +338,7 @@ __xip_file_write(struct file *filp, const char __user *buf,
        *ppos = pos;
        /*
         * No need to use i_size_read() here, the i_size
-        * cannot change under us because we hold i_sem.
+        * cannot change under us because we hold i_mutex.
         */
        if (pos > inode->i_size) {
                i_size_write(inode, pos);
@@ -356,7 +358,7 @@ xip_file_write(struct file *filp, const char __user *buf, size_t len,
        loff_t pos;
        ssize_t ret;
 
-       down(&inode->i_sem);
+       mutex_lock(&inode->i_mutex);
 
        if (!access_ok(VERIFY_READ, buf, len)) {
                ret=-EFAULT;
@@ -381,14 +383,14 @@ xip_file_write(struct file *filp, const char __user *buf, size_t len,
        if (ret)
                goto out_backing;
 
-       inode_update_time(inode, 1);
+       file_update_time(filp);
 
        ret = __xip_file_write (filp, buf, count, pos, ppos);
 
  out_backing:
        current->backing_dev_info = NULL;
  out_up:
-       up(&inode->i_sem);
+       mutex_unlock(&inode->i_mutex);
        return ret;
 }
 EXPORT_SYMBOL_GPL(xip_file_write);
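
For reference, the pte lookup/teardown pattern that the __xip_unmap hunk switches to can be summarized as below. This is only a sketch assembled from the calls visible in the hunk itself (page_check_address() now taking a spinlock_t ** and returning the pte with the page table lock held, pte_unmap_unlock() dropping both the kmap and the lock, plus the page_remove_rmap()/dec_mm_counter()/page_cache_release() bookkeeping); the helper name unmap_one_zero_page() is hypothetical and does not exist in the tree.

/*
 * Sketch only: per-vma unmap of the zero page at 'address', following the
 * locking pattern introduced in the __xip_unmap hunk above.
 */
static void unmap_one_zero_page(struct vm_area_struct *vma,
				struct mm_struct *mm,
				unsigned long address)
{
	struct page *page = ZERO_PAGE(address);
	spinlock_t *ptl;
	pte_t *pte;
	pte_t pteval;

	/* Returns the mapped pte with *ptl held, or NULL if not mapped. */
	pte = page_check_address(page, mm, address, &ptl);
	if (!pte)
		return;

	flush_cache_page(vma, address, pte_pfn(*pte));
	pteval = ptep_clear_flush(vma, address, pte);	/* nuke the pte */
	page_remove_rmap(page);				/* drop reverse mapping */
	dec_mm_counter(mm, file_rss);			/* adjust rss accounting */
	BUG_ON(pte_dirty(pteval));			/* zero page is never dirty */
	pte_unmap_unlock(pte, ptl);			/* kunmap + unlock ptl */
	page_cache_release(page);			/* ref taken in nopage path */
}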