diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 3b6e384..b960ac8 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -68,13 +68,12 @@ do_xip_mapping_read(struct address_space *mapping,
                if (unlikely(IS_ERR(page))) {
                        if (PTR_ERR(page) == -ENODATA) {
                                /* sparse */
-                               page = virt_to_page(empty_zero_page);
+                               page = ZERO_PAGE(0);
                        } else {
                                desc->error = PTR_ERR(page);
                                goto out;
                        }
-               } else
-                       BUG_ON(!PageUptodate(page));
+               }
 
                /* If users can be writing to this page using arbitrary
                 * virtual addresses, take care about potential aliasing
@@ -84,8 +83,7 @@ do_xip_mapping_read(struct address_space *mapping,
                        flush_dcache_page(page);
 
                /*
-                * Ok, we have the page, and it's up-to-date, so
-                * now we can copy it to user space...
+                * Ok, we have the page, so now we can copy it to user space...
                 *
                 * The actor routine returns how many bytes were actually used..
                 * NOTE! This may not be the same as how much of a user buffer
@@ -164,7 +162,7 @@ EXPORT_SYMBOL_GPL(xip_file_sendfile);
  * xip_write
  *
  * This function walks all vmas of the address_space and unmaps the
- * empty_zero_page when found at pgoff. Should it go in rmap.c?
+ * ZERO_PAGE when found at pgoff. Should it go in rmap.c?
  */
 static void
 __xip_unmap (struct address_space * mapping,
@@ -176,6 +174,8 @@ __xip_unmap (struct address_space * mapping,
        unsigned long address;
        pte_t *pte;
        pte_t pteval;
+       spinlock_t *ptl;
+       struct page *page;
 
        spin_lock(&mapping->i_mmap_lock);
        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
@@ -183,19 +183,17 @@ __xip_unmap (struct address_space * mapping,
                address = vma->vm_start +
                        ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
                BUG_ON(address < vma->vm_start || address >= vma->vm_end);
-               /*
-                * We need the page_table_lock to protect us from page faults,
-                * munmap, fork, etc...
-                */
-               pte = page_check_address(virt_to_page(empty_zero_page), mm,
-                                        address);
-               if (!IS_ERR(pte)) {
+               page = ZERO_PAGE(address);
+               pte = page_check_address(page, mm, address, &ptl);
+               if (pte) {
                        /* Nuke the page table entry. */
-                       flush_cache_page(vma, address, pte_pfn(pte));
+                       flush_cache_page(vma, address, pte_pfn(*pte));
                        pteval = ptep_clear_flush(vma, address, pte);
+                       page_remove_rmap(page);
+                       dec_mm_counter(mm, file_rss);
                        BUG_ON(pte_dirty(pteval));
-                       pte_unmap(pte);
-                       spin_unlock(&mm->page_table_lock);
+                       pte_unmap_unlock(pte, ptl);
+                       page_cache_release(page);
                }
        }
        spin_unlock(&mapping->i_mmap_lock);
@@ -230,8 +228,7 @@ xip_file_nopage(struct vm_area_struct * area,
 
        page = mapping->a_ops->get_xip_page(mapping, pgoff*(PAGE_SIZE/512), 0);
        if (!IS_ERR(page)) {
-               BUG_ON(!PageUptodate(page));
-               return page;
+               goto out;
        }
        if (PTR_ERR(page) != -ENODATA)
                return NULL;
@@ -245,14 +242,15 @@ xip_file_nopage(struct vm_area_struct * area,
                        pgoff*(PAGE_SIZE/512), 1);
                if (IS_ERR(page))
                        return NULL;
-               BUG_ON(!PageUptodate(page));
                /* unmap page at pgoff from all other vmas */
                __xip_unmap(mapping, pgoff);
        } else {
-               /* not shared and writable, use empty_zero_page */
-               page = virt_to_page(empty_zero_page);
+               /* not shared and writable, use ZERO_PAGE() */
+               page = ZERO_PAGE(address);
        }
 
+out:
+       page_cache_get(page);
        return page;
 }
 
@@ -319,8 +317,6 @@ __xip_file_write(struct file *filp, const char __user *buf,
                        break;
                }
 
-               BUG_ON(!PageUptodate(page));
-
                copied = filemap_copy_from_user(page, offset, buf, bytes);
                flush_dcache_page(page);
                if (likely(copied > 0)) {
@@ -342,7 +338,7 @@ __xip_file_write(struct file *filp, const char __user *buf,
        *ppos = pos;
        /*
         * No need to use i_size_read() here, the i_size
-        * cannot change under us because we hold i_sem.
+        * cannot change under us because we hold i_mutex.
         */
        if (pos > inode->i_size) {
                i_size_write(inode, pos);
@@ -362,7 +358,7 @@ xip_file_write(struct file *filp, const char __user *buf, size_t len,
        loff_t pos;
        ssize_t ret;
 
-       down(&inode->i_sem);
+       mutex_lock(&inode->i_mutex);
 
        if (!access_ok(VERIFY_READ, buf, len)) {
                ret=-EFAULT;
@@ -387,14 +383,14 @@ xip_file_write(struct file *filp, const char __user *buf, size_t len,
        if (ret)
                goto out_backing;
 
-       inode_update_time(inode, 1);
+       file_update_time(filp);
 
        ret = __xip_file_write (filp, buf, count, pos, ppos);
 
  out_backing:
        current->backing_dev_info = NULL;
  out_up:
-       up(&inode->i_sem);
+       mutex_unlock(&inode->i_mutex);
        return ret;
 }
 EXPORT_SYMBOL_GPL(xip_file_write);
@@ -435,8 +431,7 @@ xip_truncate_page(struct address_space *mapping, loff_t from)
                        return 0;
                else
                        return PTR_ERR(page);
-       } else
-               BUG_ON(!PageUptodate(page));
+       }
        kaddr = kmap_atomic(page, KM_USER0);
        memset(kaddr + offset, 0, length);
        kunmap_atomic(kaddr, KM_USER0);