Merge ../linux-2.6-watchdog-mm
index 40158b5..669acb2 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
  * Lock ordering in mm:
  *
  * inode->i_mutex      (while writing or truncating, not reading or faulting)
- *   inode->i_alloc_sem
- *
- * When a page fault occurs in writing from user to file, down_read
- * of mmap_sem nests within i_mutex; in sys_msync, i_mutex nests within
- * down_read of mmap_sem; i_mutex and down_write of mmap_sem are never
- * taken together; in truncation, i_mutex is taken outermost.
- *
- * mm->mmap_sem
- *   page->flags PG_locked (lock_page)
- *     mapping->i_mmap_lock
- *       anon_vma->lock
- *         mm->page_table_lock or pte_lock
- *           zone->lru_lock (in mark_page_accessed, isolate_lru_page)
- *           swap_lock (in swap_duplicate, swap_info_get)
- *             mmlist_lock (in mmput, drain_mmlist and others)
- *             mapping->private_lock (in __set_page_dirty_buffers)
- *             inode_lock (in set_page_dirty's __mark_inode_dirty)
- *               sb_lock (within inode_lock in fs/fs-writeback.c)
- *               mapping->tree_lock (widely used, in set_page_dirty,
- *                         in arch-dependent flush_dcache_mmap_lock,
- *                         within inode_lock in __sync_single_inode)
+ *   inode->i_alloc_sem (vmtruncate_range)
+ *   mm->mmap_sem
+ *     page->flags PG_locked (lock_page)
+ *       mapping->i_mmap_lock
+ *         anon_vma->lock
+ *           mm->page_table_lock or pte_lock
+ *             zone->lru_lock (in mark_page_accessed, isolate_lru_page)
+ *             swap_lock (in swap_duplicate, swap_info_get)
+ *               mmlist_lock (in mmput, drain_mmlist and others)
+ *               mapping->private_lock (in __set_page_dirty_buffers)
+ *               inode_lock (in set_page_dirty's __mark_inode_dirty)
+ *                 sb_lock (within inode_lock in fs/fs-writeback.c)
+ *                 mapping->tree_lock (widely used, in set_page_dirty,
+ *                           in arch-dependent flush_dcache_mmap_lock,
+ *                           within inode_lock in __sync_single_inode)
  */
 
 #include <linux/mm.h>
@@ -53,6 +47,7 @@
 #include <linux/rmap.h>
 #include <linux/rcupdate.h>
 #include <linux/module.h>
+#include <linux/kallsyms.h>
 
 #include <asm/tlbflush.h>
 
@@ -434,6 +429,74 @@ int page_referenced(struct page *page, int is_locked)
        return referenced;
 }
 
+static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
+{
+       struct mm_struct *mm = vma->vm_mm;
+       unsigned long address;
+       pte_t *pte;
+       spinlock_t *ptl;
+       int ret = 0;
+
+       address = vma_address(page, vma);
+       if (address == -EFAULT)
+               goto out;
+
+       pte = page_check_address(page, mm, address, &ptl);
+       if (!pte)
+               goto out;
+
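+       /*
+        * Clean and write-protect the pte: the next write through any
+        * mapping will fault, re-dirtying the page via the fault path,
+        * so no dirty state is lost.
+        */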
+       if (pte_dirty(*pte) || pte_write(*pte)) {
+               pte_t entry;
+
+               flush_cache_page(vma, address, pte_pfn(*pte));
+               entry = ptep_clear_flush(vma, address, pte);
+               entry = pte_wrprotect(entry);
+               entry = pte_mkclean(entry);
+               set_pte_at(mm, address, pte, entry);
+               lazy_mmu_prot_update(entry);
+               ret = 1;
+       }
+
+       pte_unmap_unlock(pte, ptl);
+out:
+       return ret;
+}
+
+static int page_mkclean_file(struct address_space *mapping, struct page *page)
+{
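+       /* page->index is in PAGE_CACHE_SIZE units; the prio tree indexes by PAGE_SIZE */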
+       pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+       struct vm_area_struct *vma;
+       struct prio_tree_iter iter;
+       int ret = 0;
+
+       BUG_ON(PageAnon(page));
+
+       spin_lock(&mapping->i_mmap_lock);
+       vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
+               if (vma->vm_flags & VM_SHARED)
+                       ret += page_mkclean_one(page, vma);
+       }
+       spin_unlock(&mapping->i_mmap_lock);
+       return ret;
+}
+
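+/**
+ * page_mkclean - clean and write-protect all mappings of a page
+ * @page: the page to clean, which must be locked
+ *
+ * Returns non-zero if any pte mapping the page was dirty or writable,
+ * or if the architecture's per-page dirty bit
+ * (page_test_and_clear_dirty) was set.
+ */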
+int page_mkclean(struct page *page)
+{
+       int ret = 0;
+
+       BUG_ON(!PageLocked(page));
+
+       if (page_mapped(page)) {
+               struct address_space *mapping = page_mapping(page);
+               if (mapping)
+                       ret = page_mkclean_file(mapping, page);
+       }
+       if (page_test_and_clear_dirty(page))
+               ret = 1;
+
+       return ret;
+}
+
 /**
  * page_set_anon_rmap - setup new anonymous rmap
  * @page:      the page to add the mapping to
@@ -508,18 +571,23 @@ void page_add_file_rmap(struct page *page)
  *
  * The caller needs to hold the pte lock.
  */
-void page_remove_rmap(struct page *page)
+void page_remove_rmap(struct page *page, struct vm_area_struct *vma)
 {
        if (atomic_add_negative(-1, &page->_mapcount)) {
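+               /*
+                * _mapcount starts at -1, so a negative result here means
+                * the last mapping of the page has just gone away.
+                */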
-#ifdef CONFIG_DEBUG_VM
                if (unlikely(page_mapcount(page) < 0)) {
                        printk (KERN_EMERG "Eeek! page_mapcount(page) went negative! (%d)\n", page_mapcount(page));
+                       printk (KERN_EMERG "  page pfn = %lx\n", page_to_pfn(page));
                        printk (KERN_EMERG "  page->flags = %lx\n", page->flags);
                        printk (KERN_EMERG "  page->count = %x\n", page_count(page));
                        printk (KERN_EMERG "  page->mapping = %p\n", page->mapping);
+                       print_symbol (KERN_EMERG "  vma->vm_ops = %s\n", (unsigned long)vma->vm_ops);
+                       if (vma->vm_ops)
+                               print_symbol (KERN_EMERG "  vma->vm_ops->nopage = %s\n", (unsigned long)vma->vm_ops->nopage);
+                       if (vma->vm_file && vma->vm_file->f_op)
+                               print_symbol (KERN_EMERG "  vma->vm_file->f_op->mmap = %s\n", (unsigned long)vma->vm_file->f_op->mmap);
+                       BUG();
                }
-#endif
-               BUG_ON(page_mapcount(page) < 0);
+
                /*
                 * It would be tidy to reset the PageAnon mapping here,
                 * but that might overwrite a racing page_add_anon_rmap
@@ -621,7 +689,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
        dec_mm_counter(mm, file_rss);
 
-       page_remove_rmap(page);
+       page_remove_rmap(page, vma);
        page_cache_release(page);
 
 out_unmap:
@@ -711,7 +779,7 @@ static void try_to_unmap_cluster(unsigned long cursor,
                if (pte_dirty(pteval))
                        set_page_dirty(page);
 
-               page_remove_rmap(page);
+               page_remove_rmap(page, vma);
                page_cache_release(page);
                dec_mm_counter(mm, file_rss);
                (*mapcount)--;
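
For context, a minimal caller sketch of the new interface; this is not part of the patch, and sample_clear_dirty() is a hypothetical name. It shows how writeback code could use page_mkclean() to pull pte dirty state into the struct page before deciding whether a locked page needs to be written out:

	/* Hypothetical sketch only; assumes the caller holds the page lock. */
	static int sample_clear_dirty(struct page *page)
	{
		BUG_ON(!PageLocked(page));

		/*
		 * page_mkclean() cleans and write-protects every pte that
		 * maps the page; a non-zero return means some pte was dirty
		 * or writable, so propagate that into PG_dirty before
		 * testing and clearing it.
		 */
		if (page_mkclean(page))
			set_page_dirty(page);

		return TestClearPageDirty(page);
	}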