mm: add_active_or_unevictable into rmap
author	Hugh Dickins <hugh@veritas.com>	Tue, 6 Jan 2009 22:39:25 +0000 (14:39 -0800)
committer	Linus Torvalds <torvalds@linux-foundation.org>	Tue, 6 Jan 2009 23:59:02 +0000 (15:59 -0800)
lru_cache_add_active_or_unevictable() and page_add_new_anon_rmap() always
appear together.  Save some symbol table space and some jumping around by
removing lru_cache_add_active_or_unevictable() and folding its code into
page_add_new_anon_rmap(), just as we add file pages to the LRU right after
adding them to the page cache.
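
For comparison, the file-page path referred to above pairs the two steps
in the same way.  A simplified sketch of add_to_page_cache_lru() from
mm/filemap.c of this era (error paths and refinements omitted, so take
the details as approximate rather than authoritative):

    int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
                              pgoff_t offset, gfp_t gfp_mask)
    {
            /* first insert the page into the page cache... */
            int ret = add_to_page_cache(page, mapping, offset, gfp_mask);

            /* ...then place it on the appropriate LRU list right away */
            if (ret == 0) {
                    if (page_is_file_cache(page))
                            lru_cache_add_file(page);
                    else
                            lru_cache_add_active_anon(page);
            }
            return ret;
    }

page_add_new_anon_rmap() now follows the same pattern for anon pages:
rmap setup first, LRU placement immediately after.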

Remove the nearby "TODO: is this safe?" comments (yes, it is safe), and
change page_add_new_anon_rmap()'s address BUG_ON to VM_BUG_ON as
originally intended.
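
For reference, VM_BUG_ON() is compiled out unless CONFIG_DEBUG_VM is set,
so the address check costs nothing on production kernels.  Roughly, from
include/linux/mmdebug.h:

    #ifdef CONFIG_DEBUG_VM
    #define VM_BUG_ON(cond) BUG_ON(cond)
    #else
    #define VM_BUG_ON(cond) do { } while (0)
    #endif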

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/swap.h
mm/memory.c
mm/rmap.c
mm/swap.c

index a3af95b..48f309d 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -174,8 +174,6 @@ extern unsigned int nr_free_pagecache_pages(void);
 /* linux/mm/swap.c */
 extern void __lru_cache_add(struct page *, enum lru_list lru);
 extern void lru_cache_add_lru(struct page *, enum lru_list lru);
-extern void lru_cache_add_active_or_unevictable(struct page *,
-                                       struct vm_area_struct *);
 extern void activate_page(struct page *);
 extern void mark_page_accessed(struct page *);
 extern void lru_add_drain(void);
index b5af358..a138c50 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1949,10 +1949,7 @@ gotten:
                 */
                ptep_clear_flush_notify(vma, address, page_table);
                SetPageSwapBacked(new_page);
-               lru_cache_add_active_or_unevictable(new_page, vma);
                page_add_new_anon_rmap(new_page, vma, address);
-
-//TODO:  is this safe?  do_anonymous_page() does it this way.
                set_pte_at(mm, address, page_table, entry);
                update_mmu_cache(vma, address, entry);
                if (old_page) {
@@ -2448,7 +2445,6 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                goto release;
        inc_mm_counter(mm, anon_rss);
        SetPageSwapBacked(page);
-       lru_cache_add_active_or_unevictable(page, vma);
        page_add_new_anon_rmap(page, vma, address);
        set_pte_at(mm, address, page_table, entry);
 
@@ -2597,7 +2593,6 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                if (anon) {
                        inc_mm_counter(mm, anon_rss);
                        SetPageSwapBacked(page);
-                       lru_cache_add_active_or_unevictable(page, vma);
                        page_add_new_anon_rmap(page, vma, address);
                } else {
                        inc_mm_counter(mm, file_rss);
@@ -2607,7 +2602,6 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                                get_page(dirty_page);
                        }
                }
-//TODO:  is this safe?  do_anonymous_page() does it this way.
                set_pte_at(mm, address, page_table, entry);
 
                /* no need to invalidate: a not-present page won't be cached */
index f01e922..10da682 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -47,6 +47,7 @@
 #include <linux/rmap.h>
 #include <linux/rcupdate.h>
 #include <linux/module.h>
+#include <linux/mm_inline.h>
 #include <linux/kallsyms.h>
 #include <linux/memcontrol.h>
 #include <linux/mmu_notifier.h>
@@ -671,9 +672,13 @@ void page_add_anon_rmap(struct page *page,
 void page_add_new_anon_rmap(struct page *page,
        struct vm_area_struct *vma, unsigned long address)
 {
-       BUG_ON(address < vma->vm_start || address >= vma->vm_end);
+       VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
        atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
        __page_set_anon_rmap(page, vma, address);
+       if (page_evictable(page, vma))
+               lru_cache_add_lru(page, LRU_ACTIVE + page_is_file_cache(page));
+       else
+               add_page_to_unevictable_list(page);
 }
 
 /**
index 21a566f..ff0b290 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -246,25 +246,6 @@ void add_page_to_unevictable_list(struct page *page)
        spin_unlock_irq(&zone->lru_lock);
 }
 
-/**
- * lru_cache_add_active_or_unevictable
- * @page:  the page to be added to LRU
- * @vma:   vma in which page is mapped for determining reclaimability
- *
- * place @page on active or unevictable LRU list, depending on
- * page_evictable().  Note that if the page is not evictable,
- * it goes directly back onto it's zone's unevictable list.  It does
- * NOT use a per cpu pagevec.
- */
-void lru_cache_add_active_or_unevictable(struct page *page,
-                                       struct vm_area_struct *vma)
-{
-       if (page_evictable(page, vma))
-               lru_cache_add_lru(page, LRU_ACTIVE + page_is_file_cache(page));
-       else
-               add_page_to_unevictable_list(page);
-}
-
 /*
  * Drain pages out of the cpu's pagevecs.
  * Either "cpu" is the current CPU, and preemption has already been