mm: remove CONFIG_UNEVICTABLE_LRU config option
author     KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
           Tue, 16 Jun 2009 22:32:51 +0000 (15:32 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Wed, 17 Jun 2009 02:47:42 +0000 (19:47 -0700)
Currently, nobody wants to turn UNEVICTABLE_LRU off, so this configurability
is unnecessary.  Remove the config option and make the unevictable LRU code
unconditional.

Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Andi Kleen <andi@firstfloor.org>
Acked-by: Minchan Kim <minchan.kim@gmail.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Matt Mackall <mpm@selenic.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Lee Schermerhorn <lee.schermerhorn@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
17 files changed:
drivers/base/node.c
fs/proc/meminfo.c
fs/proc/page.c
include/linux/mmzone.h
include/linux/page-flags.h
include/linux/pagemap.h
include/linux/rmap.h
include/linux/swap.h
include/linux/vmstat.h
kernel/sysctl.c
mm/Kconfig
mm/internal.h
mm/mlock.c
mm/page_alloc.c
mm/rmap.c
mm/vmscan.c
mm/vmstat.c

index 40b8097..91d4087 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -72,10 +72,8 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
                       "Node %d Inactive(anon): %8lu kB\n"
                       "Node %d Active(file):   %8lu kB\n"
                       "Node %d Inactive(file): %8lu kB\n"
-#ifdef CONFIG_UNEVICTABLE_LRU
                       "Node %d Unevictable:    %8lu kB\n"
                       "Node %d Mlocked:        %8lu kB\n"
-#endif
 #ifdef CONFIG_HIGHMEM
                       "Node %d HighTotal:      %8lu kB\n"
                       "Node %d HighFree:       %8lu kB\n"
@@ -105,10 +103,8 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
                       nid, K(node_page_state(nid, NR_INACTIVE_ANON)),
                       nid, K(node_page_state(nid, NR_ACTIVE_FILE)),
                       nid, K(node_page_state(nid, NR_INACTIVE_FILE)),
-#ifdef CONFIG_UNEVICTABLE_LRU
                       nid, K(node_page_state(nid, NR_UNEVICTABLE)),
                       nid, K(node_page_state(nid, NR_MLOCK)),
-#endif
 #ifdef CONFIG_HIGHMEM
                       nid, K(i.totalhigh),
                       nid, K(i.freehigh),
index c6b0302..d5c410d 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -64,10 +64,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
                "Inactive(anon): %8lu kB\n"
                "Active(file):   %8lu kB\n"
                "Inactive(file): %8lu kB\n"
-#ifdef CONFIG_UNEVICTABLE_LRU
                "Unevictable:    %8lu kB\n"
                "Mlocked:        %8lu kB\n"
-#endif
 #ifdef CONFIG_HIGHMEM
                "HighTotal:      %8lu kB\n"
                "HighFree:       %8lu kB\n"
@@ -109,10 +107,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
                K(pages[LRU_INACTIVE_ANON]),
                K(pages[LRU_ACTIVE_FILE]),
                K(pages[LRU_INACTIVE_FILE]),
-#ifdef CONFIG_UNEVICTABLE_LRU
                K(pages[LRU_UNEVICTABLE]),
                K(global_page_state(NR_MLOCK)),
-#endif
 #ifdef CONFIG_HIGHMEM
                K(i.totalhigh),
                K(i.freehigh),
index 9d926bd..2707c6c 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -172,10 +172,8 @@ static u64 get_uflags(struct page *page)
        u |= kpf_copy_bit(k, KPF_SWAPCACHE,     PG_swapcache);
        u |= kpf_copy_bit(k, KPF_SWAPBACKED,    PG_swapbacked);
 
-#ifdef CONFIG_UNEVICTABLE_LRU
        u |= kpf_copy_bit(k, KPF_UNEVICTABLE,   PG_unevictable);
        u |= kpf_copy_bit(k, KPF_MLOCKED,       PG_mlocked);
-#endif
 
 #ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
        u |= kpf_copy_bit(k, KPF_UNCACHED,      PG_uncached);
index db976b9..8895985 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -83,13 +83,8 @@ enum zone_stat_item {
        NR_ACTIVE_ANON,         /*  "     "     "   "       "         */
        NR_INACTIVE_FILE,       /*  "     "     "   "       "         */
        NR_ACTIVE_FILE,         /*  "     "     "   "       "         */
-#ifdef CONFIG_UNEVICTABLE_LRU
        NR_UNEVICTABLE,         /*  "     "     "   "       "         */
        NR_MLOCK,               /* mlock()ed pages found and moved off LRU */
-#else
-       NR_UNEVICTABLE = NR_ACTIVE_FILE, /* avoid compiler errors in dead code */
-       NR_MLOCK = NR_ACTIVE_FILE,
-#endif
        NR_ANON_PAGES,  /* Mapped anonymous pages */
        NR_FILE_MAPPED, /* pagecache pages mapped into pagetables.
                           only modified from process context */
@@ -132,11 +127,7 @@ enum lru_list {
        LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
        LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
        LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
-#ifdef CONFIG_UNEVICTABLE_LRU
        LRU_UNEVICTABLE,
-#else
-       LRU_UNEVICTABLE = LRU_ACTIVE_FILE, /* avoid compiler errors in dead code */
-#endif
        NR_LRU_LISTS
 };
 
@@ -156,11 +147,7 @@ static inline int is_active_lru(enum lru_list l)
 
 static inline int is_unevictable_lru(enum lru_list l)
 {
-#ifdef CONFIG_UNEVICTABLE_LRU
        return (l == LRU_UNEVICTABLE);
-#else
-       return 0;
-#endif
 }
 
 enum zone_watermarks {
index 62214c7..d6792f8 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -95,9 +95,7 @@ enum pageflags {
        PG_reclaim,             /* To be reclaimed asap */
        PG_buddy,               /* Page is free, on buddy lists */
        PG_swapbacked,          /* Page is backed by RAM/swap */
-#ifdef CONFIG_UNEVICTABLE_LRU
        PG_unevictable,         /* Page is "unevictable"  */
-#endif
 #ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
        PG_mlocked,             /* Page is vma mlocked */
 #endif
@@ -248,14 +246,8 @@ PAGEFLAG_FALSE(SwapCache)
        SETPAGEFLAG_NOOP(SwapCache) CLEARPAGEFLAG_NOOP(SwapCache)
 #endif
 
-#ifdef CONFIG_UNEVICTABLE_LRU
 PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable)
        TESTCLEARFLAG(Unevictable, unevictable)
-#else
-PAGEFLAG_FALSE(Unevictable) TESTCLEARFLAG_FALSE(Unevictable)
-       SETPAGEFLAG_NOOP(Unevictable) CLEARPAGEFLAG_NOOP(Unevictable)
-       __CLEARPAGEFLAG_NOOP(Unevictable)
-#endif
 
 #ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
 #define MLOCK_PAGES 1
@@ -382,12 +374,6 @@ static inline void __ClearPageTail(struct page *page)
 
 #endif /* !PAGEFLAGS_EXTENDED */
 
-#ifdef CONFIG_UNEVICTABLE_LRU
-#define __PG_UNEVICTABLE       (1 << PG_unevictable)
-#else
-#define __PG_UNEVICTABLE       0
-#endif
-
 #ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
 #define __PG_MLOCKED           (1 << PG_mlocked)
 #else
@@ -403,7 +389,7 @@ static inline void __ClearPageTail(struct page *page)
         1 << PG_private | 1 << PG_private_2 | \
         1 << PG_buddy   | 1 << PG_writeback | 1 << PG_reserved | \
         1 << PG_slab    | 1 << PG_swapcache | 1 << PG_active | \
-        __PG_UNEVICTABLE | __PG_MLOCKED)
+        1 << PG_unevictable | __PG_MLOCKED)
 
 /*
  * Flags checked when a page is prepped for return by the page allocator.
index 34da523..aec3252 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -22,9 +22,7 @@ enum mapping_flags {
        AS_EIO          = __GFP_BITS_SHIFT + 0, /* IO error on async write */
        AS_ENOSPC       = __GFP_BITS_SHIFT + 1, /* ENOSPC on async write */
        AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */
-#ifdef CONFIG_UNEVICTABLE_LRU
        AS_UNEVICTABLE  = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */
-#endif
 };
 
 static inline void mapping_set_error(struct address_space *mapping, int error)
@@ -37,8 +35,6 @@ static inline void mapping_set_error(struct address_space *mapping, int error)
        }
 }
 
-#ifdef CONFIG_UNEVICTABLE_LRU
-
 static inline void mapping_set_unevictable(struct address_space *mapping)
 {
        set_bit(AS_UNEVICTABLE, &mapping->flags);
@@ -55,14 +51,6 @@ static inline int mapping_unevictable(struct address_space *mapping)
                return test_bit(AS_UNEVICTABLE, &mapping->flags);
        return !!mapping;
 }
-#else
-static inline void mapping_set_unevictable(struct address_space *mapping) { }
-static inline void mapping_clear_unevictable(struct address_space *mapping) { }
-static inline int mapping_unevictable(struct address_space *mapping)
-{
-       return 0;
-}
-#endif
 
 static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
 {
index b35bc0e..619379a 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -105,18 +105,11 @@ unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
  */
 int page_mkclean(struct page *);
 
-#ifdef CONFIG_UNEVICTABLE_LRU
 /*
  * called in munlock()/munmap() path to check for other vmas holding
  * the page mlocked.
  */
 int try_to_munlock(struct page *);
-#else
-static inline int try_to_munlock(struct page *page)
-{
-       return 0;       /* a.k.a. SWAP_SUCCESS */
-}
-#endif
 
 #else  /* !CONFIG_MMU */
 
index d476aad..f30c069 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -235,7 +235,6 @@ static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
 }
 #endif
 
-#ifdef CONFIG_UNEVICTABLE_LRU
 extern int page_evictable(struct page *page, struct vm_area_struct *vma);
 extern void scan_mapping_unevictable_pages(struct address_space *);
 
@@ -244,24 +243,6 @@ extern int scan_unevictable_handler(struct ctl_table *, int, struct file *,
                                        void __user *, size_t *, loff_t *);
 extern int scan_unevictable_register_node(struct node *node);
 extern void scan_unevictable_unregister_node(struct node *node);
-#else
-static inline int page_evictable(struct page *page,
-                                               struct vm_area_struct *vma)
-{
-       return 1;
-}
-
-static inline void scan_mapping_unevictable_pages(struct address_space *mapping)
-{
-}
-
-static inline int scan_unevictable_register_node(struct node *node)
-{
-       return 0;
-}
-
-static inline void scan_unevictable_unregister_node(struct node *node) { }
-#endif
 
 extern int kswapd_run(int nid);
 
index 524cd1b..ff4696c 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -41,7 +41,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 #ifdef CONFIG_HUGETLB_PAGE
                HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
 #endif
-#ifdef CONFIG_UNEVICTABLE_LRU
                UNEVICTABLE_PGCULLED,   /* culled to noreclaim list */
                UNEVICTABLE_PGSCANNED,  /* scanned for reclaimability */
                UNEVICTABLE_PGRESCUED,  /* rescued from noreclaim list */
@@ -50,7 +49,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
                UNEVICTABLE_PGCLEARED,  /* on COW, page truncate */
                UNEVICTABLE_PGSTRANDED, /* unable to isolate on unlock */
                UNEVICTABLE_MLOCKFREED,
-#endif
                NR_VM_EVENT_ITEMS
 };
 
index 0e51a35..2ccee08 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1325,7 +1325,6 @@ static struct ctl_table vm_table[] = {
                .extra2         = &one,
        },
 #endif
-#ifdef CONFIG_UNEVICTABLE_LRU
        {
                .ctl_name       = CTL_UNNUMBERED,
                .procname       = "scan_unevictable_pages",
@@ -1334,7 +1333,6 @@ static struct ctl_table vm_table[] = {
                .mode           = 0644,
                .proc_handler   = &scan_unevictable_handler,
        },
-#endif
 /*
  * NOTE: do not add new entries to this table unless you have read
  * Documentation/sysctl/ctl_unnumbered.txt
index 71830ba..97d2c88 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -203,25 +203,13 @@ config VIRT_TO_BUS
        def_bool y
        depends on !ARCH_NO_VIRT_TO_BUS
 
-config UNEVICTABLE_LRU
-       bool "Add LRU list to track non-evictable pages"
-       default y
-       help
-         Keeps unevictable pages off of the active and inactive pageout
-         lists, so kswapd will not waste CPU time or have its balancing
-         algorithms thrown off by scanning these pages.  Selecting this
-         will use one page flag and increase the code size a little,
-         say Y unless you know what you are doing.
-
-         See Documentation/vm/unevictable-lru.txt for more information.
-
 config HAVE_MLOCK
        bool
        default y if MMU=y
 
 config HAVE_MLOCKED_PAGE_BIT
        bool
-       default y if HAVE_MLOCK=y && UNEVICTABLE_LRU=y
+       default y if HAVE_MLOCK=y
 
 config MMU_NOTIFIER
        bool
index b4ac332..f02c750 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -73,7 +73,6 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
 }
 #endif
 
-#ifdef CONFIG_UNEVICTABLE_LRU
 /*
  * unevictable_migrate_page() called only from migrate_page_copy() to
  * migrate unevictable flag to new page.
@@ -85,11 +84,6 @@ static inline void unevictable_migrate_page(struct page *new, struct page *old)
        if (TestClearPageUnevictable(old))
                SetPageUnevictable(new);
 }
-#else
-static inline void unevictable_migrate_page(struct page *new, struct page *old)
-{
-}
-#endif
 
 #ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
 /*
index ac13043..45eb650 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -31,7 +31,6 @@ int can_do_mlock(void)
 }
 EXPORT_SYMBOL(can_do_mlock);
 
-#ifdef CONFIG_UNEVICTABLE_LRU
 /*
  * Mlocked pages are marked with PageMlocked() flag for efficient testing
  * in vmscan and, possibly, the fault path; and to support semi-accurate
@@ -261,27 +260,6 @@ static int __mlock_posix_error_return(long retval)
        return retval;
 }
 
-#else /* CONFIG_UNEVICTABLE_LRU */
-
-/*
- * Just make pages present if VM_LOCKED.  No-op if unlocking.
- */
-static long __mlock_vma_pages_range(struct vm_area_struct *vma,
-                                  unsigned long start, unsigned long end,
-                                  int mlock)
-{
-       if (mlock && (vma->vm_flags & VM_LOCKED))
-               return make_pages_present(start, end);
-       return 0;
-}
-
-static inline int __mlock_posix_error_return(long retval)
-{
-       return 0;
-}
-
-#endif /* CONFIG_UNEVICTABLE_LRU */
-
 /**
  * mlock_vma_pages_range() - mlock pages in specified vma range.
  * @vma - the vma containing the specfied address range
index 00e2937..c95a77c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2077,19 +2077,14 @@ void show_free_areas(void)
 
        printk("Active_anon:%lu active_file:%lu inactive_anon:%lu\n"
                " inactive_file:%lu"
-//TODO:  check/adjust line lengths
-#ifdef CONFIG_UNEVICTABLE_LRU
                " unevictable:%lu"
-#endif
                " dirty:%lu writeback:%lu unstable:%lu\n"
                " free:%lu slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n",
                global_page_state(NR_ACTIVE_ANON),
                global_page_state(NR_ACTIVE_FILE),
                global_page_state(NR_INACTIVE_ANON),
                global_page_state(NR_INACTIVE_FILE),
-#ifdef CONFIG_UNEVICTABLE_LRU
                global_page_state(NR_UNEVICTABLE),
-#endif
                global_page_state(NR_FILE_DIRTY),
                global_page_state(NR_WRITEBACK),
                global_page_state(NR_UNSTABLE_NFS),
@@ -2113,9 +2108,7 @@ void show_free_areas(void)
                        " inactive_anon:%lukB"
                        " active_file:%lukB"
                        " inactive_file:%lukB"
-#ifdef CONFIG_UNEVICTABLE_LRU
                        " unevictable:%lukB"
-#endif
                        " present:%lukB"
                        " pages_scanned:%lu"
                        " all_unreclaimable? %s"
@@ -2129,9 +2122,7 @@ void show_free_areas(void)
                        K(zone_page_state(zone, NR_INACTIVE_ANON)),
                        K(zone_page_state(zone, NR_ACTIVE_FILE)),
                        K(zone_page_state(zone, NR_INACTIVE_FILE)),
-#ifdef CONFIG_UNEVICTABLE_LRU
                        K(zone_page_state(zone, NR_UNEVICTABLE)),
-#endif
                        K(zone->present_pages),
                        zone->pages_scanned,
                        (zone_is_all_unreclaimable(zone) ? "yes" : "no")
index 23122af..316c9d6 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1202,7 +1202,6 @@ int try_to_unmap(struct page *page, int migration)
        return ret;
 }
 
-#ifdef CONFIG_UNEVICTABLE_LRU
 /**
  * try_to_munlock - try to munlock a page
  * @page: the page to be munlocked
@@ -1226,4 +1225,4 @@ int try_to_munlock(struct page *page)
        else
                return try_to_unmap_file(page, 1, 0);
 }
-#endif
+
index 879d034..2c4b945 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -514,7 +514,6 @@ int remove_mapping(struct address_space *mapping, struct page *page)
  *
  * lru_lock must not be held, interrupts must be enabled.
  */
-#ifdef CONFIG_UNEVICTABLE_LRU
 void putback_lru_page(struct page *page)
 {
        int lru;
@@ -568,20 +567,6 @@ redo:
        put_page(page);         /* drop ref from isolate */
 }
 
-#else /* CONFIG_UNEVICTABLE_LRU */
-
-void putback_lru_page(struct page *page)
-{
-       int lru;
-       VM_BUG_ON(PageLRU(page));
-
-       lru = !!TestClearPageActive(page) + page_is_file_cache(page);
-       lru_cache_add_lru(page, lru);
-       put_page(page);
-}
-#endif /* CONFIG_UNEVICTABLE_LRU */
-
-
 /*
  * shrink_page_list() returns the number of reclaimed pages
  */
@@ -2470,7 +2455,6 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 }
 #endif
 
-#ifdef CONFIG_UNEVICTABLE_LRU
 /*
  * page_evictable - test whether a page is evictable
  * @page: the page to test
@@ -2717,4 +2701,3 @@ void scan_unevictable_unregister_node(struct node *node)
        sysdev_remove_file(&node->sysdev, &attr_scan_unevictable_pages);
 }
 
-#endif
index 1e151cf..1e3aa81 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -629,10 +629,8 @@ static const char * const vmstat_text[] = {
        "nr_active_anon",
        "nr_inactive_file",
        "nr_active_file",
-#ifdef CONFIG_UNEVICTABLE_LRU
        "nr_unevictable",
        "nr_mlock",
-#endif
        "nr_anon_pages",
        "nr_mapped",
        "nr_file_pages",
@@ -687,7 +685,6 @@ static const char * const vmstat_text[] = {
        "htlb_buddy_alloc_success",
        "htlb_buddy_alloc_fail",
 #endif
-#ifdef CONFIG_UNEVICTABLE_LRU
        "unevictable_pgs_culled",
        "unevictable_pgs_scanned",
        "unevictable_pgs_rescued",
@@ -697,7 +694,6 @@ static const char * const vmstat_text[] = {
        "unevictable_pgs_stranded",
        "unevictable_pgs_mlockfreed",
 #endif
-#endif
 };
 
 static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,