[PATCH] zone_reclaim: dynamic slab reclaim
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 72babac..089e943 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -34,6 +34,7 @@
 #include <linux/notifier.h>
 #include <linux/rwsem.h>
 #include <linux/delay.h>
+#include <linux/kthread.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -46,8 +47,6 @@ struct scan_control {
        /* Incremented by the number of inactive pages that were scanned */
        unsigned long nr_scanned;
 
-       unsigned long nr_mapped;        /* From page_state */
-
        /* This context's GFP mask */
        gfp_t gfp_mask;
 
@@ -63,6 +62,8 @@ struct scan_control {
        int swap_cluster_max;
 
        int swappiness;
+
+       int all_unreclaimable;
 };
 
 /*
@@ -216,7 +217,7 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
                                break;
                        if (shrink_ret < nr_before)
                                ret += nr_before - shrink_ret;
-                       mod_page_state(slabs_scanned, this_scan);
+                       count_vm_events(SLABS_SCANNED, this_scan);
                        total_scan -= this_scan;
 
                        cond_resched();
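Note: mod_page_state()/inc_page_state() belong to the old global page_state
accounting; their replacements here, count_vm_event()/count_vm_events() and
the __count_vm_events()/__count_zone_vm_events() variants, are the per-cpu
VM event counters, with the double-underscore forms assuming interrupts are
already disabled by the caller.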
@@ -378,8 +379,8 @@ static pageout_t pageout(struct page *page, struct address_space *mapping)
 
 int remove_mapping(struct address_space *mapping, struct page *page)
 {
-       if (!mapping)
-               return 0;               /* truncate got there first */
+       BUG_ON(!PageLocked(page));
+       BUG_ON(mapping != page_mapping(page));
 
        write_lock_irq(&mapping->tree_lock);
 
@@ -441,7 +442,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                if (TestSetPageLocked(page))
                        goto keep;
 
-               BUG_ON(PageActive(page));
+               VM_BUG_ON(PageActive(page));
 
                sc->nr_scanned++;
 
@@ -548,7 +549,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                                goto free_it;
                }
 
-               if (!remove_mapping(mapping, page))
+               if (!mapping || !remove_mapping(mapping, page))
                        goto keep_locked;
 
 free_it:
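Note: the NULL-mapping check ("truncate got there first") that used to live
inside remove_mapping() now happens at this call site, which is what lets
remove_mapping() assert its preconditions with BUG_ON() in the earlier hunk
instead of silently returning 0.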
@@ -565,12 +566,12 @@ keep_locked:
                unlock_page(page);
 keep:
                list_add(&page->lru, &ret_pages);
-               BUG_ON(PageLRU(page));
+               VM_BUG_ON(PageLRU(page));
        }
        list_splice(&ret_pages, page_list);
        if (pagevec_count(&freed_pvec))
                __pagevec_release_nonlru(&freed_pvec);
-       mod_page_state(pgactivate, pgactivate);
+       count_vm_events(PGACTIVATE, pgactivate);
        return nr_reclaimed;
 }
 
@@ -604,7 +605,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
                page = lru_to_page(src);
                prefetchw_prev_lru_page(page, src, flags);
 
-               BUG_ON(!PageLRU(page));
+               VM_BUG_ON(!PageLRU(page));
 
                list_del(&page->lru);
                target = src;
@@ -660,11 +661,11 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                nr_reclaimed += nr_freed;
                local_irq_disable();
                if (current_is_kswapd()) {
-                       __mod_page_state_zone(zone, pgscan_kswapd, nr_scan);
-                       __mod_page_state(kswapd_steal, nr_freed);
+                       __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scan);
+                       __count_vm_events(KSWAPD_STEAL, nr_freed);
                } else
-                       __mod_page_state_zone(zone, pgscan_direct, nr_scan);
-               __mod_page_state_zone(zone, pgsteal, nr_freed);
+                       __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan);
+               __count_zone_vm_events(PGSTEAL, zone, nr_freed);
 
                if (nr_taken == 0)
                        goto done;
@@ -675,7 +676,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                 */
                while (!list_empty(&page_list)) {
                        page = lru_to_page(&page_list);
-                       BUG_ON(PageLRU(page));
+                       VM_BUG_ON(PageLRU(page));
                        SetPageLRU(page);
                        list_del(&page->lru);
                        if (PageActive(page))
@@ -696,6 +697,11 @@ done:
        return nr_reclaimed;
 }
 
+static inline int zone_is_near_oom(struct zone *zone)
+{
+       return zone->pages_scanned >= (zone->nr_active + zone->nr_inactive)*3;
+}
+
 /*
  * This moves pages from the active list to the inactive list.
  *
@@ -731,6 +737,9 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                long distress;
                long swap_tendency;
 
+               if (zone_is_near_oom(zone))
+                       goto force_reclaim_mapped;
+
                /*
                 * `distress' is a measure of how much trouble we're having
                 * reclaiming pages.  0 -> no problems.  100 -> great trouble.
@@ -743,7 +752,9 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                 * how much memory
                 * is mapped.
                 */
-               mapped_ratio = (sc->nr_mapped * 100) / vm_total_pages;
+               mapped_ratio = ((global_page_state(NR_FILE_MAPPED) +
+                               global_page_state(NR_ANON_PAGES)) * 100) /
+                                       vm_total_pages;
 
                /*
                 * Now decide how much we really want to unmap some pages.  The
@@ -764,6 +775,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                 * memory onto the inactive list.
                 */
                if (swap_tendency >= 100)
+force_reclaim_mapped:
                        reclaim_mapped = 1;
        }
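For readers following the heuristic: zone_is_near_oom(), added above, jumps
straight to force_reclaim_mapped once a zone has been scanned more than three
times its LRU size; otherwise the decision combines distress (how hard reclaim
is struggling), mapped_ratio (now derived from NR_FILE_MAPPED + NR_ANON_PAGES
rather than the removed sc->nr_mapped), and the swappiness sysctl. A minimal
sketch of that combination, assuming the usual weighting in the lines of
shrink_active_list() not shown in this hunk (the helper name is mine):

static int should_reclaim_mapped(long distress, long mapped_ratio,
				 int swappiness)
{
	/* half the mapped ratio, plus reclaim distress, plus swappiness */
	long swap_tendency = mapped_ratio / 2 + distress + swappiness;

	/* same threshold as the test above */
	return swap_tendency >= 100;
}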
 
@@ -796,9 +808,9 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
        while (!list_empty(&l_inactive)) {
                page = lru_to_page(&l_inactive);
                prefetchw_prev_lru_page(page, &l_inactive, flags);
-               BUG_ON(PageLRU(page));
+               VM_BUG_ON(PageLRU(page));
                SetPageLRU(page);
-               BUG_ON(!PageActive(page));
+               VM_BUG_ON(!PageActive(page));
                ClearPageActive(page);
 
                list_move(&page->lru, &zone->inactive_list);
@@ -826,9 +838,9 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
        while (!list_empty(&l_active)) {
                page = lru_to_page(&l_active);
                prefetchw_prev_lru_page(page, &l_active, flags);
-               BUG_ON(PageLRU(page));
+               VM_BUG_ON(PageLRU(page));
                SetPageLRU(page);
-               BUG_ON(!PageActive(page));
+               VM_BUG_ON(!PageActive(page));
                list_move(&page->lru, &zone->active_list);
                pgmoved++;
                if (!pagevec_add(&pvec, page)) {
@@ -840,11 +852,10 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                }
        }
        zone->nr_active += pgmoved;
-       spin_unlock(&zone->lru_lock);
 
-       __mod_page_state_zone(zone, pgrefill, pgscanned);
-       __mod_page_state(pgdeactivate, pgdeactivate);
-       local_irq_enable();
+       __count_zone_vm_events(PGREFILL, zone, pgscanned);
+       __count_vm_events(PGDEACTIVATE, pgdeactivate);
+       spin_unlock_irq(&zone->lru_lock);
 
        pagevec_release(&pvec);
 }
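Note: the PGREFILL/PGDEACTIVATE accounting moves above the unlock because the
__count_*() variants expect interrupts to be off, which they still are while
zone->lru_lock is held; spin_unlock_irq() then replaces the separate
spin_unlock() + local_irq_enable() pair.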
@@ -925,6 +936,7 @@ static unsigned long shrink_zones(int priority, struct zone **zones,
        unsigned long nr_reclaimed = 0;
        int i;
 
+       sc->all_unreclaimable = 1;
        for (i = 0; zones[i] != NULL; i++) {
                struct zone *zone = zones[i];
 
@@ -941,6 +953,8 @@ static unsigned long shrink_zones(int priority, struct zone **zones,
                if (zone->all_unreclaimable && priority != DEF_PRIORITY)
                        continue;       /* Let kswapd poll it */
 
+               sc->all_unreclaimable = 0;
+
                nr_reclaimed += shrink_zone(priority, zone, sc);
        }
        return nr_reclaimed;
@@ -976,7 +990,7 @@ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
                .swappiness = vm_swappiness,
        };
 
-       inc_page_state(allocstall);
+       count_vm_event(ALLOCSTALL);
 
        for (i = 0; zones[i] != NULL; i++) {
                struct zone *zone = zones[i];
@@ -989,7 +1003,6 @@ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
        }
 
        for (priority = DEF_PRIORITY; priority >= 0; priority--) {
-               sc.nr_mapped = read_page_state(nr_mapped);
                sc.nr_scanned = 0;
                if (!priority)
                        disable_swap_token();
@@ -1022,6 +1035,9 @@ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
                if (sc.nr_scanned && priority < DEF_PRIORITY - 2)
                        blk_congestion_wait(WRITE, HZ/10);
        }
+       /* top priority shrink_zones still had more to do? don't OOM, then */
+       if (!sc.all_unreclaimable)
+               ret = 1;
 out:
        for (i = 0; zones[i] != 0; i++) {
                struct zone *zone = zones[i];
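Note: sc.all_unreclaimable starts at 1 and is cleared by shrink_zones() as
soon as any zone is actually scanned, so try_to_free_pages() now returns
nonzero whenever some zone still looked reclaimable at the lowest priority,
even if nothing was freed; the page allocator treats that as progress and
retries instead of invoking the OOM killer.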
@@ -1074,9 +1090,7 @@ loop_again:
        total_scanned = 0;
        nr_reclaimed = 0;
        sc.may_writepage = !laptop_mode;
-       sc.nr_mapped = read_page_state(nr_mapped);
-
-       inc_page_state(pageoutrun);
+       count_vm_event(PAGEOUTRUN);
 
        for (i = 0; i < pgdat->nr_zones; i++) {
                struct zone *zone = pgdat->node_zones + i;
@@ -1156,7 +1170,7 @@ scan:
                        if (zone->all_unreclaimable)
                                continue;
                        if (nr_slab == 0 && zone->pages_scanned >=
-                                   (zone->nr_active + zone->nr_inactive) * 4)
+                                   (zone->nr_active + zone->nr_inactive) * 6)
                                zone->all_unreclaimable = 1;
                        /*
                         * If we've done a decent amount of scanning and
@@ -1223,7 +1237,6 @@ static int kswapd(void *p)
        };
        cpumask_t cpumask;
 
-       daemonize("kswapd%d", pgdat->node_id);
        cpumask = node_to_cpumask(pgdat->node_id);
        if (!cpus_empty(cpumask))
                set_cpus_allowed(tsk, cpumask);
@@ -1365,7 +1378,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
        for_each_zone(zone)
                lru_pages += zone->nr_active + zone->nr_inactive;
 
-       nr_slab = read_page_state(nr_slab);
+       nr_slab = global_page_state(NR_SLAB_RECLAIMABLE);
        /* If slab caches are huge, it's better to hit them first */
        while (nr_slab >= lru_pages) {
                reclaim_state.reclaimed_slab = 0;
@@ -1407,9 +1420,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
                for (prio = DEF_PRIORITY; prio >= 0; prio--) {
                        unsigned long nr_to_scan = nr_pages - ret;
 
-                       sc.nr_mapped = read_page_state(nr_mapped);
                        sc.nr_scanned = 0;
-
                        ret += shrink_all_zones(nr_to_scan, prio, pass, &sc);
                        if (ret >= nr_pages)
                                goto out;
@@ -1450,7 +1461,7 @@ out:
    not required for correctness.  So if the last cpu in a node goes
    away, we get changed to run anywhere: as the first one comes back,
    restore their cpu bindings. */
-static int cpu_callback(struct notifier_block *nfb,
+static int __devinit cpu_callback(struct notifier_block *nfb,
                                  unsigned long action, void *hcpu)
 {
        pg_data_t *pgdat;
@@ -1468,20 +1479,35 @@ static int cpu_callback(struct notifier_block *nfb,
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
+/*
+ * This kswapd start function will be called by init and by node hot-add.
+ * On node hot-add, kswapd will be moved to the proper cpus if cpus are hot-added.
+ */
+int kswapd_run(int nid)
+{
+       pg_data_t *pgdat = NODE_DATA(nid);
+       int ret = 0;
+
+       if (pgdat->kswapd)
+               return 0;
+
+       pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
+       if (IS_ERR(pgdat->kswapd)) {
+               /* failure at boot is fatal */
+               BUG_ON(system_state == SYSTEM_BOOTING);
+               printk("Failed to start kswapd on node %d\n", nid);
+               ret = -1;
+       }
+       return ret;
+}
+
 static int __init kswapd_init(void)
 {
-       pg_data_t *pgdat;
+       int nid;
 
        swap_setup();
-       for_each_online_pgdat(pgdat) {
-               pid_t pid;
-
-               pid = kernel_thread(kswapd, pgdat, CLONE_KERNEL);
-               BUG_ON(pid < 0);
-               read_lock(&tasklist_lock);
-               pgdat->kswapd = find_task_by_pid(pid);
-               read_unlock(&tasklist_lock);
-       }
+       for_each_online_node(nid)
+               kswapd_run(nid);
        hotcpu_notifier(cpu_callback, 0);
        return 0;
 }
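A sketch of the node hot-add side mentioned in the comment above; the real
call site lives in the memory hot-add code rather than in this file, so the
fragment below is illustrative only:

	/* after a newly added node's zones have been onlined */
	if (kswapd_run(nid))
		printk(KERN_WARNING "kswapd is not running on node %d\n", nid);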
@@ -1494,10 +1520,6 @@ module_init(kswapd_init)
  *
  * If non-zero call zone_reclaim when the number of free pages falls below
  * the watermarks.
- *
- * In the future we may add flags to the mode. However, the page allocator
- * should only have to check that zone_reclaim_mode != 0 before calling
- * zone_reclaim().
  */
 int zone_reclaim_mode __read_mostly;
 
@@ -1505,12 +1527,6 @@ int zone_reclaim_mode __read_mostly;
 #define RECLAIM_ZONE (1<<0)    /* Run shrink_cache on the zone */
 #define RECLAIM_WRITE (1<<1)   /* Writeout pages during reclaim */
 #define RECLAIM_SWAP (1<<2)    /* Swap pages out during reclaim */
-#define RECLAIM_SLAB (1<<3)    /* Do a global slab shrink if the zone is out of memory */
-
-/*
- * Mininum time between zone reclaim scans
- */
-int zone_reclaim_interval __read_mostly = 30*HZ;
 
 /*
  * Priority for ZONE_RECLAIM. This determines the fraction of pages
@@ -1519,6 +1535,18 @@ int zone_reclaim_interval __read_mostly = 30*HZ;
  */
 #define ZONE_RECLAIM_PRIORITY 4
 
+/*
+ * Percentage of pages in a zone that must be unmapped for zone_reclaim to
+ * occur.
+ */
+int sysctl_min_unmapped_ratio = 1;
+
+/*
+ * If the number of slab pages in a zone grows beyond this percentage then
+ * slab reclaim needs to occur.
+ */
+int sysctl_min_slab_ratio = 5;
+
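The two sysctls above are percentages of a zone's size; the per-zone page
thresholds they feed, zone->min_unmapped_pages and zone->min_slab_pages (used
in the hunks below), are computed where the sysctls are handled, outside this
file. A sketch of that conversion, assuming the usual present_pages scaling
(the helper name is mine):

static void set_zone_reclaim_thresholds(struct zone *zone)
{
	zone->min_unmapped_pages = (zone->present_pages *
			sysctl_min_unmapped_ratio) / 100;
	zone->min_slab_pages = (zone->present_pages *
			sysctl_min_slab_ratio) / 100;
}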
 /*
  * Try to free up some pages from this zone through reclaim.
  */
@@ -1533,7 +1561,6 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
        struct scan_control sc = {
                .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
                .may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP),
-               .nr_mapped = read_page_state(nr_mapped),
                .swap_cluster_max = max_t(unsigned long, nr_pages,
                                        SWAP_CLUSTER_MAX),
                .gfp_mask = gfp_mask,
@@ -1551,43 +1578,41 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
        reclaim_state.reclaimed_slab = 0;
        p->reclaim_state = &reclaim_state;
 
-       /*
-        * Free memory by calling shrink zone with increasing priorities
-        * until we have enough memory freed.
-        */
-       priority = ZONE_RECLAIM_PRIORITY;
-       do {
-               nr_reclaimed += shrink_zone(priority, zone, &sc);
-               priority--;
-       } while (priority >= 0 && nr_reclaimed < nr_pages);
+       if (zone_page_state(zone, NR_FILE_PAGES) -
+               zone_page_state(zone, NR_FILE_MAPPED) >
+               zone->min_unmapped_pages) {
+               /*
+                * Free memory by calling shrink zone with increasing
+                * priorities until we have enough memory freed.
+                */
+               priority = ZONE_RECLAIM_PRIORITY;
+               do {
+                       nr_reclaimed += shrink_zone(priority, zone, &sc);
+                       priority--;
+               } while (priority >= 0 && nr_reclaimed < nr_pages);
+       }
 
-       if (nr_reclaimed < nr_pages && (zone_reclaim_mode & RECLAIM_SLAB)) {
+       if (zone_page_state(zone, NR_SLAB_RECLAIMABLE) > zone->min_slab_pages) {
                /*
                 * shrink_slab() does not currently allow us to determine how
-                * many pages were freed in this zone. So we just shake the slab
-                * a bit and then go off node for this particular allocation
-                * despite possibly having freed enough memory to allocate in
-                * this zone.  If we freed local memory then the next
-                * allocations will be local again.
+                * many pages were freed in this zone. So we take the current
+                * number of slab pages and shake the slab until it is reduced
+                * by the same nr_pages that we used for reclaiming unmapped
+                * pages.
                 *
-                * shrink_slab will free memory on all zones and may take
-                * a long time.
+                * Note that shrink_slab will free memory on all zones and may
+                * take a long time.
                 */
-               shrink_slab(sc.nr_scanned, gfp_mask, order);
+               unsigned long limit = zone_page_state(zone,
+                               NR_SLAB_RECLAIMABLE) - nr_pages;
+
+               while (shrink_slab(sc.nr_scanned, gfp_mask, order) &&
+                       zone_page_state(zone, NR_SLAB_RECLAIMABLE) > limit)
+                       ;
        }
 
        p->reclaim_state = NULL;
        current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
-
-       if (nr_reclaimed == 0) {
-               /*
-                * We were unable to reclaim enough pages to stay on node.  We
-                * now allow off node accesses for a certain time period before
-                * trying again to reclaim pages from the local zone.
-                */
-               zone->last_unsuccessful_zone_reclaim = jiffies;
-       }
-
        return nr_reclaimed >= nr_pages;
 }
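To make the new slab loop concrete with illustrative numbers: if the zone
currently holds 12000 reclaimable slab pages and nr_pages is 32, limit is
11968, and shrink_slab() keeps being called until either NR_SLAB_RECLAIMABLE
for the zone drops to 11968 or below, or shrink_slab() reports no further
progress, whichever happens first.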
 
@@ -1597,14 +1622,20 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
        int node_id;
 
        /*
-        * Do not reclaim if there was a recent unsuccessful attempt at zone
-        * reclaim.  In that case we let allocations go off node for the
-        * zone_reclaim_interval.  Otherwise we would scan for each off-node
-        * page allocation.
+        * Zone reclaim reclaims unmapped file-backed pages and
+        * slab pages if we are over the defined limits.
+        *
+        * A small portion of unmapped file-backed pages is needed for
+        * file I/O; otherwise pages read by file I/O will be immediately
+        * thrown out if the zone is overallocated. So we do not reclaim
+        * if less than a specified percentage of the zone is used by
+        * unmapped file-backed pages.
         */
-       if (time_before(jiffies,
-               zone->last_unsuccessful_zone_reclaim + zone_reclaim_interval))
-                       return 0;
+       if (zone_page_state(zone, NR_FILE_PAGES) -
+           zone_page_state(zone, NR_FILE_MAPPED) <= zone->min_unmapped_pages
+           && zone_page_state(zone, NR_SLAB_RECLAIMABLE)
+                       <= zone->min_slab_pages)
+               return 0;
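With the default ratios above and the percentage-of-zone conversion sketched
earlier, a zone of 1,000,000 present pages gets min_unmapped_pages of about
10,000 and min_slab_pages of about 50,000, so zone_reclaim() bails out here
unless the zone has more than 10,000 unmapped file-backed pages or more than
50,000 reclaimable slab pages.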
 
        /*
         * Avoid concurrent zone reclaims, do not reclaim in a zone that does