memcg: export memory cgroup's swappiness with mem_cgroup_swappiness()
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d036e59..0563749 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -95,8 +95,6 @@ struct scan_control {
        /* Can pages be swapped as part of reclaim? */
        int may_swap;
 
-       int swappiness;
-
        int order;
 
        /*
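
The swappiness knob is dropped from the per-reclaim state here. For orientation, the sketch below lists only the scan_control members that actually appear elsewhere in this diff; the real structure has additional fields and a different layout, so treat it as an illustration rather than the kernel definition.

struct scan_control_sketch {                  /* illustration only, not the kernel struct */
        gfp_t gfp_mask;
        unsigned long nr_to_reclaim;
        int may_writepage;
        int may_unmap;
        int may_swap;                         /* can pages be swapped as part of reclaim? */
        int hibernation_mode;
        int order;
        struct mem_cgroup *mem_cgroup;        /* NULL for global reclaim */
        nodemask_t *nodemask;
        /* no swappiness member: it is resolved per scan by vmscan_swappiness() below */
};
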
@@ -250,49 +248,90 @@ unsigned long shrink_slab(struct shrink_control *shrink,
                unsigned long long delta;
                unsigned long total_scan;
                unsigned long max_pass;
+               int shrink_ret = 0;
+               long nr;
+               long new_nr;
+               long batch_size = shrinker->batch ? shrinker->batch
+                                                 : SHRINK_BATCH;
+
+               /*
+                * copy the current shrinker scan count into a local variable
+                * and zero it so that other concurrent shrinker invocations
+                * don't also do this scanning work.
+                */
+               do {
+                       nr = shrinker->nr;
+               } while (cmpxchg(&shrinker->nr, nr, 0) != nr);
 
+               total_scan = nr;
                max_pass = do_shrinker_shrink(shrinker, shrink, 0);
                delta = (4 * nr_pages_scanned) / shrinker->seeks;
                delta *= max_pass;
                do_div(delta, lru_pages + 1);
-               shrinker->nr += delta;
-               if (shrinker->nr < 0) {
+               total_scan += delta;
+               if (total_scan < 0) {
                        printk(KERN_ERR "shrink_slab: %pF negative objects to "
                               "delete nr=%ld\n",
-                              shrinker->shrink, shrinker->nr);
-                       shrinker->nr = max_pass;
+                              shrinker->shrink, total_scan);
+                       total_scan = max_pass;
                }
 
+               /*
+                * We need to avoid excessive windup on filesystem shrinkers
+                * due to large numbers of GFP_NOFS allocations causing the
+                * shrinkers to return -1 all the time. This results in a large
+                * nr being built up so when a shrink that can do some work
+                * comes along it empties the entire cache due to nr >>>
+                * max_pass.  This is bad for sustaining a working set in
+                * memory.
+                *
+                * Hence only allow the shrinker to scan the entire cache when
+                * a large delta change is calculated directly.
+                */
+               if (delta < max_pass / 4)
+                       total_scan = min(total_scan, max_pass / 2);
+
                /*
                 * Avoid risking looping forever due to too large nr value:
                 * never try to free more than twice the estimate number of
                 * freeable entries.
                 */
-               if (shrinker->nr > max_pass * 2)
-                       shrinker->nr = max_pass * 2;
+               if (total_scan > max_pass * 2)
+                       total_scan = max_pass * 2;
 
-               total_scan = shrinker->nr;
-               shrinker->nr = 0;
+               trace_mm_shrink_slab_start(shrinker, shrink, nr,
+                                       nr_pages_scanned, lru_pages,
+                                       max_pass, delta, total_scan);
 
-               while (total_scan >= SHRINK_BATCH) {
-                       long this_scan = SHRINK_BATCH;
-                       int shrink_ret;
+               while (total_scan >= batch_size) {
                        int nr_before;
 
                        nr_before = do_shrinker_shrink(shrinker, shrink, 0);
                        shrink_ret = do_shrinker_shrink(shrinker, shrink,
-                                                       this_scan);
+                                                       batch_size);
                        if (shrink_ret == -1)
                                break;
                        if (shrink_ret < nr_before)
                                ret += nr_before - shrink_ret;
-                       count_vm_events(SLABS_SCANNED, this_scan);
-                       total_scan -= this_scan;
+                       count_vm_events(SLABS_SCANNED, batch_size);
+                       total_scan -= batch_size;
 
                        cond_resched();
                }
 
-               shrinker->nr += total_scan;
+               /*
+                * move the unused scan count back into the shrinker in a
+                * manner that handles concurrent updates. If we exhausted the
+                * scan, there is no need to do an update.
+                */
+               do {
+                       nr = shrinker->nr;
+                       new_nr = total_scan + nr;
+                       if (total_scan <= 0)
+                               break;
+               } while (cmpxchg(&shrinker->nr, nr, new_nr) != nr);
+
+               trace_mm_shrink_slab_end(shrinker, shrink_ret, nr, new_nr);
        }
        up_read(&shrinker_rwsem);
 out:
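
The flow above is: atomically claim shrinker->nr, add the freshly computed delta, clamp against windup, scan in batch_size chunks, and hand any remainder back for the next invocation. As a rough userspace illustration of that claim/cap/batch/return pattern — not kernel code, and using C11 atomics in place of the kernel's cmpxchg() — a sketch might look like this:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic long pending_scan;       /* stand-in for shrinker->nr */

static long claim_pending(void)
{
        long nr = atomic_load(&pending_scan);
        /* like the cmpxchg loop: take the whole count, leave zero behind */
        while (!atomic_compare_exchange_weak(&pending_scan, &nr, 0))
                ;                       /* nr is refreshed on failure */
        return nr;
}

static void return_unused(long leftover)
{
        if (leftover > 0)               /* nothing to give back if exhausted */
                atomic_fetch_add(&pending_scan, leftover);
}

int main(void)
{
        long max_pass = 400;            /* estimate of freeable objects */
        long batch_size = 128;

        atomic_store(&pending_scan, 10000);     /* wound-up backlog */

        long total_scan = claim_pending();
        if (total_scan > max_pass * 2)          /* never free more than twice the estimate */
                total_scan = max_pass * 2;

        while (total_scan >= batch_size) {      /* scan in batches */
                /* ... free up to batch_size objects here ... */
                total_scan -= batch_size;
        }
        return_unused(total_scan);

        printf("carried over to next pass: %ld\n",
               atomic_load(&pending_scan));
        return 0;
}

The same reasoning as the in-kernel comment applies: if the backlog were not capped relative to the estimate of freeable objects, one caller that can do work could empty the whole cache after a run of GFP_NOFS callers had merely accumulated deferred scan counts.
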
@@ -1729,6 +1768,13 @@ static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
        return shrink_inactive_list(nr_to_scan, zone, sc, priority, file);
 }
 
+static int vmscan_swappiness(struct scan_control *sc)
+{
+       if (scanning_global_lru(sc))
+               return vm_swappiness;
+       return mem_cgroup_swappiness(sc->mem_cgroup);
+}
+
 /*
  * Determine how aggressively the anon and file LRU lists should be
  * scanned.  The relative value of each set of LRU lists is determined
@@ -1789,8 +1835,8 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
         * With swappiness at 100, anonymous and file have the same priority.
         * This scanning priority is essentially the inverse of IO cost.
         */
-       anon_prio = sc->swappiness;
-       file_prio = 200 - sc->swappiness;
+       anon_prio = vmscan_swappiness(sc);
+       file_prio = 200 - vmscan_swappiness(sc);
 
        /*
         * OK, so we have swap space and a fair amount of page cache
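
With the field gone, the split between anonymous and file scanning pressure is driven by whichever value vmscan_swappiness() returns: the vm_swappiness sysctl for global reclaim, or the group's own setting via mem_cgroup_swappiness() for memcg reclaim. The arithmetic itself is unchanged; a quick standalone check of it (using 60, the usual vm_swappiness default) looks like this:

#include <stdio.h>

int main(void)
{
        int swappiness = 60;                    /* e.g. the vm_swappiness default */
        int anon_prio = swappiness;             /* 60  */
        int file_prio = 200 - swappiness;       /* 140 */

        /* The two priorities always sum to 200; at swappiness == 100 the
         * anon and file LRUs get equal weight, as the comment above says. */
        printf("anon_prio=%d file_prio=%d\n", anon_prio, file_prio);
        return 0;
}
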
@@ -2179,7 +2225,6 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
                .nr_to_reclaim = SWAP_CLUSTER_MAX,
                .may_unmap = 1,
                .may_swap = 1,
-               .swappiness = vm_swappiness,
                .order = order,
                .mem_cgroup = NULL,
                .nodemask = nodemask,
@@ -2203,7 +2248,6 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 
 unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
                                                gfp_t gfp_mask, bool noswap,
-                                               unsigned int swappiness,
                                                struct zone *zone,
                                                unsigned long *nr_scanned)
 {
@@ -2213,7 +2257,6 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
                .may_writepage = !laptop_mode,
                .may_unmap = 1,
                .may_swap = !noswap,
-               .swappiness = swappiness,
                .order = 0,
                .mem_cgroup = mem,
        };
@@ -2242,8 +2285,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
 
 unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
                                           gfp_t gfp_mask,
-                                          bool noswap,
-                                          unsigned int swappiness)
+                                          bool noswap)
 {
        struct zonelist *zonelist;
        unsigned long nr_reclaimed;
@@ -2253,7 +2295,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
                .may_unmap = 1,
                .may_swap = !noswap,
                .nr_to_reclaim = SWAP_CLUSTER_MAX,
-               .swappiness = swappiness,
                .order = 0,
                .mem_cgroup = mem_cont,
                .nodemask = NULL, /* we don't care the placement */
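
On the caller side, both memcg entry points lose their swappiness parameter; the per-group value is fetched inside vmscan via mem_cgroup_swappiness(sc->mem_cgroup) rather than being threaded through every call. A minimal sketch of a call site after this change — reclaim_from_memcg() is a made-up wrapper for illustration, not an existing kernel function:

/* Hypothetical wrapper, for illustration only. */
static unsigned long reclaim_from_memcg(struct mem_cgroup *memcg,
                                        gfp_t gfp_mask, bool noswap)
{
        /* No swappiness argument any more: vmscan looks it up itself. */
        return try_to_free_mem_cgroup_pages(memcg, gfp_mask, noswap);
}
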
@@ -2404,7 +2445,6 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
                 * we want to put equal scanning pressure on each zone.
                 */
                .nr_to_reclaim = ULONG_MAX,
-               .swappiness = vm_swappiness,
                .order = order,
                .mem_cgroup = NULL,
        };
@@ -2874,7 +2914,6 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
                .may_writepage = 1,
                .nr_to_reclaim = nr_to_reclaim,
                .hibernation_mode = 1,
-               .swappiness = vm_swappiness,
                .order = 0,
        };
        struct shrink_control shrink = {
@@ -3061,7 +3100,6 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
                .nr_to_reclaim = max_t(unsigned long, nr_pages,
                                       SWAP_CLUSTER_MAX),
                .gfp_mask = gfp_mask,
-               .swappiness = vm_swappiness,
                .order = order,
        };
        struct shrink_control shrink = {