page allocator: smarter retry of costly-order allocations
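What this tree picks up here is the vmscan.c side of the change: do_try_to_free_pages() now takes its GFP mask from the scan_control instead of a separate argument, walks the zonelist through struct zoneref, and, most importantly for the commit title, returns the number of pages it reclaimed rather than a bare 0/1. That count lets the page allocator judge whether another reclaim pass is worth it before retrying a costly-order request. Below is a minimal, userspace-compilable sketch of such a retry decision; should_retry_alloc(), SKETCH_COSTLY_ORDER and the 1 << order cutoff are illustrative assumptions, not code from this patch (the page_alloc.c side is not shown here).

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the kernel's costly-order threshold. */
#define SKETCH_COSTLY_ORDER 3

/*
 * should_retry_alloc - decide whether to loop back into direct reclaim.
 * @order:           allocation order of the failing request
 * @pages_reclaimed: pages reclaimed so far across retries, i.e. the
 *                   accumulated return values of the reclaim calls
 *
 * Low orders always retry; costly orders stop retrying once reclaim has
 * already freed at least 1 << order pages without the allocation
 * succeeding, since more looping is unlikely to help.
 */
static bool should_retry_alloc(unsigned int order,
			       unsigned long pages_reclaimed)
{
	if (order <= SKETCH_COSTLY_ORDER)
		return true;
	return pages_reclaimed < (1UL << order);
}

int main(void)
{
	printf("order 2, reclaimed  1 -> retry=%d\n", should_retry_alloc(2, 1));
	printf("order 4, reclaimed  8 -> retry=%d\n", should_retry_alloc(4, 8));
	printf("order 4, reclaimed 32 -> retry=%d\n", should_retry_alloc(4, 32));
	return 0;
}

With a boolean return, the allocator could only tell "reclaim did something" from "reclaim did nothing"; returning the count is what makes a cutoff like the one sketched above possible at all.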
[pandora-kernel.git] / mm/vmscan.c
index 0515b8f..12e8627 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1251,7 +1251,7 @@ static unsigned long shrink_zones(int priority, struct zonelist *zonelist,
 {
        enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
        unsigned long nr_reclaimed = 0;
-       struct zone **z;
+       struct zoneref *z;
        struct zone *zone;
 
        sc->all_unreclaimable = 1;
@@ -1299,9 +1299,12 @@ static unsigned long shrink_zones(int priority, struct zonelist *zonelist,
  * hope that some of these pages can be written.  But if the allocating task
  * holds filesystem locks which prevent writeout this might not work, and the
  * allocation attempt will fail.
+ *
+ * returns:    0, if no pages reclaimed
+ *             else, the number of pages reclaimed
  */
 static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
-                                       gfp_t gfp_mask, struct scan_control *sc)
+                                       struct scan_control *sc)
 {
        int priority;
        int ret = 0;
@@ -1309,9 +1312,9 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
        unsigned long nr_reclaimed = 0;
        struct reclaim_state *reclaim_state = current->reclaim_state;
        unsigned long lru_pages = 0;
-       struct zone **z;
+       struct zoneref *z;
        struct zone *zone;
-       enum zone_type high_zoneidx = gfp_zone(gfp_mask);
+       enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
 
        if (scan_global_lru(sc))
                count_vm_event(ALLOCSTALL);
@@ -1339,7 +1342,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                 * over limit cgroups
                 */
                if (scan_global_lru(sc)) {
-                       shrink_slab(sc->nr_scanned, gfp_mask, lru_pages);
+                       shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages);
                        if (reclaim_state) {
                                nr_reclaimed += reclaim_state->reclaimed_slab;
                                reclaim_state->reclaimed_slab = 0;
@@ -1347,7 +1350,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                }
                total_scanned += sc->nr_scanned;
                if (nr_reclaimed >= sc->swap_cluster_max) {
-                       ret = 1;
+                       ret = nr_reclaimed;
                        goto out;
                }
 
@@ -1370,7 +1373,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
        }
        /* top priority shrink_caches still had more to do? don't OOM, then */
        if (!sc->all_unreclaimable && scan_global_lru(sc))
-               ret = 1;
+               ret = nr_reclaimed;
 out:
        /*
         * Now that we've scanned all the zones at this priority level, note
@@ -1410,7 +1413,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
                .isolate_pages = isolate_pages_global,
        };
 
-       return do_try_to_free_pages(zonelist, gfp_mask, &sc);
+       return do_try_to_free_pages(zonelist, &sc);
 }
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
@@ -1419,7 +1422,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
                                                gfp_t gfp_mask)
 {
        struct scan_control sc = {
-               .gfp_mask = gfp_mask,
                .may_writepage = !laptop_mode,
                .may_swap = 1,
                .swap_cluster_max = SWAP_CLUSTER_MAX,
@@ -1429,12 +1431,11 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
                .isolate_pages = mem_cgroup_isolate_pages,
        };
        struct zonelist *zonelist;
-       int target_zone = gfp_zone(GFP_HIGHUSER_MOVABLE);
 
-       zonelist = &NODE_DATA(numa_node_id())->node_zonelists[target_zone];
-       if (do_try_to_free_pages(zonelist, sc.gfp_mask, &sc))
-               return 1;
-       return 0;
+       sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
+                       (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
+       zonelist = NODE_DATA(numa_node_id())->node_zonelists;
+       return do_try_to_free_pages(zonelist, &sc);
 }
 #endif
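In try_to_free_mem_cgroup_pages() above, sc.gfp_mask is rebuilt by keeping only the caller's reclaim-behaviour bits (gfp_mask & GFP_RECLAIM_MASK) and taking the remaining bits from GFP_HIGHUSER_MOVABLE, so cgroup reclaim scans the node's full zonelist while still honouring the caller's reclaim constraints. Here is a standalone sketch of that bit-splitting idea, using made-up flag values rather than the real gfp.h definitions:

#include <stdio.h>

/* Made-up stand-ins for GFP bits; the real values live in gfp.h. */
#define SKETCH_WAIT     0x01u   /* reclaim-behaviour bit (may sleep)  */
#define SKETCH_IO       0x02u   /* reclaim-behaviour bit (may do I/O) */
#define SKETCH_HIGHMEM  0x10u   /* placement bit (highmem allowed)    */
#define SKETCH_MOVABLE  0x20u   /* placement bit (movable allowed)    */

#define SKETCH_RECLAIM_MASK     (SKETCH_WAIT | SKETCH_IO)
#define SKETCH_HIGHUSER_MOVABLE (SKETCH_WAIT | SKETCH_IO | \
				 SKETCH_HIGHMEM | SKETCH_MOVABLE)

int main(void)
{
	unsigned int caller_mask = SKETCH_WAIT;	/* caller allows sleeping, no I/O */
	unsigned int sc_mask;

	/*
	 * Same shape as the patch: reclaim-behaviour bits come from the
	 * caller, everything else from the HIGHUSER_MOVABLE template.
	 */
	sc_mask = (caller_mask & SKETCH_RECLAIM_MASK) |
		  (SKETCH_HIGHUSER_MOVABLE & ~SKETCH_RECLAIM_MASK);

	printf("caller=0x%02x -> sc.gfp_mask=0x%02x\n", caller_mask, sc_mask);
	return 0;
}

With these invented values the program prints caller=0x01 -> sc.gfp_mask=0x31: the caller's "no I/O" restriction survives, while the placement bits are taken from the movable-highmem template that the cgroup zonelist walk expects.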