Revert "xen/pv-on-hvm kexec: add xs_reset_watches to shutdown watches from old kernel"
diff --git a/mm/vmscan.c b/mm/vmscan.c
index f51a33e..a1893c0 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1767,7 +1767,7 @@ static int inactive_anon_is_low(struct zone *zone, struct scan_control *sc)
        if (scanning_global_lru(sc))
                low = inactive_anon_is_low_global(zone);
        else
-               low = mem_cgroup_inactive_anon_is_low(sc->mem_cgroup);
+               low = mem_cgroup_inactive_anon_is_low(sc->mem_cgroup, zone);
        return low;
 }
 #else
@@ -1810,7 +1810,7 @@ static int inactive_file_is_low(struct zone *zone, struct scan_control *sc)
        if (scanning_global_lru(sc))
                low = inactive_file_is_low_global(zone);
        else
-               low = mem_cgroup_inactive_file_is_low(sc->mem_cgroup);
+               low = mem_cgroup_inactive_file_is_low(sc->mem_cgroup, zone);
        return low;
 }
 
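Both hunks above thread the zone through to the memcg helpers so the inactive/active ratio is judged per zone instead of across the whole memcg. A minimal sketch of what a zone-aware check can look like; memcg_zone_nr_lru_pages() and the memcg->inactive_ratio field are illustrative assumptions, not the real memcontrol.c interface:

/*
 * Sketch only: evaluate the anon inactive/active balance for one
 * zone's slice of a memcg.  Helper names are hypothetical.
 */
static int mem_cgroup_inactive_anon_is_low_sketch(struct mem_cgroup *memcg,
						  struct zone *zone)
{
	unsigned long inactive, active;

	/* hypothetical per-zone LRU counters for this memcg */
	inactive = memcg_zone_nr_lru_pages(memcg, zone, LRU_INACTIVE_ANON);
	active = memcg_zone_nr_lru_pages(memcg, zone, LRU_ACTIVE_ANON);

	/* "low" when inactive falls below the target ratio of active */
	return inactive * memcg->inactive_ratio < active;
}
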
@@ -2103,14 +2103,19 @@ restart:
  *
  * If a zone is deemed to be full of pinned pages then just give it a light
  * scan then give up on it.
+ *
+ * This function returns true if a zone is being reclaimed for a costly
+ * high-order allocation and compaction is either ready to begin or deferred.
+ * This indicates to the caller that it should retry the allocation or fail.
  */
-static void shrink_zones(int priority, struct zonelist *zonelist,
+static bool shrink_zones(int priority, struct zonelist *zonelist,
                                        struct scan_control *sc)
 {
        struct zoneref *z;
        struct zone *zone;
        unsigned long nr_soft_reclaimed;
        unsigned long nr_soft_scanned;
+       bool should_abort_reclaim = false;
 
        for_each_zone_zonelist_nodemask(zone, z, zonelist,
                                        gfp_zone(sc->gfp_mask), sc->nodemask) {
@@ -2125,6 +2130,23 @@ static void shrink_zones(int priority, struct zonelist *zonelist,
                                continue;
                        if (zone->all_unreclaimable && priority != DEF_PRIORITY)
                                continue;       /* Let kswapd poll it */
+                       if (COMPACTION_BUILD) {
+                               /*
+                                * If we already have plenty of memory free for
+                                * compaction in this zone, don't free any more.
+                                * Even though compaction is invoked for any
+                                * non-zero order, only frequent costly order
+                                * reclamation is disruptive enough to become a
+                                * noticeable problem, like transparent huge page
+                                * allocations.
+                                */
+                               if (sc->order > PAGE_ALLOC_COSTLY_ORDER &&
+                                       (compaction_suitable(zone, sc->order) ||
+                                        compaction_deferred(zone))) {
+                                       should_abort_reclaim = true;
+                                       continue;
+                               }
+                       }
                        /*
                         * This steals pages from memory cgroups over softlimit
                         * and returns the number of reclaimed pages and
@@ -2142,6 +2164,8 @@ static void shrink_zones(int priority, struct zonelist *zonelist,
 
                shrink_zone(priority, zone, sc);
        }
+
+       return should_abort_reclaim;
 }
 
 static bool zone_reclaimable(struct zone *zone)
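The new bail-out in shrink_zones() leans on two existing compaction predicates. A standalone sketch of the decision, assuming the signatures used above: compaction_suitable(zone, order) is nonzero once the zone has enough free memory for compaction to run, and compaction_deferred(zone) is true while compaction is backing off after recent failures:

/* Sketch only: restates the abort predicate from shrink_zones() above. */
static bool reclaim_should_yield_to_compaction(struct zone *zone, int order)
{
	/* cheap orders keep reclaiming; they are not disruptive enough */
	if (order <= PAGE_ALLOC_COSTLY_ORDER)
		return false;

	/*
	 * Either compaction already has the free pages it needs, or it
	 * is deferred and more reclaim would not change its mind.
	 */
	return compaction_suitable(zone, order) || compaction_deferred(zone);
}
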
@@ -2206,7 +2230,9 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                sc->nr_scanned = 0;
                if (!priority)
                        disable_swap_token(sc->mem_cgroup);
-               shrink_zones(priority, zonelist, sc);
+               if (shrink_zones(priority, zonelist, sc))
+                       break;
+
                /*
                 * Don't shrink slabs when reclaiming memory from
                 * over limit cgroups
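On the caller side the returned bool turns the priority loop into an early exit. A simplified sketch of the resulting control flow in do_try_to_free_pages(), with the slab-shrinking and accounting details elided:

for (priority = DEF_PRIORITY; priority >= 0; priority--) {
	sc->nr_scanned = 0;
	if (!priority)
		disable_swap_token(sc->mem_cgroup);
	/* compaction can take over, so stop reclaiming immediately */
	if (shrink_zones(priority, zonelist, sc))
		break;
	/* ... shrink slab, check sc->nr_reclaimed, wake flushers ... */
}
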
@@ -2240,7 +2266,8 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                 */
                writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2;
                if (total_scanned > writeback_threshold) {
-                       wakeup_flusher_threads(laptop_mode ? 0 : total_scanned);
+                       wakeup_flusher_threads(laptop_mode ? 0 : total_scanned,
+                                               WB_REASON_TRY_TO_FREE_PAGES);
                        sc->may_writepage = 1;
                }
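
The trigger above fires once total_scanned passes 1.5x the reclaim target (nr_to_reclaim + nr_to_reclaim / 2); the new WB_REASON_TRY_TO_FREE_PAGES argument only annotates the wakeup for writeback tracepoints. A worked sketch, assuming the common direct-reclaim target of SWAP_CLUSTER_MAX (32) pages:

/*
 * Example: with sc->nr_to_reclaim == 32, the threshold is 32 + 16 = 48
 * scanned pages.  In laptop_mode the wakeup asks for everything
 * (nr_pages == 0) so the disk is not spun up repeatedly.
 */
writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2;
if (total_scanned > writeback_threshold) {
	wakeup_flusher_threads(laptop_mode ? 0 : total_scanned,
				WB_REASON_TRY_TO_FREE_PAGES);
	sc->may_writepage = 1;
}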