mm: have order > 0 compaction start near a pageblock with free pages
[pandora-kernel.git] / mm / compaction.c
index e78cb96..bcce789 100644 (file)
@@ -383,6 +383,20 @@ static bool suitable_migration_target(struct page *page)
        return false;
 }
 
+/*
+ * Returns the start pfn of the last page block in a zone.  This is the starting
+ * point for full compaction of a zone.  Compaction searches for free pages from
+ * the end of each zone, while isolate_freepages_block scans forward inside each
+ * page block.
+ */
+static unsigned long start_free_pfn(struct zone *zone)
+{
+       unsigned long free_pfn;
+       free_pfn = zone->zone_start_pfn + zone->spanned_pages;
+       free_pfn &= ~(pageblock_nr_pages-1);
+       return free_pfn;
+}
+
 /*
  * Based on information in the current compact_control, find blocks
  * suitable for isolating free pages from and then isolate them.
@@ -422,17 +436,6 @@ static void isolate_freepages(struct zone *zone,
                                        pfn -= pageblock_nr_pages) {
                unsigned long isolated;
 
-               /*
-                * Skip ahead if another thread is compacting in the area
-                * simultaneously. If we wrapped around, we can only skip
-                * ahead if zone->compact_cached_free_pfn also wrapped to
-                * above our starting point.
-                */
-               if (cc->order > 0 && (!cc->wrapped ||
-                                     zone->compact_cached_free_pfn >
-                                     cc->start_free_pfn))
-                       pfn = min(pfn, zone->compact_cached_free_pfn);
-
                if (!pfn_valid(pfn))
                        continue;
 
@@ -474,7 +477,15 @@ static void isolate_freepages(struct zone *zone,
                 */
                if (isolated) {
                        high_pfn = max(high_pfn, pfn);
-                       if (cc->order > 0)
+
+                       /*
+                        * If the free scanner has wrapped, update
+                        * compact_cached_free_pfn to point to the highest
+                        * pageblock with free pages. This reduces excessive
+                        * scanning of full pageblocks near the end of the
+                        * zone
+                        * zone.
+                       if (cc->order > 0 && cc->wrapped)
                                zone->compact_cached_free_pfn = high_pfn;
                }
        }
@@ -484,6 +495,11 @@ static void isolate_freepages(struct zone *zone,
 
        cc->free_pfn = high_pfn;
        cc->nr_freepages = nr_freepages;
+
+       /* If compact_cached_free_pfn is reset then set it now */
+       if (cc->order > 0 && !cc->wrapped &&
+                       zone->compact_cached_free_pfn == start_free_pfn(zone))
+               zone->compact_cached_free_pfn = high_pfn;
 }
 
 /*
@@ -570,20 +586,6 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
        return ISOLATE_SUCCESS;
 }
 
-/*
- * Returns the start pfn of the last page block in a zone.  This is the starting
- * point for full compaction of a zone.  Compaction searches for free pages from
- * the end of each zone, while isolate_freepages_block scans forward inside each
- * page block.
- */
-static unsigned long start_free_pfn(struct zone *zone)
-{
-       unsigned long free_pfn;
-       free_pfn = zone->zone_start_pfn + zone->spanned_pages;
-       free_pfn &= ~(pageblock_nr_pages-1);
-       return free_pfn;
-}
-
 static int compact_finished(struct zone *zone,
                            struct compact_control *cc)
 {
@@ -861,7 +863,7 @@ static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
                if (cc->order > 0) {
                        int ok = zone_watermark_ok(zone, cc->order,
                                                low_wmark_pages(zone), 0, 0);
-                       if (ok && cc->order > zone->compact_order_failed)
+                       if (ok && cc->order >= zone->compact_order_failed)
                                zone->compact_order_failed = cc->order + 1;
                        /* Currently async compaction is never deferred. */
                        else if (!ok && cc->sync)