mm: have order > 0 compaction start near a pageblock with free pages
diff --git a/mm/compaction.c b/mm/compaction.c
index 2f42d95..bcce789 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -383,6 +383,20 @@ static bool suitable_migration_target(struct page *page)
        return false;
 }
 
+/*
+ * Returns the start pfn of the last page block in a zone.  This is the starting
+ * point for full compaction of a zone.  Compaction searches for free pages from
+ * the end of each zone, while isolate_freepages_block scans forward inside each
+ * page block.
+ */
+static unsigned long start_free_pfn(struct zone *zone)
+{
+       unsigned long free_pfn;
+       free_pfn = zone->zone_start_pfn + zone->spanned_pages;
+       free_pfn &= ~(pageblock_nr_pages-1);
+       return free_pfn;
+}
+
 /*
  * Based on information in the current compact_control, find blocks
  * suitable for isolating free pages from and then isolate them.
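
As a quick illustration of what start_free_pfn() computes, here is a minimal
userspace sketch. The pageblock size (512 pages, i.e. order-9 pageblocks with
4 KiB pages) and the zone geometry are made-up values, and the mask trick
relies on pageblock_nr_pages being a power of two, which it is in the kernel:

#include <stdio.h>

#define PAGEBLOCK_NR_PAGES 512UL        /* assumed: order-9 pageblocks, 4 KiB pages */

/*
 * Round the zone end pfn down to a pageblock boundary, as start_free_pfn()
 * does.  Works because PAGEBLOCK_NR_PAGES is a power of two, so
 * ~(PAGEBLOCK_NR_PAGES - 1) masks off the low bits.
 */
static unsigned long start_free_pfn_demo(unsigned long zone_start_pfn,
                                         unsigned long spanned_pages)
{
        unsigned long free_pfn = zone_start_pfn + spanned_pages;

        free_pfn &= ~(PAGEBLOCK_NR_PAGES - 1);
        return free_pfn;
}

int main(void)
{
        /* Hypothetical zone: starts at pfn 2048, spans 1000003 pages. */
        printf("free scanner starts at pfn %lu\n",
               start_free_pfn_demo(2048, 1000003));
        return 0;
}
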
@@ -461,8 +475,19 @@ static void isolate_freepages(struct zone *zone,
                 * looking for free pages, the search will restart here as
                 * page migration may have returned some pages to the allocator
                 */
-               if (isolated)
+               if (isolated) {
                        high_pfn = max(high_pfn, pfn);
+
+                       /*
+                        * If the free scanner has wrapped, update
+                        * compact_cached_free_pfn to point to the highest
+                        * pageblock with free pages. This reduces excessive
+                        * scanning of full pageblocks near the end of the
+                        * zone.
+                        */
+                       if (cc->order > 0 && cc->wrapped)
+                               zone->compact_cached_free_pfn = high_pfn;
+               }
        }
 
        /* split_free_page does not map the pages */
@@ -470,6 +495,11 @@ static void isolate_freepages(struct zone *zone,
 
        cc->free_pfn = high_pfn;
        cc->nr_freepages = nr_freepages;
+
+       /* If compact_cached_free_pfn is still at its reset value, set it now */
+       if (cc->order > 0 && !cc->wrapped &&
+                       zone->compact_cached_free_pfn == start_free_pfn(zone))
+               zone->compact_cached_free_pfn = high_pfn;
 }
 
 /*
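
Together with the previous hunk, this means zone->compact_cached_free_pfn is
maintained in two places, which is easy to lose track of. Below is a
simplified, standalone sketch of the two update paths; the zone_demo struct,
RESET_PFN, and the pfn values are invented stand-ins, not kernel types:

#include <stdio.h>

struct zone_demo {
        unsigned long compact_cached_free_pfn;
};

#define RESET_PFN 1024000UL     /* stand-in for start_free_pfn(zone) */

int main(void)
{
        struct zone_demo zone = { .compact_cached_free_pfn = RESET_PFN };
        int wrapped = 0;                        /* cc->wrapped */
        unsigned long high_pfn = 512000;        /* highest pageblock that had free pages */

        /*
         * Path 1 (inside the scan loop): a wrapped scanner drags the cache
         * along to the highest pageblock that still contained free pages.
         */
        if (wrapped)
                zone.compact_cached_free_pfn = high_pfn;

        /*
         * Path 2 (after the scan): a non-wrapped scan seeds the cache, but
         * only if it is still sitting at its reset value.
         */
        if (!wrapped && zone.compact_cached_free_pfn == RESET_PFN)
                zone.compact_cached_free_pfn = high_pfn;

        printf("cached free pfn: %lu\n", zone.compact_cached_free_pfn);
        return 0;
}

Either way the cache ends up pointing at the highest pageblock known to still
contain free pages, so the next order > 0 compaction can skip the full
pageblocks above it.
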
@@ -565,8 +595,26 @@ static int compact_finished(struct zone *zone,
        if (fatal_signal_pending(current))
                return COMPACT_PARTIAL;
 
-       /* Compaction run completes if the migrate and free scanner meet */
-       if (cc->free_pfn <= cc->migrate_pfn)
+       /*
+        * A full (order == -1) compaction run starts at the beginning and
+        * end of a zone; it completes when the migrate and free scanner meet.
+        * A partial (order > 0) compaction can start with the free scanner
+        * at a random point in the zone, and may have to restart.
+        */
+       if (cc->free_pfn <= cc->migrate_pfn) {
+               if (cc->order > 0 && !cc->wrapped) {
+                       /* We started partway through; restart at the end. */
+                       unsigned long free_pfn = start_free_pfn(zone);
+                       zone->compact_cached_free_pfn = free_pfn;
+                       cc->free_pfn = free_pfn;
+                       cc->wrapped = 1;
+                       return COMPACT_CONTINUE;
+               }
+               return COMPACT_COMPLETE;
+       }
+
+       /* We wrapped around and ended up where we started. */
+       if (cc->wrapped && cc->free_pfn <= cc->start_free_pfn)
                return COMPACT_COMPLETE;
 
        /*
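
The rewritten exit condition is effectively a three-way check: scanners meet
before wrapping (wrap the free scanner and continue), scanners meet after
wrapping (complete), or the wrapped free scanner comes back around to its
original starting point (complete). A hedged simulation of that flow; DEMO_*
and the pfn values are stand-ins for the kernel's COMPACT_* results and a real
zone layout:

#include <stdio.h>

enum { DEMO_CONTINUE, DEMO_COMPLETE };

struct cc_demo {
        unsigned long free_pfn, migrate_pfn, start_free_pfn;
        int order, wrapped;
};

#define ZONE_END_PFN 1024000UL          /* stand-in for start_free_pfn(zone) */

static int finished_demo(struct cc_demo *cc)
{
        if (cc->free_pfn <= cc->migrate_pfn) {
                if (cc->order > 0 && !cc->wrapped) {
                        /* Started partway through: wrap to the zone end. */
                        cc->free_pfn = ZONE_END_PFN;
                        cc->wrapped = 1;
                        return DEMO_CONTINUE;
                }
                return DEMO_COMPLETE;
        }
        /* Wrapped and back at (or below) the original start point. */
        if (cc->wrapped && cc->free_pfn <= cc->start_free_pfn)
                return DEMO_COMPLETE;
        return DEMO_CONTINUE;
}

int main(void)
{
        struct cc_demo cc = {
                .free_pfn = 300000, .migrate_pfn = 300000,
                .start_free_pfn = 300000, .order = 3,
        };

        printf("scanners meet, not wrapped: %s\n",
               finished_demo(&cc) == DEMO_CONTINUE ? "continue" : "complete");

        /* The wrapped free scanner has moved back below its start point. */
        cc.free_pfn = 299000;
        cc.migrate_pfn = 250000;
        printf("wrapped past start point  : %s\n",
               finished_demo(&cc) == DEMO_COMPLETE ? "complete" : "continue");
        return 0;
}
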
@@ -664,8 +712,15 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 
        /* Setup to move all movable pages to the end of the zone */
        cc->migrate_pfn = zone->zone_start_pfn;
-       cc->free_pfn = cc->migrate_pfn + zone->spanned_pages;
-       cc->free_pfn &= ~(pageblock_nr_pages-1);
+
+       if (cc->order > 0) {
+               /* Incremental compaction. Start where the last one stopped. */
+               cc->free_pfn = zone->compact_cached_free_pfn;
+               cc->start_free_pfn = cc->free_pfn;
+       } else {
+               /* Order == -1 starts at the end of the zone. */
+               cc->free_pfn = start_free_pfn(zone);
+       }
 
        migrate_prep_local();
 
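
For clarity, the setup now chooses between two starting positions for the free
scanner. A minimal sketch under assumed values (simplified zone struct,
pageblock_nr_pages == 512); order == -1 stands in for the full-compaction case
as in the kernel:

#include <stdio.h>

#define PB_NR_PAGES 512UL       /* assumed pageblock_nr_pages */

struct zone_demo {
        unsigned long zone_start_pfn, spanned_pages;
        unsigned long compact_cached_free_pfn;
};

/* Same rounding as start_free_pfn() above. */
static unsigned long zone_end_pageblock(const struct zone_demo *z)
{
        return (z->zone_start_pfn + z->spanned_pages) & ~(PB_NR_PAGES - 1);
}

int main(void)
{
        struct zone_demo z = {
                .zone_start_pfn = 0,
                .spanned_pages = 1UL << 20,
                .compact_cached_free_pfn = 700416,      /* left by a prior run */
        };
        int order = 3;                  /* -1 would mean full compaction */
        unsigned long free_pfn;

        /*
         * order > 0: resume where the previous run's cache points;
         * order == -1: start from the last pageblock of the zone.
         */
        free_pfn = order > 0 ? z.compact_cached_free_pfn
                             : zone_end_pageblock(&z);
        printf("free scanner starts at pfn %lu\n", free_pfn);
        return 0;
}
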
@@ -808,7 +863,7 @@ static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
                if (cc->order > 0) {
                        int ok = zone_watermark_ok(zone, cc->order,
                                                low_wmark_pages(zone), 0, 0);
-                       if (ok && cc->order > zone->compact_order_failed)
+                       if (ok && cc->order >= zone->compact_order_failed)
                                zone->compact_order_failed = cc->order + 1;
                        /* Currently async compaction is never deferred. */
                        else if (!ok && cc->sync)
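
The '>' to '>=' change is subtle: compact_order_failed is the lowest order
currently treated as failed, so a success at exactly that order must push the
threshold past it, or that order stays deferred despite having just succeeded.
A tiny sketch of the off-by-one; the surrounding harness is invented, only the
two comparisons mirror the kernel code:

#include <stdio.h>

int main(void)
{
        int compact_order_failed = 3;   /* orders >= 3 currently deferred */
        int order = 3;                  /* an order-3 compaction just succeeded */

        /*
         * Old test: 'order > compact_order_failed' is false for order 3, so
         * the threshold never moves and order 3 stays deferred even though
         * it just succeeded.
         */
        if (order > compact_order_failed)
                compact_order_failed = order + 1;
        printf("with '>' : compact_order_failed = %d\n", compact_order_failed);

        /*
         * New test: '>=' pushes the threshold past the order that just
         * succeeded, so it is no longer deferred.
         */
        if (order >= compact_order_failed)
                compact_order_failed = order + 1;
        printf("with '>=': compact_order_failed = %d\n", compact_order_failed);
        return 0;
}
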