mm/vmscan: push lruvec pointer into get_scan_count()
diff --git a/mm/compaction.c b/mm/compaction.c
index da7d35e..74e1b38 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -226,7 +226,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
        unsigned long last_pageblock_nr = 0, pageblock_nr;
        unsigned long nr_scanned = 0, nr_isolated = 0;
        struct list_head *migratelist = &cc->migratepages;
-       isolate_mode_t mode = ISOLATE_ACTIVE|ISOLATE_INACTIVE;
+       isolate_mode_t mode = 0;
 
        /*
         * Ensure that there are not too many pages isolated from the LRU
@@ -235,7 +235,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
         */
        while (unlikely(too_many_isolated(zone))) {
                /* async migration should just abort */
-               if (!cc->sync)
+               if (cc->mode != COMPACT_SYNC)
                        return 0;
 
                congestion_wait(BLK_RW_ASYNC, HZ/10);
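
cc->mode and the COMPACT_* constants come from the companion mm/internal.h
change, which this file's diff does not show.  A sketch of what that header
change presumably looks like, with the names taken from their uses in this
patch:

    /* assumed shape of the mm/internal.h side of this series */
    enum compact_mode {
            COMPACT_ASYNC_MOVABLE,          /* default async pass */
            COMPACT_ASYNC_UNMOVABLE,        /* retry pass; may rescue pageblocks */
            COMPACT_SYNC,                   /* full synchronous compaction */
    };

    struct compact_control {
            /* ... existing fields ... */
            enum compact_mode mode;                 /* replaces the old bool sync */
            unsigned long nr_pageblocks_skipped;    /* set by isolate_freepages() */
    };
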
@@ -303,7 +303,8 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
                 * satisfies the allocation
                 */
                pageblock_nr = low_pfn >> pageblock_order;
-               if (!cc->sync && last_pageblock_nr != pageblock_nr &&
+               if (cc->mode != COMPACT_SYNC &&
+                   last_pageblock_nr != pageblock_nr &&
                    !migrate_async_suitable(get_pageblock_migratetype(page))) {
                        low_pfn += pageblock_nr_pages;
                        low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1;
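
The skip arithmetic above is easy to misread: ALIGN() rounds up, and the
trailing -1 compensates for the low_pfn++ done by the enclosing for loop.
A minimal userspace sketch of the computation, assuming the common x86
configuration (pageblock_nr_pages == 512, i.e. 2 MB pageblocks) and that the
check fires at the first pfn of a block:

    #include <stdio.h>

    #define pageblock_nr_pages 512UL        /* assumed: x86, 2 MB pageblocks */
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
            /* the scanner sits at the first pfn of pageblock 5 */
            unsigned long low_pfn = 5 * pageblock_nr_pages;

            low_pfn += pageblock_nr_pages;                    /* -> 3072 */
            low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1; /* -> 3071 */

            /* the for loop's low_pfn++ then lands on pageblock 6 */
            printf("scan resumes at pfn %lu (pageblock %lu)\n",
                   low_pfn + 1, (low_pfn + 1) / pageblock_nr_pages);
            return 0;
    }
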
@@ -324,11 +325,11 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
                        continue;
                }
 
-               if (!cc->sync)
+               if (cc->mode != COMPACT_SYNC)
                        mode |= ISOLATE_ASYNC_MIGRATE;
 
                /* Try isolate the page */
-               if (__isolate_lru_page(page, mode, 0) != 0)
+               if (__isolate_lru_page(page, mode) != 0)
                        continue;
 
                VM_BUG_ON(PageTransCompound(page));
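
Both isolation changes above depend on the companion mm/vmscan work: the lru
type checks (the old ISOLATE_ACTIVE|ISOLATE_INACTIVE bits) moved out of
__isolate_lru_page(), which also lost its third @file argument, so a mode of
0 now means "no restrictions".  A simplified sketch of the resulting call
pattern (the real loop also updates lru state and the isolation counters):

    isolate_mode_t mode = 0;                /* nothing to filter by default */

    if (cc->mode != COMPACT_SYNC)
            mode |= ISOLATE_ASYNC_MIGRATE;  /* only pages cheap to move async */

    if (__isolate_lru_page(page, mode) == 0)
            list_add(&page->lru, &cc->migratepages);
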
@@ -357,27 +358,102 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 
 #endif /* CONFIG_COMPACTION || CONFIG_CMA */
 #ifdef CONFIG_COMPACTION
+/*
+ * Returns true if the MIGRATE_UNMOVABLE pageblock was successfully
+ * converted to MIGRATE_MOVABLE, false otherwise.
+ */
+static bool rescue_unmovable_pageblock(struct page *page)
+{
+       unsigned long pfn, start_pfn, end_pfn;
+       struct page *start_page, *end_page;
+
+       pfn = page_to_pfn(page);
+       start_pfn = pfn & ~(pageblock_nr_pages - 1);
+       end_pfn = start_pfn + pageblock_nr_pages;
+
+       start_page = pfn_to_page(start_pfn);
+       end_page = pfn_to_page(end_pfn);
+
+       /* Do not deal with pageblocks that overlap zones */
+       if (page_zone(start_page) != page_zone(end_page))
+               return false;
+
+       for (page = start_page, pfn = start_pfn; page < end_page; pfn++,
+                                                                 page++) {
+               if (!pfn_valid_within(pfn))
+                       continue;
+
+               if (PageBuddy(page)) {
+                       int order = page_order(page);
+
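+                       /* skip the free chunk; -1 offsets the loop's ++ */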
+                       pfn += (1 << order) - 1;
+                       page += (1 << order) - 1;
+
+                       continue;
+               } else if (page_count(page) == 0 || PageLRU(page))
+                       continue;
+
+               return false;
+       }
+
+       /* the loop left page at or past end_page; flag the block by start_page */
+       set_pageblock_migratetype(start_page, MIGRATE_MOVABLE);
+       move_freepages_block(page_zone(start_page), start_page, MIGRATE_MOVABLE);
+       return true;
+}
 
-/* Returns true if the page is within a block suitable for migration to */
-static bool suitable_migration_target(struct page *page)
+enum smt_result {
+       GOOD_AS_MIGRATION_TARGET,
+       FAIL_UNMOVABLE_TARGET,
+       FAIL_BAD_TARGET,
+};
+
+/*
+ * Returns GOOD_AS_MIGRATION_TARGET if the page is within a block
+ * suitable for migration to, FAIL_UNMOVABLE_TARGET if the page
+ * is within a MIGRATE_UNMOVABLE block, FAIL_BAD_TARGET otherwise.
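+ *
+ * Summary of the cases below ("rescue" means rescue_unmovable_pageblock()
+ * managed to convert the block):
+ *
+ *   pageblock           ASYNC_MOVABLE   ASYNC_UNMOVABLE   SYNC
+ *   ISOLATE/RESERVE     bad             bad               bad
+ *   free, high order    good            good              good
+ *   MOVABLE/CMA         good            bad               good
+ *   UNMOVABLE           unmovable       good if rescue    good if rescue
+ *   anything else       bad             bad               bad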
+ */
+static enum smt_result suitable_migration_target(struct page *page,
+                                     struct compact_control *cc)
 {
 
        int migratetype = get_pageblock_migratetype(page);
 
        /* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
        if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
-               return false;
+               return FAIL_BAD_TARGET;
 
        /* If the page is a large free page, then allow migration */
        if (PageBuddy(page) && page_order(page) >= pageblock_order)
-               return true;
+               return GOOD_AS_MIGRATION_TARGET;
 
        /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
-       if (migrate_async_suitable(migratetype))
-               return true;
+       if (cc->mode != COMPACT_ASYNC_UNMOVABLE &&
+           migrate_async_suitable(migratetype))
+               return GOOD_AS_MIGRATION_TARGET;
+
+       if (cc->mode == COMPACT_ASYNC_MOVABLE &&
+           migratetype == MIGRATE_UNMOVABLE)
+               return FAIL_UNMOVABLE_TARGET;
+
+       if (cc->mode != COMPACT_ASYNC_MOVABLE &&
+           migratetype == MIGRATE_UNMOVABLE &&
+           rescue_unmovable_pageblock(page))
+               return GOOD_AS_MIGRATION_TARGET;
 
        /* Otherwise skip the block */
-       return false;
+       return FAIL_BAD_TARGET;
 }
 
 /*
@@ -410,6 +474,13 @@ static void isolate_freepages(struct zone *zone,
 
        zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
 
+       /*
+        * isolate_freepages() may be called more than once during a
+        * single compact_zone_order() run, and we want only the most
+        * recent count.
+        */
+       cc->nr_pageblocks_skipped = 0;
+
        /*
         * Isolate free pages until enough are available to migrate the
         * pages on cc->migratepages. We stop searching if the migrate
@@ -418,6 +489,7 @@ static void isolate_freepages(struct zone *zone,
        for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
                                        pfn -= pageblock_nr_pages) {
                unsigned long isolated;
+               enum smt_result ret;
 
                if (!pfn_valid(pfn))
                        continue;
@@ -434,9 +506,12 @@ static void isolate_freepages(struct zone *zone,
                        continue;
 
                /* Check the block is suitable for migration */
-               if (!suitable_migration_target(page))
+               ret = suitable_migration_target(page, cc);
+               if (ret != GOOD_AS_MIGRATION_TARGET) {
+                       if (ret == FAIL_UNMOVABLE_TARGET)
+                               cc->nr_pageblocks_skipped++;
                        continue;
-
+               }
                /*
                 * Found a block suitable for isolating free pages from. Now
                 * we disabled interrupts, double check things are ok and
@@ -445,12 +520,14 @@ static void isolate_freepages(struct zone *zone,
                 */
                isolated = 0;
                spin_lock_irqsave(&zone->lock, flags);
-               if (suitable_migration_target(page)) {
+               ret = suitable_migration_target(page, cc);
+               if (ret == GOOD_AS_MIGRATION_TARGET) {
                        end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
                        isolated = isolate_freepages_block(pfn, end_pfn,
                                                           freelist, false);
                        nr_freepages += isolated;
-               }
+               } else if (ret == FAIL_UNMOVABLE_TARGET)
+                       cc->nr_pageblocks_skipped++;
                spin_unlock_irqrestore(&zone->lock, flags);
 
                /*
@@ -682,8 +759,9 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 
                nr_migrate = cc->nr_migratepages;
                err = migrate_pages(&cc->migratepages, compaction_alloc,
-                               (unsigned long)cc, false,
-                               cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC);
+                       (unsigned long)&cc->freepages, false,
+                       (cc->mode == COMPACT_SYNC) ? MIGRATE_SYNC_LIGHT
+                                                  : MIGRATE_ASYNC);
                update_nr_listpages(cc);
                nr_remaining = cc->nr_migratepages;
 
@@ -712,7 +790,8 @@ out:
 
 static unsigned long compact_zone_order(struct zone *zone,
                                 int order, gfp_t gfp_mask,
-                                bool sync)
+                                enum compact_mode mode,
+                                unsigned long *nr_pageblocks_skipped)
 {
        struct compact_control cc = {
                .nr_freepages = 0,
@@ -720,12 +799,17 @@ static unsigned long compact_zone_order(struct zone *zone,
                .order = order,
                .migratetype = allocflags_to_migratetype(gfp_mask),
                .zone = zone,
-               .sync = sync,
+               .mode = mode,
        };
+       unsigned long rc;
+
        INIT_LIST_HEAD(&cc.freepages);
        INIT_LIST_HEAD(&cc.migratepages);
 
-       return compact_zone(zone, &cc);
+       rc = compact_zone(zone, &cc);
+       *nr_pageblocks_skipped = cc.nr_pageblocks_skipped;
+
+       return rc;
 }
 
 int sysctl_extfrag_threshold = 500;
@@ -750,6 +834,8 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
        struct zoneref *z;
        struct zone *zone;
        int rc = COMPACT_SKIPPED;
+       unsigned long nr_pageblocks_skipped;
+       enum compact_mode mode;
 
        /*
         * Check whether it is worth even starting compaction. The order check is
@@ -766,12 +852,22 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
                                                                nodemask) {
                int status;
 
-               status = compact_zone_order(zone, order, gfp_mask, sync);
+               mode = sync ? COMPACT_SYNC : COMPACT_ASYNC_MOVABLE;
+retry:
+               status = compact_zone_order(zone, order, gfp_mask, mode,
+                                               &nr_pageblocks_skipped);
                rc = max(status, rc);
 
                /* If a normal allocation would succeed, stop compacting */
                if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
                        break;
+
+               if (rc == COMPACT_COMPLETE && mode == COMPACT_ASYNC_MOVABLE) {
+                       if (nr_pageblocks_skipped) {
+                               mode = COMPACT_ASYNC_UNMOVABLE;
+                               goto retry;
+                       }
+               }
        }
 
        return rc;
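
This retry is the consumer of nr_pageblocks_skipped: an async movable-only
pass that runs to completion without satisfying the watermark, but that had
to skip MIGRATE_UNMOVABLE pageblocks, is rerun once for the zone in
COMPACT_ASYNC_UNMOVABLE mode, in which suitable_migration_target() is allowed
to try rescue_unmovable_pageblock().  In outline, per zone:

    mode = sync ? COMPACT_SYNC : COMPACT_ASYNC_MOVABLE;
    status = compact_zone_order(zone, order, gfp_mask, mode,
                                &nr_pageblocks_skipped);
    /*
     * watermark ok -> stop, the allocation can now succeed;
     * COMPACT_COMPLETE in COMPACT_ASYNC_MOVABLE mode with pageblocks
     * skipped -> retry the same zone in COMPACT_ASYNC_UNMOVABLE mode.
     */
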
@@ -805,7 +901,7 @@ static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
                        if (ok && cc->order > zone->compact_order_failed)
                                zone->compact_order_failed = cc->order + 1;
                        /* Currently async compaction is never deferred. */
-                       else if (!ok && cc->sync)
+                       else if (!ok && cc->mode == COMPACT_SYNC)
                                defer_compaction(zone, cc->order);
                }
 
@@ -820,7 +916,7 @@ int compact_pgdat(pg_data_t *pgdat, int order)
 {
        struct compact_control cc = {
                .order = order,
-               .sync = false,
+               .mode = COMPACT_ASYNC_MOVABLE,
        };
 
        return __compact_pgdat(pgdat, &cc);
@@ -830,7 +926,7 @@ static int compact_node(int nid)
 {
        struct compact_control cc = {
                .order = -1,
-               .sync = true,
+               .mode = COMPACT_SYNC,
        };
 
        return __compact_pgdat(NODE_DATA(nid), &cc);