Merge branch 'devicetree/merge' into spi/merge
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 10ebd74..f5d90de 100644
 #include <trace/events/vmscan.h>
 
 /*
- * lumpy_mode determines how the inactive list is shrunk
- * LUMPY_MODE_SINGLE: Reclaim only order-0 pages
- * LUMPY_MODE_ASYNC:  Do not block
- * LUMPY_MODE_SYNC:   Allow blocking e.g. call wait_on_page_writeback
- * LUMPY_MODE_CONTIGRECLAIM: For high-order allocations, take a reference
+ * reclaim_mode determines how the inactive list is shrunk
+ * RECLAIM_MODE_SINGLE: Reclaim only order-0 pages
+ * RECLAIM_MODE_ASYNC:  Do not block
+ * RECLAIM_MODE_SYNC:   Allow blocking e.g. call wait_on_page_writeback
+ * RECLAIM_MODE_LUMPYRECLAIM: For high-order allocations, take a reference
  *                     page from the LRU and reclaim all pages within a
  *                     naturally aligned range
- * LUMPY_MODE_COMPACTION: For high-order allocations, reclaim a number of
+ * RECLAIM_MODE_COMPACTION: For high-order allocations, reclaim a number of
  *                     order-0 pages and then compact the zone
  */
-typedef unsigned __bitwise__ lumpy_mode;
-#define LUMPY_MODE_SINGLE              ((__force lumpy_mode)0x01u)
-#define LUMPY_MODE_ASYNC               ((__force lumpy_mode)0x02u)
-#define LUMPY_MODE_SYNC                        ((__force lumpy_mode)0x04u)
-#define LUMPY_MODE_CONTIGRECLAIM       ((__force lumpy_mode)0x08u)
-#define LUMPY_MODE_COMPACTION          ((__force lumpy_mode)0x10u)
+typedef unsigned __bitwise__ reclaim_mode_t;
+#define RECLAIM_MODE_SINGLE            ((__force reclaim_mode_t)0x01u)
+#define RECLAIM_MODE_ASYNC             ((__force reclaim_mode_t)0x02u)
+#define RECLAIM_MODE_SYNC              ((__force reclaim_mode_t)0x04u)
+#define RECLAIM_MODE_LUMPYRECLAIM      ((__force reclaim_mode_t)0x08u)
+#define RECLAIM_MODE_COMPACTION                ((__force reclaim_mode_t)0x10u)
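
As a reading aid (not part of the patch): __bitwise__ makes sparse treat
reclaim_mode_t as a distinct restricted type, and the __force casts are what
allow plain integer constants to be converted into it. A minimal sketch,
assuming only the definitions above, of how the mask is built and tested:

	reclaim_mode_t mode = RECLAIM_MODE_LUMPYRECLAIM | RECLAIM_MODE_SYNC;
	bool may_block  = !!(mode & RECLAIM_MODE_SYNC);	/* blocking allowed */
	bool high_order = !(mode & RECLAIM_MODE_SINGLE);	/* lumpy/compaction */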
 
 struct scan_control {
        /* Incremented by the number of inactive pages that were scanned */
@@ -101,7 +101,7 @@ struct scan_control {
         * Intend to reclaim enough contiguous memory rather than just
         * enough memory, i.e. the mode for high-order allocations.
         */
-       lumpy_mode lumpy_reclaim_mode;
+       reclaim_mode_t reclaim_mode;
 
        /* Which cgroup do we reclaim from */
        struct mem_cgroup *mem_cgroup;
@@ -284,10 +284,10 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
        return ret;
 }
 
-static void set_lumpy_reclaim_mode(int priority, struct scan_control *sc,
+static void set_reclaim_mode(int priority, struct scan_control *sc,
                                   bool sync)
 {
-       lumpy_mode syncmode = sync ? LUMPY_MODE_SYNC : LUMPY_MODE_ASYNC;
+       reclaim_mode_t syncmode = sync ? RECLAIM_MODE_SYNC : RECLAIM_MODE_ASYNC;
 
        /*
         * Initially assume we are entering either lumpy reclaim or
@@ -295,9 +295,9 @@ static void set_lumpy_reclaim_mode(int priority, struct scan_control *sc,
         * sync mode or just reclaim order-0 pages later.
         */
        if (COMPACTION_BUILD)
-               sc->lumpy_reclaim_mode = LUMPY_MODE_COMPACTION;
+               sc->reclaim_mode = RECLAIM_MODE_COMPACTION;
        else
-               sc->lumpy_reclaim_mode = LUMPY_MODE_CONTIGRECLAIM;
+               sc->reclaim_mode = RECLAIM_MODE_LUMPYRECLAIM;
 
        /*
         * Avoid using lumpy reclaim or reclaim/compaction if possible by
@@ -305,16 +305,16 @@ static void set_lumpy_reclaim_mode(int priority, struct scan_control *sc,
         * under memory pressure
         */
        if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
-               sc->lumpy_reclaim_mode |= syncmode;
+               sc->reclaim_mode |= syncmode;
        else if (sc->order && priority < DEF_PRIORITY - 2)
-               sc->lumpy_reclaim_mode |= syncmode;
+               sc->reclaim_mode |= syncmode;
        else
-               sc->lumpy_reclaim_mode = LUMPY_MODE_SINGLE | LUMPY_MODE_ASYNC;
+               sc->reclaim_mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC;
 }
 
-static void disable_lumpy_reclaim_mode(struct scan_control *sc)
+static void reset_reclaim_mode(struct scan_control *sc)
 {
-       sc->lumpy_reclaim_mode = LUMPY_MODE_SINGLE | LUMPY_MODE_ASYNC;
+       sc->reclaim_mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC;
 }
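
For reference, a hedged summary of what set_reclaim_mode() above computes when
called with sync=false, assuming DEF_PRIORITY is 12 and PAGE_ALLOC_COSTLY_ORDER
is 3 as in this kernel:

	/*
	 * sc->order = 9 (> PAGE_ALLOC_COSTLY_ORDER), any priority:
	 *	RECLAIM_MODE_COMPACTION | RECLAIM_MODE_ASYNC
	 * sc->order = 1, priority 9 (< DEF_PRIORITY - 2):
	 *	RECLAIM_MODE_COMPACTION | RECLAIM_MODE_ASYNC
	 * sc->order = 1, priority 12, or sc->order = 0:
	 *	RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC
	 *
	 * With !COMPACTION_BUILD, RECLAIM_MODE_LUMPYRECLAIM takes the place of
	 * RECLAIM_MODE_COMPACTION in the first two cases.
	 */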
 
 static inline int is_page_cache_freeable(struct page *page)
@@ -445,7 +445,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
                 * first attempt to free a range of pages fails.
                 */
                if (PageWriteback(page) &&
-                   (sc->lumpy_reclaim_mode & LUMPY_MODE_SYNC))
+                   (sc->reclaim_mode & RECLAIM_MODE_SYNC))
                        wait_on_page_writeback(page);
 
                if (!PageWriteback(page)) {
@@ -453,7 +453,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
                        ClearPageReclaim(page);
                }
                trace_mm_vmscan_writepage(page,
-                       trace_reclaim_flags(page, sc->lumpy_reclaim_mode));
+                       trace_reclaim_flags(page, sc->reclaim_mode));
                inc_zone_page_state(page, NR_VMSCAN_WRITE);
                return PAGE_SUCCESS;
        }
@@ -638,7 +638,7 @@ static enum page_references page_check_references(struct page *page,
        referenced_page = TestClearPageReferenced(page);
 
        /* Lumpy reclaim - ignore references */
-       if (sc->lumpy_reclaim_mode & LUMPY_MODE_CONTIGRECLAIM)
+       if (sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM)
                return PAGEREF_RECLAIM;
 
        /*
@@ -755,7 +755,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                         * for any page for which writeback has already
                         * started.
                         */
-                       if ((sc->lumpy_reclaim_mode & LUMPY_MODE_SYNC) &&
+                       if ((sc->reclaim_mode & RECLAIM_MODE_SYNC) &&
                            may_enter_fs)
                                wait_on_page_writeback(page);
                        else {
@@ -911,7 +911,7 @@ cull_mlocked:
                        try_to_free_swap(page);
                unlock_page(page);
                putback_lru_page(page);
-               disable_lumpy_reclaim_mode(sc);
+               reset_reclaim_mode(sc);
                continue;
 
 activate_locked:
@@ -924,7 +924,7 @@ activate_locked:
 keep_locked:
                unlock_page(page);
 keep:
-               disable_lumpy_reclaim_mode(sc);
+               reset_reclaim_mode(sc);
 keep_lumpy:
                list_add(&page->lru, &ret_pages);
                VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
@@ -1044,7 +1044,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
                case 0:
                        list_move(&page->lru, dst);
                        mem_cgroup_del_lru(page);
-                       nr_taken++;
+                       nr_taken += hpage_nr_pages(page);
                        break;
 
                case -EBUSY:
@@ -1102,7 +1102,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
                        if (__isolate_lru_page(cursor_page, mode, file) == 0) {
                                list_move(&cursor_page->lru, dst);
                                mem_cgroup_del_lru(cursor_page);
-                               nr_taken++;
+                               nr_taken += hpage_nr_pages(cursor_page);
                                nr_lumpy_taken++;
                                if (PageDirty(cursor_page))
                                        nr_lumpy_dirty++;
@@ -1157,14 +1157,15 @@ static unsigned long clear_active_flags(struct list_head *page_list,
        struct page *page;
 
        list_for_each_entry(page, page_list, lru) {
+               int numpages = hpage_nr_pages(page);
                lru = page_lru_base_type(page);
                if (PageActive(page)) {
                        lru += LRU_ACTIVE;
                        ClearPageActive(page);
-                       nr_active++;
+                       nr_active += numpages;
                }
                if (count)
-                       count[lru]++;
+                       count[lru] += numpages;
        }
 
        return nr_active;
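
The nr_taken and count[] changes above (and the similar accounting changes
below) make the LRU statistics count base pages rather than struct pages, so a
transparent hugepage contributes its full size. As a reading aid,
hpage_nr_pages() is approximately the following around this kernel version
(the !CONFIG_TRANSPARENT_HUGEPAGE build simply returns 1):

	/* Approximate form of the helper from include/linux/huge_mm.h */
	static inline int hpage_nr_pages(struct page *page)
	{
		if (unlikely(PageTransHuge(page)))
			return HPAGE_PMD_NR;	/* e.g. 512 for a 2M THP with 4K pages */
		return 1;			/* ordinary base page */
	}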
@@ -1274,7 +1275,8 @@ putback_lru_pages(struct zone *zone, struct scan_control *sc,
                add_page_to_lru_list(zone, page, lru);
                if (is_active_lru(lru)) {
                        int file = is_file_lru(lru);
-                       reclaim_stat->recent_rotated[file]++;
+                       int numpages = hpage_nr_pages(page);
+                       reclaim_stat->recent_rotated[file] += numpages;
                }
                if (!pagevec_add(&pvec, page)) {
                        spin_unlock_irq(&zone->lru_lock);
@@ -1340,7 +1342,7 @@ static inline bool should_reclaim_stall(unsigned long nr_taken,
                return false;
 
        /* Only stall on lumpy reclaim */
-       if (sc->lumpy_reclaim_mode & LUMPY_MODE_SINGLE)
+       if (sc->reclaim_mode & RECLAIM_MODE_SINGLE)
                return false;
 
        /* If we have reclaimed everything on the isolated list, no stall */
@@ -1384,14 +1386,14 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
                        return SWAP_CLUSTER_MAX;
        }
 
-       set_lumpy_reclaim_mode(priority, sc, false);
+       set_reclaim_mode(priority, sc, false);
        lru_add_drain();
        spin_lock_irq(&zone->lru_lock);
 
        if (scanning_global_lru(sc)) {
                nr_taken = isolate_pages_global(nr_to_scan,
                        &page_list, &nr_scanned, sc->order,
-                       sc->lumpy_reclaim_mode & LUMPY_MODE_CONTIGRECLAIM ?
+                       sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM ?
                                        ISOLATE_BOTH : ISOLATE_INACTIVE,
                        zone, 0, file);
                zone->pages_scanned += nr_scanned;
@@ -1404,7 +1406,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
        } else {
                nr_taken = mem_cgroup_isolate_pages(nr_to_scan,
                        &page_list, &nr_scanned, sc->order,
-                       sc->lumpy_reclaim_mode & LUMPY_MODE_CONTIGRECLAIM ?
+                       sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM ?
                                        ISOLATE_BOTH : ISOLATE_INACTIVE,
                        zone, sc->mem_cgroup,
                        0, file);
@@ -1427,7 +1429,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
 
        /* Check if we should synchronously wait for writeback */
        if (should_reclaim_stall(nr_taken, nr_reclaimed, priority, sc)) {
-               set_lumpy_reclaim_mode(priority, sc, true);
+               set_reclaim_mode(priority, sc, true);
                nr_reclaimed += shrink_page_list(&page_list, zone, sc);
        }
 
@@ -1442,7 +1444,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
                zone_idx(zone),
                nr_scanned, nr_reclaimed,
                priority,
-               trace_shrink_flags(file, sc->lumpy_reclaim_mode));
+               trace_shrink_flags(file, sc->reclaim_mode));
        return nr_reclaimed;
 }
 
@@ -1482,7 +1484,7 @@ static void move_active_pages_to_lru(struct zone *zone,
 
                list_move(&page->lru, &zone->lru[lru].list);
                mem_cgroup_add_lru_list(page, lru);
-               pgmoved++;
+               pgmoved += hpage_nr_pages(page);
 
                if (!pagevec_add(&pvec, page) || list_empty(list)) {
                        spin_unlock_irq(&zone->lru_lock);
@@ -1550,7 +1552,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                }
 
                if (page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) {
-                       nr_rotated++;
+                       nr_rotated += hpage_nr_pages(page);
                        /*
                         * Identify referenced, file-backed active pages and
                         * give them one more trip around the active list. So
@@ -1836,7 +1838,7 @@ static inline bool should_continue_reclaim(struct zone *zone,
        unsigned long inactive_lru_pages;
 
        /* If not in reclaim/compaction mode, stop */
-       if (!(sc->lumpy_reclaim_mode & LUMPY_MODE_COMPACTION))
+       if (!(sc->reclaim_mode & RECLAIM_MODE_COMPACTION))
                return false;
 
        /*
@@ -2198,38 +2200,87 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 }
 #endif
 
+/*
+ * pgdat_balanced is used when checking if a node is balanced for high-order
+ * allocations. Only zones that meet watermarks and are allowed by the
+ * caller's classzone_idx are added to balanced_pages. The total of
+ * balanced pages must be at least 25% of the zones allowed by classzone_idx
+ * for the node to be considered balanced. Forcing all zones to be balanced
+ * for high orders can cause excessive reclaim when there are imbalanced zones.
+ * The choice of 25% is due to
+ *   o a 16M DMA zone that is balanced will not balance a zone on any
+ *     reasonable sized machine
+ *   o On all other machines, the top zone must be at least a reasonable
+ *     percentage of the middle zones. For example, on 32-bit x86, highmem
+ *     would need to be at least 256M for it to balance a whole node.
+ *     Similarly, on x86-64 the Normal zone would need to be at least 1G
+ *     to balance a node on its own. These seemed like reasonable ratios.
+ */
+static bool pgdat_balanced(pg_data_t *pgdat, unsigned long balanced_pages,
+                                               int classzone_idx)
+{
+       unsigned long present_pages = 0;
+       int i;
+
+       for (i = 0; i <= classzone_idx; i++)
+               present_pages += pgdat->node_zones[i].present_pages;
+
+       return balanced_pages > (present_pages >> 2);
+}
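
A worked example of the 25% rule, using hypothetical zone sizes rather than
anything taken from the patch: with classzone_idx covering DMA (16M), Normal
(880M) and HighMem (3200M), present_pages is roughly 4096M, so pgdat_balanced()
requires more than 1024M of balanced pages. A balanced DMA plus Normal (896M)
is not enough on its own, while a balanced HighMem zone is, which is the ratio
the comment above is describing.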
+
 /* is kswapd sleeping prematurely? */
-static int sleeping_prematurely(pg_data_t *pgdat, int order, long remaining)
+static bool sleeping_prematurely(pg_data_t *pgdat, int order, long remaining,
+                                       int classzone_idx)
 {
        int i;
+       unsigned long balanced = 0;
+       bool all_zones_ok = true;
 
        /* If a direct reclaimer woke kswapd within HZ/10, it's premature */
        if (remaining)
-               return 1;
+               return true;
 
-       /* If after HZ/10, a zone is below the high mark, it's premature */
+       /* Check the watermark levels */
        for (i = 0; i < pgdat->nr_zones; i++) {
                struct zone *zone = pgdat->node_zones + i;
 
                if (!populated_zone(zone))
                        continue;
 
-               if (zone->all_unreclaimable)
+               /*
+                * balance_pgdat() skips over all_unreclaimable after
+                * DEF_PRIORITY. Effectively, it considers them balanced so
+                * they must be considered balanced here as well if kswapd
+                * is to sleep
+                */
+               if (zone->all_unreclaimable) {
+                       balanced += zone->present_pages;
                        continue;
+               }
 
                if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone),
-                                                               0, 0))
-                       return 1;
+                                                       classzone_idx, 0))
+                       all_zones_ok = false;
+               else
+                       balanced += zone->present_pages;
        }
 
-       return 0;
+       /*
+        * For high-order requests, the balanced zones must contain at least
+        * 25% of the node's pages for kswapd to sleep. For order-0, all zones
+        * must be balanced
+        */
+       if (order)
+               return pgdat_balanced(pgdat, balanced, classzone_idx);
+       else
+               return !all_zones_ok;
 }
 
 /*
  * For kswapd, balance_pgdat() will work across all this node's zones until
  * they are all at high_wmark_pages(zone).
  *
- * Returns the number of pages which were actually freed.
+ * Returns the final order kswapd was reclaiming at
  *
  * There is special handling here for zones which are full of pinned pages.
  * This can happen if the pages are all mlocked, or if they are all used by
@@ -2246,11 +2297,14 @@ static int sleeping_prematurely(pg_data_t *pgdat, int order, long remaining)
  * interoperates with the page allocator fallback scheme to ensure that aging
  * of pages is balanced across the zones.
  */
-static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
+static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
+                                                       int *classzone_idx)
 {
        int all_zones_ok;
+       unsigned long balanced;
        int priority;
        int i;
+       int end_zone = 0;       /* Inclusive.  0 = ZONE_DMA */
        unsigned long total_scanned;
        struct reclaim_state *reclaim_state = current->reclaim_state;
        struct scan_control sc = {
@@ -2273,7 +2327,6 @@ loop_again:
        count_vm_event(PAGEOUTRUN);
 
        for (priority = DEF_PRIORITY; priority >= 0; priority--) {
-               int end_zone = 0;       /* Inclusive.  0 = ZONE_DMA */
                unsigned long lru_pages = 0;
                int has_under_min_watermark_zone = 0;
 
@@ -2282,6 +2335,7 @@ loop_again:
                        disable_swap_token();
 
                all_zones_ok = 1;
+               balanced = 0;
 
                /*
                 * Scan in the highmem->dma direction for the highest
@@ -2307,6 +2361,7 @@ loop_again:
                        if (!zone_watermark_ok_safe(zone, order,
                                        high_wmark_pages(zone), 0, 0)) {
                                end_zone = i;
+                               *classzone_idx = i;
                                break;
                        }
                }
@@ -2329,6 +2384,7 @@ loop_again:
                 * cause too much scanning of the lower zones.
                 */
                for (i = 0; i <= end_zone; i++) {
+                       int compaction;
                        struct zone *zone = pgdat->node_zones + i;
                        int nr_slab;
 
@@ -2358,9 +2414,26 @@ loop_again:
                                                lru_pages);
                        sc.nr_reclaimed += reclaim_state->reclaimed_slab;
                        total_scanned += sc.nr_scanned;
+
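+                       /*
+                        * Compact only when the zone already meets its order-0
+                        * high watermark but not the watermark at the order
+                        * being reclaimed for: enough pages are free, they are
+                        * just not contiguous enough.
+                        */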
+                       compaction = 0;
+                       if (order &&
+                           zone_watermark_ok(zone, 0,
+                                              high_wmark_pages(zone),
+                                             end_zone, 0) &&
+                           !zone_watermark_ok(zone, order,
+                                              high_wmark_pages(zone),
+                                              end_zone, 0)) {
+                               compact_zone_order(zone,
+                                                  order,
+                                                  sc.gfp_mask, false,
+                                                  COMPACT_MODE_KSWAPD);
+                               compaction = 1;
+                       }
+
                        if (zone->all_unreclaimable)
                                continue;
-                       if (nr_slab == 0 && !zone_reclaimable(zone))
+                       if (!compaction && nr_slab == 0 &&
+                           !zone_reclaimable(zone))
                                zone->all_unreclaimable = 1;
                        /*
                         * If we've done a decent amount of scanning and
@@ -2371,14 +2444,6 @@ loop_again:
                            total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2)
                                sc.may_writepage = 1;
 
-                       /*
-                        * Compact the zone for higher orders to reduce
-                        * latencies for higher-order allocations that
-                        * would ordinarily call try_to_compact_pages()
-                        */
-                       if (sc.order > PAGE_ALLOC_COSTLY_ORDER)
-                               compact_zone_order(zone, sc.order, sc.gfp_mask);
-
                        if (!zone_watermark_ok_safe(zone, order,
                                        high_wmark_pages(zone), end_zone, 0)) {
                                all_zones_ok = 0;
@@ -2399,10 +2464,12 @@ loop_again:
                                 * speculatively avoid congestion waits
                                 */
                                zone_clear_flag(zone, ZONE_CONGESTED);
+                               if (i <= *classzone_idx)
+                                       balanced += zone->present_pages;
                        }
 
                }
-               if (all_zones_ok)
+               if (all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx)))
                        break;          /* kswapd: all done */
                /*
                 * OK, kswapd is getting into trouble.  Take a nap, then take
@@ -2425,7 +2492,13 @@ loop_again:
                        break;
        }
 out:
-       if (!all_zones_ok) {
+
+       /*
+        * order-0: All zones must meet high watermark for a balanced node
+        * high-order: Balanced zones must make up at least 25% of the node
+        *             for the node to be balanced
+        */
+       if (!(all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx)))) {
                cond_resched();
 
                try_to_freeze();
@@ -2450,10 +2523,47 @@ out:
                goto loop_again;
        }
 
-       return sc.nr_reclaimed;
+       /*
+        * If kswapd was reclaiming at a higher order, it has the option of
+        * sleeping without all zones being balanced. Before it does, it must
+        * ensure that the watermarks for order-0 on *all* zones are met and
+        * that the congestion flags are cleared. The congestion flag must
+        * be cleared as kswapd is the only mechanism that clears the flag
+        * and it is potentially going to sleep here.
+        */
+       if (order) {
+               for (i = 0; i <= end_zone; i++) {
+                       struct zone *zone = pgdat->node_zones + i;
+
+                       if (!populated_zone(zone))
+                               continue;
+
+                       if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+                               continue;
+
+                       /* Confirm the zone is balanced for order-0 */
+                       if (!zone_watermark_ok(zone, 0,
+                                       high_wmark_pages(zone), 0, 0)) {
+                               order = sc.order = 0;
+                               goto loop_again;
+                       }
+
+                       /* If balanced, clear the congested flag */
+                       zone_clear_flag(zone, ZONE_CONGESTED);
+               }
+       }
+
+       /*
+        * Return the order we were reclaiming at so sleeping_prematurely()
+        * can base its decision on the order we last reclaimed at. However,
+        * if another caller entered the allocator slow path while kswapd
+        * was awake, order will remain at the higher level
+        */
+       *classzone_idx = end_zone;
+       return order;
 }
 
-static void kswapd_try_to_sleep(pg_data_t *pgdat, int order)
+static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
 {
        long remaining = 0;
        DEFINE_WAIT(wait);
@@ -2464,7 +2574,7 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order)
        prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
 
        /* Try to sleep for a short interval */
-       if (!sleeping_prematurely(pgdat, order, remaining)) {
+       if (!sleeping_prematurely(pgdat, order, remaining, classzone_idx)) {
                remaining = schedule_timeout(HZ/10);
                finish_wait(&pgdat->kswapd_wait, &wait);
                prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
@@ -2474,7 +2584,7 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order)
         * After a short sleep, check if it was a premature sleep. If not, then
         * go fully to sleep until explicitly woken up.
         */
-       if (!sleeping_prematurely(pgdat, order, remaining)) {
+       if (!sleeping_prematurely(pgdat, order, remaining, classzone_idx)) {
                trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
 
                /*
@@ -2513,6 +2623,7 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order)
 static int kswapd(void *p)
 {
        unsigned long order;
+       int classzone_idx;
        pg_data_t *pgdat = (pg_data_t*)p;
        struct task_struct *tsk = current;
 
@@ -2543,21 +2654,29 @@ static int kswapd(void *p)
        set_freezable();
 
        order = 0;
+       classzone_idx = MAX_NR_ZONES - 1;
        for ( ; ; ) {
                unsigned long new_order;
+               int new_classzone_idx;
                int ret;
 
                new_order = pgdat->kswapd_max_order;
+               new_classzone_idx = pgdat->classzone_idx;
                pgdat->kswapd_max_order = 0;
-               if (order < new_order) {
+               pgdat->classzone_idx = MAX_NR_ZONES - 1;
+               if (order < new_order || classzone_idx > new_classzone_idx) {
                        /*
                         * Don't sleep if someone wants a larger 'order'
-                        * allocation
+                        * allocation or has tighter zone constraints
                         */
                        order = new_order;
+                       classzone_idx = new_classzone_idx;
                } else {
-                       kswapd_try_to_sleep(pgdat, order);
+                       kswapd_try_to_sleep(pgdat, order, classzone_idx);
                        order = pgdat->kswapd_max_order;
+                       classzone_idx = pgdat->classzone_idx;
+                       pgdat->kswapd_max_order = 0;
+                       pgdat->classzone_idx = MAX_NR_ZONES - 1;
                }
 
                ret = try_to_freeze();
@@ -2570,7 +2689,7 @@ static int kswapd(void *p)
                 */
                if (!ret) {
                        trace_mm_vmscan_kswapd_wake(pgdat->node_id, order);
-                       balance_pgdat(pgdat, order);
+                       order = balance_pgdat(pgdat, order, &classzone_idx);
                }
        }
        return 0;
@@ -2579,7 +2698,7 @@ static int kswapd(void *p)
 /*
  * A zone is low on free memory, so wake its kswapd task to service it.
  */
-void wakeup_kswapd(struct zone *zone, int order)
+void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
 {
        pg_data_t *pgdat;
 
@@ -2589,8 +2708,10 @@ void wakeup_kswapd(struct zone *zone, int order)
        if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
                return;
        pgdat = zone->zone_pgdat;
-       if (pgdat->kswapd_max_order < order)
+       if (pgdat->kswapd_max_order < order) {
                pgdat->kswapd_max_order = order;
+               pgdat->classzone_idx = min(pgdat->classzone_idx, classzone_idx);
+       }
        if (!waitqueue_active(&pgdat->kswapd_wait))
                return;
        if (zone_watermark_ok_safe(zone, order, low_wmark_pages(zone), 0, 0))
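
For context, a hedged sketch of how the new classzone_idx argument is expected
to be used (hypothetical call site, not part of this patch): the page allocator
passes the classzone_idx of the failing allocation so kswapd only needs to
balance zones the caller can actually use.

	/* Hypothetical call site, for illustration only */
	if (!zone_watermark_ok_safe(zone, order, low_wmark_pages(zone),
					classzone_idx, 0))
		wakeup_kswapd(zone, order, classzone_idx);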