mm, compaction: pass classzone_idx and alloc_flags to watermark checking
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 701fe90..e32121f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -741,6 +741,9 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
        int i;
        int bad = 0;
 
+       VM_BUG_ON_PAGE(PageTail(page), page);
+       VM_BUG_ON_PAGE(PageHead(page) && compound_order(page) != order, page);
+
        trace_mm_page_free(page, order);
        kmemcheck_free_shadow(page, order);
 
@@ -1267,55 +1270,75 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
 #endif
 
 /*
- * Drain pages of the indicated processor.
+ * Drain pcplists of the indicated processor and zone.
  *
  * The processor must either be the current processor and the
  * thread pinned to the current processor or a processor that
  * is not online.
  */
-static void drain_pages(unsigned int cpu)
+static void drain_pages_zone(unsigned int cpu, struct zone *zone)
 {
        unsigned long flags;
-       struct zone *zone;
+       struct per_cpu_pageset *pset;
+       struct per_cpu_pages *pcp;
 
-       for_each_populated_zone(zone) {
-               struct per_cpu_pageset *pset;
-               struct per_cpu_pages *pcp;
+       local_irq_save(flags);
+       pset = per_cpu_ptr(zone->pageset, cpu);
 
-               local_irq_save(flags);
-               pset = per_cpu_ptr(zone->pageset, cpu);
+       pcp = &pset->pcp;
+       if (pcp->count) {
+               free_pcppages_bulk(zone, pcp->count, pcp);
+               pcp->count = 0;
+       }
+       local_irq_restore(flags);
+}
 
-               pcp = &pset->pcp;
-               if (pcp->count) {
-                       free_pcppages_bulk(zone, pcp->count, pcp);
-                       pcp->count = 0;
-               }
-               local_irq_restore(flags);
+/*
+ * Drain pcplists of all zones on the indicated processor.
+ *
+ * The processor must either be the current processor and the
+ * thread pinned to the current processor or a processor that
+ * is not online.
+ */
+static void drain_pages(unsigned int cpu)
+{
+       struct zone *zone;
+
+       for_each_populated_zone(zone) {
+               drain_pages_zone(cpu, zone);
        }
 }
 
 /*
  * Spill all of this CPU's per-cpu pages back into the buddy allocator.
+ *
+ * The CPU has to be pinned. When zone parameter is non-NULL, spill just
+ * the single zone's pages.
  */
-void drain_local_pages(void *arg)
+void drain_local_pages(struct zone *zone)
 {
-       drain_pages(smp_processor_id());
+       int cpu = smp_processor_id();
+
+       if (zone)
+               drain_pages_zone(cpu, zone);
+       else
+               drain_pages(cpu);
 }
 
 /*
  * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
  *
+ * When zone parameter is non-NULL, spill just the single zone's pages.
+ *
  * Note that this code is protected against sending an IPI to an offline
  * CPU but does not guarantee sending an IPI to newly hotplugged CPUs:
  * on_each_cpu_mask() blocks hotplug and won't talk to offlined CPUs but
  * nothing keeps CPUs from showing up after we populated the cpumask and
  * before the call to on_each_cpu_mask().
  */
-void drain_all_pages(void)
+void drain_all_pages(struct zone *zone)
 {
        int cpu;
-       struct per_cpu_pageset *pcp;
-       struct zone *zone;
 
        /*
         * Allocate in the BSS so we wont require allocation in
@@ -1330,20 +1353,31 @@ void drain_all_pages(void)
         * disables preemption as part of its processing
         */
        for_each_online_cpu(cpu) {
+               struct per_cpu_pageset *pcp;
+               struct zone *z;
                bool has_pcps = false;
-               for_each_populated_zone(zone) {
+
+               if (zone) {
                        pcp = per_cpu_ptr(zone->pageset, cpu);
-                       if (pcp->pcp.count) {
+                       if (pcp->pcp.count)
                                has_pcps = true;
-                               break;
+               } else {
+                       for_each_populated_zone(z) {
+                               pcp = per_cpu_ptr(z->pageset, cpu);
+                               if (pcp->pcp.count) {
+                                       has_pcps = true;
+                                       break;
+                               }
                        }
                }
+
                if (has_pcps)
                        cpumask_set_cpu(cpu, &cpus_with_pcps);
                else
                        cpumask_clear_cpu(cpu, &cpus_with_pcps);
        }
-       on_each_cpu_mask(&cpus_with_pcps, drain_local_pages, NULL, 1);
+       on_each_cpu_mask(&cpus_with_pcps, (smp_call_func_t) drain_local_pages,
+                                                               zone, 1);
 }
 
 #ifdef CONFIG_HIBERNATION
@@ -2307,6 +2341,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
        compact_result = try_to_compact_pages(zonelist, order, gfp_mask,
                                                nodemask, mode,
                                                contended_compaction,
+                                               alloc_flags, classzone_idx,
                                                &last_compact_zone);
        current->flags &= ~PF_MEMALLOC;
 
@@ -2433,7 +2468,7 @@ retry:
         * pages are pinned on the per-cpu lists. Drain them and try again
         */
        if (!page && !drained) {
-               drain_all_pages();
+               drain_all_pages(NULL);
                drained = true;
                goto retry;
        }
@@ -6385,7 +6420,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
         */
 
        lru_add_drain_all();
-       drain_all_pages();
+       drain_all_pages(cc.zone);
 
        order = 0;
        outer_start = start;
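
A minimal usage sketch follows (not part of the patch above; the helper name example_drain_usage() is hypothetical and exists only for illustration). After this change, drain_all_pages() and drain_local_pages() take a struct zone pointer, so a caller that already knows its target zone, such as alloc_contig_range() passing cc.zone, drains only that zone's pcplists, while callers with no specific target, such as the direct-reclaim retry path, pass NULL to keep the old drain-everything behaviour.

#include <linux/gfp.h>		/* drain_all_pages(), drain_local_pages() */
#include <linux/mmzone.h>	/* struct zone */

/* Hypothetical helper, for illustration only. */
static void example_drain_usage(struct zone *target_zone)
{
	/* Drain the pcplists of a single zone on every online CPU. */
	drain_all_pages(target_zone);

	/* Drain the pcplists of all populated zones (previous behaviour). */
	drain_all_pages(NULL);

	/*
	 * On a CPU pinned as drain_local_pages() requires, drain only the
	 * local pcplists of the single zone of interest.
	 */
	drain_local_pages(target_zone);
}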