[PATCH] Swap Migration V5: PF_SWAPWRITE to allow writing to swap
[pandora-kernel.git] mm/page_alloc.c
index b257720..ad3d020 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -36,6 +36,7 @@
 #include <linux/memory_hotplug.h>
 #include <linux/nodemask.h>
 #include <linux/vmalloc.h>
+#include <linux/mempolicy.h>
 
 #include <asm/tlbflush.h>
 #include "internal.h"
@@ -52,6 +53,9 @@ struct pglist_data *pgdat_list __read_mostly;
 unsigned long totalram_pages __read_mostly;
 unsigned long totalhigh_pages __read_mostly;
 long nr_swap_pages;
+int percpu_pagelist_fraction;
+
+static void fastcall free_hot_cold_page(struct page *page, int cold);
 
 /*
  * results with 256, 32 in the lowmem_reserve sysctl:
@@ -81,6 +85,7 @@ int min_free_kbytes = 1024;
 unsigned long __initdata nr_kernel_pages;
 unsigned long __initdata nr_all_pages;
 
+#ifdef CONFIG_DEBUG_VM
 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
 {
        int ret = 0;
@@ -122,16 +127,23 @@ static int bad_range(struct zone *zone, struct page *page)
        return 0;
 }
 
-static void bad_page(const char *function, struct page *page)
+#else
+static inline int bad_range(struct zone *zone, struct page *page)
 {
-       printk(KERN_EMERG "Bad page state at %s (in process '%s', page %p)\n",
-               function, current->comm, page);
-       printk(KERN_EMERG "flags:0x%0*lx mapping:%p mapcount:%d count:%d\n",
-               (int)(2*sizeof(unsigned long)), (unsigned long)page->flags,
-               page->mapping, page_mapcount(page), page_count(page));
-       printk(KERN_EMERG "Backtrace:\n");
+       return 0;
+}
+#endif
+
+static void bad_page(struct page *page)
+{
+       printk(KERN_EMERG "Bad page state in process '%s'\n"
+               "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
+               "Trying to fix it up, but a reboot is needed\n"
+               "Backtrace:\n",
+               current->comm, page, (int)(2*sizeof(unsigned long)),
+               (unsigned long)page->flags, page->mapping,
+               page_mapcount(page), page_count(page));
        dump_stack();
-       printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n");
        page->flags &= ~(1 << PG_lru    |
                        1 << PG_private |
                        1 << PG_locked  |
@@ -184,19 +196,15 @@ static void destroy_compound_page(struct page *page, unsigned long order)
        int i;
        int nr_pages = 1 << order;
 
-       if (!PageCompound(page))
-               return;
-
-       if (page[1].index != order)
-               bad_page(__FUNCTION__, page);
+       if (unlikely(page[1].index != order))
+               bad_page(page);
 
        for (i = 0; i < nr_pages; i++) {
                struct page *p = page + i;
 
-               if (!PageCompound(p))
-                       bad_page(__FUNCTION__, page);
-               if (page_private(p) != (unsigned long)page)
-                       bad_page(__FUNCTION__, page);
+               if (unlikely(!PageCompound(p) |
+                               (page_private(p) != (unsigned long)page)))
+                       bad_page(page);
                ClearPageCompound(p);
        }
 }
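
The checks here (and the matching ones in free_pages_check() and prep_new_page() below) are deliberately combined with bitwise | rather than short-circuiting ||: every operand is a plain integer test, so the compiler can evaluate them all and emit a single unlikely branch instead of one branch per condition. A standalone sketch of the idiom (page_state_bad is a hypothetical helper, not part of the patch):

        /* sketch: folding three checks into one branch; each operand is
         * an int, so bitwise OR is equivalent to || here but branchless */
        static inline int page_state_bad(int mapcount, const void *mapping,
                                         int count)
        {
                return (mapcount != 0) | (mapping != NULL) | (count != 0);
        }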
@@ -255,14 +263,20 @@ __find_combined_index(unsigned long page_idx, unsigned int order)
 /*
  * This function checks whether a page is free && is the buddy
  * we can coalesce a page and its buddy if
- * (a) the buddy is free &&
- * (b) the buddy is on the buddy system &&
- * (c) a page and its buddy have the same order.
+ * (a) the buddy is not in a hole &&
+ * (b) the buddy is free &&
+ * (c) the buddy is on the buddy system &&
+ * (d) a page and its buddy have the same order.
  * for recording page's order, we use page_private(page) and PG_private.
  *
  */
 static inline int page_is_buddy(struct page *page, int order)
 {
+#ifdef CONFIG_HOLES_IN_ZONE
+       if (!pfn_valid(page_to_pfn(page)))
+               return 0;
+#endif
+
        if (PagePrivate(page)           &&
            (page_order(page) == order) &&
             page_count(page) == 0)
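
page_is_buddy() now also implements condition (a): with CONFIG_HOLES_IN_ZONE the computed buddy may fall into a memory hole with no valid struct page, so the pfn is checked before anything is dereferenced. Finding the buddy is pure index arithmetic, as done by __page_find_buddy() and __find_combined_index() in this file; a sketch of the two identities (helper names are illustrative):

        /* sketch of the buddy arithmetic used in this file: the buddy of
         * a 2^order block differs only in bit 'order' of its index, and
         * the merged block's index has that bit cleared */
        static inline unsigned long buddy_idx(unsigned long page_idx,
                                              unsigned int order)
        {
                return page_idx ^ (1UL << order);
        }

        static inline unsigned long combined_idx(unsigned long page_idx,
                                                 unsigned int order)
        {
                return page_idx & ~(1UL << order);
        }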
@@ -294,13 +308,13 @@ static inline int page_is_buddy(struct page *page, int order)
  * -- wli
  */
 
-static inline void __free_pages_bulk (struct page *page,
+static inline void __free_one_page(struct page *page,
                struct zone *zone, unsigned int order)
 {
        unsigned long page_idx;
        int order_size = 1 << order;
 
-       if (unlikely(order))
+       if (unlikely(PageCompound(page)))
                destroy_compound_page(page, order);
 
        page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
@@ -314,17 +328,15 @@ static inline void __free_pages_bulk (struct page *page,
                struct free_area *area;
                struct page *buddy;
 
-               combined_idx = __find_combined_index(page_idx, order);
                buddy = __page_find_buddy(page, page_idx, order);
-
-               if (bad_range(zone, buddy))
-                       break;
                if (!page_is_buddy(buddy, order))
                        break;          /* Move the buddy up one level. */
+
                list_del(&buddy->lru);
                area = zone->free_area + order;
                area->nr_free--;
                rmv_page_order(buddy);
+               combined_idx = __find_combined_index(page_idx, order);
                page = page + (combined_idx - page_idx);
                page_idx = combined_idx;
                order++;
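
Two changes in the merge loop: the bad_range() check on the buddy is gone (page_is_buddy() now validates the pfn itself, and bad_range() compiles to nothing without CONFIG_DEBUG_VM), and __find_combined_index() is only computed once a merge is actually going to happen. A worked step, assuming page_idx = 12 and order = 2:

        buddy    = 12 ^ (1 << 2) = 8    /* order-2 buddy starts at index 8 */
        combined = 12 & ~(1 << 2) = 8   /* merged order-3 block starts at 8 */

so the next iteration continues with page_idx = 8 at order 3.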
@@ -334,11 +346,11 @@ static inline void __free_pages_bulk (struct page *page,
        zone->free_area[order].nr_free++;
 }
 
-static inline int free_pages_check(const char *function, struct page *page)
+static inline int free_pages_check(struct page *page)
 {
-       if (    page_mapcount(page) ||
-               page->mapping != NULL ||
-               page_count(page) != 0 ||
+       if (unlikely(page_mapcount(page) |
+               (page->mapping != NULL)  |
+               (page_count(page) != 0)  |
                (page->flags & (
                        1 << PG_lru     |
                        1 << PG_private |
@@ -348,8 +360,8 @@ static inline int free_pages_check(const char *function, struct page *page)
                        1 << PG_slab    |
                        1 << PG_swapcache |
                        1 << PG_writeback |
-                       1 << PG_reserved )))
-               bad_page(function, page);
+                       1 << PG_reserved ))))
+               bad_page(page);
        if (PageDirty(page))
                __ClearPageDirty(page);
        /*
@@ -371,51 +383,87 @@ static inline int free_pages_check(const char *function, struct page *page)
  * And clear the zone's pages_scanned counter, to hold off the "all pages are
  * pinned" detection logic.
  */
-static int
-free_pages_bulk(struct zone *zone, int count,
-               struct list_head *list, unsigned int order)
+static void free_pages_bulk(struct zone *zone, int count,
+                                       struct list_head *list, int order)
 {
-       unsigned long flags;
-       struct page *page = NULL;
-       int ret = 0;
-
-       spin_lock_irqsave(&zone->lock, flags);
+       spin_lock(&zone->lock);
        zone->all_unreclaimable = 0;
        zone->pages_scanned = 0;
-       while (!list_empty(list) && count--) {
+       while (count--) {
+               struct page *page;
+
+               BUG_ON(list_empty(list));
                page = list_entry(list->prev, struct page, lru);
-               /* have to delete it as __free_pages_bulk list manipulates */
+               /* have to delete it as __free_one_page list manipulates */
                list_del(&page->lru);
-               __free_pages_bulk(page, zone, order);
-               ret++;
+               __free_one_page(page, zone, order);
        }
-       spin_unlock_irqrestore(&zone->lock, flags);
-       return ret;
+       spin_unlock(&zone->lock);
 }
 
-void __free_pages_ok(struct page *page, unsigned int order)
+static void free_one_page(struct zone *zone, struct page *page, int order)
 {
        LIST_HEAD(list);
+       list_add(&page->lru, &list);
+       free_pages_bulk(zone, 1, &list, order);
+}
+
+static void __free_pages_ok(struct page *page, unsigned int order)
+{
+       unsigned long flags;
        int i;
        int reserved = 0;
 
        arch_free_page(page, order);
 
 #ifndef CONFIG_MMU
-       if (order > 0)
-               for (i = 1 ; i < (1 << order) ; ++i)
-                       __put_page(page + i);
+       for (i = 1 ; i < (1 << order) ; ++i)
+               __put_page(page + i);
 #endif
 
        for (i = 0 ; i < (1 << order) ; ++i)
-               reserved += free_pages_check(__FUNCTION__, page + i);
+               reserved += free_pages_check(page + i);
        if (reserved)
                return;
 
-       list_add(&page->lru, &list);
-       mod_page_state(pgfree, 1 << order);
-       kernel_map_pages(page, 1<<order, 0);
-       free_pages_bulk(page_zone(page), 1, &list, order);
+       kernel_map_pages(page, 1 << order, 0);
+       local_irq_save(flags);
+       __mod_page_state(pgfree, 1 << order);
+       free_one_page(page_zone(page), page, order);
+       local_irq_restore(flags);
+}
+
+/*
+ * permit the bootmem allocator to evade page validation on high-order frees
+ */
+void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order)
+{
+       if (order == 0) {
+               __ClearPageReserved(page);
+               set_page_count(page, 0);
+
+               free_hot_cold_page(page, 0);
+       } else {
+               LIST_HEAD(list);
+               int loop;
+
+               for (loop = 0; loop < BITS_PER_LONG; loop++) {
+                       struct page *p = &page[loop];
+
+                       if (loop + 16 < BITS_PER_LONG)
+                               prefetchw(p + 16);
+                       __ClearPageReserved(p);
+                       set_page_count(p, 0);
+               }
+
+               arch_free_page(page, order);
+
+               mod_page_state(pgfree, 1 << order);
+
+               list_add(&page->lru, &list);
+               kernel_map_pages(page, 1 << order, 0);
+               free_pages_bulk(page_zone(page), 1, &list, order);
+       }
 }
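
Note the locking convention change running through this hunk: free_pages_bulk() (and rmqueue_bulk() below) now take zone->lock with a plain spin_lock(), and the interrupt-disable half of the old spin_lock_irqsave() moves out to the callers, which batch it around the page-state update too. The pattern every caller must now follow, as __free_pages_ok() does above:

        /* illustration of the new calling convention, not new code */
        local_irq_save(flags);
        __mod_page_state(pgfree, 1 << order);   /* irq-unsafe variant is fine here */
        free_one_page(page_zone(page), page, order);
        local_irq_restore(flags);

This is also why __drain_pages() below gains an explicit local_irq_save()/restore() pair around its free_pages_bulk() calls.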
 
 
@@ -433,8 +481,7 @@ void __free_pages_ok(struct page *page, unsigned int order)
  *
  * -- wli
  */
-static inline struct page *
-expand(struct zone *zone, struct page *page,
+static inline void expand(struct zone *zone, struct page *page,
        int low, int high, struct free_area *area)
 {
        unsigned long size = 1 << high;
@@ -448,24 +495,6 @@ expand(struct zone *zone, struct page *page,
                area->nr_free++;
                set_page_order(&page[size], high);
        }
-       return page;
-}
-
-void set_page_refs(struct page *page, int order)
-{
-#ifdef CONFIG_MMU
-       set_page_count(page, 1);
-#else
-       int i;
-
-       /*
-        * We need to reference all the pages for this order, otherwise if
-        * anyone accesses one of the pages with (get/put) it will be freed.
-        * - eg: access_process_vm()
-        */
-       for (i = 0; i < (1 << order); i++)
-               set_page_count(page + i, 1);
-#endif /* CONFIG_MMU */
 }
 
 /*
@@ -473,9 +502,9 @@ void set_page_refs(struct page *page, int order)
  */
 static int prep_new_page(struct page *page, int order)
 {
-       if (    page_mapcount(page) ||
-               page->mapping != NULL ||
-               page_count(page) != 0 ||
+       if (unlikely(page_mapcount(page) |
+               (page->mapping != NULL)  |
+               (page_count(page) != 0)  |
                (page->flags & (
                        1 << PG_lru     |
                        1 << PG_private |
@@ -486,8 +515,8 @@ static int prep_new_page(struct page *page, int order)
                        1 << PG_slab    |
                        1 << PG_swapcache |
                        1 << PG_writeback |
-                       1 << PG_reserved )))
-               bad_page(__FUNCTION__, page);
+                       1 << PG_reserved ))))
+               bad_page(page);
 
        /*
         * For now, we report if PG_reserved was found set, but do not
@@ -525,7 +554,8 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order)
                rmv_page_order(page);
                area->nr_free--;
                zone->free_pages -= 1UL << order;
-               return expand(zone, page, order, current_order, area);
+               expand(zone, page, order, current_order, area);
+               return page;
        }
 
        return NULL;
@@ -539,21 +569,17 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order)
 static int rmqueue_bulk(struct zone *zone, unsigned int order, 
                        unsigned long count, struct list_head *list)
 {
-       unsigned long flags;
        int i;
-       int allocated = 0;
-       struct page *page;
        
-       spin_lock_irqsave(&zone->lock, flags);
+       spin_lock(&zone->lock);
        for (i = 0; i < count; ++i) {
-               page = __rmqueue(zone, order);
-               if (page == NULL)
+               struct page *page = __rmqueue(zone, order);
+               if (unlikely(page == NULL))
                        break;
-               allocated++;
                list_add_tail(&page->lru, list);
        }
-       spin_unlock_irqrestore(&zone->lock, flags);
-       return allocated;
+       spin_unlock(&zone->lock);
+       return i;
 }
 
 #ifdef CONFIG_NUMA
@@ -572,14 +598,13 @@ void drain_remote_pages(void)
                if (zone->zone_pgdat->node_id == numa_node_id())
                        continue;
 
-               pset = zone->pageset[smp_processor_id()];
+               pset = zone_pcp(zone, smp_processor_id());
                for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
                        struct per_cpu_pages *pcp;
 
                        pcp = &pset->pcp[i];
-                       if (pcp->count)
-                               pcp->count -= free_pages_bulk(zone, pcp->count,
-                                               &pcp->list, 0);
+                       free_pages_bulk(zone, pcp->count, &pcp->list, 0);
+                       pcp->count = 0;
                }
        }
        local_irq_restore(flags);
@@ -589,6 +614,7 @@ void drain_remote_pages(void)
 #if defined(CONFIG_PM) || defined(CONFIG_HOTPLUG_CPU)
 static void __drain_pages(unsigned int cpu)
 {
+       unsigned long flags;
        struct zone *zone;
        int i;
 
@@ -600,8 +626,10 @@ static void __drain_pages(unsigned int cpu)
                        struct per_cpu_pages *pcp;
 
                        pcp = &pset->pcp[i];
-                       pcp->count -= free_pages_bulk(zone, pcp->count,
-                                               &pcp->list, 0);
+                       local_irq_save(flags);
+                       free_pages_bulk(zone, pcp->count, &pcp->list, 0);
+                       pcp->count = 0;
+                       local_irq_restore(flags);
                }
        }
 }
@@ -647,18 +675,14 @@ void drain_local_pages(void)
 }
 #endif /* CONFIG_PM */
 
-static void zone_statistics(struct zonelist *zonelist, struct zone *z)
+static void zone_statistics(struct zonelist *zonelist, struct zone *z, int cpu)
 {
 #ifdef CONFIG_NUMA
-       unsigned long flags;
-       int cpu;
        pg_data_t *pg = z->zone_pgdat;
        pg_data_t *orig = zonelist->zones[0]->zone_pgdat;
        struct per_cpu_pageset *p;
 
-       local_irq_save(flags);
-       cpu = smp_processor_id();
-       p = zone_pcp(z,cpu);
+       p = zone_pcp(z, cpu);
        if (pg == orig) {
                p->numa_hit++;
        } else {
@@ -669,14 +693,12 @@ static void zone_statistics(struct zonelist *zonelist, struct zone *z)
                p->local_node++;
        else
                p->other_node++;
-       local_irq_restore(flags);
 #endif
 }
 
 /*
  * Free a 0-order page
  */
-static void FASTCALL(free_hot_cold_page(struct page *page, int cold));
 static void fastcall free_hot_cold_page(struct page *page, int cold)
 {
        struct zone *zone = page_zone(page);
@@ -687,18 +709,20 @@ static void fastcall free_hot_cold_page(struct page *page, int cold)
 
        if (PageAnon(page))
                page->mapping = NULL;
-       if (free_pages_check(__FUNCTION__, page))
+       if (free_pages_check(page))
                return;
 
-       inc_page_state(pgfree);
        kernel_map_pages(page, 1, 0);
 
        pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
        local_irq_save(flags);
+       __inc_page_state(pgfree);
        list_add(&page->lru, &pcp->list);
        pcp->count++;
-       if (pcp->count >= pcp->high)
-               pcp->count -= free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
+       if (pcp->count >= pcp->high) {
+               free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
+               pcp->count -= pcp->batch;
+       }
        local_irq_restore(flags);
        put_cpu();
 }
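
With the pcp->low watermark removed (see the setup_pageset() hunk further down), a zone keeps two per-cpu free lists, pcp[0] for cache-hot pages and pcp[1] for cache-cold ones, and the free side now trims exactly pcp->batch pages once count hits pcp->high; free_pages_bulk() no longer returns a count, so the caller adjusts by the batch it asked for:

        /* the flush invariant, as in free_hot_cold_page() above */
        free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
        pcp->count -= pcp->batch;       /* exactly batch pages were freed */

The BUG_ON(list_empty(list)) inside free_pages_bulk() is what enforces that callers never pass a count larger than the list actually holds.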
@@ -727,49 +751,58 @@ static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
  * we cheat by calling it from here, in the order > 0 path.  Saves a branch
  * or two.
  */
-static struct page *
-buffered_rmqueue(struct zone *zone, int order, gfp_t gfp_flags)
+static struct page *buffered_rmqueue(struct zonelist *zonelist,
+                       struct zone *zone, int order, gfp_t gfp_flags)
 {
        unsigned long flags;
        struct page *page;
        int cold = !!(gfp_flags & __GFP_COLD);
+       int cpu;
 
 again:
-       if (order == 0) {
+       cpu = get_cpu();
+       if (likely(order == 0)) {
                struct per_cpu_pages *pcp;
 
-               page = NULL;
-               pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
+               pcp = &zone_pcp(zone, cpu)->pcp[cold];
                local_irq_save(flags);
-               if (pcp->count <= pcp->low)
+               if (!pcp->count) {
                        pcp->count += rmqueue_bulk(zone, 0,
                                                pcp->batch, &pcp->list);
-               if (pcp->count) {
-                       page = list_entry(pcp->list.next, struct page, lru);
-                       list_del(&page->lru);
-                       pcp->count--;
+                       if (unlikely(!pcp->count))
+                               goto failed;
                }
-               local_irq_restore(flags);
-               put_cpu();
+               page = list_entry(pcp->list.next, struct page, lru);
+               list_del(&page->lru);
+               pcp->count--;
        } else {
                spin_lock_irqsave(&zone->lock, flags);
                page = __rmqueue(zone, order);
-               spin_unlock_irqrestore(&zone->lock, flags);
+               spin_unlock(&zone->lock);
+               if (!page)
+                       goto failed;
        }
 
-       if (page != NULL) {
-               BUG_ON(bad_range(zone, page));
-               mod_page_state_zone(zone, pgalloc, 1 << order);
-               if (prep_new_page(page, order))
-                       goto again;
+       __mod_page_state_zone(zone, pgalloc, 1 << order);
+       zone_statistics(zonelist, zone, cpu);
+       local_irq_restore(flags);
+       put_cpu();
+
+       BUG_ON(bad_range(zone, page));
+       if (prep_new_page(page, order))
+               goto again;
 
-               if (gfp_flags & __GFP_ZERO)
-                       prep_zero_page(page, order, gfp_flags);
+       if (gfp_flags & __GFP_ZERO)
+               prep_zero_page(page, order, gfp_flags);
 
-               if (order && (gfp_flags & __GFP_COMP))
-                       prep_compound_page(page, order);
-       }
+       if (order && (gfp_flags & __GFP_COMP))
+               prep_compound_page(page, order);
        return page;
+
+failed:
+       local_irq_restore(flags);
+       put_cpu();
+       return NULL;
 }
 
 #define ALLOC_NO_WATERMARKS    0x01 /* don't check watermarks at all */
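
buffered_rmqueue() now refills the per-cpu list only when it is completely empty, takes the zonelist so it can do its own NUMA accounting, and folds all statistics into a single interrupts-off region; that is why zone_statistics() above lost its local_irq_save() and grew a cpu argument, and why get_page_from_freelist() below stops calling it. A condensed sketch of the resulting order-0 fast path, assuming the pcp list is non-empty:

        /* condensed fast path, for illustration only */
        cpu = get_cpu();
        pcp = &zone_pcp(zone, cpu)->pcp[cold];
        local_irq_save(flags);
        page = list_entry(pcp->list.next, struct page, lru);
        list_del(&page->lru);
        pcp->count--;
        __mod_page_state_zone(zone, pgalloc, 1);        /* irqs already off */
        zone_statistics(zonelist, zone, cpu);
        local_irq_restore(flags);
        put_cpu();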
@@ -845,9 +878,8 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
                                continue;
                }
 
-               page = buffered_rmqueue(*z, order, gfp_mask);
+               page = buffered_rmqueue(zonelist, *z, order, gfp_mask);
                if (page) {
-                       zone_statistics(zonelist, *z);
                        break;
                }
        } while (*(++z) != NULL);
@@ -903,8 +935,7 @@ restart:
                alloc_flags |= ALLOC_HARDER;
        if (gfp_mask & __GFP_HIGH)
                alloc_flags |= ALLOC_HIGH;
-       if (wait)
-               alloc_flags |= ALLOC_CPUSET;
+       alloc_flags |= ALLOC_CPUSET;
 
        /*
         * Go through the zonelist again. Let __GFP_HIGH and allocations
@@ -926,7 +957,7 @@ restart:
 nofail_alloc:
                        /* go through the zonelist yet again, ignoring mins */
                        page = get_page_from_freelist(gfp_mask, order,
-                               zonelist, ALLOC_NO_WATERMARKS|ALLOC_CPUSET);
+                               zonelist, ALLOC_NO_WATERMARKS);
                        if (page)
                                goto got_pg;
                        if (gfp_mask & __GFP_NOFAIL) {
@@ -1171,7 +1202,7 @@ EXPORT_SYMBOL(nr_pagecache);
 DEFINE_PER_CPU(long, nr_pagecache_local) = 0;
 #endif
 
-void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask)
+static void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask)
 {
        int cpu = 0;
 
@@ -1224,7 +1255,7 @@ void get_full_page_state(struct page_state *ret)
        __get_page_state(ret, sizeof(*ret) / sizeof(unsigned long), &mask);
 }
 
-unsigned long __read_page_state(unsigned long offset)
+unsigned long read_page_state_offset(unsigned long offset)
 {
        unsigned long ret = 0;
        int cpu;
@@ -1238,18 +1269,26 @@ unsigned long __read_page_state(unsigned long offset)
        return ret;
 }
 
-void __mod_page_state(unsigned long offset, unsigned long delta)
+void __mod_page_state_offset(unsigned long offset, unsigned long delta)
+{
+       void *ptr;
+
+       ptr = &__get_cpu_var(page_states);
+       *(unsigned long *)(ptr + offset) += delta;
+}
+EXPORT_SYMBOL(__mod_page_state_offset);
+
+void mod_page_state_offset(unsigned long offset, unsigned long delta)
 {
        unsigned long flags;
-       void* ptr;
+       void *ptr;
 
        local_irq_save(flags);
        ptr = &__get_cpu_var(page_states);
-       *(unsigned long*)(ptr + offset) += delta;
+       *(unsigned long *)(ptr + offset) += delta;
        local_irq_restore(flags);
 }
-
-EXPORT_SYMBOL(__mod_page_state);
+EXPORT_SYMBOL(mod_page_state_offset);
 
 void __get_zone_counts(unsigned long *active, unsigned long *inactive,
                        unsigned long *free, struct pglist_data *pgdat)
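
The page-state accessors are split along the same interrupt convention as the allocator paths above: mod_page_state_offset() disables interrupts itself, while the newly exported __mod_page_state_offset() assumes the caller already has. Callers normally reach these through offsetof()-based wrappers; a sketch of what such a wrapper looks like (the real macros of this era live in include/linux/page-flags.h):

        /* sketch: mapping a named counter to its byte offset */
        #define mod_page_state(member, delta)                           \
                mod_page_state_offset(offsetof(struct page_state, member), \
                                      (delta))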
@@ -1335,7 +1374,7 @@ void show_free_areas(void)
                show_node(zone);
                printk("%s per-cpu:", zone->name);
 
-               if (!zone->present_pages) {
+               if (!populated_zone(zone)) {
                        printk(" empty\n");
                        continue;
                } else
@@ -1347,10 +1386,9 @@ void show_free_areas(void)
                        pageset = zone_pcp(zone, cpu);
 
                        for (temperature = 0; temperature < 2; temperature++)
-                               printk("cpu %d %s: low %d, high %d, batch %d used:%d\n",
+                               printk("cpu %d %s: high %d, batch %d used:%d\n",
                                        cpu,
                                        temperature ? "cold" : "hot",
-                                       pageset->pcp[temperature].low,
                                        pageset->pcp[temperature].high,
                                        pageset->pcp[temperature].batch,
                                        pageset->pcp[temperature].count);
@@ -1413,7 +1451,7 @@ void show_free_areas(void)
 
                show_node(zone);
                printk("%s: ", zone->name);
-               if (!zone->present_pages) {
+               if (!populated_zone(zone)) {
                        printk("empty\n");
                        continue;
                }
@@ -1433,36 +1471,29 @@ void show_free_areas(void)
 
 /*
  * Builds allocation fallback zone lists.
+ *
+ * Add all populated zones of a node to the zonelist.
  */
-static int __init build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist, int j, int k)
-{
-       switch (k) {
-               struct zone *zone;
-       default:
-               BUG();
-       case ZONE_HIGHMEM:
-               zone = pgdat->node_zones + ZONE_HIGHMEM;
-               if (zone->present_pages) {
+static int __init build_zonelists_node(pg_data_t *pgdat,
+                       struct zonelist *zonelist, int nr_zones, int zone_type)
+{
+       struct zone *zone;
+
+       BUG_ON(zone_type > ZONE_HIGHMEM);
+
+       do {
+               zone = pgdat->node_zones + zone_type;
+               if (populated_zone(zone)) {
 #ifndef CONFIG_HIGHMEM
-                       BUG();
+                       BUG_ON(zone_type > ZONE_NORMAL);
 #endif
-                       zonelist->zones[j++] = zone;
+                       zonelist->zones[nr_zones++] = zone;
+                       check_highest_zone(zone_type);
                }
-       case ZONE_NORMAL:
-               zone = pgdat->node_zones + ZONE_NORMAL;
-               if (zone->present_pages)
-                       zonelist->zones[j++] = zone;
-       case ZONE_DMA32:
-               zone = pgdat->node_zones + ZONE_DMA32;
-               if (zone->present_pages)
-                       zonelist->zones[j++] = zone;
-       case ZONE_DMA:
-               zone = pgdat->node_zones + ZONE_DMA;
-               if (zone->present_pages)
-                       zonelist->zones[j++] = zone;
-       }
+               zone_type--;
 
-       return j;
+       } while (zone_type >= 0);
+       return nr_zones;
 }
 
 static inline int highest_zone(int zone_bits)
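
The old fall-through switch encoded the fallback order implicitly; the countdown loop makes it explicit and picks up populated_zone() and check_highest_zone() along the way. For zone_type == ZONE_HIGHMEM the probe order is:

        ZONE_HIGHMEM (3) -> ZONE_NORMAL (2) -> ZONE_DMA32 (1) -> ZONE_DMA (0)

with each populated zone appended to the zonelist, exactly what the old cases produced by falling through.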
@@ -1709,8 +1740,6 @@ void __devinit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
        for (pfn = start_pfn; pfn < end_pfn; pfn++, page++) {
                if (!early_pfn_valid(pfn))
                        continue;
-               if (!early_pfn_in_nid(pfn, nid))
-                       continue;
                page = pfn_to_page(pfn);
                set_page_links(page, zone, nid, pfn);
                set_page_count(page, 1);
@@ -1772,16 +1801,16 @@ static int __devinit zone_batchsize(struct zone *zone)
                batch = 1;
 
        /*
-        * We will be trying to allcoate bigger chunks of contiguous
-        * memory of the order of fls(batch).  This should result in
-        * better cache coloring.
+        * Clamp the batch to a 2^n - 1 value. Having a power
+        * of 2 value was found to be more likely to have
+        * suboptimal cache aliasing properties in some cases.
         *
-        * A sanity check also to ensure that batch is still in limits.
+        * For example if 2 tasks are alternately allocating
+        * batches of pages, one task can end up with a lot
+        * of pages of one half of the possible page colors
+        * and the other with pages of the other colors.
         */
-       batch = (1 << fls(batch + batch/2));
-
-       if (fls(batch) >= (PAGE_SHIFT + MAX_ORDER - 2))
-               batch = PAGE_SHIFT + ((MAX_ORDER - 1 - PAGE_SHIFT)/2);
+       batch = (1 << (fls(batch + batch/2)-1)) - 1;
 
        return batch;
 }
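
The old code rounded the batch up to a power of two; the new expression clamps it to the 2^n - 1 just below, because power-of-two batches hand out pages of the same colors to alternating allocators, as the new comment explains. Worked example for batch = 32:

        fls(32 + 32/2) = fls(48) = 6
        (1 << (6 - 1)) - 1 = 31         /* 2^n - 1, never a power of two */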
@@ -1794,19 +1823,35 @@ inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
 
        pcp = &p->pcp[0];               /* hot */
        pcp->count = 0;
-       pcp->low = 0;
        pcp->high = 6 * batch;
        pcp->batch = max(1UL, 1 * batch);
        INIT_LIST_HEAD(&pcp->list);
 
        pcp = &p->pcp[1];               /* cold*/
        pcp->count = 0;
-       pcp->low = 0;
        pcp->high = 2 * batch;
        pcp->batch = max(1UL, batch/2);
        INIT_LIST_HEAD(&pcp->list);
 }
 
+/*
+ * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist
+ * to the value high for the pageset p.
+ */
+
+static void setup_pagelist_highmark(struct per_cpu_pageset *p,
+                               unsigned long high)
+{
+       struct per_cpu_pages *pcp;
+
+       pcp = &p->pcp[0]; /* hot list */
+       pcp->high = high;
+       pcp->batch = max(1UL, high/4);
+       if ((high/4) > (PAGE_SHIFT * 8))
+               pcp->batch = PAGE_SHIFT * 8;
+}
+
+
 #ifdef CONFIG_NUMA
 /*
  * Boot pageset table. One per cpu which is going to be used for all
@@ -1838,12 +1883,16 @@ static int __devinit process_zones(int cpu)
 
        for_each_zone(zone) {
 
-               zone->pageset[cpu] = kmalloc_node(sizeof(struct per_cpu_pageset),
+               zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
                                         GFP_KERNEL, cpu_to_node(cpu));
-               if (!zone->pageset[cpu])
+               if (!zone_pcp(zone, cpu))
                        goto bad;
 
-               setup_pageset(zone->pageset[cpu], zone_batchsize(zone));
+               setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone));
+
+               if (percpu_pagelist_fraction)
+                       setup_pagelist_highmark(zone_pcp(zone, cpu),
+                               (zone->present_pages / percpu_pagelist_fraction));
        }
 
        return 0;
@@ -1851,15 +1900,14 @@ bad:
        for_each_zone(dzone) {
                if (dzone == zone)
                        break;
-               kfree(dzone->pageset[cpu]);
-               dzone->pageset[cpu] = NULL;
+               kfree(zone_pcp(dzone, cpu));
+               zone_pcp(dzone, cpu) = NULL;
        }
        return -ENOMEM;
 }
 
 static inline void free_zone_pagesets(int cpu)
 {
-#ifdef CONFIG_NUMA
        struct zone *zone;
 
        for_each_zone(zone) {
@@ -1868,7 +1916,6 @@ static inline void free_zone_pagesets(int cpu)
                zone_pcp(zone, cpu) = NULL;
                kfree(pset);
        }
-#endif
 }
 
 static int __devinit pageset_cpuup_callback(struct notifier_block *nfb,
@@ -1896,7 +1943,7 @@ static int __devinit pageset_cpuup_callback(struct notifier_block *nfb,
 static struct notifier_block pageset_notifier =
        { &pageset_cpuup_callback, NULL, 0 };
 
-void __init setup_per_cpu_pageset()
+void __init setup_per_cpu_pageset(void)
 {
        int err;
 
@@ -1939,7 +1986,7 @@ static __devinit void zone_pcp_init(struct zone *zone)
        for (cpu = 0; cpu < NR_CPUS; cpu++) {
 #ifdef CONFIG_NUMA
                /* Early boot. Slab allocator not functional yet */
-               zone->pageset[cpu] = &boot_pageset[cpu];
+               zone_pcp(zone, cpu) = &boot_pageset[cpu];
                setup_pageset(&boot_pageset[cpu],0);
 #else
                setup_pageset(zone_pcp(zone,cpu), batch);
@@ -2116,7 +2163,7 @@ static int frag_show(struct seq_file *m, void *arg)
        int order;
 
        for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
-               if (!zone->present_pages)
+               if (!populated_zone(zone))
                        continue;
 
                spin_lock_irqsave(&zone->lock, flags);
@@ -2149,7 +2196,7 @@ static int zoneinfo_show(struct seq_file *m, void *arg)
        for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; zone++) {
                int i;
 
-               if (!zone->present_pages)
+               if (!populated_zone(zone))
                        continue;
 
                spin_lock_irqsave(&zone->lock, flags);
@@ -2182,7 +2229,7 @@ static int zoneinfo_show(struct seq_file *m, void *arg)
                seq_printf(m,
                           ")"
                           "\n  pagesets");
-               for (i = 0; i < ARRAY_SIZE(zone->pageset); i++) {
+               for_each_online_cpu(i) {
                        struct per_cpu_pageset *pageset;
                        int j;
 
@@ -2197,12 +2244,10 @@ static int zoneinfo_show(struct seq_file *m, void *arg)
                                seq_printf(m,
                                           "\n    cpu: %i pcp: %i"
                                           "\n              count: %i"
-                                          "\n              low:   %i"
                                           "\n              high:  %i"
                                           "\n              batch: %i",
                                           i, j,
                                           pageset->pcp[j].count,
-                                          pageset->pcp[j].low,
                                           pageset->pcp[j].high,
                                           pageset->pcp[j].batch);
                        }
@@ -2257,32 +2302,40 @@ static char *vmstat_text[] = {
        "pgpgout",
        "pswpin",
        "pswpout",
-       "pgalloc_high",
 
+       "pgalloc_high",
        "pgalloc_normal",
+       "pgalloc_dma32",
        "pgalloc_dma",
+
        "pgfree",
        "pgactivate",
        "pgdeactivate",
 
        "pgfault",
        "pgmajfault",
+
        "pgrefill_high",
        "pgrefill_normal",
+       "pgrefill_dma32",
        "pgrefill_dma",
 
        "pgsteal_high",
        "pgsteal_normal",
+       "pgsteal_dma32",
        "pgsteal_dma",
+
        "pgscan_kswapd_high",
        "pgscan_kswapd_normal",
-
+       "pgscan_kswapd_dma32",
        "pgscan_kswapd_dma",
+
        "pgscan_direct_high",
        "pgscan_direct_normal",
+       "pgscan_direct_dma32",
        "pgscan_direct_dma",
-       "pginodesteal",
 
+       "pginodesteal",
        "slabs_scanned",
        "kswapd_steal",
        "kswapd_inodesteal",
@@ -2539,6 +2592,32 @@ int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
        return 0;
 }
 
+/*
+ * percpu_pagelist_fraction - changes the pcp->high for each zone on each
+ * cpu.  It is the fraction of total pages in each zone that a hot per-cpu
+ * pagelist can have before it is flushed back to the buddy allocator.
+ */
+
+int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
+       struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
+{
+       struct zone *zone;
+       unsigned int cpu;
+       int ret;
+
+       ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
+       if (!write || (ret == -EINVAL))
+               return ret;
+       for_each_zone(zone) {
+               for_each_online_cpu(cpu) {
+                       unsigned long high;
+                       high = zone->present_pages / percpu_pagelist_fraction;
+                       setup_pagelist_highmark(zone_pcp(zone, cpu), high);
+               }
+       }
+       return 0;
+}
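
The handler recomputes every online cpu's hot-list high mark on each write, with batch derived as high/4 and capped at PAGE_SHIFT * 8 pages in setup_pagelist_highmark() above. As a usage illustration, writing 8 to /proc/sys/vm/percpu_pagelist_fraction lets each hot per-cpu list grow to 1/8 of its zone before being flushed; for a 262144-page zone:

        high  = 262144 / 8 = 32768 pages
        batch = min(32768 / 4, PAGE_SHIFT * 8) = 96     /* with PAGE_SHIFT == 12 */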
+
 __initdata int hashdist = HASHDIST_DEFAULT;
 
 #ifdef CONFIG_NUMA