[PATCH] Swap Migration V5: PF_SWAPWRITE to allow writing to swap
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3796187..ad3d020 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -36,6 +36,7 @@
 #include <linux/memory_hotplug.h>
 #include <linux/nodemask.h>
 #include <linux/vmalloc.h>
+#include <linux/mempolicy.h>
 
 #include <asm/tlbflush.h>
 #include "internal.h"
@@ -52,6 +53,9 @@ struct pglist_data *pgdat_list __read_mostly;
 unsigned long totalram_pages __read_mostly;
 unsigned long totalhigh_pages __read_mostly;
 long nr_swap_pages;
+int percpu_pagelist_fraction;
+
+static void fastcall free_hot_cold_page(struct page *page, int cold);
 
 /*
  * results with 256, 32 in the lowmem_reserve sysctl:
@@ -130,16 +134,16 @@ static inline int bad_range(struct zone *zone, struct page *page)
 }
 #endif
 
-static void bad_page(const char *function, struct page *page)
+static void bad_page(struct page *page)
 {
-       printk(KERN_EMERG "Bad page state at %s (in process '%s', page %p)\n",
-               function, current->comm, page);
-       printk(KERN_EMERG "flags:0x%0*lx mapping:%p mapcount:%d count:%d\n",
-               (int)(2*sizeof(unsigned long)), (unsigned long)page->flags,
-               page->mapping, page_mapcount(page), page_count(page));
-       printk(KERN_EMERG "Backtrace:\n");
+       printk(KERN_EMERG "Bad page state in process '%s'\n"
+               "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
+               "Trying to fix it up, but a reboot is needed\n"
+               "Backtrace:\n",
+               current->comm, page, (int)(2*sizeof(unsigned long)),
+               (unsigned long)page->flags, page->mapping,
+               page_mapcount(page), page_count(page));
        dump_stack();
-       printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n");
        page->flags &= ~(1 << PG_lru    |
                        1 << PG_private |
                        1 << PG_locked  |
@@ -192,19 +196,15 @@ static void destroy_compound_page(struct page *page, unsigned long order)
        int i;
        int nr_pages = 1 << order;
 
-       if (!PageCompound(page))
-               return;
-
-       if (page[1].index != order)
-               bad_page(__FUNCTION__, page);
+       if (unlikely(page[1].index != order))
+               bad_page(page);
 
        for (i = 0; i < nr_pages; i++) {
                struct page *p = page + i;
 
-               if (!PageCompound(p))
-                       bad_page(__FUNCTION__, page);
-               if (page_private(p) != (unsigned long)page)
-                       bad_page(__FUNCTION__, page);
+               if (unlikely(!PageCompound(p) |
+                               (page_private(p) != (unsigned long)page)))
+                       bad_page(page);
                ClearPageCompound(p);
        }
 }
@@ -308,13 +308,13 @@ static inline int page_is_buddy(struct page *page, int order)
  * -- wli
  */
 
-static inline void __free_pages_bulk (struct page *page,
+static inline void __free_one_page(struct page *page,
                struct zone *zone, unsigned int order)
 {
        unsigned long page_idx;
        int order_size = 1 << order;
 
-       if (unlikely(order))
+       if (unlikely(PageCompound(page)))
                destroy_compound_page(page, order);
 
        page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
@@ -346,7 +346,7 @@ static inline void __free_pages_bulk (struct page *page,
        zone->free_area[order].nr_free++;
 }
 
-static inline int free_pages_check(const char *function, struct page *page)
+static inline int free_pages_check(struct page *page)
 {
        if (unlikely(page_mapcount(page) |
                (page->mapping != NULL)  |
@@ -361,7 +361,7 @@ static inline int free_pages_check(const char *function, struct page *page)
                        1 << PG_swapcache |
                        1 << PG_writeback |
                        1 << PG_reserved ))))
-               bad_page(function, page);
+               bad_page(page);
        if (PageDirty(page))
                __ClearPageDirty(page);
        /*
@@ -383,55 +383,89 @@ static inline int free_pages_check(const char *function, struct page *page)
  * And clear the zone's pages_scanned counter, to hold off the "all pages are
  * pinned" detection logic.
  */
-static int
-free_pages_bulk(struct zone *zone, int count,
-               struct list_head *list, unsigned int order)
+static void free_pages_bulk(struct zone *zone, int count,
+                                       struct list_head *list, int order)
 {
-       struct page *page = NULL;
-       int ret = 0;
-
        spin_lock(&zone->lock);
        zone->all_unreclaimable = 0;
        zone->pages_scanned = 0;
-       while (!list_empty(list) && count--) {
+       while (count--) {
+               struct page *page;
+
+               BUG_ON(list_empty(list));
                page = list_entry(list->prev, struct page, lru);
-               /* have to delete it as __free_pages_bulk list manipulates */
+               /* have to delete it as __free_one_page list manipulates */
                list_del(&page->lru);
-               __free_pages_bulk(page, zone, order);
-               ret++;
+               __free_one_page(page, zone, order);
        }
        spin_unlock(&zone->lock);
-       return ret;
 }
 
-void __free_pages_ok(struct page *page, unsigned int order)
+static void free_one_page(struct zone *zone, struct page *page, int order)
 {
-       unsigned long flags;
        LIST_HEAD(list);
+       list_add(&page->lru, &list);
+       free_pages_bulk(zone, 1, &list, order);
+}
+
+static void __free_pages_ok(struct page *page, unsigned int order)
+{
+       unsigned long flags;
        int i;
        int reserved = 0;
 
        arch_free_page(page, order);
 
 #ifndef CONFIG_MMU
-       if (order > 0)
-               for (i = 1 ; i < (1 << order) ; ++i)
-                       __put_page(page + i);
+       for (i = 1 ; i < (1 << order) ; ++i)
+               __put_page(page + i);
 #endif
 
        for (i = 0 ; i < (1 << order) ; ++i)
-               reserved += free_pages_check(__FUNCTION__, page + i);
+               reserved += free_pages_check(page + i);
        if (reserved)
                return;
 
-       list_add(&page->lru, &list);
-       mod_page_state(pgfree, 1 << order);
-       kernel_map_pages(page, 1<<order, 0);
+       kernel_map_pages(page, 1 << order, 0);
        local_irq_save(flags);
-       free_pages_bulk(page_zone(page), 1, &list, order);
+       __mod_page_state(pgfree, 1 << order);
+       free_one_page(page_zone(page), page, order);
        local_irq_restore(flags);
 }
 
+/*
+ * permit the bootmem allocator to evade page validation on high-order frees
+ */
+void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order)
+{
+       if (order == 0) {
+               __ClearPageReserved(page);
+               set_page_count(page, 0);
+
+               free_hot_cold_page(page, 0);
+       } else {
+               LIST_HEAD(list);
+               int loop;
+
+               for (loop = 0; loop < BITS_PER_LONG; loop++) {
+                       struct page *p = &page[loop];
+
+                       if (loop + 16 < BITS_PER_LONG)
+                               prefetchw(p + 16);
+                       __ClearPageReserved(p);
+                       set_page_count(p, 0);
+               }
+
+               arch_free_page(page, order);
+
+               mod_page_state(pgfree, 1 << order);
+
+               list_add(&page->lru, &list);
+               kernel_map_pages(page, 1 << order, 0);
+               free_pages_bulk(page_zone(page), 1, &list, order);
+       }
+}
+
 
 /*
  * The order of subdivision here is critical for the IO subsystem.
@@ -447,8 +481,7 @@ void __free_pages_ok(struct page *page, unsigned int order)
  *
  * -- wli
  */
-static inline struct page *
-expand(struct zone *zone, struct page *page,
+static inline void expand(struct zone *zone, struct page *page,
        int low, int high, struct free_area *area)
 {
        unsigned long size = 1 << high;
@@ -462,7 +495,6 @@ expand(struct zone *zone, struct page *page,
                area->nr_free++;
                set_page_order(&page[size], high);
        }
-       return page;
 }
 
 /*
@@ -484,7 +516,7 @@ static int prep_new_page(struct page *page, int order)
                        1 << PG_swapcache |
                        1 << PG_writeback |
                        1 << PG_reserved ))))
-               bad_page(__FUNCTION__, page);
+               bad_page(page);
 
        /*
         * For now, we report if PG_reserved was found set, but do not
@@ -522,7 +554,8 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order)
                rmv_page_order(page);
                area->nr_free--;
                zone->free_pages -= 1UL << order;
-               return expand(zone, page, order, current_order, area);
+               expand(zone, page, order, current_order, area);
+               return page;
        }
 
        return NULL;
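
The expand() change above only alters the calling convention (the page pointer now comes back from __rmqueue() itself); the splitting arithmetic is unchanged. As a rough illustration, the halving loop can be modelled in plain userspace C. The names and the free-count array below are invented for the sketch and are not part of the patch:

#include <stdio.h>

#define MAX_ORDER 11

static unsigned long nr_free[MAX_ORDER];   /* stand-in for zone->free_area[].nr_free */

/* Split a 2^high block down to 2^low pages, returning each unused half
 * to the free list of the matching (smaller) order. */
static void expand_model(int low, int high)
{
        unsigned long size = 1UL << high;

        while (high > low) {
                high--;
                size >>= 1;
                nr_free[high]++;
                printf("buddy of %lu pages parked at order %d\n", size, high);
        }
}

int main(void)
{
        expand_model(0, 3);     /* carve one page out of an order-3 block */
        return 0;
}
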
@@ -537,19 +570,16 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
                        unsigned long count, struct list_head *list)
 {
        int i;
-       int allocated = 0;
-       struct page *page;
        
        spin_lock(&zone->lock);
        for (i = 0; i < count; ++i) {
-               page = __rmqueue(zone, order);
-               if (page == NULL)
+               struct page *page = __rmqueue(zone, order);
+               if (unlikely(page == NULL))
                        break;
-               allocated++;
                list_add_tail(&page->lru, list);
        }
        spin_unlock(&zone->lock);
-       return allocated;
+       return i;
 }
 
 #ifdef CONFIG_NUMA
@@ -568,14 +598,13 @@ void drain_remote_pages(void)
                if (zone->zone_pgdat->node_id == numa_node_id())
                        continue;
 
-               pset = zone->pageset[smp_processor_id()];
+               pset = zone_pcp(zone, smp_processor_id());
                for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
                        struct per_cpu_pages *pcp;
 
                        pcp = &pset->pcp[i];
-                       if (pcp->count)
-                               pcp->count -= free_pages_bulk(zone, pcp->count,
-                                               &pcp->list, 0);
+                       free_pages_bulk(zone, pcp->count, &pcp->list, 0);
+                       pcp->count = 0;
                }
        }
        local_irq_restore(flags);
@@ -598,8 +627,8 @@ static void __drain_pages(unsigned int cpu)
 
                        pcp = &pset->pcp[i];
                        local_irq_save(flags);
-                       pcp->count -= free_pages_bulk(zone, pcp->count,
-                                               &pcp->list, 0);
+                       free_pages_bulk(zone, pcp->count, &pcp->list, 0);
+                       pcp->count = 0;
                        local_irq_restore(flags);
                }
        }
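
Both drain paths above reflect the new free_pages_bulk() contract: the function no longer reports how many pages it freed, it frees exactly 'count' entries and BUG()s if the list runs short, so the caller resets pcp->count itself. A minimal userspace model of that contract; the list type and names are made up for illustration:

#include <assert.h>

struct pagelist { int nr; };                    /* stand-in for a list of pages */

/* Free exactly 'count' entries; trap if the caller lied about the count. */
static void free_bulk_model(struct pagelist *l, int count)
{
        while (count--) {
                assert(l->nr > 0);              /* mirrors the new BUG_ON() */
                l->nr--;                        /* "__free_one_page()" */
        }
}

/* Caller-side bookkeeping, as in __drain_pages(): drain everything queued. */
static void drain_model(struct pagelist *l, int *pcp_count)
{
        free_bulk_model(l, *pcp_count);
        *pcp_count = 0;
}

int main(void)
{
        struct pagelist l = { .nr = 5 };
        int pcp_count = 5;

        drain_model(&l, &pcp_count);
        return 0;
}
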
@@ -646,18 +675,14 @@ void drain_local_pages(void)
 }
 #endif /* CONFIG_PM */
 
-static void zone_statistics(struct zonelist *zonelist, struct zone *z)
+static void zone_statistics(struct zonelist *zonelist, struct zone *z, int cpu)
 {
 #ifdef CONFIG_NUMA
-       unsigned long flags;
-       int cpu;
        pg_data_t *pg = z->zone_pgdat;
        pg_data_t *orig = zonelist->zones[0]->zone_pgdat;
        struct per_cpu_pageset *p;
 
-       local_irq_save(flags);
-       cpu = smp_processor_id();
-       p = zone_pcp(z,cpu);
+       p = zone_pcp(z, cpu);
        if (pg == orig) {
                p->numa_hit++;
        } else {
@@ -668,14 +693,12 @@ static void zone_statistics(struct zonelist *zonelist, struct zone *z)
                p->local_node++;
        else
                p->other_node++;
-       local_irq_restore(flags);
 #endif
 }
 
 /*
  * Free a 0-order page
  */
-static void FASTCALL(free_hot_cold_page(struct page *page, int cold));
 static void fastcall free_hot_cold_page(struct page *page, int cold)
 {
        struct zone *zone = page_zone(page);
@@ -686,18 +709,20 @@ static void fastcall free_hot_cold_page(struct page *page, int cold)
 
        if (PageAnon(page))
                page->mapping = NULL;
-       if (free_pages_check(__FUNCTION__, page))
+       if (free_pages_check(page))
                return;
 
-       inc_page_state(pgfree);
        kernel_map_pages(page, 1, 0);
 
        pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
        local_irq_save(flags);
+       __inc_page_state(pgfree);
        list_add(&page->lru, &pcp->list);
        pcp->count++;
-       if (pcp->count >= pcp->high)
-               pcp->count -= free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
+       if (pcp->count >= pcp->high) {
+               free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
+               pcp->count -= pcp->batch;
+       }
        local_irq_restore(flags);
        put_cpu();
 }
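
The freeing fast path above caches order-0 pages on a per-cpu hot/cold list and, once the list reaches pcp->high, hands pcp->batch pages back to the buddy allocator in one go. A counters-only sketch of that policy; the high/batch values here are illustrative, not what the kernel computes:

#include <stdio.h>

struct pcp_model {
        int count;      /* pages currently cached on this CPU's list */
        int high;       /* flush threshold */
        int batch;      /* pages returned to the buddy allocator per flush */
};

static void free_one_model(struct pcp_model *pcp)
{
        pcp->count++;                           /* list_add(&page->lru, ...) */
        if (pcp->count >= pcp->high) {
                /* free_pages_bulk(zone, pcp->batch, &pcp->list, 0); */
                pcp->count -= pcp->batch;
                printf("flushed %d pages, %d still cached\n",
                       pcp->batch, pcp->count);
        }
}

int main(void)
{
        struct pcp_model pcp = { .count = 0, .high = 32, .batch = 8 };
        int i;

        for (i = 0; i < 40; i++)
                free_one_model(&pcp);
        return 0;
}
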
@@ -726,49 +751,58 @@ static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
  * we cheat by calling it from here, in the order > 0 path.  Saves a branch
  * or two.
  */
-static struct page *
-buffered_rmqueue(struct zone *zone, int order, gfp_t gfp_flags)
+static struct page *buffered_rmqueue(struct zonelist *zonelist,
+                       struct zone *zone, int order, gfp_t gfp_flags)
 {
        unsigned long flags;
        struct page *page;
        int cold = !!(gfp_flags & __GFP_COLD);
+       int cpu;
 
 again:
-       if (order == 0) {
+       cpu  = get_cpu();
+       if (likely(order == 0)) {
                struct per_cpu_pages *pcp;
 
-               page = NULL;
-               pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
+               pcp = &zone_pcp(zone, cpu)->pcp[cold];
                local_irq_save(flags);
-               if (!pcp->count)
+               if (!pcp->count) {
                        pcp->count += rmqueue_bulk(zone, 0,
                                                pcp->batch, &pcp->list);
-               if (likely(pcp->count)) {
-                       page = list_entry(pcp->list.next, struct page, lru);
-                       list_del(&page->lru);
-                       pcp->count--;
+                       if (unlikely(!pcp->count))
+                               goto failed;
                }
-               local_irq_restore(flags);
-               put_cpu();
+               page = list_entry(pcp->list.next, struct page, lru);
+               list_del(&page->lru);
+               pcp->count--;
        } else {
                spin_lock_irqsave(&zone->lock, flags);
                page = __rmqueue(zone, order);
-               spin_unlock_irqrestore(&zone->lock, flags);
+               spin_unlock(&zone->lock);
+               if (!page)
+                       goto failed;
        }
 
-       if (page != NULL) {
-               BUG_ON(bad_range(zone, page));
-               mod_page_state_zone(zone, pgalloc, 1 << order);
-               if (prep_new_page(page, order))
-                       goto again;
+       __mod_page_state_zone(zone, pgalloc, 1 << order);
+       zone_statistics(zonelist, zone, cpu);
+       local_irq_restore(flags);
+       put_cpu();
+
+       BUG_ON(bad_range(zone, page));
+       if (prep_new_page(page, order))
+               goto again;
 
-               if (gfp_flags & __GFP_ZERO)
-                       prep_zero_page(page, order, gfp_flags);
+       if (gfp_flags & __GFP_ZERO)
+               prep_zero_page(page, order, gfp_flags);
 
-               if (order && (gfp_flags & __GFP_COMP))
-                       prep_compound_page(page, order);
-       }
+       if (order && (gfp_flags & __GFP_COMP))
+               prep_compound_page(page, order);
        return page;
+
+failed:
+       local_irq_restore(flags);
+       put_cpu();
+       return NULL;
 }
 
 #define ALLOC_NO_WATERMARKS    0x01 /* don't check watermarks at all */
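
On the allocation side, the reworked order-0 fast path refills an empty per-cpu list from the buddy allocator in a single rmqueue_bulk() call, only gives up through the common failed: label when that refill produced nothing, and updates the statistics while interrupts are still disabled. A counters-only userspace sketch of that flow, with an invented refill() standing in for rmqueue_bulk():

#include <stdbool.h>
#include <stdio.h>

struct pcp_model {
        int count;      /* pages cached on this CPU's list */
        int batch;      /* how many pages one refill asks for */
};

/* Stand-in for rmqueue_bulk(): how many pages the buddy lists can supply. */
static int refill(int want, int *buddy_free)
{
        int got = want < *buddy_free ? want : *buddy_free;

        *buddy_free -= got;
        return got;
}

static bool alloc_one_model(struct pcp_model *pcp, int *buddy_free)
{
        if (!pcp->count) {
                pcp->count += refill(pcp->batch, buddy_free);
                if (!pcp->count)
                        return false;           /* the 'failed:' path */
        }
        pcp->count--;                           /* pop one cached page */
        return true;
}

int main(void)
{
        struct pcp_model pcp = { .count = 0, .batch = 16 };
        int buddy_free = 20;
        int i;

        for (i = 0; i < 25; i++)
                if (!alloc_one_model(&pcp, &buddy_free))
                        printf("allocation %d failed\n", i);
        return 0;
}
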
@@ -844,9 +878,8 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
                                continue;
                }
 
-               page = buffered_rmqueue(*z, order, gfp_mask);
+               page = buffered_rmqueue(zonelist, *z, order, gfp_mask);
                if (page) {
-                       zone_statistics(zonelist, *z);
                        break;
                }
        } while (*(++z) != NULL);
@@ -1174,6 +1207,7 @@ static void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask)
        int cpu = 0;
 
        memset(ret, 0, sizeof(*ret));
+       cpus_and(*cpumask, *cpumask, cpu_online_map);
 
        cpu = first_cpu(*cpumask);
        while (cpu < NR_CPUS) {
@@ -1221,12 +1255,12 @@ void get_full_page_state(struct page_state *ret)
        __get_page_state(ret, sizeof(*ret) / sizeof(unsigned long), &mask);
 }
 
-unsigned long __read_page_state(unsigned long offset)
+unsigned long read_page_state_offset(unsigned long offset)
 {
        unsigned long ret = 0;
        int cpu;
 
-       for_each_cpu(cpu) {
+       for_each_online_cpu(cpu) {
                unsigned long in;
 
                in = (unsigned long)&per_cpu(page_states, cpu) + offset;
@@ -1235,18 +1269,26 @@ unsigned long __read_page_state(unsigned long offset)
        return ret;
 }
 
-void __mod_page_state(unsigned long offset, unsigned long delta)
+void __mod_page_state_offset(unsigned long offset, unsigned long delta)
+{
+       void *ptr;
+
+       ptr = &__get_cpu_var(page_states);
+       *(unsigned long *)(ptr + offset) += delta;
+}
+EXPORT_SYMBOL(__mod_page_state_offset);
+
+void mod_page_state_offset(unsigned long offset, unsigned long delta)
 {
        unsigned long flags;
-       void* ptr;
+       void *ptr;
 
        local_irq_save(flags);
        ptr = &__get_cpu_var(page_states);
-       *(unsigned long*)(ptr + offset) += delta;
+       *(unsigned long *)(ptr + offset) += delta;
        local_irq_restore(flags);
 }
-
-EXPORT_SYMBOL(__mod_page_state);
+EXPORT_SYMBOL(mod_page_state_offset);
 
 void __get_zone_counts(unsigned long *active, unsigned long *inactive,
                        unsigned long *free, struct pglist_data *pgdat)
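
The page-state rework above keeps one struct of unsigned long counters per CPU and names a counter by its byte offset into that struct; read_page_state_offset() sums the offset across online CPUs, while the __ variant updates the local copy with interrupts already disabled by the caller. The addressing scheme can be sketched in userspace like this, with arrays standing in for per-cpu data and all names invented:

#include <stddef.h>
#include <stdio.h>

#define NR_CPUS 4

struct page_state_model {
        unsigned long pgalloc;
        unsigned long pgfree;
};

static struct page_state_model per_cpu_state[NR_CPUS];

/* mod_page_state_offset(): bump one counter, addressed by byte offset. */
static void mod_state_offset(int cpu, size_t offset, unsigned long delta)
{
        char *base = (char *)&per_cpu_state[cpu];

        *(unsigned long *)(base + offset) += delta;
}

/* read_page_state_offset(): sum one counter across every CPU. */
static unsigned long read_state_offset(size_t offset)
{
        unsigned long sum = 0;
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                sum += *(unsigned long *)((char *)&per_cpu_state[cpu] + offset);
        return sum;
}

int main(void)
{
        mod_state_offset(0, offsetof(struct page_state_model, pgfree), 3);
        mod_state_offset(2, offsetof(struct page_state_model, pgfree), 5);
        printf("pgfree = %lu\n",
               read_state_offset(offsetof(struct page_state_model, pgfree)));
        return 0;
}
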
@@ -1332,7 +1374,7 @@ void show_free_areas(void)
                show_node(zone);
                printk("%s per-cpu:", zone->name);
 
-               if (!zone->present_pages) {
+               if (!populated_zone(zone)) {
                        printk(" empty\n");
                        continue;
                } else
@@ -1409,7 +1451,7 @@ void show_free_areas(void)
 
                show_node(zone);
                printk("%s: ", zone->name);
-               if (!zone->present_pages) {
+               if (!populated_zone(zone)) {
                        printk("empty\n");
                        continue;
                }
@@ -1429,36 +1471,29 @@ void show_free_areas(void)
 
 /*
  * Builds allocation fallback zone lists.
+ *
+ * Add all populated zones of a node to the zonelist.
  */
-static int __init build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist, int j, int k)
-{
-       switch (k) {
-               struct zone *zone;
-       default:
-               BUG();
-       case ZONE_HIGHMEM:
-               zone = pgdat->node_zones + ZONE_HIGHMEM;
-               if (zone->present_pages) {
+static int __init build_zonelists_node(pg_data_t *pgdat,
+                       struct zonelist *zonelist, int nr_zones, int zone_type)
+{
+       struct zone *zone;
+
+       BUG_ON(zone_type > ZONE_HIGHMEM);
+
+       do {
+               zone = pgdat->node_zones + zone_type;
+               if (populated_zone(zone)) {
 #ifndef CONFIG_HIGHMEM
-                       BUG();
+                       BUG_ON(zone_type > ZONE_NORMAL);
 #endif
-                       zonelist->zones[j++] = zone;
+                       zonelist->zones[nr_zones++] = zone;
+                       check_highest_zone(zone_type);
                }
-       case ZONE_NORMAL:
-               zone = pgdat->node_zones + ZONE_NORMAL;
-               if (zone->present_pages)
-                       zonelist->zones[j++] = zone;
-       case ZONE_DMA32:
-               zone = pgdat->node_zones + ZONE_DMA32;
-               if (zone->present_pages)
-                       zonelist->zones[j++] = zone;
-       case ZONE_DMA:
-               zone = pgdat->node_zones + ZONE_DMA;
-               if (zone->present_pages)
-                       zonelist->zones[j++] = zone;
-       }
+               zone_type--;
 
-       return j;
+       } while (zone_type >= 0);
+       return nr_zones;
 }
 
 static inline int highest_zone(int zone_bits)
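
build_zonelists_node() now walks from the requested zone type down to ZONE_DMA and appends every populated zone, which gives the same fallback order as the old switch fallthrough (HighMem, then Normal, DMA32, DMA) without the fallthrough. A standalone model of the resulting ordering, with invented zone data:

#include <stdio.h>

enum { ZONE_DMA, ZONE_DMA32, ZONE_NORMAL, ZONE_HIGHMEM, MAX_NR_ZONES };

static const char *zone_names[MAX_NR_ZONES] = {
        "DMA", "DMA32", "Normal", "HighMem"
};

/* Append every populated zone, walking down from zone_type to ZONE_DMA. */
static int build_zonelist_model(const int populated[MAX_NR_ZONES],
                                int zonelist[MAX_NR_ZONES], int zone_type)
{
        int nr_zones = 0;

        do {
                if (populated[zone_type])
                        zonelist[nr_zones++] = zone_type;
                zone_type--;
        } while (zone_type >= 0);
        return nr_zones;
}

int main(void)
{
        int populated[MAX_NR_ZONES] = { 1, 0, 1, 1 };   /* no DMA32 on this node */
        int zonelist[MAX_NR_ZONES];
        int i, n = build_zonelist_model(populated, zonelist, ZONE_HIGHMEM);

        for (i = 0; i < n; i++)
                printf("fallback %d: %s\n", i, zone_names[zonelist[i]]);
        return 0;
}
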
@@ -1799,6 +1834,24 @@ inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
        INIT_LIST_HEAD(&pcp->list);
 }
 
+/*
+ * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist
+ * to the value high for the pageset p.
+ */
+
+static void setup_pagelist_highmark(struct per_cpu_pageset *p,
+                               unsigned long high)
+{
+       struct per_cpu_pages *pcp;
+
+       pcp = &p->pcp[0]; /* hot list */
+       pcp->high = high;
+       pcp->batch = max(1UL, high/4);
+       if ((high/4) > (PAGE_SHIFT * 8))
+               pcp->batch = PAGE_SHIFT * 8;
+}
+
+
 #ifdef CONFIG_NUMA
 /*
  * Boot pageset table. One per cpu which is going to be used for all
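
setup_pagelist_highmark() above derives the hot list's high mark from the zone size divided by percpu_pagelist_fraction, then sets batch to a quarter of that, clamped to at least 1 and at most PAGE_SHIFT * 8 pages. The arithmetic, modelled standalone; PAGE_SHIFT is assumed to be 12 (4 KiB pages) purely for the example:

#include <stdio.h>

#define PAGE_SHIFT 12

static void highmark_model(unsigned long present_pages, int fraction)
{
        unsigned long high = present_pages / fraction;
        unsigned long batch = high / 4;

        if (batch < 1)
                batch = 1;
        if (batch > PAGE_SHIFT * 8)
                batch = PAGE_SHIFT * 8;         /* 96 pages with 4 KiB pages */
        printf("high=%lu batch=%lu\n", high, batch);
}

int main(void)
{
        highmark_model(262144, 8);      /* a 1 GiB zone of 4 KiB pages, fraction 8 */
        return 0;
}
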
@@ -1830,12 +1883,16 @@ static int __devinit process_zones(int cpu)
 
        for_each_zone(zone) {
 
-               zone->pageset[cpu] = kmalloc_node(sizeof(struct per_cpu_pageset),
+               zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
                                         GFP_KERNEL, cpu_to_node(cpu));
-               if (!zone->pageset[cpu])
+               if (!zone_pcp(zone, cpu))
                        goto bad;
 
-               setup_pageset(zone->pageset[cpu], zone_batchsize(zone));
+               setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone));
+
+               if (percpu_pagelist_fraction)
+                       setup_pagelist_highmark(zone_pcp(zone, cpu),
+                               (zone->present_pages / percpu_pagelist_fraction));
        }
 
        return 0;
@@ -1843,15 +1900,14 @@ bad:
        for_each_zone(dzone) {
                if (dzone == zone)
                        break;
-               kfree(dzone->pageset[cpu]);
-               dzone->pageset[cpu] = NULL;
+               kfree(zone_pcp(dzone, cpu));
+               zone_pcp(dzone, cpu) = NULL;
        }
        return -ENOMEM;
 }
 
 static inline void free_zone_pagesets(int cpu)
 {
-#ifdef CONFIG_NUMA
        struct zone *zone;
 
        for_each_zone(zone) {
@@ -1860,7 +1916,6 @@ static inline void free_zone_pagesets(int cpu)
                zone_pcp(zone, cpu) = NULL;
                kfree(pset);
        }
-#endif
 }
 
 static int __devinit pageset_cpuup_callback(struct notifier_block *nfb,
@@ -1931,7 +1986,7 @@ static __devinit void zone_pcp_init(struct zone *zone)
        for (cpu = 0; cpu < NR_CPUS; cpu++) {
 #ifdef CONFIG_NUMA
                /* Early boot. Slab allocator not functional yet */
-               zone->pageset[cpu] = &boot_pageset[cpu];
+               zone_pcp(zone, cpu) = &boot_pageset[cpu];
                setup_pageset(&boot_pageset[cpu],0);
 #else
                setup_pageset(zone_pcp(zone,cpu), batch);
@@ -2108,7 +2163,7 @@ static int frag_show(struct seq_file *m, void *arg)
        int order;
 
        for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
-               if (!zone->present_pages)
+               if (!populated_zone(zone))
                        continue;
 
                spin_lock_irqsave(&zone->lock, flags);
@@ -2141,7 +2196,7 @@ static int zoneinfo_show(struct seq_file *m, void *arg)
        for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; zone++) {
                int i;
 
-               if (!zone->present_pages)
+               if (!populated_zone(zone))
                        continue;
 
                spin_lock_irqsave(&zone->lock, flags);
@@ -2174,7 +2229,7 @@ static int zoneinfo_show(struct seq_file *m, void *arg)
                seq_printf(m,
                           ")"
                           "\n  pagesets");
-               for (i = 0; i < ARRAY_SIZE(zone->pageset); i++) {
+               for_each_online_cpu(i) {
                        struct per_cpu_pageset *pageset;
                        int j;
 
@@ -2247,32 +2302,40 @@ static char *vmstat_text[] = {
        "pgpgout",
        "pswpin",
        "pswpout",
-       "pgalloc_high",
 
+       "pgalloc_high",
        "pgalloc_normal",
+       "pgalloc_dma32",
        "pgalloc_dma",
+
        "pgfree",
        "pgactivate",
        "pgdeactivate",
 
        "pgfault",
        "pgmajfault",
+
        "pgrefill_high",
        "pgrefill_normal",
+       "pgrefill_dma32",
        "pgrefill_dma",
 
        "pgsteal_high",
        "pgsteal_normal",
+       "pgsteal_dma32",
        "pgsteal_dma",
+
        "pgscan_kswapd_high",
        "pgscan_kswapd_normal",
-
+       "pgscan_kswapd_dma32",
        "pgscan_kswapd_dma",
+
        "pgscan_direct_high",
        "pgscan_direct_normal",
+       "pgscan_direct_dma32",
        "pgscan_direct_dma",
-       "pginodesteal",
 
+       "pginodesteal",
        "slabs_scanned",
        "kswapd_steal",
        "kswapd_inodesteal",
@@ -2529,6 +2592,32 @@ int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
        return 0;
 }
 
+/*
+ * percpu_pagelist_fraction - changes the pcp->high for each zone on each
+ * cpu.  It is the fraction of total pages in each zone that a hot per cpu pagelist
+ * can have before it gets flushed back to buddy allocator.
+ */
+
+int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
+       struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
+{
+       struct zone *zone;
+       unsigned int cpu;
+       int ret;
+
+       ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
+       if (!write || (ret == -EINVAL))
+               return ret;
+       for_each_zone(zone) {
+               for_each_online_cpu(cpu) {
+                       unsigned long  high;
+                       high = zone->present_pages / percpu_pagelist_fraction;
+                       setup_pagelist_highmark(zone_pcp(zone, cpu), high);
+               }
+       }
+       return 0;
+}
+
 __initdata int hashdist = HASHDIST_DEFAULT;
 
 #ifdef CONFIG_NUMA
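
Taken together with process_zones(), the sysctl means every online CPU may cache up to present_pages / percpu_pagelist_fraction hot pages of each zone, so machine-wide roughly nr_cpus / fraction of a zone can end up sitting in per-cpu lists. A worked example with made-up numbers:

#include <stdio.h>

int main(void)
{
        unsigned long present_pages = 262144;   /* a 1 GiB zone of 4 KiB pages */
        int fraction = 8;                       /* value written to the sysctl */
        int nr_cpus = 4;

        unsigned long per_cpu_high = present_pages / fraction;
        unsigned long worst_case = per_cpu_high * nr_cpus;

        printf("per-cpu high: %lu pages, worst case cached: %lu pages (%.0f%% of the zone)\n",
               per_cpu_high, worst_case, 100.0 * worst_case / present_pages);
        return 0;
}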