numa: add generic percpu var numa_node_id() implementation
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index cd88a86..6fe1b65 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
 #include <asm/div64.h>
 #include "internal.h"
 
+#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
+DEFINE_PER_CPU(int, numa_node);
+EXPORT_PER_CPU_SYMBOL(numa_node);
+#endif
+
 /*
  * Array of node states.
  */
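
With CONFIG_USE_PERCPU_NUMA_NODE_ID set, the per-cpu numa_node variable defined above becomes the backing store for the generic numa_node_id() named in the commit subject. A minimal sketch of the accessors this implies, presumably living in include/linux/topology.h; it is written here with the current this_cpu accessor names, so treat the exact helpers as an assumption:

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DECLARE_PER_CPU(int, numa_node);

#ifndef numa_node_id
/* Node of the CPU we are currently executing on. */
static inline int numa_node_id(void)
{
	return __this_cpu_read(numa_node);
}
#endif

#ifndef set_numa_node
/* Arch or CPU-bringup code records each CPU's node here. */
static inline void set_numa_node(int node)
{
	this_cpu_write(numa_node, node);
}
#endif
#endif	/* CONFIG_USE_PERCPU_NUMA_NODE_ID */

Architectures that already provide a cheaper numa_node_id() keep their own definition; the #ifndef guards make the percpu version a generic fallback.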
@@ -620,20 +625,23 @@ static void free_one_page(struct zone *zone, struct page *page, int order,
        spin_unlock(&zone->lock);
 }
 
-static void __free_pages_ok(struct page *page, unsigned int order)
+static bool free_pages_prepare(struct page *page, unsigned int order)
 {
-       unsigned long flags;
        int i;
        int bad = 0;
-       int wasMlocked = __TestClearPageMlocked(page);
 
        trace_mm_page_free_direct(page, order);
        kmemcheck_free_shadow(page, order);
 
-       for (i = 0 ; i < (1 << order) ; ++i)
-               bad += free_pages_check(page + i);
+       for (i = 0; i < (1 << order); i++) {
+               struct page *pg = page + i;
+
+               if (PageAnon(pg))
+                       pg->mapping = NULL;
+               bad += free_pages_check(pg);
+       }
        if (bad)
-               return;
+               return false;
 
        if (!PageHighMem(page)) {
                debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
@@ -643,6 +651,17 @@ static void __free_pages_ok(struct page *page, unsigned int order)
        arch_free_page(page, order);
        kernel_map_pages(page, 1 << order, 0);
 
+       return true;
+}
+
+static void __free_pages_ok(struct page *page, unsigned int order)
+{
+       unsigned long flags;
+       int wasMlocked = __TestClearPageMlocked(page);
+
+       if (!free_pages_prepare(page, order))
+               return;
+
        local_irq_save(flags);
        if (unlikely(wasMlocked))
                free_page_mlock(page);
@@ -1128,21 +1147,9 @@ void free_hot_cold_page(struct page *page, int cold)
        int migratetype;
        int wasMlocked = __TestClearPageMlocked(page);
 
-       trace_mm_page_free_direct(page, 0);
-       kmemcheck_free_shadow(page, 0);
-
-       if (PageAnon(page))
-               page->mapping = NULL;
-       if (free_pages_check(page))
+       if (!free_pages_prepare(page, 0))
                return;
 
-       if (!PageHighMem(page)) {
-               debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
-               debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
-       }
-       arch_free_page(page, 0);
-       kernel_map_pages(page, 1, 0);
-
        migratetype = get_pageblock_migratetype(page);
        set_page_private(page, migratetype);
        local_irq_save(flags);
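
After this hunk both free paths funnel through free_pages_prepare(): free_hot_cold_page() for order-0 pages headed for the per-cpu lists, and __free_pages_ok() for higher orders going straight to the buddy lists. One side effect worth noting: clearing page->mapping for anonymous pages, previously done only in the order-0 path, now happens for all orders. Schematically, the common entry point splits like this (a simplified sketch of __free_pages()):

/* Simplified: the refcount drop that selects one of the two prepared paths. */
void __free_pages(struct page *page, unsigned int order)
{
	if (put_page_testzero(page)) {
		if (order == 0)
			free_hot_cold_page(page, 0);	/* per-cpu list path */
		else
			__free_pages_ok(page, order);	/* direct buddy path */
	}
}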
@@ -1769,7 +1776,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 {
        struct page *page;
 
-       if (!order)
+       if (!order || compaction_deferred(preferred_zone))
                return NULL;
 
        *did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
@@ -1785,6 +1792,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
                                alloc_flags, preferred_zone,
                                migratetype);
                if (page) {
+                       preferred_zone->compact_considered = 0;
+                       preferred_zone->compact_defer_shift = 0;
                        count_vm_event(COMPACTSUCCESS);
                        return page;
                }
@@ -1795,6 +1804,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
                 * but not enough to satisfy watermarks.
                 */
                count_vm_event(COMPACTFAIL);
+               defer_compaction(preferred_zone);
 
                cond_resched();
        }
@@ -2566,8 +2576,11 @@ int numa_zonelist_order_handler(ctl_table *table, int write,
                        strncpy((char*)table->data, saved_string,
                                NUMA_ZONELIST_ORDER_LEN);
                        user_zonelist_order = oldval;
-               } else if (oldval != user_zonelist_order)
-                       build_all_zonelists();
+               } else if (oldval != user_zonelist_order) {
+                       mutex_lock(&zonelists_mutex);
+                       build_all_zonelists(NULL);
+                       mutex_unlock(&zonelists_mutex);
+               }
        }
 out:
        mutex_unlock(&zl_order_mutex);
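
Note the lock nesting in this handler: zl_order_mutex, released just above, guards the numa_zonelist_order string itself, while the new zonelists_mutex (defined a few hunks further down) serializes the actual rebuild against memory hotplug. In outline:

	mutex_lock(&zl_order_mutex);		/* protects the order string */
	...
	mutex_lock(&zonelists_mutex);		/* serializes zonelist rebuilds */
	build_all_zonelists(NULL);
	mutex_unlock(&zonelists_mutex);
	...
	mutex_unlock(&zl_order_mutex);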
@@ -2917,9 +2930,16 @@ static void build_zonelist_cache(pg_data_t *pgdat)
  */
 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
 static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
+static void setup_zone_pageset(struct zone *zone);
+
+/*
+ * Global mutex to protect against size modification of zonelists
+ * as well as to serialize pageset setup for the new populated zone.
+ */
+DEFINE_MUTEX(zonelists_mutex);
 
 /* return values int ....just for stop_machine() */
-static int __build_all_zonelists(void *dummy)
+static __init_refok int __build_all_zonelists(void *data)
 {
        int nid;
        int cpu;
@@ -2934,6 +2954,14 @@ static int __build_all_zonelists(void *dummy)
                build_zonelist_cache(pgdat);
        }
 
+#ifdef CONFIG_MEMORY_HOTPLUG
+       /* Setup real pagesets for the new zone */
+       if (data) {
+               struct zone *zone = data;
+               setup_zone_pageset(zone);
+       }
+#endif
+
        /*
         * Initialize the boot_pagesets that are going to be used
         * for bootstrapping processors. The real pagesets for
@@ -2953,7 +2981,11 @@ static int __build_all_zonelists(void *dummy)
        return 0;
 }
 
-void build_all_zonelists(void)
+/*
+ * Called with zonelists_mutex held always
+ * unless system_state == SYSTEM_BOOTING.
+ */
+void build_all_zonelists(void *data)
 {
        set_zonelist_order();
 
@@ -2964,7 +2996,7 @@ void build_all_zonelists(void)
        } else {
                /* we have to stop all cpus to guarantee there is no user
                   of zonelist */
-               stop_machine(__build_all_zonelists, NULL, NULL);
+               stop_machine(__build_all_zonelists, data, NULL);
                /* cpuset refresh routine should be here */
        }
        vm_total_pages = nr_free_pagecache_pages();
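
Passing data through stop_machine() is what lets memory hotplug hand a newly populated zone to __build_all_zonelists() while every CPU is quiesced. A sketch of the expected caller on the hotplug side, inferred from the zonelists_mutex comment above (the surrounding online_pages() logic is an assumption):

	/* In the memory-hotplug online path (simplified): */
	mutex_lock(&zonelists_mutex);
	if (!populated_zone(zone))
		build_all_zonelists(zone);	/* rebuild + setup_zone_pageset() */
	mutex_unlock(&zonelists_mutex);

At boot, by contrast, build_all_zonelists(NULL) runs single-threaded before other CPUs are up, so neither the mutex nor the hotplug pageset setup is needed there.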
@@ -3287,31 +3319,34 @@ static void setup_pagelist_highmark(struct per_cpu_pageset *p,
                pcp->batch = PAGE_SHIFT * 8;
 }
 
+static __meminit void setup_zone_pageset(struct zone *zone)
+{
+       int cpu;
+
+       zone->pageset = alloc_percpu(struct per_cpu_pageset);
+
+       for_each_possible_cpu(cpu) {
+               struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
+
+               setup_pageset(pcp, zone_batchsize(zone));
+
+               if (percpu_pagelist_fraction)
+                       setup_pagelist_highmark(pcp,
+                               (zone->present_pages /
+                                       percpu_pagelist_fraction));
+       }
+}
+
 /*
  * Allocate per cpu pagesets and initialize them.
  * Before this call only boot pagesets were available.
- * Boot pagesets will no longer be used by this processorr
- * after setup_per_cpu_pageset().
  */
 void __init setup_per_cpu_pageset(void)
 {
        struct zone *zone;
-       int cpu;
-
-       for_each_populated_zone(zone) {
-               zone->pageset = alloc_percpu(struct per_cpu_pageset);
-
-               for_each_possible_cpu(cpu) {
-                       struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
 
-                       setup_pageset(pcp, zone_batchsize(zone));
-
-                       if (percpu_pagelist_fraction)
-                               setup_pagelist_highmark(pcp,
-                                       (zone->present_pages /
-                                               percpu_pagelist_fraction));
-               }
-       }
+       for_each_populated_zone(zone)
+               setup_zone_pageset(zone);
 }
 
 static noinline __init_refok
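
Once setup_per_cpu_pageset() has run, the allocator fast paths operate on these dynamically allocated pagesets instead of the static boot_pageset. For reference, this is roughly how a fast path reaches its per-cpu data (a sketch modeled on free_hot_cold_page() elsewhere in this file):

	struct per_cpu_pages *pcp;
	unsigned long flags;

	local_irq_save(flags);			/* stay on this CPU */
	pcp = &this_cpu_ptr(zone->pageset)->pcp;
	/* ... enqueue the page on pcp's list and bump pcp->count ... */
	local_irq_restore(flags);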