Merge git://git.kernel.org/pub/scm/linux/kernel/git/sam/kbuild
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3a877fe..54a4f53 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -14,7 +14,6 @@
  *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
  */
 
-#include <linux/config.h>
 #include <linux/stddef.h>
 #include <linux/mm.h>
 #include <linux/swap.h>
@@ -456,7 +455,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 
        kernel_map_pages(page, 1 << order, 0);
        local_irq_save(flags);
-       __mod_page_state(pgfree, 1 << order);
+       __count_vm_events(PGFREE, 1 << order);
        free_one_page(page_zone(page), page, order);
        local_irq_restore(flags);
 }
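
This hunk is part of the switch from the old per-CPU struct page_state counters to the lightweight VM event counters. As a rough sketch of the machinery being relied on here (the names follow include/linux/vmstat.h of this series, but treat the exact definitions as an approximation, not a quote):

#ifdef CONFIG_VM_EVENT_COUNTERS
/* One array of event counts per CPU; cheap to bump, summed up only
 * when somebody actually reads the statistics. */
struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/* Non-atomic variants: the caller has already disabled interrupts,
 * as local_irq_save() does around the call site above. */
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	__get_cpu_var(vm_event_states).event[item] += delta;
}

static inline void __count_vm_event(enum vm_event_item item)
{
	__get_cpu_var(vm_event_states).event[item]++;
}
#endif

The same pair of helpers covers both this __count_vm_events(PGFREE, 1 << order) and the single-event __count_vm_event(PGFREE) in free_hot_cold_page further down.
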
@@ -709,27 +708,6 @@ void drain_local_pages(void)
 }
 #endif /* CONFIG_PM */
 
-static void zone_statistics(struct zonelist *zonelist, struct zone *z, int cpu)
-{
-#ifdef CONFIG_NUMA
-       pg_data_t *pg = z->zone_pgdat;
-       pg_data_t *orig = zonelist->zones[0]->zone_pgdat;
-       struct per_cpu_pageset *p;
-
-       p = zone_pcp(z, cpu);
-       if (pg == orig) {
-               p->numa_hit++;
-       } else {
-               p->numa_miss++;
-               zone_pcp(zonelist->zones[0], cpu)->numa_foreign++;
-       }
-       if (pg == NODE_DATA(numa_node_id()))
-               p->local_node++;
-       else
-               p->other_node++;
-#endif
-}
-
 /*
  * Free a 0-order page
  */
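
zone_statistics() is not dropped by this commit; it moves to mm/vmstat.c and loses its cpu argument, because the NUMA hit/miss counts become ordinary per-zone statistics. A sketch of the relocated version, assuming the NUMA_HIT/NUMA_MISS/... zone_stat_item names from this series (approximate, not copied from the patch):

#ifdef CONFIG_NUMA
/* Same accounting as the function removed above, expressed through
 * the per-zone vm statistics instead of per-cpu pageset fields. */
void zone_statistics(struct zonelist *zonelist, struct zone *z)
{
	if (z->zone_pgdat == zonelist->zones[0]->zone_pgdat) {
		__inc_zone_state(z, NUMA_HIT);
	} else {
		__inc_zone_state(z, NUMA_MISS);
		__inc_zone_state(zonelist->zones[0], NUMA_FOREIGN);
	}
	if (z->zone_pgdat == NODE_DATA(numa_node_id()))
		__inc_zone_state(z, NUMA_LOCAL);
	else
		__inc_zone_state(z, NUMA_OTHER);
}
#endif
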
@@ -750,7 +728,7 @@ static void fastcall free_hot_cold_page(struct page *page, int cold)
 
        pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
        local_irq_save(flags);
-       __inc_page_state(pgfree);
+       __count_vm_event(PGFREE);
        list_add(&page->lru, &pcp->list);
        pcp->count++;
        if (pcp->count >= pcp->high) {
@@ -826,8 +804,8 @@ again:
                        goto failed;
        }
 
-       __mod_page_state_zone(zone, pgalloc, 1 << order);
-       zone_statistics(zonelist, zone, cpu);
+       __count_zone_vm_events(PGALLOC, zone, 1 << order);
+       zone_statistics(zonelist, zone);
        local_irq_restore(flags);
        put_cpu();
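
PGALLOC is now counted per zone type rather than through page_state, and zone_statistics() no longer needs the cpu argument. The helper used here is, to a first approximation (an assumption about include/linux/vmstat.h, where the zone index is folded into the event number so PGALLOC_DMA, PGALLOC_NORMAL, PGALLOC_HIGH and friends stay separate):

/* Approximate shape of the helper; the real macro lives in
 * include/linux/vmstat.h. */
#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_DMA + zone_idx(zone), delta)
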
 
@@ -1271,7 +1249,6 @@ void si_meminfo_node(struct sysinfo *val, int nid)
  */
 void show_free_areas(void)
 {
-       struct page_state ps;
        int cpu, temperature;
        unsigned long active;
        unsigned long inactive;
@@ -1303,7 +1280,6 @@ void show_free_areas(void)
                }
        }
 
-       get_page_state(&ps);
        get_zone_counts(&active, &inactive, &free);
 
        printk("Free pages: %11ukB (%ukB HighMem)\n",
@@ -1314,13 +1290,13 @@ void show_free_areas(void)
                "unstable:%lu free:%u slab:%lu mapped:%lu pagetables:%lu\n",
                active,
                inactive,
-               ps.nr_dirty,
-               ps.nr_writeback,
-               ps.nr_unstable,
+               global_page_state(NR_FILE_DIRTY),
+               global_page_state(NR_WRITEBACK),
+               global_page_state(NR_UNSTABLE_NFS),
                nr_free_pages(),
-               ps.nr_slab,
-               ps.nr_mapped,
-               ps.nr_page_table_pages);
+               global_page_state(NR_SLAB),
+               global_page_state(NR_FILE_MAPPED),
+               global_page_state(NR_PAGETABLE));
 
        for_each_zone(zone) {
                int i;
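
show_free_areas() stops snapshotting a struct page_state and instead reads the global zone counters directly. What global_page_state() boils down to, assuming the vm_stat array of atomic_long_t introduced by this series (a sketch; the clamp matters because unfolded per-CPU deltas can leave the global sum transiently negative):

extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

/* Approximation of the accessor in include/linux/vmstat.h. */
static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)	/* per-CPU deltas not folded in yet */
		x = 0;
#endif
	return x;
}
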
@@ -2029,6 +2005,10 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat,
 
                zone->spanned_pages = size;
                zone->present_pages = realsize;
+#ifdef CONFIG_NUMA
+               zone->min_unmapped_ratio = (realsize*sysctl_min_unmapped_ratio)
+                                               / 100;
+#endif
                zone->name = zone_names[j];
                spin_lock_init(&zone->lock);
                spin_lock_init(&zone->lru_lock);
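
Each NUMA zone caches its reclaim threshold as a page count rather than a percentage. As a worked example (numbers are illustrative, not from the patch): with sysctl_min_unmapped_ratio at its default of 1 and a zone of 262144 present pages (1 GiB of 4 KiB pages), zone->min_unmapped_ratio becomes 262144 * 1 / 100 = 2621 pages, i.e. roughly 10 MiB of unmapped pagecache that zone_reclaim() will leave untouched.
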
@@ -2124,29 +2104,11 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
                                 unsigned long action, void *hcpu)
 {
        int cpu = (unsigned long)hcpu;
-       long *count;
-       unsigned long *src, *dest;
 
        if (action == CPU_DEAD) {
-               int i;
-
-               /* Drain local pagecache count. */
-               count = &per_cpu(nr_pagecache_local, cpu);
-               atomic_add(*count, &nr_pagecache);
-               *count = 0;
                local_irq_disable();
                __drain_pages(cpu);
-
-               /* Add dead cpu's page_states to our own. */
-               dest = (unsigned long *)&__get_cpu_var(page_states);
-               src = (unsigned long *)&per_cpu(page_states, cpu);
-
-               for (i = 0; i < sizeof(struct page_state)/sizeof(unsigned long);
-                               i++) {
-                       dest[i] += src[i];
-                       src[i] = 0;
-               }
-
+               vm_events_fold_cpu(cpu);
                local_irq_enable();
                refresh_cpu_vm_stats(cpu);
        }
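
The open-coded loop that folded a dead CPU's page_states into the current CPU is replaced by a single helper for the event counters (the zone counters are handled by refresh_cpu_vm_stats() just below). Roughly what vm_events_fold_cpu() in mm/vmstat.c does, as an approximation:

/* Fold the dead CPU's event counts into this CPU so nothing is lost
 * across hotplug; the caller has interrupts disabled. */
void vm_events_fold_cpu(int cpu)
{
	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
	int i;

	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
		count_vm_events(i, fold_state->event[i]);
		fold_state->event[i] = 0;
	}
}

The nr_pagecache/nr_pagecache_local drain that also disappears here is covered elsewhere in the series by the NR_FILE_PAGES zone counter.
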
@@ -2340,6 +2302,24 @@ int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
        return 0;
 }
 
+#ifdef CONFIG_NUMA
+int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
+       struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
+{
+       struct zone *zone;
+       int rc;
+
+       rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
+       if (rc)
+               return rc;
+
+       for_each_zone(zone)
+               zone->min_unmapped_ratio = (zone->present_pages *
+                               sysctl_min_unmapped_ratio) / 100;
+       return 0;
+}
+#endif
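
The handler only refreshes the cached per-zone thresholds; the knob itself is registered in kernel/sysctl.c. The accompanying ctl_table entry looks roughly like the sketch below (the VM_MIN_UNMAPPED constant and the zero/one_hundred bounds are assumptions about that file, not part of this hunk). Writing a percentage to /proc/sys/vm/min_unmapped_ratio then walks every zone and recomputes zone->min_unmapped_ratio.

	/* Sketch of the kernel/sysctl.c entry wired to the handler above;
	 * the ctl_name constant and the min/max variables are assumed,
	 * not quoted. */
	{
		.ctl_name	= VM_MIN_UNMAPPED,
		.procname	= "min_unmapped_ratio",
		.data		= &sysctl_min_unmapped_ratio,
		.maxlen		= sizeof(sysctl_min_unmapped_ratio),
		.mode		= 0644,
		.proc_handler	= &sysctl_min_unmapped_ratio_sysctl_handler,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero,
		.extra2		= &one_hundred,
	},
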
+
 /*
  * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
  *     proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()