diff --git a/mm/vmstat.c b/mm/vmstat.c
index 4800091..dfdf241 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
 #include <linux/mm.h>
 #include <linux/module.h>
 
-/*
- * Accumulate the page_state information across all CPUs.
- * The result is unavoidably approximate - it can change
- * during and after execution of this function.
- */
-DEFINE_PER_CPU(struct page_state, page_states) = {0};
-
-atomic_t nr_pagecache = ATOMIC_INIT(0);
-EXPORT_SYMBOL(nr_pagecache);
-#ifdef CONFIG_SMP
-DEFINE_PER_CPU(long, nr_pagecache_local) = 0;
-#endif
-
-static void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask)
-{
-       unsigned cpu;
-
-       memset(ret, 0, nr * sizeof(unsigned long));
-       cpus_and(*cpumask, *cpumask, cpu_online_map);
-
-       for_each_cpu_mask(cpu, *cpumask) {
-               unsigned long *in;
-               unsigned long *out;
-               unsigned off;
-               unsigned next_cpu;
-
-               in = (unsigned long *)&per_cpu(page_states, cpu);
-
-               next_cpu = next_cpu(cpu, *cpumask);
-               if (likely(next_cpu < NR_CPUS))
-                       prefetch(&per_cpu(page_states, next_cpu));
-
-               out = (unsigned long *)ret;
-               for (off = 0; off < nr; off++)
-                       *out++ += *in++;
-       }
-}
-
-void get_page_state_node(struct page_state *ret, int node)
-{
-       int nr;
-       cpumask_t mask = node_to_cpumask(node);
-
-       nr = offsetof(struct page_state, GET_PAGE_STATE_LAST);
-       nr /= sizeof(unsigned long);
-
-       __get_page_state(ret, nr+1, &mask);
-}
-
-void get_page_state(struct page_state *ret)
-{
-       int nr;
-       cpumask_t mask = CPU_MASK_ALL;
-
-       nr = offsetof(struct page_state, GET_PAGE_STATE_LAST);
-       nr /= sizeof(unsigned long);
-
-       __get_page_state(ret, nr + 1, &mask);
-}
-
-void get_full_page_state(struct page_state *ret)
-{
-       cpumask_t mask = CPU_MASK_ALL;
-
-       __get_page_state(ret, sizeof(*ret) / sizeof(unsigned long), &mask);
-}
-
-unsigned long read_page_state_offset(unsigned long offset)
-{
-       unsigned long ret = 0;
-       int cpu;
-
-       for_each_online_cpu(cpu) {
-               unsigned long in;
-
-               in = (unsigned long)&per_cpu(page_states, cpu) + offset;
-               ret += *((unsigned long *)in);
-       }
-       return ret;
-}
-
-void __mod_page_state_offset(unsigned long offset, unsigned long delta)
-{
-       void *ptr;
-
-       ptr = &__get_cpu_var(page_states);
-       *(unsigned long *)(ptr + offset) += delta;
-}
-EXPORT_SYMBOL(__mod_page_state_offset);
-
-void mod_page_state_offset(unsigned long offset, unsigned long delta)
-{
-       unsigned long flags;
-       void *ptr;
-
-       local_irq_save(flags);
-       ptr = &__get_cpu_var(page_states);
-       *(unsigned long *)(ptr + offset) += delta;
-       local_irq_restore(flags);
-}
-EXPORT_SYMBOL(mod_page_state_offset);
-
 void __get_zone_counts(unsigned long *active, unsigned long *inactive,
                        unsigned long *free, struct pglist_data *pgdat)
 {
@@ -148,6 +46,63 @@ void get_zone_counts(unsigned long *active,
        }
 }
 
+#ifdef CONFIG_VM_EVENT_COUNTERS
+DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
+EXPORT_PER_CPU_SYMBOL(vm_event_states);
+
+static void sum_vm_events(unsigned long *ret, cpumask_t *cpumask)
+{
+       int cpu = 0;
+       int i;
+
+       memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
+
+       cpu = first_cpu(*cpumask);
+       while (cpu < NR_CPUS) {
+               struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
+
+               cpu = next_cpu(cpu, *cpumask);
+
+               if (cpu < NR_CPUS)
+                       prefetch(&per_cpu(vm_event_states, cpu));
+
+               for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
+                       ret[i] += this->event[i];
+       }
+}
+
+/*
+ * Accumulate the vm event counters across all CPUs.
+ * The result is unavoidably approximate - it can change
+ * during and after execution of this function.
+ */
+void all_vm_events(unsigned long *ret)
+{
+       sum_vm_events(ret, &cpu_online_map);
+}
+EXPORT_SYMBOL_GPL(all_vm_events);
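
A minimal sketch of a consumer of this interface. It assumes the PGFAULT
index from the vm_event_item enum that the companion header change
introduces; any other event index would do:

        /* Snapshot the global VM event counters. The result is
         * approximate by design: the per-cpu counters keep changing
         * while they are being summed. */
        unsigned long events[NR_VM_EVENT_ITEMS];

        all_vm_events(events);
        printk(KERN_DEBUG "page faults so far: %lu\n", events[PGFAULT]);
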
+
+#ifdef CONFIG_HOTPLUG
+/*
+ * Fold the foreign cpu events into our own.
+ *
+ * This is adding to the events on one processor
+ * but keeps the global counts constant.
+ */
+void vm_events_fold_cpu(int cpu)
+{
+       struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
+       int i;
+
+       for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
+               count_vm_events(i, fold_state->event[i]);
+               fold_state->event[i] = 0;
+       }
+}
+#endif /* CONFIG_HOTPLUG */
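
A hedged sketch of the intended caller: in the full patch this is invoked
from the CPU hotplug path in page_alloc.c when a processor is taken down.
The handler name below is illustrative, not part of the patch:

        /* Illustrative hotplug handler: fold the dead CPU's event
         * counters into the CPU running the notifier, so the totals
         * reported by all_vm_events() are preserved. */
        static void on_cpu_dead(int dead_cpu)
        {
                local_irq_disable();
                vm_events_fold_cpu(dead_cpu);
                local_irq_enable();
        }
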
+
+#endif /* CONFIG_VM_EVENT_COUNTERS */
+
 /*
  * Manage combined zone based / global counters
  *
@@ -227,9 +183,8 @@ EXPORT_SYMBOL(mod_zone_page_state);
  * in between and therefore the atomicity vs. interrupt cannot be exploited
  * in a useful way here.
  */
-void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
+static void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
 {
-       struct zone *zone = page_zone(page);
        s8 *p = diff_pointer(zone, item);
 
        (*p)++;
@@ -239,6 +194,11 @@ void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
                *p = 0;
        }
 }
+
+void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
+{
+       __inc_zone_state(page_zone(page), item);
+}
 EXPORT_SYMBOL(__inc_zone_page_state);
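
The double-underscore variants leave interrupt exclusion to the caller; a
minimal usage sketch (NR_FILE_PAGES stands in for any zone_stat_item):

        /* Callers of __inc_zone_page_state() must already have
         * interrupts disabled on this CPU; this open-codes what
         * inc_zone_page_state() does below. */
        unsigned long flags;

        local_irq_save(flags);
        __inc_zone_page_state(page, NR_FILE_PAGES);
        local_irq_restore(flags);
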
 
 void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
@@ -255,22 +215,23 @@ void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
 }
 EXPORT_SYMBOL(__dec_zone_page_state);
 
+void inc_zone_state(struct zone *zone, enum zone_stat_item item)
+{
+       unsigned long flags;
+
+       local_irq_save(flags);
+       __inc_zone_state(zone, item);
+       local_irq_restore(flags);
+}
+
 void inc_zone_page_state(struct page *page, enum zone_stat_item item)
 {
        unsigned long flags;
        struct zone *zone;
-       s8 *p;
 
        zone = page_zone(page);
        local_irq_save(flags);
-       p = diff_pointer(zone, item);
-
-       (*p)++;
-
-       if (unlikely(*p > STAT_THRESHOLD)) {
-               zone_page_state_add(*p, zone, item);
-               *p = 0;
-       }
+       __inc_zone_state(zone, item);
        local_irq_restore(flags);
 }
 EXPORT_SYMBOL(inc_zone_page_state);
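
With the threshold logic factored into __inc_zone_state(), the
interrupt-safe wrappers reduce to one-liners; from process context a
caller simply writes (the item name here is illustrative):

        /* The wrapper pairs local_irq_save()/restore() itself. */
        inc_zone_page_state(page, NR_WRITEBACK);
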
@@ -339,6 +300,28 @@ EXPORT_SYMBOL(refresh_vm_stats);
 
 #endif
 
+#ifdef CONFIG_NUMA
+/*
+ * zonelist = the list of zones passed to the allocator
+ * z       = the zone from which the allocation occurred.
+ *
+ * Must be called with interrupts disabled.
+ */
+void zone_statistics(struct zonelist *zonelist, struct zone *z)
+{
+       if (z->zone_pgdat == zonelist->zones[0]->zone_pgdat) {
+               __inc_zone_state(z, NUMA_HIT);
+       } else {
+               __inc_zone_state(z, NUMA_MISS);
+               __inc_zone_state(zonelist->zones[0], NUMA_FOREIGN);
+       }
+       if (z->zone_pgdat == NODE_DATA(numa_node_id()))
+               __inc_zone_state(z, NUMA_LOCAL);
+       else
+               __inc_zone_state(z, NUMA_OTHER);
+}
+#endif
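
A hedged sketch of the call site this is written for: in the full patch the
page allocator calls it, with interrupts still disabled, right after a page
has been taken from a zone's free lists. The helper name below is
illustrative:

        /* zonelist->zones[0] is the zone the caller preferred;
         * z is the zone that actually satisfied the allocation. */
        local_irq_save(flags);
        page = take_page_from_zone(z, order);   /* illustrative */
        if (page)
                zone_statistics(zonelist, z);
        local_irq_restore(flags);
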
+
 #ifdef CONFIG_PROC_FS
 
 #include <linux/seq_file.h>
@@ -401,24 +384,35 @@ struct seq_operations fragmentation_op = {
 
 static char *vmstat_text[] = {
        /* Zoned VM counters */
+       "nr_anon_pages",
        "nr_mapped",
-
-       /* Page state */
+       "nr_file_pages",
+       "nr_slab",
+       "nr_page_table_pages",
        "nr_dirty",
        "nr_writeback",
        "nr_unstable",
-       "nr_page_table_pages",
-       "nr_slab",
+       "nr_bounce",
+
+#ifdef CONFIG_NUMA
+       "numa_hit",
+       "numa_miss",
+       "numa_foreign",
+       "numa_interleave",
+       "numa_local",
+       "numa_other",
+#endif
 
+#ifdef CONFIG_VM_EVENT_COUNTERS
        "pgpgin",
        "pgpgout",
        "pswpin",
        "pswpout",
 
-       "pgalloc_high",
-       "pgalloc_normal",
-       "pgalloc_dma32",
        "pgalloc_dma",
+       "pgalloc_dma32",
+       "pgalloc_normal",
+       "pgalloc_high",
 
        "pgfree",
        "pgactivate",
@@ -427,25 +421,25 @@ static char *vmstat_text[] = {
        "pgfault",
        "pgmajfault",
 
-       "pgrefill_high",
-       "pgrefill_normal",
-       "pgrefill_dma32",
        "pgrefill_dma",
+       "pgrefill_dma32",
+       "pgrefill_normal",
+       "pgrefill_high",
 
-       "pgsteal_high",
-       "pgsteal_normal",
-       "pgsteal_dma32",
        "pgsteal_dma",
+       "pgsteal_dma32",
+       "pgsteal_normal",
+       "pgsteal_high",
 
-       "pgscan_kswapd_high",
-       "pgscan_kswapd_normal",
-       "pgscan_kswapd_dma32",
        "pgscan_kswapd_dma",
+       "pgscan_kswapd_dma32",
+       "pgscan_kswapd_normal",
+       "pgscan_kswapd_high",
 
-       "pgscan_direct_high",
-       "pgscan_direct_normal",
-       "pgscan_direct_dma32",
        "pgscan_direct_dma",
+       "pgscan_direct_dma32",
+       "pgscan_direct_normal",
+       "pgscan_direct_high",
 
        "pginodesteal",
        "slabs_scanned",
@@ -455,7 +449,7 @@ static char *vmstat_text[] = {
        "allocstall",
 
        "pgrotated",
-       "nr_bounce",
+#endif
 };
 
 /*
@@ -531,21 +525,6 @@ static int zoneinfo_show(struct seq_file *m, void *arg)
                                           pageset->pcp[j].high,
                                           pageset->pcp[j].batch);
                        }
-#ifdef CONFIG_NUMA
-                       seq_printf(m,
-                                  "\n            numa_hit:       %lu"
-                                  "\n            numa_miss:      %lu"
-                                  "\n            numa_foreign:   %lu"
-                                  "\n            interleave_hit: %lu"
-                                  "\n            local_node:     %lu"
-                                  "\n            other_node:     %lu",
-                                  pageset->numa_hit,
-                                  pageset->numa_miss,
-                                  pageset->numa_foreign,
-                                  pageset->interleave_hit,
-                                  pageset->local_node,
-                                  pageset->other_node);
-#endif
                }
                seq_printf(m,
                           "\n  all_unreclaimable: %u"
@@ -573,23 +552,32 @@ struct seq_operations zoneinfo_op = {
 static void *vmstat_start(struct seq_file *m, loff_t *pos)
 {
        unsigned long *v;
-       struct page_state *ps;
+#ifdef CONFIG_VM_EVENT_COUNTERS
+       unsigned long *e;
+#endif
        int i;
 
        if (*pos >= ARRAY_SIZE(vmstat_text))
                return NULL;
 
+#ifdef CONFIG_VM_EVENT_COUNTERS
        v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long)
-                       + sizeof(*ps), GFP_KERNEL);
+                       + sizeof(struct vm_event_state), GFP_KERNEL);
+#else
+       v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long),
+                       GFP_KERNEL);
+#endif
        m->private = v;
        if (!v)
                return ERR_PTR(-ENOMEM);
        for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
                v[i] = global_page_state(i);
-       ps = (struct page_state *)(v + NR_VM_ZONE_STAT_ITEMS);
-       get_full_page_state(ps);
-       ps->pgpgin /= 2;                /* sectors -> kbytes */
-       ps->pgpgout /= 2;
+#ifdef CONFIG_VM_EVENT_COUNTERS
+       e = v + NR_VM_ZONE_STAT_ITEMS;
+       all_vm_events(e);
+       e[PGPGIN] /= 2;         /* sectors -> kbytes */
+       e[PGPGOUT] /= 2;
+#endif
        return v + *pos;
 }
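
The buffer that vmstat_start() hands to the seq_file iterator is one flat
array, which vmstat_text[] above must mirror entry for entry:

        /*
         * v[0 .. NR_VM_ZONE_STAT_ITEMS - 1]     zoned counters
         *                                       (nr_anon_pages, ...)
         * v[NR_VM_ZONE_STAT_ITEMS ..
         *   + NR_VM_EVENT_ITEMS - 1]            event counters, present
         *                                       only when
         *                                       CONFIG_VM_EVENT_COUNTERS
         *                                       is set (pgpgin, ...)
         */
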