[PATCH] ZVC: Overstep counters
mm/vmstat.c

/*
 *  linux/mm/vmstat.c
 *
 *  Manages VM statistics
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  zoned VM statistics
 *  Copyright (C) 2006 Silicon Graphics, Inc.,
 *              Christoph Lameter <christoph@lameter.com>
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/module.h>

void __get_zone_counts(unsigned long *active, unsigned long *inactive,
                        unsigned long *free, struct pglist_data *pgdat)
{
        struct zone *zones = pgdat->node_zones;
        int i;

        *active = 0;
        *inactive = 0;
        *free = 0;
        for (i = 0; i < MAX_NR_ZONES; i++) {
                *active += zones[i].nr_active;
                *inactive += zones[i].nr_inactive;
                *free += zones[i].free_pages;
        }
}

void get_zone_counts(unsigned long *active,
                unsigned long *inactive, unsigned long *free)
{
        struct pglist_data *pgdat;

        *active = 0;
        *inactive = 0;
        *free = 0;
        for_each_online_pgdat(pgdat) {
                unsigned long l, m, n;
                __get_zone_counts(&l, &m, &n, pgdat);
                *active += l;
                *inactive += m;
                *free += n;
        }
}

#ifdef CONFIG_VM_EVENT_COUNTERS
DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
EXPORT_PER_CPU_SYMBOL(vm_event_states);

static void sum_vm_events(unsigned long *ret, cpumask_t *cpumask)
{
        int cpu = 0;
        int i;

        memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));

        cpu = first_cpu(*cpumask);
        while (cpu < NR_CPUS) {
                struct vm_event_state *this = &per_cpu(vm_event_states, cpu);

                cpu = next_cpu(cpu, *cpumask);

                if (cpu < NR_CPUS)
                        prefetch(&per_cpu(vm_event_states, cpu));

                for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
                        ret[i] += this->event[i];
        }
}

/*
 * Accumulate the vm event counters across all CPUs.
 * The result is unavoidably approximate - it can change
 * during and after execution of this function.
 */
void all_vm_events(unsigned long *ret)
{
        sum_vm_events(ret, &cpu_online_map);
}
EXPORT_SYMBOL_GPL(all_vm_events);
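
/*
 * Example (illustrative only, not a caller in this tree): a debugging
 * snippet that snapshots the event counters. PGFAULT is one of the
 * enum vm_event_item indices summed above:
 *
 *	unsigned long events[NR_VM_EVENT_ITEMS];
 *
 *	all_vm_events(events);
 *	printk(KERN_DEBUG "page faults so far: %lu\n", events[PGFAULT]);
 */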

#ifdef CONFIG_HOTPLUG
/*
 * Fold the foreign cpu events into our own.
 *
 * This adds to the events on one processor
 * but keeps the global counts constant.
 */
void vm_events_fold_cpu(int cpu)
{
        struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
        int i;

        for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
                count_vm_events(i, fold_state->event[i]);
                fold_state->event[i] = 0;
        }
}
#endif /* CONFIG_HOTPLUG */

#endif /* CONFIG_VM_EVENT_COUNTERS */

/*
 * Manage combined zone based / global counters
 *
 * vm_stat contains the global counters
 */
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
EXPORT_SYMBOL(vm_stat);

#ifdef CONFIG_SMP

#define STAT_THRESHOLD 32
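
/*
 * The fold operations below go through zone_page_state_add() from
 * include/linux/vmstat.h, which (roughly, at this point in the tree)
 * updates the per-zone and the global counter together:
 *
 *	static inline void zone_page_state_add(long x, struct zone *zone,
 *					enum zone_stat_item item)
 *	{
 *		atomic_long_add(x, &zone->vm_stat[item]);
 *		atomic_long_add(x, &vm_stat[item]);
 *	}
 */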

/*
 * Determine pointer to currently valid differential byte given a zone and
 * the item number.
 *
 * Preemption must be off
 */
static inline s8 *diff_pointer(struct zone *zone, enum zone_stat_item item)
{
        return &zone_pcp(zone, smp_processor_id())->vm_stat_diff[item];
}

/*
 * For use when we know that interrupts are disabled.
 */
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
                                int delta)
{
        s8 *p;
        long x;

        p = diff_pointer(zone, item);
        x = delta + *p;

        if (unlikely(x > STAT_THRESHOLD || x < -STAT_THRESHOLD)) {
                zone_page_state_add(x, zone, item);
                x = 0;
        }

        *p = x;
}
EXPORT_SYMBOL(__mod_zone_page_state);
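
/*
 * Example (hypothetical caller, not part of this patch): a path that
 * already runs with interrupts disabled could account 1 << order
 * released file pages with:
 *
 *	__mod_zone_page_state(zone, NR_FILE_PAGES, -(1 << order));
 *
 * The delta accumulates in the per-cpu s8 differential and is only
 * folded into the atomic counters once its magnitude exceeds
 * STAT_THRESHOLD.
 */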

/*
 * For an unknown interrupt state
 */
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
                                        int delta)
{
        unsigned long flags;

        local_irq_save(flags);
        __mod_zone_page_state(zone, item, delta);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_zone_page_state);

/*
 * Optimized increment and decrement functions.
 *
 * These are only for a single page and therefore can take a struct page *
 * argument instead of struct zone *. This allows the inclusion of the code
 * generated for page_zone(page) into the optimized functions.
 *
 * No overflow check is necessary and therefore the differential can be
 * incremented or decremented in place which may allow the compilers to
 * generate better code.
 *
 * The increment or decrement is known and therefore one boundary check can
 * be omitted.
 *
 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 * However, the code must first determine the differential location in a zone
 * based on the processor number and then inc/dec the counter. There is no
 * guarantee without disabling preemption that the processor will not change
 * in between and therefore the atomicity vs. interrupt cannot be exploited
 * in a useful way here.
 */
static void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
        s8 *p = diff_pointer(zone, item);

        (*p)++;

        if (unlikely(*p > STAT_THRESHOLD)) {
                zone_page_state_add(*p + STAT_THRESHOLD / 2, zone, item);
                *p = -STAT_THRESHOLD / 2;
        }
}

void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
        __inc_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__inc_zone_page_state);

void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
        struct zone *zone = page_zone(page);
        s8 *p = diff_pointer(zone, item);

        (*p)--;

        if (unlikely(*p < -STAT_THRESHOLD)) {
                zone_page_state_add(*p - STAT_THRESHOLD / 2, zone, item);
                *p = STAT_THRESHOLD / 2;
        }
}
EXPORT_SYMBOL(__dec_zone_page_state);

void inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
        unsigned long flags;

        local_irq_save(flags);
        __inc_zone_state(zone, item);
        local_irq_restore(flags);
}

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
        unsigned long flags;
        struct zone *zone;

        zone = page_zone(page);
        local_irq_save(flags);
        __inc_zone_state(zone, item);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
        unsigned long flags;

        local_irq_save(flags);
        __dec_zone_page_state(page, item);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_zone_page_state);

/*
 * Update the zone counters for one cpu.
 */
void refresh_cpu_vm_stats(int cpu)
{
        struct zone *zone;
        int i;
        unsigned long flags;

        for_each_zone(zone) {
                struct per_cpu_pageset *pcp;

                pcp = zone_pcp(zone, cpu);

                for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
                        if (pcp->vm_stat_diff[i]) {
                                local_irq_save(flags);
                                zone_page_state_add(pcp->vm_stat_diff[i],
                                        zone, i);
                                pcp->vm_stat_diff[i] = 0;
                                local_irq_restore(flags);
                        }
        }
}

static void __refresh_cpu_vm_stats(void *dummy)
{
        refresh_cpu_vm_stats(smp_processor_id());
}

/*
 * Consolidate all counters.
 *
 * Note that the result is less inaccurate after the per-cpu deltas
 * have been folded in, but it remains approximate while concurrent
 * processes are allowed to run.
 */
void refresh_vm_stats(void)
{
        on_each_cpu(__refresh_cpu_vm_stats, NULL, 0, 1);
}
EXPORT_SYMBOL(refresh_vm_stats);

#endif

#ifdef CONFIG_NUMA
/*
 * zonelist = the list of zones passed to the allocator
 * z        = the zone from which the allocation occurred.
 *
 * Must be called with interrupts disabled.
 */
void zone_statistics(struct zonelist *zonelist, struct zone *z)
{
        if (z->zone_pgdat == zonelist->zones[0]->zone_pgdat) {
                __inc_zone_state(z, NUMA_HIT);
        } else {
                __inc_zone_state(z, NUMA_MISS);
                __inc_zone_state(zonelist->zones[0], NUMA_FOREIGN);
        }
        if (z->zone_pgdat == NODE_DATA(numa_node_id()))
                __inc_zone_state(z, NUMA_LOCAL);
        else
                __inc_zone_state(z, NUMA_OTHER);
}
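
/*
 * Summary of the four counters (descriptive note, added for clarity):
 * NUMA_HIT/NUMA_MISS record whether the page came from the preferred
 * node (the node of zonelist->zones[0]); NUMA_FOREIGN is charged to
 * the preferred zone whenever it could not satisfy the request; and
 * NUMA_LOCAL/NUMA_OTHER record whether the page ended up on the node
 * the cpu is currently running on.
 */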
#endif

#ifdef CONFIG_PROC_FS

#include <linux/seq_file.h>

static void *frag_start(struct seq_file *m, loff_t *pos)
{
        pg_data_t *pgdat;
        loff_t node = *pos;

        for (pgdat = first_online_pgdat();
             pgdat && node;
             pgdat = next_online_pgdat(pgdat))
                --node;

        return pgdat;
}

static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
{
        pg_data_t *pgdat = (pg_data_t *)arg;

        (*pos)++;
        return next_online_pgdat(pgdat);
}

static void frag_stop(struct seq_file *m, void *arg)
{
}

/*
 * This walks the free areas for each zone.
 */
static int frag_show(struct seq_file *m, void *arg)
{
        pg_data_t *pgdat = (pg_data_t *)arg;
        struct zone *zone;
        struct zone *node_zones = pgdat->node_zones;
        unsigned long flags;
        int order;

        for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
                if (!populated_zone(zone))
                        continue;

                spin_lock_irqsave(&zone->lock, flags);
                seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
                for (order = 0; order < MAX_ORDER; ++order)
                        seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
                spin_unlock_irqrestore(&zone->lock, flags);
                seq_putc(m, '\n');
        }
        return 0;
}

struct seq_operations fragmentation_op = {
        .start  = frag_start,
        .next   = frag_next,
        .stop   = frag_stop,
        .show   = frag_show,
};
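
/*
 * Illustrative output (this seq_operations is wired up to
 * /proc/buddyinfo in fs/proc; the numbers below are made up): one line
 * per populated zone, one column per order, counting free blocks of
 * that size.
 *
 *	Node 0, zone      DMA      3      2      1      0      1 ...
 *	Node 0, zone   Normal    204    135     76     40     12 ...
 */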

static char *vmstat_text[] = {
        /* Zoned VM counters */
        "nr_anon_pages",
        "nr_mapped",
        "nr_file_pages",
        "nr_slab",
        "nr_page_table_pages",
        "nr_dirty",
        "nr_writeback",
        "nr_unstable",
        "nr_bounce",

#ifdef CONFIG_NUMA
        "numa_hit",
        "numa_miss",
        "numa_foreign",
        "numa_interleave",
        "numa_local",
        "numa_other",
#endif

#ifdef CONFIG_VM_EVENT_COUNTERS
        "pgpgin",
        "pgpgout",
        "pswpin",
        "pswpout",

        "pgalloc_dma",
        "pgalloc_dma32",
        "pgalloc_normal",
        "pgalloc_high",

        "pgfree",
        "pgactivate",
        "pgdeactivate",

        "pgfault",
        "pgmajfault",

        "pgrefill_dma",
        "pgrefill_dma32",
        "pgrefill_normal",
        "pgrefill_high",

        "pgsteal_dma",
        "pgsteal_dma32",
        "pgsteal_normal",
        "pgsteal_high",

        "pgscan_kswapd_dma",
        "pgscan_kswapd_dma32",
        "pgscan_kswapd_normal",
        "pgscan_kswapd_high",

        "pgscan_direct_dma",
        "pgscan_direct_dma32",
        "pgscan_direct_normal",
        "pgscan_direct_high",

        "pginodesteal",
        "slabs_scanned",
        "kswapd_steal",
        "kswapd_inodesteal",
        "pageoutrun",
        "allocstall",

        "pgrotated",
#endif
};

/*
 * Output information about zones in @pgdat.
 */
static int zoneinfo_show(struct seq_file *m, void *arg)
{
        pg_data_t *pgdat = arg;
        struct zone *zone;
        struct zone *node_zones = pgdat->node_zones;
        unsigned long flags;

        for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; zone++) {
                int i;

                if (!populated_zone(zone))
                        continue;

                spin_lock_irqsave(&zone->lock, flags);
                seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
                seq_printf(m,
                           "\n  pages free     %lu"
                           "\n        min      %lu"
                           "\n        low      %lu"
                           "\n        high     %lu"
                           "\n        active   %lu"
                           "\n        inactive %lu"
                           "\n        scanned  %lu (a: %lu i: %lu)"
                           "\n        spanned  %lu"
                           "\n        present  %lu",
                           zone->free_pages,
                           zone->pages_min,
                           zone->pages_low,
                           zone->pages_high,
                           zone->nr_active,
                           zone->nr_inactive,
                           zone->pages_scanned,
                           zone->nr_scan_active, zone->nr_scan_inactive,
                           zone->spanned_pages,
                           zone->present_pages);

                for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
                        seq_printf(m, "\n    %-12s %lu", vmstat_text[i],
                                        zone_page_state(zone, i));

                seq_printf(m,
                           "\n        protection: (%lu",
                           zone->lowmem_reserve[0]);
                for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
                        seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
                seq_printf(m,
                           ")"
                           "\n  pagesets");
                for_each_online_cpu(i) {
                        struct per_cpu_pageset *pageset;
                        int j;

                        pageset = zone_pcp(zone, i);
                        for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
                                if (pageset->pcp[j].count)
                                        break;
                        }
                        if (j == ARRAY_SIZE(pageset->pcp))
                                continue;
                        for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
                                seq_printf(m,
                                           "\n    cpu: %i pcp: %i"
                                           "\n              count: %i"
                                           "\n              high:  %i"
                                           "\n              batch: %i",
                                           i, j,
                                           pageset->pcp[j].count,
                                           pageset->pcp[j].high,
                                           pageset->pcp[j].batch);
                        }
                }
                seq_printf(m,
                           "\n  all_unreclaimable: %u"
                           "\n  prev_priority:     %i"
                           "\n  temp_priority:     %i"
                           "\n  start_pfn:         %lu",
                           zone->all_unreclaimable,
                           zone->prev_priority,
                           zone->temp_priority,
                           zone->zone_start_pfn);
                spin_unlock_irqrestore(&zone->lock, flags);
                seq_putc(m, '\n');
        }
        return 0;
}

struct seq_operations zoneinfo_op = {
        .start  = frag_start, /* iterate over all zones. The same as in
                               * fragmentation. */
        .next   = frag_next,
        .stop   = frag_stop,
        .show   = zoneinfo_show,
};
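
/*
 * Illustrative excerpt of a resulting /proc/zoneinfo entry (numbers
 * are made up):
 *
 *	Node 0, zone   Normal
 *	  pages free     3042
 *	        min      935
 *	        low      1168
 *	        high     1402
 *	  ...
 *	    nr_anon_pages 22190
 *	    nr_mapped    13651
 */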

static void *vmstat_start(struct seq_file *m, loff_t *pos)
{
        unsigned long *v;
#ifdef CONFIG_VM_EVENT_COUNTERS
        unsigned long *e;
#endif
        int i;

        if (*pos >= ARRAY_SIZE(vmstat_text))
                return NULL;

#ifdef CONFIG_VM_EVENT_COUNTERS
        v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long)
                        + sizeof(struct vm_event_state), GFP_KERNEL);
#else
        v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long),
                        GFP_KERNEL);
#endif
        m->private = v;
        if (!v)
                return ERR_PTR(-ENOMEM);
        for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
                v[i] = global_page_state(i);
#ifdef CONFIG_VM_EVENT_COUNTERS
        e = v + NR_VM_ZONE_STAT_ITEMS;
        all_vm_events(e);
        e[PGPGIN] /= 2;         /* sectors -> kbytes */
        e[PGPGOUT] /= 2;
#endif
        return v + *pos;
}
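
/*
 * Layout of the snapshot allocated above, one unsigned long per line
 * of /proc/vmstat:
 *
 *	v[0 .. NR_VM_ZONE_STAT_ITEMS-1]	zoned counters (global_page_state)
 *	v[NR_VM_ZONE_STAT_ITEMS .. ]	event counters (all_vm_events,
 *					only with CONFIG_VM_EVENT_COUNTERS)
 *
 * vmstat_next() and vmstat_show() below index into this single array
 * via *pos, with vmstat_text[] supplying the matching names.
 */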

static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
{
        (*pos)++;
        if (*pos >= ARRAY_SIZE(vmstat_text))
                return NULL;
        return (unsigned long *)m->private + *pos;
}

static int vmstat_show(struct seq_file *m, void *arg)
{
        unsigned long *l = arg;
        unsigned long off = l - (unsigned long *)m->private;

        seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
        return 0;
}

static void vmstat_stop(struct seq_file *m, void *arg)
{
        kfree(m->private);
        m->private = NULL;
}

struct seq_operations vmstat_op = {
        .start  = vmstat_start,
        .next   = vmstat_next,
        .stop   = vmstat_stop,
        .show   = vmstat_show,
};
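
/*
 * Example /proc/vmstat lines as emitted by vmstat_show() (values are
 * made up):
 *
 *	nr_anon_pages 22190
 *	nr_mapped 13651
 *	pgpgin 2181705
 */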

#endif /* CONFIG_PROC_FS */