/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

static struct trace_array               *irqsoff_trace __read_mostly;
static int                              tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_SPINLOCK(max_trace_lock);

enum {
        TRACER_IRQS_OFF         = (1 << 1),
        TRACER_PREEMPT_OFF      = (1 << 2),
};

static int trace_type __read_mostly;

static int save_lat_flag;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
{
        return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
        return ((trace_type & TRACER_IRQS_OFF) &&
                irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

#define TRACE_DISPLAY_GRAPH     1

static struct tracer_opt trace_opts[] = {
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        /* display latency trace as call graph */
        { TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) },
#endif
        { } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
        .val  = 0,
        .opts = trace_opts,
};

#define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH)

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * recorded a new maximum and could disturb our measurement with
 * serial console printouts, etc. Truly coinciding maximum latencies
 * should be rare, and anything that happens together also happens
 * separately, so this doesn't decrease the validity of the maximum
 * found:
 */
static __cacheline_aligned_in_smp       unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue, and data->disabled is
 *            incremented.
 *         0 if the trace is to be ignored, and data->disabled
 *            is kept the same.
 *
 * Note, this function is also used outside this ifdef but
 *  inside the #ifdef of the function graph tracer below.
 *  This is OK, since the function graph tracer is
 *  dependent on the function tracer.
 */
static int func_prolog_dec(struct trace_array *tr,
                           struct trace_array_cpu **data,
                           unsigned long *flags)
{
        long disabled;
        int cpu;

        /*
         * Does not matter if we preempt. We test the flags
         * afterward, to see if irqs are disabled or not.
         * If we preempt and get a false positive, the flags
         * test will fail.
         */
        cpu = raw_smp_processor_id();
        if (likely(!per_cpu(tracing_cpu, cpu)))
                return 0;

        local_save_flags(*flags);
        /* slight chance to get a false positive on tracing_cpu */
        if (!irqs_disabled_flags(*flags))
                return 0;

        *data = tr->data[cpu];
        disabled = atomic_inc_return(&(*data)->disabled);

        if (likely(disabled == 1))
                return 1;

        atomic_dec(&(*data)->disabled);

        return 0;
}

/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        if (!func_prolog_dec(tr, &data, &flags))
                return;

        trace_function(tr, ip, parent_ip, flags, preempt_count());

        atomic_dec(&data->disabled);
}

static struct ftrace_ops trace_ops __read_mostly =
{
        .func = irqsoff_tracer_call,
        .flags = FTRACE_OPS_FL_GLOBAL,
};
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
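/*
 * Handle toggling of the display-graph option: tear down the current
 * tracer variant, clear the per-cpu tracing state and the saved max
 * latency, then restart in the requested (graph or function) mode.
 */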
static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
{
        int cpu;

        if (!(bit & TRACE_DISPLAY_GRAPH))
                return -EINVAL;

        if (!(is_graph() ^ set))
                return 0;

        stop_irqsoff_tracer(irqsoff_trace, !set);

        for_each_possible_cpu(cpu)
                per_cpu(tracing_cpu, cpu) = 0;

        tracing_max_latency = 0;
        tracing_reset_online_cpus(irqsoff_trace);

        return start_irqsoff_tracer(irqsoff_trace, set);
}

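/*
 * Graph-entry hook: record a function entry event, but only while we
 * are inside a critical section on this cpu (see func_prolog_dec()).
 */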
static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int ret;
        int pc;

        if (!func_prolog_dec(tr, &data, &flags))
                return 0;

        pc = preempt_count();
        ret = __trace_graph_entry(tr, trace, flags, pc);
        atomic_dec(&data->disabled);

        return ret;
}

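/*
 * Graph-return hook: the counterpart of irqsoff_graph_entry(), called
 * when the traced function returns.
 */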
static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int pc;

        if (!func_prolog_dec(tr, &data, &flags))
                return;

        pc = preempt_count();
        __trace_graph_return(tr, trace, flags, pc);
        atomic_dec(&data->disabled);
}

static void irqsoff_trace_open(struct trace_iterator *iter)
{
        if (is_graph())
                graph_trace_open(iter);
}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
        if (iter->private)
                graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
                            TRACE_GRAPH_PRINT_PROC)

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
        /*
         * In graph mode call the graph tracer output function,
         * otherwise go with the TRACE_FN event handler
         */
        if (is_graph())
                return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

        return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_print_header(struct seq_file *s)
{
        if (is_graph())
                print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
        else
                trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
                 unsigned long ip, unsigned long parent_ip,
                 unsigned long flags, int pc)
{
        if (is_graph())
                trace_graph_function(tr, ip, parent_ip, flags, pc);
        else
                trace_function(tr, ip, parent_ip, flags, pc);
}

#else
#define __trace_function trace_function

static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
{
        return -EINVAL;
}

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
        return -1;
}

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
        return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
static void irqsoff_print_header(struct seq_file *s) { }
static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(cycle_t delta)
{
        if (tracing_thresh) {
                if (delta < tracing_thresh)
                        return 0;
        } else {
                if (delta <= tracing_max_latency)
                        return 0;
        }
        return 1;
}

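/*
 * Called when a critical section ends: compute how long irqs and/or
 * preemption were disabled and, if this is a new maximum (or above
 * tracing_thresh), record the snapshot via update_max_tr_single().
 */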
static void
check_critical_timing(struct trace_array *tr,
                      struct trace_array_cpu *data,
                      unsigned long parent_ip,
                      int cpu)
{
        cycle_t T0, T1, delta;
        unsigned long flags;
        int pc;

        T0 = data->preempt_timestamp;
        T1 = ftrace_now(cpu);
        delta = T1 - T0;

        local_save_flags(flags);

        pc = preempt_count();

        if (!report_latency(delta))
                goto out;

        spin_lock_irqsave(&max_trace_lock, flags);

        /* check if we are still the max latency */
        if (!report_latency(delta))
                goto out_unlock;

        __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
        /* Skip 5 functions to get to the irq/preempt enable function */
        __trace_stack(tr, flags, 5, pc);

        if (data->critical_sequence != max_sequence)
                goto out_unlock;

        data->critical_end = parent_ip;

        if (likely(!is_tracing_stopped())) {
                tracing_max_latency = delta;
                update_max_tr_single(tr, current, cpu);
        }

        max_sequence++;

out_unlock:
        spin_unlock_irqrestore(&max_trace_lock, flags);

out:
        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}

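/*
 * Mark the beginning of a critical section on this cpu: take a
 * timestamp, remember where the section started and flag the cpu as
 * currently tracing so that nested sections are ignored.
 */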
static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        if (likely(!tracer_enabled))
                return;

        cpu = raw_smp_processor_id();

        if (per_cpu(tracing_cpu, cpu))
                return;

        data = tr->data[cpu];

        if (unlikely(!data) || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        data->critical_start = parent_ip ? : ip;

        local_save_flags(flags);

        __trace_function(tr, ip, parent_ip, flags, preempt_count());

        per_cpu(tracing_cpu, cpu) = 1;

        atomic_dec(&data->disabled);
}

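/*
 * Mark the end of a critical section on this cpu and hand the measured
 * interval to check_critical_timing().
 */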
static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        cpu = raw_smp_processor_id();
        /* Always clear the tracing cpu on stopping the trace */
        if (unlikely(per_cpu(tracing_cpu, cpu)))
                per_cpu(tracing_cpu, cpu) = 0;
        else
                return;

        if (!tracer_enabled)
                return;

        data = tr->data[cpu];

        if (unlikely(!data) ||
            !data->critical_start || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        local_save_flags(flags);
        __trace_function(tr, ip, parent_ip, flags, preempt_count());
        check_critical_timing(tr, data, parent_ip ? : ip, cpu);
        data->critical_start = 0;
        atomic_dec(&data->disabled);
}

/* start and stop critical timings, used to exclude sections (e.g. idle) from the measurement */
void start_critical_timings(void)
{
        if (preempt_trace() || irq_trace())
                start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
        if (preempt_trace() || irq_trace())
                stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);

#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
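/*
 * With lockdep (CONFIG_PROVE_LOCKING) enabled, lockdep owns the
 * trace_hardirqs_* hooks, so the tracer is driven through the
 * time_hardirqs_on/off() callbacks instead.
 */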
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(a0, a1);
}

void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */

/*
 * Stubs:
 */

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}

/*
 * We are only interested in hardirq on/off events:
 */
void trace_hardirqs_on(void)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

void trace_hardirqs_on_caller(unsigned long caller_addr)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

void trace_hardirqs_off_caller(unsigned long caller_addr)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

#endif /* CONFIG_PROVE_LOCKING */
#endif /*  CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
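/*
 * Preempt on/off hooks, called from the preempt_count() manipulation
 * code: disabling preemption opens a critical section, re-enabling it
 * closes the section and checks the accumulated latency.
 */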
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
        if (preempt_trace())
                stop_critical_timing(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
        if (preempt_trace())
                start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */

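/*
 * Register either the plain function tracer or the function graph
 * tracer, depending on the display-graph option.
 */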
static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
        int ret = 0;

        if (!graph)
                ret = register_ftrace_function(&trace_ops);
        else
                ret = register_ftrace_graph(&irqsoff_graph_return,
                                            &irqsoff_graph_entry);

        if (!ret && tracing_is_enabled())
                tracer_enabled = 1;
        else
                tracer_enabled = 0;

        return ret;
}

static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
        tracer_enabled = 0;

        if (!graph)
                unregister_ftrace_function(&trace_ops);
        else
                unregister_ftrace_graph();
}

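/*
 * Common setup for the irqsoff, preemptoff and preemptirqsoff tracers:
 * force the latency output format, reset the saved max latency and the
 * per-cpu buffers, then start tracing.
 */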
static void __irqsoff_tracer_init(struct trace_array *tr)
{
        save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
        trace_flags |= TRACE_ITER_LATENCY_FMT;

        tracing_max_latency = 0;
        irqsoff_trace = tr;
        /* make sure that the tracer is visible */
        smp_wmb();
        tracing_reset_online_cpus(tr);

        if (start_irqsoff_tracer(tr, is_graph()))
                printk(KERN_ERR "failed to start irqsoff tracer\n");
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
        stop_irqsoff_tracer(tr, is_graph());

        if (!save_lat_flag)
                trace_flags &= ~TRACE_ITER_LATENCY_FMT;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
        tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
        tracer_enabled = 0;
}

#ifdef CONFIG_IRQSOFF_TRACER
static int irqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF;

        __irqsoff_tracer_init(tr);
        return 0;
}
static struct tracer irqsoff_tracer __read_mostly =
{
        .name           = "irqsoff",
        .init           = irqsoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = 1,
        .print_header   = irqsoff_print_header,
        .print_line     = irqsoff_print_line,
        .flags          = &tracer_flags,
        .set_flag       = irqsoff_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_irqsoff,
#endif
        .open           = irqsoff_trace_open,
        .close          = irqsoff_trace_close,
        .use_max_tr     = 1,
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
static int preemptoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_PREEMPT_OFF;

        __irqsoff_tracer_init(tr);
        return 0;
}

static struct tracer preemptoff_tracer __read_mostly =
{
        .name           = "preemptoff",
        .init           = preemptoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = 1,
        .print_header   = irqsoff_print_header,
        .print_line     = irqsoff_print_line,
        .flags          = &tracer_flags,
        .set_flag       = irqsoff_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_preemptoff,
#endif
        .open           = irqsoff_trace_open,
        .close          = irqsoff_trace_close,
        .use_max_tr     = 1,
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif

#if defined(CONFIG_IRQSOFF_TRACER) && \
        defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

        __irqsoff_tracer_init(tr);
        return 0;
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
        .name           = "preemptirqsoff",
        .init           = preemptirqsoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = 1,
        .print_header   = irqsoff_print_header,
        .print_line     = irqsoff_print_line,
        .flags          = &tracer_flags,
        .set_flag       = irqsoff_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_preemptirqsoff,
#endif
        .open           = irqsoff_trace_open,
        .close          = irqsoff_trace_close,
        .use_max_tr     = 1,
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif

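/*
 * Example usage from userspace (assuming debugfs is mounted at
 * /sys/kernel/debug):
 *
 *   echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
 *   echo irqsoff > /sys/kernel/debug/tracing/current_tracer
 *   ... let the system run for a while ...
 *   cat /sys/kernel/debug/tracing/trace
 *
 * The "preemptoff" and "preemptirqsoff" tracers are selected the same
 * way when CONFIG_PREEMPT_TRACER is enabled.
 */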
__init static int init_irqsoff_tracer(void)
{
        register_irqsoff(irqsoff_tracer);
        register_preemptoff(preemptoff_tracer);
        register_preemptirqsoff(preemptirqsoff_tracer);

        return 0;
}
device_initcall(init_irqsoff_tracer);