kernel/trace/trace_functions_graph.c
1 /*
2  *
3  * Function graph tracer.
4  * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
5  * Mostly borrowed from function tracer which
6  * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
7  *
8  */
9 #include <linux/debugfs.h>
10 #include <linux/uaccess.h>
11 #include <linux/ftrace.h>
12 #include <linux/slab.h>
13 #include <linux/fs.h>
14
15 #include "trace.h"
16 #include "trace_output.h"
17
18 struct fgraph_cpu_data {
19         pid_t           last_pid;
20         int             depth;
21         int             ignore;
22         unsigned long   enter_funcs[FTRACE_RETFUNC_DEPTH];
23 };
24
25 struct fgraph_data {
26         struct fgraph_cpu_data          *cpu_data;
27
28         /* Place to preserve last processed entry. */
29         struct ftrace_graph_ent_entry   ent;
30         struct ftrace_graph_ret_entry   ret;
31         int                             failed;
32         int                             cpu;
33 };
34
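/* Width in spaces of one level of call nesting in the output */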
35 #define TRACE_GRAPH_INDENT      2
36
37 /* Flag options */
38 #define TRACE_GRAPH_PRINT_OVERRUN       0x1
39 #define TRACE_GRAPH_PRINT_CPU           0x2
40 #define TRACE_GRAPH_PRINT_OVERHEAD      0x4
41 #define TRACE_GRAPH_PRINT_PROC          0x8
42 #define TRACE_GRAPH_PRINT_DURATION      0x10
43 #define TRACE_GRAPH_PRINT_ABS_TIME      0x20
44
45 static struct tracer_opt trace_opts[] = {
46         /* Display overruns? (for self-debugging purposes) */
47         { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
48         /* Display CPU ? */
49         { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
50         /* Display Overhead ? */
51         { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
52         /* Display proc name/pid */
53         { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
54         /* Display duration of execution */
55         { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
56         /* Display absolute time of an entry */
57         { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
58         { } /* Empty entry */
59 };
60
61 static struct tracer_flags tracer_flags = {
62         /* Don't display overruns and proc by default */
63         .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
64                TRACE_GRAPH_PRINT_DURATION,
65         .opts = trace_opts
66 };
67
68 static struct trace_array *graph_array;
69
70
71 /* Add a function return address to the trace stack on thread info. */
72 int
73 ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
74                          unsigned long frame_pointer)
75 {
76         unsigned long long calltime;
77         int index;
78
79         if (!current->ret_stack)
80                 return -EBUSY;
81
82         /*
83          * We must make sure the ret_stack is tested before we read
84          * anything else.
85          */
86         smp_rmb();
87
88         /* The return trace stack is full */
89         if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
90                 atomic_inc(&current->trace_overrun);
91                 return -EBUSY;
92         }
93
94         calltime = trace_clock_local();
95
96         index = ++current->curr_ret_stack;
97         barrier();
98         current->ret_stack[index].ret = ret;
99         current->ret_stack[index].func = func;
100         current->ret_stack[index].calltime = calltime;
101         current->ret_stack[index].subtime = 0;
102         current->ret_stack[index].fp = frame_pointer;
103         *depth = index;
104
105         return 0;
106 }
107
108 /* Retrieve a function return address from the trace stack on thread info. */
109 static void
110 ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
111                         unsigned long frame_pointer)
112 {
113         int index;
114
115         index = current->curr_ret_stack;
116
117         if (unlikely(index < 0)) {
118                 ftrace_graph_stop();
119                 WARN_ON(1);
120                 /* Might as well panic, otherwise we have nowhere to go */
121                 *ret = (unsigned long)panic;
122                 return;
123         }
124
125 #ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
126         /*
127          * The arch may choose to record the frame pointer used
128          * and check it here to make sure that it is what we expect it
129          * to be. If gcc does not set the place holder of the return
130          * address in the frame pointer, and does a copy instead, then
131          * the function graph trace will fail. This test detects this
132          * case.
133          *
134          * Currently, x86_32 built with -Os (optimize for size) makes the
135          * latest gcc do this.
136          */
137         if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
138                 ftrace_graph_stop();
139                 WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
140                      "  from func %ps return to %lx\n",
141                      current->ret_stack[index].fp,
142                      frame_pointer,
143                      (void *)current->ret_stack[index].func,
144                      current->ret_stack[index].ret);
145                 *ret = (unsigned long)panic;
146                 return;
147         }
148 #endif
149
150         *ret = current->ret_stack[index].ret;
151         trace->func = current->ret_stack[index].func;
152         trace->calltime = current->ret_stack[index].calltime;
153         trace->overrun = atomic_read(&current->trace_overrun);
154         trace->depth = index;
155 }
156
157 /*
158  * Send the trace to the ring-buffer.
159  * @return the original return address.
160  */
161 unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
162 {
163         struct ftrace_graph_ret trace;
164         unsigned long ret;
165
166         ftrace_pop_return_trace(&trace, &ret, frame_pointer);
167         trace.rettime = trace_clock_local();
168         ftrace_graph_return(&trace);
169         barrier();
170         current->curr_ret_stack--;
171
172         if (unlikely(!ret)) {
173                 ftrace_graph_stop();
174                 WARN_ON(1);
175                 /* Might as well panic. What else to do? */
176                 ret = (unsigned long)panic;
177         }
178
179         return ret;
180 }
181
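/*
 * Record a function entry event in the ring buffer.  Returns 0 if
 * tracing is disabled on this CPU or the buffer reservation fails,
 * 1 otherwise.
 */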
182 int __trace_graph_entry(struct trace_array *tr,
183                                 struct ftrace_graph_ent *trace,
184                                 unsigned long flags,
185                                 int pc)
186 {
187         struct ftrace_event_call *call = &event_funcgraph_entry;
188         struct ring_buffer_event *event;
189         struct ring_buffer *buffer = tr->buffer;
190         struct ftrace_graph_ent_entry *entry;
191
192         if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
193                 return 0;
194
195         event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
196                                           sizeof(*entry), flags, pc);
197         if (!event)
198                 return 0;
199         entry   = ring_buffer_event_data(event);
200         entry->graph_ent                        = *trace;
201         if (!filter_current_check_discard(buffer, call, entry, event))
202                 ring_buffer_unlock_commit(buffer, event);
203
204         return 1;
205 }
206
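/*
 * Entry callback registered through register_ftrace_graph().  Skips
 * filtered tasks and functions, then records the entry with interrupts
 * disabled and per-cpu recursion protection.
 */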
207 int trace_graph_entry(struct ftrace_graph_ent *trace)
208 {
209         struct trace_array *tr = graph_array;
210         struct trace_array_cpu *data;
211         unsigned long flags;
212         long disabled;
213         int ret;
214         int cpu;
215         int pc;
216
217         if (!ftrace_trace_task(current))
218                 return 0;
219
220         /* Trace it when it is nested in an enabled function, or is itself enabled. */
221         if (!(trace->depth || ftrace_graph_addr(trace->func)))
222                 return 0;
223
224         local_irq_save(flags);
225         cpu = raw_smp_processor_id();
226         data = tr->data[cpu];
227         disabled = atomic_inc_return(&data->disabled);
228         if (likely(disabled == 1)) {
229                 pc = preempt_count();
230                 ret = __trace_graph_entry(tr, trace, flags, pc);
231         } else {
232                 ret = 0;
233         }
234
235         atomic_dec(&data->disabled);
236         local_irq_restore(flags);
237
238         return ret;
239 }
240
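/*
 * With a duration threshold set, the entry event is not recorded;
 * whether to record is decided at function exit, once the duration
 * is known (see trace_graph_thresh_return()).
 */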
241 int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
242 {
243         if (tracing_thresh)
244                 return 1;
245         else
246                 return trace_graph_entry(trace);
247 }
248
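/* Record a function return event in the ring buffer */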
249 void __trace_graph_return(struct trace_array *tr,
250                                 struct ftrace_graph_ret *trace,
251                                 unsigned long flags,
252                                 int pc)
253 {
254         struct ftrace_event_call *call = &event_funcgraph_exit;
255         struct ring_buffer_event *event;
256         struct ring_buffer *buffer = tr->buffer;
257         struct ftrace_graph_ret_entry *entry;
258
259         if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
260                 return;
261
262         event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
263                                           sizeof(*entry), flags, pc);
264         if (!event)
265                 return;
266         entry   = ring_buffer_event_data(event);
267         entry->ret                              = *trace;
268         if (!filter_current_check_discard(buffer, call, entry, event))
269                 ring_buffer_unlock_commit(buffer, event);
270 }
271
272 void trace_graph_return(struct ftrace_graph_ret *trace)
273 {
274         struct trace_array *tr = graph_array;
275         struct trace_array_cpu *data;
276         unsigned long flags;
277         long disabled;
278         int cpu;
279         int pc;
280
281         local_irq_save(flags);
282         cpu = raw_smp_processor_id();
283         data = tr->data[cpu];
284         disabled = atomic_inc_return(&data->disabled);
285         if (likely(disabled == 1)) {
286                 pc = preempt_count();
287                 __trace_graph_return(tr, trace, flags, pc);
288         }
289         atomic_dec(&data->disabled);
290         local_irq_restore(flags);
291 }
292
293 void set_graph_array(struct trace_array *tr)
294 {
295         graph_array = tr;
296
297         /* Make graph_array visible before we start tracing */
298
299         smp_mb();
300 }
301
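/* Record the return event only if the function ran for at least tracing_thresh */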
302 void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
303 {
304         if (tracing_thresh &&
305             (trace->rettime - trace->calltime < tracing_thresh))
306                 return;
307         else
308                 trace_graph_return(trace);
309 }
310
311 static int graph_trace_init(struct trace_array *tr)
312 {
313         int ret;
314
315         set_graph_array(tr);
316         if (tracing_thresh)
317                 ret = register_ftrace_graph(&trace_graph_thresh_return,
318                                             &trace_graph_thresh_entry);
319         else
320                 ret = register_ftrace_graph(&trace_graph_return,
321                                             &trace_graph_entry);
322         if (ret)
323                 return ret;
324         tracing_start_cmdline_record();
325
326         return 0;
327 }
328
329 static void graph_trace_reset(struct trace_array *tr)
330 {
331         tracing_stop_cmdline_record();
332         unregister_ftrace_graph();
333 }
334
335 static int max_bytes_for_cpu;
336
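/* Print the CPU column, right-aligned to the width of the largest CPU id */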
337 static enum print_line_t
338 print_graph_cpu(struct trace_seq *s, int cpu)
339 {
340         int ret;
341
342         /*
343          * Start with a space character - to make it stand out
344          * to the right a bit when trace output is pasted into
345          * email:
346          */
347         ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
348         if (!ret)
349                 return TRACE_TYPE_PARTIAL_LINE;
350
351         return TRACE_TYPE_HANDLED;
352 }
353
354 #define TRACE_GRAPH_PROCINFO_LENGTH     14
355
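/* Print the "comm-pid" field, centered within TRACE_GRAPH_PROCINFO_LENGTH */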
356 static enum print_line_t
357 print_graph_proc(struct trace_seq *s, pid_t pid)
358 {
359         char comm[TASK_COMM_LEN];
360         /* sign + log10(MAX_INT) + '\0' */
361         char pid_str[11];
362         int spaces = 0;
363         int ret;
364         int len;
365         int i;
366
367         trace_find_cmdline(pid, comm);
368         comm[7] = '\0';
369         sprintf(pid_str, "%d", pid);
370
371         /* 1 stands for the "-" character */
372         len = strlen(comm) + strlen(pid_str) + 1;
373
374         if (len < TRACE_GRAPH_PROCINFO_LENGTH)
375                 spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;
376
377         /* Leading spaces to center the field */
378         for (i = 0; i < spaces / 2; i++) {
379                 ret = trace_seq_printf(s, " ");
380                 if (!ret)
381                         return TRACE_TYPE_PARTIAL_LINE;
382         }
383
384         ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
385         if (!ret)
386                 return TRACE_TYPE_PARTIAL_LINE;
387
388         /* Trailing spaces to center the field */
389         for (i = 0; i < spaces - (spaces / 2); i++) {
390                 ret = trace_seq_printf(s, " ");
391                 if (!ret)
392                         return TRACE_TYPE_PARTIAL_LINE;
393         }
394         return TRACE_TYPE_HANDLED;
395 }
396
397
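/* Print the latency-format flags (irqs-off, need-resched, ...) for this entry */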
398 static enum print_line_t
399 print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
400 {
401         if (!trace_seq_putc(s, ' '))
402                 return 0;
403
404         return trace_print_lat_fmt(s, entry);
405 }
406
407 /* If the pid changed since the last trace, output this event */
408 static enum print_line_t
409 verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
410 {
411         pid_t prev_pid;
412         pid_t *last_pid;
413         int ret;
414
415         if (!data)
416                 return TRACE_TYPE_HANDLED;
417
418         last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
419
420         if (*last_pid == pid)
421                 return TRACE_TYPE_HANDLED;
422
423         prev_pid = *last_pid;
424         *last_pid = pid;
425
426         if (prev_pid == -1)
427                 return TRACE_TYPE_HANDLED;
428 /*
429  * Context-switch trace line:
430
431  ------------------------------------------
432  | 1)  migration/0--1  =>  sshd-1755
433  ------------------------------------------
434
435  */
436         ret = trace_seq_printf(s,
437                 " ------------------------------------------\n");
438         if (!ret)
439                 return TRACE_TYPE_PARTIAL_LINE;
440
441         ret = print_graph_cpu(s, cpu);
442         if (ret == TRACE_TYPE_PARTIAL_LINE)
443                 return TRACE_TYPE_PARTIAL_LINE;
444
445         ret = print_graph_proc(s, prev_pid);
446         if (ret == TRACE_TYPE_PARTIAL_LINE)
447                 return TRACE_TYPE_PARTIAL_LINE;
448
449         ret = trace_seq_printf(s, " => ");
450         if (!ret)
451                 return TRACE_TYPE_PARTIAL_LINE;
452
453         ret = print_graph_proc(s, pid);
454         if (ret == TRACE_TYPE_PARTIAL_LINE)
455                 return TRACE_TYPE_PARTIAL_LINE;
456
457         ret = trace_seq_printf(s,
458                 "\n ------------------------------------------\n\n");
459         if (!ret)
460                 return TRACE_TYPE_PARTIAL_LINE;
461
462         return TRACE_TYPE_HANDLED;
463 }
464
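/*
 * Peek at the next event in the ring buffer.  If it is the return that
 * matches @curr (same pid and function), this call is a leaf and the
 * two events can be folded into a single "func();" line.  Returns the
 * matching return entry, or NULL if this entry is not a leaf.
 */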
465 static struct ftrace_graph_ret_entry *
466 get_return_for_leaf(struct trace_iterator *iter,
467                 struct ftrace_graph_ent_entry *curr)
468 {
469         struct fgraph_data *data = iter->private;
470         struct ring_buffer_iter *ring_iter = NULL;
471         struct ring_buffer_event *event;
472         struct ftrace_graph_ret_entry *next;
473
474         /*
475          * If the previous output failed to write to the seq buffer,
476          * then we just reuse the data from before.
477          */
478         if (data && data->failed) {
479                 curr = &data->ent;
480                 next = &data->ret;
481         } else {
482
483                 ring_iter = iter->buffer_iter[iter->cpu];
484
485                 /* First peek to compare current entry and the next one */
486                 if (ring_iter)
487                         event = ring_buffer_iter_peek(ring_iter, NULL);
488                 else {
489                         /*
490                          * We need to consume the current entry to see
491                          * the next one.
492                          */
493                         ring_buffer_consume(iter->tr->buffer, iter->cpu,
494                                             NULL, NULL);
495                         event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
496                                                  NULL, NULL);
497                 }
498
499                 if (!event)
500                         return NULL;
501
502                 next = ring_buffer_event_data(event);
503
504                 if (data) {
505                         /*
506                          * Save current and next entries for later reference
507                          * if the output fails.
508                          */
509                         data->ent = *curr;
510                         /*
511                          * If the next event is not a return type, then
512                          * we only care about what type it is. Otherwise we can
513                          * safely copy the entire event.
514                          */
515                         if (next->ent.type == TRACE_GRAPH_RET)
516                                 data->ret = *next;
517                         else
518                                 data->ret.ent.type = next->ent.type;
519                 }
520         }
521
522         if (next->ent.type != TRACE_GRAPH_RET)
523                 return NULL;
524
525         if (curr->ent.pid != next->ent.pid ||
526                         curr->graph_ent.func != next->ret.func)
527                 return NULL;
528
529         /* this is a leaf, now advance the iterator */
530         if (ring_iter)
531                 ring_buffer_read(ring_iter, NULL);
532
533         return next;
534 }
535
536 /* Signal an execution time overhead to the output */
537 static int
538 print_graph_overhead(unsigned long long duration, struct trace_seq *s,
539                      u32 flags)
540 {
541         /* If the duration column is disabled, we don't need anything */
542         if (!(flags & TRACE_GRAPH_PRINT_DURATION))
543                 return 1;
544
545         /* Non-nested entry or return */
546         if (duration == -1)
547                 return trace_seq_printf(s, "  ");
548
549         if (flags & TRACE_GRAPH_PRINT_OVERHEAD) {
550                 /* Duration exceeded 100 usecs */
551                 if (duration > 100000ULL)
552                         return trace_seq_printf(s, "! ");
553
554                 /* Duration exceeded 10 usecs */
555                 if (duration > 10000ULL)
556                         return trace_seq_printf(s, "+ ");
557         }
558
559         return trace_seq_printf(s, "  ");
560 }
561
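/* Print the absolute timestamp as seconds.microseconds */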
562 static int print_graph_abs_time(u64 t, struct trace_seq *s)
563 {
564         unsigned long usecs_rem;
565
566         usecs_rem = do_div(t, NSEC_PER_SEC);
567         usecs_rem /= 1000;
568
569         return trace_seq_printf(s, "%5lu.%06lu |  ",
570                         (unsigned long)t, usecs_rem);
571 }
572
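/*
 * Print an "==========>" / "<==========" marker when entering or
 * leaving an interrupt handler (address inside the irqentry section).
 */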
573 static enum print_line_t
574 print_graph_irq(struct trace_iterator *iter, unsigned long addr,
575                 enum trace_type type, int cpu, pid_t pid, u32 flags)
576 {
577         int ret;
578         struct trace_seq *s = &iter->seq;
579
580         if (addr < (unsigned long)__irqentry_text_start ||
581                 addr >= (unsigned long)__irqentry_text_end)
582                 return TRACE_TYPE_UNHANDLED;
583
584         /* Absolute time */
585         if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
586                 ret = print_graph_abs_time(iter->ts, s);
587                 if (!ret)
588                         return TRACE_TYPE_PARTIAL_LINE;
589         }
590
591         /* Cpu */
592         if (flags & TRACE_GRAPH_PRINT_CPU) {
593                 ret = print_graph_cpu(s, cpu);
594                 if (ret == TRACE_TYPE_PARTIAL_LINE)
595                         return TRACE_TYPE_PARTIAL_LINE;
596         }
597
598         /* Proc */
599         if (flags & TRACE_GRAPH_PRINT_PROC) {
600                 ret = print_graph_proc(s, pid);
601                 if (ret == TRACE_TYPE_PARTIAL_LINE)
602                         return TRACE_TYPE_PARTIAL_LINE;
603                 ret = trace_seq_printf(s, " | ");
604                 if (!ret)
605                         return TRACE_TYPE_PARTIAL_LINE;
606         }
607
608         /* No overhead */
609         ret = print_graph_overhead(-1, s, flags);
610         if (!ret)
611                 return TRACE_TYPE_PARTIAL_LINE;
612
613         if (type == TRACE_GRAPH_ENT)
614                 ret = trace_seq_printf(s, "==========>");
615         else
616                 ret = trace_seq_printf(s, "<==========");
617
618         if (!ret)
619                 return TRACE_TYPE_PARTIAL_LINE;
620
621         /* Don't close the duration column if we don't have one */
622         if (flags & TRACE_GRAPH_PRINT_DURATION)
623                 trace_seq_printf(s, " |");
624         ret = trace_seq_printf(s, "\n");
625
626         if (!ret)
627                 return TRACE_TYPE_PARTIAL_LINE;
628         return TRACE_TYPE_HANDLED;
629 }
630
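/*
 * Format a duration given in nanoseconds as microseconds with a
 * fractional part, padded to the fixed width of the duration column.
 */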
631 enum print_line_t
632 trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
633 {
634         unsigned long nsecs_rem = do_div(duration, 1000);
635         /* log10(ULONG_MAX) + '\0' */
636         char msecs_str[21];
637         char nsecs_str[5];
638         int ret, len;
639         int i;
640
641         sprintf(msecs_str, "%lu", (unsigned long) duration);
642
643         /* Print the integer part (microseconds) */
644         ret = trace_seq_printf(s, "%s", msecs_str);
645         if (!ret)
646                 return TRACE_TYPE_PARTIAL_LINE;
647
648         len = strlen(msecs_str);
649
650         /* Print the fractional nanoseconds (keep the column within 7 digits) */
651         if (len < 7) {
652                 snprintf(nsecs_str, min(sizeof(nsecs_str), 8UL - len), "%03lu",
653                          nsecs_rem);
654                 ret = trace_seq_printf(s, ".%s", nsecs_str);
655                 if (!ret)
656                         return TRACE_TYPE_PARTIAL_LINE;
657                 len += strlen(nsecs_str);
658         }
659
660         ret = trace_seq_printf(s, " us ");
661         if (!ret)
662                 return TRACE_TYPE_PARTIAL_LINE;
663
664         /* Print remaining spaces to fit the row's width */
665         for (i = len; i < 7; i++) {
666                 ret = trace_seq_printf(s, " ");
667                 if (!ret)
668                         return TRACE_TYPE_PARTIAL_LINE;
669         }
670         return TRACE_TYPE_HANDLED;
671 }
672
673 static enum print_line_t
674 print_graph_duration(unsigned long long duration, struct trace_seq *s)
675 {
676         int ret;
677
678         ret = trace_print_graph_duration(duration, s);
679         if (ret != TRACE_TYPE_HANDLED)
680                 return ret;
681
682         ret = trace_seq_printf(s, "|  ");
683         if (!ret)
684                 return TRACE_TYPE_PARTIAL_LINE;
685
686         return TRACE_TYPE_HANDLED;
687 }
688
689 /* Case of a leaf function on its call entry */
690 static enum print_line_t
691 print_graph_entry_leaf(struct trace_iterator *iter,
692                 struct ftrace_graph_ent_entry *entry,
693                 struct ftrace_graph_ret_entry *ret_entry,
694                 struct trace_seq *s, u32 flags)
695 {
696         struct fgraph_data *data = iter->private;
697         struct ftrace_graph_ret *graph_ret;
698         struct ftrace_graph_ent *call;
699         unsigned long long duration;
700         int ret;
701         int i;
702
703         graph_ret = &ret_entry->ret;
704         call = &entry->graph_ent;
705         duration = graph_ret->rettime - graph_ret->calltime;
706
707         if (data) {
708                 struct fgraph_cpu_data *cpu_data;
709                 int cpu = iter->cpu;
710
711                 cpu_data = per_cpu_ptr(data->cpu_data, cpu);
712
713                 /*
714                  * Comments display at depth + 1. Since this is a
715                  * leaf function, keep the comments at the same
716                  * depth as this call.
717                  */
718                 cpu_data->depth = call->depth - 1;
719
720                 /* No need to keep this function around for this depth */
721                 if (call->depth < FTRACE_RETFUNC_DEPTH)
722                         cpu_data->enter_funcs[call->depth] = 0;
723         }
724
725         /* Overhead */
726         ret = print_graph_overhead(duration, s, flags);
727         if (!ret)
728                 return TRACE_TYPE_PARTIAL_LINE;
729
730         /* Duration */
731         if (flags & TRACE_GRAPH_PRINT_DURATION) {
732                 ret = print_graph_duration(duration, s);
733                 if (ret == TRACE_TYPE_PARTIAL_LINE)
734                         return TRACE_TYPE_PARTIAL_LINE;
735         }
736
737         /* Function */
738         for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
739                 ret = trace_seq_printf(s, " ");
740                 if (!ret)
741                         return TRACE_TYPE_PARTIAL_LINE;
742         }
743
744         ret = trace_seq_printf(s, "%ps();\n", (void *)call->func);
745         if (!ret)
746                 return TRACE_TYPE_PARTIAL_LINE;
747
748         return TRACE_TYPE_HANDLED;
749 }
750
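/* Case of a non-leaf function: print "func() {" and let its children follow */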
751 static enum print_line_t
752 print_graph_entry_nested(struct trace_iterator *iter,
753                          struct ftrace_graph_ent_entry *entry,
754                          struct trace_seq *s, int cpu, u32 flags)
755 {
756         struct ftrace_graph_ent *call = &entry->graph_ent;
757         struct fgraph_data *data = iter->private;
758         int ret;
759         int i;
760
761         if (data) {
762                 struct fgraph_cpu_data *cpu_data;
763                 int cpu = iter->cpu;
764
765                 cpu_data = per_cpu_ptr(data->cpu_data, cpu);
766                 cpu_data->depth = call->depth;
767
768                 /* Save this function pointer to see if the exit matches */
769                 if (call->depth < FTRACE_RETFUNC_DEPTH)
770                         cpu_data->enter_funcs[call->depth] = call->func;
771         }
772
773         /* No overhead */
774         ret = print_graph_overhead(-1, s, flags);
775         if (!ret)
776                 return TRACE_TYPE_PARTIAL_LINE;
777
778         /* No time */
779         if (flags & TRACE_GRAPH_PRINT_DURATION) {
780                 ret = trace_seq_printf(s, "            |  ");
781                 if (!ret)
782                         return TRACE_TYPE_PARTIAL_LINE;
783         }
784
785         /* Function */
786         for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
787                 ret = trace_seq_printf(s, " ");
788                 if (!ret)
789                         return TRACE_TYPE_PARTIAL_LINE;
790         }
791
792         ret = trace_seq_printf(s, "%ps() {\n", (void *)call->func);
793         if (!ret)
794                 return TRACE_TYPE_PARTIAL_LINE;
795
796         /*
797          * we already consumed the current entry to check the next one
798          * and see if this is a leaf.
799          */
800         return TRACE_TYPE_NO_CONSUME;
801 }
802
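/*
 * Print the fields common to every line: context switch banner, irq
 * markers, absolute time, CPU, process and latency columns, depending
 * on @flags.
 */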
803 static enum print_line_t
804 print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
805                      int type, unsigned long addr, u32 flags)
806 {
807         struct fgraph_data *data = iter->private;
808         struct trace_entry *ent = iter->ent;
809         int cpu = iter->cpu;
810         int ret;
811
812         /* Pid */
813         if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
814                 return TRACE_TYPE_PARTIAL_LINE;
815
816         if (type) {
817                 /* Interrupt */
818                 ret = print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
819                 if (ret == TRACE_TYPE_PARTIAL_LINE)
820                         return TRACE_TYPE_PARTIAL_LINE;
821         }
822
823         /* Absolute time */
824         if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
825                 ret = print_graph_abs_time(iter->ts, s);
826                 if (!ret)
827                         return TRACE_TYPE_PARTIAL_LINE;
828         }
829
830         /* Cpu */
831         if (flags & TRACE_GRAPH_PRINT_CPU) {
832                 ret = print_graph_cpu(s, cpu);
833                 if (ret == TRACE_TYPE_PARTIAL_LINE)
834                         return TRACE_TYPE_PARTIAL_LINE;
835         }
836
837         /* Proc */
838         if (flags & TRACE_GRAPH_PRINT_PROC) {
839                 ret = print_graph_proc(s, ent->pid);
840                 if (ret == TRACE_TYPE_PARTIAL_LINE)
841                         return TRACE_TYPE_PARTIAL_LINE;
842
843                 ret = trace_seq_printf(s, " | ");
844                 if (!ret)
845                         return TRACE_TYPE_PARTIAL_LINE;
846         }
847
848         /* Latency format */
849         if (trace_flags & TRACE_ITER_LATENCY_FMT) {
850                 ret = print_graph_lat_fmt(s, ent);
851                 if (ret == TRACE_TYPE_PARTIAL_LINE)
852                         return TRACE_TYPE_PARTIAL_LINE;
853         }
854
855         return 0;
856 }
857
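/* Print a function entry, folding it into a single line if it is a leaf */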
858 static enum print_line_t
859 print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
860                         struct trace_iterator *iter, u32 flags)
861 {
862         struct fgraph_data *data = iter->private;
863         struct ftrace_graph_ent *call = &field->graph_ent;
864         struct ftrace_graph_ret_entry *leaf_ret;
865         static enum print_line_t ret;
866         int cpu = iter->cpu;
867
868         if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags))
869                 return TRACE_TYPE_PARTIAL_LINE;
870
871         leaf_ret = get_return_for_leaf(iter, field);
872         if (leaf_ret)
873                 ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
874         else
875                 ret = print_graph_entry_nested(iter, field, s, cpu, flags);
876
877         if (data) {
878                 /*
879                  * If we failed to write our output, then we need to make
880                  * note of it, because we already consumed our entry.
881                  */
882                 if (s->full) {
883                         data->failed = 1;
884                         data->cpu = cpu;
885                 } else
886                         data->failed = 0;
887         }
888
889         return ret;
890 }
891
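/* Print the closing brace of a function together with its duration */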
892 static enum print_line_t
893 print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
894                    struct trace_entry *ent, struct trace_iterator *iter,
895                    u32 flags)
896 {
897         unsigned long long duration = trace->rettime - trace->calltime;
898         struct fgraph_data *data = iter->private;
899         pid_t pid = ent->pid;
900         int cpu = iter->cpu;
901         int func_match = 1;
902         int ret;
903         int i;
904
905         if (data) {
906                 struct fgraph_cpu_data *cpu_data;
907                 int cpu = iter->cpu;
908
909                 cpu_data = per_cpu_ptr(data->cpu_data, cpu);
910
911                 /*
912                  * Comments display at depth + 1. This is the return
913                  * from a function, so we now want the comments to
914                  * display at the same level as the closing bracket.
915                  */
916                 cpu_data->depth = trace->depth - 1;
917
918                 if (trace->depth < FTRACE_RETFUNC_DEPTH) {
919                         if (cpu_data->enter_funcs[trace->depth] != trace->func)
920                                 func_match = 0;
921                         cpu_data->enter_funcs[trace->depth] = 0;
922                 }
923         }
924
925         if (print_graph_prologue(iter, s, 0, 0, flags))
926                 return TRACE_TYPE_PARTIAL_LINE;
927
928         /* Overhead */
929         ret = print_graph_overhead(duration, s, flags);
930         if (!ret)
931                 return TRACE_TYPE_PARTIAL_LINE;
932
933         /* Duration */
934         if (flags & TRACE_GRAPH_PRINT_DURATION) {
935                 ret = print_graph_duration(duration, s);
936                 if (ret == TRACE_TYPE_PARTIAL_LINE)
937                         return TRACE_TYPE_PARTIAL_LINE;
938         }
939
940         /* Closing brace */
941         for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
942                 ret = trace_seq_printf(s, " ");
943                 if (!ret)
944                         return TRACE_TYPE_PARTIAL_LINE;
945         }
946
947         /*
948          * If the return function does not have a matching entry,
949          * then the entry was lost. Instead of just printing
950          * the '}' and letting the user guess what function this
951          * belongs to, write out the function name.
952          */
953         if (func_match) {
954                 ret = trace_seq_printf(s, "}\n");
955                 if (!ret)
956                         return TRACE_TYPE_PARTIAL_LINE;
957         } else {
958                 ret = trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
959                 if (!ret)
960                         return TRACE_TYPE_PARTIAL_LINE;
961         }
962
963         /* Overrun */
964         if (flags & TRACE_GRAPH_PRINT_OVERRUN) {
965                 ret = trace_seq_printf(s, " (Overruns: %lu)\n",
966                                         trace->overrun);
967                 if (!ret)
968                         return TRACE_TYPE_PARTIAL_LINE;
969         }
970
971         ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
972                               cpu, pid, flags);
973         if (ret == TRACE_TYPE_PARTIAL_LINE)
974                 return TRACE_TYPE_PARTIAL_LINE;
975
976         return TRACE_TYPE_HANDLED;
977 }
978
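/*
 * Render a non-graph event (trace_printk() output or any other event)
 * as a C-style comment indented at the current call depth.
 */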
979 static enum print_line_t
980 print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
981                     struct trace_iterator *iter, u32 flags)
982 {
983         unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
984         struct fgraph_data *data = iter->private;
985         struct trace_event *event;
986         int depth = 0;
987         int ret;
988         int i;
989
990         if (data)
991                 depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;
992
993         if (print_graph_prologue(iter, s, 0, 0, flags))
994                 return TRACE_TYPE_PARTIAL_LINE;
995
996         /* No overhead */
997         ret = print_graph_overhead(-1, s, flags);
998         if (!ret)
999                 return TRACE_TYPE_PARTIAL_LINE;
1000
1001         /* No time */
1002         if (flags & TRACE_GRAPH_PRINT_DURATION) {
1003                 ret = trace_seq_printf(s, "            |  ");
1004                 if (!ret)
1005                         return TRACE_TYPE_PARTIAL_LINE;
1006         }
1007
1008         /* Indentation */
1009         if (depth > 0)
1010                 for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
1011                         ret = trace_seq_printf(s, " ");
1012                         if (!ret)
1013                                 return TRACE_TYPE_PARTIAL_LINE;
1014                 }
1015
1016         /* The comment */
1017         ret = trace_seq_printf(s, "/* ");
1018         if (!ret)
1019                 return TRACE_TYPE_PARTIAL_LINE;
1020
1021         switch (iter->ent->type) {
1022         case TRACE_BPRINT:
1023                 ret = trace_print_bprintk_msg_only(iter);
1024                 if (ret != TRACE_TYPE_HANDLED)
1025                         return ret;
1026                 break;
1027         case TRACE_PRINT:
1028                 ret = trace_print_printk_msg_only(iter);
1029                 if (ret != TRACE_TYPE_HANDLED)
1030                         return ret;
1031                 break;
1032         default:
1033                 event = ftrace_find_event(ent->type);
1034                 if (!event)
1035                         return TRACE_TYPE_UNHANDLED;
1036
1037                 ret = event->funcs->trace(iter, sym_flags, event);
1038                 if (ret != TRACE_TYPE_HANDLED)
1039                         return ret;
1040         }
1041
1042         /* Strip ending newline */
1043         if (s->buffer[s->len - 1] == '\n') {
1044                 s->buffer[s->len - 1] = '\0';
1045                 s->len--;
1046         }
1047
1048         ret = trace_seq_printf(s, " */\n");
1049         if (!ret)
1050                 return TRACE_TYPE_PARTIAL_LINE;
1051
1052         return TRACE_TYPE_HANDLED;
1053 }
1054
1055
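/* Main output callback: dispatch the current event to the matching printer */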
1056 enum print_line_t
1057 print_graph_function_flags(struct trace_iterator *iter, u32 flags)
1058 {
1059         struct ftrace_graph_ent_entry *field;
1060         struct fgraph_data *data = iter->private;
1061         struct trace_entry *entry = iter->ent;
1062         struct trace_seq *s = &iter->seq;
1063         int cpu = iter->cpu;
1064         int ret;
1065
1066         if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
1067                 per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
1068                 return TRACE_TYPE_HANDLED;
1069         }
1070
1071         /*
1072          * If the last output failed, there's a possibility we need
1073          * to print out the missing entry which would never go out.
1074          */
1075         if (data && data->failed) {
1076                 field = &data->ent;
1077                 iter->cpu = data->cpu;
1078                 ret = print_graph_entry(field, s, iter, flags);
1079                 if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
1080                         per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
1081                         ret = TRACE_TYPE_NO_CONSUME;
1082                 }
1083                 iter->cpu = cpu;
1084                 return ret;
1085         }
1086
1087         switch (entry->type) {
1088         case TRACE_GRAPH_ENT: {
1089                 /*
1090                  * print_graph_entry() may consume the current event,
1091                  * thus @field may become invalid, so we need to save it.
1092                  * sizeof(struct ftrace_graph_ent_entry) is very small,
1093                  * so it can safely be saved on the stack.
1094                  */
1095                 struct ftrace_graph_ent_entry saved;
1096                 trace_assign_type(field, entry);
1097                 saved = *field;
1098                 return print_graph_entry(&saved, s, iter, flags);
1099         }
1100         case TRACE_GRAPH_RET: {
1101                 struct ftrace_graph_ret_entry *field;
1102                 trace_assign_type(field, entry);
1103                 return print_graph_return(&field->ret, s, entry, iter, flags);
1104         }
1105         case TRACE_STACK:
1106         case TRACE_FN:
1107                 /* don't trace stack and function entries as comments */
1108                 return TRACE_TYPE_UNHANDLED;
1109
1110         default:
1111                 return print_graph_comment(s, entry, iter, flags);
1112         }
1113
1114         return TRACE_TYPE_HANDLED;
1115 }
1116
1117 static enum print_line_t
1118 print_graph_function(struct trace_iterator *iter)
1119 {
1120         return print_graph_function_flags(iter, tracer_flags.val);
1121 }
1122
1123 static enum print_line_t
1124 print_graph_function_event(struct trace_iterator *iter, int flags,
1125                            struct trace_event *event)
1126 {
1127         return print_graph_function(iter);
1128 }
1129
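/* Print the latency-format legend above the column headers */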
1130 static void print_lat_header(struct seq_file *s, u32 flags)
1131 {
1132         static const char spaces[] = "                " /* 16 spaces */
1133                 "    "                                  /* 4 spaces */
1134                 "                 ";                    /* 17 spaces */
1135         int size = 0;
1136
1137         if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1138                 size += 16;
1139         if (flags & TRACE_GRAPH_PRINT_CPU)
1140                 size += 4;
1141         if (flags & TRACE_GRAPH_PRINT_PROC)
1142                 size += 17;
1143
1144         seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
1145         seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
1146         seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
1147         seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
1148         seq_printf(s, "#%.*s||| / _-=> lock-depth      \n", size, spaces);
1149         seq_printf(s, "#%.*s|||| /                     \n", size, spaces);
1150 }
1151
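/* Print the column headers matching the enabled flags */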
1152 void print_graph_headers_flags(struct seq_file *s, u32 flags)
1153 {
1154         int lat = trace_flags & TRACE_ITER_LATENCY_FMT;
1155
1156         if (lat)
1157                 print_lat_header(s, flags);
1158
1159         /* 1st line */
1160         seq_printf(s, "#");
1161         if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1162                 seq_printf(s, "     TIME       ");
1163         if (flags & TRACE_GRAPH_PRINT_CPU)
1164                 seq_printf(s, " CPU");
1165         if (flags & TRACE_GRAPH_PRINT_PROC)
1166                 seq_printf(s, "  TASK/PID       ");
1167         if (lat)
1168                 seq_printf(s, "|||||");
1169         if (flags & TRACE_GRAPH_PRINT_DURATION)
1170                 seq_printf(s, "  DURATION   ");
1171         seq_printf(s, "               FUNCTION CALLS\n");
1172
1173         /* 2nd line */
1174         seq_printf(s, "#");
1175         if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1176                 seq_printf(s, "      |         ");
1177         if (flags & TRACE_GRAPH_PRINT_CPU)
1178                 seq_printf(s, " |  ");
1179         if (flags & TRACE_GRAPH_PRINT_PROC)
1180                 seq_printf(s, "   |    |        ");
1181         if (lat)
1182                 seq_printf(s, "|||||");
1183         if (flags & TRACE_GRAPH_PRINT_DURATION)
1184                 seq_printf(s, "   |   |      ");
1185         seq_printf(s, "               |   |   |   |\n");
1186 }
1187
1188 void print_graph_headers(struct seq_file *s)
1189 {
1190         print_graph_headers_flags(s, tracer_flags.val);
1191 }
1192
1193 void graph_trace_open(struct trace_iterator *iter)
1194 {
1195         /* pid and depth on the last trace processed */
1196         struct fgraph_data *data;
1197         int cpu;
1198
1199         iter->private = NULL;
1200
1201         data = kzalloc(sizeof(*data), GFP_KERNEL);
1202         if (!data)
1203                 goto out_err;
1204
1205         data->cpu_data = alloc_percpu(struct fgraph_cpu_data);
1206         if (!data->cpu_data)
1207                 goto out_err_free;
1208
1209         for_each_possible_cpu(cpu) {
1210                 pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
1211                 int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
1212                 int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
1213                 *pid = -1;
1214                 *depth = 0;
1215                 *ignore = 0;
1216         }
1217
1218         iter->private = data;
1219
1220         return;
1221
1222  out_err_free:
1223         kfree(data);
1224  out_err:
1225         pr_warning("function graph tracer: not enough memory\n");
1226 }
1227
1228 void graph_trace_close(struct trace_iterator *iter)
1229 {
1230         struct fgraph_data *data = iter->private;
1231
1232         if (data) {
1233                 free_percpu(data->cpu_data);
1234                 kfree(data);
1235         }
1236 }
1237
1238 static struct trace_event_functions graph_functions = {
1239         .trace          = print_graph_function_event,
1240 };
1241
1242 static struct trace_event graph_trace_entry_event = {
1243         .type           = TRACE_GRAPH_ENT,
1244         .funcs          = &graph_functions,
1245 };
1246
1247 static struct trace_event graph_trace_ret_event = {
1248         .type           = TRACE_GRAPH_RET,
1249         .funcs          = &graph_functions
1250 };
1251
1252 static struct tracer graph_trace __read_mostly = {
1253         .name           = "function_graph",
1254         .open           = graph_trace_open,
1255         .pipe_open      = graph_trace_open,
1256         .close          = graph_trace_close,
1257         .pipe_close     = graph_trace_close,
1258         .wait_pipe      = poll_wait_pipe,
1259         .init           = graph_trace_init,
1260         .reset          = graph_trace_reset,
1261         .print_line     = print_graph_function,
1262         .print_header   = print_graph_headers,
1263         .flags          = &tracer_flags,
1264 #ifdef CONFIG_FTRACE_SELFTEST
1265         .selftest       = trace_selftest_startup_function_graph,
1266 #endif
1267 };
1268
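/* Compute the CPU column width and register the graph event printers and the tracer */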
1269 static __init int init_graph_trace(void)
1270 {
1271         max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);
1272
1273         if (!register_ftrace_event(&graph_trace_entry_event)) {
1274                 pr_warning("Warning: could not register graph trace events\n");
1275                 return 1;
1276         }
1277
1278         if (!register_ftrace_event(&graph_trace_ret_event)) {
1279                 pr_warning("Warning: could not register graph trace events\n");
1280                 return 1;
1281         }
1282
1283         return register_tracer(&graph_trace);
1284 }
1285
1286 device_initcall(init_graph_trace);