/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

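/*
 * Per-cpu state used while rendering the output. last_pid lets
 * verif_pid() detect context switches, depth tracks the current call
 * nesting for indentation, and enter_funcs[] remembers entry addresses
 * so that returns can be matched against their entries.
 */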
struct fgraph_cpu_data {
        pid_t           last_pid;
        int             depth;
        int             ignore;
        unsigned long   enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
        struct fgraph_cpu_data          *cpu_data;

        /* Place to preserve last processed entry. */
        struct ftrace_graph_ent_entry   ent;
        struct ftrace_graph_ret_entry   ret;
        int                             failed;
        int                             cpu;
};

#define TRACE_GRAPH_INDENT      2

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN       0x1
#define TRACE_GRAPH_PRINT_CPU           0x2
#define TRACE_GRAPH_PRINT_OVERHEAD      0x4
#define TRACE_GRAPH_PRINT_PROC          0x8
#define TRACE_GRAPH_PRINT_DURATION      0x10
#define TRACE_GRAPH_PRINT_ABS_TIME      0x20

static struct tracer_opt trace_opts[] = {
        /* Display overruns? (for self-debugging purposes) */
        { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
        /* Display CPU? */
        { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
        /* Display overhead? */
        { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
        /* Display proc name/pid */
        { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
        /* Display duration of execution */
        { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
        /* Display absolute time of an entry */
        { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
        { } /* Empty entry */
};

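/*
 * These options are toggled at runtime through the trace_options file.
 * For example (a usage sketch, assuming debugfs is mounted at
 * /sys/kernel/debug):
 *
 *   echo function_graph > /sys/kernel/debug/tracing/current_tracer
 *   echo funcgraph-proc > /sys/kernel/debug/tracing/trace_options
 *   cat /sys/kernel/debug/tracing/trace
 */
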
static struct tracer_flags tracer_flags = {
        /* Don't display overruns and proc by default */
        .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
               TRACE_GRAPH_PRINT_DURATION,
        .opts = trace_opts
};

static struct trace_array *graph_array;


/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
                         unsigned long frame_pointer)
{
        unsigned long long calltime;
        int index;

        if (!current->ret_stack)
                return -EBUSY;

        /*
         * We must make sure the ret_stack is tested before we read
         * anything else.
         */
        smp_rmb();

        /* The return trace stack is full */
        if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
                atomic_inc(&current->trace_overrun);
                return -EBUSY;
        }

        calltime = trace_clock_local();

        index = ++current->curr_ret_stack;
        barrier();
        current->ret_stack[index].ret = ret;
        current->ret_stack[index].func = func;
        current->ret_stack[index].calltime = calltime;
        current->ret_stack[index].subtime = 0;
        current->ret_stack[index].fp = frame_pointer;
        *depth = index;

        return 0;
}

/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
                        unsigned long frame_pointer)
{
        int index;

        index = current->curr_ret_stack;

        if (unlikely(index < 0)) {
                ftrace_graph_stop();
                WARN_ON(1);
                /* Might as well panic, otherwise we have nowhere to go */
                *ret = (unsigned long)panic;
                return;
        }

#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
        /*
         * The arch may choose to record the frame pointer used
         * and check it here to make sure that it is what we expect it
         * to be. If gcc does not set the placeholder of the return
         * address in the frame pointer, and does a copy instead, then
         * the function graph trace will fail. This test detects this
         * case.
         *
         * Currently, x86_32 optimized for size (-Os) makes the latest
         * gcc do the above.
         */
        if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
                ftrace_graph_stop();
                WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
                     "  from func %ps return to %lx\n",
                     current->ret_stack[index].fp,
                     frame_pointer,
                     (void *)current->ret_stack[index].func,
                     current->ret_stack[index].ret);
                *ret = (unsigned long)panic;
                return;
        }
#endif

        *ret = current->ret_stack[index].ret;
        trace->func = current->ret_stack[index].func;
        trace->calltime = current->ret_stack[index].calltime;
        trace->overrun = atomic_read(&current->trace_overrun);
        trace->depth = index;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
        struct ftrace_graph_ret trace;
        unsigned long ret;

        ftrace_pop_return_trace(&trace, &ret, frame_pointer);
        trace.rettime = trace_clock_local();
        ftrace_graph_return(&trace);
        barrier();
        current->curr_ret_stack--;

        if (unlikely(!ret)) {
                ftrace_graph_stop();
                WARN_ON(1);
                /* Might as well panic. What else to do? */
                ret = (unsigned long)panic;
        }

        return ret;
}

static int __trace_graph_entry(struct trace_array *tr,
                                struct ftrace_graph_ent *trace,
                                unsigned long flags,
                                int pc)
{
        struct ftrace_event_call *call = &event_funcgraph_entry;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer = tr->buffer;
        struct ftrace_graph_ent_entry *entry;

        if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
                return 0;

        event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return 0;
        entry   = ring_buffer_event_data(event);
        entry->graph_ent                        = *trace;
        if (!filter_current_check_discard(buffer, call, entry, event))
                ring_buffer_unlock_commit(buffer, event);

        return 1;
}

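/*
 * Entry callback registered with register_ftrace_graph(). Returns
 * non-zero if the entry was recorded; a zero return lets the arch
 * entry code discard the just-pushed ret_stack entry so the matching
 * return is not traced either.
 */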
int trace_graph_entry(struct ftrace_graph_ent *trace)
{
        struct trace_array *tr = graph_array;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int ret;
        int cpu;
        int pc;

        if (!ftrace_trace_task(current))
                return 0;

        /* trace it when it is nested in an enabled function, or is itself enabled */
        if (!(trace->depth || ftrace_graph_addr(trace->func)))
                return 0;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);
        if (likely(disabled == 1)) {
                pc = preempt_count();
                ret = __trace_graph_entry(tr, trace, flags, pc);
        } else {
                ret = 0;
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);

        return ret;
}

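/*
 * When a duration threshold (tracing_thresh) is set, entry events are
 * not recorded at all; only returns that exceed the threshold are,
 * via trace_graph_thresh_return() below.
 */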
int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
{
        if (tracing_thresh)
                return 1;
        else
                return trace_graph_entry(trace);
}

static void __trace_graph_return(struct trace_array *tr,
                                struct ftrace_graph_ret *trace,
                                unsigned long flags,
                                int pc)
{
        struct ftrace_event_call *call = &event_funcgraph_exit;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer = tr->buffer;
        struct ftrace_graph_ret_entry *entry;

        if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
                return;

        event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
        entry->ret                              = *trace;
        if (!filter_current_check_discard(buffer, call, entry, event))
                ring_buffer_unlock_commit(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
        struct trace_array *tr = graph_array;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);
        if (likely(disabled == 1)) {
                pc = preempt_count();
                __trace_graph_return(tr, trace, flags, pc);
        }
        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
        graph_array = tr;

        /* Make graph_array visible before we start tracing */
        smp_mb();
}

void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
        if (tracing_thresh &&
            (trace->rettime - trace->calltime < tracing_thresh))
                return;
        else
                trace_graph_return(trace);
}

static int graph_trace_init(struct trace_array *tr)
{
        int ret;

        set_graph_array(tr);
        if (tracing_thresh)
                ret = register_ftrace_graph(&trace_graph_thresh_return,
                                            &trace_graph_thresh_entry);
        else
                ret = register_ftrace_graph(&trace_graph_return,
                                            &trace_graph_entry);
        if (ret)
                return ret;
        tracing_start_cmdline_record();

        return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
        tracing_stop_cmdline_record();
        unregister_ftrace_graph();
}

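/* Width of the widest possible CPU number, computed at init time. */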
static int max_bytes_for_cpu;

static enum print_line_t
print_graph_cpu(struct trace_seq *s, int cpu)
{
        int ret;

        /*
         * Start with a space character - to make it stand out
         * to the right a bit when trace output is pasted into
         * email:
         */
        ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

#define TRACE_GRAPH_PROCINFO_LENGTH     14

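/*
 * Print "comm-pid" centered in a column of
 * TRACE_GRAPH_PROCINFO_LENGTH characters, e.g. "   sshd-1755  ";
 * the comm is truncated so the field cannot overflow.
 */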
static enum print_line_t
print_graph_proc(struct trace_seq *s, pid_t pid)
{
        char comm[TASK_COMM_LEN];
        /* sign + log10(MAX_INT) + '\0' */
        char pid_str[11];
        int spaces = 0;
        int ret;
        int len;
        int i;

        trace_find_cmdline(pid, comm);
        comm[7] = '\0';
        sprintf(pid_str, "%d", pid);

        /* 1 stands for the "-" character */
        len = strlen(comm) + strlen(pid_str) + 1;

        if (len < TRACE_GRAPH_PROCINFO_LENGTH)
                spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

        /* First spaces to align center */
        for (i = 0; i < spaces / 2; i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Last spaces to align center */
        for (i = 0; i < spaces - (spaces / 2); i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }
        return TRACE_TYPE_HANDLED;
}


static enum print_line_t
print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
        if (!trace_seq_putc(s, ' '))
                return TRACE_TYPE_PARTIAL_LINE;

        return trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static enum print_line_t
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
        pid_t prev_pid;
        pid_t *last_pid;
        int ret;

        if (!data)
                return TRACE_TYPE_HANDLED;

        last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

        if (*last_pid == pid)
                return TRACE_TYPE_HANDLED;

        prev_pid = *last_pid;
        *last_pid = pid;

        if (prev_pid == -1)
                return TRACE_TYPE_HANDLED;
        /*
         * Context-switch trace line:
         *
         *  ------------------------------------------
         *  | 1)  migration/0--1  =>  sshd-1755
         *  ------------------------------------------
         */
        ret = trace_seq_printf(s,
                " ------------------------------------------\n");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = print_graph_cpu(s, cpu);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = print_graph_proc(s, prev_pid);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = trace_seq_printf(s, " => ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = print_graph_proc(s, pid);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = trace_seq_printf(s,
                "\n ------------------------------------------\n\n");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

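/*
 * Decide whether @curr is a "leaf": an entry whose matching return
 * event (same pid, same function) immediately follows it in the
 * buffer. If so, return that ftrace_graph_ret_entry so the caller can
 * fold the pair into a single "func();" line; otherwise return NULL.
 */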
static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
                struct ftrace_graph_ent_entry *curr)
{
        struct fgraph_data *data = iter->private;
        struct ring_buffer_iter *ring_iter = NULL;
        struct ring_buffer_event *event;
        struct ftrace_graph_ret_entry *next;

        /*
         * If the previous output failed to write to the seq buffer,
         * then we just reuse the data from before.
         */
        if (data && data->failed) {
                curr = &data->ent;
                next = &data->ret;
        } else {
                ring_iter = iter->buffer_iter[iter->cpu];

                /* First peek to compare current entry and the next one */
                if (ring_iter)
                        event = ring_buffer_iter_peek(ring_iter, NULL);
                else {
                        /*
                         * We need to consume the current entry to see
                         * the next one.
                         */
                        ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
                        event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
                                                 NULL);
                }

                if (!event)
                        return NULL;

                next = ring_buffer_event_data(event);

                if (data) {
                        /*
                         * Save current and next entries for later reference
                         * if the output fails.
                         */
                        data->ent = *curr;
                        data->ret = *next;
                }
        }

        if (next->ent.type != TRACE_GRAPH_RET)
                return NULL;

        if (curr->ent.pid != next->ent.pid ||
                        curr->graph_ent.func != next->ret.func)
                return NULL;

        /* this is a leaf, now advance the iterator */
        if (ring_iter)
                ring_buffer_read(ring_iter, NULL);

        return next;
}

/* Signal an overhead of time execution to the output */
static int
print_graph_overhead(unsigned long long duration, struct trace_seq *s)
{
        /* If the duration is not displayed, we don't need anything */
        if (!(tracer_flags.val & TRACE_GRAPH_PRINT_DURATION))
                return 1;

        /* Non-nested entry or return */
        if (duration == -1)
                return trace_seq_printf(s, "  ");

        if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
                /* Duration exceeded 100 usecs */
                if (duration > 100000ULL)
                        return trace_seq_printf(s, "! ");

                /* Duration exceeded 10 usecs */
                if (duration > 10000ULL)
                        return trace_seq_printf(s, "+ ");
        }

        return trace_seq_printf(s, "  ");
}

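/* Print the absolute timestamp as seconds.microseconds, e.g. "  105.000983 |  " */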
static int print_graph_abs_time(u64 t, struct trace_seq *s)
{
        unsigned long usecs_rem;

        usecs_rem = do_div(t, NSEC_PER_SEC);
        usecs_rem /= 1000;

        return trace_seq_printf(s, "%5lu.%06lu |  ",
                        (unsigned long)t, usecs_rem);
}

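/*
 * If @addr lies in the irq entry text section, this event marks an
 * interrupt boundary: draw "==========>" on irq entry and
 * "<==========" on irq exit instead of a normal function line.
 */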
static enum print_line_t
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
                enum trace_type type, int cpu, pid_t pid)
{
        int ret;
        struct trace_seq *s = &iter->seq;

        if (addr < (unsigned long)__irqentry_text_start ||
                addr >= (unsigned long)__irqentry_text_end)
                return TRACE_TYPE_UNHANDLED;

        /* Absolute time */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
                ret = print_graph_abs_time(iter->ts, s);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Cpu */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
                ret = print_graph_cpu(s, cpu);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Proc */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
                ret = print_graph_proc(s, pid);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
                ret = trace_seq_printf(s, " | ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* No overhead */
        ret = print_graph_overhead(-1, s);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        if (type == TRACE_GRAPH_ENT)
                ret = trace_seq_printf(s, "==========>");
        else
                ret = trace_seq_printf(s, "<==========");

        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Don't close the duration column if we don't have one */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
                trace_seq_printf(s, " |");
        ret = trace_seq_printf(s, "\n");

        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;
        return TRACE_TYPE_HANDLED;
}

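/*
 * Render a duration given in nanoseconds as microseconds with up to
 * three decimal places, left-aligned in a 7-character field followed
 * by " us ", e.g. "3.597 us   ".
 */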
enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
        unsigned long nsecs_rem = do_div(duration, 1000);
        /* log10(ULONG_MAX) + '\0' */
        char usecs_str[21];
        char nsecs_str[5];
        int ret, len;
        int i;

        sprintf(usecs_str, "%lu", (unsigned long) duration);

        /* Print usecs */
        ret = trace_seq_printf(s, "%s", usecs_str);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        len = strlen(usecs_str);

        /* Print nsecs (we don't want to exceed 7 digits) */
        if (len < 7) {
                snprintf(nsecs_str, 8 - len, "%03lu", nsecs_rem);
                ret = trace_seq_printf(s, ".%s", nsecs_str);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
                len += strlen(nsecs_str);
        }

        ret = trace_seq_printf(s, " us ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Print remaining spaces to fit the row's width */
        for (i = len; i < 7; i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }
        return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
        int ret;

        ret = trace_print_graph_duration(duration, s);
        if (ret != TRACE_TYPE_HANDLED)
                return ret;

        ret = trace_seq_printf(s, "|  ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

/* Case of a leaf function on its call entry */
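/*
 * A leaf is a function whose return event immediately follows its
 * entry event, i.e. nothing it called was traced. Entry and return
 * are folded into a single line carrying the duration, e.g.
 * (illustrative):
 *
 *  1)   0.535 us    |          kfree();
 */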
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
                struct ftrace_graph_ent_entry *entry,
                struct ftrace_graph_ret_entry *ret_entry, struct trace_seq *s)
{
        struct fgraph_data *data = iter->private;
        struct ftrace_graph_ret *graph_ret;
        struct ftrace_graph_ent *call;
        unsigned long long duration;
        int ret;
        int i;

        graph_ret = &ret_entry->ret;
        call = &entry->graph_ent;
        duration = graph_ret->rettime - graph_ret->calltime;

        if (data) {
                struct fgraph_cpu_data *cpu_data;
                int cpu = iter->cpu;

                cpu_data = per_cpu_ptr(data->cpu_data, cpu);

                /*
                 * Comments display at depth + 1. Since this
                 * is a leaf function, keep the comments
                 * equal to this depth.
                 */
                cpu_data->depth = call->depth - 1;

                /* No need to keep this function around for this depth */
                if (call->depth < FTRACE_RETFUNC_DEPTH)
                        cpu_data->enter_funcs[call->depth] = 0;
        }

        /* Overhead */
        ret = print_graph_overhead(duration, s);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Duration */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
                ret = print_graph_duration(duration, s);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Function */
        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = trace_seq_printf(s, "%ps();\n", (void *)call->func);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
                         struct ftrace_graph_ent_entry *entry,
                         struct trace_seq *s, int cpu)
{
        struct ftrace_graph_ent *call = &entry->graph_ent;
        struct fgraph_data *data = iter->private;
        int ret;
        int i;

        if (data) {
                struct fgraph_cpu_data *cpu_data;
                int cpu = iter->cpu;

                cpu_data = per_cpu_ptr(data->cpu_data, cpu);
                cpu_data->depth = call->depth;

                /* Save this function pointer to see if the exit matches */
                if (call->depth < FTRACE_RETFUNC_DEPTH)
                        cpu_data->enter_funcs[call->depth] = call->func;
        }

        /* No overhead */
        ret = print_graph_overhead(-1, s);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* No time */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
                ret = trace_seq_printf(s, "            |  ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Function */
        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = trace_seq_printf(s, "%ps() {\n", (void *)call->func);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /*
         * We already consumed the current entry to check the next one
         * and see if this is a leaf.
         */
        return TRACE_TYPE_NO_CONSUME;
}

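/*
 * Print the columns common to every line: the context-switch banner
 * when the pid changed, irq markers, and the optional absolute time,
 * CPU, proc and latency-format fields, in that order.
 */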
static enum print_line_t
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
                     int type, unsigned long addr)
{
        struct fgraph_data *data = iter->private;
        struct trace_entry *ent = iter->ent;
        int cpu = iter->cpu;
        int ret;

        /* Pid */
        if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        if (type) {
                /* Interrupt */
                ret = print_graph_irq(iter, addr, type, cpu, ent->pid);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Absolute time */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
                ret = print_graph_abs_time(iter->ts, s);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Cpu */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
                ret = print_graph_cpu(s, cpu);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Proc */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
                ret = print_graph_proc(s, ent->pid);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;

                ret = trace_seq_printf(s, " | ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Latency format */
        if (trace_flags & TRACE_ITER_LATENCY_FMT) {
                ret = print_graph_lat_fmt(s, ent);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        return 0;
}

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
                        struct trace_iterator *iter)
{
        struct fgraph_data *data = iter->private;
        struct ftrace_graph_ent *call = &field->graph_ent;
        struct ftrace_graph_ret_entry *leaf_ret;
        enum print_line_t ret;
        int cpu = iter->cpu;

        if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func))
                return TRACE_TYPE_PARTIAL_LINE;

        leaf_ret = get_return_for_leaf(iter, field);
        if (leaf_ret)
                ret = print_graph_entry_leaf(iter, field, leaf_ret, s);
        else
                ret = print_graph_entry_nested(iter, field, s, cpu);

        if (data) {
                /*
                 * If we failed to write our output, then we need to make
                 * a note of it, because we already consumed our entry.
                 */
                if (s->full) {
                        data->failed = 1;
                        data->cpu = cpu;
                } else
                        data->failed = 0;
        }

        return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
                   struct trace_entry *ent, struct trace_iterator *iter)
{
        unsigned long long duration = trace->rettime - trace->calltime;
        struct fgraph_data *data = iter->private;
        pid_t pid = ent->pid;
        int cpu = iter->cpu;
        int func_match = 1;
        int ret;
        int i;

        if (data) {
                struct fgraph_cpu_data *cpu_data;
                int cpu = iter->cpu;

                cpu_data = per_cpu_ptr(data->cpu_data, cpu);

                /*
                 * Comments display at depth + 1. This is the
                 * return from a function, so we now want the comments
                 * to display at the same level as the bracket.
                 */
                cpu_data->depth = trace->depth - 1;

                if (trace->depth < FTRACE_RETFUNC_DEPTH) {
                        if (cpu_data->enter_funcs[trace->depth] != trace->func)
                                func_match = 0;
                        cpu_data->enter_funcs[trace->depth] = 0;
                }
        }

        if (print_graph_prologue(iter, s, 0, 0))
                return TRACE_TYPE_PARTIAL_LINE;

        /* Overhead */
        ret = print_graph_overhead(duration, s);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Duration */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
                ret = print_graph_duration(duration, s);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Closing brace */
        for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /*
         * If the return function does not have a matching entry,
         * then the entry was lost. Instead of just printing
         * the '}' and letting the user guess what function this
         * belongs to, write out the function name.
         */
        if (func_match) {
                ret = trace_seq_printf(s, "}\n");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        } else {
                ret = trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Overrun */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
                ret = trace_seq_printf(s, " (Overruns: %lu)\n",
                                        trace->overrun);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET, cpu, pid);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

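/*
 * Events that are not graph entries or returns (e.g. trace_printk()
 * output) are rendered as a C-style comment, indented one level
 * deeper than the current function.
 */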
static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
                    struct trace_iterator *iter)
{
        unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
        struct fgraph_data *data = iter->private;
        struct trace_event *event;
        int depth = 0;
        int ret;
        int i;

        if (data)
                depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

        if (print_graph_prologue(iter, s, 0, 0))
                return TRACE_TYPE_PARTIAL_LINE;

        /* No overhead */
        ret = print_graph_overhead(-1, s);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* No time */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
                ret = trace_seq_printf(s, "            |  ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Indentation */
        if (depth > 0)
                for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
                        ret = trace_seq_printf(s, " ");
                        if (!ret)
                                return TRACE_TYPE_PARTIAL_LINE;
                }

        /* The comment */
        ret = trace_seq_printf(s, "/* ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        switch (iter->ent->type) {
        case TRACE_BPRINT:
                ret = trace_print_bprintk_msg_only(iter);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
                break;
        case TRACE_PRINT:
                ret = trace_print_printk_msg_only(iter);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
                break;
        default:
                event = ftrace_find_event(ent->type);
                if (!event)
                        return TRACE_TYPE_UNHANDLED;

                ret = event->trace(iter, sym_flags);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
        }

        /* Strip ending newline */
        if (s->buffer[s->len - 1] == '\n') {
                s->buffer[s->len - 1] = '\0';
                s->len--;
        }

        ret = trace_seq_printf(s, " */\n");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}


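/*
 * Main print_line callback: dispatch on the event type, first
 * replaying a saved entry if the previous output attempt overflowed
 * the seq buffer.
 */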
enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
        struct ftrace_graph_ent_entry *field;
        struct fgraph_data *data = iter->private;
        struct trace_entry *entry = iter->ent;
        struct trace_seq *s = &iter->seq;
        int cpu = iter->cpu;
        int ret;

        if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
                per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
                return TRACE_TYPE_HANDLED;
        }

        /*
         * If the last output failed, there's a possibility we need
         * to print out the saved entry, which would otherwise never
         * go out.
         */
        if (data && data->failed) {
                field = &data->ent;
                iter->cpu = data->cpu;
                ret = print_graph_entry(field, s, iter);
                if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
                        per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
                        ret = TRACE_TYPE_NO_CONSUME;
                }
                iter->cpu = cpu;
                return ret;
        }

        switch (entry->type) {
        case TRACE_GRAPH_ENT: {
                /*
                 * print_graph_entry() may consume the current event,
                 * thus @field may become invalid, so we need to save it.
                 * sizeof(struct ftrace_graph_ent_entry) is very small,
                 * so it can safely be saved on the stack.
                 */
                struct ftrace_graph_ent_entry saved;
                trace_assign_type(field, entry);
                saved = *field;
                return print_graph_entry(&saved, s, iter);
        }
        case TRACE_GRAPH_RET: {
                struct ftrace_graph_ret_entry *field;
                trace_assign_type(field, entry);
                return print_graph_return(&field->ret, s, entry, iter);
        }
        default:
                return print_graph_comment(s, entry, iter);
        }

        return TRACE_TYPE_HANDLED;
}

static void print_lat_header(struct seq_file *s)
{
        static const char spaces[] = "                " /* 16 spaces */
                "    "                                  /* 4 spaces */
                "                 ";                    /* 17 spaces */
        int size = 0;

        if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
                size += 16;
        if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
                size += 4;
        if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
                size += 17;

        seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
        seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
        seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
        seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
        seq_printf(s, "#%.*s||| / _-=> lock-depth      \n", size, spaces);
        seq_printf(s, "#%.*s|||| /                     \n", size, spaces);
}

static void print_graph_headers(struct seq_file *s)
{
        int lat = trace_flags & TRACE_ITER_LATENCY_FMT;

        if (lat)
                print_lat_header(s);

        /* 1st line */
        seq_printf(s, "#");
        if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
                seq_printf(s, "     TIME       ");
        if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
                seq_printf(s, " CPU");
        if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
                seq_printf(s, "  TASK/PID       ");
        if (lat)
                seq_printf(s, "|||||");
        if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
                seq_printf(s, "  DURATION   ");
        seq_printf(s, "               FUNCTION CALLS\n");

        /* 2nd line */
        seq_printf(s, "#");
        if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
                seq_printf(s, "      |         ");
        if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
                seq_printf(s, " |  ");
        if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
                seq_printf(s, "   |    |        ");
        if (lat)
                seq_printf(s, "|||||");
        if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
                seq_printf(s, "   |   |      ");
        seq_printf(s, "               |   |   |   |\n");
}

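/*
 * Allocate per-iterator state; last_pid starts at -1 so the very
 * first event does not print a spurious context-switch banner.
 */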
static void graph_trace_open(struct trace_iterator *iter)
{
        /* pid and depth on the last trace processed */
        struct fgraph_data *data;
        int cpu;

        iter->private = NULL;

        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                goto out_err;

        data->cpu_data = alloc_percpu(struct fgraph_cpu_data);
        if (!data->cpu_data)
                goto out_err_free;

        for_each_possible_cpu(cpu) {
                pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
                int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
                int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
                *pid = -1;
                *depth = 0;
                *ignore = 0;
        }

        iter->private = data;

        return;

 out_err_free:
        kfree(data);
 out_err:
        pr_warning("function graph tracer: not enough memory\n");
}

static void graph_trace_close(struct trace_iterator *iter)
{
        struct fgraph_data *data = iter->private;

        if (data) {
                free_percpu(data->cpu_data);
                kfree(data);
        }
}

static struct tracer graph_trace __read_mostly = {
        .name           = "function_graph",
        .open           = graph_trace_open,
        .pipe_open      = graph_trace_open,
        .close          = graph_trace_close,
        .pipe_close     = graph_trace_close,
        .wait_pipe      = poll_wait_pipe,
        .init           = graph_trace_init,
        .reset          = graph_trace_reset,
        .print_line     = print_graph_function,
        .print_header   = print_graph_headers,
        .flags          = &tracer_flags,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_function_graph,
#endif
};

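/*
 * snprintf(NULL, 0, ...) returns the number of characters that would
 * have been written, i.e. the width of the largest possible CPU id.
 */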
static __init int init_graph_trace(void)
{
        max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

        return register_tracer(&graph_trace);
}

device_initcall(init_graph_trace);