Merge branch 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 2974bc7..6fd486e 100644
@@ -34,6 +34,9 @@ static int trace_type __read_mostly;
 
 static int save_lat_flag;
 
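+/*
+ * Toggling the display-graph flag restarts the tracer in the other
+ * mode, so the start/stop routines are needed before their definition.
+ */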
+static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
+static int start_irqsoff_tracer(struct trace_array *tr, int graph);
+
 #ifdef CONFIG_PREEMPT_TRACER
 static inline int
 preempt_trace(void)
@@ -55,6 +58,23 @@ irq_trace(void)
 # define irq_trace() (0)
 #endif
 
+#define TRACE_DISPLAY_GRAPH    1
+
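+/*
+ * While this tracer is current, the option appears under the tracing
+ * debugfs directory, e.g.:
+ *   echo 1 > /sys/kernel/debug/tracing/options/display-graph
+ */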
+static struct tracer_opt trace_opts[] = {
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       /* display latency trace as call graph */
+       { TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) },
+#endif
+       { } /* Empty entry */
+};
+
+static struct tracer_flags tracer_flags = {
+       .val  = 0,
+       .opts = trace_opts,
+};
+
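+/* nonzero when the latency trace is displayed as a call graph */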
+#define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH)
+
 /*
  * Sequence count - we record it when starting a measurement and
  * skip the latency if the sequence has changed - some other section
@@ -108,6 +128,202 @@ static struct ftrace_ops trace_ops __read_mostly =
 };
 #endif /* CONFIG_FUNCTION_TRACER */
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
+{
+       int cpu;
+
+       if (!(bit & TRACE_DISPLAY_GRAPH))
+               return -EINVAL;
+
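+       /* nothing to do if the flag already matches the requested state */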
+       if (!(is_graph() ^ set))
+               return 0;
+
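+       /* stop in the old mode; !set is the mode being left behind */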
+       stop_irqsoff_tracer(irqsoff_trace, !set);
+
+       for_each_possible_cpu(cpu)
+               per_cpu(tracing_cpu, cpu) = 0;
+
+       tracing_max_latency = 0;
+       tracing_reset_online_cpus(irqsoff_trace);
+
+       return start_irqsoff_tracer(irqsoff_trace, set);
+}
+
+static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
+{
+       struct trace_array *tr = irqsoff_trace;
+       struct trace_array_cpu *data;
+       unsigned long flags;
+       long disabled;
+       int ret;
+       int cpu;
+       int pc;
+
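+       /* trace only while this CPU is in an irqs-off critical section */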
+       cpu = raw_smp_processor_id();
+       if (likely(!per_cpu(tracing_cpu, cpu)))
+               return 0;
+
+       local_save_flags(flags);
+       /* slight chance to get a false positive on tracing_cpu */
+       if (!irqs_disabled_flags(flags))
+               return 0;
+
+       data = tr->data[cpu];
+       disabled = atomic_inc_return(&data->disabled);
+
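+       /* record only if nothing else on this CPU disabled the buffer */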
+       if (likely(disabled == 1)) {
+               pc = preempt_count();
+               ret = __trace_graph_entry(tr, trace, flags, pc);
+       } else {
+               ret = 0;
+       }
+
+       atomic_dec(&data->disabled);
+       return ret;
+}
+
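+/* exit-side counterpart of irqsoff_graph_entry() above */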
+static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
+{
+       struct trace_array *tr = irqsoff_trace;
+       struct trace_array_cpu *data;
+       unsigned long flags;
+       long disabled;
+       int cpu;
+       int pc;
+
+       cpu = raw_smp_processor_id();
+       if (likely(!per_cpu(tracing_cpu, cpu)))
+               return;
+
+       local_save_flags(flags);
+       /* slight chance to get a false positive on tracing_cpu */
+       if (!irqs_disabled_flags(flags))
+               return;
+
+       data = tr->data[cpu];
+       disabled = atomic_inc_return(&data->disabled);
+
+       if (likely(disabled == 1)) {
+               pc = preempt_count();
+               __trace_graph_return(tr, trace, flags, pc);
+       }
+
+       atomic_dec(&data->disabled);
+}
+
+static void irqsoff_trace_open(struct trace_iterator *iter)
+{
+       if (is_graph())
+               graph_trace_open(iter);
+}
+
+static void irqsoff_trace_close(struct trace_iterator *iter)
+{
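+       /* graph_trace_open() stored its per-iterator data in iter->private */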
+       if (iter->private)
+               graph_trace_close(iter);
+}
+
+#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
+                           TRACE_GRAPH_PRINT_PROC)
+
+static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
+{
+       u32 flags = GRAPH_TRACER_FLAGS;
+
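+       /* latency format shows durations; otherwise print absolute timestamps */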
+       if (trace_flags & TRACE_ITER_LATENCY_FMT)
+               flags |= TRACE_GRAPH_PRINT_DURATION;
+       else
+               flags |= TRACE_GRAPH_PRINT_ABS_TIME;
+
+       /*
+        * In graph mode call the graph tracer output function,
+        * otherwise go with the TRACE_FN event handler
+        */
+       if (is_graph())
+               return print_graph_function_flags(iter, flags);
+
+       return TRACE_TYPE_UNHANDLED;
+}
+
+static void irqsoff_print_header(struct seq_file *s)
+{
+       if (is_graph()) {
+               struct trace_iterator *iter = s->private;
+               u32 flags = GRAPH_TRACER_FLAGS;
+
+               if (trace_flags & TRACE_ITER_LATENCY_FMT) {
+                       /* print nothing if the buffers are empty */
+                       if (trace_empty(iter))
+                               return;
+
+                       print_trace_header(s, iter);
+                       flags |= TRACE_GRAPH_PRINT_DURATION;
+               } else {
+                       flags |= TRACE_GRAPH_PRINT_ABS_TIME;
+               }
+
+               print_graph_headers_flags(s, flags);
+       } else {
+               trace_default_header(s);
+       }
+}
+
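+/*
+ * Record a plain function hit as a zero-duration entry/return pair so
+ * it still shows up as a leaf in the graph output.
+ */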
+static void
+trace_graph_function(struct trace_array *tr,
+                unsigned long ip, unsigned long flags, int pc)
+{
+       u64 time = trace_clock_local();
+       struct ftrace_graph_ent ent = {
+               .func  = ip,
+               .depth = 0,
+       };
+       struct ftrace_graph_ret ret = {
+               .func     = ip,
+               .depth    = 0,
+               .calltime = time,
+               .rettime  = time,
+       };
+
+       __trace_graph_entry(tr, &ent, flags, pc);
+       __trace_graph_return(tr, &ret, flags, pc);
+}
+
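+/* record a function hit in whichever output format is currently selected */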
+static void
+__trace_function(struct trace_array *tr,
+                unsigned long ip, unsigned long parent_ip,
+                unsigned long flags, int pc)
+{
+       if (!is_graph()) {
+               trace_function(tr, ip, parent_ip, flags, pc);
+       } else {
+               trace_graph_function(tr, parent_ip, flags, pc);
+               trace_graph_function(tr, ip, flags, pc);
+       }
+}
+
+#else
+#define __trace_function trace_function
+
+static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
+{
+       return -EINVAL;
+}
+
+static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
+{
+       return -1;
+}
+
+static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
+{
+       return TRACE_TYPE_UNHANDLED;
+}
+
+static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
+static void irqsoff_print_header(struct seq_file *s) { }
+static void irqsoff_trace_open(struct trace_iterator *iter) { }
+static void irqsoff_trace_close(struct trace_iterator *iter) { }
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
 /*
  * Should this new latency be reported/recorded?
  */
@@ -150,7 +366,7 @@ check_critical_timing(struct trace_array *tr,
        if (!report_latency(delta))
                goto out_unlock;
 
-       trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
+       __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
        /* Skip 5 functions to get to the irq/preempt enable function */
        __trace_stack(tr, flags, 5, pc);
 
@@ -172,7 +388,7 @@ out_unlock:
 out:
        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
-       trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
+       __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
 }
 
 static inline void
@@ -204,7 +420,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
 
        local_save_flags(flags);
 
-       trace_function(tr, ip, parent_ip, flags, preempt_count());
+       __trace_function(tr, ip, parent_ip, flags, preempt_count());
 
        per_cpu(tracing_cpu, cpu) = 1;
 
@@ -238,7 +454,7 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
        atomic_inc(&data->disabled);
 
        local_save_flags(flags);
-       trace_function(tr, ip, parent_ip, flags, preempt_count());
+       __trace_function(tr, ip, parent_ip, flags, preempt_count());
        check_critical_timing(tr, data, parent_ip ? : ip, cpu);
        data->critical_start = 0;
        atomic_dec(&data->disabled);
@@ -347,19 +563,32 @@ void trace_preempt_off(unsigned long a0, unsigned long a1)
 }
 #endif /* CONFIG_PREEMPT_TRACER */
 
-static void start_irqsoff_tracer(struct trace_array *tr)
+static int start_irqsoff_tracer(struct trace_array *tr, int graph)
 {
-       register_ftrace_function(&trace_ops);
-       if (tracing_is_enabled())
+       int ret = 0;
+
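+       /* register the callback that matches the requested output mode */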
+       if (!graph)
+               ret = register_ftrace_function(&trace_ops);
+       else
+               ret = register_ftrace_graph(&irqsoff_graph_return,
+                                           &irqsoff_graph_entry);
+
+       if (!ret && tracing_is_enabled())
                tracer_enabled = 1;
        else
                tracer_enabled = 0;
+
+       return ret;
 }
 
-static void stop_irqsoff_tracer(struct trace_array *tr)
+static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
 {
        tracer_enabled = 0;
-       unregister_ftrace_function(&trace_ops);
+
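+       /* unregister whichever callback start_irqsoff_tracer() installed */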
+       if (!graph)
+               unregister_ftrace_function(&trace_ops);
+       else
+               unregister_ftrace_graph();
 }
 
 static void __irqsoff_tracer_init(struct trace_array *tr)
@@ -372,12 +601,14 @@ static void __irqsoff_tracer_init(struct trace_array *tr)
        /* make sure that the tracer is visible */
        smp_wmb();
        tracing_reset_online_cpus(tr);
-       start_irqsoff_tracer(tr);
+
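+       /* registration can fail, e.g. when another graph tracer is active */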
+       if (start_irqsoff_tracer(tr, is_graph()))
+               printk(KERN_ERR "failed to start irqsoff tracer\n");
 }
 
 static void irqsoff_tracer_reset(struct trace_array *tr)
 {
-       stop_irqsoff_tracer(tr);
+       stop_irqsoff_tracer(tr, is_graph());
 
        if (!save_lat_flag)
                trace_flags &= ~TRACE_ITER_LATENCY_FMT;
@@ -409,9 +640,15 @@ static struct tracer irqsoff_tracer __read_mostly =
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = 1,
+       .print_header   = irqsoff_print_header,
+       .print_line     = irqsoff_print_line,
+       .flags          = &tracer_flags,
+       .set_flag       = irqsoff_set_flag,
 #ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_irqsoff,
 #endif
+       .open           = irqsoff_trace_open,
+       .close          = irqsoff_trace_close,
 };
 # define register_irqsoff(trace) register_tracer(&trace)
 #else
@@ -435,9 +672,15 @@ static struct tracer preemptoff_tracer __read_mostly =
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = 1,
+       .print_header   = irqsoff_print_header,
+       .print_line     = irqsoff_print_line,
+       .flags          = &tracer_flags,
+       .set_flag       = irqsoff_set_flag,
 #ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_preemptoff,
 #endif
+       .open           = irqsoff_trace_open,
+       .close          = irqsoff_trace_close,
 };
 # define register_preemptoff(trace) register_tracer(&trace)
 #else
@@ -463,9 +706,15 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = 1,
+       .print_header   = irqsoff_print_header,
+       .print_line     = irqsoff_print_line,
+       .flags          = &tracer_flags,
+       .set_flag       = irqsoff_set_flag,
 #ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_preemptirqsoff,
 #endif
+       .open           = irqsoff_trace_open,
+       .close          = irqsoff_trace_close,
 };
 
 # define register_preemptirqsoff(trace) register_tracer(&trace)