Merge branch 'linus' into tracing/core
author    Ingo Molnar <mingo@elte.hu>  Tue, 7 Apr 2009 11:47:33 +0000 (13:47 +0200)
committer Ingo Molnar <mingo@elte.hu>  Tue, 7 Apr 2009 11:47:45 +0000 (13:47 +0200)
Merge reason: update to upstream tracing facilities

Signed-off-by: Ingo Molnar <mingo@elte.hu>
12 files changed:
include/linux/ftrace.h
kernel/trace/Kconfig
kernel/trace/ftrace.c
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_branch.c
kernel/trace/trace_functions_graph.c
kernel/trace/trace_output.c
kernel/trace/trace_output.h
kernel/trace/trace_stat.c
kernel/trace/trace_stat.h
kernel/trace/trace_workqueue.c

diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 015a3d2..9e0a8d2 100644
@@ -365,6 +365,7 @@ struct ftrace_ret_stack {
        unsigned long ret;
        unsigned long func;
        unsigned long long calltime;
+       unsigned long long subtime;
 };
 
 /*
@@ -376,8 +377,6 @@ extern void return_to_handler(void);
 
 extern int
 ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth);
-extern void
-ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret);
 
 /*
  * Sometimes we don't want to trace a function with the function
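
The subtime field added above accumulates the run time of a frame's child
calls, so the function profiler can report self time (total minus children)
when child time is not wanted. A minimal, self-contained model of that
bookkeeping, with made-up names and numbers (the real accounting is in
profile_graph_return() in kernel/trace/ftrace.c below):

	#include <stdio.h>

	/* Toy model: when a child returns, its total time is added to the
	 * parent frame's subtime; when the parent returns, its self time
	 * is total time minus subtime. */
	struct frame {
		unsigned long long calltime;
		unsigned long long subtime;
	};

	int main(void)
	{
		struct frame parent = { .calltime = 0, .subtime = 0 };
		unsigned long long parent_total = 100000; /* ns; 100 us total */
		unsigned long long child_total = 60000;   /* ns; one 60 us child */

		parent.subtime += child_total;      /* done at child return */

		/* prints 40000: time spent in the parent itself */
		printf("self time: %llu ns\n", parent_total - parent.subtime);
		return 0;
	}
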
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 2246141..23b96eb 100644
@@ -104,6 +104,7 @@ config FUNCTION_GRAPH_TRACER
          the return value. This is done by setting the current return 
          address on the current task structure into a stack of calls.
 
+
 config IRQSOFF_TRACER
        bool "Interrupts-off Latency Tracer"
        default n
@@ -375,6 +376,20 @@ config DYNAMIC_FTRACE
         were made. If so, it runs stop_machine (stops all CPUS)
         and modifies the code to jump over the call to ftrace.
 
+config FUNCTION_PROFILER
+       bool "Kernel function profiler"
+       depends on FUNCTION_TRACER
+       default n
+       help
+        This option enables the kernel function profiler. A file is created
+        in debugfs called function_profile_enabled, which defaults to zero.
+        Echoing a 1 into this file starts profiling; echoing a 0 stops it.
+        A file called "functions" in the trace_stat directory shows the
+        list of functions that have been hit and their counters.
+
+        If in doubt, say N.
+
 config FTRACE_MCOUNT_RECORD
        def_bool y
        depends on DYNAMIC_FTRACE
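
The help text above describes the runtime interface. As a usage sketch,
assuming debugfs is mounted at the conventional /sys/kernel/debug (the paths
follow the files this patch creates; error handling is kept minimal):

	#include <stdio.h>

	int main(void)
	{
		char line[256];
		FILE *f;

		/* start profiling: write 1 to function_profile_enabled */
		f = fopen("/sys/kernel/debug/tracing/function_profile_enabled", "w");
		if (!f)
			return 1;
		fputs("1\n", f);
		fclose(f);

		/* ... run the workload of interest ... */

		/* read CPU 0's stats; one "function<N>" file exists per cpu */
		f = fopen("/sys/kernel/debug/tracing/trace_stat/function0", "r");
		if (!f)
			return 1;
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
		return 0;
	}
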
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index f1ed080..678e3d6 100644
@@ -33,7 +33,8 @@
 
 #include <asm/ftrace.h>
 
-#include "trace.h"
+#include "trace_output.h"
+#include "trace_stat.h"
 
 #define FTRACE_WARN_ON(cond)                   \
        do {                                    \
@@ -68,7 +69,7 @@ static DEFINE_MUTEX(ftrace_lock);
 
 static struct ftrace_ops ftrace_list_end __read_mostly =
 {
-       .func = ftrace_stub,
+       .func           = ftrace_stub,
 };
 
 static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
@@ -240,6 +241,576 @@ static void ftrace_update_pid_func(void)
 #endif
 }
 
+#ifdef CONFIG_FUNCTION_PROFILER
+struct ftrace_profile {
+       struct hlist_node               node;
+       unsigned long                   ip;
+       unsigned long                   counter;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       unsigned long long              time;
+#endif
+};
+
+struct ftrace_profile_page {
+       struct ftrace_profile_page      *next;
+       unsigned long                   index;
+       struct ftrace_profile           records[];
+};
+
+struct ftrace_profile_stat {
+       atomic_t                        disabled;
+       struct hlist_head               *hash;
+       struct ftrace_profile_page      *pages;
+       struct ftrace_profile_page      *start;
+       struct tracer_stat              stat;
+};
+
+#define PROFILE_RECORDS_SIZE                                           \
+       (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
+
+#define PROFILES_PER_PAGE                                      \
+       (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
+
+static int ftrace_profile_bits __read_mostly;
+static int ftrace_profile_enabled __read_mostly;
+
+/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
+static DEFINE_MUTEX(ftrace_profile_lock);
+
+static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
+
+#define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */
+
+static void *
+function_stat_next(void *v, int idx)
+{
+       struct ftrace_profile *rec = v;
+       struct ftrace_profile_page *pg;
+
+       pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
+
+ again:
+       rec++;
+       if ((void *)rec >= (void *)&pg->records[pg->index]) {
+               pg = pg->next;
+               if (!pg)
+                       return NULL;
+               rec = &pg->records[0];
+               if (!rec->counter)
+                       goto again;
+       }
+
+       return rec;
+}
+
+static void *function_stat_start(struct tracer_stat *trace)
+{
+       struct ftrace_profile_stat *stat =
+               container_of(trace, struct ftrace_profile_stat, stat);
+
+       if (!stat || !stat->start)
+               return NULL;
+
+       return function_stat_next(&stat->start->records[0], 0);
+}
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+/* with function graph configured, compare by total time */
+static int function_stat_cmp(void *p1, void *p2)
+{
+       struct ftrace_profile *a = p1;
+       struct ftrace_profile *b = p2;
+
+       if (a->time < b->time)
+               return -1;
+       if (a->time > b->time)
+               return 1;
+       else
+               return 0;
+}
+#else
+/* without function graph, compare by hit count */
+static int function_stat_cmp(void *p1, void *p2)
+{
+       struct ftrace_profile *a = p1;
+       struct ftrace_profile *b = p2;
+
+       if (a->counter < b->counter)
+               return -1;
+       if (a->counter > b->counter)
+               return 1;
+       else
+               return 0;
+}
+#endif
+
+static int function_stat_headers(struct seq_file *m)
+{
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       seq_printf(m, "  Function                               "
+                  "Hit    Time            Avg\n"
+                     "  --------                               "
+                  "---    ----            ---\n");
+#else
+       seq_printf(m, "  Function                               Hit\n"
+                     "  --------                               ---\n");
+#endif
+       return 0;
+}
+
+static int function_stat_show(struct seq_file *m, void *v)
+{
+       struct ftrace_profile *rec = v;
+       char str[KSYM_SYMBOL_LEN];
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       static DEFINE_MUTEX(mutex);
+       static struct trace_seq s;
+       unsigned long long avg;
+#endif
+
+       kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
+       seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       seq_printf(m, "    ");
+       avg = rec->time;
+       do_div(avg, rec->counter);
+
+       mutex_lock(&mutex);
+       trace_seq_init(&s);
+       trace_print_graph_duration(rec->time, &s);
+       trace_seq_puts(&s, "    ");
+       trace_print_graph_duration(avg, &s);
+       trace_print_seq(m, &s);
+       mutex_unlock(&mutex);
+#endif
+       seq_putc(m, '\n');
+
+       return 0;
+}
+
+static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
+{
+       struct ftrace_profile_page *pg;
+
+       pg = stat->pages = stat->start;
+
+       while (pg) {
+               memset(pg->records, 0, PROFILE_RECORDS_SIZE);
+               pg->index = 0;
+               pg = pg->next;
+       }
+
+       memset(stat->hash, 0,
+              FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
+}
+
+int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
+{
+       struct ftrace_profile_page *pg;
+       int functions;
+       int pages;
+       int i;
+
+       /* If we already allocated, do nothing */
+       if (stat->pages)
+               return 0;
+
+       stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
+       if (!stat->pages)
+               return -ENOMEM;
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+       functions = ftrace_update_tot_cnt;
+#else
+       /*
+        * We do not know the number of functions that exist because
+        * dynamic tracing is what counts them. From past experience
+        * we have around 20K functions. That should be more than enough.
+        * It is highly unlikely we will execute every function in
+        * the kernel.
+        */
+       functions = 20000;
+#endif
+
+       pg = stat->start = stat->pages;
+
+       pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
+
+       for (i = 0; i < pages; i++) {
+               pg->next = (void *)get_zeroed_page(GFP_KERNEL);
+               if (!pg->next)
+                       goto out_free;
+               pg = pg->next;
+       }
+
+       return 0;
+
+ out_free:
+       pg = stat->start;
+       while (pg) {
+               unsigned long tmp = (unsigned long)pg;
+
+               pg = pg->next;
+               free_page(tmp);
+       }
+
+       free_page((unsigned long)stat->pages);
+       stat->pages = NULL;
+       stat->start = NULL;
+
+       return -ENOMEM;
+}
+
+static int ftrace_profile_init_cpu(int cpu)
+{
+       struct ftrace_profile_stat *stat;
+       int size;
+
+       stat = &per_cpu(ftrace_profile_stats, cpu);
+
+       if (stat->hash) {
+               /* If the profile is already created, simply reset it */
+               ftrace_profile_reset(stat);
+               return 0;
+       }
+
+       /*
+        * We are profiling all functions, but usually only a few thousand
+        * functions are hit. We'll make a hash of 1024 items.
+        */
+       size = FTRACE_PROFILE_HASH_SIZE;
+
+       stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
+
+       if (!stat->hash)
+               return -ENOMEM;
+
+       if (!ftrace_profile_bits) {
+               size--;
+
+               for (; size; size >>= 1)
+                       ftrace_profile_bits++;
+       }
+
+       /* Preallocate the function profiling pages */
+       if (ftrace_profile_pages_init(stat) < 0) {
+               kfree(stat->hash);
+               stat->hash = NULL;
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+static int ftrace_profile_init(void)
+{
+       int cpu;
+       int ret = 0;
+
+       for_each_online_cpu(cpu) {
+               ret = ftrace_profile_init_cpu(cpu);
+               if (ret)
+                       break;
+       }
+
+       return ret;
+}
+
+/* interrupts must be disabled */
+static struct ftrace_profile *
+ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
+{
+       struct ftrace_profile *rec;
+       struct hlist_head *hhd;
+       struct hlist_node *n;
+       unsigned long key;
+
+       key = hash_long(ip, ftrace_profile_bits);
+       hhd = &stat->hash[key];
+
+       if (hlist_empty(hhd))
+               return NULL;
+
+       hlist_for_each_entry_rcu(rec, n, hhd, node) {
+               if (rec->ip == ip)
+                       return rec;
+       }
+
+       return NULL;
+}
+
+static void ftrace_add_profile(struct ftrace_profile_stat *stat,
+                              struct ftrace_profile *rec)
+{
+       unsigned long key;
+
+       key = hash_long(rec->ip, ftrace_profile_bits);
+       hlist_add_head_rcu(&rec->node, &stat->hash[key]);
+}
+
+/*
+ * The memory is already allocated; this simply finds a new record to use.
+ */
+static struct ftrace_profile *
+ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
+{
+       struct ftrace_profile *rec = NULL;
+
+       /* prevent recursion (from NMIs) */
+       if (atomic_inc_return(&stat->disabled) != 1)
+               goto out;
+
+       /*
+        * Try to find the function again since an NMI
+        * could have added it
+        */
+       rec = ftrace_find_profiled_func(stat, ip);
+       if (rec)
+               goto out;
+
+       if (stat->pages->index == PROFILES_PER_PAGE) {
+               if (!stat->pages->next)
+                       goto out;
+               stat->pages = stat->pages->next;
+       }
+
+       rec = &stat->pages->records[stat->pages->index++];
+       rec->ip = ip;
+       ftrace_add_profile(stat, rec);
+
+ out:
+       atomic_dec(&stat->disabled);
+
+       return rec;
+}
+
+static void
+function_profile_call(unsigned long ip, unsigned long parent_ip)
+{
+       struct ftrace_profile_stat *stat;
+       struct ftrace_profile *rec;
+       unsigned long flags;
+
+       if (!ftrace_profile_enabled)
+               return;
+
+       local_irq_save(flags);
+
+       stat = &__get_cpu_var(ftrace_profile_stats);
+       if (!stat->hash)
+               goto out;
+
+       rec = ftrace_find_profiled_func(stat, ip);
+       if (!rec) {
+               rec = ftrace_profile_alloc(stat, ip);
+               if (!rec)
+                       goto out;
+       }
+
+       rec->counter++;
+ out:
+       local_irq_restore(flags);
+}
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static int profile_graph_entry(struct ftrace_graph_ent *trace)
+{
+       function_profile_call(trace->func, 0);
+       return 1;
+}
+
+static void profile_graph_return(struct ftrace_graph_ret *trace)
+{
+       struct ftrace_profile_stat *stat;
+       unsigned long long calltime;
+       struct ftrace_profile *rec;
+       unsigned long flags;
+
+       local_irq_save(flags);
+       stat = &__get_cpu_var(ftrace_profile_stats);
+       if (!stat->hash)
+               goto out;
+
+       calltime = trace->rettime - trace->calltime;
+
+       if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
+               int index;
+
+               index = trace->depth;
+
+               /* Add this call's time to the parent's subtime to subtract */
+               if (index)
+                       current->ret_stack[index - 1].subtime += calltime;
+
+               if (current->ret_stack[index].subtime < calltime)
+                       calltime -= current->ret_stack[index].subtime;
+               else
+                       calltime = 0;
+       }
+
+       rec = ftrace_find_profiled_func(stat, trace->func);
+       if (rec)
+               rec->time += calltime;
+
+ out:
+       local_irq_restore(flags);
+}
+
+static int register_ftrace_profiler(void)
+{
+       return register_ftrace_graph(&profile_graph_return,
+                                    &profile_graph_entry);
+}
+
+static void unregister_ftrace_profiler(void)
+{
+       unregister_ftrace_graph();
+}
+#else
+static struct ftrace_ops ftrace_profile_ops __read_mostly =
+{
+       .func           = function_profile_call,
+};
+
+static int register_ftrace_profiler(void)
+{
+       return register_ftrace_function(&ftrace_profile_ops);
+}
+
+static void unregister_ftrace_profiler(void)
+{
+       unregister_ftrace_function(&ftrace_profile_ops);
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+static ssize_t
+ftrace_profile_write(struct file *filp, const char __user *ubuf,
+                    size_t cnt, loff_t *ppos)
+{
+       unsigned long val;
+       char buf[64];           /* big enough to hold a number */
+       int ret;
+
+       if (cnt >= sizeof(buf))
+               return -EINVAL;
+
+       if (copy_from_user(&buf, ubuf, cnt))
+               return -EFAULT;
+
+       buf[cnt] = 0;
+
+       ret = strict_strtoul(buf, 10, &val);
+       if (ret < 0)
+               return ret;
+
+       val = !!val;
+
+       mutex_lock(&ftrace_profile_lock);
+       if (ftrace_profile_enabled ^ val) {
+               if (val) {
+                       ret = ftrace_profile_init();
+                       if (ret < 0) {
+                               cnt = ret;
+                               goto out;
+                       }
+
+                       ret = register_ftrace_profiler();
+                       if (ret < 0) {
+                               cnt = ret;
+                               goto out;
+                       }
+                       ftrace_profile_enabled = 1;
+               } else {
+                       ftrace_profile_enabled = 0;
+                       unregister_ftrace_profiler();
+               }
+       }
+ out:
+       mutex_unlock(&ftrace_profile_lock);
+
+       filp->f_pos += cnt;
+
+       return cnt;
+}
+
+static ssize_t
+ftrace_profile_read(struct file *filp, char __user *ubuf,
+                    size_t cnt, loff_t *ppos)
+{
+       char buf[64];           /* big enough to hold a number */
+       int r;
+
+       r = sprintf(buf, "%u\n", ftrace_profile_enabled);
+       return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static const struct file_operations ftrace_profile_fops = {
+       .open           = tracing_open_generic,
+       .read           = ftrace_profile_read,
+       .write          = ftrace_profile_write,
+};
+
+/* used to initialize the real stat files */
+static struct tracer_stat function_stats __initdata = {
+       .name           = "functions",
+       .stat_start     = function_stat_start,
+       .stat_next      = function_stat_next,
+       .stat_cmp       = function_stat_cmp,
+       .stat_headers   = function_stat_headers,
+       .stat_show      = function_stat_show
+};
+
+static void ftrace_profile_debugfs(struct dentry *d_tracer)
+{
+       struct ftrace_profile_stat *stat;
+       struct dentry *entry;
+       char *name;
+       int ret;
+       int cpu;
+
+       for_each_possible_cpu(cpu) {
+               stat = &per_cpu(ftrace_profile_stats, cpu);
+
+               /* allocate enough for function name + cpu number */
+               name = kmalloc(32, GFP_KERNEL);
+               if (!name) {
+                       /*
+                        * The files created are permanent; if something
+                        * goes wrong we still do not free memory.
+                        */
+                       WARN(1,
+                            "Could not allocate stat file for cpu %d\n",
+                            cpu);
+                       return;
+               }
+               stat->stat = function_stats;
+               snprintf(name, 32, "function%d", cpu);
+               stat->stat.name = name;
+               ret = register_stat_tracer(&stat->stat);
+               if (ret) {
+                       WARN(1,
+                            "Could not register function stat for cpu %d\n",
+                            cpu);
+                       kfree(name);
+                       return;
+               }
+       }
+
+       entry = debugfs_create_file("function_profile_enabled", 0644,
+                                   d_tracer, NULL, &ftrace_profile_fops);
+       if (!entry)
+               pr_warning("Could not create debugfs "
+                          "'function_profile_enabled' entry\n");
+}
+
+#else /* CONFIG_FUNCTION_PROFILER */
+static void ftrace_profile_debugfs(struct dentry *d_tracer)
+{
+}
+#endif /* CONFIG_FUNCTION_PROFILER */
+
 /* set when tracing only a pid */
 struct pid *ftrace_pid_trace;
 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
@@ -261,7 +832,6 @@ struct ftrace_func_probe {
        struct rcu_head         rcu;
 };
 
-
 enum {
        FTRACE_ENABLE_CALLS             = (1 << 0),
        FTRACE_DISABLE_CALLS            = (1 << 1),
@@ -1408,7 +1978,7 @@ function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
 
 static struct ftrace_ops trace_probe_ops __read_mostly =
 {
-       .func = function_trace_probe_call,
+       .func           = function_trace_probe_call,
 };
 
 static int ftrace_probe_registered;
@@ -2430,6 +3000,9 @@ static __init int ftrace_init_debugfs(void)
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'set_ftrace_pid' entry\n");
+
+       ftrace_profile_debugfs(d_tracer);
+
        return 0;
 }
 fs_initcall(ftrace_init_debugfs);
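
As a back-of-envelope check of the page layout above, on a 64-bit build with
4 KiB pages and CONFIG_FUNCTION_GRAPH_TRACER enabled (the struct sizes below
are assumptions of that configuration, using stand-in types), each page holds
about 102 records, so the 20000-function fallback needs roughly 197 pages:

	#include <stddef.h>
	#include <stdio.h>

	/* Stand-ins mirroring the profiler structures (64-bit sizes). */
	struct profile {
		void *next, **pprev;            /* models struct hlist_node */
		unsigned long ip;
		unsigned long counter;
		unsigned long long time;        /* present with the graph tracer */
	};

	struct profile_page {
		struct profile_page *next;
		unsigned long index;
		struct profile records[];
	};

	int main(void)
	{
		size_t page_size = 4096;
		size_t records_size =
			page_size - offsetof(struct profile_page, records);
		size_t per_page = records_size / sizeof(struct profile);

		/* 4080 / 40 = 102 records/page; 197 pages for 20000 functions */
		printf("%zu records/page, %zu pages\n",
		       per_page, (20000 + per_page - 1) / per_page);
		return 0;
	}
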
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index a0174a4..2a81dec 100644
@@ -255,7 +255,8 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
 
 /* trace_flags holds trace_options default values */
 unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
-       TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME;
+       TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
+       TRACE_ITER_GRAPH_TIME;
 
 /**
  * trace_wake_up - wake up tasks waiting for trace input
@@ -317,6 +318,7 @@ static const char *trace_options[] = {
        "latency-format",
        "global-clock",
        "sleep-time",
+       "graph-time",
        NULL
 };
 
@@ -402,17 +404,6 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
        return cnt;
 }
 
-static void
-trace_print_seq(struct seq_file *m, struct trace_seq *s)
-{
-       int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;
-
-       s->buffer[len] = 0;
-       seq_puts(m, s->buffer);
-
-       trace_seq_init(s);
-}
-
 /**
  * update_max_tr - snapshot all trace buffers from global_trace to max_tr
  * @tr: tracer
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index cbc168f..fec6521 100644
@@ -613,6 +613,8 @@ extern unsigned long trace_flags;
 /* Standard output formatting function used for function return traces */
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 extern enum print_line_t print_graph_function(struct trace_iterator *iter);
+extern enum print_line_t
+trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 /* TODO: make this variable */
@@ -644,7 +646,6 @@ static inline int ftrace_graph_addr(unsigned long addr)
        return 1;
 }
 #endif /* CONFIG_DYNAMIC_FTRACE */
-
 #else /* CONFIG_FUNCTION_GRAPH_TRACER */
 static inline enum print_line_t
 print_graph_function(struct trace_iterator *iter)
@@ -692,6 +693,7 @@ enum trace_iterator_flags {
        TRACE_ITER_LATENCY_FMT          = 0x40000,
        TRACE_ITER_GLOBAL_CLK           = 0x80000,
        TRACE_ITER_SLEEP_TIME           = 0x100000,
+       TRACE_ITER_GRAPH_TIME           = 0x200000,
 };
 
 /*
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index ad8c22e..e6e3291 100644
@@ -263,7 +263,7 @@ static int branch_stat_show(struct seq_file *m, void *v)
        return 0;
 }
 
-static void *annotated_branch_stat_start(void)
+static void *annotated_branch_stat_start(struct tracer_stat *trace)
 {
        return __start_annotated_branch_profile;
 }
@@ -338,7 +338,7 @@ static int all_branch_stat_headers(struct seq_file *m)
        return 0;
 }
 
-static void *all_branch_stat_start(void)
+static void *all_branch_stat_start(struct tracer_stat *trace)
 {
        return __start_branch_profile;
 }
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index d28687e..10f6ad7 100644
@@ -78,13 +78,14 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth)
        current->ret_stack[index].ret = ret;
        current->ret_stack[index].func = func;
        current->ret_stack[index].calltime = calltime;
+       current->ret_stack[index].subtime = 0;
        *depth = index;
 
        return 0;
 }
 
 /* Retrieve a function return address to the trace stack on thread info.*/
-void
+static void
 ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
 {
        int index;
@@ -104,9 +105,6 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
        trace->calltime = current->ret_stack[index].calltime;
        trace->overrun = atomic_read(&current->trace_overrun);
        trace->depth = index;
-       barrier();
-       current->curr_ret_stack--;
-
 }
 
 /*
@@ -121,6 +119,8 @@ unsigned long ftrace_return_to_handler(void)
        ftrace_pop_return_trace(&trace, &ret);
        trace.rettime = trace_clock_local();
        ftrace_graph_return(&trace);
+       barrier();
+       current->curr_ret_stack--;
 
        if (unlikely(!ret)) {
                ftrace_graph_stop();
@@ -426,8 +426,8 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr,
        return TRACE_TYPE_HANDLED;
 }
 
-static enum print_line_t
-print_graph_duration(unsigned long long duration, struct trace_seq *s)
+enum print_line_t
+trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
 {
        unsigned long nsecs_rem = do_div(duration, 1000);
        /* log10(ULONG_MAX) + '\0' */
@@ -464,12 +464,23 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s)
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }
+       return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t
+print_graph_duration(unsigned long long duration, struct trace_seq *s)
+{
+       int ret;
+
+       ret = trace_print_graph_duration(duration, s);
+       if (ret != TRACE_TYPE_HANDLED)
+               return ret;
 
        ret = trace_seq_printf(s, "|  ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;
-       return TRACE_TYPE_HANDLED;
 
+       return TRACE_TYPE_HANDLED;
 }
 
 /* Case of a leaf function on its call entry */
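
Note that the hunks above also move the curr_ret_stack decrement out of
ftrace_pop_return_trace() to after the ftrace_graph_return() call: the return
handler (here the profiler) still reads the popped frame's subtime, so the
slot must stay live until the handler is done, and the barrier() keeps the
decrement from being reordered before that use. A simplified single-threaded
model of the ordering constraint, with illustrative names:

	#include <stdio.h>

	/* The handler consumes the frame being popped, so the index must
	 * not be decremented (releasing the slot for reuse, e.g. by a
	 * nested entry from an interrupt) until the handler has run. */
	struct ret_frame { unsigned long long calltime, subtime; };

	static struct ret_frame ret_stack[8] = { { 0, 60000 } };
	static int curr_ret_stack = 1;  /* one live frame, at index 0 */

	static void graph_return_handler(int index)
	{
		/* the profiler reads subtime here; slot must be valid */
		printf("subtime = %llu\n", ret_stack[index].subtime);
	}

	int main(void)
	{
		int index = curr_ret_stack - 1;

		graph_return_handler(index);    /* use the frame first ...  */
		curr_ret_stack--;               /* ... then release the slot */
		return 0;
	}
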
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index d72b9a6..aeac358 100644
@@ -19,6 +19,16 @@ static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;
 
 static int next_event_type = __TRACE_LAST_TYPE + 1;
 
+void trace_print_seq(struct seq_file *m, struct trace_seq *s)
+{
+       int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;
+
+       s->buffer[len] = 0;
+       seq_puts(m, s->buffer);
+
+       trace_seq_init(s);
+}
+
 enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
 {
        struct trace_seq *s = &iter->seq;
diff --git a/kernel/trace/trace_output.h b/kernel/trace/trace_output.h
index e0bde39..9163021 100644
@@ -20,6 +20,8 @@ trace_print_bprintk_msg_only(struct trace_iterator *iter);
 extern enum print_line_t
 trace_print_printk_msg_only(struct trace_iterator *iter);
 
+extern void trace_print_seq(struct seq_file *m, struct trace_seq *s);
+
 extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
        __attribute__ ((format (printf, 2, 3)));
 extern int
diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c
index acdebd7..fdde3a4 100644
@@ -85,7 +85,7 @@ static int stat_seq_init(struct tracer_stat_session *session)
        if (!ts->stat_cmp)
                ts->stat_cmp = dummy_cmp;
 
-       stat = ts->stat_start();
+       stat = ts->stat_start(ts);
        if (!stat)
                goto exit;
 
diff --git a/kernel/trace/trace_stat.h b/kernel/trace/trace_stat.h
index 202274c..f3546a2 100644
@@ -12,7 +12,7 @@ struct tracer_stat {
        /* The name of your stat file */
        const char              *name;
        /* Iteration over statistic entries */
-       void                    *(*stat_start)(void);
+       void                    *(*stat_start)(struct tracer_stat *trace);
        void                    *(*stat_next)(void *prev, int idx);
        /* Compare two entries for stats sorting */
        int                     (*stat_cmp)(void *p1, void *p2);
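
Passing the tracer_stat itself to stat_start() lets an implementation that
embeds a struct tracer_stat recover its enclosing state with container_of(),
as function_stat_start() does in ftrace.c above. A minimal sketch of the
idiom with stand-in types (a local container_of is defined so the example
stands alone; the kernel's lives in <linux/kernel.h>):

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct tracer_stat {
		const char *name;
		void *(*stat_start)(struct tracer_stat *trace);
	};

	struct my_stat {
		int cpu;                        /* per-instance state */
		struct tracer_stat stat;        /* embedded interface */
	};

	static void *my_stat_start(struct tracer_stat *trace)
	{
		struct my_stat *s = container_of(trace, struct my_stat, stat);

		printf("stat_start for cpu %d\n", s->cpu);
		return NULL;                    /* no entries in this sketch */
	}

	int main(void)
	{
		struct my_stat s = {
			.cpu = 3,
			.stat = { "function3", my_stat_start },
		};

		s.stat.stat_start(&s.stat);
		return 0;
	}
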
diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
index 797201e..984b917 100644
@@ -152,7 +152,7 @@ static struct cpu_workqueue_stats *workqueue_stat_start_cpu(int cpu)
        return ret;
 }
 
-static void *workqueue_stat_start(void)
+static void *workqueue_stat_start(struct tracer_stat *trace)
 {
        int cpu;
        void *ret = NULL;