/*
 * h/w branch tracer for x86 based on BTS
 *
 * Copyright (C) 2008-2009 Intel Corporation.
 * Markus Metzger <markus.t.metzger@gmail.com>, 2008-2009
 */
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/fs.h>

#include <asm/ds.h>

#include "trace_output.h"
#include "trace.h"

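/* Size of the per-cpu BTS collection buffer: 1 << 13 = 8 KB. */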
#define BTS_BUFFER_SIZE (1 << 13)

/*
 * The tracer lock protects the per-cpu tracer array below.
 * It needs to be held to:
 * - start tracing on all cpus
 * - stop tracing on all cpus
 * - start tracing on a single hotplug cpu
 * - stop tracing on a single hotplug cpu
 * - read the trace from all cpus
 * - read the trace from a single cpu
 */
static DEFINE_SPINLOCK(bts_tracer_lock);
static DEFINE_PER_CPU(struct bts_tracer *, tracer);
static DEFINE_PER_CPU(unsigned char[BTS_BUFFER_SIZE], buffer);

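/*
 * Shorthands for the current cpu's tracer and buffer; only meaningful
 * with preemption disabled (they are used from IPI context below).
 */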
#define this_tracer per_cpu(tracer, smp_processor_id())
#define this_buffer per_cpu(buffer, smp_processor_id())

static int trace_hw_branches_enabled __read_mostly;
static int trace_hw_branches_suspended __read_mostly;
static struct trace_array *hw_branch_trace __read_mostly;

/*
 * Initialize the tracer for the current cpu.
 * The argument is ignored.
 *
 * pre: bts_tracer_lock must be locked.
 */
static void bts_trace_init_cpu(void *arg)
{
        if (this_tracer)
                ds_release_bts(this_tracer);

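        /*
         * Request per-cpu, kernel-mode branch tracing (task == NULL,
         * BTS_KERNEL), with no overflow callback and no threshold.
         */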
        this_tracer = ds_request_bts(NULL, this_buffer, BTS_BUFFER_SIZE,
                                     NULL, (size_t)-1, BTS_KERNEL);
        if (IS_ERR(this_tracer))
                this_tracer = NULL;
}

static int bts_trace_init(struct trace_array *tr)
{
        int cpu, avail;

        spin_lock(&bts_tracer_lock);

        hw_branch_trace = tr;

        on_each_cpu(bts_trace_init_cpu, NULL, 1);

        /* Count the cpus on which we could enable tracing. */
        avail = 0;
        for_each_online_cpu(cpu)
                if (per_cpu(tracer, cpu))
                        avail++;

        trace_hw_branches_enabled = (avail ? 1 : 0);
        trace_hw_branches_suspended = 0;

        spin_unlock(&bts_tracer_lock);

        /* If we could not enable tracing on a single cpu, we fail. */
        return avail ? 0 : -EOPNOTSUPP;
}

/*
 * Release the tracer for the current cpu.
 * The argument is ignored.
 *
 * pre: bts_tracer_lock must be locked.
 */
static void bts_trace_release_cpu(void *arg)
{
        if (this_tracer) {
                ds_release_bts(this_tracer);
                this_tracer = NULL;
        }
}

static void bts_trace_reset(struct trace_array *tr)
{
        spin_lock(&bts_tracer_lock);

        on_each_cpu(bts_trace_release_cpu, NULL, 1);
        trace_hw_branches_enabled = 0;
        trace_hw_branches_suspended = 0;

        spin_unlock(&bts_tracer_lock);
}

/*
 * Resume tracing on the current cpu.
 * The argument is ignored.
 *
 * pre: bts_tracer_lock must be locked.
 */
static void bts_trace_resume_cpu(void *arg)
{
        if (this_tracer)
                ds_resume_bts(this_tracer);
}

static void bts_trace_start(struct trace_array *tr)
{
        spin_lock(&bts_tracer_lock);

        on_each_cpu(bts_trace_resume_cpu, NULL, 1);
        trace_hw_branches_suspended = 0;

        spin_unlock(&bts_tracer_lock);
}

/*
 * Suspend tracing on the current cpu.
 * The argument is ignored.
 *
 * pre: bts_tracer_lock must be locked.
 */
static void bts_trace_suspend_cpu(void *arg)
{
        if (this_tracer)
                ds_suspend_bts(this_tracer);
}

static void bts_trace_stop(struct trace_array *tr)
{
        spin_lock(&bts_tracer_lock);

        on_each_cpu(bts_trace_suspend_cpu, NULL, 1);
        trace_hw_branches_suspended = 1;

        spin_unlock(&bts_tracer_lock);
}

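/*
 * React to cpu hotplug events: (re-)initialize tracing on a cpu that
 * comes online (or failed to go offline), and release the tracer before
 * a cpu goes offline. Does nothing while the tracer is disabled.
 */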
static int __cpuinit bts_hotcpu_handler(struct notifier_block *nfb,
                                        unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;

        spin_lock(&bts_tracer_lock);

        if (!trace_hw_branches_enabled)
                goto out;

        switch (action) {
        case CPU_ONLINE:
        case CPU_DOWN_FAILED:
                smp_call_function_single(cpu, bts_trace_init_cpu, NULL, 1);

                if (trace_hw_branches_suspended)
                        smp_call_function_single(cpu, bts_trace_suspend_cpu,
                                                 NULL, 1);
                break;
        case CPU_DOWN_PREPARE:
                smp_call_function_single(cpu, bts_trace_release_cpu, NULL, 1);
                break;
        }

 out:
        spin_unlock(&bts_tracer_lock);
        return NOTIFY_DONE;
}

static struct notifier_block bts_hotcpu_notifier __cpuinitdata = {
        .notifier_call = bts_hotcpu_handler,
};

static void bts_trace_print_header(struct seq_file *m)
{
        seq_puts(m, "# CPU#        TO  <-  FROM\n");
}

static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
{
        unsigned long symflags = TRACE_ITER_SYM_OFFSET;
        struct trace_entry *entry = iter->ent;
        struct trace_seq *seq = &iter->seq;
        struct hw_branch_entry *it;

        trace_assign_type(it, entry);

        if (entry->type == TRACE_HW_BRANCHES) {
                if (trace_seq_printf(seq, "%4d  ", iter->cpu) &&
                    seq_print_ip_sym(seq, it->to, symflags) &&
                    trace_seq_printf(seq, "\t  <-  ") &&
                    seq_print_ip_sym(seq, it->from, symflags) &&
                    trace_seq_printf(seq, "\n"))
                        return TRACE_TYPE_HANDLED;
                return TRACE_TYPE_PARTIAL_LINE;
        }
        return TRACE_TYPE_UNHANDLED;
}

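/*
 * Write a single from/to branch record into the ftrace ring buffer for
 * the current cpu. The per-cpu 'disabled' counter guards against
 * recursion while the record is being written.
 */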
void trace_hw_branch(u64 from, u64 to)
{
        struct trace_array *tr = hw_branch_trace;
        struct ring_buffer_event *event;
        struct hw_branch_entry *entry;
        unsigned long irq_flags;
        int cpu;

        if (unlikely(!tr))
                return;

        if (unlikely(!trace_hw_branches_enabled))
                return;

        local_irq_save(irq_flags);
        cpu = raw_smp_processor_id();
        if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
                goto out;

        event = trace_buffer_lock_reserve(tr, TRACE_HW_BRANCHES,
                                          sizeof(*entry), 0, 0);
        if (!event)
                goto out;
        entry   = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, 0, from);
        entry->ent.type = TRACE_HW_BRANCHES;
        entry->from = from;
        entry->to   = to;
        trace_buffer_unlock_commit(tr, event, 0, 0);

 out:
        atomic_dec(&tr->data[cpu]->disabled);
        local_irq_restore(irq_flags);
}

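/* Read the BTS record at 'at' and emit it if it is a branch record. */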
static void trace_bts_at(const struct bts_trace *trace, void *at)
{
        struct bts_struct bts;
        int err = 0;

        WARN_ON_ONCE(!trace->read);
        if (!trace->read)
                return;

        err = trace->read(this_tracer, at, &bts);
        if (err < 0)
                return;

        switch (bts.qualifier) {
        case BTS_BRANCH:
                trace_hw_branch(bts.variant.lbr.from, bts.variant.lbr.to);
                break;
        }
}

/*
 * Collect the trace on the current cpu and write it into the ftrace buffer.
 *
 * pre: bts_tracer_lock must be locked.
 */
static void trace_bts_cpu(void *arg)
{
        struct trace_array *tr = (struct trace_array *)arg;
        const struct bts_trace *trace;
        unsigned char *at;

        if (unlikely(!tr))
                return;

        if (unlikely(atomic_read(&tr->data[raw_smp_processor_id()]->disabled)))
                return;

        if (unlikely(!this_tracer))
                return;

        ds_suspend_bts(this_tracer);
        trace = ds_read_bts(this_tracer);
        if (!trace)
                goto out;

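        /*
         * The BTS buffer is circular with 'top' being the current write
         * position: read from top to end, then wrap around from begin
         * to top, so records are emitted oldest first.
         */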
        for (at = trace->ds.top; (void *)at < trace->ds.end;
             at += trace->ds.size)
                trace_bts_at(trace, at);

        for (at = trace->ds.begin; (void *)at < trace->ds.top;
             at += trace->ds.size)
                trace_bts_at(trace, at);

 out:
        ds_resume_bts(this_tracer);
}

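/* On open, pull the hardware trace from all cpus into the ring buffer. */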
static void trace_bts_prepare(struct trace_iterator *iter)
{
        spin_lock(&bts_tracer_lock);

        on_each_cpu(trace_bts_cpu, iter->tr, 1);

        spin_unlock(&bts_tracer_lock);
}

static void trace_bts_close(struct trace_iterator *iter)
{
        tracing_reset_online_cpus(iter->tr);
}

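/*
 * Flush the BTS trace of the current cpu into the ftrace buffer;
 * intended to be called from the oops path.
 */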
void trace_hw_branch_oops(void)
{
        spin_lock(&bts_tracer_lock);

        if (trace_hw_branches_enabled)
                trace_bts_cpu(hw_branch_trace);

        spin_unlock(&bts_tracer_lock);
}

struct tracer bts_tracer __read_mostly = {
        .name           = "hw-branch-tracer",
        .init           = bts_trace_init,
        .reset          = bts_trace_reset,
        .print_header   = bts_trace_print_header,
        .print_line     = bts_trace_print_line,
        .start          = bts_trace_start,
        .stop           = bts_trace_stop,
        .open           = trace_bts_prepare,
        .close          = trace_bts_close,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_hw_branches,
#endif /* CONFIG_FTRACE_SELFTEST */
};
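
/*
 * Typical usage goes through the standard ftrace debugfs interface
 * (assuming debugfs is mounted at /sys/kernel/debug; these paths belong
 * to ftrace, not to this file):
 *
 *   echo hw-branch-tracer > /sys/kernel/debug/tracing/current_tracer
 *   cat /sys/kernel/debug/tracing/trace
 */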

static int __init init_bts_trace(void)
{
        register_hotcpu_notifier(&bts_hotcpu_notifier);
        return register_tracer(&bts_tracer);
}
device_initcall(init_bts_trace);