Merge branch 'linus' into tracing/sysprof
[pandora-kernel.git] / kernel / trace / trace_sched_switch.c
/*
 * trace context switch
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/marker.h>
#include <linux/ftrace.h>

#include "trace.h"

static struct trace_array       *ctx_trace;
static int __read_mostly        tracer_enabled;
static atomic_t                 sched_ref;

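/*
 * Record a single context-switch event in the current CPU's trace
 * buffer.  Interrupts are disabled around the write and data->disabled
 * guards against recursive entry on this CPU.
 */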
static void
sched_switch_func(void *private, void *__rq, struct task_struct *prev,
                        struct task_struct *next)
{
        struct trace_array **ptr = private;
        struct trace_array *tr = *ptr;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;

        if (!tracer_enabled)
                return;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1))
                tracing_sched_switch_trace(tr, data, prev, next, flags);

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

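/*
 * Marker probe callback for the kernel_sched_schedule marker: skip the
 * pid/state arguments, pull rq, prev and next out of the va_list in the
 * order given by the marker format string, record prev's comm, and hand
 * the event to sched_switch_func().
 */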
static notrace void
sched_switch_callback(void *probe_data, void *call_data,
                      const char *format, va_list *args)
{
        struct task_struct *prev;
        struct task_struct *next;
        struct rq *__rq;

        if (!atomic_read(&sched_ref))
                return;

        /* skip prev_pid %d next_pid %d prev_state %ld */
        (void)va_arg(*args, int);
        (void)va_arg(*args, int);
        (void)va_arg(*args, long);
        __rq = va_arg(*args, typeof(__rq));
        prev = va_arg(*args, typeof(prev));
        next = va_arg(*args, typeof(next));

        tracing_record_cmdline(prev);

        /*
         * If tracer_switch_func only points to the local
         * switch func, it still needs the ptr passed to it.
         */
        sched_switch_func(probe_data, __rq, prev, next);
}

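/*
 * Record a wakeup event (wakee woken while curr was running) in the
 * current CPU's trace buffer, with the same recursion protection as
 * sched_switch_func().
 */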
static void
wakeup_func(void *private, void *__rq, struct task_struct *wakee,
                        struct task_struct *curr)
{
        struct trace_array **ptr = private;
        struct trace_array *tr = *ptr;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;

        if (!tracer_enabled)
                return;

        tracing_record_cmdline(curr);

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1))
                tracing_sched_wakeup_trace(tr, data, wakee, curr, flags);

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

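/*
 * Marker probe callback for the kernel_sched_wakeup and
 * kernel_sched_wakeup_new markers: skip the pid/state arguments,
 * extract rq, the woken task and rq->curr from the va_list, record
 * both comms, and hand the event to wakeup_func().
 */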
static notrace void
wake_up_callback(void *probe_data, void *call_data,
                 const char *format, va_list *args)
{
        struct task_struct *curr;
        struct task_struct *task;
        struct rq *__rq;

        if (likely(!tracer_enabled))
                return;

        /* Skip pid %d state %ld */
        (void)va_arg(*args, int);
        (void)va_arg(*args, long);
        /* now get the meat: "rq %p task %p rq->curr %p" */
        __rq = va_arg(*args, typeof(__rq));
        task = va_arg(*args, typeof(task));
        curr = va_arg(*args, typeof(curr));

        tracing_record_cmdline(task);
        tracing_record_cmdline(curr);

        wakeup_func(probe_data, __rq, task, curr);
}

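/*
 * Allow callers to log three arbitrary values as a "special" entry in
 * the context-switch trace, using the same per-CPU disable counter to
 * avoid recursion.
 */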
void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
        struct trace_array *tr = ctx_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;

        if (!tracer_enabled)
                return;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1))
                __trace_special(tr, data, arg1, arg2, arg3);

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

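/*
 * Restart the trace: record the new start time and clear every online
 * CPU's buffer.
 */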
static void sched_switch_reset(struct trace_array *tr)
{
        int cpu;

        tr->time_start = ftrace_now(tr->cpu);

        for_each_online_cpu(cpu)
                tracing_reset(tr->data[cpu]);
}

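/*
 * Attach the probes to the kernel_sched_wakeup, kernel_sched_wakeup_new
 * and kernel_sched_schedule markers.  If a later registration fails,
 * the probes that were already registered are removed again.
 */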
static int tracing_sched_register(void)
{
        int ret;

        ret = marker_probe_register("kernel_sched_wakeup",
                        "pid %d state %ld ## rq %p task %p rq->curr %p",
                        wake_up_callback,
                        &ctx_trace);
        if (ret) {
                pr_info("wakeup trace: Couldn't add marker"
                        " probe to kernel_sched_wakeup\n");
                return ret;
        }

        ret = marker_probe_register("kernel_sched_wakeup_new",
                        "pid %d state %ld ## rq %p task %p rq->curr %p",
                        wake_up_callback,
                        &ctx_trace);
        if (ret) {
                pr_info("wakeup trace: Couldn't add marker"
                        " probe to kernel_sched_wakeup_new\n");
                goto fail_deprobe;
        }

        ret = marker_probe_register("kernel_sched_schedule",
                "prev_pid %d next_pid %d prev_state %ld "
                "## rq %p prev %p next %p",
                sched_switch_callback,
                &ctx_trace);
        if (ret) {
                pr_info("sched trace: Couldn't add marker"
                        " probe to kernel_sched_schedule\n");
                goto fail_deprobe_wake_new;
        }

        return ret;
fail_deprobe_wake_new:
        marker_probe_unregister("kernel_sched_wakeup_new",
                                wake_up_callback,
                                &ctx_trace);
fail_deprobe:
        marker_probe_unregister("kernel_sched_wakeup",
                                wake_up_callback,
                                &ctx_trace);
        return ret;
}

static void tracing_sched_unregister(void)
{
        marker_probe_unregister("kernel_sched_schedule",
                                sched_switch_callback,
                                &ctx_trace);
        marker_probe_unregister("kernel_sched_wakeup_new",
                                wake_up_callback,
                                &ctx_trace);
        marker_probe_unregister("kernel_sched_wakeup",
                                wake_up_callback,
                                &ctx_trace);
}

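/*
 * Reference-counted enable/disable: the first user to call
 * tracing_start_sched_switch() registers the marker probes and the
 * last user to call tracing_stop_sched_switch() removes them.
 */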
void tracing_start_sched_switch(void)
{
        long ref;

        ref = atomic_inc_return(&sched_ref);
        if (ref == 1)
                tracing_sched_register();
}

void tracing_stop_sched_switch(void)
{
        long ref;

        ref = atomic_dec_and_test(&sched_ref);
        if (ref)
                tracing_sched_unregister();
}

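/*
 * start_sched_trace() resets the buffers, enables cmdline recording and
 * sets tracer_enabled before taking a reference on the marker probes;
 * stop_sched_trace() drops the reference and disables recording again.
 */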
static void start_sched_trace(struct trace_array *tr)
{
        sched_switch_reset(tr);
        atomic_inc(&trace_record_cmdline_enabled);
        tracer_enabled = 1;
        tracing_start_sched_switch();
}

static void stop_sched_trace(struct trace_array *tr)
{
        tracing_stop_sched_switch();
        atomic_dec(&trace_record_cmdline_enabled);
        tracer_enabled = 0;
}

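/*
 * Callbacks invoked by the tracing core when the "sched_switch" tracer
 * is selected, torn down, or toggled through its ctrl file.
 */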
static void sched_switch_trace_init(struct trace_array *tr)
{
        ctx_trace = tr;

        if (tr->ctrl)
                start_sched_trace(tr);
}

static void sched_switch_trace_reset(struct trace_array *tr)
{
        if (tr->ctrl)
                stop_sched_trace(tr);
}

static void sched_switch_trace_ctrl_update(struct trace_array *tr)
{
        /* When starting a new trace, reset the buffers */
        if (tr->ctrl)
                start_sched_trace(tr);
        else
                stop_sched_trace(tr);
}

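/* The tracer registered with the tracing core under the name "sched_switch". */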
static struct tracer sched_switch_trace __read_mostly =
{
        .name           = "sched_switch",
        .init           = sched_switch_trace_init,
        .reset          = sched_switch_trace_reset,
        .ctrl_update    = sched_switch_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_sched_switch,
#endif
};

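/*
 * Boot-time initialization: if a user has already taken a sched_ref
 * before this initcall runs, register the marker probes now, then
 * register the tracer itself with the tracing core.
 */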
__init static int init_sched_switch_trace(void)
{
        int ret = 0;

        if (atomic_read(&sched_ref))
                ret = tracing_sched_register();
        if (ret) {
                pr_info("error registering scheduler trace\n");
                return ret;
        }
        return register_tracer(&sched_switch_trace);
}
device_initcall(init_sched_switch_trace);