/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"
static struct trace_array		*irqsoff_trace __read_mostly;
static int				tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_SPINLOCK(max_trace_lock);
enum {
	TRACER_IRQS_OFF		= (1 << 1),
	TRACER_PREEMPT_OFF	= (1 << 2),
};

static int trace_type __read_mostly;

static int save_lat_flag;
#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
{
	return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
	return ((trace_type & TRACER_IRQS_OFF) &&
		irqs_disabled());
}
#else
# define irq_trace() (0)
#endif
/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp	unsigned long max_sequence;
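
/*
 * A sketch of the discipline (implemented for real in
 * check_critical_timing() below, shown here only to illustrate the
 * comment above): each measurement snapshots max_sequence when it
 * starts and only publishes a new maximum if the snapshot still
 * matches when it ends:
 *
 *	data->critical_sequence = max_sequence;		// at start
 *	...
 *	if (data->critical_sequence != max_sequence)	// at end:
 *		goto out_unlock;			// someone else won
 *	max_sequence++;
 */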
#ifdef CONFIG_FUNCTION_TRACER
/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return;

	local_save_flags(flags);
	/* slight chance to get a false positive on tracing_cpu */
	if (!irqs_disabled_flags(flags))
		return;
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, ip, parent_ip, flags, preempt_count());

	atomic_dec(&data->disabled);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = irqsoff_tracer_call,
};
#endif /* CONFIG_FUNCTION_TRACER */
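
/*
 * A sketch of how the callback above gets installed (done for real in
 * start_irqsoff_tracer() below): registering the ops hooks
 * irqsoff_tracer_call() into every function traced by ftrace, which is
 * why it bails out early via the per-cpu tracing_cpu test before doing
 * any real work:
 *
 *	register_ftrace_function(&trace_ops);
 */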
/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return 0;
	} else {
		if (delta <= tracing_max_latency)
			return 0;
	}
	return 1;
}
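
/*
 * Usage note (illustrative, not part of this file's logic): both knobs
 * consulted above are exposed through debugfs on kernels of this
 * vintage. For example, to report every section longer than 100
 * microseconds instead of only chasing the running maximum:
 *
 *	echo 100 > /sys/kernel/debug/tracing/tracing_thresh
 *
 * and to re-arm the maximum search:
 *
 *	echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
 */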
static void
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	cycle_t T0, T1, delta;
	unsigned long flags;
	int pc;

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	local_save_flags(flags);

	pc = preempt_count();

	if (!report_latency(delta))
		goto out;
	spin_lock_irqsave(&max_trace_lock, flags);

	/* check if we are still the max latency */
	if (!report_latency(delta))
		goto out_unlock;

	trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);

	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	data->critical_end = parent_ip;

	tracing_max_latency = delta;
	update_max_tr_single(tr, current, cpu);

	max_sequence++;

out_unlock:
	spin_unlock_irqrestore(&max_trace_lock, flags);

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}
static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (likely(!tracer_enabled))
		return;

	cpu = raw_smp_processor_id();

	if (per_cpu(tracing_cpu, cpu))
		return;

	data = tr->data[cpu];

	if (unlikely(!data) || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	data->critical_start = parent_ip ? : ip;

	local_save_flags(flags);

	trace_function(tr, ip, parent_ip, flags, preempt_count());

	per_cpu(tracing_cpu, cpu) = 1;

	atomic_dec(&data->disabled);
}
static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	cpu = raw_smp_processor_id();
	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(per_cpu(tracing_cpu, cpu)))
		per_cpu(tracing_cpu, cpu) = 0;
	else
		return;

	if (!tracer_enabled)
		return;

	data = tr->data[cpu];

	if (unlikely(!data) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	local_save_flags(flags);
	trace_function(tr, ip, parent_ip, flags, preempt_count());
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}
/*
 * start and stop critical timings, used to suspend the measurement
 * across sections that must not count as a latency (e.g. the idle
 * loop):
 */
void start_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);
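
/*
 * Illustrative caller (a sketch of the intended pairing; the real call
 * sites live in the arch idle code, not here): the idle loop disables
 * interrupts legitimately for long stretches, so it brackets the halt
 * with these helpers to keep that time out of the results:
 *
 *	stop_critical_timings();
 *	safe_halt();		// arch-specific idle, x86 shown here
 *	start_critical_timings();
 */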
#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(a0, a1);
}

void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */
/*
 * Stubs for the irq-flags tracing hooks that lockdep would otherwise
 * provide:
 */
void early_boot_irqs_off(void) { }
void early_boot_irqs_on(void) { }
void trace_softirqs_on(unsigned long ip) { }
void trace_softirqs_off(unsigned long ip) { }
inline void print_irqtrace_events(struct task_struct *curr) { }
/*
 * We are only interested in hardirq on/off events:
 */
void trace_hardirqs_on(void)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

void trace_hardirqs_on_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

void trace_hardirqs_off_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);
#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */
#ifdef CONFIG_PREEMPT_TRACER
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	if (preempt_trace())
		stop_critical_timing(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	if (preempt_trace())
		start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */
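
/*
 * Context note (from the scheduler, not this file): on kernels of this
 * vintage trace_preempt_off() is called from add_preempt_count() when
 * the preempt count first goes non-zero, and trace_preempt_on() from
 * sub_preempt_count() just before it drops back to zero, so the pair
 * above brackets every preempt-disabled region.
 */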
static void start_irqsoff_tracer(struct trace_array *tr)
{
	register_ftrace_function(&trace_ops);

	if (tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;
}

static void stop_irqsoff_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	unregister_ftrace_function(&trace_ops);
}
static void __irqsoff_tracer_init(struct trace_array *tr)
{
	save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
	trace_flags |= TRACE_ITER_LATENCY_FMT;

	tracing_max_latency = 0;
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();
	tracing_reset_online_cpus(tr);
	start_irqsoff_tracer(tr);
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
	stop_irqsoff_tracer(tr);

	if (!save_lat_flag)
		trace_flags &= ~TRACE_ITER_LATENCY_FMT;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
	tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}
#ifdef CONFIG_IRQSOFF_TRACER
static int irqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}

static struct tracer irqsoff_tracer __read_mostly =
{
	.name		= "irqsoff",
	.init		= irqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_irqsoff,
#endif
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif
#ifdef CONFIG_PREEMPT_TRACER
static int preemptoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_PREEMPT_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}

static struct tracer preemptoff_tracer __read_mostly =
{
	.name		= "preemptoff",
	.init		= preemptoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptoff,
#endif
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif
#if defined(CONFIG_IRQSOFF_TRACER) && \
	defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
	.name		= "preemptirqsoff",
	.init		= preemptirqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptirqsoff,
#endif
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif
__init static int init_irqsoff_tracer(void)
{
	register_irqsoff(irqsoff_tracer);
	register_preemptoff(preemptoff_tracer);
	register_preemptirqsoff(preemptirqsoff_tracer);

	return 0;
}
device_initcall(init_irqsoff_tracer);
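
/*
 * Usage example (illustrative, assuming debugfs is mounted at
 * /sys/kernel/debug and the kernel was built with
 * CONFIG_IRQSOFF_TRACER): select the tracer registered above, then
 * read back the worst-case latency recorded so far and its trace:
 *
 *	echo irqsoff > /sys/kernel/debug/tracing/current_tracer
 *	cat /sys/kernel/debug/tracing/tracing_max_latency
 *	cat /sys/kernel/debug/tracing/trace
 *
 * "preemptoff" and "preemptirqsoff" select the sibling tracers when
 * their config options are enabled.
 */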