/* Include in trace.c */

#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

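/*
 * Note: this file is not built on its own; trace.c #includes it near the
 * end of that file (under CONFIG_FTRACE_STARTUP_TEST), which is how the
 * code below can reach trace.c statics such as ftrace_max_lock.
 */
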
/* Only the entry types emitted by the tracers under test are valid here. */
static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
		return 1;
	}
	return 0;
}

static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	/* ring_buffer_consume() is a consuming read: it drains the buffer */
	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer holds at most trace_buf_size entries;
		 * if we consume more than that, something is wrong
		 * with the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
			       entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane. On return, *count (if non-NULL) holds the number
 * of entries that were in the buffer.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&ftrace_max_lock);
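	/*
	 * ftrace_max_lock is a raw arch_spinlock_t rather than a normal
	 * spinlock: raw arch locks bypass lockdep and the tracer's own
	 * instrumentation, so taking it here cannot recurse back into
	 * the tracing machinery being tested.
	 */
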
	cnt = ring_buffer_entries(tr->buffer);

	/*
	 * trace_test_buffer_cpu() runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
}

#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing the call away */
	func();
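
	/*
	 * Calling through the func parameter, rather than invoking
	 * DYN_FTRACE_TEST_NAME() directly, prevents the compiler from
	 * inlining or eliding the call, so the function's mcount hook
	 * is guaranteed to fire while tracing is live.
	 */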

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
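
	/*
	 * Example: 64-bit PowerPC with the ELFv1 ABI emits the text
	 * symbol as ".DYN_FTRACE_TEST_NAME"; the leading '*' glob lets
	 * the filter match the name despite such prefixes.
	 */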

	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
	}

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */
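
/*
 * The stub above is a GCC statement expression that evaluates to 0, so
 * callers can still write "ret = trace_selftest_startup_dynamic_tracing(...)"
 * and compile unchanged when dynamic ftrace is not configured.
 */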

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000
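
/*
 * The selftest only runs for ~100ms; recording anywhere near 100 million
 * function entries in that window is taken as evidence of a hang.
 */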

static void
__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode);
static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy; we only need to roughly detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops)
			__ftrace_dump(false, DUMP_ALL);
		return 0;
	}

	return trace_graph_entry(trace);
}
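
/*
 * Note that returning 0 from the entry probe also tells the graph tracer
 * to skip recording that function call, so once the threshold trips, no
 * further entries are logged even before ftrace_graph_stop() takes full
 * effect.
 */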

/*
 * Pretty much the same as for the function tracer, from which this
 * selftest has been borrowed.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
				      struct trace_array *tr)
{
	int ret;
	unsigned long count;

	/*
	 * Simulate the init() callback, but attach a watchdog callback
	 * to detect and recover from possible hangs
	 */
	tracing_reset_online_cpus(tr);
	set_graph_array(tr);
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry_watchdog);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

 out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}
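
	/*
	 * Returning 0 above reports a pass without exercising the
	 * tracer at all; the "force " marker in the log shows that the
	 * result was forced rather than measured.
	 */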

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max preempt-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs/preempt-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	tracing_start();
 out_no_start:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif /* CONFIG_NOP_TRACER */

#ifdef CONFIG_SCHED_TRACER
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread; it doesn't need to be too high */
	static const struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* let the test know we are now running at the new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task; do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes, this is slightly racy. It is possible that for some
	 * strange reason the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that already is awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case is that we disable tracing. Honestly, if this
	 * race does happen, something is horribly wrong with the
	 * system.
	 */
	wake_up_process(p);
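
	/*
	 * If the race above does hit, the wakeup is a no-op, the max
	 * buffer stays empty, and the test fails below with
	 * ".. no entries found .." instead of crashing.
	 */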

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */