/* Include in trace.c */

#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>
static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
		return 1;
	}
	return 0;
}
static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer holds at most trace_buf_size entries;
		 * if we consume more than that, something is wrong
		 * with the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}
/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}
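
/*
 * The individual selftests below all follow the same basic pattern
 * around the helper above (taken from the call sites in this file):
 *
 *	ret = tracer_init(trace, tr);		- start the tracer
 *	msleep(100);				- let it gather events
 *	tracing_stop();				- freeze the buffers
 *	ret = trace_test_buffer(tr, &count);	- validate and count entries
 *	trace->reset(tr);
 *	tracing_start();
 */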
static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE
static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
					    unsigned long pip)
{
	trace_selftest_test_probe1_cnt++;
}

static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
					    unsigned long pip)
{
	trace_selftest_test_probe2_cnt++;
}

static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
					    unsigned long pip)
{
	trace_selftest_test_probe3_cnt++;
}

static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
					    unsigned long pip)
{
	trace_selftest_test_global_cnt++;
}

static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
					 unsigned long pip)
{
	trace_selftest_test_dyn_cnt++;
}
static struct ftrace_ops test_probe1 = {
	.func = trace_selftest_test_probe1_func,
};

static struct ftrace_ops test_probe2 = {
	.func = trace_selftest_test_probe2_func,
};

static struct ftrace_ops test_probe3 = {
	.func = trace_selftest_test_probe3_func,
};

static struct ftrace_ops test_global = {
	.func = trace_selftest_test_global_func,
	.flags = FTRACE_OPS_FL_GLOBAL,
};
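
/*
 * Note: test_probe1/2/3 each get their own per-ops filter below, while
 * FTRACE_OPS_FL_GLOBAL attaches test_global to the same global ops list
 * the function tracer uses (so it also shares the global filter).
 */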
static void print_counts(void)
{
	printk("(%d %d %d %d %d) ",
	       trace_selftest_test_probe1_cnt,
	       trace_selftest_test_probe2_cnt,
	       trace_selftest_test_probe3_cnt,
	       trace_selftest_test_global_cnt,
	       trace_selftest_test_dyn_cnt);
}
static void reset_counts(void)
{
	trace_selftest_test_probe1_cnt = 0;
	trace_selftest_test_probe2_cnt = 0;
	trace_selftest_test_probe3_cnt = 0;
	trace_selftest_test_global_cnt = 0;
	trace_selftest_test_dyn_cnt = 0;
}
static int trace_selftest_ops(int cnt)
{
	int save_ftrace_enabled = ftrace_enabled;
	struct ftrace_ops *dyn_ops;
	char *func1_name;
	char *func2_name;
	int len1;
	int len2;
	int ret = -1;

	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace ops #%d: ", cnt);

	ftrace_enabled = 1;
	reset_counts();

	/* Handle PPC64 '.' name */
	func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
	len1 = strlen(func1_name);
	len2 = strlen(func2_name);

	/*
	 * Probe 1 will trace function 1.
	 * Probe 2 will trace function 2.
	 * Probe 3 will trace functions 1 and 2.
	 */
	ftrace_set_filter(&test_probe1, func1_name, len1, 1);
	ftrace_set_filter(&test_probe2, func2_name, len2, 1);
	ftrace_set_filter(&test_probe3, func1_name, len1, 1);
	ftrace_set_filter(&test_probe3, func2_name, len2, 0);

	register_ftrace_function(&test_probe1);
	register_ftrace_function(&test_probe2);
	register_ftrace_function(&test_probe3);
	register_ftrace_function(&test_global);

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 0)
		goto out;
	if (trace_selftest_test_probe3_cnt != 1)
		goto out;
	if (trace_selftest_test_global_cnt == 0)
		goto out;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out;
	if (trace_selftest_test_probe3_cnt != 2)
		goto out;

	/* Add a dynamic probe */
	dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
	if (!dyn_ops) {
		printk("MEMORY ERROR ");
		goto out;
	}

	dyn_ops->func = trace_selftest_test_dyn_func;

	register_ftrace_function(dyn_ops);

	trace_selftest_test_global_cnt = 0;

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 3)
		goto out_free;
	if (trace_selftest_test_global_cnt == 0)
		goto out_free;
	if (trace_selftest_test_dyn_cnt == 0)
		goto out_free;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 4)
		goto out_free;

	ret = 0;
 out_free:
	unregister_ftrace_function(dyn_ops);
	kfree(dyn_ops);

 out:
	/* Purposely unregister in the same order */
	unregister_ftrace_function(&test_probe1);
	unregister_ftrace_function(&test_probe2);
	unregister_ftrace_function(&test_probe3);
	unregister_ftrace_function(&test_global);

	/* Make sure everything is off */
	reset_counts();
	DYN_FTRACE_TEST_NAME();
	DYN_FTRACE_TEST_NAME();

	if (trace_selftest_test_probe1_cnt ||
	    trace_selftest_test_probe2_cnt ||
	    trace_selftest_test_probe3_cnt ||
	    trace_selftest_test_global_cnt ||
	    trace_selftest_test_dyn_cnt)
		ret = -1;

	ftrace_enabled = save_ftrace_enabled;

	return ret;
}
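
/*
 * Summary of the counters checked above while all four static ops are
 * registered (probe1 filters func1, probe2 func2, probe3 both):
 *
 *				probe1	probe2	probe3
 *	DYN_FTRACE_TEST_NAME()	   1	   0	   1
 *	DYN_FTRACE_TEST_NAME2()	   1	   1	   2
 */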
/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter to fool gcc from optimizing */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_global_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		trace->reset(tr);
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

	/* Test the ops with global tracing running */
	ret = trace_selftest_ops(1);
	trace->reset(tr);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_global_filter(NULL, 0, 1);

	/* Test the ops with global tracing off */
	if (!ret)
		ret = trace_selftest_ops(2);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */
/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000

static unsigned int graph_hang_thresh;
/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy, we want to approximately detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops) {
			ftrace_dump(DUMP_ALL);
			/* ftrace_dump() disables tracing */
			tracing_on();
		}
		return 0;
	}

	return trace_graph_entry(trace);
}
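
/*
 * Rough sanity of the threshold above: GRAPH_MAX_FUNC_TEST is 10^8
 * entries, far more than a working tracer records during the 100ms
 * test sleep below, so the watchdog should only trip on a genuine hang.
 */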
/*
 * Pretty much the same as for the function tracer, from which this
 * selftest has been borrowed.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
				      struct trace_array *tr)
{
	int ret;
	unsigned long count;

	/*
	 * Simulate the init() callback but we attach a watchdog callback
	 * to detect and recover from possible hangs
	 */
	tracing_reset_online_cpus(tr);
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry_watchdog);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

 out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure: tracing_stop() disables both
	 * the tr and max buffers, which makes the buffer swap fail
	 * if a parallel max irqs-off latency comes in.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */
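
/*
 * The preemptoff and preemptirqsoff tests below follow the same shape
 * as the irqsoff test above: zero tracing_max_latency, generate the
 * latency (udelay(100) with preemption and/or interrupts disabled),
 * then check both the live buffer and the max-latency snapshot.
 */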
#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure: tracing_stop() disables both
	 * the tr and max buffers, which makes the buffer swap fail
	 * if a parallel max preempt-off latency comes in.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */
#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure: tracing_stop() disables both
	 * the tr and max buffers, which makes the buffer swap fail
	 * if a parallel max irqs/preempt-off latency comes in.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	tracing_start();
 out_no_start:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */
#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif
#ifdef CONFIG_SCHED_TRACER
static int trace_wakeup_test_thread(void *data)
{
	/* Make this a RT thread, doesn't need to be too high */
	static const struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Make it know we have a new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes this is slightly racy. It is possible that, for some
	 * strange reason, the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that is already awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case is that we disable tracing. Honestly, if this
	 * race does happen, something is horribly wrong with the
	 * system.
	 */

	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */
#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */
#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */