/* Include in trace.c */

#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>
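
/*
 * Sanity filter for buffer contents: an entry whose type does not match
 * one of the record types the tracers below can emit means the ring
 * buffer has been corrupted.
 */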
static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_SPECIAL:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
	case TRACE_HW_BRANCHES:
	case TRACE_KSYM:
		return 1;
	}
	return 0;
}
static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer holds only trace_buf_size entries;
		 * if we loop more times than that, something is wrong
		 * with the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}
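
/*
 * The check below runs with interrupts off and ftrace_max_lock held so
 * that a concurrent max-latency update cannot swap the tr and max_tr
 * buffers while their entries are being consumed.
 */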
/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	/*
	 * trace_test_buffer_cpu() runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * that hard lock.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}
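
/*
 * For orientation, the selftests below all follow one skeleton. This is
 * an illustrative sketch only (example_selftest is hypothetical, not a
 * tracer in this file), showing how trace_test_buffer() is meant to be
 * used:
 *
 *	int example_selftest(struct tracer *trace, struct trace_array *tr)
 *	{
 *		unsigned long count;
 *		int ret;
 *
 *		ret = tracer_init(trace, tr);	     // start the tracer
 *		if (ret) {
 *			warn_failed_init_tracer(trace, ret);
 *			return ret;
 *		}
 *		msleep(100);			     // generate some events
 *		tracing_stop();			     // freeze the buffers
 *		ret = trace_test_buffer(tr, &count); // validate and count
 *		trace->reset(tr);
 *		tracing_start();
 *		return (!ret && !count) ? -1 : ret;  // empty buffer = fail
 *	}
 */
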
static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE
/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing the call away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */
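
/*
 * Note on the filter target above: in the kernel tree the test function
 * lives in a separate file, kernel/trace/trace_selftest_dynamic.c,
 * because this directory is normally built with -pg stripped (the tracer
 * must not trace itself) while the test target needs the mcount call:
 *
 *	int DYN_FTRACE_TEST_NAME(void)
 *	{
 *		// used to call mcount
 *		return 0;
 *	}
 *
 * DYN_FTRACE_TEST_NAME is a macro (defined in trace.h) that expands to
 * the real symbol name, which is why __stringify() above yields a
 * usable filter pattern.
 */
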
/*
 * Simple verification test of the ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */
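
/*
 * How these routines get run: each tracer hooks one of them up through
 * the selftest callback in its struct tracer, and register_tracer()
 * invokes the callback before the tracer is made available. A sketch,
 * following the pattern in trace_functions.c (field layout per trace.h):
 *
 *	static struct tracer function_trace __read_mostly = {
 *		.name		= "function",
 *		.init		= function_trace_init,
 *		.reset		= function_trace_reset,
 *		.start		= function_trace_start,
 *	#ifdef CONFIG_FTRACE_SELFTEST
 *		.selftest	= trace_selftest_startup_function,
 *	#endif
 *	};
 */
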
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000

static void
__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode);
static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy, we want to approximately detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops)
			__ftrace_dump(false, DUMP_ALL);
		return 0;
	}

	return trace_graph_entry(trace);
}
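
/*
 * The counter above is shared by all CPUs without locking; a few lost
 * increments do not matter since only the order of magnitude counts.
 * A healthy 100ms selftest run traces nowhere near 100 million calls,
 * so crossing GRAPH_MAX_FUNC_TEST reliably indicates a looping or
 * recursing tracer rather than normal load.
 */
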
/*
 * Pretty much the same as for the function tracer, from which this
 * selftest has been borrowed.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
				      struct trace_array *tr)
{
	int ret;
	unsigned long count;

	/*
	 * Simulate the init() callback but we attach a watchdog callback
	 * to detect and recover from possible hangs
	 */
	tracing_reset_online_cpus(tr);
	set_graph_array(tr);
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry_watchdog);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case a max irqs-off latency is recorded in parallel.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
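	/*
	 * tr holds the live trace; max_tr holds the snapshot taken when
	 * the tracer recorded a new maximum latency. Both must be sane,
	 * and the snapshot must actually contain the irqs-off section
	 * produced above.
	 */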
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */
#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case a max preempt-off latency is recorded in parallel.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */
#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case a max irqs/preempt-off latency is recorded in parallel.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
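	/*
	 * The first pass re-enabled interrupts last, so the irqsoff half
	 * saw the longer critical section; this pass re-enables preemption
	 * last to exercise the preemptoff half the same way.
	 */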
	tracing_max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	local_irq_enable();
	/* reverse the order of preempt vs irqs */
	preempt_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	tracing_start();
 out_no_start:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */
#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif /* CONFIG_NOP_TRACER */

#ifdef CONFIG_SCHED_TRACER
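
/*
 * The wakeup tracer measures how long the highest-priority runnable
 * task waits between being woken and actually getting the CPU. The
 * selftest creates an RT thread, lets it sleep, then wakes it while
 * tracing and expects the resulting latency in the max_tr snapshot.
 */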
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread, doesn't need to be too high */
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Make it know we have a new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * other tasks run.
		 */
		msleep(100);
	}

	return 0;
}
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes, this is slightly racy. It is possible that for some
	 * strange reason the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that already is awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case is that we disable tracing. Honestly, if this
	 * race does happen, something is horribly wrong with the
	 * system.
	 */

	wake_up_process(p);
	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */
#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */
#ifdef CONFIG_SYSPROF_TRACER
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */
#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */
#ifdef CONFIG_HW_BRANCH_TRACER
int
trace_selftest_startup_hw_branches(struct tracer *trace,
				   struct trace_array *tr)
{
	struct trace_iterator *iter;
	struct tracer tracer;
	unsigned long count;
	int ret;

	if (!trace->open) {
		printk(KERN_CONT "missing open function...");
		return -1;
	}

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/*
	 * The hw-branch tracer needs to collect the trace from the various
	 * cpu trace buffers - before tracing is stopped.
	 */
	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	memcpy(&tracer, trace, sizeof(tracer));

	iter->trace = &tracer;
	iter->tr = tr;
	iter->pos = -1;
	mutex_init(&iter->mutex);

	trace->open(iter);

	mutex_destroy(&iter->mutex);
	kfree(iter);

	tracing_stop();

	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT "no entries found..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_HW_BRANCH_TRACER */
#ifdef CONFIG_KSYM_TRACER
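
/*
 * The ksym tracer uses hardware breakpoints to record read and write
 * accesses to a given kernel symbol; the selftest arms a breakpoint on
 * a private dummy variable and touches it in both modes.
 */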
static int ksym_selftest_dummy;

int
trace_selftest_startup_ksym(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	ksym_selftest_dummy = 0;
	/* Register the read-write tracing request */
	ret = process_new_ksym_entry("ksym_selftest_dummy",
				     HW_BREAKPOINT_R | HW_BREAKPOINT_W,
				     (unsigned long)(&ksym_selftest_dummy));
	if (ret < 0) {
		printk(KERN_CONT "ksym_trace read-write startup test failed\n");
		goto ret_path;
	}
	/*
	 * Perform a read and a write operation over the dummy variable
	 * to trigger the tracer.
	 */
	if (ksym_selftest_dummy == 0)
		ksym_selftest_dummy++;

	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/*
	 * Read and write operations - one each - were performed on the
	 * dummy variable, triggering two entries in the trace buffer.
	 */
	if (!ret && count != 2) {
		printk(KERN_CONT "Ksym tracer startup test failed");
		ret = -1;
	}

ret_path:
	return ret;
}
#endif /* CONFIG_KSYM_TRACER */