/*
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based on work by Tom Zanussi <tzanussi@gmail.com>.
 */
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"

#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);
int trace_define_field(struct ftrace_event_call *call, char *type,
		       char *name, int offset, int size, int is_signed)
{
	struct ftrace_event_field *field;

	field = kzalloc(sizeof(*field), GFP_KERNEL);

	field->name = kstrdup(name, GFP_KERNEL);
	field->type = kstrdup(type, GFP_KERNEL);

	field->offset = offset;
	field->is_signed = is_signed;
	list_add(&field->link, &call->fields);

EXPORT_SYMBOL_GPL(trace_define_field);
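/*
 * Illustrative sketch (not part of the original file): a typical
 * define_fields() callback, as generated by the TRACE_EVENT() macros,
 * registers each field of its record roughly like this.  The struct
 * and field names below are made up for the example:
 *
 *	ret = trace_define_field(call, "pid_t", "prev_pid",
 *				 offsetof(struct sched_switch_entry, prev_pid),
 *				 sizeof(pid_t), 1);
 */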
#ifdef CONFIG_MODULES

static void trace_destroy_fields(struct ftrace_event_call *call)
{
	struct ftrace_event_field *field, *next;

	list_for_each_entry_safe(field, next, &call->fields, link) {
		list_del(&field->link);

#endif /* CONFIG_MODULES */
static void ftrace_event_enable_disable(struct ftrace_event_call *call,
					int enable)
{
		tracing_stop_cmdline_record();

		tracing_start_cmdline_record();

static void ftrace_clear_events(void)
{
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		ftrace_event_enable_disable(call, 0);
	}
	mutex_unlock(&event_mutex);
/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int __ftrace_set_clr_event(const char *match, const char *sub,
				  const char *event, int set)
{
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!call->name || !call->regfunc)
			continue;
		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->system) != 0)
			continue;
		if (sub && strcmp(sub, call->system) != 0)
			continue;
		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(call, set);
	}
	mutex_unlock(&event_mutex);
static int ftrace_set_clr_event(char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>.
	 */

	match = strsep(&buf, ":");

	if (!strlen(sub) || strcmp(sub, "*") == 0)
		sub = NULL;
	if (!strlen(event) || strcmp(event, "*") == 0)
		event = NULL;

	return __ftrace_set_clr_event(match, sub, event, set);
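/*
 * Illustrative examples (not part of the original file) of strings a user
 * might write to the "set_event" debugfs file created in event_trace_init()
 * below; "sched" and "sched_switch" are just example subsystem/event names:
 *
 *	sched:sched_switch	enable one event in the "sched" subsystem
 *	sched:			enable every event in the "sched" subsystem
 *	:sched_switch		enable any event named "sched_switch"
 *	sched_switch		match by event name or by subsystem name
 */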
/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * trace events.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	return __ftrace_set_clr_event(NULL, system, event, set);
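/*
 * Illustrative call (not part of the original file): another part of the
 * kernel could enable a single event with something like
 *
 *	err = trace_set_clr_event("sched", "sched_switch", 1);
 *
 * or pass NULL for @system and/or @event to widen the match, as the
 * comment above describes.  The names here are example names only.
 */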
/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	ret = tracing_update_buffers();

	ret = get_user(ch, ubuf++);

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);
	}

	/* Only white space found? */

	buf = kmalloc(EVENT_BUF_SIZE+1, GFP_KERNEL);

	if (cnt > EVENT_BUF_SIZE)
		cnt = EVENT_BUF_SIZE;

	while (cnt && !isspace(ch)) {
		ret = get_user(ch, ubuf++);
	}

	ret = ftrace_set_clr_event(buf, set);
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct list_head *list = m->private;
	struct ftrace_event_call *call;

	if (list == &ftrace_events)
		return NULL;

	call = list_entry(list, struct ftrace_event_call, list);

	/*
	 * The ftrace subsystem is for showing formats only.
	 * They cannot be enabled or disabled via the event files.
	 */

	m->private = list->next;

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call = NULL;

	mutex_lock(&event_mutex);

	m->private = ftrace_events.next;
	for (l = 0; l <= *pos; ) {
		call = t_next(m, NULL, &l);
static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct list_head *list = m->private;
	struct ftrace_event_call *call;

	if (list == &ftrace_events)
		return NULL;

	call = list_entry(list, struct ftrace_event_call, list);

	if (!call->enabled) {

	m->private = list->next;

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call = NULL;

	mutex_lock(&event_mutex);

	m->private = ftrace_events.next;
	for (l = 0; l <= *pos; ) {
		call = s_next(m, NULL, &l);
361 struct ftrace_event_call *call = v;
363 if (strcmp(call->system, TRACE_SYSTEM) != 0)
364 seq_printf(m, "%s:", call->system);
365 seq_printf(m, "%s\n", call->name);
370 static void t_stop(struct seq_file *m, void *p)
372 mutex_unlock(&event_mutex);
376 ftrace_event_seq_open(struct inode *inode, struct file *file)
378 const struct seq_operations *seq_ops;
380 if ((file->f_mode & FMODE_WRITE) &&
381 !(file->f_flags & O_APPEND))
382 ftrace_clear_events();
384 seq_ops = inode->i_private;
385 return seq_open(file, seq_ops);
static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;

	if (cnt >= sizeof(buf))
		return -EINVAL;
	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	ret = strict_strtoul(buf, 10, &val);

	ret = tracing_update_buffers();

	mutex_lock(&event_mutex);
	ftrace_event_enable_disable(call, val);
	mutex_unlock(&event_mutex);
static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	const char *system = filp->private_data;
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!call->name || !call->regfunc)
			continue;

		if (system && strcmp(call->system, system) != 0)
			continue;

		/*
		 * We need to find out if all the events are set,
		 * or if all the events are cleared, or if we have
		 * a mixture.
		 */
		set |= (1 << !!call->enabled);
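		/*
		 * !!call->enabled is 0 or 1, so this ORs in 1 for a
		 * disabled event and 2 for an enabled one; the final
		 * value 1, 2 or 3 then indexes set_to_char[] above as
		 * '0' (all off), '1' (all on) or 'X' (mixed).
		 */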
		/*
		 * If we have a mixture, no need to look further.
		 */
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	const char *system = filp->private_data;

	if (cnt >= sizeof(buf))
		return -EINVAL;
	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	ret = strict_strtoul(buf, 10, &val);

	ret = tracing_update_buffers();

	if (val != 0 && val != 1)
		return -EINVAL;

	ret = __ftrace_set_clr_event(NULL, system, NULL, val);
extern char *__bad_type_size(void);

#define FIELD(type, name)						\
	sizeof(type) != sizeof(field.name) ? __bad_type_size() :	\
	#type, "common_" #name, offsetof(typeof(field), name),		\
		sizeof(field.name)
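/*
 * Note on FIELD(): the size comparison is a compile-time constant, so the
 * __bad_type_size() branch is normally optimized away.  If the declared
 * type ever stops matching the real struct member, the call survives and,
 * since __bad_type_size() is never defined, the build fails at link time.
 */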
static int trace_write_header(struct trace_seq *s)
{
	struct trace_entry field;

	/* struct trace_entry */
	return trace_seq_printf(s,
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				FIELD(unsigned short, type),
				FIELD(unsigned char, flags),
				FIELD(unsigned char, preempt_count),
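/*
 * Illustrative output (not from the original file): the common header that
 * trace_write_header() emits at the top of every event's "format" file
 * looks roughly like
 *
 *	field:unsigned short common_type;	offset:0;	size:2;
 *	field:unsigned char common_flags;	offset:2;	size:1;
 *	field:unsigned char common_preempt_count;	offset:3;	size:1;
 *
 * with the exact offsets and sizes depending on the architecture and on
 * the layout of struct trace_entry in this kernel version.
 */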
static ssize_t
event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;

	s = kmalloc(sizeof(*s), GFP_KERNEL);

	/* If any of the first writes fail, so will the show_format. */

	trace_seq_printf(s, "name: %s\n", call->name);
	trace_seq_printf(s, "ID: %d\n", call->id);
	trace_seq_printf(s, "format:\n");
	trace_write_header(s);

	r = call->show_format(s);

	/*
	 * ug! The format output is bigger than a PAGE!!
	 */
	buf = "FORMAT TOO BIG\n";
	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    buf, strlen(buf));

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;

	s = kmalloc(sizeof(*s), GFP_KERNEL);

	trace_seq_printf(s, "%d\n", call->id);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;

	s = kmalloc(sizeof(*s), GFP_KERNEL);

	print_event_filter(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}

	err = apply_event_filter(call, buf);
	free_page((unsigned long) buf);

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;

	s = kmalloc(sizeof(*s), GFP_KERNEL);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}

	err = apply_subsystem_event_filter(system, buf);
	free_page((unsigned long) buf);
static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;

	s = kmalloc(sizeof(*s), GFP_KERNEL);

	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
static const struct seq_operations show_event_seq_ops = {

static const struct seq_operations show_set_event_seq_ops = {

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.release = seq_release,

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.write = ftrace_event_write,
	.release = seq_release,

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,

static const struct file_operations ftrace_event_format_fops = {
	.open = tracing_open_generic,
	.read = event_format_read,

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = tracing_open_generic,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,

static const struct file_operations ftrace_system_enable_fops = {
	.open = tracing_open_generic,
	.read = system_enable_read,
	.write = system_enable_write,

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
static struct dentry *event_trace_events_dir(void)
{
	static struct dentry *d_tracer;
	static struct dentry *d_events;

	d_tracer = tracing_init_dentry();

	d_events = debugfs_create_dir("events", d_tracer);
	if (!d_events)
		pr_warning("Could not create debugfs "
			   "'events' directory\n");

static LIST_HEAD(event_subsystems);
static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we already created this dir */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0)
			return system->entry;
	}

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system) {
		pr_warning("No memory to create event subsystem %s\n",
			   name);

	system->entry = debugfs_create_dir(name, d_events);
	if (!system->entry) {
		pr_warning("Could not create event subsystem %s\n",
			   name);

	system->name = kstrdup(name, GFP_KERNEL);
	if (!system->name) {
		debugfs_remove(system->entry);

	list_add(&system->list, &event_subsystems);

	system->filter = NULL;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter) {
		pr_warning("Could not allocate filter for subsystem "
			   "'%s'\n", name);
		return system->entry;
	}

	entry = debugfs_create_file("filter", 0644, system->entry, system,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs "
			   "'%s/filter' entry\n", name);
	}

	entry = trace_create_file("enable", 0644, system->entry,
				  (void *)system->name,
				  &ftrace_system_enable_fops);

	return system->entry;
static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
{
	struct dentry *entry;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system will be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		d_events = event_subsystem_dir(call->system, d_events);

	if (call->raw_init) {
		ret = call->raw_init();
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);

	call->dir = debugfs_create_dir(call->name, d_events);
	if (!call->dir) {
		pr_warning("Could not create debugfs "
			   "'%s' directory\n", call->name);

	entry = trace_create_file("enable", 0644, call->dir, call,
				  enable);

	entry = trace_create_file("id", 0444, call->dir, call,
				  id);

	if (call->define_fields) {
		ret = call->define_fields();
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);

		entry = trace_create_file("filter", 0644, call->dir, call,
					  filter);
	}

	/* A trace may not want to export its format */
	if (!call->show_format)
		return 0;

	entry = trace_create_file("format", 0444, call->dir, call,
				  format);
#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)

#ifdef CONFIG_MODULES
static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
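/*
 * Setting .owner on the copied file_operations (see below) makes the VFS
 * take a reference on the module whenever one of its event files is
 * opened, so the module cannot be unloaded while those files are in use.
 */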
struct ftrace_module_file_ops {
	struct list_head		list;
	struct module			*mod;
	struct file_operations		id;
	struct file_operations		enable;
	struct file_operations		format;
	struct file_operations		filter;
};
static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;

	/*
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
	 */

	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);

	file_ops->mod = mod;

	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;

	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;

	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;

	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;

	list_add(&file_ops->list, &ftrace_module_file_list);
static void trace_module_add_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call *call, *start, *end;
	struct dentry *d_events;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	d_events = event_trace_events_dir();

	for_each_event(call, start, end) {
		/* The linker may leave blanks */

		/*
		 * This module has events, create file ops for this module
		 * if not already done.
		 */
		if (!file_ops)
			file_ops = trace_create_file_ops(mod);

		list_add(&call->list, &ftrace_events);
		event_create_dir(call, d_events,
				 &file_ops->id, &file_ops->enable,
				 &file_ops->filter, &file_ops->format);
static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;

	down_write(&trace_event_mutex);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			ftrace_event_enable_disable(call, 0);
			__unregister_ftrace_event(call->event);
			debugfs_remove_recursive(call->dir);
			list_del(&call->list);
			trace_destroy_fields(call);
			destroy_preds(call);
		}
	}

	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			break;
	}
	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);
	}

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events.
	 */
	tracing_reset_current_online_cpus();
	up_write(&trace_event_mutex);
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);

#else
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
#endif /* CONFIG_MODULES */

struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];

static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;

static __init int setup_trace_event(char *str)
{
	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
	ring_buffer_expanded = 1;
	tracing_selftest_disabled = 1;

__setup("trace_event=", setup_trace_event);
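/*
 * Illustrative usage (not from the original file): events can be enabled
 * at boot with a comma-separated list on the kernel command line, e.g.
 *
 *	trace_event=sched:sched_switch,irq:irq_handler_entry
 *
 * which event_trace_init() below splits on ',' and hands to
 * ftrace_set_clr_event().  The event names here are examples only.
 */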
static __init int event_trace_init(void)
{
	struct ftrace_event_call *call;
	struct dentry *d_tracer;
	struct dentry *entry;
	struct dentry *d_events;
	char *buf = bootup_event_buf;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_event' entry\n");

	d_events = event_trace_events_dir();

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	trace_create_file("enable", 0644, d_events,
			  NULL, &ftrace_system_enable_fops);

	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
		/* The linker may leave blanks */

		list_add(&call->list, &ftrace_events);
		event_create_dir(call, d_events, &ftrace_event_id_fops,
				 &ftrace_enable_fops, &ftrace_event_filter_fops,
				 &ftrace_event_format_fops);
	}

	token = strsep(&buf, ",");

	ret = ftrace_set_clr_event(token, 1);
	if (ret)
		pr_warning("Failed to enable trace event: %s\n", token);

	ret = register_module_notifier(&trace_module_nb);
	if (ret)
		pr_warning("Failed to register trace events module notifier\n");

fs_initcall(event_trace_init);
#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);

	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);

	mutex_unlock(&test_mutex);

static __init int event_test_thread(void *unused)
{
	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop())
		schedule();

/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");

	kthread_stop(test_thread);
/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
	struct ftrace_event_call *call;
	struct event_subsystem *system;

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(call, &ftrace_events, list) {
		/* Only test those that have a regfunc */
		if (!call->regfunc)
			continue;

		pr_info("Testing event %s: ", call->name);

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (call->enabled) {
			pr_warning("Enabled event during self test!\n");
			continue;
		}

		ftrace_event_enable_disable(call, 1);

		ftrace_event_enable_disable(call, 0);
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(system, &event_subsystems, list) {
		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 1);
		if (WARN_ON_ONCE(ret)) {
			pr_warning("error enabling system %s\n",
				   system->name);
			continue;
		}

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0);
		if (WARN_ON_ONCE(ret))
			pr_warning("error disabling system %s\n",
				   system->name);
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 1);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error enabling all events\n");
		return;
	}

	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 0);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error disabling all events\n");
		return;
	}
#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, test_event_disable);

static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
{
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;
	unsigned long flags;

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(test_event_disable, cpu));

	local_save_flags(flags);

	event = trace_current_buffer_lock_reserve(TRACE_FN, sizeof(*entry),
						  flags, pc);

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	trace_nowake_buffer_unlock_commit(event, flags, pc);

	atomic_dec(&per_cpu(test_event_disable, cpu));
	ftrace_preempt_enable(resched);
static struct ftrace_ops trace_ops __initdata =
{
	.func = function_test_events_call,
};

static __init void event_trace_self_test_with_function(void)
{
	register_ftrace_function(&trace_ops);
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}

#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif /* CONFIG_FUNCTION_TRACER */

static __init int event_trace_self_tests_init(void)
{
	if (!tracing_selftest_disabled) {
		event_trace_self_tests();
		event_trace_self_test_with_function();
	}

late_initcall(event_trace_self_tests_init);

#endif /* CONFIG_FTRACE_STARTUP_TEST */