/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"

#define FTRACE_WARN_ON(cond)			\
	do {					\
		if (WARN_ON(cond))		\
			ftrace_kill();		\
	} while (0)

#define FTRACE_WARN_ON_ONCE(cond)		\
	do {					\
		if (WARN_ON_ONCE(cond))		\
			ftrace_kill();		\
	} while (0)

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* ftrace_pid_trace >= 0 will only trace threads with this pid */
static int ftrace_pid_trace = -1;

/* Quick disabling of function tracer. */
int function_trace_stop;

/* By default, current tracing type is normal tracing. */
enum ftrace_tracing_type_t ftrace_tracing_type = FTRACE_TYPE_ENTER;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);
static DEFINE_MUTEX(ftrace_start_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;

static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
{
	if (current->pid != ftrace_pid_trace)
		return;

	ftrace_pid_function(ip, parent_ip);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
	/* do not set ftrace_pid_function to itself! */
	if (func != ftrace_pid_func)
		ftrace_pid_function = func;
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be lag between this call and the
 * moment every CPU stops entering the old function.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}

#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test ftrace_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	if (function_trace_stop)
		return;

	__ftrace_trace_function(ip, parent_ip);
}
#endif

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		ftrace_func_t func;

		if (ops->next == &ftrace_list_end)
			func = ops->func;
		else
			func = ftrace_list_func;

		if (ftrace_pid_trace >= 0) {
			set_ftrace_pid_function(func);
			func = ftrace_pid_func;
		}

		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
		ftrace_trace_function = func;
#else
		__ftrace_trace_function = func;
		ftrace_trace_function = ftrace_test_stop_func;
#endif
	}

	spin_unlock(&ftrace_lock);

	return 0;
}

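/*
 * Note added for clarity (not in the original source): the smp_wmb()
 * above pairs with the read_barrier_depends() calls in
 * ftrace_list_func(). The ordering guarantees:
 *
 *	writer (this function)		reader (ftrace_list_func)
 *	----------------------		-------------------------
 *	ops->next = ftrace_list;	op = ftrace_list;
 *	smp_wmb();			read_barrier_depends();
 *	ftrace_list = ops;		op->func(ip, parent_ip);
 *
 * so a reader that observes the new list head is guaranteed to see an
 * initialized ops->next, which keeps the unlocked list walk safe.
 */
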
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list->next == &ftrace_list_end) {
			ftrace_func_t func = ftrace_list->func;

			if (ftrace_pid_trace >= 0) {
				set_ftrace_pid_function(func);
				func = ftrace_pid_func;
			}
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
			ftrace_trace_function = func;
#else
			__ftrace_trace_function = func;
#endif
		}
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}

static void ftrace_update_pid_func(void)
{
	ftrace_func_t func;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	if (ftrace_trace_function == ftrace_stub)
		goto out;

	func = ftrace_trace_function;

	if (ftrace_pid_trace >= 0) {
		set_ftrace_pid_function(func);
		func = ftrace_pid_func;
	} else {
		if (func != ftrace_pid_func)
			goto out;

		set_ftrace_pid_function(func);
	}

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	ftrace_trace_function = func;
#else
	__ftrace_trace_function = func;
#endif

 out:
	spin_unlock(&ftrace_lock);
}

#ifdef CONFIG_DYNAMIC_FTRACE
#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

/*
 * Since MCOUNT_ADDR may point to mcount itself, we do not want
 * to get it confused by reading a reference in the code as we
 * are parsing the objcopy output of the text section. Use a
 * variable for it instead.
 */
static unsigned long mcount_addr = MCOUNT_ADDR;

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
	FTRACE_START_FUNC_RET		= (1 << 5),
	FTRACE_STOP_FUNC_RET		= (1 << 6),
};

static int ftrace_filtered;

static LIST_HEAD(ftrace_new_addrs);

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	unsigned long		index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

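/*
 * Worked example (added for illustration; the exact sizes are arch-
 * and config-dependent): with a 4096-byte page, a 16-byte struct
 * ftrace_page header and a 32-byte struct dyn_ftrace,
 * ENTRIES_PER_PAGE is (4096 - 16) / 32 = 127 records per page.
 */
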
/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;

#ifdef CONFIG_KPROBES

static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)			({ 0; })
# define unfreeze_record(rec)			({ 0; })
# define record_frozen(rec)			({ 0; })
#endif /* CONFIG_KPROBES */

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;
	int i;

	if (ftrace_disabled || !start)
		return;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if ((rec->ip >= s) && (rec->ip < e))
				ftrace_free_rec(rec);
		}
	}
	spin_unlock(&ftrace_lock);
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

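/*
 * Sketch of the free list used above (added for clarity, not from the
 * original source): freed records are chained through their own ip
 * field, so the list costs no extra memory:
 *
 *	ftrace_free_records --> recA.ip --> recB.ip --> NULL
 *
 * ftrace_alloc_dyn_node() pops the head, sanity-checks FTRACE_FL_FREE,
 * and recycles the record after wiping it with memset().
 */
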
static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;

	list_add(&rec->list, &ftrace_new_addrs);

	return rec;
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

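/*
 * Example of the resulting console output (hypothetical address and
 * opcode bytes, shown only to illustrate the format; MCOUNT_INSN_SIZE
 * is 5 on x86):
 *
 *	ftrace failed to modify [<c01055a3>] mutex_lock+0x3/0x20
 *	 actual: 55:48:89:e5:c3
 */
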
static void ftrace_bug(int failed, unsigned long ip)
{
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
}

static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ip, fl;
	unsigned long ftrace_addr;

	ip = rec->ip;

	ftrace_addr = (unsigned long)ftrace_caller;

	/*
	 * If this record is not to be traced and
	 * it is not enabled then do nothing.
	 *
	 * If this record is not to be traced and
	 * it is enabled then disable it.
	 */
	if (rec->flags & FTRACE_FL_NOTRACE) {
		if (rec->flags & FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
			return 0;

	} else if (ftrace_filtered && enable) {
		/*
		 * Filtering is on:
		 */
		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		/* Record is filtered and enabled, do nothing */
		if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
			return 0;

		/* Record is not filtered and is not enabled, do nothing */
		if (!fl)
			return 0;

		/* Record is not filtered but enabled, disable it */
		if (fl == FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
			/* Otherwise record is filtered but not enabled, enable it */
			rec->flags |= FTRACE_FL_ENABLED;
	} else {
		/* Disable or not filtered */
		if (enable) {
			/* if record is enabled, do nothing */
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;

			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			/* if record is not enabled do nothing */
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;

			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	if (rec->flags & FTRACE_FL_ENABLED)
		return ftrace_make_call(rec, ftrace_addr);
	else
		return ftrace_make_nop(NULL, rec, ftrace_addr);
}

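/*
 * Decision table for the enable && ftrace_filtered branch above (added
 * as a summary, not from the original source):
 *
 *	FILTER	ENABLED		action taken
 *	------	-------		------------
 *	  1	   1		nothing (already correct)
 *	  0	   0		nothing (already correct)
 *	  0	   1		clear ENABLED, patch site to nop
 *	  1	   0		set ENABLED, patch site to call
 *
 * The final FTRACE_FL_ENABLED state selects between ftrace_make_call()
 * and ftrace_make_nop() on the record's mcount site.
 */
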
static void ftrace_replace_code(int enable)
{
	int i, failed;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/*
			 * Skip over free records and records that have
			 * failed.
			 */
			if (rec->flags & FTRACE_FL_FREE ||
			    rec->flags & FTRACE_FL_FAILED)
				continue;

			/* ignore updates to this record's mcount site */
			if (get_kprobe((void *)rec->ip)) {
				freeze_record(rec);
				continue;
			} else {
				unfreeze_record(rec);
			}

			failed = __ftrace_replace_code(rec, enable);
			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
				rec->flags |= FTRACE_FL_FAILED;
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(rec->ip)) {
					ftrace_free_rec(rec);
				} else
					ftrace_bug(failed, rec->ip);
			}
		}
	}
}

static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
	int ret;

	ret = ftrace_make_nop(mod, rec, mcount_addr);
	if (ret) {
		ftrace_bug(ret, rec->ip);
		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_START_FUNC_RET)
		ftrace_enable_ftrace_graph_caller();
	else if (*command & FTRACE_STOP_FUNC_RET)
		ftrace_disable_ftrace_graph_caller();

	return 0;
}

static void ftrace_run_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}

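/*
 * Illustrative call (not from the original source): several updates
 * can be folded into one stop_machine() pass by OR-ing command bits,
 * e.g.
 *
 *	ftrace_run_update_code(FTRACE_ENABLE_CALLS |
 *			       FTRACE_UPDATE_TRACE_FUNC);
 *
 * stop_machine() runs __ftrace_modify_code() while all other CPUs
 * spin, so the live text patching cannot race with execution.
 */
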
static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;

static void ftrace_startup_enable(int command)
{
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

static void ftrace_startup(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up++;
	command |= FTRACE_ENABLE_CALLS;

	ftrace_startup_enable(command);

	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up--;
	if (!ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}

static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int ftrace_update_code(struct module *mod)
{
	struct dyn_ftrace *p, *t;
	cycle_t start, stop;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {

		/* If something went wrong, bail without enabling anything */
		if (unlikely(ftrace_disabled))
			return -1;

		list_del_init(&p->list);

		/* convert record (i.e, patch mcount-call with NOP) */
		if (ftrace_code_disable(mod, p)) {
			p->flags |= FTRACE_FL_CONVERTED;
			ftrace_update_cnt++;
		} else
			ftrace_free_rec(p);
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}

static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld entries in %d pages\n",
		num_to_init, cnt + 1);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t			pos;
	struct ftrace_page	*pg;
	unsigned		idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}
	spin_unlock(&ftrace_lock);

	iter->pos = *pos;

	return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos > iter->pos)
		*pos = iter->pos;

	l = *pos;
	p = t_next(m, p, &l);

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];
	int ret = 0;

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	ret = seq_printf(m, "%s\n", str);
	if (ret < 0) {
		iter->pos--;
		iter->idx--;
	}

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = -1;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}

static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~type;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = -1;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}

enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

static void
ftrace_match(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= flag;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}

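/*
 * Examples of how a written pattern maps to a match type (added for
 * illustration; the function names are only examples):
 *
 *	"sched_switch"	MATCH_FULL		exact symbol name
 *	"sched_*"	MATCH_FRONT_ONLY	prefix match
 *	"*_switch"	MATCH_END_ONLY		suffix match
 *	"*sched*"	MATCH_MIDDLE_ONLY	substring match
 *
 * Note that ftrace_match() writes a NUL over the trailing '*', so the
 * buffer passed in must be writable.
 */
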
static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}

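/*
 * Usage sketch for the API above (illustrative; not a call that
 * appears in this file). The buffer must be writable because
 * ftrace_match() may NUL-terminate it at a wildcard:
 *
 *	static unsigned char pat[] = "sched_*";
 *
 *	ftrace_set_filter(pat, sizeof(pat) - 1, 1);
 *
 * Passing NULL with reset set clears the filter instead, so every
 * function is traced again.
 */
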
/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}

static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftrace_start_lock);
	if (ftrace_start_up && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftrace_start_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}

static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};

static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
{
	struct dentry *entry;

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

	return 0;
}

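/*
 * Typical use of the files created above, from user space (shown as an
 * illustration; the paths assume debugfs is mounted on /sys/kernel/debug
 * and that these files live in its tracing directory):
 *
 *	cat  available_filter_functions
 *	echo 'sched_*'  > set_ftrace_filter
 *	echo sys_read   > set_ftrace_notrace
 */
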
static int ftrace_convert_nops(struct module *mod,
			       unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	mutex_lock(&ftrace_start_lock);
	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
		if (!addr)
			continue;
		ftrace_record_ip(addr);
	}

	/* disable interrupts to prevent kstop machine */
	local_irq_save(flags);
	ftrace_update_code(mod);
	local_irq_restore(flags);
	mutex_unlock(&ftrace_start_lock);

	return 0;
}

void ftrace_init_module(struct module *mod,
			unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(mod, start, end);
}

extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(NULL,
				  __start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}

#else

static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
device_initcall(ftrace_nodyn_init);

static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
/* Keep as macros so we do not need to define the commands */
# define ftrace_startup(command)	do { } while (0)
# define ftrace_shutdown(command)	do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

static ssize_t
ftrace_pid_read(struct file *file, char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	if (ftrace_pid_trace >= 0)
		r = sprintf(buf, "%u\n", ftrace_pid_trace);
	else
		r = sprintf(buf, "no pid\n");

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
		 size_t cnt, loff_t *ppos)
{
	char buf[64];
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	mutex_lock(&ftrace_start_lock);
	if (val < 0) {
		/* disable pid tracing */
		if (ftrace_pid_trace < 0)
			goto out;
		ftrace_pid_trace = -1;

	} else {

		if (ftrace_pid_trace == val)
			goto out;

		ftrace_pid_trace = val;
	}

	/* update the function call */
	ftrace_update_pid_func();
	ftrace_startup_enable(0);

 out:
	mutex_unlock(&ftrace_start_lock);

	return cnt;
}

static struct file_operations ftrace_pid_fops = {
	.read = ftrace_pid_read,
	.write = ftrace_pid_write,
};

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	ftrace_init_dyn_debugfs(d_tracer);

	entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
				    NULL, &ftrace_pid_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_pid' entry\n");
	return 0;
}
fs_initcall(ftrace_init_debugfs);

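/*
 * Illustration of the set_ftrace_pid interface (added for clarity):
 * writing a pid limits function tracing to that task, writing -1
 * removes the limit. The pid 1234 is of course only an example:
 *
 *	echo 1234 > /sys/kernel/debug/tracing/set_ftrace_pid
 *	echo -1   > /sys/kernel/debug/tracing/set_ftrace_pid
 */
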
/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way. If you need to simply kill ftrace
 * from a non-atomic section, use ftrace_kill.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);

	if (ftrace_tracing_type == FTRACE_TYPE_RETURN) {
		ret = -EBUSY;
		goto out;
	}

	ret = __register_ftrace_function(ops);
	ftrace_startup(0);

out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}

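/*
 * Minimal usage sketch (illustrative only; my_callback, my_ops and
 * my_hit_count are hypothetical names, not part of this file). The
 * callback receives the traced function in ip and its call site in
 * parent_ip, and must be notrace, exactly as the comment above warns:
 *
 *	static unsigned long my_hit_count;
 *
 *	static void notrace my_callback(unsigned long ip,
 *					unsigned long parent_ip)
 *	{
 *		my_hit_count++;
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func = my_callback,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 */
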
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown(0);
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static atomic_t ftrace_graph_active;

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
			(trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry =
			(trace_func_graph_ent_t)ftrace_stub;

/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	unsigned long flags;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
					* sizeof(struct ftrace_ret_stack),
					GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			t->ret_stack = ret_stack_list[start++];
			t->curr_ret_stack = -1;
			atomic_set(&t->trace_overrun, 0);
		}
	} while_each_thread(g, t);

unlock:
	read_unlock_irqrestore(&tasklist_lock, flags);
free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}

/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret;

	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
				sizeof(struct ftrace_ret_stack *),
				GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	kfree(ret_stack_list);
	return ret;
}

int register_ftrace_graph(trace_func_graph_ret_t retfunc,
			  trace_func_graph_ent_t entryfunc)
{
	int ret = 0;

	mutex_lock(&ftrace_sysctl_lock);

	/*
	 * Don't launch return tracing if normal function
	 * tracing is already running.
	 */
	if (ftrace_trace_function != ftrace_stub) {
		ret = -EBUSY;
		goto out;
	}

	atomic_inc(&ftrace_graph_active);
	ret = start_graph_tracing();
	if (ret) {
		atomic_dec(&ftrace_graph_active);
		goto out;
	}

	ftrace_tracing_type = FTRACE_TYPE_RETURN;
	ftrace_graph_return = retfunc;
	ftrace_graph_entry = entryfunc;

	ftrace_startup(FTRACE_START_FUNC_RET);

out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}

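/*
 * Usage sketch (illustrative; the callback names are hypothetical and
 * their prototypes are the trace_func_graph_ent_t and
 * trace_func_graph_ret_t typedefs from <linux/ftrace.h>):
 *
 *	register_ftrace_graph(my_return_cb, my_entry_cb);
 *	...
 *	unregister_ftrace_graph();
 *
 * Registration fails with -EBUSY while the plain function tracer is
 * running, since both tracers share the same mcount call sites.
 */
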
void unregister_ftrace_graph(void)
{
	mutex_lock(&ftrace_sysctl_lock);

	atomic_dec(&ftrace_graph_active);
	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
	ftrace_graph_entry = (trace_func_graph_ent_t)ftrace_stub;
	ftrace_shutdown(FTRACE_STOP_FUNC_RET);
	/* Restore normal tracing type */
	ftrace_tracing_type = FTRACE_TYPE_ENTER;

	mutex_unlock(&ftrace_sysctl_lock);
}

/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	if (atomic_read(&ftrace_graph_active)) {
		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
				* sizeof(struct ftrace_ret_stack),
				GFP_KERNEL);
		if (!t->ret_stack)
			return;
		t->curr_ret_stack = -1;
		atomic_set(&t->trace_overrun, 0);
	} else
		t->ret_stack = NULL;
}

void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack	*ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */