/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"
#define FTRACE_WARN_ON(cond)			\
	do { if (WARN_ON(cond)) ftrace_kill(); } while (0)

#define FTRACE_WARN_ON_ONCE(cond)		\
	do { if (WARN_ON_ONCE(cond)) ftrace_kill(); } while (0)
/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;
static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	};
}
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be lag before all callers stop
 * using the old function.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}
static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
		if (ops->next == &ftrace_list_end)
			ftrace_trace_function = ops->func;
		else
			ftrace_trace_function = ftrace_list_func;
	}

	spin_unlock(&ftrace_lock);

	return 0;
}
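/*
 * Example usage (an illustrative sketch only; "my_trace_func" and
 * "my_ops" are hypothetical names, not part of this file). A tracer
 * hooks every traced function by registering an ftrace_ops:
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip)
 *	{
 *		... record ip and parent_ip somewhere safe ...
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly =
 *	{
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 */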
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list == &ftrace_list_end ||
		    ftrace_list->next == &ftrace_list_end)
			ftrace_trace_function = ftrace_list->func;
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}
#ifdef CONFIG_DYNAMIC_FTRACE
#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

/*
 * Since MCOUNT_ADDR may point to mcount itself, we do not want
 * to get it confused by reading a reference in the code as we
 * are parsing on objcopy output of text. Use a variable for
 * it instead.
 */
static unsigned long mcount_addr = MCOUNT_ADDR;
enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
};
static int ftrace_filtered;
static int tracing_on;
static int frozen_record_count;

static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_MUTEX(ftrace_regex_lock);
struct ftrace_page {
	struct ftrace_page	*next;
	unsigned long		index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
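/*
 * Worked example (sizes are illustrative assumptions, not taken from
 * this file): with a 4096-byte page, a 16-byte struct ftrace_page
 * header, and 16-byte dyn_ftrace records, ENTRIES_PER_PAGE works out
 * to (4096 - 16) / 16 = 255 records per page.
 */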
/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static int ftrace_record_suspend;

static struct dyn_ftrace *ftrace_free_records;
#ifdef CONFIG_KPROBES
static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)			({ 0; })
# define unfreeze_record(rec)			({ 0; })
# define record_frozen(rec)			({ 0; })
#endif /* CONFIG_KPROBES */
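/*
 * A record stays frozen while a kprobe owns its mcount call site:
 * the site's text is left alone, and skip_trace() below reports such
 * addresses so callers can ignore hits there until the kprobe is
 * unregistered.
 */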
int skip_trace(unsigned long ip)
{
	unsigned long fl;
	struct dyn_ftrace *rec;
	struct hlist_node *t;
	struct hlist_head *head;

	if (frozen_record_count == 0)
		return 0;

	head = &ftrace_hash[hash_long(ip, FTRACE_HASHBITS)];
	hlist_for_each_entry_rcu(rec, t, head, node) {
		if (rec->ip == ip) {
			if (record_frozen(rec)) {
				if (rec->flags & FTRACE_FL_FAILED)
					return 1;

				if (!(rec->flags & FTRACE_FL_CONVERTED))
					return 1;

				if (!tracing_on || !ftrace_enabled)
					return 1;

				if (ftrace_filtered) {
					fl = rec->flags & (FTRACE_FL_FILTER |
							   FTRACE_FL_NOTRACE);
					if (!fl || (fl & FTRACE_FL_NOTRACE))
						return 1;
				}
			}
			break;
		}
	}

	return 0;
}
static inline int
ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
	struct dyn_ftrace *p;
	struct hlist_node *t;

	hlist_for_each_entry_rcu(p, t, &ftrace_hash[key], node)
		if (p->ip == ip)
			return 1;

	return 0;
}

static inline void
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
	hlist_add_head_rcu(&node->node, &ftrace_hash[key]);
}
/* called from kstop_machine */
static inline void ftrace_del_hash(struct dyn_ftrace *node)
{
	hlist_del(&node->node);
}

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}
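/*
 * Note the free-list trick in ftrace_free_rec(): a freed record's ip
 * field is reused as the "next" pointer of the free list, so chaining
 * freed entries needs no extra storage. ftrace_alloc_dyn_node() below
 * unwinds the same encoding when it recycles a record.
 */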
void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;
	int i;

	if (ftrace_disabled || !start)
		return;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if ((rec->ip >= s) && (rec->ip < e))
				ftrace_free_rec(rec);
		}
	}
	spin_unlock(&ftrace_lock);

	ftrace_release_hash(s, e);
}
static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next)
			return NULL;
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}
static void
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *node;
	unsigned long key;
	int resched;
	int cpu;

	if (!ftrace_enabled || ftrace_disabled)
		return;

	resched = need_resched();
	preempt_disable_notrace();

	/*
	 * We simply need to protect against recursion.
	 * Use the raw version of smp_processor_id and not
	 * __get_cpu_var which can call debug hooks that can
	 * cause a recursive crash here.
	 */
	cpu = raw_smp_processor_id();
	per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
	if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
		goto out;

	if (unlikely(ftrace_record_suspend))
		goto out;

	key = hash_long(ip, FTRACE_HASHBITS);

	FTRACE_WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

	if (ftrace_ip_in_hash(ip, key))
		goto out;

	/* This ip may have hit the hash before the lock */
	if (ftrace_ip_in_hash(ip, key))
		goto out;

	node = ftrace_alloc_dyn_node(ip);
	if (!node)
		goto out;

	node->ip = ip;

	ftrace_add_hash(node, key);

 out:
	per_cpu(ftrace_shutdown_disable_cpu, cpu)--;

	/* prevent recursion with scheduler */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}
#define FTRACE_ADDR ((long)(ftrace_caller))

static int
__ftrace_replace_code(struct dyn_ftrace *rec,
		      unsigned char *old, unsigned char *new, int enable)
{
	unsigned long ip, fl;

	ip = rec->ip;

	if (ftrace_filtered && enable) {
		/*
		 * If filtering is on:
		 *
		 * If this record is set to be filtered and
		 * is enabled then do nothing.
		 *
		 * If this record is set to be filtered and
		 * it is not enabled, enable it.
		 *
		 * If this record is not set to be filtered
		 * and it is not enabled do nothing.
		 *
		 * If this record is set not to trace then
		 * do nothing.
		 *
		 * If this record is set not to trace and
		 * it is enabled then disable it.
		 *
		 * If this record is not set to be filtered and
		 * it is enabled, disable it.
		 */
		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
				   FTRACE_FL_ENABLED);

		if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
		    (fl == (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
		    !fl || (fl == FTRACE_FL_NOTRACE))
			return 0;

		/*
		 * If it is enabled disable it,
		 * otherwise enable it!
		 */
		if (fl & FTRACE_FL_ENABLED) {
			/* swap new and old */
			new = old;
			old = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags &= ~FTRACE_FL_ENABLED;
		} else {
			new = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags |= FTRACE_FL_ENABLED;
		}
	} else {
		if (enable) {
			/*
			 * If this record is set not to trace and is
			 * not enabled, do nothing.
			 */
			fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
			if (fl == FTRACE_FL_NOTRACE)
				return 0;

			new = ftrace_call_replace(ip, FTRACE_ADDR);
		} else
			old = ftrace_call_replace(ip, FTRACE_ADDR);

		if (enable) {
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;
			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;
			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	return ftrace_modify_code(ip, old, new);
}
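/*
 * Summary of the filtered-enable cases handled above
 * (F = FTRACE_FL_FILTER, N = FTRACE_FL_NOTRACE, E = FTRACE_FL_ENABLED):
 *
 *	F E	- already enabled, nothing to do
 *	F N	- nothing to do
 *	none	- nothing to do
 *	N	- nothing to do
 *	F	- enable the call site
 *	E	- disable the call site
 *	N E	- disable the call site
 *	F N E	- disable the call site
 */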
static void ftrace_replace_code(int enable)
{
	int i, failed;
	unsigned char *new = NULL, *old = NULL;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	if (enable)
		old = ftrace_nop_replace();
	else
		new = ftrace_nop_replace();

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/* don't modify code that has already faulted */
			if (rec->flags & FTRACE_FL_FAILED)
				continue;

			/* ignore updates to this record's mcount site */
			if (get_kprobe((void *)rec->ip)) {
				freeze_record(rec);
				continue;
			} else {
				unfreeze_record(rec);
			}

			failed = __ftrace_replace_code(rec, old, new, enable);
			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
				rec->flags |= FTRACE_FL_FAILED;
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(rec->ip)) {
					ftrace_del_hash(rec);
					ftrace_free_rec(rec);
				}
			}
		}
	}
}
static void ftrace_shutdown_replenish(void)
{
	if (ftrace_pages->next)
		return;

	/* allocate another page */
	ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}
static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}
static int
ftrace_code_disable(struct dyn_ftrace *rec)
{
	unsigned long ip;
	unsigned char *nop, *call;
	int ret;

	ip = rec->ip;

	nop = ftrace_nop_replace();
	call = ftrace_call_replace(ip, mcount_addr);

	ret = ftrace_modify_code(ip, call, nop);
	if (ret) {
		switch (ret) {
		case -EFAULT:
			FTRACE_WARN_ON_ONCE(1);
			pr_info("ftrace faulted on modifying ");
			print_ip_sym(ip);
			break;
		case -EINVAL:
			FTRACE_WARN_ON_ONCE(1);
			pr_info("ftrace failed to modify ");
			print_ip_sym(ip);
			print_ip_ins(" expected: ", call);
			print_ip_ins(" actual: ", (unsigned char *)ip);
			print_ip_ins(" replace: ", nop);
			printk(KERN_CONT "\n");
			break;
		case -EPERM:
			FTRACE_WARN_ON_ONCE(1);
			pr_info("ftrace faulted on writing ");
			print_ip_sym(ip);
			break;
		default:
			FTRACE_WARN_ON_ONCE(1);
			pr_info("ftrace faulted on unknown error ");
			print_ip_sym(ip);
		}

		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}
static int ftrace_update_code(void *ignore);

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS) {
		/*
		 * Update any recorded ips now that we have the
		 * machine stopped
		 */
		ftrace_update_code(NULL);
		ftrace_replace_code(1);
		tracing_on = 1;
	} else if (*command & FTRACE_DISABLE_CALLS) {
		ftrace_replace_code(0);
		tracing_on = 0;
	}

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	return 0;
}

static void ftrace_run_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}
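/*
 * stop_machine() runs __ftrace_modify_code() with all other CPUs
 * quiesced, so the mcount call sites can be rewritten without another
 * CPU executing a half-patched instruction.
 */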
static ftrace_func_t saved_ftrace_func;
static int ftrace_start;
static DEFINE_MUTEX(ftrace_start_lock);

static void ftrace_startup(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start++;
	if (ftrace_start == 1)
		command |= FTRACE_ENABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftrace_start_lock);
}
static void ftrace_shutdown(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start--;
	if (!ftrace_start)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftrace_start_lock);
}
static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start is true if we want ftrace running */
	if (ftrace_start)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}
static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* ftrace_start is true if ftrace is running */
	if (ftrace_start)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}
static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;
static int ftrace_update_code(void *ignore)
{
	int i, save_ftrace_enabled;
	cycle_t start, stop;
	struct dyn_ftrace *p;
	struct hlist_node *t, *n;
	struct hlist_head *head, temp_list;

	/* Don't be recording funcs now */
	ftrace_record_suspend++;
	save_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	/* No locks needed, the machine is stopped! */
	for (i = 0; i < FTRACE_HASHSIZE; i++) {
		INIT_HLIST_HEAD(&temp_list);
		head = &ftrace_hash[i];

		/* all CPUS are stopped, we are safe to modify code */
		hlist_for_each_entry_safe(p, t, n, head, node) {
			/* Skip over failed records which have not been
			 * freed. */
			if (p->flags & FTRACE_FL_FAILED)
				continue;

			/* Unconverted records are always at the head of the
			 * hash bucket. Once we encounter a converted record,
			 * simply skip over to the next bucket. Saves ftraced
			 * some processor cycles (ftrace does its bid for
			 * global warming :-p ). */
			if (p->flags & (FTRACE_FL_CONVERTED))
				break;

			/* Ignore updates to this record's mcount site.
			 * Reintroduce this record at the head of this
			 * bucket to attempt to "convert" it again if
			 * the kprobe on it is unregistered before the
			 * next run. */
			if (get_kprobe((void *)p->ip)) {
				ftrace_del_hash(p);
				INIT_HLIST_NODE(&p->node);
				hlist_add_head(&p->node, &temp_list);
				freeze_record(p);
				continue;
			} else {
				unfreeze_record(p);
			}

			/* convert record (i.e, patch mcount-call with NOP) */
			if (ftrace_code_disable(p)) {
				p->flags |= FTRACE_FL_CONVERTED;
				ftrace_update_cnt++;
			} else {
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(p->ip)) {
					ftrace_del_hash(p);
					ftrace_free_rec(p);
				}
			}
		}

		hlist_for_each_entry_safe(p, t, n, &temp_list, node) {
			hlist_del(&p->node);
			INIT_HLIST_NODE(&p->node);
			hlist_add_head(&p->node, head);
		}
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	ftrace_enabled = save_ftrace_enabled;
	ftrace_record_suspend--;

	return 0;
}
static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */
	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld hash entries in %d pages\n",
		num_to_init, cnt);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}
enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t			pos;
	struct ftrace_page	*pg;
	unsigned		idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}
	spin_unlock(&ftrace_lock);

	iter->pos = *pos;

	return rec;
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos != iter->pos) {
		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
			;
	} else {
		l = *pos;
		p = t_next(m, p, &l);
	}

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}
static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};
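/*
 * t_start/t_next/t_stop/t_show implement the seq_file iterator
 * protocol: the seq_file core calls t_start() once per read pass,
 * t_next() to advance one record at a time, and t_show() to emit
 * one resolved symbol name per line.
 */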
static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = -1;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}
int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}
static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}
static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~type;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}
static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = -1;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}
static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}
static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}
static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}
enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

static void
ftrace_match(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= flag;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}
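/*
 * Wildcard forms accepted by the parser above, using "sched" purely
 * as an example pattern:
 *
 *	"sched"		- MATCH_FULL: the symbol is exactly "sched"
 *	"sched*"	- MATCH_FRONT_ONLY: starts with "sched"
 *	"*sched"	- MATCH_END_ONLY: ends with "sched"
 *	"*sched*"	- MATCH_MIDDLE_ONLY: contains "sched"
 */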
static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}
static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}
static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}
/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}
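/*
 * Example (illustrative only, no in-tree caller implied): trace just
 * the schedule() function, clearing any previously set filter:
 *
 *	ftrace_set_filter((unsigned char *)"schedule", 8, 1);
 */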
/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}
static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftrace_start_lock);
	if (iter->filtered && ftrace_start && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftrace_start_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);

	return 0;
}
static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}
static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};
static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

	return 0;
}

fs_initcall(ftrace_init_debugfs);
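/*
 * The files created above are driven from user space, e.g. (assuming
 * debugfs is mounted at /sys/kernel/debug):
 *
 *	cat /sys/kernel/debug/tracing/available_filter_functions
 *	echo 'sched*' > /sys/kernel/debug/tracing/set_ftrace_filter
 */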
static int ftrace_convert_nops(unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/* should not be called from interrupt context */
		spin_lock(&ftrace_lock);
		ftrace_record_ip(addr);
		spin_unlock(&ftrace_lock);
		ftrace_shutdown_replenish();
	}

	/* p is ignored */
	local_irq_save(flags);
	ftrace_update_code(p);
	local_irq_restore(flags);

	return 0;
}

void ftrace_init_module(unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(start, end);
}
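/*
 * __start_mcount_loc/__stop_mcount_loc delimit the __mcount_loc
 * section that the build records for CONFIG_FTRACE_MCOUNT_RECORD:
 * each entry is the address of one mcount call site in the kernel
 * image.
 */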
extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(__start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}
#else
# define ftrace_startup()		do { } while (0)
# define ftrace_shutdown()		do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */
/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way. If you need to simply stop ftrace
 * from a non-atomic section, unregister your ftrace_ops instead.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}
/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __register_ftrace_function(ops);
	ftrace_startup();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}
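/*
 * This handler backs the kernel.ftrace_enabled sysctl (registered in
 * kernel/sysctl.c); from user space, tracing can be toggled with,
 * e.g.:
 *
 *	echo 0 > /proc/sys/kernel/ftrace_enabled
 */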