/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>

#include <trace/events/sched.h>

#include <asm/setup.h>

#include "trace_output.h"
#include "trace_stat.h"
#define FTRACE_WARN_ON(cond)						\
	({ int ___r = !!(cond); if (WARN_ON(___r)) ftrace_kill(); ___r; })

#define FTRACE_WARN_ON_ONCE(cond)					\
	({ int ___r = !!(cond); if (WARN_ON_ONCE(___r)) ftrace_kill(); ___r; })
/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12

/* ftrace_enabled turns the function tracer on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Quick disabling of the function tracer. */
int function_trace_stop;
/* List for set_ftrace_pid's pids. */
LIST_HEAD(ftrace_pids);
struct ftrace_pid {
	struct list_head list;
	struct pid *pid;
};
/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_MUTEX(ftrace_lock);

static struct ftrace_ops ftrace_list_end __read_mostly = {
	.func		= ftrace_stub,
};

static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
static ftrace_func_t __ftrace_trace_function_delay __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
static struct ftrace_ops global_ops;

static void
ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
/*
 * Traverse the ftrace_global_list, invoking all entries. The reason that we
 * can use rcu_dereference_raw() is that elements removed from this list
 * are simply leaked, so there is no need to interact with a grace-period
 * mechanism. The rcu_dereference_raw() calls are needed to handle
 * concurrent insertions into the ftrace_global_list.
 *
 * Silly Alpha and silly pointer-speculation compiler optimizations!
 */
static void ftrace_global_list_func(unsigned long ip,
				    unsigned long parent_ip)
{
	struct ftrace_ops *op;

	if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT)))
		return;

	trace_recursion_set(TRACE_GLOBAL_BIT);
	op = rcu_dereference_raw(ftrace_global_list); /* see above */
	while (op != &ftrace_list_end) {
		op->func(ip, parent_ip);
		op = rcu_dereference_raw(op->next); /* see above */
	};
	trace_recursion_clear(TRACE_GLOBAL_BIT);
}
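
/*
 * Illustrative sketch (not compiled into this file; my_func_a/b and
 * the two ops are hypothetical): ops registered with the GLOBAL flag
 * are linked at the head of ftrace_global_list by add_ftrace_ops()
 * below, so the walk above invokes the most recently added op first.
 * Removed ops are leaked rather than freed, which is why no grace
 * period is needed here.
 */
#if 0
static void my_func_a(unsigned long ip, unsigned long parent_ip) { }
static void my_func_b(unsigned long ip, unsigned long parent_ip) { }

static struct ftrace_ops my_ops_a = {
	.func	= my_func_a,
	.flags	= FTRACE_OPS_FL_GLOBAL,
};
static struct ftrace_ops my_ops_b = {
	.func	= my_func_b,
	.flags	= FTRACE_OPS_FL_GLOBAL,
};

/* after both registrations the walk calls my_func_b, then my_func_a */
register_ftrace_function(&my_ops_a);
register_ftrace_function(&my_ops_b);
#endif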
static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
{
	if (!test_tsk_trace_trace(current))
		return;

	ftrace_pid_function(ip, parent_ip);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
	/* do not set ftrace_pid_function to itself! */
	if (func != ftrace_pid_func)
		ftrace_pid_function = func;
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be lag before the change takes effect.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function_delay = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}
#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test function_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	if (function_trace_stop)
		return;

	__ftrace_trace_function(ip, parent_ip);
}
#endif
static void update_global_ops(void)
{
	ftrace_func_t func;

	/*
	 * If there's only one function registered, then call that
	 * function directly. Otherwise, we need to iterate over the
	 * registered callers.
	 */
	if (ftrace_global_list == &ftrace_list_end ||
	    ftrace_global_list->next == &ftrace_list_end)
		func = ftrace_global_list->func;
	else
		func = ftrace_global_list_func;

	/* If we filter on pids, update to use the pid function */
	if (!list_empty(&ftrace_pids)) {
		set_ftrace_pid_function(func);
		func = ftrace_pid_func;
	}

	global_ops.func = func;
}
static void update_ftrace_function(void)
{
	ftrace_func_t func;

	update_global_ops();

	/*
	 * If we are at the end of the list and this ops is
	 * not dynamic, then have the mcount trampoline call
	 * the function directly.
	 */
	if (ftrace_ops_list == &ftrace_list_end ||
	    (ftrace_ops_list->next == &ftrace_list_end &&
	     !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC)))
		func = ftrace_ops_list->func;
	else
		func = ftrace_ops_list_func;

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	ftrace_trace_function = func;
#else
#ifdef CONFIG_DYNAMIC_FTRACE
	/* do not update till all functions have been modified */
	__ftrace_trace_function_delay = func;
#else
	__ftrace_trace_function = func;
#endif
	ftrace_trace_function = ftrace_test_stop_func;
#endif
}
static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
	ops->next = *list;

	/*
	 * We are entering ops into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the list.
	 */
	rcu_assign_pointer(*list, ops);
}

static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * the list to the ftrace_stub.
	 */
	if (*list == ops && ops->next == &ftrace_list_end) {
		*list = &ftrace_list_end;
		return 0;
	}

	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;
static int __register_ftrace_function(struct ftrace_ops *ops)
{
	if (FTRACE_WARN_ON(ops == &global_ops))
		return -EINVAL;

	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
		return -EBUSY;

	if (!core_kernel_data((unsigned long)ops))
		ops->flags |= FTRACE_OPS_FL_DYNAMIC;

	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
		int first = ftrace_global_list == &ftrace_list_end;
		add_ftrace_ops(&ftrace_global_list, ops);
		ops->flags |= FTRACE_OPS_FL_ENABLED;
		if (first)
			add_ftrace_ops(&ftrace_ops_list, &global_ops);
	} else
		add_ftrace_ops(&ftrace_ops_list, ops);

	if (ftrace_enabled)
		update_ftrace_function();

	return 0;
}
static void ftrace_sync(struct work_struct *work)
{
	/*
	 * This function is just a stub to implement a hard force
	 * of synchronize_sched(). This requires synchronizing
	 * tasks even in userspace and idle.
	 *
	 * Yes, function tracing is rude.
	 */
}
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
		return -EBUSY;

	if (FTRACE_WARN_ON(ops == &global_ops))
		return -EINVAL;

	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
		ret = remove_ftrace_ops(&ftrace_global_list, ops);
		if (!ret && ftrace_global_list == &ftrace_list_end)
			ret = remove_ftrace_ops(&ftrace_ops_list, &global_ops);
		ops->flags &= ~FTRACE_OPS_FL_ENABLED;
	} else
		ret = remove_ftrace_ops(&ftrace_ops_list, ops);

	if (ret < 0)
		return ret;

	update_ftrace_function();

	/*
	 * Dynamic ops may be freed, we must make sure that all
	 * callers are done before leaving this function.
	 *
	 * Again, normal synchronize_sched() is not good enough.
	 * We need to do a hard force of sched synchronization.
	 */
	if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
		schedule_on_each_cpu(ftrace_sync);

	return 0;
}
static void ftrace_update_pid_func(void)
{
	/* Only do something if we are tracing something */
	if (ftrace_trace_function == ftrace_stub)
		return;

	update_ftrace_function();
}
#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
	struct hlist_node		node;
	unsigned long			ip;
	unsigned long			counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	unsigned long long		time;
	unsigned long long		time_squared;
#endif
};

struct ftrace_profile_page {
	struct ftrace_profile_page	*next;
	unsigned long			index;
	struct ftrace_profile		records[];
};

struct ftrace_profile_stat {
	atomic_t			disabled;
	struct hlist_head		*hash;
	struct ftrace_profile_page	*pages;
	struct ftrace_profile_page	*start;
	struct tracer_stat		stat;
};

#define PROFILE_RECORDS_SIZE						\
	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE						\
	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))

static int ftrace_profile_bits __read_mostly;
static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_SIZE 1024 /* must be a power of 2 */
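
/*
 * Back-of-the-envelope sizing (illustrative; exact numbers depend on
 * the architecture and config): with 4096-byte pages and a profile
 * record of roughly 40 bytes (hlist_node + ip + counter + the two
 * graph-tracer accumulators), PROFILES_PER_PAGE works out to about
 * one hundred, so the ~20K functions mentioned below need a few
 * hundred pages per CPU, hashed into FTRACE_PROFILE_HASH_SIZE
 * buckets for lookup.
 */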
static void *
function_stat_next(void *v, int idx)
{
	struct ftrace_profile *rec = v;
	struct ftrace_profile_page *pg;

	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

	if ((void *)rec >= (void *)&pg->records[pg->index]) {
		pg = pg->next;
		if (!pg)
			return NULL;
		rec = &pg->records[0];
	}

	return rec;
}
static void *function_stat_start(struct tracer_stat *trace)
{
	struct ftrace_profile_stat *stat =
		container_of(trace, struct ftrace_profile_stat, stat);

	if (!stat || !stat->start)
		return NULL;

	return function_stat_next(&stat->start->records[0], 0);
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->time < b->time)
		return -1;
	if (a->time > b->time)
		return 1;
	return 0;
}
#else
/* without function graph, compare against the hit count */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->counter < b->counter)
		return -1;
	if (a->counter > b->counter)
		return 1;
	return 0;
}
#endif
static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_printf(m, "  Function                               "
		      "Hit    Time            Avg             s^2\n"
		      "  --------                               "
		      "---    ----            ---             ---\n");
#else
	seq_printf(m, "  Function                               Hit\n"
		      "  --------                               ---\n");
#endif
	return 0;
}
static int function_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_profile *rec = v;
	char str[KSYM_SYMBOL_LEN];
	int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	static struct trace_seq s;
	unsigned long long avg;
	unsigned long long stddev;
#endif
	mutex_lock(&ftrace_profile_lock);

	/* we raced with function_profile_reset() */
	if (unlikely(rec->counter == 0)) {
		ret = -EBUSY;
		goto out;
	}

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	avg = rec->time;
	do_div(avg, rec->counter);

	/* Sample variance (s^2) */
	if (rec->counter <= 1)
		stddev = 0;
	else {
		stddev = rec->time_squared - rec->counter * avg * avg;
		/*
		 * Divide only by 1000 for ns^2 -> us^2 conversion.
		 * trace_print_graph_duration will divide by 1000 again.
		 */
		do_div(stddev, (rec->counter - 1) * 1000);
	}
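
	/*
	 * Worked form of the computation above (sample variance):
	 *
	 *   s^2 = (sum(x_i^2) - n * avg^2) / (n - 1)
	 *
	 * with n = rec->counter, sum(x_i^2) = rec->time_squared and
	 * avg = rec->time / n, all in nanoseconds. The /1000 here plus
	 * the /1000 in trace_print_graph_duration turn ns^2 into us^2.
	 */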
	trace_seq_init(&s);
	trace_print_graph_duration(rec->time, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(avg, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(stddev, &s);
	trace_print_seq(m, &s);
#endif
	seq_putc(m, '\n');
out:
	mutex_unlock(&ftrace_profile_lock);

	return ret;
}
static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;

	pg = stat->pages = stat->start;

	while (pg) {
		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
		pg->index = 0;
		pg = pg->next;
	}

	memset(stat->hash, 0,
	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}
int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;
	int functions;
	int pages;
	int i;

	/* If we already allocated, do nothing */
	if (stat->pages)
		return 0;

	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
	if (!stat->pages)
		return -ENOMEM;

#ifdef CONFIG_DYNAMIC_FTRACE
	functions = ftrace_update_tot_cnt;
#else
	/*
	 * We do not know the number of functions that exist because
	 * dynamic tracing is what counts them. With past experience
	 * we have around 20K functions. That should be more than enough.
	 * It is highly unlikely we will execute every function in
	 * the kernel.
	 */
	functions = 20000;
#endif

	pg = stat->start = stat->pages;

	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);

	for (i = 1; i < pages; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
		if (!pg->next)
			goto out_free;
		pg = pg->next;
	}

	return 0;

 out_free:
	pg = stat->start;
	while (pg) {
		unsigned long tmp = (unsigned long)pg;

		pg = pg->next;
		free_page(tmp);
	}

	stat->pages = NULL;
	stat->start = NULL;
	return -ENOMEM;
}
static int ftrace_profile_init_cpu(int cpu)
{
	struct ftrace_profile_stat *stat;
	int size;

	stat = &per_cpu(ftrace_profile_stats, cpu);

	if (stat->hash) {
		/* If the profile is already created, simply reset it */
		ftrace_profile_reset(stat);
		return 0;
	}

	/*
	 * We are profiling all functions, but usually only a few thousand
	 * functions are hit. We'll make a hash of 1024 items.
	 */
	size = FTRACE_PROFILE_HASH_SIZE;

	stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
	if (!stat->hash)
		return -ENOMEM;

	if (!ftrace_profile_bits) {
		size--;
		for (; size; size >>= 1)
			ftrace_profile_bits++;
	}

	/* Preallocate the function profiling pages */
	if (ftrace_profile_pages_init(stat) < 0) {
		kfree(stat->hash);
		stat->hash = NULL;
		return -ENOMEM;
	}

	return 0;
}
static int ftrace_profile_init(void)
{
	int cpu;
	int ret = 0;

	for_each_possible_cpu(cpu) {
		ret = ftrace_profile_init_cpu(cpu);
		if (ret)
			break;
	}

	return ret;
}
/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec;
	struct hlist_head *hhd;
	struct hlist_node *n;
	unsigned long key;

	key = hash_long(ip, ftrace_profile_bits);
	hhd = &stat->hash[key];

	if (hlist_empty(hhd))
		return NULL;

	hlist_for_each_entry_rcu(rec, n, hhd, node) {
		if (rec->ip == ip)
			return rec;
	}

	return NULL;
}

static void ftrace_add_profile(struct ftrace_profile_stat *stat,
			       struct ftrace_profile *rec)
{
	unsigned long key;

	key = hash_long(rec->ip, ftrace_profile_bits);
	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}
/*
 * The memory is already allocated; this simply finds a new record to use.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec = NULL;

	/* prevent recursion (from NMIs) */
	if (atomic_inc_return(&stat->disabled) != 1)
		goto out;

	/*
	 * Try to find the function again since an NMI
	 * could have added it
	 */
	rec = ftrace_find_profiled_func(stat, ip);
	if (rec)
		goto out;

	if (stat->pages->index == PROFILES_PER_PAGE) {
		if (!stat->pages->next)
			goto out;
		stat->pages = stat->pages->next;
	}

	rec = &stat->pages->records[stat->pages->index++];
	rec->ip = ip;
	ftrace_add_profile(stat, rec);

 out:
	atomic_dec(&stat->disabled);

	return rec;
}
static void
function_profile_call(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_profile_stat *stat;
	struct ftrace_profile *rec;
	unsigned long flags;

	if (!ftrace_profile_enabled)
		return;

	local_irq_save(flags);

	stat = &__get_cpu_var(ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	rec = ftrace_find_profiled_func(stat, ip);
	if (!rec) {
		rec = ftrace_profile_alloc(stat, ip);
		if (!rec)
			goto out;
	}

	rec->counter++;
 out:
	local_irq_restore(flags);
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
	function_profile_call(trace->func, 0);
	return 1;
}
static void profile_graph_return(struct ftrace_graph_ret *trace)
{
	struct ftrace_profile_stat *stat;
	unsigned long long calltime;
	struct ftrace_profile *rec;
	unsigned long flags;

	local_irq_save(flags);
	stat = &__get_cpu_var(ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	/* If the calltime was zero'd ignore it */
	if (!trace->calltime)
		goto out;

	calltime = trace->rettime - trace->calltime;

	if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
		int index;

		index = trace->depth;

		/* Append this call time to the parent time to subtract */
		if (index)
			current->ret_stack[index - 1].subtime += calltime;

		if (current->ret_stack[index].subtime < calltime)
			calltime -= current->ret_stack[index].subtime;
		else
			calltime = 0;
	}

	rec = ftrace_find_profiled_func(stat, trace->func);
	if (rec) {
		rec->time += calltime;
		rec->time_squared += calltime * calltime;
	}

 out:
	local_irq_restore(flags);
}
static int register_ftrace_profiler(void)
{
	return register_ftrace_graph(&profile_graph_return,
				     &profile_graph_entry);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_graph();
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
	.func		= function_profile_call,
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	val = !!val;

	mutex_lock(&ftrace_profile_lock);
	if (ftrace_profile_enabled ^ val) {
		if (val) {
			ret = ftrace_profile_init();
			if (!ret)
				ret = register_ftrace_profiler();
			if (!ret)
				ftrace_profile_enabled = 1;
		} else {
			ftrace_profile_enabled = 0;
			/*
			 * unregister_ftrace_profiler calls stop_machine
			 * so this acts like a synchronize_sched.
			 */
			unregister_ftrace_profiler();
		}
	}
	mutex_unlock(&ftrace_profile_lock);

	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}
static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	char buf[64];		/* big enough to hold a number */
	int r;

	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations ftrace_profile_fops = {
	.open		= tracing_open_generic,
	.read		= ftrace_profile_read,
	.write		= ftrace_profile_write,
	.llseek		= default_llseek,
};
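
/*
 * Typical use from user space (illustrative; the debugfs mount point
 * may differ on a given system):
 *
 *   # echo 1 > /sys/kernel/debug/tracing/function_profile_enabled
 *   # cat /sys/kernel/debug/tracing/trace_stat/function0
 *   # echo 0 > /sys/kernel/debug/tracing/function_profile_enabled
 */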
/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
	.name		= "functions",
	.stat_start	= function_stat_start,
	.stat_next	= function_stat_next,
	.stat_cmp	= function_stat_cmp,
	.stat_headers	= function_stat_headers,
	.stat_show	= function_stat_show
};

static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
	struct ftrace_profile_stat *stat;
	struct dentry *entry;
	char *name;
	int ret;
	int cpu;

	for_each_possible_cpu(cpu) {
		stat = &per_cpu(ftrace_profile_stats, cpu);

		/* allocate enough for function name + cpu number */
		name = kmalloc(32, GFP_KERNEL);
		if (!name) {
			/*
			 * The files created are permanent, if something happens
			 * we still do not free memory.
			 */
			WARN(1,
			     "Could not allocate stat file for cpu %d\n",
			     cpu);
			return;
		}
		stat->stat = function_stats;
		snprintf(name, 32, "function%d", cpu);
		stat->stat.name = name;
		ret = register_stat_tracer(&stat->stat);
		if (ret) {
			WARN(1,
			     "Could not register function stat for cpu %d\n",
			     cpu);
			kfree(name);
			return;
		}
	}

	entry = debugfs_create_file("function_profile_enabled", 0644,
				    d_tracer, NULL, &ftrace_profile_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'function_profile_enabled' entry\n");
}
#else /* CONFIG_FUNCTION_PROFILER */
static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */
static struct pid * const ftrace_swapper_pid = &init_struct_pid;

loff_t
ftrace_filter_lseek(struct file *file, loff_t offset, int whence)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, whence);
	else
		file->f_pos = ret = 1;

	return ret;
}
#ifdef CONFIG_DYNAMIC_FTRACE

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;

struct ftrace_func_probe {
	struct hlist_node	node;
	struct ftrace_probe_ops	*ops;
	unsigned long		flags;
	unsigned long		ip;
	void			*data;
	struct rcu_head		rcu;
};

enum {
	FTRACE_UPDATE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_START_FUNC_RET		= (1 << 3),
	FTRACE_STOP_FUNC_RET		= (1 << 4),
};

struct ftrace_func_entry {
	struct hlist_node hlist;
	unsigned long ip;
};

struct ftrace_hash {
	unsigned long		size_bits;
	struct hlist_head	*buckets;
	unsigned long		count;
	struct rcu_head		rcu;
};

/*
 * We make these constant because no one should touch them,
 * but they are used as the default "empty hash", to avoid allocating
 * it all the time. These are in a read only section such that if
 * anyone does try to modify it, it will cause an exception.
 */
static const struct hlist_head empty_buckets[1];
static const struct ftrace_hash empty_hash = {
	.buckets = (struct hlist_head *)empty_buckets,
};
#define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)
static struct ftrace_ops global_ops = {
	.func			= ftrace_stub,
	.notrace_hash		= EMPTY_HASH,
	.filter_hash		= EMPTY_HASH,
};

static struct dyn_ftrace *ftrace_new_addrs;

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	int			index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;
static bool ftrace_hash_empty(struct ftrace_hash *hash)
{
	return !hash || !hash->count;
}

static struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
	unsigned long key;
	struct ftrace_func_entry *entry;
	struct hlist_head *hhd;
	struct hlist_node *n;

	if (ftrace_hash_empty(hash))
		return NULL;

	if (hash->size_bits > 0)
		key = hash_long(ip, hash->size_bits);
	else
		key = 0;

	hhd = &hash->buckets[key];

	hlist_for_each_entry_rcu(entry, n, hhd, hlist) {
		if (entry->ip == ip)
			return entry;
	}
	return NULL;
}
static void __add_hash_entry(struct ftrace_hash *hash,
			     struct ftrace_func_entry *entry)
{
	struct hlist_head *hhd;
	unsigned long key;

	if (hash->size_bits)
		key = hash_long(entry->ip, hash->size_bits);
	else
		key = 0;

	hhd = &hash->buckets[key];
	hlist_add_head(&entry->hlist, hhd);
	hash->count++;
}

static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
{
	struct ftrace_func_entry *entry;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->ip = ip;
	__add_hash_entry(hash, entry);

	return 0;
}

static void
free_hash_entry(struct ftrace_hash *hash,
		struct ftrace_func_entry *entry)
{
	hlist_del(&entry->hlist);
	kfree(entry);
	hash->count--;
}

static void
remove_hash_entry(struct ftrace_hash *hash,
		  struct ftrace_func_entry *entry)
{
	hlist_del(&entry->hlist);
	hash->count--;
}
static void ftrace_hash_clear(struct ftrace_hash *hash)
{
	struct hlist_head *hhd;
	struct hlist_node *tp, *tn;
	struct ftrace_func_entry *entry;
	int size = 1 << hash->size_bits;
	int i;

	if (!hash->count)
		return;

	for (i = 0; i < size; i++) {
		hhd = &hash->buckets[i];
		hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist)
			free_hash_entry(hash, entry);
	}
	FTRACE_WARN_ON(hash->count);
}

static void free_ftrace_hash(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	ftrace_hash_clear(hash);
	kfree(hash->buckets);
	kfree(hash);
}

static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
{
	struct ftrace_hash *hash;

	hash = container_of(rcu, struct ftrace_hash, rcu);
	free_ftrace_hash(hash);
}

static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
}
static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
{
	struct ftrace_hash *hash;
	int size;

	hash = kzalloc(sizeof(*hash), GFP_KERNEL);
	if (!hash)
		return NULL;

	size = 1 << size_bits;
	hash->buckets = kzalloc(sizeof(*hash->buckets) * size, GFP_KERNEL);

	if (!hash->buckets) {
		kfree(hash);
		return NULL;
	}

	hash->size_bits = size_bits;

	return hash;
}

static struct ftrace_hash *
alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
{
	struct ftrace_func_entry *entry;
	struct ftrace_hash *new_hash;
	struct hlist_node *tp;
	int size;
	int ret;
	int i;

	new_hash = alloc_ftrace_hash(size_bits);
	if (!new_hash)
		return NULL;

	/* Empty hash? */
	if (ftrace_hash_empty(hash))
		return new_hash;

	size = 1 << hash->size_bits;
	for (i = 0; i < size; i++) {
		hlist_for_each_entry(entry, tp, &hash->buckets[i], hlist) {
			ret = add_hash_entry(new_hash, entry->ip);
			if (ret < 0)
				goto free_hash;
		}
	}

	FTRACE_WARN_ON(new_hash->count != hash->count);

	return new_hash;

 free_hash:
	free_ftrace_hash(new_hash);
	return NULL;
}
static void
ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
static void
ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);

static int
ftrace_hash_move(struct ftrace_ops *ops, int enable,
		 struct ftrace_hash **dst, struct ftrace_hash *src)
{
	struct ftrace_func_entry *entry;
	struct hlist_node *tp, *tn;
	struct hlist_head *hhd;
	struct ftrace_hash *old_hash;
	struct ftrace_hash *new_hash;
	unsigned long key;
	int size = src->count;
	int bits = 0;
	int ret;
	int i;

	/*
	 * Remove the current set, update the hash and add
	 * the new entries.
	 */
	ftrace_hash_rec_disable(ops, enable);

	/*
	 * If the new source is empty, just free dst and assign it
	 * the empty_hash.
	 */
	if (!src->count) {
		free_ftrace_hash_rcu(*dst);
		rcu_assign_pointer(*dst, EMPTY_HASH);
		/* still need to update the function records */
		ret = 0;
		goto out;
	}

	/*
	 * Make the hash size about 1/2 the # found
	 */
	for (size /= 2; size; size >>= 1)
		bits++;

	/* Don't allocate too much */
	if (bits > FTRACE_HASH_MAX_BITS)
		bits = FTRACE_HASH_MAX_BITS;

	ret = -ENOMEM;
	new_hash = alloc_ftrace_hash(bits);
	if (!new_hash)
		goto out;

	size = 1 << src->size_bits;
	for (i = 0; i < size; i++) {
		hhd = &src->buckets[i];
		hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) {
			if (bits > 0)
				key = hash_long(entry->ip, bits);
			else
				key = 0;
			remove_hash_entry(src, entry);
			__add_hash_entry(new_hash, entry);
		}
	}

	old_hash = *dst;
	rcu_assign_pointer(*dst, new_hash);
	free_ftrace_hash_rcu(old_hash);

	ret = 0;
 out:
	/*
	 * Enable regardless of ret:
	 * On success, we enable the new hash.
	 * On failure, we re-enable the original hash.
	 */
	ftrace_hash_rec_enable(ops, enable);

	return ret;
}
/*
 * Test the hashes for this ops to see if we want to call
 * the ops->func or not.
 *
 * It's a match if the ip is in the ops->filter_hash or
 * the filter_hash does not exist or is empty,
 *  AND
 * the ip is not in the ops->notrace_hash.
 *
 * This needs to be called with preemption disabled as
 * the hashes are freed with call_rcu_sched().
 */
static int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
{
	struct ftrace_hash *filter_hash;
	struct ftrace_hash *notrace_hash;
	int ret;

	filter_hash = rcu_dereference_raw(ops->filter_hash);
	notrace_hash = rcu_dereference_raw(ops->notrace_hash);

	if ((ftrace_hash_empty(filter_hash) ||
	     ftrace_lookup_ip(filter_hash, ip)) &&
	    (ftrace_hash_empty(notrace_hash) ||
	     !ftrace_lookup_ip(notrace_hash, ip)))
		ret = 1;
	else
		ret = 0;

	return ret;
}
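
/*
 * Illustrative decision table for the test above (F = ip is in
 * filter_hash, N = ip is in notrace_hash, "-" = hash empty):
 *
 *   filter   notrace   ->  ops->func called?
 *     -         -            yes
 *     F         -            yes
 *     -         N            no
 *     F         N            no
 *   not F       -            no
 */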
/*
 * This is a double for loop. Do not use 'break' to break out of the
 * loop; you must use a goto. See the usage sketch below.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}
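
/*
 * Usage sketch for the iterator pair (mirrors the real uses further
 * down in this file; target_ip and the label are hypothetical):
 */
#if 0
struct ftrace_page *pg;
struct dyn_ftrace *rec;

do_for_each_ftrace_rec(pg, rec) {
	if (rec->ip == target_ip)
		goto found;	/* a goto, never 'break' */
} while_for_each_ftrace_rec();
found:
	;
#endif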
static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
				     int filter_hash,
				     bool inc)
{
	struct ftrace_hash *hash;
	struct ftrace_hash *other_hash;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int count = 0;
	int all = 0;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return;

	/*
	 * In the filter_hash case:
	 *   If the count is zero, we update all records.
	 *   Otherwise we just update the items in the hash.
	 *
	 * In the notrace_hash case:
	 *   We enable the update in the hash.
	 *   As disabling notrace means enabling the tracing,
	 *   and enabling notrace means disabling, the inc variable
	 *   gets inverted.
	 */
	if (filter_hash) {
		hash = ops->filter_hash;
		other_hash = ops->notrace_hash;
		if (ftrace_hash_empty(hash))
			all = 1;
	} else {
		inc = !inc;
		hash = ops->notrace_hash;
		other_hash = ops->filter_hash;
		/*
		 * If the notrace hash has no items,
		 * then there's nothing to do.
		 */
		if (ftrace_hash_empty(hash))
			return;
	}

	do_for_each_ftrace_rec(pg, rec) {
		int in_other_hash = 0;
		int in_hash = 0;
		int match = 0;

		if (all) {
			/*
			 * Only the filter_hash affects all records.
			 * Update if the record is not in the notrace hash.
			 */
			if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
				match = 1;
		} else {
			in_hash = !!ftrace_lookup_ip(hash, rec->ip);
			in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);

			if (filter_hash && in_hash && !in_other_hash)
				match = 1;
			else if (!filter_hash && in_hash &&
				 (in_other_hash || ftrace_hash_empty(other_hash)))
				match = 1;
		}
		if (!match)
			continue;

		if (inc) {
			rec->flags++;
			if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX))
				return;
		} else {
			if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0))
				return;
			rec->flags--;
		}
		count++;
		/* Shortcut, if we handled all records, we are done. */
		if (!all && count == hash->count)
			return;
	} while_for_each_ftrace_rec();
}

static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
				    int filter_hash)
{
	__ftrace_hash_rec_update(ops, filter_hash, 0);
}

static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
				   int filter_hash)
{
	__ftrace_hash_rec_update(ops, filter_hash, 1);
}
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->freelist = ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = rec->freelist;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}
static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;
	rec->newlist = ftrace_new_addrs;
	ftrace_new_addrs = rec;

	return rec;
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}
static void ftrace_bug(int failed, unsigned long ip)
{
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
}
/* Return 1 if the address range is reserved for ftrace */
int ftrace_text_reserved(void *start, void *end)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	do_for_each_ftrace_rec(pg, rec) {
		if (rec->ip <= (unsigned long)end &&
		    rec->ip + MCOUNT_INSN_SIZE > (unsigned long)start)
			return 1;
	} while_for_each_ftrace_rec();

	return 0;
}
static int
__ftrace_replace_code(struct dyn_ftrace *rec, int update)
{
	unsigned long ftrace_addr;
	unsigned long flag = 0UL;

	ftrace_addr = (unsigned long)FTRACE_ADDR;

	/*
	 * If we are updating calls:
	 *
	 *   If the record has a ref count, then we need to enable it
	 *   because someone is using it.
	 *
	 *   Otherwise we make sure it is disabled.
	 *
	 * If we are disabling calls, then disable all records that
	 * are enabled.
	 */
	if (update && (rec->flags & ~FTRACE_FL_MASK))
		flag = FTRACE_FL_ENABLED;

	/* If the state of this record hasn't changed, then do nothing */
	if ((rec->flags & FTRACE_FL_ENABLED) == flag)
		return 0;

	if (flag) {
		rec->flags |= FTRACE_FL_ENABLED;
		return ftrace_make_call(rec, ftrace_addr);
	}

	rec->flags &= ~FTRACE_FL_ENABLED;
	return ftrace_make_nop(NULL, rec, ftrace_addr);
}
static void ftrace_replace_code(int update)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int failed;

	if (unlikely(ftrace_disabled))
		return;

	do_for_each_ftrace_rec(pg, rec) {
		/* Skip over free records */
		if (rec->flags & FTRACE_FL_FREE)
			continue;

		failed = __ftrace_replace_code(rec, update);
		if (failed) {
			ftrace_bug(failed, rec->ip);
			/* Stop processing */
			return;
		}
	} while_for_each_ftrace_rec();
}
static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long ip;
	int ret;

	ip = rec->ip;

	if (unlikely(ftrace_disabled))
		return 0;

	ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
	if (ret) {
		ftrace_bug(ret, ip);
		return 0;
	}
	return 1;
}

/*
 * archs can override this function if they must do something
 * before the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_prepare(void)
{
	return 0;
}

/*
 * archs can override this function if they must do something
 * after the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_post_process(void)
{
	return 0;
}
static int __ftrace_modify_code(void *data)
{
	int *command = data;

	/*
	 * Do not call function tracer while we update the code.
	 * We are in stop machine, no worrying about races.
	 */
	function_trace_stop++;

	if (*command & FTRACE_UPDATE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_START_FUNC_RET)
		ftrace_enable_ftrace_graph_caller();
	else if (*command & FTRACE_STOP_FUNC_RET)
		ftrace_disable_ftrace_graph_caller();

#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	/*
	 * For archs that call ftrace_test_stop_func(), we must
	 * wait till after we update all the function callers
	 * before we update the callback. This keeps different
	 * ops that record different functions from corrupting
	 * each other.
	 */
	__ftrace_trace_function = __ftrace_trace_function_delay;
#endif
	function_trace_stop--;

	return 0;
}
static void ftrace_run_update_code(int command)
{
	int ret;

	ret = ftrace_arch_code_modify_prepare();
	FTRACE_WARN_ON(ret);
	if (ret)
		return;

	stop_machine(__ftrace_modify_code, &command, NULL);

	ret = ftrace_arch_code_modify_post_process();
	FTRACE_WARN_ON(ret);
}

static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;
static int global_start_up;

static void ftrace_startup_enable(int command)
{
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}
static int ftrace_startup(struct ftrace_ops *ops, int command)
{
	bool hash_enable = true;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	ret = __register_ftrace_function(ops);
	if (ret)
		return ret;

	ftrace_start_up++;
	command |= FTRACE_UPDATE_CALLS;

	/* ops marked global share the filter hashes */
	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
		ops = &global_ops;
		/* Don't update hash if global is already set */
		if (global_start_up)
			hash_enable = false;
		global_start_up++;
	}

	ops->flags |= FTRACE_OPS_FL_ENABLED;
	if (hash_enable)
		ftrace_hash_rec_enable(ops, 1);

	ftrace_startup_enable(command);

	return 0;
}
static int ftrace_shutdown(struct ftrace_ops *ops, int command)
{
	bool hash_disable = true;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	ret = __unregister_ftrace_function(ops);
	if (ret)
		return ret;

	ftrace_start_up--;
	/*
	 * Just warn in case of unbalance; no need to kill ftrace, it's not
	 * critical, but the ftrace_call callers may never be nopped again
	 * after further ftrace uses.
	 */
	WARN_ON_ONCE(ftrace_start_up < 0);

	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
		ops = &global_ops;
		global_start_up--;
		WARN_ON_ONCE(global_start_up < 0);
		/* Don't update hash if global still has users */
		if (global_start_up) {
			WARN_ON_ONCE(!ftrace_start_up);
			hash_disable = false;
		}
	}

	if (hash_disable)
		ftrace_hash_rec_disable(ops, 1);

	if (ops != &global_ops || !global_start_up)
		ops->flags &= ~FTRACE_OPS_FL_ENABLED;

	command |= FTRACE_UPDATE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return 0;

	ftrace_run_update_code(command);
	return 0;
}
static void ftrace_startup_sysctl(void)
{
	if (unlikely(ftrace_disabled))
		return;

	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up)
		ftrace_run_update_code(FTRACE_UPDATE_CALLS);
}

static void ftrace_shutdown_sysctl(void)
{
	if (unlikely(ftrace_disabled))
		return;

	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up)
		ftrace_run_update_code(FTRACE_DISABLE_CALLS);
}

static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;
static inline int ops_traces_mod(struct ftrace_ops *ops)
{
	/*
	 * An empty filter_hash defaults to tracing the module.
	 * But the notrace hash requires a test of individual
	 * module functions.
	 */
	return ftrace_hash_empty(ops->filter_hash) &&
		ftrace_hash_empty(ops->notrace_hash);
}

/*
 * Check if the current ops references the record.
 *
 * If the ops traces all functions, then it was already accounted for.
 * If the ops does not trace the current record function, skip it.
 * If the ops ignores the function via notrace filter, skip it.
 */
static inline bool
ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	/* If ops isn't enabled, ignore it */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return false;

	/* If ops traces all mods, we already accounted for it */
	if (ops_traces_mod(ops))
		return false;

	/* The function must be in the filter */
	if (!ftrace_hash_empty(ops->filter_hash) &&
	    !ftrace_lookup_ip(ops->filter_hash, rec->ip))
		return false;

	/* If in notrace hash, we ignore it too */
	if (ftrace_lookup_ip(ops->notrace_hash, rec->ip))
		return false;

	return true;
}

static int referenced_filters(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;
	int cnt = 0;

	for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
		if (ops_references_rec(ops, rec))
			cnt++;
	}

	return cnt;
}
static int ftrace_update_code(struct module *mod)
{
	struct dyn_ftrace *p;
	cycle_t start, stop;
	unsigned long ref = 0;
	bool test = false;

	/*
	 * When adding a module, we need to check if tracers are
	 * currently enabled and if they are set to trace all functions.
	 * If they are, we need to enable the module functions as well
	 * as update the reference counts for those function records.
	 */
	if (mod) {
		struct ftrace_ops *ops;

		for (ops = ftrace_ops_list;
		     ops != &ftrace_list_end; ops = ops->next) {
			if (ops->flags & FTRACE_OPS_FL_ENABLED) {
				if (ops_traces_mod(ops))
					ref++;
				else
					test = true;
			}
		}
	}

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	while (ftrace_new_addrs) {
		int cnt = ref;

		/* If something went wrong, bail without enabling anything */
		if (unlikely(ftrace_disabled))
			return -1;

		p = ftrace_new_addrs;
		ftrace_new_addrs = p->newlist;
		if (test)
			cnt += referenced_filters(p);
		p->flags = (unsigned long)cnt;

		/*
		 * Do the initial record conversion from mcount jump
		 * to the NOP instructions.
		 */
		if (!ftrace_code_disable(mod, p)) {
			ftrace_free_rec(p);
			continue;
		}

		ftrace_update_cnt++;

		/*
		 * If the tracing is enabled, go ahead and enable the record.
		 *
		 * The reason not to enable the record immediately is the
		 * inherent check of ftrace_make_nop/ftrace_make_call for
		 * correct previous instructions. Making first the NOP
		 * conversion puts the module to the correct state, thus
		 * passing the ftrace_make_call check.
		 */
		if (ftrace_start_up && cnt) {
			int failed = __ftrace_replace_code(p, 1);
			if (failed)
				ftrace_bug(failed, p->ip);
		}
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}
static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */
	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld entries in %d pages\n",
		num_to_init, cnt + 1);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}
enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_NOTRACE	= (1 << 1),
	FTRACE_ITER_PRINTALL	= (1 << 2),
	FTRACE_ITER_HASH	= (1 << 3),
	FTRACE_ITER_ENABLED	= (1 << 4),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t				pos;
	loff_t				func_pos;
	struct ftrace_page		*pg;
	struct dyn_ftrace		*func;
	struct ftrace_func_probe	*probe;
	struct trace_parser		parser;
	struct ftrace_hash		*hash;
	struct ftrace_ops		*ops;
	int				hidx;
	int				idx;
	unsigned			flags;
};
static void *
t_hash_next(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct hlist_node *hnd = NULL;
	struct hlist_head *hhd;

	(*pos)++;
	iter->pos = *pos;

	if (iter->probe)
		hnd = &iter->probe->node;
 retry:
	if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
		return NULL;

	hhd = &ftrace_func_hash[iter->hidx];

	if (hlist_empty(hhd)) {
		iter->hidx++;
		hnd = NULL;
		goto retry;
	}

	if (!hnd)
		hnd = hhd->first;
	else {
		hnd = hnd->next;
		if (!hnd) {
			iter->hidx++;
			goto retry;
		}
	}

	if (WARN_ON_ONCE(!hnd))
		return NULL;

	iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);

	return iter;
}
static void *t_hash_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l;

	if (iter->func_pos > *pos)
		return NULL;

	iter->hidx = 0;
	for (l = 0; l <= (*pos - iter->func_pos); ) {
		p = t_hash_next(m, &l);
		if (!p)
			break;
	}
	if (!p)
		return NULL;

	/* Only set this if we have an item */
	iter->flags |= FTRACE_ITER_HASH;

	return iter;
}
static int
t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
{
	struct ftrace_func_probe *rec;

	rec = iter->probe;
	if (WARN_ON_ONCE(!rec))
		return -EIO;

	if (rec->ops->print)
		return rec->ops->print(m, rec->ip, rec->ops, rec->data);

	seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);

	if (rec->data)
		seq_printf(m, ":%p", rec->data);
	seq_putc(m, '\n');

	return 0;
}
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct ftrace_ops *ops = &global_ops;
	struct dyn_ftrace *rec = NULL;

	if (unlikely(ftrace_disabled))
		return NULL;

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_next(m, pos);

	(*pos)++;
	iter->pos = iter->func_pos = *pos;

	if (iter->flags & FTRACE_ITER_PRINTALL)
		return t_hash_start(m, pos);

 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||

		    ((iter->flags & FTRACE_ITER_ENABLED) &&
		     !(rec->flags & ~FTRACE_FL_MASK))) {

			rec = NULL;
			goto retry;
		}
	}

	if (!rec)
		return t_hash_start(m, pos);

	iter->func = rec;

	return iter;
}
static void reset_iter_read(struct ftrace_iterator *iter)
{
	iter->pos = 0;
	iter->func_pos = 0;
	iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct ftrace_ops *ops = &global_ops;
	void *p = NULL;
	loff_t l;

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled))
		return NULL;

	/*
	 * If an lseek was done, then reset and start from beginning.
	 */
	if (*pos < iter->pos)
		reset_iter_read(iter);

	/*
	 * For set_ftrace_filter reading, if we have the filter
	 * off, we can short cut and just print out that all
	 * functions are enabled.
	 */
	if (iter->flags & FTRACE_ITER_FILTER &&
	    ftrace_hash_empty(ops->filter_hash)) {
		if (*pos > 0)
			return t_hash_start(m, pos);
		iter->flags |= FTRACE_ITER_PRINTALL;
		/* reset in case of seek/pread */
		iter->flags &= ~FTRACE_ITER_HASH;
		return iter;
	}

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_start(m, pos);

	/*
	 * Unfortunately, we need to restart at ftrace_pages_start
	 * every time we let go of the ftrace_lock. This is because
	 * those pointers can change without the lock.
	 */
	iter->pg = ftrace_pages_start;
	iter->idx = 0;
	for (l = 0; l <= *pos; ) {
		p = t_next(m, p, &l);
		if (!p)
			break;
	}

	if (!p) {
		if (iter->flags & FTRACE_ITER_FILTER)
			return t_hash_start(m, pos);
		return NULL;
	}

	return iter;
}
static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&ftrace_lock);
}
static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec;

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_show(m, iter);

	if (iter->flags & FTRACE_ITER_PRINTALL) {
		seq_printf(m, "#### all functions enabled ####\n");
		return 0;
	}

	rec = iter->func;
	if (!rec)
		return 0;

	seq_printf(m, "%ps", (void *)rec->ip);
	if (iter->flags & FTRACE_ITER_ENABLED)
		seq_printf(m, " (%ld)",
			   rec->flags & ~FTRACE_FL_MASK);
	seq_printf(m, "\n");

	return 0;
}
static const struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

static int
ftrace_enabled_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->flags = FTRACE_ITER_ENABLED;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}
static void ftrace_filter_reset(struct ftrace_hash *hash)
{
	mutex_lock(&ftrace_lock);
	ftrace_hash_clear(hash);
	mutex_unlock(&ftrace_lock);
}
static int
ftrace_regex_open(struct ftrace_ops *ops, int flag,
		  struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	struct ftrace_hash *hash;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
		kfree(iter);
		return -ENOMEM;
	}

	if (flag & FTRACE_ITER_NOTRACE)
		hash = ops->notrace_hash;
	else
		hash = ops->filter_hash;

	iter->ops = ops;
	iter->flags = flag;

	if (file->f_mode & FMODE_WRITE) {
		mutex_lock(&ftrace_lock);
		iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
		mutex_unlock(&ftrace_lock);

		if (!iter->hash) {
			trace_parser_put(&iter->parser);
			kfree(iter);
			return -ENOMEM;
		}
	}

	mutex_lock(&ftrace_regex_lock);

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_filter_reset(iter->hash);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else {
			/* Failed */
			free_ftrace_hash(iter->hash);
			trace_parser_put(&iter->parser);
			kfree(iter);
		}
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}
static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&global_ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE,
				 inode, file);
}
static int ftrace_match(char *str, char *regex, int len, int type)
{
	int matched = 0;
	int slen;

	switch (type) {
	case MATCH_FULL:
		if (strcmp(str, regex) == 0)
			matched = 1;
		break;
	case MATCH_FRONT_ONLY:
		if (strncmp(str, regex, len) == 0)
			matched = 1;
		break;
	case MATCH_MIDDLE_ONLY:
		if (strstr(str, regex))
			matched = 1;
		break;
	case MATCH_END_ONLY:
		slen = strlen(str);
		if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
			matched = 1;
		break;
	}

	return matched;
}
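
/*
 * Examples of the four match types produced by filter_parse_regex()
 * (illustrative):
 *
 *   "sched_switch"  MATCH_FULL         only sched_switch
 *   "sched_*"       MATCH_FRONT_ONLY   sched_switch, sched_fork, ...
 *   "*sched*"       MATCH_MIDDLE_ONLY  anything containing "sched"
 *   "*_switch"      MATCH_END_ONLY     any function ending in "_switch"
 */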
static int
enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
{
	struct ftrace_func_entry *entry;
	int ret = 0;

	entry = ftrace_lookup_ip(hash, rec->ip);
	if (not) {
		/* Do nothing if it doesn't exist */
		if (!entry)
			return 0;

		free_hash_entry(hash, entry);
	} else {
		/* Do nothing if it exists */
		if (entry)
			return 0;

		ret = add_hash_entry(hash, rec->ip);
	}
	return ret;
}
static int
ftrace_match_record(struct dyn_ftrace *rec, char *mod,
		    char *regex, int len, int type)
{
	char str[KSYM_SYMBOL_LEN];
	char *modname;

	kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);

	if (mod) {
		/* module lookup requires matching the module */
		if (!modname || strcmp(modname, mod))
			return 0;

		/* blank search means to match all funcs in the mod */
		if (!len)
			return 1;
	}

	return ftrace_match(str, regex, len, type);
}
static int
match_records(struct ftrace_hash *hash, char *buff,
	      int len, char *mod, int not)
{
	unsigned search_len = 0;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	char *search = buff;
	int found = 0;
	int ret;

	if (len) {
		type = filter_parse_regex(buff, len, &search, &not);
		search_len = strlen(search);
	}

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled))
		goto out_unlock;

	do_for_each_ftrace_rec(pg, rec) {

		if (ftrace_match_record(rec, mod, search, search_len, type)) {
			ret = enter_record(hash, rec, not);
			if (ret < 0) {
				found = ret;
				goto out_unlock;
			}
			found = 1;
		}
	} while_for_each_ftrace_rec();
 out_unlock:
	mutex_unlock(&ftrace_lock);

	return found;
}

static int
ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
{
	return match_records(hash, buff, len, NULL, 0);
}
static int
ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
{
	int not = 0;

	/* blank or '*' mean the same */
	if (strcmp(buff, "*") == 0)
		buff[0] = 0;

	/* handle the case of 'dont filter this module' */
	if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
		buff[0] = 0;
		not = 1;
	}

	return match_records(hash, buff, strlen(buff), mod, not);
}
/*
 * We register the module command as a template to show others how
 * to register a command as well.
 */
static int
ftrace_mod_callback(struct ftrace_hash *hash,
		    char *func, char *cmd, char *param, int enable)
{
	char *mod;
	int ret = -EINVAL;

	/*
	 * cmd == 'mod' because we only registered this func
	 * for the 'mod' ftrace_func_command.
	 * But if you register one func with multiple commands,
	 * you can tell which command was used by the cmd
	 * parameter.
	 */

	/* we must have a module name */
	if (!param)
		return ret;

	mod = strsep(&param, ":");
	if (!strlen(mod))
		return ret;

	ret = ftrace_match_module_records(hash, func, mod);
	if (!ret)
		ret = -EINVAL;
	if (ret < 0)
		return ret;

	return 0;
}

static struct ftrace_func_command ftrace_mod_cmd = {
	.name			= "mod",
	.func			= ftrace_mod_callback,
};

static int __init ftrace_mod_cmd_init(void)
{
	return register_ftrace_command(&ftrace_mod_cmd);
}
device_initcall(ftrace_mod_cmd_init);
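
/*
 * Hedged sketch of a custom command modeled on ftrace_mod_cmd above
 * ("mycmd" and my_callback are hypothetical, not part of ftrace):
 */
#if 0
static int my_callback(struct ftrace_hash *hash,
		       char *func, char *cmd, char *param, int enable)
{
	/* invoked for: echo 'func:mycmd:param' > set_ftrace_filter */
	return 0;
}

static struct ftrace_func_command my_cmd = {
	.name	= "mycmd",
	.func	= my_callback,
};

static int __init my_cmd_init(void)
{
	return register_ftrace_command(&my_cmd);
}
device_initcall(my_cmd_init);
#endif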
static void
function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_func_probe *entry;
	struct hlist_head *hhd;
	struct hlist_node *n;
	unsigned long key;

	key = hash_long(ip, FTRACE_HASH_BITS);

	hhd = &ftrace_func_hash[key];

	if (hlist_empty(hhd))
		return;

	/*
	 * Disable preemption for these calls to prevent an RCU grace
	 * period. This syncs the hash iteration and freeing of items
	 * on the hash. rcu_read_lock is too dangerous here.
	 */
	preempt_disable_notrace();
	hlist_for_each_entry_rcu(entry, n, hhd, node) {
		if (entry->ip == ip)
			entry->ops->func(ip, parent_ip, &entry->data);
	}
	preempt_enable_notrace();
}
static struct ftrace_ops trace_probe_ops __read_mostly =
{
	.func		= function_trace_probe_call,
};

static int ftrace_probe_registered;

static void __enable_ftrace_function_probe(void)
{
	int ret;
	int i;

	if (ftrace_probe_registered)
		return;

	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];
		if (hhd->first)
			break;
	}
	/* Nothing registered? */
	if (i == FTRACE_FUNC_HASHSIZE)
		return;

	ret = ftrace_startup(&trace_probe_ops, 0);

	ftrace_probe_registered = 1;
}
static void __disable_ftrace_function_probe(void)
{
	int i;

	if (!ftrace_probe_registered)
		return;

	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];
		if (hhd->first)
			return;
	}

	/* no more funcs left */
	ftrace_shutdown(&trace_probe_ops, 0);

	ftrace_probe_registered = 0;
}
static void ftrace_free_entry_rcu(struct rcu_head *rhp)
{
	struct ftrace_func_probe *entry =
		container_of(rhp, struct ftrace_func_probe, rcu);

	if (entry->ops->free)
		entry->ops->free(&entry->data);
	kfree(entry);
}
int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
			       void *data)
{
	struct ftrace_func_probe *entry;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type, len, not;
	unsigned long key;
	int count = 0;
	char *search;

	type = filter_parse_regex(glob, strlen(glob), &search, &not);
	len = strlen(search);

	/* we do not support '!' for function probes */
	if (WARN_ON(not))
		return -EINVAL;

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled))
		goto out_unlock;

	do_for_each_ftrace_rec(pg, rec) {

		if (!ftrace_match_record(rec, NULL, search, len, type))
			continue;

		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			/* If we did not process any, then return error */
			if (!count)
				count = -ENOMEM;
			goto out_unlock;
		}

		count++;

		entry->data = data;

		/*
		 * The caller might want to do something special
		 * for each function we find. We call the callback
		 * to give the caller an opportunity to do so.
		 */
		if (ops->callback) {
			if (ops->callback(rec->ip, &entry->data) < 0) {
				/* caller does not like this func */
				kfree(entry);
				continue;
			}
		}

		entry->ops = ops;
		entry->ip = rec->ip;

		key = hash_long(entry->ip, FTRACE_HASH_BITS);
		hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);

	} while_for_each_ftrace_rec();
	__enable_ftrace_function_probe();

 out_unlock:
	mutex_unlock(&ftrace_lock);

	return count;
}
enum {
	PROBE_TEST_FUNC		= 1,
	PROBE_TEST_DATA		= 2
};

static void
__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				   void *data, int flags)
{
	struct ftrace_func_probe *entry;
	struct hlist_node *n, *tmp;
	char str[KSYM_SYMBOL_LEN];
	int type = MATCH_FULL;
	int i, len = 0;
	char *search;

	if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
		glob = NULL;
	else if (glob) {
		int not;

		type = filter_parse_regex(glob, strlen(glob), &search, &not);
		len = strlen(search);

		/* we do not support '!' for function probes */
		if (WARN_ON(not))
			return;
	}

	mutex_lock(&ftrace_lock);
	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];

		hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {

			/* break up if statements for readability */
			if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
				continue;

			if ((flags & PROBE_TEST_DATA) && entry->data != data)
				continue;

			/* do this last, since it is the most expensive */
			if (glob) {
				kallsyms_lookup(entry->ip, NULL, NULL,
						NULL, str);
				if (!ftrace_match(str, glob, len, type))
					continue;
			}

			hlist_del_rcu(&entry->node);
			call_rcu_sched(&entry->rcu, ftrace_free_entry_rcu);
		}
	}
	__disable_ftrace_function_probe();
	mutex_unlock(&ftrace_lock);
}
void
unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				 void *data)
{
	__unregister_ftrace_function_probe(glob, ops, data,
					   PROBE_TEST_FUNC | PROBE_TEST_DATA);
}

void
unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
{
	__unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
}

void unregister_ftrace_function_probe_all(char *glob)
{
	__unregister_ftrace_function_probe(glob, NULL, NULL, 0);
}
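
/*
 * Usage sketch (hypothetical caller, not part of this file): hook all
 * sched_* functions via the probe infrastructure above.
 */
#if 0
static void my_probe(unsigned long ip, unsigned long parent_ip,
		     void **data)
{
	/* runs from function_trace_probe_call() on every matching hit */
}

static struct ftrace_probe_ops my_probe_ops = {
	.func = my_probe,
};

/* arm it ... */
register_ftrace_function_probe("sched_*", &my_probe_ops, NULL);
/* ... and later tear it down */
unregister_ftrace_function_probe("sched_*", &my_probe_ops, NULL);
#endif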
static LIST_HEAD(ftrace_commands);
static DEFINE_MUTEX(ftrace_cmd_mutex);

int register_ftrace_command(struct ftrace_func_command *cmd)
{
	struct ftrace_func_command *p;
	int ret = 0;

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry(p, &ftrace_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = -EBUSY;
			goto out_unlock;
		}
	}
	list_add(&cmd->list, &ftrace_commands);
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}

int unregister_ftrace_command(struct ftrace_func_command *cmd)
{
	struct ftrace_func_command *p, *n;
	int ret = -ENODEV;

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry_safe(p, n, &ftrace_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = 0;
			list_del_init(&p->list);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}
static int ftrace_process_regex(struct ftrace_hash *hash,
				char *buff, int len, int enable)
{
	char *func, *command, *next = buff;
	struct ftrace_func_command *p;
	int ret = -EINVAL;

	func = strsep(&next, ":");

	if (!next) {
		ret = ftrace_match_records(hash, func, len);
		if (!ret)
			ret = -EINVAL;
		if (ret < 0)
			return ret;
		return 0;
	}

	/* command found */

	command = strsep(&next, ":");

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry(p, &ftrace_commands, list) {
		if (strcmp(p->name, command) == 0) {
			ret = p->func(hash, func, command, next, enable);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}
static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	struct trace_parser *parser;
	ssize_t ret, read;

	if (!cnt)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	ret = -ENODEV;
	if (unlikely(ftrace_disabled))
		goto out_unlock;

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	parser = &iter->parser;
	read = trace_get_user(parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded(parser) &&
	    !trace_parser_cont(parser)) {
		ret = ftrace_process_regex(iter->hash, parser->buffer,
					   parser->idx, enable);
		trace_parser_clear(parser);
		if (ret)
			goto out_unlock;
	}

	ret = read;
 out_unlock:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}
static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}
static void
ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
		 int reset, int enable)
{
	struct ftrace_hash **orig_hash;
	struct ftrace_hash *hash;
	int ret;

	/* All global ops use the global ops filters */
	if (ops->flags & FTRACE_OPS_FL_GLOBAL)
		ops = &global_ops;

	if (unlikely(ftrace_disabled))
		return;

	if (enable)
		orig_hash = &ops->filter_hash;
	else
		orig_hash = &ops->notrace_hash;

	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
	if (!hash)
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(hash);
	if (buf)
		ftrace_match_records(hash, buf, len);

	mutex_lock(&ftrace_lock);
	ret = ftrace_hash_move(ops, enable, orig_hash, hash);
	if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
	    && ftrace_enabled)
		ftrace_run_update_code(FTRACE_UPDATE_CALLS);

	mutex_unlock(&ftrace_lock);

	mutex_unlock(&ftrace_regex_lock);

	free_ftrace_hash(hash);
}
3004 * ftrace_set_filter - set a function to filter on in ftrace
3005 * @ops - the ops to set the filter with
3006 * @buf - the string that holds the function filter text.
3007 * @len - the length of the string.
3008 * @reset - non-zero to reset all filters before applying this filter.
3010 * Filters denote which functions should be enabled when tracing is enabled.
3011 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
3013 void ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
3016 ftrace_set_regex(ops, buf, len, reset, 1);
3018 EXPORT_SYMBOL_GPL(ftrace_set_filter);
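/*
 * Illustrative sketch (not part of this file): how a caller with its own
 * ftrace_ops might use ftrace_set_filter() before registering. The names
 * example_ops and example_func are hypothetical; error handling is elided.
 */
#if 0
static void notrace example_func(unsigned long ip, unsigned long parent_ip)
{
	/* runs on every hit of a filtered function; must stay notrace */
}

static struct ftrace_ops example_ops = {
	.func = example_func,
};

static int __init example_init(void)
{
	/* reset any old filters, then match only schedule* */
	ftrace_set_filter(&example_ops, (unsigned char *)"schedule*",
			  strlen("schedule*"), 1);
	return register_ftrace_function(&example_ops);
}
#endif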
3021 * ftrace_set_notrace - set a function to not trace in ftrace
3022 * @ops - the ops to set the notrace filter with
3023 * @buf - the string that holds the function notrace text.
3024 * @len - the length of the string.
3025 * @reset - non-zero to reset all filters before applying this filter.
3027 * Notrace filters denote which functions should not be traced when tracing
3028 * is enabled. If @buf is NULL and reset is set, all functions will be enabled for tracing.
3031 void ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
3034 ftrace_set_regex(ops, buf, len, reset, 0);
3036 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
3038 * ftrace_set_global_filter - set a function to filter on with the global tracers
3039 * @buf - the string that holds the function filter text.
3040 * @len - the length of the string.
3041 * @reset - non-zero to reset all filters before applying this filter.
3044 * Filters denote which functions should be enabled when tracing is enabled.
3045 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
3047 void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
3049 ftrace_set_regex(&global_ops, buf, len, reset, 1);
3051 EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
3054 * ftrace_set_global_notrace - set a function to not trace with the global tracers
3055 * @buf - the string that holds the function notrace text.
3056 * @len - the length of the string.
3057 * @reset - non-zero to reset all filters before applying this filter.
3060 * Notrace filters denote which functions should not be traced when tracing
3061 * is enabled. If @buf is NULL and reset is set, all functions will be enabled for tracing.
3064 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
3066 ftrace_set_regex(&global_ops, buf, len, reset, 0);
3068 EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
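/*
 * Illustrative sketch: the global variants above apply to every tracer
 * that uses the global ops (e.g. the plain function tracer). A
 * hypothetical caller could trace all sys_* entries while excluding one
 * noisy function:
 */
#if 0
static void example_global_setup(void)
{
	ftrace_set_global_filter((unsigned char *)"sys_*",
				 strlen("sys_*"), 1);
	ftrace_set_global_notrace((unsigned char *)"sys_gettimeofday",
				  strlen("sys_gettimeofday"), 0);
}
#endif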
3071 * command line interface to allow users to set filters on boot up.
3073 #define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE
3074 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
3075 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
3077 static int __init set_ftrace_notrace(char *str)
3079 strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
3082 __setup("ftrace_notrace=", set_ftrace_notrace);
3084 static int __init set_ftrace_filter(char *str)
3086 strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
3089 __setup("ftrace_filter=", set_ftrace_filter);
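/*
 * Example (illustrative values): booting with
 *
 *   ftrace_filter=kmem_cache_alloc,kmem_cache_free ftrace_notrace=*lock*
 *
 * fills the buffers above; set_ftrace_early_filters() applies them to
 * the global ops during ftrace_init(), before tracing can be enabled.
 */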
3091 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3092 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
3093 static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
3095 static int __init set_graph_function(char *str)
3097 strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
3100 __setup("ftrace_graph_filter=", set_graph_function);
3102 static void __init set_ftrace_early_graph(char *buf)
3108 func = strsep(&buf, ",");
3109 /* we allow only one expression at a time */
3110 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3113 printk(KERN_DEBUG "ftrace: function %s not traceable\n", func);
3117 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3120 set_ftrace_early_filter(struct ftrace_ops *ops, char *buf, int enable)
3125 func = strsep(&buf, ",");
3126 ftrace_set_regex(ops, func, strlen(func), 0, enable);
3130 static void __init set_ftrace_early_filters(void)
3132 if (ftrace_filter_buf[0])
3133 set_ftrace_early_filter(&global_ops, ftrace_filter_buf, 1);
3134 if (ftrace_notrace_buf[0])
3135 set_ftrace_early_filter(&global_ops, ftrace_notrace_buf, 0);
3136 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3137 if (ftrace_graph_buf[0])
3138 set_ftrace_early_graph(ftrace_graph_buf);
3139 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3143 ftrace_regex_release(struct inode *inode, struct file *file)
3145 struct seq_file *m = (struct seq_file *)file->private_data;
3146 struct ftrace_iterator *iter;
3147 struct ftrace_hash **orig_hash;
3148 struct trace_parser *parser;
3152 mutex_lock(&ftrace_regex_lock);
3153 if (file->f_mode & FMODE_READ) {
3156 seq_release(inode, file);
3158 iter = file->private_data;
3160 parser = &iter->parser;
3161 if (trace_parser_loaded(parser)) {
3162 parser->buffer[parser->idx] = 0;
3163 ftrace_match_records(iter->hash, parser->buffer, parser->idx);
3166 trace_parser_put(parser);
3168 if (file->f_mode & FMODE_WRITE) {
3169 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
3172 orig_hash = &iter->ops->filter_hash;
3174 orig_hash = &iter->ops->notrace_hash;
3176 mutex_lock(&ftrace_lock);
3177 ret = ftrace_hash_move(iter->ops, filter_hash,
3178 orig_hash, iter->hash);
3179 if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
3181 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3183 mutex_unlock(&ftrace_lock);
3185 free_ftrace_hash(iter->hash);
3188 mutex_unlock(&ftrace_regex_lock);
3192 static const struct file_operations ftrace_avail_fops = {
3193 .open = ftrace_avail_open,
3195 .llseek = seq_lseek,
3196 .release = seq_release_private,
3199 static const struct file_operations ftrace_enabled_fops = {
3200 .open = ftrace_enabled_open,
3202 .llseek = seq_lseek,
3203 .release = seq_release_private,
3206 static const struct file_operations ftrace_filter_fops = {
3207 .open = ftrace_filter_open,
3209 .write = ftrace_filter_write,
3210 .llseek = ftrace_filter_lseek,
3211 .release = ftrace_regex_release,
3214 static const struct file_operations ftrace_notrace_fops = {
3215 .open = ftrace_notrace_open,
3217 .write = ftrace_notrace_write,
3218 .llseek = ftrace_filter_lseek,
3219 .release = ftrace_regex_release,
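/*
 * Typical user-space usage of the two writable files above (paths assume
 * debugfs is mounted at /sys/kernel/debug):
 *
 *   echo 'schedule*'   > /sys/kernel/debug/tracing/set_ftrace_filter
 *   echo '*spin_lock*' > /sys/kernel/debug/tracing/set_ftrace_notrace
 *   echo               > /sys/kernel/debug/tracing/set_ftrace_filter
 *
 * The last write truncates the file, which resets the filter via the
 * O_TRUNC handling in the open path.
 */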
3222 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3224 static DEFINE_MUTEX(graph_lock);
3226 int ftrace_graph_count;
3227 int ftrace_graph_filter_enabled;
3228 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
3231 __g_next(struct seq_file *m, loff_t *pos)
3233 if (*pos >= ftrace_graph_count)
3235 return &ftrace_graph_funcs[*pos];
3239 g_next(struct seq_file *m, void *v, loff_t *pos)
3242 return __g_next(m, pos);
3245 static void *g_start(struct seq_file *m, loff_t *pos)
3247 mutex_lock(&graph_lock);
3249 /* Nothing to iterate over; tell g_show to print that all functions are enabled */
3250 if (!ftrace_graph_filter_enabled && !*pos)
3253 return __g_next(m, pos);
3256 static void g_stop(struct seq_file *m, void *p)
3258 mutex_unlock(&graph_lock);
3261 static int g_show(struct seq_file *m, void *v)
3263 unsigned long *ptr = v;
3268 if (ptr == (unsigned long *)1) {
3269 seq_printf(m, "#### all functions enabled ####\n");
3273 seq_printf(m, "%ps\n", (void *)*ptr);
3278 static const struct seq_operations ftrace_graph_seq_ops = {
3286 ftrace_graph_open(struct inode *inode, struct file *file)
3290 if (unlikely(ftrace_disabled))
3293 mutex_lock(&graph_lock);
3294 if ((file->f_mode & FMODE_WRITE) &&
3295 (file->f_flags & O_TRUNC)) {
3296 ftrace_graph_filter_enabled = 0;
3297 ftrace_graph_count = 0;
3298 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
3300 mutex_unlock(&graph_lock);
3302 if (file->f_mode & FMODE_READ)
3303 ret = seq_open(file, &ftrace_graph_seq_ops);
3309 ftrace_graph_release(struct inode *inode, struct file *file)
3311 if (file->f_mode & FMODE_READ)
3312 seq_release(inode, file);
3317 ftrace_set_func(unsigned long *array, int *idx, char *buffer)
3319 struct dyn_ftrace *rec;
3320 struct ftrace_page *pg;
3329 type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
3330 if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
3333 search_len = strlen(search);
3335 mutex_lock(&ftrace_lock);
3337 if (unlikely(ftrace_disabled)) {
3338 mutex_unlock(&ftrace_lock);
3342 do_for_each_ftrace_rec(pg, rec) {
3344 if (rec->flags & FTRACE_FL_FREE)
3347 if (ftrace_match_record(rec, NULL, search, search_len, type)) {
3348 /* if it is in the array */
3350 for (i = 0; i < *idx; i++) {
3351 if (array[i] == rec->ip) {
3360 array[(*idx)++] = rec->ip;
3361 if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
3366 array[i] = array[--(*idx)];
3372 } while_for_each_ftrace_rec();
3374 mutex_unlock(&ftrace_lock);
3379 ftrace_graph_filter_enabled = !!(*idx);
3385 ftrace_graph_write(struct file *file, const char __user *ubuf,
3386 size_t cnt, loff_t *ppos)
3388 struct trace_parser parser;
3394 mutex_lock(&graph_lock);
3396 if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
3401 read = trace_get_user(&parser, ubuf, cnt, ppos);
3403 if (read >= 0 && trace_parser_loaded(&parser)) {
3404 parser.buffer[parser.idx] = 0;
3406 /* we allow only one expression at a time */
3407 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3416 trace_parser_put(&parser);
3418 mutex_unlock(&graph_lock);
3423 static const struct file_operations ftrace_graph_fops = {
3424 .open = ftrace_graph_open,
3426 .write = ftrace_graph_write,
3427 .llseek = ftrace_filter_lseek,
3428 .release = ftrace_graph_release,
3430 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
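/*
 * Illustrative user-space usage of set_graph_function: the function
 * graph tracer then traces only graphs rooted at the listed functions.
 * Writes are parsed one expression at a time by ftrace_graph_write():
 *
 *   echo do_sys_open  > /sys/kernel/debug/tracing/set_graph_function
 *   echo sys_read    >> /sys/kernel/debug/tracing/set_graph_function
 */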
3432 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
3435 trace_create_file("available_filter_functions", 0444,
3436 d_tracer, NULL, &ftrace_avail_fops);
3438 trace_create_file("enabled_functions", 0444,
3439 d_tracer, NULL, &ftrace_enabled_fops);
3441 trace_create_file("set_ftrace_filter", 0644, d_tracer,
3442 NULL, &ftrace_filter_fops);
3444 trace_create_file("set_ftrace_notrace", 0644, d_tracer,
3445 NULL, &ftrace_notrace_fops);
3447 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3448 trace_create_file("set_graph_function", 0644, d_tracer,
3450 &ftrace_graph_fops);
3451 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3456 static int ftrace_process_locs(struct module *mod,
3457 unsigned long *start,
3462 unsigned long flags = 0; /* Shut up gcc */
3464 mutex_lock(&ftrace_lock);
3467 addr = ftrace_call_adjust(*p++);
3469 * Some architecture linkers will pad between
3470 * the different mcount_loc sections of different
3471 * object files to satisfy alignments.
3472 * Skip any NULL pointers.
3476 ftrace_record_ip(addr);
3480 * We only need to disable interrupts on start up
3481 * because we are modifying code that an interrupt
3482 * may execute, and the modification is not atomic.
3483 * But for modules, nothing runs the code we modify
3484 * until we are finished with it, and there's no
3485 * reason to cause large interrupt latencies while we do it.
3488 local_irq_save(flags);
3489 ftrace_update_code(mod);
3491 local_irq_restore(flags);
3492 mutex_unlock(&ftrace_lock);
3497 #ifdef CONFIG_MODULES
3498 void ftrace_release_mod(struct module *mod)
3500 struct dyn_ftrace *rec;
3501 struct ftrace_page *pg;
3503 mutex_lock(&ftrace_lock);
3505 if (ftrace_disabled)
3508 do_for_each_ftrace_rec(pg, rec) {
3509 if (within_module_core(rec->ip, mod)) {
3511 * rec->ip is changed in ftrace_free_rec()
3512 * It should not be between s and e if the record was freed.
3514 FTRACE_WARN_ON(rec->flags & FTRACE_FL_FREE);
3515 ftrace_free_rec(rec);
3517 } while_for_each_ftrace_rec();
3519 mutex_unlock(&ftrace_lock);
3522 static void ftrace_init_module(struct module *mod,
3523 unsigned long *start, unsigned long *end)
3525 if (ftrace_disabled || start == end)
3527 ftrace_process_locs(mod, start, end);
3530 static int ftrace_module_notify_enter(struct notifier_block *self,
3531 unsigned long val, void *data)
3533 struct module *mod = data;
3535 if (val == MODULE_STATE_COMING)
3536 ftrace_init_module(mod, mod->ftrace_callsites,
3537 mod->ftrace_callsites +
3538 mod->num_ftrace_callsites);
3542 static int ftrace_module_notify_exit(struct notifier_block *self,
3543 unsigned long val, void *data)
3545 struct module *mod = data;
3547 if (val == MODULE_STATE_GOING)
3548 ftrace_release_mod(mod);
3553 static int ftrace_module_notify_enter(struct notifier_block *self,
3554 unsigned long val, void *data)
3558 static int ftrace_module_notify_exit(struct notifier_block *self,
3559 unsigned long val, void *data)
3563 #endif /* CONFIG_MODULES */
3565 struct notifier_block ftrace_module_enter_nb = {
3566 .notifier_call = ftrace_module_notify_enter,
3567 .priority = INT_MAX, /* Run before anything that can use kprobes */
3570 struct notifier_block ftrace_module_exit_nb = {
3571 .notifier_call = ftrace_module_notify_exit,
3572 .priority = INT_MIN, /* Run after anything that can remove kprobes */
3575 extern unsigned long __start_mcount_loc[];
3576 extern unsigned long __stop_mcount_loc[];
3578 void __init ftrace_init(void)
3580 unsigned long count, addr, flags;
3583 /* Keep the ftrace pointer to the stub */
3584 addr = (unsigned long)ftrace_stub;
3586 local_irq_save(flags);
3587 ftrace_dyn_arch_init(&addr);
3588 local_irq_restore(flags);
3590 /* ftrace_dyn_arch_init places the return code in addr */
3594 count = __stop_mcount_loc - __start_mcount_loc;
3596 ret = ftrace_dyn_table_alloc(count);
3600 last_ftrace_enabled = ftrace_enabled = 1;
3602 ret = ftrace_process_locs(NULL,
3606 ret = register_module_notifier(&ftrace_module_enter_nb);
3608 pr_warning("Failed to register ftrace module enter notifier\n");
3610 ret = register_module_notifier(&ftrace_module_exit_nb);
3612 pr_warning("Failed to register ftrace module exit notifier\n");
3614 set_ftrace_early_filters();
3618 ftrace_disabled = 1;
3623 static struct ftrace_ops global_ops = {
3624 .func = ftrace_stub,
3627 static int __init ftrace_nodyn_init(void)
3632 device_initcall(ftrace_nodyn_init);
3634 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
3635 static inline void ftrace_startup_enable(int command) { }
3636 /* Keep as macros so we do not need to define the commands */
3637 # define ftrace_startup(ops, command) \
3639 int ___ret = __register_ftrace_function(ops); \
3641 (ops)->flags |= FTRACE_OPS_FL_ENABLED; \
3644 # define ftrace_shutdown(ops, command) __unregister_ftrace_function(ops)
3646 # define ftrace_startup_sysctl() do { } while (0)
3647 # define ftrace_shutdown_sysctl() do { } while (0)
3650 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
3655 #endif /* CONFIG_DYNAMIC_FTRACE */
3658 ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
3660 struct ftrace_ops *op;
3662 if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT)))
3665 trace_recursion_set(TRACE_INTERNAL_BIT);
3667 * Some of the ops may be dynamically allocated;
3668 * they must be freed only after a synchronize_sched().
3670 preempt_disable_notrace();
3671 op = rcu_dereference_raw(ftrace_ops_list);
3672 while (op != &ftrace_list_end) {
3673 if (ftrace_ops_test(op, ip))
3674 op->func(ip, parent_ip);
3675 op = rcu_dereference_raw(op->next);
3677 preempt_enable_notrace();
3678 trace_recursion_clear(TRACE_INTERNAL_BIT);
3681 static void clear_ftrace_swapper(void)
3683 struct task_struct *p;
3687 for_each_online_cpu(cpu) {
3689 clear_tsk_trace_trace(p);
3694 static void set_ftrace_swapper(void)
3696 struct task_struct *p;
3700 for_each_online_cpu(cpu) {
3702 set_tsk_trace_trace(p);
3707 static void clear_ftrace_pid(struct pid *pid)
3709 struct task_struct *p;
3712 do_each_pid_task(pid, PIDTYPE_PID, p) {
3713 clear_tsk_trace_trace(p);
3714 } while_each_pid_task(pid, PIDTYPE_PID, p);
3720 static void set_ftrace_pid(struct pid *pid)
3722 struct task_struct *p;
3725 do_each_pid_task(pid, PIDTYPE_PID, p) {
3726 set_tsk_trace_trace(p);
3727 } while_each_pid_task(pid, PIDTYPE_PID, p);
3731 static void clear_ftrace_pid_task(struct pid *pid)
3733 if (pid == ftrace_swapper_pid)
3734 clear_ftrace_swapper();
3736 clear_ftrace_pid(pid);
3739 static void set_ftrace_pid_task(struct pid *pid)
3741 if (pid == ftrace_swapper_pid)
3742 set_ftrace_swapper();
3744 set_ftrace_pid(pid);
3747 static int ftrace_pid_add(int p)
3750 struct ftrace_pid *fpid;
3753 mutex_lock(&ftrace_lock);
3756 pid = ftrace_swapper_pid;
3758 pid = find_get_pid(p);
3765 list_for_each_entry(fpid, &ftrace_pids, list)
3766 if (fpid->pid == pid)
3771 fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
3775 list_add(&fpid->list, &ftrace_pids);
3778 set_ftrace_pid_task(pid);
3780 ftrace_update_pid_func();
3781 ftrace_startup_enable(0);
3783 mutex_unlock(&ftrace_lock);
3787 if (pid != ftrace_swapper_pid)
3791 mutex_unlock(&ftrace_lock);
3795 static void ftrace_pid_reset(void)
3797 struct ftrace_pid *fpid, *safe;
3799 mutex_lock(&ftrace_lock);
3800 list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
3801 struct pid *pid = fpid->pid;
3803 clear_ftrace_pid_task(pid);
3805 list_del(&fpid->list);
3809 ftrace_update_pid_func();
3810 ftrace_startup_enable(0);
3812 mutex_unlock(&ftrace_lock);
3815 static void *fpid_start(struct seq_file *m, loff_t *pos)
3817 mutex_lock(&ftrace_lock);
3819 if (list_empty(&ftrace_pids) && (!*pos))
3822 return seq_list_start(&ftrace_pids, *pos);
3825 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
3830 return seq_list_next(v, &ftrace_pids, pos);
3833 static void fpid_stop(struct seq_file *m, void *p)
3835 mutex_unlock(&ftrace_lock);
3838 static int fpid_show(struct seq_file *m, void *v)
3840 const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
3842 if (v == (void *)1) {
3843 seq_printf(m, "no pid\n");
3847 if (fpid->pid == ftrace_swapper_pid)
3848 seq_printf(m, "swapper tasks\n");
3850 seq_printf(m, "%u\n", pid_vnr(fpid->pid));
3855 static const struct seq_operations ftrace_pid_sops = {
3856 .start = fpid_start,
3863 ftrace_pid_open(struct inode *inode, struct file *file)
3867 if ((file->f_mode & FMODE_WRITE) &&
3868 (file->f_flags & O_TRUNC))
3871 if (file->f_mode & FMODE_READ)
3872 ret = seq_open(file, &ftrace_pid_sops);
3878 ftrace_pid_write(struct file *filp, const char __user *ubuf,
3879 size_t cnt, loff_t *ppos)
3885 if (cnt >= sizeof(buf))
3888 if (copy_from_user(&buf, ubuf, cnt))
3894 * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
3895 * to clear the filter quietly.
3897 tmp = strstrip(buf);
3898 if (strlen(tmp) == 0)
3901 ret = strict_strtol(tmp, 10, &val);
3905 ret = ftrace_pid_add(val);
3907 return ret ? ret : cnt;
3911 ftrace_pid_release(struct inode *inode, struct file *file)
3913 if (file->f_mode & FMODE_READ)
3914 seq_release(inode, file);
3919 static const struct file_operations ftrace_pid_fops = {
3920 .open = ftrace_pid_open,
3921 .write = ftrace_pid_write,
3923 .llseek = ftrace_filter_lseek,
3924 .release = ftrace_pid_release,
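/*
 * Illustrative user-space usage of set_ftrace_pid (the pid value is
 * hypothetical):
 *
 *   echo 1234 > /sys/kernel/debug/tracing/set_ftrace_pid  # trace pid 1234
 *   echo 0    > /sys/kernel/debug/tracing/set_ftrace_pid  # swapper tasks
 *   echo      > /sys/kernel/debug/tracing/set_ftrace_pid  # clear quietly
 */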
3927 static __init int ftrace_init_debugfs(void)
3929 struct dentry *d_tracer;
3931 d_tracer = tracing_init_dentry();
3935 ftrace_init_dyn_debugfs(d_tracer);
3937 trace_create_file("set_ftrace_pid", 0644, d_tracer,
3938 NULL, &ftrace_pid_fops);
3940 ftrace_profile_debugfs(d_tracer);
3944 fs_initcall(ftrace_init_debugfs);
3947 * ftrace_kill - kill ftrace
3949 * This function should be used by panic code. It stops ftrace
3950 * but in a not so nice way: it takes no locks, so it is safe
3951 * to call even from atomic context.
3953 void ftrace_kill(void)
3955 ftrace_disabled = 1;
3957 clear_ftrace_function();
3961 * Test if ftrace is dead or not.
3963 int ftrace_is_dead(void)
3965 return ftrace_disabled;
3969 * register_ftrace_function - register a function for profiling
3970 * @ops - ops structure that holds the function for profiling.
3972 * Register a function to be called by all functions in the kernel.
3975 * Note: @ops->func and all the functions it calls must be labeled
3976 * with "notrace", otherwise it will go into a recursive loop.
3979 int register_ftrace_function(struct ftrace_ops *ops)
3983 mutex_lock(&ftrace_lock);
3985 ret = ftrace_startup(ops, 0);
3987 mutex_unlock(&ftrace_lock);
3990 EXPORT_SYMBOL_GPL(register_ftrace_function);
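/*
 * Minimal sketch of the notrace contract described above; the names are
 * hypothetical. The callback and everything it calls must be notrace,
 * otherwise the traced call into the callback recurses.
 */
#if 0
static atomic_long_t example_hits;

static void notrace example_count_hits(unsigned long ip,
				       unsigned long parent_ip)
{
	/* notrace-safe: no printk(), no traced helpers */
	atomic_long_inc(&example_hits);
}

static struct ftrace_ops example_count_ops = {
	.func = example_count_hits,
};
/* register_ftrace_function(&example_count_ops); */
#endif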
3993 * unregister_ftrace_function - unregister a function for profiling.
3994 * @ops - ops structure that holds the function to unregister
3996 * Unregister a function that was added to be called by ftrace profiling.
3998 int unregister_ftrace_function(struct ftrace_ops *ops)
4002 mutex_lock(&ftrace_lock);
4003 ret = ftrace_shutdown(ops, 0);
4004 mutex_unlock(&ftrace_lock);
4008 EXPORT_SYMBOL_GPL(unregister_ftrace_function);
4011 ftrace_enable_sysctl(struct ctl_table *table, int write,
4012 void __user *buffer, size_t *lenp,
4017 mutex_lock(&ftrace_lock);
4019 if (unlikely(ftrace_disabled))
4022 ret = proc_dointvec(table, write, buffer, lenp, ppos);
4024 if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
4027 last_ftrace_enabled = !!ftrace_enabled;
4029 if (ftrace_enabled) {
4031 ftrace_startup_sysctl();
4033 /* we are starting ftrace again */
4034 if (ftrace_ops_list != &ftrace_list_end)
4035 update_ftrace_function();
4038 /* stopping ftrace calls (just send to ftrace_stub) */
4039 ftrace_trace_function = ftrace_stub;
4041 ftrace_shutdown_sysctl();
4045 mutex_unlock(&ftrace_lock);
4049 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4051 static int ftrace_graph_active;
4052 static struct notifier_block ftrace_suspend_notifier;
4054 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
4059 /* The callbacks that hook a function */
4060 trace_func_graph_ret_t ftrace_graph_return =
4061 (trace_func_graph_ret_t)ftrace_stub;
4062 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
4064 /* Try to assign a return stack array to each of FTRACE_RETSTACK_ALLOC_SIZE tasks. */
4065 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
4069 unsigned long flags;
4070 int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
4071 struct task_struct *g, *t;
4073 for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
4074 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
4075 * sizeof(struct ftrace_ret_stack),
4077 if (!ret_stack_list[i]) {
4085 read_lock_irqsave(&tasklist_lock, flags);
4086 do_each_thread(g, t) {
4092 if (t->ret_stack == NULL) {
4093 atomic_set(&t->tracing_graph_pause, 0);
4094 atomic_set(&t->trace_overrun, 0);
4095 t->curr_ret_stack = -1;
4096 /* Make sure the tasks see the -1 first: */
4098 t->ret_stack = ret_stack_list[start++];
4100 } while_each_thread(g, t);
4103 read_unlock_irqrestore(&tasklist_lock, flags);
4105 for (i = start; i < end; i++)
4106 kfree(ret_stack_list[i]);
4111 ftrace_graph_probe_sched_switch(void *ignore,
4112 struct task_struct *prev, struct task_struct *next)
4114 unsigned long long timestamp;
4118 * Does the user want to count the time a function was asleep?
4119 * If so, do not update the timestamps.
4121 if (trace_flags & TRACE_ITER_SLEEP_TIME)
4124 timestamp = trace_clock_local();
4126 prev->ftrace_timestamp = timestamp;
4128 /* only process tasks that we timestamped */
4129 if (!next->ftrace_timestamp)
4133 * Update all the counters in next to make up for the
4134 * time next was sleeping.
4136 timestamp -= next->ftrace_timestamp;
4138 for (index = next->curr_ret_stack; index >= 0; index--)
4139 next->ret_stack[index].calltime += timestamp;
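/*
 * Worked example (illustrative numbers): a function was entered at
 * calltime=100, the task then slept for 40 before this switch-in, so
 * timestamp is 40 here. Adding 40 to calltime makes the eventual
 * rettime - calltime exclude the sleep, which is what a cleared
 * TRACE_ITER_SLEEP_TIME asks for.
 */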
4142 /* Allocate a return stack for each task */
4143 static int start_graph_tracing(void)
4145 struct ftrace_ret_stack **ret_stack_list;
4148 ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
4149 sizeof(struct ftrace_ret_stack *),
4152 if (!ret_stack_list)
4155 /* The cpu_boot init_task->ret_stack will never be freed */
4156 for_each_online_cpu(cpu) {
4157 if (!idle_task(cpu)->ret_stack)
4158 ftrace_graph_init_idle_task(idle_task(cpu), cpu);
4162 ret = alloc_retstack_tasklist(ret_stack_list);
4163 } while (ret == -EAGAIN);
4166 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
4168 pr_info("ftrace_graph: Couldn't activate tracepoint probe to kernel_sched_switch\n");
4172 kfree(ret_stack_list);
4177 * Hibernation protection.
4178 * The state of the current task is too unstable during
4179 * suspend/restore to disk. We want to protect against that.
4182 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
4186 case PM_HIBERNATION_PREPARE:
4187 pause_graph_tracing();
4190 case PM_POST_HIBERNATION:
4191 unpause_graph_tracing();
4197 /* Just a placeholder for the function graph tracer */
4198 static struct ftrace_ops fgraph_ops __read_mostly = {
4199 .func = ftrace_stub,
4200 .flags = FTRACE_OPS_FL_GLOBAL,
4203 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
4204 trace_func_graph_ent_t entryfunc)
4208 mutex_lock(&ftrace_lock);
4210 /* we currently allow only one tracer registered at a time */
4211 if (ftrace_graph_active) {
4216 ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
4217 register_pm_notifier(&ftrace_suspend_notifier);
4219 ftrace_graph_active++;
4220 ret = start_graph_tracing();
4222 ftrace_graph_active--;
4226 ftrace_graph_return = retfunc;
4227 ftrace_graph_entry = entryfunc;
4229 ret = ftrace_startup(&fgraph_ops, FTRACE_START_FUNC_RET);
4232 mutex_unlock(&ftrace_lock);
4236 void unregister_ftrace_graph(void)
4238 mutex_lock(&ftrace_lock);
4240 if (unlikely(!ftrace_graph_active))
4243 ftrace_graph_active--;
4244 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
4245 ftrace_graph_entry = ftrace_graph_entry_stub;
4246 ftrace_shutdown(&fgraph_ops, FTRACE_STOP_FUNC_RET);
4247 unregister_pm_notifier(&ftrace_suspend_notifier);
4248 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
4251 mutex_unlock(&ftrace_lock);
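/*
 * Illustrative sketch of the register/unregister pairing above; the
 * callback names are hypothetical. Only one graph tracer may be
 * registered at a time.
 */
#if 0
static int example_graph_entry(struct ftrace_graph_ent *trace)
{
	return 1;	/* non-zero: record this function and its children */
}

static void example_graph_return(struct ftrace_graph_ret *trace)
{
	/* trace->rettime - trace->calltime is the function's duration */
}

/*
 * register_ftrace_graph(example_graph_return, example_graph_entry);
 * ...
 * unregister_ftrace_graph();
 */
#endif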
4254 static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
4257 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
4259 atomic_set(&t->tracing_graph_pause, 0);
4260 atomic_set(&t->trace_overrun, 0);
4261 t->ftrace_timestamp = 0;
4262 /* make curr_ret_stack visible before we add the ret_stack */
4264 t->ret_stack = ret_stack;
4268 * Allocate a return stack for the idle task. This may be the first
4269 * time through, or it may be called again when a CPU comes online.
4271 void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
4273 t->curr_ret_stack = -1;
4275 * The idle task has no parent; it either has its own
4276 * stack or no stack at all.
4279 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
4281 if (ftrace_graph_active) {
4282 struct ftrace_ret_stack *ret_stack;
4284 ret_stack = per_cpu(idle_ret_stack, cpu);
4286 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4287 * sizeof(struct ftrace_ret_stack),
4291 per_cpu(idle_ret_stack, cpu) = ret_stack;
4293 graph_init_task(t, ret_stack);
4297 /* Allocate a return stack for a newly created task */
4298 void ftrace_graph_init_task(struct task_struct *t)
4300 /* Make sure we do not use the parent ret_stack */
4301 t->ret_stack = NULL;
4302 t->curr_ret_stack = -1;
4304 if (ftrace_graph_active) {
4305 struct ftrace_ret_stack *ret_stack;
4307 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4308 * sizeof(struct ftrace_ret_stack),
4312 graph_init_task(t, ret_stack);
4316 void ftrace_graph_exit_task(struct task_struct *t)
4318 struct ftrace_ret_stack *ret_stack = t->ret_stack;
4320 t->ret_stack = NULL;
4321 /* NULL must become visible to IRQs before we free it: */
4327 void ftrace_graph_stop(void)