1 /*
2  * Infrastructure for profiling code inserted by 'gcc -pg'.
3  *
4  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6  *
7  * Originally ported from the -rt patch by:
8  *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9  *
10  * Based on code in the latency_tracer, that is:
11  *
12  *  Copyright (C) 2004-2006 Ingo Molnar
13  *  Copyright (C) 2004 William Lee Irwin III
14  */
15
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/suspend.h>
21 #include <linux/debugfs.h>
22 #include <linux/hardirq.h>
23 #include <linux/kthread.h>
24 #include <linux/uaccess.h>
25 #include <linux/ftrace.h>
26 #include <linux/sysctl.h>
27 #include <linux/slab.h>
28 #include <linux/ctype.h>
29 #include <linux/list.h>
30 #include <linux/hash.h>
31 #include <linux/rcupdate.h>
32
33 #include <trace/events/sched.h>
34
35 #include <asm/setup.h>
36
37 #include "trace_output.h"
38 #include "trace_stat.h"
39
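/*
 * These wrappers do not just warn: if the condition trips they also call
 * ftrace_kill() to shut ftrace down, since it is not safe to keep patching
 * code after an anomaly has been detected.
 */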
40 #define FTRACE_WARN_ON(cond)                    \
41         ({                                      \
42                 int ___r = cond;                \
43                 if (WARN_ON(___r))              \
44                         ftrace_kill();          \
45                 ___r;                           \
46         })
47
48 #define FTRACE_WARN_ON_ONCE(cond)               \
49         ({                                      \
50                 int ___r = cond;                \
51                 if (WARN_ON_ONCE(___r))         \
52                         ftrace_kill();          \
53                 ___r;                           \
54         })
55
56 /* hash bits for specific function selection */
57 #define FTRACE_HASH_BITS 7
58 #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
59 #define FTRACE_HASH_DEFAULT_BITS 10
60 #define FTRACE_HASH_MAX_BITS 12
61
62 /* ftrace_enabled is a method to turn ftrace on or off */
63 int ftrace_enabled __read_mostly;
64 static int last_ftrace_enabled;
65
66 /* Quick disabling of function tracer. */
67 int function_trace_stop;
68
69 /* List for set_ftrace_pid's pids. */
70 LIST_HEAD(ftrace_pids);
71 struct ftrace_pid {
72         struct list_head list;
73         struct pid *pid;
74 };
75
76 /*
77  * ftrace_disabled is set when an anomaly is discovered.
78  * ftrace_disabled is much stronger than ftrace_enabled.
79  */
80 static int ftrace_disabled __read_mostly;
81
82 static DEFINE_MUTEX(ftrace_lock);
83
84 static struct ftrace_ops ftrace_list_end __read_mostly = {
85         .func           = ftrace_stub,
86 };
87
88 static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
89 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
90 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
91 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
92 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
93 static struct ftrace_ops global_ops;
94
95 static void
96 ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
97
98 /*
99  * Traverse the ftrace_global_list, invoking all entries.  The reason that we
100  * can use rcu_dereference_raw() is that elements removed from this list
101  * are simply leaked, so there is no need to interact with a grace-period
102  * mechanism.  The rcu_dereference_raw() calls are needed to handle
103  * concurrent insertions into the ftrace_global_list.
104  *
105  * Silly Alpha and silly pointer-speculation compiler optimizations!
106  */
107 static void ftrace_global_list_func(unsigned long ip,
108                                     unsigned long parent_ip)
109 {
110         struct ftrace_ops *op;
111
112         if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT)))
113                 return;
114
115         trace_recursion_set(TRACE_GLOBAL_BIT);
116         op = rcu_dereference_raw(ftrace_global_list); /*see above*/
117         while (op != &ftrace_list_end) {
118                 op->func(ip, parent_ip);
119                 op = rcu_dereference_raw(op->next); /*see above*/
120         }
121         trace_recursion_clear(TRACE_GLOBAL_BIT);
122 }
123
124 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
125 {
126         if (!test_tsk_trace_trace(current))
127                 return;
128
129         ftrace_pid_function(ip, parent_ip);
130 }
131
132 static void set_ftrace_pid_function(ftrace_func_t func)
133 {
134         /* do not set ftrace_pid_function to itself! */
135         if (func != ftrace_pid_func)
136                 ftrace_pid_function = func;
137 }
138
139 /**
140  * clear_ftrace_function - reset the ftrace function
141  *
142  * This resets the ftrace functions to ftrace_stub and in essence
143  * stops tracing.  There may be a lag before tracing fully stops.
144  */
145 void clear_ftrace_function(void)
146 {
147         ftrace_trace_function = ftrace_stub;
148         __ftrace_trace_function = ftrace_stub;
149         ftrace_pid_function = ftrace_stub;
150 }
151
152 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
153 /*
154  * For those archs that do not test function_trace_stop in their
155  * mcount call site, we need to do it from C.
156  */
157 static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
158 {
159         if (function_trace_stop)
160                 return;
161
162         __ftrace_trace_function(ip, parent_ip);
163 }
164 #endif
165
166 static void update_global_ops(void)
167 {
168         ftrace_func_t func;
169
170         /*
171          * If there's only one function registered, then call that
172          * function directly. Otherwise, we need to iterate over the
173          * registered callers.
174          */
175         if (ftrace_global_list == &ftrace_list_end ||
176             ftrace_global_list->next == &ftrace_list_end)
177                 func = ftrace_global_list->func;
178         else
179                 func = ftrace_global_list_func;
180
181         /* If we filter on pids, update to use the pid function */
182         if (!list_empty(&ftrace_pids)) {
183                 set_ftrace_pid_function(func);
184                 func = ftrace_pid_func;
185         }
186
187         global_ops.func = func;
188 }
189
190 static void update_ftrace_function(void)
191 {
192         ftrace_func_t func;
193
194         update_global_ops();
195
196         /*
197          * If we are at the end of the list and this ops is
198          * not dynamic, then have the mcount trampoline call
199          * the function directly
200          */
201         if (ftrace_ops_list == &ftrace_list_end ||
202             (ftrace_ops_list->next == &ftrace_list_end &&
203              !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC)))
204                 func = ftrace_ops_list->func;
205         else
206                 func = ftrace_ops_list_func;
207
208 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
209         ftrace_trace_function = func;
210 #else
211         __ftrace_trace_function = func;
212         ftrace_trace_function = ftrace_test_stop_func;
213 #endif
214 }
215
216 static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
217 {
218         ops->next = *list;
219         /*
220          * We are entering ops into the list but another
221          * CPU might be walking that list. We need to make sure
222          * the ops->next pointer is valid before another CPU sees
223          * the ops pointer included into the list.
224          */
225         rcu_assign_pointer(*list, ops);
226 }
227
228 static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
229 {
230         struct ftrace_ops **p;
231
232         /*
233          * If we are removing the last function, then simply point
234          * to the ftrace_stub.
235          */
236         if (*list == ops && ops->next == &ftrace_list_end) {
237                 *list = &ftrace_list_end;
238                 return 0;
239         }
240
241         for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
242                 if (*p == ops)
243                         break;
244
245         if (*p != ops)
246                 return -1;
247
248         *p = (*p)->next;
249         return 0;
250 }
251
252 static int __register_ftrace_function(struct ftrace_ops *ops)
253 {
254         if (ftrace_disabled)
255                 return -ENODEV;
256
257         if (FTRACE_WARN_ON(ops == &global_ops))
258                 return -EINVAL;
259
260         if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
261                 return -EBUSY;
262
263         if (!core_kernel_data((unsigned long)ops))
264                 ops->flags |= FTRACE_OPS_FL_DYNAMIC;
265
266         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
267                 int first = ftrace_global_list == &ftrace_list_end;
268                 add_ftrace_ops(&ftrace_global_list, ops);
269                 ops->flags |= FTRACE_OPS_FL_ENABLED;
270                 if (first)
271                         add_ftrace_ops(&ftrace_ops_list, &global_ops);
272         } else
273                 add_ftrace_ops(&ftrace_ops_list, ops);
274
275         if (ftrace_enabled)
276                 update_ftrace_function();
277
278         return 0;
279 }
280
281 static int __unregister_ftrace_function(struct ftrace_ops *ops)
282 {
283         int ret;
284
285         if (ftrace_disabled)
286                 return -ENODEV;
287
288         if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
289                 return -EBUSY;
290
291         if (FTRACE_WARN_ON(ops == &global_ops))
292                 return -EINVAL;
293
294         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
295                 ret = remove_ftrace_ops(&ftrace_global_list, ops);
296                 if (!ret && ftrace_global_list == &ftrace_list_end)
297                         ret = remove_ftrace_ops(&ftrace_ops_list, &global_ops);
298                 if (!ret)
299                         ops->flags &= ~FTRACE_OPS_FL_ENABLED;
300         } else
301                 ret = remove_ftrace_ops(&ftrace_ops_list, ops);
302
303         if (ret < 0)
304                 return ret;
305
306         if (ftrace_enabled)
307                 update_ftrace_function();
308
309         /*
310          * Dynamic ops may be freed, we must make sure that all
311          * callers are done before leaving this function.
312          */
313         if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
314                 synchronize_sched();
315
316         return 0;
317 }
318
319 static void ftrace_update_pid_func(void)
320 {
321         /* Only do something if we are tracing something */
322         if (ftrace_trace_function == ftrace_stub)
323                 return;
324
325         update_ftrace_function();
326 }
327
328 #ifdef CONFIG_FUNCTION_PROFILER
329 struct ftrace_profile {
330         struct hlist_node               node;
331         unsigned long                   ip;
332         unsigned long                   counter;
333 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
334         unsigned long long              time;
335         unsigned long long              time_squared;
336 #endif
337 };
338
339 struct ftrace_profile_page {
340         struct ftrace_profile_page      *next;
341         unsigned long                   index;
342         struct ftrace_profile           records[];
343 };
344
345 struct ftrace_profile_stat {
346         atomic_t                        disabled;
347         struct hlist_head               *hash;
348         struct ftrace_profile_page      *pages;
349         struct ftrace_profile_page      *start;
350         struct tracer_stat              stat;
351 };
352
353 #define PROFILE_RECORDS_SIZE                                            \
354         (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
355
356 #define PROFILES_PER_PAGE                                       \
357         (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
358
359 static int ftrace_profile_bits __read_mostly;
360 static int ftrace_profile_enabled __read_mostly;
361
362 /* ftrace_profile_lock - synchronize the enable and disable of the profiler */
363 static DEFINE_MUTEX(ftrace_profile_lock);
364
365 static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
366
367 #define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */
368
369 static void *
370 function_stat_next(void *v, int idx)
371 {
372         struct ftrace_profile *rec = v;
373         struct ftrace_profile_page *pg;
374
375         pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
376
377  again:
378         if (idx != 0)
379                 rec++;
380
381         if ((void *)rec >= (void *)&pg->records[pg->index]) {
382                 pg = pg->next;
383                 if (!pg)
384                         return NULL;
385                 rec = &pg->records[0];
386                 if (!rec->counter)
387                         goto again;
388         }
389
390         return rec;
391 }
392
393 static void *function_stat_start(struct tracer_stat *trace)
394 {
395         struct ftrace_profile_stat *stat =
396                 container_of(trace, struct ftrace_profile_stat, stat);
397
398         if (!stat || !stat->start)
399                 return NULL;
400
401         return function_stat_next(&stat->start->records[0], 0);
402 }
403
404 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
405 /* function graph compares on total time */
406 static int function_stat_cmp(void *p1, void *p2)
407 {
408         struct ftrace_profile *a = p1;
409         struct ftrace_profile *b = p2;
410
411         if (a->time < b->time)
412                 return -1;
413         if (a->time > b->time)
414                 return 1;
415         else
416                 return 0;
417 }
418 #else
419 /* without function graph, compare against hit counts */
420 static int function_stat_cmp(void *p1, void *p2)
421 {
422         struct ftrace_profile *a = p1;
423         struct ftrace_profile *b = p2;
424
425         if (a->counter < b->counter)
426                 return -1;
427         if (a->counter > b->counter)
428                 return 1;
429         else
430                 return 0;
431 }
432 #endif
433
434 static int function_stat_headers(struct seq_file *m)
435 {
436 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
437         seq_printf(m, "  Function                               "
438                    "Hit    Time            Avg             s^2\n"
439                       "  --------                               "
440                    "---    ----            ---             ---\n");
441 #else
442         seq_printf(m, "  Function                               Hit\n"
443                       "  --------                               ---\n");
444 #endif
445         return 0;
446 }
447
448 static int function_stat_show(struct seq_file *m, void *v)
449 {
450         struct ftrace_profile *rec = v;
451         char str[KSYM_SYMBOL_LEN];
452         int ret = 0;
453 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
454         static struct trace_seq s;
455         unsigned long long avg;
456         unsigned long long stddev;
457 #endif
458         mutex_lock(&ftrace_profile_lock);
459
460         /* we raced with function_profile_reset() */
461         if (unlikely(rec->counter == 0)) {
462                 ret = -EBUSY;
463                 goto out;
464         }
465
466         kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
467         seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
468
469 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
470         seq_printf(m, "    ");
471         avg = rec->time;
472         do_div(avg, rec->counter);
473
474         /* Sample variance (s^2) */
475         if (rec->counter <= 1)
476                 stddev = 0;
477         else {
478                 stddev = rec->time_squared - rec->counter * avg * avg;
479                 /*
480                  * Divide by only 1000 for the ns^2 -> us^2 conversion;
481                  * trace_print_graph_duration() will divide by 1000 again.
482                  */
483                 do_div(stddev, (rec->counter - 1) * 1000);
484         }
485
486         trace_seq_init(&s);
487         trace_print_graph_duration(rec->time, &s);
488         trace_seq_puts(&s, "    ");
489         trace_print_graph_duration(avg, &s);
490         trace_seq_puts(&s, "    ");
491         trace_print_graph_duration(stddev, &s);
492         trace_print_seq(m, &s);
493 #endif
494         seq_putc(m, '\n');
495 out:
496         mutex_unlock(&ftrace_profile_lock);
497
498         return ret;
499 }
500
501 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
502 {
503         struct ftrace_profile_page *pg;
504
505         pg = stat->pages = stat->start;
506
507         while (pg) {
508                 memset(pg->records, 0, PROFILE_RECORDS_SIZE);
509                 pg->index = 0;
510                 pg = pg->next;
511         }
512
513         memset(stat->hash, 0,
514                FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
515 }
516
517 int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
518 {
519         struct ftrace_profile_page *pg;
520         int functions;
521         int pages;
522         int i;
523
524         /* If we already allocated, do nothing */
525         if (stat->pages)
526                 return 0;
527
528         stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
529         if (!stat->pages)
530                 return -ENOMEM;
531
532 #ifdef CONFIG_DYNAMIC_FTRACE
533         functions = ftrace_update_tot_cnt;
534 #else
535         /*
536          * We do not know the number of functions that exist because
537  * dynamic tracing is what counts them. From past experience
538          * we have around 20K functions. That should be more than enough.
539          * It is highly unlikely we will execute every function in
540          * the kernel.
541          */
542         functions = 20000;
543 #endif
544
545         pg = stat->start = stat->pages;
546
547         pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
548
549         for (i = 0; i < pages; i++) {
550                 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
551                 if (!pg->next)
552                         goto out_free;
553                 pg = pg->next;
554         }
555
556         return 0;
557
558  out_free:
559         pg = stat->start;
560         while (pg) {
561                 unsigned long tmp = (unsigned long)pg;
562
563                 pg = pg->next;
564                 free_page(tmp);
565         }
566
567         free_page((unsigned long)stat->pages);
568         stat->pages = NULL;
569         stat->start = NULL;
570
571         return -ENOMEM;
572 }
573
574 static int ftrace_profile_init_cpu(int cpu)
575 {
576         struct ftrace_profile_stat *stat;
577         int size;
578
579         stat = &per_cpu(ftrace_profile_stats, cpu);
580
581         if (stat->hash) {
582                 /* If the profile is already created, simply reset it */
583                 ftrace_profile_reset(stat);
584                 return 0;
585         }
586
587         /*
588          * We are profiling all functions, but usually only a few thousand
589          * functions are hit. We'll make a hash of 1024 items.
590          */
591         size = FTRACE_PROFILE_HASH_SIZE;
592
593         stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
594
595         if (!stat->hash)
596                 return -ENOMEM;
597
598         if (!ftrace_profile_bits) {
599                 size--;
600
601                 for (; size; size >>= 1)
602                         ftrace_profile_bits++;
603         }
604
605         /* Preallocate the function profiling pages */
606         if (ftrace_profile_pages_init(stat) < 0) {
607                 kfree(stat->hash);
608                 stat->hash = NULL;
609                 return -ENOMEM;
610         }
611
612         return 0;
613 }
614
615 static int ftrace_profile_init(void)
616 {
617         int cpu;
618         int ret = 0;
619
620         for_each_online_cpu(cpu) {
621                 ret = ftrace_profile_init_cpu(cpu);
622                 if (ret)
623                         break;
624         }
625
626         return ret;
627 }
628
629 /* interrupts must be disabled */
630 static struct ftrace_profile *
631 ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
632 {
633         struct ftrace_profile *rec;
634         struct hlist_head *hhd;
635         struct hlist_node *n;
636         unsigned long key;
637
638         key = hash_long(ip, ftrace_profile_bits);
639         hhd = &stat->hash[key];
640
641         if (hlist_empty(hhd))
642                 return NULL;
643
644         hlist_for_each_entry_rcu(rec, n, hhd, node) {
645                 if (rec->ip == ip)
646                         return rec;
647         }
648
649         return NULL;
650 }
651
652 static void ftrace_add_profile(struct ftrace_profile_stat *stat,
653                                struct ftrace_profile *rec)
654 {
655         unsigned long key;
656
657         key = hash_long(rec->ip, ftrace_profile_bits);
658         hlist_add_head_rcu(&rec->node, &stat->hash[key]);
659 }
660
661 /*
662  * The memory is already allocated; this simply finds a new record to use.
663  */
664 static struct ftrace_profile *
665 ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
666 {
667         struct ftrace_profile *rec = NULL;
668
669         /* prevent recursion (from NMIs) */
670         if (atomic_inc_return(&stat->disabled) != 1)
671                 goto out;
672
673         /*
674          * Try to find the function again since an NMI
675          * could have added it
676          */
677         rec = ftrace_find_profiled_func(stat, ip);
678         if (rec)
679                 goto out;
680
681         if (stat->pages->index == PROFILES_PER_PAGE) {
682                 if (!stat->pages->next)
683                         goto out;
684                 stat->pages = stat->pages->next;
685         }
686
687         rec = &stat->pages->records[stat->pages->index++];
688         rec->ip = ip;
689         ftrace_add_profile(stat, rec);
690
691  out:
692         atomic_dec(&stat->disabled);
693
694         return rec;
695 }
696
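/*
 * Profiler entry callback: with IRQs disabled, look up (or allocate) the
 * per-cpu record for this ip and bump its hit counter.
 */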
697 static void
698 function_profile_call(unsigned long ip, unsigned long parent_ip)
699 {
700         struct ftrace_profile_stat *stat;
701         struct ftrace_profile *rec;
702         unsigned long flags;
703
704         if (!ftrace_profile_enabled)
705                 return;
706
707         local_irq_save(flags);
708
709         stat = &__get_cpu_var(ftrace_profile_stats);
710         if (!stat->hash || !ftrace_profile_enabled)
711                 goto out;
712
713         rec = ftrace_find_profiled_func(stat, ip);
714         if (!rec) {
715                 rec = ftrace_profile_alloc(stat, ip);
716                 if (!rec)
717                         goto out;
718         }
719
720         rec->counter++;
721  out:
722         local_irq_restore(flags);
723 }
724
725 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
726 static int profile_graph_entry(struct ftrace_graph_ent *trace)
727 {
728         function_profile_call(trace->func, 0);
729         return 1;
730 }
731
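/*
 * Graph return callback: compute the duration of this call (subtracting
 * child time when the graph_time option is off) and accumulate it into
 * the record's time and time_squared fields.
 */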
732 static void profile_graph_return(struct ftrace_graph_ret *trace)
733 {
734         struct ftrace_profile_stat *stat;
735         unsigned long long calltime;
736         struct ftrace_profile *rec;
737         unsigned long flags;
738
739         local_irq_save(flags);
740         stat = &__get_cpu_var(ftrace_profile_stats);
741         if (!stat->hash || !ftrace_profile_enabled)
742                 goto out;
743
744         /* If the calltime was zeroed, ignore it */
745         if (!trace->calltime)
746                 goto out;
747
748         calltime = trace->rettime - trace->calltime;
749
750         if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
751                 int index;
752
753                 index = trace->depth;
754
755                 /* Append this call time to the parent time to subtract */
756                 if (index)
757                         current->ret_stack[index - 1].subtime += calltime;
758
759                 if (current->ret_stack[index].subtime < calltime)
760                         calltime -= current->ret_stack[index].subtime;
761                 else
762                         calltime = 0;
763         }
764
765         rec = ftrace_find_profiled_func(stat, trace->func);
766         if (rec) {
767                 rec->time += calltime;
768                 rec->time_squared += calltime * calltime;
769         }
770
771  out:
772         local_irq_restore(flags);
773 }
774
775 static int register_ftrace_profiler(void)
776 {
777         return register_ftrace_graph(&profile_graph_return,
778                                      &profile_graph_entry);
779 }
780
781 static void unregister_ftrace_profiler(void)
782 {
783         unregister_ftrace_graph();
784 }
785 #else
786 static struct ftrace_ops ftrace_profile_ops __read_mostly = {
787         .func           = function_profile_call,
788 };
789
790 static int register_ftrace_profiler(void)
791 {
792         return register_ftrace_function(&ftrace_profile_ops);
793 }
794
795 static void unregister_ftrace_profiler(void)
796 {
797         unregister_ftrace_function(&ftrace_profile_ops);
798 }
799 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
800
801 static ssize_t
802 ftrace_profile_write(struct file *filp, const char __user *ubuf,
803                      size_t cnt, loff_t *ppos)
804 {
805         unsigned long val;
806         int ret;
807
808         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
809         if (ret)
810                 return ret;
811
812         val = !!val;
813
814         mutex_lock(&ftrace_profile_lock);
815         if (ftrace_profile_enabled ^ val) {
816                 if (val) {
817                         ret = ftrace_profile_init();
818                         if (ret < 0) {
819                                 cnt = ret;
820                                 goto out;
821                         }
822
823                         ret = register_ftrace_profiler();
824                         if (ret < 0) {
825                                 cnt = ret;
826                                 goto out;
827                         }
828                         ftrace_profile_enabled = 1;
829                 } else {
830                         ftrace_profile_enabled = 0;
831                         /*
832                          * unregister_ftrace_profiler() calls stop_machine(),
833                          * so this acts like a synchronize_sched().
834                          */
835                         unregister_ftrace_profiler();
836                 }
837         }
838  out:
839         mutex_unlock(&ftrace_profile_lock);
840
841         *ppos += cnt;
842
843         return cnt;
844 }
845
846 static ssize_t
847 ftrace_profile_read(struct file *filp, char __user *ubuf,
848                      size_t cnt, loff_t *ppos)
849 {
850         char buf[64];           /* big enough to hold a number */
851         int r;
852
853         r = sprintf(buf, "%u\n", ftrace_profile_enabled);
854         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
855 }
856
857 static const struct file_operations ftrace_profile_fops = {
858         .open           = tracing_open_generic,
859         .read           = ftrace_profile_read,
860         .write          = ftrace_profile_write,
861         .llseek         = default_llseek,
862 };
863
864 /* used to initialize the real stat files */
865 static struct tracer_stat function_stats __initdata = {
866         .name           = "functions",
867         .stat_start     = function_stat_start,
868         .stat_next      = function_stat_next,
869         .stat_cmp       = function_stat_cmp,
870         .stat_headers   = function_stat_headers,
871         .stat_show      = function_stat_show
872 };
873
874 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
875 {
876         struct ftrace_profile_stat *stat;
877         struct dentry *entry;
878         char *name;
879         int ret;
880         int cpu;
881
882         for_each_possible_cpu(cpu) {
883                 stat = &per_cpu(ftrace_profile_stats, cpu);
884
885                 /* allocate enough for function name + cpu number */
886                 name = kmalloc(32, GFP_KERNEL);
887                 if (!name) {
888                         /*
889                          * The files created are permanent; if something goes
890                          * wrong here we still do not free the memory already in use.
891                          */
892                         WARN(1,
893                              "Could not allocate stat file for cpu %d\n",
894                              cpu);
895                         return;
896                 }
897                 stat->stat = function_stats;
898                 snprintf(name, 32, "function%d", cpu);
899                 stat->stat.name = name;
900                 ret = register_stat_tracer(&stat->stat);
901                 if (ret) {
902                         WARN(1,
903                              "Could not register function stat for cpu %d\n",
904                              cpu);
905                         kfree(name);
906                         return;
907                 }
908         }
909
910         entry = debugfs_create_file("function_profile_enabled", 0644,
911                                     d_tracer, NULL, &ftrace_profile_fops);
912         if (!entry)
913                 pr_warning("Could not create debugfs "
914                            "'function_profile_enabled' entry\n");
915 }
916
917 #else /* CONFIG_FUNCTION_PROFILER */
918 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
919 {
920 }
921 #endif /* CONFIG_FUNCTION_PROFILER */
922
923 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
924
925 #ifdef CONFIG_DYNAMIC_FTRACE
926
927 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
928 # error Dynamic ftrace depends on MCOUNT_RECORD
929 #endif
930
931 static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
932
933 struct ftrace_func_probe {
934         struct hlist_node       node;
935         struct ftrace_probe_ops *ops;
936         unsigned long           flags;
937         unsigned long           ip;
938         void                    *data;
939         struct rcu_head         rcu;
940 };
941
942 enum {
943         FTRACE_ENABLE_CALLS             = (1 << 0),
944         FTRACE_DISABLE_CALLS            = (1 << 1),
945         FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
946         FTRACE_START_FUNC_RET           = (1 << 3),
947         FTRACE_STOP_FUNC_RET            = (1 << 4),
948 };
949 struct ftrace_func_entry {
950         struct hlist_node hlist;
951         unsigned long ip;
952 };
953
954 struct ftrace_hash {
955         unsigned long           size_bits;
956         struct hlist_head       *buckets;
957         unsigned long           count;
958         struct rcu_head         rcu;
959 };
960
961 /*
962  * We make these constant because no one should touch them,
963  * but they are used as the default "empty hash", to avoid allocating
964  * it all the time. These are in a read only section such that if
965  * anyone does try to modify it, it will cause an exception.
966  */
967 static const struct hlist_head empty_buckets[1];
968 static const struct ftrace_hash empty_hash = {
969         .buckets = (struct hlist_head *)empty_buckets,
970 };
971 #define EMPTY_HASH      ((struct ftrace_hash *)&empty_hash)
972
973 static struct ftrace_ops global_ops = {
974         .func                   = ftrace_stub,
975         .notrace_hash           = EMPTY_HASH,
976         .filter_hash            = EMPTY_HASH,
977 };
978
979 static struct dyn_ftrace *ftrace_new_addrs;
980
981 static DEFINE_MUTEX(ftrace_regex_lock);
982
983 struct ftrace_page {
984         struct ftrace_page      *next;
985         int                     index;
986         struct dyn_ftrace       records[];
987 };
988
989 #define ENTRIES_PER_PAGE \
990   ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
991
992 /* estimate from running different kernels */
993 #define NR_TO_INIT              10000
994
995 static struct ftrace_page       *ftrace_pages_start;
996 static struct ftrace_page       *ftrace_pages;
997
998 static struct dyn_ftrace *ftrace_free_records;
999
1000 static struct ftrace_func_entry *
1001 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1002 {
1003         unsigned long key;
1004         struct ftrace_func_entry *entry;
1005         struct hlist_head *hhd;
1006         struct hlist_node *n;
1007
1008         if (!hash->count)
1009                 return NULL;
1010
1011         if (hash->size_bits > 0)
1012                 key = hash_long(ip, hash->size_bits);
1013         else
1014                 key = 0;
1015
1016         hhd = &hash->buckets[key];
1017
1018         hlist_for_each_entry_rcu(entry, n, hhd, hlist) {
1019                 if (entry->ip == ip)
1020                         return entry;
1021         }
1022         return NULL;
1023 }
1024
1025 static void __add_hash_entry(struct ftrace_hash *hash,
1026                              struct ftrace_func_entry *entry)
1027 {
1028         struct hlist_head *hhd;
1029         unsigned long key;
1030
1031         if (hash->size_bits)
1032                 key = hash_long(entry->ip, hash->size_bits);
1033         else
1034                 key = 0;
1035
1036         hhd = &hash->buckets[key];
1037         hlist_add_head(&entry->hlist, hhd);
1038         hash->count++;
1039 }
1040
1041 static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
1042 {
1043         struct ftrace_func_entry *entry;
1044
1045         entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1046         if (!entry)
1047                 return -ENOMEM;
1048
1049         entry->ip = ip;
1050         __add_hash_entry(hash, entry);
1051
1052         return 0;
1053 }
1054
1055 static void
1056 free_hash_entry(struct ftrace_hash *hash,
1057                   struct ftrace_func_entry *entry)
1058 {
1059         hlist_del(&entry->hlist);
1060         kfree(entry);
1061         hash->count--;
1062 }
1063
1064 static void
1065 remove_hash_entry(struct ftrace_hash *hash,
1066                   struct ftrace_func_entry *entry)
1067 {
1068         hlist_del(&entry->hlist);
1069         hash->count--;
1070 }
1071
1072 static void ftrace_hash_clear(struct ftrace_hash *hash)
1073 {
1074         struct hlist_head *hhd;
1075         struct hlist_node *tp, *tn;
1076         struct ftrace_func_entry *entry;
1077         int size = 1 << hash->size_bits;
1078         int i;
1079
1080         if (!hash->count)
1081                 return;
1082
1083         for (i = 0; i < size; i++) {
1084                 hhd = &hash->buckets[i];
1085                 hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist)
1086                         free_hash_entry(hash, entry);
1087         }
1088         FTRACE_WARN_ON(hash->count);
1089 }
1090
1091 static void free_ftrace_hash(struct ftrace_hash *hash)
1092 {
1093         if (!hash || hash == EMPTY_HASH)
1094                 return;
1095         ftrace_hash_clear(hash);
1096         kfree(hash->buckets);
1097         kfree(hash);
1098 }
1099
1100 static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
1101 {
1102         struct ftrace_hash *hash;
1103
1104         hash = container_of(rcu, struct ftrace_hash, rcu);
1105         free_ftrace_hash(hash);
1106 }
1107
1108 static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
1109 {
1110         if (!hash || hash == EMPTY_HASH)
1111                 return;
1112         call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
1113 }
1114
1115 static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
1116 {
1117         struct ftrace_hash *hash;
1118         int size;
1119
1120         hash = kzalloc(sizeof(*hash), GFP_KERNEL);
1121         if (!hash)
1122                 return NULL;
1123
1124         size = 1 << size_bits;
1125         hash->buckets = kzalloc(sizeof(*hash->buckets) * size, GFP_KERNEL);
1126
1127         if (!hash->buckets) {
1128                 kfree(hash);
1129                 return NULL;
1130         }
1131
1132         hash->size_bits = size_bits;
1133
1134         return hash;
1135 }
1136
1137 static struct ftrace_hash *
1138 alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1139 {
1140         struct ftrace_func_entry *entry;
1141         struct ftrace_hash *new_hash;
1142         struct hlist_node *tp;
1143         int size;
1144         int ret;
1145         int i;
1146
1147         new_hash = alloc_ftrace_hash(size_bits);
1148         if (!new_hash)
1149                 return NULL;
1150
1151         /* Empty hash? */
1152         if (!hash || !hash->count)
1153                 return new_hash;
1154
1155         size = 1 << hash->size_bits;
1156         for (i = 0; i < size; i++) {
1157                 hlist_for_each_entry(entry, tp, &hash->buckets[i], hlist) {
1158                         ret = add_hash_entry(new_hash, entry->ip);
1159                         if (ret < 0)
1160                                 goto free_hash;
1161                 }
1162         }
1163
1164         FTRACE_WARN_ON(new_hash->count != hash->count);
1165
1166         return new_hash;
1167
1168  free_hash:
1169         free_ftrace_hash(new_hash);
1170         return NULL;
1171 }
1172
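/*
 * Move every entry of @src into a freshly sized hash and publish it at
 * *@dst under RCU (or point *@dst at EMPTY_HASH when @src is empty).
 * The old *@dst is freed after a grace period.
 */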
1173 static int
1174 ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
1175 {
1176         struct ftrace_func_entry *entry;
1177         struct hlist_node *tp, *tn;
1178         struct hlist_head *hhd;
1179         struct ftrace_hash *old_hash;
1180         struct ftrace_hash *new_hash;
1181         unsigned long key;
1182         int size = src->count;
1183         int bits = 0;
1184         int i;
1185
1186         /*
1187          * If the new source is empty, just free dst and assign it
1188          * the empty_hash.
1189          */
1190         if (!src->count) {
1191                 free_ftrace_hash_rcu(*dst);
1192                 rcu_assign_pointer(*dst, EMPTY_HASH);
1193                 return 0;
1194         }
1195
1196         /*
1197          * Make the hash size about 1/2 the # found
1198          */
1199         for (size /= 2; size; size >>= 1)
1200                 bits++;
1201
1202         /* Don't allocate too much */
1203         if (bits > FTRACE_HASH_MAX_BITS)
1204                 bits = FTRACE_HASH_MAX_BITS;
1205
1206         new_hash = alloc_ftrace_hash(bits);
1207         if (!new_hash)
1208                 return -ENOMEM;
1209
1210         size = 1 << src->size_bits;
1211         for (i = 0; i < size; i++) {
1212                 hhd = &src->buckets[i];
1213                 hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) {
1214                         if (bits > 0)
1215                                 key = hash_long(entry->ip, bits);
1216                         else
1217                                 key = 0;
1218                         remove_hash_entry(src, entry);
1219                         __add_hash_entry(new_hash, entry);
1220                 }
1221         }
1222
1223         old_hash = *dst;
1224         rcu_assign_pointer(*dst, new_hash);
1225         free_ftrace_hash_rcu(old_hash);
1226
1227         return 0;
1228 }
1229
1230 /*
1231  * Test the hashes for this ops to see if we want to call
1232  * the ops->func or not.
1233  *
1234  * It's a match if the ip is in the ops->filter_hash or
1235  * the filter_hash does not exist or is empty,
1236  *  AND
1237  * the ip is not in the ops->notrace_hash.
1238  *
1239  * This needs to be called with preemption disabled as
1240  * the hashes are freed with call_rcu_sched().
1241  */
1242 static int
1243 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
1244 {
1245         struct ftrace_hash *filter_hash;
1246         struct ftrace_hash *notrace_hash;
1247         int ret;
1248
1249         filter_hash = rcu_dereference_raw(ops->filter_hash);
1250         notrace_hash = rcu_dereference_raw(ops->notrace_hash);
1251
1252         if ((!filter_hash || !filter_hash->count ||
1253              ftrace_lookup_ip(filter_hash, ip)) &&
1254             (!notrace_hash || !notrace_hash->count ||
1255              !ftrace_lookup_ip(notrace_hash, ip)))
1256                 ret = 1;
1257         else
1258                 ret = 0;
1259
1260         return ret;
1261 }
1262
1263 /*
1264  * This is a double for loop. Do not use 'break' to break out of it;
1265  * you must use a goto.
1266  */
1267 #define do_for_each_ftrace_rec(pg, rec)                                 \
1268         for (pg = ftrace_pages_start; pg; pg = pg->next) {              \
1269                 int _____i;                                             \
1270                 for (_____i = 0; _____i < pg->index; _____i++) {        \
1271                         rec = &pg->records[_____i];
1272
1273 #define while_for_each_ftrace_rec()             \
1274                 }                               \
1275         }
1276
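/*
 * Walk every dyn_ftrace record and adjust its reference count (kept in
 * rec->flags) according to @ops's filter or notrace hash, as that hash
 * is being enabled (@inc) or disabled.
 */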
1277 static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
1278                                      int filter_hash,
1279                                      bool inc)
1280 {
1281         struct ftrace_hash *hash;
1282         struct ftrace_hash *other_hash;
1283         struct ftrace_page *pg;
1284         struct dyn_ftrace *rec;
1285         int count = 0;
1286         int all = 0;
1287
1288         /* Only update if the ops has been registered */
1289         if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1290                 return;
1291
1292         /*
1293          * In the filter_hash case:
1294          *   If the count is zero, we update all records.
1295          *   Otherwise we just update the items in the hash.
1296          *
1297          * In the notrace_hash case:
1298          *   We enable the update in the hash.
1299          *   Since disabling notrace means enabling tracing, and
1300          *   enabling notrace means disabling it, the inc variable
1301          *   gets inverted.
1302          */
1303         if (filter_hash) {
1304                 hash = ops->filter_hash;
1305                 other_hash = ops->notrace_hash;
1306                 if (!hash || !hash->count)
1307                         all = 1;
1308         } else {
1309                 inc = !inc;
1310                 hash = ops->notrace_hash;
1311                 other_hash = ops->filter_hash;
1312                 /*
1313                  * If the notrace hash has no items,
1314                  * then there's nothing to do.
1315                  */
1316                 if (hash && !hash->count)
1317                         return;
1318         }
1319
1320         do_for_each_ftrace_rec(pg, rec) {
1321                 int in_other_hash = 0;
1322                 int in_hash = 0;
1323                 int match = 0;
1324
1325                 if (all) {
1326                         /*
1327                          * Only the filter_hash affects all records.
1328                          * Update if the record is not in the notrace hash.
1329                          */
1330                         if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
1331                                 match = 1;
1332                 } else {
1333                         in_hash = hash && !!ftrace_lookup_ip(hash, rec->ip);
1334                         in_other_hash = other_hash && !!ftrace_lookup_ip(other_hash, rec->ip);
1335
1336                         /*
1337                          * Match if this hash selects the record and the other hash allows it.
1338                          */
1339                         if (filter_hash && in_hash && !in_other_hash)
1340                                 match = 1;
1341                         else if (!filter_hash && in_hash &&
1342                                  (in_other_hash || !other_hash->count))
1343                                 match = 1;
1344                 }
1345                 if (!match)
1346                         continue;
1347
1348                 if (inc) {
1349                         rec->flags++;
1350                         if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX))
1351                                 return;
1352                 } else {
1353                         if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0))
1354                                 return;
1355                         rec->flags--;
1356                 }
1357                 count++;
1358                 /* Shortcut, if we handled all records, we are done. */
1359                 if (!all && count == hash->count)
1360                         return;
1361         } while_for_each_ftrace_rec();
1362 }
1363
1364 static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
1365                                     int filter_hash)
1366 {
1367         __ftrace_hash_rec_update(ops, filter_hash, 0);
1368 }
1369
1370 static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
1371                                    int filter_hash)
1372 {
1373         __ftrace_hash_rec_update(ops, filter_hash, 1);
1374 }
1375
1376 static void ftrace_free_rec(struct dyn_ftrace *rec)
1377 {
1378         rec->freelist = ftrace_free_records;
1379         ftrace_free_records = rec;
1380         rec->flags |= FTRACE_FL_FREE;
1381 }
1382
1383 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
1384 {
1385         struct dyn_ftrace *rec;
1386
1387         /* First check for freed records */
1388         if (ftrace_free_records) {
1389                 rec = ftrace_free_records;
1390
1391                 if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
1392                         FTRACE_WARN_ON_ONCE(1);
1393                         ftrace_free_records = NULL;
1394                         return NULL;
1395                 }
1396
1397                 ftrace_free_records = rec->freelist;
1398                 memset(rec, 0, sizeof(*rec));
1399                 return rec;
1400         }
1401
1402         if (ftrace_pages->index == ENTRIES_PER_PAGE) {
1403                 if (!ftrace_pages->next) {
1404                         /* allocate another page */
1405                         ftrace_pages->next =
1406                                 (void *)get_zeroed_page(GFP_KERNEL);
1407                         if (!ftrace_pages->next)
1408                                 return NULL;
1409                 }
1410                 ftrace_pages = ftrace_pages->next;
1411         }
1412
1413         return &ftrace_pages->records[ftrace_pages->index++];
1414 }
1415
1416 static struct dyn_ftrace *
1417 ftrace_record_ip(unsigned long ip)
1418 {
1419         struct dyn_ftrace *rec;
1420
1421         if (ftrace_disabled)
1422                 return NULL;
1423
1424         rec = ftrace_alloc_dyn_node(ip);
1425         if (!rec)
1426                 return NULL;
1427
1428         rec->ip = ip;
1429         rec->newlist = ftrace_new_addrs;
1430         ftrace_new_addrs = rec;
1431
1432         return rec;
1433 }
1434
1435 static void print_ip_ins(const char *fmt, unsigned char *p)
1436 {
1437         int i;
1438
1439         printk(KERN_CONT "%s", fmt);
1440
1441         for (i = 0; i < MCOUNT_INSN_SIZE; i++)
1442                 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
1443 }
1444
1445 static void ftrace_bug(int failed, unsigned long ip)
1446 {
1447         switch (failed) {
1448         case -EFAULT:
1449                 FTRACE_WARN_ON_ONCE(1);
1450                 pr_info("ftrace faulted on modifying ");
1451                 print_ip_sym(ip);
1452                 break;
1453         case -EINVAL:
1454                 FTRACE_WARN_ON_ONCE(1);
1455                 pr_info("ftrace failed to modify ");
1456                 print_ip_sym(ip);
1457                 print_ip_ins(" actual: ", (unsigned char *)ip);
1458                 printk(KERN_CONT "\n");
1459                 break;
1460         case -EPERM:
1461                 FTRACE_WARN_ON_ONCE(1);
1462                 pr_info("ftrace faulted on writing ");
1463                 print_ip_sym(ip);
1464                 break;
1465         default:
1466                 FTRACE_WARN_ON_ONCE(1);
1467                 pr_info("ftrace faulted on unknown error ");
1468                 print_ip_sym(ip);
1469         }
1470 }
1471
1472
1473 /* Return 1 if the address range is reserved for ftrace */
1474 int ftrace_text_reserved(void *start, void *end)
1475 {
1476         struct dyn_ftrace *rec;
1477         struct ftrace_page *pg;
1478
1479         do_for_each_ftrace_rec(pg, rec) {
1480                 if (rec->ip <= (unsigned long)end &&
1481                     rec->ip + MCOUNT_INSN_SIZE > (unsigned long)start)
1482                         return 1;
1483         } while_for_each_ftrace_rec();
1484         return 0;
1485 }
1486
1487
1488 static int
1489 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
1490 {
1491         unsigned long ftrace_addr;
1492         unsigned long flag = 0UL;
1493
1494         ftrace_addr = (unsigned long)FTRACE_ADDR;
1495
1496         /*
1497          * If we are enabling tracing:
1498          *
1499          *   If the record has a ref count, then we need to enable it
1500          *   because someone is using it.
1501          *
1502          *   Otherwise we make sure its disabled.
1503          *
1504          * If we are disabling tracing, then disable all records that
1505          * are enabled.
1506          */
1507         if (enable && (rec->flags & ~FTRACE_FL_MASK))
1508                 flag = FTRACE_FL_ENABLED;
1509
1510         /* If the state of this record hasn't changed, then do nothing */
1511         if ((rec->flags & FTRACE_FL_ENABLED) == flag)
1512                 return 0;
1513
1514         if (flag) {
1515                 rec->flags |= FTRACE_FL_ENABLED;
1516                 return ftrace_make_call(rec, ftrace_addr);
1517         }
1518
1519         rec->flags &= ~FTRACE_FL_ENABLED;
1520         return ftrace_make_nop(NULL, rec, ftrace_addr);
1521 }
1522
1523 static void ftrace_replace_code(int enable)
1524 {
1525         struct dyn_ftrace *rec;
1526         struct ftrace_page *pg;
1527         int failed;
1528
1529         if (unlikely(ftrace_disabled))
1530                 return;
1531
1532         do_for_each_ftrace_rec(pg, rec) {
1533                 /* Skip over free records */
1534                 if (rec->flags & FTRACE_FL_FREE)
1535                         continue;
1536
1537                 failed = __ftrace_replace_code(rec, enable);
1538                 if (failed) {
1539                         ftrace_bug(failed, rec->ip);
1540                         /* Stop processing */
1541                         return;
1542                 }
1543         } while_for_each_ftrace_rec();
1544 }
1545
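/*
 * Convert the mcount call site of @rec into a NOP.  Returns 1 on success,
 * 0 on failure (after reporting the failure through ftrace_bug()).
 */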
1546 static int
1547 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
1548 {
1549         unsigned long ip;
1550         int ret;
1551
1552         ip = rec->ip;
1553
1554         if (unlikely(ftrace_disabled))
1555                 return 0;
1556
1557         ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
1558         if (ret) {
1559                 ftrace_bug(ret, ip);
1560                 return 0;
1561         }
1562         return 1;
1563 }
1564
1565 /*
1566  * archs can override this function if they must do something
1567  * before the modifying code is performed.
1568  */
1569 int __weak ftrace_arch_code_modify_prepare(void)
1570 {
1571         return 0;
1572 }
1573
1574 /*
1575  * archs can override this function if they must do something
1576  * after the modifying code is performed.
1577  */
1578 int __weak ftrace_arch_code_modify_post_process(void)
1579 {
1580         return 0;
1581 }
1582
1583 static int __ftrace_modify_code(void *data)
1584 {
1585         int *command = data;
1586
1587         if (*command & FTRACE_ENABLE_CALLS)
1588                 ftrace_replace_code(1);
1589         else if (*command & FTRACE_DISABLE_CALLS)
1590                 ftrace_replace_code(0);
1591
1592         if (*command & FTRACE_UPDATE_TRACE_FUNC)
1593                 ftrace_update_ftrace_func(ftrace_trace_function);
1594
1595         if (*command & FTRACE_START_FUNC_RET)
1596                 ftrace_enable_ftrace_graph_caller();
1597         else if (*command & FTRACE_STOP_FUNC_RET)
1598                 ftrace_disable_ftrace_graph_caller();
1599
1600         return 0;
1601 }
1602
1603 static void ftrace_run_update_code(int command)
1604 {
1605         int ret;
1606
1607         ret = ftrace_arch_code_modify_prepare();
1608         FTRACE_WARN_ON(ret);
1609         if (ret)
1610                 return;
1611
1612         stop_machine(__ftrace_modify_code, &command, NULL);
1613
1614         ret = ftrace_arch_code_modify_post_process();
1615         FTRACE_WARN_ON(ret);
1616 }
1617
1618 static ftrace_func_t saved_ftrace_func;
1619 static int ftrace_start_up;
1620 static int global_start_up;
1621
1622 static void ftrace_startup_enable(int command)
1623 {
1624         if (saved_ftrace_func != ftrace_trace_function) {
1625                 saved_ftrace_func = ftrace_trace_function;
1626                 command |= FTRACE_UPDATE_TRACE_FUNC;
1627         }
1628
1629         if (!command || !ftrace_enabled)
1630                 return;
1631
1632         ftrace_run_update_code(command);
1633 }
1634
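/*
 * Start tracing for @ops: bump the start counters, account its filter hash
 * in the per-record ref counts (global ops share the global_ops hashes),
 * and patch the mcount call sites if ftrace is enabled.
 */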
1635 static int ftrace_startup(struct ftrace_ops *ops, int command)
1636 {
1637         bool hash_enable = true;
1638
1639         if (unlikely(ftrace_disabled))
1640                 return -ENODEV;
1641
1642         ftrace_start_up++;
1643         command |= FTRACE_ENABLE_CALLS;
1644
1645         /* ops marked global share the filter hashes */
1646         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
1647                 ops = &global_ops;
1648                 /* Don't update hash if global is already set */
1649                 if (global_start_up)
1650                         hash_enable = false;
1651                 global_start_up++;
1652         }
1653
1654         ops->flags |= FTRACE_OPS_FL_ENABLED;
1655         if (hash_enable)
1656                 ftrace_hash_rec_enable(ops, 1);
1657
1658         ftrace_startup_enable(command);
1659
1660         return 0;
1661 }
1662
1663 static void ftrace_shutdown(struct ftrace_ops *ops, int command)
1664 {
1665         bool hash_disable = true;
1666
1667         if (unlikely(ftrace_disabled))
1668                 return;
1669
1670         ftrace_start_up--;
1671         /*
1672          * Just warn in case of unbalance; no need to kill ftrace, it's not
1673          * critical, but the ftrace_call callers may never be nopped again
1674          * after further ftrace uses.
1675          */
1676         WARN_ON_ONCE(ftrace_start_up < 0);
1677
1678         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
1679                 ops = &global_ops;
1680                 global_start_up--;
1681                 WARN_ON_ONCE(global_start_up < 0);
1682                 /* Don't update hash if global still has users */
1683                 if (global_start_up) {
1684                         WARN_ON_ONCE(!ftrace_start_up);
1685                         hash_disable = false;
1686                 }
1687         }
1688
1689         if (hash_disable)
1690                 ftrace_hash_rec_disable(ops, 1);
1691
1692         if (ops != &global_ops || !global_start_up)
1693                 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
1694
1695         if (!ftrace_start_up)
1696                 command |= FTRACE_DISABLE_CALLS;
1697
1698         if (saved_ftrace_func != ftrace_trace_function) {
1699                 saved_ftrace_func = ftrace_trace_function;
1700                 command |= FTRACE_UPDATE_TRACE_FUNC;
1701         }
1702
1703         if (!command || !ftrace_enabled)
1704                 return;
1705
1706         ftrace_run_update_code(command);
1707 }
1708
1709 static void ftrace_startup_sysctl(void)
1710 {
1711         if (unlikely(ftrace_disabled))
1712                 return;
1713
1714         /* Force update next time */
1715         saved_ftrace_func = NULL;
1716         /* ftrace_start_up is true if we want ftrace running */
1717         if (ftrace_start_up)
1718                 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
1719 }
1720
1721 static void ftrace_shutdown_sysctl(void)
1722 {
1723         if (unlikely(ftrace_disabled))
1724                 return;
1725
1726         /* ftrace_start_up is true if ftrace is running */
1727         if (ftrace_start_up)
1728                 ftrace_run_update_code(FTRACE_DISABLE_CALLS);
1729 }
1730
1731 static cycle_t          ftrace_update_time;
1732 static unsigned long    ftrace_update_cnt;
1733 unsigned long           ftrace_update_tot_cnt;
1734
1735 static int ops_traces_mod(struct ftrace_ops *ops)
1736 {
1737         struct ftrace_hash *hash;
1738
1739         hash = ops->filter_hash;
1740         return !!(!hash || !hash->count);
1741 }
1742
1743 static int ftrace_update_code(struct module *mod)
1744 {
1745         struct dyn_ftrace *p;
1746         cycle_t start, stop;
1747         unsigned long ref = 0;
1748
1749         /*
1750          * When adding a module, we need to check if tracers are
1751          * currently enabled and if they are set to trace all functions.
1752          * If they are, we need to enable the module functions as well
1753          * as update the reference counts for those function records.
1754          */
1755         if (mod) {
1756                 struct ftrace_ops *ops;
1757
1758                 for (ops = ftrace_ops_list;
1759                      ops != &ftrace_list_end; ops = ops->next) {
1760                         if (ops->flags & FTRACE_OPS_FL_ENABLED &&
1761                             ops_traces_mod(ops))
1762                                 ref++;
1763                 }
1764         }
1765
1766         start = ftrace_now(raw_smp_processor_id());
1767         ftrace_update_cnt = 0;
1768
1769         while (ftrace_new_addrs) {
1770
1771                 /* If something went wrong, bail without enabling anything */
1772                 if (unlikely(ftrace_disabled))
1773                         return -1;
1774
1775                 p = ftrace_new_addrs;
1776                 ftrace_new_addrs = p->newlist;
1777                 p->flags = ref;
1778
1779                 /*
1780                  * Do the initial record conversion from mcount jump
1781                  * to the NOP instructions.
1782                  */
1783                 if (!ftrace_code_disable(mod, p)) {
1784                         ftrace_free_rec(p);
1785                         /* Game over */
1786                         break;
1787                 }
1788
1789                 ftrace_update_cnt++;
1790
1791                 /*
1792                  * If tracing is enabled, go ahead and enable the record.
1793                  *
1794                  * The reason not to enable the record immediately is the
1795                  * inherent check of ftrace_make_nop/ftrace_make_call for
1796                  * the correct previous instructions.  Doing the NOP
1797                  * conversion first puts the module into the correct state,
1798                  * thus passing the ftrace_make_call check.
1799                  */
1800                 if (ftrace_start_up && ref) {
1801                         int failed = __ftrace_replace_code(p, 1);
1802                         if (failed) {
1803                                 ftrace_bug(failed, p->ip);
1804                                 ftrace_free_rec(p);
1805                         }
1806                 }
1807         }
1808
1809         stop = ftrace_now(raw_smp_processor_id());
1810         ftrace_update_time = stop - start;
1811         ftrace_update_tot_cnt += ftrace_update_cnt;
1812
1813         return 0;
1814 }
1815
1816 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
1817 {
1818         struct ftrace_page *pg;
1819         int cnt;
1820         int i;
1821
1822         /* allocate a few pages */
1823         ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
1824         if (!ftrace_pages_start)
1825                 return -1;
1826
1827         /*
1828          * Allocate a few more pages.
1829          *
1830          * TODO: have some parser search vmlinux before
1831          *   final linking to find all calls to ftrace.
1832          *   Then we can:
1833          *    a) know how many pages to allocate.
1834          *     and/or
1835          *    b) set up the table then.
1836          *
1837          *  The dynamic code is still necessary for
1838          *  modules.
1839          */
1840
1841         pg = ftrace_pages = ftrace_pages_start;
1842
1843         cnt = num_to_init / ENTRIES_PER_PAGE;
1844         pr_info("ftrace: allocating %ld entries in %d pages\n",
1845                 num_to_init, cnt + 1);
1846
1847         for (i = 0; i < cnt; i++) {
1848                 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
1849
1850                 /* If we fail, we'll try later anyway */
1851                 if (!pg->next)
1852                         break;
1853
1854                 pg = pg->next;
1855         }
1856
1857         return 0;
1858 }
1859
1860 enum {
1861         FTRACE_ITER_FILTER      = (1 << 0),
1862         FTRACE_ITER_NOTRACE     = (1 << 1),
1863         FTRACE_ITER_PRINTALL    = (1 << 2),
1864         FTRACE_ITER_HASH        = (1 << 3),
1865         FTRACE_ITER_ENABLED     = (1 << 4),
1866 };
1867
1868 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
1869
1870 struct ftrace_iterator {
1871         loff_t                          pos;
1872         loff_t                          func_pos;
1873         struct ftrace_page              *pg;
1874         struct dyn_ftrace               *func;
1875         struct ftrace_func_probe        *probe;
1876         struct trace_parser             parser;
1877         struct ftrace_hash              *hash;
1878         struct ftrace_ops               *ops;
1879         int                             hidx;
1880         int                             idx;
1881         unsigned                        flags;
1882 };
1883
1884 static void *
1885 t_hash_next(struct seq_file *m, loff_t *pos)
1886 {
1887         struct ftrace_iterator *iter = m->private;
1888         struct hlist_node *hnd = NULL;
1889         struct hlist_head *hhd;
1890
1891         (*pos)++;
1892         iter->pos = *pos;
1893
1894         if (iter->probe)
1895                 hnd = &iter->probe->node;
1896  retry:
1897         if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
1898                 return NULL;
1899
1900         hhd = &ftrace_func_hash[iter->hidx];
1901
1902         if (hlist_empty(hhd)) {
1903                 iter->hidx++;
1904                 hnd = NULL;
1905                 goto retry;
1906         }
1907
1908         if (!hnd)
1909                 hnd = hhd->first;
1910         else {
1911                 hnd = hnd->next;
1912                 if (!hnd) {
1913                         iter->hidx++;
1914                         goto retry;
1915                 }
1916         }
1917
1918         if (WARN_ON_ONCE(!hnd))
1919                 return NULL;
1920
1921         iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
1922
1923         return iter;
1924 }
1925
1926 static void *t_hash_start(struct seq_file *m, loff_t *pos)
1927 {
1928         struct ftrace_iterator *iter = m->private;
1929         void *p = NULL;
1930         loff_t l;
1931
1932         if (iter->func_pos > *pos)
1933                 return NULL;
1934
1935         iter->hidx = 0;
1936         for (l = 0; l <= (*pos - iter->func_pos); ) {
1937                 p = t_hash_next(m, &l);
1938                 if (!p)
1939                         break;
1940         }
1941         if (!p)
1942                 return NULL;
1943
1944         /* Only set this if we have an item */
1945         iter->flags |= FTRACE_ITER_HASH;
1946
1947         return iter;
1948 }
1949
1950 static int
1951 t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
1952 {
1953         struct ftrace_func_probe *rec;
1954
1955         rec = iter->probe;
1956         if (WARN_ON_ONCE(!rec))
1957                 return -EIO;
1958
1959         if (rec->ops->print)
1960                 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
1961
1962         seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
1963
1964         if (rec->data)
1965                 seq_printf(m, ":%p", rec->data);
1966         seq_putc(m, '\n');
1967
1968         return 0;
1969 }
1970
1971 static void *
1972 t_next(struct seq_file *m, void *v, loff_t *pos)
1973 {
1974         struct ftrace_iterator *iter = m->private;
1975         struct ftrace_ops *ops = &global_ops;
1976         struct dyn_ftrace *rec = NULL;
1977
1978         if (unlikely(ftrace_disabled))
1979                 return NULL;
1980
1981         if (iter->flags & FTRACE_ITER_HASH)
1982                 return t_hash_next(m, pos);
1983
1984         (*pos)++;
1985         iter->pos = iter->func_pos = *pos;
1986
1987         if (iter->flags & FTRACE_ITER_PRINTALL)
1988                 return t_hash_start(m, pos);
1989
1990  retry:
1991         if (iter->idx >= iter->pg->index) {
1992                 if (iter->pg->next) {
1993                         iter->pg = iter->pg->next;
1994                         iter->idx = 0;
1995                         goto retry;
1996                 }
1997         } else {
1998                 rec = &iter->pg->records[iter->idx++];
1999                 if ((rec->flags & FTRACE_FL_FREE) ||
2000
2001                     ((iter->flags & FTRACE_ITER_FILTER) &&
2002                      !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||
2003
2004                     ((iter->flags & FTRACE_ITER_NOTRACE) &&
2005                      !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
2006
2007                     ((iter->flags & FTRACE_ITER_ENABLED) &&
2008                      !(rec->flags & ~FTRACE_FL_MASK))) {
2009
2010                         rec = NULL;
2011                         goto retry;
2012                 }
2013         }
2014
2015         if (!rec)
2016                 return t_hash_start(m, pos);
2017
2018         iter->func = rec;
2019
2020         return iter;
2021 }
2022
2023 static void reset_iter_read(struct ftrace_iterator *iter)
2024 {
2025         iter->pos = 0;
2026         iter->func_pos = 0;
2027         iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
2028 }
2029
2030 static void *t_start(struct seq_file *m, loff_t *pos)
2031 {
2032         struct ftrace_iterator *iter = m->private;
2033         struct ftrace_ops *ops = &global_ops;
2034         void *p = NULL;
2035         loff_t l;
2036
2037         mutex_lock(&ftrace_lock);
2038
2039         if (unlikely(ftrace_disabled))
2040                 return NULL;
2041
2042         /*
2043          * If an lseek was done, then reset and start from beginning.
2044          */
2045         if (*pos < iter->pos)
2046                 reset_iter_read(iter);
2047
2048         /*
2049          * For set_ftrace_filter reading, if the filter is
2050          * empty, we can short-cut and just print out that all
2051          * functions are enabled.
2052          */
2053         if (iter->flags & FTRACE_ITER_FILTER && !ops->filter_hash->count) {
2054                 if (*pos > 0)
2055                         return t_hash_start(m, pos);
2056                 iter->flags |= FTRACE_ITER_PRINTALL;
2057                 /* reset in case of seek/pread */
2058                 iter->flags &= ~FTRACE_ITER_HASH;
2059                 return iter;
2060         }
2061
2062         if (iter->flags & FTRACE_ITER_HASH)
2063                 return t_hash_start(m, pos);
2064
2065         /*
2066          * Unfortunately, we need to restart at ftrace_pages_start
2067          * every time we let go of ftrace_lock. This is because
2068          * those pointers can change without the lock held.
2069          */
2070         iter->pg = ftrace_pages_start;
2071         iter->idx = 0;
2072         for (l = 0; l <= *pos; ) {
2073                 p = t_next(m, p, &l);
2074                 if (!p)
2075                         break;
2076         }
2077
2078         if (!p) {
2079                 if (iter->flags & FTRACE_ITER_FILTER)
2080                         return t_hash_start(m, pos);
2081
2082                 return NULL;
2083         }
2084
2085         return iter;
2086 }
2087
2088 static void t_stop(struct seq_file *m, void *p)
2089 {
2090         mutex_unlock(&ftrace_lock);
2091 }
2092
2093 static int t_show(struct seq_file *m, void *v)
2094 {
2095         struct ftrace_iterator *iter = m->private;
2096         struct dyn_ftrace *rec;
2097
2098         if (iter->flags & FTRACE_ITER_HASH)
2099                 return t_hash_show(m, iter);
2100
2101         if (iter->flags & FTRACE_ITER_PRINTALL) {
2102                 seq_printf(m, "#### all functions enabled ####\n");
2103                 return 0;
2104         }
2105
2106         rec = iter->func;
2107
2108         if (!rec)
2109                 return 0;
2110
2111         seq_printf(m, "%ps", (void *)rec->ip);
2112         if (iter->flags & FTRACE_ITER_ENABLED)
2113                 seq_printf(m, " (%ld)",
2114                            rec->flags & ~FTRACE_FL_MASK);
2115         seq_printf(m, "\n");
2116
2117         return 0;
2118 }
2119
2120 static const struct seq_operations show_ftrace_seq_ops = {
2121         .start = t_start,
2122         .next = t_next,
2123         .stop = t_stop,
2124         .show = t_show,
2125 };
2126
2127 static int
2128 ftrace_avail_open(struct inode *inode, struct file *file)
2129 {
2130         struct ftrace_iterator *iter;
2131         int ret;
2132
2133         if (unlikely(ftrace_disabled))
2134                 return -ENODEV;
2135
2136         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2137         if (!iter)
2138                 return -ENOMEM;
2139
2140         iter->pg = ftrace_pages_start;
2141
2142         ret = seq_open(file, &show_ftrace_seq_ops);
2143         if (!ret) {
2144                 struct seq_file *m = file->private_data;
2145
2146                 m->private = iter;
2147         } else {
2148                 kfree(iter);
2149         }
2150
2151         return ret;
2152 }
2153
2154 static int
2155 ftrace_enabled_open(struct inode *inode, struct file *file)
2156 {
2157         struct ftrace_iterator *iter;
2158         int ret;
2159
2160         if (unlikely(ftrace_disabled))
2161                 return -ENODEV;
2162
2163         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2164         if (!iter)
2165                 return -ENOMEM;
2166
2167         iter->pg = ftrace_pages_start;
2168         iter->flags = FTRACE_ITER_ENABLED;
2169
2170         ret = seq_open(file, &show_ftrace_seq_ops);
2171         if (!ret) {
2172                 struct seq_file *m = file->private_data;
2173
2174                 m->private = iter;
2175         } else {
2176                 kfree(iter);
2177         }
2178
2179         return ret;
2180 }
2181
2182 static void ftrace_filter_reset(struct ftrace_hash *hash)
2183 {
2184         mutex_lock(&ftrace_lock);
2185         ftrace_hash_clear(hash);
2186         mutex_unlock(&ftrace_lock);
2187 }
2188
2189 static int
2190 ftrace_regex_open(struct ftrace_ops *ops, int flag,
2191                   struct inode *inode, struct file *file)
2192 {
2193         struct ftrace_iterator *iter;
2194         struct ftrace_hash *hash;
2195         int ret = 0;
2196
2197         if (unlikely(ftrace_disabled))
2198                 return -ENODEV;
2199
2200         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2201         if (!iter)
2202                 return -ENOMEM;
2203
2204         if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
2205                 kfree(iter);
2206                 return -ENOMEM;
2207         }
2208
2209         if (flag & FTRACE_ITER_NOTRACE)
2210                 hash = ops->notrace_hash;
2211         else
2212                 hash = ops->filter_hash;
2213
2214         iter->ops = ops;
2215         iter->flags = flag;
2216
2217         if (file->f_mode & FMODE_WRITE) {
2218                 mutex_lock(&ftrace_lock);
2219                 iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
2220                 mutex_unlock(&ftrace_lock);
2221
2222                 if (!iter->hash) {
2223                         trace_parser_put(&iter->parser);
2224                         kfree(iter);
2225                         return -ENOMEM;
2226                 }
2227         }
2228
2229         mutex_lock(&ftrace_regex_lock);
2230
2231         if ((file->f_mode & FMODE_WRITE) &&
2232             (file->f_flags & O_TRUNC))
2233                 ftrace_filter_reset(iter->hash);
2234
2235         if (file->f_mode & FMODE_READ) {
2236                 iter->pg = ftrace_pages_start;
2237
2238                 ret = seq_open(file, &show_ftrace_seq_ops);
2239                 if (!ret) {
2240                         struct seq_file *m = file->private_data;
2241                         m->private = iter;
2242                 } else {
2243                         /* Failed */
2244                         free_ftrace_hash(iter->hash);
2245                         trace_parser_put(&iter->parser);
2246                         kfree(iter);
2247                 }
2248         } else
2249                 file->private_data = iter;
2250         mutex_unlock(&ftrace_regex_lock);
2251
2252         return ret;
2253 }
2254
2255 static int
2256 ftrace_filter_open(struct inode *inode, struct file *file)
2257 {
2258         return ftrace_regex_open(&global_ops, FTRACE_ITER_FILTER,
2259                                  inode, file);
2260 }
2261
2262 static int
2263 ftrace_notrace_open(struct inode *inode, struct file *file)
2264 {
2265         return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE,
2266                                  inode, file);
2267 }
2268
2269 static loff_t
2270 ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
2271 {
2272         loff_t ret;
2273
2274         if (file->f_mode & FMODE_READ)
2275                 ret = seq_lseek(file, offset, origin);
2276         else
2277                 file->f_pos = ret = 1;
2278
2279         return ret;
2280 }
2281
2282 static int ftrace_match(char *str, char *regex, int len, int type)
2283 {
2284         int matched = 0;
2285         int slen;
2286
2287         switch (type) {
2288         case MATCH_FULL:
2289                 if (strcmp(str, regex) == 0)
2290                         matched = 1;
2291                 break;
2292         case MATCH_FRONT_ONLY:
2293                 if (strncmp(str, regex, len) == 0)
2294                         matched = 1;
2295                 break;
2296         case MATCH_MIDDLE_ONLY:
2297                 if (strstr(str, regex))
2298                         matched = 1;
2299                 break;
2300         case MATCH_END_ONLY:
2301                 slen = strlen(str);
2302                 if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
2303                         matched = 1;
2304                 break;
2305         }
2306
2307         return matched;
2308 }
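
/*
 * Editor's note (illustrative, not from the original source): ftrace_match()
 * sees the pattern with any glob characters already stripped by
 * filter_parse_regex().  Assuming that helper's usual mapping, the match
 * types roughly correspond to:
 *
 *	"sched_fork"	MATCH_FULL		exact name only
 *	"sched_*"	MATCH_FRONT_ONLY	strncmp() on the prefix
 *	"*_fork"	MATCH_END_ONLY		compare the suffix
 *	"*sched*"	MATCH_MIDDLE_ONLY	strstr() substring search
 */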
2309
2310 static int
2311 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
2312 {
2313         struct ftrace_func_entry *entry;
2314         int ret = 0;
2315
2316         entry = ftrace_lookup_ip(hash, rec->ip);
2317         if (not) {
2318                 /* Do nothing if it doesn't exist */
2319                 if (!entry)
2320                         return 0;
2321
2322                 free_hash_entry(hash, entry);
2323         } else {
2324                 /* Do nothing if it exists */
2325                 if (entry)
2326                         return 0;
2327
2328                 ret = add_hash_entry(hash, rec->ip);
2329         }
2330         return ret;
2331 }
2332
2333 static int
2334 ftrace_match_record(struct dyn_ftrace *rec, char *mod,
2335                     char *regex, int len, int type)
2336 {
2337         char str[KSYM_SYMBOL_LEN];
2338         char *modname;
2339
2340         kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
2341
2342         if (mod) {
2343                 /* module lookup requires matching the module */
2344                 if (!modname || strcmp(modname, mod))
2345                         return 0;
2346
2347                 /* blank search means to match all funcs in the mod */
2348                 if (!len)
2349                         return 1;
2350         }
2351
2352         return ftrace_match(str, regex, len, type);
2353 }
2354
2355 static int
2356 match_records(struct ftrace_hash *hash, char *buff,
2357               int len, char *mod, int not)
2358 {
2359         unsigned search_len = 0;
2360         struct ftrace_page *pg;
2361         struct dyn_ftrace *rec;
2362         int type = MATCH_FULL;
2363         char *search = buff;
2364         int found = 0;
2365         int ret;
2366
2367         if (len) {
2368                 type = filter_parse_regex(buff, len, &search, &not);
2369                 search_len = strlen(search);
2370         }
2371
2372         mutex_lock(&ftrace_lock);
2373
2374         if (unlikely(ftrace_disabled))
2375                 goto out_unlock;
2376
2377         do_for_each_ftrace_rec(pg, rec) {
2378
2379                 if (ftrace_match_record(rec, mod, search, search_len, type)) {
2380                         ret = enter_record(hash, rec, not);
2381                         if (ret < 0) {
2382                                 found = ret;
2383                                 goto out_unlock;
2384                         }
2385                         found = 1;
2386                 }
2387         } while_for_each_ftrace_rec();
2388  out_unlock:
2389         mutex_unlock(&ftrace_lock);
2390
2391         return found;
2392 }
2393
2394 static int
2395 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
2396 {
2397         return match_records(hash, buff, len, NULL, 0);
2398 }
2399
2400 static int
2401 ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
2402 {
2403         int not = 0;
2404
2405         /* blank or '*' mean the same */
2406         if (strcmp(buff, "*") == 0)
2407                 buff[0] = 0;
2408
2409         /* handle the case of 'don't filter this module' */
2410         if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
2411                 buff[0] = 0;
2412                 not = 1;
2413         }
2414
2415         return match_records(hash, buff, strlen(buff), mod, not);
2416 }
2417
2418 /*
2419  * We register the module command as a template to show others how
2420  * to register a command as well (see the sketch after ftrace_mod_cmd_init()).
2421  */
2422
2423 static int
2424 ftrace_mod_callback(struct ftrace_hash *hash,
2425                     char *func, char *cmd, char *param, int enable)
2426 {
2427         char *mod;
2428         int ret = -EINVAL;
2429
2430         /*
2431          * cmd == 'mod' because we only registered this func
2432          * for the 'mod' ftrace_func_command.
2433          * But if you register one func with multiple commands,
2434          * you can tell which command was used by the cmd
2435          * parameter.
2436          */
2437
2438         /* we must have a module name */
2439         if (!param)
2440                 return ret;
2441
2442         mod = strsep(&param, ":");
2443         if (!strlen(mod))
2444                 return ret;
2445
2446         ret = ftrace_match_module_records(hash, func, mod);
2447         if (!ret)
2448                 ret = -EINVAL;
2449         if (ret < 0)
2450                 return ret;
2451
2452         return 0;
2453 }
2454
2455 static struct ftrace_func_command ftrace_mod_cmd = {
2456         .name                   = "mod",
2457         .func                   = ftrace_mod_callback,
2458 };
2459
2460 static int __init ftrace_mod_cmd_init(void)
2461 {
2462         return register_ftrace_command(&ftrace_mod_cmd);
2463 }
2464 device_initcall(ftrace_mod_cmd_init);
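
/*
 * Editor's example -- a minimal sketch, not part of the original file:
 * registering a second, hypothetical command ("demo") the same way the
 * "mod" template above does.  Only register_ftrace_command() and the
 * callback signature are taken from this file; the command itself and
 * its behaviour are made up for illustration and therefore compiled out.
 */
#if 0
static int
ftrace_demo_callback(struct ftrace_hash *hash,
		     char *func, char *cmd, char *param, int enable)
{
	int ret;

	/*
	 * Behave like a plain filter write: add every function matching
	 * the glob in @func to @hash.  @param and @enable are ignored
	 * in this sketch.
	 */
	ret = ftrace_match_records(hash, func, strlen(func));
	if (!ret)
		return -EINVAL;
	if (ret < 0)
		return ret;
	return 0;
}

static struct ftrace_func_command ftrace_demo_cmd = {
	.name			= "demo",
	.func			= ftrace_demo_callback,
};

static int __init ftrace_demo_cmd_init(void)
{
	/* Would make "<glob>:demo" usable in set_ftrace_filter */
	return register_ftrace_command(&ftrace_demo_cmd);
}
device_initcall(ftrace_demo_cmd_init);
#endif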
2465
2466 static void
2467 function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
2468 {
2469         struct ftrace_func_probe *entry;
2470         struct hlist_head *hhd;
2471         struct hlist_node *n;
2472         unsigned long key;
2473
2474         key = hash_long(ip, FTRACE_HASH_BITS);
2475
2476         hhd = &ftrace_func_hash[key];
2477
2478         if (hlist_empty(hhd))
2479                 return;
2480
2481         /*
2482          * Disable preemption for these calls to prevent an RCU grace
2483          * period from elapsing. This syncs the hash iteration with the
2484          * freeing of items on the hash. rcu_read_lock is too dangerous here.
2485          */
2486         preempt_disable_notrace();
2487         hlist_for_each_entry_rcu(entry, n, hhd, node) {
2488                 if (entry->ip == ip)
2489                         entry->ops->func(ip, parent_ip, &entry->data);
2490         }
2491         preempt_enable_notrace();
2492 }
2493
2494 static struct ftrace_ops trace_probe_ops __read_mostly =
2495 {
2496         .func           = function_trace_probe_call,
2497 };
2498
2499 static int ftrace_probe_registered;
2500
2501 static void __enable_ftrace_function_probe(void)
2502 {
2503         int ret;
2504         int i;
2505
2506         if (ftrace_probe_registered)
2507                 return;
2508
2509         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2510                 struct hlist_head *hhd = &ftrace_func_hash[i];
2511                 if (hhd->first)
2512                         break;
2513         }
2514         /* Nothing registered? */
2515         if (i == FTRACE_FUNC_HASHSIZE)
2516                 return;
2517
2518         ret = __register_ftrace_function(&trace_probe_ops);
2519         if (!ret)
2520                 ret = ftrace_startup(&trace_probe_ops, 0);
2521
2522         ftrace_probe_registered = 1;
2523 }
2524
2525 static void __disable_ftrace_function_probe(void)
2526 {
2527         int ret;
2528         int i;
2529
2530         if (!ftrace_probe_registered)
2531                 return;
2532
2533         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2534                 struct hlist_head *hhd = &ftrace_func_hash[i];
2535                 if (hhd->first)
2536                         return;
2537         }
2538
2539         /* no more funcs left */
2540         ret = __unregister_ftrace_function(&trace_probe_ops);
2541         if (!ret)
2542                 ftrace_shutdown(&trace_probe_ops, 0);
2543
2544         ftrace_probe_registered = 0;
2545 }
2546
2547
2548 static void ftrace_free_entry_rcu(struct rcu_head *rhp)
2549 {
2550         struct ftrace_func_probe *entry =
2551                 container_of(rhp, struct ftrace_func_probe, rcu);
2552
2553         if (entry->ops->free)
2554                 entry->ops->free(&entry->data);
2555         kfree(entry);
2556 }
2557
2558
2559 int
2560 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2561                               void *data)
2562 {
2563         struct ftrace_func_probe *entry;
2564         struct ftrace_page *pg;
2565         struct dyn_ftrace *rec;
2566         int type, len, not;
2567         unsigned long key;
2568         int count = 0;
2569         char *search;
2570
2571         type = filter_parse_regex(glob, strlen(glob), &search, &not);
2572         len = strlen(search);
2573
2574         /* we do not support '!' for function probes */
2575         if (WARN_ON(not))
2576                 return -EINVAL;
2577
2578         mutex_lock(&ftrace_lock);
2579
2580         if (unlikely(ftrace_disabled))
2581                 goto out_unlock;
2582
2583         do_for_each_ftrace_rec(pg, rec) {
2584
2585                 if (!ftrace_match_record(rec, NULL, search, len, type))
2586                         continue;
2587
2588                 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2589                 if (!entry) {
2590                         /* If we did not process any, then return error */
2591                         if (!count)
2592                                 count = -ENOMEM;
2593                         goto out_unlock;
2594                 }
2595
2596                 count++;
2597
2598                 entry->data = data;
2599
2600                 /*
2601                  * The caller might want to do something special
2602                  * for each function we find. We call the callback
2603                  * to give the caller an opportunity to do so.
2604                  */
2605                 if (ops->callback) {
2606                         if (ops->callback(rec->ip, &entry->data) < 0) {
2607                                 /* caller does not like this func */
2608                                 kfree(entry);
2609                                 continue;
2610                         }
2611                 }
2612
2613                 entry->ops = ops;
2614                 entry->ip = rec->ip;
2615
2616                 key = hash_long(entry->ip, FTRACE_HASH_BITS);
2617                 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
2618
2619         } while_for_each_ftrace_rec();
2620         __enable_ftrace_function_probe();
2621
2622  out_unlock:
2623         mutex_unlock(&ftrace_lock);
2624
2625         return count;
2626 }
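
/*
 * Editor's example -- a minimal sketch, not part of the original file:
 * a hypothetical probe that counts hits on the matched functions, wired
 * up through register_ftrace_function_probe() above.  Only the ->func
 * and ->print shapes used by this file are assumed; the counting probe
 * itself is made up for illustration and therefore compiled out.
 */
#if 0
static void
ftrace_count_probe(unsigned long ip, unsigned long parent_ip, void **data)
{
	/* @data is the per-entry cookie handed in at registration time */
	unsigned long *count = *data;

	if (count)
		(*count)++;	/* racy across CPUs; good enough for a sketch */
}

static int
ftrace_count_probe_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data)
{
	unsigned long *count = data;

	seq_printf(m, "%ps:count:%lu\n", (void *)ip, count ? *count : 0UL);
	return 0;
}

static struct ftrace_probe_ops ftrace_count_probe_ops = {
	.func	= ftrace_count_probe,
	.print	= ftrace_count_probe_print,
};

static unsigned long ftrace_count_probe_hits;

static void ftrace_count_probe_example(void)
{
	/* Attach to every function starting with "sched" ... */
	register_ftrace_function_probe("sched*", &ftrace_count_probe_ops,
				       &ftrace_count_probe_hits);
	/* ... and later detach again. */
	unregister_ftrace_function_probe_func("sched*",
					      &ftrace_count_probe_ops);
}
#endif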
2627
2628 enum {
2629         PROBE_TEST_FUNC         = 1,
2630         PROBE_TEST_DATA         = 2
2631 };
2632
2633 static void
2634 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2635                                   void *data, int flags)
2636 {
2637         struct ftrace_func_probe *entry;
2638         struct hlist_node *n, *tmp;
2639         char str[KSYM_SYMBOL_LEN];
2640         int type = MATCH_FULL;
2641         int i, len = 0;
2642         char *search;
2643
2644         if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
2645                 glob = NULL;
2646         else if (glob) {
2647                 int not;
2648
2649                 type = filter_parse_regex(glob, strlen(glob), &search, &not);
2650                 len = strlen(search);
2651
2652                 /* we do not support '!' for function probes */
2653                 if (WARN_ON(not))
2654                         return;
2655         }
2656
2657         mutex_lock(&ftrace_lock);
2658         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2659                 struct hlist_head *hhd = &ftrace_func_hash[i];
2660
2661                 hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
2662
2663                         /* break up if statements for readability */
2664                         if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
2665                                 continue;
2666
2667                         if ((flags & PROBE_TEST_DATA) && entry->data != data)
2668                                 continue;
2669
2670                         /* do this last, since it is the most expensive */
2671                         if (glob) {
2672                                 kallsyms_lookup(entry->ip, NULL, NULL,
2673                                                 NULL, str);
2674                                 if (!ftrace_match(str, glob, len, type))
2675                                         continue;
2676                         }
2677
2678                         hlist_del(&entry->node);
2679                         call_rcu(&entry->rcu, ftrace_free_entry_rcu);
2680                 }
2681         }
2682         __disable_ftrace_function_probe();
2683         mutex_unlock(&ftrace_lock);
2684 }
2685
2686 void
2687 unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2688                                 void *data)
2689 {
2690         __unregister_ftrace_function_probe(glob, ops, data,
2691                                           PROBE_TEST_FUNC | PROBE_TEST_DATA);
2692 }
2693
2694 void
2695 unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
2696 {
2697         __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
2698 }
2699
2700 void unregister_ftrace_function_probe_all(char *glob)
2701 {
2702         __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
2703 }
2704
2705 static LIST_HEAD(ftrace_commands);
2706 static DEFINE_MUTEX(ftrace_cmd_mutex);
2707
2708 int register_ftrace_command(struct ftrace_func_command *cmd)
2709 {
2710         struct ftrace_func_command *p;
2711         int ret = 0;
2712
2713         mutex_lock(&ftrace_cmd_mutex);
2714         list_for_each_entry(p, &ftrace_commands, list) {
2715                 if (strcmp(cmd->name, p->name) == 0) {
2716                         ret = -EBUSY;
2717                         goto out_unlock;
2718                 }
2719         }
2720         list_add(&cmd->list, &ftrace_commands);
2721  out_unlock:
2722         mutex_unlock(&ftrace_cmd_mutex);
2723
2724         return ret;
2725 }
2726
2727 int unregister_ftrace_command(struct ftrace_func_command *cmd)
2728 {
2729         struct ftrace_func_command *p, *n;
2730         int ret = -ENODEV;
2731
2732         mutex_lock(&ftrace_cmd_mutex);
2733         list_for_each_entry_safe(p, n, &ftrace_commands, list) {
2734                 if (strcmp(cmd->name, p->name) == 0) {
2735                         ret = 0;
2736                         list_del_init(&p->list);
2737                         goto out_unlock;
2738                 }
2739         }
2740  out_unlock:
2741         mutex_unlock(&ftrace_cmd_mutex);
2742
2743         return ret;
2744 }
2745
2746 static int ftrace_process_regex(struct ftrace_hash *hash,
2747                                 char *buff, int len, int enable)
2748 {
2749         char *func, *command, *next = buff;
2750         struct ftrace_func_command *p;
2751         int ret = -EINVAL;
2752
2753         func = strsep(&next, ":");
2754
2755         if (!next) {
2756                 ret = ftrace_match_records(hash, func, len);
2757                 if (!ret)
2758                         ret = -EINVAL;
2759                 if (ret < 0)
2760                         return ret;
2761                 return 0;
2762         }
2763
2764         /* command found */
2765
2766         command = strsep(&next, ":");
2767
2768         mutex_lock(&ftrace_cmd_mutex);
2769         list_for_each_entry(p, &ftrace_commands, list) {
2770                 if (strcmp(p->name, command) == 0) {
2771                         ret = p->func(hash, func, command, next, enable);
2772                         goto out_unlock;
2773                 }
2774         }
2775  out_unlock:
2776         mutex_unlock(&ftrace_cmd_mutex);
2777
2778         return ret;
2779 }
2780
2781 static ssize_t
2782 ftrace_regex_write(struct file *file, const char __user *ubuf,
2783                    size_t cnt, loff_t *ppos, int enable)
2784 {
2785         struct ftrace_iterator *iter;
2786         struct trace_parser *parser;
2787         ssize_t ret, read;
2788
2789         if (!cnt)
2790                 return 0;
2791
2792         mutex_lock(&ftrace_regex_lock);
2793
2794         ret = -ENODEV;
2795         if (unlikely(ftrace_disabled))
2796                 goto out_unlock;
2797
2798         if (file->f_mode & FMODE_READ) {
2799                 struct seq_file *m = file->private_data;
2800                 iter = m->private;
2801         } else
2802                 iter = file->private_data;
2803
2804         parser = &iter->parser;
2805         read = trace_get_user(parser, ubuf, cnt, ppos);
2806
2807         if (read >= 0 && trace_parser_loaded(parser) &&
2808             !trace_parser_cont(parser)) {
2809                 ret = ftrace_process_regex(iter->hash, parser->buffer,
2810                                            parser->idx, enable);
2811                 trace_parser_clear(parser);
2812                 if (ret)
2813                         goto out_unlock;
2814         }
2815
2816         ret = read;
2817 out_unlock:
2818         mutex_unlock(&ftrace_regex_lock);
2819
2820         return ret;
2821 }
2822
2823 static ssize_t
2824 ftrace_filter_write(struct file *file, const char __user *ubuf,
2825                     size_t cnt, loff_t *ppos)
2826 {
2827         return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
2828 }
2829
2830 static ssize_t
2831 ftrace_notrace_write(struct file *file, const char __user *ubuf,
2832                      size_t cnt, loff_t *ppos)
2833 {
2834         return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
2835 }
2836
2837 static int
2838 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
2839                  int reset, int enable)
2840 {
2841         struct ftrace_hash **orig_hash;
2842         struct ftrace_hash *hash;
2843         int ret;
2844
2845         /* All ops marked global use the global_ops filters */
2846         if (ops->flags & FTRACE_OPS_FL_GLOBAL)
2847                 ops = &global_ops;
2848
2849         if (unlikely(ftrace_disabled))
2850                 return -ENODEV;
2851
2852         if (enable)
2853                 orig_hash = &ops->filter_hash;
2854         else
2855                 orig_hash = &ops->notrace_hash;
2856
2857         hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
2858         if (!hash)
2859                 return -ENOMEM;
2860
2861         mutex_lock(&ftrace_regex_lock);
2862         if (reset)
2863                 ftrace_filter_reset(hash);
2864         if (buf)
2865                 ftrace_match_records(hash, buf, len);
2866
2867         mutex_lock(&ftrace_lock);
2868         ret = ftrace_hash_move(orig_hash, hash);
2869         mutex_unlock(&ftrace_lock);
2870
2871         mutex_unlock(&ftrace_regex_lock);
2872
2873         free_ftrace_hash(hash);
2874         return ret;
2875 }
2876
2877 /**
2878  * ftrace_set_filter - set a function to filter on in ftrace
2879  * @ops - the ops to set the filter with
2880  * @buf - the string that holds the function filter text.
2881  * @len - the length of the string.
2882  * @reset - non zero to reset all filters before applying this filter.
2883  *
2884  * Filters denote which functions should be enabled when tracing is enabled.
2885  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
2886  */
2887 void ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
2888                        int len, int reset)
2889 {
2890         ftrace_set_regex(ops, buf, len, reset, 1);
2891 }
2892 EXPORT_SYMBOL_GPL(ftrace_set_filter);
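
/*
 * Editor's example -- a minimal sketch, not part of the original file:
 * how a separate tracer might pair ftrace_set_filter() with its own
 * ftrace_ops so only the functions it cares about reach its callback.
 * The callback body and the "kmalloc*" pattern are hypothetical, and
 * register_ftrace_function() is assumed to be the usual registration
 * entry point; the sketch is compiled out.
 */
#if 0
static void my_trace_func(unsigned long ip, unsigned long parent_ip)
{
	/* Called only for functions that passed my_trace_ops' filter hash */
}

static struct ftrace_ops my_trace_ops = {
	.func	= my_trace_func,
};

static int __init my_trace_init(void)
{
	char pattern[] = "kmalloc*";

	/* reset == 1: start from an empty filter, then add the glob */
	ftrace_set_filter(&my_trace_ops, (unsigned char *)pattern,
			  strlen(pattern), 1);
	return register_ftrace_function(&my_trace_ops);
}
device_initcall(my_trace_init);
#endif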
2893
2894 /**
2895  * ftrace_set_notrace - set a function to not trace in ftrace
2896  * @ops - the ops to set the notrace filter with
2897  * @buf - the string that holds the function notrace text.
2898  * @len - the length of the string.
2899  * @reset - non zero to reset all filters before applying this filter.
2900  *
2901  * Notrace Filters denote which functions should not be enabled when tracing
2902  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
2903  * for tracing.
2904  */
2905 void ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
2906                         int len, int reset)
2907 {
2908         ftrace_set_regex(ops, buf, len, reset, 0);
2909 }
2910 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
2911 /**
2912  * ftrace_set_global_filter - set a function to filter on in ftrace
2913  *                            with the global ops
2914  * @buf - the string that holds the function filter text.
2915  * @len - the length of the string.
2916  * @reset - non zero to reset all filters before applying this filter.
2917  *
2918  * Filters denote which functions should be enabled when tracing is enabled.
2919  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
2920  */
2921 void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
2922 {
2923         ftrace_set_regex(&global_ops, buf, len, reset, 1);
2924 }
2925 EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
2926
2927 /**
2928  * ftrace_set_global_notrace - set a function to not trace in ftrace
2929  *                             with the global ops
2930  * @buf - the string that holds the function notrace text.
2931  * @len - the length of the string.
2932  * @reset - non zero to reset all filters before applying this filter.
2933  *
2934  * Notrace Filters denote which functions should not be enabled when tracing
2935  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
2936  * for tracing.
2937  */
2938 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
2939 {
2940         ftrace_set_regex(&global_ops, buf, len, reset, 0);
2941 }
2942 EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
2943
2944 /*
2945  * command line interface to allow users to set filters on boot up.
2946  */
2947 #define FTRACE_FILTER_SIZE              COMMAND_LINE_SIZE
2948 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
2949 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
2950
2951 static int __init set_ftrace_notrace(char *str)
2952 {
2953         strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
2954         return 1;
2955 }
2956 __setup("ftrace_notrace=", set_ftrace_notrace);
2957
2958 static int __init set_ftrace_filter(char *str)
2959 {
2960         strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
2961         return 1;
2962 }
2963 __setup("ftrace_filter=", set_ftrace_filter);
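
/*
 * Editor's note (illustrative): with the __setup() hooks above, the filters
 * can be primed from the kernel command line before tracing is first
 * enabled, e.g.
 *
 *	ftrace_filter=kmem_cache_alloc,kmem_cache_free ftrace_notrace=*spin*
 *
 * The comma-separated entries are split and applied one at a time by
 * set_ftrace_early_filters() below; the function names here are only
 * examples.
 */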
2964
2965 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2966 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
2967 static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
2968
2969 static int __init set_graph_function(char *str)
2970 {
2971         strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
2972         return 1;
2973 }
2974 __setup("ftrace_graph_filter=", set_graph_function);
2975
2976 static void __init set_ftrace_early_graph(char *buf)
2977 {
2978         int ret;
2979         char *func;
2980
2981         while (buf) {
2982                 func = strsep(&buf, ",");
2983                 /* we allow only one expression at a time */
2984                 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
2985                                       func);
2986                 if (ret)
2987                         printk(KERN_DEBUG "ftrace: function %s not "
2988                                           "traceable\n", func);
2989         }
2990 }
2991 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2992
2993 static void __init
2994 set_ftrace_early_filter(struct ftrace_ops *ops, char *buf, int enable)
2995 {
2996         char *func;
2997
2998         while (buf) {
2999                 func = strsep(&buf, ",");
3000                 ftrace_set_regex(ops, func, strlen(func), 0, enable);
3001         }
3002 }
3003
3004 static void __init set_ftrace_early_filters(void)
3005 {
3006         if (ftrace_filter_buf[0])
3007                 set_ftrace_early_filter(&global_ops, ftrace_filter_buf, 1);
3008         if (ftrace_notrace_buf[0])
3009                 set_ftrace_early_filter(&global_ops, ftrace_notrace_buf, 0);
3010 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3011         if (ftrace_graph_buf[0])
3012                 set_ftrace_early_graph(ftrace_graph_buf);
3013 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3014 }
3015
3016 static int
3017 ftrace_regex_release(struct inode *inode, struct file *file)
3018 {
3019         struct seq_file *m = (struct seq_file *)file->private_data;
3020         struct ftrace_iterator *iter;
3021         struct ftrace_hash **orig_hash;
3022         struct trace_parser *parser;
3023         int filter_hash;
3024         int ret;
3025
3026         mutex_lock(&ftrace_regex_lock);
3027         if (file->f_mode & FMODE_READ) {
3028                 iter = m->private;
3029
3030                 seq_release(inode, file);
3031         } else
3032                 iter = file->private_data;
3033
3034         parser = &iter->parser;
3035         if (trace_parser_loaded(parser)) {
3036                 parser->buffer[parser->idx] = 0;
3037                 ftrace_match_records(iter->hash, parser->buffer, parser->idx);
3038         }
3039
3040         trace_parser_put(parser);
3041
3042         if (file->f_mode & FMODE_WRITE) {
3043                 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
3044
3045                 if (filter_hash)
3046                         orig_hash = &iter->ops->filter_hash;
3047                 else
3048                         orig_hash = &iter->ops->notrace_hash;
3049
3050                 mutex_lock(&ftrace_lock);
3051                 /*
3052                  * Remove the current set, update the hash and add
3053                  * them back.
3054                  */
3055                 ftrace_hash_rec_disable(iter->ops, filter_hash);
3056                 ret = ftrace_hash_move(orig_hash, iter->hash);
3057                 if (!ret) {
3058                         ftrace_hash_rec_enable(iter->ops, filter_hash);
3059                         if (iter->ops->flags & FTRACE_OPS_FL_ENABLED
3060                             && ftrace_enabled)
3061                                 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
3062                 }
3063                 mutex_unlock(&ftrace_lock);
3064         }
3065         free_ftrace_hash(iter->hash);
3066         kfree(iter);
3067
3068         mutex_unlock(&ftrace_regex_lock);
3069         return 0;
3070 }
3071
3072 static const struct file_operations ftrace_avail_fops = {
3073         .open = ftrace_avail_open,
3074         .read = seq_read,
3075         .llseek = seq_lseek,
3076         .release = seq_release_private,
3077 };
3078
3079 static const struct file_operations ftrace_enabled_fops = {
3080         .open = ftrace_enabled_open,
3081         .read = seq_read,
3082         .llseek = seq_lseek,
3083         .release = seq_release_private,
3084 };
3085
3086 static const struct file_operations ftrace_filter_fops = {
3087         .open = ftrace_filter_open,
3088         .read = seq_read,
3089         .write = ftrace_filter_write,
3090         .llseek = ftrace_regex_lseek,
3091         .release = ftrace_regex_release,
3092 };
3093
3094 static const struct file_operations ftrace_notrace_fops = {
3095         .open = ftrace_notrace_open,
3096         .read = seq_read,
3097         .write = ftrace_notrace_write,
3098         .llseek = ftrace_regex_lseek,
3099         .release = ftrace_regex_release,
3100 };
3101
3102 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3103
3104 static DEFINE_MUTEX(graph_lock);
3105
3106 int ftrace_graph_count;
3107 int ftrace_graph_filter_enabled;
3108 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
3109
3110 static void *
3111 __g_next(struct seq_file *m, loff_t *pos)
3112 {
3113         if (*pos >= ftrace_graph_count)
3114                 return NULL;
3115         return &ftrace_graph_funcs[*pos];
3116 }
3117
3118 static void *
3119 g_next(struct seq_file *m, void *v, loff_t *pos)
3120 {
3121         (*pos)++;
3122         return __g_next(m, pos);
3123 }
3124
3125 static void *g_start(struct seq_file *m, loff_t *pos)
3126 {
3127         mutex_lock(&graph_lock);
3128
3129         /* Nothing filtered, tell g_show to print that all functions are enabled */
3130         if (!ftrace_graph_filter_enabled && !*pos)
3131                 return (void *)1;
3132
3133         return __g_next(m, pos);
3134 }
3135
3136 static void g_stop(struct seq_file *m, void *p)
3137 {
3138         mutex_unlock(&graph_lock);
3139 }
3140
3141 static int g_show(struct seq_file *m, void *v)
3142 {
3143         unsigned long *ptr = v;
3144
3145         if (!ptr)
3146                 return 0;
3147
3148         if (ptr == (unsigned long *)1) {
3149                 seq_printf(m, "#### all functions enabled ####\n");
3150                 return 0;
3151         }
3152
3153         seq_printf(m, "%ps\n", (void *)*ptr);
3154
3155         return 0;
3156 }
3157
3158 static const struct seq_operations ftrace_graph_seq_ops = {
3159         .start = g_start,
3160         .next = g_next,
3161         .stop = g_stop,
3162         .show = g_show,
3163 };
3164
3165 static int
3166 ftrace_graph_open(struct inode *inode, struct file *file)
3167 {
3168         int ret = 0;
3169
3170         if (unlikely(ftrace_disabled))
3171                 return -ENODEV;
3172
3173         mutex_lock(&graph_lock);
3174         if ((file->f_mode & FMODE_WRITE) &&
3175             (file->f_flags & O_TRUNC)) {
3176                 ftrace_graph_filter_enabled = 0;
3177                 ftrace_graph_count = 0;
3178                 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
3179         }
3180         mutex_unlock(&graph_lock);
3181
3182         if (file->f_mode & FMODE_READ)
3183                 ret = seq_open(file, &ftrace_graph_seq_ops);
3184
3185         return ret;
3186 }
3187
3188 static int
3189 ftrace_graph_release(struct inode *inode, struct file *file)
3190 {
3191         if (file->f_mode & FMODE_READ)
3192                 seq_release(inode, file);
3193         return 0;
3194 }
3195
3196 static int
3197 ftrace_set_func(unsigned long *array, int *idx, char *buffer)
3198 {
3199         struct dyn_ftrace *rec;
3200         struct ftrace_page *pg;
3201         int search_len;
3202         int fail = 1;
3203         int type, not;
3204         char *search;
3205         bool exists;
3206         int i;
3207
3208         /* decode regex */
3209         type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
3210         if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
3211                 return -EBUSY;
3212
3213         search_len = strlen(search);
3214
3215         mutex_lock(&ftrace_lock);
3216
3217         if (unlikely(ftrace_disabled)) {
3218                 mutex_unlock(&ftrace_lock);
3219                 return -ENODEV;
3220         }
3221
3222         do_for_each_ftrace_rec(pg, rec) {
3223
3224                 if (rec->flags & FTRACE_FL_FREE)
3225                         continue;
3226
3227                 if (ftrace_match_record(rec, NULL, search, search_len, type)) {
3228                         /* if it is in the array */
3229                         exists = false;
3230                         for (i = 0; i < *idx; i++) {
3231                                 if (array[i] == rec->ip) {
3232                                         exists = true;
3233                                         break;
3234                                 }
3235                         }
3236
3237                         if (!not) {
3238                                 fail = 0;
3239                                 if (!exists) {
3240                                         array[(*idx)++] = rec->ip;
3241                                         if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
3242                                                 goto out;
3243                                 }
3244                         } else {
3245                                 if (exists) {
3246                                         array[i] = array[--(*idx)];
3247                                         array[*idx] = 0;
3248                                         fail = 0;
3249                                 }
3250                         }
3251                 }
3252         } while_for_each_ftrace_rec();
3253 out:
3254         mutex_unlock(&ftrace_lock);
3255
3256         if (fail)
3257                 return -EINVAL;
3258
3259         ftrace_graph_filter_enabled = 1;
3260         return 0;
3261 }
3262
3263 static ssize_t
3264 ftrace_graph_write(struct file *file, const char __user *ubuf,
3265                    size_t cnt, loff_t *ppos)
3266 {
3267         struct trace_parser parser;
3268         ssize_t read, ret;
3269
3270         if (!cnt)
3271                 return 0;
3272
3273         mutex_lock(&graph_lock);
3274
3275         if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
3276                 ret = -ENOMEM;
3277                 goto out_unlock;
3278         }
3279
3280         read = trace_get_user(&parser, ubuf, cnt, ppos);
3281
3282         if (read >= 0 && trace_parser_loaded(&parser)) {
3283                 parser.buffer[parser.idx] = 0;
3284
3285                 /* we allow only one expression at a time */
3286                 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3287                                         parser.buffer);
3288                 if (ret)
3289                         goto out_free;
3290         }
3291
3292         ret = read;
3293
3294 out_free:
3295         trace_parser_put(&parser);
3296 out_unlock:
3297         mutex_unlock(&graph_lock);
3298
3299         return ret;
3300 }
3301
3302 static const struct file_operations ftrace_graph_fops = {
3303         .open           = ftrace_graph_open,
3304         .read           = seq_read,
3305         .write          = ftrace_graph_write,
3306         .release        = ftrace_graph_release,
3307         .llseek         = seq_lseek,
3308 };
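
/*
 * Editor's note (illustrative): set_graph_function, created in
 * ftrace_init_dyn_debugfs() below, accepts one expression per write, e.g.
 *
 *	echo sys_nanosleep > set_graph_function
 *	echo '!sys_nanosleep' > set_graph_function	# remove it again
 *
 * A leading '!' removes a previously added entry (see ftrace_set_func());
 * the function name is only an example.
 */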
3309 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3310
3311 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
3312 {
3313
3314         trace_create_file("available_filter_functions", 0444,
3315                         d_tracer, NULL, &ftrace_avail_fops);
3316
3317         trace_create_file("enabled_functions", 0444,
3318                         d_tracer, NULL, &ftrace_enabled_fops);
3319
3320         trace_create_file("set_ftrace_filter", 0644, d_tracer,
3321                         NULL, &ftrace_filter_fops);
3322
3323         trace_create_file("set_ftrace_notrace", 0644, d_tracer,
3324                                     NULL, &ftrace_notrace_fops);
3325
3326 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3327         trace_create_file("set_graph_function", 0444, d_tracer,
3328                                     NULL,
3329                                     &ftrace_graph_fops);
3330 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3331
3332         return 0;
3333 }
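
/*
 * Editor's note (illustrative): once these files exist they are driven from
 * userspace through the tracing directory in debugfs (commonly mounted at
 * /sys/kernel/debug), for example:
 *
 *	cat available_filter_functions
 *	echo 'sys_nanosleep' > set_ftrace_filter
 *	echo '*:mod:ext4' > set_ftrace_filter	# the "mod" command from above
 *	echo '*lock*' > set_ftrace_notrace
 *	echo > set_ftrace_filter		# O_TRUNC clears the filter
 *
 * The mount point is configuration dependent and the function/module names
 * are only examples.
 */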
3334
3335 static int ftrace_process_locs(struct module *mod,
3336                                unsigned long *start,
3337                                unsigned long *end)
3338 {
3339         unsigned long *p;
3340         unsigned long addr;
3341         unsigned long flags;
3342
3343         mutex_lock(&ftrace_lock);
3344         p = start;
3345         while (p < end) {
3346                 addr = ftrace_call_adjust(*p++);
3347                 /*
3348                  * Some architecture linkers will pad between
3349                  * the different mcount_loc sections of different
3350                  * object files to satisfy alignments.
3351                  * Skip any NULL pointers.
3352                  */
3353                 if (!addr)
3354                         continue;
3355                 ftrace_record_ip(addr);
3356         }
3357
3358         /*
3359          * Disable interrupts to prevent interrupts from executing
3360          * code that is being modified.
3361          */
3362         local_irq_save(flags);
3363         ftrace_update_code(mod);
3364         local_irq_restore(flags);
3365         mutex_unlock(&ftrace_lock);
3366
3367         return 0;
3368 }
3369
3370 #ifdef CONFIG_MODULES
3371 void ftrace_release_mod(struct module *mod)
3372 {
3373         struct dyn_ftrace *rec;
3374         struct ftrace_page *pg;
3375
3376         mutex_lock(&ftrace_lock);
3377
3378         if (ftrace_disabled)
3379                 goto out_unlock;
3380
3381         do_for_each_ftrace_rec(pg, rec) {
3382                 if (within_module_core(rec->ip, mod)) {
3383                         /*
3384                          * rec->ip is changed in ftrace_free_rec(), so it should
3385                          * not be within the module's range if it was already freed.
3386                          */
3387                         FTRACE_WARN_ON(rec->flags & FTRACE_FL_FREE);
3388                         ftrace_free_rec(rec);
3389                 }
3390         } while_for_each_ftrace_rec();
3391  out_unlock:
3392         mutex_unlock(&ftrace_lock);
3393 }
3394
3395 static void ftrace_init_module(struct module *mod,
3396                                unsigned long *start, unsigned long *end)
3397 {
3398         if (ftrace_disabled || start == end)
3399                 return;
3400         ftrace_process_locs(mod, start, end);
3401 }
3402
3403 static int ftrace_module_notify(struct notifier_block *self,
3404                                 unsigned long val, void *data)
3405 {
3406         struct module *mod = data;
3407
3408         switch (val) {
3409         case MODULE_STATE_COMING:
3410                 ftrace_init_module(mod, mod->ftrace_callsites,
3411                                    mod->ftrace_callsites +
3412                                    mod->num_ftrace_callsites);
3413                 break;
3414         case MODULE_STATE_GOING:
3415                 ftrace_release_mod(mod);
3416                 break;
3417         }
3418
3419         return 0;
3420 }
3421 #else
3422 static int ftrace_module_notify(struct notifier_block *self,
3423                                 unsigned long val, void *data)
3424 {
3425         return 0;
3426 }
3427 #endif /* CONFIG_MODULES */
3428
3429 struct notifier_block ftrace_module_nb = {
3430         .notifier_call = ftrace_module_notify,
3431         .priority = 0,
3432 };
3433
3434 extern unsigned long __start_mcount_loc[];
3435 extern unsigned long __stop_mcount_loc[];
3436
3437 void __init ftrace_init(void)
3438 {
3439         unsigned long count, addr, flags;
3440         int ret;
3441
3442         /* Keep the ftrace pointer to the stub */
3443         addr = (unsigned long)ftrace_stub;
3444
3445         local_irq_save(flags);
3446         ftrace_dyn_arch_init(&addr);
3447         local_irq_restore(flags);
3448
3449         /* ftrace_dyn_arch_init places the return code in addr */
3450         if (addr)
3451                 goto failed;
3452
3453         count = __stop_mcount_loc - __start_mcount_loc;
3454
3455         ret = ftrace_dyn_table_alloc(count);
3456         if (ret)
3457                 goto failed;
3458
3459         last_ftrace_enabled = ftrace_enabled = 1;
3460
3461         ret = ftrace_process_locs(NULL,
3462                                   __start_mcount_loc,
3463                                   __stop_mcount_loc);
3464
3465         ret = register_module_notifier(&ftrace_module_nb);
3466         if (ret)
3467                 pr_warning("Failed to register ftrace module notifier\n");
3468
3469         set_ftrace_early_filters();
3470
3471         return;
3472  failed:
3473         ftrace_disabled = 1;
3474 }
3475
3476 #else
3477
3478 static struct ftrace_ops global_ops = {
3479         .func                   = ftrace_stub,
3480 };
3481
3482 static int __init ftrace_nodyn_init(void)
3483 {
3484         ftrace_enabled = 1;
3485         return 0;
3486 }
3487 device_initcall(ftrace_nodyn_init);
3488
3489 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
3490 static inline void ftrace_startup_enable(int command) { }
3491 /* Keep as macros so we do not need to define the commands */
3492 # define ftrace_startup(ops, command)                   \
3493         ({                                              \
3494                 (ops)->flags |= FTRACE_OPS_FL_ENABLED;  \
3495                 0;                                      \
3496         })
3497 # define ftrace_shutdown(ops, command)  do { } while (0)
3498 # define ftrace_startup_sysctl()        do { } while (0)
3499 # define ftrace_shutdown_sysctl()       do { } while (0)
3500
3501 static inline int
3502 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
3503 {
3504         return 1;
3505 }
3506
3507 #endif /* CONFIG_DYNAMIC_FTRACE */
3508
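/*
 * List function, typically installed as the trace function when more
 * than one ftrace_ops is registered: walk ftrace_ops_list and call
 * every ops whose filter accepts this ip. The recursion bit keeps the
 * walk from re-entering itself.
 */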
3509 static void
3510 ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
3511 {
3512         struct ftrace_ops *op;
3513
3514         if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT)))
3515                 return;
3516
3517         trace_recursion_set(TRACE_INTERNAL_BIT);
3518         /*
3519          * Some of the ops may be dynamically allocated,
3520          * they must be freed after a synchronize_sched().
3521          */
3522         preempt_disable_notrace();
3523         op = rcu_dereference_raw(ftrace_ops_list);
3524         while (op != &ftrace_list_end) {
3525                 if (ftrace_ops_test(op, ip))
3526                         op->func(ip, parent_ip);
3527                 op = rcu_dereference_raw(op->next);
3528         }
3529         preempt_enable_notrace();
3530         trace_recursion_clear(TRACE_INTERNAL_BIT);
3531 }
3532
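/*
 * The idle (swapper) tasks are selected with the ftrace_swapper_pid
 * sentinel rather than a real struct pid, so their per-task trace flag
 * is toggled directly on each online CPU's idle task.
 */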
3533 static void clear_ftrace_swapper(void)
3534 {
3535         struct task_struct *p;
3536         int cpu;
3537
3538         get_online_cpus();
3539         for_each_online_cpu(cpu) {
3540                 p = idle_task(cpu);
3541                 clear_tsk_trace_trace(p);
3542         }
3543         put_online_cpus();
3544 }
3545
3546 static void set_ftrace_swapper(void)
3547 {
3548         struct task_struct *p;
3549         int cpu;
3550
3551         get_online_cpus();
3552         for_each_online_cpu(cpu) {
3553                 p = idle_task(cpu);
3554                 set_tsk_trace_trace(p);
3555         }
3556         put_online_cpus();
3557 }
3558
3559 static void clear_ftrace_pid(struct pid *pid)
3560 {
3561         struct task_struct *p;
3562
3563         rcu_read_lock();
3564         do_each_pid_task(pid, PIDTYPE_PID, p) {
3565                 clear_tsk_trace_trace(p);
3566         } while_each_pid_task(pid, PIDTYPE_PID, p);
3567         rcu_read_unlock();
3568
3569         put_pid(pid);
3570 }
3571
3572 static void set_ftrace_pid(struct pid *pid)
3573 {
3574         struct task_struct *p;
3575
3576         rcu_read_lock();
3577         do_each_pid_task(pid, PIDTYPE_PID, p) {
3578                 set_tsk_trace_trace(p);
3579         } while_each_pid_task(pid, PIDTYPE_PID, p);
3580         rcu_read_unlock();
3581 }
3582
3583 static void clear_ftrace_pid_task(struct pid *pid)
3584 {
3585         if (pid == ftrace_swapper_pid)
3586                 clear_ftrace_swapper();
3587         else
3588                 clear_ftrace_pid(pid);
3589 }
3590
3591 static void set_ftrace_pid_task(struct pid *pid)
3592 {
3593         if (pid == ftrace_swapper_pid)
3594                 set_ftrace_swapper();
3595         else
3596                 set_ftrace_pid(pid);
3597 }
3598
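/*
 * Add a pid to the list of traced pids. A value of 0 selects the idle
 * (swapper) tasks. Adding a pid that is already in the list succeeds
 * quietly.
 */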
3599 static int ftrace_pid_add(int p)
3600 {
3601         struct pid *pid;
3602         struct ftrace_pid *fpid;
3603         int ret = -EINVAL;
3604
3605         mutex_lock(&ftrace_lock);
3606
3607         if (!p)
3608                 pid = ftrace_swapper_pid;
3609         else
3610                 pid = find_get_pid(p);
3611
3612         if (!pid)
3613                 goto out;
3614
3615         ret = 0;
3616
3617         list_for_each_entry(fpid, &ftrace_pids, list)
3618                 if (fpid->pid == pid)
3619                         goto out_put;
3620
3621         ret = -ENOMEM;
3622
3623         fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
3624         if (!fpid)
3625                 goto out_put;
3626
3627         list_add(&fpid->list, &ftrace_pids);
3628         fpid->pid = pid;
3629
3630         set_ftrace_pid_task(pid);
3631
3632         ftrace_update_pid_func();
3633         ftrace_startup_enable(0);
3634
3635         mutex_unlock(&ftrace_lock);
3636         return 0;
3637
3638 out_put:
3639         if (pid != ftrace_swapper_pid)
3640                 put_pid(pid);
3641
3642 out:
3643         mutex_unlock(&ftrace_lock);
3644         return ret;
3645 }
3646
3647 static void ftrace_pid_reset(void)
3648 {
3649         struct ftrace_pid *fpid, *safe;
3650
3651         mutex_lock(&ftrace_lock);
3652         list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
3653                 struct pid *pid = fpid->pid;
3654
3655                 clear_ftrace_pid_task(pid);
3656
3657                 list_del(&fpid->list);
3658                 kfree(fpid);
3659         }
3660
3661         ftrace_update_pid_func();
3662         ftrace_startup_enable(0);
3663
3664         mutex_unlock(&ftrace_lock);
3665 }
3666
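/*
 * seq_file iterator for the set_ftrace_pid file. The (void *)1 token
 * stands in for an empty pid list so that fpid_show() can print
 * "no pid".
 */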
3667 static void *fpid_start(struct seq_file *m, loff_t *pos)
3668 {
3669         mutex_lock(&ftrace_lock);
3670
3671         if (list_empty(&ftrace_pids) && (!*pos))
3672                 return (void *) 1;
3673
3674         return seq_list_start(&ftrace_pids, *pos);
3675 }
3676
3677 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
3678 {
3679         if (v == (void *)1)
3680                 return NULL;
3681
3682         return seq_list_next(v, &ftrace_pids, pos);
3683 }
3684
3685 static void fpid_stop(struct seq_file *m, void *p)
3686 {
3687         mutex_unlock(&ftrace_lock);
3688 }
3689
3690 static int fpid_show(struct seq_file *m, void *v)
3691 {
3692         const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
3693
3694         if (v == (void *)1) {
3695                 seq_printf(m, "no pid\n");
3696                 return 0;
3697         }
3698
3699         if (fpid->pid == ftrace_swapper_pid)
3700                 seq_printf(m, "swapper tasks\n");
3701         else
3702                 seq_printf(m, "%u\n", pid_vnr(fpid->pid));
3703
3704         return 0;
3705 }
3706
3707 static const struct seq_operations ftrace_pid_sops = {
3708         .start = fpid_start,
3709         .next = fpid_next,
3710         .stop = fpid_stop,
3711         .show = fpid_show,
3712 };
3713
3714 static int
3715 ftrace_pid_open(struct inode *inode, struct file *file)
3716 {
3717         int ret = 0;
3718
3719         if ((file->f_mode & FMODE_WRITE) &&
3720             (file->f_flags & O_TRUNC))
3721                 ftrace_pid_reset();
3722
3723         if (file->f_mode & FMODE_READ)
3724                 ret = seq_open(file, &ftrace_pid_sops);
3725
3726         return ret;
3727 }
3728
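/*
 * Parse a pid written to set_ftrace_pid. For example:
 *
 *   echo 1234 > set_ftrace_pid    # trace only pid 1234
 *   echo 0 > set_ftrace_pid       # trace the idle (swapper) tasks
 *   echo > set_ftrace_pid         # clear the filter (via O_TRUNC)
 */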
3729 static ssize_t
3730 ftrace_pid_write(struct file *filp, const char __user *ubuf,
3731                    size_t cnt, loff_t *ppos)
3732 {
3733         char buf[64], *tmp;
3734         long val;
3735         int ret;
3736
3737         if (cnt >= sizeof(buf))
3738                 return -EINVAL;
3739
3740         if (copy_from_user(&buf, ubuf, cnt))
3741                 return -EFAULT;
3742
3743         buf[cnt] = 0;
3744
3745         /*
3746          * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
3747          * to clean the filter quietly.
3748          */
3749         tmp = strstrip(buf);
3750         if (strlen(tmp) == 0)
3751                 return 1;
3752
3753         ret = strict_strtol(tmp, 10, &val);
3754         if (ret < 0)
3755                 return ret;
3756
3757         ret = ftrace_pid_add(val);
3758
3759         return ret ? ret : cnt;
3760 }
3761
3762 static int
3763 ftrace_pid_release(struct inode *inode, struct file *file)
3764 {
3765         if (file->f_mode & FMODE_READ)
3766                 seq_release(inode, file);
3767
3768         return 0;
3769 }
3770
3771 static const struct file_operations ftrace_pid_fops = {
3772         .open           = ftrace_pid_open,
3773         .write          = ftrace_pid_write,
3774         .read           = seq_read,
3775         .llseek         = seq_lseek,
3776         .release        = ftrace_pid_release,
3777 };
3778
3779 static __init int ftrace_init_debugfs(void)
3780 {
3781         struct dentry *d_tracer;
3782
3783         d_tracer = tracing_init_dentry();
3784         if (!d_tracer)
3785                 return 0;
3786
3787         ftrace_init_dyn_debugfs(d_tracer);
3788
3789         trace_create_file("set_ftrace_pid", 0644, d_tracer,
3790                             NULL, &ftrace_pid_fops);
3791
3792         ftrace_profile_debugfs(d_tracer);
3793
3794         return 0;
3795 }
3796 fs_initcall(ftrace_init_debugfs);
3797
3798 /**
3799  * ftrace_kill - kill ftrace
3800  *
3801  * This function should be used by panic code. It stops ftrace
3802  * but in a not so nice way: tracing is disabled and cannot be
3803  * re-enabled afterwards.
3804  */
3805 void ftrace_kill(void)
3806 {
3807         ftrace_disabled = 1;
3808         ftrace_enabled = 0;
3809         clear_ftrace_function();
3810 }
3811
3812 /**
3813  * register_ftrace_function - register a function for profiling
3814  * @ops: ops structure that holds the function for profiling.
3815  *
3816  * Register a function to be called by all functions in the
3817  * kernel.
3818  *
3819  * Note: @ops->func and all the functions it calls must be labeled
3820  *       with "notrace", otherwise it will go into a
3821  *       recursive loop.
3822  */
3823 int register_ftrace_function(struct ftrace_ops *ops)
3824 {
3825         int ret = -1;
3826
3827         mutex_lock(&ftrace_lock);
3828
3829         if (unlikely(ftrace_disabled))
3830                 goto out_unlock;
3831
3832         ret = __register_ftrace_function(ops);
3833         if (!ret)
3834                 ret = ftrace_startup(ops, 0);
3835
3836
3837  out_unlock:
3838         mutex_unlock(&ftrace_lock);
3839         return ret;
3840 }
3841 EXPORT_SYMBOL_GPL(register_ftrace_function);
3842
3843 /**
3844  * unregister_ftrace_function - unregister a function for profiling.
3845  * @ops: ops structure that holds the function to unregister
3846  *
3847  * Unregister a function that was added to be called by ftrace profiling.
3848  */
3849 int unregister_ftrace_function(struct ftrace_ops *ops)
3850 {
3851         int ret;
3852
3853         mutex_lock(&ftrace_lock);
3854         ret = __unregister_ftrace_function(ops);
3855         if (!ret)
3856                 ftrace_shutdown(ops, 0);
3857         mutex_unlock(&ftrace_lock);
3858
3859         return ret;
3860 }
3861 EXPORT_SYMBOL_GPL(unregister_ftrace_function);
3862
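/*
 * Handler for the ftrace_enabled sysctl. When the value changes,
 * either reinstall the registered trace function(s) or point
 * ftrace_trace_function back at ftrace_stub.
 */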
3863 int
3864 ftrace_enable_sysctl(struct ctl_table *table, int write,
3865                      void __user *buffer, size_t *lenp,
3866                      loff_t *ppos)
3867 {
3868         int ret = -ENODEV;
3869
3870         mutex_lock(&ftrace_lock);
3871
3872         if (unlikely(ftrace_disabled))
3873                 goto out;
3874
3875         ret = proc_dointvec(table, write, buffer, lenp, ppos);
3876
3877         if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
3878                 goto out;
3879
3880         last_ftrace_enabled = !!ftrace_enabled;
3881
3882         if (ftrace_enabled) {
3883
3884                 ftrace_startup_sysctl();
3885
3886                 /* we are starting ftrace again */
3887                 if (ftrace_ops_list != &ftrace_list_end) {
3888                         if (ftrace_ops_list->next == &ftrace_list_end)
3889                                 ftrace_trace_function = ftrace_ops_list->func;
3890                         else
3891                                 ftrace_trace_function = ftrace_ops_list_func;
3892                 }
3893
3894         } else {
3895                 /* stopping ftrace calls (just send to ftrace_stub) */
3896                 ftrace_trace_function = ftrace_stub;
3897
3898                 ftrace_shutdown_sysctl();
3899         }
3900
3901  out:
3902         mutex_unlock(&ftrace_lock);
3903         return ret;
3904 }
3905
3906 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3907
3908 static int ftrace_graph_active;
3909 static struct notifier_block ftrace_suspend_notifier;
3910
3911 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
3912 {
3913         return 0;
3914 }
3915
3916 /* The callbacks that hook a function */
3917 trace_func_graph_ret_t ftrace_graph_return =
3918                         (trace_func_graph_ret_t)ftrace_stub;
3919 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
3920
3921 /* Allocate return stacks for up to FTRACE_RETSTACK_ALLOC_SIZE tasks at a time. */
3922 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
3923 {
3924         int i;
3925         int ret = 0;
3926         unsigned long flags;
3927         int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
3928         struct task_struct *g, *t;
3929
3930         for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
3931                 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
3932                                         * sizeof(struct ftrace_ret_stack),
3933                                         GFP_KERNEL);
3934                 if (!ret_stack_list[i]) {
3935                         start = 0;
3936                         end = i;
3937                         ret = -ENOMEM;
3938                         goto free;
3939                 }
3940         }
3941
3942         read_lock_irqsave(&tasklist_lock, flags);
3943         do_each_thread(g, t) {
3944                 if (start == end) {
3945                         ret = -EAGAIN;
3946                         goto unlock;
3947                 }
3948
3949                 if (t->ret_stack == NULL) {
3950                         atomic_set(&t->tracing_graph_pause, 0);
3951                         atomic_set(&t->trace_overrun, 0);
3952                         t->curr_ret_stack = -1;
3953                         /* Make sure the tasks see the -1 first: */
3954                         smp_wmb();
3955                         t->ret_stack = ret_stack_list[start++];
3956                 }
3957         } while_each_thread(g, t);
3958
3959 unlock:
3960         read_unlock_irqrestore(&tasklist_lock, flags);
3961 free:
3962         for (i = start; i < end; i++)
3963                 kfree(ret_stack_list[i]);
3964         return ret;
3965 }
3966
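/*
 * sched_switch probe: unless sleep-time accounting is enabled, add the
 * time @next spent off the CPU to the calltime of every function still
 * on its return stack, so sleeping does not count toward the measured
 * duration of those functions.
 */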
3967 static void
3968 ftrace_graph_probe_sched_switch(void *ignore,
3969                         struct task_struct *prev, struct task_struct *next)
3970 {
3971         unsigned long long timestamp;
3972         int index;
3973
3974         /*
3975          * Does the user want to count the time a function was asleep?
3976          * If so, do not update the time stamps.
3977          */
3978         if (trace_flags & TRACE_ITER_SLEEP_TIME)
3979                 return;
3980
3981         timestamp = trace_clock_local();
3982
3983         prev->ftrace_timestamp = timestamp;
3984
3985         /* only process tasks that we timestamped */
3986         if (!next->ftrace_timestamp)
3987                 return;
3988
3989         /*
3990          * Update all the counters in next to make up for the
3991          * time next was sleeping.
3992          */
3993         timestamp -= next->ftrace_timestamp;
3994
3995         for (index = next->curr_ret_stack; index >= 0; index--)
3996                 next->ret_stack[index].calltime += timestamp;
3997 }
3998
3999 /* Allocate a return stack for each task */
4000 static int start_graph_tracing(void)
4001 {
4002         struct ftrace_ret_stack **ret_stack_list;
4003         int ret, cpu;
4004
4005         ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
4006                                 sizeof(struct ftrace_ret_stack *),
4007                                 GFP_KERNEL);
4008
4009         if (!ret_stack_list)
4010                 return -ENOMEM;
4011
4012         /* The idle tasks' ret_stack will never be freed */
4013         for_each_online_cpu(cpu) {
4014                 if (!idle_task(cpu)->ret_stack)
4015                         ftrace_graph_init_idle_task(idle_task(cpu), cpu);
4016         }
4017
4018         do {
4019                 ret = alloc_retstack_tasklist(ret_stack_list);
4020         } while (ret == -EAGAIN);
4021
4022         if (!ret) {
4023                 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
4024                 if (ret)
4025                         pr_info("ftrace_graph: Couldn't activate tracepoint"
4026                                 " probe to kernel_sched_switch\n");
4027         }
4028
4029         kfree(ret_stack_list);
4030         return ret;
4031 }
4032
4033 /*
4034  * Hibernation protection.
4035  * The state of the current task is too unstable during
4036  * suspend/restore to disk, so we want to protect against that.
4037  */
4038 static int
4039 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
4040                                                         void *unused)
4041 {
4042         switch (state) {
4043         case PM_HIBERNATION_PREPARE:
4044                 pause_graph_tracing();
4045                 break;
4046
4047         case PM_POST_HIBERNATION:
4048                 unpause_graph_tracing();
4049                 break;
4050         }
4051         return NOTIFY_DONE;
4052 }
4053
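/**
 * register_ftrace_graph - register callbacks for function graph tracing
 * @retfunc: callback invoked when a traced function returns
 * @entryfunc: callback invoked when a traced function is entered
 *
 * Only one graph tracer may be registered at a time; a second attempt
 * returns -EBUSY.
 */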
4054 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
4055                         trace_func_graph_ent_t entryfunc)
4056 {
4057         int ret = 0;
4058
4059         mutex_lock(&ftrace_lock);
4060
4061         /* we currently allow only one tracer registered at a time */
4062         if (ftrace_graph_active) {
4063                 ret = -EBUSY;
4064                 goto out;
4065         }
4066
4067         ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
4068         register_pm_notifier(&ftrace_suspend_notifier);
4069
4070         ftrace_graph_active++;
4071         ret = start_graph_tracing();
4072         if (ret) {
4073                 ftrace_graph_active--;
4074                 goto out;
4075         }
4076
4077         ftrace_graph_return = retfunc;
4078         ftrace_graph_entry = entryfunc;
4079
4080         ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
4081
4082 out:
4083         mutex_unlock(&ftrace_lock);
4084         return ret;
4085 }
4086
4087 void unregister_ftrace_graph(void)
4088 {
4089         mutex_lock(&ftrace_lock);
4090
4091         if (unlikely(!ftrace_graph_active))
4092                 goto out;
4093
4094         ftrace_graph_active--;
4095         ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
4096         ftrace_graph_entry = ftrace_graph_entry_stub;
4097         ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
4098         unregister_pm_notifier(&ftrace_suspend_notifier);
4099         unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
4100
4101  out:
4102         mutex_unlock(&ftrace_lock);
4103 }
4104
4105 static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
4106
4107 static void
4108 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
4109 {
4110         atomic_set(&t->tracing_graph_pause, 0);
4111         atomic_set(&t->trace_overrun, 0);
4112         t->ftrace_timestamp = 0;
4113         /* make curr_ret_stack visible before we add the ret_stack */
4114         smp_wmb();
4115         t->ret_stack = ret_stack;
4116 }
4117
4118 /*
4119  * Allocate a return stack for the idle task. May be the first
4120  * time through, or it may be done by CPU hotplug online.
4121  */
4122 void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
4123 {
4124         t->curr_ret_stack = -1;
4125         /*
4126          * The idle task has no parent, it either has its own
4127          * stack or no stack at all.
4128          */
4129         if (t->ret_stack)
4130                 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
4131
4132         if (ftrace_graph_active) {
4133                 struct ftrace_ret_stack *ret_stack;
4134
4135                 ret_stack = per_cpu(idle_ret_stack, cpu);
4136                 if (!ret_stack) {
4137                         ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4138                                             * sizeof(struct ftrace_ret_stack),
4139                                             GFP_KERNEL);
4140                         if (!ret_stack)
4141                                 return;
4142                         per_cpu(idle_ret_stack, cpu) = ret_stack;
4143                 }
4144                 graph_init_task(t, ret_stack);
4145         }
4146 }
4147
4148 /* Allocate a return stack for newly created task */
4149 void ftrace_graph_init_task(struct task_struct *t)
4150 {
4151         /* Make sure we do not use the parent ret_stack */
4152         t->ret_stack = NULL;
4153         t->curr_ret_stack = -1;
4154
4155         if (ftrace_graph_active) {
4156                 struct ftrace_ret_stack *ret_stack;
4157
4158                 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4159                                 * sizeof(struct ftrace_ret_stack),
4160                                 GFP_KERNEL);
4161                 if (!ret_stack)
4162                         return;
4163                 graph_init_task(t, ret_stack);
4164         }
4165 }
4166
4167 void ftrace_graph_exit_task(struct task_struct *t)
4168 {
4169         struct ftrace_ret_stack *ret_stack = t->ret_stack;
4170
4171         t->ret_stack = NULL;
4172         /* NULL must become visible to IRQs before we free it: */
4173         barrier();
4174
4175         kfree(ret_stack);
4176 }
4177
4178 void ftrace_graph_stop(void)
4179 {
4180         ftrace_stop();
4181 }
4182 #endif