ftrace: Use schedule_on_each_cpu() as a heavy synchronize_sched()
[pandora-kernel.git] / kernel / trace / ftrace.c
1 /*
2  * Infrastructure for profiling code inserted by 'gcc -pg'.
3  *
4  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6  *
7  * Originally ported from the -rt patch by:
8  *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9  *
10  * Based on code in the latency_tracer, that is:
11  *
12  *  Copyright (C) 2004-2006 Ingo Molnar
13  *  Copyright (C) 2004 William Lee Irwin III
14  */
15
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/suspend.h>
21 #include <linux/debugfs.h>
22 #include <linux/hardirq.h>
23 #include <linux/kthread.h>
24 #include <linux/uaccess.h>
25 #include <linux/module.h>
26 #include <linux/ftrace.h>
27 #include <linux/sysctl.h>
28 #include <linux/slab.h>
29 #include <linux/ctype.h>
30 #include <linux/list.h>
31 #include <linux/hash.h>
32 #include <linux/rcupdate.h>
33
34 #include <trace/events/sched.h>
35
36 #include <asm/setup.h>
37
38 #include "trace_output.h"
39 #include "trace_stat.h"
40
41 #define FTRACE_WARN_ON(cond)                    \
42         ({                                      \
43                 int ___r = cond;                \
44                 if (WARN_ON(___r))              \
45                         ftrace_kill();          \
46                 ___r;                           \
47         })
48
49 #define FTRACE_WARN_ON_ONCE(cond)               \
50         ({                                      \
51                 int ___r = cond;                \
52                 if (WARN_ON_ONCE(___r))         \
53                         ftrace_kill();          \
54                 ___r;                           \
55         })
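/*
 * Both macros evaluate to the tested condition so they can be used
 * inside an if(); when the warning fires they also call ftrace_kill(),
 * which sets ftrace_disabled and shuts function tracing down entirely.
 */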
56
57 /* hash bits for specific function selection */
58 #define FTRACE_HASH_BITS 7
59 #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
60 #define FTRACE_HASH_DEFAULT_BITS 10
61 #define FTRACE_HASH_MAX_BITS 12
62
63 /* ftrace_enabled is a method to turn ftrace on or off */
64 int ftrace_enabled __read_mostly;
65 static int last_ftrace_enabled;
66
67 /* Quick disabling of function tracer. */
68 int function_trace_stop;
69
70 /* List for set_ftrace_pid's pids. */
71 LIST_HEAD(ftrace_pids);
72 struct ftrace_pid {
73         struct list_head list;
74         struct pid *pid;
75 };
76
77 /*
78  * ftrace_disabled is set when an anomaly is discovered.
79  * ftrace_disabled is much stronger than ftrace_enabled.
80  */
81 static int ftrace_disabled __read_mostly;
82
83 static DEFINE_MUTEX(ftrace_lock);
84
85 static struct ftrace_ops ftrace_list_end __read_mostly = {
86         .func           = ftrace_stub,
87 };
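/*
 * ftrace_list_end is a sentinel: both ops lists below are terminated by
 * it rather than by NULL, so list walks simply stop when they reach it.
 */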
88
89 static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
90 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
91 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
92 static ftrace_func_t __ftrace_trace_function_delay __read_mostly = ftrace_stub;
93 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
94 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
95 static struct ftrace_ops global_ops;
96
97 static void
98 ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
99
100 /*
101  * Traverse the ftrace_global_list, invoking all entries.  The reason that we
102  * can use rcu_dereference_raw() is that elements removed from this list
103  * are simply leaked, so there is no need to interact with a grace-period
104  * mechanism.  The rcu_dereference_raw() calls are needed to handle
105  * concurrent insertions into the ftrace_global_list.
106  *
107  * Silly Alpha and silly pointer-speculation compiler optimizations!
108  */
109 static void ftrace_global_list_func(unsigned long ip,
110                                     unsigned long parent_ip)
111 {
112         struct ftrace_ops *op;
113
114         if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT)))
115                 return;
116
117         trace_recursion_set(TRACE_GLOBAL_BIT);
118         op = rcu_dereference_raw(ftrace_global_list); /*see above*/
119         while (op != &ftrace_list_end) {
120                 op->func(ip, parent_ip);
121                 op = rcu_dereference_raw(op->next); /*see above*/
122         }
123         trace_recursion_clear(TRACE_GLOBAL_BIT);
124 }
125
126 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
127 {
128         if (!test_tsk_trace_trace(current))
129                 return;
130
131         ftrace_pid_function(ip, parent_ip);
132 }
133
134 static void set_ftrace_pid_function(ftrace_func_t func)
135 {
136         /* do not set ftrace_pid_function to itself! */
137         if (func != ftrace_pid_func)
138                 ftrace_pid_function = func;
139 }
140
141 /**
142  * clear_ftrace_function - reset the ftrace function
143  *
144  * This NULLs the ftrace function and in essence stops
145  * tracing.  There may be a lag before tracing actually stops on all CPUs.
146  */
147 void clear_ftrace_function(void)
148 {
149         ftrace_trace_function = ftrace_stub;
150         __ftrace_trace_function = ftrace_stub;
151         __ftrace_trace_function_delay = ftrace_stub;
152         ftrace_pid_function = ftrace_stub;
153 }
154
155 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
156 /*
157  * For those archs that do not test function_trace_stop in their
158  * mcount call site, we need to do it from C.
159  */
160 static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
161 {
162         if (function_trace_stop)
163                 return;
164
165         __ftrace_trace_function(ip, parent_ip);
166 }
167 #endif
168
169 static void update_global_ops(void)
170 {
171         ftrace_func_t func;
172
173         /*
174          * If there's only one function registered, then call that
175          * function directly. Otherwise, we need to iterate over the
176          * registered callers.
177          */
178         if (ftrace_global_list == &ftrace_list_end ||
179             ftrace_global_list->next == &ftrace_list_end)
180                 func = ftrace_global_list->func;
181         else
182                 func = ftrace_global_list_func;
183
184         /* If we filter on pids, update to use the pid function */
185         if (!list_empty(&ftrace_pids)) {
186                 set_ftrace_pid_function(func);
187                 func = ftrace_pid_func;
188         }
189
190         global_ops.func = func;
191 }
192
193 static void update_ftrace_function(void)
194 {
195         ftrace_func_t func;
196
197         update_global_ops();
198
199         /*
200          * If we are at the end of the list and this ops is
201          * not dynamic, then have the mcount trampoline call
202          * the function directly
203          */
204         if (ftrace_ops_list == &ftrace_list_end ||
205             (ftrace_ops_list->next == &ftrace_list_end &&
206              !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC)))
207                 func = ftrace_ops_list->func;
208         else
209                 func = ftrace_ops_list_func;
210
211 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
212         ftrace_trace_function = func;
213 #else
214 #ifdef CONFIG_DYNAMIC_FTRACE
215         /* do not update till all functions have been modified */
216         __ftrace_trace_function_delay = func;
217 #else
218         __ftrace_trace_function = func;
219 #endif
220         ftrace_trace_function = ftrace_test_stop_func;
221 #endif
222 }
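/*
 * Note: without CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST but with dynamic
 * ftrace, the new callback is parked in __ftrace_trace_function_delay and
 * only copied into __ftrace_trace_function from __ftrace_modify_code(),
 * after all the call sites have been updated (see the comment there).
 */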
223
224 static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
225 {
226         ops->next = *list;
227         /*
228          * We are entering ops into the list but another
229          * CPU might be walking that list. We need to make sure
230          * the ops->next pointer is valid before another CPU sees
231          * the ops pointer inserted into the list.
232          */
233         rcu_assign_pointer(*list, ops);
234 }
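/*
 * rcu_assign_pointer() orders the ops->next store above before the store
 * that publishes ops as the new list head, pairing with the
 * rcu_dereference_raw() used by readers such as ftrace_global_list_func().
 */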
235
236 static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
237 {
238         struct ftrace_ops **p;
239
240         /*
241          * If we are removing the last function, then simply point
242          * to the ftrace_stub.
243          */
244         if (*list == ops && ops->next == &ftrace_list_end) {
245                 *list = &ftrace_list_end;
246                 return 0;
247         }
248
249         for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
250                 if (*p == ops)
251                         break;
252
253         if (*p != ops)
254                 return -1;
255
256         *p = (*p)->next;
257         return 0;
258 }
259
260 static int __register_ftrace_function(struct ftrace_ops *ops)
261 {
262         if (FTRACE_WARN_ON(ops == &global_ops))
263                 return -EINVAL;
264
265         if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
266                 return -EBUSY;
267
268         if (!core_kernel_data((unsigned long)ops))
269                 ops->flags |= FTRACE_OPS_FL_DYNAMIC;
270
271         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
272                 int first = ftrace_global_list == &ftrace_list_end;
273                 add_ftrace_ops(&ftrace_global_list, ops);
274                 ops->flags |= FTRACE_OPS_FL_ENABLED;
275                 if (first)
276                         add_ftrace_ops(&ftrace_ops_list, &global_ops);
277         } else
278                 add_ftrace_ops(&ftrace_ops_list, ops);
279
280         if (ftrace_enabled)
281                 update_ftrace_function();
282
283         return 0;
284 }
285
286 static void ftrace_sync(struct work_struct *work)
287 {
288         /*
289          * This function is just a stub to implement a hard force
290          * of synchronize_sched(). This requires synchronizing
291          * tasks even in userspace and idle.
292          *
293          * Yes, function tracing is rude.
294          */
295 }
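/*
 * schedule_on_each_cpu(ftrace_sync) queues this empty work on every
 * online CPU and waits for it to complete.  A CPU can only run the work
 * by going through the scheduler, so the intent is that afterwards no
 * CPU can still be inside a callback from the old ops list, even one
 * that was idle or in userspace when the list was changed.
 */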
296
297 static int __unregister_ftrace_function(struct ftrace_ops *ops)
298 {
299         int ret;
300
301         if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
302                 return -EBUSY;
303
304         if (FTRACE_WARN_ON(ops == &global_ops))
305                 return -EINVAL;
306
307         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
308                 ret = remove_ftrace_ops(&ftrace_global_list, ops);
309                 if (!ret && ftrace_global_list == &ftrace_list_end)
310                         ret = remove_ftrace_ops(&ftrace_ops_list, &global_ops);
311                 if (!ret)
312                         ops->flags &= ~FTRACE_OPS_FL_ENABLED;
313         } else
314                 ret = remove_ftrace_ops(&ftrace_ops_list, ops);
315
316         if (ret < 0)
317                 return ret;
318
319         if (ftrace_enabled)
320                 update_ftrace_function();
321
322         /*
323          * Dynamic ops may be freed, we must make sure that all
324          * callers are done before leaving this function.
325          *
326          * Again, normal synchronize_sched() is not good enough.
327          * We need to do a hard force of sched synchronization.
328          */
329         if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
330                 schedule_on_each_cpu(ftrace_sync);
331
332
333         return 0;
334 }
335
336 static void ftrace_update_pid_func(void)
337 {
338         /* Only do something if we are tracing something */
339         if (ftrace_trace_function == ftrace_stub)
340                 return;
341
342         update_ftrace_function();
343 }
344
345 #ifdef CONFIG_FUNCTION_PROFILER
346 struct ftrace_profile {
347         struct hlist_node               node;
348         unsigned long                   ip;
349         unsigned long                   counter;
350 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
351         unsigned long long              time;
352         unsigned long long              time_squared;
353 #endif
354 };
355
356 struct ftrace_profile_page {
357         struct ftrace_profile_page      *next;
358         unsigned long                   index;
359         struct ftrace_profile           records[];
360 };
361
362 struct ftrace_profile_stat {
363         atomic_t                        disabled;
364         struct hlist_head               *hash;
365         struct ftrace_profile_page      *pages;
366         struct ftrace_profile_page      *start;
367         struct tracer_stat              stat;
368 };
369
370 #define PROFILE_RECORDS_SIZE                                            \
371         (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
372
373 #define PROFILES_PER_PAGE                                       \
374         (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
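/*
 * As a rough guide: with 4K pages on a 64-bit kernel this is about 85
 * records per page with the graph tracer (48-byte records) and about
 * 127 without it (32-byte records); exact numbers depend on the config.
 */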
375
376 static int ftrace_profile_bits __read_mostly;
377 static int ftrace_profile_enabled __read_mostly;
378
379 /* ftrace_profile_lock - synchronize the enable and disable of the profiler */
380 static DEFINE_MUTEX(ftrace_profile_lock);
381
382 static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
383
384 #define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */
385
386 static void *
387 function_stat_next(void *v, int idx)
388 {
389         struct ftrace_profile *rec = v;
390         struct ftrace_profile_page *pg;
391
392         pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
393
394  again:
395         if (idx != 0)
396                 rec++;
397
398         if ((void *)rec >= (void *)&pg->records[pg->index]) {
399                 pg = pg->next;
400                 if (!pg)
401                         return NULL;
402                 rec = &pg->records[0];
403                 if (!rec->counter)
404                         goto again;
405         }
406
407         return rec;
408 }
409
410 static void *function_stat_start(struct tracer_stat *trace)
411 {
412         struct ftrace_profile_stat *stat =
413                 container_of(trace, struct ftrace_profile_stat, stat);
414
415         if (!stat || !stat->start)
416                 return NULL;
417
418         return function_stat_next(&stat->start->records[0], 0);
419 }
420
421 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
422 /* function graph compares on total time */
423 static int function_stat_cmp(void *p1, void *p2)
424 {
425         struct ftrace_profile *a = p1;
426         struct ftrace_profile *b = p2;
427
428         if (a->time < b->time)
429                 return -1;
430         if (a->time > b->time)
431                 return 1;
432         else
433                 return 0;
434 }
435 #else
436 /* without function graph, compare against hit counts */
437 static int function_stat_cmp(void *p1, void *p2)
438 {
439         struct ftrace_profile *a = p1;
440         struct ftrace_profile *b = p2;
441
442         if (a->counter < b->counter)
443                 return -1;
444         if (a->counter > b->counter)
445                 return 1;
446         else
447                 return 0;
448 }
449 #endif
450
451 static int function_stat_headers(struct seq_file *m)
452 {
453 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
454         seq_printf(m, "  Function                               "
455                    "Hit    Time            Avg             s^2\n"
456                       "  --------                               "
457                    "---    ----            ---             ---\n");
458 #else
459         seq_printf(m, "  Function                               Hit\n"
460                       "  --------                               ---\n");
461 #endif
462         return 0;
463 }
464
465 static int function_stat_show(struct seq_file *m, void *v)
466 {
467         struct ftrace_profile *rec = v;
468         char str[KSYM_SYMBOL_LEN];
469         int ret = 0;
470 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
471         static struct trace_seq s;
472         unsigned long long avg;
473         unsigned long long stddev;
474 #endif
475         mutex_lock(&ftrace_profile_lock);
476
477         /* we raced with function_profile_reset() */
478         if (unlikely(rec->counter == 0)) {
479                 ret = -EBUSY;
480                 goto out;
481         }
482
483         kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
484         seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
485
486 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
487         seq_printf(m, "    ");
488         avg = rec->time;
489         do_div(avg, rec->counter);
490
491         /* Sample variance (s^2) */
492         if (rec->counter <= 1)
493                 stddev = 0;
494         else {
495                 stddev = rec->time_squared - rec->counter * avg * avg;
496                 /*
497                  * Divide only 1000 for ns^2 -> us^2 conversion.
498                  * Divide only by 1000 for the ns^2 -> us^2 conversion;
499                  * trace_print_graph_duration() will divide by 1000 again.
500                 do_div(stddev, (rec->counter - 1) * 1000);
501         }
502
503         trace_seq_init(&s);
504         trace_print_graph_duration(rec->time, &s);
505         trace_seq_puts(&s, "    ");
506         trace_print_graph_duration(avg, &s);
507         trace_seq_puts(&s, "    ");
508         trace_print_graph_duration(stddev, &s);
509         trace_print_seq(m, &s);
510 #endif
511         seq_putc(m, '\n');
512 out:
513         mutex_unlock(&ftrace_profile_lock);
514
515         return ret;
516 }
517
518 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
519 {
520         struct ftrace_profile_page *pg;
521
522         pg = stat->pages = stat->start;
523
524         while (pg) {
525                 memset(pg->records, 0, PROFILE_RECORDS_SIZE);
526                 pg->index = 0;
527                 pg = pg->next;
528         }
529
530         memset(stat->hash, 0,
531                FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
532 }
533
534 int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
535 {
536         struct ftrace_profile_page *pg;
537         int functions;
538         int pages;
539         int i;
540
541         /* If we already allocated, do nothing */
542         if (stat->pages)
543                 return 0;
544
545         stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
546         if (!stat->pages)
547                 return -ENOMEM;
548
549 #ifdef CONFIG_DYNAMIC_FTRACE
550         functions = ftrace_update_tot_cnt;
551 #else
552         /*
553          * We do not know the number of functions that exist because
554          * dynamic tracing is what counts them. From past experience
555          * we have around 20K functions. That should be more than enough.
556          * It is highly unlikely we will execute every function in
557          * the kernel.
558          */
559         functions = 20000;
560 #endif
561
562         pg = stat->start = stat->pages;
563
564         pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
565
566         for (i = 1; i < pages; i++) {
567                 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
568                 if (!pg->next)
569                         goto out_free;
570                 pg = pg->next;
571         }
572
573         return 0;
574
575  out_free:
576         pg = stat->start;
577         while (pg) {
578                 unsigned long tmp = (unsigned long)pg;
579
580                 pg = pg->next;
581                 free_page(tmp);
582         }
583
584         stat->pages = NULL;
585         stat->start = NULL;
586
587         return -ENOMEM;
588 }
589
590 static int ftrace_profile_init_cpu(int cpu)
591 {
592         struct ftrace_profile_stat *stat;
593         int size;
594
595         stat = &per_cpu(ftrace_profile_stats, cpu);
596
597         if (stat->hash) {
598                 /* If the profile is already created, simply reset it */
599                 ftrace_profile_reset(stat);
600                 return 0;
601         }
602
603         /*
604          * We are profiling all functions, but usually only a few thousand
605          * functions are hit. We'll make a hash of 1024 items.
606          */
607         size = FTRACE_PROFILE_HASH_SIZE;
608
609         stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
610
611         if (!stat->hash)
612                 return -ENOMEM;
613
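        /*
         * ftrace_profile_bits is the log2 of the hash size: with the
         * 1024-bucket hash the loop below yields 10, which is what
         * hash_long() is later called with.
         */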
614         if (!ftrace_profile_bits) {
615                 size--;
616
617                 for (; size; size >>= 1)
618                         ftrace_profile_bits++;
619         }
620
621         /* Preallocate the function profiling pages */
622         if (ftrace_profile_pages_init(stat) < 0) {
623                 kfree(stat->hash);
624                 stat->hash = NULL;
625                 return -ENOMEM;
626         }
627
628         return 0;
629 }
630
631 static int ftrace_profile_init(void)
632 {
633         int cpu;
634         int ret = 0;
635
636         for_each_possible_cpu(cpu) {
637                 ret = ftrace_profile_init_cpu(cpu);
638                 if (ret)
639                         break;
640         }
641
642         return ret;
643 }
644
645 /* interrupts must be disabled */
646 static struct ftrace_profile *
647 ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
648 {
649         struct ftrace_profile *rec;
650         struct hlist_head *hhd;
651         struct hlist_node *n;
652         unsigned long key;
653
654         key = hash_long(ip, ftrace_profile_bits);
655         hhd = &stat->hash[key];
656
657         if (hlist_empty(hhd))
658                 return NULL;
659
660         hlist_for_each_entry_rcu(rec, n, hhd, node) {
661                 if (rec->ip == ip)
662                         return rec;
663         }
664
665         return NULL;
666 }
667
668 static void ftrace_add_profile(struct ftrace_profile_stat *stat,
669                                struct ftrace_profile *rec)
670 {
671         unsigned long key;
672
673         key = hash_long(rec->ip, ftrace_profile_bits);
674         hlist_add_head_rcu(&rec->node, &stat->hash[key]);
675 }
676
677 /*
678  * The memory is already allocated; this simply finds a new record to use.
679  */
680 static struct ftrace_profile *
681 ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
682 {
683         struct ftrace_profile *rec = NULL;
684
685         /* prevent recursion (from NMIs) */
686         if (atomic_inc_return(&stat->disabled) != 1)
687                 goto out;
688
689         /*
690          * Try to find the function again since an NMI
691          * could have added it
692          */
693         rec = ftrace_find_profiled_func(stat, ip);
694         if (rec)
695                 goto out;
696
697         if (stat->pages->index == PROFILES_PER_PAGE) {
698                 if (!stat->pages->next)
699                         goto out;
700                 stat->pages = stat->pages->next;
701         }
702
703         rec = &stat->pages->records[stat->pages->index++];
704         rec->ip = ip;
705         ftrace_add_profile(stat, rec);
706
707  out:
708         atomic_dec(&stat->disabled);
709
710         return rec;
711 }
712
713 static void
714 function_profile_call(unsigned long ip, unsigned long parent_ip)
715 {
716         struct ftrace_profile_stat *stat;
717         struct ftrace_profile *rec;
718         unsigned long flags;
719
720         if (!ftrace_profile_enabled)
721                 return;
722
723         local_irq_save(flags);
724
725         stat = &__get_cpu_var(ftrace_profile_stats);
726         if (!stat->hash || !ftrace_profile_enabled)
727                 goto out;
728
729         rec = ftrace_find_profiled_func(stat, ip);
730         if (!rec) {
731                 rec = ftrace_profile_alloc(stat, ip);
732                 if (!rec)
733                         goto out;
734         }
735
736         rec->counter++;
737  out:
738         local_irq_restore(flags);
739 }
740
741 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
742 static int profile_graph_entry(struct ftrace_graph_ent *trace)
743 {
744         function_profile_call(trace->func, 0);
745         return 1;
746 }
747
748 static void profile_graph_return(struct ftrace_graph_ret *trace)
749 {
750         struct ftrace_profile_stat *stat;
751         unsigned long long calltime;
752         struct ftrace_profile *rec;
753         unsigned long flags;
754
755         local_irq_save(flags);
756         stat = &__get_cpu_var(ftrace_profile_stats);
757         if (!stat->hash || !ftrace_profile_enabled)
758                 goto out;
759
760         /* If the calltime was zero'd ignore it */
761         if (!trace->calltime)
762                 goto out;
763
764         calltime = trace->rettime - trace->calltime;
765
766         if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
767                 int index;
768
769                 index = trace->depth;
770
771                 /* Append this call time to the parent time to subtract */
772                 if (index)
773                         current->ret_stack[index - 1].subtime += calltime;
774
775                 if (current->ret_stack[index].subtime < calltime)
776                         calltime -= current->ret_stack[index].subtime;
777                 else
778                         calltime = 0;
779         }
780
781         rec = ftrace_find_profiled_func(stat, trace->func);
782         if (rec) {
783                 rec->time += calltime;
784                 rec->time_squared += calltime * calltime;
785         }
786
787  out:
788         local_irq_restore(flags);
789 }
790
791 static int register_ftrace_profiler(void)
792 {
793         return register_ftrace_graph(&profile_graph_return,
794                                      &profile_graph_entry);
795 }
796
797 static void unregister_ftrace_profiler(void)
798 {
799         unregister_ftrace_graph();
800 }
801 #else
802 static struct ftrace_ops ftrace_profile_ops __read_mostly = {
803         .func           = function_profile_call,
804 };
805
806 static int register_ftrace_profiler(void)
807 {
808         return register_ftrace_function(&ftrace_profile_ops);
809 }
810
811 static void unregister_ftrace_profiler(void)
812 {
813         unregister_ftrace_function(&ftrace_profile_ops);
814 }
815 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
816
817 static ssize_t
818 ftrace_profile_write(struct file *filp, const char __user *ubuf,
819                      size_t cnt, loff_t *ppos)
820 {
821         unsigned long val;
822         int ret;
823
824         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
825         if (ret)
826                 return ret;
827
828         val = !!val;
829
830         mutex_lock(&ftrace_profile_lock);
831         if (ftrace_profile_enabled ^ val) {
832                 if (val) {
833                         ret = ftrace_profile_init();
834                         if (ret < 0) {
835                                 cnt = ret;
836                                 goto out;
837                         }
838
839                         ret = register_ftrace_profiler();
840                         if (ret < 0) {
841                                 cnt = ret;
842                                 goto out;
843                         }
844                         ftrace_profile_enabled = 1;
845                 } else {
846                         ftrace_profile_enabled = 0;
847                         /*
848                          * unregister_ftrace_profiler calls stop_machine
849                          * so this acts like a synchronize_sched().
850                          */
851                         unregister_ftrace_profiler();
852                 }
853         }
854  out:
855         mutex_unlock(&ftrace_profile_lock);
856
857         *ppos += cnt;
858
859         return cnt;
860 }
861
862 static ssize_t
863 ftrace_profile_read(struct file *filp, char __user *ubuf,
864                      size_t cnt, loff_t *ppos)
865 {
866         char buf[64];           /* big enough to hold a number */
867         int r;
868
869         r = sprintf(buf, "%u\n", ftrace_profile_enabled);
870         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
871 }
872
873 static const struct file_operations ftrace_profile_fops = {
874         .open           = tracing_open_generic,
875         .read           = ftrace_profile_read,
876         .write          = ftrace_profile_write,
877         .llseek         = default_llseek,
878 };
879
880 /* used to initialize the real stat files */
881 static struct tracer_stat function_stats __initdata = {
882         .name           = "functions",
883         .stat_start     = function_stat_start,
884         .stat_next      = function_stat_next,
885         .stat_cmp       = function_stat_cmp,
886         .stat_headers   = function_stat_headers,
887         .stat_show      = function_stat_show
888 };
889
890 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
891 {
892         struct ftrace_profile_stat *stat;
893         struct dentry *entry;
894         char *name;
895         int ret;
896         int cpu;
897
898         for_each_possible_cpu(cpu) {
899                 stat = &per_cpu(ftrace_profile_stats, cpu);
900
901                 /* allocate enough for function name + cpu number */
902                 name = kmalloc(32, GFP_KERNEL);
903                 if (!name) {
904                         /*
905                          * The files created are permanent; if something goes wrong
906                          * we still do not free any memory they may reference.
907                          */
908                         WARN(1,
909                              "Could not allocate stat file for cpu %d\n",
910                              cpu);
911                         return;
912                 }
913                 stat->stat = function_stats;
914                 snprintf(name, 32, "function%d", cpu);
915                 stat->stat.name = name;
916                 ret = register_stat_tracer(&stat->stat);
917                 if (ret) {
918                         WARN(1,
919                              "Could not register function stat for cpu %d\n",
920                              cpu);
921                         kfree(name);
922                         return;
923                 }
924         }
925
926         entry = debugfs_create_file("function_profile_enabled", 0644,
927                                     d_tracer, NULL, &ftrace_profile_fops);
928         if (!entry)
929                 pr_warning("Could not create debugfs "
930                            "'function_profile_enabled' entry\n");
931 }
932
933 #else /* CONFIG_FUNCTION_PROFILER */
934 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
935 {
936 }
937 #endif /* CONFIG_FUNCTION_PROFILER */
938
939 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
940
941 static loff_t
942 ftrace_filter_lseek(struct file *file, loff_t offset, int whence)
943 {
944         loff_t ret;
945
946         if (file->f_mode & FMODE_READ)
947                 ret = seq_lseek(file, offset, whence);
948         else
949                 file->f_pos = ret = 1;
950
951         return ret;
952 }
953
954 #ifdef CONFIG_DYNAMIC_FTRACE
955
956 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
957 # error Dynamic ftrace depends on MCOUNT_RECORD
958 #endif
959
960 static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
961
962 struct ftrace_func_probe {
963         struct hlist_node       node;
964         struct ftrace_probe_ops *ops;
965         unsigned long           flags;
966         unsigned long           ip;
967         void                    *data;
968         struct rcu_head         rcu;
969 };
970
971 enum {
972         FTRACE_UPDATE_CALLS             = (1 << 0),
973         FTRACE_DISABLE_CALLS            = (1 << 1),
974         FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
975         FTRACE_START_FUNC_RET           = (1 << 3),
976         FTRACE_STOP_FUNC_RET            = (1 << 4),
977 };
978 struct ftrace_func_entry {
979         struct hlist_node hlist;
980         unsigned long ip;
981 };
982
983 struct ftrace_hash {
984         unsigned long           size_bits;
985         struct hlist_head       *buckets;
986         unsigned long           count;
987         struct rcu_head         rcu;
988 };
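/*
 * An ftrace_hash is a set of function addresses: "count" entries spread
 * over 2^size_bits buckets.  Hashes are freed via call_rcu_sched() (see
 * free_ftrace_hash_rcu() below), so readers only need to disable
 * preemption while looking an ip up.
 */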
989
990 /*
991  * We make these constant because no one should touch them,
992  * but they are used as the default "empty hash", to avoid allocating
993  * it all the time. These are in a read only section such that if
994  * anyone does try to modify it, it will cause an exception.
995  */
996 static const struct hlist_head empty_buckets[1];
997 static const struct ftrace_hash empty_hash = {
998         .buckets = (struct hlist_head *)empty_buckets,
999 };
1000 #define EMPTY_HASH      ((struct ftrace_hash *)&empty_hash)
1001
1002 static struct ftrace_ops global_ops = {
1003         .func                   = ftrace_stub,
1004         .notrace_hash           = EMPTY_HASH,
1005         .filter_hash            = EMPTY_HASH,
1006 };
1007
1008 static struct dyn_ftrace *ftrace_new_addrs;
1009
1010 static DEFINE_MUTEX(ftrace_regex_lock);
1011
1012 struct ftrace_page {
1013         struct ftrace_page      *next;
1014         int                     index;
1015         struct dyn_ftrace       records[];
1016 };
1017
1018 #define ENTRIES_PER_PAGE \
1019   ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
1020
1021 /* estimate from running different kernels */
1022 #define NR_TO_INIT              10000
1023
1024 static struct ftrace_page       *ftrace_pages_start;
1025 static struct ftrace_page       *ftrace_pages;
1026
1027 static struct dyn_ftrace *ftrace_free_records;
1028
1029 static bool ftrace_hash_empty(struct ftrace_hash *hash)
1030 {
1031         return !hash || !hash->count;
1032 }
1033
1034 static struct ftrace_func_entry *
1035 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1036 {
1037         unsigned long key;
1038         struct ftrace_func_entry *entry;
1039         struct hlist_head *hhd;
1040         struct hlist_node *n;
1041
1042         if (ftrace_hash_empty(hash))
1043                 return NULL;
1044
1045         if (hash->size_bits > 0)
1046                 key = hash_long(ip, hash->size_bits);
1047         else
1048                 key = 0;
1049
1050         hhd = &hash->buckets[key];
1051
1052         hlist_for_each_entry_rcu(entry, n, hhd, hlist) {
1053                 if (entry->ip == ip)
1054                         return entry;
1055         }
1056         return NULL;
1057 }
1058
1059 static void __add_hash_entry(struct ftrace_hash *hash,
1060                              struct ftrace_func_entry *entry)
1061 {
1062         struct hlist_head *hhd;
1063         unsigned long key;
1064
1065         if (hash->size_bits)
1066                 key = hash_long(entry->ip, hash->size_bits);
1067         else
1068                 key = 0;
1069
1070         hhd = &hash->buckets[key];
1071         hlist_add_head(&entry->hlist, hhd);
1072         hash->count++;
1073 }
1074
1075 static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
1076 {
1077         struct ftrace_func_entry *entry;
1078
1079         entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1080         if (!entry)
1081                 return -ENOMEM;
1082
1083         entry->ip = ip;
1084         __add_hash_entry(hash, entry);
1085
1086         return 0;
1087 }
1088
1089 static void
1090 free_hash_entry(struct ftrace_hash *hash,
1091                   struct ftrace_func_entry *entry)
1092 {
1093         hlist_del(&entry->hlist);
1094         kfree(entry);
1095         hash->count--;
1096 }
1097
1098 static void
1099 remove_hash_entry(struct ftrace_hash *hash,
1100                   struct ftrace_func_entry *entry)
1101 {
1102         hlist_del(&entry->hlist);
1103         hash->count--;
1104 }
1105
1106 static void ftrace_hash_clear(struct ftrace_hash *hash)
1107 {
1108         struct hlist_head *hhd;
1109         struct hlist_node *tp, *tn;
1110         struct ftrace_func_entry *entry;
1111         int size = 1 << hash->size_bits;
1112         int i;
1113
1114         if (!hash->count)
1115                 return;
1116
1117         for (i = 0; i < size; i++) {
1118                 hhd = &hash->buckets[i];
1119                 hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist)
1120                         free_hash_entry(hash, entry);
1121         }
1122         FTRACE_WARN_ON(hash->count);
1123 }
1124
1125 static void free_ftrace_hash(struct ftrace_hash *hash)
1126 {
1127         if (!hash || hash == EMPTY_HASH)
1128                 return;
1129         ftrace_hash_clear(hash);
1130         kfree(hash->buckets);
1131         kfree(hash);
1132 }
1133
1134 static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
1135 {
1136         struct ftrace_hash *hash;
1137
1138         hash = container_of(rcu, struct ftrace_hash, rcu);
1139         free_ftrace_hash(hash);
1140 }
1141
1142 static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
1143 {
1144         if (!hash || hash == EMPTY_HASH)
1145                 return;
1146         call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
1147 }
1148
1149 static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
1150 {
1151         struct ftrace_hash *hash;
1152         int size;
1153
1154         hash = kzalloc(sizeof(*hash), GFP_KERNEL);
1155         if (!hash)
1156                 return NULL;
1157
1158         size = 1 << size_bits;
1159         hash->buckets = kzalloc(sizeof(*hash->buckets) * size, GFP_KERNEL);
1160
1161         if (!hash->buckets) {
1162                 kfree(hash);
1163                 return NULL;
1164         }
1165
1166         hash->size_bits = size_bits;
1167
1168         return hash;
1169 }
1170
1171 static struct ftrace_hash *
1172 alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1173 {
1174         struct ftrace_func_entry *entry;
1175         struct ftrace_hash *new_hash;
1176         struct hlist_node *tp;
1177         int size;
1178         int ret;
1179         int i;
1180
1181         new_hash = alloc_ftrace_hash(size_bits);
1182         if (!new_hash)
1183                 return NULL;
1184
1185         /* Empty hash? */
1186         if (ftrace_hash_empty(hash))
1187                 return new_hash;
1188
1189         size = 1 << hash->size_bits;
1190         for (i = 0; i < size; i++) {
1191                 hlist_for_each_entry(entry, tp, &hash->buckets[i], hlist) {
1192                         ret = add_hash_entry(new_hash, entry->ip);
1193                         if (ret < 0)
1194                                 goto free_hash;
1195                 }
1196         }
1197
1198         FTRACE_WARN_ON(new_hash->count != hash->count);
1199
1200         return new_hash;
1201
1202  free_hash:
1203         free_ftrace_hash(new_hash);
1204         return NULL;
1205 }
1206
1207 static void
1208 ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
1209 static void
1210 ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);
1211
1212 static int
1213 ftrace_hash_move(struct ftrace_ops *ops, int enable,
1214                  struct ftrace_hash **dst, struct ftrace_hash *src)
1215 {
1216         struct ftrace_func_entry *entry;
1217         struct hlist_node *tp, *tn;
1218         struct hlist_head *hhd;
1219         struct ftrace_hash *old_hash;
1220         struct ftrace_hash *new_hash;
1221         unsigned long key;
1222         int size = src->count;
1223         int bits = 0;
1224         int ret;
1225         int i;
1226
1227         /*
1228          * Remove the current set, update the hash and add
1229          * them back.
1230          */
1231         ftrace_hash_rec_disable(ops, enable);
1232
1233         /*
1234          * If the new source is empty, just free dst and assign it
1235          * the empty_hash.
1236          */
1237         if (!src->count) {
1238                 free_ftrace_hash_rcu(*dst);
1239                 rcu_assign_pointer(*dst, EMPTY_HASH);
1240                 /* still need to update the function records */
1241                 ret = 0;
1242                 goto out;
1243         }
1244
1245         /*
1246          * Make the hash size about 1/2 the # found
1247          */
1248         for (size /= 2; size; size >>= 1)
1249                 bits++;
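        /*
         * For example, src->count == 100: size starts at 50, so bits
         * ends up 6 and the new hash gets 64 buckets.
         */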
1250
1251         /* Don't allocate too much */
1252         if (bits > FTRACE_HASH_MAX_BITS)
1253                 bits = FTRACE_HASH_MAX_BITS;
1254
1255         ret = -ENOMEM;
1256         new_hash = alloc_ftrace_hash(bits);
1257         if (!new_hash)
1258                 goto out;
1259
1260         size = 1 << src->size_bits;
1261         for (i = 0; i < size; i++) {
1262                 hhd = &src->buckets[i];
1263                 hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) {
1264                         if (bits > 0)
1265                                 key = hash_long(entry->ip, bits);
1266                         else
1267                                 key = 0;
1268                         remove_hash_entry(src, entry);
1269                         __add_hash_entry(new_hash, entry);
1270                 }
1271         }
1272
1273         old_hash = *dst;
1274         rcu_assign_pointer(*dst, new_hash);
1275         free_ftrace_hash_rcu(old_hash);
1276
1277         ret = 0;
1278  out:
1279         /*
1280          * Enable regardless of ret:
1281          *  On success, we enable the new hash.
1282          *  On failure, we re-enable the original hash.
1283          */
1284         ftrace_hash_rec_enable(ops, enable);
1285
1286         return ret;
1287 }
1288
1289 /*
1290  * Test the hashes for this ops to see if we want to call
1291  * the ops->func or not.
1292  *
1293  * It's a match if the ip is in the ops->filter_hash or
1294  * the filter_hash does not exist or is empty,
1295  *  AND
1296  * the ip is not in the ops->notrace_hash.
1297  *
1298  * This needs to be called with preemption disabled as
1299  * the hashes are freed with call_rcu_sched().
1300  */
1301 static int
1302 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
1303 {
1304         struct ftrace_hash *filter_hash;
1305         struct ftrace_hash *notrace_hash;
1306         int ret;
1307
1308         filter_hash = rcu_dereference_raw(ops->filter_hash);
1309         notrace_hash = rcu_dereference_raw(ops->notrace_hash);
1310
1311         if ((ftrace_hash_empty(filter_hash) ||
1312              ftrace_lookup_ip(filter_hash, ip)) &&
1313             (ftrace_hash_empty(notrace_hash) ||
1314              !ftrace_lookup_ip(notrace_hash, ip)))
1315                 ret = 1;
1316         else
1317                 ret = 0;
1318
1319         return ret;
1320 }
1321
1322 /*
1323  * These are nested for loops. Do not use 'break' to break out of the
1324  * walk; you must use a goto.
1325  */
1326 #define do_for_each_ftrace_rec(pg, rec)                                 \
1327         for (pg = ftrace_pages_start; pg; pg = pg->next) {              \
1328                 int _____i;                                             \
1329                 for (_____i = 0; _____i < pg->index; _____i++) {        \
1330                         rec = &pg->records[_____i];
1331
1332 #define while_for_each_ftrace_rec()             \
1333                 }                               \
1334         }
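/*
 * Typical usage (cf. ftrace_text_reserved() below); leave the walk with
 * a return or a goto, never with break:
 *
 *      do_for_each_ftrace_rec(pg, rec) {
 *              if (rec->ip == ip)
 *                      return 1;
 *      } while_for_each_ftrace_rec();
 *      return 0;
 */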
1335
1336 static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
1337                                      int filter_hash,
1338                                      bool inc)
1339 {
1340         struct ftrace_hash *hash;
1341         struct ftrace_hash *other_hash;
1342         struct ftrace_page *pg;
1343         struct dyn_ftrace *rec;
1344         int count = 0;
1345         int all = 0;
1346
1347         /* Only update if the ops has been registered */
1348         if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1349                 return;
1350
1351         /*
1352          * In the filter_hash case:
1353          *   If the count is zero, we update all records.
1354          *   Otherwise we just update the items in the hash.
1355          *
1356          * In the notrace_hash case:
1357          *   We update only the records that are in the hash.
1358          *   As disabling notrace means enabling the tracing,
1359          *   and enabling notrace means disabling, the inc variable
1360          *   gets inverted.
1361          */
1362         if (filter_hash) {
1363                 hash = ops->filter_hash;
1364                 other_hash = ops->notrace_hash;
1365                 if (ftrace_hash_empty(hash))
1366                         all = 1;
1367         } else {
1368                 inc = !inc;
1369                 hash = ops->notrace_hash;
1370                 other_hash = ops->filter_hash;
1371                 /*
1372                  * If the notrace hash has no items,
1373                  * then there's nothing to do.
1374                  */
1375                 if (ftrace_hash_empty(hash))
1376                         return;
1377         }
1378
1379         do_for_each_ftrace_rec(pg, rec) {
1380                 int in_other_hash = 0;
1381                 int in_hash = 0;
1382                 int match = 0;
1383
1384                 if (all) {
1385                         /*
1386                          * Only the filter_hash affects all records.
1387                          * Update if the record is not in the notrace hash.
1388                          */
1389                         if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
1390                                 match = 1;
1391                 } else {
1392                         in_hash = !!ftrace_lookup_ip(hash, rec->ip);
1393                         in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);
1394
1395                         /*
1396                          * Filter case: match if the ip is in this hash but not in
1397                          * the notrace hash.  Notrace case: match if the ip is in
                              * this hash and the filter hash also has it (or is empty).
                              */
1398                         if (filter_hash && in_hash && !in_other_hash)
1399                                 match = 1;
1400                         else if (!filter_hash && in_hash &&
1401                                  (in_other_hash || ftrace_hash_empty(other_hash)))
1402                                 match = 1;
1403                 }
1404                 if (!match)
1405                         continue;
1406
1407                 if (inc) {
1408                         rec->flags++;
1409                         if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX))
1410                                 return;
1411                 } else {
1412                         if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0))
1413                                 return;
1414                         rec->flags--;
1415                 }
1416                 count++;
1417                 /* Shortcut, if we handled all records, we are done. */
1418                 if (!all && count == hash->count)
1419                         return;
1420         } while_for_each_ftrace_rec();
1421 }
1422
1423 static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
1424                                     int filter_hash)
1425 {
1426         __ftrace_hash_rec_update(ops, filter_hash, 0);
1427 }
1428
1429 static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
1430                                    int filter_hash)
1431 {
1432         __ftrace_hash_rec_update(ops, filter_hash, 1);
1433 }
1434
1435 static void ftrace_free_rec(struct dyn_ftrace *rec)
1436 {
1437         rec->freelist = ftrace_free_records;
1438         ftrace_free_records = rec;
1439         rec->flags |= FTRACE_FL_FREE;
1440 }
1441
1442 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
1443 {
1444         struct dyn_ftrace *rec;
1445
1446         /* First check for freed records */
1447         if (ftrace_free_records) {
1448                 rec = ftrace_free_records;
1449
1450                 if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
1451                         FTRACE_WARN_ON_ONCE(1);
1452                         ftrace_free_records = NULL;
1453                         return NULL;
1454                 }
1455
1456                 ftrace_free_records = rec->freelist;
1457                 memset(rec, 0, sizeof(*rec));
1458                 return rec;
1459         }
1460
1461         if (ftrace_pages->index == ENTRIES_PER_PAGE) {
1462                 if (!ftrace_pages->next) {
1463                         /* allocate another page */
1464                         ftrace_pages->next =
1465                                 (void *)get_zeroed_page(GFP_KERNEL);
1466                         if (!ftrace_pages->next)
1467                                 return NULL;
1468                 }
1469                 ftrace_pages = ftrace_pages->next;
1470         }
1471
1472         return &ftrace_pages->records[ftrace_pages->index++];
1473 }
1474
1475 static struct dyn_ftrace *
1476 ftrace_record_ip(unsigned long ip)
1477 {
1478         struct dyn_ftrace *rec;
1479
1480         if (ftrace_disabled)
1481                 return NULL;
1482
1483         rec = ftrace_alloc_dyn_node(ip);
1484         if (!rec)
1485                 return NULL;
1486
1487         rec->ip = ip;
1488         rec->newlist = ftrace_new_addrs;
1489         ftrace_new_addrs = rec;
1490
1491         return rec;
1492 }
1493
1494 static void print_ip_ins(const char *fmt, unsigned char *p)
1495 {
1496         int i;
1497
1498         printk(KERN_CONT "%s", fmt);
1499
1500         for (i = 0; i < MCOUNT_INSN_SIZE; i++)
1501                 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
1502 }
1503
1504 static void ftrace_bug(int failed, unsigned long ip)
1505 {
1506         switch (failed) {
1507         case -EFAULT:
1508                 FTRACE_WARN_ON_ONCE(1);
1509                 pr_info("ftrace faulted on modifying ");
1510                 print_ip_sym(ip);
1511                 break;
1512         case -EINVAL:
1513                 FTRACE_WARN_ON_ONCE(1);
1514                 pr_info("ftrace failed to modify ");
1515                 print_ip_sym(ip);
1516                 print_ip_ins(" actual: ", (unsigned char *)ip);
1517                 printk(KERN_CONT "\n");
1518                 break;
1519         case -EPERM:
1520                 FTRACE_WARN_ON_ONCE(1);
1521                 pr_info("ftrace faulted on writing ");
1522                 print_ip_sym(ip);
1523                 break;
1524         default:
1525                 FTRACE_WARN_ON_ONCE(1);
1526                 pr_info("ftrace faulted on unknown error ");
1527                 print_ip_sym(ip);
1528         }
1529 }
1530
1531
1532 /* Return 1 if the address range is reserved for ftrace */
1533 int ftrace_text_reserved(void *start, void *end)
1534 {
1535         struct dyn_ftrace *rec;
1536         struct ftrace_page *pg;
1537
1538         do_for_each_ftrace_rec(pg, rec) {
1539                 if (rec->ip <= (unsigned long)end &&
1540                     rec->ip + MCOUNT_INSN_SIZE > (unsigned long)start)
1541                         return 1;
1542         } while_for_each_ftrace_rec();
1543         return 0;
1544 }
1545
1546
1547 static int
1548 __ftrace_replace_code(struct dyn_ftrace *rec, int update)
1549 {
1550         unsigned long ftrace_addr;
1551         unsigned long flag = 0UL;
1552
1553         ftrace_addr = (unsigned long)FTRACE_ADDR;
1554
1555         /*
1556          * If we are updating calls:
1557          *
1558          *   If the record has a ref count, then we need to enable it
1559          *   because someone is using it.
1560          *
1561          *   Otherwise we make sure it's disabled.
1562          *
1563          * If we are disabling calls, then disable all records that
1564          * are enabled.
1565          */
1566         if (update && (rec->flags & ~FTRACE_FL_MASK))
1567                 flag = FTRACE_FL_ENABLED;
1568
1569         /* If the state of this record hasn't changed, then do nothing */
1570         if ((rec->flags & FTRACE_FL_ENABLED) == flag)
1571                 return 0;
1572
1573         if (flag) {
1574                 rec->flags |= FTRACE_FL_ENABLED;
1575                 return ftrace_make_call(rec, ftrace_addr);
1576         }
1577
1578         rec->flags &= ~FTRACE_FL_ENABLED;
1579         return ftrace_make_nop(NULL, rec, ftrace_addr);
1580 }
1581
1582 static void ftrace_replace_code(int update)
1583 {
1584         struct dyn_ftrace *rec;
1585         struct ftrace_page *pg;
1586         int failed;
1587
1588         if (unlikely(ftrace_disabled))
1589                 return;
1590
1591         do_for_each_ftrace_rec(pg, rec) {
1592                 /* Skip over free records */
1593                 if (rec->flags & FTRACE_FL_FREE)
1594                         continue;
1595
1596                 failed = __ftrace_replace_code(rec, update);
1597                 if (failed) {
1598                         ftrace_bug(failed, rec->ip);
1599                         /* Stop processing */
1600                         return;
1601                 }
1602         } while_for_each_ftrace_rec();
1603 }
1604
1605 static int
1606 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
1607 {
1608         unsigned long ip;
1609         int ret;
1610
1611         ip = rec->ip;
1612
1613         if (unlikely(ftrace_disabled))
1614                 return 0;
1615
1616         ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
1617         if (ret) {
1618                 ftrace_bug(ret, ip);
1619                 return 0;
1620         }
1621         return 1;
1622 }
1623
1624 /*
1625  * archs can override this function if they must do something
1626  * before the modifying code is performed.
1627  */
1628 int __weak ftrace_arch_code_modify_prepare(void)
1629 {
1630         return 0;
1631 }
1632
1633 /*
1634  * archs can override this function if they must do something
1635  * after the modifying code is performed.
1636  */
1637 int __weak ftrace_arch_code_modify_post_process(void)
1638 {
1639         return 0;
1640 }
1641
1642 static int __ftrace_modify_code(void *data)
1643 {
1644         int *command = data;
1645
1646         /*
1647          * Do not call function tracer while we update the code.
1648          * We are in stop machine, no worrying about races.
1649          */
1650         function_trace_stop++;
1651
1652         if (*command & FTRACE_UPDATE_CALLS)
1653                 ftrace_replace_code(1);
1654         else if (*command & FTRACE_DISABLE_CALLS)
1655                 ftrace_replace_code(0);
1656
1657         if (*command & FTRACE_UPDATE_TRACE_FUNC)
1658                 ftrace_update_ftrace_func(ftrace_trace_function);
1659
1660         if (*command & FTRACE_START_FUNC_RET)
1661                 ftrace_enable_ftrace_graph_caller();
1662         else if (*command & FTRACE_STOP_FUNC_RET)
1663                 ftrace_disable_ftrace_graph_caller();
1664
1665 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
1666         /*
1667          * For archs that call ftrace_test_stop_func(), we must
1668          * wait till after we update all the function callers
1669          * before we update the callback. This keeps different
1670          * ops that record different functions from corrupting
1671          * each other.
1672          */
1673         __ftrace_trace_function = __ftrace_trace_function_delay;
1674 #endif
1675         function_trace_stop--;
1676
1677         return 0;
1678 }
1679
1680 static void ftrace_run_update_code(int command)
1681 {
1682         int ret;
1683
1684         ret = ftrace_arch_code_modify_prepare();
1685         FTRACE_WARN_ON(ret);
1686         if (ret)
1687                 return;
1688
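        /*
         * stop_machine() runs __ftrace_modify_code() on one CPU while
         * every other online CPU spins with interrupts disabled, so the
         * mcount call sites can be rewritten without any CPU executing
         * the instructions being patched.
         */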
1689         stop_machine(__ftrace_modify_code, &command, NULL);
1690
1691         ret = ftrace_arch_code_modify_post_process();
1692         FTRACE_WARN_ON(ret);
1693 }
1694
1695 static ftrace_func_t saved_ftrace_func;
1696 static int ftrace_start_up;
1697 static int global_start_up;
1698
1699 static void ftrace_startup_enable(int command)
1700 {
1701         if (saved_ftrace_func != ftrace_trace_function) {
1702                 saved_ftrace_func = ftrace_trace_function;
1703                 command |= FTRACE_UPDATE_TRACE_FUNC;
1704         }
1705
1706         if (!command || !ftrace_enabled)
1707                 return;
1708
1709         ftrace_run_update_code(command);
1710 }
1711
1712 static int ftrace_startup(struct ftrace_ops *ops, int command)
1713 {
1714         bool hash_enable = true;
1715         int ret;
1716
1717         if (unlikely(ftrace_disabled))
1718                 return -ENODEV;
1719
1720         ret = __register_ftrace_function(ops);
1721         if (ret)
1722                 return ret;
1723
1724         ftrace_start_up++;
1725         command |= FTRACE_UPDATE_CALLS;
1726
1727         /* ops marked global share the filter hashes */
1728         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
1729                 ops = &global_ops;
1730                 /* Don't update hash if global is already set */
1731                 if (global_start_up)
1732                         hash_enable = false;
1733                 global_start_up++;
1734         }
1735
1736         ops->flags |= FTRACE_OPS_FL_ENABLED;
1737         if (hash_enable)
1738                 ftrace_hash_rec_enable(ops, 1);
1739
1740         ftrace_startup_enable(command);
1741
1742         return 0;
1743 }
1744
1745 static int ftrace_shutdown(struct ftrace_ops *ops, int command)
1746 {
1747         bool hash_disable = true;
1748         int ret;
1749
1750         if (unlikely(ftrace_disabled))
1751                 return -ENODEV;
1752
1753         ret = __unregister_ftrace_function(ops);
1754         if (ret)
1755                 return ret;
1756
1757         ftrace_start_up--;
1758         /*
1759          * Just warn in case of imbalance; no need to kill ftrace. It's not
1760          * critical, but the ftrace_call callers may never be nopped again
1761          * after further ftrace uses.
1762          */
1763         WARN_ON_ONCE(ftrace_start_up < 0);
1764
1765         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
1766                 ops = &global_ops;
1767                 global_start_up--;
1768                 WARN_ON_ONCE(global_start_up < 0);
1769                 /* Don't update hash if global still has users */
1770                 if (global_start_up) {
1771                         WARN_ON_ONCE(!ftrace_start_up);
1772                         hash_disable = false;
1773                 }
1774         }
1775
1776         if (hash_disable)
1777                 ftrace_hash_rec_disable(ops, 1);
1778
1779         if (ops != &global_ops || !global_start_up)
1780                 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
1781
1782         command |= FTRACE_UPDATE_CALLS;
1783
1784         if (saved_ftrace_func != ftrace_trace_function) {
1785                 saved_ftrace_func = ftrace_trace_function;
1786                 command |= FTRACE_UPDATE_TRACE_FUNC;
1787         }
1788
1789         if (!command || !ftrace_enabled)
1790                 return 0;
1791
1792         ftrace_run_update_code(command);
1793         return 0;
1794 }
1795
1796 static void ftrace_startup_sysctl(void)
1797 {
1798         if (unlikely(ftrace_disabled))
1799                 return;
1800
1801         /* Force update next time */
1802         saved_ftrace_func = NULL;
1803         /* ftrace_start_up is non-zero if we want ftrace running */
1804         if (ftrace_start_up)
1805                 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
1806 }
1807
1808 static void ftrace_shutdown_sysctl(void)
1809 {
1810         if (unlikely(ftrace_disabled))
1811                 return;
1812
1813         /* ftrace_start_up is non-zero if ftrace is running */
1814         if (ftrace_start_up)
1815                 ftrace_run_update_code(FTRACE_DISABLE_CALLS);
1816 }
1817
1818 static cycle_t          ftrace_update_time;
1819 static unsigned long    ftrace_update_cnt;
1820 unsigned long           ftrace_update_tot_cnt;
1821
1822 static inline int ops_traces_mod(struct ftrace_ops *ops)
1823 {
1824         /*
1825          * An empty filter_hash defaults to tracing the whole module,
1826          * but a non-empty notrace hash requires testing individual module functions.
1827          */
1828         return ftrace_hash_empty(ops->filter_hash) &&
1829                 ftrace_hash_empty(ops->notrace_hash);
1830 }
1831
1832 /*
1833  * Check if the current ops references the record.
1834  *
1835  * If the ops traces all functions, then it was already accounted for.
1836  * If the ops does not trace the current record function, skip it.
1837  * If the ops ignores the function via notrace filter, skip it.
1838  */
1839 static inline bool
1840 ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
1841 {
1842         /* If ops isn't enabled, ignore it */
1843         if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1844                 return 0;
1845
1846         /* If ops traces all mods, we already accounted for it */
1847         if (ops_traces_mod(ops))
1848                 return 0;
1849
1850         /* The function must be in the filter */
1851         if (!ftrace_hash_empty(ops->filter_hash) &&
1852             !ftrace_lookup_ip(ops->filter_hash, rec->ip))
1853                 return 0;
1854
1855         /* If in notrace hash, we ignore it too */
1856         if (ftrace_lookup_ip(ops->notrace_hash, rec->ip))
1857                 return 0;
1858
1859         return 1;
1860 }
1861
1862 static int referenced_filters(struct dyn_ftrace *rec)
1863 {
1864         struct ftrace_ops *ops;
1865         int cnt = 0;
1866
1867         for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
1868                 if (ops_references_rec(ops, rec))
1869                     cnt++;
1870         }
1871
1872         return cnt;
1873 }
1874
1875 static int ftrace_update_code(struct module *mod)
1876 {
1877         struct dyn_ftrace *p;
1878         cycle_t start, stop;
1879         unsigned long ref = 0;
1880         bool test = false;
1881
1882         /*
1883          * When adding a module, we need to check if tracers are
1884          * currently enabled and if they are set to trace all functions.
1885          * If they are, we need to enable the module functions as well
1886          * as update the reference counts for those function records.
1887          */
1888         if (mod) {
1889                 struct ftrace_ops *ops;
1890
1891                 for (ops = ftrace_ops_list;
1892                      ops != &ftrace_list_end; ops = ops->next) {
1893                         if (ops->flags & FTRACE_OPS_FL_ENABLED) {
1894                                 if (ops_traces_mod(ops))
1895                                         ref++;
1896                                 else
1897                                         test = true;
1898                         }
1899                 }
1900         }
1901
1902         start = ftrace_now(raw_smp_processor_id());
1903         ftrace_update_cnt = 0;
1904
1905         while (ftrace_new_addrs) {
1906                 int cnt = ref;
1907
1908                 /* If something went wrong, bail without enabling anything */
1909                 if (unlikely(ftrace_disabled))
1910                         return -1;
1911
1912                 p = ftrace_new_addrs;
1913                 ftrace_new_addrs = p->newlist;
1914                 if (test)
1915                         cnt += referenced_filters(p);
1916                 p->flags = cnt;
1917
1918                 /*
1919                  * Do the initial record conversion from mcount jump
1920                  * to the NOP instructions.
1921                  */
1922                 if (!ftrace_code_disable(mod, p)) {
1923                         ftrace_free_rec(p);
1924                         /* Game over */
1925                         break;
1926                 }
1927
1928                 ftrace_update_cnt++;
1929
1930                 /*
1931                  * If the tracing is enabled, go ahead and enable the record.
1932                  *
1933                  * The reason not to enable the record immediately is the
1934                  * inherent check of ftrace_make_nop/ftrace_make_call for
1935                  * correct previous instructions.  Doing the NOP conversion
1936                  * first puts the module into the correct state, thus
1937                  * passing the ftrace_make_call check.
1938                  */
1939                 if (ftrace_start_up && cnt) {
1940                         int failed = __ftrace_replace_code(p, 1);
1941                         if (failed) {
1942                                 ftrace_bug(failed, p->ip);
1943                                 ftrace_free_rec(p);
1944                         }
1945                 }
1946         }
1947
1948         stop = ftrace_now(raw_smp_processor_id());
1949         ftrace_update_time = stop - start;
1950         ftrace_update_tot_cnt += ftrace_update_cnt;
1951
1952         return 0;
1953 }
1954
1955 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
1956 {
1957         struct ftrace_page *pg;
1958         int cnt;
1959         int i;
1960
1961         /* allocate a few pages */
1962         ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
1963         if (!ftrace_pages_start)
1964                 return -1;
1965
1966         /*
1967          * Allocate a few more pages.
1968          *
1969          * TODO: have some parser search vmlinux before
1970          *   final linking to find all calls to ftrace.
1971          *   Then we can:
1972          *    a) know how many pages to allocate.
1973          *     and/or
1974          *    b) set up the table then.
1975          *
1976          *  The dynamic code is still necessary for
1977          *  modules.
1978          */
1979
1980         pg = ftrace_pages = ftrace_pages_start;
1981
1982         cnt = num_to_init / ENTRIES_PER_PAGE;
1983         pr_info("ftrace: allocating %ld entries in %d pages\n",
1984                 num_to_init, cnt + 1);
1985
1986         for (i = 0; i < cnt; i++) {
1987                 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
1988
1989                 /* If we fail, we'll try later anyway */
1990                 if (!pg->next)
1991                         break;
1992
1993                 pg = pg->next;
1994         }
1995
1996         return 0;
1997 }
1998
1999 enum {
2000         FTRACE_ITER_FILTER      = (1 << 0),
2001         FTRACE_ITER_NOTRACE     = (1 << 1),
2002         FTRACE_ITER_PRINTALL    = (1 << 2),
2003         FTRACE_ITER_HASH        = (1 << 3),
2004         FTRACE_ITER_ENABLED     = (1 << 4),
2005 };
2006
2007 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
2008
2009 struct ftrace_iterator {
2010         loff_t                          pos;
2011         loff_t                          func_pos;
2012         struct ftrace_page              *pg;
2013         struct dyn_ftrace               *func;
2014         struct ftrace_func_probe        *probe;
2015         struct trace_parser             parser;
2016         struct ftrace_hash              *hash;
2017         struct ftrace_ops               *ops;
2018         int                             hidx;
2019         int                             idx;
2020         unsigned                        flags;
2021 };
2022
2023 static void *
2024 t_hash_next(struct seq_file *m, loff_t *pos)
2025 {
2026         struct ftrace_iterator *iter = m->private;
2027         struct hlist_node *hnd = NULL;
2028         struct hlist_head *hhd;
2029
2030         (*pos)++;
2031         iter->pos = *pos;
2032
2033         if (iter->probe)
2034                 hnd = &iter->probe->node;
2035  retry:
2036         if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
2037                 return NULL;
2038
2039         hhd = &ftrace_func_hash[iter->hidx];
2040
2041         if (hlist_empty(hhd)) {
2042                 iter->hidx++;
2043                 hnd = NULL;
2044                 goto retry;
2045         }
2046
2047         if (!hnd)
2048                 hnd = hhd->first;
2049         else {
2050                 hnd = hnd->next;
2051                 if (!hnd) {
2052                         iter->hidx++;
2053                         goto retry;
2054                 }
2055         }
2056
2057         if (WARN_ON_ONCE(!hnd))
2058                 return NULL;
2059
2060         iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
2061
2062         return iter;
2063 }
2064
2065 static void *t_hash_start(struct seq_file *m, loff_t *pos)
2066 {
2067         struct ftrace_iterator *iter = m->private;
2068         void *p = NULL;
2069         loff_t l;
2070
2071         if (iter->func_pos > *pos)
2072                 return NULL;
2073
2074         iter->hidx = 0;
2075         for (l = 0; l <= (*pos - iter->func_pos); ) {
2076                 p = t_hash_next(m, &l);
2077                 if (!p)
2078                         break;
2079         }
2080         if (!p)
2081                 return NULL;
2082
2083         /* Only set this if we have an item */
2084         iter->flags |= FTRACE_ITER_HASH;
2085
2086         return iter;
2087 }
2088
2089 static int
2090 t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
2091 {
2092         struct ftrace_func_probe *rec;
2093
2094         rec = iter->probe;
2095         if (WARN_ON_ONCE(!rec))
2096                 return -EIO;
2097
2098         if (rec->ops->print)
2099                 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
2100
2101         seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
2102
2103         if (rec->data)
2104                 seq_printf(m, ":%p", rec->data);
2105         seq_putc(m, '\n');
2106
2107         return 0;
2108 }
2109
2110 static void *
2111 t_next(struct seq_file *m, void *v, loff_t *pos)
2112 {
2113         struct ftrace_iterator *iter = m->private;
2114         struct ftrace_ops *ops = &global_ops;
2115         struct dyn_ftrace *rec = NULL;
2116
2117         if (unlikely(ftrace_disabled))
2118                 return NULL;
2119
2120         if (iter->flags & FTRACE_ITER_HASH)
2121                 return t_hash_next(m, pos);
2122
2123         (*pos)++;
2124         iter->pos = iter->func_pos = *pos;
2125
2126         if (iter->flags & FTRACE_ITER_PRINTALL)
2127                 return t_hash_start(m, pos);
2128
2129  retry:
2130         if (iter->idx >= iter->pg->index) {
2131                 if (iter->pg->next) {
2132                         iter->pg = iter->pg->next;
2133                         iter->idx = 0;
2134                         goto retry;
2135                 }
2136         } else {
2137                 rec = &iter->pg->records[iter->idx++];
2138                 if ((rec->flags & FTRACE_FL_FREE) ||
2139
2140                     ((iter->flags & FTRACE_ITER_FILTER) &&
2141                      !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||
2142
2143                     ((iter->flags & FTRACE_ITER_NOTRACE) &&
2144                      !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
2145
2146                     ((iter->flags & FTRACE_ITER_ENABLED) &&
2147                      !(rec->flags & ~FTRACE_FL_MASK))) {
2148
2149                         rec = NULL;
2150                         goto retry;
2151                 }
2152         }
2153
2154         if (!rec)
2155                 return t_hash_start(m, pos);
2156
2157         iter->func = rec;
2158
2159         return iter;
2160 }
2161
2162 static void reset_iter_read(struct ftrace_iterator *iter)
2163 {
2164         iter->pos = 0;
2165         iter->func_pos = 0;
2166         iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
2167 }
2168
2169 static void *t_start(struct seq_file *m, loff_t *pos)
2170 {
2171         struct ftrace_iterator *iter = m->private;
2172         struct ftrace_ops *ops = &global_ops;
2173         void *p = NULL;
2174         loff_t l;
2175
2176         mutex_lock(&ftrace_lock);
2177
2178         if (unlikely(ftrace_disabled))
2179                 return NULL;
2180
2181         /*
2182          * If an lseek was done, then reset and start from the beginning.
2183          */
2184         if (*pos < iter->pos)
2185                 reset_iter_read(iter);
2186
2187         /*
2188          * For set_ftrace_filter reading, if we have the filter
2189          * off, we can short cut and just print out that all
2190          * functions are enabled.
2191          */
2192         if (iter->flags & FTRACE_ITER_FILTER &&
2193             ftrace_hash_empty(ops->filter_hash)) {
2194                 if (*pos > 0)
2195                         return t_hash_start(m, pos);
2196                 iter->flags |= FTRACE_ITER_PRINTALL;
2197                 /* reset in case of seek/pread */
2198                 iter->flags &= ~FTRACE_ITER_HASH;
2199                 return iter;
2200         }
2201
2202         if (iter->flags & FTRACE_ITER_HASH)
2203                 return t_hash_start(m, pos);
2204
2205         /*
2206          * Unfortunately, we need to restart at ftrace_pages_start
2207          * every time we let go of ftrace_lock. This is because
2208          * those pointers can change without the lock.
2209          */
2210         iter->pg = ftrace_pages_start;
2211         iter->idx = 0;
2212         for (l = 0; l <= *pos; ) {
2213                 p = t_next(m, p, &l);
2214                 if (!p)
2215                         break;
2216         }
2217
2218         if (!p) {
2219                 if (iter->flags & FTRACE_ITER_FILTER)
2220                         return t_hash_start(m, pos);
2221
2222                 return NULL;
2223         }
2224
2225         return iter;
2226 }
2227
2228 static void t_stop(struct seq_file *m, void *p)
2229 {
2230         mutex_unlock(&ftrace_lock);
2231 }
2232
2233 static int t_show(struct seq_file *m, void *v)
2234 {
2235         struct ftrace_iterator *iter = m->private;
2236         struct dyn_ftrace *rec;
2237
2238         if (iter->flags & FTRACE_ITER_HASH)
2239                 return t_hash_show(m, iter);
2240
2241         if (iter->flags & FTRACE_ITER_PRINTALL) {
2242                 seq_printf(m, "#### all functions enabled ####\n");
2243                 return 0;
2244         }
2245
2246         rec = iter->func;
2247
2248         if (!rec)
2249                 return 0;
2250
2251         seq_printf(m, "%ps", (void *)rec->ip);
2252         if (iter->flags & FTRACE_ITER_ENABLED)
2253                 seq_printf(m, " (%ld)",
2254                            rec->flags & ~FTRACE_FL_MASK);
2255         seq_printf(m, "\n");
2256
2257         return 0;
2258 }
2259
2260 static const struct seq_operations show_ftrace_seq_ops = {
2261         .start = t_start,
2262         .next = t_next,
2263         .stop = t_stop,
2264         .show = t_show,
2265 };
2266
2267 static int
2268 ftrace_avail_open(struct inode *inode, struct file *file)
2269 {
2270         struct ftrace_iterator *iter;
2271         int ret;
2272
2273         if (unlikely(ftrace_disabled))
2274                 return -ENODEV;
2275
2276         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2277         if (!iter)
2278                 return -ENOMEM;
2279
2280         iter->pg = ftrace_pages_start;
2281
2282         ret = seq_open(file, &show_ftrace_seq_ops);
2283         if (!ret) {
2284                 struct seq_file *m = file->private_data;
2285
2286                 m->private = iter;
2287         } else {
2288                 kfree(iter);
2289         }
2290
2291         return ret;
2292 }
2293
2294 static int
2295 ftrace_enabled_open(struct inode *inode, struct file *file)
2296 {
2297         struct ftrace_iterator *iter;
2298         int ret;
2299
2300         if (unlikely(ftrace_disabled))
2301                 return -ENODEV;
2302
2303         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2304         if (!iter)
2305                 return -ENOMEM;
2306
2307         iter->pg = ftrace_pages_start;
2308         iter->flags = FTRACE_ITER_ENABLED;
2309
2310         ret = seq_open(file, &show_ftrace_seq_ops);
2311         if (!ret) {
2312                 struct seq_file *m = file->private_data;
2313
2314                 m->private = iter;
2315         } else {
2316                 kfree(iter);
2317         }
2318
2319         return ret;
2320 }
2321
2322 static void ftrace_filter_reset(struct ftrace_hash *hash)
2323 {
2324         mutex_lock(&ftrace_lock);
2325         ftrace_hash_clear(hash);
2326         mutex_unlock(&ftrace_lock);
2327 }
2328
2329 static int
2330 ftrace_regex_open(struct ftrace_ops *ops, int flag,
2331                   struct inode *inode, struct file *file)
2332 {
2333         struct ftrace_iterator *iter;
2334         struct ftrace_hash *hash;
2335         int ret = 0;
2336
2337         if (unlikely(ftrace_disabled))
2338                 return -ENODEV;
2339
2340         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2341         if (!iter)
2342                 return -ENOMEM;
2343
2344         if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
2345                 kfree(iter);
2346                 return -ENOMEM;
2347         }
2348
2349         if (flag & FTRACE_ITER_NOTRACE)
2350                 hash = ops->notrace_hash;
2351         else
2352                 hash = ops->filter_hash;
2353
2354         iter->ops = ops;
2355         iter->flags = flag;
2356
2357         if (file->f_mode & FMODE_WRITE) {
2358                 mutex_lock(&ftrace_lock);
2359                 iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
2360                 mutex_unlock(&ftrace_lock);
2361
2362                 if (!iter->hash) {
2363                         trace_parser_put(&iter->parser);
2364                         kfree(iter);
2365                         return -ENOMEM;
2366                 }
2367         }
2368
2369         mutex_lock(&ftrace_regex_lock);
2370
2371         if ((file->f_mode & FMODE_WRITE) &&
2372             (file->f_flags & O_TRUNC))
2373                 ftrace_filter_reset(iter->hash);
2374
2375         if (file->f_mode & FMODE_READ) {
2376                 iter->pg = ftrace_pages_start;
2377
2378                 ret = seq_open(file, &show_ftrace_seq_ops);
2379                 if (!ret) {
2380                         struct seq_file *m = file->private_data;
2381                         m->private = iter;
2382                 } else {
2383                         /* Failed */
2384                         free_ftrace_hash(iter->hash);
2385                         trace_parser_put(&iter->parser);
2386                         kfree(iter);
2387                 }
2388         } else
2389                 file->private_data = iter;
2390         mutex_unlock(&ftrace_regex_lock);
2391
2392         return ret;
2393 }
2394
2395 static int
2396 ftrace_filter_open(struct inode *inode, struct file *file)
2397 {
2398         return ftrace_regex_open(&global_ops, FTRACE_ITER_FILTER,
2399                                  inode, file);
2400 }
2401
2402 static int
2403 ftrace_notrace_open(struct inode *inode, struct file *file)
2404 {
2405         return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE,
2406                                  inode, file);
2407 }
2408
2409 static int ftrace_match(char *str, char *regex, int len, int type)
2410 {
2411         int matched = 0;
2412         int slen;
2413
2414         switch (type) {
2415         case MATCH_FULL:
2416                 if (strcmp(str, regex) == 0)
2417                         matched = 1;
2418                 break;
2419         case MATCH_FRONT_ONLY:
2420                 if (strncmp(str, regex, len) == 0)
2421                         matched = 1;
2422                 break;
2423         case MATCH_MIDDLE_ONLY:
2424                 if (strstr(str, regex))
2425                         matched = 1;
2426                 break;
2427         case MATCH_END_ONLY:
2428                 slen = strlen(str);
2429                 if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
2430                         matched = 1;
2431                 break;
2432         }
2433
2434         return matched;
2435 }
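/*
 * Note: the 'type' above comes from filter_parse_regex(), which maps the
 * user's glob to one of the MATCH_* values, e.g.:
 *
 *	"foo"	-> MATCH_FULL		(exact match)
 *	"foo*"	-> MATCH_FRONT_ONLY	(prefix match)
 *	"*foo"	-> MATCH_END_ONLY	(suffix match)
 *	"*foo*"	-> MATCH_MIDDLE_ONLY	(substring match)
 */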
2436
2437 static int
2438 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
2439 {
2440         struct ftrace_func_entry *entry;
2441         int ret = 0;
2442
2443         entry = ftrace_lookup_ip(hash, rec->ip);
2444         if (not) {
2445                 /* Do nothing if it doesn't exist */
2446                 if (!entry)
2447                         return 0;
2448
2449                 free_hash_entry(hash, entry);
2450         } else {
2451                 /* Do nothing if it exists */
2452                 if (entry)
2453                         return 0;
2454
2455                 ret = add_hash_entry(hash, rec->ip);
2456         }
2457         return ret;
2458 }
2459
2460 static int
2461 ftrace_match_record(struct dyn_ftrace *rec, char *mod,
2462                     char *regex, int len, int type)
2463 {
2464         char str[KSYM_SYMBOL_LEN];
2465         char *modname;
2466
2467         kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
2468
2469         if (mod) {
2470                 /* module lookup requires matching the module */
2471                 if (!modname || strcmp(modname, mod))
2472                         return 0;
2473
2474                 /* blank search means to match all funcs in the mod */
2475                 if (!len)
2476                         return 1;
2477         }
2478
2479         return ftrace_match(str, regex, len, type);
2480 }
2481
2482 static int
2483 match_records(struct ftrace_hash *hash, char *buff,
2484               int len, char *mod, int not)
2485 {
2486         unsigned search_len = 0;
2487         struct ftrace_page *pg;
2488         struct dyn_ftrace *rec;
2489         int type = MATCH_FULL;
2490         char *search = buff;
2491         int found = 0;
2492         int ret;
2493
2494         if (len) {
2495                 type = filter_parse_regex(buff, len, &search, &not);
2496                 search_len = strlen(search);
2497         }
2498
2499         mutex_lock(&ftrace_lock);
2500
2501         if (unlikely(ftrace_disabled))
2502                 goto out_unlock;
2503
2504         do_for_each_ftrace_rec(pg, rec) {
2505
2506                 if (ftrace_match_record(rec, mod, search, search_len, type)) {
2507                         ret = enter_record(hash, rec, not);
2508                         if (ret < 0) {
2509                                 found = ret;
2510                                 goto out_unlock;
2511                         }
2512                         found = 1;
2513                 }
2514         } while_for_each_ftrace_rec();
2515  out_unlock:
2516         mutex_unlock(&ftrace_lock);
2517
2518         return found;
2519 }
2520
2521 static int
2522 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
2523 {
2524         return match_records(hash, buff, len, NULL, 0);
2525 }
2526
2527 static int
2528 ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
2529 {
2530         int not = 0;
2531
2532         /* blank or '*' mean the same */
2533         if (strcmp(buff, "*") == 0)
2534                 buff[0] = 0;
2535
2536         /* handle the case of 'don't filter this module' */
2537         if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
2538                 buff[0] = 0;
2539                 not = 1;
2540         }
2541
2542         return match_records(hash, buff, strlen(buff), mod, not);
2543 }
2544
2545 /*
2546  * We register the module command as a template to show others how
2547  * to register a command as well.
2548  */
2549
2550 static int
2551 ftrace_mod_callback(struct ftrace_hash *hash,
2552                     char *func, char *cmd, char *param, int enable)
2553 {
2554         char *mod;
2555         int ret = -EINVAL;
2556
2557         /*
2558          * cmd == 'mod' because we only registered this func
2559          * for the 'mod' ftrace_func_command.
2560          * But if you register one func with multiple commands,
2561          * you can tell which command was used by the cmd
2562          * parameter.
2563          */
2564
2565         /* we must have a module name */
2566         if (!param)
2567                 return ret;
2568
2569         mod = strsep(&param, ":");
2570         if (!strlen(mod))
2571                 return ret;
2572
2573         ret = ftrace_match_module_records(hash, func, mod);
2574         if (!ret)
2575                 ret = -EINVAL;
2576         if (ret < 0)
2577                 return ret;
2578
2579         return 0;
2580 }
2581
2582 static struct ftrace_func_command ftrace_mod_cmd = {
2583         .name                   = "mod",
2584         .func                   = ftrace_mod_callback,
2585 };
2586
2587 static int __init ftrace_mod_cmd_init(void)
2588 {
2589         return register_ftrace_command(&ftrace_mod_cmd);
2590 }
2591 device_initcall(ftrace_mod_cmd_init);
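/*
 * Sketch of how another command could be registered, following the "mod"
 * template above.  The "foo" name and callback below are hypothetical and
 * only illustrate the expected shape; a write of "func:foo:param" to
 * set_ftrace_filter would then reach the callback via ftrace_process_regex().
 *
 *	static int ftrace_foo_callback(struct ftrace_hash *hash,
 *				       char *func, char *cmd, char *param,
 *				       int enable)
 *	{
 *		// act on 'func' and the optional 'param'
 *		return 0;
 *	}
 *
 *	static struct ftrace_func_command ftrace_foo_cmd = {
 *		.name	= "foo",
 *		.func	= ftrace_foo_callback,
 *	};
 *
 *	static int __init ftrace_foo_cmd_init(void)
 *	{
 *		return register_ftrace_command(&ftrace_foo_cmd);
 *	}
 *	device_initcall(ftrace_foo_cmd_init);
 */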
2592
2593 static void
2594 function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
2595 {
2596         struct ftrace_func_probe *entry;
2597         struct hlist_head *hhd;
2598         struct hlist_node *n;
2599         unsigned long key;
2600
2601         key = hash_long(ip, FTRACE_HASH_BITS);
2602
2603         hhd = &ftrace_func_hash[key];
2604
2605         if (hlist_empty(hhd))
2606                 return;
2607
2608         /*
2609          * Disable preemption for these calls to prevent an RCU grace
2610          * period from elapsing. This syncs the hash iteration with the
2611          * freeing of items on the hash. rcu_read_lock is too dangerous here.
2612          */
2613         preempt_disable_notrace();
2614         hlist_for_each_entry_rcu(entry, n, hhd, node) {
2615                 if (entry->ip == ip)
2616                         entry->ops->func(ip, parent_ip, &entry->data);
2617         }
2618         preempt_enable_notrace();
2619 }
2620
2621 static struct ftrace_ops trace_probe_ops __read_mostly =
2622 {
2623         .func           = function_trace_probe_call,
2624 };
2625
2626 static int ftrace_probe_registered;
2627
2628 static void __enable_ftrace_function_probe(void)
2629 {
2630         int ret;
2631         int i;
2632
2633         if (ftrace_probe_registered)
2634                 return;
2635
2636         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2637                 struct hlist_head *hhd = &ftrace_func_hash[i];
2638                 if (hhd->first)
2639                         break;
2640         }
2641         /* Nothing registered? */
2642         if (i == FTRACE_FUNC_HASHSIZE)
2643                 return;
2644
2645         ret = ftrace_startup(&trace_probe_ops, 0);
2646
2647         ftrace_probe_registered = 1;
2648 }
2649
2650 static void __disable_ftrace_function_probe(void)
2651 {
2652         int i;
2653
2654         if (!ftrace_probe_registered)
2655                 return;
2656
2657         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2658                 struct hlist_head *hhd = &ftrace_func_hash[i];
2659                 if (hhd->first)
2660                         return;
2661         }
2662
2663         /* no more funcs left */
2664         ftrace_shutdown(&trace_probe_ops, 0);
2665
2666         ftrace_probe_registered = 0;
2667 }
2668
2669
2670 static void ftrace_free_entry_rcu(struct rcu_head *rhp)
2671 {
2672         struct ftrace_func_probe *entry =
2673                 container_of(rhp, struct ftrace_func_probe, rcu);
2674
2675         if (entry->ops->free)
2676                 entry->ops->free(&entry->data);
2677         kfree(entry);
2678 }
2679
2680
2681 int
2682 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2683                               void *data)
2684 {
2685         struct ftrace_func_probe *entry;
2686         struct ftrace_page *pg;
2687         struct dyn_ftrace *rec;
2688         int type, len, not;
2689         unsigned long key;
2690         int count = 0;
2691         char *search;
2692
2693         type = filter_parse_regex(glob, strlen(glob), &search, &not);
2694         len = strlen(search);
2695
2696         /* we do not support '!' for function probes */
2697         if (WARN_ON(not))
2698                 return -EINVAL;
2699
2700         mutex_lock(&ftrace_lock);
2701
2702         if (unlikely(ftrace_disabled))
2703                 goto out_unlock;
2704
2705         do_for_each_ftrace_rec(pg, rec) {
2706
2707                 if (!ftrace_match_record(rec, NULL, search, len, type))
2708                         continue;
2709
2710                 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2711                 if (!entry) {
2712                         /* If we did not process any, then return error */
2713                         if (!count)
2714                                 count = -ENOMEM;
2715                         goto out_unlock;
2716                 }
2717
2718                 count++;
2719
2720                 entry->data = data;
2721
2722                 /*
2723                  * The caller might want to do something special
2724                  * for each function we find. We call the callback
2725                  * to give the caller an opportunity to do so.
2726                  */
2727                 if (ops->callback) {
2728                         if (ops->callback(rec->ip, &entry->data) < 0) {
2729                                 /* caller does not like this func */
2730                                 kfree(entry);
2731                                 continue;
2732                         }
2733                 }
2734
2735                 entry->ops = ops;
2736                 entry->ip = rec->ip;
2737
2738                 key = hash_long(entry->ip, FTRACE_HASH_BITS);
2739                 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
2740
2741         } while_for_each_ftrace_rec();
2742         __enable_ftrace_function_probe();
2743
2744  out_unlock:
2745         mutex_unlock(&ftrace_lock);
2746
2747         return count;
2748 }
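/*
 * Sketch of a caller of register_ftrace_function_probe() (e.g. the
 * traceon/traceoff triggers in trace_functions.c are built on it).  The
 * my_* names below are hypothetical and only illustrate the expected shape.
 *
 *	static void my_probe_func(unsigned long ip, unsigned long parent_ip,
 *				  void **data)
 *	{
 *		// runs every time a matched function is hit
 *	}
 *
 *	static struct ftrace_probe_ops my_probe_ops = {
 *		.func	= my_probe_func,
 *	};
 *
 *	// attach the probe to every function matching "schedule*"
 *	register_ftrace_function_probe("schedule*", &my_probe_ops, NULL);
 */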
2749
2750 enum {
2751         PROBE_TEST_FUNC         = 1,
2752         PROBE_TEST_DATA         = 2
2753 };
2754
2755 static void
2756 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2757                                   void *data, int flags)
2758 {
2759         struct ftrace_func_probe *entry;
2760         struct hlist_node *n, *tmp;
2761         char str[KSYM_SYMBOL_LEN];
2762         int type = MATCH_FULL;
2763         int i, len = 0;
2764         char *search;
2765
2766         if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
2767                 glob = NULL;
2768         else if (glob) {
2769                 int not;
2770
2771                 type = filter_parse_regex(glob, strlen(glob), &search, &not);
2772                 len = strlen(search);
2773
2774                 /* we do not support '!' for function probes */
2775                 if (WARN_ON(not))
2776                         return;
2777         }
2778
2779         mutex_lock(&ftrace_lock);
2780         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2781                 struct hlist_head *hhd = &ftrace_func_hash[i];
2782
2783                 hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
2784
2785                         /* break up if statements for readability */
2786                         if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
2787                                 continue;
2788
2789                         if ((flags & PROBE_TEST_DATA) && entry->data != data)
2790                                 continue;
2791
2792                         /* do this last, since it is the most expensive */
2793                         if (glob) {
2794                                 kallsyms_lookup(entry->ip, NULL, NULL,
2795                                                 NULL, str);
2796                                 if (!ftrace_match(str, glob, len, type))
2797                                         continue;
2798                         }
2799
2800                         hlist_del_rcu(&entry->node);
2801                         call_rcu_sched(&entry->rcu, ftrace_free_entry_rcu);
2802                 }
2803         }
2804         __disable_ftrace_function_probe();
2805         mutex_unlock(&ftrace_lock);
2806 }
2807
2808 void
2809 unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2810                                 void *data)
2811 {
2812         __unregister_ftrace_function_probe(glob, ops, data,
2813                                           PROBE_TEST_FUNC | PROBE_TEST_DATA);
2814 }
2815
2816 void
2817 unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
2818 {
2819         __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
2820 }
2821
2822 void unregister_ftrace_function_probe_all(char *glob)
2823 {
2824         __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
2825 }
2826
2827 static LIST_HEAD(ftrace_commands);
2828 static DEFINE_MUTEX(ftrace_cmd_mutex);
2829
2830 int register_ftrace_command(struct ftrace_func_command *cmd)
2831 {
2832         struct ftrace_func_command *p;
2833         int ret = 0;
2834
2835         mutex_lock(&ftrace_cmd_mutex);
2836         list_for_each_entry(p, &ftrace_commands, list) {
2837                 if (strcmp(cmd->name, p->name) == 0) {
2838                         ret = -EBUSY;
2839                         goto out_unlock;
2840                 }
2841         }
2842         list_add(&cmd->list, &ftrace_commands);
2843  out_unlock:
2844         mutex_unlock(&ftrace_cmd_mutex);
2845
2846         return ret;
2847 }
2848
2849 int unregister_ftrace_command(struct ftrace_func_command *cmd)
2850 {
2851         struct ftrace_func_command *p, *n;
2852         int ret = -ENODEV;
2853
2854         mutex_lock(&ftrace_cmd_mutex);
2855         list_for_each_entry_safe(p, n, &ftrace_commands, list) {
2856                 if (strcmp(cmd->name, p->name) == 0) {
2857                         ret = 0;
2858                         list_del_init(&p->list);
2859                         goto out_unlock;
2860                 }
2861         }
2862  out_unlock:
2863         mutex_unlock(&ftrace_cmd_mutex);
2864
2865         return ret;
2866 }
2867
2868 static int ftrace_process_regex(struct ftrace_hash *hash,
2869                                 char *buff, int len, int enable)
2870 {
2871         char *func, *command, *next = buff;
2872         struct ftrace_func_command *p;
2873         int ret = -EINVAL;
2874
2875         func = strsep(&next, ":");
2876
2877         if (!next) {
2878                 ret = ftrace_match_records(hash, func, len);
2879                 if (!ret)
2880                         ret = -EINVAL;
2881                 if (ret < 0)
2882                         return ret;
2883                 return 0;
2884         }
2885
2886         /* command found */
2887
2888         command = strsep(&next, ":");
2889
2890         mutex_lock(&ftrace_cmd_mutex);
2891         list_for_each_entry(p, &ftrace_commands, list) {
2892                 if (strcmp(p->name, command) == 0) {
2893                         ret = p->func(hash, func, command, next, enable);
2894                         goto out_unlock;
2895                 }
2896         }
2897  out_unlock:
2898         mutex_unlock(&ftrace_cmd_mutex);
2899
2900         return ret;
2901 }
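/*
 * Writes parsed here have the form "<glob>[:<command>[:<param>]]".  A bare
 * glob updates the filter/notrace hash directly; anything after the first
 * ':' selects a registered ftrace_func_command.  Illustrative shell usage
 * (function and module names are arbitrary):
 *
 *	echo 'sys_nanosleep' > set_ftrace_filter
 *	echo '*:mod:ext3'    > set_ftrace_filter
 */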
2902
2903 static ssize_t
2904 ftrace_regex_write(struct file *file, const char __user *ubuf,
2905                    size_t cnt, loff_t *ppos, int enable)
2906 {
2907         struct ftrace_iterator *iter;
2908         struct trace_parser *parser;
2909         ssize_t ret, read;
2910
2911         if (!cnt)
2912                 return 0;
2913
2914         mutex_lock(&ftrace_regex_lock);
2915
2916         ret = -ENODEV;
2917         if (unlikely(ftrace_disabled))
2918                 goto out_unlock;
2919
2920         if (file->f_mode & FMODE_READ) {
2921                 struct seq_file *m = file->private_data;
2922                 iter = m->private;
2923         } else
2924                 iter = file->private_data;
2925
2926         parser = &iter->parser;
2927         read = trace_get_user(parser, ubuf, cnt, ppos);
2928
2929         if (read >= 0 && trace_parser_loaded(parser) &&
2930             !trace_parser_cont(parser)) {
2931                 ret = ftrace_process_regex(iter->hash, parser->buffer,
2932                                            parser->idx, enable);
2933                 trace_parser_clear(parser);
2934                 if (ret)
2935                         goto out_unlock;
2936         }
2937
2938         ret = read;
2939 out_unlock:
2940         mutex_unlock(&ftrace_regex_lock);
2941
2942         return ret;
2943 }
2944
2945 static ssize_t
2946 ftrace_filter_write(struct file *file, const char __user *ubuf,
2947                     size_t cnt, loff_t *ppos)
2948 {
2949         return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
2950 }
2951
2952 static ssize_t
2953 ftrace_notrace_write(struct file *file, const char __user *ubuf,
2954                      size_t cnt, loff_t *ppos)
2955 {
2956         return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
2957 }
2958
2959 static int
2960 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
2961                  int reset, int enable)
2962 {
2963         struct ftrace_hash **orig_hash;
2964         struct ftrace_hash *hash;
2965         int ret;
2966
2967         /* All global ops use the global ops filters */
2968         if (ops->flags & FTRACE_OPS_FL_GLOBAL)
2969                 ops = &global_ops;
2970
2971         if (unlikely(ftrace_disabled))
2972                 return -ENODEV;
2973
2974         if (enable)
2975                 orig_hash = &ops->filter_hash;
2976         else
2977                 orig_hash = &ops->notrace_hash;
2978
2979         hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
2980         if (!hash)
2981                 return -ENOMEM;
2982
2983         mutex_lock(&ftrace_regex_lock);
2984         if (reset)
2985                 ftrace_filter_reset(hash);
2986         if (buf)
2987                 ftrace_match_records(hash, buf, len);
2988
2989         mutex_lock(&ftrace_lock);
2990         ret = ftrace_hash_move(ops, enable, orig_hash, hash);
2991         if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
2992             && ftrace_enabled)
2993                 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
2994
2995         mutex_unlock(&ftrace_lock);
2996
2997         mutex_unlock(&ftrace_regex_lock);
2998
2999         free_ftrace_hash(hash);
3000         return ret;
3001 }
3002
3003 /**
3004  * ftrace_set_filter - set a function to filter on in ftrace
3005  * @ops - the ops to set the filter with
3006  * @buf - the string that holds the function filter text.
3007  * @len - the length of the string.
3008  * @reset - non zero to reset all filters before applying this filter.
3009  *
3010  * Filters denote which functions should be enabled when tracing is enabled.
3011  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
3012  */
3013 void ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
3014                        int len, int reset)
3015 {
3016         ftrace_set_regex(ops, buf, len, reset, 1);
3017 }
3018 EXPORT_SYMBOL_GPL(ftrace_set_filter);
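/*
 * Minimal usage sketch, assuming a caller-defined ops (the my_* names are
 * hypothetical).  ftrace_set_notrace() below is used the same way for the
 * notrace hash.
 *
 *	static void my_trace_func(unsigned long ip, unsigned long parent_ip)
 *	{
 *		// called for every traced function
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_trace_func,
 *	};
 *
 *	// trace only functions whose names start with "kmalloc"
 *	ftrace_set_filter(&my_ops, "kmalloc*", strlen("kmalloc*"), 1);
 *	register_ftrace_function(&my_ops);
 */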
3019
3020 /**
3021  * ftrace_set_notrace - set a function to not trace in ftrace
3022  * @ops - the ops to set the notrace filter with
3023  * @buf - the string that holds the function notrace text.
3024  * @len - the length of the string.
3025  * @reset - non zero to reset all filters before applying this filter.
3026  *
3027  * Notrace Filters denote which functions should not be enabled when tracing
3028  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
3029  * for tracing.
3030  */
3031 void ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
3032                         int len, int reset)
3033 {
3034         ftrace_set_regex(ops, buf, len, reset, 0);
3035 }
3036 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
3037 /**
3038  * ftrace_set_global_filter - set a function to filter on with global tracers
3040  * @buf - the string that holds the function filter text.
3041  * @len - the length of the string.
3042  * @reset - non zero to reset all filters before applying this filter.
3043  *
3044  * Filters denote which functions should be enabled when tracing is enabled.
3045  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
3046  */
3047 void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
3048 {
3049         ftrace_set_regex(&global_ops, buf, len, reset, 1);
3050 }
3051 EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
3052
3053 /**
3054  * ftrace_set_global_notrace - set a function to not trace with global tracers
3056  * @buf - the string that holds the function notrace text.
3057  * @len - the length of the string.
3058  * @reset - non zero to reset all filters before applying this filter.
3059  *
3060  * Notrace Filters denote which functions should not be enabled when tracing
3061  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
3062  * for tracing.
3063  */
3064 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
3065 {
3066         ftrace_set_regex(&global_ops, buf, len, reset, 0);
3067 }
3068 EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
3069
3070 /*
3071  * command line interface to allow users to set filters on boot up.
3072  */
3073 #define FTRACE_FILTER_SIZE              COMMAND_LINE_SIZE
3074 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
3075 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
3076
3077 static int __init set_ftrace_notrace(char *str)
3078 {
3079         strncpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
3080         return 1;
3081 }
3082 __setup("ftrace_notrace=", set_ftrace_notrace);
3083
3084 static int __init set_ftrace_filter(char *str)
3085 {
3086         strncpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
3087         return 1;
3088 }
3089 __setup("ftrace_filter=", set_ftrace_filter);
3090
3091 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3092 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
3093 static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
3094
3095 static int __init set_graph_function(char *str)
3096 {
3097         strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
3098         return 1;
3099 }
3100 __setup("ftrace_graph_filter=", set_graph_function);
3101
3102 static void __init set_ftrace_early_graph(char *buf)
3103 {
3104         int ret;
3105         char *func;
3106
3107         while (buf) {
3108                 func = strsep(&buf, ",");
3109                 /* we allow only one expression at a time */
3110                 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3111                                       func);
3112                 if (ret)
3113                         printk(KERN_DEBUG "ftrace: function %s not "
3114                                           "traceable\n", func);
3115         }
3116 }
3117 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3118
3119 static void __init
3120 set_ftrace_early_filter(struct ftrace_ops *ops, char *buf, int enable)
3121 {
3122         char *func;
3123
3124         while (buf) {
3125                 func = strsep(&buf, ",");
3126                 ftrace_set_regex(ops, func, strlen(func), 0, enable);
3127         }
3128 }
3129
3130 static void __init set_ftrace_early_filters(void)
3131 {
3132         if (ftrace_filter_buf[0])
3133                 set_ftrace_early_filter(&global_ops, ftrace_filter_buf, 1);
3134         if (ftrace_notrace_buf[0])
3135                 set_ftrace_early_filter(&global_ops, ftrace_notrace_buf, 0);
3136 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3137         if (ftrace_graph_buf[0])
3138                 set_ftrace_early_graph(ftrace_graph_buf);
3139 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3140 }
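/*
 * Example boot parameters handled above (function names are only
 * illustrative); each option takes a comma-separated list of globs:
 *
 *	ftrace_filter=kmalloc*,schedule
 *	ftrace_notrace=rcu_read_lock,rcu_read_unlock
 *	ftrace_graph_filter=__schedule
 */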
3141
3142 static int
3143 ftrace_regex_release(struct inode *inode, struct file *file)
3144 {
3145         struct seq_file *m = (struct seq_file *)file->private_data;
3146         struct ftrace_iterator *iter;
3147         struct ftrace_hash **orig_hash;
3148         struct trace_parser *parser;
3149         int filter_hash;
3150         int ret;
3151
3152         mutex_lock(&ftrace_regex_lock);
3153         if (file->f_mode & FMODE_READ) {
3154                 iter = m->private;
3155
3156                 seq_release(inode, file);
3157         } else
3158                 iter = file->private_data;
3159
3160         parser = &iter->parser;
3161         if (trace_parser_loaded(parser)) {
3162                 parser->buffer[parser->idx] = 0;
3163                 ftrace_match_records(iter->hash, parser->buffer, parser->idx);
3164         }
3165
3166         trace_parser_put(parser);
3167
3168         if (file->f_mode & FMODE_WRITE) {
3169                 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
3170
3171                 if (filter_hash)
3172                         orig_hash = &iter->ops->filter_hash;
3173                 else
3174                         orig_hash = &iter->ops->notrace_hash;
3175
3176                 mutex_lock(&ftrace_lock);
3177                 ret = ftrace_hash_move(iter->ops, filter_hash,
3178                                        orig_hash, iter->hash);
3179                 if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
3180                     && ftrace_enabled)
3181                         ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3182
3183                 mutex_unlock(&ftrace_lock);
3184         }
3185         free_ftrace_hash(iter->hash);
3186         kfree(iter);
3187
3188         mutex_unlock(&ftrace_regex_lock);
3189         return 0;
3190 }
3191
3192 static const struct file_operations ftrace_avail_fops = {
3193         .open = ftrace_avail_open,
3194         .read = seq_read,
3195         .llseek = seq_lseek,
3196         .release = seq_release_private,
3197 };
3198
3199 static const struct file_operations ftrace_enabled_fops = {
3200         .open = ftrace_enabled_open,
3201         .read = seq_read,
3202         .llseek = seq_lseek,
3203         .release = seq_release_private,
3204 };
3205
3206 static const struct file_operations ftrace_filter_fops = {
3207         .open = ftrace_filter_open,
3208         .read = seq_read,
3209         .write = ftrace_filter_write,
3210         .llseek = ftrace_filter_lseek,
3211         .release = ftrace_regex_release,
3212 };
3213
3214 static const struct file_operations ftrace_notrace_fops = {
3215         .open = ftrace_notrace_open,
3216         .read = seq_read,
3217         .write = ftrace_notrace_write,
3218         .llseek = ftrace_filter_lseek,
3219         .release = ftrace_regex_release,
3220 };
3221
3222 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3223
3224 static DEFINE_MUTEX(graph_lock);
3225
3226 int ftrace_graph_count;
3227 int ftrace_graph_filter_enabled;
3228 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
3229
3230 static void *
3231 __g_next(struct seq_file *m, loff_t *pos)
3232 {
3233         if (*pos >= ftrace_graph_count)
3234                 return NULL;
3235         return &ftrace_graph_funcs[*pos];
3236 }
3237
3238 static void *
3239 g_next(struct seq_file *m, void *v, loff_t *pos)
3240 {
3241         (*pos)++;
3242         return __g_next(m, pos);
3243 }
3244
3245 static void *g_start(struct seq_file *m, loff_t *pos)
3246 {
3247         mutex_lock(&graph_lock);
3248
3249         /* Nothing set; tell g_show to print that all functions are enabled */
3250         if (!ftrace_graph_filter_enabled && !*pos)
3251                 return (void *)1;
3252
3253         return __g_next(m, pos);
3254 }
3255
3256 static void g_stop(struct seq_file *m, void *p)
3257 {
3258         mutex_unlock(&graph_lock);
3259 }
3260
3261 static int g_show(struct seq_file *m, void *v)
3262 {
3263         unsigned long *ptr = v;
3264
3265         if (!ptr)
3266                 return 0;
3267
3268         if (ptr == (unsigned long *)1) {
3269                 seq_printf(m, "#### all functions enabled ####\n");
3270                 return 0;
3271         }
3272
3273         seq_printf(m, "%ps\n", (void *)*ptr);
3274
3275         return 0;
3276 }
3277
3278 static const struct seq_operations ftrace_graph_seq_ops = {
3279         .start = g_start,
3280         .next = g_next,
3281         .stop = g_stop,
3282         .show = g_show,
3283 };
3284
3285 static int
3286 ftrace_graph_open(struct inode *inode, struct file *file)
3287 {
3288         int ret = 0;
3289
3290         if (unlikely(ftrace_disabled))
3291                 return -ENODEV;
3292
3293         mutex_lock(&graph_lock);
3294         if ((file->f_mode & FMODE_WRITE) &&
3295             (file->f_flags & O_TRUNC)) {
3296                 ftrace_graph_filter_enabled = 0;
3297                 ftrace_graph_count = 0;
3298                 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
3299         }
3300         mutex_unlock(&graph_lock);
3301
3302         if (file->f_mode & FMODE_READ)
3303                 ret = seq_open(file, &ftrace_graph_seq_ops);
3304
3305         return ret;
3306 }
3307
3308 static int
3309 ftrace_graph_release(struct inode *inode, struct file *file)
3310 {
3311         if (file->f_mode & FMODE_READ)
3312                 seq_release(inode, file);
3313         return 0;
3314 }
3315
3316 static int
3317 ftrace_set_func(unsigned long *array, int *idx, char *buffer)
3318 {
3319         struct dyn_ftrace *rec;
3320         struct ftrace_page *pg;
3321         int search_len;
3322         int fail = 1;
3323         int type, not;
3324         char *search;
3325         bool exists;
3326         int i;
3327
3328         /* decode regex */
3329         type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
3330         if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
3331                 return -EBUSY;
3332
3333         search_len = strlen(search);
3334
3335         mutex_lock(&ftrace_lock);
3336
3337         if (unlikely(ftrace_disabled)) {
3338                 mutex_unlock(&ftrace_lock);
3339                 return -ENODEV;
3340         }
3341
3342         do_for_each_ftrace_rec(pg, rec) {
3343
3344                 if (rec->flags & FTRACE_FL_FREE)
3345                         continue;
3346
3347                 if (ftrace_match_record(rec, NULL, search, search_len, type)) {
3348                         /* if it is in the array */
3349                         exists = false;
3350                         for (i = 0; i < *idx; i++) {
3351                                 if (array[i] == rec->ip) {
3352                                         exists = true;
3353                                         break;
3354                                 }
3355                         }
3356
3357                         if (!not) {
3358                                 fail = 0;
3359                                 if (!exists) {
3360                                         array[(*idx)++] = rec->ip;
3361                                         if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
3362                                                 goto out;
3363                                 }
3364                         } else {
3365                                 if (exists) {
3366                                         array[i] = array[--(*idx)];
3367                                         array[*idx] = 0;
3368                                         fail = 0;
3369                                 }
3370                         }
3371                 }
3372         } while_for_each_ftrace_rec();
3373 out:
3374         mutex_unlock(&ftrace_lock);
3375
3376         if (fail)
3377                 return -EINVAL;
3378
3379         ftrace_graph_filter_enabled = !!(*idx);
3380
3381         return 0;
3382 }
3383
3384 static ssize_t
3385 ftrace_graph_write(struct file *file, const char __user *ubuf,
3386                    size_t cnt, loff_t *ppos)
3387 {
3388         struct trace_parser parser;
3389         ssize_t read, ret;
3390
3391         if (!cnt)
3392                 return 0;
3393
3394         mutex_lock(&graph_lock);
3395
3396         if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
3397                 ret = -ENOMEM;
3398                 goto out_unlock;
3399         }
3400
3401         read = trace_get_user(&parser, ubuf, cnt, ppos);
3402
3403         if (read >= 0 && trace_parser_loaded(&parser)) {
3404                 parser.buffer[parser.idx] = 0;
3405
3406                 /* we allow only one expression at a time */
3407                 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3408                                         parser.buffer);
3409                 if (ret)
3410                         goto out_free;
3411         }
3412
3413         ret = read;
3414
3415 out_free:
3416         trace_parser_put(&parser);
3417 out_unlock:
3418         mutex_unlock(&graph_lock);
3419
3420         return ret;
3421 }
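/*
 * From user space this parser is reached through the set_graph_function
 * file; only one expression is handled at a time, but appending adds to
 * the existing list, e.g.:
 *
 *	echo '__schedule'  > set_graph_function
 *	echo 'kmem_cache*' >> set_graph_function
 */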
3422
3423 static const struct file_operations ftrace_graph_fops = {
3424         .open           = ftrace_graph_open,
3425         .read           = seq_read,
3426         .write          = ftrace_graph_write,
3427         .llseek         = ftrace_filter_lseek,
3428         .release        = ftrace_graph_release,
3429 };
3430 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3431
3432 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
3433 {
3434
3435         trace_create_file("available_filter_functions", 0444,
3436                         d_tracer, NULL, &ftrace_avail_fops);
3437
3438         trace_create_file("enabled_functions", 0444,
3439                         d_tracer, NULL, &ftrace_enabled_fops);
3440
3441         trace_create_file("set_ftrace_filter", 0644, d_tracer,
3442                         NULL, &ftrace_filter_fops);
3443
3444         trace_create_file("set_ftrace_notrace", 0644, d_tracer,
3445                                     NULL, &ftrace_notrace_fops);
3446
3447 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3448         trace_create_file("set_graph_function", 0644, d_tracer,
3449                                     NULL,
3450                                     &ftrace_graph_fops);
3451 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3452
3453         return 0;
3454 }
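/*
 * The control files created above live in the tracing debugfs directory
 * (typically mounted at /sys/kernel/debug/tracing; the path is an
 * assumption about the usual setup, not something this file enforces).
 * For example, restricting the function tracer to a single function
 * might look like:
 *
 *	echo schedule > /sys/kernel/debug/tracing/set_ftrace_filter
 */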
3455
3456 static int ftrace_process_locs(struct module *mod,
3457                                unsigned long *start,
3458                                unsigned long *end)
3459 {
3460         unsigned long *p;
3461         unsigned long addr;
3462         unsigned long flags = 0; /* Shut up gcc */
3463
3464         mutex_lock(&ftrace_lock);
3465         p = start;
3466         while (p < end) {
3467                 addr = ftrace_call_adjust(*p++);
3468                 /*
3469                  * Some architecture linkers will pad between
3470                  * the different mcount_loc sections of different
3471                  * object files to satisfy alignments.
3472                  * Skip any NULL pointers.
3473                  */
3474                 if (!addr)
3475                         continue;
3476                 ftrace_record_ip(addr);
3477         }
3478
3479         /*
3480          * We only need to disable interrupts on start up
3481          * because we are modifying code that an interrupt
3482          * may execute, and the modification is not atomic.
3483          * But for modules, nothing runs the code we modify
3484          * until we are finished with it, and there's no
3485          * reason to cause large interrupt latencies while we do it.
3486          */
3487         if (!mod)
3488                 local_irq_save(flags);
3489         ftrace_update_code(mod);
3490         if (!mod)
3491                 local_irq_restore(flags);
3492         mutex_unlock(&ftrace_lock);
3493
3494         return 0;
3495 }
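/*
 * Each entry handed to ftrace_process_locs() is an mcount call-site
 * address collected at build time into an mcount_loc table (see the
 * __start_mcount_loc/__stop_mcount_loc markers used by ftrace_init()
 * below); ftrace_call_adjust() lets the architecture adjust the recorded
 * address before ftrace_record_ip() adds it to the dyn_ftrace records.
 */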
3496
3497 #ifdef CONFIG_MODULES
3498 void ftrace_release_mod(struct module *mod)
3499 {
3500         struct dyn_ftrace *rec;
3501         struct ftrace_page *pg;
3502
3503         mutex_lock(&ftrace_lock);
3504
3505         if (ftrace_disabled)
3506                 goto out_unlock;
3507
3508         do_for_each_ftrace_rec(pg, rec) {
3509                 if (within_module_core(rec->ip, mod)) {
3510                         /*
3511                          * rec->ip is changed in ftrace_free_rec(), so it
3512                          * should not be within the module if the record was freed.
3513                          */
3514                         FTRACE_WARN_ON(rec->flags & FTRACE_FL_FREE);
3515                         ftrace_free_rec(rec);
3516                 }
3517         } while_for_each_ftrace_rec();
3518  out_unlock:
3519         mutex_unlock(&ftrace_lock);
3520 }
3521
3522 static void ftrace_init_module(struct module *mod,
3523                                unsigned long *start, unsigned long *end)
3524 {
3525         if (ftrace_disabled || start == end)
3526                 return;
3527         ftrace_process_locs(mod, start, end);
3528 }
3529
3530 static int ftrace_module_notify_enter(struct notifier_block *self,
3531                                       unsigned long val, void *data)
3532 {
3533         struct module *mod = data;
3534
3535         if (val == MODULE_STATE_COMING)
3536                 ftrace_init_module(mod, mod->ftrace_callsites,
3537                                    mod->ftrace_callsites +
3538                                    mod->num_ftrace_callsites);
3539         return 0;
3540 }
3541
3542 static int ftrace_module_notify_exit(struct notifier_block *self,
3543                                      unsigned long val, void *data)
3544 {
3545         struct module *mod = data;
3546
3547         if (val == MODULE_STATE_GOING)
3548                 ftrace_release_mod(mod);
3549
3550         return 0;
3551 }
3552 #else
3553 static int ftrace_module_notify_enter(struct notifier_block *self,
3554                                       unsigned long val, void *data)
3555 {
3556         return 0;
3557 }
3558 static int ftrace_module_notify_exit(struct notifier_block *self,
3559                                      unsigned long val, void *data)
3560 {
3561         return 0;
3562 }
3563 #endif /* CONFIG_MODULES */
3564
3565 struct notifier_block ftrace_module_enter_nb = {
3566         .notifier_call = ftrace_module_notify_enter,
3567         .priority = INT_MAX,    /* Run before anything that can use kprobes */
3568 };
3569
3570 struct notifier_block ftrace_module_exit_nb = {
3571         .notifier_call = ftrace_module_notify_exit,
3572         .priority = INT_MIN,    /* Run after anything that can remove kprobes */
3573 };
3574
3575 extern unsigned long __start_mcount_loc[];
3576 extern unsigned long __stop_mcount_loc[];
3577
3578 void __init ftrace_init(void)
3579 {
3580         unsigned long count, addr, flags;
3581         int ret;
3582
3583         /* Keep the ftrace pointer to the stub */
3584         addr = (unsigned long)ftrace_stub;
3585
3586         local_irq_save(flags);
3587         ftrace_dyn_arch_init(&addr);
3588         local_irq_restore(flags);
3589
3590         /* ftrace_dyn_arch_init places the return code in addr */
3591         if (addr)
3592                 goto failed;
3593
3594         count = __stop_mcount_loc - __start_mcount_loc;
3595
3596         ret = ftrace_dyn_table_alloc(count);
3597         if (ret)
3598                 goto failed;
3599
3600         last_ftrace_enabled = ftrace_enabled = 1;
3601
3602         ret = ftrace_process_locs(NULL,
3603                                   __start_mcount_loc,
3604                                   __stop_mcount_loc);
3605
3606         ret = register_module_notifier(&ftrace_module_enter_nb);
3607         if (ret)
3608                 pr_warning("Failed to register trace ftrace module enter notifier\n");
3609
3610         ret = register_module_notifier(&ftrace_module_exit_nb);
3611         if (ret)
3612                 pr_warning("Failed to register trace ftrace module exit notifier\n");
3613
3614         set_ftrace_early_filters();
3615
3616         return;
3617  failed:
3618         ftrace_disabled = 1;
3619 }
3620
3621 #else
3622
3623 static struct ftrace_ops global_ops = {
3624         .func                   = ftrace_stub,
3625 };
3626
3627 static int __init ftrace_nodyn_init(void)
3628 {
3629         ftrace_enabled = 1;
3630         return 0;
3631 }
3632 device_initcall(ftrace_nodyn_init);
3633
3634 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
3635 static inline void ftrace_startup_enable(int command) { }
3636 /* Keep as macros so we do not need to define the commands */
3637 # define ftrace_startup(ops, command)                                   \
3638         ({                                                              \
3639                 int ___ret = __register_ftrace_function(ops);           \
3640                 if (!___ret)                                            \
3641                         (ops)->flags |= FTRACE_OPS_FL_ENABLED;          \
3642                 ___ret;                                                 \
3643         })
3644 # define ftrace_shutdown(ops, command) __unregister_ftrace_function(ops)
3645
3646 # define ftrace_startup_sysctl()        do { } while (0)
3647 # define ftrace_shutdown_sysctl()       do { } while (0)
3648
3649 static inline int
3650 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
3651 {
3652         return 1;
3653 }
3654
3655 #endif /* CONFIG_DYNAMIC_FTRACE */
3656
3657 static void
3658 ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
3659 {
3660         struct ftrace_ops *op;
3661
3662         if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT)))
3663                 return;
3664
3665         trace_recursion_set(TRACE_INTERNAL_BIT);
3666         /*
3667          * Some of the ops may be dynamically allocated,
3668          * they must be freed after a synchronize_sched().
3669          */
3670         preempt_disable_notrace();
3671         op = rcu_dereference_raw(ftrace_ops_list);
3672         while (op != &ftrace_list_end) {
3673                 if (ftrace_ops_test(op, ip))
3674                         op->func(ip, parent_ip);
3675                 op = rcu_dereference_raw(op->next);
3676         }
3677         preempt_enable_notrace();
3678         trace_recursion_clear(TRACE_INTERNAL_BIT);
3679 }
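/*
 * Note on the scheme used in ftrace_ops_list_func(): the ops list is
 * walked with preemption disabled, so a caller that removes an ops from
 * the list and then waits for a scheduler grace period (the
 * synchronize_sched() the comment above refers to) knows that no CPU can
 * still be inside op->func() by the time it frees a dynamically
 * allocated ops.
 */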
3680
3681 static void clear_ftrace_swapper(void)
3682 {
3683         struct task_struct *p;
3684         int cpu;
3685
3686         get_online_cpus();
3687         for_each_online_cpu(cpu) {
3688                 p = idle_task(cpu);
3689                 clear_tsk_trace_trace(p);
3690         }
3691         put_online_cpus();
3692 }
3693
3694 static void set_ftrace_swapper(void)
3695 {
3696         struct task_struct *p;
3697         int cpu;
3698
3699         get_online_cpus();
3700         for_each_online_cpu(cpu) {
3701                 p = idle_task(cpu);
3702                 set_tsk_trace_trace(p);
3703         }
3704         put_online_cpus();
3705 }
3706
3707 static void clear_ftrace_pid(struct pid *pid)
3708 {
3709         struct task_struct *p;
3710
3711         rcu_read_lock();
3712         do_each_pid_task(pid, PIDTYPE_PID, p) {
3713                 clear_tsk_trace_trace(p);
3714         } while_each_pid_task(pid, PIDTYPE_PID, p);
3715         rcu_read_unlock();
3716
3717         put_pid(pid);
3718 }
3719
3720 static void set_ftrace_pid(struct pid *pid)
3721 {
3722         struct task_struct *p;
3723
3724         rcu_read_lock();
3725         do_each_pid_task(pid, PIDTYPE_PID, p) {
3726                 set_tsk_trace_trace(p);
3727         } while_each_pid_task(pid, PIDTYPE_PID, p);
3728         rcu_read_unlock();
3729 }
3730
3731 static void clear_ftrace_pid_task(struct pid *pid)
3732 {
3733         if (pid == ftrace_swapper_pid)
3734                 clear_ftrace_swapper();
3735         else
3736                 clear_ftrace_pid(pid);
3737 }
3738
3739 static void set_ftrace_pid_task(struct pid *pid)
3740 {
3741         if (pid == ftrace_swapper_pid)
3742                 set_ftrace_swapper();
3743         else
3744                 set_ftrace_pid(pid);
3745 }
3746
3747 static int ftrace_pid_add(int p)
3748 {
3749         struct pid *pid;
3750         struct ftrace_pid *fpid;
3751         int ret = -EINVAL;
3752
3753         mutex_lock(&ftrace_lock);
3754
3755         if (!p)
3756                 pid = ftrace_swapper_pid;
3757         else
3758                 pid = find_get_pid(p);
3759
3760         if (!pid)
3761                 goto out;
3762
3763         ret = 0;
3764
3765         list_for_each_entry(fpid, &ftrace_pids, list)
3766                 if (fpid->pid == pid)
3767                         goto out_put;
3768
3769         ret = -ENOMEM;
3770
3771         fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
3772         if (!fpid)
3773                 goto out_put;
3774
3775         list_add(&fpid->list, &ftrace_pids);
3776         fpid->pid = pid;
3777
3778         set_ftrace_pid_task(pid);
3779
3780         ftrace_update_pid_func();
3781         ftrace_startup_enable(0);
3782
3783         mutex_unlock(&ftrace_lock);
3784         return 0;
3785
3786 out_put:
3787         if (pid != ftrace_swapper_pid)
3788                 put_pid(pid);
3789
3790 out:
3791         mutex_unlock(&ftrace_lock);
3792         return ret;
3793 }
3794
3795 static void ftrace_pid_reset(void)
3796 {
3797         struct ftrace_pid *fpid, *safe;
3798
3799         mutex_lock(&ftrace_lock);
3800         list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
3801                 struct pid *pid = fpid->pid;
3802
3803                 clear_ftrace_pid_task(pid);
3804
3805                 list_del(&fpid->list);
3806                 kfree(fpid);
3807         }
3808
3809         ftrace_update_pid_func();
3810         ftrace_startup_enable(0);
3811
3812         mutex_unlock(&ftrace_lock);
3813 }
3814
3815 static void *fpid_start(struct seq_file *m, loff_t *pos)
3816 {
3817         mutex_lock(&ftrace_lock);
3818
3819         if (list_empty(&ftrace_pids) && (!*pos))
3820                 return (void *) 1;
3821
3822         return seq_list_start(&ftrace_pids, *pos);
3823 }
3824
3825 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
3826 {
3827         if (v == (void *)1)
3828                 return NULL;
3829
3830         return seq_list_next(v, &ftrace_pids, pos);
3831 }
3832
3833 static void fpid_stop(struct seq_file *m, void *p)
3834 {
3835         mutex_unlock(&ftrace_lock);
3836 }
3837
3838 static int fpid_show(struct seq_file *m, void *v)
3839 {
3840         const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
3841
3842         if (v == (void *)1) {
3843                 seq_printf(m, "no pid\n");
3844                 return 0;
3845         }
3846
3847         if (fpid->pid == ftrace_swapper_pid)
3848                 seq_printf(m, "swapper tasks\n");
3849         else
3850                 seq_printf(m, "%u\n", pid_vnr(fpid->pid));
3851
3852         return 0;
3853 }
3854
3855 static const struct seq_operations ftrace_pid_sops = {
3856         .start = fpid_start,
3857         .next = fpid_next,
3858         .stop = fpid_stop,
3859         .show = fpid_show,
3860 };
3861
3862 static int
3863 ftrace_pid_open(struct inode *inode, struct file *file)
3864 {
3865         int ret = 0;
3866
3867         if ((file->f_mode & FMODE_WRITE) &&
3868             (file->f_flags & O_TRUNC))
3869                 ftrace_pid_reset();
3870
3871         if (file->f_mode & FMODE_READ)
3872                 ret = seq_open(file, &ftrace_pid_sops);
3873
3874         return ret;
3875 }
3876
3877 static ssize_t
3878 ftrace_pid_write(struct file *filp, const char __user *ubuf,
3879                    size_t cnt, loff_t *ppos)
3880 {
3881         char buf[64], *tmp;
3882         long val;
3883         int ret;
3884
3885         if (cnt >= sizeof(buf))
3886                 return -EINVAL;
3887
3888         if (copy_from_user(&buf, ubuf, cnt))
3889                 return -EFAULT;
3890
3891         buf[cnt] = 0;
3892
3893         /*
3894          * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
3895          * to clear the filter quietly.
3896          */
3897         tmp = strstrip(buf);
3898         if (strlen(tmp) == 0)
3899                 return 1;
3900
3901         ret = strict_strtol(tmp, 10, &val);
3902         if (ret < 0)
3903                 return ret;
3904
3905         ret = ftrace_pid_add(val);
3906
3907         return ret ? ret : cnt;
3908 }
3909
3910 static int
3911 ftrace_pid_release(struct inode *inode, struct file *file)
3912 {
3913         if (file->f_mode & FMODE_READ)
3914                 seq_release(inode, file);
3915
3916         return 0;
3917 }
3918
3919 static const struct file_operations ftrace_pid_fops = {
3920         .open           = ftrace_pid_open,
3921         .write          = ftrace_pid_write,
3922         .read           = seq_read,
3923         .llseek         = ftrace_filter_lseek,
3924         .release        = ftrace_pid_release,
3925 };
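/*
 * Usage sketch for the file backed by these fops (the debugfs path is an
 * assumption about the usual mount point): writing a PID to
 * set_ftrace_pid limits function tracing to that task, writing 0 selects
 * the idle (swapper) tasks via ftrace_swapper_pid, and a bare
 * "echo > set_ftrace_pid" clears the filter, as ftrace_pid_write() and
 * ftrace_pid_add() above implement.
 *
 *	echo 1234 > /sys/kernel/debug/tracing/set_ftrace_pid
 */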
3926
3927 static __init int ftrace_init_debugfs(void)
3928 {
3929         struct dentry *d_tracer;
3930
3931         d_tracer = tracing_init_dentry();
3932         if (!d_tracer)
3933                 return 0;
3934
3935         ftrace_init_dyn_debugfs(d_tracer);
3936
3937         trace_create_file("set_ftrace_pid", 0644, d_tracer,
3938                             NULL, &ftrace_pid_fops);
3939
3940         ftrace_profile_debugfs(d_tracer);
3941
3942         return 0;
3943 }
3944 fs_initcall(ftrace_init_debugfs);
3945
3946 /**
3947  * ftrace_kill - kill ftrace
3948  *
3949  * This function should be used by panic code. It stops ftrace
3950  * but in a not so nice way: tracing is simply disabled, with none
3951  * of the clean teardown that unregistering an ops would perform.
3952  */
3953 void ftrace_kill(void)
3954 {
3955         ftrace_disabled = 1;
3956         ftrace_enabled = 0;
3957         clear_ftrace_function();
3958 }
3959
3960 /**
3961  * ftrace_is_dead - Test if ftrace is dead or not.
3962  */
3963 int ftrace_is_dead(void)
3964 {
3965         return ftrace_disabled;
3966 }
3967
3968 /**
3969  * register_ftrace_function - register a function for profiling
3970  * @ops: ops structure that holds the function for profiling.
3971  *
3972  * Register a function to be called by all functions in the
3973  * kernel.
3974  *
3975  * Note: @ops->func and all the functions it calls must be labeled
3976  *       with "notrace", otherwise it will go into a
3977  *       recursive loop.
3978  */
3979 int register_ftrace_function(struct ftrace_ops *ops)
3980 {
3981         int ret = -1;
3982
3983         mutex_lock(&ftrace_lock);
3984
3985         ret = ftrace_startup(ops, 0);
3986
3987         mutex_unlock(&ftrace_lock);
3988         return ret;
3989 }
3990 EXPORT_SYMBOL_GPL(register_ftrace_function);
3991
3992 /**
3993  * unregister_ftrace_function - unregister a function for profiling.
3994  * @ops: ops structure that holds the function to unregister
3995  *
3996  * Unregister a function that was added to be called by ftrace profiling.
3997  */
3998 int unregister_ftrace_function(struct ftrace_ops *ops)
3999 {
4000         int ret;
4001
4002         mutex_lock(&ftrace_lock);
4003         ret = ftrace_shutdown(ops, 0);
4004         mutex_unlock(&ftrace_lock);
4005
4006         return ret;
4007 }
4008 EXPORT_SYMBOL_GPL(unregister_ftrace_function);
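/*
 * A minimal sketch of how a caller might use the two functions above.
 * The names my_trace_func, my_ops, my_init and my_exit are hypothetical;
 * the callback is marked notrace as required by the kernel-doc above,
 * and the block is guarded out since it is illustration only.
 */
#if 0
static void notrace my_trace_func(unsigned long ip, unsigned long parent_ip)
{
	/* called for every traced function; ip is the traced function's address */
}

static struct ftrace_ops my_ops __read_mostly = {
	.func	= my_trace_func,
};

static int __init my_init(void)
{
	return register_ftrace_function(&my_ops);
}

static void __exit my_exit(void)
{
	unregister_ftrace_function(&my_ops);
	/*
	 * A dynamically allocated ops would additionally need a
	 * synchronize_sched() here before being freed (see the comment
	 * in ftrace_ops_list_func()).
	 */
}
#endif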
4009
4010 int
4011 ftrace_enable_sysctl(struct ctl_table *table, int write,
4012                      void __user *buffer, size_t *lenp,
4013                      loff_t *ppos)
4014 {
4015         int ret = -ENODEV;
4016
4017         mutex_lock(&ftrace_lock);
4018
4019         if (unlikely(ftrace_disabled))
4020                 goto out;
4021
4022         ret = proc_dointvec(table, write, buffer, lenp, ppos);
4023
4024         if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
4025                 goto out;
4026
4027         last_ftrace_enabled = !!ftrace_enabled;
4028
4029         if (ftrace_enabled) {
4030
4031                 ftrace_startup_sysctl();
4032
4033                 /* we are starting ftrace again */
4034                 if (ftrace_ops_list != &ftrace_list_end)
4035                         update_ftrace_function();
4036
4037         } else {
4038                 /* stopping ftrace calls (just send to ftrace_stub) */
4039                 ftrace_trace_function = ftrace_stub;
4040
4041                 ftrace_shutdown_sysctl();
4042         }
4043
4044  out:
4045         mutex_unlock(&ftrace_lock);
4046         return ret;
4047 }
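/*
 * This handler sits behind the ftrace_enabled sysctl, so (assuming the
 * usual proc layout) toggling tracing from user space looks like:
 *
 *	echo 0 > /proc/sys/kernel/ftrace_enabled
 *
 * which lands here with write != 0 and sends calls to ftrace_stub, as
 * the else branch above shows.
 */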
4048
4049 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4050
4051 static int ftrace_graph_active;
4052 static struct notifier_block ftrace_suspend_notifier;
4053
4054 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
4055 {
4056         return 0;
4057 }
4058
4059 /* The callbacks that hook a function */
4060 trace_func_graph_ret_t ftrace_graph_return =
4061                         (trace_func_graph_ret_t)ftrace_stub;
4062 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
4063
4064 /* Try to assign a return stack to FTRACE_RETSTACK_ALLOC_SIZE tasks at a time. */
4065 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
4066 {
4067         int i;
4068         int ret = 0;
4069         unsigned long flags;
4070         int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
4071         struct task_struct *g, *t;
4072
4073         for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
4074                 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
4075                                         * sizeof(struct ftrace_ret_stack),
4076                                         GFP_KERNEL);
4077                 if (!ret_stack_list[i]) {
4078                         start = 0;
4079                         end = i;
4080                         ret = -ENOMEM;
4081                         goto free;
4082                 }
4083         }
4084
4085         read_lock_irqsave(&tasklist_lock, flags);
4086         do_each_thread(g, t) {
4087                 if (start == end) {
4088                         ret = -EAGAIN;
4089                         goto unlock;
4090                 }
4091
4092                 if (t->ret_stack == NULL) {
4093                         atomic_set(&t->tracing_graph_pause, 0);
4094                         atomic_set(&t->trace_overrun, 0);
4095                         t->curr_ret_stack = -1;
4096                         /* Make sure the tasks see the -1 first: */
4097                         smp_wmb();
4098                         t->ret_stack = ret_stack_list[start++];
4099                 }
4100         } while_each_thread(g, t);
4101
4102 unlock:
4103         read_unlock_irqrestore(&tasklist_lock, flags);
4104 free:
4105         for (i = start; i < end; i++)
4106                 kfree(ret_stack_list[i]);
4107         return ret;
4108 }
4109
4110 static void
4111 ftrace_graph_probe_sched_switch(void *ignore,
4112                         struct task_struct *prev, struct task_struct *next)
4113 {
4114         unsigned long long timestamp;
4115         int index;
4116
4117         /*
4118          * Does the user want to count the time a function was asleep?
4119          * If so, do not update the time stamps.
4120          */
4121         if (trace_flags & TRACE_ITER_SLEEP_TIME)
4122                 return;
4123
4124         timestamp = trace_clock_local();
4125
4126         prev->ftrace_timestamp = timestamp;
4127
4128         /* only process tasks that we timestamped */
4129         if (!next->ftrace_timestamp)
4130                 return;
4131
4132         /*
4133          * Update all the counters in next to make up for the
4134          * time next was sleeping.
4135          */
4136         timestamp -= next->ftrace_timestamp;
4137
4138         for (index = next->curr_ret_stack; index >= 0; index--)
4139                 next->ret_stack[index].calltime += timestamp;
4140 }
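/*
 * A small worked example of the adjustment above: when sleep time is not
 * being counted (i.e. the early return was not taken), a task that was
 * scheduled out for 2ms has every calltime on its pending return stack
 * moved 2ms later, so the duration reported when each function returns
 * no longer includes the time the task spent sleeping.
 */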
4141
4142 /* Allocate a return stack for each task */
4143 static int start_graph_tracing(void)
4144 {
4145         struct ftrace_ret_stack **ret_stack_list;
4146         int ret, cpu;
4147
4148         ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
4149                                 sizeof(struct ftrace_ret_stack *),
4150                                 GFP_KERNEL);
4151
4152         if (!ret_stack_list)
4153                 return -ENOMEM;
4154
4155         /* The cpu_boot init_task->ret_stack will never be freed */
4156         for_each_online_cpu(cpu) {
4157                 if (!idle_task(cpu)->ret_stack)
4158                         ftrace_graph_init_idle_task(idle_task(cpu), cpu);
4159         }
4160
4161         do {
4162                 ret = alloc_retstack_tasklist(ret_stack_list);
4163         } while (ret == -EAGAIN);
4164
4165         if (!ret) {
4166                 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
4167                 if (ret)
4168                         pr_info("ftrace_graph: Couldn't activate tracepoint"
4169                                 " probe to kernel_sched_switch\n");
4170         }
4171
4172         kfree(ret_stack_list);
4173         return ret;
4174 }
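/*
 * alloc_retstack_tasklist() hands out at most FTRACE_RETSTACK_ALLOC_SIZE
 * newly allocated stacks per call and returns -EAGAIN while threads are
 * still left without one, which is why the do/while above simply keeps
 * calling it until every thread has a ret_stack.
 */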
4175
4176 /*
4177  * Hibernation protection.
4178  * The state of the current task is too unstable during
4179  * suspend/restore to disk. We want to protect against that.
4180  */
4181 static int
4182 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
4183                                                         void *unused)
4184 {
4185         switch (state) {
4186         case PM_HIBERNATION_PREPARE:
4187                 pause_graph_tracing();
4188                 break;
4189
4190         case PM_POST_HIBERNATION:
4191                 unpause_graph_tracing();
4192                 break;
4193         }
4194         return NOTIFY_DONE;
4195 }
4196
4197 /* Just a placeholder for function graph */
4198 static struct ftrace_ops fgraph_ops __read_mostly = {
4199         .func           = ftrace_stub,
4200         .flags          = FTRACE_OPS_FL_GLOBAL,
4201 };
4202
4203 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
4204                         trace_func_graph_ent_t entryfunc)
4205 {
4206         int ret = 0;
4207
4208         mutex_lock(&ftrace_lock);
4209
4210         /* we currently allow only one tracer registered at a time */
4211         if (ftrace_graph_active) {
4212                 ret = -EBUSY;
4213                 goto out;
4214         }
4215
4216         ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
4217         register_pm_notifier(&ftrace_suspend_notifier);
4218
4219         ftrace_graph_active++;
4220         ret = start_graph_tracing();
4221         if (ret) {
4222                 ftrace_graph_active--;
4223                 goto out;
4224         }
4225
4226         ftrace_graph_return = retfunc;
4227         ftrace_graph_entry = entryfunc;
4228
4229         ret = ftrace_startup(&fgraph_ops, FTRACE_START_FUNC_RET);
4230
4231 out:
4232         mutex_unlock(&ftrace_lock);
4233         return ret;
4234 }
4235
4236 void unregister_ftrace_graph(void)
4237 {
4238         mutex_lock(&ftrace_lock);
4239
4240         if (unlikely(!ftrace_graph_active))
4241                 goto out;
4242
4243         ftrace_graph_active--;
4244         ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
4245         ftrace_graph_entry = ftrace_graph_entry_stub;
4246         ftrace_shutdown(&fgraph_ops, FTRACE_STOP_FUNC_RET);
4247         unregister_pm_notifier(&ftrace_suspend_notifier);
4248         unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
4249
4250  out:
4251         mutex_unlock(&ftrace_lock);
4252 }
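/*
 * A minimal sketch of graph callbacks a tracer might pass to
 * register_ftrace_graph().  The names are hypothetical, and the return
 * callback's parameter type is assumed from the trace_func_graph_ret_t
 * typedef, which is not visible in this file.
 */
#if 0
static int my_graph_entry(struct ftrace_graph_ent *trace)
{
	/* non-zero means trace this function's graph (assumed convention) */
	return 1;
}

static void my_graph_return(struct ftrace_graph_ret *trace)
{
	/* called as each traced function returns */
}

static int my_graph_start(void)
{
	/* note the (return, entry) argument order */
	return register_ftrace_graph(my_graph_return, my_graph_entry);
}

static void my_graph_stop(void)
{
	unregister_ftrace_graph();
}
#endif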
4253
4254 static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
4255
4256 static void
4257 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
4258 {
4259         atomic_set(&t->tracing_graph_pause, 0);
4260         atomic_set(&t->trace_overrun, 0);
4261         t->ftrace_timestamp = 0;
4262         /* make curr_ret_stack visible before we add the ret_stack */
4263         smp_wmb();
4264         t->ret_stack = ret_stack;
4265 }
4266
4267 /*
4268  * Allocate a return stack for the idle task. May be the first
4269  * time through, or it may be done by CPU hotplug online.
4270  */
4271 void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
4272 {
4273         t->curr_ret_stack = -1;
4274         /*
4275          * The idle task has no parent, it either has its own
4276          * stack or no stack at all.
4277          */
4278         if (t->ret_stack)
4279                 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
4280
4281         if (ftrace_graph_active) {
4282                 struct ftrace_ret_stack *ret_stack;
4283
4284                 ret_stack = per_cpu(idle_ret_stack, cpu);
4285                 if (!ret_stack) {
4286                         ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4287                                             * sizeof(struct ftrace_ret_stack),
4288                                             GFP_KERNEL);
4289                         if (!ret_stack)
4290                                 return;
4291                         per_cpu(idle_ret_stack, cpu) = ret_stack;
4292                 }
4293                 graph_init_task(t, ret_stack);
4294         }
4295 }
4296
4297 /* Allocate a return stack for newly created task */
4298 void ftrace_graph_init_task(struct task_struct *t)
4299 {
4300         /* Make sure we do not use the parent ret_stack */
4301         t->ret_stack = NULL;
4302         t->curr_ret_stack = -1;
4303
4304         if (ftrace_graph_active) {
4305                 struct ftrace_ret_stack *ret_stack;
4306
4307                 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4308                                 * sizeof(struct ftrace_ret_stack),
4309                                 GFP_KERNEL);
4310                 if (!ret_stack)
4311                         return;
4312                 graph_init_task(t, ret_stack);
4313         }
4314 }
4315
4316 void ftrace_graph_exit_task(struct task_struct *t)
4317 {
4318         struct ftrace_ret_stack *ret_stack = t->ret_stack;
4319
4320         t->ret_stack = NULL;
4321         /* NULL must become visible to IRQs before we free it: */
4322         barrier();
4323
4324         kfree(ret_stack);
4325 }
4326
4327 void ftrace_graph_stop(void)
4328 {
4329         ftrace_stop();
4330 }
4331 #endif