ftrace: use code patching for ftrace graph tracer
[pandora-kernel.git] / kernel / trace / ftrace.c
1 /*
2  * Infrastructure for profiling code inserted by 'gcc -pg'.
3  *
4  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6  *
7  * Originally ported from the -rt patch by:
8  *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9  *
10  * Based on code in the latency_tracer, that is:
11  *
12  *  Copyright (C) 2004-2006 Ingo Molnar
13  *  Copyright (C) 2004 William Lee Irwin III
14  */
15
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/debugfs.h>
21 #include <linux/hardirq.h>
22 #include <linux/kthread.h>
23 #include <linux/uaccess.h>
24 #include <linux/kprobes.h>
25 #include <linux/ftrace.h>
26 #include <linux/sysctl.h>
27 #include <linux/ctype.h>
28 #include <linux/list.h>
29
30 #include <asm/ftrace.h>
31
32 #include "trace.h"
33
34 #define FTRACE_WARN_ON(cond)                    \
35         do {                                    \
36                 if (WARN_ON(cond))              \
37                         ftrace_kill();          \
38         } while (0)
39
40 #define FTRACE_WARN_ON_ONCE(cond)               \
41         do {                                    \
42                 if (WARN_ON_ONCE(cond))         \
43                         ftrace_kill();          \
44         } while (0)
45
46 /* ftrace_enabled is a method to turn ftrace on or off */
47 int ftrace_enabled __read_mostly;
48 static int last_ftrace_enabled;
49
50 /* When ftrace_pid_trace >= 0, only the task with this pid is traced */
51 static int ftrace_pid_trace = -1;
52
53 /* Quick disabling of function tracer. */
54 int function_trace_stop;
55
56 /* By default, current tracing type is normal tracing. */
57 enum ftrace_tracing_type_t ftrace_tracing_type = FTRACE_TYPE_ENTER;
58
59 /*
60  * ftrace_disabled is set when an anomaly is discovered.
61  * ftrace_disabled is much stronger than ftrace_enabled.
62  */
63 static int ftrace_disabled __read_mostly;
64
65 static DEFINE_SPINLOCK(ftrace_lock);
66 static DEFINE_MUTEX(ftrace_sysctl_lock);
67 static DEFINE_MUTEX(ftrace_start_lock);
68
69 static struct ftrace_ops ftrace_list_end __read_mostly =
70 {
71         .func = ftrace_stub,
72 };
73
74 static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
75 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
76 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
77 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
78
79 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
80 {
81         struct ftrace_ops *op = ftrace_list;
82
83         /* in case someone actually ports this to alpha! */
84         read_barrier_depends();
85
86         while (op != &ftrace_list_end) {
87                 /* silly alpha */
88                 read_barrier_depends();
89                 op->func(ip, parent_ip);
90                 op = op->next;
91         }
92 }
93
94 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
95 {
96         if (current->pid != ftrace_pid_trace)
97                 return;
98
99         ftrace_pid_function(ip, parent_ip);
100 }
101
102 static void set_ftrace_pid_function(ftrace_func_t func)
103 {
104         /* do not set ftrace_pid_function to itself! */
105         if (func != ftrace_pid_func)
106                 ftrace_pid_function = func;
107 }
108
109 /**
110  * clear_ftrace_function - reset the ftrace function
111  *
112  * This NULLs the ftrace function and in essence stops
113  * tracing.  There may be a short lag before all callers see the change.
114  */
115 void clear_ftrace_function(void)
116 {
117         ftrace_trace_function = ftrace_stub;
118         __ftrace_trace_function = ftrace_stub;
119         ftrace_pid_function = ftrace_stub;
120 }
121
122 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
123 /*
124  * For those archs that do not test function_trace_stop in their
125  * mcount call site, we need to do it from C.
126  */
127 static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
128 {
129         if (function_trace_stop)
130                 return;
131
132         __ftrace_trace_function(ip, parent_ip);
133 }
134 #endif
135
136 static int __register_ftrace_function(struct ftrace_ops *ops)
137 {
138         /* should not be called from interrupt context */
139         spin_lock(&ftrace_lock);
140
141         ops->next = ftrace_list;
142         /*
143          * We are entering ops into the ftrace_list but another
144          * CPU might be walking that list. We need to make sure
145          * the ops->next pointer is valid before another CPU sees
146          * the ops pointer included into the ftrace_list.
147          */
148         smp_wmb();
149         ftrace_list = ops;
150
151         if (ftrace_enabled) {
152                 ftrace_func_t func;
153
154                 if (ops->next == &ftrace_list_end)
155                         func = ops->func;
156                 else
157                         func = ftrace_list_func;
158
159                 if (ftrace_pid_trace >= 0) {
160                         set_ftrace_pid_function(func);
161                         func = ftrace_pid_func;
162                 }
163
164                 /*
165                  * For one func, simply call it directly.
166                  * For more than one func, call the chain.
167                  */
168 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
169                 ftrace_trace_function = func;
170 #else
171                 __ftrace_trace_function = func;
172                 ftrace_trace_function = ftrace_test_stop_func;
173 #endif
174         }
175
176         spin_unlock(&ftrace_lock);
177
178         return 0;
179 }
180
181 static int __unregister_ftrace_function(struct ftrace_ops *ops)
182 {
183         struct ftrace_ops **p;
184         int ret = 0;
185
186         /* should not be called from interrupt context */
187         spin_lock(&ftrace_lock);
188
189         /*
190          * If we are removing the last function, then simply point
191          * to the ftrace_stub.
192          */
193         if (ftrace_list == ops && ops->next == &ftrace_list_end) {
194                 ftrace_trace_function = ftrace_stub;
195                 ftrace_list = &ftrace_list_end;
196                 goto out;
197         }
198
199         for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
200                 if (*p == ops)
201                         break;
202
203         if (*p != ops) {
204                 ret = -1;
205                 goto out;
206         }
207
208         *p = (*p)->next;
209
210         if (ftrace_enabled) {
211                 /* If we only have one func left, then call that directly */
212                 if (ftrace_list->next == &ftrace_list_end) {
213                         ftrace_func_t func = ftrace_list->func;
214
215                         if (ftrace_pid_trace >= 0) {
216                                 set_ftrace_pid_function(func);
217                                 func = ftrace_pid_func;
218                         }
219 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
220                         ftrace_trace_function = func;
221 #else
222                         __ftrace_trace_function = func;
223 #endif
224                 }
225         }
226
227  out:
228         spin_unlock(&ftrace_lock);
229
230         return ret;
231 }
232
233 static void ftrace_update_pid_func(void)
234 {
235         ftrace_func_t func;
236
237         /* should not be called from interrupt context */
238         spin_lock(&ftrace_lock);
239
240         if (ftrace_trace_function == ftrace_stub)
241                 goto out;
242
243         func = ftrace_trace_function;
244
245         if (ftrace_pid_trace >= 0) {
246                 set_ftrace_pid_function(func);
247                 func = ftrace_pid_func;
248         } else {
249                 if (func != ftrace_pid_func)
250                         goto out;
251
252                 set_ftrace_pid_function(func);
253         }
254
255 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
256         ftrace_trace_function = func;
257 #else
258         __ftrace_trace_function = func;
259 #endif
260
261  out:
262         spin_unlock(&ftrace_lock);
263 }
264
265 #ifdef CONFIG_DYNAMIC_FTRACE
266 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
267 # error Dynamic ftrace depends on MCOUNT_RECORD
268 #endif
269
270 /*
271  * Since MCOUNT_ADDR may point to mcount itself, we do not want
272  * to confuse it with a reference read from the code while we parse
273  * the objcopy output of the text section. Use a variable for it
274  * instead.
275  */
276 static unsigned long mcount_addr = MCOUNT_ADDR;
277
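/*
 * Commands passed to ftrace_run_update_code(). They are bit flags so
 * that several operations can be combined into a single stop_machine()
 * call.
 */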
278 enum {
279         FTRACE_ENABLE_CALLS             = (1 << 0),
280         FTRACE_DISABLE_CALLS            = (1 << 1),
281         FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
282         FTRACE_ENABLE_MCOUNT            = (1 << 3),
283         FTRACE_DISABLE_MCOUNT           = (1 << 4),
284         FTRACE_START_FUNC_RET           = (1 << 5),
285         FTRACE_STOP_FUNC_RET            = (1 << 6),
286 };
287
288 static int ftrace_filtered;
289
290 static LIST_HEAD(ftrace_new_addrs);
291
292 static DEFINE_MUTEX(ftrace_regex_lock);
293
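/*
 * The dyn_ftrace records are kept in a simple linked list of pages.
 * Each ftrace_page occupies one kernel page; the records[] array fills
 * the remainder of that page (see ENTRIES_PER_PAGE below).
 */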
294 struct ftrace_page {
295         struct ftrace_page      *next;
296         unsigned long           index;
297         struct dyn_ftrace       records[];
298 };
299
300 #define ENTRIES_PER_PAGE \
301   ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
302
303 /* estimate from running different kernels */
304 #define NR_TO_INIT              10000
305
306 static struct ftrace_page       *ftrace_pages_start;
307 static struct ftrace_page       *ftrace_pages;
308
309 static struct dyn_ftrace *ftrace_free_records;
310
311
312 #ifdef CONFIG_KPROBES
313
314 static int frozen_record_count;
315
316 static inline void freeze_record(struct dyn_ftrace *rec)
317 {
318         if (!(rec->flags & FTRACE_FL_FROZEN)) {
319                 rec->flags |= FTRACE_FL_FROZEN;
320                 frozen_record_count++;
321         }
322 }
323
324 static inline void unfreeze_record(struct dyn_ftrace *rec)
325 {
326         if (rec->flags & FTRACE_FL_FROZEN) {
327                 rec->flags &= ~FTRACE_FL_FROZEN;
328                 frozen_record_count--;
329         }
330 }
331
332 static inline int record_frozen(struct dyn_ftrace *rec)
333 {
334         return rec->flags & FTRACE_FL_FROZEN;
335 }
336 #else
337 # define freeze_record(rec)                     ({ 0; })
338 # define unfreeze_record(rec)                   ({ 0; })
339 # define record_frozen(rec)                     ({ 0; })
340 #endif /* CONFIG_KPROBES */
341
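/*
 * Freed records are chained onto the ftrace_free_records list. While a
 * record sits on that list its ip field is reused as the "next" pointer
 * and FTRACE_FL_FREE marks it as reusable (see ftrace_alloc_dyn_node()).
 */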
342 static void ftrace_free_rec(struct dyn_ftrace *rec)
343 {
344         rec->ip = (unsigned long)ftrace_free_records;
345         ftrace_free_records = rec;
346         rec->flags |= FTRACE_FL_FREE;
347 }
348
349 void ftrace_release(void *start, unsigned long size)
350 {
351         struct dyn_ftrace *rec;
352         struct ftrace_page *pg;
353         unsigned long s = (unsigned long)start;
354         unsigned long e = s + size;
355         int i;
356
357         if (ftrace_disabled || !start)
358                 return;
359
360         /* should not be called from interrupt context */
361         spin_lock(&ftrace_lock);
362
363         for (pg = ftrace_pages_start; pg; pg = pg->next) {
364                 for (i = 0; i < pg->index; i++) {
365                         rec = &pg->records[i];
366
367                         if ((rec->ip >= s) && (rec->ip < e))
368                                 ftrace_free_rec(rec);
369                 }
370         }
371         spin_unlock(&ftrace_lock);
372 }
373
374 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
375 {
376         struct dyn_ftrace *rec;
377
378         /* First check for freed records */
379         if (ftrace_free_records) {
380                 rec = ftrace_free_records;
381
382                 if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
383                         FTRACE_WARN_ON_ONCE(1);
384                         ftrace_free_records = NULL;
385                         return NULL;
386                 }
387
388                 ftrace_free_records = (void *)rec->ip;
389                 memset(rec, 0, sizeof(*rec));
390                 return rec;
391         }
392
393         if (ftrace_pages->index == ENTRIES_PER_PAGE) {
394                 if (!ftrace_pages->next) {
395                         /* allocate another page */
396                         ftrace_pages->next =
397                                 (void *)get_zeroed_page(GFP_KERNEL);
398                         if (!ftrace_pages->next)
399                                 return NULL;
400                 }
401                 ftrace_pages = ftrace_pages->next;
402         }
403
404         return &ftrace_pages->records[ftrace_pages->index++];
405 }
406
407 static struct dyn_ftrace *
408 ftrace_record_ip(unsigned long ip)
409 {
410         struct dyn_ftrace *rec;
411
412         if (ftrace_disabled)
413                 return NULL;
414
415         rec = ftrace_alloc_dyn_node(ip);
416         if (!rec)
417                 return NULL;
418
419         rec->ip = ip;
420
421         list_add(&rec->list, &ftrace_new_addrs);
422
423         return rec;
424 }
425
426 static void print_ip_ins(const char *fmt, unsigned char *p)
427 {
428         int i;
429
430         printk(KERN_CONT "%s", fmt);
431
432         for (i = 0; i < MCOUNT_INSN_SIZE; i++)
433                 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
434 }
435
436 static void ftrace_bug(int failed, unsigned long ip)
437 {
438         switch (failed) {
439         case -EFAULT:
440                 FTRACE_WARN_ON_ONCE(1);
441                 pr_info("ftrace faulted on modifying ");
442                 print_ip_sym(ip);
443                 break;
444         case -EINVAL:
445                 FTRACE_WARN_ON_ONCE(1);
446                 pr_info("ftrace failed to modify ");
447                 print_ip_sym(ip);
448                 print_ip_ins(" actual: ", (unsigned char *)ip);
449                 printk(KERN_CONT "\n");
450                 break;
451         case -EPERM:
452                 FTRACE_WARN_ON_ONCE(1);
453                 pr_info("ftrace faulted on writing ");
454                 print_ip_sym(ip);
455                 break;
456         default:
457                 FTRACE_WARN_ON_ONCE(1);
458                 pr_info("ftrace faulted on unknown error ");
459                 print_ip_sym(ip);
460         }
461 }
462
463
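/*
 * Decide whether this mcount call site should be patched to call
 * ftrace_caller or be turned back into a nop, based on the record's
 * FILTER/NOTRACE flags and on @enable, then update the site.
 */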
464 static int
465 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
466 {
467         unsigned long ip, fl;
468         unsigned long ftrace_addr;
469
470         ftrace_addr = (unsigned long)ftrace_caller;
471
472         ip = rec->ip;
473
474         /*
475          * If this record is not to be traced and
476          * it is not enabled then do nothing.
477          *
478          * If this record is not to be traced and
479  * it is enabled then disable it.
480          *
481          */
482         if (rec->flags & FTRACE_FL_NOTRACE) {
483                 if (rec->flags & FTRACE_FL_ENABLED)
484                         rec->flags &= ~FTRACE_FL_ENABLED;
485                 else
486                         return 0;
487
488         } else if (ftrace_filtered && enable) {
489                 /*
490                  * Filtering is on:
491                  */
492
493                 fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);
494
495                 /* Record is filtered and enabled, do nothing */
496                 if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
497                         return 0;
498
499                 /* Record is not filtered and is not enabled, do nothing */
500                 if (!fl)
501                         return 0;
502
503                 /* Record is not filtered but enabled, disable it */
504                 if (fl == FTRACE_FL_ENABLED)
505                         rec->flags &= ~FTRACE_FL_ENABLED;
506                 else
507                 /* Otherwise record is filtered but not enabled, enable it */
508                         rec->flags |= FTRACE_FL_ENABLED;
509         } else {
510                 /* We are disabling, or no filter is in effect */
511
512                 if (enable) {
513                         /* if record is enabled, do nothing */
514                         if (rec->flags & FTRACE_FL_ENABLED)
515                                 return 0;
516
517                         rec->flags |= FTRACE_FL_ENABLED;
518
519                 } else {
520
521                         /* if record is not enabled do nothing */
522                         if (!(rec->flags & FTRACE_FL_ENABLED))
523                                 return 0;
524
525                         rec->flags &= ~FTRACE_FL_ENABLED;
526                 }
527         }
528
529         if (rec->flags & FTRACE_FL_ENABLED)
530                 return ftrace_make_call(rec, ftrace_addr);
531         else
532                 return ftrace_make_nop(NULL, rec, ftrace_addr);
533 }
534
535 static void ftrace_replace_code(int enable)
536 {
537         int i, failed;
538         struct dyn_ftrace *rec;
539         struct ftrace_page *pg;
540
541         for (pg = ftrace_pages_start; pg; pg = pg->next) {
542                 for (i = 0; i < pg->index; i++) {
543                         rec = &pg->records[i];
544
545                         /*
546                          * Skip over free records and records that have
547                          * failed.
548                          */
549                         if (rec->flags & FTRACE_FL_FREE ||
550                             rec->flags & FTRACE_FL_FAILED)
551                                 continue;
552
553                         /* ignore updates to this record's mcount site */
554                         if (get_kprobe((void *)rec->ip)) {
555                                 freeze_record(rec);
556                                 continue;
557                         } else {
558                                 unfreeze_record(rec);
559                         }
560
561                         failed = __ftrace_replace_code(rec, enable);
562                         if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
563                                 rec->flags |= FTRACE_FL_FAILED;
564                                 if ((system_state == SYSTEM_BOOTING) ||
565                                     !core_kernel_text(rec->ip)) {
566                                         ftrace_free_rec(rec);
567                                 } else
568                                         ftrace_bug(failed, rec->ip);
569                         }
570                 }
571         }
572 }
573
574 static int
575 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
576 {
577         unsigned long ip;
578         int ret;
579
580         ip = rec->ip;
581
582         ret = ftrace_make_nop(mod, rec, mcount_addr);
583         if (ret) {
584                 ftrace_bug(ret, ip);
585                 rec->flags |= FTRACE_FL_FAILED;
586                 return 0;
587         }
588         return 1;
589 }
590
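/*
 * This is the stop_machine() callback: it runs with all other CPUs
 * halted, so the call sites can be patched without another CPU
 * executing the code being modified.
 */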
591 static int __ftrace_modify_code(void *data)
592 {
593         int *command = data;
594
595         if (*command & FTRACE_ENABLE_CALLS)
596                 ftrace_replace_code(1);
597         else if (*command & FTRACE_DISABLE_CALLS)
598                 ftrace_replace_code(0);
599
600         if (*command & FTRACE_UPDATE_TRACE_FUNC)
601                 ftrace_update_ftrace_func(ftrace_trace_function);
602
603         if (*command & FTRACE_START_FUNC_RET)
604                 ftrace_enable_ftrace_graph_caller();
605         else if (*command & FTRACE_STOP_FUNC_RET)
606                 ftrace_disable_ftrace_graph_caller();
607
608         return 0;
609 }
610
611 static void ftrace_run_update_code(int command)
612 {
613         stop_machine(__ftrace_modify_code, &command, NULL);
614 }
615
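/*
 * saved_ftrace_func remembers the last trace function that was installed
 * so redundant updates can be skipped. ftrace_start_up is a reference
 * count of the users that want the mcount call sites enabled; the calls
 * are only patched back to nops when it drops to zero in ftrace_shutdown().
 */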
616 static ftrace_func_t saved_ftrace_func;
617 static int ftrace_start_up;
618
619 static void ftrace_startup_enable(int command)
620 {
621         if (saved_ftrace_func != ftrace_trace_function) {
622                 saved_ftrace_func = ftrace_trace_function;
623                 command |= FTRACE_UPDATE_TRACE_FUNC;
624         }
625
626         if (!command || !ftrace_enabled)
627                 return;
628
629         ftrace_run_update_code(command);
630 }
631
632 static void ftrace_startup(int command)
633 {
634         if (unlikely(ftrace_disabled))
635                 return;
636
637         mutex_lock(&ftrace_start_lock);
638         ftrace_start_up++;
639         command |= FTRACE_ENABLE_CALLS;
640
641         ftrace_startup_enable(command);
642
643         mutex_unlock(&ftrace_start_lock);
644 }
645
646 static void ftrace_shutdown(int command)
647 {
648         if (unlikely(ftrace_disabled))
649                 return;
650
651         mutex_lock(&ftrace_start_lock);
652         ftrace_start_up--;
653         if (!ftrace_start_up)
654                 command |= FTRACE_DISABLE_CALLS;
655
656         if (saved_ftrace_func != ftrace_trace_function) {
657                 saved_ftrace_func = ftrace_trace_function;
658                 command |= FTRACE_UPDATE_TRACE_FUNC;
659         }
660
661         if (!command || !ftrace_enabled)
662                 goto out;
663
664         ftrace_run_update_code(command);
665  out:
666         mutex_unlock(&ftrace_start_lock);
667 }
668
669 static void ftrace_startup_sysctl(void)
670 {
671         int command = FTRACE_ENABLE_MCOUNT;
672
673         if (unlikely(ftrace_disabled))
674                 return;
675
676         mutex_lock(&ftrace_start_lock);
677         /* Force update next time */
678         saved_ftrace_func = NULL;
679         /* ftrace_start_up is nonzero if we want ftrace running */
680         if (ftrace_start_up)
681                 command |= FTRACE_ENABLE_CALLS;
682
683         ftrace_run_update_code(command);
684         mutex_unlock(&ftrace_start_lock);
685 }
686
687 static void ftrace_shutdown_sysctl(void)
688 {
689         int command = FTRACE_DISABLE_MCOUNT;
690
691         if (unlikely(ftrace_disabled))
692                 return;
693
694         mutex_lock(&ftrace_start_lock);
695         /* ftrace_start_up is nonzero if ftrace is running */
696         if (ftrace_start_up)
697                 command |= FTRACE_DISABLE_CALLS;
698
699         ftrace_run_update_code(command);
700         mutex_unlock(&ftrace_start_lock);
701 }
702
703 static cycle_t          ftrace_update_time;
704 static unsigned long    ftrace_update_cnt;
705 unsigned long           ftrace_update_tot_cnt;
706
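/*
 * Convert every call site queued on ftrace_new_addrs into a nop and
 * account how many records were converted and how long it took.
 */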
707 static int ftrace_update_code(struct module *mod)
708 {
709         struct dyn_ftrace *p, *t;
710         cycle_t start, stop;
711
712         start = ftrace_now(raw_smp_processor_id());
713         ftrace_update_cnt = 0;
714
715         list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {
716
717                 /* If something went wrong, bail without enabling anything */
718                 if (unlikely(ftrace_disabled))
719                         return -1;
720
721                 list_del_init(&p->list);
722
723                 /* convert record (i.e., patch the mcount call with a NOP) */
724                 if (ftrace_code_disable(mod, p)) {
725                         p->flags |= FTRACE_FL_CONVERTED;
726                         ftrace_update_cnt++;
727                 } else
728                         ftrace_free_rec(p);
729         }
730
731         stop = ftrace_now(raw_smp_processor_id());
732         ftrace_update_time = stop - start;
733         ftrace_update_tot_cnt += ftrace_update_cnt;
734
735         return 0;
736 }
737
738 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
739 {
740         struct ftrace_page *pg;
741         int cnt;
742         int i;
743
744         /* allocate a few pages */
745         ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
746         if (!ftrace_pages_start)
747                 return -1;
748
749         /*
750          * Allocate a few more pages.
751          *
752          * TODO: have some parser search vmlinux before
753          *   final linking to find all calls to ftrace.
754          *   Then we can:
755          *    a) know how many pages to allocate.
756          *     and/or
757          *    b) set up the table then.
758          *
759          *  The dynamic code is still necessary for
760          *  modules.
761          */
762
763         pg = ftrace_pages = ftrace_pages_start;
764
765         cnt = num_to_init / ENTRIES_PER_PAGE;
766         pr_info("ftrace: allocating %ld entries in %d pages\n",
767                 num_to_init, cnt + 1);
768
769         for (i = 0; i < cnt; i++) {
770                 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
771
772                 /* If we fail, we'll try later anyway */
773                 if (!pg->next)
774                         break;
775
776                 pg = pg->next;
777         }
778
779         return 0;
780 }
781
782 enum {
783         FTRACE_ITER_FILTER      = (1 << 0),
784         FTRACE_ITER_CONT        = (1 << 1),
785         FTRACE_ITER_NOTRACE     = (1 << 2),
786         FTRACE_ITER_FAILURES    = (1 << 3),
787 };
788
789 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
790
791 struct ftrace_iterator {
792         loff_t                  pos;
793         struct ftrace_page      *pg;
794         unsigned                idx;
795         unsigned                flags;
796         unsigned char           buffer[FTRACE_BUFF_MAX+1];
797         unsigned                buffer_idx;
798         unsigned                filtered;
799 };
800
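/*
 * seq_file iterator over the dyn_ftrace records. Records that do not
 * match the iterator's flags (free, failed, filter, notrace) are
 * skipped.
 */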
801 static void *
802 t_next(struct seq_file *m, void *v, loff_t *pos)
803 {
804         struct ftrace_iterator *iter = m->private;
805         struct dyn_ftrace *rec = NULL;
806
807         (*pos)++;
808
809         /* should not be called from interrupt context */
810         spin_lock(&ftrace_lock);
811  retry:
812         if (iter->idx >= iter->pg->index) {
813                 if (iter->pg->next) {
814                         iter->pg = iter->pg->next;
815                         iter->idx = 0;
816                         goto retry;
817                 }
818         } else {
819                 rec = &iter->pg->records[iter->idx++];
820                 if ((rec->flags & FTRACE_FL_FREE) ||
821
822                     (!(iter->flags & FTRACE_ITER_FAILURES) &&
823                      (rec->flags & FTRACE_FL_FAILED)) ||
824
825                     ((iter->flags & FTRACE_ITER_FAILURES) &&
826                      !(rec->flags & FTRACE_FL_FAILED)) ||
827
828                     ((iter->flags & FTRACE_ITER_FILTER) &&
829                      !(rec->flags & FTRACE_FL_FILTER)) ||
830
831                     ((iter->flags & FTRACE_ITER_NOTRACE) &&
832                      !(rec->flags & FTRACE_FL_NOTRACE))) {
833                         rec = NULL;
834                         goto retry;
835                 }
836         }
837         spin_unlock(&ftrace_lock);
838
839         iter->pos = *pos;
840
841         return rec;
842 }
843
844 static void *t_start(struct seq_file *m, loff_t *pos)
845 {
846         struct ftrace_iterator *iter = m->private;
847         void *p = NULL;
848         loff_t l = -1;
849
850         if (*pos > iter->pos)
851                 *pos = iter->pos;
852
853         l = *pos;
854         p = t_next(m, p, &l);
855
856         return p;
857 }
858
859 static void t_stop(struct seq_file *m, void *p)
860 {
861 }
862
863 static int t_show(struct seq_file *m, void *v)
864 {
865         struct ftrace_iterator *iter = m->private;
866         struct dyn_ftrace *rec = v;
867         char str[KSYM_SYMBOL_LEN];
868         int ret = 0;
869
870         if (!rec)
871                 return 0;
872
873         kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
874
875         ret = seq_printf(m, "%s\n", str);
876         if (ret < 0) {
877                 iter->pos--;
878                 iter->idx--;
879         }
880
881         return 0;
882 }
883
884 static struct seq_operations show_ftrace_seq_ops = {
885         .start = t_start,
886         .next = t_next,
887         .stop = t_stop,
888         .show = t_show,
889 };
890
891 static int
892 ftrace_avail_open(struct inode *inode, struct file *file)
893 {
894         struct ftrace_iterator *iter;
895         int ret;
896
897         if (unlikely(ftrace_disabled))
898                 return -ENODEV;
899
900         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
901         if (!iter)
902                 return -ENOMEM;
903
904         iter->pg = ftrace_pages_start;
905         iter->pos = 0;
906
907         ret = seq_open(file, &show_ftrace_seq_ops);
908         if (!ret) {
909                 struct seq_file *m = file->private_data;
910
911                 m->private = iter;
912         } else {
913                 kfree(iter);
914         }
915
916         return ret;
917 }
918
919 int ftrace_avail_release(struct inode *inode, struct file *file)
920 {
921         struct seq_file *m = (struct seq_file *)file->private_data;
922         struct ftrace_iterator *iter = m->private;
923
924         seq_release(inode, file);
925         kfree(iter);
926
927         return 0;
928 }
929
930 static int
931 ftrace_failures_open(struct inode *inode, struct file *file)
932 {
933         int ret;
934         struct seq_file *m;
935         struct ftrace_iterator *iter;
936
937         ret = ftrace_avail_open(inode, file);
938         if (!ret) {
939                 m = (struct seq_file *)file->private_data;
940                 iter = (struct ftrace_iterator *)m->private;
941                 iter->flags = FTRACE_ITER_FAILURES;
942         }
943
944         return ret;
945 }
946
947
948 static void ftrace_filter_reset(int enable)
949 {
950         struct ftrace_page *pg;
951         struct dyn_ftrace *rec;
952         unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
953         unsigned i;
954
955         /* should not be called from interrupt context */
956         spin_lock(&ftrace_lock);
957         if (enable)
958                 ftrace_filtered = 0;
959         pg = ftrace_pages_start;
960         while (pg) {
961                 for (i = 0; i < pg->index; i++) {
962                         rec = &pg->records[i];
963                         if (rec->flags & FTRACE_FL_FAILED)
964                                 continue;
965                         rec->flags &= ~type;
966                 }
967                 pg = pg->next;
968         }
969         spin_unlock(&ftrace_lock);
970 }
971
972 static int
973 ftrace_regex_open(struct inode *inode, struct file *file, int enable)
974 {
975         struct ftrace_iterator *iter;
976         int ret = 0;
977
978         if (unlikely(ftrace_disabled))
979                 return -ENODEV;
980
981         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
982         if (!iter)
983                 return -ENOMEM;
984
985         mutex_lock(&ftrace_regex_lock);
986         if ((file->f_mode & FMODE_WRITE) &&
987             !(file->f_flags & O_APPEND))
988                 ftrace_filter_reset(enable);
989
990         if (file->f_mode & FMODE_READ) {
991                 iter->pg = ftrace_pages_start;
992                 iter->pos = 0;
993                 iter->flags = enable ? FTRACE_ITER_FILTER :
994                         FTRACE_ITER_NOTRACE;
995
996                 ret = seq_open(file, &show_ftrace_seq_ops);
997                 if (!ret) {
998                         struct seq_file *m = file->private_data;
999                         m->private = iter;
1000                 } else
1001                         kfree(iter);
1002         } else
1003                 file->private_data = iter;
1004         mutex_unlock(&ftrace_regex_lock);
1005
1006         return ret;
1007 }
1008
1009 static int
1010 ftrace_filter_open(struct inode *inode, struct file *file)
1011 {
1012         return ftrace_regex_open(inode, file, 1);
1013 }
1014
1015 static int
1016 ftrace_notrace_open(struct inode *inode, struct file *file)
1017 {
1018         return ftrace_regex_open(inode, file, 0);
1019 }
1020
1021 static ssize_t
1022 ftrace_regex_read(struct file *file, char __user *ubuf,
1023                        size_t cnt, loff_t *ppos)
1024 {
1025         if (file->f_mode & FMODE_READ)
1026                 return seq_read(file, ubuf, cnt, ppos);
1027         else
1028                 return -EPERM;
1029 }
1030
1031 static loff_t
1032 ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
1033 {
1034         loff_t ret;
1035
1036         if (file->f_mode & FMODE_READ)
1037                 ret = seq_lseek(file, offset, origin);
1038         else
1039                 file->f_pos = ret = 1;
1040
1041         return ret;
1042 }
1043
1044 enum {
1045         MATCH_FULL,
1046         MATCH_FRONT_ONLY,
1047         MATCH_MIDDLE_ONLY,
1048         MATCH_END_ONLY,
1049 };
1050
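/*
 * Match the user supplied glob against every known function:
 *   "func"    - full match
 *   "func*"   - match functions starting with "func"
 *   "*func"   - match functions ending with "func"
 *   "*func*"  - match functions containing "func"
 * Matching records get the FILTER or NOTRACE flag, depending on @enable.
 */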
1051 static void
1052 ftrace_match(unsigned char *buff, int len, int enable)
1053 {
1054         char str[KSYM_SYMBOL_LEN];
1055         char *search = NULL;
1056         struct ftrace_page *pg;
1057         struct dyn_ftrace *rec;
1058         int type = MATCH_FULL;
1059         unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1060         unsigned i, match = 0, search_len = 0;
1061
1062         for (i = 0; i < len; i++) {
1063                 if (buff[i] == '*') {
1064                         if (!i) {
1065                                 search = buff + i + 1;
1066                                 type = MATCH_END_ONLY;
1067                                 search_len = len - (i + 1);
1068                         } else {
1069                                 if (type == MATCH_END_ONLY) {
1070                                         type = MATCH_MIDDLE_ONLY;
1071                                 } else {
1072                                         match = i;
1073                                         type = MATCH_FRONT_ONLY;
1074                                 }
1075                                 buff[i] = 0;
1076                                 break;
1077                         }
1078                 }
1079         }
1080
1081         /* should not be called from interrupt context */
1082         spin_lock(&ftrace_lock);
1083         if (enable)
1084                 ftrace_filtered = 1;
1085         pg = ftrace_pages_start;
1086         while (pg) {
1087                 for (i = 0; i < pg->index; i++) {
1088                         int matched = 0;
1089                         char *ptr;
1090
1091                         rec = &pg->records[i];
1092                         if (rec->flags & FTRACE_FL_FAILED)
1093                                 continue;
1094                         kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1095                         switch (type) {
1096                         case MATCH_FULL:
1097                                 if (strcmp(str, buff) == 0)
1098                                         matched = 1;
1099                                 break;
1100                         case MATCH_FRONT_ONLY:
1101                                 if (memcmp(str, buff, match) == 0)
1102                                         matched = 1;
1103                                 break;
1104                         case MATCH_MIDDLE_ONLY:
1105                                 if (strstr(str, search))
1106                                         matched = 1;
1107                                 break;
1108                         case MATCH_END_ONLY:
1109                                 ptr = strstr(str, search);
1110                                 if (ptr && (ptr[search_len] == 0))
1111                                         matched = 1;
1112                                 break;
1113                         }
1114                         if (matched)
1115                                 rec->flags |= flag;
1116                 }
1117                 pg = pg->next;
1118         }
1119         spin_unlock(&ftrace_lock);
1120 }
1121
1122 static ssize_t
1123 ftrace_regex_write(struct file *file, const char __user *ubuf,
1124                    size_t cnt, loff_t *ppos, int enable)
1125 {
1126         struct ftrace_iterator *iter;
1127         char ch;
1128         size_t read = 0;
1129         ssize_t ret;
1130
1131         if (!cnt)
1132                 return 0;
1133
1134         mutex_lock(&ftrace_regex_lock);
1135
1136         if (file->f_mode & FMODE_READ) {
1137                 struct seq_file *m = file->private_data;
1138                 iter = m->private;
1139         } else
1140                 iter = file->private_data;
1141
1142         if (!*ppos) {
1143                 iter->flags &= ~FTRACE_ITER_CONT;
1144                 iter->buffer_idx = 0;
1145         }
1146
1147         ret = get_user(ch, ubuf++);
1148         if (ret)
1149                 goto out;
1150         read++;
1151         cnt--;
1152
1153         if (!(iter->flags & ~FTRACE_ITER_CONT)) {
1154                 /* skip white space */
1155                 while (cnt && isspace(ch)) {
1156                         ret = get_user(ch, ubuf++);
1157                         if (ret)
1158                                 goto out;
1159                         read++;
1160                         cnt--;
1161                 }
1162
1163                 if (isspace(ch)) {
1164                         file->f_pos += read;
1165                         ret = read;
1166                         goto out;
1167                 }
1168
1169                 iter->buffer_idx = 0;
1170         }
1171
1172         while (cnt && !isspace(ch)) {
1173                 if (iter->buffer_idx < FTRACE_BUFF_MAX)
1174                         iter->buffer[iter->buffer_idx++] = ch;
1175                 else {
1176                         ret = -EINVAL;
1177                         goto out;
1178                 }
1179                 ret = get_user(ch, ubuf++);
1180                 if (ret)
1181                         goto out;
1182                 read++;
1183                 cnt--;
1184         }
1185
1186         if (isspace(ch)) {
1187                 iter->filtered++;
1188                 iter->buffer[iter->buffer_idx] = 0;
1189                 ftrace_match(iter->buffer, iter->buffer_idx, enable);
1190                 iter->buffer_idx = 0;
1191         } else
1192                 iter->flags |= FTRACE_ITER_CONT;
1193
1194
1195         file->f_pos += read;
1196
1197         ret = read;
1198  out:
1199         mutex_unlock(&ftrace_regex_lock);
1200
1201         return ret;
1202 }
1203
1204 static ssize_t
1205 ftrace_filter_write(struct file *file, const char __user *ubuf,
1206                     size_t cnt, loff_t *ppos)
1207 {
1208         return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
1209 }
1210
1211 static ssize_t
1212 ftrace_notrace_write(struct file *file, const char __user *ubuf,
1213                      size_t cnt, loff_t *ppos)
1214 {
1215         return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
1216 }
1217
1218 static void
1219 ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
1220 {
1221         if (unlikely(ftrace_disabled))
1222                 return;
1223
1224         mutex_lock(&ftrace_regex_lock);
1225         if (reset)
1226                 ftrace_filter_reset(enable);
1227         if (buf)
1228                 ftrace_match(buf, len, enable);
1229         mutex_unlock(&ftrace_regex_lock);
1230 }
1231
1232 /**
1233  * ftrace_set_filter - set a function to filter on in ftrace
1234  * @buf: the string that holds the function filter text.
1235  * @len: the length of the string.
1236  * @reset: non-zero to reset all filters before applying this filter.
1237  *
1238  * Filters denote which functions should be enabled when tracing is enabled.
1239  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
1240  */
1241 void ftrace_set_filter(unsigned char *buf, int len, int reset)
1242 {
1243         ftrace_set_regex(buf, len, reset, 1);
1244 }
1245
1246 /**
1247  * ftrace_set_notrace - set a function to not trace in ftrace
1248  * @buf: the string that holds the function notrace text.
1249  * @len: the length of the string.
1250  * @reset: non-zero to reset all filters before applying this filter.
1251  *
1252  * Notrace Filters denote which functions should not be enabled when tracing
1253  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
1254  * for tracing.
1255  */
1256 void ftrace_set_notrace(unsigned char *buf, int len, int reset)
1257 {
1258         ftrace_set_regex(buf, len, reset, 0);
1259 }
1260
1261 static int
1262 ftrace_regex_release(struct inode *inode, struct file *file, int enable)
1263 {
1264         struct seq_file *m = (struct seq_file *)file->private_data;
1265         struct ftrace_iterator *iter;
1266
1267         mutex_lock(&ftrace_regex_lock);
1268         if (file->f_mode & FMODE_READ) {
1269                 iter = m->private;
1270
1271                 seq_release(inode, file);
1272         } else
1273                 iter = file->private_data;
1274
1275         if (iter->buffer_idx) {
1276                 iter->filtered++;
1277                 iter->buffer[iter->buffer_idx] = 0;
1278                 ftrace_match(iter->buffer, iter->buffer_idx, enable);
1279         }
1280
1281         mutex_lock(&ftrace_sysctl_lock);
1282         mutex_lock(&ftrace_start_lock);
1283         if (ftrace_start_up && ftrace_enabled)
1284                 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
1285         mutex_unlock(&ftrace_start_lock);
1286         mutex_unlock(&ftrace_sysctl_lock);
1287
1288         kfree(iter);
1289         mutex_unlock(&ftrace_regex_lock);
1290         return 0;
1291 }
1292
1293 static int
1294 ftrace_filter_release(struct inode *inode, struct file *file)
1295 {
1296         return ftrace_regex_release(inode, file, 1);
1297 }
1298
1299 static int
1300 ftrace_notrace_release(struct inode *inode, struct file *file)
1301 {
1302         return ftrace_regex_release(inode, file, 0);
1303 }
1304
1305 static struct file_operations ftrace_avail_fops = {
1306         .open = ftrace_avail_open,
1307         .read = seq_read,
1308         .llseek = seq_lseek,
1309         .release = ftrace_avail_release,
1310 };
1311
1312 static struct file_operations ftrace_failures_fops = {
1313         .open = ftrace_failures_open,
1314         .read = seq_read,
1315         .llseek = seq_lseek,
1316         .release = ftrace_avail_release,
1317 };
1318
1319 static struct file_operations ftrace_filter_fops = {
1320         .open = ftrace_filter_open,
1321         .read = ftrace_regex_read,
1322         .write = ftrace_filter_write,
1323         .llseek = ftrace_regex_lseek,
1324         .release = ftrace_filter_release,
1325 };
1326
1327 static struct file_operations ftrace_notrace_fops = {
1328         .open = ftrace_notrace_open,
1329         .read = ftrace_regex_read,
1330         .write = ftrace_notrace_write,
1331         .llseek = ftrace_regex_lseek,
1332         .release = ftrace_notrace_release,
1333 };
1334
1335 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
1336 {
1337         struct dentry *entry;
1338
1339         entry = debugfs_create_file("available_filter_functions", 0444,
1340                                     d_tracer, NULL, &ftrace_avail_fops);
1341         if (!entry)
1342                 pr_warning("Could not create debugfs "
1343                            "'available_filter_functions' entry\n");
1344
1345         entry = debugfs_create_file("failures", 0444,
1346                                     d_tracer, NULL, &ftrace_failures_fops);
1347         if (!entry)
1348                 pr_warning("Could not create debugfs 'failures' entry\n");
1349
1350         entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
1351                                     NULL, &ftrace_filter_fops);
1352         if (!entry)
1353                 pr_warning("Could not create debugfs "
1354                            "'set_ftrace_filter' entry\n");
1355
1356         entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
1357                                     NULL, &ftrace_notrace_fops);
1358         if (!entry)
1359                 pr_warning("Could not create debugfs "
1360                            "'set_ftrace_notrace' entry\n");
1361
1362         return 0;
1363 }
1364
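/*
 * Walk the mcount_loc table built at link time (or provided by a module)
 * and record every mcount call site, then convert each new site into a
 * nop via ftrace_update_code().
 */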
1365 static int ftrace_convert_nops(struct module *mod,
1366                                unsigned long *start,
1367                                unsigned long *end)
1368 {
1369         unsigned long *p;
1370         unsigned long addr;
1371         unsigned long flags;
1372
1373         mutex_lock(&ftrace_start_lock);
1374         p = start;
1375         while (p < end) {
1376                 addr = ftrace_call_adjust(*p++);
1377                 /*
1378                  * Some architecture linkers will pad between
1379                  * the different mcount_loc sections of different
1380                  * object files to satisfy alignments.
1381                  * Skip any NULL pointers.
1382                  */
1383                 if (!addr)
1384                         continue;
1385                 ftrace_record_ip(addr);
1386         }
1387
1388         /* disable interrupts to prevent kstop machine */
1389         local_irq_save(flags);
1390         ftrace_update_code(mod);
1391         local_irq_restore(flags);
1392         mutex_unlock(&ftrace_start_lock);
1393
1394         return 0;
1395 }
1396
1397 void ftrace_init_module(struct module *mod,
1398                         unsigned long *start, unsigned long *end)
1399 {
1400         if (ftrace_disabled || start == end)
1401                 return;
1402         ftrace_convert_nops(mod, start, end);
1403 }
1404
1405 extern unsigned long __start_mcount_loc[];
1406 extern unsigned long __stop_mcount_loc[];
1407
1408 void __init ftrace_init(void)
1409 {
1410         unsigned long count, addr, flags;
1411         int ret;
1412
1413         /* Keep the ftrace pointer to the stub */
1414         addr = (unsigned long)ftrace_stub;
1415
1416         local_irq_save(flags);
1417         ftrace_dyn_arch_init(&addr);
1418         local_irq_restore(flags);
1419
1420         /* ftrace_dyn_arch_init places the return code in addr */
1421         if (addr)
1422                 goto failed;
1423
1424         count = __stop_mcount_loc - __start_mcount_loc;
1425
1426         ret = ftrace_dyn_table_alloc(count);
1427         if (ret)
1428                 goto failed;
1429
1430         last_ftrace_enabled = ftrace_enabled = 1;
1431
1432         ret = ftrace_convert_nops(NULL,
1433                                   __start_mcount_loc,
1434                                   __stop_mcount_loc);
1435
1436         return;
1437  failed:
1438         ftrace_disabled = 1;
1439 }
1440
1441 #else
1442
1443 static int __init ftrace_nodyn_init(void)
1444 {
1445         ftrace_enabled = 1;
1446         return 0;
1447 }
1448 device_initcall(ftrace_nodyn_init);
1449
1450 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
1451 static inline void ftrace_startup_enable(int command) { }
1452 /* Keep as macros so we do not need to define the commands */
1453 # define ftrace_startup(command)        do { } while (0)
1454 # define ftrace_shutdown(command)       do { } while (0)
1455 # define ftrace_startup_sysctl()        do { } while (0)
1456 # define ftrace_shutdown_sysctl()       do { } while (0)
1457 #endif /* CONFIG_DYNAMIC_FTRACE */
1458
1459 static ssize_t
1460 ftrace_pid_read(struct file *file, char __user *ubuf,
1461                        size_t cnt, loff_t *ppos)
1462 {
1463         char buf[64];
1464         int r;
1465
1466         if (ftrace_pid_trace >= 0)
1467                 r = sprintf(buf, "%u\n", ftrace_pid_trace);
1468         else
1469                 r = sprintf(buf, "no pid\n");
1470
1471         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
1472 }
1473
1474 static ssize_t
1475 ftrace_pid_write(struct file *filp, const char __user *ubuf,
1476                    size_t cnt, loff_t *ppos)
1477 {
1478         char buf[64];
1479         long val;
1480         int ret;
1481
1482         if (cnt >= sizeof(buf))
1483                 return -EINVAL;
1484
1485         if (copy_from_user(&buf, ubuf, cnt))
1486                 return -EFAULT;
1487
1488         buf[cnt] = 0;
1489
1490         ret = strict_strtol(buf, 10, &val);
1491         if (ret < 0)
1492                 return ret;
1493
1494         mutex_lock(&ftrace_start_lock);
1495         if (val < 0) {
1496                 /* disable pid tracing */
1497                 if (ftrace_pid_trace < 0)
1498                         goto out;
1499                 ftrace_pid_trace = -1;
1500
1501         } else {
1502
1503                 if (ftrace_pid_trace == val)
1504                         goto out;
1505
1506                 ftrace_pid_trace = val;
1507         }
1508
1509         /* update the function call */
1510         ftrace_update_pid_func();
1511         ftrace_startup_enable(0);
1512
1513  out:
1514         mutex_unlock(&ftrace_start_lock);
1515
1516         return cnt;
1517 }
1518
1519 static struct file_operations ftrace_pid_fops = {
1520         .read = ftrace_pid_read,
1521         .write = ftrace_pid_write,
1522 };
1523
1524 static __init int ftrace_init_debugfs(void)
1525 {
1526         struct dentry *d_tracer;
1527         struct dentry *entry;
1528
1529         d_tracer = tracing_init_dentry();
1530         if (!d_tracer)
1531                 return 0;
1532
1533         ftrace_init_dyn_debugfs(d_tracer);
1534
1535         entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
1536                                     NULL, &ftrace_pid_fops);
1537         if (!entry)
1538                 pr_warning("Could not create debugfs "
1539                            "'set_ftrace_pid' entry\n");
1540         return 0;
1541 }
1542
1543 fs_initcall(ftrace_init_debugfs);
1544
1545 /**
1546  * ftrace_kill - kill ftrace
1547  *
1548  * This function should be used by panic code. It stops ftrace
1549  * but in a not so nice way. If you need to simply kill ftrace
1550  * from a non-atomic section, use ftrace_kill.
1551  */
1552 void ftrace_kill(void)
1553 {
1554         ftrace_disabled = 1;
1555         ftrace_enabled = 0;
1556         clear_ftrace_function();
1557 }
1558
1559 /**
1560  * register_ftrace_function - register a function for profiling
1561  * @ops: ops structure that holds the function for profiling.
1562  *
1563  * Register a function to be called by all functions in the
1564  * kernel.
1565  *
1566  * Note: @ops->func and all the functions it calls must be labeled
1567  *       with "notrace", otherwise it will go into a
1568  *       recursive loop.
1569  */
1570 int register_ftrace_function(struct ftrace_ops *ops)
1571 {
1572         int ret;
1573
1574         if (unlikely(ftrace_disabled))
1575                 return -1;
1576
1577         mutex_lock(&ftrace_sysctl_lock);
1578
1579         if (ftrace_tracing_type == FTRACE_TYPE_RETURN) {
1580                 ret = -EBUSY;
1581                 goto out;
1582         }
1583
1584         ret = __register_ftrace_function(ops);
1585         ftrace_startup(0);
1586
1587 out:
1588         mutex_unlock(&ftrace_sysctl_lock);
1589         return ret;
1590 }
1591
1592 /**
1593  * unregister_ftrace_function - unregister a function for profiling.
1594  * @ops: ops structure that holds the function to unregister
1595  *
1596  * Unregister a function that was added to be called by ftrace profiling.
1597  */
1598 int unregister_ftrace_function(struct ftrace_ops *ops)
1599 {
1600         int ret;
1601
1602         mutex_lock(&ftrace_sysctl_lock);
1603         ret = __unregister_ftrace_function(ops);
1604         ftrace_shutdown(0);
1605         mutex_unlock(&ftrace_sysctl_lock);
1606
1607         return ret;
1608 }
1609
1610 int
1611 ftrace_enable_sysctl(struct ctl_table *table, int write,
1612                      struct file *file, void __user *buffer, size_t *lenp,
1613                      loff_t *ppos)
1614 {
1615         int ret;
1616
1617         if (unlikely(ftrace_disabled))
1618                 return -ENODEV;
1619
1620         mutex_lock(&ftrace_sysctl_lock);
1621
1622         ret  = proc_dointvec(table, write, file, buffer, lenp, ppos);
1623
1624         if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
1625                 goto out;
1626
1627         last_ftrace_enabled = ftrace_enabled;
1628
1629         if (ftrace_enabled) {
1630
1631                 ftrace_startup_sysctl();
1632
1633                 /* we are starting ftrace again */
1634                 if (ftrace_list != &ftrace_list_end) {
1635                         if (ftrace_list->next == &ftrace_list_end)
1636                                 ftrace_trace_function = ftrace_list->func;
1637                         else
1638                                 ftrace_trace_function = ftrace_list_func;
1639                 }
1640
1641         } else {
1642                 /* stopping ftrace calls (just send to ftrace_stub) */
1643                 ftrace_trace_function = ftrace_stub;
1644
1645                 ftrace_shutdown_sysctl();
1646         }
1647
1648  out:
1649         mutex_unlock(&ftrace_sysctl_lock);
1650         return ret;
1651 }
1652
1653 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1654
1655 static atomic_t ftrace_graph_active;
1656
1657 /* The callbacks that hook a function */
1658 trace_func_graph_ret_t ftrace_graph_return =
1659                         (trace_func_graph_ret_t)ftrace_stub;
1660 trace_func_graph_ent_t ftrace_graph_entry =
1661                         (trace_func_graph_ent_t)ftrace_stub;
1662
1663 /* Try to assign a return stack array to FTRACE_RETSTACK_ALLOC_SIZE tasks. */
1664 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
1665 {
1666         int i;
1667         int ret = 0;
1668         unsigned long flags;
1669         int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
1670         struct task_struct *g, *t;
1671
1672         for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
1673                 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
1674                                         * sizeof(struct ftrace_ret_stack),
1675                                         GFP_KERNEL);
1676                 if (!ret_stack_list[i]) {
1677                         start = 0;
1678                         end = i;
1679                         ret = -ENOMEM;
1680                         goto free;
1681                 }
1682         }
1683
1684         read_lock_irqsave(&tasklist_lock, flags);
1685         do_each_thread(g, t) {
1686                 if (start == end) {
1687                         ret = -EAGAIN;
1688                         goto unlock;
1689                 }
1690
1691                 if (t->ret_stack == NULL) {
1692                         t->ret_stack = ret_stack_list[start++];
1693                         t->curr_ret_stack = -1;
1694                         atomic_set(&t->trace_overrun, 0);
1695                 }
1696         } while_each_thread(g, t);
1697
1698 unlock:
1699         read_unlock_irqrestore(&tasklist_lock, flags);
1700 free:
1701         for (i = start; i < end; i++)
1702                 kfree(ret_stack_list[i]);
1703         return ret;
1704 }
1705
1706 /* Allocate a return stack for each task */
1707 static int start_graph_tracing(void)
1708 {
1709         struct ftrace_ret_stack **ret_stack_list;
1710         int ret;
1711
1712         ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
1713                                 sizeof(struct ftrace_ret_stack *),
1714                                 GFP_KERNEL);
1715
1716         if (!ret_stack_list)
1717                 return -ENOMEM;
1718
1719         do {
1720                 ret = alloc_retstack_tasklist(ret_stack_list);
1721         } while (ret == -EAGAIN);
1722
1723         kfree(ret_stack_list);
1724         return ret;
1725 }
1726
1727 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
1728                         trace_func_graph_ent_t entryfunc)
1729 {
1730         int ret = 0;
1731
1732         mutex_lock(&ftrace_sysctl_lock);
1733
1734         /*
1735          * Don't launch return tracing if normal function
1736          * tracing is already running.
1737          */
1738         if (ftrace_trace_function != ftrace_stub) {
1739                 ret = -EBUSY;
1740                 goto out;
1741         }
1742         atomic_inc(&ftrace_graph_active);
1743         ret = start_graph_tracing();
1744         if (ret) {
1745                 atomic_dec(&ftrace_graph_active);
1746                 goto out;
1747         }
1748         ftrace_tracing_type = FTRACE_TYPE_RETURN;
1749         ftrace_graph_return = retfunc;
1750         ftrace_graph_entry = entryfunc;
1751         ftrace_startup(FTRACE_START_FUNC_RET);
1752
1753 out:
1754         mutex_unlock(&ftrace_sysctl_lock);
1755         return ret;
1756 }
1757
1758 void unregister_ftrace_graph(void)
1759 {
1760         mutex_lock(&ftrace_sysctl_lock);
1761
1762         atomic_dec(&ftrace_graph_active);
1763         ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
1764         ftrace_graph_entry = (trace_func_graph_ent_t)ftrace_stub;
1765         ftrace_shutdown(FTRACE_STOP_FUNC_RET);
1766         /* Restore normal tracing type */
1767         ftrace_tracing_type = FTRACE_TYPE_ENTER;
1768
1769         mutex_unlock(&ftrace_sysctl_lock);
1770 }
1771
1772 /* Allocate a return stack for newly created task */
1773 void ftrace_graph_init_task(struct task_struct *t)
1774 {
1775         if (atomic_read(&ftrace_graph_active)) {
1776                 t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
1777                                 * sizeof(struct ftrace_ret_stack),
1778                                 GFP_KERNEL);
1779                 if (!t->ret_stack)
1780                         return;
1781                 t->curr_ret_stack = -1;
1782                 atomic_set(&t->trace_overrun, 0);
1783         } else
1784                 t->ret_stack = NULL;
1785 }
1786
1787 void ftrace_graph_exit_task(struct task_struct *t)
1788 {
1789         struct ftrace_ret_stack *ret_stack = t->ret_stack;
1790
1791         t->ret_stack = NULL;
1792         /* NULL must become visible to IRQs before we free it: */
1793         barrier();
1794
1795         kfree(ret_stack);
1796 }
1797 #endif
1798