ftrace: do not process freed records
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"

#define FTRACE_WARN_ON(cond)                    \
        do {                                    \
                if (WARN_ON(cond))              \
                        ftrace_kill();          \
        } while (0)

#define FTRACE_WARN_ON_ONCE(cond)               \
        do {                                    \
                if (WARN_ON_ONCE(cond))         \
                        ftrace_kill();          \
        } while (0)

/* ftrace_enabled turns the function tracer on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Quick disabling of function tracer. */
int function_trace_stop;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
        .func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;

static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
        struct ftrace_ops *op = ftrace_list;

        /* in case someone actually ports this to alpha! */
        read_barrier_depends();

        while (op != &ftrace_list_end) {
                /* silly alpha */
                read_barrier_depends();
                op->func(ip, parent_ip);
                op = op->next;
        }
}
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing.  There may be some lag before the update is
 * visible to all CPUs, so tracing may not stop immediately.
 */
void clear_ftrace_function(void)
{
        ftrace_trace_function = ftrace_stub;
        __ftrace_trace_function = ftrace_stub;
}

#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test function_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
        if (function_trace_stop)
                return;

        __ftrace_trace_function(ip, parent_ip);
}
#endif

static int __register_ftrace_function(struct ftrace_ops *ops)
{
        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);

        ops->next = ftrace_list;
        /*
         * We are entering ops into the ftrace_list but another
         * CPU might be walking that list. We need to make sure
         * the ops->next pointer is valid before another CPU sees
         * the ops pointer included into the ftrace_list.
         */
        smp_wmb();
        ftrace_list = ops;

        if (ftrace_enabled) {
                /*
                 * For one func, simply call it directly.
                 * For more than one func, call the chain.
                 */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
                if (ops->next == &ftrace_list_end)
                        ftrace_trace_function = ops->func;
                else
                        ftrace_trace_function = ftrace_list_func;
#else
                if (ops->next == &ftrace_list_end)
                        __ftrace_trace_function = ops->func;
                else
                        __ftrace_trace_function = ftrace_list_func;
                ftrace_trace_function = ftrace_test_stop_func;
#endif
        }

        spin_unlock(&ftrace_lock);

        return 0;
}
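
/*
 * Illustrative sketch (hypothetical ops, not part of this file):
 * registration pushes onto the head of the list, so after adding
 * ops "a" and then "b", ftrace_list_func() walks b -> a -> end:
 *
 *      static struct ftrace_ops a = { .func = a_func };
 *      static struct ftrace_ops b = { .func = b_func };
 *
 *      __register_ftrace_function(&a); // list: a -> ftrace_list_end
 *      __register_ftrace_function(&b); // list: b -> a -> ftrace_list_end
 *
 * With a single entry the list walk is skipped entirely and
 * ftrace_trace_function points at that ops->func directly.
 */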

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
        struct ftrace_ops **p;
        int ret = 0;

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);

        /*
         * If we are removing the last function, then simply point
         * to the ftrace_stub.
         */
        if (ftrace_list == ops && ops->next == &ftrace_list_end) {
                ftrace_trace_function = ftrace_stub;
                ftrace_list = &ftrace_list_end;
                goto out;
        }

        for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
                if (*p == ops)
                        break;

        if (*p != ops) {
                ret = -1;
                goto out;
        }

        *p = (*p)->next;

        if (ftrace_enabled) {
                /* If we only have one func left, then call that directly */
                if (ftrace_list->next == &ftrace_list_end)
                        ftrace_trace_function = ftrace_list->func;
        }

 out:
        spin_unlock(&ftrace_lock);

        return ret;
}

#ifdef CONFIG_DYNAMIC_FTRACE
#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

/*
 * Since MCOUNT_ADDR may point to mcount itself, we do not want
 * to get it confused by reading a reference in the code as we
 * are parsing the objcopy output of the text section. Use a
 * variable for it instead.
 */
static unsigned long mcount_addr = MCOUNT_ADDR;

enum {
        FTRACE_ENABLE_CALLS             = (1 << 0),
        FTRACE_DISABLE_CALLS            = (1 << 1),
        FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
        FTRACE_ENABLE_MCOUNT            = (1 << 3),
        FTRACE_DISABLE_MCOUNT           = (1 << 4),
};

static int ftrace_filtered;

static LIST_HEAD(ftrace_new_addrs);

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
        struct ftrace_page      *next;
        unsigned long           index;
        struct dyn_ftrace       records[];
};

#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
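
/*
 * Worked example (illustrative; exact numbers depend on the arch and
 * config): on a 64-bit build with 4096-byte pages, the header above
 * is 16 bytes (one pointer plus one unsigned long) and this era's
 * struct dyn_ftrace (list head, ip, flags) is 32 bytes, so
 *
 *      ENTRIES_PER_PAGE = (4096 - 16) / 32 = 127
 *
 * records fit in each page, with the flexible records[] array filling
 * the remainder.
 */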

/* estimate from running different kernels */
#define NR_TO_INIT              10000

static struct ftrace_page       *ftrace_pages_start;
static struct ftrace_page       *ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;

#ifdef CONFIG_KPROBES

static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
        if (!(rec->flags & FTRACE_FL_FROZEN)) {
                rec->flags |= FTRACE_FL_FROZEN;
                frozen_record_count++;
        }
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
        if (rec->flags & FTRACE_FL_FROZEN) {
                rec->flags &= ~FTRACE_FL_FROZEN;
                frozen_record_count--;
        }
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
        return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)                     ({ 0; })
# define unfreeze_record(rec)                   ({ 0; })
# define record_frozen(rec)                     ({ 0; })
#endif /* CONFIG_KPROBES */

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
        rec->ip = (unsigned long)ftrace_free_records;
        ftrace_free_records = rec;
        rec->flags |= FTRACE_FL_FREE;
}
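
/*
 * Sketch of the free list above: freed records are threaded through
 * their own ->ip field rather than a separate pointer. After freeing
 * two hypothetical records recA then recB (list initially empty):
 *
 *      ftrace_free_records == recB
 *      recB->ip            == (unsigned long)recA
 *      recA->ip            == 0 (NULL, end of list)
 *
 * ftrace_alloc_dyn_node() below pops from this list first, and uses
 * FTRACE_FL_FREE to sanity-check that the head really is a freed
 * record before reusing it.
 */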

void ftrace_release(void *start, unsigned long size)
{
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;
        unsigned long s = (unsigned long)start;
        unsigned long e = s + size;
        int i;

        if (ftrace_disabled || !start)
                return;

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);

        for (pg = ftrace_pages_start; pg; pg = pg->next) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];

                        if ((rec->ip >= s) && (rec->ip < e))
                                ftrace_free_rec(rec);
                }
        }
        spin_unlock(&ftrace_lock);
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
        struct dyn_ftrace *rec;

        /* First check for freed records */
        if (ftrace_free_records) {
                rec = ftrace_free_records;

                if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
                        FTRACE_WARN_ON_ONCE(1);
                        ftrace_free_records = NULL;
                        return NULL;
                }

                ftrace_free_records = (void *)rec->ip;
                memset(rec, 0, sizeof(*rec));
                return rec;
        }

        if (ftrace_pages->index == ENTRIES_PER_PAGE) {
                if (!ftrace_pages->next) {
                        /* allocate another page */
                        ftrace_pages->next =
                                (void *)get_zeroed_page(GFP_KERNEL);
                        if (!ftrace_pages->next)
                                return NULL;
                }
                ftrace_pages = ftrace_pages->next;
        }

        return &ftrace_pages->records[ftrace_pages->index++];
}

static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
        struct dyn_ftrace *rec;

        if (ftrace_disabled)
                return NULL;

        rec = ftrace_alloc_dyn_node(ip);
        if (!rec)
                return NULL;

        rec->ip = ip;

        list_add(&rec->list, &ftrace_new_addrs);

        return rec;
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
        int i;

        printk(KERN_CONT "%s", fmt);

        for (i = 0; i < MCOUNT_INSN_SIZE; i++)
                printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static void ftrace_bug(int failed, unsigned long ip,
                       unsigned char *expected,
                       unsigned char *replace)
{
        switch (failed) {
        case -EFAULT:
                FTRACE_WARN_ON_ONCE(1);
                pr_info("ftrace faulted on modifying ");
                print_ip_sym(ip);
                break;
        case -EINVAL:
                FTRACE_WARN_ON_ONCE(1);
                pr_info("ftrace failed to modify ");
                print_ip_sym(ip);
                print_ip_ins(" expected: ", expected);
                print_ip_ins(" actual: ", (unsigned char *)ip);
                print_ip_ins(" replace: ", replace);
                printk(KERN_CONT "\n");
                break;
        case -EPERM:
                FTRACE_WARN_ON_ONCE(1);
                pr_info("ftrace faulted on writing ");
                print_ip_sym(ip);
                break;
        default:
                FTRACE_WARN_ON_ONCE(1);
                pr_info("ftrace faulted on unknown error ");
                print_ip_sym(ip);
        }
}

#define FTRACE_ADDR ((long)(ftrace_caller))

static int
__ftrace_replace_code(struct dyn_ftrace *rec,
                      unsigned char *old, unsigned char *new, int enable)
{
        unsigned long ip, fl;

        ip = rec->ip;

        if (ftrace_filtered && enable) {
                /*
                 * If filtering is on, the record's flags decide the
                 * action (F = FTRACE_FL_FILTER, N = FTRACE_FL_NOTRACE,
                 * E = FTRACE_FL_ENABLED):
                 *
                 *   F N E   action
                 *   0 0 0   do nothing
                 *   0 0 1   disable
                 *   0 1 0   do nothing
                 *   0 1 1   disable
                 *   1 0 0   enable
                 *   1 0 1   do nothing
                 *   1 1 0   do nothing
                 *   1 1 1   disable
                 */

                fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
                                   FTRACE_FL_ENABLED);

                if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
                    (fl == (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
                    !fl || (fl == FTRACE_FL_NOTRACE))
                        return 0;

                /*
                 * If it is enabled disable it,
                 * otherwise enable it!
                 */
                if (fl & FTRACE_FL_ENABLED) {
                        /* swap new and old */
                        new = old;
                        old = ftrace_call_replace(ip, FTRACE_ADDR);
                        rec->flags &= ~FTRACE_FL_ENABLED;
                } else {
                        new = ftrace_call_replace(ip, FTRACE_ADDR);
                        rec->flags |= FTRACE_FL_ENABLED;
                }
        } else {

                if (enable) {
                        /*
                         * If this record is set not to trace and is
                         * not enabled, do nothing.
                         */
                        fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
                        if (fl == FTRACE_FL_NOTRACE)
                                return 0;

                        new = ftrace_call_replace(ip, FTRACE_ADDR);
                } else
                        old = ftrace_call_replace(ip, FTRACE_ADDR);

                if (enable) {
                        if (rec->flags & FTRACE_FL_ENABLED)
                                return 0;
                        rec->flags |= FTRACE_FL_ENABLED;
                } else {
                        if (!(rec->flags & FTRACE_FL_ENABLED))
                                return 0;
                        rec->flags &= ~FTRACE_FL_ENABLED;
                }
        }

        return ftrace_modify_code(ip, old, new);
}

static void ftrace_replace_code(int enable)
{
        int i, failed;
        unsigned char *new = NULL, *old = NULL;
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;

        if (enable)
                old = ftrace_nop_replace();
        else
                new = ftrace_nop_replace();

        for (pg = ftrace_pages_start; pg; pg = pg->next) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];

                        /*
                         * Skip over free records and records that have
                         * failed.
                         */
                        if (rec->flags & FTRACE_FL_FREE ||
                            rec->flags & FTRACE_FL_FAILED)
                                continue;

                        /* ignore updates to this record's mcount site */
                        if (get_kprobe((void *)rec->ip)) {
                                freeze_record(rec);
                                continue;
                        } else {
                                unfreeze_record(rec);
                        }

                        failed = __ftrace_replace_code(rec, old, new, enable);
                        if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
                                rec->flags |= FTRACE_FL_FAILED;
                                if ((system_state == SYSTEM_BOOTING) ||
                                    !core_kernel_text(rec->ip)) {
                                        ftrace_free_rec(rec);
                                } else
                                        ftrace_bug(failed, rec->ip, old, new);
                        }
                }
        }
}

static int
ftrace_code_disable(struct dyn_ftrace *rec)
{
        unsigned long ip;
        unsigned char *nop, *call;
        int ret;

        ip = rec->ip;

        nop = ftrace_nop_replace();
        call = ftrace_call_replace(ip, mcount_addr);

        ret = ftrace_modify_code(ip, call, nop);
        if (ret) {
                ftrace_bug(ret, ip, call, nop);
                rec->flags |= FTRACE_FL_FAILED;
                return 0;
        }
        return 1;
}

static int __ftrace_modify_code(void *data)
{
        int *command = data;

        if (*command & FTRACE_ENABLE_CALLS)
                ftrace_replace_code(1);
        else if (*command & FTRACE_DISABLE_CALLS)
                ftrace_replace_code(0);

        if (*command & FTRACE_UPDATE_TRACE_FUNC)
                ftrace_update_ftrace_func(ftrace_trace_function);

        return 0;
}

static void ftrace_run_update_code(int command)
{
        stop_machine(__ftrace_modify_code, &command, NULL);
}

static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;
static DEFINE_MUTEX(ftrace_start_lock);

static void ftrace_startup(void)
{
        int command = 0;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftrace_start_lock);
        ftrace_start_up++;
        if (ftrace_start_up == 1)
                command |= FTRACE_ENABLE_CALLS;

        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        if (!command || !ftrace_enabled)
                goto out;

        ftrace_run_update_code(command);
 out:
        mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown(void)
{
        int command = 0;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftrace_start_lock);
        ftrace_start_up--;
        if (!ftrace_start_up)
                command |= FTRACE_DISABLE_CALLS;

        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        if (!command || !ftrace_enabled)
                goto out;

        ftrace_run_update_code(command);
 out:
        mutex_unlock(&ftrace_start_lock);
}
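
/*
 * Illustrative pairing (hypothetical callers): the ftrace_start_up
 * counter lets several tracers nest, so call sites are only patched
 * on the first start and only restored to nops on the last stop:
 *
 *      ftrace_startup();       // 0 -> 1: issues FTRACE_ENABLE_CALLS
 *      ftrace_startup();       // 1 -> 2: no re-patching
 *      ftrace_shutdown();      // 2 -> 1: calls stay enabled
 *      ftrace_shutdown();      // 1 -> 0: issues FTRACE_DISABLE_CALLS
 */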

static void ftrace_startup_sysctl(void)
{
        int command = FTRACE_ENABLE_MCOUNT;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftrace_start_lock);
        /* Force update next time */
        saved_ftrace_func = NULL;
        /* ftrace_start_up is true if we want ftrace running */
        if (ftrace_start_up)
                command |= FTRACE_ENABLE_CALLS;

        ftrace_run_update_code(command);
        mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown_sysctl(void)
{
        int command = FTRACE_DISABLE_MCOUNT;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftrace_start_lock);
        /* ftrace_start_up is true if ftrace is running */
        if (ftrace_start_up)
                command |= FTRACE_DISABLE_CALLS;

        ftrace_run_update_code(command);
        mutex_unlock(&ftrace_start_lock);
}

static cycle_t          ftrace_update_time;
static unsigned long    ftrace_update_cnt;
unsigned long           ftrace_update_tot_cnt;

static int ftrace_update_code(void)
{
        struct dyn_ftrace *p, *t;
        cycle_t start, stop;

        start = ftrace_now(raw_smp_processor_id());
        ftrace_update_cnt = 0;

        list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {

                /* If something went wrong, bail without enabling anything */
                if (unlikely(ftrace_disabled))
                        return -1;

                list_del_init(&p->list);

                /* convert record (i.e, patch mcount-call with NOP) */
                if (ftrace_code_disable(p)) {
                        p->flags |= FTRACE_FL_CONVERTED;
                        ftrace_update_cnt++;
                } else
                        ftrace_free_rec(p);
        }

        stop = ftrace_now(raw_smp_processor_id());
        ftrace_update_time = stop - start;
        ftrace_update_tot_cnt += ftrace_update_cnt;

        return 0;
}

static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
        struct ftrace_page *pg;
        int cnt;
        int i;

        /* allocate a few pages */
        ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
        if (!ftrace_pages_start)
                return -1;

        /*
         * Allocate a few more pages.
         *
         * TODO: have some parser search vmlinux before
         *   final linking to find all calls to ftrace.
         *   Then we can:
         *    a) know how many pages to allocate.
         *     and/or
         *    b) set up the table then.
         *
         *  The dynamic code is still necessary for
         *  modules.
         */

        pg = ftrace_pages = ftrace_pages_start;

        cnt = num_to_init / ENTRIES_PER_PAGE;
        pr_info("ftrace: allocating %ld entries in %d pages\n",
                num_to_init, cnt);

        for (i = 0; i < cnt; i++) {
                pg->next = (void *)get_zeroed_page(GFP_KERNEL);

                /* If we fail, we'll try later anyway */
                if (!pg->next)
                        break;

                pg = pg->next;
        }

        return 0;
}

enum {
        FTRACE_ITER_FILTER      = (1 << 0),
        FTRACE_ITER_CONT        = (1 << 1),
        FTRACE_ITER_NOTRACE     = (1 << 2),
        FTRACE_ITER_FAILURES    = (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
        loff_t                  pos;
        struct ftrace_page      *pg;
        unsigned                idx;
        unsigned                flags;
        unsigned char           buffer[FTRACE_BUFF_MAX+1];
        unsigned                buffer_idx;
        unsigned                filtered;
};

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        struct dyn_ftrace *rec = NULL;

        (*pos)++;

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);
 retry:
        if (iter->idx >= iter->pg->index) {
                if (iter->pg->next) {
                        iter->pg = iter->pg->next;
                        iter->idx = 0;
                        goto retry;
                }
        } else {
                rec = &iter->pg->records[iter->idx++];
                if ((rec->flags & FTRACE_FL_FREE) ||

                    (!(iter->flags & FTRACE_ITER_FAILURES) &&
                     (rec->flags & FTRACE_FL_FAILED)) ||

                    ((iter->flags & FTRACE_ITER_FAILURES) &&
                     !(rec->flags & FTRACE_FL_FAILED)) ||

                    ((iter->flags & FTRACE_ITER_FILTER) &&
                     !(rec->flags & FTRACE_FL_FILTER)) ||

                    ((iter->flags & FTRACE_ITER_NOTRACE) &&
                     !(rec->flags & FTRACE_FL_NOTRACE))) {
                        rec = NULL;
                        goto retry;
                }
        }
        spin_unlock(&ftrace_lock);

        iter->pos = *pos;

        return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        void *p = NULL;
        loff_t l = -1;

        if (*pos != iter->pos) {
                for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
                        ;
        } else {
                l = *pos;
                p = t_next(m, p, &l);
        }

        return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
        struct dyn_ftrace *rec = v;
        char str[KSYM_SYMBOL_LEN];

        if (!rec)
                return 0;

        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

        seq_printf(m, "%s\n", str);

        return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
        .start = t_start,
        .next = t_next,
        .stop = t_stop,
        .show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
        struct ftrace_iterator *iter;
        int ret;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        iter->pg = ftrace_pages_start;
        iter->pos = -1;

        ret = seq_open(file, &show_ftrace_seq_ops);
        if (!ret) {
                struct seq_file *m = file->private_data;

                m->private = iter;
        } else {
                kfree(iter);
        }

        return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct ftrace_iterator *iter = m->private;

        seq_release(inode, file);
        kfree(iter);

        return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
        int ret;
        struct seq_file *m;
        struct ftrace_iterator *iter;

        ret = ftrace_avail_open(inode, file);
        if (!ret) {
                m = (struct seq_file *)file->private_data;
                iter = (struct ftrace_iterator *)m->private;
                iter->flags = FTRACE_ITER_FAILURES;
        }

        return ret;
}

static void ftrace_filter_reset(int enable)
{
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
        unsigned i;

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);
        if (enable)
                ftrace_filtered = 0;
        pg = ftrace_pages_start;
        while (pg) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;
                        rec->flags &= ~type;
                }
                pg = pg->next;
        }
        spin_unlock(&ftrace_lock);
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
        struct ftrace_iterator *iter;
        int ret = 0;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        mutex_lock(&ftrace_regex_lock);
        if ((file->f_mode & FMODE_WRITE) &&
            !(file->f_flags & O_APPEND))
                ftrace_filter_reset(enable);

        if (file->f_mode & FMODE_READ) {
                iter->pg = ftrace_pages_start;
                iter->pos = -1;
                iter->flags = enable ? FTRACE_ITER_FILTER :
                        FTRACE_ITER_NOTRACE;

                ret = seq_open(file, &show_ftrace_seq_ops);
                if (!ret) {
                        struct seq_file *m = file->private_data;
                        m->private = iter;
                } else
                        kfree(iter);
        } else
                file->private_data = iter;
        mutex_unlock(&ftrace_regex_lock);

        return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
        return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
        return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
                       size_t cnt, loff_t *ppos)
{
        if (file->f_mode & FMODE_READ)
                return seq_read(file, ubuf, cnt, ppos);
        else
                return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
        loff_t ret;

        if (file->f_mode & FMODE_READ)
                ret = seq_lseek(file, offset, origin);
        else
                file->f_pos = ret = 1;

        return ret;
}

enum {
        MATCH_FULL,
        MATCH_FRONT_ONLY,
        MATCH_MIDDLE_ONLY,
        MATCH_END_ONLY,
};

static void
ftrace_match(unsigned char *buff, int len, int enable)
{
        char str[KSYM_SYMBOL_LEN];
        char *search = NULL;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        int type = MATCH_FULL;
        unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
        unsigned i, match = 0, search_len = 0;

        for (i = 0; i < len; i++) {
                if (buff[i] == '*') {
                        if (!i) {
                                search = buff + i + 1;
                                type = MATCH_END_ONLY;
                                search_len = len - (i + 1);
                        } else {
                                if (type == MATCH_END_ONLY) {
                                        type = MATCH_MIDDLE_ONLY;
                                } else {
                                        match = i;
                                        type = MATCH_FRONT_ONLY;
                                }
                                buff[i] = 0;
                                break;
                        }
                }
        }

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);
        if (enable)
                ftrace_filtered = 1;
        pg = ftrace_pages_start;
        while (pg) {
                for (i = 0; i < pg->index; i++) {
                        int matched = 0;
                        char *ptr;

                        rec = &pg->records[i];
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;
                        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
                        switch (type) {
                        case MATCH_FULL:
                                if (strcmp(str, buff) == 0)
                                        matched = 1;
                                break;
                        case MATCH_FRONT_ONLY:
                                if (memcmp(str, buff, match) == 0)
                                        matched = 1;
                                break;
                        case MATCH_MIDDLE_ONLY:
                                if (strstr(str, search))
                                        matched = 1;
                                break;
                        case MATCH_END_ONLY:
                                ptr = strstr(str, search);
                                if (ptr && (ptr[search_len] == 0))
                                        matched = 1;
                                break;
                        }
                        if (matched)
                                rec->flags |= flag;
                }
                pg = pg->next;
        }
        spin_unlock(&ftrace_lock);
}
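
/*
 * Pattern examples for the parser above (illustrative): a single '*'
 * is honored at the start, the end, or both ends of the string.
 *
 *      "schedule"      MATCH_FULL              exact symbol name
 *      "sched*"        MATCH_FRONT_ONLY        starts with "sched"
 *      "*idle"         MATCH_END_ONLY          ends in "idle"
 *      "*lock*"        MATCH_MIDDLE_ONLY       contains "lock"
 */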

static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
                   size_t cnt, loff_t *ppos, int enable)
{
        struct ftrace_iterator *iter;
        char ch;
        size_t read = 0;
        ssize_t ret;

        if (!cnt)
                return 0;

        mutex_lock(&ftrace_regex_lock);

        if (file->f_mode & FMODE_READ) {
                struct seq_file *m = file->private_data;
                iter = m->private;
        } else
                iter = file->private_data;

        if (!*ppos) {
                iter->flags &= ~FTRACE_ITER_CONT;
                iter->buffer_idx = 0;
        }

        ret = get_user(ch, ubuf++);
        if (ret)
                goto out;
        read++;
        cnt--;

        if (!(iter->flags & FTRACE_ITER_CONT)) {
                /* skip white space */
                while (cnt && isspace(ch)) {
                        ret = get_user(ch, ubuf++);
                        if (ret)
                                goto out;
                        read++;
                        cnt--;
                }

                if (isspace(ch)) {
                        file->f_pos += read;
                        ret = read;
                        goto out;
                }

                iter->buffer_idx = 0;
        }

        while (cnt && !isspace(ch)) {
                if (iter->buffer_idx < FTRACE_BUFF_MAX)
                        iter->buffer[iter->buffer_idx++] = ch;
                else {
                        ret = -EINVAL;
                        goto out;
                }
                ret = get_user(ch, ubuf++);
                if (ret)
                        goto out;
                read++;
                cnt--;
        }

        if (isspace(ch)) {
                iter->filtered++;
                iter->buffer[iter->buffer_idx] = 0;
                ftrace_match(iter->buffer, iter->buffer_idx, enable);
                iter->buffer_idx = 0;
        } else
                iter->flags |= FTRACE_ITER_CONT;

        file->f_pos += read;

        ret = read;
 out:
        mutex_unlock(&ftrace_regex_lock);

        return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
                    size_t cnt, loff_t *ppos)
{
        return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftrace_regex_lock);
        if (reset)
                ftrace_filter_reset(enable);
        if (buf)
                ftrace_match(buf, len, enable);
        mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
        ftrace_set_regex(buf, len, reset, 1);
}
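
/*
 * Usage sketch (hypothetical caller): restrict tracing to scheduler
 * functions before the function tracer is started. The same wildcard
 * rules as ftrace_match() apply:
 *
 *      ftrace_set_filter("sched*", strlen("sched*"), 1);
 */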

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
        ftrace_set_regex(buf, len, reset, 0);
}

static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct ftrace_iterator *iter;

        mutex_lock(&ftrace_regex_lock);
        if (file->f_mode & FMODE_READ) {
                iter = m->private;

                seq_release(inode, file);
        } else
                iter = file->private_data;

        if (iter->buffer_idx) {
                iter->filtered++;
                iter->buffer[iter->buffer_idx] = 0;
                ftrace_match(iter->buffer, iter->buffer_idx, enable);
        }

        mutex_lock(&ftrace_sysctl_lock);
        mutex_lock(&ftrace_start_lock);
        if (iter->filtered && ftrace_start_up && ftrace_enabled)
                ftrace_run_update_code(FTRACE_ENABLE_CALLS);
        mutex_unlock(&ftrace_start_lock);
        mutex_unlock(&ftrace_sysctl_lock);

        kfree(iter);
        mutex_unlock(&ftrace_regex_lock);
        return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
        return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
        return ftrace_regex_release(inode, file, 0);
}

static struct file_operations ftrace_avail_fops = {
        .open = ftrace_avail_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
        .open = ftrace_failures_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
        .open = ftrace_filter_open,
        .read = ftrace_regex_read,
        .write = ftrace_filter_write,
        .llseek = ftrace_regex_lseek,
        .release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
        .open = ftrace_notrace_open,
        .read = ftrace_regex_read,
        .write = ftrace_notrace_write,
        .llseek = ftrace_regex_lseek,
        .release = ftrace_notrace_release,
};

static __init int ftrace_init_debugfs(void)
{
        struct dentry *d_tracer;
        struct dentry *entry;

        d_tracer = tracing_init_dentry();

        entry = debugfs_create_file("available_filter_functions", 0444,
                                    d_tracer, NULL, &ftrace_avail_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'available_filter_functions' entry\n");

        entry = debugfs_create_file("failures", 0444,
                                    d_tracer, NULL, &ftrace_failures_fops);
        if (!entry)
                pr_warning("Could not create debugfs 'failures' entry\n");

        entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
                                    NULL, &ftrace_filter_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'set_ftrace_filter' entry\n");

        entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
                                    NULL, &ftrace_notrace_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'set_ftrace_notrace' entry\n");

        return 0;
}

fs_initcall(ftrace_init_debugfs);
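
/*
 * Userspace sketch (assuming debugfs is mounted at /sys/kernel/debug;
 * the tracing directory is created by tracing_init_dentry() above):
 *
 *      # cat /sys/kernel/debug/tracing/available_filter_functions
 *      # echo 'sched*'  > /sys/kernel/debug/tracing/set_ftrace_filter
 *      # echo 'do_IRQ' >> /sys/kernel/debug/tracing/set_ftrace_notrace
 *
 * Opening a filter file for plain writing resets the set first;
 * appending with O_APPEND ('>>') adds to the existing set instead.
 */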

static int ftrace_convert_nops(unsigned long *start,
                               unsigned long *end)
{
        unsigned long *p;
        unsigned long addr;
        unsigned long flags;

        mutex_lock(&ftrace_start_lock);
        p = start;
        while (p < end) {
                addr = ftrace_call_adjust(*p++);
                ftrace_record_ip(addr);
        }

        /* disable interrupts to prevent kstop machine */
        local_irq_save(flags);
        ftrace_update_code();
        local_irq_restore(flags);
        mutex_unlock(&ftrace_start_lock);

        return 0;
}

void ftrace_init_module(unsigned long *start, unsigned long *end)
{
        if (ftrace_disabled || start == end)
                return;
        ftrace_convert_nops(start, end);
}

extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_init(void)
{
        unsigned long count, addr, flags;
        int ret;

        /* Keep the ftrace pointer to the stub */
        addr = (unsigned long)ftrace_stub;

        local_irq_save(flags);
        ftrace_dyn_arch_init(&addr);
        local_irq_restore(flags);

        /* ftrace_dyn_arch_init places the return code in addr */
        if (addr)
                goto failed;

        count = __stop_mcount_loc - __start_mcount_loc;

        ret = ftrace_dyn_table_alloc(count);
        if (ret)
                goto failed;

        last_ftrace_enabled = ftrace_enabled = 1;

        ret = ftrace_convert_nops(__start_mcount_loc,
                                  __stop_mcount_loc);

        return;
 failed:
        ftrace_disabled = 1;
}

#else

static int __init ftrace_nodyn_init(void)
{
        ftrace_enabled = 1;
        return 0;
}
device_initcall(ftrace_nodyn_init);

# define ftrace_startup()               do { } while (0)
# define ftrace_shutdown()              do { } while (0)
# define ftrace_startup_sysctl()        do { } while (0)
# define ftrace_shutdown_sysctl()       do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way: everything is simply disabled and the
 * trace function is pointed back at the stub, with no clean
 * shutdown of the registered tracers.
 */
void ftrace_kill(void)
{
        ftrace_disabled = 1;
        ftrace_enabled = 0;
        clear_ftrace_function();
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        if (unlikely(ftrace_disabled))
                return -1;

        mutex_lock(&ftrace_sysctl_lock);
        ret = __register_ftrace_function(ops);
        ftrace_startup();
        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}
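
/*
 * Usage sketch (hypothetical tracer, not part of this file): a
 * minimal callback registered through the API above. The callback
 * and everything it calls must be notrace, as the kernel-doc on
 * register_ftrace_function() warns.
 *
 *      static void notrace my_trace_func(unsigned long ip,
 *                                        unsigned long parent_ip)
 *      {
 *              // record ip/parent_ip somewhere; must not recurse
 *      }
 *
 *      static struct ftrace_ops my_ops __read_mostly = {
 *              .func = my_trace_func,
 *      };
 *
 *      register_ftrace_function(&my_ops);
 *      ...
 *      unregister_ftrace_function(&my_ops);
 */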

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        mutex_lock(&ftrace_sysctl_lock);
        ret = __unregister_ftrace_function(ops);
        ftrace_shutdown();
        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
                     struct file *file, void __user *buffer, size_t *lenp,
                     loff_t *ppos)
{
        int ret;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        mutex_lock(&ftrace_sysctl_lock);

        ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

        if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
                goto out;

        last_ftrace_enabled = ftrace_enabled;

        if (ftrace_enabled) {

                ftrace_startup_sysctl();

                /* we are starting ftrace again */
                if (ftrace_list != &ftrace_list_end) {
                        if (ftrace_list->next == &ftrace_list_end)
                                ftrace_trace_function = ftrace_list->func;
                        else
                                ftrace_trace_function = ftrace_list_func;
                }

        } else {
                /* stopping ftrace calls (just send to ftrace_stub) */
                ftrace_trace_function = ftrace_stub;

                ftrace_shutdown_sysctl();
        }

 out:
        mutex_unlock(&ftrace_sysctl_lock);
        return ret;
}

#ifdef CONFIG_FUNCTION_RET_TRACER
trace_function_return_t ftrace_function_return =
                        (trace_function_return_t)ftrace_stub;
void register_ftrace_return(trace_function_return_t func)
{
        ftrace_function_return = func;
}

void unregister_ftrace_return(void)
{
        ftrace_function_return = (trace_function_return_t)ftrace_stub;
}
#endif