tracing: Move raw_init from events to class
kernel/trace/trace_kprobe.c (pandora-kernel.git)
1 /*
2  * Kprobes-based tracing events
3  *
4  * Created by Masami Hiramatsu <mhiramat@redhat.com>
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, write to the Free Software
17  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
18  */
19
20 #include <linux/module.h>
21 #include <linux/uaccess.h>
22 #include <linux/kprobes.h>
23 #include <linux/seq_file.h>
24 #include <linux/slab.h>
25 #include <linux/smp.h>
26 #include <linux/debugfs.h>
27 #include <linux/types.h>
28 #include <linux/string.h>
29 #include <linux/ctype.h>
30 #include <linux/ptrace.h>
31 #include <linux/perf_event.h>
32
33 #include "trace.h"
34 #include "trace_output.h"
35
36 #define MAX_TRACE_ARGS 128
37 #define MAX_ARGSTR_LEN 63
38 #define MAX_EVENT_NAME_LEN 64
39 #define KPROBE_EVENT_SYSTEM "kprobes"
40
41 /* Reserved field names */
42 #define FIELD_STRING_IP "__probe_ip"
43 #define FIELD_STRING_NARGS "__probe_nargs"
44 #define FIELD_STRING_RETIP "__probe_ret_ip"
45 #define FIELD_STRING_FUNC "__probe_func"
46
47 const char *reserved_field_names[] = {
48         "common_type",
49         "common_flags",
50         "common_preempt_count",
51         "common_pid",
52         "common_tgid",
53         "common_lock_depth",
54         FIELD_STRING_IP,
55         FIELD_STRING_NARGS,
56         FIELD_STRING_RETIP,
57         FIELD_STRING_FUNC,
58 };
59
60 struct fetch_func {
61         unsigned long (*func)(struct pt_regs *, void *);
62         void *data;
63 };
64
65 static __kprobes unsigned long call_fetch(struct fetch_func *f,
66                                           struct pt_regs *regs)
67 {
68         return f->func(regs, f->data);
69 }
70
71 /* fetch handlers */
72 static __kprobes unsigned long fetch_register(struct pt_regs *regs,
73                                               void *offset)
74 {
75         return regs_get_register(regs, (unsigned int)((unsigned long)offset));
76 }
77
78 static __kprobes unsigned long fetch_stack(struct pt_regs *regs,
79                                            void *num)
80 {
81         return regs_get_kernel_stack_nth(regs,
82                                          (unsigned int)((unsigned long)num));
83 }
84
85 static __kprobes unsigned long fetch_memory(struct pt_regs *regs, void *addr)
86 {
87         unsigned long retval;
88
89         if (probe_kernel_address(addr, retval))
90                 return 0;
91         return retval;
92 }
93
94 static __kprobes unsigned long fetch_retvalue(struct pt_regs *regs,
95                                               void *dummy)
96 {
97         return regs_return_value(regs);
98 }
99
100 static __kprobes unsigned long fetch_stack_address(struct pt_regs *regs,
101                                                    void *dummy)
102 {
103         return kernel_stack_pointer(regs);
104 }
105
106 /* Memory fetching by symbol */
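/*
 * Note: the symbol address is resolved once via kallsyms when the probe
 * is defined (see alloc_symbol_cache()/update_symbol_cache() below) and
 * cached; fetch_symbol() then simply reads memory at the cached address.
 */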
107 struct symbol_cache {
108         char *symbol;
109         long offset;
110         unsigned long addr;
111 };
112
113 static unsigned long update_symbol_cache(struct symbol_cache *sc)
114 {
115         sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol);
116         if (sc->addr)
117                 sc->addr += sc->offset;
118         return sc->addr;
119 }
120
121 static void free_symbol_cache(struct symbol_cache *sc)
122 {
123         kfree(sc->symbol);
124         kfree(sc);
125 }
126
127 static struct symbol_cache *alloc_symbol_cache(const char *sym, long offset)
128 {
129         struct symbol_cache *sc;
130
131         if (!sym || strlen(sym) == 0)
132                 return NULL;
133         sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL);
134         if (!sc)
135                 return NULL;
136
137         sc->symbol = kstrdup(sym, GFP_KERNEL);
138         if (!sc->symbol) {
139                 kfree(sc);
140                 return NULL;
141         }
142         sc->offset = offset;
143
144         update_symbol_cache(sc);
145         return sc;
146 }
147
148 static __kprobes unsigned long fetch_symbol(struct pt_regs *regs, void *data)
149 {
150         struct symbol_cache *sc = data;
151
152         if (sc->addr)
153                 return fetch_memory(regs, (void *)sc->addr);
154         else
155                 return 0;
156 }
157
158 /* Special indirect memory access interface */
159 struct indirect_fetch_data {
160         struct fetch_func orig;
161         long offset;
162 };
163
164 static __kprobes unsigned long fetch_indirect(struct pt_regs *regs, void *data)
165 {
166         struct indirect_fetch_data *ind = data;
167         unsigned long addr;
168
169         addr = call_fetch(&ind->orig, regs);
170         if (addr) {
171                 addr += ind->offset;
172                 return fetch_memory(regs, (void *)addr);
173         } else
174                 return 0;
175 }
176
177 static __kprobes void free_indirect_fetch_data(struct indirect_fetch_data *data)
178 {
179         if (data->orig.func == fetch_indirect)
180                 free_indirect_fetch_data(data->orig.data);
181         else if (data->orig.func == fetch_symbol)
182                 free_symbol_cache(data->orig.data);
183         kfree(data);
184 }
185
186 /*
187  * Kprobe event core functions
188  */
189
190 struct probe_arg {
191         struct fetch_func       fetch;
192         const char              *name;
193 };
194
195 /* Flags for trace_probe */
196 #define TP_FLAG_TRACE   1
197 #define TP_FLAG_PROFILE 2
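/*
 * A probe stays armed while either flag is set; the underlying k(ret)probe
 * is disabled only once both the ftrace and perf users are gone (see
 * probe_event_disable()/probe_perf_disable() below).
 */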
198
199 struct trace_probe {
200         struct list_head        list;
201         struct kretprobe        rp;     /* Use rp.kp for kprobe use */
202         unsigned long           nhit;
203         unsigned int            flags;  /* For TP_FLAG_* */
204         const char              *symbol;        /* symbol name */
205         struct ftrace_event_class       class;
206         struct ftrace_event_call        call;
207         struct trace_event              event;
208         unsigned int            nr_args;
209         struct probe_arg        args[];
210 };
211
212 #define SIZEOF_TRACE_PROBE(n)                   \
213         (offsetof(struct trace_probe, args) +   \
214         (sizeof(struct probe_arg) * (n)))
215
216 static __kprobes int probe_is_return(struct trace_probe *tp)
217 {
218         return tp->rp.handler != NULL;
219 }
220
221 static __kprobes const char *probe_symbol(struct trace_probe *tp)
222 {
223         return tp->symbol ? tp->symbol : "unknown";
224 }
225
226 static int probe_arg_string(char *buf, size_t n, struct fetch_func *ff)
227 {
228         int ret = -EINVAL;
229
230         if (ff->func == fetch_register) {
231                 const char *name;
232                 name = regs_query_register_name((unsigned int)((long)ff->data));
233                 ret = snprintf(buf, n, "%%%s", name);
234         } else if (ff->func == fetch_stack)
235                 ret = snprintf(buf, n, "$stack%lu", (unsigned long)ff->data);
236         else if (ff->func == fetch_memory)
237                 ret = snprintf(buf, n, "@0x%p", ff->data);
238         else if (ff->func == fetch_symbol) {
239                 struct symbol_cache *sc = ff->data;
240                 if (sc->offset)
241                         ret = snprintf(buf, n, "@%s%+ld", sc->symbol,
242                                         sc->offset);
243                 else
244                         ret = snprintf(buf, n, "@%s", sc->symbol);
245         } else if (ff->func == fetch_retvalue)
246                 ret = snprintf(buf, n, "$retval");
247         else if (ff->func == fetch_stack_address)
248                 ret = snprintf(buf, n, "$stack");
249         else if (ff->func == fetch_indirect) {
250                 struct indirect_fetch_data *id = ff->data;
251                 size_t l = 0;
252                 ret = snprintf(buf, n, "%+ld(", id->offset);
253                 if (ret >= n)
254                         goto end;
255                 l += ret;
256                 ret = probe_arg_string(buf + l, n - l, &id->orig);
257                 if (ret < 0)
258                         goto end;
259                 l += ret;
260                 ret = snprintf(buf + l, n - l, ")");
261                 ret += l;
262         }
263 end:
264         if (ret >= n)
265                 return -ENOSPC;
266         return ret;
267 }
268
269 static int register_probe_event(struct trace_probe *tp);
270 static void unregister_probe_event(struct trace_probe *tp);
271
272 static DEFINE_MUTEX(probe_lock);
273 static LIST_HEAD(probe_list);
274
275 static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
276 static int kretprobe_dispatcher(struct kretprobe_instance *ri,
277                                 struct pt_regs *regs);
278
279 /* Check whether the name is valid as an event/group name */
280 static int check_event_name(const char *name)
281 {
282         if (!isalpha(*name) && *name != '_')
283                 return 0;
284         while (*++name != '\0') {
285                 if (!isalpha(*name) && !isdigit(*name) && *name != '_')
286                         return 0;
287         }
288         return 1;
289 }
290
291 /*
292  * Allocate new trace_probe and initialize it (including kprobes).
293  */
294 static struct trace_probe *alloc_trace_probe(const char *group,
295                                              const char *event,
296                                              void *addr,
297                                              const char *symbol,
298                                              unsigned long offs,
299                                              int nargs, int is_return)
300 {
301         struct trace_probe *tp;
302         int ret = -ENOMEM;
303
304         tp = kzalloc(SIZEOF_TRACE_PROBE(nargs), GFP_KERNEL);
305         if (!tp)
306                 return ERR_PTR(ret);
307
308         if (symbol) {
309                 tp->symbol = kstrdup(symbol, GFP_KERNEL);
310                 if (!tp->symbol)
311                         goto error;
312                 tp->rp.kp.symbol_name = tp->symbol;
313                 tp->rp.kp.offset = offs;
314         } else
315                 tp->rp.kp.addr = addr;
316
317         if (is_return)
318                 tp->rp.handler = kretprobe_dispatcher;
319         else
320                 tp->rp.kp.pre_handler = kprobe_dispatcher;
321
322         if (!event || !check_event_name(event)) {
323                 ret = -EINVAL;
324                 goto error;
325         }
326
327         tp->call.class = &tp->class;
328         tp->call.name = kstrdup(event, GFP_KERNEL);
329         if (!tp->call.name)
330                 goto error;
331
332         if (!group || !check_event_name(group)) {
333                 ret = -EINVAL;
334                 goto error;
335         }
336
337         tp->class.system = kstrdup(group, GFP_KERNEL);
338         if (!tp->class.system)
339                 goto error;
340
341         INIT_LIST_HEAD(&tp->list);
342         return tp;
343 error:
344         kfree(tp->call.name);
345         kfree(tp->symbol);
346         kfree(tp);
347         return ERR_PTR(ret);
348 }
349
350 static void free_probe_arg(struct probe_arg *arg)
351 {
352         if (arg->fetch.func == fetch_symbol)
353                 free_symbol_cache(arg->fetch.data);
354         else if (arg->fetch.func == fetch_indirect)
355                 free_indirect_fetch_data(arg->fetch.data);
356         kfree(arg->name);
357 }
358
359 static void free_trace_probe(struct trace_probe *tp)
360 {
361         int i;
362
363         for (i = 0; i < tp->nr_args; i++)
364                 free_probe_arg(&tp->args[i]);
365
366         kfree(tp->call.class->system);
367         kfree(tp->call.name);
368         kfree(tp->symbol);
369         kfree(tp);
370 }
371
372 static struct trace_probe *find_probe_event(const char *event,
373                                             const char *group)
374 {
375         struct trace_probe *tp;
376
377         list_for_each_entry(tp, &probe_list, list)
378                 if (strcmp(tp->call.name, event) == 0 &&
379                     strcmp(tp->call.class->system, group) == 0)
380                         return tp;
381         return NULL;
382 }
383
384 /* Unregister a trace_probe and probe_event: call with probe_lock held */
385 static void unregister_trace_probe(struct trace_probe *tp)
386 {
387         if (probe_is_return(tp))
388                 unregister_kretprobe(&tp->rp);
389         else
390                 unregister_kprobe(&tp->rp.kp);
391         list_del(&tp->list);
392         unregister_probe_event(tp);
393 }
394
395 /* Register a trace_probe and probe_event */
396 static int register_trace_probe(struct trace_probe *tp)
397 {
398         struct trace_probe *old_tp;
399         int ret;
400
401         mutex_lock(&probe_lock);
402
403         /* register as an event */
404         old_tp = find_probe_event(tp->call.name, tp->call.class->system);
405         if (old_tp) {
406                 /* delete old event */
407                 unregister_trace_probe(old_tp);
408                 free_trace_probe(old_tp);
409         }
410         ret = register_probe_event(tp);
411         if (ret) {
412                 pr_warning("Failed to register probe event(%d)\n", ret);
413                 goto end;
414         }
415
416         tp->rp.kp.flags |= KPROBE_FLAG_DISABLED;
417         if (probe_is_return(tp))
418                 ret = register_kretprobe(&tp->rp);
419         else
420                 ret = register_kprobe(&tp->rp.kp);
421
422         if (ret) {
423                 pr_warning("Could not insert probe(%d)\n", ret);
424                 if (ret == -EILSEQ) {
425                         pr_warning("Probing address(0x%p) is not an "
426                                    "instruction boundary.\n",
427                                    tp->rp.kp.addr);
428                         ret = -EINVAL;
429                 }
430                 unregister_probe_event(tp);
431         } else
432                 list_add_tail(&tp->list, &probe_list);
433 end:
434         mutex_unlock(&probe_lock);
435         return ret;
436 }
437
438 /* Split symbol and offset. */
439 static int split_symbol_offset(char *symbol, unsigned long *offset)
440 {
441         char *tmp;
442         int ret;
443
444         if (!offset)
445                 return -EINVAL;
446
447         tmp = strchr(symbol, '+');
448         if (tmp) {
449                 /* skip the sign because strict_strtoul doesn't accept '+' */
450                 ret = strict_strtoul(tmp + 1, 0, offset);
451                 if (ret)
452                         return ret;
453                 *tmp = '\0';
454         } else
455                 *offset = 0;
456         return 0;
457 }
458
459 #define PARAM_MAX_ARGS 16
460 #define PARAM_MAX_STACK (THREAD_SIZE / sizeof(unsigned long))
461
462 static int parse_probe_vars(char *arg, struct fetch_func *ff, int is_return)
463 {
464         int ret = 0;
465         unsigned long param;
466
467         if (strcmp(arg, "retval") == 0) {
468                 if (is_return) {
469                         ff->func = fetch_retvalue;
470                         ff->data = NULL;
471                 } else
472                         ret = -EINVAL;
473         } else if (strncmp(arg, "stack", 5) == 0) {
474                 if (arg[5] == '\0') {
475                         ff->func = fetch_stack_address;
476                         ff->data = NULL;
477                 } else if (isdigit(arg[5])) {
478                         ret = strict_strtoul(arg + 5, 10, &param);
479                         if (ret || param > PARAM_MAX_STACK)
480                                 ret = -EINVAL;
481                         else {
482                                 ff->func = fetch_stack;
483                                 ff->data = (void *)param;
484                         }
485                 } else
486                         ret = -EINVAL;
487         } else
488                 ret = -EINVAL;
489         return ret;
490 }
491
492 /* Recursive argument parser */
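/*
 * A rough mapping of argument strings to fetch functions (illustrative
 * only; register names depend on the architecture):
 *   "%ax"        -> fetch_register
 *   "$stack3"    -> fetch_stack (3rd entry of the kernel stack)
 *   "@jiffies+4" -> fetch_symbol (memory at jiffies + 4)
 *   "+8(%ax)"    -> fetch_indirect wrapping fetch_register
 */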
493 static int __parse_probe_arg(char *arg, struct fetch_func *ff, int is_return)
494 {
495         int ret = 0;
496         unsigned long param;
497         long offset;
498         char *tmp;
499
500         switch (arg[0]) {
501         case '$':
502                 ret = parse_probe_vars(arg + 1, ff, is_return);
503                 break;
504         case '%':       /* named register */
505                 ret = regs_query_register_offset(arg + 1);
506                 if (ret >= 0) {
507                         ff->func = fetch_register;
508                         ff->data = (void *)(unsigned long)ret;
509                         ret = 0;
510                 }
511                 break;
512         case '@':       /* memory or symbol */
513                 if (isdigit(arg[1])) {
514                         ret = strict_strtoul(arg + 1, 0, &param);
515                         if (ret)
516                                 break;
517                         ff->func = fetch_memory;
518                         ff->data = (void *)param;
519                 } else {
520                         ret = split_symbol_offset(arg + 1, &offset);
521                         if (ret)
522                                 break;
523                         ff->data = alloc_symbol_cache(arg + 1, offset);
524                         if (ff->data)
525                                 ff->func = fetch_symbol;
526                         else
527                                 ret = -EINVAL;
528                 }
529                 break;
530         case '+':       /* indirect memory */
531         case '-':
532                 tmp = strchr(arg, '(');
533                 if (!tmp) {
534                         ret = -EINVAL;
535                         break;
536                 }
537                 *tmp = '\0';
538                 ret = strict_strtol(arg + 1, 0, &offset);
539                 if (ret)
540                         break;
541                 if (arg[0] == '-')
542                         offset = -offset;
543                 arg = tmp + 1;
544                 tmp = strrchr(arg, ')');
545                 if (tmp) {
546                         struct indirect_fetch_data *id;
547                         *tmp = '\0';
548                         id = kzalloc(sizeof(struct indirect_fetch_data),
549                                      GFP_KERNEL);
550                         if (!id)
551                                 return -ENOMEM;
552                         id->offset = offset;
553                         ret = __parse_probe_arg(arg, &id->orig, is_return);
554                         if (ret)
555                                 kfree(id);
556                         else {
557                                 ff->func = fetch_indirect;
558                                 ff->data = (void *)id;
559                         }
560                 } else
561                         ret = -EINVAL;
562                 break;
563         default:
564                 /* TODO: support custom handler */
565                 ret = -EINVAL;
566         }
567         return ret;
568 }
569
570 /* String length checking wrapper */
571 static int parse_probe_arg(char *arg, struct fetch_func *ff, int is_return)
572 {
573         if (strlen(arg) > MAX_ARGSTR_LEN) {
574                 pr_info("Argument is too long: %s\n", arg);
575                 return -ENOSPC;
576         }
577         return __parse_probe_arg(arg, ff, is_return);
578 }
579
580 /* Return 1 if name is reserved or already used by another argument */
581 static int conflict_field_name(const char *name,
582                                struct probe_arg *args, int narg)
583 {
584         int i;
585         for (i = 0; i < ARRAY_SIZE(reserved_field_names); i++)
586                 if (strcmp(reserved_field_names[i], name) == 0)
587                         return 1;
588         for (i = 0; i < narg; i++)
589                 if (strcmp(args[i].name, name) == 0)
590                         return 1;
591         return 0;
592 }
593
594 static int create_trace_probe(int argc, char **argv)
595 {
596         /*
597          * Argument syntax:
598          *  - Add kprobe: p[:[GRP/]EVENT] KSYM[+OFFS]|KADDR [FETCHARGS]
599          *  - Add kretprobe: r[:[GRP/]EVENT] KSYM[+0] [FETCHARGS]
600          * Fetch args:
601          *  $retval     : fetch return value
602          *  $stack      : fetch stack address
603          *  $stackN     : fetch Nth entry of stack (N >= 0)
604          *  @ADDR       : fetch memory at ADDR (ADDR should be in kernel)
605          *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
606          *  %REG        : fetch register REG
607          * Indirect memory fetch:
608          *  +|-offs(ARG) : fetch memory at address ARG +|- offs.
609          * Alias name of args:
610          *  NAME=FETCHARG : set NAME as alias of FETCHARG.
611          */
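        /*
         * Usage sketch (written to <debugfs>/tracing/kprobe_events; the
         * probe names, symbols and registers below are only examples):
         *   p:myprobe do_sys_open dfd=%ax mode=+4($stack)
         *   r:myretprobe do_sys_open $retval
         *   -:myprobe
         */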
612         struct trace_probe *tp;
613         int i, ret = 0;
614         int is_return = 0, is_delete = 0;
615         char *symbol = NULL, *event = NULL, *arg = NULL, *group = NULL;
616         unsigned long offset = 0;
617         void *addr = NULL;
618         char buf[MAX_EVENT_NAME_LEN];
619
620         /* argc must be >= 1 */
621         if (argv[0][0] == 'p')
622                 is_return = 0;
623         else if (argv[0][0] == 'r')
624                 is_return = 1;
625         else if (argv[0][0] == '-')
626                 is_delete = 1;
627         else {
628                 pr_info("Probe definition must start with 'p', 'r' or"
629                         " '-'.\n");
630                 return -EINVAL;
631         }
632
633         if (argv[0][1] == ':') {
634                 event = &argv[0][2];
635                 if (strchr(event, '/')) {
636                         group = event;
637                         event = strchr(group, '/') + 1;
638                         event[-1] = '\0';
639                         if (strlen(group) == 0) {
640                                 pr_info("Group name is not specified\n");
641                                 return -EINVAL;
642                         }
643                 }
644                 if (strlen(event) == 0) {
645                         pr_info("Event name is not specified\n");
646                         return -EINVAL;
647                 }
648         }
649         if (!group)
650                 group = KPROBE_EVENT_SYSTEM;
651
652         if (is_delete) {
653                 if (!event) {
654                         pr_info("Delete command needs an event name.\n");
655                         return -EINVAL;
656                 }
657                 tp = find_probe_event(event, group);
658                 if (!tp) {
659                         pr_info("Event %s/%s doesn't exist.\n", group, event);
660                         return -ENOENT;
661                 }
662                 /* delete an event */
663                 unregister_trace_probe(tp);
664                 free_trace_probe(tp);
665                 return 0;
666         }
667
668         if (argc < 2) {
669                 pr_info("Probe point is not specified.\n");
670                 return -EINVAL;
671         }
672         if (isdigit(argv[1][0])) {
673                 if (is_return) {
674                         pr_info("Return probe point must be a symbol.\n");
675                         return -EINVAL;
676                 }
677                 /* an address specified */
678                 ret = strict_strtoul(&argv[1][0], 0, (unsigned long *)&addr);
679                 if (ret) {
680                         pr_info("Failed to parse address.\n");
681                         return ret;
682                 }
683         } else {
684                 /* a symbol specified */
685                 symbol = argv[1];
686                 /* TODO: support .init module functions */
687                 ret = split_symbol_offset(symbol, &offset);
688                 if (ret) {
689                         pr_info("Failed to parse symbol.\n");
690                         return ret;
691                 }
692                 if (offset && is_return) {
693                         pr_info("Return probe must be used without offset.\n");
694                         return -EINVAL;
695                 }
696         }
697         argc -= 2; argv += 2;
698
699         /* setup a probe */
700         if (!event) {
701                 /* Make a new event name */
702                 if (symbol)
703                         snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
704                                  is_return ? 'r' : 'p', symbol, offset);
705                 else
706                         snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
707                                  is_return ? 'r' : 'p', addr);
708                 event = buf;
709         }
710         tp = alloc_trace_probe(group, event, addr, symbol, offset, argc,
711                                is_return);
712         if (IS_ERR(tp)) {
713                 pr_info("Failed to allocate trace_probe.(%d)\n",
714                         (int)PTR_ERR(tp));
715                 return PTR_ERR(tp);
716         }
717
718         /* parse arguments */
719         ret = 0;
720         for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
721                 /* Parse argument name */
722                 arg = strchr(argv[i], '=');
723                 if (arg)
724                         *arg++ = '\0';
725                 else
726                         arg = argv[i];
727
728                 if (conflict_field_name(argv[i], tp->args, i)) {
729                         pr_info("Argument%d name '%s' conflicts with "
730                                 "another field.\n", i, argv[i]);
731                         ret = -EINVAL;
732                         goto error;
733                 }
734
735                 tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);
736                 if (!tp->args[i].name) {
737                         pr_info("Failed to allocate argument%d name '%s'.\n",
738                                 i, argv[i]);
739                         ret = -ENOMEM;
740                         goto error;
741                 }
742
743                 /* Parse fetch argument */
744                 ret = parse_probe_arg(arg, &tp->args[i].fetch, is_return);
745                 if (ret) {
746                         pr_info("Parse error at argument%d. (%d)\n", i, ret);
747                         kfree(tp->args[i].name);
748                         goto error;
749                 }
750
751                 tp->nr_args++;
752         }
753
754         ret = register_trace_probe(tp);
755         if (ret)
756                 goto error;
757         return 0;
758
759 error:
760         free_trace_probe(tp);
761         return ret;
762 }
763
764 static void cleanup_all_probes(void)
765 {
766         struct trace_probe *tp;
767
768         mutex_lock(&probe_lock);
769         /* TODO: Use batch unregistration */
770         while (!list_empty(&probe_list)) {
771                 tp = list_entry(probe_list.next, struct trace_probe, list);
772                 unregister_trace_probe(tp);
773                 free_trace_probe(tp);
774         }
775         mutex_unlock(&probe_lock);
776 }
777
778
779 /* Probes listing interfaces */
780 static void *probes_seq_start(struct seq_file *m, loff_t *pos)
781 {
782         mutex_lock(&probe_lock);
783         return seq_list_start(&probe_list, *pos);
784 }
785
786 static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
787 {
788         return seq_list_next(v, &probe_list, pos);
789 }
790
791 static void probes_seq_stop(struct seq_file *m, void *v)
792 {
793         mutex_unlock(&probe_lock);
794 }
795
796 static int probes_seq_show(struct seq_file *m, void *v)
797 {
798         struct trace_probe *tp = v;
799         int i, ret;
800         char buf[MAX_ARGSTR_LEN + 1];
801
802         seq_printf(m, "%c", probe_is_return(tp) ? 'r' : 'p');
803         seq_printf(m, ":%s/%s", tp->call.class->system, tp->call.name);
804
805         if (!tp->symbol)
806                 seq_printf(m, " 0x%p", tp->rp.kp.addr);
807         else if (tp->rp.kp.offset)
808                 seq_printf(m, " %s+%u", probe_symbol(tp), tp->rp.kp.offset);
809         else
810                 seq_printf(m, " %s", probe_symbol(tp));
811
812         for (i = 0; i < tp->nr_args; i++) {
813                 ret = probe_arg_string(buf, MAX_ARGSTR_LEN, &tp->args[i].fetch);
814                 if (ret < 0) {
815                         pr_warning("Argument%d decoding error(%d).\n", i, ret);
816                         return ret;
817                 }
818                 seq_printf(m, " %s=%s", tp->args[i].name, buf);
819         }
820         seq_printf(m, "\n");
821         return 0;
822 }
823
824 static const struct seq_operations probes_seq_op = {
825         .start  = probes_seq_start,
826         .next   = probes_seq_next,
827         .stop   = probes_seq_stop,
828         .show   = probes_seq_show
829 };
830
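/*
 * Opening the file for writing with O_TRUNC (e.g. shell '>' rather than
 * '>>') clears every existing probe before new definitions are parsed.
 */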
831 static int probes_open(struct inode *inode, struct file *file)
832 {
833         if ((file->f_mode & FMODE_WRITE) &&
834             (file->f_flags & O_TRUNC))
835                 cleanup_all_probes();
836
837         return seq_open(file, &probes_seq_op);
838 }
839
840 static int command_trace_probe(const char *buf)
841 {
842         char **argv;
843         int argc = 0, ret = 0;
844
845         argv = argv_split(GFP_KERNEL, buf, &argc);
846         if (!argv)
847                 return -ENOMEM;
848
849         if (argc)
850                 ret = create_trace_probe(argc, argv);
851
852         argv_free(argv);
853         return ret;
854 }
855
856 #define WRITE_BUFSIZE 128
857
858 static ssize_t probes_write(struct file *file, const char __user *buffer,
859                             size_t count, loff_t *ppos)
860 {
861         char *kbuf, *tmp;
862         int ret;
863         size_t done;
864         size_t size;
865
866         kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
867         if (!kbuf)
868                 return -ENOMEM;
869
870         ret = done = 0;
871         while (done < count) {
872                 size = count - done;
873                 if (size >= WRITE_BUFSIZE)
874                         size = WRITE_BUFSIZE - 1;
875                 if (copy_from_user(kbuf, buffer + done, size)) {
876                         ret = -EFAULT;
877                         goto out;
878                 }
879                 kbuf[size] = '\0';
880                 tmp = strchr(kbuf, '\n');
881                 if (tmp) {
882                         *tmp = '\0';
883                         size = tmp - kbuf + 1;
884                 } else if (done + size < count) {
885                         pr_warning("Line is too long: "
886                                    "should be less than %d.\n", WRITE_BUFSIZE);
887                         ret = -EINVAL;
888                         goto out;
889                 }
890                 done += size;
891                 /* Remove comments */
892                 tmp = strchr(kbuf, '#');
893                 if (tmp)
894                         *tmp = '\0';
895
896                 ret = command_trace_probe(kbuf);
897                 if (ret)
898                         goto out;
899         }
900         ret = done;
901 out:
902         kfree(kbuf);
903         return ret;
904 }
905
906 static const struct file_operations kprobe_events_ops = {
907         .owner          = THIS_MODULE,
908         .open           = probes_open,
909         .read           = seq_read,
910         .llseek         = seq_lseek,
911         .release        = seq_release,
912         .write          = probes_write,
913 };
914
915 /* Probes profiling interfaces */
916 static int probes_profile_seq_show(struct seq_file *m, void *v)
917 {
918         struct trace_probe *tp = v;
919
920         seq_printf(m, "  %-44s %15lu %15lu\n", tp->call.name, tp->nhit,
921                    tp->rp.kp.nmissed);
922
923         return 0;
924 }
925
926 static const struct seq_operations profile_seq_op = {
927         .start  = probes_seq_start,
928         .next   = probes_seq_next,
929         .stop   = probes_seq_stop,
930         .show   = probes_profile_seq_show
931 };
932
933 static int profile_open(struct inode *inode, struct file *file)
934 {
935         return seq_open(file, &profile_seq_op);
936 }
937
938 static const struct file_operations kprobe_profile_ops = {
939         .owner          = THIS_MODULE,
940         .open           = profile_open,
941         .read           = seq_read,
942         .llseek         = seq_lseek,
943         .release        = seq_release,
944 };
945
946 /* Kprobe handler */
947 static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
948 {
949         struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
950         struct kprobe_trace_entry *entry;
951         struct ring_buffer_event *event;
952         struct ring_buffer *buffer;
953         int size, i, pc;
954         unsigned long irq_flags;
955         struct ftrace_event_call *call = &tp->call;
956
957         tp->nhit++;
958
959         local_save_flags(irq_flags);
960         pc = preempt_count();
961
962         size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
963
964         event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
965                                                   irq_flags, pc);
966         if (!event)
967                 return;
968
969         entry = ring_buffer_event_data(event);
970         entry->nargs = tp->nr_args;
971         entry->ip = (unsigned long)kp->addr;
972         for (i = 0; i < tp->nr_args; i++)
973                 entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
974
975         if (!filter_current_check_discard(buffer, call, entry, event))
976                 trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
977 }
978
979 /* Kretprobe handler */
980 static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
981                                           struct pt_regs *regs)
982 {
983         struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
984         struct kretprobe_trace_entry *entry;
985         struct ring_buffer_event *event;
986         struct ring_buffer *buffer;
987         int size, i, pc;
988         unsigned long irq_flags;
989         struct ftrace_event_call *call = &tp->call;
990
991         local_save_flags(irq_flags);
992         pc = preempt_count();
993
994         size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
995
996         event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
997                                                   irq_flags, pc);
998         if (!event)
999                 return;
1000
1001         entry = ring_buffer_event_data(event);
1002         entry->nargs = tp->nr_args;
1003         entry->func = (unsigned long)tp->rp.kp.addr;
1004         entry->ret_ip = (unsigned long)ri->ret_addr;
1005         for (i = 0; i < tp->nr_args; i++)
1006                 entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
1007
1008         if (!filter_current_check_discard(buffer, call, entry, event))
1009                 trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
1010 }
1011
1012 /* Event entry printers */
1013 enum print_line_t
1014 print_kprobe_event(struct trace_iterator *iter, int flags)
1015 {
1016         struct kprobe_trace_entry *field;
1017         struct trace_seq *s = &iter->seq;
1018         struct trace_event *event;
1019         struct trace_probe *tp;
1020         int i;
1021
1022         field = (struct kprobe_trace_entry *)iter->ent;
1023         event = ftrace_find_event(field->ent.type);
1024         tp = container_of(event, struct trace_probe, event);
1025
1026         if (!trace_seq_printf(s, "%s: (", tp->call.name))
1027                 goto partial;
1028
1029         if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
1030                 goto partial;
1031
1032         if (!trace_seq_puts(s, ")"))
1033                 goto partial;
1034
1035         for (i = 0; i < field->nargs; i++)
1036                 if (!trace_seq_printf(s, " %s=%lx",
1037                                       tp->args[i].name, field->args[i]))
1038                         goto partial;
1039
1040         if (!trace_seq_puts(s, "\n"))
1041                 goto partial;
1042
1043         return TRACE_TYPE_HANDLED;
1044 partial:
1045         return TRACE_TYPE_PARTIAL_LINE;
1046 }
1047
1048 enum print_line_t
1049 print_kretprobe_event(struct trace_iterator *iter, int flags)
1050 {
1051         struct kretprobe_trace_entry *field;
1052         struct trace_seq *s = &iter->seq;
1053         struct trace_event *event;
1054         struct trace_probe *tp;
1055         int i;
1056
1057         field = (struct kretprobe_trace_entry *)iter->ent;
1058         event = ftrace_find_event(field->ent.type);
1059         tp = container_of(event, struct trace_probe, event);
1060
1061         if (!trace_seq_printf(s, "%s: (", tp->call.name))
1062                 goto partial;
1063
1064         if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
1065                 goto partial;
1066
1067         if (!trace_seq_puts(s, " <- "))
1068                 goto partial;
1069
1070         if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
1071                 goto partial;
1072
1073         if (!trace_seq_puts(s, ")"))
1074                 goto partial;
1075
1076         for (i = 0; i < field->nargs; i++)
1077                 if (!trace_seq_printf(s, " %s=%lx",
1078                                       tp->args[i].name, field->args[i]))
1079                         goto partial;
1080
1081         if (!trace_seq_puts(s, "\n"))
1082                 goto partial;
1083
1084         return TRACE_TYPE_HANDLED;
1085 partial:
1086         return TRACE_TYPE_PARTIAL_LINE;
1087 }
1088
1089 static int probe_event_enable(struct ftrace_event_call *call)
1090 {
1091         struct trace_probe *tp = (struct trace_probe *)call->data;
1092
1093         tp->flags |= TP_FLAG_TRACE;
1094         if (probe_is_return(tp))
1095                 return enable_kretprobe(&tp->rp);
1096         else
1097                 return enable_kprobe(&tp->rp.kp);
1098 }
1099
1100 static void probe_event_disable(struct ftrace_event_call *call)
1101 {
1102         struct trace_probe *tp = (struct trace_probe *)call->data;
1103
1104         tp->flags &= ~TP_FLAG_TRACE;
1105         if (!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE))) {
1106                 if (probe_is_return(tp))
1107                         disable_kretprobe(&tp->rp);
1108                 else
1109                         disable_kprobe(&tp->rp.kp);
1110         }
1111 }
1112
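/* Intentionally a no-op; hooked up as call->class->raw_init below. */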
1113 static int probe_event_raw_init(struct ftrace_event_call *event_call)
1114 {
1115         return 0;
1116 }
1117
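/*
 * Register one entry field (name, offset, size, signedness) with the
 * ftrace event format/filter code; returns from the calling function
 * on error.
 */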
1118 #undef DEFINE_FIELD
1119 #define DEFINE_FIELD(type, item, name, is_signed)                       \
1120         do {                                                            \
1121                 ret = trace_define_field(event_call, #type, name,       \
1122                                          offsetof(typeof(field), item), \
1123                                          sizeof(field.item), is_signed, \
1124                                          FILTER_OTHER);                 \
1125                 if (ret)                                                \
1126                         return ret;                                     \
1127         } while (0)
1128
1129 static int kprobe_event_define_fields(struct ftrace_event_call *event_call)
1130 {
1131         int ret, i;
1132         struct kprobe_trace_entry field;
1133         struct trace_probe *tp = (struct trace_probe *)event_call->data;
1134
1135         DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
1136         DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1);
1137         /* Set argument names as fields */
1138         for (i = 0; i < tp->nr_args; i++)
1139                 DEFINE_FIELD(unsigned long, args[i], tp->args[i].name, 0);
1140         return 0;
1141 }
1142
1143 static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
1144 {
1145         int ret, i;
1146         struct kretprobe_trace_entry field;
1147         struct trace_probe *tp = (struct trace_probe *)event_call->data;
1148
1149         DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
1150         DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
1151         DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1);
1152         /* Set argument names as fields */
1153         for (i = 0; i < tp->nr_args; i++)
1154                 DEFINE_FIELD(unsigned long, args[i], tp->args[i].name, 0);
1155         return 0;
1156 }
1157
1158 static int __set_print_fmt(struct trace_probe *tp, char *buf, int len)
1159 {
1160         int i;
1161         int pos = 0;
1162
1163         const char *fmt, *arg;
1164
1165         if (!probe_is_return(tp)) {
1166                 fmt = "(%lx)";
1167                 arg = "REC->" FIELD_STRING_IP;
1168         } else {
1169                 fmt = "(%lx <- %lx)";
1170                 arg = "REC->" FIELD_STRING_FUNC ", REC->" FIELD_STRING_RETIP;
1171         }
1172
1173         /* When len=0, we just calculate the needed length */
1174 #define LEN_OR_ZERO (len ? len - pos : 0)
1175
1176         pos += snprintf(buf + pos, LEN_OR_ZERO, "\"%s", fmt);
1177
1178         for (i = 0; i < tp->nr_args; i++) {
1179                 pos += snprintf(buf + pos, LEN_OR_ZERO, " %s=%%lx",
1180                                 tp->args[i].name);
1181         }
1182
1183         pos += snprintf(buf + pos, LEN_OR_ZERO, "\", %s", arg);
1184
1185         for (i = 0; i < tp->nr_args; i++) {
1186                 pos += snprintf(buf + pos, LEN_OR_ZERO, ", REC->%s",
1187                                 tp->args[i].name);
1188         }
1189
1190 #undef LEN_OR_ZERO
1191
1192         /* return the length of print_fmt */
1193         return pos;
1194 }
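/*
 * Example of the generated format (a sketch for a probe with a single
 * argument named "dfd"):
 *   kprobe:    "(%lx) dfd=%lx", REC->__probe_ip, REC->dfd
 *   kretprobe: "(%lx <- %lx) dfd=%lx", REC->__probe_func,
 *              REC->__probe_ret_ip, REC->dfd
 */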
1195
1196 static int set_print_fmt(struct trace_probe *tp)
1197 {
1198         int len;
1199         char *print_fmt;
1200
1201         /* First: called with 0 length to calculate the needed length */
1202         len = __set_print_fmt(tp, NULL, 0);
1203         print_fmt = kmalloc(len + 1, GFP_KERNEL);
1204         if (!print_fmt)
1205                 return -ENOMEM;
1206
1207         /* Second: actually write the @print_fmt */
1208         __set_print_fmt(tp, print_fmt, len + 1);
1209         tp->call.print_fmt = print_fmt;
1210
1211         return 0;
1212 }
1213
1214 #ifdef CONFIG_PERF_EVENTS
1215
1216 /* Kprobe profile handler */
1217 static __kprobes void kprobe_perf_func(struct kprobe *kp,
1218                                          struct pt_regs *regs)
1219 {
1220         struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
1221         struct ftrace_event_call *call = &tp->call;
1222         struct kprobe_trace_entry *entry;
1223         int size, __size, i;
1224         unsigned long irq_flags;
1225         int rctx;
1226
1227         __size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
1228         size = ALIGN(__size + sizeof(u32), sizeof(u64));
1229         size -= sizeof(u32);
1230         if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
1231                      "profile buffer not large enough"))
1232                 return;
1233
1234         entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags);
1235         if (!entry)
1236                 return;
1237
1238         entry->nargs = tp->nr_args;
1239         entry->ip = (unsigned long)kp->addr;
1240         for (i = 0; i < tp->nr_args; i++)
1241                 entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
1242
1243         perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags, regs);
1244 }
1245
1246 /* Kretprobe profile handler */
1247 static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
1248                                             struct pt_regs *regs)
1249 {
1250         struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
1251         struct ftrace_event_call *call = &tp->call;
1252         struct kretprobe_trace_entry *entry;
1253         int size, __size, i;
1254         unsigned long irq_flags;
1255         int rctx;
1256
1257         __size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
1258         size = ALIGN(__size + sizeof(u32), sizeof(u64));
1259         size -= sizeof(u32);
1260         if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
1261                      "profile buffer not large enough"))
1262                 return;
1263
1264         entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags);
1265         if (!entry)
1266                 return;
1267
1268         entry->nargs = tp->nr_args;
1269         entry->func = (unsigned long)tp->rp.kp.addr;
1270         entry->ret_ip = (unsigned long)ri->ret_addr;
1271         for (i = 0; i < tp->nr_args; i++)
1272                 entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
1273
1274         perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1,
1275                                irq_flags, regs);
1276 }
1277
1278 static int probe_perf_enable(struct ftrace_event_call *call)
1279 {
1280         struct trace_probe *tp = (struct trace_probe *)call->data;
1281
1282         tp->flags |= TP_FLAG_PROFILE;
1283
1284         if (probe_is_return(tp))
1285                 return enable_kretprobe(&tp->rp);
1286         else
1287                 return enable_kprobe(&tp->rp.kp);
1288 }
1289
1290 static void probe_perf_disable(struct ftrace_event_call *call)
1291 {
1292         struct trace_probe *tp = (struct trace_probe *)call->data;
1293
1294         tp->flags &= ~TP_FLAG_PROFILE;
1295
1296         if (!(tp->flags & TP_FLAG_TRACE)) {
1297                 if (probe_is_return(tp))
1298                         disable_kretprobe(&tp->rp);
1299                 else
1300                         disable_kprobe(&tp->rp.kp);
1301         }
1302 }
1303 #endif  /* CONFIG_PERF_EVENTS */
1304
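/*
 * Single registration callback, hooked up as call->class->reg below;
 * ftrace and perf use it to arm/disarm the probe for their respective
 * consumers.
 */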
1305 static __kprobes
1306 int kprobe_register(struct ftrace_event_call *event, enum trace_reg type)
1307 {
1308         switch (type) {
1309         case TRACE_REG_REGISTER:
1310                 return probe_event_enable(event);
1311         case TRACE_REG_UNREGISTER:
1312                 probe_event_disable(event);
1313                 return 0;
1314
1315 #ifdef CONFIG_PERF_EVENTS
1316         case TRACE_REG_PERF_REGISTER:
1317                 return probe_perf_enable(event);
1318         case TRACE_REG_PERF_UNREGISTER:
1319                 probe_perf_disable(event);
1320                 return 0;
1321 #endif
1322         }
1323         return 0;
1324 }
1325
1326 static __kprobes
1327 int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
1328 {
1329         struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
1330
1331         if (tp->flags & TP_FLAG_TRACE)
1332                 kprobe_trace_func(kp, regs);
1333 #ifdef CONFIG_PERF_EVENTS
1334         if (tp->flags & TP_FLAG_PROFILE)
1335                 kprobe_perf_func(kp, regs);
1336 #endif
1337         return 0;       /* We don't tweak the kernel, so just return 0 */
1338 }
1339
1340 static __kprobes
1341 int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
1342 {
1343         struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
1344
1345         if (tp->flags & TP_FLAG_TRACE)
1346                 kretprobe_trace_func(ri, regs);
1347 #ifdef CONFIG_PERF_EVENTS
1348         if (tp->flags & TP_FLAG_PROFILE)
1349                 kretprobe_perf_func(ri, regs);
1350 #endif
1351         return 0;       /* We don't tweak the kernel, so just return 0 */
1352 }
1353
1354 static int register_probe_event(struct trace_probe *tp)
1355 {
1356         struct ftrace_event_call *call = &tp->call;
1357         int ret;
1358
1359         /* Initialize ftrace_event_call */
1360         if (probe_is_return(tp)) {
1361                 tp->event.trace = print_kretprobe_event;
1362                 INIT_LIST_HEAD(&call->class->fields);
1363                 call->class->raw_init = probe_event_raw_init;
1364                 call->class->define_fields = kretprobe_event_define_fields;
1365         } else {
1366                 INIT_LIST_HEAD(&call->class->fields);
1367                 tp->event.trace = print_kprobe_event;
1368                 call->class->raw_init = probe_event_raw_init;
1369                 call->class->define_fields = kprobe_event_define_fields;
1370         }
1371         if (set_print_fmt(tp) < 0)
1372                 return -ENOMEM;
1373         call->event = &tp->event;
1374         call->id = register_ftrace_event(&tp->event);
1375         if (!call->id) {
1376                 kfree(call->print_fmt);
1377                 return -ENODEV;
1378         }
1379         call->enabled = 0;
1380         call->class->reg = kprobe_register;
1381         call->data = tp;
1382         ret = trace_add_event_call(call);
1383         if (ret) {
1384                 pr_info("Failed to register kprobe event: %s\n", call->name);
1385                 kfree(call->print_fmt);
1386                 unregister_ftrace_event(&tp->event);
1387         }
1388         return ret;
1389 }
1390
1391 static void unregister_probe_event(struct trace_probe *tp)
1392 {
1393         /* tp->event is unregistered in trace_remove_event_call() */
1394         trace_remove_event_call(&tp->call);
1395         kfree(tp->call.print_fmt);
1396 }
1397
1398 /* Make a debugfs interface for controlling probe points */
1399 static __init int init_kprobe_trace(void)
1400 {
1401         struct dentry *d_tracer;
1402         struct dentry *entry;
1403
1404         d_tracer = tracing_init_dentry();
1405         if (!d_tracer)
1406                 return 0;
1407
1408         entry = debugfs_create_file("kprobe_events", 0644, d_tracer,
1409                                     NULL, &kprobe_events_ops);
1410
1411         /* Event list interface */
1412         if (!entry)
1413                 pr_warning("Could not create debugfs "
1414                            "'kprobe_events' entry\n");
1415
1416         /* Profile interface */
1417         entry = debugfs_create_file("kprobe_profile", 0444, d_tracer,
1418                                     NULL, &kprobe_profile_ops);
1419
1420         if (!entry)
1421                 pr_warning("Could not create debugfs "
1422                            "'kprobe_profile' entry\n");
1423         return 0;
1424 }
1425 fs_initcall(init_kprobe_trace);
1426
1427
1428 #ifdef CONFIG_FTRACE_STARTUP_TEST
1429
1430 static int kprobe_trace_selftest_target(int a1, int a2, int a3,
1431                                         int a4, int a5, int a6)
1432 {
1433         return a1 + a2 + a3 + a4 + a5 + a6;
1434 }
1435
1436 static __init int kprobe_trace_self_tests_init(void)
1437 {
1438         int ret, warn = 0;
1439         int (*target)(int, int, int, int, int, int);
1440         struct trace_probe *tp;
1441
1442         target = kprobe_trace_selftest_target;
1443
1444         pr_info("Testing kprobe tracing: ");
1445
1446         ret = command_trace_probe("p:testprobe kprobe_trace_selftest_target "
1447                                   "$stack $stack0 +0($stack)");
1448         if (WARN_ON_ONCE(ret)) {
1449                 pr_warning("error on probing function entry.\n");
1450                 warn++;
1451         } else {
1452                 /* Enable trace point */
1453                 tp = find_probe_event("testprobe", KPROBE_EVENT_SYSTEM);
1454                 if (WARN_ON_ONCE(tp == NULL)) {
1455                         pr_warning("error on getting new probe.\n");
1456                         warn++;
1457                 } else
1458                         probe_event_enable(&tp->call);
1459         }
1460
1461         ret = command_trace_probe("r:testprobe2 kprobe_trace_selftest_target "
1462                                   "$retval");
1463         if (WARN_ON_ONCE(ret)) {
1464                 pr_warning("error on probing function return.\n");
1465                 warn++;
1466         } else {
1467                 /* Enable trace point */
1468                 tp = find_probe_event("testprobe2", KPROBE_EVENT_SYSTEM);
1469                 if (WARN_ON_ONCE(tp == NULL)) {
1470                         pr_warning("error on getting new probe.\n");
1471                         warn++;
1472                 } else
1473                         probe_event_enable(&tp->call);
1474         }
1475
1476         if (warn)
1477                 goto end;
1478
1479         ret = target(1, 2, 3, 4, 5, 6);
1480
1481         ret = command_trace_probe("-:testprobe");
1482         if (WARN_ON_ONCE(ret)) {
1483                 pr_warning("error on deleting a probe.\n");
1484                 warn++;
1485         }
1486
1487         ret = command_trace_probe("-:testprobe2");
1488         if (WARN_ON_ONCE(ret)) {
1489                 pr_warning("error on deleting a probe.\n");
1490                 warn++;
1491         }
1492
1493 end:
1494         cleanup_all_probes();
1495         if (warn)
1496                 pr_cont("NG: Some tests failed. Please check them.\n");
1497         else
1498                 pr_cont("OK\n");
1499         return 0;
1500 }
1501
1502 late_initcall(kprobe_trace_self_tests_init);
1503
1504 #endif