1 /*
2  * trace_output.c
3  *
4  * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
5  *
6  */
7
8 #include <linux/module.h>
9 #include <linux/mutex.h>
10 #include <linux/ftrace.h>
11
12 #include "trace_output.h"
13
14 /* must be a power of 2 */
15 #define EVENT_HASHSIZE  128
16
17 static DEFINE_MUTEX(trace_event_mutex);
18 static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;
19
20 static int next_event_type = __TRACE_LAST_TYPE + 1;
21
22 void trace_print_seq(struct seq_file *m, struct trace_seq *s)
23 {
24         int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;
25
26         s->buffer[len] = 0;
27         seq_puts(m, s->buffer);
28
29         trace_seq_init(s);
30 }
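/*
 * Editor's note: the clamp to PAGE_SIZE - 1 above always leaves room for the
 * terminating NUL written before seq_puts(), and trace_seq_init() resets
 * s->len so the same trace_seq can be reused for the next line.
 */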
31
32 enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
33 {
34         struct trace_seq *s = &iter->seq;
35         struct trace_entry *entry = iter->ent;
36         struct bprint_entry *field;
37         int ret;
38
39         trace_assign_type(field, entry);
40
41         ret = trace_seq_bprintf(s, field->fmt, field->buf);
42         if (!ret)
43                 return TRACE_TYPE_PARTIAL_LINE;
44
45         return TRACE_TYPE_HANDLED;
46 }
47
48 enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
49 {
50         struct trace_seq *s = &iter->seq;
51         struct trace_entry *entry = iter->ent;
52         struct print_entry *field;
53         int ret;
54
55         trace_assign_type(field, entry);
56
57         ret = trace_seq_printf(s, "%s", field->buf);
58         if (!ret)
59                 return TRACE_TYPE_PARTIAL_LINE;
60
61         return TRACE_TYPE_HANDLED;
62 }
63
64 /**
65  * trace_seq_printf - sequence printing of trace information
66  * @s: trace sequence descriptor
67  * @fmt: printf format string
68  *
69  * The tracer may use either the sequence operations or its own
70  * copy-to-user routines. To simplify formatting of a trace,
71  * trace_seq_printf() is used to store strings into a special
72  * buffer (@s). Then the output may either be used by
73  * the sequencer or pulled into another buffer.
74  */
75 int
76 trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
77 {
78         int len = (PAGE_SIZE - 1) - s->len;
79         va_list ap;
80         int ret;
81
82         if (!len)
83                 return 0;
84
85         va_start(ap, fmt);
86         ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
87         va_end(ap);
88
89         /* If we can't write it all, don't bother writing anything */
90         if (ret >= len)
91                 return 0;
92
93         s->len += ret;
94
95         return len;
96 }
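/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a print_line handler typically builds its output with these helpers and
 * treats a zero return as "buffer full", e.g.
 *
 *	if (!trace_seq_printf(&iter->seq, "ip=%lx\n", ip))
 *		return TRACE_TYPE_PARTIAL_LINE;
 *	return TRACE_TYPE_HANDLED;
 *
 * where "ip" is a hypothetical unsigned long; the pattern mirrors the event
 * handlers further down in this file.
 */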
97
98 int trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
99 {
100         int len = (PAGE_SIZE - 1) - s->len;
101         int ret;
102
103         if (!len)
104                 return 0;
105
106         ret = bstr_printf(s->buffer + s->len, len, fmt, binary);
107
108         /* If we can't write it all, don't bother writing anything */
109         if (ret >= len)
110                 return 0;
111
112         s->len += ret;
113
114         return len;
115 }
116
117 /**
118  * trace_seq_puts - trace sequence printing of simple string
119  * @s: trace sequence descriptor
120  * @str: simple string to record
121  *
122  * The tracer may use either the sequence operations or its own
123  * copy-to-user routines. This function records a simple string
124  * into a special buffer (@s) for later retrieval by a sequencer
125  * or other mechanism.
126  */
127 int trace_seq_puts(struct trace_seq *s, const char *str)
128 {
129         int len = strlen(str);
130
131         if (len > ((PAGE_SIZE - 1) - s->len))
132                 return 0;
133
134         memcpy(s->buffer + s->len, str, len);
135         s->len += len;
136
137         return len;
138 }
139
140 int trace_seq_putc(struct trace_seq *s, unsigned char c)
141 {
142         if (s->len >= (PAGE_SIZE - 1))
143                 return 0;
144
145         s->buffer[s->len++] = c;
146
147         return 1;
148 }
149
150 int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len)
151 {
152         if (len > ((PAGE_SIZE - 1) - s->len))
153                 return 0;
154
155         memcpy(s->buffer + s->len, mem, len);
156         s->len += len;
157
158         return len;
159 }
160
161 int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, size_t len)
162 {
163         unsigned char hex[HEX_CHARS];
164         const unsigned char *data = mem;
165         int i, j;
166
167 #ifdef __BIG_ENDIAN
168         for (i = 0, j = 0; i < len; i++) {
169 #else
170         for (i = len-1, j = 0; i >= 0; i--) {
171 #endif
172                 hex[j++] = hex_asc_hi(data[i]);
173                 hex[j++] = hex_asc_lo(data[i]);
174         }
175         hex[j++] = ' ';
176
177         return trace_seq_putmem(s, hex, j);
178 }
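/*
 * Editor's note: the two loops above emit the most significant byte first on
 * either endianness. For example (values made up), on a little-endian box
 *
 *	u32 val = 0x12345678;	stored as the bytes 78 56 34 12
 *	trace_seq_putmem_hex(s, &val, sizeof(val));
 *
 * appends "12345678 " to the sequence, i.e. the big-endian reading of the
 * value.
 */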
179
180 void *trace_seq_reserve(struct trace_seq *s, size_t len)
181 {
182         void *ret;
183
184         if (len > ((PAGE_SIZE - 1) - s->len))
185                 return NULL;
186
187         ret = s->buffer + s->len;
188         s->len += len;
189
190         return ret;
191 }
192
193 int trace_seq_path(struct trace_seq *s, struct path *path)
194 {
195         unsigned char *p;
196
197         if (s->len >= (PAGE_SIZE - 1))
198                 return 0;
199         p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
200         if (!IS_ERR(p)) {
201                 p = mangle_path(s->buffer + s->len, p, "\n");
202                 if (p) {
203                         s->len = p - s->buffer;
204                         return 1;
205                 }
206         } else {
207                 s->buffer[s->len++] = '?';
208                 return 1;
209         }
210
211         return 0;
212 }
213
214 #ifdef CONFIG_KRETPROBES
215 static inline const char *kretprobed(const char *name)
216 {
217         static const char tramp_name[] = "kretprobe_trampoline";
218         int size = sizeof(tramp_name);
219
220         if (strncmp(tramp_name, name, size) == 0)
221                 return "[unknown/kretprobe'd]";
222         return name;
223 }
224 #else
225 static inline const char *kretprobed(const char *name)
226 {
227         return name;
228 }
229 #endif /* CONFIG_KRETPROBES */
230
231 static int
232 seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
233 {
234 #ifdef CONFIG_KALLSYMS
235         char str[KSYM_SYMBOL_LEN];
236         const char *name;
237
238         kallsyms_lookup(address, NULL, NULL, NULL, str);
239
240         name = kretprobed(str);
241
242         return trace_seq_printf(s, fmt, name);
243 #endif
244         return 1;
245 }
246
247 static int
248 seq_print_sym_offset(struct trace_seq *s, const char *fmt,
249                      unsigned long address)
250 {
251 #ifdef CONFIG_KALLSYMS
252         char str[KSYM_SYMBOL_LEN];
253         const char *name;
254
255         sprint_symbol(str, address);
256         name = kretprobed(str);
257
258         return trace_seq_printf(s, fmt, name);
259 #endif
260         return 1;
261 }
262
263 #ifndef CONFIG_64BIT
264 # define IP_FMT "%08lx"
265 #else
266 # define IP_FMT "%016lx"
267 #endif
268
269 int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
270                       unsigned long ip, unsigned long sym_flags)
271 {
272         struct file *file = NULL;
273         unsigned long vmstart = 0;
274         int ret = 1;
275
276         if (mm) {
277                 const struct vm_area_struct *vma;
278
279                 down_read(&mm->mmap_sem);
280                 vma = find_vma(mm, ip);
281                 if (vma) {
282                         file = vma->vm_file;
283                         vmstart = vma->vm_start;
284                 }
285                 if (file) {
286                         ret = trace_seq_path(s, &file->f_path);
287                         if (ret)
288                                 ret = trace_seq_printf(s, "[+0x%lx]",
289                                                        ip - vmstart);
290                 }
291                 up_read(&mm->mmap_sem);
292         }
293         if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
294                 ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
295         return ret;
296 }
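/*
 * Editor's note: for a mapped user-space address this prints the backing
 * file's path plus the offset into that mapping, optionally followed by the
 * raw address, e.g. "/lib/libc-2.9.so[+0x7a123] <00007f1234567000>" (values
 * made up); when no mapping is found only the bare " <address>" is printed.
 */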
297
298 int
299 seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
300                       unsigned long sym_flags)
301 {
302         struct mm_struct *mm = NULL;
303         int ret = 1;
304         unsigned int i;
305
306         if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
307                 struct task_struct *task;
308                 /*
309                  * we do the lookup on the thread group leader,
310                  * since individual threads might have already quit!
311                  */
312                 rcu_read_lock();
313                 task = find_task_by_vpid(entry->ent.tgid);
314                 if (task)
315                         mm = get_task_mm(task);
316                 rcu_read_unlock();
317         }
318
319         for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
320                 unsigned long ip = entry->caller[i];
321
322                 if (ip == ULONG_MAX || !ret)
323                         break;
324                 if (i && ret)
325                         ret = trace_seq_puts(s, " <- ");
326                 if (!ip) {
327                         if (ret)
328                                 ret = trace_seq_puts(s, "??");
329                         continue;
330                 }
331                 if (!ret)
332                         break;
333                 if (ret)
334                         ret = seq_print_user_ip(s, mm, ip, sym_flags);
335         }
336
337         if (mm)
338                 mmput(mm);
339         return ret;
340 }
341
342 int
343 seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
344 {
345         int ret;
346
347         if (!ip)
348                 return trace_seq_printf(s, "0");
349
350         if (sym_flags & TRACE_ITER_SYM_OFFSET)
351                 ret = seq_print_sym_offset(s, "%s", ip);
352         else
353                 ret = seq_print_sym_short(s, "%s", ip);
354
355         if (!ret)
356                 return 0;
357
358         if (sym_flags & TRACE_ITER_SYM_ADDR)
359                 ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
360         return ret;
361 }
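/*
 * Editor's note: depending on the sym_flags bits this renders a kernel
 * address as the bare symbol name, as "name+0xoff/0xsize" (the sprint_symbol
 * format, with TRACE_ITER_SYM_OFFSET), and/or with the raw " <address>"
 * appended (TRACE_ITER_SYM_ADDR); an ip of 0 is printed as just "0".
 */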
362
363 static int
364 lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
365 {
366         int hardirq, softirq;
367         char comm[TASK_COMM_LEN];
368
369         trace_find_cmdline(entry->pid, comm);
370         hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
371         softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
372
373         if (!trace_seq_printf(s, "%8.8s-%-5d %3d%c%c%c",
374                               comm, entry->pid, cpu,
375                               (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
376                                 (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ?
377                                   'X' : '.',
378                               (entry->flags & TRACE_FLAG_NEED_RESCHED) ?
379                                 'N' : '.',
380                               (hardirq && softirq) ? 'H' :
381                                 hardirq ? 'h' : softirq ? 's' : '.'))
382                 return 0;
383
384         if (entry->preempt_count)
385                 return trace_seq_printf(s, "%x", entry->preempt_count);
386         return trace_seq_puts(s, ".");
387 }
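/*
 * Editor's note: the three %c columns above encode the entry flags used by
 * the latency tracers: 'd' irqs disabled ('X' if the architecture cannot
 * report it), 'N' need-resched, and 'H'/'h'/'s' for hardirq+softirq /
 * hardirq / softirq context, with '.' meaning "not set".  The trailing
 * column is the preempt count in hex, or '.' when it is zero.
 */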
388
389 static unsigned long preempt_mark_thresh = 100;
390
391 static int
392 lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
393                     unsigned long rel_usecs)
394 {
395         return trace_seq_printf(s, " %4lldus%c: ", abs_usecs,
396                                 rel_usecs > preempt_mark_thresh ? '!' :
397                                   rel_usecs > 1 ? '+' : ' ');
398 }
399
400 int trace_print_context(struct trace_iterator *iter)
401 {
402         struct trace_seq *s = &iter->seq;
403         struct trace_entry *entry = iter->ent;
404         unsigned long long t = ns2usecs(iter->ts);
405         unsigned long usec_rem = do_div(t, USEC_PER_SEC);
406         unsigned long secs = (unsigned long)t;
407         char comm[TASK_COMM_LEN];
408
409         trace_find_cmdline(entry->pid, comm);
410
411         return trace_seq_printf(s, "%16s-%-5d [%03d] %5lu.%06lu: ",
412                                 comm, entry->pid, iter->cpu, secs, usec_rem);
413 }
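/*
 * Editor's note: with the format string above a line header comes out as,
 * for example (values made up),
 *
 *	            bash-2132  [001]  1234.567890:
 *
 * i.e. the command right-aligned in 16 columns, "-<pid>", the CPU number in
 * brackets and the timestamp in seconds.microseconds.
 */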
414
415 int trace_print_lat_context(struct trace_iterator *iter)
416 {
417         u64 next_ts;
418         int ret;
419         struct trace_seq *s = &iter->seq;
420         struct trace_entry *entry = iter->ent,
421                            *next_entry = trace_find_next_entry(iter, NULL,
422                                                                &next_ts);
423         unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
424         unsigned long abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);
425         unsigned long rel_usecs;
426
427         if (!next_entry)
428                 next_ts = iter->ts;
429         rel_usecs = ns2usecs(next_ts - iter->ts);
430
431         if (verbose) {
432                 char comm[TASK_COMM_LEN];
433
434                 trace_find_cmdline(entry->pid, comm);
435
436                 ret = trace_seq_printf(s, "%16s %5d %3d %d %08x %08lx [%08lx]"
437                                        " %ld.%03ldms (+%ld.%03ldms): ", comm,
438                                        entry->pid, iter->cpu, entry->flags,
439                                        entry->preempt_count, iter->idx,
440                                        ns2usecs(iter->ts),
441                                        abs_usecs / USEC_PER_MSEC,
442                                        abs_usecs % USEC_PER_MSEC,
443                                        rel_usecs / USEC_PER_MSEC,
444                                        rel_usecs % USEC_PER_MSEC);
445         } else {
446                 ret = lat_print_generic(s, entry, iter->cpu);
447                 if (ret)
448                         ret = lat_print_timestamp(s, abs_usecs, rel_usecs);
449         }
450
451         return ret;
452 }
453
454 static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
455
456 static int task_state_char(unsigned long state)
457 {
458         int bit = state ? __ffs(state) + 1 : 0;
459
460         return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
461 }
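/*
 * Editor's note: a state of 0 maps to index 0 ('R', running); otherwise the
 * index is the lowest set bit plus one, so TASK_INTERRUPTIBLE (0x1) prints
 * 'S' and TASK_UNINTERRUPTIBLE (0x2) prints 'D'.  Bits beyond the
 * TASK_STATE_TO_CHAR_STR table fall back to '?'.
 */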
462
463 /**
464  * ftrace_find_event - find a registered event
465  * @type: the type of event to look for
466  *
467  * Returns an event of type @type, otherwise NULL
468  */
469 struct trace_event *ftrace_find_event(int type)
470 {
471         struct trace_event *event;
472         struct hlist_node *n;
473         unsigned key;
474
475         key = type & (EVENT_HASHSIZE - 1);
476
477         hlist_for_each_entry_rcu(event, n, &event_hash[key], node) {
478                 if (event->type == type)
479                         return event;
480         }
481
482         return NULL;
483 }
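/*
 * Editor's note: because EVENT_HASHSIZE is a power of two, the mask above is
 * a cheap modulo; e.g. a type of 300 hashes to 300 & 127 == 44, so the lookup
 * only walks the (usually very short) chain at event_hash[44].
 */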
484
485 /**
486  * register_ftrace_event - register output for an event type
487  * @event: the event type to register
488  *
489  * Event types are stored in a hash and this hash is used to
490  * find a way to print an event. If the @event->type is set
491  * then it will use that type, otherwise it will assign a
492  * type to use.
493  *
494  * If you assign your own type, please make sure it is added
495  * to the trace_type enum in trace.h, to avoid collisions
496  * with the dynamic types.
497  *
498  * Returns the event type number or zero on error.
499  */
500 int register_ftrace_event(struct trace_event *event)
501 {
502         unsigned key;
503         int ret = 0;
504
505         mutex_lock(&trace_event_mutex);
506
507         if (!event) {
508                 ret = next_event_type++;
509                 goto out;
510         }
511
512         if (!event->type)
513                 event->type = next_event_type++;
514         else if (event->type > __TRACE_LAST_TYPE) {
515                 printk(KERN_WARNING "Need to add type to trace.h\n");
516                 WARN_ON(1);
517         }
518
519         if (ftrace_find_event(event->type))
520                 goto out;
521
522         if (event->trace == NULL)
523                 event->trace = trace_nop_print;
524         if (event->raw == NULL)
525                 event->raw = trace_nop_print;
526         if (event->hex == NULL)
527                 event->hex = trace_nop_print;
528         if (event->binary == NULL)
529                 event->binary = trace_nop_print;
530
531         key = event->type & (EVENT_HASHSIZE - 1);
532
533         hlist_add_head_rcu(&event->node, &event_hash[key]);
534
535         ret = event->type;
536  out:
537         mutex_unlock(&trace_event_mutex);
538
539         return ret;
540 }
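/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a tracer providing its own entry type would typically do something like
 *
 *	static struct trace_event my_event = {
 *		.type	= TRACE_MY_ENTRY,
 *		.trace	= my_entry_print,
 *	};
 *
 *	if (!register_ftrace_event(&my_event))
 *		printk(KERN_WARNING "my_event failed to register\n");
 *
 * where TRACE_MY_ENTRY and my_entry_print are hypothetical names.  The unset
 * .raw/.hex/.binary callbacks fall back to trace_nop_print, and a zero .type
 * would instead be assigned from next_event_type.
 */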
541
542 /**
543  * unregister_ftrace_event - remove a no longer used event
544  * @event: the event to remove
545  */
546 int unregister_ftrace_event(struct trace_event *event)
547 {
548         mutex_lock(&trace_event_mutex);
549         hlist_del(&event->node);
550         mutex_unlock(&trace_event_mutex);
551
552         return 0;
553 }
554
555 /*
556  * Standard events
557  */
558
559 enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags)
560 {
561         return TRACE_TYPE_HANDLED;
562 }
563
564 /* TRACE_FN */
565 static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags)
566 {
567         struct ftrace_entry *field;
568         struct trace_seq *s = &iter->seq;
569
570         trace_assign_type(field, iter->ent);
571
572         if (!seq_print_ip_sym(s, field->ip, flags))
573                 goto partial;
574
575         if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) {
576                 if (!trace_seq_printf(s, " <-"))
577                         goto partial;
578                 if (!seq_print_ip_sym(s,
579                                       field->parent_ip,
580                                       flags))
581                         goto partial;
582         }
583         if (!trace_seq_printf(s, "\n"))
584                 goto partial;
585
586         return TRACE_TYPE_HANDLED;
587
588  partial:
589         return TRACE_TYPE_PARTIAL_LINE;
590 }
591
592 static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags)
593 {
594         struct ftrace_entry *field;
595
596         trace_assign_type(field, iter->ent);
597
598         if (!trace_seq_printf(&iter->seq, "%lx %lx\n",
599                               field->ip,
600                               field->parent_ip))
601                 return TRACE_TYPE_PARTIAL_LINE;
602
603         return TRACE_TYPE_HANDLED;
604 }
605
606 static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags)
607 {
608         struct ftrace_entry *field;
609         struct trace_seq *s = &iter->seq;
610
611         trace_assign_type(field, iter->ent);
612
613         SEQ_PUT_HEX_FIELD_RET(s, field->ip);
614         SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip);
615
616         return TRACE_TYPE_HANDLED;
617 }
618
619 static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags)
620 {
621         struct ftrace_entry *field;
622         struct trace_seq *s = &iter->seq;
623
624         trace_assign_type(field, iter->ent);
625
626         SEQ_PUT_FIELD_RET(s, field->ip);
627         SEQ_PUT_FIELD_RET(s, field->parent_ip);
628
629         return TRACE_TYPE_HANDLED;
630 }
631
632 static struct trace_event trace_fn_event = {
633         .type           = TRACE_FN,
634         .trace          = trace_fn_trace,
635         .raw            = trace_fn_raw,
636         .hex            = trace_fn_hex,
637         .binary         = trace_fn_bin,
638 };
639
640 /* TRACE_CTX and TRACE_WAKE */
641 static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
642                                              char *delim)
643 {
644         struct ctx_switch_entry *field;
645         char comm[TASK_COMM_LEN];
646         int S, T;
647
648
649         trace_assign_type(field, iter->ent);
650
651         T = task_state_char(field->next_state);
652         S = task_state_char(field->prev_state);
653         trace_find_cmdline(field->next_pid, comm);
654         if (!trace_seq_printf(&iter->seq,
655                               " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
656                               field->prev_pid,
657                               field->prev_prio,
658                               S, delim,
659                               field->next_cpu,
660                               field->next_pid,
661                               field->next_prio,
662                               T, comm))
663                 return TRACE_TYPE_PARTIAL_LINE;
664
665         return TRACE_TYPE_HANDLED;
666 }
667
668 static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags)
669 {
670         return trace_ctxwake_print(iter, "==>");
671 }
672
673 static enum print_line_t trace_wake_print(struct trace_iterator *iter,
674                                           int flags)
675 {
676         return trace_ctxwake_print(iter, "  +");
677 }
678
679 static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
680 {
681         struct ctx_switch_entry *field;
682         int T;
683
684         trace_assign_type(field, iter->ent);
685
686         if (!S)
687                 S = task_state_char(field->prev_state);
688         T = task_state_char(field->next_state);
689         if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
690                               field->prev_pid,
691                               field->prev_prio,
692                               S,
693                               field->next_cpu,
694                               field->next_pid,
695                               field->next_prio,
696                               T))
697                 return TRACE_TYPE_PARTIAL_LINE;
698
699         return TRACE_TYPE_HANDLED;
700 }
701
702 static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags)
703 {
704         return trace_ctxwake_raw(iter, 0);
705 }
706
707 static enum print_line_t trace_wake_raw(struct trace_iterator *iter, int flags)
708 {
709         return trace_ctxwake_raw(iter, '+');
710 }
711
712
713 static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
714 {
715         struct ctx_switch_entry *field;
716         struct trace_seq *s = &iter->seq;
717         int T;
718
719         trace_assign_type(field, iter->ent);
720
721         if (!S)
722                 S = task_state_char(field->prev_state);
723         T = task_state_char(field->next_state);
724
725         SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
726         SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
727         SEQ_PUT_HEX_FIELD_RET(s, S);
728         SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu);
729         SEQ_PUT_HEX_FIELD_RET(s, field->next_pid);
730         SEQ_PUT_HEX_FIELD_RET(s, field->next_prio);
731         SEQ_PUT_HEX_FIELD_RET(s, T);
732
733         return TRACE_TYPE_HANDLED;
734 }
735
736 static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags)
737 {
738         return trace_ctxwake_hex(iter, 0);
739 }
740
741 static enum print_line_t trace_wake_hex(struct trace_iterator *iter, int flags)
742 {
743         return trace_ctxwake_hex(iter, '+');
744 }
745
746 static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter,
747                                            int flags)
748 {
749         struct ctx_switch_entry *field;
750         struct trace_seq *s = &iter->seq;
751
752         trace_assign_type(field, iter->ent);
753
754         SEQ_PUT_FIELD_RET(s, field->prev_pid);
755         SEQ_PUT_FIELD_RET(s, field->prev_prio);
756         SEQ_PUT_FIELD_RET(s, field->prev_state);
757         SEQ_PUT_FIELD_RET(s, field->next_pid);
758         SEQ_PUT_FIELD_RET(s, field->next_prio);
759         SEQ_PUT_FIELD_RET(s, field->next_state);
760
761         return TRACE_TYPE_HANDLED;
762 }
763
764 static struct trace_event trace_ctx_event = {
765         .type           = TRACE_CTX,
766         .trace          = trace_ctx_print,
767         .raw            = trace_ctx_raw,
768         .hex            = trace_ctx_hex,
769         .binary         = trace_ctxwake_bin,
770 };
771
772 static struct trace_event trace_wake_event = {
773         .type           = TRACE_WAKE,
774         .trace          = trace_wake_print,
775         .raw            = trace_wake_raw,
776         .hex            = trace_wake_hex,
777         .binary         = trace_ctxwake_bin,
778 };
779
780 /* TRACE_SPECIAL */
781 static enum print_line_t trace_special_print(struct trace_iterator *iter,
782                                              int flags)
783 {
784         struct special_entry *field;
785
786         trace_assign_type(field, iter->ent);
787
788         if (!trace_seq_printf(&iter->seq, "# %ld %ld %ld\n",
789                               field->arg1,
790                               field->arg2,
791                               field->arg3))
792                 return TRACE_TYPE_PARTIAL_LINE;
793
794         return TRACE_TYPE_HANDLED;
795 }
796
797 static enum print_line_t trace_special_hex(struct trace_iterator *iter,
798                                            int flags)
799 {
800         struct special_entry *field;
801         struct trace_seq *s = &iter->seq;
802
803         trace_assign_type(field, iter->ent);
804
805         SEQ_PUT_HEX_FIELD_RET(s, field->arg1);
806         SEQ_PUT_HEX_FIELD_RET(s, field->arg2);
807         SEQ_PUT_HEX_FIELD_RET(s, field->arg3);
808
809         return TRACE_TYPE_HANDLED;
810 }
811
812 static enum print_line_t trace_special_bin(struct trace_iterator *iter,
813                                            int flags)
814 {
815         struct special_entry *field;
816         struct trace_seq *s = &iter->seq;
817
818         trace_assign_type(field, iter->ent);
819
820         SEQ_PUT_FIELD_RET(s, field->arg1);
821         SEQ_PUT_FIELD_RET(s, field->arg2);
822         SEQ_PUT_FIELD_RET(s, field->arg3);
823
824         return TRACE_TYPE_HANDLED;
825 }
826
827 static struct trace_event trace_special_event = {
828         .type           = TRACE_SPECIAL,
829         .trace          = trace_special_print,
830         .raw            = trace_special_print,
831         .hex            = trace_special_hex,
832         .binary         = trace_special_bin,
833 };
834
835 /* TRACE_STACK */
836
837 static enum print_line_t trace_stack_print(struct trace_iterator *iter,
838                                            int flags)
839 {
840         struct stack_entry *field;
841         struct trace_seq *s = &iter->seq;
842         int i;
843
844         trace_assign_type(field, iter->ent);
845
846         for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
847                 if (i) {
848                         if (!trace_seq_puts(s, " <= "))
849                                 goto partial;
850                 }
851                 if (!seq_print_ip_sym(s, field->caller[i], flags))
852                         goto partial;
853         }
854
855         if (!trace_seq_puts(s, "\n"))
856                 goto partial;
857
858         return TRACE_TYPE_HANDLED;
859
860  partial:
861         return TRACE_TYPE_PARTIAL_LINE;
862 }
863
864 static struct trace_event trace_stack_event = {
865         .type           = TRACE_STACK,
866         .trace          = trace_stack_print,
867         .raw            = trace_special_print,
868         .hex            = trace_special_hex,
869         .binary         = trace_special_bin,
870 };
871
872 /* TRACE_USER_STACK */
873 static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
874                                                 int flags)
875 {
876         struct userstack_entry *field;
877         struct trace_seq *s = &iter->seq;
878
879         trace_assign_type(field, iter->ent);
880
881         if (!seq_print_userip_objs(field, s, flags))
882                 goto partial;
883
884         if (!trace_seq_putc(s, '\n'))
885                 goto partial;
886
887         return TRACE_TYPE_HANDLED;
888
889  partial:
890         return TRACE_TYPE_PARTIAL_LINE;
891 }
892
893 static struct trace_event trace_user_stack_event = {
894         .type           = TRACE_USER_STACK,
895         .trace          = trace_user_stack_print,
896         .raw            = trace_special_print,
897         .hex            = trace_special_hex,
898         .binary         = trace_special_bin,
899 };
900
901 /* TRACE_BPRINT */
902 static enum print_line_t
903 trace_bprint_print(struct trace_iterator *iter, int flags)
904 {
905         struct trace_entry *entry = iter->ent;
906         struct trace_seq *s = &iter->seq;
907         struct bprint_entry *field;
908
909         trace_assign_type(field, entry);
910
911         if (!seq_print_ip_sym(s, field->ip, flags))
912                 goto partial;
913
914         if (!trace_seq_puts(s, ": "))
915                 goto partial;
916
917         if (!trace_seq_bprintf(s, field->fmt, field->buf))
918                 goto partial;
919
920         return TRACE_TYPE_HANDLED;
921
922  partial:
923         return TRACE_TYPE_PARTIAL_LINE;
924 }
925
926
927 static enum print_line_t
928 trace_bprint_raw(struct trace_iterator *iter, int flags)
929 {
930         struct bprint_entry *field;
931         struct trace_seq *s = &iter->seq;
932
933         trace_assign_type(field, iter->ent);
934
935         if (!trace_seq_printf(s, ": %lx : ", field->ip))
936                 goto partial;
937
938         if (!trace_seq_bprintf(s, field->fmt, field->buf))
939                 goto partial;
940
941         return TRACE_TYPE_HANDLED;
942
943  partial:
944         return TRACE_TYPE_PARTIAL_LINE;
945 }
946
947
948 static struct trace_event trace_bprint_event = {
949         .type           = TRACE_BPRINT,
950         .trace          = trace_bprint_print,
951         .raw            = trace_bprint_raw,
952 };
953
954 /* TRACE_PRINT */
955 static enum print_line_t trace_print_print(struct trace_iterator *iter,
956                                            int flags)
957 {
958         struct print_entry *field;
959         struct trace_seq *s = &iter->seq;
960
961         trace_assign_type(field, iter->ent);
962
963         if (!seq_print_ip_sym(s, field->ip, flags))
964                 goto partial;
965
966         if (!trace_seq_printf(s, ": %s", field->buf))
967                 goto partial;
968
969         return TRACE_TYPE_HANDLED;
970
971  partial:
972         return TRACE_TYPE_PARTIAL_LINE;
973 }
974
975 static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags)
976 {
977         struct print_entry *field;
978
979         trace_assign_type(field, iter->ent);
980
981         if (!trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf))
982                 goto partial;
983
984         return TRACE_TYPE_HANDLED;
985
986  partial:
987         return TRACE_TYPE_PARTIAL_LINE;
988 }
989
990 static struct trace_event trace_print_event = {
991         .type           = TRACE_PRINT,
992         .trace          = trace_print_print,
993         .raw            = trace_print_raw,
994 };
995
996
997 static struct trace_event *events[] __initdata = {
998         &trace_fn_event,
999         &trace_ctx_event,
1000         &trace_wake_event,
1001         &trace_special_event,
1002         &trace_stack_event,
1003         &trace_user_stack_event,
1004         &trace_bprint_event,
1005         &trace_print_event,
1006         NULL
1007 };
1008
1009 __init static int init_events(void)
1010 {
1011         struct trace_event *event;
1012         int i, ret;
1013
1014         for (i = 0; events[i]; i++) {
1015                 event = events[i];
1016
1017                 ret = register_ftrace_event(event);
1018                 if (!ret) {
1019                         printk(KERN_WARNING "event %d failed to register\n",
1020                                event->type);
1021                         WARN_ON_ONCE(1);
1022                 }
1023         }
1024
1025         return 0;
1026 }
1027 device_initcall(init_events);