3a0b44bdabf7d6d9b9a448a3e22bb607913200e5
[pandora-kernel.git] / include / trace / ftrace.h
1 /*
2  * Stage 1 of the trace events.
3  *
4  * Override the macros in <trace/trace_events.h> to include the following:
5  *
6  * struct ftrace_raw_<call> {
7  *      struct trace_entry              ent;
8  *      <type>                          <item>;
9  *      <type2>                         <item2>[<len>];
10  *      [...]
11  * };
12  *
13  * The <type> <item> is created by the __field(type, item) macro or
14  * the __array(type2, item2, len) macro.
15  * We simply do "type item;", and that will create the fields
16  * in the structure.
17  */
18
19 #include <linux/ftrace_event.h>
20
/*
 * Stage 1 field macros: each one lays down the raw struct members
 * described in the comment block above.
 */
21 #undef __field
22 #define __field(type, item)             type    item;
23
24 #undef __array
25 #define __array(type, item, len)        type    item[len];
26
/* A dynamic array is represented by one u32: offset of the data within
 * the trailing __data[] area (low 16 bits) and its length (high 16 bits),
 * as encoded later by ftrace_get_offsets_<call>(). */
27 #undef __dynamic_array
28 #define __dynamic_array(type, item, len) u32 __data_loc_##item;
29
/* A string is a dynamic char array; -1 is a placeholder length here,
 * the real size (strlen(src) + 1) is computed in the offsets stage. */
30 #undef __string
31 #define __string(item, src) __dynamic_array(char, item, -1)
32
33 #undef TP_STRUCT__entry
34 #define TP_STRUCT__entry(args...) args
35
/* Emit struct ftrace_raw_<name> plus a forward declaration of the
 * per-event ftrace_event_call used by the later stages. */
36 #undef TRACE_EVENT
37 #define TRACE_EVENT(name, proto, args, tstruct, assign, print)  \
38         struct ftrace_raw_##name {                              \
39                 struct trace_entry      ent;                    \
40                 tstruct                                         \
41                 char                    __data[0];              \
42         };                                                      \
43         static struct ftrace_event_call event_##name
44
45 /* Callbacks are meaningless to ftrace. */
46 #undef TRACE_EVENT_FN
47 #define TRACE_EVENT_FN(name, proto, args, tstruct,              \
48                 assign, print, reg, unreg)                      \
49         TRACE_EVENT(name, TP_PROTO(proto), TP_ARGS(args),       \
50                 TP_STRUCT__entry(tstruct),                      \
51                 TP_fast_assign(assign),                         \
52                 TP_printk(print))
53
54 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
55
56
57 /*
58  * Stage 2 of the trace events.
59  *
60  * Include the following:
61  *
62  * struct ftrace_data_offsets_<call> {
63  *      u32                             <item1>;
64  *      u32                             <item2>;
65  *      [...]
66  * };
67  *
68  * The __dynamic_array() macro will create each u32 <item>, this is
69  * to keep the offset of each array from the beginning of the event.
70  * The size of an array is also encoded, in the higher 16 bits of <item>.
71  */
72
/*
 * Stage 2: a plain __field() occupies no slot in the data-offsets
 * struct -- only dynamic arrays need one -- so it expands to nothing.
 * Note: no semicolon after the parameter list.  The previous
 * definition read "#define __field(type, item);", which made the
 * macro expand to ";" and left stray empty declarations inside
 * struct ftrace_data_offsets_<call>.
 */
#undef __field
#define __field(type, item)
75
/* Fixed-size arrays, like plain fields, need no offset bookkeeping. */
76 #undef __array
77 #define __array(type, item, len)
78
/* One u32 per dynamic array, holding its offset and length. */
79 #undef __dynamic_array
80 #define __dynamic_array(type, item, len)        u32 item;
81
82 #undef __string
83 #define __string(item, src) __dynamic_array(char, item, -1)
84
/* Emit struct ftrace_data_offsets_<call> for this event. */
85 #undef TRACE_EVENT
86 #define TRACE_EVENT(call, proto, args, tstruct, assign, print)          \
87         struct ftrace_data_offsets_##call {                             \
88                 tstruct;                                                \
89         };
90
91 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
92
93 /*
94  * Setup the showing format of trace point.
95  *
96  * int
97  * ftrace_format_##call(struct trace_seq *s)
98  * {
99  *      struct ftrace_raw_##call field;
100  *      int ret;
101  *
102  *      ret = trace_seq_printf(s, #type " " #item ";"
103  *                             " offset:%u; size:%u;\n",
104  *                             offsetof(struct ftrace_raw_##call, item),
105  *                             sizeof(field.type));
106  *
107  * }
108  */
109
/*
 * Format stage: each field macro prints one "field:<type> <name>;
 * offset:<o>; size:<s>;" line into the trace_seq, bailing out (return 0)
 * as soon as trace_seq_printf() reports a full buffer.
 */
110 #undef TP_STRUCT__entry
111 #define TP_STRUCT__entry(args...) args
112
113 #undef __field
114 #define __field(type, item)                                     \
115         ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t"      \
116                                "offset:%u;\tsize:%u;\n",                \
117                                (unsigned int)offsetof(typeof(field), item), \
118                                (unsigned int)sizeof(field.item));       \
119         if (!ret)                                                       \
120                 return 0;
121
122 #undef __array
123 #define __array(type, item, len)                                                \
124         ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t"    \
125                                "offset:%u;\tsize:%u;\n",                \
126                                (unsigned int)offsetof(typeof(field), item), \
127                                (unsigned int)sizeof(field.item));       \
128         if (!ret)                                                       \
129                 return 0;
130
/* Dynamic arrays are advertised with the "__data_loc" prefix so userspace
 * parsers know the u32 holds an offset/length pair, not the data itself. */
131 #undef __dynamic_array
132 #define __dynamic_array(type, item, len)                                       \
133         ret = trace_seq_printf(s, "\tfield:__data_loc " #type "[] " #item ";\t"\
134                                "offset:%u;\tsize:%u;\n",                       \
135                                (unsigned int)offsetof(typeof(field),           \
136                                         __data_loc_##item),                    \
137                                (unsigned int)sizeof(field.__data_loc_##item)); \
138         if (!ret)                                                              \
139                 return 0;
140
141 #undef __string
142 #define __string(item, src) __dynamic_array(char, item, -1)
143
/* In the printed format string, __entry fields are spelled "REC->..." */
144 #undef __entry
145 #define __entry REC
146
147 #undef __print_symbolic
148 #undef __get_dynamic_array
149 #undef __get_str
150
/* Show the printk format itself, stringified, rather than evaluating it. */
151 #undef TP_printk
152 #define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args)
153
154 #undef TP_fast_assign
155 #define TP_fast_assign(args...) args
156
157 #undef TP_perf_assign
158 #define TP_perf_assign(args...)
159
/*
 * Emit ftrace_format_<call>(): walks the field macros above to describe
 * the event's binary layout, then appends the print format.  "field" is
 * only used inside offsetof/sizeof, hence the unused attribute.
 */
160 #undef TRACE_EVENT
161 #define TRACE_EVENT(call, proto, args, tstruct, func, print)            \
162 static int                                                              \
163 ftrace_format_##call(struct ftrace_event_call *unused,                  \
164                       struct trace_seq *s)                              \
165 {                                                                       \
166         struct ftrace_raw_##call field __attribute__((unused));         \
167         int ret = 0;                                                    \
168                                                                         \
169         tstruct;                                                        \
170                                                                         \
171         trace_seq_printf(s, "\nprint fmt: " print);                     \
172                                                                         \
173         return ret;                                                     \
174 }
175
176 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
177
178 /*
179  * Stage 3 of the trace events.
180  *
181  * Override the macros in <trace/trace_events.h> to include the following:
182  *
183  * enum print_line_t
184  * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
185  * {
186  *      struct trace_seq *s = &iter->seq;
187  *      struct ftrace_raw_<call> *field; <-- defined in stage 1
188  *      struct trace_entry *entry;
189  *      struct trace_seq *p;
190  *      int ret;
191  *
192  *      entry = iter->ent;
193  *
194  *      if (entry->type != event_<call>.id) {
195  *              WARN_ON_ONCE(1);
196  *              return TRACE_TYPE_UNHANDLED;
197  *      }
198  *
199  *      field = (typeof(field))entry;
200  *
201  *      p = get_cpu_var(ftrace_event_seq);
202  *      trace_seq_init(p);
203  *      ret = trace_seq_printf(s, <TP_printk> "\n");
204  *      put_cpu();
205  *      if (!ret)
206  *              return TRACE_TYPE_PARTIAL_LINE;
207  *
208  *      return TRACE_TYPE_HANDLED;
209  * }
210  *
211  * This is the method used to print the raw event to the trace
212  * output format. Note, this is not needed if the data is read
213  * in binary.
214  */
215
/*
 * Stage 3 helpers: resolve __entry and the accessor macros used inside
 * TP_printk() when rendering a recorded event.
 */
216 #undef __entry
217 #define __entry field
218
219 #undef TP_printk
220 #define TP_printk(fmt, args...) fmt "\n", args
221
/* Low 16 bits of __data_loc_<field> = offset of the payload from the
 * start of the entry (see the offsets stage below). */
222 #undef __get_dynamic_array
223 #define __get_dynamic_array(field)      \
224                 ((void *)__entry + (__entry->__data_loc_##field & 0xffff))
225
226 #undef __get_str
227 #define __get_str(field) (char *)__get_dynamic_array(field)
228
/* Render a bitmask via a { value, name } table terminated by { -1, NULL };
 * "p" is the per-cpu scratch trace_seq set up by ftrace_raw_output_<call>. */
229 #undef __print_flags
230 #define __print_flags(flag, delim, flag_array...)                       \
231         ({                                                              \
232                 static const struct trace_print_flags flags[] =         \
233                         { flag_array, { -1, NULL }};                    \
234                 ftrace_print_flags_seq(p, delim, flag, flags);          \
235         })
236
237 #undef __print_symbolic
238 #define __print_symbolic(value, symbol_array...)                        \
239         ({                                                              \
240                 static const struct trace_print_flags symbols[] =       \
241                         { symbol_array, { -1, NULL }};                  \
242                 ftrace_print_symbols_seq(p, value, symbols);            \
243         })
244
/*
 * Emit ftrace_raw_output_<call>(): verify the entry's type id, cast the
 * raw entry to the event struct, and print it with TP_printk().  The
 * per-cpu scratch seq "p" backs __print_flags/__print_symbolic above.
 */
245 #undef TRACE_EVENT
246 #define TRACE_EVENT(call, proto, args, tstruct, assign, print)          \
247 enum print_line_t                                                       \
248 ftrace_raw_output_##call(struct trace_iterator *iter, int flags)        \
249 {                                                                       \
250         struct trace_seq *s = &iter->seq;                               \
251         struct ftrace_raw_##call *field;                                \
252         struct trace_entry *entry;                                      \
253         struct trace_seq *p;                                            \
254         int ret;                                                        \
255                                                                         \
256         entry = iter->ent;                                              \
257                                                                         \
258         if (entry->type != event_##call.id) {                           \
259                 WARN_ON_ONCE(1);                                        \
260                 return TRACE_TYPE_UNHANDLED;                            \
261         }                                                               \
262                                                                         \
263         field = (typeof(field))entry;                                   \
264                                                                         \
265         p = &get_cpu_var(ftrace_event_seq);                             \
266         trace_seq_init(p);                                              \
267         ret = trace_seq_printf(s, #call ": " print);                    \
268         put_cpu();                                                      \
269         if (!ret)                                                       \
270                 return TRACE_TYPE_PARTIAL_LINE;                         \
271                                                                         \
272         return TRACE_TYPE_HANDLED;                                      \
273 }
274         
275 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
276
/*
 * define_fields stage: register each field with the event's field list
 * (used by the filtering code), propagating trace_define_field() errors.
 */
277 #undef __field
278 #define __field(type, item)                                             \
279         ret = trace_define_field(event_call, #type, #item,              \
280                                  offsetof(typeof(field), item),         \
281                                  sizeof(field.item), is_signed_type(type));     \
282         if (ret)                                                        \
283                 return ret;
284
/* Array lengths are capped so filter string comparisons stay bounded. */
285 #undef __array
286 #define __array(type, item, len)                                        \
287         BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);                         \
288         ret = trace_define_field(event_call, #type "[" #len "]", #item, \
289                                  offsetof(typeof(field), item),         \
290                                  sizeof(field.item), 0);                \
291         if (ret)                                                        \
292                 return ret;
293
#undef __dynamic_array
/*
 * Register the u32 __data_loc_<item> slot with the event's field list.
 * Propagate a trace_define_field() failure to the caller, matching the
 * error handling of __field() and __array() above (the previous
 * definition silently discarded the return value).
 */
#define __dynamic_array(type, item, len)				       \
	ret = trace_define_field(event_call, "__data_loc " #type "[]", #item,  \
				 offsetof(typeof(field), __data_loc_##item),   \
				 sizeof(field.__data_loc_##item), 0);	       \
	if (ret)							       \
		return ret;
299
300 #undef __string
301 #define __string(item, src) __dynamic_array(char, item, -1)
302
/*
 * Emit ftrace_define_fields_<call>(): register the common entry fields
 * first, then each event-specific field via the macros above.  "field"
 * exists only to feed offsetof/sizeof.
 */
303 #undef TRACE_EVENT
304 #define TRACE_EVENT(call, proto, args, tstruct, func, print)            \
305 int                                                                     \
306 ftrace_define_fields_##call(struct ftrace_event_call *event_call)       \
307 {                                                                       \
308         struct ftrace_raw_##call field;                                 \
309         int ret;                                                        \
310                                                                         \
311         ret = trace_define_common_fields(event_call);                   \
312         if (ret)                                                        \
313                 return ret;                                             \
314                                                                         \
315         tstruct;                                                        \
316                                                                         \
317         return ret;                                                     \
318 }
319
320 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
321
322 /*
323  * remember the offset of each array from the beginning of the event.
324  */
325
/* Offsets stage: only dynamic arrays contribute; fixed fields are no-ops. */
326 #undef __entry
327 #define __entry entry
328
329 #undef __field
330 #define __field(type, item)
331
332 #undef __array
333 #define __array(type, item, len)
334
#undef __dynamic_array
/*
 * Record where <item>'s payload will live relative to the start of the
 * event (low 16 bits) and encode its byte length in the upper 16 bits.
 * "len" is fully parenthesized: __string() passes "strlen(src) + 1",
 * which in the old "(len * sizeof(type))" spelling expanded to
 * "strlen(src) + 1 * sizeof(type)" -- only accidentally correct for char.
 */
#define __dynamic_array(type, item, len)				\
	__data_offsets->item = __data_size +				\
			       offsetof(typeof(*entry), __data);	\
	__data_offsets->item |= ((len) * sizeof(type)) << 16;		\
	__data_size += (len) * sizeof(type);

#undef __string
/*
 * A string is a dynamic char array sized by its NUL-terminated length.
 * (No trailing backslash: the old definition ended in "\", splicing the
 * following line into the macro body.)
 */
#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)
344
/*
 * Emit ftrace_get_offsets_<call>(): computes each dynamic array's
 * offset/length word (via the macros above) and returns the total
 * number of dynamic-data bytes to reserve after the fixed entry.
 */
345 #undef TRACE_EVENT
346 #define TRACE_EVENT(call, proto, args, tstruct, assign, print)          \
347 static inline int ftrace_get_offsets_##call(                            \
348         struct ftrace_data_offsets_##call *__data_offsets, proto)       \
349 {                                                                       \
350         int __data_size = 0;                                            \
351         struct ftrace_raw_##call __maybe_unused *entry;                 \
352                                                                         \
353         tstruct;                                                        \
354                                                                         \
355         return __data_size;                                             \
356 }
357
358 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
359
360 #ifdef CONFIG_EVENT_PROFILE
361
362 /*
363  * Generate the functions needed for tracepoint perf_counter support.
364  *
365  * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
366  *
367  * static int ftrace_profile_enable_<call>(struct ftrace_event_call *event_call)
368  * {
369  *      int ret = 0;
370  *
371  *      if (!atomic_inc_return(&event_call->profile_count))
372  *              ret = register_trace_<call>(ftrace_profile_<call>);
373  *
374  *      return ret;
375  * }
376  *
377  * static void ftrace_profile_disable_<call>(struct ftrace_event_call *event_call)
378  * {
379  *      if (atomic_add_negative(-1, &event_call->profile_count))
380  *              unregister_trace_<call>(ftrace_profile_<call>);
381  * }
382  *
383  */
384
/*
 * Emit the profile register/unregister pair.  profile_count starts at
 * ATOMIC_INIT(-1) (see _TRACE_PROFILE_INIT below), so the first enable
 * brings it to 0 and attaches the probe; the last disable takes it
 * negative again and detaches it.
 */
385 #undef TRACE_EVENT
386 #define TRACE_EVENT(call, proto, args, tstruct, assign, print)          \
387                                                                         \
388 static void ftrace_profile_##call(proto);                               \
389                                                                         \
390 static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
391 {                                                                       \
392         int ret = 0;                                                    \
393                                                                         \
394         if (!atomic_inc_return(&event_call->profile_count))             \
395                 ret = register_trace_##call(ftrace_profile_##call);     \
396                                                                         \
397         return ret;                                                     \
398 }                                                                       \
399                                                                         \
400 static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
401 {                                                                       \
402         if (atomic_add_negative(-1, &event_call->profile_count))        \
403                 unregister_trace_##call(ftrace_profile_##call);         \
404 }
405
406 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
407
408 #endif
409
410 /*
411  * Stage 4 of the trace events.
412  *
413  * Override the macros in <trace/trace_events.h> to include the following:
414  *
415  * static void ftrace_event_<call>(proto)
416  * {
417  *      event_trace_printk(_RET_IP_, "<call>: " <fmt>);
418  * }
419  *
420  * static int ftrace_reg_event_<call>(void)
421  * {
422  *      int ret;
423  *
424  *      ret = register_trace_<call>(ftrace_event_<call>);
425  *      if (ret)
426  *              pr_info("event trace: Could not activate trace point "
427  *                      "probe to <call>");
428  *      return ret;
429  * }
430  *
431  * static void ftrace_unreg_event_<call>(void)
432  * {
433  *      unregister_trace_<call>(ftrace_event_<call>);
434  * }
435  *
436  *
437  * For those macros defined with TRACE_EVENT:
438  *
439  * static struct ftrace_event_call event_<call>;
440  *
441  * static void ftrace_raw_event_<call>(proto)
442  * {
443  *      struct ring_buffer_event *event;
444  *      struct ftrace_raw_<call> *entry; <-- defined in stage 1
445  *      unsigned long irq_flags;
446  *      int pc;
447  *
448  *      local_save_flags(irq_flags);
449  *      pc = preempt_count();
450  *
451  *      event = trace_current_buffer_lock_reserve(event_<call>.id,
452  *                                sizeof(struct ftrace_raw_<call>),
453  *                                irq_flags, pc);
454  *      if (!event)
455  *              return;
456  *      entry   = ring_buffer_event_data(event);
457  *
458  *      <assign>;  <-- Here we assign the entries by the __field and
459  *                      __array macros.
460  *
461  *      trace_current_buffer_unlock_commit(event, irq_flags, pc);
462  * }
463  *
464  * static int ftrace_raw_reg_event_<call>(void)
465  * {
466  *      int ret;
467  *
468  *      ret = register_trace_<call>(ftrace_raw_event_<call>);
469  *      if (ret)
470  *              pr_info("event trace: Could not activate trace point "
471  *                      "probe to <call>");
472  *      return ret;
473  * }
474  *
475  * static void ftrace_unreg_event_<call>(void)
476  * {
477  *      unregister_trace_<call>(ftrace_raw_event_<call>);
478  * }
479  *
480  * static struct trace_event ftrace_event_type_<call> = {
481  *      .trace                  = ftrace_raw_output_<call>, <-- stage 2
482  * };
483  *
484  * static int ftrace_raw_init_event_<call>(void)
485  * {
486  *      int id;
487  *
488  *      id = register_ftrace_event(&ftrace_event_type_<call>);
489  *      if (!id)
490  *              return -ENODEV;
491  *      event_<call>.id = id;
492  *      return 0;
493  * }
494  *
495  * static struct ftrace_event_call __used
496  * __attribute__((__aligned__(4)))
497  * __attribute__((section("_ftrace_events"))) event_<call> = {
498  *      .name                   = "<call>",
499  *      .system                 = "<system>",
500  *      .raw_init               = ftrace_raw_init_event_<call>,
501  *      .regfunc                = ftrace_reg_event_<call>,
502  *      .unregfunc              = ftrace_unreg_event_<call>,
503  *      .show_format            = ftrace_format_<call>,
504  * }
505  *
506  */
507
508 #undef TP_FMT
509 #define TP_FMT(fmt, args...)    fmt "\n", ##args
510
/* Extra ftrace_event_call initializers when profiling support is built;
 * expands to nothing otherwise.  profile_count = -1 means "no users". */
511 #ifdef CONFIG_EVENT_PROFILE
512
513 #define _TRACE_PROFILE_INIT(call)                                       \
514         .profile_count = ATOMIC_INIT(-1),                               \
515         .profile_enable = ftrace_profile_enable_##call,                 \
516         .profile_disable = ftrace_profile_disable_##call,
517
518 #else
519 #define _TRACE_PROFILE_INIT(call)
520 #endif
521
/*
 * Stage 4 assignment macros: inside ftrace_raw_event_<call>() the
 * tstruct expansion stores each dynamic array's offset/length word
 * before TP_fast_assign() runs.
 */
#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
/* Store the offset+length word computed by ftrace_get_offsets_<call>(). */
#define __dynamic_array(type, item, len)				\
	__entry->__data_loc_##item = __data_offsets.item;

#undef __string
/*
 * No trailing backslash here: the old definition ended in "\", which
 * spliced the following (blank) line into the macro body.
 */
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __assign_str
/* Copy src into the dynamic-array slot reserved for dst. */
#define __assign_str(dst, src)						\
	strcpy(__get_str(dst), src);
541
/*
 * Stage 4: per-event probe, register/unregister hooks, trace_event and
 * the ftrace_event_call placed in the "_ftrace_events" section (walked
 * by the event-trace core at boot).  ftrace_raw_event_<call>() reserves
 * fixed entry + dynamic bytes in the ring buffer, fills in the dynamic
 * offsets (tstruct) and the fields (assign), then commits unless the
 * filter discards the event.  Exact statement order matters; code left
 * byte-identical, comments only.
 */
542 #undef TRACE_EVENT
543 #define TRACE_EVENT(call, proto, args, tstruct, assign, print)          \
544                                                                         \
545 static struct ftrace_event_call event_##call;                           \
546                                                                         \
547 static void ftrace_raw_event_##call(proto)                              \
548 {                                                                       \
549         struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
550         struct ftrace_event_call *event_call = &event_##call;           \
551         struct ring_buffer_event *event;                                \
552         struct ftrace_raw_##call *entry;                                \
553         unsigned long irq_flags;                                        \
554         int __data_size;                                                \
555         int pc;                                                         \
556                                                                         \
557         local_save_flags(irq_flags);                                    \
558         pc = preempt_count();                                           \
559                                                                         \
560         __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
561                                                                         \
562         event = trace_current_buffer_lock_reserve(event_##call.id,      \
563                                  sizeof(*entry) + __data_size,          \
564                                  irq_flags, pc);                        \
565         if (!event)                                                     \
566                 return;                                                 \
567         entry   = ring_buffer_event_data(event);                        \
568                                                                         \
569                                                                         \
570         tstruct                                                         \
571                                                                         \
572         { assign; }                                                     \
573                                                                         \
574         if (!filter_current_check_discard(event_call, entry, event))    \
575                 trace_nowake_buffer_unlock_commit(event, irq_flags, pc); \
576 }                                                                       \
577                                                                         \
578 static int ftrace_raw_reg_event_##call(void *ptr)                       \
579 {                                                                       \
580         int ret;                                                        \
581                                                                         \
582         ret = register_trace_##call(ftrace_raw_event_##call);           \
583         if (ret)                                                        \
584                 pr_info("event trace: Could not activate trace point "  \
585                         "probe to " #call "\n");                        \
586         return ret;                                                     \
587 }                                                                       \
588                                                                         \
589 static void ftrace_raw_unreg_event_##call(void *ptr)                    \
590 {                                                                       \
591         unregister_trace_##call(ftrace_raw_event_##call);               \
592 }                                                                       \
593                                                                         \
594 static struct trace_event ftrace_event_type_##call = {                  \
595         .trace                  = ftrace_raw_output_##call,             \
596 };                                                                      \
597                                                                         \
598 static int ftrace_raw_init_event_##call(void)                           \
599 {                                                                       \
600         int id;                                                         \
601                                                                         \
602         id = register_ftrace_event(&ftrace_event_type_##call);          \
603         if (!id)                                                        \
604                 return -ENODEV;                                         \
605         event_##call.id = id;                                           \
606         INIT_LIST_HEAD(&event_##call.fields);                           \
607         init_preds(&event_##call);                                      \
608         return 0;                                                       \
609 }                                                                       \
610                                                                         \
611 static struct ftrace_event_call __used                                  \
612 __attribute__((__aligned__(4)))                                         \
613 __attribute__((section("_ftrace_events"))) event_##call = {             \
614         .name                   = #call,                                \
615         .system                 = __stringify(TRACE_SYSTEM),            \
616         .event                  = &ftrace_event_type_##call,            \
617         .raw_init               = ftrace_raw_init_event_##call,         \
618         .regfunc                = ftrace_raw_reg_event_##call,          \
619         .unregfunc              = ftrace_raw_unreg_event_##call,        \
620         .show_format            = ftrace_format_##call,                 \
621         .define_fields          = ftrace_define_fields_##call,          \
622         _TRACE_PROFILE_INIT(call)                                       \
623 }
624
625 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
626
627 /*
628  * Define the insertion callback to profile events
629  *
630  * The job is very similar to ftrace_raw_event_<call> except that we don't
631  * insert in the ring buffer but in a perf counter.
632  *
633  * static void ftrace_profile_<call>(proto)
634  * {
635  *      struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
636  *      struct ftrace_event_call *event_call = &event_<call>;
637  *      extern void perf_tpcounter_event(int, u64, u64, void *, int);
638  *      struct ftrace_raw_##call *entry;
639  *      u64 __addr = 0, __count = 1;
640  *      unsigned long irq_flags;
641  *      int __entry_size;
642  *      int __data_size;
643  *      int pc;
644  *
645  *      local_save_flags(irq_flags);
646  *      pc = preempt_count();
647  *
648  *      __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
649  *
650  *      // Below we want to get the aligned size by taking into account
651  *      // the u32 field that will later store the buffer size
652  *      __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
653  *                           sizeof(u64));
654  *      __entry_size -= sizeof(u32);
655  *
656  *      do {
657  *              char raw_data[__entry_size]; <- allocate our sample in the stack
658  *              struct trace_entry *ent;
659  *
660  *              zero dead bytes from alignment to avoid stack leak to userspace:
661  *
662  *              *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
663  *              entry = (struct ftrace_raw_<call> *)raw_data;
664  *              ent = &entry->ent;
665  *              tracing_generic_entry_update(ent, irq_flags, pc);
666  *              ent->type = event_call->id;
667  *
668  *              <tstruct> <- do some jobs with dynamic arrays
669  *
670  *              <assign>  <- affect our values
671  *
672  *              perf_tpcounter_event(event_call->id, __addr, __count, entry,
673  *                           __entry_size);  <- submit them to perf counter
674  *      } while (0);
675  *
676  * }
677  */
678
679 #ifdef CONFIG_EVENT_PROFILE
680
/*
 * Profile probe: build the sample on the stack (size rounded up to u64,
 * with the dead alignment bytes zeroed to avoid leaking stack data),
 * then hand it to perf via perf_tpcounter_event().  __perf_addr/__count
 * let TP_fast_assign() override the sample address and count.
 * Size/alignment arithmetic is delicate; code left byte-identical.
 */
681 #undef __perf_addr
682 #define __perf_addr(a) __addr = (a)
683
684 #undef __perf_count
685 #define __perf_count(c) __count = (c)
686
687 #undef TRACE_EVENT
688 #define TRACE_EVENT(call, proto, args, tstruct, assign, print)          \
689 static void ftrace_profile_##call(proto)                                \
690 {                                                                       \
691         struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
692         struct ftrace_event_call *event_call = &event_##call;           \
693         extern void perf_tpcounter_event(int, u64, u64, void *, int);   \
694         struct ftrace_raw_##call *entry;                                \
695         u64 __addr = 0, __count = 1;                                    \
696         unsigned long irq_flags;                                        \
697         int __entry_size;                                               \
698         int __data_size;                                                \
699         int pc;                                                         \
700                                                                         \
701         local_save_flags(irq_flags);                                    \
702         pc = preempt_count();                                           \
703                                                                         \
704         __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
705         __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
706                              sizeof(u64));                              \
707         __entry_size -= sizeof(u32);                                    \
708                                                                         \
709         do {                                                            \
710                 char raw_data[__entry_size];                            \
711                 struct trace_entry *ent;                                \
712                                                                         \
713                 *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; \
714                 entry = (struct ftrace_raw_##call *)raw_data;           \
715                 ent = &entry->ent;                                      \
716                 tracing_generic_entry_update(ent, irq_flags, pc);       \
717                 ent->type = event_call->id;                             \
718                                                                         \
719                 tstruct                                                 \
720                                                                         \
721                 { assign; }                                             \
722                                                                         \
723                 perf_tpcounter_event(event_call->id, __addr, __count, entry,\
724                              __entry_size);                             \
725         } while (0);                                                    \
726                                                                         \
727 }
728
729 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
730 #endif /* CONFIG_EVENT_PROFILE */
731
732 #undef _TRACE_PROFILE_INIT
733