/*
 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * struct ftrace_raw_<call> {
 *	struct trace_entry	ent;
 *	<type>			<item>;
 *	<type2>			<item2>[<len>];
 *	[...]
 * };
 *
 * Each <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro. We simply emit "type item;",
 * and that creates the fields in the structure.
 */

#include <linux/ftrace_event.h>

#undef __field
#define __field(type, item)		type	item;

#undef __array
#define __array(type, item, len)	type	item[len];

#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 __data_loc_##item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print)	\
	struct ftrace_raw_##name {				\
		struct trace_entry	ent;			\
		tstruct						\
		char			__data[0];		\
	};							\
	static struct ftrace_event_call event_##name

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
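
/*
 * For illustration only (this event and its fields are made up, not
 * part of the kernel): given a hypothetical definition such as
 *
 *	TRACE_EVENT(foo_bar,
 *		TP_PROTO(int bar, const char *name),
 *		TP_ARGS(bar, name),
 *		TP_STRUCT__entry(
 *			__field(int, bar)
 *			__string(name, name)
 *		),
 *		TP_fast_assign(
 *			__entry->bar = bar;
 *			__assign_str(name, name);
 *		),
 *		TP_printk("bar=%d name=%s", __entry->bar, __get_str(name))
 *	);
 *
 * stage 1 would expand to roughly:
 *
 *	struct ftrace_raw_foo_bar {
 *		struct trace_entry	ent;
 *		int			bar;
 *		u32			__data_loc_name;
 *		char			__data[0];
 *	};
 *	static struct ftrace_event_call event_foo_bar;
 */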

/*
 * Stage 2 of the trace events.
 *
 * Include the following:
 *
 * struct ftrace_data_offsets_<call> {
 *	u32	<item1>;
 *	u32	<item2>;
 *	[...]
 * };
 *
 * The __dynamic_array() macro creates each u32 <item>; it is used to
 * keep the offset of each array from the beginning of the event.
 * The size of an array is also encoded, in the higher 16 bits of <item>.
 */

#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)	u32 item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)	\
	struct ftrace_data_offsets_##call {			\
		tstruct;					\
	};

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
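
/*
 * Continuing the hypothetical foo_bar event from stage 1, this stage
 * would generate:
 *
 *	struct ftrace_data_offsets_foo_bar {
 *		u32	name;
 *	};
 *
 * One u32 per dynamic array or string; fixed-size fields contribute
 * nothing here.
 */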

/*
 * Set up the format for showing the trace point fields:
 *
 * int
 * ftrace_format_##call(struct trace_seq *s)
 * {
 *	struct ftrace_raw_##call field;
 *	int ret;
 *
 *	ret = trace_seq_printf(s, #type " " #item ";"
 *			       " offset:%u; size:%u;\n",
 *			       offsetof(struct ftrace_raw_##call, item),
 *			       sizeof(field.item));
 * }
 */

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef __field
#define __field(type, item)					\
	ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t"	\
			       "offset:%u;\tsize:%u;\n",		\
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item));	\
	if (!ret)						\
		return 0;

#undef __array
#define __array(type, item, len)				\
	ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
			       "offset:%u;\tsize:%u;\n",	\
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item));	\
	if (!ret)						\
		return 0;

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	ret = trace_seq_printf(s, "\tfield:__data_loc " #type "[] " #item ";\t"\
			       "offset:%u;\tsize:%u;\n",		\
			       (unsigned int)offsetof(typeof(field),	\
					__data_loc_##item),		\
			       (unsigned int)sizeof(field.__data_loc_##item)); \
	if (!ret)							\
		return 0;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __entry
#define __entry REC

#undef __print_flags
#undef __print_symbolic
#undef __get_dynamic_array
#undef __get_str

#undef TP_printk
#define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args)

#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef TP_perf_assign
#define TP_perf_assign(args...)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, func, print)		\
static int								\
ftrace_format_##call(struct trace_seq *s)				\
{									\
	struct ftrace_raw_##call field __attribute__((unused));	\
	int ret = 0;							\
									\
	tstruct;							\
									\
	trace_seq_printf(s, "\nprint fmt: " print);			\
									\
	return ret;							\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
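
/*
 * For the hypothetical foo_bar event this generates the format later
 * exposed under events/<system>/foo_bar/format, roughly:
 *
 *	field:int bar;	offset:12;	size:4;
 *	field:__data_loc char[] name;	offset:16;	size:4;
 *
 *	print fmt: "bar=%d name=%s", REC->bar, __get_str(name)
 *
 * The offsets shown are illustrative; they account for the common
 * trace_entry fields that precede the event-specific ones.
 */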

/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *	struct trace_seq *s = &iter->seq;
 *	struct ftrace_raw_<call> *field; <-- defined in stage 1
 *	struct trace_entry *entry;
 *	struct trace_seq *p;
 *	int ret;
 *
 *	entry = iter->ent;
 *
 *	if (entry->type != event_<call>.id) {
 *		WARN_ON_ONCE(1);
 *		return TRACE_TYPE_UNHANDLED;
 *	}
 *
 *	field = (typeof(field))entry;
 *
 *	p = &get_cpu_var(ftrace_event_seq);
 *	trace_seq_init(p);
 *	ret = trace_seq_printf(s, <TP_printk> "\n");
 *	put_cpu();
 *	if (!ret)
 *		return TRACE_TYPE_PARTIAL_LINE;
 *
 *	return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note, this is not needed if the data is read
 * in a generic fashion.
 */

#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)
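
/*
 * Worked example of the __data_loc encoding: the low 16 bits hold the
 * offset of the array from the start of the entry, the high 16 bits
 * hold its length in bytes. If, say, __data_loc_name == (6 << 16) | 20,
 * the string lives at ((void *)__entry + 20) and is 6 bytes long
 * (including the terminating NUL). __get_dynamic_array() masks the
 * length away with & 0xffff and keeps only the offset.
 */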

#undef __print_flags
#define __print_flags(flag, delim, flag_array...)			\
	({								\
		static const struct trace_print_flags flags[] =		\
			{ flag_array, { -1, NULL }};			\
		ftrace_print_flags_seq(p, delim, flag, flags);		\
	})

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags symbols[] =	\
			{ symbol_array, { -1, NULL }};			\
		ftrace_print_symbols_seq(p, value, symbols);		\
	})

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
enum print_line_t							\
ftrace_raw_output_##call(struct trace_iterator *iter, int flags)	\
{									\
	struct trace_seq *s = &iter->seq;				\
	struct ftrace_raw_##call *field;				\
	struct trace_entry *entry;					\
	struct trace_seq *p;						\
	int ret;							\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event_##call.id) {				\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	p = &get_cpu_var(ftrace_event_seq);				\
	trace_seq_init(p);						\
	ret = trace_seq_printf(s, #call ": " print);			\
	put_cpu();							\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
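
/*
 * For illustration, a TP_printk() using the helpers above (the values
 * are made up):
 *
 *	TP_printk("state=%s flags=%s",
 *		__print_symbolic(__entry->state,
 *			{ 0, "RUNNING" }, { 1, "SLEEPING" }),
 *		__print_flags(__entry->flags, "|",
 *			{ 0x1, "URGENT" }, { 0x2, "RETRY" }))
 *
 * Each helper expands to a statement expression that formats into the
 * per-cpu ftrace_event_seq (p above) and evaluates to a string that
 * the surrounding trace_seq_printf() consumes via %s.
 */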

#undef __field
#define __field(type, item)						\
	ret = trace_define_field(event_call, #type, #item,		\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item), is_signed_type(type)); \
	if (ret)							\
		return ret;

#undef __array
#define __array(type, item, len)					\
	BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);				\
	ret = trace_define_field(event_call, #type "[" #len "]", #item,	\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item), 0);		\
	if (ret)							\
		return ret;

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \
				 offsetof(typeof(field), __data_loc_##item), \
				 sizeof(field.__data_loc_##item), 0);

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, func, print)		\
static int								\
ftrace_define_fields_##call(void)					\
{									\
	struct ftrace_raw_##call field;					\
	struct ftrace_event_call *event_call = &event_##call;		\
	int ret;							\
									\
	__common_field(int, type, 1);					\
	__common_field(unsigned char, flags, 0);			\
	__common_field(unsigned char, preempt_count, 0);		\
	__common_field(int, pid, 1);					\
	__common_field(int, tgid, 1);					\
									\
	tstruct;							\
									\
	return ret;							\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
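
/*
 * For the hypothetical foo_bar event this registers the common fields
 * plus "int bar" and "__data_loc char[] name" with the filter code,
 * e.g. (sketch):
 *
 *	trace_define_field(event_call, "int", "bar",
 *			   offsetof(struct ftrace_raw_foo_bar, bar),
 *			   sizeof(field.bar), 1);
 *
 * which is what later allows filter expressions such as 'bar > 10'.
 */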

/*
 * Remember the offset of each array from the beginning of the event.
 */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__data_offsets->item = __data_size +				\
			       offsetof(typeof(*entry), __data);	\
	__data_offsets->item |= (len * sizeof(type)) << 16;		\
	__data_size += (len) * sizeof(type);

#undef __string
#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
static inline int ftrace_get_offsets_##call(				\
	struct ftrace_data_offsets_##call *__data_offsets, proto)	\
{									\
	int __data_size = 0;						\
	struct ftrace_raw_##call __maybe_unused *entry;			\
									\
	tstruct;							\
									\
	return __data_size;						\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
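
/*
 * For the hypothetical foo_bar event with a 5-character name (6 bytes
 * including the NUL), ftrace_get_offsets_foo_bar() would compute:
 *
 *	__data_offsets.name = offsetof(struct ftrace_raw_foo_bar, __data)
 *			      | (6 << 16);
 *	return value (__data_size) = 6;
 *
 * i.e. the string is placed at the start of the flexible __data[] area
 * and the total dynamic payload is 6 bytes.
 */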

#ifdef CONFIG_EVENT_PROFILE

/*
 * Generate the functions needed for tracepoint perf_counter support.
 *
 * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later.
 *
 * static int ftrace_profile_enable_<call>(struct ftrace_event_call *event_call)
 * {
 *	int ret = 0;
 *
 *	if (!atomic_inc_return(&event_call->profile_count))
 *		ret = register_trace_<call>(ftrace_profile_<call>);
 *
 *	return ret;
 * }
 *
 * static void ftrace_profile_disable_<call>(struct ftrace_event_call *event_call)
 * {
 *	if (atomic_add_negative(-1, &event_call->profile_count))
 *		unregister_trace_<call>(ftrace_profile_<call>);
 * }
 */

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
									\
static void ftrace_profile_##call(proto);				\
									\
static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
{									\
	int ret = 0;							\
									\
	if (!atomic_inc_return(&event_call->profile_count))		\
		ret = register_trace_##call(ftrace_profile_##call);	\
									\
	return ret;							\
}									\
									\
static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
{									\
	if (atomic_add_negative(-1, &event_call->profile_count))	\
		unregister_trace_##call(ftrace_profile_##call);		\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#endif /* CONFIG_EVENT_PROFILE */
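
/*
 * Worked example of the counting trick above: profile_count starts at
 * -1 (see _TRACE_PROFILE_INIT below), so:
 *
 *	enable:  -1 -> 0   atomic_inc_return() == 0     -> register probe
 *	enable:   0 -> 1   atomic_inc_return() == 1     -> already registered
 *	disable:  1 -> 0   atomic_add_negative() false  -> keep probe
 *	disable:  0 -> -1  atomic_add_negative() true   -> unregister probe
 *
 * i.e. the probe is registered on the first enable and unregistered on
 * the last disable.
 */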

/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * static void ftrace_event_<call>(proto)
 * {
 *	event_trace_printk(_RET_IP_, "<call>: " <fmt>);
 * }
 *
 * static int ftrace_reg_event_<call>(void)
 * {
 *	int ret;
 *
 *	ret = register_trace_<call>(ftrace_event_<call>);
 *	if (ret)
 *		pr_info("event trace: Could not activate trace point "
 *			"probe to <call>");
 *	return ret;
 * }
 *
 * static void ftrace_unreg_event_<call>(void)
 * {
 *	unregister_trace_<call>(ftrace_event_<call>);
 * }
 *
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct ftrace_event_call event_<call>;
 *
 * static void ftrace_raw_event_<call>(proto)
 * {
 *	struct ring_buffer_event *event;
 *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
 *	unsigned long irq_flags;
 *	int pc;
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	event = trace_current_buffer_lock_reserve(event_<call>.id,
 *				  sizeof(struct ftrace_raw_<call>),
 *				  irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *
 *	<assign>; <-- Here we assign the entries by the __field and
 *		      __array macros.
 *
 *	trace_current_buffer_unlock_commit(event, irq_flags, pc);
 * }
 *
 * static int ftrace_raw_reg_event_<call>(void)
 * {
 *	int ret;
 *
 *	ret = register_trace_<call>(ftrace_raw_event_<call>);
 *	if (ret)
 *		pr_info("event trace: Could not activate trace point "
 *			"probe to <call>");
 *	return ret;
 * }
 *
 * static void ftrace_unreg_event_<call>(void)
 * {
 *	unregister_trace_<call>(ftrace_raw_event_<call>);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *	.trace			= ftrace_raw_output_<call>, <-- stage 3
 * };
 *
 * static int ftrace_raw_init_event_<call>(void)
 * {
 *	int id;
 *
 *	id = register_ftrace_event(&ftrace_event_type_<call>);
 *	if (!id)
 *		return -ENODEV;
 *	event_<call>.id = id;
 *	return 0;
 * }
 *
 * static struct ftrace_event_call __used
 * __attribute__((__aligned__(4)))
 * __attribute__((section("_ftrace_events"))) event_<call> = {
 *	.name			= "<call>",
 *	.system			= "<system>",
 *	.raw_init		= ftrace_raw_init_event_<call>,
 *	.regfunc		= ftrace_reg_event_<call>,
 *	.unregfunc		= ftrace_unreg_event_<call>,
 *	.show_format		= ftrace_format_<call>,
 * };
 */

#undef TP_FMT
#define TP_FMT(fmt, args...)	fmt "\n", ##args

#ifdef CONFIG_EVENT_PROFILE

#define _TRACE_PROFILE_INIT(call)					\
	.profile_count = ATOMIC_INIT(-1),				\
	.profile_enable = ftrace_profile_enable_##call,			\
	.profile_disable = ftrace_profile_disable_##call,

#else
#define _TRACE_PROFILE_INIT(call)
#endif

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __assign_str
#define __assign_str(dst, src)						\
	strcpy(__get_str(dst), src);
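
/*
 * For illustration, a string field is handled in two steps in the
 * hypothetical foo_bar event: __string(name, name) in TP_STRUCT__entry
 * reserves the __data_loc word (sized from strlen() by the
 * ftrace_get_offsets_* stage above), and then in TP_fast_assign:
 *
 *	__assign_str(name, name);
 *
 * copies the string into the reserved area via __get_str(name).
 */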

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
									\
static struct ftrace_event_call event_##call;				\
									\
static void ftrace_raw_event_##call(proto)				\
{									\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_event_call *event_call = &event_##call;		\
	struct ring_buffer_event *event;				\
	struct ftrace_raw_##call *entry;				\
	unsigned long irq_flags;					\
	int __data_size;						\
	int pc;								\
									\
	local_save_flags(irq_flags);					\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
									\
	event = trace_current_buffer_lock_reserve(event_##call.id,	\
				 sizeof(*entry) + __data_size,		\
				 irq_flags, pc);			\
	if (!event)							\
		return;							\
	entry = ring_buffer_event_data(event);				\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	if (!filter_current_check_discard(event_call, entry, event))	\
		trace_nowake_buffer_unlock_commit(event, irq_flags, pc); \
}									\
									\
static int ftrace_raw_reg_event_##call(void *ptr)			\
{									\
	int ret;							\
									\
	ret = register_trace_##call(ftrace_raw_event_##call);		\
	if (ret)							\
		pr_info("event trace: Could not activate trace point "	\
			"probe to " #call "\n");			\
	return ret;							\
}									\
									\
static void ftrace_raw_unreg_event_##call(void *ptr)			\
{									\
	unregister_trace_##call(ftrace_raw_event_##call);		\
}									\
									\
static struct trace_event ftrace_event_type_##call = {			\
	.trace			= ftrace_raw_output_##call,		\
};									\
									\
static int ftrace_raw_init_event_##call(void)				\
{									\
	int id;								\
									\
	id = register_ftrace_event(&ftrace_event_type_##call);		\
	if (!id)							\
		return -ENODEV;						\
	event_##call.id = id;						\
	INIT_LIST_HEAD(&event_##call.fields);				\
	init_preds(&event_##call);					\
	return 0;							\
}									\
									\
static struct ftrace_event_call __used					\
__attribute__((__aligned__(4)))						\
__attribute__((section("_ftrace_events"))) event_##call = {		\
	.name			= #call,				\
	.system			= __stringify(TRACE_SYSTEM),		\
	.event			= &ftrace_event_type_##call,		\
	.raw_init		= ftrace_raw_init_event_##call,		\
	.regfunc		= ftrace_raw_reg_event_##call,		\
	.unregfunc		= ftrace_raw_unreg_event_##call,	\
	.show_format		= ftrace_format_##call,		\
	.define_fields		= ftrace_define_fields_##call,		\
	_TRACE_PROFILE_INIT(call)					\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
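
/*
 * With all four stages expanded, enabling the hypothetical foo_bar
 * event (e.g. "echo 1 > events/<system>/foo_bar/enable" in debugfs)
 * makes every trace_foo_bar(bar, name) call reserve a ring buffer
 * slot, run the TP_fast_assign() body, and later render through
 * ftrace_raw_output_foo_bar() as, for example:
 *
 *	task-1234  [000]  5678.123456: foo_bar: bar=5 name=hello
 *
 * (task name, pid, cpu and timestamp come from the common fields.)
 */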

/*
 * Define the insertion callback to profile events
 *
 * The job is very similar to ftrace_raw_event_<call> except that we don't
 * insert in the ring buffer but in a perf counter.
 *
 * static void ftrace_profile_<call>(proto)
 * {
 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *	struct ftrace_event_call *event_call = &event_<call>;
 *	extern void perf_tpcounter_event(int, u64, u64, void *, int);
 *	struct ftrace_raw_<call> *entry;
 *	u64 __addr = 0, __count = 1;
 *	unsigned long irq_flags;
 *	int __entry_size;
 *	int __data_size;
 *	int pc;
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *	// Below we want to get the aligned size by taking into account
 *	// the u32 field that will later store the buffer size
 *	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
 *			     sizeof(u64));
 *	__entry_size -= sizeof(u32);
 *
 *	do {
 *		char raw_data[__entry_size]; <- allocate our sample on the stack
 *		struct trace_entry *ent;
 *
 *		// zero dead bytes from alignment to avoid stack leak to userspace:
 *		*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
 *		entry = (struct ftrace_raw_<call> *)raw_data;
 *		ent = &entry->ent;
 *		tracing_generic_entry_update(ent, irq_flags, pc);
 *		ent->type = event_call->id;
 *
 *		<tstruct> <- store the dynamic array offsets
 *
 *		<assign> <- assign our values
 *
 *		perf_tpcounter_event(event_call->id, __addr, __count, entry,
 *			     __entry_size); <- submit them to the perf counter
 *	} while (0);
 * }
 */

#ifdef CONFIG_EVENT_PROFILE

#undef __perf_addr
#define __perf_addr(a) __addr = (a)

#undef __perf_count
#define __perf_count(c) __count = (c)
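
/*
 * For illustration, an event can weight its perf samples through
 * TP_perf_assign, e.g. a hypothetical:
 *
 *	TP_perf_assign(__perf_count(delta))
 *
 * makes each hit count 'delta' units instead of the default 1
 * (__count above); __perf_addr() similarly sets the sampled address.
 */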

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
static void ftrace_profile_##call(proto)				\
{									\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_event_call *event_call = &event_##call;		\
	extern void perf_tpcounter_event(int, u64, u64, void *, int);	\
	struct ftrace_raw_##call *entry;				\
	u64 __addr = 0, __count = 1;					\
	unsigned long irq_flags;					\
	int __entry_size;						\
	int __data_size;						\
	int pc;								\
									\
	local_save_flags(irq_flags);					\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
			     sizeof(u64));				\
	__entry_size -= sizeof(u32);					\
									\
	do {								\
		char raw_data[__entry_size];				\
		struct trace_entry *ent;				\
									\
		*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;	\
		entry = (struct ftrace_raw_##call *)raw_data;		\
		ent = &entry->ent;					\
		tracing_generic_entry_update(ent, irq_flags, pc);	\
		ent->type = event_call->id;				\
									\
		tstruct							\
									\
		{ assign; }						\
									\
		perf_tpcounter_event(event_call->id, __addr, __count, entry,\
			     __entry_size);				\
	} while (0);							\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_EVENT_PROFILE */

#undef _TRACE_PROFILE_INIT