 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * struct ftrace_raw_<call> {
 *	struct trace_entry	ent;
 *	<type>			<item>;
 *	<type2>			<item2>[<len>];
 *	[...]
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */

#include <linux/ftrace_event.h>
 * DECLARE_EVENT_CLASS can be used to add a generic function
 * handler for events. That is, if all events have the same
 * parameters and just have distinct trace points.
 * Each tracepoint can be defined with DEFINE_EVENT and that
 * will map the DECLARE_EVENT_CLASS to the tracepoint.
 *
 * TRACE_EVENT is a one to one mapping between tracepoint and template.
 */
#define TRACE_EVENT(name, proto, args, tstruct, assign, print)	\
	DECLARE_EVENT_CLASS(name,				\
			    PARAMS(proto),			\
			    PARAMS(args),			\
			    PARAMS(tstruct),			\
			    PARAMS(assign),			\
			    PARAMS(print));			\
	DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));
#define __field(type, item)		type	item;

#define __field_ext(type, item, filter_type)	type	item;

#define __array(type, item, len)	type	item[len];

#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 __data_loc_##item;

#define __string(item, src) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print)	\
	struct ftrace_raw_##name {					\
		struct trace_entry	ent;				\
		tstruct							\
		char			__data[0];			\
	};								\
									\
	static struct ftrace_event_class event_class_##name;
#define DEFINE_EVENT(template, name, proto, args)	\
	static struct ftrace_event_call	__used		\
	__attribute__((__aligned__(4))) event_##name

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#define __cpparg(arg...) arg

/* Callbacks are meaningless to ftrace. */
#define TRACE_EVENT_FN(name, proto, args, tstruct,		\
		assign, print, reg, unreg)			\
	TRACE_EVENT(name, __cpparg(proto), __cpparg(args),	\
		__cpparg(tstruct), __cpparg(assign), __cpparg(print)) \

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
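/*
 * As an illustration of stage 1 (the event, its fields and names below are
 * hypothetical, not something defined in this file), a definition such as:
 *
 *	TRACE_EVENT(foo_bar,
 *		TP_PROTO(int cpu, const char *name),
 *		TP_ARGS(cpu, name),
 *		TP_STRUCT__entry(
 *			__field(int, cpu)
 *			__string(name, name)
 *		),
 *		TP_fast_assign(
 *			__entry->cpu = cpu;
 *			__assign_str(name, name);
 *		),
 *		TP_printk("cpu=%d name=%s", __entry->cpu, __get_str(name))
 *	);
 *
 * passes through the macros above and produces roughly:
 *
 *	struct ftrace_raw_foo_bar {
 *		struct trace_entry	ent;
 *		int			cpu;
 *		u32			__data_loc_name;
 *		char			__data[0];
 *	};
 *
 *	static struct ftrace_event_class event_class_foo_bar;
 */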
 * Stage 2 of the trace events.
 *
 * Include the following:
 *
 * struct ftrace_data_offsets_<call> {
 *	u32				<item1>;
 *	u32				<item2>;
 *	[...]
 * };
 *
 * The __dynamic_array() macro will create each u32 <item>; this is
 * used to keep the offset of each array from the beginning of the event.
 * The size of the array is also encoded, in the upper 16 bits of <item>.
 */
#define __field(type, item)

#define __field_ext(type, item, filter_type)

#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)	u32 item;

#define __string(item, src) __dynamic_array(char, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
	struct ftrace_data_offsets_##call {				\
		tstruct;						\
	};

#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
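/*
 * A short worked example of the offset encoding described above (the field
 * names and numbers are illustrative only): for the hypothetical foo_bar
 * event sketched after stage 1, struct ftrace_data_offsets_foo_bar holds a
 * single u32 for the "name" string.  If the string needs 8 bytes and the
 * static part of struct ftrace_raw_foo_bar ends at offset 16 (where
 * __data[] starts), then the ftrace_get_offsets_foo_bar() helper built in
 * a later stage stores:
 *
 *	offsets.name = 16;		// offset of the data, low 16 bits
 *	offsets.name |= 8 << 16;	// length of the data, upper 16 bits
 *
 * so readers recover the data with (offsets.name & 0xffff) and its length
 * with (offsets.name >> 16).
 */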
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *	struct trace_seq *s = &iter->seq;
 *	struct ftrace_raw_<call> *field; <-- defined in stage 1
 *	struct trace_entry *entry;
 *	struct trace_seq *p;
 *
 *	if (entry->type != event_<call>->event.type) {
 *		WARN_ON_ONCE(1);
 *		return TRACE_TYPE_UNHANDLED;
 *	}
 *
 *	field = (typeof(field))entry;
 *
 *	p = &get_cpu_var(ftrace_event_seq);
 *
 *	ret = trace_seq_printf(s, "%s: ", <call>);
 *	if (ret)
 *		ret = trace_seq_printf(s, <TP_printk> "\n");
 *	if (!ret)
 *		return TRACE_TYPE_PARTIAL_LINE;
 *
 *	return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note, this is not needed if the data is read
 * in a binary format by the user space tools.
 */
#define __entry field

#define TP_printk(fmt, args...) fmt "\n", args

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#define __get_str(field) (char *)__get_dynamic_array(field)
#define __print_flags(flag, delim, flag_array...)			\
	({								\
		static const struct trace_print_flags __flags[] =	\
			{ flag_array, { -1, NULL }};			\
		ftrace_print_flags_seq(p, delim, flag, __flags);	\
	})

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags symbols[] =	\
			{ symbol_array, { -1, NULL }};			\
		ftrace_print_symbols_seq(p, value, symbols);		\
	})
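/*
 * With the helpers above in place, a TP_printk() such as the following
 * (again a hypothetical event, with made-up field and symbol names) can be
 * used in an event definition:
 *
 *	TP_printk("cpu=%d name=%s state=%s",
 *		  __entry->cpu,
 *		  __get_str(name),
 *		  __print_symbolic(__entry->state,
 *				   { 0, "IDLE" },
 *				   { 1, "RUNNING" }))
 *
 * In this stage __entry is the raw event ("field" above), __get_str()
 * locates the string through its __data_loc offset, and __print_symbolic()
 * maps the numeric state to a readable name via ftrace_print_symbols_seq().
 */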
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static notrace enum print_line_t					\
ftrace_raw_output_##call(struct trace_iterator *iter, int flags,	\
			 struct trace_event *trace_event)		\
	struct ftrace_event_call *event;				\
	struct trace_seq *s = &iter->seq;				\
	struct ftrace_raw_##call *field;				\
	struct trace_entry *entry;					\
	struct trace_seq *p;						\
	event = container_of(trace_event, struct ftrace_event_call,	\
	if (entry->type != event->event.type) {				\
		return TRACE_TYPE_UNHANDLED;				\
	field = (typeof(field))entry;					\
	p = &get_cpu_var(ftrace_event_seq);				\
	ret = trace_seq_printf(s, "%s: ", event->name);			\
	ret = trace_seq_printf(s, print);				\
		return TRACE_TYPE_PARTIAL_LINE;				\
	return TRACE_TYPE_HANDLED;					\
static struct trace_event_functions ftrace_event_type_funcs_##call = {	\
	.trace			= ftrace_raw_output_##call,		\
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
static notrace enum print_line_t					\
ftrace_raw_output_##call(struct trace_iterator *iter, int flags,	\
			 struct trace_event *event)			\
	struct trace_seq *s = &iter->seq;				\
	struct ftrace_raw_##template *field;				\
	struct trace_entry *entry;					\
	struct trace_seq *p;						\
	if (entry->type != event_##call.event.type) {			\
		return TRACE_TYPE_UNHANDLED;				\
	field = (typeof(field))entry;					\
	p = &get_cpu_var(ftrace_event_seq);				\
	ret = trace_seq_printf(s, "%s: ", #call);			\
	ret = trace_seq_printf(s, print);				\
		return TRACE_TYPE_PARTIAL_LINE;				\
	return TRACE_TYPE_HANDLED;					\
static struct trace_event_functions ftrace_event_type_funcs_##call = {	\
	.trace			= ftrace_raw_output_##call,		\

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#define __field_ext(type, item, filter_type)				\
	ret = trace_define_field(event_call, #type, #item,		\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), filter_type);	\

#define __field(type, item)	__field_ext(type, item, FILTER_OTHER)

#define __array(type, item, len)					\
	BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);				\
	ret = trace_define_field(event_call, #type "[" #len "]", #item,\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), FILTER_OTHER);	\

#undef __dynamic_array
#define __dynamic_array(type, item, len)				       \
	ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \
				 offsetof(typeof(field), __data_loc_##item),   \
				 sizeof(field.__data_loc_##item),	       \
				 is_signed_type(type), FILTER_OTHER);

#define __string(item, src) __dynamic_array(char, item, -1)
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print)	\
ftrace_define_fields_##call(struct ftrace_event_call *event_call)	\
	struct ftrace_raw_##call field;					\

#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
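/*
 * For the hypothetical foo_bar event used in the earlier examples, the
 * macros above make ftrace_define_fields_foo_bar() issue roughly:
 *
 *	ret = trace_define_field(event_call, "int", "cpu",
 *				 offsetof(struct ftrace_raw_foo_bar, cpu),
 *				 sizeof(field.cpu),
 *				 is_signed_type(int), FILTER_OTHER);
 *
 *	ret = trace_define_field(event_call, "__data_loc char[]", "name",
 *				 offsetof(struct ftrace_raw_foo_bar,
 *					  __data_loc_name),
 *				 sizeof(field.__data_loc_name),
 *				 is_signed_type(char), FILTER_OTHER);
 *
 * which is the field description later exposed through the event's
 * "format" file.
 */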
 * remember the offset of each array from the beginning of the event.
 */

#define __entry entry

#define __field(type, item)

#define __field_ext(type, item, filter_type)

#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__data_offsets->item = __data_size +				\
			       offsetof(typeof(*entry), __data);	\
	__data_offsets->item |= (len * sizeof(type)) << 16;		\
	__data_size += (len) * sizeof(type);

#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static inline notrace int ftrace_get_offsets_##call(			\
	struct ftrace_data_offsets_##call *__data_offsets, proto)	\
	int __data_size = 0;						\
	struct ftrace_raw_##call __maybe_unused *entry;			\
	return __data_size;						\

#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
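/*
 * Continuing the hypothetical foo_bar example: ftrace_get_offsets_foo_bar()
 * only has work to do for the __string() field.  With name = "tick", the
 * __dynamic_array(char, name, strlen("tick") + 1) expansion above performs:
 *
 *	__data_offsets->name = 0 + offsetof(typeof(*entry), __data);
 *	__data_offsets->name |= (5 * sizeof(char)) << 16;
 *	__data_size += 5;
 *
 * and the function returns 5: the number of bytes that must be reserved in
 * the ring buffer on top of sizeof(struct ftrace_raw_foo_bar).
 */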
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct ftrace_event_call event_<call>;
 *
 * static void ftrace_raw_event_<call>(void *__data, proto)
 * {
 *	struct ftrace_event_call *event_call = __data;
 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *	struct ring_buffer_event *event;
 *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
 *	struct ring_buffer *buffer;
 *	unsigned long irq_flags;
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *	event = trace_current_buffer_lock_reserve(&buffer,
 *				  event_<call>->event.type,
 *				  sizeof(*entry) + __data_size,
 *				  irq_flags, pc);
 *	entry = ring_buffer_event_data(event);
 *
 *	{ <assign>; }  <-- Here we assign the entries by the __field and
 *			   __array macros.
 *
 *	if (!filter_current_check_discard(buffer, event_call, entry, event))
 *		trace_current_buffer_unlock_commit(buffer,
 *						   event, irq_flags, pc);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *	.trace			= ftrace_raw_output_<call>, <-- stage 3
 * };
 *
 * static const char print_fmt_<call>[] = <TP_printk>;
 *
 * static struct ftrace_event_class __used event_class_<template> = {
 *	.system			= "<system>",
 *	.define_fields		= ftrace_define_fields_<call>,
 *	.fields			= LIST_HEAD_INIT(event_class_##call.fields),
 *	.raw_init		= trace_event_raw_init,
 *	.probe			= ftrace_raw_event_##call,
 * };
 *
 * static struct ftrace_event_call __used
 * __attribute__((__aligned__(4)))
 * __attribute__((section("_ftrace_events"))) event_<call> = {
 *	.class			= event_class_<template>,
 *	.event			= &ftrace_event_type_<call>,
 *	.print_fmt		= print_fmt_<call>,
 * };
 */
#ifdef CONFIG_PERF_EVENTS

#define _TRACE_PERF_PROTO(call, proto)				\
	static notrace void					\
	perf_trace_##call(void *__data, proto);

#define _TRACE_PERF_INIT(call)					\
	.perf_probe		= perf_trace_##call,

#else /* CONFIG_PERF_EVENTS */
#define _TRACE_PERF_PROTO(call, proto)
#define _TRACE_PERF_INIT(call)
#endif /* CONFIG_PERF_EVENTS */
#define __entry entry

#define __field(type, item)

#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__entry->__data_loc_##item = __data_offsets.item;

#define __string(item, src) __dynamic_array(char, item, -1)		\

#define __assign_str(dst, src)						\
	strcpy(__get_str(dst), src);
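/*
 * Inside the probe generated below, the TP_STRUCT__entry() and
 * TP_fast_assign() blocks of the event expand in place; for the
 * hypothetical foo_bar event they become roughly:
 *
 *	__entry->__data_loc_name = __data_offsets.name;
 *	__entry->cpu = cpu;
 *	strcpy(__get_str(name), name);
 *
 * The __data_loc assignment comes from the __string()/__dynamic_array()
 * part of TP_STRUCT__entry, and __assign_str() then copies the string into
 * the reserved dynamic area at the end of the entry.
 */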
#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef TP_perf_assign
#define TP_perf_assign(args...)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static notrace void							\
ftrace_raw_event_##call(void *__data, proto)				\
	struct ftrace_event_call *event_call = __data;			\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ring_buffer_event *event;				\
	struct ftrace_raw_##call *entry;				\
	struct ring_buffer *buffer;					\
	unsigned long irq_flags;					\
	local_save_flags(irq_flags);					\
	pc = preempt_count();						\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
	event = trace_current_buffer_lock_reserve(&buffer,		\
				 event_call->event.type,		\
				 sizeof(*entry) + __data_size,		\
	entry = ring_buffer_event_data(event);				\
	if (!filter_current_check_discard(buffer, event_call, entry, event)) \
		trace_nowake_buffer_unlock_commit(buffer,		\
						  event, irq_flags, pc); \
 * The ftrace_test_probe is compiled out; it is only here as a build time check
 * to make sure that if the tracepoint handling changes, the ftrace probe will
 * fail to compile unless it too is updated.
 */
#define DEFINE_EVENT(template, call, proto, args)			\
static inline void ftrace_test_probe_##call(void)			\
{									\
	check_trace_callback_type_##call(ftrace_raw_event_##template);	\
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#undef __print_symbolic
#undef __get_dynamic_array

#define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args)
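/*
 * This redefinition turns the TP_printk() of an event into a literal format
 * description.  For the hypothetical foo_bar event it yields roughly:
 *
 *	static const char print_fmt_foo_bar[] =
 *		"\"cpu=%d name=%s\", REC->cpu, __get_str(name)";
 *
 * (in the full header __entry is remapped to REC for this stage, which is
 * why user space sees REC-> in the argument list).  The string is the
 * format itself plus the stringified argument list, and is what parsers
 * read from the event's "format" file.
 */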
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
_TRACE_PERF_PROTO(call, PARAMS(proto));					\
static const char print_fmt_##call[] = print;				\
static struct ftrace_event_class __used event_class_##call = {		\
	.system			= __stringify(TRACE_SYSTEM),		\
	.define_fields		= ftrace_define_fields_##call,		\
	.fields			= LIST_HEAD_INIT(event_class_##call.fields),\
	.raw_init		= trace_event_raw_init,			\
	.probe			= ftrace_raw_event_##call,		\
	_TRACE_PERF_INIT(call)						\

#define DEFINE_EVENT(template, call, proto, args)			\
static struct ftrace_event_call __used					\
__attribute__((__aligned__(4)))						\
__attribute__((section("_ftrace_events"))) event_##call = {		\
	.class			= &event_class_##template,		\
	.event.funcs		= &ftrace_event_type_funcs_##template,	\
	.print_fmt		= print_fmt_##template,			\

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
static const char print_fmt_##call[] = print;				\
static struct ftrace_event_call __used					\
__attribute__((__aligned__(4)))						\
__attribute__((section("_ftrace_events"))) event_##call = {		\
	.class			= &event_class_##template,		\
	.event.funcs		= &ftrace_event_type_funcs_##call,	\
	.print_fmt		= print_fmt_##call,			\

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 * Define the insertion callback for perf events
 *
 * The job is very similar to ftrace_raw_event_<call> except that we don't
 * insert in the ring buffer but in a perf counter.
 *
 * static void ftrace_perf_<call>(proto)
 * {
 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *	struct ftrace_event_call *event_call = &event_<call>;
 *	extern void perf_tp_event(int, u64, u64, void *, int);
 *	struct ftrace_raw_##call *entry;
 *	struct perf_trace_buf *trace_buf;
 *	u64 __addr = 0, __count = 1;
 *	unsigned long irq_flags;
 *	struct trace_entry *ent;
 *
 *	pc = preempt_count();
 *
 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *	// Below we want to get the aligned size by taking into account
 *	// the u32 field that will later store the buffer size
 *	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
 *			     sizeof(u64));
 *	__entry_size -= sizeof(u32);
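 *
 *	// A worked example of this size computation (numbers illustrative
 *	// only): with sizeof(*entry) == 20 and __data_size == 5,
 *	//	5 + 20 + sizeof(u32)   = 29
 *	//	ALIGN(29, sizeof(u64)) = 32
 *	//	32 - sizeof(u32)       = 28	<- final __entry_size
 *	// so the u32 size word plus the entry stay u64 aligned.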
 *	// Protect the non-NMI buffer
 *	// This also protects the rcu read side
 *	local_irq_save(irq_flags);
 *	__cpu = smp_processor_id();
 *
 *	if (in_nmi())
 *		trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
 *	else
 *		trace_buf = rcu_dereference_sched(perf_trace_buf);
 *
 *	trace_buf = per_cpu_ptr(trace_buf, __cpu);
 *
 *	// Avoid recursion from perf that could mess up the buffer
 *	if (trace_buf->recursion++)
 *		goto end_recursion;
 *
 *	raw_data = trace_buf->buf;
 *
 *	// Make recursion update visible before entering perf_tp_event
 *	// so that we protect from perf recursions.
 *
 *	// Zero dead bytes from alignment to avoid stack leak to userspace:
 *	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
 *	entry = (struct ftrace_raw_<call> *)raw_data;
 *
 *	tracing_generic_entry_update(ent, irq_flags, pc);
 *	ent->type = event_call->id;
 *
 *	<tstruct> <- initialize the dynamic arrays
 *
 *	<assign>  <- assign our values
 *
 *	perf_tp_event(event_call->id, __addr, __count, entry,
 *		      __entry_size);  <- submit them to perf counter
 * }
 */
#ifdef CONFIG_PERF_EVENTS

#define __entry entry

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#define __get_str(field) (char *)__get_dynamic_array(field)

#define __perf_addr(a) __addr = (a)

#define __perf_count(c) __count = (c)
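/*
 * These two helpers are intended to be used from the perf-only assignment
 * block of an event definition (TP_perf_assign() in the event headers); a
 * hypothetical use:
 *
 *	__perf_count(delay);
 *
 * expands to "__count = (delay)", overriding the default __count = 1 set up
 * in the perf probe below, and __perf_addr() likewise sets __addr, which
 * defaults to 0.
 */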
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static notrace void							\
perf_trace_##call(void *__data, proto)					\
	struct ftrace_event_call *event_call = __data;			\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_raw_##call *entry;				\
	struct pt_regs __regs;						\
	u64 __addr = 0, __count = 1;					\
	struct hlist_head *head;					\
	perf_fetch_caller_regs(&__regs, 1);				\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
	__entry_size -= sizeof(u32);					\
	if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE,		\
		      "profile buffer not large enough"))		\
	entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare(	\
		__entry_size, event_call->event.type, &__regs, &rctx);	\
	head = per_cpu_ptr(event_call->perf_events, smp_processor_id());\
	perf_trace_buf_submit(entry, __entry_size, rctx, __addr,	\
		__count, &__regs, head);				\
 * This part is compiled out; it is only here as a build time check
 * to make sure that if the tracepoint handling changes, the
 * perf probe will fail to compile unless it too is updated.
 */
#define DEFINE_EVENT(template, call, proto, args)			\
static inline void perf_test_probe_##call(void)			\
{									\
	check_trace_callback_type_##call(perf_trace_##template);	\
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_PERF_EVENTS */

#undef _TRACE_PROFILE_INIT