#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <asm/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/ftrace.h>
#include <trace/boot.h>
#include <linux/kmemtrace.h>
#include <trace/power.h>

#include <linux/trace_seq.h>
#include <linux/ftrace_event.h>
enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_BPRINT,
	TRACE_SPECIAL,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BRANCH,
	TRACE_BOOT_CALL,
	TRACE_BOOT_RET,
	TRACE_GRAPH_RET,
	TRACE_GRAPH_ENT,
	TRACE_USER_STACK,
	TRACE_HW_BRANCHES,
	TRACE_KMEM_ALLOC,
	TRACE_KMEM_FREE,
	TRACE_POWER,

	__TRACE_LAST_TYPE,
};
/*
 * Function trace entry - function address and parent function address:
 */
struct ftrace_entry {
	struct trace_entry	ent;
	unsigned long		ip;
	unsigned long		parent_ip;
};
/* Function call entry */
struct ftrace_graph_ent_entry {
	struct trace_entry		ent;
	struct ftrace_graph_ent		graph_ent;
};
/* Function return entry */
struct ftrace_graph_ret_entry {
	struct trace_entry		ent;
	struct ftrace_graph_ret		ret;
};
extern struct tracer boot_tracer;
/*
 * Context switch trace entry - which task (and prio) we switched from/to:
 */
struct ctx_switch_entry {
	struct trace_entry	ent;
	unsigned int		prev_pid;
	unsigned char		prev_prio;
	unsigned char		prev_state;
	unsigned int		next_pid;
	unsigned char		next_prio;
	unsigned char		next_state;
	unsigned int		next_cpu;
};
/*
 * Special (free-form) trace entry:
 */
struct special_entry {
	struct trace_entry	ent;
	unsigned long		arg1;
	unsigned long		arg2;
	unsigned long		arg3;
};
/*
 * Stack-trace entry:
 */
#define FTRACE_STACK_ENTRIES	8

struct stack_entry {
	struct trace_entry	ent;
	unsigned long		caller[FTRACE_STACK_ENTRIES];
};

struct userstack_entry {
	struct trace_entry	ent;
	unsigned long		caller[FTRACE_STACK_ENTRIES];
};
/*
 * trace_printk entry:
 */
struct bprint_entry {
	struct trace_entry	ent;
	unsigned long		ip;
	const char		*fmt;
	u32			buf[];
};

struct print_entry {
	struct trace_entry	ent;
	unsigned long		ip;
	char			buf[];
};
#define TRACE_OLD_SIZE		88

struct trace_field_cont {
	unsigned char		type;
	/* Temporary till we get rid of this completely */
	char			buf[TRACE_OLD_SIZE - 1];
};
struct trace_mmiotrace_rw {
	struct trace_entry	ent;
	struct mmiotrace_rw	rw;
};

struct trace_mmiotrace_map {
	struct trace_entry	ent;
	struct mmiotrace_map	map;
};

struct trace_boot_call {
	struct trace_entry	ent;
	struct boot_trace_call	boot_call;
};

struct trace_boot_ret {
	struct trace_entry	ent;
	struct boot_trace_ret	boot_ret;
};
#define TRACE_FUNC_SIZE 30
#define TRACE_FILE_SIZE 20
struct trace_branch {
	struct trace_entry	ent;
	unsigned		line;
	char			func[TRACE_FUNC_SIZE+1];
	char			file[TRACE_FILE_SIZE+1];
	char			correct;
};
struct hw_branch_entry {
	struct trace_entry	ent;
	u64			from;
	u64			to;
};

struct trace_power {
	struct trace_entry	ent;
	struct power_trace	state_data;
};
enum kmemtrace_type_id {
	KMEMTRACE_TYPE_KMALLOC = 0,	/* kmalloc() or kfree(). */
	KMEMTRACE_TYPE_CACHE,		/* kmem_cache_*(). */
	KMEMTRACE_TYPE_PAGES,		/* __get_free_pages() and friends. */
};
struct kmemtrace_alloc_entry {
	struct trace_entry	ent;
	enum kmemtrace_type_id	type_id;
	unsigned long		call_site;
	const void		*ptr;
	size_t			bytes_req;
	size_t			bytes_alloc;
	gfp_t			gfp_flags;
	int			node;
};

struct kmemtrace_free_entry {
	struct trace_entry	ent;
	enum kmemtrace_type_id	type_id;
	unsigned long		call_site;
	const void		*ptr;
};
struct syscall_trace_enter {
	struct trace_entry	ent;
	int			nr;
	unsigned long		args[];
};

struct syscall_trace_exit {
	struct trace_entry	ent;
	int			nr;
	unsigned long		ret;
};
struct kprobe_trace_entry {
	struct trace_entry	ent;
	unsigned long		ip;
	int			nargs;
	unsigned long		args[];
};

#define SIZEOF_KPROBE_TRACE_ENTRY(n)			\
	(offsetof(struct kprobe_trace_entry, args) +	\
	(sizeof(unsigned long) * (n)))
struct kretprobe_trace_entry {
	struct trace_entry	ent;
	unsigned long		func;
	unsigned long		ret_ip;
	int			nargs;
	unsigned long		args[];
};

#define SIZEOF_KRETPROBE_TRACE_ENTRY(n)			\
	(offsetof(struct kretprobe_trace_entry, args) +	\
	(sizeof(unsigned long) * (n)))
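/*
 * For example, a probe that records two arguments reserves
 * SIZEOF_KPROBE_TRACE_ENTRY(2) bytes in the ring buffer, i.e.
 * offsetof(struct kprobe_trace_entry, args) plus room for two
 * unsigned longs; SIZEOF_KRETPROBE_TRACE_ENTRY(n) works the same
 * way for return probes.
 */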
/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF	- interrupts were disabled
 *  IRQS_NOSUPPORT - arch does not support irqs_disabled_flags
 *  NEED_RESCHED - reschedule is requested
 *  HARDIRQ	- inside an interrupt handler
 *  SOFTIRQ	- inside a softirq handler
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
};
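/*
 * These bits are recorded in each trace_entry's flags field; output
 * code can test them directly, e.g.:
 *
 *	if (entry->flags & TRACE_FLAG_HARDIRQ)
 *		trace_seq_putc(s, 'h');
 */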
#define TRACE_BUF_SIZE		1024

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data (for example which task started
 * the trace):
 */
struct trace_array_cpu {
	atomic_t		disabled;
	void			*buffer_page;	/* ring buffer spare */

	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	unsigned long		skipped_entries;
	cycle_t			preempt_timestamp;
	pid_t			pid;
	uid_t			uid;
	char			comm[TASK_COMM_LEN];
};
/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	struct ring_buffer	*buffer;
	unsigned long		entries;
	int			cpu;
	cycle_t			time_start;
	struct task_struct	*waiter;
	struct trace_array_cpu	*data[NR_CPUS];
};
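/*
 * A tracer reaches its per-CPU state through the data array; the
 * usual pattern (sketched here) is:
 *
 *	struct trace_array_cpu *data = tr->data[smp_processor_id()];
 *
 *	if (likely(!atomic_read(&data->disabled)))
 *		trace_function(tr, ip, parent_ip, flags, pc);
 */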
#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)		\
	if (FTRACE_CMP_TYPE(var, etype)) {		\
		var = (typeof(var))(entry);		\
		WARN_ON(id && (entry)->type != id);	\
		break;					\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);
/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 * Where "type" is the trace type that includes the trace_entry
 * as the "ent" item. And "id" is the trace identifier that is
 * used in the trace_type enum.
 *
 * If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);\
		IF_ASSIGN(var, ent, struct special_entry, 0);		\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\
		IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
			  TRACE_GRAPH_ENT);				\
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
			  TRACE_GRAPH_RET);				\
		IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES);\
		IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER);	\
		IF_ASSIGN(var, ent, struct kmemtrace_alloc_entry,	\
			  TRACE_KMEM_ALLOC);				\
		IF_ASSIGN(var, ent, struct kmemtrace_free_entry,	\
			  TRACE_KMEM_FREE);				\
		__ftrace_bad_type();					\
	} while (0)
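/*
 * Typical use in a tracer's print callback (a sketch):
 *
 *	struct ftrace_entry *field;
 *
 *	trace_assign_type(field, iter->ent);
 *	trace_seq_printf(&iter->seq, "%lx <-%lx\n",
 *			 field->ip, field->parent_ip);
 */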
/*
 * An option specific to a tracer. This is a boolean value.
 * The bit is the bit index that sets its value on the
 * flags value in struct tracer_flags.
 */
struct tracer_opt {
	const char	*name; /* Will appear on the trace_options file */
	u32		bit; /* Mask assigned in val field in tracer_flags */
};

/*
 * The set of specific options for a tracer. Your tracer
 * has to set the initial value of the flags val.
 */
struct tracer_flags {
	u32			val;
	struct tracer_opt	*opts;
};
/* Makes it easier to define a tracer opt */
#define TRACER_OPT(s, b)	.name = #s, .bit = b
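/*
 * For instance, a hypothetical tracer exposing one "verbose" option
 * would define (the opts array must end with an empty entry):
 *
 *	static struct tracer_opt my_opts[] = {
 *		{ TRACER_OPT(verbose, 0x1) },
 *		{ }
 *	};
 *
 *	static struct tracer_flags my_flags = {
 *		.val  = 0,
 *		.opts = my_opts,
 *	};
 */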
/**
 * struct tracer - a specific tracer and its callbacks to interact with debugfs
 * @name: the name chosen to select it on the available_tracers file
 * @init: called when one switches to this tracer (echo name > current_tracer)
 * @reset: called when one switches to another tracer
 * @start: called when tracing is unpaused (echo 1 > tracing_enabled)
 * @stop: called when tracing is paused (echo 0 > tracing_enabled)
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @wait_pipe: override how the user waits for traces on trace_pipe
 * @close: called when the trace file is released
 * @read: override the default read callback on trace_pipe
 * @splice_read: override the default splice_read callback on trace_pipe
 * @selftest: selftest to run on boot (see trace_selftest.c)
 * @print_header: override the first lines that describe your columns
 * @print_line: callback that prints a trace
 * @set_flag: signals one of your private flags changed (trace_options file)
 * @flags: your private flags
 */
struct tracer {
	const char		*name;
	int			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*wait_pipe)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
	ssize_t			(*splice_read)(struct trace_iterator *iter,
					       struct file *filp,
					       loff_t *ppos,
					       struct pipe_inode_info *pipe,
					       size_t len,
					       unsigned int flags);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	void			(*print_header)(struct seq_file *m);
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int			(*set_flag)(u32 old_flags, u32 bit, int set);
	struct tracer		*next;
	int			print_max;
	struct tracer_flags	*flags;
	struct tracer_stat	*stats;
};
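/*
 * A minimal tracer only needs a name and an init callback; the other
 * callbacks may be left NULL. Sketch of a hypothetical tracer,
 * registered from its own __init code:
 *
 *	static int mytracer_init(struct trace_array *tr)
 *	{
 *		return 0;
 *	}
 *
 *	static struct tracer mytracer __read_mostly = {
 *		.name	= "mytracer",
 *		.init	= mytracer_init,
 *	};
 *
 *	static __init int init_mytracer(void)
 *	{
 *		return register_tracer(&mytracer);
 *	}
 *	device_initcall(init_mytracer);
 */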
#define TRACE_PIPE_ALL_CPU	-1

int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void trace_wake_up(void);
void tracing_reset(struct trace_array *tr, int cpu);
void tracing_reset_online_cpus(struct trace_array *tr);
void tracing_reset_current(int cpu);
void tracing_reset_current_online_cpus(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *trace_create_file(const char *name,
				 mode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops);

struct dentry *tracing_init_dentry(void);
void init_tracer_sysprof_debugfs(struct dentry *d_tracer);
struct ring_buffer_event;

struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags,
			  int pc);
void trace_buffer_unlock_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc);
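/*
 * Sketch of the usual reserve/fill/commit sequence (error handling
 * trimmed; "buffer", "flags" and "pc" come from the caller):
 *
 *	struct ring_buffer_event *event;
 *	struct ftrace_entry *entry;
 *
 *	event = trace_buffer_lock_reserve(buffer, TRACE_FN,
 *					  sizeof(*entry), flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->ip = ip;
 *	entry->parent_ip = parent_ip;
 *	trace_buffer_unlock_commit(buffer, event, flags, pc);
 */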
struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
					    struct trace_array_cpu *data);

struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts);

void default_wait_pipe(struct trace_iterator *iter);
void poll_wait_pipe(struct trace_iterator *iter);
void ftrace(struct trace_array *tr,
	    struct trace_array_cpu *data,
	    unsigned long ip,
	    unsigned long parent_ip,
	    unsigned long flags, int pc);
void tracing_sched_switch_trace(struct trace_array *tr,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned long flags, int pc);

void tracing_sched_wakeup_trace(struct trace_array *tr,
				struct task_struct *wakee,
				struct task_struct *cur,
				unsigned long flags, int pc);
void trace_special(struct trace_array *tr,
		   struct trace_array_cpu *data,
		   unsigned long arg1,
		   unsigned long arg2,
		   unsigned long arg3, int pc);
void trace_function(struct trace_array *tr,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);
void trace_graph_return(struct ftrace_graph_ret *trace);
int trace_graph_entry(struct ftrace_graph_ent *trace);
void set_graph_array(struct trace_array *tr);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
void tracing_sched_switch_assign_trace(struct trace_array *tr);
void tracing_stop_sched_switch_record(void);
void tracing_start_sched_switch_record(void);
int register_tracer(struct tracer *type);
void unregister_tracer(struct tracer *type);
extern unsigned long nsecs_to_usecs(unsigned long nsecs);

#ifdef CONFIG_TRACER_MAX_TRACE
extern unsigned long tracing_max_latency;
extern unsigned long tracing_thresh;

void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);
#endif /* CONFIG_TRACER_MAX_TRACE */
#ifdef CONFIG_STACKTRACE
void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
			int skip, int pc);

void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
			    int pc);

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc);
#else
static inline void ftrace_trace_stack(struct trace_array *tr,
				      unsigned long flags, int skip, int pc)
{
}

static inline void ftrace_trace_userstack(struct trace_array *tr,
					  unsigned long flags, int pc)
{
}

static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
				 int skip, int pc)
{
}
#endif /* CONFIG_STACKTRACE */
extern cycle_t ftrace_now(int cpu);

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
typedef void
(*tracer_switch_func_t)(void *private,
			void *__rq,
			struct task_struct *prev,
			struct task_struct *next);

struct tracer_switch_ops {
	tracer_switch_func_t		func;
	void				*private;
	struct tracer_switch_ops	*next;
};
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */
extern void trace_find_cmdline(int pid, char comm[]);

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#endif

extern int ring_buffer_expanded;
extern bool tracing_selftest_disabled;
DECLARE_PER_CPU(local_t, ftrace_cpu_disabled);
#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_function_graph(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
				      struct trace_array *tr);
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
					       struct trace_array *tr);
extern int trace_selftest_startup_sysprof(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_hw_branches(struct tracer *trace,
					      struct trace_array *tr);
#endif /* CONFIG_FTRACE_STARTUP_TEST */
extern void *head_page(struct trace_array_cpu *data);
extern unsigned long long ns2usecs(cycle_t nsec);
extern int
trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_vprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_array_vprintk(struct trace_array *tr,
		    unsigned long ip, const char *fmt, va_list args);
int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...);

extern unsigned long trace_flags;

extern int trace_clock_id;
/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern enum print_line_t print_graph_function(struct trace_iterator *iter);
extern enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);

#ifdef CONFIG_DYNAMIC_FTRACE
/* TODO: make this variable */
#define FTRACE_GRAPH_MAX_FUNCS		32
extern int ftrace_graph_count;
extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];

static inline int ftrace_graph_addr(unsigned long addr)
{
	int i;

	if (!ftrace_graph_count || test_tsk_trace_graph(current))
		return 1;

	for (i = 0; i < ftrace_graph_count; i++) {
		if (addr == ftrace_graph_funcs[i])
			return 1;
	}

	return 0;
}
#else
static inline int ftrace_trace_addr(unsigned long addr)
{
	return 1;
}
static inline int ftrace_graph_addr(unsigned long addr)
{
	return 1;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#else /* CONFIG_FUNCTION_GRAPH_TRACER */
static inline enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
extern struct pid *ftrace_pid_trace;

#ifdef CONFIG_FUNCTION_TRACER
static inline int ftrace_trace_task(struct task_struct *task)
{
	if (!ftrace_pid_trace)
		return 1;

	return test_tsk_trace_trace(task);
}
#else
static inline int ftrace_trace_task(struct task_struct *task)
{
	return 1;
}
#endif
/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that control the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c.
 */
enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT		= 0x01,
	TRACE_ITER_SYM_OFFSET		= 0x02,
	TRACE_ITER_SYM_ADDR		= 0x04,
	TRACE_ITER_VERBOSE		= 0x08,
	TRACE_ITER_RAW			= 0x10,
	TRACE_ITER_HEX			= 0x20,
	TRACE_ITER_BIN			= 0x40,
	TRACE_ITER_BLOCK		= 0x80,
	TRACE_ITER_STACKTRACE		= 0x100,
	TRACE_ITER_SCHED_TREE		= 0x200,
	TRACE_ITER_PRINTK		= 0x400,
	TRACE_ITER_PREEMPTONLY		= 0x800,
	TRACE_ITER_BRANCH		= 0x1000,
	TRACE_ITER_ANNOTATE		= 0x2000,
	TRACE_ITER_USERSTACKTRACE	= 0x4000,
	TRACE_ITER_SYM_USEROBJ		= 0x8000,
	TRACE_ITER_PRINTK_MSGONLY	= 0x10000,
	TRACE_ITER_CONTEXT_INFO		= 0x20000, /* Print pid/cpu/time */
	TRACE_ITER_LATENCY_FMT		= 0x40000,
	TRACE_ITER_SLEEP_TIME		= 0x80000,
	TRACE_ITER_GRAPH_TIME		= 0x100000,
};
/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

extern struct tracer nop_trace;
/**
 * ftrace_preempt_disable - disable preemption scheduler safe
 *
 * When tracing can happen inside the scheduler, there exist
 * cases where the tracing might happen before the need_resched
 * flag is checked. If this happens and the tracer calls
 * preempt_enable (after a disable), a schedule might take place
 * causing an infinite recursion.
 *
 * To prevent this, we read the need_resched flag before
 * disabling preemption. When we want to enable preemption we
 * check the flag, if it is set, then we call preempt_enable_no_resched.
 * Otherwise, we call preempt_enable.
 *
 * The rationale for doing the above is that if need_resched is set
 * and we have yet to reschedule, we are either in an atomic location
 * (where we do not need to check for scheduling) or we are inside
 * the scheduler and do not want to resched.
 */
static inline int ftrace_preempt_disable(void)
{
	int resched;

	resched = need_resched();
	preempt_disable_notrace();

	return resched;
}
/**
 * ftrace_preempt_enable - enable preemption scheduler safe
 * @resched: the return value from ftrace_preempt_disable
 *
 * This is a scheduler safe way to enable preemption and not miss
 * any preemption checks. The disable counterpart saved the state
 * of preemption. If resched is set, then we were either inside an
 * atomic section or inside the scheduler (we would have already
 * scheduled otherwise). In this case, we do not want to call normal
 * preempt_enable, but preempt_enable_no_resched instead.
 */
static inline void ftrace_preempt_enable(int resched)
{
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}
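/*
 * The two helpers are meant to bracket the actual recording, e.g.:
 *
 *	int resched;
 *
 *	resched = ftrace_preempt_disable();
 *	... write the trace entry ...
 *	ftrace_preempt_enable(resched);
 */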
#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
static inline int trace_branch_enable(struct trace_array *tr)
{
	if (trace_flags & TRACE_ITER_BRANCH)
		return enable_branch_tracing(tr);
	return 0;
}
static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
#else
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */
/* set ring buffers to default size if not already done so */
int tracing_update_buffers(void);

/* trace event type bit fields, not numeric */
enum {
	TRACE_EVENT_TYPE_PRINTF		= 1,
	TRACE_EVENT_TYPE_RAW		= 2,
};
struct ftrace_event_field {
	struct list_head	link;
	char			*name;
	char			*type;
	int			filter_type;
	int			offset;
	int			size;
	int			is_signed;
};

struct event_filter {
	int			n_preds;
	struct filter_pred	**preds;
	char			*filter_string;
};

struct event_subsystem {
	struct list_head	list;
	const char		*name;
	struct dentry		*entry;
	struct event_filter	*filter;
	int			nr_events;
};

struct filter_pred;

typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event,
				 int val1, int val2);

struct filter_pred {
	filter_pred_fn_t	fn;
	u64			val;
	char			str_val[MAX_FILTER_STR_VAL];
	int			str_len;
	char			*field_name;
	int			offset;
	int			not;
	int			op;
};
extern void print_event_filter(struct ftrace_event_call *call,
			       struct trace_seq *s);
extern int apply_event_filter(struct ftrace_event_call *call,
			      char *filter_string);
extern int apply_subsystem_event_filter(struct event_subsystem *system,
					char *filter_string);
extern void print_subsystem_event_filter(struct event_subsystem *system,
					 struct trace_seq *s);
extern int filter_assign_type(const char *type);
static inline int
filter_check_discard(struct ftrace_event_call *call, void *rec,
		     struct ring_buffer *buffer,
		     struct ring_buffer_event *event)
{
	if (unlikely(call->filter_active) && !filter_match_preds(call, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
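/*
 * Event code typically calls this right before committing, e.g.:
 *
 *	if (!filter_check_discard(call, entry, buffer, event))
 *		trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
 */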
#define DEFINE_COMPARISON_PRED(type)					\
static int filter_pred_##type(struct filter_pred *pred, void *event,	\
			      int val1, int val2)			\
{									\
	type *addr = (type *)(event + pred->offset);			\
	type val = (type)pred->val;					\
	int match = 0;							\
									\
	switch (pred->op) {						\
	case OP_LT:							\
		match = (*addr < val);					\
		break;							\
	case OP_LE:							\
		match = (*addr <= val);					\
		break;							\
	case OP_GT:							\
		match = (*addr > val);					\
		break;							\
	case OP_GE:							\
		match = (*addr >= val);					\
		break;							\
	default:							\
		break;							\
	}								\
									\
	return match;							\
}
#define DEFINE_EQUALITY_PRED(size)					\
static int filter_pred_##size(struct filter_pred *pred, void *event,	\
			      int val1, int val2)			\
{									\
	u##size *addr = (u##size *)(event + pred->offset);		\
	u##size val = (u##size)pred->val;				\
	int match;							\
									\
	match = (val == *addr) ^ pred->not;				\
									\
	return match;							\
}
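/*
 * Instantiated by the filter code, e.g. DEFINE_COMPARISON_PRED(s32)
 * generates filter_pred_s32() for ordered comparisons on 32-bit
 * signed fields, and DEFINE_EQUALITY_PRED(64) generates
 * filter_pred_64() for equality tests on 64-bit fields.
 */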
extern struct mutex event_mutex;
extern struct list_head ftrace_events;

extern const char *__start___trace_bprintk_fmt[];
extern const char *__stop___trace_bprintk_fmt[];

#undef TRACE_EVENT_FORMAT
#define TRACE_EVENT_FORMAT(call, proto, args, fmt, tstruct, tpfmt)	\
	extern struct ftrace_event_call event_##call;
#undef TRACE_EVENT_FORMAT_NOFILTER
#define TRACE_EVENT_FORMAT_NOFILTER(call, proto, args, fmt, tstruct, tpfmt)
#include "trace_event_types.h"

#endif /* _LINUX_KERNEL_TRACE_H */