2 * ring buffer based function tracer
4 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
7 * Originally taken from the RT patch by:
8 * Arnaldo Carvalho de Melo <acme@redhat.com>
10 * Based on code from the latency_tracer, that is:
11 * Copyright (C) 2004-2006 Ingo Molnar
12 * Copyright (C) 2004 Nadia Yvette Chambers
14 #include <linux/ring_buffer.h>
15 #include <generated/utsrelease.h>
16 #include <linux/stacktrace.h>
17 #include <linux/writeback.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/notifier.h>
21 #include <linux/irqflags.h>
22 #include <linux/debugfs.h>
23 #include <linux/pagemap.h>
24 #include <linux/hardirq.h>
25 #include <linux/linkage.h>
26 #include <linux/uaccess.h>
27 #include <linux/kprobes.h>
28 #include <linux/ftrace.h>
29 #include <linux/module.h>
30 #include <linux/percpu.h>
31 #include <linux/splice.h>
32 #include <linux/kdebug.h>
33 #include <linux/string.h>
34 #include <linux/rwsem.h>
35 #include <linux/slab.h>
36 #include <linux/ctype.h>
37 #include <linux/init.h>
38 #include <linux/poll.h>
39 #include <linux/nmi.h>
41 #include <linux/sched/rt.h>
44 #include "trace_output.h"
47 * On boot up, the ring buffer is set to the minimum size, so that
48 * we do not waste memory on systems that are not using tracing.
50 bool ring_buffer_expanded;
53 * We need to change this state when a selftest is running.
54 * A selftest will look into the ring-buffer to count the
55 * entries inserted during the selftest, although concurrent
56 * insertions into the ring-buffer, such as from trace_printk, could occur
57 * at the same time, giving false positive or negative results.
59 static bool __read_mostly tracing_selftest_running;
62 * If a tracer is running, we do not want to run SELFTEST.
64 bool __read_mostly tracing_selftest_disabled;
66 /* Pipe tracepoints to printk */
67 struct trace_iterator *tracepoint_print_iter;
68 int tracepoint_printk;
70 /* For tracers that don't implement custom flags */
71 static struct tracer_opt dummy_tracer_opt[] = {
75 static struct tracer_flags dummy_tracer_flags = {
77 .opts = dummy_tracer_opt
81 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
87 * To prevent the comm cache from being overwritten when no
88 * tracing is active, only save the comm when a trace event
89 * occurred.
91 static DEFINE_PER_CPU(bool, trace_cmdline_save);
94 * Kill all tracing for good (never come back).
95 * It is initialized to 1 but will turn to zero if the initialization
96 * of the tracer is successful. But that is the only place that sets
97 * this back to zero.
99 static int tracing_disabled = 1;
101 DEFINE_PER_CPU(int, ftrace_cpu_disabled);
103 cpumask_var_t __read_mostly tracing_buffer_mask;
106 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
108 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
109 * is set, then ftrace_dump is called. This will output the contents
110 * of the ftrace buffers to the console. This is very useful for
111 * capturing traces that lead to crashes and outputting them to a
112 * serial console.
114 * It is off by default, but you can enable it either by specifying
115 * "ftrace_dump_on_oops" in the kernel command line, or setting
116 * /proc/sys/kernel/ftrace_dump_on_oops
117 * Set 1 if you want to dump buffers of all CPUs
118 * Set 2 if you want to dump the buffer of the CPU that triggered oops
121 enum ftrace_dump_mode ftrace_dump_on_oops;
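/*
 * Illustrative only (not part of the original file): based on the comment
 * above and the __setup()/sysctl hooks below, typical ways to enable it are:
 *
 *   ftrace_dump_on_oops                (kernel command line, dump all CPUs)
 *   ftrace_dump_on_oops=orig_cpu       (dump only the CPU that triggered the oops)
 *   echo 1 > /proc/sys/kernel/ftrace_dump_on_oops     (at run time)
 */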
123 /* When set, tracing will stop when a WARN*() is hit */
124 int __disable_trace_on_warning;
126 static int tracing_set_tracer(struct trace_array *tr, const char *buf);
128 #define MAX_TRACER_SIZE 100
129 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
130 static char *default_bootup_tracer;
132 static bool allocate_snapshot;
134 static int __init set_cmdline_ftrace(char *str)
136 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
137 default_bootup_tracer = bootup_tracer_buf;
138 /* We are using ftrace early, expand it */
139 ring_buffer_expanded = true;
142 __setup("ftrace=", set_cmdline_ftrace);
144 static int __init set_ftrace_dump_on_oops(char *str)
146 if (*str++ != '=' || !*str) {
147 ftrace_dump_on_oops = DUMP_ALL;
151 if (!strcmp("orig_cpu", str)) {
152 ftrace_dump_on_oops = DUMP_ORIG;
158 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
160 static int __init stop_trace_on_warning(char *str)
162 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
163 __disable_trace_on_warning = 1;
166 __setup("traceoff_on_warning", stop_trace_on_warning);
168 static int __init boot_alloc_snapshot(char *str)
170 allocate_snapshot = true;
171 /* We also need the main ring buffer expanded */
172 ring_buffer_expanded = true;
175 __setup("alloc_snapshot", boot_alloc_snapshot);
178 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
179 static char *trace_boot_options __initdata;
181 static int __init set_trace_boot_options(char *str)
183 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
184 trace_boot_options = trace_boot_options_buf;
187 __setup("trace_options=", set_trace_boot_options);
189 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
190 static char *trace_boot_clock __initdata;
192 static int __init set_trace_boot_clock(char *str)
194 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
195 trace_boot_clock = trace_boot_clock_buf;
198 __setup("trace_clock=", set_trace_boot_clock);
200 static int __init set_tracepoint_printk(char *str)
202 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
203 tracepoint_printk = 1;
206 __setup("tp_printk", set_tracepoint_printk);
208 unsigned long long ns2usecs(cycle_t nsec)
216 * The global_trace is the descriptor that holds the tracing
217 * buffers for the live tracing. For each CPU, it contains
218 * a linked list of pages that will store trace entries. The
219 * page descriptor of the pages in the memory is used to hold
220 * the linked list by linking the lru item in the page descriptor
221 * to each of the pages in the buffer per CPU.
223 * For each active CPU there is a data field that holds the
224 * pages for the buffer for that CPU. Each CPU has the same number
225 * of pages allocated for its buffer.
227 static struct trace_array global_trace;
229 LIST_HEAD(ftrace_trace_arrays);
231 int trace_array_get(struct trace_array *this_tr)
233 struct trace_array *tr;
236 mutex_lock(&trace_types_lock);
237 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
244 mutex_unlock(&trace_types_lock);
249 static void __trace_array_put(struct trace_array *this_tr)
251 WARN_ON(!this_tr->ref);
255 void trace_array_put(struct trace_array *this_tr)
257 mutex_lock(&trace_types_lock);
258 __trace_array_put(this_tr);
259 mutex_unlock(&trace_types_lock);
262 int filter_check_discard(struct ftrace_event_file *file, void *rec,
263 struct ring_buffer *buffer,
264 struct ring_buffer_event *event)
266 if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
267 !filter_match_preds(file->filter, rec)) {
268 ring_buffer_discard_commit(buffer, event);
274 EXPORT_SYMBOL_GPL(filter_check_discard);
276 int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
277 struct ring_buffer *buffer,
278 struct ring_buffer_event *event)
280 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
281 !filter_match_preds(call->filter, rec)) {
282 ring_buffer_discard_commit(buffer, event);
288 EXPORT_SYMBOL_GPL(call_filter_check_discard);
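/*
 * Illustrative sketch (mirrors the pattern used by the tracers later in this
 * file): reserve an event, fill it in, and only commit it when the filter
 * does not discard it.
 *
 *   entry = ring_buffer_event_data(event);
 *   entry->ip = ip;                          (example field)
 *   if (!call_filter_check_discard(call, entry, buffer, event))
 *           __buffer_unlock_commit(buffer, event);
 */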
290 static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
294 /* Early boot up does not have a buffer yet */
296 return trace_clock_local();
298 ts = ring_buffer_time_stamp(buf->buffer, cpu);
299 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
304 cycle_t ftrace_now(int cpu)
306 return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
310 * tracing_is_enabled - Show if global_trace has been enabled
312 * Shows if the global trace has been enabled or not. It uses the
313 * mirror flag "buffer_disabled" to be used in fast paths such as for
314 * the irqsoff tracer. But it may be inaccurate due to races. If you
315 * need to know the accurate state, use tracing_is_on() which is a little
316 * slower, but accurate.
318 int tracing_is_enabled(void)
321 * For quick access (irqsoff uses this in fast path), just
322 * return the mirror variable of the state of the ring buffer.
323 * It's a little racy, but we don't really care.
326 return !global_trace.buffer_disabled;
330 * trace_buf_size is the size in bytes that is allocated
331 * for a buffer. Note, the number of bytes is always rounded
334 * This number is purposely set to a low number of 16384.
335 * If the dump on oops happens, it will be much appreciated
336 * to not have to wait for all that output. Anyway this can be
337 * boot time and run time configurable.
339 #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
341 static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
343 /* trace_types holds a linked list of available tracers. */
344 static struct tracer *trace_types __read_mostly;
347 * trace_types_lock is used to protect the trace_types list.
349 DEFINE_MUTEX(trace_types_lock);
352 * serialize the access of the ring buffer
354 * The ring buffer serializes readers, but that is only low-level protection.
355 * The validity of the events (returned by ring_buffer_peek() etc.)
356 * is not protected by the ring buffer.
358 * The content of events may become garbage if we allow other processes to
359 * consume these events concurrently:
360 * A) the page of the consumed events may become a normal page
361 * (not a reader page) in the ring buffer, and this page will be rewritten
362 * by the event producer.
363 * B) The page of the consumed events may become a page for splice_read,
364 * and this page will be returned to the system.
366 * These primitives allow multiple processes to access different per-cpu
367 * ring buffers simultaneously.
369 * These primitives don't distinguish read-only and read-consume access.
370 * Multiple read-only accesses are also serialized.
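/*
 * Illustrative sketch of how a reader is expected to use these primitives
 * (a simplified version of what the read paths in this file do):
 *
 *   trace_access_lock(cpu);          (cpu, or RING_BUFFER_ALL_CPUS)
 *   ...consume or peek at events for that cpu...
 *   trace_access_unlock(cpu);
 */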
374 static DECLARE_RWSEM(all_cpu_access_lock);
375 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
377 static inline void trace_access_lock(int cpu)
379 if (cpu == RING_BUFFER_ALL_CPUS) {
380 /* gain it for accessing the whole ring buffer. */
381 down_write(&all_cpu_access_lock);
383 /* gain it for accessing a cpu ring buffer. */
385 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
386 down_read(&all_cpu_access_lock);
388 /* Secondly block other access to this @cpu ring buffer. */
389 mutex_lock(&per_cpu(cpu_access_lock, cpu));
393 static inline void trace_access_unlock(int cpu)
395 if (cpu == RING_BUFFER_ALL_CPUS) {
396 up_write(&all_cpu_access_lock);
398 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
399 up_read(&all_cpu_access_lock);
403 static inline void trace_access_lock_init(void)
407 for_each_possible_cpu(cpu)
408 mutex_init(&per_cpu(cpu_access_lock, cpu));
413 static DEFINE_MUTEX(access_lock);
415 static inline void trace_access_lock(int cpu)
418 mutex_lock(&access_lock);
421 static inline void trace_access_unlock(int cpu)
424 mutex_unlock(&access_lock);
427 static inline void trace_access_lock_init(void)
433 /* trace_flags holds trace_options default values */
434 unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
435 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
436 TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
437 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;
439 static void tracer_tracing_on(struct trace_array *tr)
441 if (tr->trace_buffer.buffer)
442 ring_buffer_record_on(tr->trace_buffer.buffer);
444 * This flag is looked at when buffers haven't been allocated
445 * yet, or by some tracers (like irqsoff) that just want to
446 * know if the ring buffer has been disabled, but can handle
447 * races where it gets disabled while we still do a record.
448 * As the check is in the fast path of the tracers, it is more
449 * important to be fast than accurate.
451 tr->buffer_disabled = 0;
452 /* Make the flag seen by readers */
457 * tracing_on - enable tracing buffers
459 * This function enables tracing buffers that may have been
460 * disabled with tracing_off.
462 void tracing_on(void)
464 tracer_tracing_on(&global_trace);
466 EXPORT_SYMBOL_GPL(tracing_on);
469 * __trace_puts - write a constant string into the trace buffer.
470 * @ip: The address of the caller
471 * @str: The constant string to write
472 * @size: The size of the string.
474 int __trace_puts(unsigned long ip, const char *str, int size)
476 struct ring_buffer_event *event;
477 struct ring_buffer *buffer;
478 struct print_entry *entry;
479 unsigned long irq_flags;
483 if (!(trace_flags & TRACE_ITER_PRINTK))
486 pc = preempt_count();
488 if (unlikely(tracing_selftest_running || tracing_disabled))
491 alloc = sizeof(*entry) + size + 2; /* possible \n added */
493 local_save_flags(irq_flags);
494 buffer = global_trace.trace_buffer.buffer;
495 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
500 entry = ring_buffer_event_data(event);
503 memcpy(&entry->buf, str, size);
505 /* Add a newline if necessary */
506 if (entry->buf[size - 1] != '\n') {
507 entry->buf[size] = '\n';
508 entry->buf[size + 1] = '\0';
510 entry->buf[size] = '\0';
512 __buffer_unlock_commit(buffer, event);
513 ftrace_trace_stack(buffer, irq_flags, 4, pc);
517 EXPORT_SYMBOL_GPL(__trace_puts);
520 * __trace_bputs - write the pointer to a constant string into trace buffer
521 * @ip: The address of the caller
522 * @str: The constant string to write to the buffer to
524 int __trace_bputs(unsigned long ip, const char *str)
526 struct ring_buffer_event *event;
527 struct ring_buffer *buffer;
528 struct bputs_entry *entry;
529 unsigned long irq_flags;
530 int size = sizeof(struct bputs_entry);
533 if (!(trace_flags & TRACE_ITER_PRINTK))
536 pc = preempt_count();
538 if (unlikely(tracing_selftest_running || tracing_disabled))
541 local_save_flags(irq_flags);
542 buffer = global_trace.trace_buffer.buffer;
543 event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
548 entry = ring_buffer_event_data(event);
552 __buffer_unlock_commit(buffer, event);
553 ftrace_trace_stack(buffer, irq_flags, 4, pc);
557 EXPORT_SYMBOL_GPL(__trace_bputs);
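/*
 * Note (summary, not from this file): callers normally use the trace_puts()
 * macro from linux/kernel.h, which picks __trace_bputs() for compile-time
 * constant strings (only the pointer is recorded) and __trace_puts() for
 * dynamic strings (the whole string is copied into the ring buffer).
 */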
559 #ifdef CONFIG_TRACER_SNAPSHOT
561 * trace_snapshot - take a snapshot of the current buffer.
563 * This causes a swap between the snapshot buffer and the current live
564 * tracing buffer. You can use this to take snapshots of the live
565 * trace when some condition is triggered, but continue to trace.
567 * Note, make sure to allocate the snapshot with either
568 * a tracing_snapshot_alloc(), or by doing it manually
569 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
571 * If the snapshot buffer is not allocated, it will stop tracing.
572 * Basically making a permanent snapshot.
574 void tracing_snapshot(void)
576 struct trace_array *tr = &global_trace;
577 struct tracer *tracer = tr->current_trace;
581 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
582 internal_trace_puts("*** snapshot is being ignored ***\n");
586 if (!tr->allocated_snapshot) {
587 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
588 internal_trace_puts("*** stopping trace here! ***\n");
593 /* Note, snapshot can not be used when the tracer uses it */
594 if (tracer->use_max_tr) {
595 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
596 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
600 local_irq_save(flags);
601 update_max_tr(tr, current, smp_processor_id());
602 local_irq_restore(flags);
604 EXPORT_SYMBOL_GPL(tracing_snapshot);
606 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
607 struct trace_buffer *size_buf, int cpu_id);
608 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
610 static int alloc_snapshot(struct trace_array *tr)
614 if (!tr->allocated_snapshot) {
616 /* allocate spare buffer */
617 ret = resize_buffer_duplicate_size(&tr->max_buffer,
618 &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
622 tr->allocated_snapshot = true;
628 static void free_snapshot(struct trace_array *tr)
631 * We don't free the ring buffer; instead, resize it because
632 * the max_tr ring buffer has some state (e.g. ring->clock) and
633 * we want to preserve it.
635 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
636 set_buffer_entries(&tr->max_buffer, 1);
637 tracing_reset_online_cpus(&tr->max_buffer);
638 tr->allocated_snapshot = false;
642 * tracing_alloc_snapshot - allocate snapshot buffer.
644 * This only allocates the snapshot buffer if it isn't already
645 * allocated - it doesn't also take a snapshot.
647 * This is meant to be used in cases where the snapshot buffer needs
648 * to be set up for events that can't sleep but need to be able to
649 * trigger a snapshot.
651 int tracing_alloc_snapshot(void)
653 struct trace_array *tr = &global_trace;
656 ret = alloc_snapshot(tr);
661 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
664 * trace_snapshot_alloc - allocate and take a snapshot of the current buffer.
666 * This is similar to trace_snapshot(), but it will allocate the
667 * snapshot buffer if it isn't already allocated. Use this only
668 * where it is safe to sleep, as the allocation may sleep.
670 * This causes a swap between the snapshot buffer and the current live
671 * tracing buffer. You can use this to take snapshots of the live
672 * trace when some condition is triggered, but continue to trace.
674 void tracing_snapshot_alloc(void)
678 ret = tracing_alloc_snapshot();
684 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
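/*
 * Illustrative sketch, based on the kerneldoc above: allocate the snapshot
 * buffer once from a context that may sleep, then trigger snapshots from the
 * point of interest.
 *
 *   tracing_snapshot_alloc();        (or: echo 1 > .../tracing/snapshot)
 *   ...
 *   if (condition_of_interest)       (hypothetical condition)
 *           tracing_snapshot();      (swap live buffer with the snapshot)
 */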
686 void tracing_snapshot(void)
688 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
690 EXPORT_SYMBOL_GPL(tracing_snapshot);
691 int tracing_alloc_snapshot(void)
693 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
696 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
697 void tracing_snapshot_alloc(void)
702 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
703 #endif /* CONFIG_TRACER_SNAPSHOT */
705 static void tracer_tracing_off(struct trace_array *tr)
707 if (tr->trace_buffer.buffer)
708 ring_buffer_record_off(tr->trace_buffer.buffer);
710 * This flag is looked at when buffers haven't been allocated
711 * yet, or by some tracers (like irqsoff) that just want to
712 * know if the ring buffer has been disabled, but can handle
713 * races where it gets disabled while we still do a record.
714 * As the check is in the fast path of the tracers, it is more
715 * important to be fast than accurate.
717 tr->buffer_disabled = 1;
718 /* Make the flag seen by readers */
723 * tracing_off - turn off tracing buffers
725 * This function stops the tracing buffers from recording data.
726 * It does not disable any overhead the tracers themselves may
727 * be causing. This function simply causes all recording to
728 * the ring buffers to fail.
730 void tracing_off(void)
732 tracer_tracing_off(&global_trace);
734 EXPORT_SYMBOL_GPL(tracing_off);
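/*
 * Illustrative sketch: a common debugging pattern is to stop the ring
 * buffers as soon as a condition of interest is hit, so the events leading
 * up to it are preserved for inspection.
 *
 *   if (something_went_wrong)        (hypothetical condition)
 *           tracing_off();
 *   ...
 *   tracing_on();                    (resume recording later if desired)
 */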
736 void disable_trace_on_warning(void)
738 if (__disable_trace_on_warning)
743 * tracer_tracing_is_on - show real state of ring buffer enabled
744 * @tr : the trace array to know if ring buffer is enabled
746 * Shows real state of the ring buffer if it is enabled or not.
748 static int tracer_tracing_is_on(struct trace_array *tr)
750 if (tr->trace_buffer.buffer)
751 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
752 return !tr->buffer_disabled;
756 * tracing_is_on - show state of ring buffers enabled
758 int tracing_is_on(void)
760 return tracer_tracing_is_on(&global_trace);
762 EXPORT_SYMBOL_GPL(tracing_is_on);
764 static int __init set_buf_size(char *str)
766 unsigned long buf_size;
770 buf_size = memparse(str, &str);
771 /* nr_entries can not be zero */
774 trace_buf_size = buf_size;
777 __setup("trace_buf_size=", set_buf_size);
779 static int __init set_tracing_thresh(char *str)
781 unsigned long threshold;
786 ret = kstrtoul(str, 0, &threshold);
789 tracing_thresh = threshold * 1000;
792 __setup("tracing_thresh=", set_tracing_thresh);
794 unsigned long nsecs_to_usecs(unsigned long nsecs)
799 /* These must match the bit positions in trace_iterator_flags */
800 static const char *trace_options[] = {
833 int in_ns; /* is this clock in nanoseconds? */
835 { trace_clock_local, "local", 1 },
836 { trace_clock_global, "global", 1 },
837 { trace_clock_counter, "counter", 0 },
838 { trace_clock_jiffies, "uptime", 0 },
839 { trace_clock, "perf", 1 },
840 { ktime_get_mono_fast_ns, "mono", 1 },
845 * trace_parser_get_init - gets the buffer for trace parser
847 int trace_parser_get_init(struct trace_parser *parser, int size)
849 memset(parser, 0, sizeof(*parser));
851 parser->buffer = kmalloc(size, GFP_KERNEL);
860 * trace_parser_put - frees the buffer for trace parser
862 void trace_parser_put(struct trace_parser *parser)
864 kfree(parser->buffer);
868 * trace_get_user - reads the user input string separated by space
869 * (matched by isspace(ch))
871 * For each string found the 'struct trace_parser' is updated,
872 * and the function returns.
874 * Returns number of bytes read.
876 * See kernel/trace/trace.h for 'struct trace_parser' details.
878 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
879 size_t cnt, loff_t *ppos)
886 trace_parser_clear(parser);
888 ret = get_user(ch, ubuf++);
896 * The parser is not finished with the last write,
897 * continue reading the user input without skipping spaces.
900 /* skip white space */
901 while (cnt && isspace(ch)) {
902 ret = get_user(ch, ubuf++);
909 /* only spaces were written */
919 /* read the non-space input */
920 while (cnt && !isspace(ch)) {
921 if (parser->idx < parser->size - 1)
922 parser->buffer[parser->idx++] = ch;
927 ret = get_user(ch, ubuf++);
934 /* We either got finished input or we have to wait for another call. */
936 parser->buffer[parser->idx] = 0;
937 parser->cont = false;
938 } else if (parser->idx < parser->size - 1) {
940 parser->buffer[parser->idx++] = ch;
953 /* TODO add a seq_buf_to_buffer() */
954 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
958 if (trace_seq_used(s) <= s->seq.readpos)
961 len = trace_seq_used(s) - s->seq.readpos;
964 memcpy(buf, s->buffer + s->seq.readpos, cnt);
966 s->seq.readpos += cnt;
970 unsigned long __read_mostly tracing_thresh;
972 #ifdef CONFIG_TRACER_MAX_TRACE
974 * Copy the new maximum trace into the separate maximum-trace
975 * structure. (this way the maximum trace is permanently saved,
976 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
979 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
981 struct trace_buffer *trace_buf = &tr->trace_buffer;
982 struct trace_buffer *max_buf = &tr->max_buffer;
983 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
984 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
987 max_buf->time_start = data->preempt_timestamp;
989 max_data->saved_latency = tr->max_latency;
990 max_data->critical_start = data->critical_start;
991 max_data->critical_end = data->critical_end;
993 memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
994 max_data->pid = tsk->pid;
996 * If tsk == current, then use current_uid(), as that does not use
997 * RCU. The irq tracer can be called out of RCU scope.
1000 max_data->uid = current_uid();
1002 max_data->uid = task_uid(tsk);
1004 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1005 max_data->policy = tsk->policy;
1006 max_data->rt_priority = tsk->rt_priority;
1008 /* record this task's comm */
1009 tracing_record_cmdline(tsk);
1013 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1015 * @tsk: the task with the latency
1016 * @cpu: The cpu that initiated the trace.
1018 * Flip the buffers between the @tr and the max_tr and record information
1019 * about which task was the cause of this latency.
1022 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1024 struct ring_buffer *buf;
1029 WARN_ON_ONCE(!irqs_disabled());
1031 if (!tr->allocated_snapshot) {
1032 /* Only the nop tracer should hit this when disabling */
1033 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1037 arch_spin_lock(&tr->max_lock);
1039 buf = tr->trace_buffer.buffer;
1040 tr->trace_buffer.buffer = tr->max_buffer.buffer;
1041 tr->max_buffer.buffer = buf;
1043 __update_max_tr(tr, tsk, cpu);
1044 arch_spin_unlock(&tr->max_lock);
1048 * update_max_tr_single - only copy one trace over, and reset the rest
1050 * @tsk: task with the latency
1051 * @cpu: the cpu of the buffer to copy.
1053 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1056 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1063 WARN_ON_ONCE(!irqs_disabled());
1064 if (!tr->allocated_snapshot) {
1065 /* Only the nop tracer should hit this when disabling */
1066 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1070 arch_spin_lock(&tr->max_lock);
1072 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
1074 if (ret == -EBUSY) {
1076 * We failed to swap the buffer due to a commit taking
1077 * place on this CPU. We fail to record, but we reset
1078 * the max trace buffer (no one writes directly to it)
1079 * and flag that it failed.
1081 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1082 "Failed to swap buffers due to commit in progress\n");
1085 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1087 __update_max_tr(tr, tsk, cpu);
1088 arch_spin_unlock(&tr->max_lock);
1090 #endif /* CONFIG_TRACER_MAX_TRACE */
1092 static int wait_on_pipe(struct trace_iterator *iter, bool full)
1094 /* Iterators are static, they should be filled or empty */
1095 if (trace_buffer_iter(iter, iter->cpu_file))
1098 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1102 #ifdef CONFIG_FTRACE_STARTUP_TEST
1103 static int run_tracer_selftest(struct tracer *type)
1105 struct trace_array *tr = &global_trace;
1106 struct tracer *saved_tracer = tr->current_trace;
1109 if (!type->selftest || tracing_selftest_disabled)
1113 * Run a selftest on this tracer.
1114 * Here we reset the trace buffer, and set the current
1115 * tracer to be this tracer. The tracer can then run some
1116 * internal tracing to verify that everything is in order.
1117 * If we fail, we do not register this tracer.
1119 tracing_reset_online_cpus(&tr->trace_buffer);
1121 tr->current_trace = type;
1123 #ifdef CONFIG_TRACER_MAX_TRACE
1124 if (type->use_max_tr) {
1125 /* If we expanded the buffers, make sure the max is expanded too */
1126 if (ring_buffer_expanded)
1127 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1128 RING_BUFFER_ALL_CPUS);
1129 tr->allocated_snapshot = true;
1133 /* the test is responsible for initializing and enabling */
1134 pr_info("Testing tracer %s: ", type->name);
1135 ret = type->selftest(type, tr);
1136 /* the test is responsible for resetting too */
1137 tr->current_trace = saved_tracer;
1139 printk(KERN_CONT "FAILED!\n");
1140 /* Add the warning after printing 'FAILED' */
1144 /* Only reset on passing, to avoid touching corrupted buffers */
1145 tracing_reset_online_cpus(&tr->trace_buffer);
1147 #ifdef CONFIG_TRACER_MAX_TRACE
1148 if (type->use_max_tr) {
1149 tr->allocated_snapshot = false;
1151 /* Shrink the max buffer again */
1152 if (ring_buffer_expanded)
1153 ring_buffer_resize(tr->max_buffer.buffer, 1,
1154 RING_BUFFER_ALL_CPUS);
1158 printk(KERN_CONT "PASSED\n");
1162 static inline int run_tracer_selftest(struct tracer *type)
1166 #endif /* CONFIG_FTRACE_STARTUP_TEST */
1169 * register_tracer - register a tracer with the ftrace system.
1170 * @type - the plugin for the tracer
1172 * Register a new plugin tracer.
1174 int register_tracer(struct tracer *type)
1180 pr_info("Tracer must have a name\n");
1184 if (strlen(type->name) >= MAX_TRACER_SIZE) {
1185 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1189 mutex_lock(&trace_types_lock);
1191 tracing_selftest_running = true;
1193 for (t = trace_types; t; t = t->next) {
1194 if (strcmp(type->name, t->name) == 0) {
1196 pr_info("Tracer %s already registered\n",
1203 if (!type->set_flag)
1204 type->set_flag = &dummy_set_flag;
1206 type->flags = &dummy_tracer_flags;
1208 if (!type->flags->opts)
1209 type->flags->opts = dummy_tracer_opt;
1211 ret = run_tracer_selftest(type);
1215 type->next = trace_types;
1219 tracing_selftest_running = false;
1220 mutex_unlock(&trace_types_lock);
1222 if (ret || !default_bootup_tracer)
1225 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
1228 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1229 /* Do we want this tracer to start on bootup? */
1230 tracing_set_tracer(&global_trace, type->name);
1231 default_bootup_tracer = NULL;
1232 /* disable other selftests, since this will break them. */
1233 tracing_selftest_disabled = true;
1234 #ifdef CONFIG_FTRACE_STARTUP_TEST
1235 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1243 void tracing_reset(struct trace_buffer *buf, int cpu)
1245 struct ring_buffer *buffer = buf->buffer;
1250 ring_buffer_record_disable(buffer);
1252 /* Make sure all commits have finished */
1253 synchronize_sched();
1254 ring_buffer_reset_cpu(buffer, cpu);
1256 ring_buffer_record_enable(buffer);
1259 void tracing_reset_online_cpus(struct trace_buffer *buf)
1261 struct ring_buffer *buffer = buf->buffer;
1267 ring_buffer_record_disable(buffer);
1269 /* Make sure all commits have finished */
1270 synchronize_sched();
1272 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
1274 for_each_online_cpu(cpu)
1275 ring_buffer_reset_cpu(buffer, cpu);
1277 ring_buffer_record_enable(buffer);
1280 /* Must have trace_types_lock held */
1281 void tracing_reset_all_online_cpus(void)
1283 struct trace_array *tr;
1285 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1286 tracing_reset_online_cpus(&tr->trace_buffer);
1287 #ifdef CONFIG_TRACER_MAX_TRACE
1288 tracing_reset_online_cpus(&tr->max_buffer);
1293 #define SAVED_CMDLINES_DEFAULT 128
1294 #define NO_CMDLINE_MAP UINT_MAX
1295 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
1296 struct saved_cmdlines_buffer {
1297 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1298 unsigned *map_cmdline_to_pid;
1299 unsigned cmdline_num;
1301 char *saved_cmdlines;
1303 static struct saved_cmdlines_buffer *savedcmd;
1305 /* temporarily disable recording */
1306 static atomic_t trace_record_cmdline_disabled __read_mostly;
1308 static inline char *get_saved_cmdlines(int idx)
1310 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1313 static inline void set_cmdline(int idx, const char *cmdline)
1315 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1318 static int allocate_cmdlines_buffer(unsigned int val,
1319 struct saved_cmdlines_buffer *s)
1321 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1323 if (!s->map_cmdline_to_pid)
1326 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1327 if (!s->saved_cmdlines) {
1328 kfree(s->map_cmdline_to_pid);
1333 s->cmdline_num = val;
1334 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1335 sizeof(s->map_pid_to_cmdline));
1336 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1337 val * sizeof(*s->map_cmdline_to_pid));
1342 static int trace_create_savedcmd(void)
1346 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
1350 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1360 int is_tracing_stopped(void)
1362 return global_trace.stop_count;
1366 * tracing_start - quick start of the tracer
1368 * If tracing is enabled but was stopped by tracing_stop,
1369 * this will start the tracer back up.
1371 void tracing_start(void)
1373 struct ring_buffer *buffer;
1374 unsigned long flags;
1376 if (tracing_disabled)
1379 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1380 if (--global_trace.stop_count) {
1381 if (global_trace.stop_count < 0) {
1382 /* Someone screwed up their debugging */
1384 global_trace.stop_count = 0;
1389 /* Prevent the buffers from switching */
1390 arch_spin_lock(&global_trace.max_lock);
1392 buffer = global_trace.trace_buffer.buffer;
1394 ring_buffer_record_enable(buffer);
1396 #ifdef CONFIG_TRACER_MAX_TRACE
1397 buffer = global_trace.max_buffer.buffer;
1399 ring_buffer_record_enable(buffer);
1402 arch_spin_unlock(&global_trace.max_lock);
1405 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1408 static void tracing_start_tr(struct trace_array *tr)
1410 struct ring_buffer *buffer;
1411 unsigned long flags;
1413 if (tracing_disabled)
1416 /* If global, we need to also start the max tracer */
1417 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1418 return tracing_start();
1420 raw_spin_lock_irqsave(&tr->start_lock, flags);
1422 if (--tr->stop_count) {
1423 if (tr->stop_count < 0) {
1424 /* Someone screwed up their debugging */
1431 buffer = tr->trace_buffer.buffer;
1433 ring_buffer_record_enable(buffer);
1436 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1440 * tracing_stop - quick stop of the tracer
1442 * Light weight way to stop tracing. Use in conjunction with
1445 void tracing_stop(void)
1447 struct ring_buffer *buffer;
1448 unsigned long flags;
1450 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1451 if (global_trace.stop_count++)
1454 /* Prevent the buffers from switching */
1455 arch_spin_lock(&global_trace.max_lock);
1457 buffer = global_trace.trace_buffer.buffer;
1459 ring_buffer_record_disable(buffer);
1461 #ifdef CONFIG_TRACER_MAX_TRACE
1462 buffer = global_trace.max_buffer.buffer;
1464 ring_buffer_record_disable(buffer);
1467 arch_spin_unlock(&global_trace.max_lock);
1470 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1473 static void tracing_stop_tr(struct trace_array *tr)
1475 struct ring_buffer *buffer;
1476 unsigned long flags;
1478 /* If global, we need to also stop the max tracer */
1479 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1480 return tracing_stop();
1482 raw_spin_lock_irqsave(&tr->start_lock, flags);
1483 if (tr->stop_count++)
1486 buffer = tr->trace_buffer.buffer;
1488 ring_buffer_record_disable(buffer);
1491 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1494 void trace_stop_cmdline_recording(void);
1496 static int trace_save_cmdline(struct task_struct *tsk)
1500 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
1504 * It's not the end of the world if we don't get
1505 * the lock, but we also don't want to spin
1506 * nor do we want to disable interrupts,
1507 * so if we miss here, then better luck next time.
1509 if (!arch_spin_trylock(&trace_cmdline_lock))
1512 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
1513 if (idx == NO_CMDLINE_MAP) {
1514 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
1517 * Check whether the cmdline buffer at idx has a pid
1518 * mapped. We are going to overwrite that entry so we
1519 * need to clear the map_pid_to_cmdline. Otherwise we
1520 * would read the new comm for the old pid.
1522 pid = savedcmd->map_cmdline_to_pid[idx];
1523 if (pid != NO_CMDLINE_MAP)
1524 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
1526 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1527 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
1529 savedcmd->cmdline_idx = idx;
1532 set_cmdline(idx, tsk->comm);
1534 arch_spin_unlock(&trace_cmdline_lock);
1539 static void __trace_find_cmdline(int pid, char comm[])
1544 strcpy(comm, "<idle>");
1548 if (WARN_ON_ONCE(pid < 0)) {
1549 strcpy(comm, "<XXX>");
1553 if (pid > PID_MAX_DEFAULT) {
1554 strcpy(comm, "<...>");
1558 map = savedcmd->map_pid_to_cmdline[pid];
1559 if (map != NO_CMDLINE_MAP)
1560 strcpy(comm, get_saved_cmdlines(map));
1562 strcpy(comm, "<...>");
1565 void trace_find_cmdline(int pid, char comm[])
1568 arch_spin_lock(&trace_cmdline_lock);
1570 __trace_find_cmdline(pid, comm);
1572 arch_spin_unlock(&trace_cmdline_lock);
1576 void tracing_record_cmdline(struct task_struct *tsk)
1578 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
1581 if (!__this_cpu_read(trace_cmdline_save))
1584 if (trace_save_cmdline(tsk))
1585 __this_cpu_write(trace_cmdline_save, false);
1589 tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1592 struct task_struct *tsk = current;
1594 entry->preempt_count = pc & 0xff;
1595 entry->pid = (tsk) ? tsk->pid : 0;
1597 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
1598 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
1600 TRACE_FLAG_IRQS_NOSUPPORT |
1602 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1603 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
1604 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1605 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
1607 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
1609 struct ring_buffer_event *
1610 trace_buffer_lock_reserve(struct ring_buffer *buffer,
1613 unsigned long flags, int pc)
1615 struct ring_buffer_event *event;
1617 event = ring_buffer_lock_reserve(buffer, len);
1618 if (event != NULL) {
1619 struct trace_entry *ent = ring_buffer_event_data(event);
1621 tracing_generic_entry_update(ent, flags, pc);
1629 __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1631 __this_cpu_write(trace_cmdline_save, true);
1632 ring_buffer_unlock_commit(buffer, event);
1636 __trace_buffer_unlock_commit(struct ring_buffer *buffer,
1637 struct ring_buffer_event *event,
1638 unsigned long flags, int pc)
1640 __buffer_unlock_commit(buffer, event);
1642 ftrace_trace_stack(buffer, flags, 6, pc);
1643 ftrace_trace_userstack(buffer, flags, pc);
1646 void trace_buffer_unlock_commit(struct ring_buffer *buffer,
1647 struct ring_buffer_event *event,
1648 unsigned long flags, int pc)
1650 __trace_buffer_unlock_commit(buffer, event, flags, pc);
1652 EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
1654 static struct ring_buffer *temp_buffer;
1656 struct ring_buffer_event *
1657 trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
1658 struct ftrace_event_file *ftrace_file,
1659 int type, unsigned long len,
1660 unsigned long flags, int pc)
1662 struct ring_buffer_event *entry;
1664 *current_rb = ftrace_file->tr->trace_buffer.buffer;
1665 entry = trace_buffer_lock_reserve(*current_rb,
1666 type, len, flags, pc);
1668 * If tracing is off, but we have triggers enabled,
1669 * we still need to look at the event data. Use the temp_buffer
1670 * to store the trace event for the trigger to use. It's recursion
1671 * safe and will not be recorded anywhere.
1673 if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) {
1674 *current_rb = temp_buffer;
1675 entry = trace_buffer_lock_reserve(*current_rb,
1676 type, len, flags, pc);
1680 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
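/*
 * Illustrative sketch of the reserve/fill/commit sequence an event caller
 * goes through (simplified; real callers also handle triggers and filters):
 *
 *   event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
 *                                           type, sizeof(*entry), flags, pc);
 *   if (!event)
 *           return;
 *   entry = ring_buffer_event_data(event);
 *   ...fill in *entry...
 *   trace_buffer_unlock_commit(buffer, event, flags, pc);
 */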
1682 struct ring_buffer_event *
1683 trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1684 int type, unsigned long len,
1685 unsigned long flags, int pc)
1687 *current_rb = global_trace.trace_buffer.buffer;
1688 return trace_buffer_lock_reserve(*current_rb,
1689 type, len, flags, pc);
1691 EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
1693 void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
1694 struct ring_buffer_event *event,
1695 unsigned long flags, int pc)
1697 __trace_buffer_unlock_commit(buffer, event, flags, pc);
1699 EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
1701 void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
1702 struct ring_buffer_event *event,
1703 unsigned long flags, int pc,
1704 struct pt_regs *regs)
1706 __buffer_unlock_commit(buffer, event);
1708 ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
1709 ftrace_trace_userstack(buffer, flags, pc);
1711 EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
1713 void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1714 struct ring_buffer_event *event)
1716 ring_buffer_discard_commit(buffer, event);
1718 EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
1721 trace_function(struct trace_array *tr,
1722 unsigned long ip, unsigned long parent_ip, unsigned long flags,
1725 struct ftrace_event_call *call = &event_function;
1726 struct ring_buffer *buffer = tr->trace_buffer.buffer;
1727 struct ring_buffer_event *event;
1728 struct ftrace_entry *entry;
1730 /* If we are reading the ring buffer, don't trace */
1731 if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
1734 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
1738 entry = ring_buffer_event_data(event);
1740 entry->parent_ip = parent_ip;
1742 if (!call_filter_check_discard(call, entry, buffer, event))
1743 __buffer_unlock_commit(buffer, event);
1746 #ifdef CONFIG_STACKTRACE
1748 #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1749 struct ftrace_stack {
1750 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
1753 static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1754 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1756 static void __ftrace_trace_stack(struct ring_buffer *buffer,
1757 unsigned long flags,
1758 int skip, int pc, struct pt_regs *regs)
1760 struct ftrace_event_call *call = &event_kernel_stack;
1761 struct ring_buffer_event *event;
1762 struct stack_entry *entry;
1763 struct stack_trace trace;
1765 int size = FTRACE_STACK_ENTRIES;
1767 trace.nr_entries = 0;
1771 * Since events can happen in NMIs there's no safe way to
1772 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1773 * or NMI comes in, it will just have to use the default
1774 * FTRACE_STACK_SIZE.
1776 preempt_disable_notrace();
1778 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
1780 * We don't need any atomic variables, just a barrier.
1781 * If an interrupt comes in, we don't care, because it would
1782 * have exited and put the counter back to what we want.
1783 * We just need a barrier to keep gcc from moving things
1784 * around.
1787 if (use_stack == 1) {
1788 trace.entries = this_cpu_ptr(ftrace_stack.calls);
1789 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
1792 save_stack_trace_regs(regs, &trace);
1794 save_stack_trace(&trace);
1796 if (trace.nr_entries > size)
1797 size = trace.nr_entries;
1799 /* From now on, use_stack is a boolean */
1802 size *= sizeof(unsigned long);
1804 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1805 sizeof(*entry) + size, flags, pc);
1808 entry = ring_buffer_event_data(event);
1810 memset(&entry->caller, 0, size);
1813 memcpy(&entry->caller, trace.entries,
1814 trace.nr_entries * sizeof(unsigned long));
1816 trace.max_entries = FTRACE_STACK_ENTRIES;
1817 trace.entries = entry->caller;
1819 save_stack_trace_regs(regs, &trace);
1821 save_stack_trace(&trace);
1824 entry->size = trace.nr_entries;
1826 if (!call_filter_check_discard(call, entry, buffer, event))
1827 __buffer_unlock_commit(buffer, event);
1830 /* Again, don't let gcc optimize things here */
1832 __this_cpu_dec(ftrace_stack_reserve);
1833 preempt_enable_notrace();
1837 void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
1838 int skip, int pc, struct pt_regs *regs)
1840 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1843 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1846 void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1849 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1852 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
1855 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1858 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
1862 * trace_dump_stack - record a stack back trace in the trace buffer
1863 * @skip: Number of functions to skip (helper handlers)
1865 void trace_dump_stack(int skip)
1867 unsigned long flags;
1869 if (tracing_disabled || tracing_selftest_running)
1872 local_save_flags(flags);
1875 * Skip 3 more, seems to get us at the caller of
1876 * trace_dump_stack()
1879 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1880 flags, skip, preempt_count(), NULL);
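/*
 * Illustrative only: a call such as trace_dump_stack(0) from kernel code
 * records the current call chain into the trace buffer, which is handy for
 * finding out how a particular path was reached.
 */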
1883 static DEFINE_PER_CPU(int, user_stack_count);
1886 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
1888 struct ftrace_event_call *call = &event_user_stack;
1889 struct ring_buffer_event *event;
1890 struct userstack_entry *entry;
1891 struct stack_trace trace;
1893 if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1897 * NMIs cannot handle page faults, even with fixups.
1898 * Saving the user stack can (and often does) fault.
1900 if (unlikely(in_nmi()))
1904 * prevent recursion, since the user stack tracing may
1905 * trigger other kernel events.
1908 if (__this_cpu_read(user_stack_count))
1911 __this_cpu_inc(user_stack_count);
1913 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
1914 sizeof(*entry), flags, pc);
1916 goto out_drop_count;
1917 entry = ring_buffer_event_data(event);
1919 entry->tgid = current->tgid;
1920 memset(&entry->caller, 0, sizeof(entry->caller));
1922 trace.nr_entries = 0;
1923 trace.max_entries = FTRACE_STACK_ENTRIES;
1925 trace.entries = entry->caller;
1927 save_stack_trace_user(&trace);
1928 if (!call_filter_check_discard(call, entry, buffer, event))
1929 __buffer_unlock_commit(buffer, event);
1932 __this_cpu_dec(user_stack_count);
1938 static void __trace_userstack(struct trace_array *tr, unsigned long flags)
1940 ftrace_trace_userstack(tr, flags, preempt_count());
1944 #endif /* CONFIG_STACKTRACE */
1946 /* created for use with alloc_percpu */
1947 struct trace_buffer_struct {
1948 char buffer[TRACE_BUF_SIZE];
1951 static struct trace_buffer_struct *trace_percpu_buffer;
1952 static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1953 static struct trace_buffer_struct *trace_percpu_irq_buffer;
1954 static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1957 * The buffer used is dependent on the context. There is a per cpu
1958 * buffer for normal context, softirq context, hard irq context and
1959 * for NMI context. This allows for lockless recording.
1961 * Note, if the buffers failed to be allocated, then this returns NULL
1963 static char *get_trace_buf(void)
1965 struct trace_buffer_struct *percpu_buffer;
1968 * If we have allocated per cpu buffers, then we do not
1969 * need to do any locking.
1972 percpu_buffer = trace_percpu_nmi_buffer;
1974 percpu_buffer = trace_percpu_irq_buffer;
1975 else if (in_softirq())
1976 percpu_buffer = trace_percpu_sirq_buffer;
1978 percpu_buffer = trace_percpu_buffer;
1983 return this_cpu_ptr(&percpu_buffer->buffer[0]);
1986 static int alloc_percpu_trace_buffer(void)
1988 struct trace_buffer_struct *buffers;
1989 struct trace_buffer_struct *sirq_buffers;
1990 struct trace_buffer_struct *irq_buffers;
1991 struct trace_buffer_struct *nmi_buffers;
1993 buffers = alloc_percpu(struct trace_buffer_struct);
1997 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
2001 irq_buffers = alloc_percpu(struct trace_buffer_struct);
2005 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
2009 trace_percpu_buffer = buffers;
2010 trace_percpu_sirq_buffer = sirq_buffers;
2011 trace_percpu_irq_buffer = irq_buffers;
2012 trace_percpu_nmi_buffer = nmi_buffers;
2017 free_percpu(irq_buffers);
2019 free_percpu(sirq_buffers);
2021 free_percpu(buffers);
2023 WARN(1, "Could not allocate percpu trace_printk buffer");
2027 static int buffers_allocated;
2029 void trace_printk_init_buffers(void)
2031 if (buffers_allocated)
2034 if (alloc_percpu_trace_buffer())
2037 /* trace_printk() is for debug use only. Don't use it in production. */
2039 pr_warning("\n**********************************************************\n");
2040 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2041 pr_warning("** **\n");
2042 pr_warning("** trace_printk() being used. Allocating extra memory. **\n");
2043 pr_warning("** **\n");
2044 pr_warning("** This means that this is a DEBUG kernel and it is **\n");
2045 pr_warning("** unsafe for production use. **\n");
2046 pr_warning("** **\n");
2047 pr_warning("** If you see this message and you are not debugging **\n");
2048 pr_warning("** the kernel, report this immediately to your vendor! **\n");
2049 pr_warning("** **\n");
2050 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2051 pr_warning("**********************************************************\n");
2053 /* Expand the buffers to set size */
2054 tracing_update_buffers();
2056 buffers_allocated = 1;
2059 * trace_printk_init_buffers() can be called by modules.
2060 * If that happens, then we need to start cmdline recording
2061 * directly here. If the global_trace.buffer is already
2062 * allocated here, then this was called by module code.
2064 if (global_trace.trace_buffer.buffer)
2065 tracing_start_cmdline_record();
2068 void trace_printk_start_comm(void)
2070 /* Start tracing comms if trace printk is set */
2071 if (!buffers_allocated)
2073 tracing_start_cmdline_record();
2076 static void trace_printk_start_stop_comm(int enabled)
2078 if (!buffers_allocated)
2082 tracing_start_cmdline_record();
2084 tracing_stop_cmdline_record();
2088 * trace_vbprintk - write binary msg to tracing buffer
2091 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
2093 struct ftrace_event_call *call = &event_bprint;
2094 struct ring_buffer_event *event;
2095 struct ring_buffer *buffer;
2096 struct trace_array *tr = &global_trace;
2097 struct bprint_entry *entry;
2098 unsigned long flags;
2100 int len = 0, size, pc;
2102 if (unlikely(tracing_selftest_running || tracing_disabled))
2105 /* Don't pollute graph traces with trace_vprintk internals */
2106 pause_graph_tracing();
2108 pc = preempt_count();
2109 preempt_disable_notrace();
2111 tbuffer = get_trace_buf();
2117 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2119 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2122 local_save_flags(flags);
2123 size = sizeof(*entry) + sizeof(u32) * len;
2124 buffer = tr->trace_buffer.buffer;
2125 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2129 entry = ring_buffer_event_data(event);
2133 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
2134 if (!call_filter_check_discard(call, entry, buffer, event)) {
2135 __buffer_unlock_commit(buffer, event);
2136 ftrace_trace_stack(buffer, flags, 6, pc);
2140 preempt_enable_notrace();
2141 unpause_graph_tracing();
2145 EXPORT_SYMBOL_GPL(trace_vbprintk);
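/*
 * Note (summary, not from this file): trace_vbprintk() is normally reached
 * through the trace_printk() macro, e.g. trace_printk("x=%d\n", x);, which
 * records the format pointer plus binary arguments instead of the formatted
 * string, keeping the fast path cheap.
 */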
2148 __trace_array_vprintk(struct ring_buffer *buffer,
2149 unsigned long ip, const char *fmt, va_list args)
2151 struct ftrace_event_call *call = &event_print;
2152 struct ring_buffer_event *event;
2153 int len = 0, size, pc;
2154 struct print_entry *entry;
2155 unsigned long flags;
2158 if (tracing_disabled || tracing_selftest_running)
2161 /* Don't pollute graph traces with trace_vprintk internals */
2162 pause_graph_tracing();
2164 pc = preempt_count();
2165 preempt_disable_notrace();
2168 tbuffer = get_trace_buf();
2174 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
2176 local_save_flags(flags);
2177 size = sizeof(*entry) + len + 1;
2178 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
2182 entry = ring_buffer_event_data(event);
2185 memcpy(&entry->buf, tbuffer, len + 1);
2186 if (!call_filter_check_discard(call, entry, buffer, event)) {
2187 __buffer_unlock_commit(buffer, event);
2188 ftrace_trace_stack(buffer, flags, 6, pc);
2191 preempt_enable_notrace();
2192 unpause_graph_tracing();
2197 int trace_array_vprintk(struct trace_array *tr,
2198 unsigned long ip, const char *fmt, va_list args)
2200 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2203 int trace_array_printk(struct trace_array *tr,
2204 unsigned long ip, const char *fmt, ...)
2209 if (!(trace_flags & TRACE_ITER_PRINTK))
2213 ret = trace_array_vprintk(tr, ip, fmt, ap);
2218 int trace_array_printk_buf(struct ring_buffer *buffer,
2219 unsigned long ip, const char *fmt, ...)
2224 if (!(trace_flags & TRACE_ITER_PRINTK))
2228 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2233 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2235 return trace_array_vprintk(&global_trace, ip, fmt, args);
2237 EXPORT_SYMBOL_GPL(trace_vprintk);
2239 static void trace_iterator_increment(struct trace_iterator *iter)
2241 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2245 ring_buffer_read(buf_iter, NULL);
2248 static struct trace_entry *
2249 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2250 unsigned long *lost_events)
2252 struct ring_buffer_event *event;
2253 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
2256 event = ring_buffer_iter_peek(buf_iter, ts);
2258 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
2262 iter->ent_size = ring_buffer_event_length(event);
2263 return ring_buffer_event_data(event);
2269 static struct trace_entry *
2270 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2271 unsigned long *missing_events, u64 *ent_ts)
2273 struct ring_buffer *buffer = iter->trace_buffer->buffer;
2274 struct trace_entry *ent, *next = NULL;
2275 unsigned long lost_events = 0, next_lost = 0;
2276 int cpu_file = iter->cpu_file;
2277 u64 next_ts = 0, ts;
2283 * If we are in a per_cpu trace file, don't bother iterating over
2284 * all cpus; just peek directly.
2286 if (cpu_file > RING_BUFFER_ALL_CPUS) {
2287 if (ring_buffer_empty_cpu(buffer, cpu_file))
2289 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
2291 *ent_cpu = cpu_file;
2296 for_each_tracing_cpu(cpu) {
2298 if (ring_buffer_empty_cpu(buffer, cpu))
2301 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
2304 * Pick the entry with the smallest timestamp:
2306 if (ent && (!next || ts < next_ts)) {
2310 next_lost = lost_events;
2311 next_size = iter->ent_size;
2315 iter->ent_size = next_size;
2318 *ent_cpu = next_cpu;
2324 *missing_events = next_lost;
2329 /* Find the next real entry, without updating the iterator itself */
2330 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2331 int *ent_cpu, u64 *ent_ts)
2333 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
2336 /* Find the next real entry, and increment the iterator to the next entry */
2337 void *trace_find_next_entry_inc(struct trace_iterator *iter)
2339 iter->ent = __find_next_entry(iter, &iter->cpu,
2340 &iter->lost_events, &iter->ts);
2343 trace_iterator_increment(iter);
2345 return iter->ent ? iter : NULL;
2348 static void trace_consume(struct trace_iterator *iter)
2350 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
2351 &iter->lost_events);
2354 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
2356 struct trace_iterator *iter = m->private;
2360 WARN_ON_ONCE(iter->leftover);
2364 /* can't go backwards */
2369 ent = trace_find_next_entry_inc(iter);
2373 while (ent && iter->idx < i)
2374 ent = trace_find_next_entry_inc(iter);
2381 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
2383 struct ring_buffer_event *event;
2384 struct ring_buffer_iter *buf_iter;
2385 unsigned long entries = 0;
2388 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
2390 buf_iter = trace_buffer_iter(iter, cpu);
2394 ring_buffer_iter_reset(buf_iter);
2397 * With the max latency tracers, we could have the case
2398 * that a reset never took place on a cpu. This is evident
2399 * by the timestamp being before the start of the buffer.
2401 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
2402 if (ts >= iter->trace_buffer->time_start)
2405 ring_buffer_read(buf_iter, NULL);
2408 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
2412 * The current tracer is copied to avoid taking a global lock
2413 * all around.
2415 static void *s_start(struct seq_file *m, loff_t *pos)
2417 struct trace_iterator *iter = m->private;
2418 struct trace_array *tr = iter->tr;
2419 int cpu_file = iter->cpu_file;
2425 * copy the tracer to avoid using a global lock all around.
2426 * iter->trace is a copy of current_trace, the pointer to the
2427 * name may be used instead of a strcmp(), as iter->trace->name
2428 * will point to the same string as current_trace->name.
2430 mutex_lock(&trace_types_lock);
2431 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2432 *iter->trace = *tr->current_trace;
2433 mutex_unlock(&trace_types_lock);
2435 #ifdef CONFIG_TRACER_MAX_TRACE
2436 if (iter->snapshot && iter->trace->use_max_tr)
2437 return ERR_PTR(-EBUSY);
2440 if (!iter->snapshot)
2441 atomic_inc(&trace_record_cmdline_disabled);
2443 if (*pos != iter->pos) {
2448 if (cpu_file == RING_BUFFER_ALL_CPUS) {
2449 for_each_tracing_cpu(cpu)
2450 tracing_iter_reset(iter, cpu);
2452 tracing_iter_reset(iter, cpu_file);
2455 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2460 * If we overflowed the seq_file before, then we want
2461 * to just reuse the trace_seq buffer again.
2467 p = s_next(m, p, &l);
2471 trace_event_read_lock();
2472 trace_access_lock(cpu_file);
2476 static void s_stop(struct seq_file *m, void *p)
2478 struct trace_iterator *iter = m->private;
2480 #ifdef CONFIG_TRACER_MAX_TRACE
2481 if (iter->snapshot && iter->trace->use_max_tr)
2485 if (!iter->snapshot)
2486 atomic_dec(&trace_record_cmdline_disabled);
2488 trace_access_unlock(iter->cpu_file);
2489 trace_event_read_unlock();
2493 get_total_entries(struct trace_buffer *buf,
2494 unsigned long *total, unsigned long *entries)
2496 unsigned long count;
2502 for_each_tracing_cpu(cpu) {
2503 count = ring_buffer_entries_cpu(buf->buffer, cpu);
2505 * If this buffer has skipped entries, then we hold all
2506 * entries for the trace and we need to ignore the
2507 * ones before the time stamp.
2509 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2510 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
2511 /* total is the same as the entries */
2515 ring_buffer_overrun_cpu(buf->buffer, cpu);
2520 static void print_lat_help_header(struct seq_file *m)
2522 seq_puts(m, "# _------=> CPU# \n"
2523 "# / _-----=> irqs-off \n"
2524 "# | / _----=> need-resched \n"
2525 "# || / _---=> hardirq/softirq \n"
2526 "# ||| / _--=> preempt-depth \n"
2528 "# cmd pid ||||| time | caller \n"
2529 "# \\ / ||||| \\ | / \n");
2532 static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
2534 unsigned long total;
2535 unsigned long entries;
2537 get_total_entries(buf, &total, &entries);
2538 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
2539 entries, total, num_online_cpus());
2543 static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
2545 print_event_info(buf, m);
2546 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"
2550 static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
2552 print_event_info(buf, m);
2553 seq_puts(m, "# _-----=> irqs-off\n"
2554 "# / _----=> need-resched\n"
2555 "# | / _---=> hardirq/softirq\n"
2556 "# || / _--=> preempt-depth\n"
2558 "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"
2559 "# | | | |||| | |\n");
2563 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2565 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
2566 struct trace_buffer *buf = iter->trace_buffer;
2567 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
2568 struct tracer *type = iter->trace;
2569 unsigned long entries;
2570 unsigned long total;
2571 const char *name = "preemption";
2575 get_total_entries(buf, &total, &entries);
2577 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
2579 seq_puts(m, "# -----------------------------------"
2580 "---------------------------------\n");
2581 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
2582 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
2583 nsecs_to_usecs(data->saved_latency),
2587 #if defined(CONFIG_PREEMPT_NONE)
2589 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
2591 #elif defined(CONFIG_PREEMPT)
2596 /* These are reserved for later use */
2599 seq_printf(m, " #P:%d)\n", num_online_cpus());
2603 seq_puts(m, "# -----------------\n");
2604 seq_printf(m, "# | task: %.16s-%d "
2605 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
2606 data->comm, data->pid,
2607 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
2608 data->policy, data->rt_priority);
2609 seq_puts(m, "# -----------------\n");
2611 if (data->critical_start) {
2612 seq_puts(m, "# => started at: ");
2613 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2614 trace_print_seq(m, &iter->seq);
2615 seq_puts(m, "\n# => ended at: ");
2616 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2617 trace_print_seq(m, &iter->seq);
2618 seq_puts(m, "\n#\n");
2624 static void test_cpu_buff_start(struct trace_iterator *iter)
2626 struct trace_seq *s = &iter->seq;
2628 if (!(trace_flags & TRACE_ITER_ANNOTATE))
2631 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2634 if (cpumask_test_cpu(iter->cpu, iter->started))
2637 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
2640 cpumask_set_cpu(iter->cpu, iter->started);
2642 /* Don't print started cpu buffer for the first entry of the trace */
2644 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2648 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
2650 struct trace_seq *s = &iter->seq;
2651 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
2652 struct trace_entry *entry;
2653 struct trace_event *event;
2657 test_cpu_buff_start(iter);
2659 event = ftrace_find_event(entry->type);
2661 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2662 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2663 trace_print_lat_context(iter);
2665 trace_print_context(iter);
2668 if (trace_seq_has_overflowed(s))
2669 return TRACE_TYPE_PARTIAL_LINE;
2672 return event->funcs->trace(iter, sym_flags, event);
2674 trace_seq_printf(s, "Unknown type %d\n", entry->type);
2676 return trace_handle_return(s);
2679 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
2681 struct trace_seq *s = &iter->seq;
2682 struct trace_entry *entry;
2683 struct trace_event *event;
2687 if (trace_flags & TRACE_ITER_CONTEXT_INFO)
2688 trace_seq_printf(s, "%d %d %llu ",
2689 entry->pid, iter->cpu, iter->ts);
2691 if (trace_seq_has_overflowed(s))
2692 return TRACE_TYPE_PARTIAL_LINE;
2694 event = ftrace_find_event(entry->type);
2696 return event->funcs->raw(iter, 0, event);
2698 trace_seq_printf(s, "%d ?\n", entry->type);
2700 return trace_handle_return(s);
2703 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
2705 struct trace_seq *s = &iter->seq;
2706 unsigned char newline = '\n';
2707 struct trace_entry *entry;
2708 struct trace_event *event;
2712 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2713 SEQ_PUT_HEX_FIELD(s, entry->pid);
2714 SEQ_PUT_HEX_FIELD(s, iter->cpu);
2715 SEQ_PUT_HEX_FIELD(s, iter->ts);
2716 if (trace_seq_has_overflowed(s))
2717 return TRACE_TYPE_PARTIAL_LINE;
2720 event = ftrace_find_event(entry->type);
2722 enum print_line_t ret = event->funcs->hex(iter, 0, event);
2723 if (ret != TRACE_TYPE_HANDLED)
2727 SEQ_PUT_FIELD(s, newline);
2729 return trace_handle_return(s);
2732 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
2734 struct trace_seq *s = &iter->seq;
2735 struct trace_entry *entry;
2736 struct trace_event *event;
2740 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2741 SEQ_PUT_FIELD(s, entry->pid);
2742 SEQ_PUT_FIELD(s, iter->cpu);
2743 SEQ_PUT_FIELD(s, iter->ts);
2744 if (trace_seq_has_overflowed(s))
2745 return TRACE_TYPE_PARTIAL_LINE;
2748 event = ftrace_find_event(entry->type);
2749 return event ? event->funcs->binary(iter, 0, event) :
2753 int trace_empty(struct trace_iterator *iter)
2755 struct ring_buffer_iter *buf_iter;
2758 /* If we are looking at one CPU buffer, only check that one */
2759 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
2760 cpu = iter->cpu_file;
2761 buf_iter = trace_buffer_iter(iter, cpu);
2763 if (!ring_buffer_iter_empty(buf_iter))
2766 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2772 for_each_tracing_cpu(cpu) {
2773 buf_iter = trace_buffer_iter(iter, cpu);
2775 if (!ring_buffer_iter_empty(buf_iter))
2778 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2786 /* Called with trace_event_read_lock() held. */
2787 enum print_line_t print_trace_line(struct trace_iterator *iter)
2789 enum print_line_t ret;
2791 if (iter->lost_events) {
2792 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2793 iter->cpu, iter->lost_events);
2794 if (trace_seq_has_overflowed(&iter->seq))
2795 return TRACE_TYPE_PARTIAL_LINE;
2798 if (iter->trace && iter->trace->print_line) {
2799 ret = iter->trace->print_line(iter);
2800 if (ret != TRACE_TYPE_UNHANDLED)
2804 if (iter->ent->type == TRACE_BPUTS &&
2805 trace_flags & TRACE_ITER_PRINTK &&
2806 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2807 return trace_print_bputs_msg_only(iter);
2809 if (iter->ent->type == TRACE_BPRINT &&
2810 trace_flags & TRACE_ITER_PRINTK &&
2811 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2812 return trace_print_bprintk_msg_only(iter);
2814 if (iter->ent->type == TRACE_PRINT &&
2815 trace_flags & TRACE_ITER_PRINTK &&
2816 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2817 return trace_print_printk_msg_only(iter);
2819 if (trace_flags & TRACE_ITER_BIN)
2820 return print_bin_fmt(iter);
2822 if (trace_flags & TRACE_ITER_HEX)
2823 return print_hex_fmt(iter);
2825 if (trace_flags & TRACE_ITER_RAW)
2826 return print_raw_fmt(iter);
2828 return print_trace_fmt(iter);
2831 void trace_latency_header(struct seq_file *m)
2833 struct trace_iterator *iter = m->private;
2835 /* print nothing if the buffers are empty */
2836 if (trace_empty(iter))
2839 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2840 print_trace_header(m, iter);
2842 if (!(trace_flags & TRACE_ITER_VERBOSE))
2843 print_lat_help_header(m);
2846 void trace_default_header(struct seq_file *m)
2848 struct trace_iterator *iter = m->private;
2850 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2853 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2854 /* print nothing if the buffers are empty */
2855 if (trace_empty(iter))
2857 print_trace_header(m, iter);
2858 if (!(trace_flags & TRACE_ITER_VERBOSE))
2859 print_lat_help_header(m);
2861 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2862 if (trace_flags & TRACE_ITER_IRQ_INFO)
2863 print_func_help_header_irq(iter->trace_buffer, m);
2865 print_func_help_header(iter->trace_buffer, m);
2870 static void test_ftrace_alive(struct seq_file *m)
2872 if (!ftrace_is_dead())
2874 seq_puts(m, "# WARNING: FUNCTION TRACING IS DISABLED\n"
2875 "# MAY BE MISSING FUNCTION EVENTS\n");
2878 #ifdef CONFIG_TRACER_MAX_TRACE
2879 static void show_snapshot_main_help(struct seq_file *m)
2881 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
2882 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2883 "# Takes a snapshot of the main buffer.\n"
2884 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
2885 "# (Doesn't have to be '2'; works with any number that\n"
2886 "# is not a '0' or '1')\n");
2889 static void show_snapshot_percpu_help(struct seq_file *m)
2891 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
2892 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2893 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2894 "# Takes a snapshot of the main buffer for this cpu.\n");
2896 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
2897 "# Must use main snapshot file to allocate.\n");
2899 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
2900 "# (Doesn't have to be '2'; works with any number that\n"
2901 "# is not a '0' or '1')\n");
2904 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2906 if (iter->tr->allocated_snapshot)
2907 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
2909 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
2911 seq_puts(m, "# Snapshot commands:\n");
2912 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2913 show_snapshot_main_help(m);
2915 show_snapshot_percpu_help(m);
2918 /* Should never be called */
2919 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
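/*
 * Illustrative usage of the snapshot help text above (a sketch; paths
 * assume the toplevel instance with debugfs mounted at /sys/kernel/debug,
 * which is the usual default but still an assumption here):
 *
 *   echo 1 > /sys/kernel/debug/tracing/snapshot   # allocate and take a snapshot
 *   cat  /sys/kernel/debug/tracing/snapshot       # read the frozen copy
 *   echo 2 > /sys/kernel/debug/tracing/snapshot   # clear it, keep the allocation
 *   echo 0 > /sys/kernel/debug/tracing/snapshot   # clear and free the buffer
 */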
2922 static int s_show(struct seq_file *m, void *v)
2924 struct trace_iterator *iter = v;
2927 if (iter->ent == NULL) {
2929 seq_printf(m, "# tracer: %s\n", iter->trace->name);
2931 test_ftrace_alive(m);
2933 if (iter->snapshot && trace_empty(iter))
2934 print_snapshot_help(m, iter);
2935 else if (iter->trace && iter->trace->print_header)
2936 iter->trace->print_header(m);
2938 trace_default_header(m);
2940 } else if (iter->leftover) {
2942 * If we filled the seq_file buffer earlier, we
2943 * want to just show it now.
2945 ret = trace_print_seq(m, &iter->seq);
2947 /* ret should this time be zero, but you never know */
2948 iter->leftover = ret;
2951 print_trace_line(iter);
2952 ret = trace_print_seq(m, &iter->seq);
2954 * If we overflow the seq_file buffer, then it will
2955 * ask us for this data again at start up.
2957 * ret is 0 if seq_file write succeeded.
2960 iter->leftover = ret;
2967 * Should be used after trace_array_get(), trace_types_lock
2968 * ensures that i_cdev was already initialized.
2970 static inline int tracing_get_cpu(struct inode *inode)
2972 if (inode->i_cdev) /* See trace_create_cpu_file() */
2973 return (long)inode->i_cdev - 1;
2974 return RING_BUFFER_ALL_CPUS;
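/*
 * Sketch of the encoding this relies on (an assumption based on the
 * "See trace_create_cpu_file()" note above, not a quote of that function):
 * the per-cpu file stores cpu + 1 in i_cdev so that a NULL i_cdev keeps
 * meaning "all CPUs".
 *
 *   dentry->d_inode->i_cdev = (void *)(long)(cpu + 1);
 */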
2977 static const struct seq_operations tracer_seq_ops = {
2984 static struct trace_iterator *
2985 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
2987 struct trace_array *tr = inode->i_private;
2988 struct trace_iterator *iter;
2991 if (tracing_disabled)
2992 return ERR_PTR(-ENODEV);
2994 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
2996 return ERR_PTR(-ENOMEM);
2998 iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
3000 if (!iter->buffer_iter)
3004 * We make a copy of the current tracer to avoid concurrent
3005 * changes on it while we are reading.
3007 mutex_lock(&trace_types_lock);
3008 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
3012 *iter->trace = *tr->current_trace;
3014 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
3019 #ifdef CONFIG_TRACER_MAX_TRACE
3020 /* Currently only the top directory has a snapshot */
3021 if (tr->current_trace->print_max || snapshot)
3022 iter->trace_buffer = &tr->max_buffer;
3025 iter->trace_buffer = &tr->trace_buffer;
3026 iter->snapshot = snapshot;
3028 iter->cpu_file = tracing_get_cpu(inode);
3029 mutex_init(&iter->mutex);
3031 /* Notify the tracer early; before we stop tracing. */
3032 if (iter->trace && iter->trace->open)
3033 iter->trace->open(iter);
3035 /* Annotate start of buffers if we had overruns */
3036 if (ring_buffer_overruns(iter->trace_buffer->buffer))
3037 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3039 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
3040 if (trace_clocks[tr->clock_id].in_ns)
3041 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3043 /* stop the trace while dumping if we are not opening "snapshot" */
3044 if (!iter->snapshot)
3045 tracing_stop_tr(tr);
3047 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
3048 for_each_tracing_cpu(cpu) {
3049 iter->buffer_iter[cpu] =
3050 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3052 ring_buffer_read_prepare_sync();
3053 for_each_tracing_cpu(cpu) {
3054 ring_buffer_read_start(iter->buffer_iter[cpu]);
3055 tracing_iter_reset(iter, cpu);
3058 cpu = iter->cpu_file;
3059 iter->buffer_iter[cpu] =
3060 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3061 ring_buffer_read_prepare_sync();
3062 ring_buffer_read_start(iter->buffer_iter[cpu]);
3063 tracing_iter_reset(iter, cpu);
3066 mutex_unlock(&trace_types_lock);
3071 mutex_unlock(&trace_types_lock);
3073 kfree(iter->buffer_iter);
3075 seq_release_private(inode, file);
3076 return ERR_PTR(-ENOMEM);
3079 int tracing_open_generic(struct inode *inode, struct file *filp)
3081 if (tracing_disabled)
3084 filp->private_data = inode->i_private;
3088 bool tracing_is_disabled(void)
3090 return (tracing_disabled) ? true : false;
3094 * Open and update trace_array ref count.
3095 * Must have the current trace_array passed to it.
3097 static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
3099 struct trace_array *tr = inode->i_private;
3101 if (tracing_disabled)
3104 if (trace_array_get(tr) < 0)
3107 filp->private_data = inode->i_private;
3112 static int tracing_release(struct inode *inode, struct file *file)
3114 struct trace_array *tr = inode->i_private;
3115 struct seq_file *m = file->private_data;
3116 struct trace_iterator *iter;
3119 if (!(file->f_mode & FMODE_READ)) {
3120 trace_array_put(tr);
3124 /* Writes do not use seq_file */
3126 mutex_lock(&trace_types_lock);
3128 for_each_tracing_cpu(cpu) {
3129 if (iter->buffer_iter[cpu])
3130 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3133 if (iter->trace && iter->trace->close)
3134 iter->trace->close(iter);
3136 if (!iter->snapshot)
3137 /* reenable tracing if it was previously enabled */
3138 tracing_start_tr(tr);
3140 __trace_array_put(tr);
3142 mutex_unlock(&trace_types_lock);
3144 mutex_destroy(&iter->mutex);
3145 free_cpumask_var(iter->started);
3147 kfree(iter->buffer_iter);
3148 seq_release_private(inode, file);
3153 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3155 struct trace_array *tr = inode->i_private;
3157 trace_array_put(tr);
3161 static int tracing_single_release_tr(struct inode *inode, struct file *file)
3163 struct trace_array *tr = inode->i_private;
3165 trace_array_put(tr);
3167 return single_release(inode, file);
3170 static int tracing_open(struct inode *inode, struct file *file)
3172 struct trace_array *tr = inode->i_private;
3173 struct trace_iterator *iter;
3176 if (trace_array_get(tr) < 0)
3179 /* If this file was open for write, then erase contents */
3180 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3181 int cpu = tracing_get_cpu(inode);
3183 if (cpu == RING_BUFFER_ALL_CPUS)
3184 tracing_reset_online_cpus(&tr->trace_buffer);
3186 tracing_reset(&tr->trace_buffer, cpu);
3189 if (file->f_mode & FMODE_READ) {
3190 iter = __tracing_open(inode, file, false);
3192 ret = PTR_ERR(iter);
3193 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
3194 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3198 trace_array_put(tr);
3204 * Some tracers are not suitable for instance buffers.
3205 * A tracer is always available for the global array (toplevel)
3206 * or if it explicitly states that it is.
3209 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3211 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3214 /* Find the next tracer that this trace array may use */
3215 static struct tracer *
3216 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3218 while (t && !trace_ok_for_array(t, tr))
3225 t_next(struct seq_file *m, void *v, loff_t *pos)
3227 struct trace_array *tr = m->private;
3228 struct tracer *t = v;
3233 t = get_tracer_for_array(tr, t->next);
3238 static void *t_start(struct seq_file *m, loff_t *pos)
3240 struct trace_array *tr = m->private;
3244 mutex_lock(&trace_types_lock);
3246 t = get_tracer_for_array(tr, trace_types);
3247 for (; t && l < *pos; t = t_next(m, t, &l))
3253 static void t_stop(struct seq_file *m, void *p)
3255 mutex_unlock(&trace_types_lock);
3258 static int t_show(struct seq_file *m, void *v)
3260 struct tracer *t = v;
3265 seq_puts(m, t->name);
3274 static const struct seq_operations show_traces_seq_ops = {
3281 static int show_traces_open(struct inode *inode, struct file *file)
3283 struct trace_array *tr = inode->i_private;
3287 if (tracing_disabled)
3290 ret = seq_open(file, &show_traces_seq_ops);
3294 m = file->private_data;
3301 tracing_write_stub(struct file *filp, const char __user *ubuf,
3302 size_t count, loff_t *ppos)
3307 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
3311 if (file->f_mode & FMODE_READ)
3312 ret = seq_lseek(file, offset, whence);
3314 file->f_pos = ret = 0;
3319 static const struct file_operations tracing_fops = {
3320 .open = tracing_open,
3322 .write = tracing_write_stub,
3323 .llseek = tracing_lseek,
3324 .release = tracing_release,
3327 static const struct file_operations show_traces_fops = {
3328 .open = show_traces_open,
3330 .release = seq_release,
3331 .llseek = seq_lseek,
3335 * The tracer itself will not take this lock, but still we want
3336 * to provide a consistent cpumask to user-space:
3338 static DEFINE_MUTEX(tracing_cpumask_update_lock);
3341 * Temporary storage for the character representation of the
3342 * CPU bitmask (and one more byte for the newline):
3344 static char mask_str[NR_CPUS + 1];
3347 tracing_cpumask_read(struct file *filp, char __user *ubuf,
3348 size_t count, loff_t *ppos)
3350 struct trace_array *tr = file_inode(filp)->i_private;
3353 mutex_lock(&tracing_cpumask_update_lock);
3355 len = cpumask_scnprintf(mask_str, count, tr->tracing_cpumask);
3356 if (count - len < 2) {
3360 len += sprintf(mask_str + len, "\n");
3361 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3364 mutex_unlock(&tracing_cpumask_update_lock);
3370 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3371 size_t count, loff_t *ppos)
3373 struct trace_array *tr = file_inode(filp)->i_private;
3374 cpumask_var_t tracing_cpumask_new;
3377 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3380 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
3384 mutex_lock(&tracing_cpumask_update_lock);
3386 local_irq_disable();
3387 arch_spin_lock(&tr->max_lock);
3388 for_each_tracing_cpu(cpu) {
3390 * Increase/decrease the disabled counter if we are
3391 * about to flip a bit in the cpumask:
3393 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
3394 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3395 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3396 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
3398 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
3399 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3400 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3401 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
3404 arch_spin_unlock(&tr->max_lock);
3407 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
3409 mutex_unlock(&tracing_cpumask_update_lock);
3410 free_cpumask_var(tracing_cpumask_new);
3415 free_cpumask_var(tracing_cpumask_new);
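/*
 * Illustrative usage of tracing_cpumask from user space (the mask is the
 * standard hex cpumask string; the value below is only an example for a
 * 4-CPU box):
 *
 *   echo 3 > /sys/kernel/debug/tracing/tracing_cpumask   # trace CPUs 0 and 1 only
 *   cat  /sys/kernel/debug/tracing/tracing_cpumask
 */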
3420 static const struct file_operations tracing_cpumask_fops = {
3421 .open = tracing_open_generic_tr,
3422 .read = tracing_cpumask_read,
3423 .write = tracing_cpumask_write,
3424 .release = tracing_release_generic_tr,
3425 .llseek = generic_file_llseek,
3428 static int tracing_trace_options_show(struct seq_file *m, void *v)
3430 struct tracer_opt *trace_opts;
3431 struct trace_array *tr = m->private;
3435 mutex_lock(&trace_types_lock);
3436 tracer_flags = tr->current_trace->flags->val;
3437 trace_opts = tr->current_trace->flags->opts;
3439 for (i = 0; trace_options[i]; i++) {
3440 if (trace_flags & (1 << i))
3441 seq_printf(m, "%s\n", trace_options[i]);
3443 seq_printf(m, "no%s\n", trace_options[i]);
3446 for (i = 0; trace_opts[i].name; i++) {
3447 if (tracer_flags & trace_opts[i].bit)
3448 seq_printf(m, "%s\n", trace_opts[i].name);
3450 seq_printf(m, "no%s\n", trace_opts[i].name);
3452 mutex_unlock(&trace_types_lock);
3457 static int __set_tracer_option(struct trace_array *tr,
3458 struct tracer_flags *tracer_flags,
3459 struct tracer_opt *opts, int neg)
3461 struct tracer *trace = tr->current_trace;
3464 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
3469 tracer_flags->val &= ~opts->bit;
3471 tracer_flags->val |= opts->bit;
3475 /* Try to assign a tracer specific option */
3476 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
3478 struct tracer *trace = tr->current_trace;
3479 struct tracer_flags *tracer_flags = trace->flags;
3480 struct tracer_opt *opts = NULL;
3483 for (i = 0; tracer_flags->opts[i].name; i++) {
3484 opts = &tracer_flags->opts[i];
3486 if (strcmp(cmp, opts->name) == 0)
3487 return __set_tracer_option(tr, trace->flags, opts, neg);
3493 /* Some tracers require overwrite to stay enabled */
3494 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3496 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3502 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
3504 /* do nothing if flag is already set */
3505 if (!!(trace_flags & mask) == !!enabled)
3508 /* Give the tracer a chance to approve the change */
3509 if (tr->current_trace->flag_changed)
3510 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
3514 trace_flags |= mask;
3516 trace_flags &= ~mask;
3518 if (mask == TRACE_ITER_RECORD_CMD)
3519 trace_event_enable_cmd_record(enabled);
3521 if (mask == TRACE_ITER_OVERWRITE) {
3522 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
3523 #ifdef CONFIG_TRACER_MAX_TRACE
3524 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
3528 if (mask == TRACE_ITER_PRINTK)
3529 trace_printk_start_stop_comm(enabled);
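/*
 * Sketch of how a tracer can veto these flag changes via ->flag_changed
 * (returning nonzero from the hook rejects the change, per the call above).
 * The tracer and hook names below are hypothetical, not taken from this file:
 *
 *   static int my_lat_flag_changed(struct trace_array *tr, u32 mask, int set)
 *   {
 *           return trace_keep_overwrite(tr->current_trace, mask, set);
 *   }
 *
 *   static struct tracer my_lat_tracer __read_mostly = {
 *           .name         = "my_lat",
 *           .flag_changed = my_lat_flag_changed,
 *   };
 */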
3534 static int trace_set_options(struct trace_array *tr, char *option)
3541 cmp = strstrip(option);
3543 if (strncmp(cmp, "no", 2) == 0) {
3548 mutex_lock(&trace_types_lock);
3550 for (i = 0; trace_options[i]; i++) {
3551 if (strcmp(cmp, trace_options[i]) == 0) {
3552 ret = set_tracer_flag(tr, 1 << i, !neg);
3557 /* If no option could be set, test the specific tracer options */
3558 if (!trace_options[i])
3559 ret = set_tracer_option(tr, cmp, neg);
3561 mutex_unlock(&trace_types_lock);
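/*
 * Illustrative option strings accepted here (the same names listed by the
 * trace_options file; a "no" prefix clears the flag):
 *
 *   echo noprint-parent > /sys/kernel/debug/tracing/trace_options
 *   echo sym-offset     > /sys/kernel/debug/tracing/trace_options
 */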
3567 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3568 size_t cnt, loff_t *ppos)
3570 struct seq_file *m = filp->private_data;
3571 struct trace_array *tr = m->private;
3575 if (cnt >= sizeof(buf))
3578 if (copy_from_user(&buf, ubuf, cnt))
3583 ret = trace_set_options(tr, buf);
3592 static int tracing_trace_options_open(struct inode *inode, struct file *file)
3594 struct trace_array *tr = inode->i_private;
3597 if (tracing_disabled)
3600 if (trace_array_get(tr) < 0)
3603 ret = single_open(file, tracing_trace_options_show, inode->i_private);
3605 trace_array_put(tr);
3610 static const struct file_operations tracing_iter_fops = {
3611 .open = tracing_trace_options_open,
3613 .llseek = seq_lseek,
3614 .release = tracing_single_release_tr,
3615 .write = tracing_trace_options_write,
3618 static const char readme_msg[] =
3619 "tracing mini-HOWTO:\n\n"
3620 "# echo 0 > tracing_on : quick way to disable tracing\n"
3621 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3622 " Important files:\n"
3623 " trace\t\t\t- The static contents of the buffer\n"
3624 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3625 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3626 " current_tracer\t- function and latency tracers\n"
3627 " available_tracers\t- list of configured tracers for current_tracer\n"
3628 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3629 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3630 " trace_clock\t\t- change the clock used to order events\n"
3631 " local: Per cpu clock but may not be synced across CPUs\n"
3632 " global: Synced across CPUs but slows tracing down.\n"
3633 " counter: Not a clock, but just an increment\n"
3634 " uptime: Jiffy counter from time of boot\n"
3635 " perf: Same clock that perf events use\n"
3636 #ifdef CONFIG_X86_64
3637 " x86-tsc: TSC cycle counter\n"
3639 "\n trace_marker\t\t- Writes into this file are inserted into the kernel trace buffer\n"
3640 " tracing_cpumask\t- Limit which CPUs to trace\n"
3641 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3642 "\t\t\t Remove sub-buffer with rmdir\n"
3643 " trace_options\t\t- Set format or modify how tracing happens\n"
3644 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
3645 "\t\t\t option name\n"
3646 " saved_cmdlines_size\t- echo a number in here to set how many comm-pid pairs are saved\n"
3647 #ifdef CONFIG_DYNAMIC_FTRACE
3648 "\n available_filter_functions - list of functions that can be filtered on\n"
3649 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3650 "\t\t\t functions\n"
3651 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3652 "\t modules: Can select a group via module\n"
3653 "\t Format: :mod:<module-name>\n"
3654 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3655 "\t triggers: a command to perform when function is hit\n"
3656 "\t Format: <function>:<trigger>[:count]\n"
3657 "\t trigger: traceon, traceoff\n"
3658 "\t\t enable_event:<system>:<event>\n"
3659 "\t\t disable_event:<system>:<event>\n"
3660 #ifdef CONFIG_STACKTRACE
3663 #ifdef CONFIG_TRACER_SNAPSHOT
3668 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3669 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3670 "\t The first one will disable tracing every time do_fault is hit\n"
3671 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
3672 "\t The first time do_trap is hit and it disables tracing, the\n"
3673 "\t counter will decrement to 2. If tracing is already disabled,\n"
3674 "\t the counter will not decrement. It only decrements when the\n"
3675 "\t trigger did work\n"
3676 "\t To remove trigger without count:\n"
3677 "\t echo '!<function>:<trigger>' > set_ftrace_filter\n"
3678 "\t To remove trigger with a count:\n"
3679 "\t echo '!<function>:<trigger>:0' > set_ftrace_filter\n"
3680 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
3681 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3682 "\t modules: Can select a group via module command :mod:\n"
3683 "\t Does not accept triggers\n"
3684 #endif /* CONFIG_DYNAMIC_FTRACE */
3685 #ifdef CONFIG_FUNCTION_TRACER
3686 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3689 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3690 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
3691 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
3692 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3694 #ifdef CONFIG_TRACER_SNAPSHOT
3695 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3696 "\t\t\t snapshot buffer. Read the contents for more\n"
3697 "\t\t\t information\n"
3699 #ifdef CONFIG_STACK_TRACER
3700 " stack_trace\t\t- Shows the max stack trace when active\n"
3701 " stack_max_size\t- Shows current max stack size that was traced\n"
3702 "\t\t\t Write into this file to reset the max size (trigger a\n"
3703 "\t\t\t new trace)\n"
3704 #ifdef CONFIG_DYNAMIC_FTRACE
3705 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3708 #endif /* CONFIG_STACK_TRACER */
3709 " events/\t\t- Directory containing all trace event subsystems:\n"
3710 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3711 " events/<system>/\t- Directory containing all trace events for <system>:\n"
3712 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3714 " filter\t\t- If set, only events passing filter are traced\n"
3715 " events/<system>/<event>/\t- Directory containing control files for\n"
3717 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3718 " filter\t\t- If set, only events passing filter are traced\n"
3719 " trigger\t\t- If set, a command to perform when event is hit\n"
3720 "\t Format: <trigger>[:count][if <filter>]\n"
3721 "\t trigger: traceon, traceoff\n"
3722 "\t enable_event:<system>:<event>\n"
3723 "\t disable_event:<system>:<event>\n"
3724 #ifdef CONFIG_STACKTRACE
3727 #ifdef CONFIG_TRACER_SNAPSHOT
3730 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3731 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3732 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3733 "\t events/block/block_unplug/trigger\n"
3734 "\t The first disables tracing every time block_unplug is hit.\n"
3735 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3736 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3737 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3738 "\t Like function triggers, the counter is only decremented if it\n"
3739 "\t enabled or disabled tracing.\n"
3740 "\t To remove a trigger without a count:\n"
3741 "\t echo '!<trigger>' > <system>/<event>/trigger\n"
3742 "\t To remove a trigger with a count:\n"
3743 "\t echo '!<trigger>:0' > <system>/<event>/trigger\n"
3744 "\t Filters can be ignored when removing a trigger.\n"
3748 tracing_readme_read(struct file *filp, char __user *ubuf,
3749 size_t cnt, loff_t *ppos)
3751 return simple_read_from_buffer(ubuf, cnt, ppos,
3752 readme_msg, strlen(readme_msg));
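/*
 * Illustrative session following the mini-HOWTO above (a sketch; paths
 * assume debugfs is mounted at /sys/kernel/debug, which is the usual
 * default but still an assumption here):
 *
 *   cd /sys/kernel/debug/tracing
 *   cat available_tracers
 *   echo function > current_tracer
 *   echo 1 > tracing_on
 *   cat trace_pipe           # consuming read of the live buffer
 *   echo 0 > tracing_on
 *   echo > trace             # clear the static buffer
 */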
3755 static const struct file_operations tracing_readme_fops = {
3756 .open = tracing_open_generic,
3757 .read = tracing_readme_read,
3758 .llseek = generic_file_llseek,
3761 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
3763 unsigned int *ptr = v;
3765 if (*pos || m->count)
3770 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
3772 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
3781 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
3787 arch_spin_lock(&trace_cmdline_lock);
3789 v = &savedcmd->map_cmdline_to_pid[0];
3791 v = saved_cmdlines_next(m, v, &l);
3799 static void saved_cmdlines_stop(struct seq_file *m, void *v)
3801 arch_spin_unlock(&trace_cmdline_lock);
3805 static int saved_cmdlines_show(struct seq_file *m, void *v)
3807 char buf[TASK_COMM_LEN];
3808 unsigned int *pid = v;
3810 __trace_find_cmdline(*pid, buf);
3811 seq_printf(m, "%d %s\n", *pid, buf);
3815 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
3816 .start = saved_cmdlines_start,
3817 .next = saved_cmdlines_next,
3818 .stop = saved_cmdlines_stop,
3819 .show = saved_cmdlines_show,
3822 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
3824 if (tracing_disabled)
3827 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
3830 static const struct file_operations tracing_saved_cmdlines_fops = {
3831 .open = tracing_saved_cmdlines_open,
3833 .llseek = seq_lseek,
3834 .release = seq_release,
3838 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
3839 size_t cnt, loff_t *ppos)
3844 arch_spin_lock(&trace_cmdline_lock);
3845 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
3846 arch_spin_unlock(&trace_cmdline_lock);
3848 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3851 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
3853 kfree(s->saved_cmdlines);
3854 kfree(s->map_cmdline_to_pid);
3858 static int tracing_resize_saved_cmdlines(unsigned int val)
3860 struct saved_cmdlines_buffer *s, *savedcmd_temp;
3862 s = kmalloc(sizeof(*s), GFP_KERNEL);
3866 if (allocate_cmdlines_buffer(val, s) < 0) {
3871 arch_spin_lock(&trace_cmdline_lock);
3872 savedcmd_temp = savedcmd;
3874 arch_spin_unlock(&trace_cmdline_lock);
3875 free_saved_cmdlines_buffer(savedcmd_temp);
3881 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
3882 size_t cnt, loff_t *ppos)
3887 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3891 /* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
3892 if (!val || val > PID_MAX_DEFAULT)
3895 ret = tracing_resize_saved_cmdlines((unsigned int)val);
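/*
 * Illustrative usage: enlarge the cache of recorded task comms (1024 is an
 * arbitrary example within the 1..PID_MAX_DEFAULT range checked above):
 *
 *   echo 1024 > /sys/kernel/debug/tracing/saved_cmdlines_size
 *   cat  /sys/kernel/debug/tracing/saved_cmdlines
 */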
3904 static const struct file_operations tracing_saved_cmdlines_size_fops = {
3905 .open = tracing_open_generic,
3906 .read = tracing_saved_cmdlines_size_read,
3907 .write = tracing_saved_cmdlines_size_write,
3911 tracing_set_trace_read(struct file *filp, char __user *ubuf,
3912 size_t cnt, loff_t *ppos)
3914 struct trace_array *tr = filp->private_data;
3915 char buf[MAX_TRACER_SIZE+2];
3918 mutex_lock(&trace_types_lock);
3919 r = sprintf(buf, "%s\n", tr->current_trace->name);
3920 mutex_unlock(&trace_types_lock);
3922 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3925 int tracer_init(struct tracer *t, struct trace_array *tr)
3927 tracing_reset_online_cpus(&tr->trace_buffer);
3931 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
3935 for_each_tracing_cpu(cpu)
3936 per_cpu_ptr(buf->data, cpu)->entries = val;
3939 #ifdef CONFIG_TRACER_MAX_TRACE
3940 /* resize @trace_buf's buffer to the size of @size_buf's entries */
3941 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
3942 struct trace_buffer *size_buf, int cpu_id)
3946 if (cpu_id == RING_BUFFER_ALL_CPUS) {
3947 for_each_tracing_cpu(cpu) {
3948 ret = ring_buffer_resize(trace_buf->buffer,
3949 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
3952 per_cpu_ptr(trace_buf->data, cpu)->entries =
3953 per_cpu_ptr(size_buf->data, cpu)->entries;
3956 ret = ring_buffer_resize(trace_buf->buffer,
3957 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
3959 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
3960 per_cpu_ptr(size_buf->data, cpu_id)->entries;
3965 #endif /* CONFIG_TRACER_MAX_TRACE */
3967 static int __tracing_resize_ring_buffer(struct trace_array *tr,
3968 unsigned long size, int cpu)
3973 * If kernel or user changes the size of the ring buffer
3974 * we use the size that was given, and we can forget about
3975 * expanding it later.
3977 ring_buffer_expanded = true;
3979 /* May be called before buffers are initialized */
3980 if (!tr->trace_buffer.buffer)
3983 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
3987 #ifdef CONFIG_TRACER_MAX_TRACE
3988 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
3989 !tr->current_trace->use_max_tr)
3992 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
3994 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
3995 &tr->trace_buffer, cpu);
3998 * AARGH! We are left with different
3999 * size max buffer!!!!
4000 * The max buffer is our "snapshot" buffer.
4001 * When a tracer needs a snapshot (one of the
4002 * latency tracers), it swaps the max buffer
4003 * with the saved snapshot. We succeeded in updating
4004 * the size of the main buffer, but failed to
4005 * update the size of the max buffer. But when we tried
4006 * to reset the main buffer to the original size, we
4007 * failed there too. This is very unlikely to
4008 * happen, but if it does, warn and kill all
4012 tracing_disabled = 1;
4017 if (cpu == RING_BUFFER_ALL_CPUS)
4018 set_buffer_entries(&tr->max_buffer, size);
4020 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
4023 #endif /* CONFIG_TRACER_MAX_TRACE */
4025 if (cpu == RING_BUFFER_ALL_CPUS)
4026 set_buffer_entries(&tr->trace_buffer, size);
4028 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
4033 static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4034 unsigned long size, int cpu_id)
4038 mutex_lock(&trace_types_lock);
4040 if (cpu_id != RING_BUFFER_ALL_CPUS) {
4041 /* make sure, this cpu is enabled in the mask */
4042 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4048 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
4053 mutex_unlock(&trace_types_lock);
4060 * tracing_update_buffers - used by tracing facility to expand ring buffers
4062 * To save memory on systems that have tracing configured in but never
4063 * use it, the ring buffers start out at a minimum size. Once a user
4064 * starts to use the tracing facility, they need to grow to their
4065 * default size.
4067 * This function is to be called when a tracer is about to be used.
4069 int tracing_update_buffers(void)
4073 mutex_lock(&trace_types_lock);
4074 if (!ring_buffer_expanded)
4075 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
4076 RING_BUFFER_ALL_CPUS);
4077 mutex_unlock(&trace_types_lock);
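/*
 * Illustrative call-site pattern (a sketch of how a caller is expected to
 * use this, not a quote of any specific caller): expand the buffers before
 * enabling anything that will write into them.
 *
 *   ret = tracing_update_buffers();
 *   if (ret < 0)
 *           return ret;
 *   ... go on to enable the tracer or event ...
 */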
4082 struct trace_option_dentry;
4084 static struct trace_option_dentry *
4085 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
4088 destroy_trace_option_files(struct trace_option_dentry *topts);
4091 * Used to clear out the tracer before deletion of an instance.
4092 * Must have trace_types_lock held.
4094 static void tracing_set_nop(struct trace_array *tr)
4096 if (tr->current_trace == &nop_trace)
4099 tr->current_trace->enabled--;
4101 if (tr->current_trace->reset)
4102 tr->current_trace->reset(tr);
4104 tr->current_trace = &nop_trace;
4107 static int tracing_set_tracer(struct trace_array *tr, const char *buf)
4109 static struct trace_option_dentry *topts;
4111 #ifdef CONFIG_TRACER_MAX_TRACE
4116 mutex_lock(&trace_types_lock);
4118 if (!ring_buffer_expanded) {
4119 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
4120 RING_BUFFER_ALL_CPUS);
4126 for (t = trace_types; t; t = t->next) {
4127 if (strcmp(t->name, buf) == 0)
4134 if (t == tr->current_trace)
4137 /* Some tracers are only allowed for the top level buffer */
4138 if (!trace_ok_for_array(t, tr)) {
4143 /* If trace pipe files are being read, we can't change the tracer */
4144 if (tr->current_trace->ref) {
4149 trace_branch_disable();
4151 tr->current_trace->enabled--;
4153 if (tr->current_trace->reset)
4154 tr->current_trace->reset(tr);
4156 /* Current trace needs to be nop_trace before synchronize_sched */
4157 tr->current_trace = &nop_trace;
4159 #ifdef CONFIG_TRACER_MAX_TRACE
4160 had_max_tr = tr->allocated_snapshot;
4162 if (had_max_tr && !t->use_max_tr) {
4164 * We need to make sure that the update_max_tr sees that
4165 * current_trace changed to nop_trace to keep it from
4166 * swapping the buffers after we resize it.
4167 * The update_max_tr is called with interrupts disabled,
4168 * so a synchronize_sched() is sufficient.
4170 synchronize_sched();
4174 /* Currently, only the top instance has options */
4175 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
4176 destroy_trace_option_files(topts);
4177 topts = create_trace_option_files(tr, t);
4180 #ifdef CONFIG_TRACER_MAX_TRACE
4181 if (t->use_max_tr && !had_max_tr) {
4182 ret = alloc_snapshot(tr);
4189 ret = tracer_init(t, tr);
4194 tr->current_trace = t;
4195 tr->current_trace->enabled++;
4196 trace_branch_enable(tr);
4198 mutex_unlock(&trace_types_lock);
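/*
 * Illustrative user-space trigger for this path (the tracer name must be
 * one of those listed in available_tracers):
 *
 *   cat  /sys/kernel/debug/tracing/available_tracers
 *   echo function_graph > /sys/kernel/debug/tracing/current_tracer
 */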
4204 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4205 size_t cnt, loff_t *ppos)
4207 struct trace_array *tr = filp->private_data;
4208 char buf[MAX_TRACER_SIZE+1];
4215 if (cnt > MAX_TRACER_SIZE)
4216 cnt = MAX_TRACER_SIZE;
4218 if (copy_from_user(&buf, ubuf, cnt))
4223 /* strip ending whitespace. */
4224 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4227 err = tracing_set_tracer(tr, buf);
4237 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
4238 size_t cnt, loff_t *ppos)
4243 r = snprintf(buf, sizeof(buf), "%ld\n",
4244 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
4245 if (r > sizeof(buf))
4247 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4251 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
4252 size_t cnt, loff_t *ppos)
4257 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4267 tracing_thresh_read(struct file *filp, char __user *ubuf,
4268 size_t cnt, loff_t *ppos)
4270 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
4274 tracing_thresh_write(struct file *filp, const char __user *ubuf,
4275 size_t cnt, loff_t *ppos)
4277 struct trace_array *tr = filp->private_data;
4280 mutex_lock(&trace_types_lock);
4281 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
4285 if (tr->current_trace->update_thresh) {
4286 ret = tr->current_trace->update_thresh(tr);
4293 mutex_unlock(&trace_types_lock);
4299 tracing_max_lat_read(struct file *filp, char __user *ubuf,
4300 size_t cnt, loff_t *ppos)
4302 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
4306 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4307 size_t cnt, loff_t *ppos)
4309 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
4312 static int tracing_open_pipe(struct inode *inode, struct file *filp)
4314 struct trace_array *tr = inode->i_private;
4315 struct trace_iterator *iter;
4318 if (tracing_disabled)
4321 if (trace_array_get(tr) < 0)
4324 mutex_lock(&trace_types_lock);
4326 /* create a buffer to store the information to pass to userspace */
4327 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
4330 __trace_array_put(tr);
4334 trace_seq_init(&iter->seq);
4335 iter->trace = tr->current_trace;
4337 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
4342 /* trace pipe does not show start of buffer */
4343 cpumask_setall(iter->started);
4345 if (trace_flags & TRACE_ITER_LATENCY_FMT)
4346 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4348 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4349 if (trace_clocks[tr->clock_id].in_ns)
4350 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4353 iter->trace_buffer = &tr->trace_buffer;
4354 iter->cpu_file = tracing_get_cpu(inode);
4355 mutex_init(&iter->mutex);
4356 filp->private_data = iter;
4358 if (iter->trace->pipe_open)
4359 iter->trace->pipe_open(iter);
4361 nonseekable_open(inode, filp);
4363 tr->current_trace->ref++;
4365 mutex_unlock(&trace_types_lock);
4371 __trace_array_put(tr);
4372 mutex_unlock(&trace_types_lock);
4376 static int tracing_release_pipe(struct inode *inode, struct file *file)
4378 struct trace_iterator *iter = file->private_data;
4379 struct trace_array *tr = inode->i_private;
4381 mutex_lock(&trace_types_lock);
4383 tr->current_trace->ref--;
4385 if (iter->trace->pipe_close)
4386 iter->trace->pipe_close(iter);
4388 mutex_unlock(&trace_types_lock);
4390 free_cpumask_var(iter->started);
4391 mutex_destroy(&iter->mutex);
4394 trace_array_put(tr);
4400 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
4402 /* Iterators are static, they should be filled or empty */
4403 if (trace_buffer_iter(iter, iter->cpu_file))
4404 return POLLIN | POLLRDNORM;
4406 if (trace_flags & TRACE_ITER_BLOCK)
4408 * Always select as readable when in blocking mode
4410 return POLLIN | POLLRDNORM;
4412 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
4417 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4419 struct trace_iterator *iter = filp->private_data;
4421 return trace_poll(iter, filp, poll_table);
4424 /* Must be called with iter->mutex held. */
4425 static int tracing_wait_pipe(struct file *filp)
4427 struct trace_iterator *iter = filp->private_data;
4430 while (trace_empty(iter)) {
4432 if ((filp->f_flags & O_NONBLOCK)) {
4437 * We block until we read something and tracing is disabled.
4438 * We still block if tracing is disabled, but we have never
4439 * read anything. This allows a user to cat this file, and
4440 * then enable tracing. But after we have read something,
4441 * we give an EOF when tracing is again disabled.
4443 * iter->pos will be 0 if we haven't read anything.
4445 if (!tracing_is_on() && iter->pos)
4448 mutex_unlock(&iter->mutex);
4450 ret = wait_on_pipe(iter, false);
4452 mutex_lock(&iter->mutex);
4465 tracing_read_pipe(struct file *filp, char __user *ubuf,
4466 size_t cnt, loff_t *ppos)
4468 struct trace_iterator *iter = filp->private_data;
4471 /* return any leftover data */
4472 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4476 trace_seq_init(&iter->seq);
4479 * Avoid more than one consumer on a single file descriptor
4480 * This is just a matter of trace coherency; the ring buffer itself
4483 mutex_lock(&iter->mutex);
4484 if (iter->trace->read) {
4485 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4491 sret = tracing_wait_pipe(filp);
4495 /* stop when tracing is finished */
4496 if (trace_empty(iter)) {
4501 if (cnt >= PAGE_SIZE)
4502 cnt = PAGE_SIZE - 1;
4504 /* reset all but tr, trace, and overruns */
4505 memset(&iter->seq, 0,
4506 sizeof(struct trace_iterator) -
4507 offsetof(struct trace_iterator, seq));
4508 cpumask_clear(iter->started);
4511 trace_event_read_lock();
4512 trace_access_lock(iter->cpu_file);
4513 while (trace_find_next_entry_inc(iter) != NULL) {
4514 enum print_line_t ret;
4515 int save_len = iter->seq.seq.len;
4517 ret = print_trace_line(iter);
4518 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4519 /* don't print partial lines */
4520 iter->seq.seq.len = save_len;
4523 if (ret != TRACE_TYPE_NO_CONSUME)
4524 trace_consume(iter);
4526 if (trace_seq_used(&iter->seq) >= cnt)
4530 * Setting the full flag means we reached the trace_seq buffer
4531 * size and we should have left via the partial-line condition above.
4532 * One of the trace_seq_* functions is not being used properly.
4534 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4537 trace_access_unlock(iter->cpu_file);
4538 trace_event_read_unlock();
4540 /* Now copy what we have to the user */
4541 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4542 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
4543 trace_seq_init(&iter->seq);
4546 * If there was nothing to send to user, in spite of consuming trace
4547 * entries, go back to wait for more entries.
4553 mutex_unlock(&iter->mutex);
4558 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4561 __free_page(spd->pages[idx]);
4564 static const struct pipe_buf_operations tracing_pipe_buf_ops = {
4566 .confirm = generic_pipe_buf_confirm,
4567 .release = generic_pipe_buf_release,
4568 .steal = generic_pipe_buf_steal,
4569 .get = generic_pipe_buf_get,
4573 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
4579 /* Seq buffer is page-sized, exactly what we need. */
4581 save_len = iter->seq.seq.len;
4582 ret = print_trace_line(iter);
4584 if (trace_seq_has_overflowed(&iter->seq)) {
4585 iter->seq.seq.len = save_len;
4590 * This should not be hit, because it should only
4591 * be set if the iter->seq overflowed. But check it
4592 * anyway to be safe.
4594 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4595 iter->seq.seq.len = save_len;
4599 count = trace_seq_used(&iter->seq) - save_len;
4602 iter->seq.seq.len = save_len;
4606 if (ret != TRACE_TYPE_NO_CONSUME)
4607 trace_consume(iter);
4609 if (!trace_find_next_entry_inc(iter)) {
4619 static ssize_t tracing_splice_read_pipe(struct file *filp,
4621 struct pipe_inode_info *pipe,
4625 struct page *pages_def[PIPE_DEF_BUFFERS];
4626 struct partial_page partial_def[PIPE_DEF_BUFFERS];
4627 struct trace_iterator *iter = filp->private_data;
4628 struct splice_pipe_desc spd = {
4630 .partial = partial_def,
4631 .nr_pages = 0, /* This gets updated below. */
4632 .nr_pages_max = PIPE_DEF_BUFFERS,
4634 .ops = &tracing_pipe_buf_ops,
4635 .spd_release = tracing_spd_release_pipe,
4641 if (splice_grow_spd(pipe, &spd))
4644 mutex_lock(&iter->mutex);
4646 if (iter->trace->splice_read) {
4647 ret = iter->trace->splice_read(iter, filp,
4648 ppos, pipe, len, flags);
4653 ret = tracing_wait_pipe(filp);
4657 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
4662 trace_event_read_lock();
4663 trace_access_lock(iter->cpu_file);
4665 /* Fill as many pages as possible. */
4666 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
4667 spd.pages[i] = alloc_page(GFP_KERNEL);
4671 rem = tracing_fill_pipe_page(rem, iter);
4673 /* Copy the data into the page, so we can start over. */
4674 ret = trace_seq_to_buffer(&iter->seq,
4675 page_address(spd.pages[i]),
4676 trace_seq_used(&iter->seq));
4678 __free_page(spd.pages[i]);
4681 spd.partial[i].offset = 0;
4682 spd.partial[i].len = trace_seq_used(&iter->seq);
4684 trace_seq_init(&iter->seq);
4687 trace_access_unlock(iter->cpu_file);
4688 trace_event_read_unlock();
4689 mutex_unlock(&iter->mutex);
4693 ret = splice_to_pipe(pipe, &spd);
4695 splice_shrink_spd(&spd);
4699 mutex_unlock(&iter->mutex);
4704 tracing_entries_read(struct file *filp, char __user *ubuf,
4705 size_t cnt, loff_t *ppos)
4707 struct inode *inode = file_inode(filp);
4708 struct trace_array *tr = inode->i_private;
4709 int cpu = tracing_get_cpu(inode);
4714 mutex_lock(&trace_types_lock);
4716 if (cpu == RING_BUFFER_ALL_CPUS) {
4717 int cpu, buf_size_same;
4722 /* check if all cpu sizes are same */
4723 for_each_tracing_cpu(cpu) {
4724 /* fill in the size from first enabled cpu */
4726 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4727 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
4733 if (buf_size_same) {
4734 if (!ring_buffer_expanded)
4735 r = sprintf(buf, "%lu (expanded: %lu)\n",
4737 trace_buf_size >> 10);
4739 r = sprintf(buf, "%lu\n", size >> 10);
4741 r = sprintf(buf, "X\n");
4743 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
4745 mutex_unlock(&trace_types_lock);
4747 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4752 tracing_entries_write(struct file *filp, const char __user *ubuf,
4753 size_t cnt, loff_t *ppos)
4755 struct inode *inode = file_inode(filp);
4756 struct trace_array *tr = inode->i_private;
4760 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4764 /* must have at least 1 entry */
4768 /* value is in KB */
4770 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
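/*
 * Illustrative usage (the value is in KB, as noted above; 4096 is just an
 * example size):
 *
 *   echo 4096 > /sys/kernel/debug/tracing/buffer_size_kb      # 4 MB per CPU
 *   cat  /sys/kernel/debug/tracing/buffer_total_size_kb
 */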
4780 tracing_total_entries_read(struct file *filp, char __user *ubuf,
4781 size_t cnt, loff_t *ppos)
4783 struct trace_array *tr = filp->private_data;
4786 unsigned long size = 0, expanded_size = 0;
4788 mutex_lock(&trace_types_lock);
4789 for_each_tracing_cpu(cpu) {
4790 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
4791 if (!ring_buffer_expanded)
4792 expanded_size += trace_buf_size >> 10;
4794 if (ring_buffer_expanded)
4795 r = sprintf(buf, "%lu\n", size);
4797 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
4798 mutex_unlock(&trace_types_lock);
4800 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4804 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
4805 size_t cnt, loff_t *ppos)
4808 * There is no need to read what the user has written; this function
4809 * just exists so that using "echo" on this file does not return an error
4818 tracing_free_buffer_release(struct inode *inode, struct file *filp)
4820 struct trace_array *tr = inode->i_private;
4822 /* disable tracing ? */
4823 if (trace_flags & TRACE_ITER_STOP_ON_FREE)
4824 tracer_tracing_off(tr);
4825 /* resize the ring buffer to 0 */
4826 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
4828 trace_array_put(tr);
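/*
 * Illustrative usage: the shrink happens on release, so a plain
 *
 *   echo > /sys/kernel/debug/tracing/free_buffer
 *
 * opens and closes the file, which (when the TRACE_ITER_STOP_ON_FREE flag
 * is set) turns tracing off and then resizes the ring buffer to zero.
 */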
4834 tracing_mark_write(struct file *filp, const char __user *ubuf,
4835 size_t cnt, loff_t *fpos)
4837 unsigned long addr = (unsigned long)ubuf;
4838 struct trace_array *tr = filp->private_data;
4839 struct ring_buffer_event *event;
4840 struct ring_buffer *buffer;
4841 struct print_entry *entry;
4842 unsigned long irq_flags;
4843 struct page *pages[2];
4853 if (tracing_disabled)
4856 if (!(trace_flags & TRACE_ITER_MARKERS))
4859 if (cnt > TRACE_BUF_SIZE)
4860 cnt = TRACE_BUF_SIZE;
4863 * Userspace is injecting traces into the kernel trace buffer.
4864 * We want to be as non-intrusive as possible.
4865 * To do so, we do not want to allocate any special buffers
4866 * or take any locks, but instead write the userspace data
4867 * straight into the ring buffer.
4869 * First we need to pin the userspace buffer into memory,
4870 * which most likely it already is, because the caller just referenced it.
4871 * But there's no guarantee that it is. By using get_user_pages_fast()
4872 * and kmap_atomic/kunmap_atomic() we can get access to the
4873 * pages directly. We then write the data directly into the
4876 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
4878 /* check if we cross pages */
4879 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
4882 offset = addr & (PAGE_SIZE - 1);
4885 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
4886 if (ret < nr_pages) {
4888 put_page(pages[ret]);
4893 for (i = 0; i < nr_pages; i++)
4894 map_page[i] = kmap_atomic(pages[i]);
4896 local_save_flags(irq_flags);
4897 size = sizeof(*entry) + cnt + 2; /* possible \n added */
4898 buffer = tr->trace_buffer.buffer;
4899 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
4900 irq_flags, preempt_count());
4902 /* Ring buffer disabled, return as if not open for write */
4907 entry = ring_buffer_event_data(event);
4908 entry->ip = _THIS_IP_;
4910 if (nr_pages == 2) {
4911 len = PAGE_SIZE - offset;
4912 memcpy(&entry->buf, map_page[0] + offset, len);
4913 memcpy(&entry->buf[len], map_page[1], cnt - len);
4915 memcpy(&entry->buf, map_page[0] + offset, cnt);
4917 if (entry->buf[cnt - 1] != '\n') {
4918 entry->buf[cnt] = '\n';
4919 entry->buf[cnt + 1] = '\0';
4921 entry->buf[cnt] = '\0';
4923 __buffer_unlock_commit(buffer, event);
4930 for (i = 0; i < nr_pages; i++) {
4931 kunmap_atomic(map_page[i]);
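/*
 * Illustrative user-space counterpart to this file (a sketch; error
 * handling trimmed, path assumes debugfs at /sys/kernel/debug):
 *
 *   #include <fcntl.h>
 *   #include <string.h>
 *   #include <unistd.h>
 *
 *   int mark(const char *msg)
 *   {
 *           int fd = open("/sys/kernel/debug/tracing/trace_marker", O_WRONLY);
 *           if (fd < 0)
 *                   return -1;
 *           write(fd, msg, strlen(msg));
 *           close(fd);
 *           return 0;
 *   }
 */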
4938 static int tracing_clock_show(struct seq_file *m, void *v)
4940 struct trace_array *tr = m->private;
4943 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
4945 "%s%s%s%s", i ? " " : "",
4946 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
4947 i == tr->clock_id ? "]" : "");
4953 static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
4957 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
4958 if (strcmp(trace_clocks[i].name, clockstr) == 0)
4961 if (i == ARRAY_SIZE(trace_clocks))
4964 mutex_lock(&trace_types_lock);
4968 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
4971 * New clock may not be consistent with the previous clock.
4972 * Reset the buffer so that it doesn't have incomparable timestamps.
4974 tracing_reset_online_cpus(&tr->trace_buffer);
4976 #ifdef CONFIG_TRACER_MAX_TRACE
4977 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
4978 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
4979 tracing_reset_online_cpus(&tr->max_buffer);
4982 mutex_unlock(&trace_types_lock);
4987 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
4988 size_t cnt, loff_t *fpos)
4990 struct seq_file *m = filp->private_data;
4991 struct trace_array *tr = m->private;
4993 const char *clockstr;
4996 if (cnt >= sizeof(buf))
4999 if (copy_from_user(&buf, ubuf, cnt))
5004 clockstr = strstrip(buf);
5006 ret = tracing_set_clock(tr, clockstr);
5015 static int tracing_clock_open(struct inode *inode, struct file *file)
5017 struct trace_array *tr = inode->i_private;
5020 if (tracing_disabled)
5023 if (trace_array_get(tr))
5026 ret = single_open(file, tracing_clock_show, inode->i_private);
5028 trace_array_put(tr);
5033 struct ftrace_buffer_info {
5034 struct trace_iterator iter;
5039 #ifdef CONFIG_TRACER_SNAPSHOT
5040 static int tracing_snapshot_open(struct inode *inode, struct file *file)
5042 struct trace_array *tr = inode->i_private;
5043 struct trace_iterator *iter;
5047 if (trace_array_get(tr) < 0)
5050 if (file->f_mode & FMODE_READ) {
5051 iter = __tracing_open(inode, file, true);
5053 ret = PTR_ERR(iter);
5055 /* Writes still need the seq_file to hold the private data */
5057 m = kzalloc(sizeof(*m), GFP_KERNEL);
5060 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5068 iter->trace_buffer = &tr->max_buffer;
5069 iter->cpu_file = tracing_get_cpu(inode);
5071 file->private_data = m;
5075 trace_array_put(tr);
5081 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
5084 struct seq_file *m = filp->private_data;
5085 struct trace_iterator *iter = m->private;
5086 struct trace_array *tr = iter->tr;
5090 ret = tracing_update_buffers();
5094 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5098 mutex_lock(&trace_types_lock);
5100 if (tr->current_trace->use_max_tr) {
5107 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5111 if (tr->allocated_snapshot)
5115 /* Only allow per-cpu swap if the ring buffer supports it */
5116 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
5117 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5122 if (!tr->allocated_snapshot) {
5123 ret = alloc_snapshot(tr);
5127 local_irq_disable();
5128 /* Now, we're going to swap */
5129 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5130 update_max_tr(tr, current, smp_processor_id());
5132 update_max_tr_single(tr, current, iter->cpu_file);
5136 if (tr->allocated_snapshot) {
5137 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5138 tracing_reset_online_cpus(&tr->max_buffer);
5140 tracing_reset(&tr->max_buffer, iter->cpu_file);
5150 mutex_unlock(&trace_types_lock);
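/*
 * For illustration, per the ftrace documentation the snapshot file
 * accepts a small set of values (default debugfs mount assumed):
 *
 *	# echo 1 > /sys/kernel/debug/tracing/snapshot	(allocate + swap)
 *	# echo 0 > /sys/kernel/debug/tracing/snapshot	(free the snapshot)
 *	# cat /sys/kernel/debug/tracing/snapshot	(read the swapped-out buffer)
 *
 * The writes are handled by tracing_snapshot_write() above, the read
 * path by tracing_snapshot_open().
 */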
5154 static int tracing_snapshot_release(struct inode *inode, struct file *file)
5156 struct seq_file *m = file->private_data;
5159 ret = tracing_release(inode, file);
5161 if (file->f_mode & FMODE_READ)
5164 /* If write only, the seq_file is just a stub */
5172 static int tracing_buffers_open(struct inode *inode, struct file *filp);
5173 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
5174 size_t count, loff_t *ppos);
5175 static int tracing_buffers_release(struct inode *inode, struct file *file);
5176 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5177 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
5179 static int snapshot_raw_open(struct inode *inode, struct file *filp)
5181 struct ftrace_buffer_info *info;
5184 ret = tracing_buffers_open(inode, filp);
5188 info = filp->private_data;
5190 if (info->iter.trace->use_max_tr) {
5191 tracing_buffers_release(inode, filp);
5195 info->iter.snapshot = true;
5196 info->iter.trace_buffer = &info->iter.tr->max_buffer;
5201 #endif /* CONFIG_TRACER_SNAPSHOT */
5204 static const struct file_operations tracing_thresh_fops = {
5205 .open = tracing_open_generic,
5206 .read = tracing_thresh_read,
5207 .write = tracing_thresh_write,
5208 .llseek = generic_file_llseek,
5211 static const struct file_operations tracing_max_lat_fops = {
5212 .open = tracing_open_generic,
5213 .read = tracing_max_lat_read,
5214 .write = tracing_max_lat_write,
5215 .llseek = generic_file_llseek,
5218 static const struct file_operations set_tracer_fops = {
5219 .open = tracing_open_generic,
5220 .read = tracing_set_trace_read,
5221 .write = tracing_set_trace_write,
5222 .llseek = generic_file_llseek,
5225 static const struct file_operations tracing_pipe_fops = {
5226 .open = tracing_open_pipe,
5227 .poll = tracing_poll_pipe,
5228 .read = tracing_read_pipe,
5229 .splice_read = tracing_splice_read_pipe,
5230 .release = tracing_release_pipe,
5231 .llseek = no_llseek,
5234 static const struct file_operations tracing_entries_fops = {
5235 .open = tracing_open_generic_tr,
5236 .read = tracing_entries_read,
5237 .write = tracing_entries_write,
5238 .llseek = generic_file_llseek,
5239 .release = tracing_release_generic_tr,
5242 static const struct file_operations tracing_total_entries_fops = {
5243 .open = tracing_open_generic_tr,
5244 .read = tracing_total_entries_read,
5245 .llseek = generic_file_llseek,
5246 .release = tracing_release_generic_tr,
5249 static const struct file_operations tracing_free_buffer_fops = {
5250 .open = tracing_open_generic_tr,
5251 .write = tracing_free_buffer_write,
5252 .release = tracing_free_buffer_release,
5255 static const struct file_operations tracing_mark_fops = {
5256 .open = tracing_open_generic_tr,
5257 .write = tracing_mark_write,
5258 .llseek = generic_file_llseek,
5259 .release = tracing_release_generic_tr,
5262 static const struct file_operations trace_clock_fops = {
5263 .open = tracing_clock_open,
5265 .llseek = seq_lseek,
5266 .release = tracing_single_release_tr,
5267 .write = tracing_clock_write,
5270 #ifdef CONFIG_TRACER_SNAPSHOT
5271 static const struct file_operations snapshot_fops = {
5272 .open = tracing_snapshot_open,
5274 .write = tracing_snapshot_write,
5275 .llseek = tracing_lseek,
5276 .release = tracing_snapshot_release,
5279 static const struct file_operations snapshot_raw_fops = {
5280 .open = snapshot_raw_open,
5281 .read = tracing_buffers_read,
5282 .release = tracing_buffers_release,
5283 .splice_read = tracing_buffers_splice_read,
5284 .llseek = no_llseek,
5287 #endif /* CONFIG_TRACER_SNAPSHOT */
5289 static int tracing_buffers_open(struct inode *inode, struct file *filp)
5291 struct trace_array *tr = inode->i_private;
5292 struct ftrace_buffer_info *info;
5295 if (tracing_disabled)
5298 if (trace_array_get(tr) < 0)
5301 info = kzalloc(sizeof(*info), GFP_KERNEL);
5303 trace_array_put(tr);
5307 mutex_lock(&trace_types_lock);
5310 info->iter.cpu_file = tracing_get_cpu(inode);
5311 info->iter.trace = tr->current_trace;
5312 info->iter.trace_buffer = &tr->trace_buffer;
5314 /* Force reading ring buffer for first read */
5315 info->read = (unsigned int)-1;
5317 filp->private_data = info;
5319 tr->current_trace->ref++;
5321 mutex_unlock(&trace_types_lock);
5323 ret = nonseekable_open(inode, filp);
5325 trace_array_put(tr);
5331 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5333 struct ftrace_buffer_info *info = filp->private_data;
5334 struct trace_iterator *iter = &info->iter;
5336 return trace_poll(iter, filp, poll_table);
5340 tracing_buffers_read(struct file *filp, char __user *ubuf,
5341 size_t count, loff_t *ppos)
5343 struct ftrace_buffer_info *info = filp->private_data;
5344 struct trace_iterator *iter = &info->iter;
5351 #ifdef CONFIG_TRACER_MAX_TRACE
5352 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5357 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5362 /* Do we have previous read data to read? */
5363 if (info->read < PAGE_SIZE)
5367 trace_access_lock(iter->cpu_file);
5368 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
5372 trace_access_unlock(iter->cpu_file);
5375 if (trace_empty(iter)) {
5376 if ((filp->f_flags & O_NONBLOCK))
5379 ret = wait_on_pipe(iter, false);
5390 size = PAGE_SIZE - info->read;
5394 ret = copy_to_user(ubuf, info->spare + info->read, size);
5406 static int tracing_buffers_release(struct inode *inode, struct file *file)
5408 struct ftrace_buffer_info *info = file->private_data;
5409 struct trace_iterator *iter = &info->iter;
5411 mutex_lock(&trace_types_lock);
5413 iter->tr->current_trace->ref--;
5415 __trace_array_put(iter->tr);
5418 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
5421 mutex_unlock(&trace_types_lock);
5427 struct ring_buffer *buffer;
5432 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5433 struct pipe_buffer *buf)
5435 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5440 ring_buffer_free_read_page(ref->buffer, ref->page);
5445 static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5446 struct pipe_buffer *buf)
5448 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5453 /* Pipe buffer operations for a buffer. */
5454 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
5456 .confirm = generic_pipe_buf_confirm,
5457 .release = buffer_pipe_buf_release,
5458 .steal = generic_pipe_buf_steal,
5459 .get = buffer_pipe_buf_get,
5463 * Callback from splice_to_pipe(), if we need to release some pages
5464 * at the end of the spd in case we errored out while filling the pipe.
5466 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5468 struct buffer_ref *ref =
5469 (struct buffer_ref *)spd->partial[i].private;
5474 ring_buffer_free_read_page(ref->buffer, ref->page);
5476 spd->partial[i].private = 0;
5480 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5481 struct pipe_inode_info *pipe, size_t len,
5484 struct ftrace_buffer_info *info = file->private_data;
5485 struct trace_iterator *iter = &info->iter;
5486 struct partial_page partial_def[PIPE_DEF_BUFFERS];
5487 struct page *pages_def[PIPE_DEF_BUFFERS];
5488 struct splice_pipe_desc spd = {
5490 .partial = partial_def,
5491 .nr_pages_max = PIPE_DEF_BUFFERS,
5493 .ops = &buffer_pipe_buf_ops,
5494 .spd_release = buffer_spd_release,
5496 struct buffer_ref *ref;
5497 int entries, size, i;
5500 #ifdef CONFIG_TRACER_MAX_TRACE
5501 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5505 if (splice_grow_spd(pipe, &spd))
5508 if (*ppos & (PAGE_SIZE - 1))
5511 if (len & (PAGE_SIZE - 1)) {
5512 if (len < PAGE_SIZE)
5518 trace_access_lock(iter->cpu_file);
5519 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5521 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
5525 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
5532 ref->buffer = iter->trace_buffer->buffer;
5533 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
5540 r = ring_buffer_read_page(ref->buffer, &ref->page,
5541 len, iter->cpu_file, 1);
5543 ring_buffer_free_read_page(ref->buffer, ref->page);
5549 * Zero out any leftover data; this page is going to user space.
5552 size = ring_buffer_page_len(ref->page);
5553 if (size < PAGE_SIZE)
5554 memset(ref->page + size, 0, PAGE_SIZE - size);
5556 page = virt_to_page(ref->page);
5558 spd.pages[i] = page;
5559 spd.partial[i].len = PAGE_SIZE;
5560 spd.partial[i].offset = 0;
5561 spd.partial[i].private = (unsigned long)ref;
5565 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5568 trace_access_unlock(iter->cpu_file);
5571 /* did we read anything? */
5572 if (!spd.nr_pages) {
5576 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
5579 ret = wait_on_pipe(iter, true);
5586 ret = splice_to_pipe(pipe, &spd);
5587 splice_shrink_spd(&spd);
5592 static const struct file_operations tracing_buffers_fops = {
5593 .open = tracing_buffers_open,
5594 .read = tracing_buffers_read,
5595 .poll = tracing_buffers_poll,
5596 .release = tracing_buffers_release,
5597 .splice_read = tracing_buffers_splice_read,
5598 .llseek = no_llseek,
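/*
 * For illustration: per_cpu/cpuN/trace_pipe_raw exposes whole ring
 * buffer pages and is typically consumed with splice(2). A minimal
 * userspace sketch (error handling omitted, default debugfs mount
 * assumed, "cpu0.raw" is just an example output file):
 *
 *	int raw = open("/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw",
 *		       O_RDONLY);
 *	int out = open("cpu0.raw", O_WRONLY | O_CREAT, 0644);
 *	int p[2];
 *
 *	pipe(p);
 *	splice(raw, NULL, p[1], NULL, 4096, 0);
 *	splice(p[0], NULL, out, NULL, 4096, 0);
 *
 * The first splice exercises tracing_buffers_splice_read() above.
 */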
5602 tracing_stats_read(struct file *filp, char __user *ubuf,
5603 size_t count, loff_t *ppos)
5605 struct inode *inode = file_inode(filp);
5606 struct trace_array *tr = inode->i_private;
5607 struct trace_buffer *trace_buf = &tr->trace_buffer;
5608 int cpu = tracing_get_cpu(inode);
5609 struct trace_seq *s;
5611 unsigned long long t;
5612 unsigned long usec_rem;
5614 s = kmalloc(sizeof(*s), GFP_KERNEL);
5620 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
5621 trace_seq_printf(s, "entries: %ld\n", cnt);
5623 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
5624 trace_seq_printf(s, "overrun: %ld\n", cnt);
5626 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
5627 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5629 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
5630 trace_seq_printf(s, "bytes: %ld\n", cnt);
5632 if (trace_clocks[tr->clock_id].in_ns) {
5633 /* local or global for trace_clock */
5634 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5635 usec_rem = do_div(t, USEC_PER_SEC);
5636 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5639 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
5640 usec_rem = do_div(t, USEC_PER_SEC);
5641 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5643 /* counter or tsc mode for trace_clock */
5644 trace_seq_printf(s, "oldest event ts: %llu\n",
5645 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5647 trace_seq_printf(s, "now ts: %llu\n",
5648 ring_buffer_time_stamp(trace_buf->buffer, cpu));
5651 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
5652 trace_seq_printf(s, "dropped events: %ld\n", cnt);
5654 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
5655 trace_seq_printf(s, "read events: %ld\n", cnt);
5657 count = simple_read_from_buffer(ubuf, count, ppos,
5658 s->buffer, trace_seq_used(s));
5665 static const struct file_operations tracing_stats_fops = {
5666 .open = tracing_open_generic_tr,
5667 .read = tracing_stats_read,
5668 .llseek = generic_file_llseek,
5669 .release = tracing_release_generic_tr,
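/*
 * For illustration, the per-cpu stats file created with these fops
 * reads like the following (values are examples only; the fields match
 * the trace_seq_printf() calls in tracing_stats_read() above):
 *
 *	# cat /sys/kernel/debug/tracing/per_cpu/cpu0/stats
 *	entries: 129
 *	overrun: 0
 *	commit overrun: 0
 *	bytes: 5312
 *	...
 */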
5672 #ifdef CONFIG_DYNAMIC_FTRACE
5674 int __weak ftrace_arch_read_dyn_info(char *buf, int size)
5680 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
5681 size_t cnt, loff_t *ppos)
5683 static char ftrace_dyn_info_buffer[1024];
5684 static DEFINE_MUTEX(dyn_info_mutex);
5685 unsigned long *p = filp->private_data;
5686 char *buf = ftrace_dyn_info_buffer;
5687 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
5690 mutex_lock(&dyn_info_mutex);
5691 r = sprintf(buf, "%ld ", *p);
5693 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
5696 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5698 mutex_unlock(&dyn_info_mutex);
5703 static const struct file_operations tracing_dyn_info_fops = {
5704 .open = tracing_open_generic,
5705 .read = tracing_read_dyn_info,
5706 .llseek = generic_file_llseek,
5708 #endif /* CONFIG_DYNAMIC_FTRACE */
5710 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
5712 ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5718 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5720 unsigned long *count = (long *)data;
5732 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
5733 struct ftrace_probe_ops *ops, void *data)
5735 long count = (long)data;
5737 seq_printf(m, "%ps:", (void *)ip);
5739 seq_puts(m, "snapshot");
5742 seq_puts(m, ":unlimited\n");
5744 seq_printf(m, ":count=%ld\n", count);
5749 static struct ftrace_probe_ops snapshot_probe_ops = {
5750 .func = ftrace_snapshot,
5751 .print = ftrace_snapshot_print,
5754 static struct ftrace_probe_ops snapshot_count_probe_ops = {
5755 .func = ftrace_count_snapshot,
5756 .print = ftrace_snapshot_print,
5760 ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
5761 char *glob, char *cmd, char *param, int enable)
5763 struct ftrace_probe_ops *ops;
5764 void *count = (void *)-1;
5768 /* hash funcs only work with set_ftrace_filter */
5772 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
5774 if (glob[0] == '!') {
5775 unregister_ftrace_function_probe_func(glob+1, ops);
5782 number = strsep(&param, ":");
5784 if (!strlen(number))
5788 * We use the callback data field (which is a pointer)
5791 ret = kstrtoul(number, 0, (unsigned long *)&count);
5796 ret = register_ftrace_function_probe(glob, ops, count);
5799 alloc_snapshot(&global_trace);
5801 return ret < 0 ? ret : 0;
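/*
 * For illustration, the "snapshot" command registered below is driven
 * from set_ftrace_filter (requires CONFIG_DYNAMIC_FTRACE), e.g.
 *
 *	# echo 'schedule:snapshot' > set_ftrace_filter
 *	# echo 'schedule:snapshot:3' > set_ftrace_filter
 *	# echo '!schedule:snapshot' > set_ftrace_filter
 *
 * The first form takes a snapshot on every call to schedule(), the
 * second only for the first three calls, and the '!' form removes the
 * probe again.
 */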
5804 static struct ftrace_func_command ftrace_snapshot_cmd = {
5806 .func = ftrace_trace_snapshot_callback,
5809 static __init int register_snapshot_cmd(void)
5811 return register_ftrace_command(&ftrace_snapshot_cmd);
5814 static inline __init int register_snapshot_cmd(void) { return 0; }
5815 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
5817 struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
5822 if (!debugfs_initialized())
5823 return ERR_PTR(-ENODEV);
5825 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
5826 tr->dir = debugfs_create_dir("tracing", NULL);
5829 pr_warn_once("Could not create debugfs directory 'tracing'\n");
5834 struct dentry *tracing_init_dentry(void)
5836 return tracing_init_dentry_tr(&global_trace);
5839 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
5841 struct dentry *d_tracer;
5844 return tr->percpu_dir;
5846 d_tracer = tracing_init_dentry_tr(tr);
5847 if (IS_ERR(d_tracer))
5850 tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);
5852 WARN_ONCE(!tr->percpu_dir,
5853 "Could not create debugfs directory 'per_cpu/%d'\n", cpu);
5855 return tr->percpu_dir;
5858 static struct dentry *
5859 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
5860 void *data, long cpu, const struct file_operations *fops)
5862 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
5864 if (ret) /* See tracing_get_cpu() */
5865 ret->d_inode->i_cdev = (void *)(cpu + 1);
5870 tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
5872 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
5873 struct dentry *d_cpu;
5874 char cpu_dir[30]; /* 30 characters should be more than enough */
5879 snprintf(cpu_dir, 30, "cpu%ld", cpu);
5880 d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
5882 pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
5886 /* per cpu trace_pipe */
5887 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
5888 tr, cpu, &tracing_pipe_fops);
5891 trace_create_cpu_file("trace", 0644, d_cpu,
5892 tr, cpu, &tracing_fops);
5894 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
5895 tr, cpu, &tracing_buffers_fops);
5897 trace_create_cpu_file("stats", 0444, d_cpu,
5898 tr, cpu, &tracing_stats_fops);
5900 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
5901 tr, cpu, &tracing_entries_fops);
5903 #ifdef CONFIG_TRACER_SNAPSHOT
5904 trace_create_cpu_file("snapshot", 0644, d_cpu,
5905 tr, cpu, &snapshot_fops);
5907 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
5908 tr, cpu, &snapshot_raw_fops);
5912 #ifdef CONFIG_FTRACE_SELFTEST
5913 /* Let selftest have access to static functions in this file */
5914 #include "trace_selftest.c"
5917 struct trace_option_dentry {
5918 struct tracer_opt *opt;
5919 struct tracer_flags *flags;
5920 struct trace_array *tr;
5921 struct dentry *entry;
5925 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
5928 struct trace_option_dentry *topt = filp->private_data;
5931 if (topt->flags->val & topt->opt->bit)
5936 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5940 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
5943 struct trace_option_dentry *topt = filp->private_data;
5947 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5951 if (val != 0 && val != 1)
5954 if (!!(topt->flags->val & topt->opt->bit) != val) {
5955 mutex_lock(&trace_types_lock);
5956 ret = __set_tracer_option(topt->tr, topt->flags,
5958 mutex_unlock(&trace_types_lock);
5969 static const struct file_operations trace_options_fops = {
5970 .open = tracing_open_generic,
5971 .read = trace_options_read,
5972 .write = trace_options_write,
5973 .llseek = generic_file_llseek,
5977 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
5980 long index = (long)filp->private_data;
5983 if (trace_flags & (1 << index))
5988 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5992 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
5995 struct trace_array *tr = &global_trace;
5996 long index = (long)filp->private_data;
6000 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6004 if (val != 0 && val != 1)
6007 mutex_lock(&trace_types_lock);
6008 ret = set_tracer_flag(tr, 1 << index, val);
6009 mutex_unlock(&trace_types_lock);
6019 static const struct file_operations trace_options_core_fops = {
6020 .open = tracing_open_generic,
6021 .read = trace_options_core_read,
6022 .write = trace_options_core_write,
6023 .llseek = generic_file_llseek,
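/*
 * For illustration, both option flavours end up as boolean files under
 * the options directory, e.g.
 *
 *	# echo 1 > /sys/kernel/debug/tracing/options/sym-offset
 *	# echo 0 > /sys/kernel/debug/tracing/options/sym-offset
 *
 * Core flags go through trace_options_core_write(); tracer-specific
 * flags go through trace_options_write() above.
 */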
6026 struct dentry *trace_create_file(const char *name,
6028 struct dentry *parent,
6030 const struct file_operations *fops)
6034 ret = debugfs_create_file(name, mode, parent, data, fops);
6036 pr_warning("Could not create debugfs '%s' entry\n", name);
6042 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
6044 struct dentry *d_tracer;
6049 d_tracer = tracing_init_dentry_tr(tr);
6050 if (IS_ERR(d_tracer))
6053 tr->options = debugfs_create_dir("options", d_tracer);
6055 pr_warning("Could not create debugfs directory 'options'\n");
6063 create_trace_option_file(struct trace_array *tr,
6064 struct trace_option_dentry *topt,
6065 struct tracer_flags *flags,
6066 struct tracer_opt *opt)
6068 struct dentry *t_options;
6070 t_options = trace_options_init_dentry(tr);
6074 topt->flags = flags;
6078 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
6079 &trace_options_fops);
6083 static struct trace_option_dentry *
6084 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
6086 struct trace_option_dentry *topts;
6087 struct tracer_flags *flags;
6088 struct tracer_opt *opts;
6094 flags = tracer->flags;
6096 if (!flags || !flags->opts)
6101 for (cnt = 0; opts[cnt].name; cnt++)
6104 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
6108 for (cnt = 0; opts[cnt].name; cnt++)
6109 create_trace_option_file(tr, &topts[cnt], flags,
6116 destroy_trace_option_files(struct trace_option_dentry *topts)
6123 for (cnt = 0; topts[cnt].opt; cnt++)
6124 debugfs_remove(topts[cnt].entry);
6129 static struct dentry *
6130 create_trace_option_core_file(struct trace_array *tr,
6131 const char *option, long index)
6133 struct dentry *t_options;
6135 t_options = trace_options_init_dentry(tr);
6139 return trace_create_file(option, 0644, t_options, (void *)index,
6140 &trace_options_core_fops);
6143 static __init void create_trace_options_dir(struct trace_array *tr)
6145 struct dentry *t_options;
6148 t_options = trace_options_init_dentry(tr);
6152 for (i = 0; trace_options[i]; i++)
6153 create_trace_option_core_file(tr, trace_options[i], i);
6157 rb_simple_read(struct file *filp, char __user *ubuf,
6158 size_t cnt, loff_t *ppos)
6160 struct trace_array *tr = filp->private_data;
6164 r = tracer_tracing_is_on(tr);
6165 r = sprintf(buf, "%d\n", r);
6167 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6171 rb_simple_write(struct file *filp, const char __user *ubuf,
6172 size_t cnt, loff_t *ppos)
6174 struct trace_array *tr = filp->private_data;
6175 struct ring_buffer *buffer = tr->trace_buffer.buffer;
6179 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6184 mutex_lock(&trace_types_lock);
6186 tracer_tracing_on(tr);
6187 if (tr->current_trace->start)
6188 tr->current_trace->start(tr);
6190 tracer_tracing_off(tr);
6191 if (tr->current_trace->stop)
6192 tr->current_trace->stop(tr);
6194 mutex_unlock(&trace_types_lock);
6202 static const struct file_operations rb_simple_fops = {
6203 .open = tracing_open_generic_tr,
6204 .read = rb_simple_read,
6205 .write = rb_simple_write,
6206 .release = tracing_release_generic_tr,
6207 .llseek = default_llseek,
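/*
 * For illustration, the tracing_on file wired up to rb_simple_fops
 * toggles recording without tearing the current tracer down:
 *
 *	# echo 0 > /sys/kernel/debug/tracing/tracing_on	(pause recording)
 *	# echo 1 > /sys/kernel/debug/tracing/tracing_on	(resume recording)
 */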
6210 struct dentry *trace_instance_dir;
6213 init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);
6216 allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
6218 enum ring_buffer_flags rb_flags;
6220 rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
6224 buf->buffer = ring_buffer_alloc(size, rb_flags);
6228 buf->data = alloc_percpu(struct trace_array_cpu);
6230 ring_buffer_free(buf->buffer);
6234 /* Allocate the first page for all buffers */
6235 set_buffer_entries(&tr->trace_buffer,
6236 ring_buffer_size(tr->trace_buffer.buffer, 0));
6241 static int allocate_trace_buffers(struct trace_array *tr, int size)
6245 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
6249 #ifdef CONFIG_TRACER_MAX_TRACE
6250 ret = allocate_trace_buffer(tr, &tr->max_buffer,
6251 allocate_snapshot ? size : 1);
6253 ring_buffer_free(tr->trace_buffer.buffer);
6254 free_percpu(tr->trace_buffer.data);
6257 tr->allocated_snapshot = allocate_snapshot;
6260 * Only the top level trace array gets its snapshot allocated
6261 * from the kernel command line.
6263 allocate_snapshot = false;
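/*
 * For illustration: the boot-time allocation mentioned above is
 * requested with the "alloc_snapshot" kernel command line parameter,
 * e.g.
 *
 *	alloc_snapshot ftrace=irqsoff
 *
 * which pre-allocates the max/snapshot buffer for the global trace
 * array only; instances created later start without one.
 */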
6268 static void free_trace_buffer(struct trace_buffer *buf)
6271 ring_buffer_free(buf->buffer);
6273 free_percpu(buf->data);
6278 static void free_trace_buffers(struct trace_array *tr)
6283 free_trace_buffer(&tr->trace_buffer);
6285 #ifdef CONFIG_TRACER_MAX_TRACE
6286 free_trace_buffer(&tr->max_buffer);
6290 static int new_instance_create(const char *name)
6292 struct trace_array *tr;
6295 mutex_lock(&trace_types_lock);
6298 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6299 if (tr->name && strcmp(tr->name, name) == 0)
6304 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
6308 tr->name = kstrdup(name, GFP_KERNEL);
6312 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
6315 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
6317 raw_spin_lock_init(&tr->start_lock);
6319 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
6321 tr->current_trace = &nop_trace;
6323 INIT_LIST_HEAD(&tr->systems);
6324 INIT_LIST_HEAD(&tr->events);
6326 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
6329 tr->dir = debugfs_create_dir(name, trace_instance_dir);
6333 ret = event_trace_add_tracer(tr->dir, tr);
6335 debugfs_remove_recursive(tr->dir);
6339 init_tracer_debugfs(tr, tr->dir);
6341 list_add(&tr->list, &ftrace_trace_arrays);
6343 mutex_unlock(&trace_types_lock);
6348 free_trace_buffers(tr);
6349 free_cpumask_var(tr->tracing_cpumask);
6354 mutex_unlock(&trace_types_lock);
6360 static int instance_delete(const char *name)
6362 struct trace_array *tr;
6366 mutex_lock(&trace_types_lock);
6369 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6370 if (tr->name && strcmp(tr->name, name) == 0) {
6379 if (tr->ref || (tr->current_trace && tr->current_trace->ref))
6382 list_del(&tr->list);
6384 tracing_set_nop(tr);
6385 event_trace_del_tracer(tr);
6386 ftrace_destroy_function_files(tr);
6387 debugfs_remove_recursive(tr->dir);
6388 free_trace_buffers(tr);
6396 mutex_unlock(&trace_types_lock);
6401 static int instance_mkdir (struct inode *inode, struct dentry *dentry, umode_t mode)
6403 struct dentry *parent;
6406 /* Paranoid: Make sure the parent is the "instances" directory */
6407 parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
6408 if (WARN_ON_ONCE(parent != trace_instance_dir))
6412 * The inode mutex is locked, but debugfs_create_dir() will also
6413 * take the mutex. As the instances directory can not be destroyed
6414 * or changed in any other way, it is safe to unlock it, and
6415 * let the dentry try. If two users try to make the same dir at
6416 * the same time, then the new_instance_create() will determine the
6419 mutex_unlock(&inode->i_mutex);
6421 ret = new_instance_create(dentry->d_iname);
6423 mutex_lock(&inode->i_mutex);
6428 static int instance_rmdir(struct inode *inode, struct dentry *dentry)
6430 struct dentry *parent;
6433 /* Paranoid: Make sure the parent is the "instances" directory */
6434 parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
6435 if (WARN_ON_ONCE(parent != trace_instance_dir))
6438 /* The caller did a dget() on dentry */
6439 mutex_unlock(&dentry->d_inode->i_mutex);
6442 * The inode mutex is locked, but debugfs_create_dir() will also
6443 * take the mutex. As the instances directory can not be destroyed
6444 * or changed in any other way, it is safe to unlock it, and
6445 * let the dentry try. If two users try to make the same dir at
6446 * the same time, then the instance_delete() will determine the
6449 mutex_unlock(&inode->i_mutex);
6451 ret = instance_delete(dentry->d_iname);
6453 mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
6454 mutex_lock(&dentry->d_inode->i_mutex);
6459 static const struct inode_operations instance_dir_inode_operations = {
6460 .lookup = simple_lookup,
6461 .mkdir = instance_mkdir,
6462 .rmdir = instance_rmdir,
6465 static __init void create_trace_instances(struct dentry *d_tracer)
6467 trace_instance_dir = debugfs_create_dir("instances", d_tracer);
6468 if (WARN_ON(!trace_instance_dir))
6471 /* Hijack the dir inode operations, to allow mkdir */
6472 trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
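/*
 * For illustration: with the mkdir/rmdir hooks installed above, a new
 * trace instance with its own buffers is created and destroyed from
 * userspace with ordinary directory operations, e.g.
 *
 *	# mkdir /sys/kernel/debug/tracing/instances/foo
 *	# echo 1 > /sys/kernel/debug/tracing/instances/foo/tracing_on
 *	# rmdir /sys/kernel/debug/tracing/instances/foo
 *
 * which land in instance_mkdir() and instance_rmdir() respectively.
 */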
6476 init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
6480 trace_create_file("available_tracers", 0444, d_tracer,
6481 tr, &show_traces_fops);
6483 trace_create_file("current_tracer", 0644, d_tracer,
6484 tr, &set_tracer_fops);
6486 trace_create_file("tracing_cpumask", 0644, d_tracer,
6487 tr, &tracing_cpumask_fops);
6489 trace_create_file("trace_options", 0644, d_tracer,
6490 tr, &tracing_iter_fops);
6492 trace_create_file("trace", 0644, d_tracer,
6495 trace_create_file("trace_pipe", 0444, d_tracer,
6496 tr, &tracing_pipe_fops);
6498 trace_create_file("buffer_size_kb", 0644, d_tracer,
6499 tr, &tracing_entries_fops);
6501 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
6502 tr, &tracing_total_entries_fops);
6504 trace_create_file("free_buffer", 0200, d_tracer,
6505 tr, &tracing_free_buffer_fops);
6507 trace_create_file("trace_marker", 0220, d_tracer,
6508 tr, &tracing_mark_fops);
6510 trace_create_file("trace_clock", 0644, d_tracer, tr,
6513 trace_create_file("tracing_on", 0644, d_tracer,
6514 tr, &rb_simple_fops);
6516 #ifdef CONFIG_TRACER_MAX_TRACE
6517 trace_create_file("tracing_max_latency", 0644, d_tracer,
6518 &tr->max_latency, &tracing_max_lat_fops);
6521 if (ftrace_create_function_files(tr, d_tracer))
6522 WARN(1, "Could not allocate function filter files");
6524 #ifdef CONFIG_TRACER_SNAPSHOT
6525 trace_create_file("snapshot", 0644, d_tracer,
6526 tr, &snapshot_fops);
6529 for_each_tracing_cpu(cpu)
6530 tracing_init_debugfs_percpu(tr, cpu);
6534 static __init int tracer_init_debugfs(void)
6536 struct dentry *d_tracer;
6538 trace_access_lock_init();
6540 d_tracer = tracing_init_dentry();
6541 if (IS_ERR(d_tracer))
6544 init_tracer_debugfs(&global_trace, d_tracer);
6546 trace_create_file("tracing_thresh", 0644, d_tracer,
6547 &global_trace, &tracing_thresh_fops);
6549 trace_create_file("README", 0444, d_tracer,
6550 NULL, &tracing_readme_fops);
6552 trace_create_file("saved_cmdlines", 0444, d_tracer,
6553 NULL, &tracing_saved_cmdlines_fops);
6555 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
6556 NULL, &tracing_saved_cmdlines_size_fops);
6558 #ifdef CONFIG_DYNAMIC_FTRACE
6559 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
6560 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
6563 create_trace_instances(d_tracer);
6565 create_trace_options_dir(&global_trace);
6570 static int trace_panic_handler(struct notifier_block *this,
6571 unsigned long event, void *unused)
6573 if (ftrace_dump_on_oops)
6574 ftrace_dump(ftrace_dump_on_oops);
6578 static struct notifier_block trace_panic_notifier = {
6579 .notifier_call = trace_panic_handler,
6581 .priority = 150 /* priority: INT_MAX >= x >= 0 */
6584 static int trace_die_handler(struct notifier_block *self,
6590 if (ftrace_dump_on_oops)
6591 ftrace_dump(ftrace_dump_on_oops);
6599 static struct notifier_block trace_die_notifier = {
6600 .notifier_call = trace_die_handler,
6605 * printk is set to a max of 1024; we really don't need it that big.
6606 * Nothing should be printing 1000 characters anyway.
6608 #define TRACE_MAX_PRINT 1000
6611 * Define here KERN_TRACE so that we have one place to modify
6612 * it if we decide to change what log level the ftrace dump
6615 #define KERN_TRACE KERN_EMERG
6618 trace_printk_seq(struct trace_seq *s)
6620 /* Probably should print a warning here. */
6621 if (s->seq.len >= TRACE_MAX_PRINT)
6622 s->seq.len = TRACE_MAX_PRINT;
6625 * More paranoid code. Although the buffer size is set to
6626 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
6627 * an extra layer of protection.
6629 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
6630 s->seq.len = s->seq.size - 1;
6632 /* should be zero ended, but we are paranoid. */
6633 s->buffer[s->seq.len] = 0;
6635 printk(KERN_TRACE "%s", s->buffer);
6640 void trace_init_global_iter(struct trace_iterator *iter)
6642 iter->tr = &global_trace;
6643 iter->trace = iter->tr->current_trace;
6644 iter->cpu_file = RING_BUFFER_ALL_CPUS;
6645 iter->trace_buffer = &global_trace.trace_buffer;
6647 if (iter->trace && iter->trace->open)
6648 iter->trace->open(iter);
6650 /* Annotate start of buffers if we had overruns */
6651 if (ring_buffer_overruns(iter->trace_buffer->buffer))
6652 iter->iter_flags |= TRACE_FILE_ANNOTATE;
6654 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6655 if (trace_clocks[iter->tr->clock_id].in_ns)
6656 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6659 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
6661 /* use static because iter can be a bit big for the stack */
6662 static struct trace_iterator iter;
6663 static atomic_t dump_running;
6664 unsigned int old_userobj;
6665 unsigned long flags;
6668 /* Only allow one dump user at a time. */
6669 if (atomic_inc_return(&dump_running) != 1) {
6670 atomic_dec(&dump_running);
6675 * Always turn off tracing when we dump.
6676 * We don't need to show trace output of what happens
6677 * between multiple crashes.
6679 * If the user does a sysrq-z, then they can re-enable
6680 * tracing with echo 1 > tracing_on.
6684 local_irq_save(flags);
6686 /* Simulate the iterator */
6687 trace_init_global_iter(&iter);
6689 for_each_tracing_cpu(cpu) {
6690 atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
6693 old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
6695 /* don't look at user memory in panic mode */
6696 trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
6698 switch (oops_dump_mode) {
6700 iter.cpu_file = RING_BUFFER_ALL_CPUS;
6703 iter.cpu_file = raw_smp_processor_id();
6708 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
6709 iter.cpu_file = RING_BUFFER_ALL_CPUS;
6712 printk(KERN_TRACE "Dumping ftrace buffer:\n");
6714 /* Did function tracer already get disabled? */
6715 if (ftrace_is_dead()) {
6716 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
6717 printk("# MAY BE MISSING FUNCTION EVENTS\n");
6721 * We need to stop all tracing on all CPUs to read
6722 * the next buffer. This is a bit expensive, but is
6723 * not done often. We fill all that we can read,
6724 * and then release the locks again.
6727 while (!trace_empty(&iter)) {
6730 printk(KERN_TRACE "---------------------------------\n");
6734 /* reset all but tr, trace, and overruns */
6735 memset(&iter.seq, 0,
6736 sizeof(struct trace_iterator) -
6737 offsetof(struct trace_iterator, seq));
6738 iter.iter_flags |= TRACE_FILE_LAT_FMT;
6741 if (trace_find_next_entry_inc(&iter) != NULL) {
6744 ret = print_trace_line(&iter);
6745 if (ret != TRACE_TYPE_NO_CONSUME)
6746 trace_consume(&iter);
6748 touch_nmi_watchdog();
6750 trace_printk_seq(&iter.seq);
6754 printk(KERN_TRACE " (ftrace buffer empty)\n");
6756 printk(KERN_TRACE "---------------------------------\n");
6759 trace_flags |= old_userobj;
6761 for_each_tracing_cpu(cpu) {
6762 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
6764 atomic_dec(&dump_running);
6765 local_irq_restore(flags);
6767 EXPORT_SYMBOL_GPL(ftrace_dump);
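/*
 * For illustration: besides the oops/panic notifiers registered at init
 * time, ftrace_dump() can be triggered manually through sysrq-z, e.g.
 *
 *	# echo z > /proc/sysrq-trigger
 *
 * After a dump, tracing stays off until re-enabled with
 * "echo 1 > tracing_on", as noted in the comment above.
 */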
6769 __init static int tracer_alloc_buffers(void)
6775 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
6778 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
6779 goto out_free_buffer_mask;
6781 /* Only allocate trace_printk buffers if a trace_printk exists */
6782 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
6783 /* Must be called before global_trace.buffer is allocated */
6784 trace_printk_init_buffers();
6786 /* To save memory, keep the ring buffer size to its minimum */
6787 if (ring_buffer_expanded)
6788 ring_buf_size = trace_buf_size;
6792 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
6793 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
6795 raw_spin_lock_init(&global_trace.start_lock);
6797 /* Used for event triggers */
6798 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
6800 goto out_free_cpumask;
6802 if (trace_create_savedcmd() < 0)
6803 goto out_free_temp_buffer;
6805 /* TODO: make the number of buffers hot pluggable with CPUS */
6806 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
6807 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
6809 goto out_free_savedcmd;
6812 if (global_trace.buffer_disabled)
6815 if (trace_boot_clock) {
6816 ret = tracing_set_clock(&global_trace, trace_boot_clock);
6818 pr_warning("Trace clock %s not defined, going back to default\n",
6823 * register_tracer() might reference current_trace, so it
6824 * needs to be set before we register anything. This is
6825 * just a bootstrap of current_trace anyway.
6827 global_trace.current_trace = &nop_trace;
6829 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
6831 ftrace_init_global_array_ops(&global_trace);
6833 register_tracer(&nop_trace);
6835 /* All seems OK, enable tracing */
6836 tracing_disabled = 0;
6838 atomic_notifier_chain_register(&panic_notifier_list,
6839 &trace_panic_notifier);
6841 register_die_notifier(&trace_die_notifier);
6843 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
6845 INIT_LIST_HEAD(&global_trace.systems);
6846 INIT_LIST_HEAD(&global_trace.events);
6847 list_add(&global_trace.list, &ftrace_trace_arrays);
6849 while (trace_boot_options) {
6852 option = strsep(&trace_boot_options, ",");
6853 trace_set_options(&global_trace, option);
6856 register_snapshot_cmd();
6861 free_saved_cmdlines_buffer(savedcmd);
6862 out_free_temp_buffer:
6863 ring_buffer_free(temp_buffer);
6865 free_cpumask_var(global_trace.tracing_cpumask);
6866 out_free_buffer_mask:
6867 free_cpumask_var(tracing_buffer_mask);
6872 void __init trace_init(void)
6874 if (tracepoint_printk) {
6875 tracepoint_print_iter =
6876 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
6877 if (WARN_ON(!tracepoint_print_iter))
6878 tracepoint_printk = 0;
6880 tracer_alloc_buffers();
6881 init_ftrace_syscalls();
6885 __init static int clear_boot_tracer(void)
6888 * The name of the default boot-up tracer lives in an init section.
6889 * This function is called at late_initcall time. If we did not
6890 * find the boot tracer, then clear it out, to prevent
6891 * later registration from accessing the buffer that is
6892 * about to be freed.
6894 if (!default_bootup_tracer)
6897 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
6898 default_bootup_tracer);
6899 default_bootup_tracer = NULL;
6904 fs_initcall(tracer_init_debugfs);
6905 late_initcall(clear_boot_tracer);