1 /*
2  * ring buffer based function tracer
3  *
4  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
6  *
7  * Originally taken from the RT patch by:
8  *    Arnaldo Carvalho de Melo <acme@redhat.com>
9  *
10  * Based on code from the latency_tracer, that is:
11  *  Copyright (C) 2004-2006 Ingo Molnar
12  *  Copyright (C) 2004 Nadia Yvette Chambers
13  */
14 #include <linux/ring_buffer.h>
15 #include <generated/utsrelease.h>
16 #include <linux/stacktrace.h>
17 #include <linux/writeback.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/notifier.h>
21 #include <linux/irqflags.h>
22 #include <linux/irq_work.h>
23 #include <linux/debugfs.h>
24 #include <linux/pagemap.h>
25 #include <linux/hardirq.h>
26 #include <linux/linkage.h>
27 #include <linux/uaccess.h>
28 #include <linux/kprobes.h>
29 #include <linux/ftrace.h>
30 #include <linux/module.h>
31 #include <linux/percpu.h>
32 #include <linux/splice.h>
33 #include <linux/kdebug.h>
34 #include <linux/string.h>
35 #include <linux/rwsem.h>
36 #include <linux/slab.h>
37 #include <linux/ctype.h>
38 #include <linux/init.h>
39 #include <linux/poll.h>
40 #include <linux/nmi.h>
41 #include <linux/fs.h>
42
43 #include "trace.h"
44 #include "trace_output.h"
45
46 /*
47  * On boot up, the ring buffer is set to the minimum size, so that
48  * we do not waste memory on systems that are not using tracing.
49  */
50 int ring_buffer_expanded;
51
52 /*
53  * We need to change this state when a selftest is running.
54  * A selftest will peek into the ring buffer to count the
55  * entries inserted during the selftest, although concurrent
56  * insertions into the ring buffer, such as trace_printk, could occur
57  * at the same time, giving false positive or negative results.
58  */
59 static bool __read_mostly tracing_selftest_running;
60
61 /*
62  * If a tracer is running, we do not want to run SELFTEST.
63  */
64 bool __read_mostly tracing_selftest_disabled;
65
66 /* For tracers that don't implement custom flags */
67 static struct tracer_opt dummy_tracer_opt[] = {
68         { }
69 };
70
71 static struct tracer_flags dummy_tracer_flags = {
72         .val = 0,
73         .opts = dummy_tracer_opt
74 };
75
76 static int dummy_set_flag(u32 old_flags, u32 bit, int set)
77 {
78         return 0;
79 }
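/*
 * Illustrative sketch only (the names below are hypothetical, not part
 * of this file): a tracer that does implement custom flags supplies its
 * own tables instead of the dummies above, e.g.:
 *
 *	static struct tracer_opt my_tracer_opts[] = {
 *		{ TRACER_OPT(my-verbose, 0x1) },
 *		{ }
 *	};
 *
 *	static struct tracer_flags my_tracer_flags = {
 *		.val	= 0,
 *		.opts	= my_tracer_opts,
 *	};
 *
 * The tracer's set_flag() callback then receives the old flag value and
 * the bit being toggled whenever the user flips the option.
 */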
80
81 /*
82  * To prevent the comm cache from being overwritten when no
83  * tracing is active, only save the comm when a trace event
84  * occurs.
85  */
86 static DEFINE_PER_CPU(bool, trace_cmdline_save);
87
88 /*
89  * When a reader is waiting for data, then this variable is
90  * set to true.
91  */
92 static bool trace_wakeup_needed;
93
94 static struct irq_work trace_work_wakeup;
95
96 /*
97  * Kill all tracing for good (never come back).
98  * It is initialized to 1 and is set back to zero only if the
99  * initialization of the tracer is successful; nothing else ever
100  * clears it.
101  */
102 static int tracing_disabled = 1;
103
104 DEFINE_PER_CPU(int, ftrace_cpu_disabled);
105
106 cpumask_var_t __read_mostly     tracing_buffer_mask;
107
108 /*
109  * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
110  *
111  * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
112  * is set, then ftrace_dump is called. This will output the contents
113  * of the ftrace buffers to the console.  This is very useful for
114  * capturing traces that lead to crashes and outputting them to a
115  * serial console.
116  *
117  * It is off by default, but you can enable it either by specifying
118  * "ftrace_dump_on_oops" on the kernel command line or by setting
119  * /proc/sys/kernel/ftrace_dump_on_oops.
120  * Set 1 if you want to dump the buffers of all CPUs
121  * Set 2 if you want to dump only the buffer of the CPU that triggered the oops
122  */
123
124 enum ftrace_dump_mode ftrace_dump_on_oops;
125
126 static int tracing_set_tracer(const char *buf);
127
128 #define MAX_TRACER_SIZE         100
129 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
130 static char *default_bootup_tracer;
131
132 static int __init set_cmdline_ftrace(char *str)
133 {
134         strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
135         default_bootup_tracer = bootup_tracer_buf;
136         /* We are using ftrace early, expand it */
137         ring_buffer_expanded = 1;
138         return 1;
139 }
140 __setup("ftrace=", set_cmdline_ftrace);
141
142 static int __init set_ftrace_dump_on_oops(char *str)
143 {
144         if (*str++ != '=' || !*str) {
145                 ftrace_dump_on_oops = DUMP_ALL;
146                 return 1;
147         }
148
149         if (!strcmp("orig_cpu", str)) {
150                 ftrace_dump_on_oops = DUMP_ORIG;
151                 return 1;
152         }
153
154         return 0;
155 }
156 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
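/*
 * Example boot command lines accepted by the parser above:
 *
 *	ftrace_dump_on_oops			-> DUMP_ALL (all CPUs)
 *	ftrace_dump_on_oops=orig_cpu		-> DUMP_ORIG (oopsing CPU only)
 *
 * Any other "=value" is rejected and the option has no effect.
 */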
157
158
159 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
160 static char *trace_boot_options __initdata;
161
162 static int __init set_trace_boot_options(char *str)
163 {
164         strncpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
165         trace_boot_options = trace_boot_options_buf;
166         return 0;
167 }
168 __setup("trace_options=", set_trace_boot_options);
169
170 unsigned long long ns2usecs(cycle_t nsec)
171 {
172         nsec += 500;
173         do_div(nsec, 1000);
174         return nsec;
175 }
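/*
 * The "+ 500" rounds to the nearest microsecond instead of truncating:
 * ns2usecs(1499) == 1, while ns2usecs(1500) == 2.
 */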
176
177 /*
178  * The global_trace is the descriptor that holds the tracing
179  * buffers for the live tracing. For each CPU, it contains
180  * a linked list of pages that will store trace entries. The
181  * page descriptor of the pages in memory is used to hold
182  * the linked list by linking the lru item in the page descriptor
183  * to each of the pages in the buffer per CPU.
184  *
185  * For each active CPU there is a data field that holds the
186  * pages for the buffer for that CPU. Each CPU has the same number
187  * of pages allocated for its buffer.
188  */
189 static struct trace_array       global_trace;
190
191 static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);
192
193 int filter_current_check_discard(struct ring_buffer *buffer,
194                                  struct ftrace_event_call *call, void *rec,
195                                  struct ring_buffer_event *event)
196 {
197         return filter_check_discard(call, rec, buffer, event);
198 }
199 EXPORT_SYMBOL_GPL(filter_current_check_discard);
200
201 cycle_t ftrace_now(int cpu)
202 {
203         u64 ts;
204
205         /* Early boot up does not have a buffer yet */
206         if (!global_trace.buffer)
207                 return trace_clock_local();
208
209         ts = ring_buffer_time_stamp(global_trace.buffer, cpu);
210         ring_buffer_normalize_time_stamp(global_trace.buffer, cpu, &ts);
211
212         return ts;
213 }
214
215 /*
216  * The max_tr is used to snapshot the global_trace when a maximum
217  * latency is reached. Some tracers will use this to store a maximum
218  * trace while it continues examining live traces.
219  *
220  * The buffers for the max_tr are set up the same as the global_trace.
221  * When a snapshot is taken, the linked list of the max_tr is swapped
222  * with the linked list of the global_trace and the buffers are reset for
223  * the global_trace so the tracing can continue.
224  */
225 static struct trace_array       max_tr;
226
227 static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);
228
229 int tracing_is_enabled(void)
230 {
231         return tracing_is_on();
232 }
233
234 /*
235  * trace_buf_size is the size in bytes that is allocated
236  * for a buffer. Note, the number of bytes is always rounded
237  * to page size.
238  *
239  * This number is purposely set to a low number of 16384.
240  * If a dump on oops happens, you will appreciate not having
241  * to wait for all that output. In any case, the size is
242  * configurable at both boot time and run time.
243  */
244 #define TRACE_BUF_SIZE_DEFAULT  1441792UL /* 16384 * 88 (sizeof(entry)) */
245
246 static unsigned long            trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
247
248 /* trace_types holds a linked list of available tracers. */
249 static struct tracer            *trace_types __read_mostly;
250
251 /* current_trace points to the tracer that is currently active */
252 static struct tracer            *current_trace __read_mostly;
253
254 /*
255  * trace_types_lock is used to protect the trace_types list.
256  */
257 static DEFINE_MUTEX(trace_types_lock);
258
259 /*
260  * serialize access to the ring buffer
261  *
262  * The ring buffer serializes readers, but that is only low-level
263  * protection. The validity of the events (as returned by
264  * ring_buffer_peek() etc.) is not protected by the ring buffer.
265  *
266  * The content of events may become garbage if we allow other processes
267  * to consume these events concurrently:
268  *   A) the page of the consumed events may become a normal page
269  *      (not a reader page) in the ring buffer, and this page will be
270  *      rewritten by the event producer.
271  *   B) the page of the consumed events may become a page for
272  *      splice_read, and this page will be returned to the system.
273  *
274  * These primitives allow multiple processes to access different per-cpu
275  * ring buffers concurrently.
276  *
277  * These primitives don't distinguish read-only from read-consume access.
278  * Multiple read-only accesses are also serialized.
279  */
280
281 #ifdef CONFIG_SMP
282 static DECLARE_RWSEM(all_cpu_access_lock);
283 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
284
285 static inline void trace_access_lock(int cpu)
286 {
287         if (cpu == TRACE_PIPE_ALL_CPU) {
288                 /* gain it for accessing the whole ring buffer. */
289                 down_write(&all_cpu_access_lock);
290         } else {
291                 /* gain it for accessing a cpu ring buffer. */
292
293                 /* Firstly block other trace_access_lock(TRACE_PIPE_ALL_CPU). */
294                 down_read(&all_cpu_access_lock);
295
296                 /* Secondly block other access to this @cpu ring buffer. */
297                 mutex_lock(&per_cpu(cpu_access_lock, cpu));
298         }
299 }
300
301 static inline void trace_access_unlock(int cpu)
302 {
303         if (cpu == TRACE_PIPE_ALL_CPU) {
304                 up_write(&all_cpu_access_lock);
305         } else {
306                 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
307                 up_read(&all_cpu_access_lock);
308         }
309 }
310
311 static inline void trace_access_lock_init(void)
312 {
313         int cpu;
314
315         for_each_possible_cpu(cpu)
316                 mutex_init(&per_cpu(cpu_access_lock, cpu));
317 }
318
319 #else
320
321 static DEFINE_MUTEX(access_lock);
322
323 static inline void trace_access_lock(int cpu)
324 {
325         (void)cpu;
326         mutex_lock(&access_lock);
327 }
328
329 static inline void trace_access_unlock(int cpu)
330 {
331         (void)cpu;
332         mutex_unlock(&access_lock);
333 }
334
335 static inline void trace_access_lock_init(void)
336 {
337 }
338
339 #endif
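/*
 * Sketch of the intended reader-side usage (the real callers are the
 * trace_pipe read/splice paths later in this file):
 *
 *	trace_access_lock(cpu);
 *	... peek at or consume events of @cpu, or of all CPUs when
 *	    cpu == TRACE_PIPE_ALL_CPU ...
 *	trace_access_unlock(cpu);
 */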
340
341 /* trace_wait is a waitqueue for tasks blocked on trace_poll */
342 static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
343
344 /* trace_flags holds trace_options default values */
345 unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
346         TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
347         TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
348         TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS;
349
350 static int trace_stop_count;
351 static DEFINE_RAW_SPINLOCK(tracing_start_lock);
352
353 /**
354  * trace_wake_up - wake up tasks waiting for trace input
355  *
356  * Runs as an irq_work callback to wake up any task that is blocked on
357  * the trace_wait queue. This is used with trace_poll for tasks polling
358  * the trace.
359  */
360 static void trace_wake_up(struct irq_work *work)
361 {
362         wake_up_all(&trace_wait);
363
364 }
365
366 /**
367  * tracing_on - enable tracing buffers
368  *
369  * This function enables tracing buffers that may have been
370  * disabled with tracing_off.
371  */
372 void tracing_on(void)
373 {
374         if (global_trace.buffer)
375                 ring_buffer_record_on(global_trace.buffer);
376         /*
377          * This flag is only looked at when buffers haven't been
378          * allocated yet. We don't really care about the race
379          * between setting this flag and actually turning
380          * on the buffer.
381          */
382         global_trace.buffer_disabled = 0;
383 }
384 EXPORT_SYMBOL_GPL(tracing_on);
385
386 /**
387  * tracing_off - turn off tracing buffers
388  *
389  * This function stops the tracing buffers from recording data.
390  * It does not disable any overhead the tracers themselves may
391  * be causing. This function simply causes all recording to
392  * the ring buffers to fail.
393  */
394 void tracing_off(void)
395 {
396         if (global_trace.buffer)
397                 ring_buffer_record_off(global_trace.buffer);
398         /*
399          * This flag is only looked at when buffers haven't been
400          * allocated yet. We don't really care about the race
401          * between setting this flag and actually turning
402          * on the buffer.
403          */
404         global_trace.buffer_disabled = 1;
405 }
406 EXPORT_SYMBOL_GPL(tracing_off);
407
408 /**
409  * tracing_is_on - show state of ring buffers enabled
410  */
411 int tracing_is_on(void)
412 {
413         if (global_trace.buffer)
414                 return ring_buffer_record_is_on(global_trace.buffer);
415         return !global_trace.buffer_disabled;
416 }
417 EXPORT_SYMBOL_GPL(tracing_is_on);
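/*
 * A common debugging pattern with these helpers, sketched (the
 * condition is a placeholder): stop the ring buffer as soon as the
 * problem is detected, so the events leading up to it are preserved
 * for reading afterwards.
 *
 *	if (unlikely(something_went_wrong))
 *		tracing_off();
 */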
418
419 static int __init set_buf_size(char *str)
420 {
421         unsigned long buf_size;
422
423         if (!str)
424                 return 0;
425         buf_size = memparse(str, &str);
426         /* nr_entries can not be zero */
427         if (buf_size == 0)
428                 return 0;
429         trace_buf_size = buf_size;
430         return 1;
431 }
432 __setup("trace_buf_size=", set_buf_size);
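/*
 * Since the size is parsed with memparse(), the usual suffixes work on
 * the command line, e.g.:
 *
 *	trace_buf_size=1441792
 *	trace_buf_size=1M
 */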
433
434 static int __init set_tracing_thresh(char *str)
435 {
436         unsigned long threshold;
437         int ret;
438
439         if (!str)
440                 return 0;
441         ret = kstrtoul(str, 0, &threshold);
442         if (ret < 0)
443                 return 0;
444         tracing_thresh = threshold * 1000;
445         return 1;
446 }
447 __setup("tracing_thresh=", set_tracing_thresh);
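/*
 * The threshold is given in microseconds on the command line but is
 * stored in nanoseconds, so "tracing_thresh=100" stores 100000.
 */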
448
449 unsigned long nsecs_to_usecs(unsigned long nsecs)
450 {
451         return nsecs / 1000;
452 }
453
454 /* These must match the bit positions in trace_iterator_flags */
455 static const char *trace_options[] = {
456         "print-parent",
457         "sym-offset",
458         "sym-addr",
459         "verbose",
460         "raw",
461         "hex",
462         "bin",
463         "block",
464         "stacktrace",
465         "trace_printk",
466         "ftrace_preempt",
467         "branch",
468         "annotate",
469         "userstacktrace",
470         "sym-userobj",
471         "printk-msg-only",
472         "context-info",
473         "latency-format",
474         "sleep-time",
475         "graph-time",
476         "record-cmd",
477         "overwrite",
478         "disable_on_free",
479         "irq-info",
480         "markers",
481         NULL
482 };
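/*
 * Each option above can be toggled at run time through the
 * trace_options file, with a "no" prefix clearing it, e.g.:
 *
 *	echo stacktrace > /sys/kernel/debug/tracing/trace_options
 *	echo nostacktrace > /sys/kernel/debug/tracing/trace_options
 */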
483
484 static struct {
485         u64 (*func)(void);
486         const char *name;
487         int in_ns;              /* is this clock in nanoseconds? */
488 } trace_clocks[] = {
489         { trace_clock_local,    "local",        1 },
490         { trace_clock_global,   "global",       1 },
491         { trace_clock_counter,  "counter",      0 },
492         ARCH_TRACE_CLOCKS
493 };
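/*
 * The active clock can be changed at run time by writing one of the
 * names above to the trace_clock file, e.g.:
 *
 *	echo global > /sys/kernel/debug/tracing/trace_clock
 */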
494
495 int trace_clock_id;
496
497 /*
498  * trace_parser_get_init - allocates the buffer for the trace parser
499  */
500 int trace_parser_get_init(struct trace_parser *parser, int size)
501 {
502         memset(parser, 0, sizeof(*parser));
503
504         parser->buffer = kmalloc(size, GFP_KERNEL);
505         if (!parser->buffer)
506                 return 1;
507
508         parser->size = size;
509         return 0;
510 }
511
512 /*
513  * trace_parser_put - frees the buffer for the trace parser
514  */
515 void trace_parser_put(struct trace_parser *parser)
516 {
517         kfree(parser->buffer);
518 }
519
520 /*
521  * trace_get_user - reads the user input string separated by space
522  * (matched by isspace(ch))
523  *
524  * For each string found the 'struct trace_parser' is updated,
525  * and the function returns.
526  *
527  * Returns number of bytes read.
528  *
529  * See kernel/trace/trace.h for 'struct trace_parser' details.
530  */
531 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
532         size_t cnt, loff_t *ppos)
533 {
534         char ch;
535         size_t read = 0;
536         ssize_t ret;
537
538         if (!*ppos)
539                 trace_parser_clear(parser);
540
541         ret = get_user(ch, ubuf++);
542         if (ret)
543                 goto out;
544
545         read++;
546         cnt--;
547
548         /*
549          * If the parser is not finished with the last write,
550          * continue reading the user input without skipping spaces.
551          */
552         if (!parser->cont) {
553                 /* skip white space */
554                 while (cnt && isspace(ch)) {
555                         ret = get_user(ch, ubuf++);
556                         if (ret)
557                                 goto out;
558                         read++;
559                         cnt--;
560                 }
561
562                 /* only spaces were written */
563                 if (isspace(ch)) {
564                         *ppos += read;
565                         ret = read;
566                         goto out;
567                 }
568
569                 parser->idx = 0;
570         }
571
572         /* read the non-space input */
573         while (cnt && !isspace(ch)) {
574                 if (parser->idx < parser->size - 1)
575                         parser->buffer[parser->idx++] = ch;
576                 else {
577                         ret = -EINVAL;
578                         goto out;
579                 }
580                 ret = get_user(ch, ubuf++);
581                 if (ret)
582                         goto out;
583                 read++;
584                 cnt--;
585         }
586
587         /* We either got finished input or we have to wait for another call. */
588         if (isspace(ch)) {
589                 parser->buffer[parser->idx] = 0;
590                 parser->cont = false;
591         } else {
592                 parser->cont = true;
593                 parser->buffer[parser->idx++] = ch;
594         }
595
596         *ppos += read;
597         ret = read;
598
599 out:
600         return ret;
601 }
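/*
 * Sketch of how a write() handler typically drives the parser (error
 * handling elided; PAGE_SIZE is just an example buffer size):
 *
 *	struct trace_parser parser;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser))
 *		... act on the NUL-terminated parser.buffer ...
 *	trace_parser_put(&parser);
 */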
602
603 ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
604 {
605         int len;
606         int ret;
607
608         if (!cnt)
609                 return 0;
610
611         if (s->len <= s->readpos)
612                 return -EBUSY;
613
614         len = s->len - s->readpos;
615         if (cnt > len)
616                 cnt = len;
617         ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
618         if (ret == cnt)
619                 return -EFAULT;
620
621         cnt -= ret;
622
623         s->readpos += cnt;
624         return cnt;
625 }
626
627 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
628 {
629         int len;
630
631         if (s->len <= s->readpos)
632                 return -EBUSY;
633
634         len = s->len - s->readpos;
635         if (cnt > len)
636                 cnt = len;
637         memcpy(buf, s->buffer + s->readpos, cnt);
638
639         s->readpos += cnt;
640         return cnt;
641 }
642
643 /*
644  * ftrace_max_lock is used to protect the swapping of buffers
645  * when taking a max snapshot. The buffers themselves are
646  * protected by per_cpu spinlocks. But the action of the swap
647  * needs its own lock.
648  *
649  * This is defined as an arch_spinlock_t in order to help
650  * with performance when lockdep debugging is enabled.
651  *
652  * It is also used in other places outside of update_max_tr,
653  * so it needs to be defined outside of the
654  * CONFIG_TRACER_MAX_TRACE #ifdef.
655  */
656 static arch_spinlock_t ftrace_max_lock =
657         (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
658
659 unsigned long __read_mostly     tracing_thresh;
660
661 #ifdef CONFIG_TRACER_MAX_TRACE
662 unsigned long __read_mostly     tracing_max_latency;
663
664 /*
665  * Copy the new maximum trace into the separate maximum-trace
666  * structure. (this way the maximum trace is permanently saved,
667  * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
668  */
669 static void
670 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
671 {
672         struct trace_array_cpu *data = tr->data[cpu];
673         struct trace_array_cpu *max_data;
674
675         max_tr.cpu = cpu;
676         max_tr.time_start = data->preempt_timestamp;
677
678         max_data = max_tr.data[cpu];
679         max_data->saved_latency = tracing_max_latency;
680         max_data->critical_start = data->critical_start;
681         max_data->critical_end = data->critical_end;
682
683         memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
684         max_data->pid = tsk->pid;
685         max_data->uid = task_uid(tsk);
686         max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
687         max_data->policy = tsk->policy;
688         max_data->rt_priority = tsk->rt_priority;
689
690         /* record this tasks comm */
691         tracing_record_cmdline(tsk);
692 }
693
694 /**
695  * update_max_tr - snapshot all trace buffers from global_trace to max_tr
696  * @tr: tracer
697  * @tsk: the task with the latency
698  * @cpu: The cpu that initiated the trace.
699  *
700  * Flip the buffers between the @tr and the max_tr and record information
701  * about which task was the cause of this latency.
702  */
703 void
704 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
705 {
706         struct ring_buffer *buf = tr->buffer;
707
708         if (trace_stop_count)
709                 return;
710
711         WARN_ON_ONCE(!irqs_disabled());
712         if (!current_trace->use_max_tr) {
713                 WARN_ON_ONCE(1);
714                 return;
715         }
716         arch_spin_lock(&ftrace_max_lock);
717
718         tr->buffer = max_tr.buffer;
719         max_tr.buffer = buf;
720
721         __update_max_tr(tr, tsk, cpu);
722         arch_spin_unlock(&ftrace_max_lock);
723 }
724
725 /**
726  * update_max_tr_single - only copy one trace over, and reset the rest
727  * @tr: tracer
728  * @tsk: task with the latency
729  * @cpu: the cpu of the buffer to copy.
730  *
731  * Flip the trace of a single CPU buffer between the @tr and the max_tr.
732  */
733 void
734 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
735 {
736         int ret;
737
738         if (trace_stop_count)
739                 return;
740
741         WARN_ON_ONCE(!irqs_disabled());
742         if (!current_trace->use_max_tr) {
743                 WARN_ON_ONCE(1);
744                 return;
745         }
746
747         arch_spin_lock(&ftrace_max_lock);
748
749         ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);
750
751         if (ret == -EBUSY) {
752                 /*
753                  * We failed to swap the buffer due to a commit taking
754                  * place on this CPU. We fail to record, but we reset
755                  * the max trace buffer (no one writes directly to it)
756                  * and flag that it failed.
757                  */
758                 trace_array_printk(&max_tr, _THIS_IP_,
759                         "Failed to swap buffers due to commit in progress\n");
760         }
761
762         WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
763
764         __update_max_tr(tr, tsk, cpu);
765         arch_spin_unlock(&ftrace_max_lock);
766 }
767 #endif /* CONFIG_TRACER_MAX_TRACE */
768
769 static void default_wait_pipe(struct trace_iterator *iter)
770 {
771         DEFINE_WAIT(wait);
772
773         prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);
774
775         /*
776          * The events can happen in critical sections where
777          * checking a work queue can cause deadlocks.
778          * After adding a task to the queue, this flag is set
779          * only to notify events to try to wake up the queue
780          * using irq_work.
781          *
782          * We don't clear it even if the buffer is no longer
783          * empty. The flag only causes the next event to run
784          * irq_work to do the work queue wake up. The worst
785          * that can happen if we race with !trace_empty() is that
786          * an event will cause an irq_work to try to wake up
787          * an empty queue.
788          *
789          * There's no reason to protect this flag either, as
790          * the work queue and irq_work logic will do the necessary
791          * synchronization for the wake ups. The only thing
792          * that is necessary is that the wake up happens after
793          * a task has been queued. Spurious wake ups are OK.
794          */
795         trace_wakeup_needed = true;
796
797         if (trace_empty(iter))
798                 schedule();
799
800         finish_wait(&trace_wait, &wait);
801 }
802
803 /**
804  * register_tracer - register a tracer with the ftrace system.
805  * @type: the plugin for the tracer
806  *
807  * Register a new plugin tracer.
808  */
809 int register_tracer(struct tracer *type)
810 {
811         struct tracer *t;
812         int ret = 0;
813
814         if (!type->name) {
815                 pr_info("Tracer must have a name\n");
816                 return -1;
817         }
818
819         if (strlen(type->name) >= MAX_TRACER_SIZE) {
820                 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
821                 return -1;
822         }
823
824         mutex_lock(&trace_types_lock);
825
826         tracing_selftest_running = true;
827
828         for (t = trace_types; t; t = t->next) {
829                 if (strcmp(type->name, t->name) == 0) {
830                         /* already found */
831                         pr_info("Tracer %s already registered\n",
832                                 type->name);
833                         ret = -1;
834                         goto out;
835                 }
836         }
837
838         if (!type->set_flag)
839                 type->set_flag = &dummy_set_flag;
840         if (!type->flags)
841                 type->flags = &dummy_tracer_flags;
842         else
843                 if (!type->flags->opts)
844                         type->flags->opts = dummy_tracer_opt;
845         if (!type->wait_pipe)
846                 type->wait_pipe = default_wait_pipe;
847
848
849 #ifdef CONFIG_FTRACE_STARTUP_TEST
850         if (type->selftest && !tracing_selftest_disabled) {
851                 struct tracer *saved_tracer = current_trace;
852                 struct trace_array *tr = &global_trace;
853
854                 /*
855                  * Run a selftest on this tracer.
856                  * Here we reset the trace buffer, and set the current
857                  * tracer to be this tracer. The tracer can then run some
858                  * internal tracing to verify that everything is in order.
859                  * If we fail, we do not register this tracer.
860                  */
861                 tracing_reset_online_cpus(tr);
862
863                 current_trace = type;
864
865                 /* If we expanded the buffers, make sure the max is expanded too */
866                 if (ring_buffer_expanded && type->use_max_tr)
867                         ring_buffer_resize(max_tr.buffer, trace_buf_size,
868                                                 RING_BUFFER_ALL_CPUS);
869
870                 /* the test is responsible for initializing and enabling */
871                 pr_info("Testing tracer %s: ", type->name);
872                 ret = type->selftest(type, tr);
873                 /* the test is responsible for resetting too */
874                 current_trace = saved_tracer;
875                 if (ret) {
876                         printk(KERN_CONT "FAILED!\n");
877                         /* Add the warning after printing 'FAILED' */
878                         WARN_ON(1);
879                         goto out;
880                 }
881                 /* Only reset on passing, to avoid touching corrupted buffers */
882                 tracing_reset_online_cpus(tr);
883
884                 /* Shrink the max buffer again */
885                 if (ring_buffer_expanded && type->use_max_tr)
886                         ring_buffer_resize(max_tr.buffer, 1,
887                                                 RING_BUFFER_ALL_CPUS);
888
889                 printk(KERN_CONT "PASSED\n");
890         }
891 #endif
892
893         type->next = trace_types;
894         trace_types = type;
895
896  out:
897         tracing_selftest_running = false;
898         mutex_unlock(&trace_types_lock);
899
900         if (ret || !default_bootup_tracer)
901                 goto out_unlock;
902
903         if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
904                 goto out_unlock;
905
906         printk(KERN_INFO "Starting tracer '%s'\n", type->name);
907         /* Do we want this tracer to start on bootup? */
908         tracing_set_tracer(type->name);
909         default_bootup_tracer = NULL;
910         /* Disable other selftests, since this tracer will break them. */
911         tracing_selftest_disabled = 1;
912 #ifdef CONFIG_FTRACE_STARTUP_TEST
913         printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
914                type->name);
915 #endif
916
917  out_unlock:
918         return ret;
919 }
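/*
 * Minimal registration sketch (all names hypothetical); a real tracer
 * also fills in callbacks such as init and reset:
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name	= "my_tracer",
 *		.init	= my_tracer_init,
 *		.reset	= my_tracer_reset,
 *	};
 *
 *	ret = register_tracer(&my_tracer);
 */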
920
921 void tracing_reset(struct trace_array *tr, int cpu)
922 {
923         struct ring_buffer *buffer = tr->buffer;
924
925         ring_buffer_record_disable(buffer);
926
927         /* Make sure all commits have finished */
928         synchronize_sched();
929         ring_buffer_reset_cpu(buffer, cpu);
930
931         ring_buffer_record_enable(buffer);
932 }
933
934 void tracing_reset_online_cpus(struct trace_array *tr)
935 {
936         struct ring_buffer *buffer = tr->buffer;
937         int cpu;
938
939         ring_buffer_record_disable(buffer);
940
941         /* Make sure all commits have finished */
942         synchronize_sched();
943
944         tr->time_start = ftrace_now(tr->cpu);
945
946         for_each_online_cpu(cpu)
947                 ring_buffer_reset_cpu(buffer, cpu);
948
949         ring_buffer_record_enable(buffer);
950 }
951
952 void tracing_reset_current(int cpu)
953 {
954         tracing_reset(&global_trace, cpu);
955 }
956
957 void tracing_reset_current_online_cpus(void)
958 {
959         tracing_reset_online_cpus(&global_trace);
960 }
961
962 #define SAVED_CMDLINES 128
963 #define NO_CMDLINE_MAP UINT_MAX
964 static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
965 static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
966 static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
967 static int cmdline_idx;
968 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
969
970 /* temporarily disable recording */
971 static atomic_t trace_record_cmdline_disabled __read_mostly;
972
973 static void trace_init_cmdlines(void)
974 {
975         memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
976         memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
977         cmdline_idx = 0;
978 }
979
980 int is_tracing_stopped(void)
981 {
982         return trace_stop_count;
983 }
984
985 /**
986  * ftrace_off_permanent - disable all ftrace code permanently
987  *
988  * This should only be called when a serious anomaly has
989  * been detected.  This will turn off function tracing,
990  * ring buffers, and other tracing utilities. It takes no
991  * locks and can be called from any context.
992  */
993 void ftrace_off_permanent(void)
994 {
995         tracing_disabled = 1;
996         ftrace_stop();
997         tracing_off_permanent();
998 }
999
1000 /**
1001  * tracing_start - quick start of the tracer
1002  *
1003  * If tracing is enabled but was stopped by tracing_stop,
1004  * this will start the tracer back up.
1005  */
1006 void tracing_start(void)
1007 {
1008         struct ring_buffer *buffer;
1009         unsigned long flags;
1010
1011         if (tracing_disabled)
1012                 return;
1013
1014         raw_spin_lock_irqsave(&tracing_start_lock, flags);
1015         if (--trace_stop_count) {
1016                 if (trace_stop_count < 0) {
1017                         /* Someone screwed up their debugging */
1018                         WARN_ON_ONCE(1);
1019                         trace_stop_count = 0;
1020                 }
1021                 goto out;
1022         }
1023
1024         /* Prevent the buffers from switching */
1025         arch_spin_lock(&ftrace_max_lock);
1026
1027         buffer = global_trace.buffer;
1028         if (buffer)
1029                 ring_buffer_record_enable(buffer);
1030
1031         buffer = max_tr.buffer;
1032         if (buffer)
1033                 ring_buffer_record_enable(buffer);
1034
1035         arch_spin_unlock(&ftrace_max_lock);
1036
1037         ftrace_start();
1038  out:
1039         raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
1040 }
1041
1042 /**
1043  * tracing_stop - quick stop of the tracer
1044  *
1045  * Light weight way to stop tracing. Use in conjunction with
1046  * tracing_start.
1047  */
1048 void tracing_stop(void)
1049 {
1050         struct ring_buffer *buffer;
1051         unsigned long flags;
1052
1053         ftrace_stop();
1054         raw_spin_lock_irqsave(&tracing_start_lock, flags);
1055         if (trace_stop_count++)
1056                 goto out;
1057
1058         /* Prevent the buffers from switching */
1059         arch_spin_lock(&ftrace_max_lock);
1060
1061         buffer = global_trace.buffer;
1062         if (buffer)
1063                 ring_buffer_record_disable(buffer);
1064
1065         buffer = max_tr.buffer;
1066         if (buffer)
1067                 ring_buffer_record_disable(buffer);
1068
1069         arch_spin_unlock(&ftrace_max_lock);
1070
1071  out:
1072         raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
1073 }
1074
1075 void trace_stop_cmdline_recording(void);
1076
1077 static void trace_save_cmdline(struct task_struct *tsk)
1078 {
1079         unsigned pid, idx;
1080
1081         if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
1082                 return;
1083
1084         /*
1085          * It's not the end of the world if we don't get
1086          * the lock, but we also don't want to spin
1087          * nor do we want to disable interrupts,
1088          * so if we miss here, then better luck next time.
1089          */
1090         if (!arch_spin_trylock(&trace_cmdline_lock))
1091                 return;
1092
1093         idx = map_pid_to_cmdline[tsk->pid];
1094         if (idx == NO_CMDLINE_MAP) {
1095                 idx = (cmdline_idx + 1) % SAVED_CMDLINES;
1096
1097                 /*
1098                  * Check whether the cmdline buffer at idx has a pid
1099                  * mapped. We are going to overwrite that entry so we
1100                  * need to clear the map_pid_to_cmdline. Otherwise we
1101                  * would read the new comm for the old pid.
1102                  */
1103                 pid = map_cmdline_to_pid[idx];
1104                 if (pid != NO_CMDLINE_MAP)
1105                         map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
1106
1107                 map_cmdline_to_pid[idx] = tsk->pid;
1108                 map_pid_to_cmdline[tsk->pid] = idx;
1109
1110                 cmdline_idx = idx;
1111         }
1112
1113         memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
1114
1115         arch_spin_unlock(&trace_cmdline_lock);
1116 }
1117
1118 void trace_find_cmdline(int pid, char comm[])
1119 {
1120         unsigned map;
1121
1122         if (!pid) {
1123                 strcpy(comm, "<idle>");
1124                 return;
1125         }
1126
1127         if (WARN_ON_ONCE(pid < 0)) {
1128                 strcpy(comm, "<XXX>");
1129                 return;
1130         }
1131
1132         if (pid > PID_MAX_DEFAULT) {
1133                 strcpy(comm, "<...>");
1134                 return;
1135         }
1136
1137         preempt_disable();
1138         arch_spin_lock(&trace_cmdline_lock);
1139         map = map_pid_to_cmdline[pid];
1140         if (map != NO_CMDLINE_MAP)
1141                 strcpy(comm, saved_cmdlines[map]);
1142         else
1143                 strcpy(comm, "<...>");
1144
1145         arch_spin_unlock(&trace_cmdline_lock);
1146         preempt_enable();
1147 }
1148
1149 void tracing_record_cmdline(struct task_struct *tsk)
1150 {
1151         if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
1152                 return;
1153
1154         if (!__this_cpu_read(trace_cmdline_save))
1155                 return;
1156
1157         __this_cpu_write(trace_cmdline_save, false);
1158
1159         trace_save_cmdline(tsk);
1160 }
1161
1162 void
1163 tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1164                              int pc)
1165 {
1166         struct task_struct *tsk = current;
1167
1168         entry->preempt_count            = pc & 0xff;
1169         entry->pid                      = (tsk) ? tsk->pid : 0;
1170         entry->padding                  = 0;
1171         entry->flags =
1172 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
1173                 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
1174 #else
1175                 TRACE_FLAG_IRQS_NOSUPPORT |
1176 #endif
1177                 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1178                 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
1179                 (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
1180 }
1181 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
1182
1183 struct ring_buffer_event *
1184 trace_buffer_lock_reserve(struct ring_buffer *buffer,
1185                           int type,
1186                           unsigned long len,
1187                           unsigned long flags, int pc)
1188 {
1189         struct ring_buffer_event *event;
1190
1191         event = ring_buffer_lock_reserve(buffer, len);
1192         if (event != NULL) {
1193                 struct trace_entry *ent = ring_buffer_event_data(event);
1194
1195                 tracing_generic_entry_update(ent, flags, pc);
1196                 ent->type = type;
1197         }
1198
1199         return event;
1200 }
1201
1202 void
1203 __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1204 {
1205         __this_cpu_write(trace_cmdline_save, true);
1206         if (trace_wakeup_needed) {
1207                 trace_wakeup_needed = false;
1208                 /* irq_work_queue() supplies its own memory barriers */
1209                 irq_work_queue(&trace_work_wakeup);
1210         }
1211         ring_buffer_unlock_commit(buffer, event);
1212 }
1213
1214 static inline void
1215 __trace_buffer_unlock_commit(struct ring_buffer *buffer,
1216                              struct ring_buffer_event *event,
1217                              unsigned long flags, int pc)
1218 {
1219         __buffer_unlock_commit(buffer, event);
1220
1221         ftrace_trace_stack(buffer, flags, 6, pc);
1222         ftrace_trace_userstack(buffer, flags, pc);
1223 }
1224
1225 void trace_buffer_unlock_commit(struct ring_buffer *buffer,
1226                                 struct ring_buffer_event *event,
1227                                 unsigned long flags, int pc)
1228 {
1229         __trace_buffer_unlock_commit(buffer, event, flags, pc);
1230 }
1231 EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
1232
1233 struct ring_buffer_event *
1234 trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1235                                   int type, unsigned long len,
1236                                   unsigned long flags, int pc)
1237 {
1238         *current_rb = global_trace.buffer;
1239         return trace_buffer_lock_reserve(*current_rb,
1240                                          type, len, flags, pc);
1241 }
1242 EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
1243
1244 void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
1245                                         struct ring_buffer_event *event,
1246                                         unsigned long flags, int pc)
1247 {
1248         __trace_buffer_unlock_commit(buffer, event, flags, pc);
1249 }
1250 EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
1251
1252 void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
1253                                      struct ring_buffer_event *event,
1254                                      unsigned long flags, int pc,
1255                                      struct pt_regs *regs)
1256 {
1257         __buffer_unlock_commit(buffer, event);
1258
1259         ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
1260         ftrace_trace_userstack(buffer, flags, pc);
1261 }
1262 EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
1263
1264 void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1265                                          struct ring_buffer_event *event)
1266 {
1267         ring_buffer_discard_commit(buffer, event);
1268 }
1269 EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
1270
1271 void
1272 trace_function(struct trace_array *tr,
1273                unsigned long ip, unsigned long parent_ip, unsigned long flags,
1274                int pc)
1275 {
1276         struct ftrace_event_call *call = &event_function;
1277         struct ring_buffer *buffer = tr->buffer;
1278         struct ring_buffer_event *event;
1279         struct ftrace_entry *entry;
1280
1281         /* If we are reading the ring buffer, don't trace */
1282         if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
1283                 return;
1284
1285         event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
1286                                           flags, pc);
1287         if (!event)
1288                 return;
1289         entry   = ring_buffer_event_data(event);
1290         entry->ip                       = ip;
1291         entry->parent_ip                = parent_ip;
1292
1293         if (!filter_check_discard(call, entry, buffer, event))
1294                 __buffer_unlock_commit(buffer, event);
1295 }
1296
1297 void
1298 ftrace(struct trace_array *tr, struct trace_array_cpu *data,
1299        unsigned long ip, unsigned long parent_ip, unsigned long flags,
1300        int pc)
1301 {
1302         if (likely(!atomic_read(&data->disabled)))
1303                 trace_function(tr, ip, parent_ip, flags, pc);
1304 }
1305
1306 #ifdef CONFIG_STACKTRACE
1307
1308 #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1309 struct ftrace_stack {
1310         unsigned long           calls[FTRACE_STACK_MAX_ENTRIES];
1311 };
1312
1313 static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1314 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1315
1316 static void __ftrace_trace_stack(struct ring_buffer *buffer,
1317                                  unsigned long flags,
1318                                  int skip, int pc, struct pt_regs *regs)
1319 {
1320         struct ftrace_event_call *call = &event_kernel_stack;
1321         struct ring_buffer_event *event;
1322         struct stack_entry *entry;
1323         struct stack_trace trace;
1324         int use_stack;
1325         int size = FTRACE_STACK_ENTRIES;
1326
1327         trace.nr_entries        = 0;
1328         trace.skip              = skip;
1329
1330         /*
1331          * Since events can happen in NMIs there's no safe way to
1332          * use the per-cpu ftrace_stacks. We reserve it and if an interrupt
1333          * or NMI comes in, it will just have to use the default
1334          * FTRACE_STACK_ENTRIES stack size.
1335          */
1336         preempt_disable_notrace();
1337
1338         use_stack = ++__get_cpu_var(ftrace_stack_reserve);
1339         /*
1340          * We don't need any atomic variables, just a barrier.
1341          * If an interrupt comes in, we don't care, because it would
1342          * have exited and put the counter back to what we want.
1343          * We just need a barrier to keep gcc from moving things
1344          * around.
1345          */
1346         barrier();
1347         if (use_stack == 1) {
1348                 trace.entries           = &__get_cpu_var(ftrace_stack).calls[0];
1349                 trace.max_entries       = FTRACE_STACK_MAX_ENTRIES;
1350
1351                 if (regs)
1352                         save_stack_trace_regs(regs, &trace);
1353                 else
1354                         save_stack_trace(&trace);
1355
1356                 if (trace.nr_entries > size)
1357                         size = trace.nr_entries;
1358         } else
1359                 /* From now on, use_stack is a boolean */
1360                 use_stack = 0;
1361
1362         size *= sizeof(unsigned long);
1363
1364         event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1365                                           sizeof(*entry) + size, flags, pc);
1366         if (!event)
1367                 goto out;
1368         entry = ring_buffer_event_data(event);
1369
1370         memset(&entry->caller, 0, size);
1371
1372         if (use_stack)
1373                 memcpy(&entry->caller, trace.entries,
1374                        trace.nr_entries * sizeof(unsigned long));
1375         else {
1376                 trace.max_entries       = FTRACE_STACK_ENTRIES;
1377                 trace.entries           = entry->caller;
1378                 if (regs)
1379                         save_stack_trace_regs(regs, &trace);
1380                 else
1381                         save_stack_trace(&trace);
1382         }
1383
1384         entry->size = trace.nr_entries;
1385
1386         if (!filter_check_discard(call, entry, buffer, event))
1387                 __buffer_unlock_commit(buffer, event);
1388
1389  out:
1390         /* Again, don't let gcc optimize things here */
1391         barrier();
1392         __get_cpu_var(ftrace_stack_reserve)--;
1393         preempt_enable_notrace();
1394
1395 }
1396
1397 void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
1398                              int skip, int pc, struct pt_regs *regs)
1399 {
1400         if (!(trace_flags & TRACE_ITER_STACKTRACE))
1401                 return;
1402
1403         __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1404 }
1405
1406 void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1407                         int skip, int pc)
1408 {
1409         if (!(trace_flags & TRACE_ITER_STACKTRACE))
1410                 return;
1411
1412         __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
1413 }
1414
1415 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1416                    int pc)
1417 {
1418         __ftrace_trace_stack(tr->buffer, flags, skip, pc, NULL);
1419 }
1420
1421 /**
1422  * trace_dump_stack - record a stack back trace in the trace buffer
1423  */
1424 void trace_dump_stack(void)
1425 {
1426         unsigned long flags;
1427
1428         if (tracing_disabled || tracing_selftest_running)
1429                 return;
1430
1431         local_save_flags(flags);
1432
1433         /* Skipping 3 frames seems to get us to the caller of this function */
1434         __ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count(), NULL);
1435 }
1436
1437 static DEFINE_PER_CPU(int, user_stack_count);
1438
1439 void
1440 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
1441 {
1442         struct ftrace_event_call *call = &event_user_stack;
1443         struct ring_buffer_event *event;
1444         struct userstack_entry *entry;
1445         struct stack_trace trace;
1446
1447         if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1448                 return;
1449
1450         /*
1451          * NMIs cannot handle page faults, even with fixups.
1452          * Saving the user stack can (and often does) fault.
1453          */
1454         if (unlikely(in_nmi()))
1455                 return;
1456
1457         /*
1458          * Prevent recursion, since user stack tracing may
1459          * trigger other kernel events.
1460          */
1461         preempt_disable();
1462         if (__this_cpu_read(user_stack_count))
1463                 goto out;
1464
1465         __this_cpu_inc(user_stack_count);
1466
1467         event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
1468                                           sizeof(*entry), flags, pc);
1469         if (!event)
1470                 goto out_drop_count;
1471         entry   = ring_buffer_event_data(event);
1472
1473         entry->tgid             = current->tgid;
1474         memset(&entry->caller, 0, sizeof(entry->caller));
1475
1476         trace.nr_entries        = 0;
1477         trace.max_entries       = FTRACE_STACK_ENTRIES;
1478         trace.skip              = 0;
1479         trace.entries           = entry->caller;
1480
1481         save_stack_trace_user(&trace);
1482         if (!filter_check_discard(call, entry, buffer, event))
1483                 __buffer_unlock_commit(buffer, event);
1484
1485  out_drop_count:
1486         __this_cpu_dec(user_stack_count);
1487  out:
1488         preempt_enable();
1489 }
1490
1491 #ifdef UNUSED
1492 static void __trace_userstack(struct trace_array *tr, unsigned long flags)
1493 {
1494         ftrace_trace_userstack(tr, flags, preempt_count());
1495 }
1496 #endif /* UNUSED */
1497
1498 #endif /* CONFIG_STACKTRACE */
1499
1500 /* created for use with alloc_percpu */
1501 struct trace_buffer_struct {
1502         char buffer[TRACE_BUF_SIZE];
1503 };
1504
1505 static struct trace_buffer_struct *trace_percpu_buffer;
1506 static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1507 static struct trace_buffer_struct *trace_percpu_irq_buffer;
1508 static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1509
1510 /*
1511  * The buffer used depends on the context. There is a per-cpu
1512  * buffer for normal context, softirq context, hard-irq context and
1513  * NMI context. This allows for lockless recording.
1514  *
1515  * Note, if the buffers failed to be allocated, then this returns NULL.
1516  */
1517 static char *get_trace_buf(void)
1518 {
1519         struct trace_buffer_struct *percpu_buffer;
1520         struct trace_buffer_struct *buffer;
1521
1522         /*
1523          * If we have allocated per cpu buffers, then we do not
1524          * need to do any locking.
1525          */
1526         if (in_nmi())
1527                 percpu_buffer = trace_percpu_nmi_buffer;
1528         else if (in_irq())
1529                 percpu_buffer = trace_percpu_irq_buffer;
1530         else if (in_softirq())
1531                 percpu_buffer = trace_percpu_sirq_buffer;
1532         else
1533                 percpu_buffer = trace_percpu_buffer;
1534
1535         if (!percpu_buffer)
1536                 return NULL;
1537
1538         buffer = per_cpu_ptr(percpu_buffer, smp_processor_id());
1539
1540         return buffer->buffer;
1541 }
1542
1543 static int alloc_percpu_trace_buffer(void)
1544 {
1545         struct trace_buffer_struct *buffers;
1546         struct trace_buffer_struct *sirq_buffers;
1547         struct trace_buffer_struct *irq_buffers;
1548         struct trace_buffer_struct *nmi_buffers;
1549
1550         buffers = alloc_percpu(struct trace_buffer_struct);
1551         if (!buffers)
1552                 goto err_warn;
1553
1554         sirq_buffers = alloc_percpu(struct trace_buffer_struct);
1555         if (!sirq_buffers)
1556                 goto err_sirq;
1557
1558         irq_buffers = alloc_percpu(struct trace_buffer_struct);
1559         if (!irq_buffers)
1560                 goto err_irq;
1561
1562         nmi_buffers = alloc_percpu(struct trace_buffer_struct);
1563         if (!nmi_buffers)
1564                 goto err_nmi;
1565
1566         trace_percpu_buffer = buffers;
1567         trace_percpu_sirq_buffer = sirq_buffers;
1568         trace_percpu_irq_buffer = irq_buffers;
1569         trace_percpu_nmi_buffer = nmi_buffers;
1570
1571         return 0;
1572
1573  err_nmi:
1574         free_percpu(irq_buffers);
1575  err_irq:
1576         free_percpu(sirq_buffers);
1577  err_sirq:
1578         free_percpu(buffers);
1579  err_warn:
1580         WARN(1, "Could not allocate percpu trace_printk buffer");
1581         return -ENOMEM;
1582 }
1583
1584 static int buffers_allocated;
1585
1586 void trace_printk_init_buffers(void)
1587 {
1588         if (buffers_allocated)
1589                 return;
1590
1591         if (alloc_percpu_trace_buffer())
1592                 return;
1593
1594         pr_info("ftrace: Allocated trace_printk buffers\n");
1595
1596         /* Expand the buffers to set size */
1597         tracing_update_buffers();
1598
1599         buffers_allocated = 1;
1600
1601         /*
1602          * trace_printk_init_buffers() can be called by modules.
1603          * If that happens, then we need to start cmdline recording
1604          * directly here. If the global_trace.buffer is already
1605          * allocated here, then this was called by module code.
1606          */
1607         if (global_trace.buffer)
1608                 tracing_start_cmdline_record();
1609 }
1610
1611 void trace_printk_start_comm(void)
1612 {
1613         /* Start tracing comms if trace printk is set */
1614         if (!buffers_allocated)
1615                 return;
1616         tracing_start_cmdline_record();
1617 }
1618
1619 static void trace_printk_start_stop_comm(int enabled)
1620 {
1621         if (!buffers_allocated)
1622                 return;
1623
1624         if (enabled)
1625                 tracing_start_cmdline_record();
1626         else
1627                 tracing_stop_cmdline_record();
1628 }
1629
1630 /**
1631  * trace_vbprintk - write a binary message to the tracing buffer
1632  *
1633  */
1634 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
1635 {
1636         struct ftrace_event_call *call = &event_bprint;
1637         struct ring_buffer_event *event;
1638         struct ring_buffer *buffer;
1639         struct trace_array *tr = &global_trace;
1640         struct bprint_entry *entry;
1641         unsigned long flags;
1642         char *tbuffer;
1643         int len = 0, size, pc;
1644
1645         if (unlikely(tracing_selftest_running || tracing_disabled))
1646                 return 0;
1647
1648         /* Don't pollute graph traces with trace_vprintk internals */
1649         pause_graph_tracing();
1650
1651         pc = preempt_count();
1652         preempt_disable_notrace();
1653
1654         tbuffer = get_trace_buf();
1655         if (!tbuffer) {
1656                 len = 0;
1657                 goto out;
1658         }
1659
1660         len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
1661
1662         if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
1663                 goto out;
1664
1665         local_save_flags(flags);
1666         size = sizeof(*entry) + sizeof(u32) * len;
1667         buffer = tr->buffer;
1668         event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
1669                                           flags, pc);
1670         if (!event)
1671                 goto out;
1672         entry = ring_buffer_event_data(event);
1673         entry->ip                       = ip;
1674         entry->fmt                      = fmt;
1675
1676         memcpy(entry->buf, tbuffer, sizeof(u32) * len);
1677         if (!filter_check_discard(call, entry, buffer, event)) {
1678                 __buffer_unlock_commit(buffer, event);
1679                 ftrace_trace_stack(buffer, flags, 6, pc);
1680         }
1681
1682 out:
1683         preempt_enable_notrace();
1684         unpause_graph_tracing();
1685
1686         return len;
1687 }
1688 EXPORT_SYMBOL_GPL(trace_vbprintk);
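/*
 * Illustrative sketch, not part of this file: a bprint entry stores
 * only the raw argument words plus the format pointer, and is expanded
 * at read time. The counterpart of vbin_printf() is bstr_printf()
 * from lib/vsprintf.c, used roughly like this:
 */
#if 0
static void bprint_expand_sketch(struct bprint_entry *entry)
{
        char out[TRACE_BUF_SIZE];

        /* Re-combine the saved format with the packed argument words */
        bstr_printf(out, sizeof(out), entry->fmt, entry->buf);
}
#endif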
1689
1690 int trace_array_printk(struct trace_array *tr,
1691                        unsigned long ip, const char *fmt, ...)
1692 {
1693         int ret;
1694         va_list ap;
1695
1696         if (!(trace_flags & TRACE_ITER_PRINTK))
1697                 return 0;
1698
1699         va_start(ap, fmt);
1700         ret = trace_array_vprintk(tr, ip, fmt, ap);
1701         va_end(ap);
1702         return ret;
1703 }
1704
1705 int trace_array_vprintk(struct trace_array *tr,
1706                         unsigned long ip, const char *fmt, va_list args)
1707 {
1708         struct ftrace_event_call *call = &event_print;
1709         struct ring_buffer_event *event;
1710         struct ring_buffer *buffer;
1711         int len = 0, size, pc;
1712         struct print_entry *entry;
1713         unsigned long flags;
1714         char *tbuffer;
1715
1716         if (tracing_disabled || tracing_selftest_running)
1717                 return 0;
1718
1719         /* Don't pollute graph traces with trace_vprintk internals */
1720         pause_graph_tracing();
1721
1722         pc = preempt_count();
1723         preempt_disable_notrace();
1724
1726         tbuffer = get_trace_buf();
1727         if (!tbuffer) {
1728                 len = 0;
1729                 goto out;
1730         }
1731
1732         len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
1733         if (len > TRACE_BUF_SIZE)
1734                 goto out;
1735
1736         local_save_flags(flags);
1737         size = sizeof(*entry) + len + 1;
1738         buffer = tr->buffer;
1739         event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
1740                                           flags, pc);
1741         if (!event)
1742                 goto out;
1743         entry = ring_buffer_event_data(event);
1744         entry->ip = ip;
1745
1746         memcpy(&entry->buf, tbuffer, len);
1747         entry->buf[len] = '\0';
1748         if (!filter_check_discard(call, entry, buffer, event)) {
1749                 __buffer_unlock_commit(buffer, event);
1750                 ftrace_trace_stack(buffer, flags, 6, pc);
1751         }
1752  out:
1753         preempt_enable_notrace();
1754         unpause_graph_tracing();
1755
1756         return len;
1757 }
1758
1759 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
1760 {
1761         return trace_array_vprintk(&global_trace, ip, fmt, args);
1762 }
1763 EXPORT_SYMBOL_GPL(trace_vprintk);
1764
1765 static void trace_iterator_increment(struct trace_iterator *iter)
1766 {
1767         struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
1768
1769         iter->idx++;
1770         if (buf_iter)
1771                 ring_buffer_read(buf_iter, NULL);
1772 }
1773
1774 static struct trace_entry *
1775 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
1776                 unsigned long *lost_events)
1777 {
1778         struct ring_buffer_event *event;
1779         struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
1780
1781         if (buf_iter)
1782                 event = ring_buffer_iter_peek(buf_iter, ts);
1783         else
1784                 event = ring_buffer_peek(iter->tr->buffer, cpu, ts,
1785                                          lost_events);
1786
1787         if (event) {
1788                 iter->ent_size = ring_buffer_event_length(event);
1789                 return ring_buffer_event_data(event);
1790         }
1791         iter->ent_size = 0;
1792         return NULL;
1793 }
1794
1795 static struct trace_entry *
1796 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
1797                   unsigned long *missing_events, u64 *ent_ts)
1798 {
1799         struct ring_buffer *buffer = iter->tr->buffer;
1800         struct trace_entry *ent, *next = NULL;
1801         unsigned long lost_events = 0, next_lost = 0;
1802         int cpu_file = iter->cpu_file;
1803         u64 next_ts = 0, ts;
1804         int next_cpu = -1;
1805         int next_size = 0;
1806         int cpu;
1807
1808         /*
1809          * If we are in a per_cpu trace file, don't bother iterating over
1810          * all the CPUs; peek at that CPU directly.
1811          */
1812         if (cpu_file > TRACE_PIPE_ALL_CPU) {
1813                 if (ring_buffer_empty_cpu(buffer, cpu_file))
1814                         return NULL;
1815                 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
1816                 if (ent_cpu)
1817                         *ent_cpu = cpu_file;
1818
1819                 return ent;
1820         }
1821
1822         for_each_tracing_cpu(cpu) {
1823
1824                 if (ring_buffer_empty_cpu(buffer, cpu))
1825                         continue;
1826
1827                 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
1828
1829                 /*
1830                  * Pick the entry with the smallest timestamp:
1831                  */
1832                 if (ent && (!next || ts < next_ts)) {
1833                         next = ent;
1834                         next_cpu = cpu;
1835                         next_ts = ts;
1836                         next_lost = lost_events;
1837                         next_size = iter->ent_size;
1838                 }
1839         }
1840
1841         iter->ent_size = next_size;
1842
1843         if (ent_cpu)
1844                 *ent_cpu = next_cpu;
1845
1846         if (ent_ts)
1847                 *ent_ts = next_ts;
1848
1849         if (missing_events)
1850                 *missing_events = next_lost;
1851
1852         return next;
1853 }
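/*
 * Illustrative userspace sketch, not part of this file: the loop above
 * is a k-way merge that always yields the pending event with the
 * smallest timestamp across the per-CPU buffers, e.g.:
 */
#if 0
struct sketch_event {
        unsigned long long ts;  /* 0 means this CPU buffer is empty */
};

static int pick_next_cpu(struct sketch_event *heads, int ncpus)
{
        unsigned long long next_ts = 0;
        int cpu, next_cpu = -1;

        for (cpu = 0; cpu < ncpus; cpu++) {
                if (!heads[cpu].ts)
                        continue;
                if (next_cpu < 0 || heads[cpu].ts < next_ts) {
                        next_ts = heads[cpu].ts;
                        next_cpu = cpu;
                }
        }
        return next_cpu;        /* -1 when every buffer is empty */
}
#endif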
1854
1855 /* Find the next real entry, without updating the iterator itself */
1856 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
1857                                           int *ent_cpu, u64 *ent_ts)
1858 {
1859         return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
1860 }
1861
1862 /* Find the next real entry, and increment the iterator to the next entry */
1863 void *trace_find_next_entry_inc(struct trace_iterator *iter)
1864 {
1865         iter->ent = __find_next_entry(iter, &iter->cpu,
1866                                       &iter->lost_events, &iter->ts);
1867
1868         if (iter->ent)
1869                 trace_iterator_increment(iter);
1870
1871         return iter->ent ? iter : NULL;
1872 }
1873
1874 static void trace_consume(struct trace_iterator *iter)
1875 {
1876         ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts,
1877                             &iter->lost_events);
1878 }
1879
1880 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
1881 {
1882         struct trace_iterator *iter = m->private;
1883         int i = (int)*pos;
1884         void *ent;
1885
1886         WARN_ON_ONCE(iter->leftover);
1887
1888         (*pos)++;
1889
1890         /* can't go backwards */
1891         if (iter->idx > i)
1892                 return NULL;
1893
1894         if (iter->idx < 0)
1895                 ent = trace_find_next_entry_inc(iter);
1896         else
1897                 ent = iter;
1898
1899         while (ent && iter->idx < i)
1900                 ent = trace_find_next_entry_inc(iter);
1901
1902         iter->pos = *pos;
1903
1904         return ent;
1905 }
1906
1907 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
1908 {
1909         struct trace_array *tr = iter->tr;
1910         struct ring_buffer_event *event;
1911         struct ring_buffer_iter *buf_iter;
1912         unsigned long entries = 0;
1913         u64 ts;
1914
1915         tr->data[cpu]->skipped_entries = 0;
1916
1917         buf_iter = trace_buffer_iter(iter, cpu);
1918         if (!buf_iter)
1919                 return;
1920
1921         ring_buffer_iter_reset(buf_iter);
1922
1923         /*
1924          * With the max latency tracers, a reset may never have taken
1925          * place on a CPU. This is evident from timestamps that precede
1926          * the start of the trace; such entries are skipped here.
1927          */
1928         while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
1929                 if (ts >= iter->tr->time_start)
1930                         break;
1931                 entries++;
1932                 ring_buffer_read(buf_iter, NULL);
1933         }
1934
1935         tr->data[cpu]->skipped_entries = entries;
1936 }
1937
1938 /*
1939  * The current tracer is copied to avoid using a global lock
1940  * all around.
1941  */
1942 static void *s_start(struct seq_file *m, loff_t *pos)
1943 {
1944         struct trace_iterator *iter = m->private;
1945         static struct tracer *old_tracer;
1946         int cpu_file = iter->cpu_file;
1947         void *p = NULL;
1948         loff_t l = 0;
1949         int cpu;
1950
1951         /* copy the tracer to avoid using a global lock all around */
1952         mutex_lock(&trace_types_lock);
1953         if (unlikely(old_tracer != current_trace && current_trace)) {
1954                 old_tracer = current_trace;
1955                 *iter->trace = *current_trace;
1956         }
1957         mutex_unlock(&trace_types_lock);
1958
1959         atomic_inc(&trace_record_cmdline_disabled);
1960
1961         if (*pos != iter->pos) {
1962                 iter->ent = NULL;
1963                 iter->cpu = 0;
1964                 iter->idx = -1;
1965
1966                 if (cpu_file == TRACE_PIPE_ALL_CPU) {
1967                         for_each_tracing_cpu(cpu)
1968                                 tracing_iter_reset(iter, cpu);
1969                 } else
1970                         tracing_iter_reset(iter, cpu_file);
1971
1972                 iter->leftover = 0;
1973                 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
1974                         ;
1975
1976         } else {
1977                 /*
1978                  * If we overflowed the seq_file before, then we want
1979                  * to just reuse the trace_seq buffer again.
1980                  */
1981                 if (iter->leftover)
1982                         p = iter;
1983                 else {
1984                         l = *pos - 1;
1985                         p = s_next(m, p, &l);
1986                 }
1987         }
1988
1989         trace_event_read_lock();
1990         trace_access_lock(cpu_file);
1991         return p;
1992 }
1993
1994 static void s_stop(struct seq_file *m, void *p)
1995 {
1996         struct trace_iterator *iter = m->private;
1997
1998         atomic_dec(&trace_record_cmdline_disabled);
1999         trace_access_unlock(iter->cpu_file);
2000         trace_event_read_unlock();
2001 }
2002
2003 static void
2004 get_total_entries(struct trace_array *tr, unsigned long *total, unsigned long *entries)
2005 {
2006         unsigned long count;
2007         int cpu;
2008
2009         *total = 0;
2010         *entries = 0;
2011
2012         for_each_tracing_cpu(cpu) {
2013                 count = ring_buffer_entries_cpu(tr->buffer, cpu);
2014                 /*
2015                  * If this buffer has skipped entries, then we still hold
2016                  * all entries for the trace and only need to ignore the
2017                  * ones before the start time stamp.
2018                  */
2019                 if (tr->data[cpu]->skipped_entries) {
2020                         count -= tr->data[cpu]->skipped_entries;
2021                         /* total is the same as the entries */
2022                         *total += count;
2023                 } else
2024                         *total += count +
2025                                 ring_buffer_overrun_cpu(tr->buffer, cpu);
2026                 *entries += count;
2027         }
2028 }
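/*
 * Worked example, illustrative: if a CPU buffer currently holds 900
 * readable entries and the ring buffer reports 100 overruns, the
 * accounting above yields entries = 900 (still readable) and
 * total = 1000 (everything ever written). Entries skipped by
 * tracing_iter_reset() are instead subtracted from both counts,
 * since in that case nothing was overwritten.
 */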
2029
2030 static void print_lat_help_header(struct seq_file *m)
2031 {
2032         seq_puts(m, "#                  _------=> CPU#            \n");
2033         seq_puts(m, "#                 / _-----=> irqs-off        \n");
2034         seq_puts(m, "#                | / _----=> need-resched    \n");
2035         seq_puts(m, "#                || / _---=> hardirq/softirq \n");
2036         seq_puts(m, "#                ||| / _--=> preempt-depth   \n");
2037         seq_puts(m, "#                |||| /     delay             \n");
2038         seq_puts(m, "#  cmd     pid   ||||| time  |   caller      \n");
2039         seq_puts(m, "#     \\   /      |||||  \\    |   /           \n");
2040 }
2041
2042 static void print_event_info(struct trace_array *tr, struct seq_file *m)
2043 {
2044         unsigned long total;
2045         unsigned long entries;
2046
2047         get_total_entries(tr, &total, &entries);
2048         seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
2049                    entries, total, num_online_cpus());
2050         seq_puts(m, "#\n");
2051 }
2052
2053 static void print_func_help_header(struct trace_array *tr, struct seq_file *m)
2054 {
2055         print_event_info(tr, m);
2056         seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n");
2057         seq_puts(m, "#              | |       |          |         |\n");
2058 }
2059
2060 static void print_func_help_header_irq(struct trace_array *tr, struct seq_file *m)
2061 {
2062         print_event_info(tr, m);
2063         seq_puts(m, "#                              _-----=> irqs-off\n");
2064         seq_puts(m, "#                             / _----=> need-resched\n");
2065         seq_puts(m, "#                            | / _---=> hardirq/softirq\n");
2066         seq_puts(m, "#                            || / _--=> preempt-depth\n");
2067         seq_puts(m, "#                            ||| /     delay\n");
2068         seq_puts(m, "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n");
2069         seq_puts(m, "#              | |       |   ||||       |         |\n");
2070 }
2071
2072 void
2073 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2074 {
2075         unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
2076         struct trace_array *tr = iter->tr;
2077         struct trace_array_cpu *data = tr->data[tr->cpu];
2078         struct tracer *type = current_trace;
2079         unsigned long entries;
2080         unsigned long total;
2081         const char *name = "preemption";
2082
2083         if (type)
2084                 name = type->name;
2085
2086         get_total_entries(tr, &total, &entries);
2087
2088         seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
2089                    name, UTS_RELEASE);
2090         seq_puts(m, "# -----------------------------------"
2091                  "---------------------------------\n");
2092         seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
2093                    " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
2094                    nsecs_to_usecs(data->saved_latency),
2095                    entries,
2096                    total,
2097                    tr->cpu,
2098 #if defined(CONFIG_PREEMPT_NONE)
2099                    "server",
2100 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
2101                    "desktop",
2102 #elif defined(CONFIG_PREEMPT)
2103                    "preempt",
2104 #else
2105                    "unknown",
2106 #endif
2107                    /* These are reserved for later use */
2108                    0, 0, 0, 0);
2109 #ifdef CONFIG_SMP
2110         seq_printf(m, " #P:%d)\n", num_online_cpus());
2111 #else
2112         seq_puts(m, ")\n");
2113 #endif
2114         seq_puts(m, "#    -----------------\n");
2115         seq_printf(m, "#    | task: %.16s-%d "
2116                    "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
2117                    data->comm, data->pid,
2118                    from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
2119                    data->policy, data->rt_priority);
2120         seq_puts(m, "#    -----------------\n");
2121
2122         if (data->critical_start) {
2123                 seq_puts(m, "#  => started at: ");
2124                 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2125                 trace_print_seq(m, &iter->seq);
2126                 seq_puts(m, "\n#  => ended at:   ");
2127                 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2128                 trace_print_seq(m, &iter->seq);
2129                 seq_puts(m, "\n#\n");
2130         }
2131
2132         seq_puts(m, "#\n");
2133 }
2134
2135 static void test_cpu_buff_start(struct trace_iterator *iter)
2136 {
2137         struct trace_seq *s = &iter->seq;
2138
2139         if (!(trace_flags & TRACE_ITER_ANNOTATE))
2140                 return;
2141
2142         if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2143                 return;
2144
2145         if (cpumask_test_cpu(iter->cpu, iter->started))
2146                 return;
2147
2148         if (iter->tr->data[iter->cpu]->skipped_entries)
2149                 return;
2150
2151         cpumask_set_cpu(iter->cpu, iter->started);
2152
2153         /* Don't print the "CPU buffer started" annotation for the first entry of the trace */
2154         if (iter->idx > 1)
2155                 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2156                                 iter->cpu);
2157 }
2158
2159 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
2160 {
2161         struct trace_seq *s = &iter->seq;
2162         unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
2163         struct trace_entry *entry;
2164         struct trace_event *event;
2165
2166         entry = iter->ent;
2167
2168         test_cpu_buff_start(iter);
2169
2170         event = ftrace_find_event(entry->type);
2171
2172         if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2173                 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2174                         if (!trace_print_lat_context(iter))
2175                                 goto partial;
2176                 } else {
2177                         if (!trace_print_context(iter))
2178                                 goto partial;
2179                 }
2180         }
2181
2182         if (event)
2183                 return event->funcs->trace(iter, sym_flags, event);
2184
2185         if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
2186                 goto partial;
2187
2188         return TRACE_TYPE_HANDLED;
2189 partial:
2190         return TRACE_TYPE_PARTIAL_LINE;
2191 }
2192
2193 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
2194 {
2195         struct trace_seq *s = &iter->seq;
2196         struct trace_entry *entry;
2197         struct trace_event *event;
2198
2199         entry = iter->ent;
2200
2201         if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2202                 if (!trace_seq_printf(s, "%d %d %llu ",
2203                                       entry->pid, iter->cpu, iter->ts))
2204                         goto partial;
2205         }
2206
2207         event = ftrace_find_event(entry->type);
2208         if (event)
2209                 return event->funcs->raw(iter, 0, event);
2210
2211         if (!trace_seq_printf(s, "%d ?\n", entry->type))
2212                 goto partial;
2213
2214         return TRACE_TYPE_HANDLED;
2215 partial:
2216         return TRACE_TYPE_PARTIAL_LINE;
2217 }
2218
2219 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
2220 {
2221         struct trace_seq *s = &iter->seq;
2222         unsigned char newline = '\n';
2223         struct trace_entry *entry;
2224         struct trace_event *event;
2225
2226         entry = iter->ent;
2227
2228         if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2229                 SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
2230                 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
2231                 SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
2232         }
2233
2234         event = ftrace_find_event(entry->type);
2235         if (event) {
2236                 enum print_line_t ret = event->funcs->hex(iter, 0, event);
2237                 if (ret != TRACE_TYPE_HANDLED)
2238                         return ret;
2239         }
2240
2241         SEQ_PUT_FIELD_RET(s, newline);
2242
2243         return TRACE_TYPE_HANDLED;
2244 }
2245
2246 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
2247 {
2248         struct trace_seq *s = &iter->seq;
2249         struct trace_entry *entry;
2250         struct trace_event *event;
2251
2252         entry = iter->ent;
2253
2254         if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2255                 SEQ_PUT_FIELD_RET(s, entry->pid);
2256                 SEQ_PUT_FIELD_RET(s, iter->cpu);
2257                 SEQ_PUT_FIELD_RET(s, iter->ts);
2258         }
2259
2260         event = ftrace_find_event(entry->type);
2261         return event ? event->funcs->binary(iter, 0, event) :
2262                 TRACE_TYPE_HANDLED;
2263 }
2264
2265 int trace_empty(struct trace_iterator *iter)
2266 {
2267         struct ring_buffer_iter *buf_iter;
2268         int cpu;
2269
2270         /* If we are looking at one CPU buffer, only check that one */
2271         if (iter->cpu_file != TRACE_PIPE_ALL_CPU) {
2272                 cpu = iter->cpu_file;
2273                 buf_iter = trace_buffer_iter(iter, cpu);
2274                 if (buf_iter) {
2275                         if (!ring_buffer_iter_empty(buf_iter))
2276                                 return 0;
2277                 } else {
2278                         if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
2279                                 return 0;
2280                 }
2281                 return 1;
2282         }
2283
2284         for_each_tracing_cpu(cpu) {
2285                 buf_iter = trace_buffer_iter(iter, cpu);
2286                 if (buf_iter) {
2287                         if (!ring_buffer_iter_empty(buf_iter))
2288                                 return 0;
2289                 } else {
2290                         if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
2291                                 return 0;
2292                 }
2293         }
2294
2295         return 1;
2296 }
2297
2298 /*  Called with trace_event_read_lock() held. */
2299 enum print_line_t print_trace_line(struct trace_iterator *iter)
2300 {
2301         enum print_line_t ret;
2302
2303         if (iter->lost_events &&
2304             !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2305                                  iter->cpu, iter->lost_events))
2306                 return TRACE_TYPE_PARTIAL_LINE;
2307
2308         if (iter->trace && iter->trace->print_line) {
2309                 ret = iter->trace->print_line(iter);
2310                 if (ret != TRACE_TYPE_UNHANDLED)
2311                         return ret;
2312         }
2313
2314         if (iter->ent->type == TRACE_BPRINT &&
2315                         trace_flags & TRACE_ITER_PRINTK &&
2316                         trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2317                 return trace_print_bprintk_msg_only(iter);
2318
2319         if (iter->ent->type == TRACE_PRINT &&
2320                         trace_flags & TRACE_ITER_PRINTK &&
2321                         trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2322                 return trace_print_printk_msg_only(iter);
2323
2324         if (trace_flags & TRACE_ITER_BIN)
2325                 return print_bin_fmt(iter);
2326
2327         if (trace_flags & TRACE_ITER_HEX)
2328                 return print_hex_fmt(iter);
2329
2330         if (trace_flags & TRACE_ITER_RAW)
2331                 return print_raw_fmt(iter);
2332
2333         return print_trace_fmt(iter);
2334 }
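/*
 * Worked example, illustrative: a tracer's own print_line() handler
 * gets first refusal; after that, if both "bin" and "hex" are set in
 * trace_flags, "bin" wins because it is tested first. The effective
 * precedence above is bin > hex > raw > default pretty printing.
 */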
2335
2336 void trace_latency_header(struct seq_file *m)
2337 {
2338         struct trace_iterator *iter = m->private;
2339
2340         /* print nothing if the buffers are empty */
2341         if (trace_empty(iter))
2342                 return;
2343
2344         if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2345                 print_trace_header(m, iter);
2346
2347         if (!(trace_flags & TRACE_ITER_VERBOSE))
2348                 print_lat_help_header(m);
2349 }
2350
2351 void trace_default_header(struct seq_file *m)
2352 {
2353         struct trace_iterator *iter = m->private;
2354
2355         if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2356                 return;
2357
2358         if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2359                 /* print nothing if the buffers are empty */
2360                 if (trace_empty(iter))
2361                         return;
2362                 print_trace_header(m, iter);
2363                 if (!(trace_flags & TRACE_ITER_VERBOSE))
2364                         print_lat_help_header(m);
2365         } else {
2366                 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2367                         if (trace_flags & TRACE_ITER_IRQ_INFO)
2368                                 print_func_help_header_irq(iter->tr, m);
2369                         else
2370                                 print_func_help_header(iter->tr, m);
2371                 }
2372         }
2373 }
2374
2375 static void test_ftrace_alive(struct seq_file *m)
2376 {
2377         if (!ftrace_is_dead())
2378                 return;
2379         seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
2380         seq_puts(m, "#          MAY BE MISSING FUNCTION EVENTS\n");
2381 }
2382
2383 static int s_show(struct seq_file *m, void *v)
2384 {
2385         struct trace_iterator *iter = v;
2386         int ret;
2387
2388         if (iter->ent == NULL) {
2389                 if (iter->tr) {
2390                         seq_printf(m, "# tracer: %s\n", iter->trace->name);
2391                         seq_puts(m, "#\n");
2392                         test_ftrace_alive(m);
2393                 }
2394                 if (iter->trace && iter->trace->print_header)
2395                         iter->trace->print_header(m);
2396                 else
2397                         trace_default_header(m);
2398
2399         } else if (iter->leftover) {
2400                 /*
2401                  * If we filled the seq_file buffer earlier, we
2402                  * want to just show it now.
2403                  */
2404                 ret = trace_print_seq(m, &iter->seq);
2405
2406                 /* ret should this time be zero, but you never know */
2407                 iter->leftover = ret;
2408
2409         } else {
2410                 print_trace_line(iter);
2411                 ret = trace_print_seq(m, &iter->seq);
2412                 /*
2413                  * If we overflow the seq_file buffer, then it will
2414                  * ask us for this data again at start up.
2415                  * Use that instead.
2416                  *  ret is 0 if seq_file write succeeded.
2417                  *        -1 otherwise.
2418                  */
2419                 iter->leftover = ret;
2420         }
2421
2422         return 0;
2423 }
2424
2425 static const struct seq_operations tracer_seq_ops = {
2426         .start          = s_start,
2427         .next           = s_next,
2428         .stop           = s_stop,
2429         .show           = s_show,
2430 };
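/*
 * Illustrative sketch, not part of this file: the seq_file core drives
 * the operations above for each read() of the trace file, roughly
 * (ignoring buffer-full handling in seq_read()):
 */
#if 0
static void seq_walk_sketch(struct seq_file *m, loff_t pos)
{
        void *v = tracer_seq_ops.start(m, &pos);

        while (v) {
                if (tracer_seq_ops.show(m, v))
                        break;
                v = tracer_seq_ops.next(m, v, &pos);
        }
        tracer_seq_ops.stop(m, v);
}
#endif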
2431
2432 static struct trace_iterator *
2433 __tracing_open(struct inode *inode, struct file *file)
2434 {
2435         long cpu_file = (long) inode->i_private;
2436         struct trace_iterator *iter;
2437         int cpu;
2438
2439         if (tracing_disabled)
2440                 return ERR_PTR(-ENODEV);
2441
2442         iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
2443         if (!iter)
2444                 return ERR_PTR(-ENOMEM);
2445
2446         iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
2447                                     GFP_KERNEL);
2448         if (!iter->buffer_iter)
2449                 goto release;
2450
2451         /*
2452          * We make a copy of the current tracer to avoid concurrent
2453          * changes on it while we are reading.
2454          */
2455         mutex_lock(&trace_types_lock);
2456         iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
2457         if (!iter->trace)
2458                 goto fail;
2459
2460         if (current_trace)
2461                 *iter->trace = *current_trace;
2462
2463         if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
2464                 goto fail;
2465
2466         if (current_trace && current_trace->print_max)
2467                 iter->tr = &max_tr;
2468         else
2469                 iter->tr = &global_trace;
2470         iter->pos = -1;
2471         mutex_init(&iter->mutex);
2472         iter->cpu_file = cpu_file;
2473
2474         /* Notify the tracer early; before we stop tracing. */
2475         if (iter->trace && iter->trace->open)
2476                 iter->trace->open(iter);
2477
2478         /* Annotate start of buffers if we had overruns */
2479         if (ring_buffer_overruns(iter->tr->buffer))
2480                 iter->iter_flags |= TRACE_FILE_ANNOTATE;
2481
2482         /* Output in nanoseconds only if we are using a clock in nanoseconds. */
2483         if (trace_clocks[trace_clock_id].in_ns)
2484                 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
2485
2486         /* stop the trace while dumping */
2487         tracing_stop();
2488
2489         if (iter->cpu_file == TRACE_PIPE_ALL_CPU) {
2490                 for_each_tracing_cpu(cpu) {
2491                         iter->buffer_iter[cpu] =
2492                                 ring_buffer_read_prepare(iter->tr->buffer, cpu);
2493                 }
2494                 ring_buffer_read_prepare_sync();
2495                 for_each_tracing_cpu(cpu) {
2496                         ring_buffer_read_start(iter->buffer_iter[cpu]);
2497                         tracing_iter_reset(iter, cpu);
2498                 }
2499         } else {
2500                 cpu = iter->cpu_file;
2501                 iter->buffer_iter[cpu] =
2502                         ring_buffer_read_prepare(iter->tr->buffer, cpu);
2503                 ring_buffer_read_prepare_sync();
2504                 ring_buffer_read_start(iter->buffer_iter[cpu]);
2505                 tracing_iter_reset(iter, cpu);
2506         }
2507
2508         mutex_unlock(&trace_types_lock);
2509
2510         return iter;
2511
2512  fail:
2513         mutex_unlock(&trace_types_lock);
2514         kfree(iter->trace);
2515         kfree(iter->buffer_iter);
2516 release:
2517         seq_release_private(inode, file);
2518         return ERR_PTR(-ENOMEM);
2519 }
2520
2521 int tracing_open_generic(struct inode *inode, struct file *filp)
2522 {
2523         if (tracing_disabled)
2524                 return -ENODEV;
2525
2526         filp->private_data = inode->i_private;
2527         return 0;
2528 }
2529
2530 static int tracing_release(struct inode *inode, struct file *file)
2531 {
2532         struct seq_file *m = file->private_data;
2533         struct trace_iterator *iter;
2534         int cpu;
2535
2536         if (!(file->f_mode & FMODE_READ))
2537                 return 0;
2538
2539         iter = m->private;
2540
2541         mutex_lock(&trace_types_lock);
2542         for_each_tracing_cpu(cpu) {
2543                 if (iter->buffer_iter[cpu])
2544                         ring_buffer_read_finish(iter->buffer_iter[cpu]);
2545         }
2546
2547         if (iter->trace && iter->trace->close)
2548                 iter->trace->close(iter);
2549
2550         /* reenable tracing if it was previously enabled */
2551         tracing_start();
2552         mutex_unlock(&trace_types_lock);
2553
2554         mutex_destroy(&iter->mutex);
2555         free_cpumask_var(iter->started);
2556         kfree(iter->trace);
2557         kfree(iter->buffer_iter);
2558         seq_release_private(inode, file);
2559         return 0;
2560 }
2561
2562 static int tracing_open(struct inode *inode, struct file *file)
2563 {
2564         struct trace_iterator *iter;
2565         int ret = 0;
2566
2567         /* If this file was opened for write, then erase its contents */
2568         if ((file->f_mode & FMODE_WRITE) &&
2569             (file->f_flags & O_TRUNC)) {
2570                 long cpu = (long) inode->i_private;
2571
2572                 if (cpu == TRACE_PIPE_ALL_CPU)
2573                         tracing_reset_online_cpus(&global_trace);
2574                 else
2575                         tracing_reset(&global_trace, cpu);
2576         }
2577
2578         if (file->f_mode & FMODE_READ) {
2579                 iter = __tracing_open(inode, file);
2580                 if (IS_ERR(iter))
2581                         ret = PTR_ERR(iter);
2582                 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
2583                         iter->iter_flags |= TRACE_FILE_LAT_FMT;
2584         }
2585         return ret;
2586 }
2587
2588 static void *
2589 t_next(struct seq_file *m, void *v, loff_t *pos)
2590 {
2591         struct tracer *t = v;
2592
2593         (*pos)++;
2594
2595         if (t)
2596                 t = t->next;
2597
2598         return t;
2599 }
2600
2601 static void *t_start(struct seq_file *m, loff_t *pos)
2602 {
2603         struct tracer *t;
2604         loff_t l = 0;
2605
2606         mutex_lock(&trace_types_lock);
2607         for (t = trace_types; t && l < *pos; t = t_next(m, t, &l))
2608                 ;
2609
2610         return t;
2611 }
2612
2613 static void t_stop(struct seq_file *m, void *p)
2614 {
2615         mutex_unlock(&trace_types_lock);
2616 }
2617
2618 static int t_show(struct seq_file *m, void *v)
2619 {
2620         struct tracer *t = v;
2621
2622         if (!t)
2623                 return 0;
2624
2625         seq_printf(m, "%s", t->name);
2626         if (t->next)
2627                 seq_putc(m, ' ');
2628         else
2629                 seq_putc(m, '\n');
2630
2631         return 0;
2632 }
2633
2634 static const struct seq_operations show_traces_seq_ops = {
2635         .start          = t_start,
2636         .next           = t_next,
2637         .stop           = t_stop,
2638         .show           = t_show,
2639 };
2640
2641 static int show_traces_open(struct inode *inode, struct file *file)
2642 {
2643         if (tracing_disabled)
2644                 return -ENODEV;
2645
2646         return seq_open(file, &show_traces_seq_ops);
2647 }
2648
2649 static ssize_t
2650 tracing_write_stub(struct file *filp, const char __user *ubuf,
2651                    size_t count, loff_t *ppos)
2652 {
2653         return count;
2654 }
2655
2656 static loff_t tracing_seek(struct file *file, loff_t offset, int origin)
2657 {
2658         if (file->f_mode & FMODE_READ)
2659                 return seq_lseek(file, offset, origin);
2660         else
2661                 return 0;
2662 }
2663
2664 static const struct file_operations tracing_fops = {
2665         .open           = tracing_open,
2666         .read           = seq_read,
2667         .write          = tracing_write_stub,
2668         .llseek         = tracing_seek,
2669         .release        = tracing_release,
2670 };
2671
2672 static const struct file_operations show_traces_fops = {
2673         .open           = show_traces_open,
2674         .read           = seq_read,
2675         .release        = seq_release,
2676         .llseek         = seq_lseek,
2677 };
2678
2679 /*
2680  * Only trace on a CPU if the bitmask is set:
2681  */
2682 static cpumask_var_t tracing_cpumask;
2683
2684 /*
2685  * The tracer itself will not take this lock, but still we want
2686  * to provide a consistent cpumask to user-space:
2687  */
2688 static DEFINE_MUTEX(tracing_cpumask_update_lock);
2689
2690 /*
2691  * Temporary storage for the character representation of the
2692  * CPU bitmask (and one more byte for the newline):
2693  */
2694 static char mask_str[NR_CPUS + 1];
2695
2696 static ssize_t
2697 tracing_cpumask_read(struct file *filp, char __user *ubuf,
2698                      size_t count, loff_t *ppos)
2699 {
2700         int len;
2701
2702         mutex_lock(&tracing_cpumask_update_lock);
2703
2704         len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
2705         if (count - len < 2) {
2706                 count = -EINVAL;
2707                 goto out_err;
2708         }
2709         len += sprintf(mask_str + len, "\n");
2710         count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
2711
2712 out_err:
2713         mutex_unlock(&tracing_cpumask_update_lock);
2714
2715         return count;
2716 }
2717
2718 static ssize_t
2719 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
2720                       size_t count, loff_t *ppos)
2721 {
2722         int err, cpu;
2723         cpumask_var_t tracing_cpumask_new;
2724
2725         if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
2726                 return -ENOMEM;
2727
2728         err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
2729         if (err)
2730                 goto err_unlock;
2731
2732         mutex_lock(&tracing_cpumask_update_lock);
2733
2734         local_irq_disable();
2735         arch_spin_lock(&ftrace_max_lock);
2736         for_each_tracing_cpu(cpu) {
2737                 /*
2738                  * Increase/decrease the disabled counter if we are
2739                  * about to flip a bit in the cpumask:
2740                  */
2741                 if (cpumask_test_cpu(cpu, tracing_cpumask) &&
2742                                 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
2743                         atomic_inc(&global_trace.data[cpu]->disabled);
2744                         ring_buffer_record_disable_cpu(global_trace.buffer, cpu);
2745                 }
2746                 if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
2747                                 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
2748                         atomic_dec(&global_trace.data[cpu]->disabled);
2749                         ring_buffer_record_enable_cpu(global_trace.buffer, cpu);
2750                 }
2751         }
2752         arch_spin_unlock(&ftrace_max_lock);
2753         local_irq_enable();
2754
2755         cpumask_copy(tracing_cpumask, tracing_cpumask_new);
2756
2757         mutex_unlock(&tracing_cpumask_update_lock);
2758         free_cpumask_var(tracing_cpumask_new);
2759
2760         return count;
2761
2762 err_unlock:
2763         free_cpumask_var(tracing_cpumask_new);
2764
2765         return err;
2766 }
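/*
 * Worked example, illustrative: the mask is parsed and printed in hex.
 * With the current mask "f" (CPUs 0-3), writing "3" (CPUs 0-1) bumps
 * the disabled counter and stops ring buffer recording on CPUs 2 and
 * 3; writing "f" again re-enables them.
 */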
2767
2768 static const struct file_operations tracing_cpumask_fops = {
2769         .open           = tracing_open_generic,
2770         .read           = tracing_cpumask_read,
2771         .write          = tracing_cpumask_write,
2772         .llseek         = generic_file_llseek,
2773 };
2774
2775 static int tracing_trace_options_show(struct seq_file *m, void *v)
2776 {
2777         struct tracer_opt *trace_opts;
2778         u32 tracer_flags;
2779         int i;
2780
2781         mutex_lock(&trace_types_lock);
2782         tracer_flags = current_trace->flags->val;
2783         trace_opts = current_trace->flags->opts;
2784
2785         for (i = 0; trace_options[i]; i++) {
2786                 if (trace_flags & (1 << i))
2787                         seq_printf(m, "%s\n", trace_options[i]);
2788                 else
2789                         seq_printf(m, "no%s\n", trace_options[i]);
2790         }
2791
2792         for (i = 0; trace_opts[i].name; i++) {
2793                 if (tracer_flags & trace_opts[i].bit)
2794                         seq_printf(m, "%s\n", trace_opts[i].name);
2795                 else
2796                         seq_printf(m, "no%s\n", trace_opts[i].name);
2797         }
2798         mutex_unlock(&trace_types_lock);
2799
2800         return 0;
2801 }
2802
2803 static int __set_tracer_option(struct tracer *trace,
2804                                struct tracer_flags *tracer_flags,
2805                                struct tracer_opt *opts, int neg)
2806 {
2807         int ret;
2808
2809         ret = trace->set_flag(tracer_flags->val, opts->bit, !neg);
2810         if (ret)
2811                 return ret;
2812
2813         if (neg)
2814                 tracer_flags->val &= ~opts->bit;
2815         else
2816                 tracer_flags->val |= opts->bit;
2817         return 0;
2818 }
2819
2820 /* Try to assign a tracer-specific option */
2821 static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
2822 {
2823         struct tracer_flags *tracer_flags = trace->flags;
2824         struct tracer_opt *opts = NULL;
2825         int i;
2826
2827         for (i = 0; tracer_flags->opts[i].name; i++) {
2828                 opts = &tracer_flags->opts[i];
2829
2830                 if (strcmp(cmp, opts->name) == 0)
2831                         return __set_tracer_option(trace, trace->flags,
2832                                                    opts, neg);
2833         }
2834
2835         return -EINVAL;
2836 }
2837
2838 static void set_tracer_flags(unsigned int mask, int enabled)
2839 {
2840         /* do nothing if flag is already set */
2841         if (!!(trace_flags & mask) == !!enabled)
2842                 return;
2843
2844         if (enabled)
2845                 trace_flags |= mask;
2846         else
2847                 trace_flags &= ~mask;
2848
2849         if (mask == TRACE_ITER_RECORD_CMD)
2850                 trace_event_enable_cmd_record(enabled);
2851
2852         if (mask == TRACE_ITER_OVERWRITE)
2853                 ring_buffer_change_overwrite(global_trace.buffer, enabled);
2854
2855         if (mask == TRACE_ITER_PRINTK)
2856                 trace_printk_start_stop_comm(enabled);
2857 }
2858
2859 static int trace_set_options(char *option)
2860 {
2861         char *cmp;
2862         int neg = 0;
2863         int ret = 0;
2864         int i;
2865
2866         cmp = strstrip(option);
2867
2868         if (strncmp(cmp, "no", 2) == 0) {
2869                 neg = 1;
2870                 cmp += 2;
2871         }
2872
2873         for (i = 0; trace_options[i]; i++) {
2874                 if (strcmp(cmp, trace_options[i]) == 0) {
2875                         set_tracer_flags(1 << i, !neg);
2876                         break;
2877                 }
2878         }
2879
2880         /* If no global option matched, try the tracer-specific options */
2881         if (!trace_options[i]) {
2882                 mutex_lock(&trace_types_lock);
2883                 ret = set_tracer_option(current_trace, cmp, neg);
2884                 mutex_unlock(&trace_types_lock);
2885         }
2886
2887         return ret;
2888 }
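/*
 * Worked example, illustrative: writing "noprint-parent" is stripped
 * to "print-parent" with neg = 1, clearing that bit through
 * set_tracer_flags(); writing "print-parent" sets it. A string that
 * matches no global option falls through to the current tracer's own
 * options via set_tracer_option().
 */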
2889
2890 static ssize_t
2891 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
2892                         size_t cnt, loff_t *ppos)
2893 {
2894         char buf[64];
2895
2896         if (cnt >= sizeof(buf))
2897                 return -EINVAL;
2898
2899         if (copy_from_user(&buf, ubuf, cnt))
2900                 return -EFAULT;
2901
2902         buf[cnt] = 0;
2903
2904         trace_set_options(buf);
2905
2906         *ppos += cnt;
2907
2908         return cnt;
2909 }
2910
2911 static int tracing_trace_options_open(struct inode *inode, struct file *file)
2912 {
2913         if (tracing_disabled)
2914                 return -ENODEV;
2915         return single_open(file, tracing_trace_options_show, NULL);
2916 }
2917
2918 static const struct file_operations tracing_iter_fops = {
2919         .open           = tracing_trace_options_open,
2920         .read           = seq_read,
2921         .llseek         = seq_lseek,
2922         .release        = single_release,
2923         .write          = tracing_trace_options_write,
2924 };
2925
2926 static const char readme_msg[] =
2927         "tracing mini-HOWTO:\n\n"
2928         "# mount -t debugfs nodev /sys/kernel/debug\n\n"
2929         "# cat /sys/kernel/debug/tracing/available_tracers\n"
2930         "wakeup wakeup_rt preemptirqsoff preemptoff irqsoff function nop\n\n"
2931         "# cat /sys/kernel/debug/tracing/current_tracer\n"
2932         "nop\n"
2933         "# echo wakeup > /sys/kernel/debug/tracing/current_tracer\n"
2934         "# cat /sys/kernel/debug/tracing/current_tracer\n"
2935         "wakeup\n"
2936         "# cat /sys/kernel/debug/tracing/trace_options\n"
2937         "noprint-parent nosym-offset nosym-addr noverbose\n"
2938         "# echo print-parent > /sys/kernel/debug/tracing/trace_options\n"
2939         "# echo 1 > /sys/kernel/debug/tracing/tracing_on\n"
2940         "# cat /sys/kernel/debug/tracing/trace > /tmp/trace.txt\n"
2941         "# echo 0 > /sys/kernel/debug/tracing/tracing_on\n"
2942 ;
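/*
 * Illustrative userspace sketch, not part of this file: the same steps
 * as the mini-HOWTO above, done programmatically. Assumes debugfs is
 * mounted at /sys/kernel/debug and the caller has root privileges.
 */
#if 0
#include <stdio.h>

static int write_str(const char *path, const char *val)
{
        FILE *f = fopen(path, "w");

        if (!f)
                return -1;
        fputs(val, f);
        return fclose(f);
}

int main(void)
{
        char line[4096];
        FILE *trace;

        write_str("/sys/kernel/debug/tracing/current_tracer", "wakeup");
        write_str("/sys/kernel/debug/tracing/tracing_on", "1");
        /* ... let the workload of interest run ... */
        write_str("/sys/kernel/debug/tracing/tracing_on", "0");

        trace = fopen("/sys/kernel/debug/tracing/trace", "r");
        if (!trace)
                return 1;
        while (fgets(line, sizeof(line), trace))
                fputs(line, stdout);
        fclose(trace);
        return 0;
}
#endif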
2943
2944 static ssize_t
2945 tracing_readme_read(struct file *filp, char __user *ubuf,
2946                        size_t cnt, loff_t *ppos)
2947 {
2948         return simple_read_from_buffer(ubuf, cnt, ppos,
2949                                         readme_msg, strlen(readme_msg));
2950 }
2951
2952 static const struct file_operations tracing_readme_fops = {
2953         .open           = tracing_open_generic,
2954         .read           = tracing_readme_read,
2955         .llseek         = generic_file_llseek,
2956 };
2957
2958 static ssize_t
2959 tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
2960                                 size_t cnt, loff_t *ppos)
2961 {
2962         char *buf_comm;
2963         char *file_buf;
2964         char *buf;
2965         int len = 0;
2966         int pid;
2967         int i;
2968
2969         file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL);
2970         if (!file_buf)
2971                 return -ENOMEM;
2972
2973         buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL);
2974         if (!buf_comm) {
2975                 kfree(file_buf);
2976                 return -ENOMEM;
2977         }
2978
2979         buf = file_buf;
2980
2981         for (i = 0; i < SAVED_CMDLINES; i++) {
2982                 int r;
2983
2984                 pid = map_cmdline_to_pid[i];
2985                 if (pid == -1 || pid == NO_CMDLINE_MAP)
2986                         continue;
2987
2988                 trace_find_cmdline(pid, buf_comm);
2989                 r = sprintf(buf, "%d %s\n", pid, buf_comm);
2990                 buf += r;
2991                 len += r;
2992         }
2993
2994         len = simple_read_from_buffer(ubuf, cnt, ppos,
2995                                       file_buf, len);
2996
2997         kfree(file_buf);
2998         kfree(buf_comm);
2999
3000         return len;
3001 }
3002
3003 static const struct file_operations tracing_saved_cmdlines_fops = {
3004     .open       = tracing_open_generic,
3005     .read       = tracing_saved_cmdlines_read,
3006     .llseek     = generic_file_llseek,
3007 };
3008
3009 static ssize_t
3010 tracing_set_trace_read(struct file *filp, char __user *ubuf,
3011                        size_t cnt, loff_t *ppos)
3012 {
3013         char buf[MAX_TRACER_SIZE+2];
3014         int r;
3015
3016         mutex_lock(&trace_types_lock);
3017         if (current_trace)
3018                 r = sprintf(buf, "%s\n", current_trace->name);
3019         else
3020                 r = sprintf(buf, "\n");
3021         mutex_unlock(&trace_types_lock);
3022
3023         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3024 }
3025
3026 int tracer_init(struct tracer *t, struct trace_array *tr)
3027 {
3028         tracing_reset_online_cpus(tr);
3029         return t->init(tr);
3030 }
3031
3032 static void set_buffer_entries(struct trace_array *tr, unsigned long val)
3033 {
3034         int cpu;
3035         for_each_tracing_cpu(cpu)
3036                 tr->data[cpu]->entries = val;
3037 }
3038
3039 /* resize @tr's buffer to the size of @size_tr's entries */
3040 static int resize_buffer_duplicate_size(struct trace_array *tr,
3041                                         struct trace_array *size_tr, int cpu_id)
3042 {
3043         int cpu, ret = 0;
3044
3045         if (cpu_id == RING_BUFFER_ALL_CPUS) {
3046                 for_each_tracing_cpu(cpu) {
3047                         ret = ring_buffer_resize(tr->buffer,
3048                                         size_tr->data[cpu]->entries, cpu);
3049                         if (ret < 0)
3050                                 break;
3051                         tr->data[cpu]->entries = size_tr->data[cpu]->entries;
3052                 }
3053         } else {
3054                 ret = ring_buffer_resize(tr->buffer,
3055                                         size_tr->data[cpu_id]->entries, cpu_id);
3056                 if (ret == 0)
3057                         tr->data[cpu_id]->entries =
3058                                 size_tr->data[cpu_id]->entries;
3059         }
3060
3061         return ret;
3062 }
3063
3064 static int __tracing_resize_ring_buffer(unsigned long size, int cpu)
3065 {
3066         int ret;
3067
3068         /*
3069          * If the kernel or the user changes the size of the ring buffer,
3070          * we use the size that was given, and we can forget about
3071          * expanding it later.
3072          */
3073         ring_buffer_expanded = 1;
3074
3075         /* May be called before buffers are initialized */
3076         if (!global_trace.buffer)
3077                 return 0;
3078
3079         ret = ring_buffer_resize(global_trace.buffer, size, cpu);
3080         if (ret < 0)
3081                 return ret;
3082
3083         if (!current_trace->use_max_tr)
3084                 goto out;
3085
3086         ret = ring_buffer_resize(max_tr.buffer, size, cpu);
3087         if (ret < 0) {
3088                 int r = resize_buffer_duplicate_size(&global_trace,
3089                                                      &global_trace, cpu);
3090                 if (r < 0) {
3091                         /*
3092                          * AARGH! We are left with different
3093                          * size max buffer!!!!
3094                          * The max buffer is our "snapshot" buffer.
3095                          * When a tracer needs a snapshot (one of the
3096                          * latency tracers), it swaps the max buffer
3097                          * with the saved snapshot. We succeeded in
3098                          * updating the size of the main buffer, but failed
3099                          * to update the size of the max buffer. And when we
3100                          * tried to reset the main buffer to the original
3101                          * size, we failed there too. This is very unlikely
3102                          * to happen, but if it does, warn and kill all
3103                          * tracing.
3104                          */
3105                         WARN_ON(1);
3106                         tracing_disabled = 1;
3107                 }
3108                 return ret;
3109         }
3110
3111         if (cpu == RING_BUFFER_ALL_CPUS)
3112                 set_buffer_entries(&max_tr, size);
3113         else
3114                 max_tr.data[cpu]->entries = size;
3115
3116  out:
3117         if (cpu == RING_BUFFER_ALL_CPUS)
3118                 set_buffer_entries(&global_trace, size);
3119         else
3120                 global_trace.data[cpu]->entries = size;
3121
3122         return ret;
3123 }
3124
3125 static ssize_t tracing_resize_ring_buffer(unsigned long size, int cpu_id)
3126 {
3127         int ret = size;
3128
3129         mutex_lock(&trace_types_lock);
3130
3131         if (cpu_id != RING_BUFFER_ALL_CPUS) {
3132                 /* make sure this CPU is enabled in the mask */
3133                 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
3134                         ret = -EINVAL;
3135                         goto out;
3136                 }
3137         }
3138
3139         ret = __tracing_resize_ring_buffer(size, cpu_id);
3140         if (ret < 0)
3141                 ret = -ENOMEM;
3142
3143 out:
3144         mutex_unlock(&trace_types_lock);
3145
3146         return ret;
3147 }
3148
3149
3150 /**
3151  * tracing_update_buffers - used by tracing facility to expand ring buffers
3152  *
3153  * To save memory when tracing is never used on a system that has it
3154  * configured in, the ring buffers are set to a minimum size. But once
3155  * a user starts to use the tracing facility, they need to grow
3156  * to their default size.
3157  *
3158  * This function is to be called when a tracer is about to be used.
3159  */
3160 int tracing_update_buffers(void)
3161 {
3162         int ret = 0;
3163
3164         mutex_lock(&trace_types_lock);
3165         if (!ring_buffer_expanded)
3166                 ret = __tracing_resize_ring_buffer(trace_buf_size,
3167                                                 RING_BUFFER_ALL_CPUS);
3168         mutex_unlock(&trace_types_lock);
3169
3170         return ret;
3171 }
3172
3173 struct trace_option_dentry;
3174
3175 static struct trace_option_dentry *
3176 create_trace_option_files(struct tracer *tracer);
3177
3178 static void
3179 destroy_trace_option_files(struct trace_option_dentry *topts);
3180
3181 static int tracing_set_tracer(const char *buf)
3182 {
3183         static struct trace_option_dentry *topts;
3184         struct trace_array *tr = &global_trace;
3185         struct tracer *t;
3186         int ret = 0;
3187
3188         mutex_lock(&trace_types_lock);
3189
3190         if (!ring_buffer_expanded) {
3191                 ret = __tracing_resize_ring_buffer(trace_buf_size,
3192                                                 RING_BUFFER_ALL_CPUS);
3193                 if (ret < 0)
3194                         goto out;
3195                 ret = 0;
3196         }
3197
3198         for (t = trace_types; t; t = t->next) {
3199                 if (strcmp(t->name, buf) == 0)
3200                         break;
3201         }
3202         if (!t) {
3203                 ret = -EINVAL;
3204                 goto out;
3205         }
3206         if (t == current_trace)
3207                 goto out;
3208
3209         trace_branch_disable();
3210         if (current_trace && current_trace->reset)
3211                 current_trace->reset(tr);
3212         if (current_trace && current_trace->use_max_tr) {
3213                 /*
3214                  * We don't free the ring buffer; instead, we resize it because
3215                  * the max_tr ring buffer has some state (e.g. ring->clock) and
3216                  * we want to preserve it.
3217                  */
3218                 ring_buffer_resize(max_tr.buffer, 1, RING_BUFFER_ALL_CPUS);
3219                 set_buffer_entries(&max_tr, 1);
3220         }
3221         destroy_trace_option_files(topts);
3222
3223         current_trace = &nop_trace;
3224
3225         topts = create_trace_option_files(t);
3226         if (t->use_max_tr) {
3227                 /* we need to make the per-CPU buffer sizes equivalent */
3228                 ret = resize_buffer_duplicate_size(&max_tr, &global_trace,
3229                                                    RING_BUFFER_ALL_CPUS);
3230                 if (ret < 0)
3231                         goto out;
3232         }
3233
3234         if (t->init) {
3235                 ret = tracer_init(t, tr);
3236                 if (ret)
3237                         goto out;
3238         }
3239
3240         current_trace = t;
3241         trace_branch_enable(tr);
3242  out:
3243         mutex_unlock(&trace_types_lock);
3244
3245         return ret;
3246 }
3247
3248 static ssize_t
3249 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
3250                         size_t cnt, loff_t *ppos)
3251 {
3252         char buf[MAX_TRACER_SIZE+1];
3253         int i;
3254         size_t ret;
3255         int err;
3256
3257         ret = cnt;
3258
3259         if (cnt > MAX_TRACER_SIZE)
3260                 cnt = MAX_TRACER_SIZE;
3261
3262         if (copy_from_user(&buf, ubuf, cnt))
3263                 return -EFAULT;
3264
3265         buf[cnt] = 0;
3266
3267         /* strip trailing whitespace. */
3268         for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
3269                 buf[i] = 0;
3270
3271         err = tracing_set_tracer(buf);
3272         if (err)
3273                 return err;
3274
3275         *ppos += ret;
3276
3277         return ret;
3278 }
3279
3280 static ssize_t
3281 tracing_max_lat_read(struct file *filp, char __user *ubuf,
3282                      size_t cnt, loff_t *ppos)
3283 {
3284         unsigned long *ptr = filp->private_data;
3285         char buf[64];
3286         int r;
3287
3288         r = snprintf(buf, sizeof(buf), "%ld\n",
3289                      *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
3290         if (r > sizeof(buf))
3291                 r = sizeof(buf);
3292         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3293 }
3294
3295 static ssize_t
3296 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
3297                       size_t cnt, loff_t *ppos)
3298 {
3299         unsigned long *ptr = filp->private_data;
3300         unsigned long val;
3301         int ret;
3302
3303         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3304         if (ret)
3305                 return ret;
3306
3307         *ptr = val * 1000;      /* value is in usecs; stored as nsecs */
3308
3309         return cnt;
3310 }
3311
3312 static int tracing_open_pipe(struct inode *inode, struct file *filp)
3313 {
3314         long cpu_file = (long) inode->i_private;
3315         struct trace_iterator *iter;
3316         int ret = 0;
3317
3318         if (tracing_disabled)
3319                 return -ENODEV;
3320
3321         mutex_lock(&trace_types_lock);
3322
3323         /* create a buffer to store the information to pass to userspace */
3324         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
3325         if (!iter) {
3326                 ret = -ENOMEM;
3327                 goto out;
3328         }
3329
3330         /*
3331          * We make a copy of the current tracer to avoid concurrent
3332          * changes on it while we are reading.
3333          */
3334         iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
3335         if (!iter->trace) {
3336                 ret = -ENOMEM;
3337                 goto fail;
3338         }
3339         if (current_trace)
3340                 *iter->trace = *current_trace;
3341
3342         if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
3343                 ret = -ENOMEM;
3344                 goto fail;
3345         }
3346
3347         /* trace_pipe does not show the start of the buffer */
3348         cpumask_setall(iter->started);
3349
3350         if (trace_flags & TRACE_ITER_LATENCY_FMT)
3351                 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3352
3353         /* Output in nanoseconds only if we are using a clock in nanoseconds. */
3354         if (trace_clocks[trace_clock_id].in_ns)
3355                 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3356
3357         iter->cpu_file = cpu_file;
3358         iter->tr = &global_trace;
3359         mutex_init(&iter->mutex);
3360         filp->private_data = iter;
3361
3362         if (iter->trace->pipe_open)
3363                 iter->trace->pipe_open(iter);
3364
3365         nonseekable_open(inode, filp);
3366 out:
3367         mutex_unlock(&trace_types_lock);
3368         return ret;
3369
3370 fail:
3371         kfree(iter->trace);
3372         kfree(iter);
3373         mutex_unlock(&trace_types_lock);
3374         return ret;
3375 }
3376
3377 static int tracing_release_pipe(struct inode *inode, struct file *file)
3378 {
3379         struct trace_iterator *iter = file->private_data;
3380
3381         mutex_lock(&trace_types_lock);
3382
3383         if (iter->trace->pipe_close)
3384                 iter->trace->pipe_close(iter);
3385
3386         mutex_unlock(&trace_types_lock);
3387
3388         free_cpumask_var(iter->started);
3389         mutex_destroy(&iter->mutex);
3390         kfree(iter->trace);
3391         kfree(iter);
3392
3393         return 0;
3394 }
3395
3396 static unsigned int
3397 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
3398 {
3399         struct trace_iterator *iter = filp->private_data;
3400
3401         if (trace_flags & TRACE_ITER_BLOCK) {
3402                 /*
3403                  * Always select as readable when in blocking mode
3404                  */
3405                 return POLLIN | POLLRDNORM;
3406         } else {
3407                 if (!trace_empty(iter))
3408                         return POLLIN | POLLRDNORM;
3409                 poll_wait(filp, &trace_wait, poll_table);
3410                 if (!trace_empty(iter))
3411                         return POLLIN | POLLRDNORM;
3412
3413                 return 0;
3414         }
3415 }
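
/*
 * Illustrative sketch (userspace, not built as part of this file):
 * waiting for data with poll(2), which ends up in tracing_poll_pipe()
 * above. Assumes debugfs is mounted at /sys/kernel/debug.
 */
#if 0
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

static int wait_for_trace_data(void)
{
        struct pollfd pfd;
        char buf[4096];
        ssize_t n = 0;

        pfd.fd = open("/sys/kernel/debug/tracing/trace_pipe", O_RDONLY);
        if (pfd.fd < 0)
                return -1;
        pfd.events = POLLIN;
        /* Wait up to five seconds for the buffer to become readable. */
        if (poll(&pfd, 1, 5000) > 0 && (pfd.revents & POLLIN))
                n = read(pfd.fd, buf, sizeof(buf));
        close(pfd.fd);
        return n < 0 ? -1 : 0;
}
#endif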
3416
3417 /*
3418  * This is a makeshift waitqueue.
3419  * A tracer might use this callback in some rare cases:
3420  *
3421  *  1) the current tracer might hold the runqueue lock when it wakes up
3422  *     a reader, hence a deadlock (sched, function, and function graph tracers)
3423  *  2) the function tracers trace all functions; we don't want
3424  *     the overhead of calling wake_up and friends
3425  *     (and tracing them too)
3426  *
3427  *     Either way, this is a very primitive wakeup.
3428  */
3429 void poll_wait_pipe(struct trace_iterator *iter)
3430 {
3431         set_current_state(TASK_INTERRUPTIBLE);
3432         /* sleep for 100 msecs, and try again. */
3433         schedule_timeout(HZ / 10);
3434 }
3435
3436 /* Must be called with trace_types_lock mutex held. */
3437 static int tracing_wait_pipe(struct file *filp)
3438 {
3439         struct trace_iterator *iter = filp->private_data;
3440
3441         while (trace_empty(iter)) {
3442
3443                 if ((filp->f_flags & O_NONBLOCK)) {
3444                         return -EAGAIN;
3445                 }
3446
3447                 mutex_unlock(&iter->mutex);
3448
3449                 iter->trace->wait_pipe(iter);
3450
3451                 mutex_lock(&iter->mutex);
3452
3453                 if (signal_pending(current))
3454                         return -EINTR;
3455
3456                 /*
3457                  * We stop blocking only once we have read something and tracing
3458                  * has been disabled. If tracing is disabled but we have never
3459                  * read anything, we keep blocking; this allows a user to cat
3460                  * this file and then enable tracing. After we have read something,
3461                  * we give an EOF when tracing is disabled again.
3462                  *
3463                  * iter->pos will be 0 if we haven't read anything.
3464                  */
3465                 if (!tracing_is_enabled() && iter->pos)
3466                         break;
3467         }
3468
3469         return 1;
3470 }
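
/*
 * Illustrative sketch (userspace, not built as part of this file): the
 * O_NONBLOCK behaviour implemented above; an empty buffer yields
 * -EAGAIN instead of putting the reader to sleep.
 */
#if 0
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

static ssize_t read_trace_nonblock(char *buf, size_t len)
{
        ssize_t n;
        int fd = open("/sys/kernel/debug/tracing/trace_pipe",
                      O_RDONLY | O_NONBLOCK);

        if (fd < 0)
                return -1;
        n = read(fd, buf, len);
        if (n < 0 && errno == EAGAIN)
                n = 0;  /* no data yet; the caller may retry later */
        close(fd);
        return n;
}
#endif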
3471
3472 /*
3473  * Consumer reader.
3474  */
3475 static ssize_t
3476 tracing_read_pipe(struct file *filp, char __user *ubuf,
3477                   size_t cnt, loff_t *ppos)
3478 {
3479         struct trace_iterator *iter = filp->private_data;
3480         static struct tracer *old_tracer;
3481         ssize_t sret;
3482
3483         /* return any leftover data */
3484         sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
3485         if (sret != -EBUSY)
3486                 return sret;
3487
3488         trace_seq_init(&iter->seq);
3489
3490         /* copy the tracer to avoid using a global lock all around */
3491         mutex_lock(&trace_types_lock);
3492         if (unlikely(old_tracer != current_trace && current_trace)) {
3493                 old_tracer = current_trace;
3494                 *iter->trace = *current_trace;
3495         }
3496         mutex_unlock(&trace_types_lock);
3497
3498         /*
3499          * Avoid more than one consumer on a single file descriptor.
3500          * This is just a matter of trace coherency; the ring buffer
3501          * itself is protected.
3502          */
3503         mutex_lock(&iter->mutex);
3504         if (iter->trace->read) {
3505                 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
3506                 if (sret)
3507                         goto out;
3508         }
3509
3510 waitagain:
3511         sret = tracing_wait_pipe(filp);
3512         if (sret <= 0)
3513                 goto out;
3514
3515         /* stop when tracing is finished */
3516         if (trace_empty(iter)) {
3517                 sret = 0;
3518                 goto out;
3519         }
3520
3521         if (cnt >= PAGE_SIZE)
3522                 cnt = PAGE_SIZE - 1;
3523
3524         /* reset all but tr, trace, and overruns */
3525         memset(&iter->seq, 0,
3526                sizeof(struct trace_iterator) -
3527                offsetof(struct trace_iterator, seq));
3528         iter->pos = -1;
3529
3530         trace_event_read_lock();
3531         trace_access_lock(iter->cpu_file);
3532         while (trace_find_next_entry_inc(iter) != NULL) {
3533                 enum print_line_t ret;
3534                 int len = iter->seq.len;
3535
3536                 ret = print_trace_line(iter);
3537                 if (ret == TRACE_TYPE_PARTIAL_LINE) {
3538                         /* don't print partial lines */
3539                         iter->seq.len = len;
3540                         break;
3541                 }
3542                 if (ret != TRACE_TYPE_NO_CONSUME)
3543                         trace_consume(iter);
3544
3545                 if (iter->seq.len >= cnt)
3546                         break;
3547
3548                 /*
3549                  * The full flag means we reached the trace_seq buffer size and
3550                  * should have left via the partial-output condition above; one
3551                  * of the trace_seq_* functions is not being used properly.
3552                  */
3553                 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
3554                           iter->ent->type);
3555         }
3556         trace_access_unlock(iter->cpu_file);
3557         trace_event_read_unlock();
3558
3559         /* Now copy what we have to the user */
3560         sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
3561         if (iter->seq.readpos >= iter->seq.len)
3562                 trace_seq_init(&iter->seq);
3563
3564         /*
3565          * If there was nothing to send to user, in spite of consuming trace
3566          * entries, go back to wait for more entries.
3567          */
3568         if (sret == -EBUSY)
3569                 goto waitagain;
3570
3571 out:
3572         mutex_unlock(&iter->mutex);
3573
3574         return sret;
3575 }
3576
3577 static void tracing_pipe_buf_release(struct pipe_inode_info *pipe,
3578                                      struct pipe_buffer *buf)
3579 {
3580         __free_page(buf->page);
3581 }
3582
3583 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
3584                                      unsigned int idx)
3585 {
3586         __free_page(spd->pages[idx]);
3587 }
3588
3589 static const struct pipe_buf_operations tracing_pipe_buf_ops = {
3590         .can_merge              = 0,
3591         .map                    = generic_pipe_buf_map,
3592         .unmap                  = generic_pipe_buf_unmap,
3593         .confirm                = generic_pipe_buf_confirm,
3594         .release                = tracing_pipe_buf_release,
3595         .steal                  = generic_pipe_buf_steal,
3596         .get                    = generic_pipe_buf_get,
3597 };
3598
3599 static size_t
3600 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
3601 {
3602         size_t count;
3603         int ret;
3604
3605         /* Seq buffer is page-sized, exactly what we need. */
3606         for (;;) {
3607                 count = iter->seq.len;
3608                 ret = print_trace_line(iter);
3609                 count = iter->seq.len - count;
3610                 if (rem < count) {
3611                         rem = 0;
3612                         iter->seq.len -= count;
3613                         break;
3614                 }
3615                 if (ret == TRACE_TYPE_PARTIAL_LINE) {
3616                         iter->seq.len -= count;
3617                         break;
3618                 }
3619
3620                 if (ret != TRACE_TYPE_NO_CONSUME)
3621                         trace_consume(iter);
3622                 rem -= count;
3623                 if (!trace_find_next_entry_inc(iter))   {
3624                         rem = 0;
3625                         iter->ent = NULL;
3626                         break;
3627                 }
3628         }
3629
3630         return rem;
3631 }
3632
3633 static ssize_t tracing_splice_read_pipe(struct file *filp,
3634                                         loff_t *ppos,
3635                                         struct pipe_inode_info *pipe,
3636                                         size_t len,
3637                                         unsigned int flags)
3638 {
3639         struct page *pages_def[PIPE_DEF_BUFFERS];
3640         struct partial_page partial_def[PIPE_DEF_BUFFERS];
3641         struct trace_iterator *iter = filp->private_data;
3642         struct splice_pipe_desc spd = {
3643                 .pages          = pages_def,
3644                 .partial        = partial_def,
3645                 .nr_pages       = 0, /* This gets updated below. */
3646                 .nr_pages_max   = PIPE_DEF_BUFFERS,
3647                 .flags          = flags,
3648                 .ops            = &tracing_pipe_buf_ops,
3649                 .spd_release    = tracing_spd_release_pipe,
3650         };
3651         static struct tracer *old_tracer;
3652         ssize_t ret;
3653         size_t rem;
3654         unsigned int i;
3655
3656         if (splice_grow_spd(pipe, &spd))
3657                 return -ENOMEM;
3658
3659         /* copy the tracer to avoid using a global lock all around */
3660         mutex_lock(&trace_types_lock);
3661         if (unlikely(old_tracer != current_trace && current_trace)) {
3662                 old_tracer = current_trace;
3663                 *iter->trace = *current_trace;
3664         }
3665         mutex_unlock(&trace_types_lock);
3666
3667         mutex_lock(&iter->mutex);
3668
3669         if (iter->trace->splice_read) {
3670                 ret = iter->trace->splice_read(iter, filp,
3671                                                ppos, pipe, len, flags);
3672                 if (ret)
3673                         goto out_err;
3674         }
3675
3676         ret = tracing_wait_pipe(filp);
3677         if (ret <= 0)
3678                 goto out_err;
3679
3680         if (!iter->ent && !trace_find_next_entry_inc(iter)) {
3681                 ret = -EFAULT;
3682                 goto out_err;
3683         }
3684
3685         trace_event_read_lock();
3686         trace_access_lock(iter->cpu_file);
3687
3688         /* Fill as many pages as possible. */
3689         for (i = 0, rem = len; i < pipe->buffers && rem; i++) {
3690                 spd.pages[i] = alloc_page(GFP_KERNEL);
3691                 if (!spd.pages[i])
3692                         break;
3693
3694                 rem = tracing_fill_pipe_page(rem, iter);
3695
3696                 /* Copy the data into the page, so we can start over. */
3697                 ret = trace_seq_to_buffer(&iter->seq,
3698                                           page_address(spd.pages[i]),
3699                                           iter->seq.len);
3700                 if (ret < 0) {
3701                         __free_page(spd.pages[i]);
3702                         break;
3703                 }
3704                 spd.partial[i].offset = 0;
3705                 spd.partial[i].len = iter->seq.len;
3706
3707                 trace_seq_init(&iter->seq);
3708         }
3709
3710         trace_access_unlock(iter->cpu_file);
3711         trace_event_read_unlock();
3712         mutex_unlock(&iter->mutex);
3713
3714         spd.nr_pages = i;
3715
3716         ret = splice_to_pipe(pipe, &spd);
3717 out:
3718         splice_shrink_spd(&spd);
3719         return ret;
3720
3721 out_err:
3722         mutex_unlock(&iter->mutex);
3723         goto out;
3724 }
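
/*
 * Illustrative sketch (userspace, not built as part of this file):
 * moving formatted trace data with splice(2), which lands in
 * tracing_splice_read_pipe() above. Assumes debugfs is mounted at
 * /sys/kernel/debug.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

static int splice_trace_example(void)
{
        int tfd, pfd[2];
        ssize_t n;

        tfd = open("/sys/kernel/debug/tracing/trace_pipe", O_RDONLY);
        if (tfd < 0)
                return -1;
        if (pipe(pfd) < 0) {
                close(tfd);
                return -1;
        }
        /* Move up to 64K into the pipe without a userspace copy. */
        n = splice(tfd, NULL, pfd[1], NULL, 65536, SPLICE_F_MOVE);
        if (n > 0)
                splice(pfd[0], NULL, STDOUT_FILENO, NULL, n, SPLICE_F_MOVE);
        close(pfd[0]);
        close(pfd[1]);
        close(tfd);
        return n < 0 ? -1 : 0;
}
#endif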
3725
3726 struct ftrace_entries_info {
3727         struct trace_array      *tr;
3728         int                     cpu;
3729 };
3730
3731 static int tracing_entries_open(struct inode *inode, struct file *filp)
3732 {
3733         struct ftrace_entries_info *info;
3734
3735         if (tracing_disabled)
3736                 return -ENODEV;
3737
3738         info = kzalloc(sizeof(*info), GFP_KERNEL);
3739         if (!info)
3740                 return -ENOMEM;
3741
3742         info->tr = &global_trace;
3743         info->cpu = (unsigned long)inode->i_private;
3744
3745         filp->private_data = info;
3746
3747         return 0;
3748 }
3749
3750 static ssize_t
3751 tracing_entries_read(struct file *filp, char __user *ubuf,
3752                      size_t cnt, loff_t *ppos)
3753 {
3754         struct ftrace_entries_info *info = filp->private_data;
3755         struct trace_array *tr = info->tr;
3756         char buf[64];
3757         int r = 0;
3758         ssize_t ret;
3759
3760         mutex_lock(&trace_types_lock);
3761
3762         if (info->cpu == RING_BUFFER_ALL_CPUS) {
3763                 int cpu, buf_size_same;
3764                 unsigned long size;
3765
3766                 size = 0;
3767                 buf_size_same = 1;
3768                 /* check if all per-cpu buffer sizes are the same */
3769                 for_each_tracing_cpu(cpu) {
3770                         /* take the size from the first enabled cpu */
3771                         if (size == 0)
3772                                 size = tr->data[cpu]->entries;
3773                         if (size != tr->data[cpu]->entries) {
3774                                 buf_size_same = 0;
3775                                 break;
3776                         }
3777                 }
3778
3779                 if (buf_size_same) {
3780                         if (!ring_buffer_expanded)
3781                                 r = sprintf(buf, "%lu (expanded: %lu)\n",
3782                                             size >> 10,
3783                                             trace_buf_size >> 10);
3784                         else
3785                                 r = sprintf(buf, "%lu\n", size >> 10);
3786                 } else
3787                         r = sprintf(buf, "X\n");
3788         } else
3789                 r = sprintf(buf, "%lu\n", tr->data[info->cpu]->entries >> 10);
3790
3791         mutex_unlock(&trace_types_lock);
3792
3793         ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3794         return ret;
3795 }
3796
3797 static ssize_t
3798 tracing_entries_write(struct file *filp, const char __user *ubuf,
3799                       size_t cnt, loff_t *ppos)
3800 {
3801         struct ftrace_entries_info *info = filp->private_data;
3802         unsigned long val;
3803         int ret;
3804
3805         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3806         if (ret)
3807                 return ret;
3808
3809         /* must have at least 1 entry */
3810         if (!val)
3811                 return -EINVAL;
3812
3813         /* value is in KB */
3814         val <<= 10;
3815
3816         ret = tracing_resize_ring_buffer(val, info->cpu);
3817         if (ret < 0)
3818                 return ret;
3819
3820         *ppos += cnt;
3821
3822         return cnt;
3823 }
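
/*
 * Illustrative sketch (userspace, not built as part of this file): the
 * value written above is in KB (val <<= 10), so writing "4096" requests
 * 4 MB of ring buffer per cpu. Assumes debugfs is mounted at
 * /sys/kernel/debug.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int resize_trace_buffer(void)
{
        int fd = open("/sys/kernel/debug/tracing/buffer_size_kb", O_WRONLY);

        if (fd < 0)
                return -1;
        if (write(fd, "4096", 4) != 4) {
                close(fd);
                return -1;
        }
        return close(fd);
}
#endif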
3824
3825 static int
3826 tracing_entries_release(struct inode *inode, struct file *filp)
3827 {
3828         struct ftrace_entries_info *info = filp->private_data;
3829
3830         kfree(info);
3831
3832         return 0;
3833 }
3834
3835 static ssize_t
3836 tracing_total_entries_read(struct file *filp, char __user *ubuf,
3837                                 size_t cnt, loff_t *ppos)
3838 {
3839         struct trace_array *tr = filp->private_data;
3840         char buf[64];
3841         int r, cpu;
3842         unsigned long size = 0, expanded_size = 0;
3843
3844         mutex_lock(&trace_types_lock);
3845         for_each_tracing_cpu(cpu) {
3846                 size += tr->data[cpu]->entries >> 10;
3847                 if (!ring_buffer_expanded)
3848                         expanded_size += trace_buf_size >> 10;
3849         }
3850         if (ring_buffer_expanded)
3851                 r = sprintf(buf, "%lu\n", size);
3852         else
3853                 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
3854         mutex_unlock(&trace_types_lock);
3855
3856         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3857 }
3858
3859 static ssize_t
3860 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
3861                           size_t cnt, loff_t *ppos)
3862 {
3863         /*
3864          * There is no need to read what the user has written; this function
3865          * exists just so that "echo" to this file does not return an error.
3866          */
3867
3868         *ppos += cnt;
3869
3870         return cnt;
3871 }
3872
3873 static int
3874 tracing_free_buffer_release(struct inode *inode, struct file *filp)
3875 {
3876         /* disable tracing? */
3877         if (trace_flags & TRACE_ITER_STOP_ON_FREE)
3878                 tracing_off();
3879         /* resize the ring buffer to 0 */
3880         tracing_resize_ring_buffer(0, RING_BUFFER_ALL_CPUS);
3881
3882         return 0;
3883 }
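
/*
 * Illustrative sketch (userspace, not built as part of this file): any
 * write to free_buffer succeeds, and it is the final close that shrinks
 * the ring buffer (see tracing_free_buffer_release() above).
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int free_trace_buffer(void)
{
        int fd = open("/sys/kernel/debug/tracing/free_buffer", O_WRONLY);

        if (fd < 0)
                return -1;
        write(fd, "1", 1);      /* the contents are ignored */
        return close(fd);       /* close triggers the resize to zero */
}
#endif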
3884
3885 static ssize_t
3886 tracing_mark_write(struct file *filp, const char __user *ubuf,
3887                                         size_t cnt, loff_t *fpos)
3888 {
3889         unsigned long addr = (unsigned long)ubuf;
3890         struct ring_buffer_event *event;
3891         struct ring_buffer *buffer;
3892         struct print_entry *entry;
3893         unsigned long irq_flags;
3894         struct page *pages[2];
3895         void *map_page[2];
3896         int nr_pages = 1;
3897         ssize_t written;
3898         int offset;
3899         int size;
3900         int len;
3901         int ret;
3902         int i;
3903
3904         if (tracing_disabled)
3905                 return -EINVAL;
3906
3907         if (!(trace_flags & TRACE_ITER_MARKERS))
3908                 return -EINVAL;
3909
3910         if (cnt > TRACE_BUF_SIZE)
3911                 cnt = TRACE_BUF_SIZE;
3912
3913         /*
3914          * Userspace is injecting traces into the kernel trace buffer.
3915          * We want to be as non-intrusive as possible.
3916          * To do so, we do not want to allocate any special buffers
3917          * or take any locks, but instead write the userspace data
3918          * straight into the ring buffer.
3919          *
3920          * First we need to pin the userspace buffer into memory. It
3921          * most likely already is, because userspace just referenced it,
3922          * but there is no guarantee. By using get_user_pages_fast()
3923          * and kmap_atomic()/kunmap_atomic() we can access the pages
3924          * directly, and then write the data straight into the
3925          * ring buffer.
3926          */
3927         BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
3928
3929         /* check if the write crosses a page boundary */
3930         if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
3931                 nr_pages = 2;
3932
3933         offset = addr & (PAGE_SIZE - 1);
3934         addr &= PAGE_MASK;
3935
3936         ret = get_user_pages_fast(addr, nr_pages, 0, pages);
3937         if (ret < nr_pages) {
3938                 while (--ret >= 0)
3939                         put_page(pages[ret]);
3940                 written = -EFAULT;
3941                 goto out;
3942         }
3943
3944         for (i = 0; i < nr_pages; i++)
3945                 map_page[i] = kmap_atomic(pages[i]);
3946
3947         local_save_flags(irq_flags);
3948         size = sizeof(*entry) + cnt + 2; /* possible \n added */
3949         buffer = global_trace.buffer;
3950         event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3951                                           irq_flags, preempt_count());
3952         if (!event) {
3953                 /* Ring buffer disabled, return as if not open for write */
3954                 written = -EBADF;
3955                 goto out_unlock;
3956         }
3957
3958         entry = ring_buffer_event_data(event);
3959         entry->ip = _THIS_IP_;
3960
3961         if (nr_pages == 2) {
3962                 len = PAGE_SIZE - offset;
3963                 memcpy(&entry->buf, map_page[0] + offset, len);
3964                 memcpy(&entry->buf[len], map_page[1], cnt - len);
3965         } else
3966                 memcpy(&entry->buf, map_page[0] + offset, cnt);
3967
3968         if (entry->buf[cnt - 1] != '\n') {
3969                 entry->buf[cnt] = '\n';
3970                 entry->buf[cnt + 1] = '\0';
3971         } else
3972                 entry->buf[cnt] = '\0';
3973
3974         __buffer_unlock_commit(buffer, event);
3975
3976         written = cnt;
3977
3978         *fpos += written;
3979
3980  out_unlock:
3981         for (i = 0; i < nr_pages; i++) {
3982                 kunmap_atomic(map_page[i]);
3983                 put_page(pages[i]);
3984         }
3985  out:
3986         return written;
3987 }
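
/*
 * Illustrative sketch (userspace, not built as part of this file):
 * injecting a marker through the write path above. The kernel appends
 * a newline if the message lacks one.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int write_trace_marker(const char *msg)
{
        int fd = open("/sys/kernel/debug/tracing/trace_marker", O_WRONLY);

        if (fd < 0)
                return -1;
        if (write(fd, msg, strlen(msg)) < 0) {
                close(fd);
                return -1;
        }
        return close(fd);
}
#endif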
3988
3989 static int tracing_clock_show(struct seq_file *m, void *v)
3990 {
3991         int i;
3992
3993         for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
3994                 seq_printf(m,
3995                         "%s%s%s%s", i ? " " : "",
3996                         i == trace_clock_id ? "[" : "", trace_clocks[i].name,
3997                         i == trace_clock_id ? "]" : "");
3998         seq_putc(m, '\n');
3999
4000         return 0;
4001 }
4002
4003 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
4004                                    size_t cnt, loff_t *fpos)
4005 {
4006         char buf[64];
4007         const char *clockstr;
4008         int i;
4009
4010         if (cnt >= sizeof(buf))
4011                 return -EINVAL;
4012
4013         if (copy_from_user(&buf, ubuf, cnt))
4014                 return -EFAULT;
4015
4016         buf[cnt] = 0;
4017
4018         clockstr = strstrip(buf);
4019
4020         for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
4021                 if (strcmp(trace_clocks[i].name, clockstr) == 0)
4022                         break;
4023         }
4024         if (i == ARRAY_SIZE(trace_clocks))
4025                 return -EINVAL;
4026
4027         trace_clock_id = i;
4028
4029         mutex_lock(&trace_types_lock);
4030
4031         ring_buffer_set_clock(global_trace.buffer, trace_clocks[i].func);
4032         if (max_tr.buffer)
4033                 ring_buffer_set_clock(max_tr.buffer, trace_clocks[i].func);
4034
4035         /*
4036          * New clock may not be consistent with the previous clock.
4037          * Reset the buffer so that it doesn't have incomparable timestamps.
4038          */
4039         tracing_reset_online_cpus(&global_trace);
4040         if (max_tr.buffer)
4041                 tracing_reset_online_cpus(&max_tr);
4042
4043         mutex_unlock(&trace_types_lock);
4044
4045         *fpos += cnt;
4046
4047         return cnt;
4048 }
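
/*
 * Illustrative sketch (userspace, not built as part of this file):
 * selecting a trace clock. Reading trace_clock lists the available
 * clocks with the current one in brackets; writing one of the listed
 * names, e.g. "global", switches to it and resets the buffers.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int use_global_trace_clock(void)
{
        int fd = open("/sys/kernel/debug/tracing/trace_clock", O_WRONLY);

        if (fd < 0)
                return -1;
        if (write(fd, "global", 6) != 6) {
                close(fd);
                return -1;
        }
        return close(fd);
}
#endif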
4049
4050 static int tracing_clock_open(struct inode *inode, struct file *file)
4051 {
4052         if (tracing_disabled)
4053                 return -ENODEV;
4054         return single_open(file, tracing_clock_show, NULL);
4055 }
4056
4057 static const struct file_operations tracing_max_lat_fops = {
4058         .open           = tracing_open_generic,
4059         .read           = tracing_max_lat_read,
4060         .write          = tracing_max_lat_write,
4061         .llseek         = generic_file_llseek,
4062 };
4063
4064 static const struct file_operations set_tracer_fops = {
4065         .open           = tracing_open_generic,
4066         .read           = tracing_set_trace_read,
4067         .write          = tracing_set_trace_write,
4068         .llseek         = generic_file_llseek,
4069 };
4070
4071 static const struct file_operations tracing_pipe_fops = {
4072         .open           = tracing_open_pipe,
4073         .poll           = tracing_poll_pipe,
4074         .read           = tracing_read_pipe,
4075         .splice_read    = tracing_splice_read_pipe,
4076         .release        = tracing_release_pipe,
4077         .llseek         = no_llseek,
4078 };
4079
4080 static const struct file_operations tracing_entries_fops = {
4081         .open           = tracing_entries_open,
4082         .read           = tracing_entries_read,
4083         .write          = tracing_entries_write,
4084         .release        = tracing_entries_release,
4085         .llseek         = generic_file_llseek,
4086 };
4087
4088 static const struct file_operations tracing_total_entries_fops = {
4089         .open           = tracing_open_generic,
4090         .read           = tracing_total_entries_read,
4091         .llseek         = generic_file_llseek,
4092 };
4093
4094 static const struct file_operations tracing_free_buffer_fops = {
4095         .write          = tracing_free_buffer_write,
4096         .release        = tracing_free_buffer_release,
4097 };
4098
4099 static const struct file_operations tracing_mark_fops = {
4100         .open           = tracing_open_generic,
4101         .write          = tracing_mark_write,
4102         .llseek         = generic_file_llseek,
4103 };
4104
4105 static const struct file_operations trace_clock_fops = {
4106         .open           = tracing_clock_open,
4107         .read           = seq_read,
4108         .llseek         = seq_lseek,
4109         .release        = single_release,
4110         .write          = tracing_clock_write,
4111 };
4112
4113 struct ftrace_buffer_info {
4114         struct trace_array      *tr;
4115         void                    *spare;
4116         int                     cpu;
4117         unsigned int            read;
4118 };
4119
4120 static int tracing_buffers_open(struct inode *inode, struct file *filp)
4121 {
4122         int cpu = (int)(long)inode->i_private;
4123         struct ftrace_buffer_info *info;
4124
4125         if (tracing_disabled)
4126                 return -ENODEV;
4127
4128         info = kzalloc(sizeof(*info), GFP_KERNEL);
4129         if (!info)
4130                 return -ENOMEM;
4131
4132         info->tr        = &global_trace;
4133         info->cpu       = cpu;
4134         info->spare     = NULL;
4135         /* Force reading ring buffer for first read */
4136         info->read      = (unsigned int)-1;
4137
4138         filp->private_data = info;
4139
4140         return nonseekable_open(inode, filp);
4141 }
4142
4143 static ssize_t
4144 tracing_buffers_read(struct file *filp, char __user *ubuf,
4145                      size_t count, loff_t *ppos)
4146 {
4147         struct ftrace_buffer_info *info = filp->private_data;
4148         ssize_t ret;
4149         size_t size;
4150
4151         if (!count)
4152                 return 0;
4153
4154         if (!info->spare)
4155                 info->spare = ring_buffer_alloc_read_page(info->tr->buffer, info->cpu);
4156         if (!info->spare)
4157                 return -ENOMEM;
4158
4159         /* Do we have leftover data from a previous read? */
4160         if (info->read < PAGE_SIZE)
4161                 goto read;
4162
4163         trace_access_lock(info->cpu);
4164         ret = ring_buffer_read_page(info->tr->buffer,
4165                                     &info->spare,
4166                                     count,
4167                                     info->cpu, 0);
4168         trace_access_unlock(info->cpu);
4169         if (ret < 0)
4170                 return 0;
4171
4172         info->read = 0;
4173
4174 read:
4175         size = PAGE_SIZE - info->read;
4176         if (size > count)
4177                 size = count;
4178
4179         ret = copy_to_user(ubuf, info->spare + info->read, size);
4180         if (ret == size)
4181                 return -EFAULT;
4182         size -= ret;
4183
4184         *ppos += size;
4185         info->read += size;
4186
4187         return size;
4188 }
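
/*
 * Illustrative sketch (userspace, not built as part of this file):
 * trace_pipe_raw hands out ring-buffer pages as-is, so reads return
 * page-sized binary chunks rather than formatted text. The 4K page
 * size below is an assumption for the example.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int drain_raw_pages(void)
{
        char page[4096];
        ssize_t n;
        int fd = open("/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw",
                      O_RDONLY);

        if (fd < 0)
                return -1;
        /* Each successful read returns one ring-buffer page. */
        while ((n = read(fd, page, sizeof(page))) > 0)
                ;
        close(fd);
        return 0;
}
#endif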
4189
4190 static int tracing_buffers_release(struct inode *inode, struct file *file)
4191 {
4192         struct ftrace_buffer_info *info = file->private_data;
4193
4194         if (info->spare)
4195                 ring_buffer_free_read_page(info->tr->buffer, info->spare);
4196         kfree(info);
4197
4198         return 0;
4199 }
4200
4201 struct buffer_ref {
4202         struct ring_buffer      *buffer;
4203         void                    *page;
4204         int                     ref;
4205 };
4206
4207 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
4208                                     struct pipe_buffer *buf)
4209 {
4210         struct buffer_ref *ref = (struct buffer_ref *)buf->private;
4211
4212         if (--ref->ref)
4213                 return;
4214
4215         ring_buffer_free_read_page(ref->buffer, ref->page);
4216         kfree(ref);
4217         buf->private = 0;
4218 }
4219
4220 static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
4221                                 struct pipe_buffer *buf)
4222 {
4223         struct buffer_ref *ref = (struct buffer_ref *)buf->private;
4224
4225         ref->ref++;
4226 }
4227
4228 /* Pipe buffer operations for a buffer. */
4229 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
4230         .can_merge              = 0,
4231         .map                    = generic_pipe_buf_map,
4232         .unmap                  = generic_pipe_buf_unmap,
4233         .confirm                = generic_pipe_buf_confirm,
4234         .release                = buffer_pipe_buf_release,
4235         .steal                  = generic_pipe_buf_steal,
4236         .get                    = buffer_pipe_buf_get,
4237 };
4238
4239 /*
4240  * Callback from splice_to_pipe() to release pages left in the spd
4241  * in case we errored out while filling the pipe.
4242  */
4243 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
4244 {
4245         struct buffer_ref *ref =
4246                 (struct buffer_ref *)spd->partial[i].private;
4247
4248         if (--ref->ref)
4249                 return;
4250
4251         ring_buffer_free_read_page(ref->buffer, ref->page);
4252         kfree(ref);
4253         spd->partial[i].private = 0;
4254 }
4255
4256 static ssize_t
4257 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
4258                             struct pipe_inode_info *pipe, size_t len,
4259                             unsigned int flags)
4260 {
4261         struct ftrace_buffer_info *info = file->private_data;
4262         struct partial_page partial_def[PIPE_DEF_BUFFERS];
4263         struct page *pages_def[PIPE_DEF_BUFFERS];
4264         struct splice_pipe_desc spd = {
4265                 .pages          = pages_def,
4266                 .partial        = partial_def,
4267                 .nr_pages_max   = PIPE_DEF_BUFFERS,
4268                 .flags          = flags,
4269                 .ops            = &buffer_pipe_buf_ops,
4270                 .spd_release    = buffer_spd_release,
4271         };
4272         struct buffer_ref *ref;
4273         int entries, size, i;
4274         size_t ret;
4275
4276         if (splice_grow_spd(pipe, &spd))
4277                 return -ENOMEM;
4278
4279         if (*ppos & (PAGE_SIZE - 1)) {
4280                 ret = -EINVAL;
4281                 goto out;
4282         }
4283
4284         if (len & (PAGE_SIZE - 1)) {
4285                 if (len < PAGE_SIZE) {
4286                         ret = -EINVAL;
4287                         goto out;
4288                 }
4289                 len &= PAGE_MASK;
4290         }
4291
4292         trace_access_lock(info->cpu);
4293         entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
4294
4295         for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) {
4296                 struct page *page;
4297                 int r;
4298
4299                 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
4300                 if (!ref)
4301                         break;
4302
4303                 ref->ref = 1;
4304                 ref->buffer = info->tr->buffer;
4305                 ref->page = ring_buffer_alloc_read_page(ref->buffer, info->cpu);
4306                 if (!ref->page) {
4307                         kfree(ref);
4308                         break;
4309                 }
4310
4311                 r = ring_buffer_read_page(ref->buffer, &ref->page,
4312                                           len, info->cpu, 1);
4313                 if (r < 0) {
4314                         ring_buffer_free_read_page(ref->buffer, ref->page);
4315                         kfree(ref);
4316                         break;
4317                 }
4318
4319                 /*
4320                  * Zero out any leftover data; this page is
4321                  * going to user land.
4322                  */
4323                 size = ring_buffer_page_len(ref->page);
4324                 if (size < PAGE_SIZE)
4325                         memset(ref->page + size, 0, PAGE_SIZE - size);
4326
4327                 page = virt_to_page(ref->page);
4328
4329                 spd.pages[i] = page;
4330                 spd.partial[i].len = PAGE_SIZE;
4331                 spd.partial[i].offset = 0;
4332                 spd.partial[i].private = (unsigned long)ref;
4333                 spd.nr_pages++;
4334                 *ppos += PAGE_SIZE;
4335
4336                 entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
4337         }
4338
4339         trace_access_unlock(info->cpu);
4340         spd.nr_pages = i;
4341
4342         /* did we read anything? */
4343         if (!spd.nr_pages) {
4344                 if (flags & SPLICE_F_NONBLOCK)
4345                         ret = -EAGAIN;
4346                 else
4347                         ret = 0;
4348                 /* TODO: block */
4349                 goto out;
4350         }
4351
4352         ret = splice_to_pipe(pipe, &spd);
4353         splice_shrink_spd(&spd);
4354 out:
4355         return ret;
4356 }
4357
4358 static const struct file_operations tracing_buffers_fops = {
4359         .open           = tracing_buffers_open,
4360         .read           = tracing_buffers_read,
4361         .release        = tracing_buffers_release,
4362         .splice_read    = tracing_buffers_splice_read,
4363         .llseek         = no_llseek,
4364 };
4365
4366 static ssize_t
4367 tracing_stats_read(struct file *filp, char __user *ubuf,
4368                    size_t count, loff_t *ppos)
4369 {
4370         unsigned long cpu = (unsigned long)filp->private_data;
4371         struct trace_array *tr = &global_trace;
4372         struct trace_seq *s;
4373         unsigned long cnt;
4374         unsigned long long t;
4375         unsigned long usec_rem;
4376
4377         s = kmalloc(sizeof(*s), GFP_KERNEL);
4378         if (!s)
4379                 return -ENOMEM;
4380
4381         trace_seq_init(s);
4382
4383         cnt = ring_buffer_entries_cpu(tr->buffer, cpu);
4384         trace_seq_printf(s, "entries: %ld\n", cnt);
4385
4386         cnt = ring_buffer_overrun_cpu(tr->buffer, cpu);
4387         trace_seq_printf(s, "overrun: %ld\n", cnt);
4388
4389         cnt = ring_buffer_commit_overrun_cpu(tr->buffer, cpu);
4390         trace_seq_printf(s, "commit overrun: %ld\n", cnt);
4391
4392         cnt = ring_buffer_bytes_cpu(tr->buffer, cpu);
4393         trace_seq_printf(s, "bytes: %ld\n", cnt);
4394
4395         if (trace_clocks[trace_clock_id].in_ns) {
4396                 /* local or global for trace_clock */
4397                 t = ns2usecs(ring_buffer_oldest_event_ts(tr->buffer, cpu));
4398                 usec_rem = do_div(t, USEC_PER_SEC);
4399                 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
4400                                                                 t, usec_rem);
4401
4402                 t = ns2usecs(ring_buffer_time_stamp(tr->buffer, cpu));
4403                 usec_rem = do_div(t, USEC_PER_SEC);
4404                 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
4405         } else {
4406                 /* counter or tsc mode for trace_clock */
4407                 trace_seq_printf(s, "oldest event ts: %llu\n",
4408                                 ring_buffer_oldest_event_ts(tr->buffer, cpu));
4409
4410                 trace_seq_printf(s, "now ts: %llu\n",
4411                                 ring_buffer_time_stamp(tr->buffer, cpu));
4412         }
4413
4414         cnt = ring_buffer_dropped_events_cpu(tr->buffer, cpu);
4415         trace_seq_printf(s, "dropped events: %ld\n", cnt);
4416
4417         count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
4418
4419         kfree(s);
4420
4421         return count;
4422 }
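
/*
 * Illustrative sketch (userspace, not built as part of this file):
 * dumping the per-cpu statistics rendered above ("entries:",
 * "overrun:", "bytes:", ...). Assumes debugfs is mounted at
 * /sys/kernel/debug.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int show_cpu0_stats(void)
{
        char buf[512];
        ssize_t n;
        int fd = open("/sys/kernel/debug/tracing/per_cpu/cpu0/stats",
                      O_RDONLY);

        if (fd < 0)
                return -1;
        n = read(fd, buf, sizeof(buf) - 1);
        if (n > 0) {
                buf[n] = '\0';
                fputs(buf, stdout);
        }
        close(fd);
        return 0;
}
#endif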
4423
4424 static const struct file_operations tracing_stats_fops = {
4425         .open           = tracing_open_generic,
4426         .read           = tracing_stats_read,
4427         .llseek         = generic_file_llseek,
4428 };
4429
4430 #ifdef CONFIG_DYNAMIC_FTRACE
4431
4432 int __weak ftrace_arch_read_dyn_info(char *buf, int size)
4433 {
4434         return 0;
4435 }
4436
4437 static ssize_t
4438 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
4439                   size_t cnt, loff_t *ppos)
4440 {
4441         static char ftrace_dyn_info_buffer[1024];
4442         static DEFINE_MUTEX(dyn_info_mutex);
4443         unsigned long *p = filp->private_data;
4444         char *buf = ftrace_dyn_info_buffer;
4445         int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
4446         int r;
4447
4448         mutex_lock(&dyn_info_mutex);
4449         r = sprintf(buf, "%ld ", *p);
4450
4451         r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
4452         buf[r++] = '\n';
4453
4454         r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4455
4456         mutex_unlock(&dyn_info_mutex);
4457
4458         return r;
4459 }
4460
4461 static const struct file_operations tracing_dyn_info_fops = {
4462         .open           = tracing_open_generic,
4463         .read           = tracing_read_dyn_info,
4464         .llseek         = generic_file_llseek,
4465 };
4466 #endif
4467
4468 static struct dentry *d_tracer;
4469
4470 struct dentry *tracing_init_dentry(void)
4471 {
4472         static int once;
4473
4474         if (d_tracer)
4475                 return d_tracer;
4476
4477         if (!debugfs_initialized())
4478                 return NULL;
4479
4480         d_tracer = debugfs_create_dir("tracing", NULL);
4481
4482         if (!d_tracer && !once) {
4483                 once = 1;
4484                 pr_warning("Could not create debugfs directory 'tracing'\n");
4485                 return NULL;
4486         }
4487
4488         return d_tracer;
4489 }
4490
4491 static struct dentry *d_percpu;
4492
4493 struct dentry *tracing_dentry_percpu(void)
4494 {
4495         static int once;
4496         struct dentry *d_tracer;
4497
4498         if (d_percpu)
4499                 return d_percpu;
4500
4501         d_tracer = tracing_init_dentry();
4502
4503         if (!d_tracer)
4504                 return NULL;
4505
4506         d_percpu = debugfs_create_dir("per_cpu", d_tracer);
4507
4508         if (!d_percpu && !once) {
4509                 once = 1;
4510                 pr_warning("Could not create debugfs directory 'per_cpu'\n");
4511                 return NULL;
4512         }
4513
4514         return d_percpu;
4515 }
4516
4517 static void tracing_init_debugfs_percpu(long cpu)
4518 {
4519         struct dentry *d_percpu = tracing_dentry_percpu();
4520         struct dentry *d_cpu;
4521         char cpu_dir[30]; /* 30 characters should be more than enough */
4522
4523         if (!d_percpu)
4524                 return;
4525
4526         snprintf(cpu_dir, 30, "cpu%ld", cpu);
4527         d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
4528         if (!d_cpu) {
4529                 pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
4530                 return;
4531         }
4532
4533         /* per cpu trace_pipe */
4534         trace_create_file("trace_pipe", 0444, d_cpu,
4535                         (void *) cpu, &tracing_pipe_fops);
4536
4537         /* per cpu trace */
4538         trace_create_file("trace", 0644, d_cpu,
4539                         (void *) cpu, &tracing_fops);
4540
4541         trace_create_file("trace_pipe_raw", 0444, d_cpu,
4542                         (void *) cpu, &tracing_buffers_fops);
4543
4544         trace_create_file("stats", 0444, d_cpu,
4545                         (void *) cpu, &tracing_stats_fops);
4546
4547         trace_create_file("buffer_size_kb", 0444, d_cpu,
4548                         (void *) cpu, &tracing_entries_fops);
4549 }
4550
4551 #ifdef CONFIG_FTRACE_SELFTEST
4552 /* Let selftest have access to static functions in this file */
4553 #include "trace_selftest.c"
4554 #endif
4555
4556 struct trace_option_dentry {
4557         struct tracer_opt               *opt;
4558         struct tracer_flags             *flags;
4559         struct dentry                   *entry;
4560 };
4561
4562 static ssize_t
4563 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
4564                         loff_t *ppos)
4565 {
4566         struct trace_option_dentry *topt = filp->private_data;
4567         char *buf;
4568
4569         if (topt->flags->val & topt->opt->bit)
4570                 buf = "1\n";
4571         else
4572                 buf = "0\n";
4573
4574         return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
4575 }
4576
4577 static ssize_t
4578 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
4579                          loff_t *ppos)
4580 {
4581         struct trace_option_dentry *topt = filp->private_data;
4582         unsigned long val;
4583         int ret;
4584
4585         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4586         if (ret)
4587                 return ret;
4588
4589         if (val != 0 && val != 1)
4590                 return -EINVAL;
4591
4592         if (!!(topt->flags->val & topt->opt->bit) != val) {
4593                 mutex_lock(&trace_types_lock);
4594                 ret = __set_tracer_option(current_trace, topt->flags,
4595                                           topt->opt, !val);
4596                 mutex_unlock(&trace_types_lock);
4597                 if (ret)
4598                         return ret;
4599         }
4600
4601         *ppos += cnt;
4602
4603         return cnt;
4604 }
4605
4606
4607 static const struct file_operations trace_options_fops = {
4608         .open = tracing_open_generic,
4609         .read = trace_options_read,
4610         .write = trace_options_write,
4611         .llseek = generic_file_llseek,
4612 };
4613
4614 static ssize_t
4615 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
4616                         loff_t *ppos)
4617 {
4618         long index = (long)filp->private_data;
4619         char *buf;
4620
4621         if (trace_flags & (1 << index))
4622                 buf = "1\n";
4623         else
4624                 buf = "0\n";
4625
4626         return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
4627 }
4628
4629 static ssize_t
4630 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
4631                          loff_t *ppos)
4632 {
4633         long index = (long)filp->private_data;
4634         unsigned long val;
4635         int ret;
4636
4637         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4638         if (ret)
4639                 return ret;
4640
4641         if (val != 0 && val != 1)
4642                 return -EINVAL;
4643         set_tracer_flags(1 << index, val);
4644
4645         *ppos += cnt;
4646
4647         return cnt;
4648 }
4649
4650 static const struct file_operations trace_options_core_fops = {
4651         .open = tracing_open_generic,
4652         .read = trace_options_core_read,
4653         .write = trace_options_core_write,
4654         .llseek = generic_file_llseek,
4655 };
4656
4657 struct dentry *trace_create_file(const char *name,
4658                                  umode_t mode,
4659                                  struct dentry *parent,
4660                                  void *data,
4661                                  const struct file_operations *fops)
4662 {
4663         struct dentry *ret;
4664
4665         ret = debugfs_create_file(name, mode, parent, data, fops);
4666         if (!ret)
4667                 pr_warning("Could not create debugfs '%s' entry\n", name);
4668
4669         return ret;
4670 }
4671
4672
4673 static struct dentry *trace_options_init_dentry(void)
4674 {
4675         struct dentry *d_tracer;
4676         static struct dentry *t_options;
4677
4678         if (t_options)
4679                 return t_options;
4680
4681         d_tracer = tracing_init_dentry();
4682         if (!d_tracer)
4683                 return NULL;
4684
4685         t_options = debugfs_create_dir("options", d_tracer);
4686         if (!t_options) {
4687                 pr_warning("Could not create debugfs directory 'options'\n");
4688                 return NULL;
4689         }
4690
4691         return t_options;
4692 }
4693
4694 static void
4695 create_trace_option_file(struct trace_option_dentry *topt,
4696                          struct tracer_flags *flags,
4697                          struct tracer_opt *opt)
4698 {
4699         struct dentry *t_options;
4700
4701         t_options = trace_options_init_dentry();
4702         if (!t_options)
4703                 return;
4704
4705         topt->flags = flags;
4706         topt->opt = opt;
4707
4708         topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
4709                                     &trace_options_fops);
4710
4711 }
4712
4713 static struct trace_option_dentry *
4714 create_trace_option_files(struct tracer *tracer)
4715 {
4716         struct trace_option_dentry *topts;
4717         struct tracer_flags *flags;
4718         struct tracer_opt *opts;
4719         int cnt;
4720
4721         if (!tracer)
4722                 return NULL;
4723
4724         flags = tracer->flags;
4725
4726         if (!flags || !flags->opts)
4727                 return NULL;
4728
4729         opts = flags->opts;
4730
4731         for (cnt = 0; opts[cnt].name; cnt++)
4732                 ;
4733
4734         topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
4735         if (!topts)
4736                 return NULL;
4737
4738         for (cnt = 0; opts[cnt].name; cnt++)
4739                 create_trace_option_file(&topts[cnt], flags,
4740                                          &opts[cnt]);
4741
4742         return topts;
4743 }
4744
4745 static void
4746 destroy_trace_option_files(struct trace_option_dentry *topts)
4747 {
4748         int cnt;
4749
4750         if (!topts)
4751                 return;
4752
4753         for (cnt = 0; topts[cnt].opt; cnt++) {
4754                 if (topts[cnt].entry)
4755                         debugfs_remove(topts[cnt].entry);
4756         }
4757
4758         kfree(topts);
4759 }
4760
4761 static struct dentry *
4762 create_trace_option_core_file(const char *option, long index)
4763 {
4764         struct dentry *t_options;
4765
4766         t_options = trace_options_init_dentry();
4767         if (!t_options)
4768                 return NULL;
4769
4770         return trace_create_file(option, 0644, t_options, (void *)index,
4771                                     &trace_options_core_fops);
4772 }
4773
4774 static __init void create_trace_options_dir(void)
4775 {
4776         struct dentry *t_options;
4777         int i;
4778
4779         t_options = trace_options_init_dentry();
4780         if (!t_options)
4781                 return;
4782
4783         for (i = 0; trace_options[i]; i++)
4784                 create_trace_option_core_file(trace_options[i], i);
4785 }
4786
4787 static ssize_t
4788 rb_simple_read(struct file *filp, char __user *ubuf,
4789                size_t cnt, loff_t *ppos)
4790 {
4791         struct trace_array *tr = filp->private_data;
4792         struct ring_buffer *buffer = tr->buffer;
4793         char buf[64];
4794         int r;
4795
4796         if (buffer)
4797                 r = ring_buffer_record_is_on(buffer);
4798         else
4799                 r = 0;
4800
4801         r = sprintf(buf, "%d\n", r);
4802
4803         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4804 }
4805
4806 static ssize_t
4807 rb_simple_write(struct file *filp, const char __user *ubuf,
4808                 size_t cnt, loff_t *ppos)
4809 {
4810         struct trace_array *tr = filp->private_data;
4811         struct ring_buffer *buffer = tr->buffer;
4812         unsigned long val;
4813         int ret;
4814
4815         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4816         if (ret)
4817                 return ret;
4818
4819         if (buffer) {
4820                 mutex_lock(&trace_types_lock);
4821                 if (val) {
4822                         ring_buffer_record_on(buffer);
4823                         if (current_trace->start)
4824                                 current_trace->start(tr);
4825                 } else {
4826                         ring_buffer_record_off(buffer);
4827                         if (current_trace->stop)
4828                                 current_trace->stop(tr);
4829                 }
4830                 mutex_unlock(&trace_types_lock);
4831         }
4832
4833         (*ppos)++;
4834
4835         return cnt;
4836 }
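
/*
 * Illustrative sketch (userspace, not built as part of this file):
 * toggling the ring buffer through tracing_on; writing "0" stops
 * recording and "1" restarts it, without tearing anything down.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int set_tracing_on(int on)
{
        int fd = open("/sys/kernel/debug/tracing/tracing_on", O_WRONLY);

        if (fd < 0)
                return -1;
        if (write(fd, on ? "1" : "0", 1) != 1) {
                close(fd);
                return -1;
        }
        return close(fd);
}
#endif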
4837
4838 static const struct file_operations rb_simple_fops = {
4839         .open           = tracing_open_generic,
4840         .read           = rb_simple_read,
4841         .write          = rb_simple_write,
4842         .llseek         = default_llseek,
4843 };
4844
4845 static __init int tracer_init_debugfs(void)
4846 {
4847         struct dentry *d_tracer;
4848         int cpu;
4849
4850         trace_access_lock_init();
4851
4852         d_tracer = tracing_init_dentry();
4853
4854         trace_create_file("trace_options", 0644, d_tracer,
4855                         NULL, &tracing_iter_fops);
4856
4857         trace_create_file("tracing_cpumask", 0644, d_tracer,
4858                         NULL, &tracing_cpumask_fops);
4859
4860         trace_create_file("trace", 0644, d_tracer,
4861                         (void *) TRACE_PIPE_ALL_CPU, &tracing_fops);
4862
4863         trace_create_file("available_tracers", 0444, d_tracer,
4864                         &global_trace, &show_traces_fops);
4865
4866         trace_create_file("current_tracer", 0644, d_tracer,
4867                         &global_trace, &set_tracer_fops);
4868
4869 #ifdef CONFIG_TRACER_MAX_TRACE
4870         trace_create_file("tracing_max_latency", 0644, d_tracer,
4871                         &tracing_max_latency, &tracing_max_lat_fops);
4872 #endif
4873
4874         trace_create_file("tracing_thresh", 0644, d_tracer,
4875                         &tracing_thresh, &tracing_max_lat_fops);
4876
4877         trace_create_file("README", 0444, d_tracer,
4878                         NULL, &tracing_readme_fops);
4879
4880         trace_create_file("trace_pipe", 0444, d_tracer,
4881                         (void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops);
4882
4883         trace_create_file("buffer_size_kb", 0644, d_tracer,
4884                         (void *) RING_BUFFER_ALL_CPUS, &tracing_entries_fops);
4885
4886         trace_create_file("buffer_total_size_kb", 0444, d_tracer,
4887                         &global_trace, &tracing_total_entries_fops);
4888
4889         trace_create_file("free_buffer", 0644, d_tracer,
4890                         &global_trace, &tracing_free_buffer_fops);
4891
4892         trace_create_file("trace_marker", 0220, d_tracer,
4893                         NULL, &tracing_mark_fops);
4894
4895         trace_create_file("saved_cmdlines", 0444, d_tracer,
4896                         NULL, &tracing_saved_cmdlines_fops);
4897
4898         trace_create_file("trace_clock", 0644, d_tracer, NULL,
4899                           &trace_clock_fops);
4900
4901         trace_create_file("tracing_on", 0644, d_tracer,
4902                             &global_trace, &rb_simple_fops);
4903
4904 #ifdef CONFIG_DYNAMIC_FTRACE
4905         trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
4906                         &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
4907 #endif
4908
4909         create_trace_options_dir();
4910
4911         for_each_tracing_cpu(cpu)
4912                 tracing_init_debugfs_percpu(cpu);
4913
4914         return 0;
4915 }

static int trace_panic_handler(struct notifier_block *this,
                               unsigned long event, void *unused)
{
        if (ftrace_dump_on_oops)
                ftrace_dump(ftrace_dump_on_oops);
        return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
        .notifier_call  = trace_panic_handler,
        .next           = NULL,
        .priority       = 150   /* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
                             unsigned long val,
                             void *data)
{
        switch (val) {
        case DIE_OOPS:
                if (ftrace_dump_on_oops)
                        ftrace_dump(ftrace_dump_on_oops);
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
        .notifier_call = trace_die_handler,
        .priority = 200
};
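
/*
 * Illustrative sketch (not part of this file): other GPL code can hook
 * the same panic notifier chain.  Higher-priority callbacks run first,
 * so a priority below 150 runs after trace_panic_notifier above has
 * dumped the buffers.  The names here ("my_panic_cb" etc.) are
 * hypothetical.
 */
#if 0
static int my_panic_cb(struct notifier_block *nb, unsigned long event,
                       void *unused)
{
        pr_emerg("my_module: panic observed\n");
        return NOTIFY_OK;
}

static struct notifier_block my_panic_nb = {
        .notifier_call  = my_panic_cb,
        .priority       = 100,  /* runs after trace_panic_notifier (150) */
};

static int __init my_module_init(void)
{
        atomic_notifier_chain_register(&panic_notifier_list, &my_panic_nb);
        return 0;
}
#endif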

/*
 * printk() buffers are capped at 1024 bytes; we really don't need
 * that much here.  Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT         1000

/*
 * Define KERN_TRACE here so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE              KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
        /* Probably should print a warning here. */
        if (s->len >= TRACE_MAX_PRINT)
                s->len = TRACE_MAX_PRINT;

        /* should be NUL-terminated, but we are paranoid */
        s->buffer[s->len] = 0;

        printk(KERN_TRACE "%s", s->buffer);

        trace_seq_init(s);
}
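
/*
 * Illustrative sketch (not part of this file): how a dump path can use
 * trace_printk_seq().  A trace_seq accumulates formatted text, and
 * trace_printk_seq() flushes it to the console at KERN_TRACE level and
 * re-initializes it for reuse.  "cpu" and "count" are hypothetical.
 */
#if 0
static void dump_cpu_summary(int cpu, unsigned long count)
{
        struct trace_seq s;

        trace_seq_init(&s);
        trace_seq_printf(&s, "cpu %d: %lu events\n", cpu, count);
        trace_printk_seq(&s);   /* prints the buffer, then resets it */
}
#endif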

void trace_init_global_iter(struct trace_iterator *iter)
{
        iter->tr = &global_trace;
        iter->trace = current_trace;
        iter->cpu_file = TRACE_PIPE_ALL_CPU;
}

static void
__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
{
        static arch_spinlock_t ftrace_dump_lock =
                (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
        /* use static because iter can be a bit big for the stack */
        static struct trace_iterator iter;
        unsigned int old_userobj;
        static int dump_ran;
        unsigned long flags;
        int cnt = 0, cpu;

        /* only one dump */
        local_irq_save(flags);
        arch_spin_lock(&ftrace_dump_lock);
        if (dump_ran)
                goto out;

        dump_ran = 1;

        tracing_off();

        /* Did function tracer already get disabled? */
        if (ftrace_is_dead()) {
                printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
                printk("#          MAY BE MISSING FUNCTION EVENTS\n");
        }

        if (disable_tracing)
                ftrace_kill();

        trace_init_global_iter(&iter);

        for_each_tracing_cpu(cpu) {
                atomic_inc(&iter.tr->data[cpu]->disabled);
        }

        old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;

        /* don't look at user memory in panic mode */
        trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

        /* Simulate the iterator */
        iter.tr = &global_trace;
        iter.trace = current_trace;

        switch (oops_dump_mode) {
        case DUMP_ALL:
                iter.cpu_file = TRACE_PIPE_ALL_CPU;
                break;
        case DUMP_ORIG:
                iter.cpu_file = raw_smp_processor_id();
                break;
        case DUMP_NONE:
                goto out_enable;
        default:
                printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
                iter.cpu_file = TRACE_PIPE_ALL_CPU;
        }

        printk(KERN_TRACE "Dumping ftrace buffer:\n");

        /*
         * We need to stop all tracing on all CPUs to read
         * the next buffer.  This is a bit expensive, but is
         * not done often.  We print everything we can read,
         * and then release the locks again.
         */

        while (!trace_empty(&iter)) {

                if (!cnt)
                        printk(KERN_TRACE "---------------------------------\n");

                cnt++;

                /* reset all but tr, trace, and overruns */
                memset(&iter.seq, 0,
                       sizeof(struct trace_iterator) -
                       offsetof(struct trace_iterator, seq));
                iter.iter_flags |= TRACE_FILE_LAT_FMT;
                iter.pos = -1;

                if (trace_find_next_entry_inc(&iter) != NULL) {
                        int ret;

                        ret = print_trace_line(&iter);
                        if (ret != TRACE_TYPE_NO_CONSUME)
                                trace_consume(&iter);
                }
                touch_nmi_watchdog();

                trace_printk_seq(&iter.seq);
        }

        if (!cnt)
                printk(KERN_TRACE "   (ftrace buffer empty)\n");
        else
                printk(KERN_TRACE "---------------------------------\n");

 out_enable:
        /* Re-enable tracing if requested */
        if (!disable_tracing) {
                trace_flags |= old_userobj;

                for_each_tracing_cpu(cpu) {
                        atomic_dec(&iter.tr->data[cpu]->disabled);
                }
                tracing_on();
        }

 out:
        arch_spin_unlock(&ftrace_dump_lock);
        local_irq_restore(flags);
}

/* By default: disable tracing after the dump */
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
        __ftrace_dump(true, oops_dump_mode);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
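
/*
 * Illustrative sketch (not part of this file): a GPL module can trigger
 * a one-shot dump of the ring buffer on an unrecoverable error through
 * the export above.  Note that the dump also kills tracing for good.
 * "my_fatal_error" is a hypothetical caller.
 */
#if 0
static void my_fatal_error(void)
{
        pr_emerg("fatal condition, dumping ftrace buffer\n");
        ftrace_dump(DUMP_ALL);  /* dump every CPU's buffer to the console */
}
#endif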

__init static int tracer_alloc_buffers(void)
{
        int ring_buf_size;
        enum ring_buffer_flags rb_flags;
        int i;
        int ret = -ENOMEM;

        if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
                goto out;

        if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
                goto out_free_buffer_mask;

        /* Only allocate trace_printk buffers if a trace_printk exists */
        if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
                /* Must be called before global_trace.buffer is allocated */
                trace_printk_init_buffers();

        /* To save memory, keep the ring buffer size to its minimum */
        if (ring_buffer_expanded)
                ring_buf_size = trace_buf_size;
        else
                ring_buf_size = 1;

        rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

        cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
        cpumask_copy(tracing_cpumask, cpu_all_mask);

        /* TODO: make the number of buffers hot pluggable with CPUs */
        global_trace.buffer = ring_buffer_alloc(ring_buf_size, rb_flags);
        if (!global_trace.buffer) {
                printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
                WARN_ON(1);
                goto out_free_cpumask;
        }
        if (global_trace.buffer_disabled)
                tracing_off();

#ifdef CONFIG_TRACER_MAX_TRACE
        max_tr.buffer = ring_buffer_alloc(1, rb_flags);
        if (!max_tr.buffer) {
                printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
                WARN_ON(1);
                ring_buffer_free(global_trace.buffer);
                goto out_free_cpumask;
        }
#endif

        /* Point each trace array at its per-CPU data */
        for_each_tracing_cpu(i) {
                global_trace.data[i] = &per_cpu(global_trace_cpu, i);
                max_tr.data[i] = &per_cpu(max_tr_data, i);
        }

        set_buffer_entries(&global_trace,
                           ring_buffer_size(global_trace.buffer, 0));
#ifdef CONFIG_TRACER_MAX_TRACE
        set_buffer_entries(&max_tr, 1);
#endif

        trace_init_cmdlines();
        init_irq_work(&trace_work_wakeup, trace_wake_up);

        register_tracer(&nop_trace);
        current_trace = &nop_trace;
        /* All seems OK, enable tracing */
        tracing_disabled = 0;

        atomic_notifier_chain_register(&panic_notifier_list,
                                       &trace_panic_notifier);

        register_die_notifier(&trace_die_notifier);

        while (trace_boot_options) {
                char *option;

                option = strsep(&trace_boot_options, ",");
                trace_set_options(option);
        }

        return 0;

out_free_cpumask:
        free_cpumask_var(tracing_cpumask);
out_free_buffer_mask:
        free_cpumask_var(tracing_buffer_mask);
out:
        return ret;
}
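
/*
 * Illustrative sketch (not part of this file): the strsep() loop above
 * consumes a comma-separated option string in place.  This standalone
 * userspace program mirrors that pattern; the option names in "buf"
 * are only examples.
 */
#if 0
#include <stdio.h>
#include <string.h>

int main(void)
{
        char buf[] = "sym-addr,stacktrace,overwrite";
        char *opts = buf;
        char *option;

        /* strsep() returns one token at a time and advances opts;
         * opts becomes NULL once the string is exhausted, just as
         * trace_boot_options does above. */
        while (opts) {
                option = strsep(&opts, ",");
                printf("option: %s\n", option);
        }
        return 0;
}
#endif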

__init static int clear_boot_tracer(void)
{
        /*
         * The default boot-up tracer name is stored in an __init
         * section that is freed after boot.  This function runs as a
         * late_initcall: if the requested boot tracer was never
         * registered by now, clear default_bootup_tracer so that a
         * later registration does not compare against memory that is
         * about to be freed.
         */
        if (!default_bootup_tracer)
                return 0;

        printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
               default_bootup_tracer);
        default_bootup_tracer = NULL;

        return 0;
}
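
/*
 * Initcall ordering (standard kernel levels): early_initcall() runs
 * first, so the ring buffer exists before fs_initcall() creates the
 * debugfs files, and late_initcall() runs after the built-in tracers
 * have had their chance to register.
 */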

early_initcall(tracer_alloc_buffers);
fs_initcall(tracer_init_debugfs);
late_initcall(clear_boot_tracer);