tracing: Protect tracer flags with trace_types_lock
[pandora-kernel.git] / kernel / trace / trace.c
1 /*
2  * ring buffer based function tracer
3  *
4  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
6  *
7  * Originally taken from the RT patch by:
8  *    Arnaldo Carvalho de Melo <acme@redhat.com>
9  *
10  * Based on code from the latency_tracer, that is:
11  *  Copyright (C) 2004-2006 Ingo Molnar
12  *  Copyright (C) 2004 William Lee Irwin III
13  */
14 #include <linux/ring_buffer.h>
15 #include <generated/utsrelease.h>
16 #include <linux/stacktrace.h>
17 #include <linux/writeback.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/notifier.h>
21 #include <linux/irqflags.h>
22 #include <linux/debugfs.h>
23 #include <linux/pagemap.h>
24 #include <linux/hardirq.h>
25 #include <linux/linkage.h>
26 #include <linux/uaccess.h>
27 #include <linux/kprobes.h>
28 #include <linux/ftrace.h>
29 #include <linux/module.h>
30 #include <linux/percpu.h>
31 #include <linux/splice.h>
32 #include <linux/kdebug.h>
33 #include <linux/string.h>
34 #include <linux/rwsem.h>
35 #include <linux/slab.h>
36 #include <linux/ctype.h>
37 #include <linux/init.h>
38 #include <linux/poll.h>
39 #include <linux/fs.h>
40
41 #include "trace.h"
42 #include "trace_output.h"
43
44 /*
45  * On boot up, the ring buffer is set to the minimum size, so that
46  * we do not waste memory on systems that are not using tracing.
47  */
48 int ring_buffer_expanded;
49
50 /*
51  * We need to change this state when a selftest is running.
52  * A selftest will peek into the ring-buffer to count the
53  * entries inserted during the selftest, although some concurrent
54  * insertions into the ring-buffer, such as trace_printk, could occur
55  * at the same time, giving false positive or negative results.
56  */
57 static bool __read_mostly tracing_selftest_running;
58
59 /*
60  * If a tracer is running, we do not want to run SELFTEST.
61  */
62 bool __read_mostly tracing_selftest_disabled;
63
64 /* For tracers that don't implement custom flags */
65 static struct tracer_opt dummy_tracer_opt[] = {
66         { }
67 };
68
69 static struct tracer_flags dummy_tracer_flags = {
70         .val = 0,
71         .opts = dummy_tracer_opt
72 };
73
74 static int dummy_set_flag(u32 old_flags, u32 bit, int set)
75 {
76         return 0;
77 }
78
79 /*
80  * Kill all tracing for good (never come back).
81  * It is initialized to 1 but will turn to zero if the initialization
82  * of the tracer is successful. That is the only place that sets
83  * it back to zero.
84  */
85 static int tracing_disabled = 1;
86
87 DEFINE_PER_CPU(int, ftrace_cpu_disabled);
88
89 static inline void ftrace_disable_cpu(void)
90 {
91         preempt_disable();
92         __this_cpu_inc(ftrace_cpu_disabled);
93 }
94
95 static inline void ftrace_enable_cpu(void)
96 {
97         __this_cpu_dec(ftrace_cpu_disabled);
98         preempt_enable();
99 }
100
101 cpumask_var_t __read_mostly     tracing_buffer_mask;
102
103 /*
104  * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
105  *
106  * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
107  * is set, then ftrace_dump is called. This will output the contents
108  * of the ftrace buffers to the console.  This is very useful for
109  * capturing traces that lead to crashes and outputing it to a
110  * serial console.
111  *
112  * It is off by default, but you can enable it either by specifying
113  * "ftrace_dump_on_oops" on the kernel command line, or by setting
114  * /proc/sys/kernel/ftrace_dump_on_oops.
115  * Set it to 1 to dump the buffers of all CPUs.
116  * Set it to 2 to dump only the buffer of the CPU that triggered the oops.
117  */
118
119 enum ftrace_dump_mode ftrace_dump_on_oops;
120
121 static int tracing_set_tracer(const char *buf);
122
123 #define MAX_TRACER_SIZE         100
124 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
125 static char *default_bootup_tracer;
126
127 static int __init set_cmdline_ftrace(char *str)
128 {
129         strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
130         default_bootup_tracer = bootup_tracer_buf;
131         /* We are using ftrace early, expand it */
132         ring_buffer_expanded = 1;
133         return 1;
134 }
135 __setup("ftrace=", set_cmdline_ftrace);
136
137 static int __init set_ftrace_dump_on_oops(char *str)
138 {
139         if (*str++ != '=' || !*str) {
140                 ftrace_dump_on_oops = DUMP_ALL;
141                 return 1;
142         }
143
144         if (!strcmp("orig_cpu", str)) {
145                 ftrace_dump_on_oops = DUMP_ORIG;
146                 return 1;
147         }
148
149         return 0;
150 }
151 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
152
153 unsigned long long ns2usecs(cycle_t nsec)
154 {
155         nsec += 500;
156         do_div(nsec, 1000);
157         return nsec;
158 }
159
160 /*
161  * The global_trace is the descriptor that holds the tracing
162  * buffers for the live tracing. For each CPU, it contains
163  * a linked list of pages that will store trace entries. The
164  * page descriptor of the pages in memory is used to hold
165  * the linked list by linking the lru item in the page descriptor
166  * to each of the pages in the buffer per CPU.
167  *
168  * For each active CPU there is a data field that holds the
169  * pages for the buffer for that CPU. Each CPU has the same number
170  * of pages allocated for its buffer.
171  */
172 static struct trace_array       global_trace;
173
174 static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);
175
176 int filter_current_check_discard(struct ring_buffer *buffer,
177                                  struct ftrace_event_call *call, void *rec,
178                                  struct ring_buffer_event *event)
179 {
180         return filter_check_discard(call, rec, buffer, event);
181 }
182 EXPORT_SYMBOL_GPL(filter_current_check_discard);
183
184 cycle_t ftrace_now(int cpu)
185 {
186         u64 ts;
187
188         /* Early boot up does not have a buffer yet */
189         if (!global_trace.buffer)
190                 return trace_clock_local();
191
192         ts = ring_buffer_time_stamp(global_trace.buffer, cpu);
193         ring_buffer_normalize_time_stamp(global_trace.buffer, cpu, &ts);
194
195         return ts;
196 }
197
198 /*
199  * The max_tr is used to snapshot the global_trace when a maximum
200  * latency is reached. Some tracers will use this to store a maximum
201  * trace while it continues examining live traces.
202  *
203  * The buffers for the max_tr are set up the same as the global_trace.
204  * When a snapshot is taken, the linked list of the max_tr is swapped
205  * with the linked list of the global_trace and the buffers are reset for
206  * the global_trace so the tracing can continue.
207  */
208 static struct trace_array       max_tr;
209
210 static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);
211
212 /* tracer_enabled is used to toggle activation of a tracer */
213 static int                      tracer_enabled = 1;
214
215 /**
216  * tracing_is_enabled - return tracer_enabled status
217  *
218  * This function is used by other tracers to know the status
219  * of the tracer_enabled flag.  Tracers may use this function
220  * to know if they should enable their features when starting
221  * up. See the irqsoff tracer for an example (start_irqsoff_tracer).
222  */
223 int tracing_is_enabled(void)
224 {
225         return tracer_enabled;
226 }
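
/*
 * Illustrative sketch (not part of the original file): how a tracer's
 * start path typically consults tracing_is_enabled(), in the spirit of
 * start_irqsoff_tracer() mentioned above.  The example function and the
 * hook helper it calls are hypothetical.
 */
#if 0
static void example_tracer_start(struct trace_array *tr)
{
        /* Only arm this tracer's machinery if tracing is currently enabled. */
        if (tracing_is_enabled())
                example_arm_hooks(tr);  /* hypothetical helper */
}
#endif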
227
228 /*
229  * trace_buf_size is the size in bytes that is allocated
230  * for a buffer. Note, the number of bytes is always rounded
231  * to page size.
232  *
233  * This number is purposely set to a low number of 16384.
234  * If the dump on oops happens, it will be much appreciated
235  * to not have to wait for all that output. Anyway, this is
236  * configurable at both boot time and run time.
237  */
238 #define TRACE_BUF_SIZE_DEFAULT  1441792UL /* 16384 * 88 (sizeof(entry)) */
239
240 static unsigned long            trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
241
242 /* trace_types holds a linked list of available tracers. */
243 static struct tracer            *trace_types __read_mostly;
244
245 /* current_trace points to the tracer that is currently active */
246 static struct tracer            *current_trace __read_mostly;
247
248 /*
249  * trace_types_lock is used to protect the trace_types list.
250  */
251 static DEFINE_MUTEX(trace_types_lock);
252
253 /*
254  * Serialize access to the ring buffer.
255  *
256  * The ring buffer serializes readers, but that is only low-level protection.
257  * The validity of the events (returned by ring_buffer_peek(), etc.)
258  * is not protected by the ring buffer.
259  *
260  * The content of events may become garbage if we allow other processes to
261  * consume these events concurrently:
262  *   A) the page of the consumed events may become a normal page
263  *      (not a reader page) in the ring buffer, and this page will be
264  *      rewritten by the event producer.
265  *   B) the page of the consumed events may become a page for splice_read,
266  *      and this page will be returned to the system.
267  *
268  * These primitives allow multiple processes to access different per-cpu
269  * ring buffers concurrently.
270  *
271  * These primitives don't distinguish read-only and read-consume access.
272  * Multiple read-only accesses are also serialized.
273  */
274
275 #ifdef CONFIG_SMP
276 static DECLARE_RWSEM(all_cpu_access_lock);
277 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
278
279 static inline void trace_access_lock(int cpu)
280 {
281         if (cpu == TRACE_PIPE_ALL_CPU) {
282                 /* gain it for accessing the whole ring buffer. */
283                 down_write(&all_cpu_access_lock);
284         } else {
285                 /* gain it for accessing a cpu ring buffer. */
286
287                 /* Firstly block other trace_access_lock(TRACE_PIPE_ALL_CPU). */
288                 down_read(&all_cpu_access_lock);
289
290                 /* Secondly block other access to this @cpu ring buffer. */
291                 mutex_lock(&per_cpu(cpu_access_lock, cpu));
292         }
293 }
294
295 static inline void trace_access_unlock(int cpu)
296 {
297         if (cpu == TRACE_PIPE_ALL_CPU) {
298                 up_write(&all_cpu_access_lock);
299         } else {
300                 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
301                 up_read(&all_cpu_access_lock);
302         }
303 }
304
305 static inline void trace_access_lock_init(void)
306 {
307         int cpu;
308
309         for_each_possible_cpu(cpu)
310                 mutex_init(&per_cpu(cpu_access_lock, cpu));
311 }
312
313 #else
314
315 static DEFINE_MUTEX(access_lock);
316
317 static inline void trace_access_lock(int cpu)
318 {
319         (void)cpu;
320         mutex_lock(&access_lock);
321 }
322
323 static inline void trace_access_unlock(int cpu)
324 {
325         (void)cpu;
326         mutex_unlock(&access_lock);
327 }
328
329 static inline void trace_access_lock_init(void)
330 {
331 }
332
333 #endif
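
/*
 * Illustrative sketch (not part of the original file): the expected
 * calling pattern for the access primitives above, as used by a
 * trace_pipe style reader.  The example function is hypothetical;
 * "iter" stands for a caller-owned struct trace_iterator.
 */
#if 0
static void example_consume_entries(struct trace_iterator *iter)
{
        /*
         * cpu_file is either a specific cpu or TRACE_PIPE_ALL_CPU; the
         * lock serializes us against other readers accordingly.
         */
        trace_access_lock(iter->cpu_file);

        /* ... peek at or consume events from iter->tr->buffer here ... */

        trace_access_unlock(iter->cpu_file);
}
#endif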
334
335 /* trace_wait is a waitqueue for tasks blocked on trace_poll */
336 static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
337
338 /* trace_flags holds trace_options default values */
339 unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
340         TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
341         TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE;
342
343 static int trace_stop_count;
344 static DEFINE_RAW_SPINLOCK(tracing_start_lock);
345
346 static void wakeup_work_handler(struct work_struct *work)
347 {
348         wake_up(&trace_wait);
349 }
350
351 static DECLARE_DELAYED_WORK(wakeup_work, wakeup_work_handler);
352
353 /**
354  * trace_wake_up - wake up tasks waiting for trace input
355  *
356  * Schedules a delayed work to wake up any task that is blocked on the
357  * trace_wait queue. This is used with trace_poll for tasks polling the
358  * trace.
359  */
360 void trace_wake_up(void)
361 {
362         const unsigned long delay = msecs_to_jiffies(2);
363
364         if (trace_flags & TRACE_ITER_BLOCK)
365                 return;
366         schedule_delayed_work(&wakeup_work, delay);
367 }
368
369 static int __init set_buf_size(char *str)
370 {
371         unsigned long buf_size;
372
373         if (!str)
374                 return 0;
375         buf_size = memparse(str, &str);
376         /* nr_entries can not be zero */
377         if (buf_size == 0)
378                 return 0;
379         trace_buf_size = buf_size;
380         return 1;
381 }
382 __setup("trace_buf_size=", set_buf_size);
383
384 static int __init set_tracing_thresh(char *str)
385 {
386         unsigned long threshold;
387         int ret;
388
389         if (!str)
390                 return 0;
391         ret = strict_strtoul(str, 0, &threshold);
392         if (ret < 0)
393                 return 0;
394         tracing_thresh = threshold * 1000;
395         return 1;
396 }
397 __setup("tracing_thresh=", set_tracing_thresh);
398
399 unsigned long nsecs_to_usecs(unsigned long nsecs)
400 {
401         return nsecs / 1000;
402 }
403
404 /* These must match the bit positions in trace_iterator_flags */
405 static const char *trace_options[] = {
406         "print-parent",
407         "sym-offset",
408         "sym-addr",
409         "verbose",
410         "raw",
411         "hex",
412         "bin",
413         "block",
414         "stacktrace",
415         "trace_printk",
416         "ftrace_preempt",
417         "branch",
418         "annotate",
419         "userstacktrace",
420         "sym-userobj",
421         "printk-msg-only",
422         "context-info",
423         "latency-format",
424         "sleep-time",
425         "graph-time",
426         "record-cmd",
427         "overwrite",
428         "disable_on_free",
429         NULL
430 };
431
432 static struct {
433         u64 (*func)(void);
434         const char *name;
435 } trace_clocks[] = {
436         { trace_clock_local,    "local" },
437         { trace_clock_global,   "global" },
438         { trace_clock_counter,  "counter" },
439 };
440
441 int trace_clock_id;
442
443 /*
444  * trace_parser_get_init - gets the buffer for trace parser
445  */
446 int trace_parser_get_init(struct trace_parser *parser, int size)
447 {
448         memset(parser, 0, sizeof(*parser));
449
450         parser->buffer = kmalloc(size, GFP_KERNEL);
451         if (!parser->buffer)
452                 return 1;
453
454         parser->size = size;
455         return 0;
456 }
457
458 /*
459  * trace_parser_put - frees the buffer for trace parser
460  */
461 void trace_parser_put(struct trace_parser *parser)
462 {
463         kfree(parser->buffer);
464 }
465
466 /*
467  * trace_get_user - reads the user input string separated by space
468  * (matched by isspace(ch))
469  *
470  * For each string found the 'struct trace_parser' is updated,
471  * and the function returns.
472  *
473  * Returns number of bytes read.
474  *
475  * See kernel/trace/trace.h for 'struct trace_parser' details.
476  */
477 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
478         size_t cnt, loff_t *ppos)
479 {
480         char ch;
481         size_t read = 0;
482         ssize_t ret;
483
484         if (!*ppos)
485                 trace_parser_clear(parser);
486
487         ret = get_user(ch, ubuf++);
488         if (ret)
489                 goto out;
490
491         read++;
492         cnt--;
493
494         /*
495          * If the parser is continuing an unfinished write, read the user
496          * input without skipping spaces; otherwise skip leading whitespace.
497          */
498         if (!parser->cont) {
499                 /* skip white space */
500                 while (cnt && isspace(ch)) {
501                         ret = get_user(ch, ubuf++);
502                         if (ret)
503                                 goto out;
504                         read++;
505                         cnt--;
506                 }
507
508                 /* only spaces were written */
509                 if (isspace(ch)) {
510                         *ppos += read;
511                         ret = read;
512                         goto out;
513                 }
514
515                 parser->idx = 0;
516         }
517
518         /* read the non-space input */
519         while (cnt && !isspace(ch)) {
520                 if (parser->idx < parser->size - 1)
521                         parser->buffer[parser->idx++] = ch;
522                 else {
523                         ret = -EINVAL;
524                         goto out;
525                 }
526                 ret = get_user(ch, ubuf++);
527                 if (ret)
528                         goto out;
529                 read++;
530                 cnt--;
531         }
532
533         /* We either finished the input or have to wait for another call. */
534         if (isspace(ch)) {
535                 parser->buffer[parser->idx] = 0;
536                 parser->cont = false;
537         } else {
538                 parser->cont = true;
539                 parser->buffer[parser->idx++] = ch;
540         }
541
542         *ppos += read;
543         ret = read;
544
545 out:
546         return ret;
547 }
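
/*
 * Illustrative sketch (not part of the original file): a typical debugfs
 * write handler built on the parser helpers above, assuming the
 * trace_parser_loaded() helper from trace.h.  The handler itself and the
 * "handle one token" step are hypothetical.
 */
#if 0
static ssize_t
example_token_write(struct file *filp, const char __user *ubuf,
                    size_t cnt, loff_t *ppos)
{
        struct trace_parser parser;
        ssize_t read;

        if (trace_parser_get_init(&parser, 64))
                return -ENOMEM;

        read = trace_get_user(&parser, ubuf, cnt, ppos);
        if (read >= 0 && trace_parser_loaded(&parser)) {
                /* parser.buffer now holds one NUL-terminated token */
        }

        trace_parser_put(&parser);
        return read;
}
#endif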
548
549 ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
550 {
551         int len;
552         int ret;
553
554         if (!cnt)
555                 return 0;
556
557         if (s->len <= s->readpos)
558                 return -EBUSY;
559
560         len = s->len - s->readpos;
561         if (cnt > len)
562                 cnt = len;
563         ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
564         if (ret == cnt)
565                 return -EFAULT;
566
567         cnt -= ret;
568
569         s->readpos += cnt;
570         return cnt;
571 }
572
573 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
574 {
575         int len;
576         void *ret;
577
578         if (s->len <= s->readpos)
579                 return -EBUSY;
580
581         len = s->len - s->readpos;
582         if (cnt > len)
583                 cnt = len;
584         ret = memcpy(buf, s->buffer + s->readpos, cnt);
585         if (!ret)
586                 return -EFAULT;
587
588         s->readpos += cnt;
589         return cnt;
590 }
591
592 /*
593  * ftrace_max_lock is used to protect the swapping of buffers
594  * when taking a max snapshot. The buffers themselves are
595  * protected by per_cpu spinlocks. But the action of the swap
596  * needs its own lock.
597  *
598  * This is defined as an arch_spinlock_t in order to help
599  * with performance when lockdep debugging is enabled.
600  *
601  * It is also used in other places outside of update_max_tr(),
602  * so it needs to be defined outside of the
603  * CONFIG_TRACER_MAX_TRACE block.
604  */
605 static arch_spinlock_t ftrace_max_lock =
606         (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
607
608 unsigned long __read_mostly     tracing_thresh;
609
610 #ifdef CONFIG_TRACER_MAX_TRACE
611 unsigned long __read_mostly     tracing_max_latency;
612
613 /*
614  * Copy the new maximum trace into the separate maximum-trace
615  * structure. (this way the maximum trace is permanently saved,
616  * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
617  */
618 static void
619 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
620 {
621         struct trace_array_cpu *data = tr->data[cpu];
622         struct trace_array_cpu *max_data;
623
624         max_tr.cpu = cpu;
625         max_tr.time_start = data->preempt_timestamp;
626
627         max_data = max_tr.data[cpu];
628         max_data->saved_latency = tracing_max_latency;
629         max_data->critical_start = data->critical_start;
630         max_data->critical_end = data->critical_end;
631
632         memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
633         max_data->pid = tsk->pid;
634         max_data->uid = task_uid(tsk);
635         max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
636         max_data->policy = tsk->policy;
637         max_data->rt_priority = tsk->rt_priority;
638
639         /* record this task's comm */
640         tracing_record_cmdline(tsk);
641 }
642
643 /**
644  * update_max_tr - snapshot all trace buffers from global_trace to max_tr
645  * @tr: tracer
646  * @tsk: the task with the latency
647  * @cpu: The cpu that initiated the trace.
648  *
649  * Flip the buffers between the @tr and the max_tr and record information
650  * about which task was the cause of this latency.
651  */
652 void
653 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
654 {
655         struct ring_buffer *buf;
656
657         if (trace_stop_count)
658                 return;
659
660         WARN_ON_ONCE(!irqs_disabled());
661         if (!current_trace->use_max_tr) {
662                 WARN_ON_ONCE(1);
663                 return;
664         }
665         arch_spin_lock(&ftrace_max_lock);
666
667         buf = tr->buffer;
668         tr->buffer = max_tr.buffer;
669         max_tr.buffer = buf;
670
671         __update_max_tr(tr, tsk, cpu);
672         arch_spin_unlock(&ftrace_max_lock);
673 }
674
675 /**
676  * update_max_tr_single - only copy one trace over, and reset the rest
677  * @tr: tracer
678  * @tsk: task with the latency
679  * @cpu: the cpu of the buffer to copy.
680  *
681  * Flip the trace of a single CPU buffer between the @tr and the max_tr.
682  */
683 void
684 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
685 {
686         int ret;
687
688         if (trace_stop_count)
689                 return;
690
691         WARN_ON_ONCE(!irqs_disabled());
692         if (!current_trace->use_max_tr) {
693                 WARN_ON_ONCE(1);
694                 return;
695         }
696
697         arch_spin_lock(&ftrace_max_lock);
698
699         ftrace_disable_cpu();
700
701         ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);
702
703         if (ret == -EBUSY) {
704                 /*
705                  * We failed to swap the buffer due to a commit taking
706                  * place on this CPU. We fail to record, but we reset
707                  * the max trace buffer (no one writes directly to it)
708                  * and flag that it failed.
709                  */
710                 trace_array_printk(&max_tr, _THIS_IP_,
711                         "Failed to swap buffers due to commit in progress\n");
712         }
713
714         ftrace_enable_cpu();
715
716         WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
717
718         __update_max_tr(tr, tsk, cpu);
719         arch_spin_unlock(&ftrace_max_lock);
720 }
721 #endif /* CONFIG_TRACER_MAX_TRACE */
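
/*
 * Illustrative sketch (not part of the original file): how a latency
 * tracer such as irqsoff typically records a new maximum using the
 * helpers above.  The function and its threshold handling are
 * hypothetical; real callers run with interrupts disabled, as
 * update_max_tr() expects.
 */
#if 0
static void example_report_latency(struct trace_array *tr,
                                   unsigned long latency, int cpu)
{
        if (latency <= tracing_max_latency)
                return;

        tracing_max_latency = latency;
        /* Snapshot the live buffers into max_tr for later inspection. */
        update_max_tr(tr, current, cpu);
}
#endif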
722
723 /**
724  * register_tracer - register a tracer with the ftrace system.
725  * @type: the plugin for the tracer
726  *
727  * Register a new plugin tracer.
728  */
729 int register_tracer(struct tracer *type)
730 __releases(kernel_lock)
731 __acquires(kernel_lock)
732 {
733         struct tracer *t;
734         int ret = 0;
735
736         if (!type->name) {
737                 pr_info("Tracer must have a name\n");
738                 return -1;
739         }
740
741         if (strlen(type->name) >= MAX_TRACER_SIZE) {
742                 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
743                 return -1;
744         }
745
746         mutex_lock(&trace_types_lock);
747
748         tracing_selftest_running = true;
749
750         for (t = trace_types; t; t = t->next) {
751                 if (strcmp(type->name, t->name) == 0) {
752                         /* already found */
753                         pr_info("Tracer %s already registered\n",
754                                 type->name);
755                         ret = -1;
756                         goto out;
757                 }
758         }
759
760         if (!type->set_flag)
761                 type->set_flag = &dummy_set_flag;
762         if (!type->flags)
763                 type->flags = &dummy_tracer_flags;
764         else
765                 if (!type->flags->opts)
766                         type->flags->opts = dummy_tracer_opt;
767         if (!type->wait_pipe)
768                 type->wait_pipe = default_wait_pipe;
769
770
771 #ifdef CONFIG_FTRACE_STARTUP_TEST
772         if (type->selftest && !tracing_selftest_disabled) {
773                 struct tracer *saved_tracer = current_trace;
774                 struct trace_array *tr = &global_trace;
775
776                 /*
777                  * Run a selftest on this tracer.
778                  * Here we reset the trace buffer, and set the current
779                  * tracer to be this tracer. The tracer can then run some
780                  * internal tracing to verify that everything is in order.
781                  * If we fail, we do not register this tracer.
782                  */
783                 tracing_reset_online_cpus(tr);
784
785                 current_trace = type;
786
787                 /* If we expanded the buffers, make sure the max is expanded too */
788                 if (ring_buffer_expanded && type->use_max_tr)
789                         ring_buffer_resize(max_tr.buffer, trace_buf_size);
790
791                 /* the test is responsible for initializing and enabling */
792                 pr_info("Testing tracer %s: ", type->name);
793                 ret = type->selftest(type, tr);
794                 /* the test is responsible for resetting too */
795                 current_trace = saved_tracer;
796                 if (ret) {
797                         printk(KERN_CONT "FAILED!\n");
798                         goto out;
799                 }
800                 /* Only reset on passing, to avoid touching corrupted buffers */
801                 tracing_reset_online_cpus(tr);
802
803                 /* Shrink the max buffer again */
804                 if (ring_buffer_expanded && type->use_max_tr)
805                         ring_buffer_resize(max_tr.buffer, 1);
806
807                 printk(KERN_CONT "PASSED\n");
808         }
809 #endif
810
811         type->next = trace_types;
812         trace_types = type;
813
814  out:
815         tracing_selftest_running = false;
816         mutex_unlock(&trace_types_lock);
817
818         if (ret || !default_bootup_tracer)
819                 goto out_unlock;
820
821         if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
822                 goto out_unlock;
823
824         printk(KERN_INFO "Starting tracer '%s'\n", type->name);
825         /* Do we want this tracer to start on bootup? */
826         tracing_set_tracer(type->name);
827         default_bootup_tracer = NULL;
828         /* disable other selftests, since this will break them. */
829         tracing_selftest_disabled = 1;
830 #ifdef CONFIG_FTRACE_STARTUP_TEST
831         printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
832                type->name);
833 #endif
834
835  out_unlock:
836         return ret;
837 }
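
/*
 * Illustrative sketch (not part of the original file): the minimal shape
 * of a plugin tracer registered through register_tracer().  The
 * "example" tracer, its callbacks and the initcall are hypothetical.
 */
#if 0
static int example_tracer_init(struct trace_array *tr)
{
        /* arm whatever hooks this tracer needs */
        return 0;
}

static void example_tracer_reset(struct trace_array *tr)
{
        /* tear the hooks back down */
}

static struct tracer example_tracer __read_mostly = {
        .name   = "example",
        .init   = example_tracer_init,
        .reset  = example_tracer_reset,
};

static __init int init_example_tracer(void)
{
        return register_tracer(&example_tracer);
}
device_initcall(init_example_tracer);
#endif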
838
839 void unregister_tracer(struct tracer *type)
840 {
841         struct tracer **t;
842
843         mutex_lock(&trace_types_lock);
844         for (t = &trace_types; *t; t = &(*t)->next) {
845                 if (*t == type)
846                         goto found;
847         }
848         pr_info("Tracer %s not registered\n", type->name);
849         goto out;
850
851  found:
852         *t = (*t)->next;
853
854         if (type == current_trace && tracer_enabled) {
855                 tracer_enabled = 0;
856                 tracing_stop();
857                 if (current_trace->stop)
858                         current_trace->stop(&global_trace);
859                 current_trace = &nop_trace;
860         }
861 out:
862         mutex_unlock(&trace_types_lock);
863 }
864
865 static void __tracing_reset(struct ring_buffer *buffer, int cpu)
866 {
867         ftrace_disable_cpu();
868         ring_buffer_reset_cpu(buffer, cpu);
869         ftrace_enable_cpu();
870 }
871
872 void tracing_reset(struct trace_array *tr, int cpu)
873 {
874         struct ring_buffer *buffer = tr->buffer;
875
876         ring_buffer_record_disable(buffer);
877
878         /* Make sure all commits have finished */
879         synchronize_sched();
880         __tracing_reset(buffer, cpu);
881
882         ring_buffer_record_enable(buffer);
883 }
884
885 void tracing_reset_online_cpus(struct trace_array *tr)
886 {
887         struct ring_buffer *buffer = tr->buffer;
888         int cpu;
889
890         ring_buffer_record_disable(buffer);
891
892         /* Make sure all commits have finished */
893         synchronize_sched();
894
895         tr->time_start = ftrace_now(tr->cpu);
896
897         for_each_online_cpu(cpu)
898                 __tracing_reset(buffer, cpu);
899
900         ring_buffer_record_enable(buffer);
901 }
902
903 void tracing_reset_current(int cpu)
904 {
905         tracing_reset(&global_trace, cpu);
906 }
907
908 void tracing_reset_current_online_cpus(void)
909 {
910         tracing_reset_online_cpus(&global_trace);
911 }
912
913 #define SAVED_CMDLINES 128
914 #define NO_CMDLINE_MAP UINT_MAX
915 static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
916 static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
917 static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
918 static int cmdline_idx;
919 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
920
921 /* temporarily disable recording */
922 static atomic_t trace_record_cmdline_disabled __read_mostly;
923
924 static void trace_init_cmdlines(void)
925 {
926         memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
927         memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
928         cmdline_idx = 0;
929 }
930
931 int is_tracing_stopped(void)
932 {
933         return trace_stop_count;
934 }
935
936 /**
937  * ftrace_off_permanent - disable all ftrace code permanently
938  *
939  * This should only be called when a serious anomaly has
940  * been detected.  This will turn off the function tracing,
941  * ring buffers, and other tracing utilities. It takes no
942  * locks and can be called from any context.
943  */
944 void ftrace_off_permanent(void)
945 {
946         tracing_disabled = 1;
947         ftrace_stop();
948         tracing_off_permanent();
949 }
950
951 /**
952  * tracing_start - quick start of the tracer
953  *
954  * If tracing is enabled but was stopped by tracing_stop,
955  * this will start the tracer back up.
956  */
957 void tracing_start(void)
958 {
959         struct ring_buffer *buffer;
960         unsigned long flags;
961
962         if (tracing_disabled)
963                 return;
964
965         raw_spin_lock_irqsave(&tracing_start_lock, flags);
966         if (--trace_stop_count) {
967                 if (trace_stop_count < 0) {
968                         /* Someone screwed up their debugging */
969                         WARN_ON_ONCE(1);
970                         trace_stop_count = 0;
971                 }
972                 goto out;
973         }
974
975         /* Prevent the buffers from switching */
976         arch_spin_lock(&ftrace_max_lock);
977
978         buffer = global_trace.buffer;
979         if (buffer)
980                 ring_buffer_record_enable(buffer);
981
982         buffer = max_tr.buffer;
983         if (buffer)
984                 ring_buffer_record_enable(buffer);
985
986         arch_spin_unlock(&ftrace_max_lock);
987
988         ftrace_start();
989  out:
990         raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
991 }
992
993 /**
994  * tracing_stop - quick stop of the tracer
995  *
996  * Lightweight way to stop tracing. Use in conjunction with
997  * tracing_start.
998  */
999 void tracing_stop(void)
1000 {
1001         struct ring_buffer *buffer;
1002         unsigned long flags;
1003
1004         ftrace_stop();
1005         raw_spin_lock_irqsave(&tracing_start_lock, flags);
1006         if (trace_stop_count++)
1007                 goto out;
1008
1009         /* Prevent the buffers from switching */
1010         arch_spin_lock(&ftrace_max_lock);
1011
1012         buffer = global_trace.buffer;
1013         if (buffer)
1014                 ring_buffer_record_disable(buffer);
1015
1016         buffer = max_tr.buffer;
1017         if (buffer)
1018                 ring_buffer_record_disable(buffer);
1019
1020         arch_spin_unlock(&ftrace_max_lock);
1021
1022  out:
1023         raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
1024 }
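
/*
 * Illustrative sketch (not part of the original file): tracing_stop()
 * and tracing_start() nest via trace_stop_count, so a caller can
 * bracket a section it does not want recorded.  The example function
 * is hypothetical.
 */
#if 0
static void example_quiet_section(void)
{
        tracing_stop();
        /* ... work that should not show up in the trace ... */
        tracing_start();
}
#endif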
1025
1026 void trace_stop_cmdline_recording(void);
1027
1028 static void trace_save_cmdline(struct task_struct *tsk)
1029 {
1030         unsigned pid, idx;
1031
1032         if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
1033                 return;
1034
1035         /*
1036          * It's not the end of the world if we don't get
1037          * the lock, but we also don't want to spin
1038          * nor do we want to disable interrupts,
1039          * so if we miss here, then better luck next time.
1040          */
1041         if (!arch_spin_trylock(&trace_cmdline_lock))
1042                 return;
1043
1044         idx = map_pid_to_cmdline[tsk->pid];
1045         if (idx == NO_CMDLINE_MAP) {
1046                 idx = (cmdline_idx + 1) % SAVED_CMDLINES;
1047
1048                 /*
1049                  * Check whether the cmdline buffer at idx has a pid
1050                  * mapped. We are going to overwrite that entry so we
1051                  * need to clear the map_pid_to_cmdline. Otherwise we
1052                  * would read the new comm for the old pid.
1053                  */
1054                 pid = map_cmdline_to_pid[idx];
1055                 if (pid != NO_CMDLINE_MAP)
1056                         map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
1057
1058                 map_cmdline_to_pid[idx] = tsk->pid;
1059                 map_pid_to_cmdline[tsk->pid] = idx;
1060
1061                 cmdline_idx = idx;
1062         }
1063
1064         memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
1065
1066         arch_spin_unlock(&trace_cmdline_lock);
1067 }
1068
1069 void trace_find_cmdline(int pid, char comm[])
1070 {
1071         unsigned map;
1072
1073         if (!pid) {
1074                 strcpy(comm, "<idle>");
1075                 return;
1076         }
1077
1078         if (WARN_ON_ONCE(pid < 0)) {
1079                 strcpy(comm, "<XXX>");
1080                 return;
1081         }
1082
1083         if (pid > PID_MAX_DEFAULT) {
1084                 strcpy(comm, "<...>");
1085                 return;
1086         }
1087
1088         preempt_disable();
1089         arch_spin_lock(&trace_cmdline_lock);
1090         map = map_pid_to_cmdline[pid];
1091         if (map != NO_CMDLINE_MAP)
1092                 strcpy(comm, saved_cmdlines[map]);
1093         else
1094                 strcpy(comm, "<...>");
1095
1096         arch_spin_unlock(&trace_cmdline_lock);
1097         preempt_enable();
1098 }
1099
1100 void tracing_record_cmdline(struct task_struct *tsk)
1101 {
1102         if (atomic_read(&trace_record_cmdline_disabled) || !tracer_enabled ||
1103             !tracing_is_on())
1104                 return;
1105
1106         trace_save_cmdline(tsk);
1107 }
1108
1109 void
1110 tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1111                              int pc)
1112 {
1113         struct task_struct *tsk = current;
1114
1115         entry->preempt_count            = pc & 0xff;
1116         entry->pid                      = (tsk) ? tsk->pid : 0;
1117         entry->padding                  = 0;
1118         entry->flags =
1119 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
1120                 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
1121 #else
1122                 TRACE_FLAG_IRQS_NOSUPPORT |
1123 #endif
1124                 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1125                 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
1126                 (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
1127 }
1128 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
1129
1130 struct ring_buffer_event *
1131 trace_buffer_lock_reserve(struct ring_buffer *buffer,
1132                           int type,
1133                           unsigned long len,
1134                           unsigned long flags, int pc)
1135 {
1136         struct ring_buffer_event *event;
1137
1138         event = ring_buffer_lock_reserve(buffer, len);
1139         if (event != NULL) {
1140                 struct trace_entry *ent = ring_buffer_event_data(event);
1141
1142                 tracing_generic_entry_update(ent, flags, pc);
1143                 ent->type = type;
1144         }
1145
1146         return event;
1147 }
1148
1149 static inline void
1150 __trace_buffer_unlock_commit(struct ring_buffer *buffer,
1151                              struct ring_buffer_event *event,
1152                              unsigned long flags, int pc,
1153                              int wake)
1154 {
1155         ring_buffer_unlock_commit(buffer, event);
1156
1157         ftrace_trace_stack(buffer, flags, 6, pc);
1158         ftrace_trace_userstack(buffer, flags, pc);
1159
1160         if (wake)
1161                 trace_wake_up();
1162 }
1163
1164 void trace_buffer_unlock_commit(struct ring_buffer *buffer,
1165                                 struct ring_buffer_event *event,
1166                                 unsigned long flags, int pc)
1167 {
1168         __trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
1169 }
1170
1171 struct ring_buffer_event *
1172 trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1173                                   int type, unsigned long len,
1174                                   unsigned long flags, int pc)
1175 {
1176         *current_rb = global_trace.buffer;
1177         return trace_buffer_lock_reserve(*current_rb,
1178                                          type, len, flags, pc);
1179 }
1180 EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
1181
1182 void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
1183                                         struct ring_buffer_event *event,
1184                                         unsigned long flags, int pc)
1185 {
1186         __trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
1187 }
1188 EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
1189
1190 void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
1191                                        struct ring_buffer_event *event,
1192                                        unsigned long flags, int pc)
1193 {
1194         __trace_buffer_unlock_commit(buffer, event, flags, pc, 0);
1195 }
1196 EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit);
1197
1198 void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer,
1199                                             struct ring_buffer_event *event,
1200                                             unsigned long flags, int pc,
1201                                             struct pt_regs *regs)
1202 {
1203         ring_buffer_unlock_commit(buffer, event);
1204
1205         ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
1206         ftrace_trace_userstack(buffer, flags, pc);
1207 }
1208 EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit_regs);
1209
1210 void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1211                                          struct ring_buffer_event *event)
1212 {
1213         ring_buffer_discard_commit(buffer, event);
1214 }
1215 EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
1216
1217 void
1218 trace_function(struct trace_array *tr,
1219                unsigned long ip, unsigned long parent_ip, unsigned long flags,
1220                int pc)
1221 {
1222         struct ftrace_event_call *call = &event_function;
1223         struct ring_buffer *buffer = tr->buffer;
1224         struct ring_buffer_event *event;
1225         struct ftrace_entry *entry;
1226
1227         /* If we are reading the ring buffer, don't trace */
1228         if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
1229                 return;
1230
1231         event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
1232                                           flags, pc);
1233         if (!event)
1234                 return;
1235         entry   = ring_buffer_event_data(event);
1236         entry->ip                       = ip;
1237         entry->parent_ip                = parent_ip;
1238
1239         if (!filter_check_discard(call, entry, buffer, event))
1240                 ring_buffer_unlock_commit(buffer, event);
1241 }
1242
1243 void
1244 ftrace(struct trace_array *tr, struct trace_array_cpu *data,
1245        unsigned long ip, unsigned long parent_ip, unsigned long flags,
1246        int pc)
1247 {
1248         if (likely(!atomic_read(&data->disabled)))
1249                 trace_function(tr, ip, parent_ip, flags, pc);
1250 }
1251
1252 #ifdef CONFIG_STACKTRACE
1253
1254 #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1255 struct ftrace_stack {
1256         unsigned long           calls[FTRACE_STACK_MAX_ENTRIES];
1257 };
1258
1259 static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1260 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1261
1262 static void __ftrace_trace_stack(struct ring_buffer *buffer,
1263                                  unsigned long flags,
1264                                  int skip, int pc, struct pt_regs *regs)
1265 {
1266         struct ftrace_event_call *call = &event_kernel_stack;
1267         struct ring_buffer_event *event;
1268         struct stack_entry *entry;
1269         struct stack_trace trace;
1270         int use_stack;
1271         int size = FTRACE_STACK_ENTRIES;
1272
1273         trace.nr_entries        = 0;
1274         trace.skip              = skip;
1275
1276         /*
1277          * Since events can happen in NMIs, there's no safe way to
1278          * use the per-cpu ftrace_stacks. We reserve it, and if an interrupt
1279          * or NMI comes in, it will just have to use the default
1280          * FTRACE_STACK_ENTRIES sized stack in the entry itself.
1281          */
1282         preempt_disable_notrace();
1283
1284         use_stack = ++__get_cpu_var(ftrace_stack_reserve);
1285         /*
1286          * We don't need any atomic variables, just a barrier.
1287          * If an interrupt comes in, we don't care, because it would
1288          * have exited and put the counter back to what we want.
1289          * We just need a barrier to keep gcc from moving things
1290          * around.
1291          */
1292         barrier();
1293         if (use_stack == 1) {
1294                 trace.entries           = &__get_cpu_var(ftrace_stack).calls[0];
1295                 trace.max_entries       = FTRACE_STACK_MAX_ENTRIES;
1296
1297                 if (regs)
1298                         save_stack_trace_regs(regs, &trace);
1299                 else
1300                         save_stack_trace(&trace);
1301
1302                 if (trace.nr_entries > size)
1303                         size = trace.nr_entries;
1304         } else
1305                 /* From now on, use_stack is a boolean */
1306                 use_stack = 0;
1307
1308         size *= sizeof(unsigned long);
1309
1310         event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1311                                           sizeof(*entry) + size, flags, pc);
1312         if (!event)
1313                 goto out;
1314         entry = ring_buffer_event_data(event);
1315
1316         memset(&entry->caller, 0, size);
1317
1318         if (use_stack)
1319                 memcpy(&entry->caller, trace.entries,
1320                        trace.nr_entries * sizeof(unsigned long));
1321         else {
1322                 trace.max_entries       = FTRACE_STACK_ENTRIES;
1323                 trace.entries           = entry->caller;
1324                 if (regs)
1325                         save_stack_trace_regs(regs, &trace);
1326                 else
1327                         save_stack_trace(&trace);
1328         }
1329
1330         entry->size = trace.nr_entries;
1331
1332         if (!filter_check_discard(call, entry, buffer, event))
1333                 ring_buffer_unlock_commit(buffer, event);
1334
1335  out:
1336         /* Again, don't let gcc optimize things here */
1337         barrier();
1338         __get_cpu_var(ftrace_stack_reserve)--;
1339         preempt_enable_notrace();
1340
1341 }
1342
1343 void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
1344                              int skip, int pc, struct pt_regs *regs)
1345 {
1346         if (!(trace_flags & TRACE_ITER_STACKTRACE))
1347                 return;
1348
1349         __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1350 }
1351
1352 void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1353                         int skip, int pc)
1354 {
1355         if (!(trace_flags & TRACE_ITER_STACKTRACE))
1356                 return;
1357
1358         __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
1359 }
1360
1361 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1362                    int pc)
1363 {
1364         __ftrace_trace_stack(tr->buffer, flags, skip, pc, NULL);
1365 }
1366
1367 /**
1368  * trace_dump_stack - record a stack back trace in the trace buffer
1369  */
1370 void trace_dump_stack(void)
1371 {
1372         unsigned long flags;
1373
1374         if (tracing_disabled || tracing_selftest_running)
1375                 return;
1376
1377         local_save_flags(flags);
1378
1379         /* skipping 3 frames seems to get us to the caller of this function */
1380         __ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count(), NULL);
1381 }
1382
1383 static DEFINE_PER_CPU(int, user_stack_count);
1384
1385 void
1386 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
1387 {
1388         struct ftrace_event_call *call = &event_user_stack;
1389         struct ring_buffer_event *event;
1390         struct userstack_entry *entry;
1391         struct stack_trace trace;
1392
1393         if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1394                 return;
1395
1396         /*
1397          * NMIs can not handle page faults, even with fixups.
1398          * Saving the user stack can (and often does) fault.
1399          */
1400         if (unlikely(in_nmi()))
1401                 return;
1402
1403         /*
1404          * prevent recursion, since the user stack tracing may
1405          * trigger other kernel events.
1406          */
1407         preempt_disable();
1408         if (__this_cpu_read(user_stack_count))
1409                 goto out;
1410
1411         __this_cpu_inc(user_stack_count);
1412
1413         event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
1414                                           sizeof(*entry), flags, pc);
1415         if (!event)
1416                 goto out_drop_count;
1417         entry   = ring_buffer_event_data(event);
1418
1419         entry->tgid             = current->tgid;
1420         memset(&entry->caller, 0, sizeof(entry->caller));
1421
1422         trace.nr_entries        = 0;
1423         trace.max_entries       = FTRACE_STACK_ENTRIES;
1424         trace.skip              = 0;
1425         trace.entries           = entry->caller;
1426
1427         save_stack_trace_user(&trace);
1428         if (!filter_check_discard(call, entry, buffer, event))
1429                 ring_buffer_unlock_commit(buffer, event);
1430
1431  out_drop_count:
1432         __this_cpu_dec(user_stack_count);
1433  out:
1434         preempt_enable();
1435 }
1436
1437 #ifdef UNUSED
1438 static void __trace_userstack(struct trace_array *tr, unsigned long flags)
1439 {
1440         ftrace_trace_userstack(tr, flags, preempt_count());
1441 }
1442 #endif /* UNUSED */
1443
1444 #endif /* CONFIG_STACKTRACE */
1445
1446 /**
1447  * trace_vbprintk - write a binary message to the tracing buffer
1448  *
1449  */
1450 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
1451 {
1452         static arch_spinlock_t trace_buf_lock =
1453                 (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
1454         static u32 trace_buf[TRACE_BUF_SIZE];
1455
1456         struct ftrace_event_call *call = &event_bprint;
1457         struct ring_buffer_event *event;
1458         struct ring_buffer *buffer;
1459         struct trace_array *tr = &global_trace;
1460         struct trace_array_cpu *data;
1461         struct bprint_entry *entry;
1462         unsigned long flags;
1463         int disable;
1464         int cpu, len = 0, size, pc;
1465
1466         if (unlikely(tracing_selftest_running || tracing_disabled))
1467                 return 0;
1468
1469         /* Don't pollute graph traces with trace_vprintk internals */
1470         pause_graph_tracing();
1471
1472         pc = preempt_count();
1473         preempt_disable_notrace();
1474         cpu = raw_smp_processor_id();
1475         data = tr->data[cpu];
1476
1477         disable = atomic_inc_return(&data->disabled);
1478         if (unlikely(disable != 1))
1479                 goto out;
1480
1481         /* Lockdep uses trace_printk for lock tracing */
1482         local_irq_save(flags);
1483         arch_spin_lock(&trace_buf_lock);
1484         len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);
1485
1486         if (len > TRACE_BUF_SIZE || len < 0)
1487                 goto out_unlock;
1488
1489         size = sizeof(*entry) + sizeof(u32) * len;
1490         buffer = tr->buffer;
1491         event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
1492                                           flags, pc);
1493         if (!event)
1494                 goto out_unlock;
1495         entry = ring_buffer_event_data(event);
1496         entry->ip                       = ip;
1497         entry->fmt                      = fmt;
1498
1499         memcpy(entry->buf, trace_buf, sizeof(u32) * len);
1500         if (!filter_check_discard(call, entry, buffer, event)) {
1501                 ring_buffer_unlock_commit(buffer, event);
1502                 ftrace_trace_stack(buffer, flags, 6, pc);
1503         }
1504
1505 out_unlock:
1506         arch_spin_unlock(&trace_buf_lock);
1507         local_irq_restore(flags);
1508
1509 out:
1510         atomic_dec_return(&data->disabled);
1511         preempt_enable_notrace();
1512         unpause_graph_tracing();
1513
1514         return len;
1515 }
1516 EXPORT_SYMBOL_GPL(trace_vbprintk);
1517
1518 int trace_array_printk(struct trace_array *tr,
1519                        unsigned long ip, const char *fmt, ...)
1520 {
1521         int ret;
1522         va_list ap;
1523
1524         if (!(trace_flags & TRACE_ITER_PRINTK))
1525                 return 0;
1526
1527         va_start(ap, fmt);
1528         ret = trace_array_vprintk(tr, ip, fmt, ap);
1529         va_end(ap);
1530         return ret;
1531 }
1532
1533 int trace_array_vprintk(struct trace_array *tr,
1534                         unsigned long ip, const char *fmt, va_list args)
1535 {
1536         static arch_spinlock_t trace_buf_lock = __ARCH_SPIN_LOCK_UNLOCKED;
1537         static char trace_buf[TRACE_BUF_SIZE];
1538
1539         struct ftrace_event_call *call = &event_print;
1540         struct ring_buffer_event *event;
1541         struct ring_buffer *buffer;
1542         struct trace_array_cpu *data;
1543         int cpu, len = 0, size, pc;
1544         struct print_entry *entry;
1545         unsigned long irq_flags;
1546         int disable;
1547
1548         if (tracing_disabled || tracing_selftest_running)
1549                 return 0;
1550
1551         pc = preempt_count();
1552         preempt_disable_notrace();
1553         cpu = raw_smp_processor_id();
1554         data = tr->data[cpu];
1555
1556         disable = atomic_inc_return(&data->disabled);
1557         if (unlikely(disable != 1))
1558                 goto out;
1559
1560         pause_graph_tracing();
1561         raw_local_irq_save(irq_flags);
1562         arch_spin_lock(&trace_buf_lock);
1563         len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
1564
1565         size = sizeof(*entry) + len + 1;
1566         buffer = tr->buffer;
1567         event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
1568                                           irq_flags, pc);
1569         if (!event)
1570                 goto out_unlock;
1571         entry = ring_buffer_event_data(event);
1572         entry->ip = ip;
1573
1574         memcpy(&entry->buf, trace_buf, len);
1575         entry->buf[len] = '\0';
1576         if (!filter_check_discard(call, entry, buffer, event)) {
1577                 ring_buffer_unlock_commit(buffer, event);
1578                 ftrace_trace_stack(buffer, irq_flags, 6, pc);
1579         }
1580
1581  out_unlock:
1582         arch_spin_unlock(&trace_buf_lock);
1583         raw_local_irq_restore(irq_flags);
1584         unpause_graph_tracing();
1585  out:
1586         atomic_dec_return(&data->disabled);
1587         preempt_enable_notrace();
1588
1589         return len;
1590 }
1591
1592 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
1593 {
1594         return trace_array_vprintk(&global_trace, ip, fmt, args);
1595 }
1596 EXPORT_SYMBOL_GPL(trace_vprintk);
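
/*
 * Illustrative usage (not part of the original file): the usual entry
 * point into trace_vbprintk()/trace_vprintk() is the trace_printk()
 * macro from linux/kernel.h, e.g.:
 *
 *      trace_printk("irq %d fired, count=%lu\n", irq, count);
 *
 * which records the message in the ring buffer instead of the printk log.
 */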
1597
1598 static void trace_iterator_increment(struct trace_iterator *iter)
1599 {
1600         /* Don't allow ftrace to trace into the ring buffers */
1601         ftrace_disable_cpu();
1602
1603         iter->idx++;
1604         if (iter->buffer_iter[iter->cpu])
1605                 ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
1606
1607         ftrace_enable_cpu();
1608 }
1609
1610 static struct trace_entry *
1611 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
1612                 unsigned long *lost_events)
1613 {
1614         struct ring_buffer_event *event;
1615         struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];
1616
1617         /* Don't allow ftrace to trace into the ring buffers */
1618         ftrace_disable_cpu();
1619
1620         if (buf_iter)
1621                 event = ring_buffer_iter_peek(buf_iter, ts);
1622         else
1623                 event = ring_buffer_peek(iter->tr->buffer, cpu, ts,
1624                                          lost_events);
1625
1626         ftrace_enable_cpu();
1627
1628         if (event) {
1629                 iter->ent_size = ring_buffer_event_length(event);
1630                 return ring_buffer_event_data(event);
1631         }
1632         iter->ent_size = 0;
1633         return NULL;
1634 }
1635
1636 static struct trace_entry *
1637 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
1638                   unsigned long *missing_events, u64 *ent_ts)
1639 {
1640         struct ring_buffer *buffer = iter->tr->buffer;
1641         struct trace_entry *ent, *next = NULL;
1642         unsigned long lost_events = 0, next_lost = 0;
1643         int cpu_file = iter->cpu_file;
1644         u64 next_ts = 0, ts;
1645         int next_cpu = -1;
1646         int next_size = 0;
1647         int cpu;
1648
1649         /*
1650          * If we are in a per_cpu trace file, don't bother iterating over
1651          * all cpus; peek directly at the requested one.
1652          */
1653         if (cpu_file > TRACE_PIPE_ALL_CPU) {
1654                 if (ring_buffer_empty_cpu(buffer, cpu_file))
1655                         return NULL;
1656                 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
1657                 if (ent_cpu)
1658                         *ent_cpu = cpu_file;
1659
1660                 return ent;
1661         }
1662
1663         for_each_tracing_cpu(cpu) {
1664
1665                 if (ring_buffer_empty_cpu(buffer, cpu))
1666                         continue;
1667
1668                 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
1669
1670                 /*
1671                  * Pick the entry with the smallest timestamp:
1672                  */
1673                 if (ent && (!next || ts < next_ts)) {
1674                         next = ent;
1675                         next_cpu = cpu;
1676                         next_ts = ts;
1677                         next_lost = lost_events;
1678                         next_size = iter->ent_size;
1679                 }
1680         }
1681
1682         iter->ent_size = next_size;
1683
1684         if (ent_cpu)
1685                 *ent_cpu = next_cpu;
1686
1687         if (ent_ts)
1688                 *ent_ts = next_ts;
1689
1690         if (missing_events)
1691                 *missing_events = next_lost;
1692
1693         return next;
1694 }
1695
1696 /* Find the next real entry, without updating the iterator itself */
1697 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
1698                                           int *ent_cpu, u64 *ent_ts)
1699 {
1700         return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
1701 }
1702
1703 /* Find the next real entry, and increment the iterator to the next entry */
1704 void *trace_find_next_entry_inc(struct trace_iterator *iter)
1705 {
1706         iter->ent = __find_next_entry(iter, &iter->cpu,
1707                                       &iter->lost_events, &iter->ts);
1708
1709         if (iter->ent)
1710                 trace_iterator_increment(iter);
1711
1712         return iter->ent ? iter : NULL;
1713 }
1714
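     /*
      * Consume (remove) the entry at iter->cpu from the ring buffer so it
      * is not read again.
      */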
1715 static void trace_consume(struct trace_iterator *iter)
1716 {
1717         /* Don't allow ftrace to trace into the ring buffers */
1718         ftrace_disable_cpu();
1719         ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts,
1720                             &iter->lost_events);
1721         ftrace_enable_cpu();
1722 }
1723
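     /* seq_file .next operation: walk forward to the entry at position *pos. */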
1724 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
1725 {
1726         struct trace_iterator *iter = m->private;
1727         int i = (int)*pos;
1728         void *ent;
1729
1730         WARN_ON_ONCE(iter->leftover);
1731
1732         (*pos)++;
1733
1734         /* can't go backwards */
1735         if (iter->idx > i)
1736                 return NULL;
1737
1738         if (iter->idx < 0)
1739                 ent = trace_find_next_entry_inc(iter);
1740         else
1741                 ent = iter;
1742
1743         while (ent && iter->idx < i)
1744                 ent = trace_find_next_entry_inc(iter);
1745
1746         iter->pos = *pos;
1747
1748         return ent;
1749 }
1750
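     /*
      * Reset cpu's buffer iterator and skip any entries recorded before
      * the start of the trace, remembering how many were skipped.
      */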
1751 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
1752 {
1753         struct trace_array *tr = iter->tr;
1754         struct ring_buffer_event *event;
1755         struct ring_buffer_iter *buf_iter;
1756         unsigned long entries = 0;
1757         u64 ts;
1758
1759         tr->data[cpu]->skipped_entries = 0;
1760
1761         if (!iter->buffer_iter[cpu])
1762                 return;
1763
1764         buf_iter = iter->buffer_iter[cpu];
1765         ring_buffer_iter_reset(buf_iter);
1766
1767         /*
1768          * We could have the case with the max latency tracers
1769          * that a reset never took place on a cpu. This is evident
1770          * from the timestamp being before the start of the buffer.
1771          */
1772         while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
1773                 if (ts >= iter->tr->time_start)
1774                         break;
1775                 entries++;
1776                 ring_buffer_read(buf_iter, NULL);
1777         }
1778
1779         tr->data[cpu]->skipped_entries = entries;
1780 }
1781
1782 /*
1783  * The current tracer is copied to avoid taking a global lock
1784  * all around.
1785  */
1786 static void *s_start(struct seq_file *m, loff_t *pos)
1787 {
1788         struct trace_iterator *iter = m->private;
1789         static struct tracer *old_tracer;
1790         int cpu_file = iter->cpu_file;
1791         void *p = NULL;
1792         loff_t l = 0;
1793         int cpu;
1794
1795         /* copy the tracer to avoid using a global lock all around */
1796         mutex_lock(&trace_types_lock);
1797         if (unlikely(old_tracer != current_trace && current_trace)) {
1798                 old_tracer = current_trace;
1799                 *iter->trace = *current_trace;
1800         }
1801         mutex_unlock(&trace_types_lock);
1802
1803         atomic_inc(&trace_record_cmdline_disabled);
1804
1805         if (*pos != iter->pos) {
1806                 iter->ent = NULL;
1807                 iter->cpu = 0;
1808                 iter->idx = -1;
1809
1810                 ftrace_disable_cpu();
1811
1812                 if (cpu_file == TRACE_PIPE_ALL_CPU) {
1813                         for_each_tracing_cpu(cpu)
1814                                 tracing_iter_reset(iter, cpu);
1815                 } else
1816                         tracing_iter_reset(iter, cpu_file);
1817
1818                 ftrace_enable_cpu();
1819
1820                 iter->leftover = 0;
1821                 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
1822                         ;
1823
1824         } else {
1825                 /*
1826                  * If we overflowed the seq_file before, then we want
1827                  * to just reuse the trace_seq buffer again.
1828                  */
1829                 if (iter->leftover)
1830                         p = iter;
1831                 else {
1832                         l = *pos - 1;
1833                         p = s_next(m, p, &l);
1834                 }
1835         }
1836
1837         trace_event_read_lock();
1838         trace_access_lock(cpu_file);
1839         return p;
1840 }
1841
1842 static void s_stop(struct seq_file *m, void *p)
1843 {
1844         struct trace_iterator *iter = m->private;
1845
1846         atomic_dec(&trace_record_cmdline_disabled);
1847         trace_access_unlock(iter->cpu_file);
1848         trace_event_read_unlock();
1849 }
1850
1851 static void print_lat_help_header(struct seq_file *m)
1852 {
1853         seq_puts(m, "#                  _------=> CPU#            \n");
1854         seq_puts(m, "#                 / _-----=> irqs-off        \n");
1855         seq_puts(m, "#                | / _----=> need-resched    \n");
1856         seq_puts(m, "#                || / _---=> hardirq/softirq \n");
1857         seq_puts(m, "#                ||| / _--=> preempt-depth   \n");
1858         seq_puts(m, "#                |||| /     delay             \n");
1859         seq_puts(m, "#  cmd     pid   ||||| time  |   caller      \n");
1860         seq_puts(m, "#     \\   /      |||||  \\    |   /           \n");
1861 }
1862
1863 static void print_func_help_header(struct seq_file *m)
1864 {
1865         seq_puts(m, "#           TASK-PID    CPU#    TIMESTAMP  FUNCTION\n");
1866         seq_puts(m, "#              | |       |          |         |\n");
1867 }
1868
1869
1870 void
1871 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
1872 {
1873         unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
1874         struct trace_array *tr = iter->tr;
1875         struct trace_array_cpu *data = tr->data[tr->cpu];
1876         struct tracer *type = current_trace;
1877         unsigned long entries = 0;
1878         unsigned long total = 0;
1879         unsigned long count;
1880         const char *name = "preemption";
1881         int cpu;
1882
1883         if (type)
1884                 name = type->name;
1885
1886
1887         for_each_tracing_cpu(cpu) {
1888                 count = ring_buffer_entries_cpu(tr->buffer, cpu);
1889                 /*
1890                  * If this buffer has skipped entries, then we hold all
1891                  * entries for the trace and we need to ignore the
1892                  * ones before the time stamp.
1893                  */
1894                 if (tr->data[cpu]->skipped_entries) {
1895                         count -= tr->data[cpu]->skipped_entries;
1896                         /* total is the same as the entries */
1897                         total += count;
1898                 } else
1899                         total += count +
1900                                 ring_buffer_overrun_cpu(tr->buffer, cpu);
1901                 entries += count;
1902         }
1903
1904         seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
1905                    name, UTS_RELEASE);
1906         seq_puts(m, "# -----------------------------------"
1907                  "---------------------------------\n");
1908         seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
1909                    " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
1910                    nsecs_to_usecs(data->saved_latency),
1911                    entries,
1912                    total,
1913                    tr->cpu,
1914 #if defined(CONFIG_PREEMPT_NONE)
1915                    "server",
1916 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
1917                    "desktop",
1918 #elif defined(CONFIG_PREEMPT)
1919                    "preempt",
1920 #else
1921                    "unknown",
1922 #endif
1923                    /* These are reserved for later use */
1924                    0, 0, 0, 0);
1925 #ifdef CONFIG_SMP
1926         seq_printf(m, " #P:%d)\n", num_online_cpus());
1927 #else
1928         seq_puts(m, ")\n");
1929 #endif
1930         seq_puts(m, "#    -----------------\n");
1931         seq_printf(m, "#    | task: %.16s-%d "
1932                    "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
1933                    data->comm, data->pid, data->uid, data->nice,
1934                    data->policy, data->rt_priority);
1935         seq_puts(m, "#    -----------------\n");
1936
1937         if (data->critical_start) {
1938                 seq_puts(m, "#  => started at: ");
1939                 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
1940                 trace_print_seq(m, &iter->seq);
1941                 seq_puts(m, "\n#  => ended at:   ");
1942                 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
1943                 trace_print_seq(m, &iter->seq);
1944                 seq_puts(m, "\n#\n");
1945         }
1946
1947         seq_puts(m, "#\n");
1948 }
1949
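     /*
      * Print a "buffer started" annotation the first time entries from a
      * given cpu appear, when start-of-buffer annotation is enabled.
      */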
1950 static void test_cpu_buff_start(struct trace_iterator *iter)
1951 {
1952         struct trace_seq *s = &iter->seq;
1953
1954         if (!(trace_flags & TRACE_ITER_ANNOTATE))
1955                 return;
1956
1957         if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
1958                 return;
1959
1960         if (cpumask_test_cpu(iter->cpu, iter->started))
1961                 return;
1962
1963         if (iter->tr->data[iter->cpu]->skipped_entries)
1964                 return;
1965
1966         cpumask_set_cpu(iter->cpu, iter->started);
1967
1968         /* Don't print started cpu buffer for the first entry of the trace */
1969         if (iter->idx > 1)
1970                 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
1971                                 iter->cpu);
1972 }
1973
1974 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
1975 {
1976         struct trace_seq *s = &iter->seq;
1977         unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
1978         struct trace_entry *entry;
1979         struct trace_event *event;
1980
1981         entry = iter->ent;
1982
1983         test_cpu_buff_start(iter);
1984
1985         event = ftrace_find_event(entry->type);
1986
1987         if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
1988                 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
1989                         if (!trace_print_lat_context(iter))
1990                                 goto partial;
1991                 } else {
1992                         if (!trace_print_context(iter))
1993                                 goto partial;
1994                 }
1995         }
1996
1997         if (event)
1998                 return event->funcs->trace(iter, sym_flags, event);
1999
2000         if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
2001                 goto partial;
2002
2003         return TRACE_TYPE_HANDLED;
2004 partial:
2005         return TRACE_TYPE_PARTIAL_LINE;
2006 }
2007
2008 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
2009 {
2010         struct trace_seq *s = &iter->seq;
2011         struct trace_entry *entry;
2012         struct trace_event *event;
2013
2014         entry = iter->ent;
2015
2016         if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2017                 if (!trace_seq_printf(s, "%d %d %llu ",
2018                                       entry->pid, iter->cpu, iter->ts))
2019                         goto partial;
2020         }
2021
2022         event = ftrace_find_event(entry->type);
2023         if (event)
2024                 return event->funcs->raw(iter, 0, event);
2025
2026         if (!trace_seq_printf(s, "%d ?\n", entry->type))
2027                 goto partial;
2028
2029         return TRACE_TYPE_HANDLED;
2030 partial:
2031         return TRACE_TYPE_PARTIAL_LINE;
2032 }
2033
2034 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
2035 {
2036         struct trace_seq *s = &iter->seq;
2037         unsigned char newline = '\n';
2038         struct trace_entry *entry;
2039         struct trace_event *event;
2040
2041         entry = iter->ent;
2042
2043         if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2044                 SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
2045                 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
2046                 SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
2047         }
2048
2049         event = ftrace_find_event(entry->type);
2050         if (event) {
2051                 enum print_line_t ret = event->funcs->hex(iter, 0, event);
2052                 if (ret != TRACE_TYPE_HANDLED)
2053                         return ret;
2054         }
2055
2056         SEQ_PUT_FIELD_RET(s, newline);
2057
2058         return TRACE_TYPE_HANDLED;
2059 }
2060
2061 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
2062 {
2063         struct trace_seq *s = &iter->seq;
2064         struct trace_entry *entry;
2065         struct trace_event *event;
2066
2067         entry = iter->ent;
2068
2069         if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2070                 SEQ_PUT_FIELD_RET(s, entry->pid);
2071                 SEQ_PUT_FIELD_RET(s, iter->cpu);
2072                 SEQ_PUT_FIELD_RET(s, iter->ts);
2073         }
2074
2075         event = ftrace_find_event(entry->type);
2076         return event ? event->funcs->binary(iter, 0, event) :
2077                 TRACE_TYPE_HANDLED;
2078 }
2079
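     /* Return 1 if there is nothing left to read for this iterator, 0 otherwise. */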
2080 int trace_empty(struct trace_iterator *iter)
2081 {
2082         int cpu;
2083
2084         /* If we are looking at one CPU buffer, only check that one */
2085         if (iter->cpu_file != TRACE_PIPE_ALL_CPU) {
2086                 cpu = iter->cpu_file;
2087                 if (iter->buffer_iter[cpu]) {
2088                         if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
2089                                 return 0;
2090                 } else {
2091                         if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
2092                                 return 0;
2093                 }
2094                 return 1;
2095         }
2096
2097         for_each_tracing_cpu(cpu) {
2098                 if (iter->buffer_iter[cpu]) {
2099                         if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
2100                                 return 0;
2101                 } else {
2102                         if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
2103                                 return 0;
2104                 }
2105         }
2106
2107         return 1;
2108 }
2109
2110 /*  Called with trace_event_read_lock() held. */
2111 enum print_line_t print_trace_line(struct trace_iterator *iter)
2112 {
2113         enum print_line_t ret;
2114
2115         if (iter->lost_events &&
2116             !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2117                                  iter->cpu, iter->lost_events))
2118                 return TRACE_TYPE_PARTIAL_LINE;
2119
2120         if (iter->trace && iter->trace->print_line) {
2121                 ret = iter->trace->print_line(iter);
2122                 if (ret != TRACE_TYPE_UNHANDLED)
2123                         return ret;
2124         }
2125
2126         if (iter->ent->type == TRACE_BPRINT &&
2127                         trace_flags & TRACE_ITER_PRINTK &&
2128                         trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2129                 return trace_print_bprintk_msg_only(iter);
2130
2131         if (iter->ent->type == TRACE_PRINT &&
2132                         trace_flags & TRACE_ITER_PRINTK &&
2133                         trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2134                 return trace_print_printk_msg_only(iter);
2135
2136         if (trace_flags & TRACE_ITER_BIN)
2137                 return print_bin_fmt(iter);
2138
2139         if (trace_flags & TRACE_ITER_HEX)
2140                 return print_hex_fmt(iter);
2141
2142         if (trace_flags & TRACE_ITER_RAW)
2143                 return print_raw_fmt(iter);
2144
2145         return print_trace_fmt(iter);
2146 }
2147
2148 void trace_default_header(struct seq_file *m)
2149 {
2150         struct trace_iterator *iter = m->private;
2151
2152         if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2153                 return;
2154
2155         if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2156                 /* print nothing if the buffers are empty */
2157                 if (trace_empty(iter))
2158                         return;
2159                 print_trace_header(m, iter);
2160                 if (!(trace_flags & TRACE_ITER_VERBOSE))
2161                         print_lat_help_header(m);
2162         } else {
2163                 if (!(trace_flags & TRACE_ITER_VERBOSE))
2164                         print_func_help_header(m);
2165         }
2166 }
2167
2168 static void test_ftrace_alive(struct seq_file *m)
2169 {
2170         if (!ftrace_is_dead())
2171                 return;
2172         seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
2173         seq_printf(m, "#          MAY BE MISSING FUNCTION EVENTS\n");
2174 }
2175
2176 static int s_show(struct seq_file *m, void *v)
2177 {
2178         struct trace_iterator *iter = v;
2179         int ret;
2180
2181         if (iter->ent == NULL) {
2182                 if (iter->tr) {
2183                         seq_printf(m, "# tracer: %s\n", iter->trace->name);
2184                         seq_puts(m, "#\n");
2185                         test_ftrace_alive(m);
2186                 }
2187                 if (iter->trace && iter->trace->print_header)
2188                         iter->trace->print_header(m);
2189                 else
2190                         trace_default_header(m);
2191
2192         } else if (iter->leftover) {
2193                 /*
2194                  * If we filled the seq_file buffer earlier, we
2195                  * want to just show it now.
2196                  */
2197                 ret = trace_print_seq(m, &iter->seq);
2198
2199                 /* ret should this time be zero, but you never know */
2200                 iter->leftover = ret;
2201
2202         } else {
2203                 print_trace_line(iter);
2204                 ret = trace_print_seq(m, &iter->seq);
2205                 /*
2206                  * If we overflow the seq_file buffer, then it will
2207                  * ask us for this data again at start up.
2208                  * Use that instead.
2209                  *  ret is 0 if seq_file write succeeded.
2210                  *        -1 otherwise.
2211                  */
2212                 iter->leftover = ret;
2213         }
2214
2215         return 0;
2216 }
2217
2218 static const struct seq_operations tracer_seq_ops = {
2219         .start          = s_start,
2220         .next           = s_next,
2221         .stop           = s_stop,
2222         .show           = s_show,
2223 };
2224
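     /*
      * Set up a trace iterator for reading the trace file: copy the current
      * tracer, stop tracing, and create a ring buffer iterator for each cpu
      * (or only the requested cpu for a per_cpu file).
      */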
2225 static struct trace_iterator *
2226 __tracing_open(struct inode *inode, struct file *file)
2227 {
2228         long cpu_file = (long) inode->i_private;
2229         void *fail_ret = ERR_PTR(-ENOMEM);
2230         struct trace_iterator *iter;
2231         struct seq_file *m;
2232         int cpu, ret;
2233
2234         if (tracing_disabled)
2235                 return ERR_PTR(-ENODEV);
2236
2237         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2238         if (!iter)
2239                 return ERR_PTR(-ENOMEM);
2240
2241         /*
2242          * We make a copy of the current tracer to avoid concurrent
2243          * changes to it while we are reading.
2244          */
2245         mutex_lock(&trace_types_lock);
2246         iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
2247         if (!iter->trace)
2248                 goto fail;
2249
2250         if (current_trace)
2251                 *iter->trace = *current_trace;
2252
2253         if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
2254                 goto fail;
2255
2256         if (current_trace && current_trace->print_max)
2257                 iter->tr = &max_tr;
2258         else
2259                 iter->tr = &global_trace;
2260         iter->pos = -1;
2261         mutex_init(&iter->mutex);
2262         iter->cpu_file = cpu_file;
2263
2264         /* Notify the tracer early; before we stop tracing. */
2265         if (iter->trace && iter->trace->open)
2266                 iter->trace->open(iter);
2267
2268         /* Annotate start of buffers if we had overruns */
2269         if (ring_buffer_overruns(iter->tr->buffer))
2270                 iter->iter_flags |= TRACE_FILE_ANNOTATE;
2271
2272         /* stop the trace while dumping */
2273         tracing_stop();
2274
2275         if (iter->cpu_file == TRACE_PIPE_ALL_CPU) {
2276                 for_each_tracing_cpu(cpu) {
2277                         iter->buffer_iter[cpu] =
2278                                 ring_buffer_read_prepare(iter->tr->buffer, cpu);
2279                 }
2280                 ring_buffer_read_prepare_sync();
2281                 for_each_tracing_cpu(cpu) {
2282                         ring_buffer_read_start(iter->buffer_iter[cpu]);
2283                         tracing_iter_reset(iter, cpu);
2284                 }
2285         } else {
2286                 cpu = iter->cpu_file;
2287                 iter->buffer_iter[cpu] =
2288                         ring_buffer_read_prepare(iter->tr->buffer, cpu);
2289                 ring_buffer_read_prepare_sync();
2290                 ring_buffer_read_start(iter->buffer_iter[cpu]);
2291                 tracing_iter_reset(iter, cpu);
2292         }
2293
2294         ret = seq_open(file, &tracer_seq_ops);
2295         if (ret < 0) {
2296                 fail_ret = ERR_PTR(ret);
2297                 goto fail_buffer;
2298         }
2299
2300         m = file->private_data;
2301         m->private = iter;
2302
2303         mutex_unlock(&trace_types_lock);
2304
2305         return iter;
2306
2307  fail_buffer:
2308         for_each_tracing_cpu(cpu) {
2309                 if (iter->buffer_iter[cpu])
2310                         ring_buffer_read_finish(iter->buffer_iter[cpu]);
2311         }
2312         free_cpumask_var(iter->started);
2313         tracing_start();
2314  fail:
2315         mutex_unlock(&trace_types_lock);
2316         kfree(iter->trace);
2317         kfree(iter);
2318
2319         return fail_ret;
2320 }
2321
2322 int tracing_open_generic(struct inode *inode, struct file *filp)
2323 {
2324         if (tracing_disabled)
2325                 return -ENODEV;
2326
2327         filp->private_data = inode->i_private;
2328         return 0;
2329 }
2330
2331 static int tracing_release(struct inode *inode, struct file *file)
2332 {
2333         struct seq_file *m = file->private_data;
2334         struct trace_iterator *iter;
2335         int cpu;
2336
2337         if (!(file->f_mode & FMODE_READ))
2338                 return 0;
2339
2340         iter = m->private;
2341
2342         mutex_lock(&trace_types_lock);
2343         for_each_tracing_cpu(cpu) {
2344                 if (iter->buffer_iter[cpu])
2345                         ring_buffer_read_finish(iter->buffer_iter[cpu]);
2346         }
2347
2348         if (iter->trace && iter->trace->close)
2349                 iter->trace->close(iter);
2350
2351         /* reenable tracing if it was previously enabled */
2352         tracing_start();
2353         mutex_unlock(&trace_types_lock);
2354
2355         seq_release(inode, file);
2356         mutex_destroy(&iter->mutex);
2357         free_cpumask_var(iter->started);
2358         kfree(iter->trace);
2359         kfree(iter);
2360         return 0;
2361 }
2362
2363 static int tracing_open(struct inode *inode, struct file *file)
2364 {
2365         struct trace_iterator *iter;
2366         int ret = 0;
2367
2368         /* If this file was open for write, then erase contents */
2369         if ((file->f_mode & FMODE_WRITE) &&
2370             (file->f_flags & O_TRUNC)) {
2371                 long cpu = (long) inode->i_private;
2372
2373                 if (cpu == TRACE_PIPE_ALL_CPU)
2374                         tracing_reset_online_cpus(&global_trace);
2375                 else
2376                         tracing_reset(&global_trace, cpu);
2377         }
2378
2379         if (file->f_mode & FMODE_READ) {
2380                 iter = __tracing_open(inode, file);
2381                 if (IS_ERR(iter))
2382                         ret = PTR_ERR(iter);
2383                 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
2384                         iter->iter_flags |= TRACE_FILE_LAT_FMT;
2385         }
2386         return ret;
2387 }
2388
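     /* seq_file operations used to list the registered tracers (the available_tracers file). */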
2389 static void *
2390 t_next(struct seq_file *m, void *v, loff_t *pos)
2391 {
2392         struct tracer *t = v;
2393
2394         (*pos)++;
2395
2396         if (t)
2397                 t = t->next;
2398
2399         return t;
2400 }
2401
2402 static void *t_start(struct seq_file *m, loff_t *pos)
2403 {
2404         struct tracer *t;
2405         loff_t l = 0;
2406
2407         mutex_lock(&trace_types_lock);
2408         for (t = trace_types; t && l < *pos; t = t_next(m, t, &l))
2409                 ;
2410
2411         return t;
2412 }
2413
2414 static void t_stop(struct seq_file *m, void *p)
2415 {
2416         mutex_unlock(&trace_types_lock);
2417 }
2418
2419 static int t_show(struct seq_file *m, void *v)
2420 {
2421         struct tracer *t = v;
2422
2423         if (!t)
2424                 return 0;
2425
2426         seq_printf(m, "%s", t->name);
2427         if (t->next)
2428                 seq_putc(m, ' ');
2429         else
2430                 seq_putc(m, '\n');
2431
2432         return 0;
2433 }
2434
2435 static const struct seq_operations show_traces_seq_ops = {
2436         .start          = t_start,
2437         .next           = t_next,
2438         .stop           = t_stop,
2439         .show           = t_show,
2440 };
2441
2442 static int show_traces_open(struct inode *inode, struct file *file)
2443 {
2444         if (tracing_disabled)
2445                 return -ENODEV;
2446
2447         return seq_open(file, &show_traces_seq_ops);
2448 }
2449
2450 static ssize_t
2451 tracing_write_stub(struct file *filp, const char __user *ubuf,
2452                    size_t count, loff_t *ppos)
2453 {
2454         return count;
2455 }
2456
2457 static loff_t tracing_seek(struct file *file, loff_t offset, int origin)
2458 {
2459         if (file->f_mode & FMODE_READ)
2460                 return seq_lseek(file, offset, origin);
2461         else
2462                 return 0;
2463 }
2464
2465 static const struct file_operations tracing_fops = {
2466         .open           = tracing_open,
2467         .read           = seq_read,
2468         .write          = tracing_write_stub,
2469         .llseek         = tracing_seek,
2470         .release        = tracing_release,
2471 };
2472
2473 static const struct file_operations show_traces_fops = {
2474         .open           = show_traces_open,
2475         .read           = seq_read,
2476         .release        = seq_release,
2477         .llseek         = seq_lseek,
2478 };
2479
2480 /*
2481  * Only trace on a CPU if the bitmask is set:
2482  */
2483 static cpumask_var_t tracing_cpumask;
2484
2485 /*
2486  * The tracer itself will not take this lock, but still we want
2487  * to provide a consistent cpumask to user-space:
2488  */
2489 static DEFINE_MUTEX(tracing_cpumask_update_lock);
2490
2491 /*
2492  * Temporary storage for the character representation of the
2493  * CPU bitmask (and one more byte for the newline):
2494  */
2495 static char mask_str[NR_CPUS + 1];
2496
2497 static ssize_t
2498 tracing_cpumask_read(struct file *filp, char __user *ubuf,
2499                      size_t count, loff_t *ppos)
2500 {
2501         int len;
2502
2503         mutex_lock(&tracing_cpumask_update_lock);
2504
2505         len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
2506         if (count - len < 2) {
2507                 count = -EINVAL;
2508                 goto out_err;
2509         }
2510         len += sprintf(mask_str + len, "\n");
2511         count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
2512
2513 out_err:
2514         mutex_unlock(&tracing_cpumask_update_lock);
2515
2516         return count;
2517 }
2518
2519 static ssize_t
2520 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
2521                       size_t count, loff_t *ppos)
2522 {
2523         int err, cpu;
2524         cpumask_var_t tracing_cpumask_new;
2525
2526         if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
2527                 return -ENOMEM;
2528
2529         err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
2530         if (err)
2531                 goto err_unlock;
2532
2533         mutex_lock(&tracing_cpumask_update_lock);
2534
2535         local_irq_disable();
2536         arch_spin_lock(&ftrace_max_lock);
2537         for_each_tracing_cpu(cpu) {
2538                 /*
2539                  * Increase/decrease the disabled counter if we are
2540                  * about to flip a bit in the cpumask:
2541                  */
2542                 if (cpumask_test_cpu(cpu, tracing_cpumask) &&
2543                                 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
2544                         atomic_inc(&global_trace.data[cpu]->disabled);
2545                         ring_buffer_record_disable_cpu(global_trace.buffer, cpu);
2546                 }
2547                 if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
2548                                 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
2549                         atomic_dec(&global_trace.data[cpu]->disabled);
2550                         ring_buffer_record_enable_cpu(global_trace.buffer, cpu);
2551                 }
2552         }
2553         arch_spin_unlock(&ftrace_max_lock);
2554         local_irq_enable();
2555
2556         cpumask_copy(tracing_cpumask, tracing_cpumask_new);
2557
2558         mutex_unlock(&tracing_cpumask_update_lock);
2559         free_cpumask_var(tracing_cpumask_new);
2560
2561         return count;
2562
2563 err_unlock:
2564         free_cpumask_var(tracing_cpumask_new);
2565
2566         return err;
2567 }
2568
2569 static const struct file_operations tracing_cpumask_fops = {
2570         .open           = tracing_open_generic,
2571         .read           = tracing_cpumask_read,
2572         .write          = tracing_cpumask_write,
2573         .llseek         = generic_file_llseek,
2574 };
2575
2576 static int tracing_trace_options_show(struct seq_file *m, void *v)
2577 {
2578         struct tracer_opt *trace_opts;
2579         u32 tracer_flags;
2580         int i;
2581
2582         mutex_lock(&trace_types_lock);
2583         tracer_flags = current_trace->flags->val;
2584         trace_opts = current_trace->flags->opts;
2585
2586         for (i = 0; trace_options[i]; i++) {
2587                 if (trace_flags & (1 << i))
2588                         seq_printf(m, "%s\n", trace_options[i]);
2589                 else
2590                         seq_printf(m, "no%s\n", trace_options[i]);
2591         }
2592
2593         for (i = 0; trace_opts[i].name; i++) {
2594                 if (tracer_flags & trace_opts[i].bit)
2595                         seq_printf(m, "%s\n", trace_opts[i].name);
2596                 else
2597                         seq_printf(m, "no%s\n", trace_opts[i].name);
2598         }
2599         mutex_unlock(&trace_types_lock);
2600
2601         return 0;
2602 }
2603
2604 static int __set_tracer_option(struct tracer *trace,
2605                                struct tracer_flags *tracer_flags,
2606                                struct tracer_opt *opts, int neg)
2607 {
2608         int ret;
2609
2610         ret = trace->set_flag(tracer_flags->val, opts->bit, !neg);
2611         if (ret)
2612                 return ret;
2613
2614         if (neg)
2615                 tracer_flags->val &= ~opts->bit;
2616         else
2617                 tracer_flags->val |= opts->bit;
2618         return 0;
2619 }
2620
2621 /* Try to assign a tracer specific option */
2622 static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
2623 {
2624         struct tracer_flags *tracer_flags = trace->flags;
2625         struct tracer_opt *opts = NULL;
2626         int i;
2627
2628         for (i = 0; tracer_flags->opts[i].name; i++) {
2629                 opts = &tracer_flags->opts[i];
2630
2631                 if (strcmp(cmp, opts->name) == 0)
2632                         return __set_tracer_option(trace, trace->flags,
2633                                                    opts, neg);
2634         }
2635
2636         return -EINVAL;
2637 }
2638
2639 static void set_tracer_flags(unsigned int mask, int enabled)
2640 {
2641         /* do nothing if flag is already set */
2642         if (!!(trace_flags & mask) == !!enabled)
2643                 return;
2644
2645         if (enabled)
2646                 trace_flags |= mask;
2647         else
2648                 trace_flags &= ~mask;
2649
2650         if (mask == TRACE_ITER_RECORD_CMD)
2651                 trace_event_enable_cmd_record(enabled);
2652
2653         if (mask == TRACE_ITER_OVERWRITE)
2654                 ring_buffer_change_overwrite(global_trace.buffer, enabled);
2655 }
2656
2657 static ssize_t
2658 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
2659                         size_t cnt, loff_t *ppos)
2660 {
2661         char buf[64];
2662         char *cmp;
2663         int neg = 0;
2664         int ret = 0;
2665         int i;
2666
2667         if (cnt >= sizeof(buf))
2668                 return -EINVAL;
2669
2670         if (copy_from_user(&buf, ubuf, cnt))
2671                 return -EFAULT;
2672
2673         buf[cnt] = 0;
2674         cmp = strstrip(buf);
2675
2676         if (strncmp(cmp, "no", 2) == 0) {
2677                 neg = 1;
2678                 cmp += 2;
2679         }
2680
2681         mutex_lock(&trace_types_lock);
2682
2683         for (i = 0; trace_options[i]; i++) {
2684                 if (strcmp(cmp, trace_options[i]) == 0) {
2685                         set_tracer_flags(1 << i, !neg);
2686                         break;
2687                 }
2688         }
2689
2690         /* If no option could be set, test the specific tracer options */
2691         if (!trace_options[i])
2692                 ret = set_tracer_option(current_trace, cmp, neg);
2693
2694         mutex_unlock(&trace_types_lock);
2695
2696         if (ret)
2697                 return ret;
2698
2699         *ppos += cnt;
2700
2701         return cnt;
2702 }
2703
2704 static int tracing_trace_options_open(struct inode *inode, struct file *file)
2705 {
2706         if (tracing_disabled)
2707                 return -ENODEV;
2708         return single_open(file, tracing_trace_options_show, NULL);
2709 }
2710
2711 static const struct file_operations tracing_iter_fops = {
2712         .open           = tracing_trace_options_open,
2713         .read           = seq_read,
2714         .llseek         = seq_lseek,
2715         .release        = single_release,
2716         .write          = tracing_trace_options_write,
2717 };
2718
2719 static const char readme_msg[] =
2720         "tracing mini-HOWTO:\n\n"
2721         "# mount -t debugfs nodev /sys/kernel/debug\n\n"
2722         "# cat /sys/kernel/debug/tracing/available_tracers\n"
2723         "wakeup preemptirqsoff preemptoff irqsoff function sched_switch nop\n\n"
2724         "# cat /sys/kernel/debug/tracing/current_tracer\n"
2725         "nop\n"
2726         "# echo sched_switch > /sys/kernel/debug/tracing/current_tracer\n"
2727         "# cat /sys/kernel/debug/tracing/current_tracer\n"
2728         "sched_switch\n"
2729         "# cat /sys/kernel/debug/tracing/trace_options\n"
2730         "noprint-parent nosym-offset nosym-addr noverbose\n"
2731         "# echo print-parent > /sys/kernel/debug/tracing/trace_options\n"
2732         "# echo 1 > /sys/kernel/debug/tracing/tracing_on\n"
2733         "# cat /sys/kernel/debug/tracing/trace > /tmp/trace.txt\n"
2734         "# echo 0 > /sys/kernel/debug/tracing/tracing_on\n"
2735 ;
2736
2737 static ssize_t
2738 tracing_readme_read(struct file *filp, char __user *ubuf,
2739                        size_t cnt, loff_t *ppos)
2740 {
2741         return simple_read_from_buffer(ubuf, cnt, ppos,
2742                                         readme_msg, strlen(readme_msg));
2743 }
2744
2745 static const struct file_operations tracing_readme_fops = {
2746         .open           = tracing_open_generic,
2747         .read           = tracing_readme_read,
2748         .llseek         = generic_file_llseek,
2749 };
2750
2751 static ssize_t
2752 tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
2753                                 size_t cnt, loff_t *ppos)
2754 {
2755         char *buf_comm;
2756         char *file_buf;
2757         char *buf;
2758         int len = 0;
2759         int pid;
2760         int i;
2761
2762         file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL);
2763         if (!file_buf)
2764                 return -ENOMEM;
2765
2766         buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL);
2767         if (!buf_comm) {
2768                 kfree(file_buf);
2769                 return -ENOMEM;
2770         }
2771
2772         buf = file_buf;
2773
2774         for (i = 0; i < SAVED_CMDLINES; i++) {
2775                 int r;
2776
2777                 pid = map_cmdline_to_pid[i];
2778                 if (pid == -1 || pid == NO_CMDLINE_MAP)
2779                         continue;
2780
2781                 trace_find_cmdline(pid, buf_comm);
2782                 r = sprintf(buf, "%d %s\n", pid, buf_comm);
2783                 buf += r;
2784                 len += r;
2785         }
2786
2787         len = simple_read_from_buffer(ubuf, cnt, ppos,
2788                                       file_buf, len);
2789
2790         kfree(file_buf);
2791         kfree(buf_comm);
2792
2793         return len;
2794 }
2795
2796 static const struct file_operations tracing_saved_cmdlines_fops = {
2797     .open       = tracing_open_generic,
2798     .read       = tracing_saved_cmdlines_read,
2799     .llseek     = generic_file_llseek,
2800 };
2801
2802 static ssize_t
2803 tracing_ctrl_read(struct file *filp, char __user *ubuf,
2804                   size_t cnt, loff_t *ppos)
2805 {
2806         char buf[64];
2807         int r;
2808
2809         r = sprintf(buf, "%u\n", tracer_enabled);
2810         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2811 }
2812
2813 static ssize_t
2814 tracing_ctrl_write(struct file *filp, const char __user *ubuf,
2815                    size_t cnt, loff_t *ppos)
2816 {
2817         struct trace_array *tr = filp->private_data;
2818         unsigned long val;
2819         int ret;
2820
2821         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
2822         if (ret)
2823                 return ret;
2824
2825         val = !!val;
2826
2827         mutex_lock(&trace_types_lock);
2828         if (tracer_enabled ^ val) {
2829
2830                 /* Only need to warn if this is used to change the state */
2831                 WARN_ONCE(1, "tracing_enabled is deprecated. Use tracing_on");
2832
2833                 if (val) {
2834                         tracer_enabled = 1;
2835                         if (current_trace->start)
2836                                 current_trace->start(tr);
2837                         tracing_start();
2838                 } else {
2839                         tracer_enabled = 0;
2840                         tracing_stop();
2841                         if (current_trace->stop)
2842                                 current_trace->stop(tr);
2843                 }
2844         }
2845         mutex_unlock(&trace_types_lock);
2846
2847         *ppos += cnt;
2848
2849         return cnt;
2850 }
2851
2852 static ssize_t
2853 tracing_set_trace_read(struct file *filp, char __user *ubuf,
2854                        size_t cnt, loff_t *ppos)
2855 {
2856         char buf[MAX_TRACER_SIZE+2];
2857         int r;
2858
2859         mutex_lock(&trace_types_lock);
2860         if (current_trace)
2861                 r = sprintf(buf, "%s\n", current_trace->name);
2862         else
2863                 r = sprintf(buf, "\n");
2864         mutex_unlock(&trace_types_lock);
2865
2866         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2867 }
2868
2869 int tracer_init(struct tracer *t, struct trace_array *tr)
2870 {
2871         tracing_reset_online_cpus(tr);
2872         return t->init(tr);
2873 }
2874
2875 static int __tracing_resize_ring_buffer(unsigned long size)
2876 {
2877         int ret;
2878
2879         /*
2880          * If the kernel or user changes the size of the ring buffer,
2881          * we use the size that was given, and we can forget about
2882          * expanding it later.
2883          */
2884         ring_buffer_expanded = 1;
2885
2886         ret = ring_buffer_resize(global_trace.buffer, size);
2887         if (ret < 0)
2888                 return ret;
2889
2890         if (!current_trace->use_max_tr)
2891                 goto out;
2892
2893         ret = ring_buffer_resize(max_tr.buffer, size);
2894         if (ret < 0) {
2895                 int r;
2896
2897                 r = ring_buffer_resize(global_trace.buffer,
2898                                        global_trace.entries);
2899                 if (r < 0) {
2900                         /*
2901                          * AARGH! We are left with a different-sized
2902                          * max buffer!!!!
2903                          * The max buffer is our "snapshot" buffer.
2904                          * When a tracer needs a snapshot (one of the
2905                          * latency tracers), it swaps the max buffer
2906                          * with the saved snapshot. We succeeded in
2907                          * updating the size of the main buffer, but failed to
2908                          * update the size of the max buffer. But when we tried
2909                          * to reset the main buffer to the original size, we
2910                          * failed there too. This is very unlikely to
2911                          * happen, but if it does, warn and kill all
2912                          * tracing.
2913                          */
2914                         WARN_ON(1);
2915                         tracing_disabled = 1;
2916                 }
2917                 return ret;
2918         }
2919
2920         max_tr.entries = size;
2921  out:
2922         global_trace.entries = size;
2923
2924         return ret;
2925 }
2926
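     /*
      * Resize the ring buffers with tracing stopped and every per-cpu
      * buffer disabled while the resize takes place.
      */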
2927 static ssize_t tracing_resize_ring_buffer(unsigned long size)
2928 {
2929         int cpu, ret = size;
2930
2931         mutex_lock(&trace_types_lock);
2932
2933         tracing_stop();
2934
2935         /* disable all cpu buffers */
2936         for_each_tracing_cpu(cpu) {
2937                 if (global_trace.data[cpu])
2938                         atomic_inc(&global_trace.data[cpu]->disabled);
2939                 if (max_tr.data[cpu])
2940                         atomic_inc(&max_tr.data[cpu]->disabled);
2941         }
2942
2943         if (size != global_trace.entries)
2944                 ret = __tracing_resize_ring_buffer(size);
2945
2946         if (ret < 0)
2947                 ret = -ENOMEM;
2948
2949         for_each_tracing_cpu(cpu) {
2950                 if (global_trace.data[cpu])
2951                         atomic_dec(&global_trace.data[cpu]->disabled);
2952                 if (max_tr.data[cpu])
2953                         atomic_dec(&max_tr.data[cpu]->disabled);
2954         }
2955
2956         tracing_start();
2957         mutex_unlock(&trace_types_lock);
2958
2959         return ret;
2960 }
2961
2962
2963 /**
2964  * tracing_update_buffers - used by tracing facility to expand ring buffers
2965  *
2966  * To save memory when tracing is never used on a system that has it
2967  * configured in, the ring buffers are set to a minimum size. Once
2968  * a user starts to use the tracing facility, they need to grow
2969  * to their default size.
2970  *
2971  * This function is to be called when a tracer is about to be used.
2972  */
2973 int tracing_update_buffers(void)
2974 {
2975         int ret = 0;
2976
2977         mutex_lock(&trace_types_lock);
2978         if (!ring_buffer_expanded)
2979                 ret = __tracing_resize_ring_buffer(trace_buf_size);
2980         mutex_unlock(&trace_types_lock);
2981
2982         return ret;
2983 }
2984
2985 struct trace_option_dentry;
2986
2987 static struct trace_option_dentry *
2988 create_trace_option_files(struct tracer *tracer);
2989
2990 static void
2991 destroy_trace_option_files(struct trace_option_dentry *topts);
2992
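     /*
      * Switch to the tracer named by @buf: reset the old tracer, rebuild
      * the tracer-specific option files, and initialize the new tracer.
      */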
2993 static int tracing_set_tracer(const char *buf)
2994 {
2995         static struct trace_option_dentry *topts;
2996         struct trace_array *tr = &global_trace;
2997         struct tracer *t;
2998         int ret = 0;
2999
3000         mutex_lock(&trace_types_lock);
3001
3002         if (!ring_buffer_expanded) {
3003                 ret = __tracing_resize_ring_buffer(trace_buf_size);
3004                 if (ret < 0)
3005                         goto out;
3006                 ret = 0;
3007         }
3008
3009         for (t = trace_types; t; t = t->next) {
3010                 if (strcmp(t->name, buf) == 0)
3011                         break;
3012         }
3013         if (!t) {
3014                 ret = -EINVAL;
3015                 goto out;
3016         }
3017         if (t == current_trace)
3018                 goto out;
3019
3020         trace_branch_disable();
3021         if (current_trace && current_trace->reset)
3022                 current_trace->reset(tr);
3023         if (current_trace && current_trace->use_max_tr) {
3024                 /*
3025                  * We don't free the ring buffer; instead, we resize it because
3026                  * the max_tr ring buffer has some state (e.g. ring->clock) and
3027                  * we want to preserve it.
3028                  */
3029                 ring_buffer_resize(max_tr.buffer, 1);
3030                 max_tr.entries = 1;
3031         }
3032         destroy_trace_option_files(topts);
3033
3034         current_trace = t;
3035
3036         topts = create_trace_option_files(current_trace);
3037         if (current_trace->use_max_tr) {
3038                 ret = ring_buffer_resize(max_tr.buffer, global_trace.entries);
3039                 if (ret < 0)
3040                         goto out;
3041                 max_tr.entries = global_trace.entries;
3042         }
3043
3044         if (t->init) {
3045                 ret = tracer_init(t, tr);
3046                 if (ret)
3047                         goto out;
3048         }
3049
3050         trace_branch_enable(tr);
3051  out:
3052         mutex_unlock(&trace_types_lock);
3053
3054         return ret;
3055 }
3056
3057 static ssize_t
3058 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
3059                         size_t cnt, loff_t *ppos)
3060 {
3061         char buf[MAX_TRACER_SIZE+1];
3062         int i;
3063         size_t ret;
3064         int err;
3065
3066         ret = cnt;
3067
3068         if (cnt > MAX_TRACER_SIZE)
3069                 cnt = MAX_TRACER_SIZE;
3070
3071         if (copy_from_user(&buf, ubuf, cnt))
3072                 return -EFAULT;
3073
3074         buf[cnt] = 0;
3075
3076         /* strip ending whitespace. */
3077         for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
3078                 buf[i] = 0;
3079
3080         err = tracing_set_tracer(buf);
3081         if (err)
3082                 return err;
3083
3084         *ppos += ret;
3085
3086         return ret;
3087 }
3088
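     /*
      * The max latency value is stored in nanoseconds but read and
      * written in microseconds.
      */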
3089 static ssize_t
3090 tracing_max_lat_read(struct file *filp, char __user *ubuf,
3091                      size_t cnt, loff_t *ppos)
3092 {
3093         unsigned long *ptr = filp->private_data;
3094         char buf[64];
3095         int r;
3096
3097         r = snprintf(buf, sizeof(buf), "%ld\n",
3098                      *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
3099         if (r > sizeof(buf))
3100                 r = sizeof(buf);
3101         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3102 }
3103
3104 static ssize_t
3105 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
3106                       size_t cnt, loff_t *ppos)
3107 {
3108         unsigned long *ptr = filp->private_data;
3109         unsigned long val;
3110         int ret;
3111
3112         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3113         if (ret)
3114                 return ret;
3115
3116         *ptr = val * 1000;
3117
3118         return cnt;
3119 }
3120
3121 static int tracing_open_pipe(struct inode *inode, struct file *filp)
3122 {
3123         long cpu_file = (long) inode->i_private;
3124         struct trace_iterator *iter;
3125         int ret = 0;
3126
3127         if (tracing_disabled)
3128                 return -ENODEV;
3129
3130         mutex_lock(&trace_types_lock);
3131
3132         /* create a buffer to store the information to pass to userspace */
3133         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
3134         if (!iter) {
3135                 ret = -ENOMEM;
3136                 goto out;
3137         }
3138
3139         /*
3140          * We make a copy of the current tracer to avoid concurrent
3141          * changes to it while we are reading.
3142          */
3143         iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
3144         if (!iter->trace) {
3145                 ret = -ENOMEM;
3146                 goto fail;
3147         }
3148         if (current_trace)
3149                 *iter->trace = *current_trace;
3150
3151         if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
3152                 ret = -ENOMEM;
3153                 goto fail;
3154         }
3155
3156         /* trace pipe does not show start of buffer */
3157         cpumask_setall(iter->started);
3158
3159         if (trace_flags & TRACE_ITER_LATENCY_FMT)
3160                 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3161
3162         iter->cpu_file = cpu_file;
3163         iter->tr = &global_trace;
3164         mutex_init(&iter->mutex);
3165         filp->private_data = iter;
3166
3167         if (iter->trace->pipe_open)
3168                 iter->trace->pipe_open(iter);
3169
3170         nonseekable_open(inode, filp);
3171 out:
3172         mutex_unlock(&trace_types_lock);
3173         return ret;
3174
3175 fail:
3176         kfree(iter->trace);
3177         kfree(iter);
3178         mutex_unlock(&trace_types_lock);
3179         return ret;
3180 }
3181
3182 static int tracing_release_pipe(struct inode *inode, struct file *file)
3183 {
3184         struct trace_iterator *iter = file->private_data;
3185
3186         mutex_lock(&trace_types_lock);
3187
3188         if (iter->trace->pipe_close)
3189                 iter->trace->pipe_close(iter);
3190
3191         mutex_unlock(&trace_types_lock);
3192
3193         free_cpumask_var(iter->started);
3194         mutex_destroy(&iter->mutex);
3195         kfree(iter->trace);
3196         kfree(iter);
3197
3198         return 0;
3199 }
3200
3201 static unsigned int
3202 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
3203 {
3204         struct trace_iterator *iter = filp->private_data;
3205
3206         if (trace_flags & TRACE_ITER_BLOCK) {
3207                 /*
3208                  * Always select as readable when in blocking mode
3209                  */
3210                 return POLLIN | POLLRDNORM;
3211         } else {
3212                 if (!trace_empty(iter))
3213                         return POLLIN | POLLRDNORM;
3214                 poll_wait(filp, &trace_wait, poll_table);
3215                 if (!trace_empty(iter))
3216                         return POLLIN | POLLRDNORM;
3217
3218                 return 0;
3219         }
3220 }
3221
3222
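     /* Default wait: sleep on trace_wait unless entries are already available. */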
3223 void default_wait_pipe(struct trace_iterator *iter)
3224 {
3225         DEFINE_WAIT(wait);
3226
3227         prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);
3228
3229         if (trace_empty(iter))
3230                 schedule();
3231
3232         finish_wait(&trace_wait, &wait);
3233 }
3234
3235 /*
3236  * This is a makeshift waitqueue.
3237  * A tracer might use this callback in some rare cases:
3238  *
3239  *  1) the current tracer might hold the runqueue lock when it wakes up
3240  *     a reader, hence a deadlock (sched, function, and function graph tracers)
3241  *  2) the function tracers trace all functions; we don't want
3242  *     the overhead of calling wake_up and friends
3243  *     (and tracing them too)
3244  *
3245  *     Anyway, this really is a very primitive wakeup.
3246  */
3247 void poll_wait_pipe(struct trace_iterator *iter)
3248 {
3249         set_current_state(TASK_INTERRUPTIBLE);
3250         /* sleep for 100 msecs, and try again. */
3251         schedule_timeout(HZ / 10);
3252 }
3253
3254 /* Must be called with trace_types_lock mutex held. */
3255 static int tracing_wait_pipe(struct file *filp)
3256 {
3257         struct trace_iterator *iter = filp->private_data;
3258
3259         while (trace_empty(iter)) {
3260
3261                 if ((filp->f_flags & O_NONBLOCK)) {
3262                         return -EAGAIN;
3263                 }
3264
3265                 mutex_unlock(&iter->mutex);
3266
3267                 iter->trace->wait_pipe(iter);
3268
3269                 mutex_lock(&iter->mutex);
3270
3271                 if (signal_pending(current))
3272                         return -EINTR;
3273
3274                 /*
3275                  * We block until we read something and tracing is disabled.
3276                  * We still block if tracing is disabled, but we have never
3277                  * read anything. This allows a user to cat this file, and
3278                  * then enable tracing. But after we have read something,
3279                  * we give an EOF when tracing is again disabled.
3280                  *
3281                  * iter->pos will be 0 if we haven't read anything.
3282                  */
3283                 if (!tracer_enabled && iter->pos)
3284                         break;
3285         }
3286
3287         return 1;
3288 }
3289
3290 /*
3291  * Consumer reader.
3292  */
3293 static ssize_t
3294 tracing_read_pipe(struct file *filp, char __user *ubuf,
3295                   size_t cnt, loff_t *ppos)
3296 {
3297         struct trace_iterator *iter = filp->private_data;
3298         static struct tracer *old_tracer;
3299         ssize_t sret;
3300
3301         /* return any leftover data */
3302         sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
3303         if (sret != -EBUSY)
3304                 return sret;
3305
3306         trace_seq_init(&iter->seq);
3307
3308         /* copy the tracer to avoid using a global lock all around */
3309         mutex_lock(&trace_types_lock);
3310         if (unlikely(old_tracer != current_trace && current_trace)) {
3311                 old_tracer = current_trace;
3312                 *iter->trace = *current_trace;
3313         }
3314         mutex_unlock(&trace_types_lock);
3315
3316         /*
3317          * Avoid more than one consumer on a single file descriptor.
3318          * This is just a matter of trace coherency; the ring buffer itself
3319          * is protected.
3320          */
3321         mutex_lock(&iter->mutex);
3322         if (iter->trace->read) {
3323                 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
3324                 if (sret)
3325                         goto out;
3326         }
3327
3328 waitagain:
3329         sret = tracing_wait_pipe(filp);
3330         if (sret <= 0)
3331                 goto out;
3332
3333         /* stop when tracing is finished */
3334         if (trace_empty(iter)) {
3335                 sret = 0;
3336                 goto out;
3337         }
3338
3339         if (cnt >= PAGE_SIZE)
3340                 cnt = PAGE_SIZE - 1;
3341
3342         /* reset all but tr, trace, and overruns */
3343         memset(&iter->seq, 0,
3344                sizeof(struct trace_iterator) -
3345                offsetof(struct trace_iterator, seq));
3346         iter->pos = -1;
3347
3348         trace_event_read_lock();
3349         trace_access_lock(iter->cpu_file);
3350         while (trace_find_next_entry_inc(iter) != NULL) {
3351                 enum print_line_t ret;
3352                 int len = iter->seq.len;
3353
3354                 ret = print_trace_line(iter);
3355                 if (ret == TRACE_TYPE_PARTIAL_LINE) {
3356                         /* don't print partial lines */
3357                         iter->seq.len = len;
3358                         break;
3359                 }
3360                 if (ret != TRACE_TYPE_NO_CONSUME)
3361                         trace_consume(iter);
3362
3363                 if (iter->seq.len >= cnt)
3364                         break;
3365
3366                 /*
3367                  * Setting the full flag means we reached the trace_seq buffer
3368                  * size and we should have left via the partial-output condition
3369                  * above; one of the trace_seq_* functions is not being used properly.
3370                  */
3371                 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
3372                           iter->ent->type);
3373         }
3374         trace_access_unlock(iter->cpu_file);
3375         trace_event_read_unlock();
3376
3377         /* Now copy what we have to the user */
3378         sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
3379         if (iter->seq.readpos >= iter->seq.len)
3380                 trace_seq_init(&iter->seq);
3381
3382         /*
3383          * If there was nothing to send to user, in spite of consuming trace
3384          * entries, go back to wait for more entries.
3385          */
3386         if (sret == -EBUSY)
3387                 goto waitagain;
3388
3389 out:
3390         mutex_unlock(&iter->mutex);
3391
3392         return sret;
3393 }
3394
3395 static void tracing_pipe_buf_release(struct pipe_inode_info *pipe,
3396                                      struct pipe_buffer *buf)
3397 {
3398         __free_page(buf->page);
3399 }
3400
3401 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
3402                                      unsigned int idx)
3403 {
3404         __free_page(spd->pages[idx]);
3405 }
3406
3407 static const struct pipe_buf_operations tracing_pipe_buf_ops = {
3408         .can_merge              = 0,
3409         .map                    = generic_pipe_buf_map,
3410         .unmap                  = generic_pipe_buf_unmap,
3411         .confirm                = generic_pipe_buf_confirm,
3412         .release                = tracing_pipe_buf_release,
3413         .steal                  = generic_pipe_buf_steal,
3414         .get                    = generic_pipe_buf_get,
3415 };
3416
3417 static size_t
3418 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
3419 {
3420         size_t count;
3421         int ret;
3422
3423         /* Seq buffer is page-sized, exactly what we need. */
3424         for (;;) {
3425                 count = iter->seq.len;
3426                 ret = print_trace_line(iter);
3427                 count = iter->seq.len - count;
3428                 if (rem < count) {
3429                         rem = 0;
3430                         iter->seq.len -= count;
3431                         break;
3432                 }
3433                 if (ret == TRACE_TYPE_PARTIAL_LINE) {
3434                         iter->seq.len -= count;
3435                         break;
3436                 }
3437
3438                 if (ret != TRACE_TYPE_NO_CONSUME)
3439                         trace_consume(iter);
3440                 rem -= count;
3441                 if (!trace_find_next_entry_inc(iter))   {
3442                         rem = 0;
3443                         iter->ent = NULL;
3444                         break;
3445                 }
3446         }
3447
3448         return rem;
3449 }
3450
3451 static ssize_t tracing_splice_read_pipe(struct file *filp,
3452                                         loff_t *ppos,
3453                                         struct pipe_inode_info *pipe,
3454                                         size_t len,
3455                                         unsigned int flags)
3456 {
3457         struct page *pages_def[PIPE_DEF_BUFFERS];
3458         struct partial_page partial_def[PIPE_DEF_BUFFERS];
3459         struct trace_iterator *iter = filp->private_data;
3460         struct splice_pipe_desc spd = {
3461                 .pages          = pages_def,
3462                 .partial        = partial_def,
3463                 .nr_pages       = 0, /* This gets updated below. */
3464                 .nr_pages_max   = PIPE_DEF_BUFFERS,
3465                 .flags          = flags,
3466                 .ops            = &tracing_pipe_buf_ops,
3467                 .spd_release    = tracing_spd_release_pipe,
3468         };
3469         static struct tracer *old_tracer;
3470         ssize_t ret;
3471         size_t rem;
3472         unsigned int i;
3473
3474         if (splice_grow_spd(pipe, &spd))
3475                 return -ENOMEM;
3476
3477         /* copy the tracer to avoid using a global lock all around */
3478         mutex_lock(&trace_types_lock);
3479         if (unlikely(old_tracer != current_trace && current_trace)) {
3480                 old_tracer = current_trace;
3481                 *iter->trace = *current_trace;
3482         }
3483         mutex_unlock(&trace_types_lock);
3484
3485         mutex_lock(&iter->mutex);
3486
3487         if (iter->trace->splice_read) {
3488                 ret = iter->trace->splice_read(iter, filp,
3489                                                ppos, pipe, len, flags);
3490                 if (ret)
3491                         goto out_err;
3492         }
3493
3494         ret = tracing_wait_pipe(filp);
3495         if (ret <= 0)
3496                 goto out_err;
3497
3498         if (!iter->ent && !trace_find_next_entry_inc(iter)) {
3499                 ret = -EFAULT;
3500                 goto out_err;
3501         }
3502
3503         trace_event_read_lock();
3504         trace_access_lock(iter->cpu_file);
3505
3506         /* Fill as many pages as possible. */
3507         for (i = 0, rem = len; i < pipe->buffers && rem; i++) {
3508                 spd.pages[i] = alloc_page(GFP_KERNEL);
3509                 if (!spd.pages[i])
3510                         break;
3511
3512                 rem = tracing_fill_pipe_page(rem, iter);
3513
3514                 /* Copy the data into the page, so we can start over. */
3515                 ret = trace_seq_to_buffer(&iter->seq,
3516                                           page_address(spd.pages[i]),
3517                                           iter->seq.len);
3518                 if (ret < 0) {
3519                         __free_page(spd.pages[i]);
3520                         break;
3521                 }
3522                 spd.partial[i].offset = 0;
3523                 spd.partial[i].len = iter->seq.len;
3524
3525                 trace_seq_init(&iter->seq);
3526         }
3527
3528         trace_access_unlock(iter->cpu_file);
3529         trace_event_read_unlock();
3530         mutex_unlock(&iter->mutex);
3531
3532         spd.nr_pages = i;
3533
3534         ret = splice_to_pipe(pipe, &spd);
3535 out:
3536         splice_shrink_spd(&spd);
3537         return ret;
3538
3539 out_err:
3540         mutex_unlock(&iter->mutex);
3541         goto out;
3542 }
3543
3544 static ssize_t
3545 tracing_entries_read(struct file *filp, char __user *ubuf,
3546                      size_t cnt, loff_t *ppos)
3547 {
3548         struct trace_array *tr = filp->private_data;
3549         char buf[96];
3550         int r;
3551
3552         mutex_lock(&trace_types_lock);
3553         if (!ring_buffer_expanded)
3554                 r = sprintf(buf, "%lu (expanded: %lu)\n",
3555                             tr->entries >> 10,
3556                             trace_buf_size >> 10);
3557         else
3558                 r = sprintf(buf, "%lu\n", tr->entries >> 10);
3559         mutex_unlock(&trace_types_lock);
3560
3561         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3562 }
3563
3564 static ssize_t
3565 tracing_entries_write(struct file *filp, const char __user *ubuf,
3566                       size_t cnt, loff_t *ppos)
3567 {
3568         unsigned long val;
3569         int ret;
3570
3571         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3572         if (ret)
3573                 return ret;
3574
3575         /* must have at least 1 entry */
3576         if (!val)
3577                 return -EINVAL;
3578
3579         /* value is in KB */
3580         val <<= 10;
3581
3582         ret = tracing_resize_ring_buffer(val);
3583         if (ret < 0)
3584                 return ret;
3585
3586         *ppos += cnt;
3587
3588         return cnt;
3589 }
3590
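/*
 * Illustrative userspace sketch: resizing the ring buffer through
 * buffer_size_kb.  The value written is in KiB (see the "val <<= 10"
 * above).  The path assumes debugfs is mounted at /sys/kernel/debug;
 * this is not kernel code.
 */
#if 0	/* illustrative only */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/debug/tracing/buffer_size_kb", O_WRONLY);

	if (fd < 0)
		return 1;
	/* ask for a 1024 KiB ring buffer */
	if (write(fd, "1024", strlen("1024")) < 0) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}
#endif
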
3591 static ssize_t
3592 tracing_total_entries_read(struct file *filp, char __user *ubuf,
3593                                 size_t cnt, loff_t *ppos)
3594 {
3595         struct trace_array *tr = filp->private_data;
3596         char buf[64];
3597         int r, cpu;
3598         unsigned long size = 0, expanded_size = 0;
3599
3600         mutex_lock(&trace_types_lock);
3601         for_each_tracing_cpu(cpu) {
3602                 size += tr->entries >> 10;
3603                 if (!ring_buffer_expanded)
3604                         expanded_size += trace_buf_size >> 10;
3605         }
3606         if (ring_buffer_expanded)
3607                 r = sprintf(buf, "%lu\n", size);
3608         else
3609                 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
3610         mutex_unlock(&trace_types_lock);
3611
3612         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3613 }
3614
3615 static ssize_t
3616 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
3617                           size_t cnt, loff_t *ppos)
3618 {
3619         /*
3620          * There is no need to read what the user has written; this function
3621          * exists just so that using "echo" on this file does not return an error.
3622          */
3623
3624         *ppos += cnt;
3625
3626         return cnt;
3627 }
3628
3629 static int
3630 tracing_free_buffer_release(struct inode *inode, struct file *filp)
3631 {
3632         /* disable tracing ? */
3633         if (trace_flags & TRACE_ITER_STOP_ON_FREE)
3634                 tracing_off();
3635         /* resize the ring buffer to 0 */
3636         tracing_resize_ring_buffer(0);
3637
3638         return 0;
3639 }
3640
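/*
 * Illustrative userspace sketch: any write to free_buffer is accepted,
 * and closing the file shrinks the ring buffer to zero (and, with the
 * stop-on-free option set, turns tracing off), as implemented above.
 * Assumes debugfs is mounted at /sys/kernel/debug; not kernel code.
 */
#if 0	/* illustrative only */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/debug/tracing/free_buffer", O_WRONLY);

	if (fd < 0)
		return 1;
	write(fd, "1", 1);	/* contents are ignored */
	close(fd);		/* release path frees the ring buffer */
	return 0;
}
#endif
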
3641 static ssize_t
3642 tracing_mark_write(struct file *filp, const char __user *ubuf,
3643                                         size_t cnt, loff_t *fpos)
3644 {
3645         unsigned long addr = (unsigned long)ubuf;
3646         struct ring_buffer_event *event;
3647         struct ring_buffer *buffer;
3648         struct print_entry *entry;
3649         unsigned long irq_flags;
3650         struct page *pages[2];
3651         int nr_pages = 1;
3652         ssize_t written;
3653         void *page1;
3654         void *page2;
3655         int offset;
3656         int size;
3657         int len;
3658         int ret;
3659
3660         if (tracing_disabled)
3661                 return -EINVAL;
3662
3663         if (cnt > TRACE_BUF_SIZE)
3664                 cnt = TRACE_BUF_SIZE;
3665
3666         /*
3667          * Userspace is injecting traces into the kernel trace buffer.
3668          * We want to be as non-intrusive as possible.
3669          * To do so, we do not want to allocate any special buffers
3670          * or take any locks, but instead write the userspace data
3671          * straight into the ring buffer.
3672          *
3673          * First we need to pin the userspace buffer into memory, which it
3674          * most likely already is, because userspace just referenced it.
3675          * But there's no guarantee of that. By using get_user_pages_fast()
3676          * and kmap_atomic/kunmap_atomic() we can get access to the
3677          * pages directly. We then write the data directly into the
3678          * ring buffer.
3679          */
3680         BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
3681
3682         /* check if the write crosses a page boundary */
3683         if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
3684                 nr_pages = 2;
3685
3686         offset = addr & (PAGE_SIZE - 1);
3687         addr &= PAGE_MASK;
3688
3689         ret = get_user_pages_fast(addr, nr_pages, 0, pages);
3690         if (ret < nr_pages) {
3691                 while (--ret >= 0)
3692                         put_page(pages[ret]);
3693                 written = -EFAULT;
3694                 goto out;
3695         }
3696
3697         page1 = kmap_atomic(pages[0]);
3698         if (nr_pages == 2)
3699                 page2 = kmap_atomic(pages[1]);
3700
3701         local_save_flags(irq_flags);
3702         size = sizeof(*entry) + cnt + 2; /* possible \n added */
3703         buffer = global_trace.buffer;
3704         event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3705                                           irq_flags, preempt_count());
3706         if (!event) {
3707                 /* Ring buffer disabled, return as if not open for write */
3708                 written = -EBADF;
3709                 goto out_unlock;
3710         }
3711
3712         entry = ring_buffer_event_data(event);
3713         entry->ip = _THIS_IP_;
3714
3715         if (nr_pages == 2) {
3716                 len = PAGE_SIZE - offset;
3717                 memcpy(&entry->buf, page1 + offset, len);
3718                 memcpy(&entry->buf[len], page2, cnt - len);
3719         } else
3720                 memcpy(&entry->buf, page1 + offset, cnt);
3721
3722         if (entry->buf[cnt - 1] != '\n') {
3723                 entry->buf[cnt] = '\n';
3724                 entry->buf[cnt + 1] = '\0';
3725         } else
3726                 entry->buf[cnt] = '\0';
3727
3728         ring_buffer_unlock_commit(buffer, event);
3729
3730         written = cnt;
3731
3732         *fpos += written;
3733
3734  out_unlock:
3735         if (nr_pages == 2)
3736                 kunmap_atomic(page2);
3737         kunmap_atomic(page1);
3738         while (nr_pages > 0)
3739                 put_page(pages[--nr_pages]);
3740  out:
3741         return written;
3742 }
3743
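/*
 * Illustrative userspace sketch: injecting a marker string via
 * trace_marker.  The write path above pins the user pages and copies
 * the text straight into the ring buffer as a print entry.  Assumes
 * debugfs is mounted at /sys/kernel/debug; not kernel code.
 */
#if 0	/* illustrative only */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char msg[] = "hello from userspace\n";
	int fd = open("/sys/kernel/debug/tracing/trace_marker", O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, msg, strlen(msg)) < 0) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}
#endif
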
3744 static int tracing_clock_show(struct seq_file *m, void *v)
3745 {
3746         int i;
3747
3748         for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
3749                 seq_printf(m,
3750                         "%s%s%s%s", i ? " " : "",
3751                         i == trace_clock_id ? "[" : "", trace_clocks[i].name,
3752                         i == trace_clock_id ? "]" : "");
3753         seq_putc(m, '\n');
3754
3755         return 0;
3756 }
3757
3758 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
3759                                    size_t cnt, loff_t *fpos)
3760 {
3761         char buf[64];
3762         const char *clockstr;
3763         int i;
3764
3765         if (cnt >= sizeof(buf))
3766                 return -EINVAL;
3767
3768         if (copy_from_user(&buf, ubuf, cnt))
3769                 return -EFAULT;
3770
3771         buf[cnt] = 0;
3772
3773         clockstr = strstrip(buf);
3774
3775         for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
3776                 if (strcmp(trace_clocks[i].name, clockstr) == 0)
3777                         break;
3778         }
3779         if (i == ARRAY_SIZE(trace_clocks))
3780                 return -EINVAL;
3781
3782         trace_clock_id = i;
3783
3784         mutex_lock(&trace_types_lock);
3785
3786         ring_buffer_set_clock(global_trace.buffer, trace_clocks[i].func);
3787         if (max_tr.buffer)
3788                 ring_buffer_set_clock(max_tr.buffer, trace_clocks[i].func);
3789
3790         mutex_unlock(&trace_types_lock);
3791
3792         *fpos += cnt;
3793
3794         return cnt;
3795 }
3796
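/*
 * Illustrative userspace sketch: selecting a trace clock.  Reading
 * trace_clock lists the available clocks with the current one in
 * brackets; writing one of the listed names (e.g. "global") switches
 * the ring buffer timestamp source, as implemented above.  Assumes
 * debugfs is mounted at /sys/kernel/debug; not kernel code.
 */
#if 0	/* illustrative only */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/debug/tracing/trace_clock", O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, "global", strlen("global")) < 0) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}
#endif
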
3797 static int tracing_clock_open(struct inode *inode, struct file *file)
3798 {
3799         if (tracing_disabled)
3800                 return -ENODEV;
3801         return single_open(file, tracing_clock_show, NULL);
3802 }
3803
3804 static const struct file_operations tracing_max_lat_fops = {
3805         .open           = tracing_open_generic,
3806         .read           = tracing_max_lat_read,
3807         .write          = tracing_max_lat_write,
3808         .llseek         = generic_file_llseek,
3809 };
3810
3811 static const struct file_operations tracing_ctrl_fops = {
3812         .open           = tracing_open_generic,
3813         .read           = tracing_ctrl_read,
3814         .write          = tracing_ctrl_write,
3815         .llseek         = generic_file_llseek,
3816 };
3817
3818 static const struct file_operations set_tracer_fops = {
3819         .open           = tracing_open_generic,
3820         .read           = tracing_set_trace_read,
3821         .write          = tracing_set_trace_write,
3822         .llseek         = generic_file_llseek,
3823 };
3824
3825 static const struct file_operations tracing_pipe_fops = {
3826         .open           = tracing_open_pipe,
3827         .poll           = tracing_poll_pipe,
3828         .read           = tracing_read_pipe,
3829         .splice_read    = tracing_splice_read_pipe,
3830         .release        = tracing_release_pipe,
3831         .llseek         = no_llseek,
3832 };
3833
3834 static const struct file_operations tracing_entries_fops = {
3835         .open           = tracing_open_generic,
3836         .read           = tracing_entries_read,
3837         .write          = tracing_entries_write,
3838         .llseek         = generic_file_llseek,
3839 };
3840
3841 static const struct file_operations tracing_total_entries_fops = {
3842         .open           = tracing_open_generic,
3843         .read           = tracing_total_entries_read,
3844         .llseek         = generic_file_llseek,
3845 };
3846
3847 static const struct file_operations tracing_free_buffer_fops = {
3848         .write          = tracing_free_buffer_write,
3849         .release        = tracing_free_buffer_release,
3850 };
3851
3852 static const struct file_operations tracing_mark_fops = {
3853         .open           = tracing_open_generic,
3854         .write          = tracing_mark_write,
3855         .llseek         = generic_file_llseek,
3856 };
3857
3858 static const struct file_operations trace_clock_fops = {
3859         .open           = tracing_clock_open,
3860         .read           = seq_read,
3861         .llseek         = seq_lseek,
3862         .release        = single_release,
3863         .write          = tracing_clock_write,
3864 };
3865
3866 struct ftrace_buffer_info {
3867         struct trace_array      *tr;
3868         void                    *spare;
3869         int                     cpu;
3870         unsigned int            read;
3871 };
3872
3873 static int tracing_buffers_open(struct inode *inode, struct file *filp)
3874 {
3875         int cpu = (int)(long)inode->i_private;
3876         struct ftrace_buffer_info *info;
3877
3878         if (tracing_disabled)
3879                 return -ENODEV;
3880
3881         info = kzalloc(sizeof(*info), GFP_KERNEL);
3882         if (!info)
3883                 return -ENOMEM;
3884
3885         info->tr        = &global_trace;
3886         info->cpu       = cpu;
3887         info->spare     = NULL;
3888         /* Force reading ring buffer for first read */
3889         info->read      = (unsigned int)-1;
3890
3891         filp->private_data = info;
3892
3893         return nonseekable_open(inode, filp);
3894 }
3895
3896 static ssize_t
3897 tracing_buffers_read(struct file *filp, char __user *ubuf,
3898                      size_t count, loff_t *ppos)
3899 {
3900         struct ftrace_buffer_info *info = filp->private_data;
3901         ssize_t ret;
3902         size_t size;
3903
3904         if (!count)
3905                 return 0;
3906
3907         if (!info->spare)
3908                 info->spare = ring_buffer_alloc_read_page(info->tr->buffer, info->cpu);
3909         if (!info->spare)
3910                 return -ENOMEM;
3911
3912         /* Do we have previous read data to read? */
3913         if (info->read < PAGE_SIZE)
3914                 goto read;
3915
3916         trace_access_lock(info->cpu);
3917         ret = ring_buffer_read_page(info->tr->buffer,
3918                                     &info->spare,
3919                                     count,
3920                                     info->cpu, 0);
3921         trace_access_unlock(info->cpu);
3922         if (ret < 0)
3923                 return 0;
3924
3925         info->read = 0;
3926
3927 read:
3928         size = PAGE_SIZE - info->read;
3929         if (size > count)
3930                 size = count;
3931
3932         ret = copy_to_user(ubuf, info->spare + info->read, size);
3933         if (ret == size)
3934                 return -EFAULT;
3935         size -= ret;
3936
3937         *ppos += size;
3938         info->read += size;
3939
3940         return size;
3941 }
3942
3943 static int tracing_buffers_release(struct inode *inode, struct file *file)
3944 {
3945         struct ftrace_buffer_info *info = file->private_data;
3946
3947         if (info->spare)
3948                 ring_buffer_free_read_page(info->tr->buffer, info->spare);
3949         kfree(info);
3950
3951         return 0;
3952 }
3953
3954 struct buffer_ref {
3955         struct ring_buffer      *buffer;
3956         void                    *page;
3957         int                     ref;
3958 };
3959
3960 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
3961                                     struct pipe_buffer *buf)
3962 {
3963         struct buffer_ref *ref = (struct buffer_ref *)buf->private;
3964
3965         if (--ref->ref)
3966                 return;
3967
3968         ring_buffer_free_read_page(ref->buffer, ref->page);
3969         kfree(ref);
3970         buf->private = 0;
3971 }
3972
3973 static int buffer_pipe_buf_steal(struct pipe_inode_info *pipe,
3974                                  struct pipe_buffer *buf)
3975 {
3976         return 1;
3977 }
3978
3979 static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
3980                                 struct pipe_buffer *buf)
3981 {
3982         struct buffer_ref *ref = (struct buffer_ref *)buf->private;
3983
3984         ref->ref++;
3985 }
3986
3987 /* Pipe buffer operations for a buffer. */
3988 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
3989         .can_merge              = 0,
3990         .map                    = generic_pipe_buf_map,
3991         .unmap                  = generic_pipe_buf_unmap,
3992         .confirm                = generic_pipe_buf_confirm,
3993         .release                = buffer_pipe_buf_release,
3994         .steal                  = buffer_pipe_buf_steal,
3995         .get                    = buffer_pipe_buf_get,
3996 };
3997
3998 /*
3999  * Callback from splice_to_pipe(), if we need to release some pages
4000  * at the end of the spd in case we error'ed out in filling the pipe.
4001  */
4002 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
4003 {
4004         struct buffer_ref *ref =
4005                 (struct buffer_ref *)spd->partial[i].private;
4006
4007         if (--ref->ref)
4008                 return;
4009
4010         ring_buffer_free_read_page(ref->buffer, ref->page);
4011         kfree(ref);
4012         spd->partial[i].private = 0;
4013 }
4014
4015 static ssize_t
4016 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
4017                             struct pipe_inode_info *pipe, size_t len,
4018                             unsigned int flags)
4019 {
4020         struct ftrace_buffer_info *info = file->private_data;
4021         struct partial_page partial_def[PIPE_DEF_BUFFERS];
4022         struct page *pages_def[PIPE_DEF_BUFFERS];
4023         struct splice_pipe_desc spd = {
4024                 .pages          = pages_def,
4025                 .partial        = partial_def,
4026                 .nr_pages_max   = PIPE_DEF_BUFFERS,
4027                 .flags          = flags,
4028                 .ops            = &buffer_pipe_buf_ops,
4029                 .spd_release    = buffer_spd_release,
4030         };
4031         struct buffer_ref *ref;
4032         int entries, size, i;
4033         size_t ret;
4034
4035         if (splice_grow_spd(pipe, &spd))
4036                 return -ENOMEM;
4037
4038         if (*ppos & (PAGE_SIZE - 1)) {
4039                 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
4040                 ret = -EINVAL;
4041                 goto out;
4042         }
4043
4044         if (len & (PAGE_SIZE - 1)) {
4045                 WARN_ONCE(1, "Ftrace: splice_read should page-align\n");
4046                 if (len < PAGE_SIZE) {
4047                         ret = -EINVAL;
4048                         goto out;
4049                 }
4050                 len &= PAGE_MASK;
4051         }
4052
4053         trace_access_lock(info->cpu);
4054         entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
4055
4056         for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) {
4057                 struct page *page;
4058                 int r;
4059
4060                 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
4061                 if (!ref)
4062                         break;
4063
4064                 ref->ref = 1;
4065                 ref->buffer = info->tr->buffer;
4066                 ref->page = ring_buffer_alloc_read_page(ref->buffer, info->cpu);
4067                 if (!ref->page) {
4068                         kfree(ref);
4069                         break;
4070                 }
4071
4072                 r = ring_buffer_read_page(ref->buffer, &ref->page,
4073                                           len, info->cpu, 1);
4074                 if (r < 0) {
4075                         ring_buffer_free_read_page(ref->buffer, ref->page);
4076                         kfree(ref);
4077                         break;
4078                 }
4079
4080                 /*
4081                  * zero out any leftover data; this page is going to
4082                  * user land.
4083                  */
4084                 size = ring_buffer_page_len(ref->page);
4085                 if (size < PAGE_SIZE)
4086                         memset(ref->page + size, 0, PAGE_SIZE - size);
4087
4088                 page = virt_to_page(ref->page);
4089
4090                 spd.pages[i] = page;
4091                 spd.partial[i].len = PAGE_SIZE;
4092                 spd.partial[i].offset = 0;
4093                 spd.partial[i].private = (unsigned long)ref;
4094                 spd.nr_pages++;
4095                 *ppos += PAGE_SIZE;
4096
4097                 entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
4098         }
4099
4100         trace_access_unlock(info->cpu);
4101         spd.nr_pages = i;
4102
4103         /* did we read anything? */
4104         if (!spd.nr_pages) {
4105                 if (flags & SPLICE_F_NONBLOCK)
4106                         ret = -EAGAIN;
4107                 else
4108                         ret = 0;
4109                 /* TODO: block */
4110                 goto out;
4111         }
4112
4113         ret = splice_to_pipe(pipe, &spd);
4114         splice_shrink_spd(&spd);
4115 out:
4116         return ret;
4117 }
4118
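/*
 * Illustrative userspace sketch: splicing raw, page-sized chunks of a
 * per-cpu ring buffer into a pipe, which is what the splice_read path
 * above is meant for.  The request length must be page-aligned, as
 * enforced above; the 4096-byte page size and the cpu0 path are
 * assumptions for this sketch.  Not kernel code.
 */
#if 0	/* illustrative only */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int pipefd[2];
	int fd = open("/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw",
		      O_RDONLY);

	if (fd < 0 || pipe(pipefd) < 0)
		return 1;
	/* move one page of binary ring-buffer data into the pipe */
	if (splice(fd, NULL, pipefd[1], NULL, 4096, SPLICE_F_NONBLOCK) < 0) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}
#endif
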
4119 static const struct file_operations tracing_buffers_fops = {
4120         .open           = tracing_buffers_open,
4121         .read           = tracing_buffers_read,
4122         .release        = tracing_buffers_release,
4123         .splice_read    = tracing_buffers_splice_read,
4124         .llseek         = no_llseek,
4125 };
4126
4127 static ssize_t
4128 tracing_stats_read(struct file *filp, char __user *ubuf,
4129                    size_t count, loff_t *ppos)
4130 {
4131         unsigned long cpu = (unsigned long)filp->private_data;
4132         struct trace_array *tr = &global_trace;
4133         struct trace_seq *s;
4134         unsigned long cnt;
4135         unsigned long long t;
4136         unsigned long usec_rem;
4137
4138         s = kmalloc(sizeof(*s), GFP_KERNEL);
4139         if (!s)
4140                 return -ENOMEM;
4141
4142         trace_seq_init(s);
4143
4144         cnt = ring_buffer_entries_cpu(tr->buffer, cpu);
4145         trace_seq_printf(s, "entries: %ld\n", cnt);
4146
4147         cnt = ring_buffer_overrun_cpu(tr->buffer, cpu);
4148         trace_seq_printf(s, "overrun: %ld\n", cnt);
4149
4150         cnt = ring_buffer_commit_overrun_cpu(tr->buffer, cpu);
4151         trace_seq_printf(s, "commit overrun: %ld\n", cnt);
4152
4153         cnt = ring_buffer_bytes_cpu(tr->buffer, cpu);
4154         trace_seq_printf(s, "bytes: %ld\n", cnt);
4155
4156         t = ns2usecs(ring_buffer_oldest_event_ts(tr->buffer, cpu));
4157         usec_rem = do_div(t, USEC_PER_SEC);
4158         trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n", t, usec_rem);
4159
4160         t = ns2usecs(ring_buffer_time_stamp(tr->buffer, cpu));
4161         usec_rem = do_div(t, USEC_PER_SEC);
4162         trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
4163
4164         count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
4165
4166         kfree(s);
4167
4168         return count;
4169 }
4170
4171 static const struct file_operations tracing_stats_fops = {
4172         .open           = tracing_open_generic,
4173         .read           = tracing_stats_read,
4174         .llseek         = generic_file_llseek,
4175 };
4176
4177 #ifdef CONFIG_DYNAMIC_FTRACE
4178
4179 int __weak ftrace_arch_read_dyn_info(char *buf, int size)
4180 {
4181         return 0;
4182 }
4183
4184 static ssize_t
4185 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
4186                   size_t cnt, loff_t *ppos)
4187 {
4188         static char ftrace_dyn_info_buffer[1024];
4189         static DEFINE_MUTEX(dyn_info_mutex);
4190         unsigned long *p = filp->private_data;
4191         char *buf = ftrace_dyn_info_buffer;
4192         int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
4193         int r;
4194
4195         mutex_lock(&dyn_info_mutex);
4196         r = sprintf(buf, "%ld ", *p);
4197
4198         r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
4199         buf[r++] = '\n';
4200
4201         r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4202
4203         mutex_unlock(&dyn_info_mutex);
4204
4205         return r;
4206 }
4207
4208 static const struct file_operations tracing_dyn_info_fops = {
4209         .open           = tracing_open_generic,
4210         .read           = tracing_read_dyn_info,
4211         .llseek         = generic_file_llseek,
4212 };
4213 #endif
4214
4215 static struct dentry *d_tracer;
4216
4217 struct dentry *tracing_init_dentry(void)
4218 {
4219         static int once;
4220
4221         if (d_tracer)
4222                 return d_tracer;
4223
4224         if (!debugfs_initialized())
4225                 return NULL;
4226
4227         d_tracer = debugfs_create_dir("tracing", NULL);
4228
4229         if (!d_tracer && !once) {
4230                 once = 1;
4231                 pr_warning("Could not create debugfs directory 'tracing'\n");
4232                 return NULL;
4233         }
4234
4235         return d_tracer;
4236 }
4237
4238 static struct dentry *d_percpu;
4239
4240 struct dentry *tracing_dentry_percpu(void)
4241 {
4242         static int once;
4243         struct dentry *d_tracer;
4244
4245         if (d_percpu)
4246                 return d_percpu;
4247
4248         d_tracer = tracing_init_dentry();
4249
4250         if (!d_tracer)
4251                 return NULL;
4252
4253         d_percpu = debugfs_create_dir("per_cpu", d_tracer);
4254
4255         if (!d_percpu && !once) {
4256                 once = 1;
4257                 pr_warning("Could not create debugfs directory 'per_cpu'\n");
4258                 return NULL;
4259         }
4260
4261         return d_percpu;
4262 }
4263
4264 static void tracing_init_debugfs_percpu(long cpu)
4265 {
4266         struct dentry *d_percpu = tracing_dentry_percpu();
4267         struct dentry *d_cpu;
4268         char cpu_dir[30]; /* 30 characters should be more than enough */
4269
4270         snprintf(cpu_dir, 30, "cpu%ld", cpu);
4271         d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
4272         if (!d_cpu) {
4273                 pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
4274                 return;
4275         }
4276
4277         /* per cpu trace_pipe */
4278         trace_create_file("trace_pipe", 0444, d_cpu,
4279                         (void *) cpu, &tracing_pipe_fops);
4280
4281         /* per cpu trace */
4282         trace_create_file("trace", 0644, d_cpu,
4283                         (void *) cpu, &tracing_fops);
4284
4285         trace_create_file("trace_pipe_raw", 0444, d_cpu,
4286                         (void *) cpu, &tracing_buffers_fops);
4287
4288         trace_create_file("stats", 0444, d_cpu,
4289                         (void *) cpu, &tracing_stats_fops);
4290 }
4291
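/*
 * Illustrative userspace sketch: the directory created above exposes,
 * for each CPU N, per-cpu views of the buffer under
 * /sys/kernel/debug/tracing/per_cpu/cpuN/ (trace, trace_pipe,
 * trace_pipe_raw and stats).  This sketch dumps cpu0's statistics.
 * The debugfs mount point is an assumption; not kernel code.
 */
#if 0	/* illustrative only */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[1024];
	ssize_t n;
	int fd = open("/sys/kernel/debug/tracing/per_cpu/cpu0/stats", O_RDONLY);

	if (fd < 0)
		return 1;
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);
	close(fd);
	return 0;
}
#endif
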
4292 #ifdef CONFIG_FTRACE_SELFTEST
4293 /* Let selftest have access to static functions in this file */
4294 #include "trace_selftest.c"
4295 #endif
4296
4297 struct trace_option_dentry {
4298         struct tracer_opt               *opt;
4299         struct tracer_flags             *flags;
4300         struct dentry                   *entry;
4301 };
4302
4303 static ssize_t
4304 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
4305                         loff_t *ppos)
4306 {
4307         struct trace_option_dentry *topt = filp->private_data;
4308         char *buf;
4309
4310         if (topt->flags->val & topt->opt->bit)
4311                 buf = "1\n";
4312         else
4313                 buf = "0\n";
4314
4315         return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
4316 }
4317
4318 static ssize_t
4319 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
4320                          loff_t *ppos)
4321 {
4322         struct trace_option_dentry *topt = filp->private_data;
4323         unsigned long val;
4324         int ret;
4325
4326         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4327         if (ret)
4328                 return ret;
4329
4330         if (val != 0 && val != 1)
4331                 return -EINVAL;
4332
4333         if (!!(topt->flags->val & topt->opt->bit) != val) {
4334                 mutex_lock(&trace_types_lock);
4335                 ret = __set_tracer_option(current_trace, topt->flags,
4336                                           topt->opt, !val);
4337                 mutex_unlock(&trace_types_lock);
4338                 if (ret)
4339                         return ret;
4340         }
4341
4342         *ppos += cnt;
4343
4344         return cnt;
4345 }
4346
4347
4348 static const struct file_operations trace_options_fops = {
4349         .open = tracing_open_generic,
4350         .read = trace_options_read,
4351         .write = trace_options_write,
4352         .llseek = generic_file_llseek,
4353 };
4354
4355 static ssize_t
4356 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
4357                         loff_t *ppos)
4358 {
4359         long index = (long)filp->private_data;
4360         char *buf;
4361
4362         if (trace_flags & (1 << index))
4363                 buf = "1\n";
4364         else
4365                 buf = "0\n";
4366
4367         return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
4368 }
4369
4370 static ssize_t
4371 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
4372                          loff_t *ppos)
4373 {
4374         long index = (long)filp->private_data;
4375         unsigned long val;
4376         int ret;
4377
4378         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4379         if (ret)
4380                 return ret;
4381
4382         if (val != 0 && val != 1)
4383                 return -EINVAL;
4384
4385         mutex_lock(&trace_types_lock);
4386         set_tracer_flags(1 << index, val);
4387         mutex_unlock(&trace_types_lock);
4388
4389         *ppos += cnt;
4390
4391         return cnt;
4392 }
4393
4394 static const struct file_operations trace_options_core_fops = {
4395         .open = tracing_open_generic,
4396         .read = trace_options_core_read,
4397         .write = trace_options_core_write,
4398         .llseek = generic_file_llseek,
4399 };
4400
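/*
 * Illustrative userspace sketch: every core trace option is exposed as
 * a boolean file under tracing/options/, handled by the read/write
 * functions above; writing "0" or "1" clears or sets the flag.  The
 * option name used here ("sym-offset") and the debugfs mount point are
 * assumptions for the sketch; not kernel code.
 */
#if 0	/* illustrative only */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/debug/tracing/options/sym-offset", O_WRONLY);

	if (fd < 0)
		return 1;
	write(fd, "1", 1);	/* enable the option */
	close(fd);
	return 0;
}
#endif
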
4401 struct dentry *trace_create_file(const char *name,
4402                                  mode_t mode,
4403                                  struct dentry *parent,
4404                                  void *data,
4405                                  const struct file_operations *fops)
4406 {
4407         struct dentry *ret;
4408
4409         ret = debugfs_create_file(name, mode, parent, data, fops);
4410         if (!ret)
4411                 pr_warning("Could not create debugfs '%s' entry\n", name);
4412
4413         return ret;
4414 }
4415
4416
4417 static struct dentry *trace_options_init_dentry(void)
4418 {
4419         struct dentry *d_tracer;
4420         static struct dentry *t_options;
4421
4422         if (t_options)
4423                 return t_options;
4424
4425         d_tracer = tracing_init_dentry();
4426         if (!d_tracer)
4427                 return NULL;
4428
4429         t_options = debugfs_create_dir("options", d_tracer);
4430         if (!t_options) {
4431                 pr_warning("Could not create debugfs directory 'options'\n");
4432                 return NULL;
4433         }
4434
4435         return t_options;
4436 }
4437
4438 static void
4439 create_trace_option_file(struct trace_option_dentry *topt,
4440                          struct tracer_flags *flags,
4441                          struct tracer_opt *opt)
4442 {
4443         struct dentry *t_options;
4444
4445         t_options = trace_options_init_dentry();
4446         if (!t_options)
4447                 return;
4448
4449         topt->flags = flags;
4450         topt->opt = opt;
4451
4452         topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
4453                                     &trace_options_fops);
4454
4455 }
4456
4457 static struct trace_option_dentry *
4458 create_trace_option_files(struct tracer *tracer)
4459 {
4460         struct trace_option_dentry *topts;
4461         struct tracer_flags *flags;
4462         struct tracer_opt *opts;
4463         int cnt;
4464
4465         if (!tracer)
4466                 return NULL;
4467
4468         flags = tracer->flags;
4469
4470         if (!flags || !flags->opts)
4471                 return NULL;
4472
4473         opts = flags->opts;
4474
4475         for (cnt = 0; opts[cnt].name; cnt++)
4476                 ;
4477
4478         topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
4479         if (!topts)
4480                 return NULL;
4481
4482         for (cnt = 0; opts[cnt].name; cnt++)
4483                 create_trace_option_file(&topts[cnt], flags,
4484                                          &opts[cnt]);
4485
4486         return topts;
4487 }
4488
4489 static void
4490 destroy_trace_option_files(struct trace_option_dentry *topts)
4491 {
4492         int cnt;
4493
4494         if (!topts)
4495                 return;
4496
4497         for (cnt = 0; topts[cnt].opt; cnt++) {
4498                 if (topts[cnt].entry)
4499                         debugfs_remove(topts[cnt].entry);
4500         }
4501
4502         kfree(topts);
4503 }
4504
4505 static struct dentry *
4506 create_trace_option_core_file(const char *option, long index)
4507 {
4508         struct dentry *t_options;
4509
4510         t_options = trace_options_init_dentry();
4511         if (!t_options)
4512                 return NULL;
4513
4514         return trace_create_file(option, 0644, t_options, (void *)index,
4515                                     &trace_options_core_fops);
4516 }
4517
4518 static __init void create_trace_options_dir(void)
4519 {
4520         struct dentry *t_options;
4521         int i;
4522
4523         t_options = trace_options_init_dentry();
4524         if (!t_options)
4525                 return;
4526
4527         for (i = 0; trace_options[i]; i++)
4528                 create_trace_option_core_file(trace_options[i], i);
4529 }
4530
4531 static __init int tracer_init_debugfs(void)
4532 {
4533         struct dentry *d_tracer;
4534         int cpu;
4535
4536         trace_access_lock_init();
4537
4538         d_tracer = tracing_init_dentry();
4539
4540         trace_create_file("tracing_enabled", 0644, d_tracer,
4541                         &global_trace, &tracing_ctrl_fops);
4542
4543         trace_create_file("trace_options", 0644, d_tracer,
4544                         NULL, &tracing_iter_fops);
4545
4546         trace_create_file("tracing_cpumask", 0644, d_tracer,
4547                         NULL, &tracing_cpumask_fops);
4548
4549         trace_create_file("trace", 0644, d_tracer,
4550                         (void *) TRACE_PIPE_ALL_CPU, &tracing_fops);
4551
4552         trace_create_file("available_tracers", 0444, d_tracer,
4553                         &global_trace, &show_traces_fops);
4554
4555         trace_create_file("current_tracer", 0644, d_tracer,
4556                         &global_trace, &set_tracer_fops);
4557
4558 #ifdef CONFIG_TRACER_MAX_TRACE
4559         trace_create_file("tracing_max_latency", 0644, d_tracer,
4560                         &tracing_max_latency, &tracing_max_lat_fops);
4561 #endif
4562
4563         trace_create_file("tracing_thresh", 0644, d_tracer,
4564                         &tracing_thresh, &tracing_max_lat_fops);
4565
4566         trace_create_file("README", 0444, d_tracer,
4567                         NULL, &tracing_readme_fops);
4568
4569         trace_create_file("trace_pipe", 0444, d_tracer,
4570                         (void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops);
4571
4572         trace_create_file("buffer_size_kb", 0644, d_tracer,
4573                         &global_trace, &tracing_entries_fops);
4574
4575         trace_create_file("buffer_total_size_kb", 0444, d_tracer,
4576                         &global_trace, &tracing_total_entries_fops);
4577
4578         trace_create_file("free_buffer", 0644, d_tracer,
4579                         &global_trace, &tracing_free_buffer_fops);
4580
4581         trace_create_file("trace_marker", 0220, d_tracer,
4582                         NULL, &tracing_mark_fops);
4583
4584         trace_create_file("saved_cmdlines", 0444, d_tracer,
4585                         NULL, &tracing_saved_cmdlines_fops);
4586
4587         trace_create_file("trace_clock", 0644, d_tracer, NULL,
4588                           &trace_clock_fops);
4589
4590 #ifdef CONFIG_DYNAMIC_FTRACE
4591         trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
4592                         &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
4593 #endif
4594
4595         create_trace_options_dir();
4596
4597         for_each_tracing_cpu(cpu)
4598                 tracing_init_debugfs_percpu(cpu);
4599
4600         return 0;
4601 }
4602
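/*
 * Illustrative userspace sketch: the files created above form the
 * usual debugfs tracing interface.  This selects the "nop" tracer
 * (registered later in this file) and then dumps a snapshot of the
 * trace buffer.  Assumes debugfs is mounted at /sys/kernel/debug;
 * not kernel code.
 */
#if 0	/* illustrative only */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd;

	fd = open("/sys/kernel/debug/tracing/current_tracer", O_WRONLY);
	if (fd < 0)
		return 1;
	if (write(fd, "nop", strlen("nop")) < 0) {
		close(fd);
		return 1;
	}
	close(fd);

	fd = open("/sys/kernel/debug/tracing/trace", O_RDONLY);
	if (fd < 0)
		return 1;
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);
	close(fd);
	return 0;
}
#endif
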
4603 static int trace_panic_handler(struct notifier_block *this,
4604                                unsigned long event, void *unused)
4605 {
4606         if (ftrace_dump_on_oops)
4607                 ftrace_dump(ftrace_dump_on_oops);
4608         return NOTIFY_OK;
4609 }
4610
4611 static struct notifier_block trace_panic_notifier = {
4612         .notifier_call  = trace_panic_handler,
4613         .next           = NULL,
4614         .priority       = 150   /* priority: INT_MAX >= x >= 0 */
4615 };
4616
4617 static int trace_die_handler(struct notifier_block *self,
4618                              unsigned long val,
4619                              void *data)
4620 {
4621         switch (val) {
4622         case DIE_OOPS:
4623                 if (ftrace_dump_on_oops)
4624                         ftrace_dump(ftrace_dump_on_oops);
4625                 break;
4626         default:
4627                 break;
4628         }
4629         return NOTIFY_OK;
4630 }
4631
4632 static struct notifier_block trace_die_notifier = {
4633         .notifier_call = trace_die_handler,
4634         .priority = 200
4635 };
4636
4637 /*
4638  * printk is capped at a max of 1024 characters; we really don't need it that big.
4639  * Nothing should be printing 1000 characters anyway.
4640  */
4641 #define TRACE_MAX_PRINT         1000
4642
4643 /*
4644  * Define here KERN_TRACE so that we have one place to modify
4645  * it if we decide to change what log level the ftrace dump
4646  * should be at.
4647  */
4648 #define KERN_TRACE              KERN_EMERG
4649
4650 void
4651 trace_printk_seq(struct trace_seq *s)
4652 {
4653         /* Probably should print a warning here. */
4654         if (s->len >= 1000)
4655                 s->len = 1000;
4656
4657         /* should be NUL-terminated already, but we are paranoid. */
4658         s->buffer[s->len] = 0;
4659
4660         printk(KERN_TRACE "%s", s->buffer);
4661
4662         trace_seq_init(s);
4663 }
4664
4665 void trace_init_global_iter(struct trace_iterator *iter)
4666 {
4667         iter->tr = &global_trace;
4668         iter->trace = current_trace;
4669         iter->cpu_file = TRACE_PIPE_ALL_CPU;
4670 }
4671
4672 static void
4673 __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
4674 {
4675         static arch_spinlock_t ftrace_dump_lock =
4676                 (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
4677         /* use static because iter can be a bit big for the stack */
4678         static struct trace_iterator iter;
4679         unsigned int old_userobj;
4680         static int dump_ran;
4681         unsigned long flags;
4682         int cnt = 0, cpu;
4683
4684         /* only one dump */
4685         local_irq_save(flags);
4686         arch_spin_lock(&ftrace_dump_lock);
4687         if (dump_ran)
4688                 goto out;
4689
4690         dump_ran = 1;
4691
4692         tracing_off();
4693
4694         /* Did function tracer already get disabled? */
4695         if (ftrace_is_dead()) {
4696                 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
4697                 printk("#          MAY BE MISSING FUNCTION EVENTS\n");
4698         }
4699
4700         if (disable_tracing)
4701                 ftrace_kill();
4702
4703         trace_init_global_iter(&iter);
4704
4705         for_each_tracing_cpu(cpu) {
4706                 atomic_inc(&iter.tr->data[cpu]->disabled);
4707         }
4708
4709         old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
4710
4711         /* don't look at user memory in panic mode */
4712         trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
4713
4714         /* Simulate the iterator */
4715         iter.tr = &global_trace;
4716         iter.trace = current_trace;
4717
4718         switch (oops_dump_mode) {
4719         case DUMP_ALL:
4720                 iter.cpu_file = TRACE_PIPE_ALL_CPU;
4721                 break;
4722         case DUMP_ORIG:
4723                 iter.cpu_file = raw_smp_processor_id();
4724                 break;
4725         case DUMP_NONE:
4726                 goto out_enable;
4727         default:
4728                 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
4729                 iter.cpu_file = TRACE_PIPE_ALL_CPU;
4730         }
4731
4732         printk(KERN_TRACE "Dumping ftrace buffer:\n");
4733
4734         /*
4735          * We need to stop all tracing on all CPUs to read
4736          * the next buffer. This is a bit expensive, but is
4737          * not done often. We read everything we can,
4738          * and then release the locks again.
4739          */
4740
4741         while (!trace_empty(&iter)) {
4742
4743                 if (!cnt)
4744                         printk(KERN_TRACE "---------------------------------\n");
4745
4746                 cnt++;
4747
4748                 /* reset all but tr, trace, and overruns */
4749                 memset(&iter.seq, 0,
4750                        sizeof(struct trace_iterator) -
4751                        offsetof(struct trace_iterator, seq));
4752                 iter.iter_flags |= TRACE_FILE_LAT_FMT;
4753                 iter.pos = -1;
4754
4755                 if (trace_find_next_entry_inc(&iter) != NULL) {
4756                         int ret;
4757
4758                         ret = print_trace_line(&iter);
4759                         if (ret != TRACE_TYPE_NO_CONSUME)
4760                                 trace_consume(&iter);
4761                 }
4762
4763                 trace_printk_seq(&iter.seq);
4764         }
4765
4766         if (!cnt)
4767                 printk(KERN_TRACE "   (ftrace buffer empty)\n");
4768         else
4769                 printk(KERN_TRACE "---------------------------------\n");
4770
4771  out_enable:
4772         /* Re-enable tracing if requested */
4773         if (!disable_tracing) {
4774                 trace_flags |= old_userobj;
4775
4776                 for_each_tracing_cpu(cpu) {
4777                         atomic_dec(&iter.tr->data[cpu]->disabled);
4778                 }
4779                 tracing_on();
4780         }
4781
4782  out:
4783         arch_spin_unlock(&ftrace_dump_lock);
4784         local_irq_restore(flags);
4785 }
4786
4787 /* By default: disable tracing after the dump */
4788 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
4789 {
4790         __ftrace_dump(true, oops_dump_mode);
4791 }
4792
4793 __init static int tracer_alloc_buffers(void)
4794 {
4795         int ring_buf_size;
4796         enum ring_buffer_flags rb_flags;
4797         int i;
4798         int ret = -ENOMEM;
4799
4800
4801         if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
4802                 goto out;
4803
4804         if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
4805                 goto out_free_buffer_mask;
4806
4807         /* To save memory, keep the ring buffer size to its minimum */
4808         if (ring_buffer_expanded)
4809                 ring_buf_size = trace_buf_size;
4810         else
4811                 ring_buf_size = 1;
4812
4813         rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
4814
4815         cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
4816         cpumask_copy(tracing_cpumask, cpu_all_mask);
4817
4818         /* TODO: make the number of buffers hot pluggable with CPUS */
4819         global_trace.buffer = ring_buffer_alloc(ring_buf_size, rb_flags);
4820         if (!global_trace.buffer) {
4821                 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
4822                 WARN_ON(1);
4823                 goto out_free_cpumask;
4824         }
4825         global_trace.entries = ring_buffer_size(global_trace.buffer);
4826
4827
4828 #ifdef CONFIG_TRACER_MAX_TRACE
4829         max_tr.buffer = ring_buffer_alloc(1, rb_flags);
4830         if (!max_tr.buffer) {
4831                 printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
4832                 WARN_ON(1);
4833                 ring_buffer_free(global_trace.buffer);
4834                 goto out_free_cpumask;
4835         }
4836         max_tr.entries = 1;
4837 #endif
4838
4839         /* Allocate the first page for all buffers */
4840         for_each_tracing_cpu(i) {
4841                 global_trace.data[i] = &per_cpu(global_trace_cpu, i);
4842                 max_tr.data[i] = &per_cpu(max_tr_data, i);
4843         }
4844
4845         trace_init_cmdlines();
4846
4847         register_tracer(&nop_trace);
4848         current_trace = &nop_trace;
4849         /* All seems OK, enable tracing */
4850         tracing_disabled = 0;
4851
4852         atomic_notifier_chain_register(&panic_notifier_list,
4853                                        &trace_panic_notifier);
4854
4855         register_die_notifier(&trace_die_notifier);
4856
4857         return 0;
4858
4859 out_free_cpumask:
4860         free_cpumask_var(tracing_cpumask);
4861 out_free_buffer_mask:
4862         free_cpumask_var(tracing_buffer_mask);
4863 out:
4864         return ret;
4865 }
4866
4867 __init static int clear_boot_tracer(void)
4868 {
4869         /*
4870          * The default boot-up tracer name lives in an init section.
4871          * This function is called from a late initcall. If we did not
4872          * find the boot tracer, then clear it out, to prevent
4873          * later registration from accessing the buffer that is
4874          * about to be freed.
4875          */
4876         if (!default_bootup_tracer)
4877                 return 0;
4878
4879         printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
4880                default_bootup_tracer);
4881         default_bootup_tracer = NULL;
4882
4883         return 0;
4884 }
4885
4886 early_initcall(tracer_alloc_buffers);
4887 fs_initcall(tracer_init_debugfs);
4888 late_initcall(clear_boot_tracer);