tracing: Use strlcpy() instead of strcpy() in __trace_find_cmdline()
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index e5df02c..4c4df05 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -341,7 +341,7 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
        TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE;
 
 static int trace_stop_count;
-static DEFINE_SPINLOCK(tracing_start_lock);
+static DEFINE_RAW_SPINLOCK(tracing_start_lock);
 
 static void wakeup_work_handler(struct work_struct *work)
 {
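
Background for this hunk: on PREEMPT_RT a plain spinlock_t becomes a sleeping rt_mutex, which must not be taken in the IRQs-off paths that tracing_start()/tracing_stop() run in; raw_spinlock_t keeps true busy-wait semantics on every configuration. A minimal sketch of the pattern (lock and function names hypothetical):

    static DEFINE_RAW_SPINLOCK(my_lock);            /* hypothetical lock */

    static void short_atomic_section(void)
    {
            unsigned long flags;

            /* raw_spin_lock_irqsave() never sleeps, even on PREEMPT_RT */
            raw_spin_lock_irqsave(&my_lock, flags);
            /* ... brief, non-sleeping critical section ... */
            raw_spin_unlock_irqrestore(&my_lock, flags);
    }
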
@@ -435,6 +435,7 @@ static struct {
 } trace_clocks[] = {
        { trace_clock_local,    "local" },
        { trace_clock_global,   "global" },
+       { trace_clock_counter,  "counter" },
 };
 
 int trace_clock_id;
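
The new "counter" clock gives up wall-clock meaning in exchange for a strict global ordering of events, useful when comparing event order across CPUs. For reference, upstream kernel/trace/trace_clock.c implements it as little more than an atomic counter, roughly:

    static atomic64_t trace_counter;

    /* Each call returns a unique, monotonically increasing "timestamp". */
    u64 notrace trace_clock_counter(void)
    {
            return atomic64_add_return(1, &trace_counter);
    }

It is selected at runtime with echo counter > /sys/kernel/debug/tracing/trace_clock.
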
@@ -533,9 +534,12 @@ int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
        if (isspace(ch)) {
                parser->buffer[parser->idx] = 0;
                parser->cont = false;
-       } else {
+       } else if (parser->idx < parser->size - 1) {
                parser->cont = true;
                parser->buffer[parser->idx++] = ch;
+       } else {
+               ret = -EINVAL;
+               goto out;
        }
 
        *ppos += read;
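
The new else-if closes an off-by-one overflow: the old code appended a byte for every non-space character, so a token of parser->size bytes or longer ran past the buffer (the terminating NUL needs a spare byte as well). The bounded-append idea in isolation, as a hypothetical stand-alone sketch:

    struct toy_parser {            /* hypothetical stand-in for trace_parser */
            char   *buffer;
            size_t  idx;
            size_t  size;          /* total capacity in bytes */
    };

    /* Append one byte, always reserving room for a terminating NUL. */
    static int toy_append(struct toy_parser *p, char ch)
    {
            if (p->idx >= p->size - 1)
                    return -EINVAL;        /* token too long for the buffer */
            p->buffer[p->idx++] = ch;
            return 0;
    }
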
@@ -630,7 +634,15 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 
        memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
        max_data->pid = tsk->pid;
-       max_data->uid = task_uid(tsk);
+       /*
+        * If tsk == current, then use current_uid(), as that does not use
+        * RCU. The irq tracer can be called out of RCU scope.
+        */
+       if (tsk == current)
+               max_data->uid = current_uid();
+       else
+               max_data->uid = task_uid(tsk);
+
        max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
        max_data->policy = tsk->policy;
        max_data->rt_priority = tsk->rt_priority;
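
The distinction matters because task_uid() goes through the RCU-protected credential pointer, while current_uid() reads current->cred, which only the task itself ever changes. From include/linux/cred.h of this era, approximately:

    /* task_uid() takes rcu_read_lock() around the credential access: */
    #define task_uid(task)     (task_cred_xxx((task), uid))

    /* current_uid() dereferences current->cred directly, no RCU needed,
     * which is why it is the safe choice when tsk == current: */
    #define current_uid()      (current_cred_xxx(uid))
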
@@ -651,7 +663,7 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 void
 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
-       struct ring_buffer *buf = tr->buffer;
+       struct ring_buffer *buf;
 
        if (trace_stop_count)
                return;
@@ -663,6 +675,7 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
        }
        arch_spin_lock(&ftrace_max_lock);
 
+       buf = tr->buffer;
        tr->buffer = max_tr.buffer;
        max_tr.buffer = buf;
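
Sampling tr->buffer before taking ftrace_max_lock left a window in which two contexts could capture the same pointer and the later swap would re-install a stale buffer. The race the moved assignment closes, as a timeline:

    /*
     *   CPU0                               CPU1
     *   ----                               ----
     *   buf = tr->buffer;                  buf = tr->buffer;  <- same pointer
     *   arch_spin_lock(&ftrace_max_lock);
     *   tr->buffer = max_tr.buffer;
     *   max_tr.buffer = buf;
     *   arch_spin_unlock(...);
     *                                      arch_spin_lock(&ftrace_max_lock);
     *                                      tr->buffer = max_tr.buffer;
     *                                      max_tr.buffer = buf; <- stale copy,
     *                                          one buffer installed twice
     */
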
 
@@ -960,7 +973,7 @@ void tracing_start(void)
        if (tracing_disabled)
                return;
 
-       spin_lock_irqsave(&tracing_start_lock, flags);
+       raw_spin_lock_irqsave(&tracing_start_lock, flags);
        if (--trace_stop_count) {
                if (trace_stop_count < 0) {
                        /* Someone screwed up their debugging */
@@ -985,7 +998,7 @@ void tracing_start(void)
 
        ftrace_start();
  out:
-       spin_unlock_irqrestore(&tracing_start_lock, flags);
+       raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
 }
 
 /**
@@ -1000,7 +1013,7 @@ void tracing_stop(void)
        unsigned long flags;
 
        ftrace_stop();
-       spin_lock_irqsave(&tracing_start_lock, flags);
+       raw_spin_lock_irqsave(&tracing_start_lock, flags);
        if (trace_stop_count++)
                goto out;
 
@@ -1018,7 +1031,7 @@ void tracing_stop(void)
        arch_spin_unlock(&ftrace_max_lock);
 
  out:
-       spin_unlock_irqrestore(&tracing_start_lock, flags);
+       raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
 }
 
 void trace_stop_cmdline_recording(void);
@@ -1087,7 +1100,7 @@ void trace_find_cmdline(int pid, char comm[])
        arch_spin_lock(&trace_cmdline_lock);
        map = map_pid_to_cmdline[pid];
        if (map != NO_CMDLINE_MAP)
-               strcpy(comm, saved_cmdlines[map]);
+               strlcpy(comm, saved_cmdlines[map], TASK_COMM_LEN);
        else
                strcpy(comm, "<...>");
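
strcpy() trusts the source to be NUL-terminated within TASK_COMM_LEN; a racily updated saved_cmdlines[] slot can violate that and overrun comm[]. strlcpy() bounds the copy and always terminates the destination. The semantics in isolation (example string hypothetical):

    char comm[16];          /* TASK_COMM_LEN == 16 */

    strlcpy(comm, "a-task-name-longer-than-15-bytes", sizeof(comm));
    /* comm now holds "a-task-name-lon", 15 chars plus the NUL.
     * strcpy() would have written all 33 source bytes into 16. */
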
 
@@ -1641,6 +1654,7 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
        int cpu_file = iter->cpu_file;
        u64 next_ts = 0, ts;
        int next_cpu = -1;
+       int next_size = 0;
        int cpu;
 
        /*
@@ -1672,9 +1686,12 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
                        next_cpu = cpu;
                        next_ts = ts;
                        next_lost = lost_events;
+                       next_size = iter->ent_size;
                }
        }
 
+       iter->ent_size = next_size;
+
        if (ent_cpu)
                *ent_cpu = next_cpu;
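
peek_next_entry() updates iter->ent_size for every CPU it examines, so after the loop it reflects whichever event was peeked last, not the one that won the timestamp comparison; output code consuming ent_size could then read the wrong length. The fix is the usual remember-the-winner pattern for scans with side effects, sketched with hypothetical names:

    u64 ts, best_ts = (u64)-1;
    size_t cur_size = 0, best_size = 0;
    int cpu;

    for (cpu = 0; cpu < nr_cpus; cpu++) {
            ts = peek(cpu, &cur_size);      /* side effect: sets cur_size */
            if (ts < best_ts) {
                    best_ts   = ts;
                    best_size = cur_size;   /* stash the winner's size */
            }
    }
    cur_size = best_size;   /* restore: the last peek was not the winner */
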
 
@@ -2159,6 +2176,14 @@ void trace_default_header(struct seq_file *m)
        }
 }
 
+static void test_ftrace_alive(struct seq_file *m)
+{
+       if (!ftrace_is_dead())
+               return;
+       seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
+       seq_printf(m, "#          MAY BE MISSING FUNCTION EVENTS\n");
+}
+
 static int s_show(struct seq_file *m, void *v)
 {
        struct trace_iterator *iter = v;
@@ -2168,6 +2193,7 @@ static int s_show(struct seq_file *m, void *v)
                if (iter->tr) {
                        seq_printf(m, "# tracer: %s\n", iter->trace->name);
                        seq_puts(m, "#\n");
+                       test_ftrace_alive(m);
                }
                if (iter->trace && iter->trace->print_header)
                        iter->trace->print_header(m);
@@ -2527,10 +2553,12 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
                if (cpumask_test_cpu(cpu, tracing_cpumask) &&
                                !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
                        atomic_inc(&global_trace.data[cpu]->disabled);
+                       ring_buffer_record_disable_cpu(global_trace.buffer, cpu);
                }
                if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
                                cpumask_test_cpu(cpu, tracing_cpumask_new)) {
                        atomic_dec(&global_trace.data[cpu]->disabled);
+                       ring_buffer_record_enable_cpu(global_trace.buffer, cpu);
                }
        }
        arch_spin_unlock(&ftrace_max_lock);
@@ -2619,11 +2647,25 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
        return -EINVAL;
 }
 
-static void set_tracer_flags(unsigned int mask, int enabled)
+/* Some tracers require overwrite to stay enabled */
+int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
+{
+       if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
+               return -1;
+
+       return 0;
+}
+
+int set_tracer_flag(unsigned int mask, int enabled)
 {
        /* do nothing if flag is already set */
        if (!!(trace_flags & mask) == !!enabled)
-               return;
+               return 0;
+
+       /* Give the tracer a chance to approve the change */
+       if (current_trace->flag_changed)
+               if (current_trace->flag_changed(current_trace, mask, !!enabled))
+                       return -EINVAL;
 
        if (enabled)
                trace_flags |= mask;
@@ -2633,8 +2675,14 @@ static void set_tracer_flags(unsigned int mask, int enabled)
        if (mask == TRACE_ITER_RECORD_CMD)
                trace_event_enable_cmd_record(enabled);
 
-       if (mask == TRACE_ITER_OVERWRITE)
+       if (mask == TRACE_ITER_OVERWRITE) {
                ring_buffer_change_overwrite(global_trace.buffer, enabled);
+#ifdef CONFIG_TRACER_MAX_TRACE
+               ring_buffer_change_overwrite(max_tr.buffer, enabled);
+#endif
+       }
+
+       return 0;
 }
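
trace_keep_overwrite() exists so that latency tracers, which snapshot into max_tr and depend on overwrite mode, can veto a user clearing the overwrite flag mid-trace. In the companion tracer changes, wiring it up looks approximately like this (sketch modeled on the irqsoff tracer):

    static struct tracer irqsoff_tracer __read_mostly = {
            .name           = "irqsoff",
            /* ... init/reset/start/stop callbacks ... */
            .use_max_tr     = 1,
            .flag_changed   = trace_keep_overwrite, /* veto clearing overwrite */
    };
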
 
 static ssize_t
@@ -2644,7 +2692,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
        char buf[64];
        char *cmp;
        int neg = 0;
-       int ret;
+       int ret = 0;
        int i;
 
        if (cnt >= sizeof(buf))
@@ -2661,21 +2709,23 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
                cmp += 2;
        }
 
+       mutex_lock(&trace_types_lock);
+
        for (i = 0; trace_options[i]; i++) {
                if (strcmp(cmp, trace_options[i]) == 0) {
-                       set_tracer_flags(1 << i, !neg);
+                       ret = set_tracer_flag(1 << i, !neg);
                        break;
                }
        }
 
        /* If no option could be set, test the specific tracer options */
-       if (!trace_options[i]) {
-               mutex_lock(&trace_types_lock);
+       if (!trace_options[i])
                ret = set_tracer_option(current_trace, cmp, neg);
-               mutex_unlock(&trace_types_lock);
-               if (ret)
-                       return ret;
-       }
+
+       mutex_unlock(&trace_types_lock);
+
+       if (ret)
+               return ret;
 
        *ppos += cnt;
 
@@ -2710,9 +2760,9 @@ static const char readme_msg[] =
        "# cat /sys/kernel/debug/tracing/trace_options\n"
        "noprint-parent nosym-offset nosym-addr noverbose\n"
        "# echo print-parent > /sys/kernel/debug/tracing/trace_options\n"
-       "# echo 1 > /sys/kernel/debug/tracing/tracing_enabled\n"
+       "# echo 1 > /sys/kernel/debug/tracing/tracing_on\n"
        "# cat /sys/kernel/debug/tracing/trace > /tmp/trace.txt\n"
-       "# echo 0 > /sys/kernel/debug/tracing/tracing_enabled\n"
+       "# echo 0 > /sys/kernel/debug/tracing/tracing_on\n"
 ;
 
 static ssize_t
@@ -2999,6 +3049,9 @@ static int tracing_set_tracer(const char *buf)
                goto out;
 
        trace_branch_disable();
+
+       current_trace->enabled = false;
+
        if (current_trace && current_trace->reset)
                current_trace->reset(tr);
        if (current_trace && current_trace->use_max_tr) {
@@ -3028,6 +3081,7 @@ static int tracing_set_tracer(const char *buf)
                        goto out;
        }
 
+       current_trace->enabled = true;
        trace_branch_enable(tr);
  out:
        mutex_unlock(&trace_types_lock);
@@ -3190,8 +3244,6 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
                 */
                return POLLIN | POLLRDNORM;
        } else {
-               if (!trace_empty(iter))
-                       return POLLIN | POLLRDNORM;
                poll_wait(filp, &trace_wait, poll_table);
                if (!trace_empty(iter))
                        return POLLIN | POLLRDNORM;
@@ -3279,13 +3331,6 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
        static struct tracer *old_tracer;
        ssize_t sret;
 
-       /* return any leftover data */
-       sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
-       if (sret != -EBUSY)
-               return sret;
-
-       trace_seq_init(&iter->seq);
-
        /* copy the tracer to avoid using a global lock all around */
        mutex_lock(&trace_types_lock);
        if (unlikely(old_tracer != current_trace && current_trace)) {
@@ -3300,6 +3345,14 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
         * is protected.
         */
        mutex_lock(&iter->mutex);
+
+       /* return any leftover data */
+       sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
+       if (sret != -EBUSY)
+               goto out;
+
+       trace_seq_init(&iter->seq);
+
        if (iter->trace->read) {
                sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
                if (sret)
@@ -3324,6 +3377,7 @@ waitagain:
        memset(&iter->seq, 0,
               sizeof(struct trace_iterator) -
               offsetof(struct trace_iterator, seq));
+       cpumask_clear(iter->started);
        iter->pos = -1;
 
        trace_event_read_lock();
@@ -3442,6 +3496,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
                .pages          = pages_def,
                .partial        = partial_def,
                .nr_pages       = 0, /* This gets updated below. */
+               .nr_pages_max   = PIPE_DEF_BUFFERS,
                .flags          = flags,
                .ops            = &tracing_pipe_buf_ops,
                .spd_release    = tracing_spd_release_pipe,
@@ -3511,9 +3566,12 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 
        spd.nr_pages = i;
 
-       ret = splice_to_pipe(pipe, &spd);
+       if (i)
+               ret = splice_to_pipe(pipe, &spd);
+       else
+               ret = 0;
 out:
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
        return ret;
 
 out_err:
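
The nr_pages_max and splice_shrink_spd() hunks track the upstream splice API fix: pipe->buffers can change under fcntl(F_SETPIPE_SZ), so the descriptor now records the capacity of its own arrays and splice_shrink_spd() no longer consults the pipe. The resulting idiom, roughly:

    struct splice_pipe_desc spd = {
            .pages        = pages,             /* caller-provided arrays */
            .partial      = partial,
            .nr_pages     = 0,                 /* filled in by the caller */
            .nr_pages_max = PIPE_DEF_BUFFERS,  /* capacity of those arrays */
            /* ... flags, ops, spd_release ... */
    };

    /* ... populate spd.pages[]/spd.partial[], set spd.nr_pages ... */
    ret = splice_to_pipe(pipe, &spd);
    splice_shrink_spd(&spd);   /* sized by nr_pages_max, no pipe argument */
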
@@ -3568,6 +3626,30 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
        return cnt;
 }
 
+static ssize_t
+tracing_total_entries_read(struct file *filp, char __user *ubuf,
+                               size_t cnt, loff_t *ppos)
+{
+       struct trace_array *tr = filp->private_data;
+       char buf[64];
+       int r, cpu;
+       unsigned long size = 0, expanded_size = 0;
+
+       mutex_lock(&trace_types_lock);
+       for_each_tracing_cpu(cpu) {
+               size += tr->entries >> 10;
+               if (!ring_buffer_expanded)
+                       expanded_size += trace_buf_size >> 10;
+       }
+       if (ring_buffer_expanded)
+               r = sprintf(buf, "%lu\n", size);
+       else
+               r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
+       mutex_unlock(&trace_types_lock);
+
+       return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
 static ssize_t
 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
                          size_t cnt, loff_t *ppos)
@@ -3594,22 +3676,24 @@ tracing_free_buffer_release(struct inode *inode, struct file *filp)
        return 0;
 }
 
-static int mark_printk(const char *fmt, ...)
-{
-       int ret;
-       va_list args;
-       va_start(args, fmt);
-       ret = trace_vprintk(0, fmt, args);
-       va_end(args);
-       return ret;
-}
-
 static ssize_t
 tracing_mark_write(struct file *filp, const char __user *ubuf,
                                        size_t cnt, loff_t *fpos)
 {
-       char *buf;
-       size_t written;
+       unsigned long addr = (unsigned long)ubuf;
+       struct ring_buffer_event *event;
+       struct ring_buffer *buffer;
+       struct print_entry *entry;
+       unsigned long irq_flags;
+       struct page *pages[2];
+       int nr_pages = 1;
+       ssize_t written;
+       void *page1;
+       void *page2;
+       int offset;
+       int size;
+       int len;
+       int ret;
 
        if (tracing_disabled)
                return -EINVAL;
@@ -3617,28 +3701,81 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
        if (cnt > TRACE_BUF_SIZE)
                cnt = TRACE_BUF_SIZE;
 
-       buf = kmalloc(cnt + 2, GFP_KERNEL);
-       if (buf == NULL)
-               return -ENOMEM;
+       /*
+        * Userspace is injecting traces into the kernel trace buffer.
+        * We want to be as non-intrusive as possible.
+        * To do so, we do not want to allocate any special buffers
+        * or take any locks, but instead write the userspace data
+        * straight into the ring buffer.
+        *
+        * First we need to pin the userspace buffer into memory,
+        * which it most likely already is, because userspace just referenced it.
+        * But there's no guarantee that it is. By using get_user_pages_fast()
+        * and kmap_atomic/kunmap_atomic() we can get access to the
+        * pages directly. We then write the data directly into the
+        * ring buffer.
+        */
+       BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
 
-       if (copy_from_user(buf, ubuf, cnt)) {
-               kfree(buf);
-               return -EFAULT;
+       /* check if we cross pages */
+       if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
+               nr_pages = 2;
+
+       offset = addr & (PAGE_SIZE - 1);
+       addr &= PAGE_MASK;
+
+       ret = get_user_pages_fast(addr, nr_pages, 0, pages);
+       if (ret < nr_pages) {
+               while (--ret >= 0)
+                       put_page(pages[ret]);
+               written = -EFAULT;
+               goto out;
        }
-       if (buf[cnt-1] != '\n') {
-               buf[cnt] = '\n';
-               buf[cnt+1] = '\0';
+
+       page1 = kmap_atomic(pages[0]);
+       if (nr_pages == 2)
+               page2 = kmap_atomic(pages[1]);
+
+       local_save_flags(irq_flags);
+       size = sizeof(*entry) + cnt + 2; /* possible \n added */
+       buffer = global_trace.buffer;
+       event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
+                                         irq_flags, preempt_count());
+       if (!event) {
+               /* Ring buffer disabled, return as if not open for write */
+               written = -EBADF;
+               goto out_unlock;
+       }
+
+       entry = ring_buffer_event_data(event);
+       entry->ip = _THIS_IP_;
+
+       if (nr_pages == 2) {
+               len = PAGE_SIZE - offset;
+               memcpy(&entry->buf, page1 + offset, len);
+               memcpy(&entry->buf[len], page2, cnt - len);
        } else
-               buf[cnt] = '\0';
+               memcpy(&entry->buf, page1 + offset, cnt);
 
-       written = mark_printk("%s", buf);
-       kfree(buf);
-       *fpos += written;
+       if (entry->buf[cnt - 1] != '\n') {
+               entry->buf[cnt] = '\n';
+               entry->buf[cnt + 1] = '\0';
+       } else
+               entry->buf[cnt] = '\0';
 
-       /* don't tell userspace we wrote more - it might confuse them */
-       if (written > cnt)
-               written = cnt;
+       ring_buffer_unlock_commit(buffer, event);
+
+       written = cnt;
 
+       *fpos += written;
+
+ out_unlock:
+       if (nr_pages == 2)
+               kunmap_atomic(page2);
+       kunmap_atomic(page1);
+       while (nr_pages > 0)
+               put_page(pages[--nr_pages]);
+ out:
        return written;
 }
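
A worked example of the page-split arithmetic above, assuming PAGE_SIZE == 4096 and a hypothetical user pointer:

    /* Suppose ubuf == 0x1000ff8 (8 bytes before a page boundary), cnt == 20:
     *
     *   (addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK)
     *        0x1000000     !=      0x1001000        ->  nr_pages = 2
     *   offset  = addr & (PAGE_SIZE - 1) = 0xff8 (4088)
     *   len     = PAGE_SIZE - offset     = 8     bytes copied from page1
     *   cnt-len = 12                              bytes copied from page2
     *
     * The two memcpy()s stitch the message back together inside the
     * reserved event, whose size (sizeof(*entry) + cnt + 2) leaves room
     * for the optional trailing '\n' plus the NUL.
     */
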
 
@@ -3739,6 +3876,12 @@ static const struct file_operations tracing_entries_fops = {
        .llseek         = generic_file_llseek,
 };
 
+static const struct file_operations tracing_total_entries_fops = {
+       .open           = tracing_open_generic,
+       .read           = tracing_total_entries_read,
+       .llseek         = generic_file_llseek,
+};
+
 static const struct file_operations tracing_free_buffer_fops = {
        .write          = tracing_free_buffer_write,
        .release        = tracing_free_buffer_release,
@@ -3808,8 +3951,6 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
        if (info->read < PAGE_SIZE)
                goto read;
 
-       info->read = 0;
-
        trace_access_lock(info->cpu);
        ret = ring_buffer_read_page(info->tr->buffer,
                                    &info->spare,
@@ -3819,6 +3960,8 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
        if (ret < 0)
                return 0;
 
+       info->read = 0;
+
 read:
        size = PAGE_SIZE - info->read;
        if (size > count)
@@ -3918,6 +4061,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
        struct splice_pipe_desc spd = {
                .pages          = pages_def,
                .partial        = partial_def,
+               .nr_pages_max   = PIPE_DEF_BUFFERS,
                .flags          = flags,
                .ops            = &buffer_pipe_buf_ops,
                .spd_release    = buffer_spd_release,
@@ -4005,7 +4149,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
        }
 
        ret = splice_to_pipe(pipe, &spd);
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
 out:
        return ret;
 }
@@ -4026,6 +4170,8 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
        struct trace_array *tr = &global_trace;
        struct trace_seq *s;
        unsigned long cnt;
+       unsigned long long t;
+       unsigned long usec_rem;
 
        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
@@ -4042,6 +4188,17 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
        cnt = ring_buffer_commit_overrun_cpu(tr->buffer, cpu);
        trace_seq_printf(s, "commit overrun: %ld\n", cnt);
 
+       cnt = ring_buffer_bytes_cpu(tr->buffer, cpu);
+       trace_seq_printf(s, "bytes: %lu\n", cnt);
+
+       t = ns2usecs(ring_buffer_oldest_event_ts(tr->buffer, cpu));
+       usec_rem = do_div(t, USEC_PER_SEC);
+       trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n", t, usec_rem);
+
+       t = ns2usecs(ring_buffer_time_stamp(tr->buffer, cpu));
+       usec_rem = do_div(t, USEC_PER_SEC);
+       trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
+
        count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
 
        kfree(s);
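
do_div() is the kernel's 64-by-32 division helper: a macro that divides its 64-bit lvalue in place and evaluates to the remainder, which is exactly what the seconds/microseconds split above relies on. In isolation:

    u64 t = 1234567890ULL;              /* a timestamp in microseconds */
    unsigned long usec_rem = do_div(t, USEC_PER_SEC);
    /* afterwards t == 1234 (seconds) and usec_rem == 567890, printed
     * as "1234.567890" by the "%5llu.%06lu" format used above. */
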
@@ -4262,7 +4419,13 @@ trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
 
        if (val != 0 && val != 1)
                return -EINVAL;
-       set_tracer_flags(1 << index, val);
+
+       mutex_lock(&trace_types_lock);
+       ret = set_tracer_flag(1 << index, val);
+       mutex_unlock(&trace_types_lock);
+
+       if (ret < 0)
+               return ret;
 
        *ppos += cnt;
 
@@ -4414,6 +4577,8 @@ static __init int tracer_init_debugfs(void)
        trace_access_lock_init();
 
        d_tracer = tracing_init_dentry();
+       if (!d_tracer)
+               return 0;
 
        trace_create_file("tracing_enabled", 0644, d_tracer,
                        &global_trace, &tracing_ctrl_fops);
@@ -4450,6 +4615,9 @@ static __init int tracer_init_debugfs(void)
        trace_create_file("buffer_size_kb", 0644, d_tracer,
                        &global_trace, &tracing_entries_fops);
 
+       trace_create_file("buffer_total_size_kb", 0444, d_tracer,
+                       &global_trace, &tracing_total_entries_fops);
+
        trace_create_file("free_buffer", 0644, d_tracer,
                        &global_trace, &tracing_free_buffer_fops);
 
@@ -4544,30 +4712,32 @@ void trace_init_global_iter(struct trace_iterator *iter)
        iter->cpu_file = TRACE_PIPE_ALL_CPU;
 }
 
-static void
-__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
+void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
 {
-       static arch_spinlock_t ftrace_dump_lock =
-               (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
        /* use static because iter can be a bit big for the stack */
        static struct trace_iterator iter;
+       static atomic_t dump_running;
        unsigned int old_userobj;
-       static int dump_ran;
        unsigned long flags;
        int cnt = 0, cpu;
 
-       /* only one dump */
-       local_irq_save(flags);
-       arch_spin_lock(&ftrace_dump_lock);
-       if (dump_ran)
-               goto out;
-
-       dump_ran = 1;
+       /* Only allow one dump user at a time. */
+       if (atomic_inc_return(&dump_running) != 1) {
+               atomic_dec(&dump_running);
+               return;
+       }
 
+       /*
+        * Always turn off tracing when we dump.
+        * We don't need to show trace output of what happens
+        * between multiple crashes.
+        *
+        * If the user does a sysrq-z, then they can re-enable
+        * tracing with echo 1 > tracing_on.
+        */
        tracing_off();
 
-       if (disable_tracing)
-               ftrace_kill();
+       local_irq_save(flags);
 
        trace_init_global_iter(&iter);
 
@@ -4600,6 +4770,12 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
 
        printk(KERN_TRACE "Dumping ftrace buffer:\n");
 
+       /* Did function tracer already get disabled? */
+       if (ftrace_is_dead()) {
+               printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
+               printk("#          MAY BE MISSING FUNCTION EVENTS\n");
+       }
+
        /*
         * We need to stop all tracing on all CPUs to read
         * the next buffer. This is a bit expensive, but is
@@ -4638,26 +4814,15 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
                printk(KERN_TRACE "---------------------------------\n");
 
  out_enable:
-       /* Re-enable tracing if requested */
-       if (!disable_tracing) {
-               trace_flags |= old_userobj;
+       trace_flags |= old_userobj;
 
-               for_each_tracing_cpu(cpu) {
-                       atomic_dec(&iter.tr->data[cpu]->disabled);
-               }
-               tracing_on();
+       for_each_tracing_cpu(cpu) {
+               atomic_dec(&iter.tr->data[cpu]->disabled);
        }
-
- out:
-       arch_spin_unlock(&ftrace_dump_lock);
+       atomic_dec(&dump_running);
        local_irq_restore(flags);
 }
-
-/* By default: disable tracing after the dump */
-void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
-{
-       __ftrace_dump(true, oops_dump_mode);
-}
+EXPORT_SYMBOL_GPL(ftrace_dump);
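
The old one-shot dump_ran flag meant ftrace_dump() could only ever run once per boot, and the arch spinlock could deadlock if a second dump was triggered (for example from NMI) while the first held it. The atomic counter fixes both: the gate is reusable and never blocks. The pattern in isolation (helper names hypothetical):

    static atomic_t in_use;

    static bool try_enter(void)
    {
            /* atomic_inc_return() yields the post-increment value, so
             * exactly one caller sees 1; all others back out. */
            if (atomic_inc_return(&in_use) != 1) {
                    atomic_dec(&in_use);
                    return false;
            }
            return true;
    }

    static void leave(void)
    {
            atomic_dec(&in_use);        /* re-arm for the next dump */
    }
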
 
 __init static int tracer_alloc_buffers(void)
 {