tracing: Use strlcpy() instead of strcpy() in __trace_find_cmdline()

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f2bd275..4c4df05 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -534,9 +534,12 @@ int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
        if (isspace(ch)) {
                parser->buffer[parser->idx] = 0;
                parser->cont = false;
-       } else {
+       } else if (parser->idx < parser->size - 1) {
                parser->cont = true;
                parser->buffer[parser->idx++] = ch;
+       } else {
+               ret = -EINVAL;
+               goto out;
        }
 
        *ppos += read;
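
The new else-if branch closes an off-by-one overflow: parser->buffer holds
parser->size bytes, so at most size - 1 characters fit ahead of the
terminating NUL that the isspace() branch writes, and a too-long token now
fails with -EINVAL instead of scribbling past the buffer. A minimal sketch
of the invariant, with have_input() and next_char() standing in for the
user-copy loop (both hypothetical):

    char buffer[8];                         /* parser->size == 8        */
    int idx = 0;

    while (have_input() && idx < (int)sizeof(buffer) - 1)
            buffer[idx++] = next_char();    /* idx never exceeds 7      */
    buffer[idx] = '\0';                     /* terminator always fits   */
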
@@ -631,7 +634,15 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 
        memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
        max_data->pid = tsk->pid;
-       max_data->uid = task_uid(tsk);
+       /*
+        * If tsk == current, then use current_uid(), as that does not use
+        * RCU. The irq tracer can be called out of RCU scope.
+        */
+       if (tsk == current)
+               max_data->uid = current_uid();
+       else
+               max_data->uid = task_uid(tsk);
+
        max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
        max_data->policy = tsk->policy;
        max_data->rt_priority = tsk->rt_priority;
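
The comment carries the rationale: task_uid() dereferences the task's
credentials inside an RCU read-side section, and the irqsoff tracer can
reach this path where RCU is not usable, while current_uid() goes through
the current task's own cred pointer and needs no RCU at all. Paraphrasing
the cred helpers of this era (condensed from include/linux/cred.h, not
verbatim):

    #define task_uid(task)                          \
    ({                                              \
            uid_t __uid;                            \
            rcu_read_lock();                        \
            __uid = __task_cred(task)->uid;         \
            rcu_read_unlock();                      \
            __uid;                                  \
    })

    #define current_uid()  (current_cred()->uid)    /* no RCU section */
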
@@ -652,7 +663,7 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 void
 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
-       struct ring_buffer *buf = tr->buffer;
+       struct ring_buffer *buf;
 
        if (trace_stop_count)
                return;
@@ -664,6 +675,7 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
        }
        arch_spin_lock(&ftrace_max_lock);
 
+       buf = tr->buffer;
        tr->buffer = max_tr.buffer;
        max_tr.buffer = buf;
 
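
Initializing buf at its declaration sampled tr->buffer before
ftrace_max_lock was taken, so a concurrent swap could slip in between the
read and the exchange, and the code would then install a stale pointer.
Reading and exchanging under one lock makes the swap atomic with respect
to other swappers; the resulting sequence, consolidated:

    arch_spin_lock(&ftrace_max_lock);
    buf = tr->buffer;               /* sample ...                       */
    tr->buffer = max_tr.buffer;     /* ... and exchange under one lock  */
    max_tr.buffer = buf;
    arch_spin_unlock(&ftrace_max_lock);
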
@@ -1088,7 +1100,7 @@ void trace_find_cmdline(int pid, char comm[])
        arch_spin_lock(&trace_cmdline_lock);
        map = map_pid_to_cmdline[pid];
        if (map != NO_CMDLINE_MAP)
-               strcpy(comm, saved_cmdlines[map]);
+               strlcpy(comm, saved_cmdlines[map], TASK_COMM_LEN);
        else
                strcpy(comm, "<...>");
 
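
strcpy() trusts the source to be NUL-terminated within TASK_COMM_LEN; if a
saved_cmdlines slot is ever left unterminated, it keeps copying past the
end of comm[]. strlcpy() bounds the copy and terminates unconditionally. A
sketch of the semantics (caller-side, sizes as in this file):

    char comm[TASK_COMM_LEN];       /* TASK_COMM_LEN == 16              */
    size_t len = strlcpy(comm, src, sizeof(comm));

    /* At most 15 bytes are copied and comm is always NUL-terminated;
     * len is strlen(src), so len >= sizeof(comm) signals truncation. */
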
@@ -1642,6 +1654,7 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
        int cpu_file = iter->cpu_file;
        u64 next_ts = 0, ts;
        int next_cpu = -1;
+       int next_size = 0;
        int cpu;
 
        /*
@@ -1673,9 +1686,12 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
                        next_cpu = cpu;
                        next_ts = ts;
                        next_lost = lost_events;
+                       next_size = iter->ent_size;
                }
        }
 
+       iter->ent_size = next_size;
+
        if (ent_cpu)
                *ent_cpu = next_cpu;
 
@@ -2537,10 +2553,12 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
                if (cpumask_test_cpu(cpu, tracing_cpumask) &&
                                !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
                        atomic_inc(&global_trace.data[cpu]->disabled);
+                       ring_buffer_record_disable_cpu(global_trace.buffer, cpu);
                }
                if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
                                cpumask_test_cpu(cpu, tracing_cpumask_new)) {
                        atomic_dec(&global_trace.data[cpu]->disabled);
+                       ring_buffer_record_enable_cpu(global_trace.buffer, cpu);
                }
        }
        arch_spin_unlock(&ftrace_max_lock);
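
Bumping data[cpu]->disabled only gates the tracer callbacks that check it;
the ring buffer keeps its own per-CPU record-disable count, so events fed
in through other paths could still land on CPUs masked out of
tracing_cpumask. The fix pairs the two gates, which must stay balanced:

    /* take a CPU out: stop the tracer and the buffer together */
    atomic_inc(&global_trace.data[cpu]->disabled);
    ring_buffer_record_disable_cpu(global_trace.buffer, cpu);

    /* and bring it back symmetrically */
    atomic_dec(&global_trace.data[cpu]->disabled);
    ring_buffer_record_enable_cpu(global_trace.buffer, cpu);
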
@@ -2629,11 +2647,25 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
        return -EINVAL;
 }
 
-static void set_tracer_flags(unsigned int mask, int enabled)
+/* Some tracers require overwrite to stay enabled */
+int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
+{
+       if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
+               return -1;
+
+       return 0;
+}
+
+int set_tracer_flag(unsigned int mask, int enabled)
 {
        /* do nothing if flag is already set */
        if (!!(trace_flags & mask) == !!enabled)
-               return;
+               return 0;
+
+       /* Give the tracer a chance to approve the change */
+       if (current_trace->flag_changed)
+               if (current_trace->flag_changed(current_trace, mask, !!enabled))
+                       return -EINVAL;
 
        if (enabled)
                trace_flags |= mask;
@@ -2643,8 +2675,14 @@ static void set_tracer_flags(unsigned int mask, int enabled)
        if (mask == TRACE_ITER_RECORD_CMD)
                trace_event_enable_cmd_record(enabled);
 
-       if (mask == TRACE_ITER_OVERWRITE)
+       if (mask == TRACE_ITER_OVERWRITE) {
                ring_buffer_change_overwrite(global_trace.buffer, enabled);
+#ifdef CONFIG_TRACER_MAX_TRACE
+               ring_buffer_change_overwrite(max_tr.buffer, enabled);
+#endif
+       }
+
+       return 0;
 }
 
 static ssize_t
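
The rename from set_tracer_flags() to set_tracer_flag() comes with a
behavioural change: option writes can now be refused. trace_keep_overwrite()
is the stock veto for latency tracers whose max-latency snapshot depends on
overwrite mode, and it only bites while the tracer is live (the enabled
flag is maintained by tracing_set_tracer() further down). A tracer opts in
by pointing its flag_changed hook at it; roughly, following the upstream
latency tracers:

    static struct tracer irqsoff_tracer __read_mostly = {
            .name         = "irqsoff",
            /* ... */
            .flag_changed = trace_keep_overwrite,
    };
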
@@ -2654,7 +2692,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
        char buf[64];
        char *cmp;
        int neg = 0;
-       int ret;
+       int ret = 0;
        int i;
 
        if (cnt >= sizeof(buf))
@@ -2671,21 +2709,23 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
                cmp += 2;
        }
 
+       mutex_lock(&trace_types_lock);
+
        for (i = 0; trace_options[i]; i++) {
                if (strcmp(cmp, trace_options[i]) == 0) {
-                       set_tracer_flags(1 << i, !neg);
+                       ret = set_tracer_flag(1 << i, !neg);
                        break;
                }
        }
 
        /* If no option could be set, test the specific tracer options */
-       if (!trace_options[i]) {
-               mutex_lock(&trace_types_lock);
+       if (!trace_options[i])
                ret = set_tracer_option(current_trace, cmp, neg);
-               mutex_unlock(&trace_types_lock);
-               if (ret)
-                       return ret;
-       }
+
+       mutex_unlock(&trace_types_lock);
+
+       if (ret)
+               return ret;
 
        *ppos += cnt;
 
@@ -3009,6 +3049,9 @@ static int tracing_set_tracer(const char *buf)
                goto out;
 
        trace_branch_disable();
+
+       current_trace->enabled = false;
+
        if (current_trace && current_trace->reset)
                current_trace->reset(tr);
        if (current_trace && current_trace->use_max_tr) {
@@ -3038,6 +3081,7 @@ static int tracing_set_tracer(const char *buf)
                        goto out;
        }
 
+       current_trace->enabled = true;
        trace_branch_enable(tr);
  out:
        mutex_unlock(&trace_types_lock);
@@ -3200,8 +3244,6 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
                 */
                return POLLIN | POLLRDNORM;
        } else {
-               if (!trace_empty(iter))
-                       return POLLIN | POLLRDNORM;
                poll_wait(filp, &trace_wait, poll_table);
                if (!trace_empty(iter))
                        return POLLIN | POLLRDNORM;
@@ -3289,13 +3331,6 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
        static struct tracer *old_tracer;
        ssize_t sret;
 
-       /* return any leftover data */
-       sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
-       if (sret != -EBUSY)
-               return sret;
-
-       trace_seq_init(&iter->seq);
-
        /* copy the tracer to avoid using a global lock all around */
        mutex_lock(&trace_types_lock);
        if (unlikely(old_tracer != current_trace && current_trace)) {
@@ -3310,6 +3345,14 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
         * is protected.
         */
        mutex_lock(&iter->mutex);
+
+       /* return any leftover data */
+       sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
+       if (sret != -EBUSY)
+               goto out;
+
+       trace_seq_init(&iter->seq);
+
        if (iter->trace->read) {
                sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
                if (sret)
@@ -3334,6 +3377,7 @@ waitagain:
        memset(&iter->seq, 0,
               sizeof(struct trace_iterator) -
               offsetof(struct trace_iterator, seq));
+       cpumask_clear(iter->started);
        iter->pos = -1;
 
        trace_event_read_lock();
@@ -3452,6 +3496,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
                .pages          = pages_def,
                .partial        = partial_def,
                .nr_pages       = 0, /* This gets updated below. */
+               .nr_pages_max   = PIPE_DEF_BUFFERS,
                .flags          = flags,
                .ops            = &tracing_pipe_buf_ops,
                .spd_release    = tracing_spd_release_pipe,
@@ -3521,9 +3566,12 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 
        spd.nr_pages = i;
 
-       ret = splice_to_pipe(pipe, &spd);
+       if (i)
+               ret = splice_to_pipe(pipe, &spd);
+       else
+               ret = 0;
 out:
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
        return ret;
 
 out_err:
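
This hunk and the one below in tracing_buffers_splice_read() track the
upstream VFS change "splice: fix racy pipe->buffers uses": struct
splice_pipe_desc gained a caller-owned nr_pages_max capacity, and
splice_shrink_spd() lost its pipe argument because the capacity no longer
comes from pipe->buffers. The updated calling shape, in outline:

    struct splice_pipe_desc spd = {
            .pages        = pages,
            .partial      = partial,
            .nr_pages_max = PIPE_DEF_BUFFERS,   /* fixed capacity      */
            .nr_pages     = 0,                  /* filled as we queue  */
            /* ... */
    };

    ret = splice_to_pipe(pipe, &spd);
    splice_shrink_spd(&spd);                    /* no pipe argument    */
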
@@ -4013,6 +4061,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
        struct splice_pipe_desc spd = {
                .pages          = pages_def,
                .partial        = partial_def,
+               .nr_pages_max   = PIPE_DEF_BUFFERS,
                .flags          = flags,
                .ops            = &buffer_pipe_buf_ops,
                .spd_release    = buffer_spd_release,
@@ -4100,7 +4149,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
        }
 
        ret = splice_to_pipe(pipe, &spd);
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
 out:
        return ret;
 }
@@ -4370,7 +4419,13 @@ trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
 
        if (val != 0 && val != 1)
                return -EINVAL;
-       set_tracer_flags(1 << index, val);
+
+       mutex_lock(&trace_types_lock);
+       ret = set_tracer_flag(1 << index, val);
+       mutex_unlock(&trace_types_lock);
+
+       if (ret < 0)
+               return ret;
 
        *ppos += cnt;
 
@@ -4522,6 +4577,8 @@ static __init int tracer_init_debugfs(void)
        trace_access_lock_init();
 
        d_tracer = tracing_init_dentry();
+       if (!d_tracer)
+               return 0;
 
        trace_create_file("tracing_enabled", 0644, d_tracer,
                        &global_trace, &tracing_ctrl_fops);
@@ -4655,36 +4712,32 @@ void trace_init_global_iter(struct trace_iterator *iter)
        iter->cpu_file = TRACE_PIPE_ALL_CPU;
 }
 
-static void
-__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
+void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
 {
-       static arch_spinlock_t ftrace_dump_lock =
-               (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
        /* use static because iter can be a bit big for the stack */
        static struct trace_iterator iter;
+       static atomic_t dump_running;
        unsigned int old_userobj;
-       static int dump_ran;
        unsigned long flags;
        int cnt = 0, cpu;
 
-       /* only one dump */
-       local_irq_save(flags);
-       arch_spin_lock(&ftrace_dump_lock);
-       if (dump_ran)
-               goto out;
-
-       dump_ran = 1;
+       /* Only allow one dump user at a time. */
+       if (atomic_inc_return(&dump_running) != 1) {
+               atomic_dec(&dump_running);
+               return;
+       }
 
+       /*
+        * Always turn off tracing when we dump.
+        * We don't need to show trace output of what happens
+        * between multiple crashes.
+        *
+        * If the user does a sysrq-z, then they can re-enable
+        * tracing with echo 1 > tracing_on.
+        */
        tracing_off();
 
-       /* Did function tracer already get disabled? */
-       if (ftrace_is_dead()) {
-               printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
-               printk("#          MAY BE MISSING FUNCTION EVENTS\n");
-       }
-
-       if (disable_tracing)
-               ftrace_kill();
+       local_irq_save(flags);
 
        trace_init_global_iter(&iter);
 
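
The atomic_inc_return() gate above replaces the arch spinlock plus the
one-shot dump_ran flag: the first caller sees the count become 1 and
proceeds, while a concurrent caller (say, an oops racing on another CPU)
pushes it past 1 and backs straight out instead of spinning. Because the
winner decrements on its way out, the gate also re-arms for later dumps.
The pattern in isolation:

    static atomic_t dump_running;

    if (atomic_inc_return(&dump_running) != 1) {
            atomic_dec(&dump_running);      /* someone else is dumping */
            return;
    }
    /* ... exclusive dump work ... */
    atomic_dec(&dump_running);              /* allow future dumps      */
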
@@ -4717,6 +4770,12 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
 
        printk(KERN_TRACE "Dumping ftrace buffer:\n");
 
+       /* Did function tracer already get disabled? */
+       if (ftrace_is_dead()) {
+               printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
+               printk("#          MAY BE MISSING FUNCTION EVENTS\n");
+       }
+
        /*
         * We need to stop all tracing on all CPUs to read
         * the next buffer. This is a bit expensive, but is
@@ -4755,26 +4814,15 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
                printk(KERN_TRACE "---------------------------------\n");
 
  out_enable:
-       /* Re-enable tracing if requested */
-       if (!disable_tracing) {
-               trace_flags |= old_userobj;
+       trace_flags |= old_userobj;
 
-               for_each_tracing_cpu(cpu) {
-                       atomic_dec(&iter.tr->data[cpu]->disabled);
-               }
-               tracing_on();
+       for_each_tracing_cpu(cpu) {
+               atomic_dec(&iter.tr->data[cpu]->disabled);
        }
-
- out:
-       arch_spin_unlock(&ftrace_dump_lock);
+       atomic_dec(&dump_running);
        local_irq_restore(flags);
 }
-
-/* By default: disable tracing after the dump */
-void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
-{
-       __ftrace_dump(true, oops_dump_mode);
-}
+EXPORT_SYMBOL_GPL(ftrace_dump);
 
 __init static int tracer_alloc_buffers(void)
 {