tracing: Use strlcpy() instead of strcpy() in __trace_find_cmdline()
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 6751c11..4c4df05 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -534,9 +534,12 @@ int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
        if (isspace(ch)) {
                parser->buffer[parser->idx] = 0;
                parser->cont = false;
-       } else {
+       } else if (parser->idx < parser->size - 1) {
                parser->cont = true;
                parser->buffer[parser->idx++] = ch;
+       } else {
+               ret = -EINVAL;
+               goto out;
        }
 
        *ppos += read;
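
This hunk closes a buffer overflow in trace_get_user(): the old else branch stored into parser->buffer unconditionally, so a token longer than the buffer walked past its end. The new test only appends while at least one byte remains for the terminating NUL, and otherwise fails the read with -EINVAL. A minimal userspace model of the guard (hypothetical names, not the kernel code):

	#include <ctype.h>
	#include <stddef.h>

	struct parser { char buf[8]; size_t idx; size_t size; int cont; };

	/* Returns 0 on success, -1 once a token would overflow the buffer. */
	static int feed(struct parser *p, char ch)
	{
		if (isspace((unsigned char)ch)) {
			p->buf[p->idx] = '\0';		/* token complete */
			p->cont = 0;
		} else if (p->idx < p->size - 1) {	/* keep room for the NUL */
			p->cont = 1;
			p->buf[p->idx++] = ch;
		} else {
			return -1;			/* mirrors ret = -EINVAL */
		}
		return 0;
	}
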
@@ -631,7 +634,15 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 
        memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
        max_data->pid = tsk->pid;
-       max_data->uid = task_uid(tsk);
+       /*
+        * If tsk == current, then use current_uid(), as that does not use
+        * RCU. The irq tracer can be called out of RCU scope.
+        */
+       if (tsk == current)
+               max_data->uid = current_uid();
+       else
+               max_data->uid = task_uid(tsk);
+
        max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
        max_data->policy = tsk->policy;
        max_data->rt_priority = tsk->rt_priority;
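
The uid change works around an RCU splat rather than a wrong value: task_uid() opens an RCU read-side critical section internally to dereference the task's credentials, but the irq tracer can reach __update_max_tr() from a context where RCU is not watching. current_uid() dereferences current->cred directly, which needs no RCU protection because a task's own credential pointer cannot change underneath it. Roughly (simplified, not verbatim kernel source):

	/*
	 * task_uid(tsk):	rcu_read_lock();
	 *			uid = __task_cred(tsk)->uid;
	 *			rcu_read_unlock();
	 *
	 * current_uid():	uid = current->cred->uid;  // own creds, no RCU
	 */
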
@@ -1089,7 +1100,7 @@ void trace_find_cmdline(int pid, char comm[])
        arch_spin_lock(&trace_cmdline_lock);
        map = map_pid_to_cmdline[pid];
        if (map != NO_CMDLINE_MAP)
-               strcpy(comm, saved_cmdlines[map]);
+               strlcpy(comm, saved_cmdlines[map], TASK_COMM_LEN);
        else
                strcpy(comm, "<...>");
 
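This is the change the subject line names: saved_cmdlines[] slots hold at most TASK_COMM_LEN bytes and a concurrently updated slot is not guaranteed to be NUL-terminated, so the unbounded strcpy() could overrun comm. strlcpy() caps the copy at TASK_COMM_LEN - 1 characters and always terminates the destination. A standalone demo of that contract (glibc has no strlcpy, so bounded_copy() below is a hypothetical stand-in with the same semantics):

	#include <stdio.h>
	#include <string.h>

	#define TASK_COMM_LEN 16

	/* Copies at most size - 1 bytes and always NUL-terminates;
	 * returns the length it tried to create, like strlcpy(). */
	static size_t bounded_copy(char *dst, const char *src, size_t size)
	{
		size_t len = strlen(src);

		if (size) {
			size_t n = len < size - 1 ? len : size - 1;

			memcpy(dst, src, n);
			dst[n] = '\0';
		}
		return len;
	}

	int main(void)
	{
		char comm[TASK_COMM_LEN];

		bounded_copy(comm, "a-very-long-command-name", sizeof(comm));
		printf("%s\n", comm);	/* truncated to 15 chars, no overflow */
		return 0;
	}
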
@@ -2636,11 +2647,25 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
        return -EINVAL;
 }
 
-static void set_tracer_flags(unsigned int mask, int enabled)
+/* Some tracers require overwrite to stay enabled */
+int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
+{
+       if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
+               return -1;
+
+       return 0;
+}
+
+int set_tracer_flag(unsigned int mask, int enabled)
 {
        /* do nothing if flag is already set */
        if (!!(trace_flags & mask) == !!enabled)
-               return;
+               return 0;
+
+       /* Give the tracer a chance to approve the change */
+       if (current_trace->flag_changed)
+               if (current_trace->flag_changed(current_trace, mask, !!enabled))
+                       return -EINVAL;
 
        if (enabled)
                trace_flags |= mask;
@@ -2650,8 +2675,14 @@ static void set_tracer_flags(unsigned int mask, int enabled)
        if (mask == TRACE_ITER_RECORD_CMD)
                trace_event_enable_cmd_record(enabled);
 
-       if (mask == TRACE_ITER_OVERWRITE)
+       if (mask == TRACE_ITER_OVERWRITE) {
                ring_buffer_change_overwrite(global_trace.buffer, enabled);
+#ifdef CONFIG_TRACER_MAX_TRACE
+               ring_buffer_change_overwrite(max_tr.buffer, enabled);
+#endif
+       }
+
+       return 0;
 }
 
 static ssize_t
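
trace_keep_overwrite() and the new flag_changed hook exist because the latency tracers snapshot into max_tr and depend on ring-buffer overwrite mode staying enabled; clearing TRACE_ITER_OVERWRITE underneath them would corrupt the max-latency snapshot. set_tracer_flag() now asks the current tracer before applying a change, and the overwrite toggle is also mirrored into max_tr's buffer, which was previously left out of sync. A tracer opts into the veto like this (sketch; the companion trace_irqsoff.c and trace_sched_wakeup.c changes wire this up for the real tracers):

	static struct tracer irqsoff_tracer __read_mostly = {
		.name		= "irqsoff",
		/* ... init, reset and the other callbacks ... */
		.flag_changed	= trace_keep_overwrite,	/* refuse overwrite=0 */
	};
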
@@ -2682,7 +2713,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 
        for (i = 0; trace_options[i]; i++) {
                if (strcmp(cmp, trace_options[i]) == 0) {
-                       set_tracer_flags(1 << i, !neg);
+                       ret = set_tracer_flag(1 << i, !neg);
                        break;
                }
        }
@@ -3018,6 +3049,9 @@ static int tracing_set_tracer(const char *buf)
                goto out;
 
        trace_branch_disable();
+
+       current_trace->enabled = false;
+
        if (current_trace && current_trace->reset)
                current_trace->reset(tr);
        if (current_trace && current_trace->use_max_tr) {
@@ -3047,6 +3081,7 @@ static int tracing_set_tracer(const char *buf)
                        goto out;
        }
 
+       current_trace->enabled = true;
        trace_branch_enable(tr);
  out:
        mutex_unlock(&trace_types_lock);
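
The enabled flag brackets the tracer switch: it is cleared before the outgoing tracer's reset() and set again only once the incoming tracer has initialized, so trace_keep_overwrite() only vetoes flag changes while a tracer is actually live. In outline:

	/*
	 * trace_branch_disable();
	 * current_trace->enabled = false;   // vetoes inactive during switch
	 * old->reset(tr);
	 * ...
	 * ret = new->init(tr);              // on failure, enabled stays false
	 * current_trace->enabled = true;    // vetoes armed again
	 */
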
@@ -3209,8 +3244,6 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
                 */
                return POLLIN | POLLRDNORM;
        } else {
-               if (!trace_empty(iter))
-                       return POLLIN | POLLRDNORM;
                poll_wait(filp, &trace_wait, poll_table);
                if (!trace_empty(iter))
                        return POLLIN | POLLRDNORM;
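
Removing the pre-wait check makes tracing_poll_pipe() follow the canonical f_op->poll shape: register on the wait queue first, then test the condition. Bailing out before poll_wait() meant the file was never added to trace_wait on that path, whereas the canonical ordering leaves no window where the condition is tested without the waiter registered. The shape, with hypothetical names:

	static unsigned int my_poll(struct file *filp, poll_table *wait)
	{
		poll_wait(filp, &my_waitqueue, wait);	/* register first */
		if (data_ready())			/* then test */
			return POLLIN | POLLRDNORM;
		return 0;
	}
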
@@ -3298,13 +3331,6 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
        static struct tracer *old_tracer;
        ssize_t sret;
 
-       /* return any leftover data */
-       sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
-       if (sret != -EBUSY)
-               return sret;
-
-       trace_seq_init(&iter->seq);
-
        /* copy the tracer to avoid using a global lock all around */
        mutex_lock(&trace_types_lock);
        if (unlikely(old_tracer != current_trace && current_trace)) {
@@ -3319,6 +3345,14 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
         * is protected.
         */
        mutex_lock(&iter->mutex);
+
+       /* return any leftover data */
+       sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
+       if (sret != -EBUSY)
+               goto out;
+
+       trace_seq_init(&iter->seq);
+
        if (iter->trace->read) {
                sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
                if (sret)
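
Moving the leftover-data drain below mutex_lock(&iter->mutex) fixes a race: iter->seq was previously read and reinitialized without the iterator lock, so two readers of the same pipe could interleave and corrupt or duplicate output. The early return also becomes goto out, which releases the mutex on the function's existing unlock path. Condensed, the function now reads:

	/*
	 * mutex_lock(&iter->mutex);
	 * sret = trace_seq_to_user(&iter->seq, ubuf, cnt);  // drain leftovers
	 * if (sret != -EBUSY)
	 *	goto out;                                    // unlocks below
	 * trace_seq_init(&iter->seq);
	 * ...read fresh entries...
	 * out:
	 * mutex_unlock(&iter->mutex);
	 */
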
@@ -3343,6 +3377,7 @@ waitagain:
        memset(&iter->seq, 0,
               sizeof(struct trace_iterator) -
               offsetof(struct trace_iterator, seq));
+       cpumask_clear(iter->started);
        iter->pos = -1;
 
        trace_event_read_lock();
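
iter->started records which CPUs have already contributed output so the one-time "##### CPU N buffer started ####" annotation is printed exactly once per CPU; the output path does roughly this (sketch of the existing test_cpu_buff_start() logic in this file):

	if (!cpumask_test_cpu(iter->cpu, iter->started)) {
		cpumask_set_cpu(iter->cpu, iter->started);
		trace_seq_printf(&iter->seq,
				 "##### CPU %u buffer started ####\n",
				 iter->cpu);
	}

Since waitagain resets iter->seq and iter->pos for a fresh pass, the started mask has to be cleared along with them to keep that bookkeeping consistent.
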
@@ -3531,7 +3566,10 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 
        spd.nr_pages = i;
 
-       ret = splice_to_pipe(pipe, &spd);
+       if (i)
+               ret = splice_to_pipe(pipe, &spd);
+       else
+               ret = 0;
 out:
        splice_shrink_spd(&spd);
        return ret;
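
The splice guard avoids handing an empty page set to the pipe layer: when no trace data was available, spd.nr_pages is 0 and the read should simply report 0 rather than rely on what splice_to_pipe() does with an empty descriptor. The same guard as a one-liner (sketch):

	ret = spd.nr_pages ? splice_to_pipe(pipe, &spd) : 0;
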
@@ -4383,9 +4421,12 @@ trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
                return -EINVAL;
 
        mutex_lock(&trace_types_lock);
-       set_tracer_flags(1 << index, val);
+       ret = set_tracer_flag(1 << index, val);
        mutex_unlock(&trace_types_lock);
 
+       if (ret < 0)
+               return ret;
+
        *ppos += cnt;
 
        return cnt;
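
With set_tracer_flag() able to refuse a change, trace_options_core_write() must propagate the failure instead of advancing *ppos and claiming the write succeeded. From userspace the veto surfaces as a failed write() (hypothetical demo; assumes debugfs mounted at /sys/kernel/debug and a latency tracer currently selected):

	#include <errno.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/sys/kernel/debug/tracing/options/overwrite",
			      O_WRONLY);

		if (fd < 0)
			return 1;
		if (write(fd, "0", 1) < 0)	/* vetoed: EINVAL */
			fprintf(stderr, "write: %s\n", strerror(errno));
		close(fd);
		return 0;
	}
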
@@ -4536,6 +4577,8 @@ static __init int tracer_init_debugfs(void)
        trace_access_lock_init();
 
        d_tracer = tracing_init_dentry();
+       if (!d_tracer)
+               return 0;
 
        trace_create_file("tracing_enabled", 0644, d_tracer,
                        &global_trace, &tracing_ctrl_fops);
@@ -4669,36 +4712,32 @@ void trace_init_global_iter(struct trace_iterator *iter)
        iter->cpu_file = TRACE_PIPE_ALL_CPU;
 }
 
-static void
-__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
+void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
 {
-       static arch_spinlock_t ftrace_dump_lock =
-               (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
        /* use static because iter can be a bit big for the stack */
        static struct trace_iterator iter;
+       static atomic_t dump_running;
        unsigned int old_userobj;
-       static int dump_ran;
        unsigned long flags;
        int cnt = 0, cpu;
 
-       /* only one dump */
-       local_irq_save(flags);
-       arch_spin_lock(&ftrace_dump_lock);
-       if (dump_ran)
-               goto out;
-
-       dump_ran = 1;
+       /* Only allow one dump user at a time. */
+       if (atomic_inc_return(&dump_running) != 1) {
+               atomic_dec(&dump_running);
+               return;
+       }
 
+       /*
+        * Always turn off tracing when we dump.
+        * We don't need to show trace output of what happens
+        * between multiple crashes.
+        *
+        * If the user does a sysrq-z, then they can re-enable
+        * tracing with echo 1 > tracing_on.
+        */
        tracing_off();
 
-       /* Did function tracer already get disabled? */
-       if (ftrace_is_dead()) {
-               printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
-               printk("#          MAY BE MISSING FUNCTION EVENTS\n");
-       }
-
-       if (disable_tracing)
-               ftrace_kill();
+       local_irq_save(flags);
 
        trace_init_global_iter(&iter);
 
@@ -4731,6 +4770,12 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
 
        printk(KERN_TRACE "Dumping ftrace buffer:\n");
 
+       /* Did function tracer already get disabled? */
+       if (ftrace_is_dead()) {
+               printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
+               printk("#          MAY BE MISSING FUNCTION EVENTS\n");
+       }
+
        /*
         * We need to stop all tracing on all CPUS to read the
         * next buffer. This is a bit expensive, but is
@@ -4769,26 +4814,15 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
                printk(KERN_TRACE "---------------------------------\n");
 
  out_enable:
-       /* Re-enable tracing if requested */
-       if (!disable_tracing) {
-               trace_flags |= old_userobj;
+       trace_flags |= old_userobj;
 
-               for_each_tracing_cpu(cpu) {
-                       atomic_dec(&iter.tr->data[cpu]->disabled);
-               }
-               tracing_on();
+       for_each_tracing_cpu(cpu) {
+               atomic_dec(&iter.tr->data[cpu]->disabled);
        }
-
- out:
-       arch_spin_unlock(&ftrace_dump_lock);
+       atomic_dec(&dump_running);
        local_irq_restore(flags);
 }
-
-/* By default: disable tracing after the dump */
-void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
-{
-       __ftrace_dump(true, oops_dump_mode);
-}
+EXPORT_SYMBOL_GPL(ftrace_dump);
 
 __init static int tracer_alloc_buffers(void)
 {
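
The ftrace_dump() rework replaces a one-shot gate (an arch spinlock plus a dump_ran flag that latched forever) with an atomic counter: the first caller to bring dump_running to 1 owns the dump, concurrent callers back out immediately, and the decrement at the end makes the facility reusable instead of single-use. The dropped disable_tracing parameter reflects that a dump now always turns tracing off, to be re-enabled by hand afterwards. The gate modeled in userspace C11 (a model, not the kernel code, which uses atomic_inc_return()/atomic_dec()):

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_int dump_running;

	/* First caller wins; everyone else backs out without blocking. */
	static bool try_begin_dump(void)
	{
		if (atomic_fetch_add(&dump_running, 1) + 1 != 1) {
			atomic_fetch_sub(&dump_running, 1);
			return false;
		}
		return true;
	}

	static void end_dump(void)
	{
		atomic_fetch_sub(&dump_running, 1);	/* reusable again */
	}
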