ftrace: Fix README to state tracing_on to start/stop tracing
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index b419070..cea1605 100644
@@ -435,6 +435,7 @@ static struct {
 } trace_clocks[] = {
        { trace_clock_local,    "local" },
        { trace_clock_global,   "global" },
+       { trace_clock_counter,  "counter" },
 };
 
 int trace_clock_id;
@@ -2159,6 +2160,14 @@ void trace_default_header(struct seq_file *m)
        }
 }
 
+static void test_ftrace_alive(struct seq_file *m)
+{
+       if (!ftrace_is_dead())
+               return;
+       seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
+       seq_printf(m, "#          MAY BE MISSING FUNCTION EVENTS\n");
+}
+
 static int s_show(struct seq_file *m, void *v)
 {
        struct trace_iterator *iter = v;
@@ -2168,6 +2177,7 @@ static int s_show(struct seq_file *m, void *v)
                if (iter->tr) {
                        seq_printf(m, "# tracer: %s\n", iter->trace->name);
                        seq_puts(m, "#\n");
+                       test_ftrace_alive(m);
                }
                if (iter->trace && iter->trace->print_header)
                        iter->trace->print_header(m);
@@ -2710,9 +2720,9 @@ static const char readme_msg[] =
        "# cat /sys/kernel/debug/tracing/trace_options\n"
        "noprint-parent nosym-offset nosym-addr noverbose\n"
        "# echo print-parent > /sys/kernel/debug/tracing/trace_options\n"
-       "# echo 1 > /sys/kernel/debug/tracing/tracing_enabled\n"
+       "# echo 1 > /sys/kernel/debug/tracing/tracing_on\n"
        "# cat /sys/kernel/debug/tracing/trace > /tmp/trace.txt\n"
-       "# echo 0 > /sys/kernel/debug/tracing/tracing_enabled\n"
+       "# echo 0 > /sys/kernel/debug/tracing/tracing_on\n"
 ;
 
 static ssize_t
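For reference, the shell sequence in the README above can also be driven from a program; below is a minimal C sketch of the same start/stop cycle. It is illustrative only (not part of the patch), the helper name is made up for the example, and it assumes debugfs is mounted at /sys/kernel/debug with a tracer already selected.

#include <stdio.h>
#include <stdlib.h>

/* Write "1" or "0" to tracing_on, mirroring the echo lines in the README. */
static void set_tracing_on(int on)
{
	FILE *f = fopen("/sys/kernel/debug/tracing/tracing_on", "w");

	if (!f) {
		perror("tracing_on");
		exit(1);
	}
	fputs(on ? "1" : "0", f);
	fclose(f);
}

int main(void)
{
	set_tracing_on(1);	/* echo 1 > tracing_on */
	/* ... run the workload to be traced ... */
	set_tracing_on(0);	/* echo 0 > tracing_on */
	/* The trace itself can then be read from the "trace" file,
	 * as in "cat trace > /tmp/trace.txt" in the README. */
	return 0;
}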
@@ -3618,22 +3628,24 @@ tracing_free_buffer_release(struct inode *inode, struct file *filp)
        return 0;
 }
 
-static int mark_printk(const char *fmt, ...)
-{
-       int ret;
-       va_list args;
-       va_start(args, fmt);
-       ret = trace_vprintk(0, fmt, args);
-       va_end(args);
-       return ret;
-}
-
 static ssize_t
 tracing_mark_write(struct file *filp, const char __user *ubuf,
                                        size_t cnt, loff_t *fpos)
 {
-       char *buf;
-       size_t written;
+       unsigned long addr = (unsigned long)ubuf;
+       struct ring_buffer_event *event;
+       struct ring_buffer *buffer;
+       struct print_entry *entry;
+       unsigned long irq_flags;
+       struct page *pages[2];
+       int nr_pages = 1;
+       ssize_t written;
+       void *page1;
+       void *page2;
+       int offset;
+       int size;
+       int len;
+       int ret;
 
        if (tracing_disabled)
                return -EINVAL;
@@ -3641,28 +3653,81 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
        if (cnt > TRACE_BUF_SIZE)
                cnt = TRACE_BUF_SIZE;
 
-       buf = kmalloc(cnt + 2, GFP_KERNEL);
-       if (buf == NULL)
-               return -ENOMEM;
+       /*
+        * Userspace is injecting traces into the kernel trace buffer.
+        * We want to be as non-intrusive as possible.
+        * To do so, we do not want to allocate any special buffers
+        * or take any locks, but instead write the userspace data
+        * straight into the ring buffer.
+        *
+        * First we need to pin the userspace buffer into memory.  It is
+        * most likely already resident, because userspace just referenced
+        * it, but there is no guarantee of that.  By using
+        * get_user_pages_fast() and kmap_atomic()/kunmap_atomic() we can
+        * access the pages directly and then write the data straight into
+        * the ring buffer.
+        */
+       BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
 
-       if (copy_from_user(buf, ubuf, cnt)) {
-               kfree(buf);
-               return -EFAULT;
+       /* check if we cross pages */
+       if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
+               nr_pages = 2;
+
+       offset = addr & (PAGE_SIZE - 1);
+       addr &= PAGE_MASK;
+
+       ret = get_user_pages_fast(addr, nr_pages, 0, pages);
+       if (ret < nr_pages) {
+               while (--ret >= 0)
+                       put_page(pages[ret]);
+               written = -EFAULT;
+               goto out;
        }
-       if (buf[cnt-1] != '\n') {
-               buf[cnt] = '\n';
-               buf[cnt+1] = '\0';
+
+       page1 = kmap_atomic(pages[0]);
+       if (nr_pages == 2)
+               page2 = kmap_atomic(pages[1]);
+
+       local_save_flags(irq_flags);
+       size = sizeof(*entry) + cnt + 2; /* possible \n added */
+       buffer = global_trace.buffer;
+       event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
+                                         irq_flags, preempt_count());
+       if (!event) {
+               /* Ring buffer disabled, return as if not open for write */
+               written = -EBADF;
+               goto out_unlock;
+       }
+
+       entry = ring_buffer_event_data(event);
+       entry->ip = _THIS_IP_;
+
+       if (nr_pages == 2) {
+               len = PAGE_SIZE - offset;
+               memcpy(&entry->buf, page1 + offset, len);
+               memcpy(&entry->buf[len], page2, cnt - len);
        } else
-               buf[cnt] = '\0';
+               memcpy(&entry->buf, page1 + offset, cnt);
 
-       written = mark_printk("%s", buf);
-       kfree(buf);
-       *fpos += written;
+       if (entry->buf[cnt - 1] != '\n') {
+               entry->buf[cnt] = '\n';
+               entry->buf[cnt + 1] = '\0';
+       } else
+               entry->buf[cnt] = '\0';
+
+       ring_buffer_unlock_commit(buffer, event);
+
+       written = cnt;
 
-       /* don't tell userspace we wrote more - it might confuse them */
-       if (written > cnt)
-               written = cnt;
+       *fpos += written;
 
+ out_unlock:
+       if (nr_pages == 2)
+               kunmap_atomic(page2);
+       kunmap_atomic(page1);
+       while (nr_pages > 0)
+               put_page(pages[--nr_pages]);
+ out:
        return written;
 }
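The block comment in tracing_mark_write() above covers the kernel side of the new path; from userspace, the interface is simply a write(2) to the trace_marker file. A minimal sketch follows (illustrative, not part of the patch; the path assumes debugfs is mounted at /sys/kernel/debug):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char msg[] = "hello from userspace";
	int fd = open("/sys/kernel/debug/tracing/trace_marker", O_WRONLY);

	if (fd < 0) {
		perror("trace_marker");
		return 1;
	}
	/* Each write() becomes one TRACE_PRINT entry; per the patch above,
	 * the kernel appends a '\n' if the message does not end with one. */
	if (write(fd, msg, strlen(msg)) < 0)
		perror("write");
	close(fd);
	return 0;
}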
 
@@ -4612,6 +4677,12 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
 
        tracing_off();
 
+       /* Did function tracer already get disabled? */
+       if (ftrace_is_dead()) {
+               printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
+               printk("#          MAY BE MISSING FUNCTION EVENTS\n");
+       }
+
        if (disable_tracing)
                ftrace_kill();