tracing: Add percpu buffers for trace_printk()
authorSteven Rostedt <srostedt@redhat.com>
Thu, 22 Sep 2011 18:01:55 +0000 (14:01 -0400)
committerSteven Rostedt <rostedt@goodmis.org>
Tue, 24 Apr 2012 01:15:55 +0000 (21:15 -0400)
Currently, trace_printk() writes into a single shared buffer to
calculate the size and format needed to save the trace. To
do this safely in an SMP environment, a spin_lock() is taken
to only allow one writer at a time to the buffer. But this could
also affect what is being traced, and add synchronization that
would not be there otherwise.

Ideally, using percpu buffers would be useful, but since trace_printk()
is only used in development, having per cpu buffers for something
never used is a waste of space. Thus, the use of the trace_bprintk()
format section is changed to be used for static fmts as well as dynamic ones.
Then at boot up, we check whether the section that holds the
trace_printk formats is non-empty; if it contains something, we
know a trace_printk() has been added to the kernel. At this time
the trace_printk per cpu buffers are allocated. A check is also
done at module load time in case a module is added that contains a
trace_printk().

Once the buffers are allocated, they are never freed. If you use
a trace_printk() then you should know what you are doing.

A buffer is made for each type of context:

  normal
  softirq
  irq
  nmi

The context is checked and the appropriate buffer is used.
This allows for totally lockless usage of trace_printk(),
and they no longer even disable interrupts.

Requested-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
include/linux/kernel.h
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_printk.c

index 645231c..c0d3442 100644 (file)
@@ -480,15 +480,16 @@ do {                                                                      \
 
 #define trace_printk(fmt, args...)                                     \
 do {                                                                   \
+       static const char *trace_printk_fmt                             \
+               __attribute__((section("__trace_printk_fmt"))) =        \
+               __builtin_constant_p(fmt) ? fmt : NULL;                 \
+                                                                       \
        __trace_printk_check_format(fmt, ##args);                       \
-       if (__builtin_constant_p(fmt)) {                                \
-               static const char *trace_printk_fmt                     \
-                 __attribute__((section("__trace_printk_fmt"))) =      \
-                       __builtin_constant_p(fmt) ? fmt : NULL;         \
                                                                        \
+       if (__builtin_constant_p(fmt))                                  \
                __trace_bprintk(_THIS_IP_, trace_printk_fmt, ##args);   \
-       } else                                                          \
-               __trace_printk(_THIS_IP_, fmt, ##args);         \
+       else                                                            \
+               __trace_printk(_THIS_IP_, fmt, ##args);                 \
 } while (0)
 
 extern __printf(2, 3)
index ed7b5d1..1ab8e35 100644 (file)
@@ -1498,25 +1498,119 @@ static void __trace_userstack(struct trace_array *tr, unsigned long flags)
 
 #endif /* CONFIG_STACKTRACE */
 
+/* created for use with alloc_percpu */
+struct trace_buffer_struct {
+       char buffer[TRACE_BUF_SIZE];
+};
+
+static struct trace_buffer_struct *trace_percpu_buffer;
+static struct trace_buffer_struct *trace_percpu_sirq_buffer;
+static struct trace_buffer_struct *trace_percpu_irq_buffer;
+static struct trace_buffer_struct *trace_percpu_nmi_buffer;
+
+/*
+ * The buffer used is dependent on the context. There is a per cpu
+ * buffer for normal context, softirq context, hard irq context and
+ * for NMI context. This allows for lockless recording.
+ *
+ * Note, if the buffers failed to be allocated, then this returns NULL
+ */
+static char *get_trace_buf(void)
+{
+       struct trace_buffer_struct *percpu_buffer;
+       struct trace_buffer_struct *buffer;
+
+       /*
+        * If we have allocated per cpu buffers, then we do not
+        * need to do any locking.
+        */
+       if (in_nmi())
+               percpu_buffer = trace_percpu_nmi_buffer;
+       else if (in_irq())
+               percpu_buffer = trace_percpu_irq_buffer;
+       else if (in_softirq())
+               percpu_buffer = trace_percpu_sirq_buffer;
+       else
+               percpu_buffer = trace_percpu_buffer;
+
+       if (!percpu_buffer)
+               return NULL;
+
+       buffer = per_cpu_ptr(percpu_buffer, smp_processor_id());
+
+       return buffer->buffer;
+}
+
+static int alloc_percpu_trace_buffer(void)
+{
+       struct trace_buffer_struct *buffers;
+       struct trace_buffer_struct *sirq_buffers;
+       struct trace_buffer_struct *irq_buffers;
+       struct trace_buffer_struct *nmi_buffers;
+
+       buffers = alloc_percpu(struct trace_buffer_struct);
+       if (!buffers)
+               goto err_warn;
+
+       sirq_buffers = alloc_percpu(struct trace_buffer_struct);
+       if (!sirq_buffers)
+               goto err_sirq;
+
+       irq_buffers = alloc_percpu(struct trace_buffer_struct);
+       if (!irq_buffers)
+               goto err_irq;
+
+       nmi_buffers = alloc_percpu(struct trace_buffer_struct);
+       if (!nmi_buffers)
+               goto err_nmi;
+
+       trace_percpu_buffer = buffers;
+       trace_percpu_sirq_buffer = sirq_buffers;
+       trace_percpu_irq_buffer = irq_buffers;
+       trace_percpu_nmi_buffer = nmi_buffers;
+
+       return 0;
+
+ err_nmi:
+       free_percpu(irq_buffers);
+ err_irq:
+       free_percpu(sirq_buffers);
+ err_sirq:
+       free_percpu(buffers);
+ err_warn:
+       WARN(1, "Could not allocate percpu trace_printk buffer");
+       return -ENOMEM;
+}
+
+void trace_printk_init_buffers(void)
+{
+       static int buffers_allocated;
+
+       if (buffers_allocated)
+               return;
+
+       if (alloc_percpu_trace_buffer())
+               return;
+
+       pr_info("ftrace: Allocated trace_printk buffers\n");
+
+       buffers_allocated = 1;
+}
+
 /**
  * trace_vbprintk - write binary msg to tracing buffer
  *
  */
 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 {
-       static arch_spinlock_t trace_buf_lock =
-               (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
-       static u32 trace_buf[TRACE_BUF_SIZE];
-
        struct ftrace_event_call *call = &event_bprint;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        struct trace_array *tr = &global_trace;
-       struct trace_array_cpu *data;
        struct bprint_entry *entry;
        unsigned long flags;
-       int disable;
-       int cpu, len = 0, size, pc;
+       char *tbuffer;
+       int len = 0, size, pc;
 
        if (unlikely(tracing_selftest_running || tracing_disabled))
                return 0;
@@ -1526,43 +1620,36 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 
        pc = preempt_count();
        preempt_disable_notrace();
-       cpu = raw_smp_processor_id();
-       data = tr->data[cpu];
 
-       disable = atomic_inc_return(&data->disabled);
-       if (unlikely(disable != 1))
+       tbuffer = get_trace_buf();
+       if (!tbuffer) {
+               len = 0;
                goto out;
+       }
 
-       /* Lockdep uses trace_printk for lock tracing */
-       local_irq_save(flags);
-       arch_spin_lock(&trace_buf_lock);
-       len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);
+       len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
 
-       if (len > TRACE_BUF_SIZE || len < 0)
-               goto out_unlock;
+       if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
+               goto out;
 
+       local_save_flags(flags);
        size = sizeof(*entry) + sizeof(u32) * len;
        buffer = tr->buffer;
        event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
                                          flags, pc);
        if (!event)
-               goto out_unlock;
+               goto out;
        entry = ring_buffer_event_data(event);
        entry->ip                       = ip;
        entry->fmt                      = fmt;
 
-       memcpy(entry->buf, trace_buf, sizeof(u32) * len);
+       memcpy(entry->buf, tbuffer, sizeof(u32) * len);
        if (!filter_check_discard(call, entry, buffer, event)) {
                ring_buffer_unlock_commit(buffer, event);
                ftrace_trace_stack(buffer, flags, 6, pc);
        }
 
-out_unlock:
-       arch_spin_unlock(&trace_buf_lock);
-       local_irq_restore(flags);
-
 out:
-       atomic_dec_return(&data->disabled);
        preempt_enable_notrace();
        unpause_graph_tracing();
 
@@ -1588,58 +1675,53 @@ int trace_array_printk(struct trace_array *tr,
 int trace_array_vprintk(struct trace_array *tr,
                        unsigned long ip, const char *fmt, va_list args)
 {
-       static arch_spinlock_t trace_buf_lock = __ARCH_SPIN_LOCK_UNLOCKED;
-       static char trace_buf[TRACE_BUF_SIZE];
-
        struct ftrace_event_call *call = &event_print;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
-       struct trace_array_cpu *data;
-       int cpu, len = 0, size, pc;
+       int len = 0, size, pc;
        struct print_entry *entry;
-       unsigned long irq_flags;
-       int disable;
+       unsigned long flags;
+       char *tbuffer;
 
        if (tracing_disabled || tracing_selftest_running)
                return 0;
 
+       /* Don't pollute graph traces with trace_vprintk internals */
+       pause_graph_tracing();
+
        pc = preempt_count();
        preempt_disable_notrace();
-       cpu = raw_smp_processor_id();
-       data = tr->data[cpu];
 
-       disable = atomic_inc_return(&data->disabled);
-       if (unlikely(disable != 1))
+
+       tbuffer = get_trace_buf();
+       if (!tbuffer) {
+               len = 0;
                goto out;
+       }
 
-       pause_graph_tracing();
-       raw_local_irq_save(irq_flags);
-       arch_spin_lock(&trace_buf_lock);
-       len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
+       len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
+       if (len > TRACE_BUF_SIZE)
+               goto out;
 
+       local_save_flags(flags);
        size = sizeof(*entry) + len + 1;
        buffer = tr->buffer;
        event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
-                                         irq_flags, pc);
+                                         flags, pc);
        if (!event)
-               goto out_unlock;
+               goto out;
        entry = ring_buffer_event_data(event);
        entry->ip = ip;
 
-       memcpy(&entry->buf, trace_buf, len);
+       memcpy(&entry->buf, tbuffer, len);
        entry->buf[len] = '\0';
        if (!filter_check_discard(call, entry, buffer, event)) {
                ring_buffer_unlock_commit(buffer, event);
-               ftrace_trace_stack(buffer, irq_flags, 6, pc);
+               ftrace_trace_stack(buffer, flags, 6, pc);
        }
-
- out_unlock:
-       arch_spin_unlock(&trace_buf_lock);
-       raw_local_irq_restore(irq_flags);
-       unpause_graph_tracing();
  out:
-       atomic_dec_return(&data->disabled);
        preempt_enable_notrace();
+       unpause_graph_tracing();
 
        return len;
 }
@@ -4955,6 +5037,10 @@ __init static int tracer_alloc_buffers(void)
        if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
                goto out_free_buffer_mask;
 
+       /* Only allocate trace_printk buffers if a trace_printk exists */
+       if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
+               trace_printk_init_buffers();
+
        /* To save memory, keep the ring buffer size to its minimum */
        if (ring_buffer_expanded)
                ring_buf_size = trace_buf_size;
index 95059f0..f9d8550 100644 (file)
@@ -826,6 +826,8 @@ extern struct list_head ftrace_events;
 extern const char *__start___trace_bprintk_fmt[];
 extern const char *__stop___trace_bprintk_fmt[];
 
+void trace_printk_init_buffers(void);
+
 #undef FTRACE_ENTRY
 #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter)    \
        extern struct ftrace_event_call                                 \
index 6fd4ffd..a9077c1 100644 (file)
@@ -51,6 +51,10 @@ void hold_module_trace_bprintk_format(const char **start, const char **end)
        const char **iter;
        char *fmt;
 
+       /* allocate the trace_printk per cpu buffers */
+       if (start != end)
+               trace_printk_init_buffers();
+
        mutex_lock(&btrace_mutex);
        for (iter = start; iter < end; iter++) {
                struct trace_bprintk_fmt *tb_fmt = lookup_format(*iter);