* Copyright (C) 2004-2006 Ingo Molnar
* Copyright (C) 2004 William Lee Irwin III
*/
+#include <linux/ring_buffer.h>
#include <linux/utsrelease.h>
+#include <linux/stacktrace.h>
+#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
+#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
+#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
+#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/gfp.h>
#include <linux/fs.h>
-#include <linux/kprobes.h>
-#include <linux/writeback.h>
-#include <linux/splice.h>
-
-#include <linux/stacktrace.h>
-#include <linux/ring_buffer.h>
-#include <linux/irqflags.h>
#include "trace.h"
#include "trace_output.h"
unsigned long __read_mostly tracing_max_latency;
unsigned long __read_mostly tracing_thresh;
+/*
+ * On boot up, the ring buffer is set to the minimum size, so that
+ * we do not waste memory on systems that are not using tracing.
+ */
+static int ring_buffer_expanded;
+
/*
* We need to change this state when a selftest is running.
* A selftest will lurk into the ring-buffer to count the
* entries inserted during the selftest although some concurrent
- * insertions into the ring-buffer such as ftrace_printk could occurred
+ * insertions into the ring-buffer such as trace_printk could occur
* at the same time, giving false positive or negative results.
*/
static bool __read_mostly tracing_selftest_running;
{
strncpy(bootup_tracer_buf, str, BOOTUP_TRACER_SIZE);
default_bootup_tracer = bootup_tracer_buf;
+	/* We are using ftrace early, expand the ring buffer */
+ ring_buffer_expanded = 1;
return 1;
}
__setup("ftrace=", set_ftrace);
"block",
"stacktrace",
"sched-tree",
- "ftrace_printk",
+ "trace_printk",
"ftrace_preempt",
"branch",
"annotate",
"sym-userobj",
"printk-msg-only",
"context-info",
+ "latency-format",
NULL
};
tracing_record_cmdline(tsk);
}
-static void
-trace_seq_reset(struct trace_seq *s)
-{
- s->len = 0;
- s->readpos = 0;
-}
-
ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
int len;
int ret;
+ if (!cnt)
+ return 0;
+
if (s->len <= s->readpos)
return -EBUSY;
if (cnt > len)
cnt = len;
ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
- if (ret)
+ if (ret == cnt)
return -EFAULT;
- s->readpos += len;
+ cnt -= ret;
+
+ s->readpos += cnt;
return cnt;
}
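
/*
 * Illustrative note (not part of this patch): copy_to_user() returns
 * the number of bytes it could NOT copy.  The rewrite above therefore
 * only fails hard when nothing at all was copied (ret == cnt); a
 * partial copy is reported back, and readpos advances by exactly what
 * the user received.  The pattern, in short:
 *
 *      copied = cnt - copy_to_user(ubuf, s->buffer + s->readpos, cnt);
 *      if (!copied)
 *              return -EFAULT;
 *      s->readpos += copied;
 *      return copied;
 */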
if (!ret)
return -EFAULT;
- s->readpos += len;
+ s->readpos += cnt;
return cnt;
}
s->buffer[len] = 0;
seq_puts(m, s->buffer);
- trace_seq_reset(s);
+ trace_seq_init(s);
}
/**
}
#define SAVED_CMDLINES 128
+#define NO_CMDLINE_MAP UINT_MAX
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
-static DEFINE_SPINLOCK(trace_cmdline_lock);
+static raw_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED;
/* temporary disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;
static void trace_init_cmdlines(void)
{
- memset(&map_pid_to_cmdline, -1, sizeof(map_pid_to_cmdline));
- memset(&map_cmdline_to_pid, -1, sizeof(map_cmdline_to_pid));
+ memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
+ memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
cmdline_idx = 0;
}
static void trace_save_cmdline(struct task_struct *tsk)
{
- unsigned map;
- unsigned idx;
+ unsigned pid, idx;
if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
return;
* nor do we want to disable interrupts,
* so if we miss here, then better luck next time.
*/
- if (!spin_trylock(&trace_cmdline_lock))
+ if (!__raw_spin_trylock(&trace_cmdline_lock))
return;
idx = map_pid_to_cmdline[tsk->pid];
- if (idx >= SAVED_CMDLINES) {
+ if (idx == NO_CMDLINE_MAP) {
idx = (cmdline_idx + 1) % SAVED_CMDLINES;
- map = map_cmdline_to_pid[idx];
- if (map <= PID_MAX_DEFAULT)
- map_pid_to_cmdline[map] = (unsigned)-1;
+ /*
+ * Check whether the cmdline buffer at idx has a pid
+ * mapped. We are going to overwrite that entry so we
+ * need to clear its map_pid_to_cmdline entry. Otherwise we
+ * would read the new comm for the old pid.
+ */
+ pid = map_cmdline_to_pid[idx];
+ if (pid != NO_CMDLINE_MAP)
+ map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
+ map_cmdline_to_pid[idx] = tsk->pid;
map_pid_to_cmdline[tsk->pid] = idx;
cmdline_idx = idx;
memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
- spin_unlock(&trace_cmdline_lock);
+ __raw_spin_unlock(&trace_cmdline_lock);
}
-char *trace_find_cmdline(int pid)
+void trace_find_cmdline(int pid, char comm[])
{
- char *cmdline = "<...>";
unsigned map;
- if (!pid)
- return "<idle>";
+ if (!pid) {
+ strcpy(comm, "<idle>");
+ return;
+ }
- if (pid > PID_MAX_DEFAULT)
- goto out;
+ if (pid > PID_MAX_DEFAULT) {
+ strcpy(comm, "<...>");
+ return;
+ }
+ __raw_spin_lock(&trace_cmdline_lock);
map = map_pid_to_cmdline[pid];
- if (map >= SAVED_CMDLINES)
- goto out;
-
- cmdline = saved_cmdlines[map];
+ if (map != NO_CMDLINE_MAP)
+ strcpy(comm, saved_cmdlines[map]);
+ else
+ strcpy(comm, "<...>");
- out:
- return cmdline;
+ __raw_spin_unlock(&trace_cmdline_lock);
}
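
/*
 * Illustrative usage (not part of this patch): with the new signature
 * the caller supplies the storage, which must be at least
 * TASK_COMM_LEN bytes.  The copy is made under trace_cmdline_lock, so
 * the returned comm can no longer change behind the caller's back:
 *
 *      char comm[TASK_COMM_LEN];
 *
 *      trace_find_cmdline(entry->pid, comm);
 *      trace_seq_printf(s, "%16s-%-5d", comm, entry->pid);
 */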
void tracing_record_cmdline(struct task_struct *tsk)
{
- if (atomic_read(&trace_record_cmdline_disabled))
+ if (atomic_read(&trace_record_cmdline_disabled) || !tracer_enabled ||
+ !tracing_is_on())
return;
trace_save_cmdline(tsk);
entry->preempt_count = pc & 0xff;
entry->pid = (tsk) ? tsk->pid : 0;
- entry->tgid = (tsk) ? tsk->tgid : 0;
+ entry->tgid = (tsk) ? tsk->tgid : 0;
entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
trace_wake_up();
}
+struct ring_buffer_event *
+trace_current_buffer_lock_reserve(unsigned char type, unsigned long len,
+ unsigned long flags, int pc)
+{
+ return trace_buffer_lock_reserve(&global_trace,
+ type, len, flags, pc);
+}
+
+void trace_current_buffer_unlock_commit(struct ring_buffer_event *event,
+ unsigned long flags, int pc)
+{
+ return trace_buffer_unlock_commit(&global_trace, event, flags, pc);
+}
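
/*
 * Illustrative sketch (not part of this patch): these wrappers let
 * event code reserve space in the global trace buffer, fill in the
 * payload, and commit it without referencing global_trace directly.
 * The pattern mirrors trace_function() below:
 *
 *      struct ring_buffer_event *event;
 *      struct ftrace_entry *entry;
 *
 *      event = trace_current_buffer_lock_reserve(TRACE_FN,
 *                                                sizeof(*entry), flags, pc);
 *      if (!event)
 *              return;
 *      entry = ring_buffer_event_data(event);
 *      entry->ip = ip;
 *      entry->parent_ip = parent_ip;
 *      trace_current_buffer_unlock_commit(event, flags, pc);
 */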
+
void
trace_function(struct trace_array *tr,
unsigned long ip, unsigned long parent_ip, unsigned long flags,
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+/**
+ * trace_vbprintk - write a binary message into the tracing buffer
+ */
+int trace_vbprintk(unsigned long ip, int depth, const char *fmt, va_list args)
+{
+ static raw_spinlock_t trace_buf_lock =
+ (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+ static u32 trace_buf[TRACE_BUF_SIZE];
+
+ struct ring_buffer_event *event;
+ struct trace_array *tr = &global_trace;
+ struct trace_array_cpu *data;
+ struct bprint_entry *entry;
+ unsigned long flags;
+ int resched;
+ int cpu, len = 0, size, pc;
+
+ if (unlikely(tracing_selftest_running || tracing_disabled))
+ return 0;
+
+ /* Don't pollute graph traces with trace_vprintk internals */
+ pause_graph_tracing();
+
+ pc = preempt_count();
+ resched = ftrace_preempt_disable();
+ cpu = raw_smp_processor_id();
+ data = tr->data[cpu];
+
+ if (unlikely(atomic_read(&data->disabled)))
+ goto out;
+
+ /* Lockdep uses trace_printk for lock tracing */
+ local_irq_save(flags);
+ __raw_spin_lock(&trace_buf_lock);
+ len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);
+
+ if (len > TRACE_BUF_SIZE || len < 0)
+ goto out_unlock;
+
+ size = sizeof(*entry) + sizeof(u32) * len;
+ event = trace_buffer_lock_reserve(tr, TRACE_BPRINT, size, flags, pc);
+ if (!event)
+ goto out_unlock;
+ entry = ring_buffer_event_data(event);
+ entry->ip = ip;
+ entry->depth = depth;
+ entry->fmt = fmt;
+
+ memcpy(entry->buf, trace_buf, sizeof(u32) * len);
+ ring_buffer_unlock_commit(tr->buffer, event);
+
+out_unlock:
+ __raw_spin_unlock(&trace_buf_lock);
+ local_irq_restore(flags);
+
+out:
+ ftrace_preempt_enable(resched);
+ unpause_graph_tracing();
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(trace_vbprintk);
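
/*
 * Illustrative note (not part of this patch): trace_vbprintk() does
 * no text formatting at trace time.  vbin_printf() only copies the
 * variadic arguments into an array of u32 words, and the event stores
 * a pointer to the format string; the expensive formatting is
 * deferred to read time, when trace_seq_bprintf() (see
 * print_bprintk_msg_only() below) replays the saved arguments against
 * the saved format.  That keeps the tracing fast path cheap.
 */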
+
+int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
+{
+ static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
+ static char trace_buf[TRACE_BUF_SIZE];
+
+ struct ring_buffer_event *event;
+ struct trace_array *tr = &global_trace;
+ struct trace_array_cpu *data;
+ int cpu, len = 0, size, pc;
+ struct print_entry *entry;
+ unsigned long irq_flags;
+
+ if (tracing_disabled || tracing_selftest_running)
+ return 0;
+
+ pc = preempt_count();
+ preempt_disable_notrace();
+ cpu = raw_smp_processor_id();
+ data = tr->data[cpu];
+
+ if (unlikely(atomic_read(&data->disabled)))
+ goto out;
+
+ pause_graph_tracing();
+ raw_local_irq_save(irq_flags);
+ __raw_spin_lock(&trace_buf_lock);
+ len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
+
+ len = min(len, TRACE_BUF_SIZE-1);
+ trace_buf[len] = 0;
+
+ size = sizeof(*entry) + len + 1;
+ event = trace_buffer_lock_reserve(tr, TRACE_PRINT, size, irq_flags, pc);
+ if (!event)
+ goto out_unlock;
+ entry = ring_buffer_event_data(event);
+ entry->ip = ip;
+ entry->depth = depth;
+
+ memcpy(&entry->buf, trace_buf, len);
+ entry->buf[len] = 0;
+ ring_buffer_unlock_commit(tr->buffer, event);
+
+ out_unlock:
+ __raw_spin_unlock(&trace_buf_lock);
+ raw_local_irq_restore(irq_flags);
+ unpause_graph_tracing();
+ out:
+ preempt_enable_notrace();
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(trace_vprintk);
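
/*
 * Illustrative sketch (not part of this patch): a varargs wrapper
 * around trace_vprintk() looks like the __ftrace_printk() this patch
 * removes further down:
 *
 *      int my_trace_printk(unsigned long ip, const char *fmt, ...)
 *      {
 *              va_list ap;
 *              int ret;
 *
 *              va_start(ap, fmt);
 *              ret = trace_vprintk(ip, task_curr_ret_stack(current),
 *                                  fmt, ap);
 *              va_end(ap);
 *              return ret;
 *      }
 */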
+
enum trace_file_type {
TRACE_FILE_LAT_FMT = 1,
TRACE_FILE_ANNOTATE = 2,
total = entries +
ring_buffer_overruns(iter->tr->buffer);
- seq_printf(m, "%s latency trace v1.1.5 on %s\n",
+ seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
name, UTS_RELEASE);
- seq_puts(m, "-----------------------------------"
+ seq_puts(m, "# -----------------------------------"
"---------------------------------\n");
- seq_printf(m, " latency: %lu us, #%lu/%lu, CPU#%d |"
+ seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
" (M:%s VP:%d, KP:%d, SP:%d HP:%d",
nsecs_to_usecs(data->saved_latency),
entries,
#else
seq_puts(m, ")\n");
#endif
- seq_puts(m, " -----------------\n");
- seq_printf(m, " | task: %.16s-%d "
+ seq_puts(m, "# -----------------\n");
+ seq_printf(m, "# | task: %.16s-%d "
"(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
data->comm, data->pid, data->uid, data->nice,
data->policy, data->rt_priority);
- seq_puts(m, " -----------------\n");
+ seq_puts(m, "# -----------------\n");
if (data->critical_start) {
- seq_puts(m, " => started at: ");
+ seq_puts(m, "# => started at: ");
seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
trace_print_seq(m, &iter->seq);
- seq_puts(m, "\n => ended at: ");
+ seq_puts(m, "\n# => ended at: ");
seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
trace_print_seq(m, &iter->seq);
- seq_puts(m, "\n");
+ seq_puts(m, "#\n");
}
- seq_puts(m, "\n");
+ seq_puts(m, "#\n");
}
static void test_cpu_buff_start(struct trace_iterator *iter)
trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu);
}
-static enum print_line_t print_lat_fmt(struct trace_iterator *iter)
-{
- struct trace_seq *s = &iter->seq;
- unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
- struct trace_event *event;
- struct trace_entry *entry = iter->ent;
-
- test_cpu_buff_start(iter);
-
- event = ftrace_find_event(entry->type);
-
- if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
- if (!trace_print_lat_context(iter))
- goto partial;
- }
-
- if (event)
- return event->latency_trace(iter, sym_flags);
-
- if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
- goto partial;
-
- return TRACE_TYPE_HANDLED;
-partial:
- return TRACE_TYPE_PARTIAL_LINE;
-}
-
static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
{
struct trace_seq *s = &iter->seq;
event = ftrace_find_event(entry->type);
if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
- if (!trace_print_context(iter))
- goto partial;
+ if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
+ if (!trace_print_lat_context(iter))
+ goto partial;
+ } else {
+ if (!trace_print_context(iter))
+ goto partial;
+ }
}
if (event)
return TRACE_TYPE_HANDLED;
}
+static enum print_line_t print_bprintk_msg_only(struct trace_iterator *iter)
+{
+ struct trace_seq *s = &iter->seq;
+ struct trace_entry *entry = iter->ent;
+ struct bprint_entry *field;
+ int ret;
+
+ trace_assign_type(field, entry);
+
+ ret = trace_seq_bprintf(s, field->fmt, field->buf);
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+
+ return TRACE_TYPE_HANDLED;
+}
+
static enum print_line_t print_printk_msg_only(struct trace_iterator *iter)
{
struct trace_seq *s = &iter->seq;
{
int cpu;
+ /* If we are looking at one CPU buffer, only check that one */
+ if (iter->cpu_file != TRACE_PIPE_ALL_CPU) {
+ cpu = iter->cpu_file;
+ if (iter->buffer_iter[cpu]) {
+ if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
+ return 0;
+ } else {
+ if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
+ return 0;
+ }
+ return 1;
+ }
+
for_each_tracing_cpu(cpu) {
if (iter->buffer_iter[cpu]) {
if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
return ret;
}
+ if (iter->ent->type == TRACE_BPRINT &&
+ trace_flags & TRACE_ITER_PRINTK &&
+ trace_flags & TRACE_ITER_PRINTK_MSGONLY)
+ return print_bprintk_msg_only(iter);
+
if (iter->ent->type == TRACE_PRINT &&
trace_flags & TRACE_ITER_PRINTK &&
trace_flags & TRACE_ITER_PRINTK_MSGONLY)
if (trace_flags & TRACE_ITER_RAW)
return print_raw_fmt(iter);
- if (iter->iter_flags & TRACE_FILE_LAT_FMT)
- return print_lat_fmt(iter);
-
return print_trace_fmt(iter);
}
};
static struct trace_iterator *
-__tracing_open(struct inode *inode, struct file *file, int *ret)
+__tracing_open(struct inode *inode, struct file *file)
{
long cpu_file = (long) inode->i_private;
+ void *fail_ret = ERR_PTR(-ENOMEM);
struct trace_iterator *iter;
struct seq_file *m;
- int cpu;
+ int cpu, ret;
- if (tracing_disabled) {
- *ret = -ENODEV;
- return NULL;
- }
+ if (tracing_disabled)
+ return ERR_PTR(-ENODEV);
iter = kzalloc(sizeof(*iter), GFP_KERNEL);
- if (!iter) {
- *ret = -ENOMEM;
- goto out;
- }
+ if (!iter)
+ return ERR_PTR(-ENOMEM);
/*
* We make a copy of the current tracer to avoid concurrent
*/
mutex_lock(&trace_types_lock);
iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
- if (!iter->trace) {
- *ret = -ENOMEM;
+ if (!iter->trace)
goto fail;
- }
+
if (current_trace)
*iter->trace = *current_trace;
iter->buffer_iter[cpu] =
ring_buffer_read_start(iter->tr->buffer, cpu);
-
- if (!iter->buffer_iter[cpu])
- goto fail_buffer;
}
} else {
cpu = iter->cpu_file;
iter->buffer_iter[cpu] =
ring_buffer_read_start(iter->tr->buffer, cpu);
-
- if (!iter->buffer_iter[cpu])
- goto fail;
}
/* TODO stop tracer */
- *ret = seq_open(file, &tracer_seq_ops);
- if (*ret)
+ ret = seq_open(file, &tracer_seq_ops);
+ if (ret < 0) {
+ fail_ret = ERR_PTR(ret);
goto fail_buffer;
+ }
m = file->private_data;
m->private = iter;
mutex_unlock(&trace_types_lock);
- out:
return iter;
fail_buffer:
kfree(iter->trace);
kfree(iter);
- return ERR_PTR(-ENOMEM);
+ return fail_ret;
}
int tracing_open_generic(struct inode *inode, struct file *filp)
}
static int tracing_open(struct inode *inode, struct file *file)
-{
- int ret;
-
- __tracing_open(inode, file, &ret);
-
- return ret;
-}
-
-static int tracing_lt_open(struct inode *inode, struct file *file)
{
struct trace_iterator *iter;
- int ret;
-
- iter = __tracing_open(inode, file, &ret);
+ int ret = 0;
- if (!ret)
+ iter = __tracing_open(inode, file);
+ if (IS_ERR(iter))
+ ret = PTR_ERR(iter);
+ else if (trace_flags & TRACE_ITER_LATENCY_FMT)
iter->iter_flags |= TRACE_FILE_LAT_FMT;
return ret;
}
-
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
return ret;
}
-static struct file_operations tracing_fops = {
+static const struct file_operations tracing_fops = {
.open = tracing_open,
.read = seq_read,
.llseek = seq_lseek,
.release = tracing_release,
};
-static struct file_operations tracing_lt_fops = {
- .open = tracing_lt_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = tracing_release,
-};
-
-static struct file_operations show_traces_fops = {
+static const struct file_operations show_traces_fops = {
.open = show_traces_open,
.read = seq_read,
.release = seq_release,
return err;
}
-static struct file_operations tracing_cpumask_fops = {
+static const struct file_operations tracing_cpumask_fops = {
.open = tracing_open_generic,
.read = tracing_cpumask_read,
.write = tracing_cpumask_write,
tracing_trace_options_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
- int i;
+ struct tracer_opt *trace_opts;
+ u32 tracer_flags;
+ int len = 0;
char *buf;
int r = 0;
- int len = 0;
- u32 tracer_flags = current_trace->flags->val;
- struct tracer_opt *trace_opts = current_trace->flags->opts;
+ int i;
/* calculate max size */
for (i = 0; trace_options[i]; i++) {
len += strlen(trace_options[i]);
- len += 3; /* "no" and space */
+ len += 3; /* "no" and newline */
}
+ mutex_lock(&trace_types_lock);
+ tracer_flags = current_trace->flags->val;
+ trace_opts = current_trace->flags->opts;
+
/*
* Increase the size with names of options specific
* of the current tracer.
*/
for (i = 0; trace_opts[i].name; i++) {
len += strlen(trace_opts[i].name);
- len += 3; /* "no" and space */
+ len += 3; /* "no" and newline */
}
/* +2 for \n and \0 */
buf = kmalloc(len + 2, GFP_KERNEL);
- if (!buf)
+ if (!buf) {
+ mutex_unlock(&trace_types_lock);
return -ENOMEM;
+ }
for (i = 0; trace_options[i]; i++) {
if (trace_flags & (1 << i))
- r += sprintf(buf + r, "%s ", trace_options[i]);
+ r += sprintf(buf + r, "%s\n", trace_options[i]);
else
- r += sprintf(buf + r, "no%s ", trace_options[i]);
+ r += sprintf(buf + r, "no%s\n", trace_options[i]);
}
for (i = 0; trace_opts[i].name; i++) {
if (tracer_flags & trace_opts[i].bit)
- r += sprintf(buf + r, "%s ",
+ r += sprintf(buf + r, "%s\n",
trace_opts[i].name);
else
- r += sprintf(buf + r, "no%s ",
+ r += sprintf(buf + r, "no%s\n",
trace_opts[i].name);
}
+ mutex_unlock(&trace_types_lock);
- r += sprintf(buf + r, "\n");
WARN_ON(r >= len + 2);
r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
kfree(buf);
-
return r;
}
/* If no option could be set, test the specific tracer options */
if (!trace_options[i]) {
+ mutex_lock(&trace_types_lock);
ret = set_tracer_option(current_trace, cmp, neg);
+ mutex_unlock(&trace_types_lock);
if (ret)
return ret;
}
return cnt;
}
-static struct file_operations tracing_iter_fops = {
+static const struct file_operations tracing_iter_fops = {
.open = tracing_open_generic,
.read = tracing_trace_options_read,
.write = tracing_trace_options_write,
readme_msg, strlen(readme_msg));
}
-static struct file_operations tracing_readme_fops = {
+static const struct file_operations tracing_readme_fops = {
.open = tracing_open_generic,
.read = tracing_readme_read,
};
return t->init(tr);
}
-static int tracing_set_tracer(const char *buf)
+static int tracing_resize_ring_buffer(unsigned long size)
{
- struct trace_array *tr = &global_trace;
- struct tracer *t;
- int ret = 0;
+ int ret;
- mutex_lock(&trace_types_lock);
- for (t = trace_types; t; t = t->next) {
- if (strcmp(t->name, buf) == 0)
- break;
- }
- if (!t) {
- ret = -EINVAL;
- goto out;
+ /*
+ * If kernel or user changes the size of the ring buffer
+ * we use the size that was given, and we can forget about
+ * expanding it later.
+ */
+ ring_buffer_expanded = 1;
+
+ ret = ring_buffer_resize(global_trace.buffer, size);
+ if (ret < 0)
+ return ret;
+
+ ret = ring_buffer_resize(max_tr.buffer, size);
+ if (ret < 0) {
+ int r;
+
+ r = ring_buffer_resize(global_trace.buffer,
+ global_trace.entries);
+ if (r < 0) {
+ /*
+ * AARGH! We are left with differently
+ * sized buffers!
+ * The max buffer is our "snapshot" buffer.
+ * When a tracer needs a snapshot (one of the
+ * latency tracers), it swaps the max buffer
+ * with the saved snapshot. We succeeded in
+ * updating the size of the main buffer, but
+ * failed to update the size of the max buffer.
+ * And when we tried to reset the main buffer
+ * to its original size, we failed there too.
+ * This is very unlikely to happen, but if it
+ * does, warn and kill all tracing.
+ */
+ WARN_ON(1);
+ tracing_disabled = 1;
+ }
+ return ret;
}
- if (t == current_trace)
- goto out;
- trace_branch_disable();
- if (current_trace && current_trace->reset)
- current_trace->reset(tr);
+ global_trace.entries = size;
+
+ return ret;
+}
+
+/**
+ * tracing_update_buffers - used by tracing facility to expand ring buffers
+ *
+ * To save memory on systems where tracing is configured in but
+ * never used, the ring buffers start out at a minimum size. Once
+ * a user starts to use the tracing facility, they need to grow
+ * to their default size.
+ *
+ * This function is to be called when a tracer is about to be used.
+ */
+int tracing_update_buffers(void)
+{
+ int ret = 0;
+
+ mutex_lock(&trace_types_lock);
+ if (!ring_buffer_expanded)
+ ret = tracing_resize_ring_buffer(trace_buf_size);
+ mutex_unlock(&trace_types_lock);
+
+ return ret;
+}
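
/*
 * Illustrative usage (not part of this patch): any code that is about
 * to start generating trace data should expand the buffers first:
 *
 *      ret = tracing_update_buffers();
 *      if (ret < 0)
 *              return ret;
 *      ... now safe to enable the tracer or event ...
 */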
+
+struct trace_option_dentry;
+
+static struct trace_option_dentry *
+create_trace_option_files(struct tracer *tracer);
+
+static void
+destroy_trace_option_files(struct trace_option_dentry *topts);
+
+static int tracing_set_tracer(const char *buf)
+{
+ static struct trace_option_dentry *topts;
+ struct trace_array *tr = &global_trace;
+ struct tracer *t;
+ int ret = 0;
+
+ mutex_lock(&trace_types_lock);
+
+ if (!ring_buffer_expanded) {
+ ret = tracing_resize_ring_buffer(trace_buf_size);
+ if (ret < 0)
+ goto out;
+ ret = 0;
+ }
+
+ for (t = trace_types; t; t = t->next) {
+ if (strcmp(t->name, buf) == 0)
+ break;
+ }
+ if (!t) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (t == current_trace)
+ goto out;
+
+ trace_branch_disable();
+ if (current_trace && current_trace->reset)
+ current_trace->reset(tr);
+
+ destroy_trace_option_files(topts);
+
+ current_trace = t;
+
+ topts = create_trace_option_files(current_trace);
- current_trace = t;
if (t->init) {
ret = tracer_init(t, tr);
if (ret)
if (sret != -EBUSY)
return sret;
- trace_seq_reset(&iter->seq);
+ trace_seq_init(&iter->seq);
/* copy the tracer to avoid using a global lock all around */
mutex_lock(&trace_types_lock);
/* Now copy what we have to the user */
sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
if (iter->seq.readpos >= iter->seq.len)
- trace_seq_reset(&iter->seq);
+ trace_seq_init(&iter->seq);
/*
* If there was nothing to send to user, inspite of consuming trace
partial[i].offset = 0;
partial[i].len = iter->seq.len;
- trace_seq_reset(&iter->seq);
+ trace_seq_init(&iter->seq);
}
mutex_unlock(&iter->mutex);
size_t cnt, loff_t *ppos)
{
struct trace_array *tr = filp->private_data;
- char buf[64];
+ char buf[96];
int r;
- r = sprintf(buf, "%lu\n", tr->entries >> 10);
+ mutex_lock(&trace_types_lock);
+ if (!ring_buffer_expanded)
+ r = sprintf(buf, "%lu (expanded: %lu)\n",
+ tr->entries >> 10,
+ trace_buf_size >> 10);
+ else
+ r = sprintf(buf, "%lu\n", tr->entries >> 10);
+ mutex_unlock(&trace_types_lock);
+
return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
val <<= 10;
if (val != global_trace.entries) {
- ret = ring_buffer_resize(global_trace.buffer, val);
+ ret = tracing_resize_ring_buffer(val);
if (ret < 0) {
cnt = ret;
goto out;
}
-
- ret = ring_buffer_resize(max_tr.buffer, val);
- if (ret < 0) {
- int r;
- cnt = ret;
- r = ring_buffer_resize(global_trace.buffer,
- global_trace.entries);
- if (r < 0) {
- /* AARGH! We are left with different
- * size max buffer!!!! */
- WARN_ON(1);
- tracing_disabled = 1;
- }
- goto out;
- }
-
- global_trace.entries = val;
}
filp->f_pos += cnt;
return cnt;
}
-static struct file_operations tracing_max_lat_fops = {
+static const struct file_operations tracing_max_lat_fops = {
.open = tracing_open_generic,
.read = tracing_max_lat_read,
.write = tracing_max_lat_write,
};
-static struct file_operations tracing_ctrl_fops = {
+static const struct file_operations tracing_ctrl_fops = {
.open = tracing_open_generic,
.read = tracing_ctrl_read,
.write = tracing_ctrl_write,
};
-static struct file_operations set_tracer_fops = {
+static const struct file_operations set_tracer_fops = {
.open = tracing_open_generic,
.read = tracing_set_trace_read,
.write = tracing_set_trace_write,
};
-static struct file_operations tracing_pipe_fops = {
+static const struct file_operations tracing_pipe_fops = {
.open = tracing_open_pipe,
.poll = tracing_poll_pipe,
.read = tracing_read_pipe,
.release = tracing_release_pipe,
};
-static struct file_operations tracing_entries_fops = {
+static const struct file_operations tracing_entries_fops = {
.open = tracing_open_generic,
.read = tracing_entries_read,
.write = tracing_entries_write,
};
-static struct file_operations tracing_mark_fops = {
+static const struct file_operations tracing_mark_fops = {
.open = tracing_open_generic,
.write = tracing_mark_write,
};
+struct ftrace_buffer_info {
+ struct trace_array *tr;
+ void *spare;
+ int cpu;
+ unsigned int read;
+};
+
+static int tracing_buffers_open(struct inode *inode, struct file *filp)
+{
+ int cpu = (int)(long)inode->i_private;
+ struct ftrace_buffer_info *info;
+
+ if (tracing_disabled)
+ return -ENODEV;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->tr = &global_trace;
+ info->cpu = cpu;
+ info->spare = ring_buffer_alloc_read_page(info->tr->buffer);
+ /* Force reading ring buffer for first read */
+ info->read = (unsigned int)-1;
+ if (!info->spare)
+ goto out;
+
+ filp->private_data = info;
+
+ return 0;
+
+ out:
+ kfree(info);
+ return -ENOMEM;
+}
+
+static ssize_t
+tracing_buffers_read(struct file *filp, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ftrace_buffer_info *info = filp->private_data;
+ unsigned int pos;
+ ssize_t ret;
+ size_t size;
+
+ if (!count)
+ return 0;
+
+ /* Do we have previous read data to read? */
+ if (info->read < PAGE_SIZE)
+ goto read;
+
+ info->read = 0;
+
+ ret = ring_buffer_read_page(info->tr->buffer,
+ &info->spare,
+ count,
+ info->cpu, 0);
+ if (ret < 0)
+ return 0;
+
+ pos = ring_buffer_page_len(info->spare);
+
+ if (pos < PAGE_SIZE)
+ memset(info->spare + pos, 0, PAGE_SIZE - pos);
+
+read:
+ size = PAGE_SIZE - info->read;
+ if (size > count)
+ size = count;
+
+ ret = copy_to_user(ubuf, info->spare + info->read, size);
+ if (ret == size)
+ return -EFAULT;
+ size -= ret;
+
+ *ppos += size;
+ info->read += size;
+
+ return size;
+}
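
/*
 * Illustrative note (not part of this patch): info->spare is a page
 * on loan from the ring buffer.  ring_buffer_read_page() fills it,
 * swapping a whole buffer page into it when it can to avoid a copy,
 * and info->read tracks how much of that page has already been handed
 * to userspace; open() primes it to (unsigned int)-1 so the first
 * read() is forced to fetch a fresh page.
 */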
+
+static int tracing_buffers_release(struct inode *inode, struct file *file)
+{
+ struct ftrace_buffer_info *info = file->private_data;
+
+ ring_buffer_free_read_page(info->tr->buffer, info->spare);
+ kfree(info);
+
+ return 0;
+}
+
+struct buffer_ref {
+ struct ring_buffer *buffer;
+ void *page;
+ int ref;
+};
+
+static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
+ struct pipe_buffer *buf)
+{
+ struct buffer_ref *ref = (struct buffer_ref *)buf->private;
+
+ if (--ref->ref)
+ return;
+
+ ring_buffer_free_read_page(ref->buffer, ref->page);
+ kfree(ref);
+ buf->private = 0;
+}
+
+static int buffer_pipe_buf_steal(struct pipe_inode_info *pipe,
+ struct pipe_buffer *buf)
+{
+ return 1;
+}
+
+static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
+ struct pipe_buffer *buf)
+{
+ struct buffer_ref *ref = (struct buffer_ref *)buf->private;
+
+ ref->ref++;
+}
+
+/* Pipe buffer operations for a buffer. */
+static struct pipe_buf_operations buffer_pipe_buf_ops = {
+ .can_merge = 0,
+ .map = generic_pipe_buf_map,
+ .unmap = generic_pipe_buf_unmap,
+ .confirm = generic_pipe_buf_confirm,
+ .release = buffer_pipe_buf_release,
+ .steal = buffer_pipe_buf_steal,
+ .get = buffer_pipe_buf_get,
+};
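
/*
 * Illustrative note (not part of this patch): each page handed to
 * splice is wrapped in a buffer_ref carrying a plain reference count.
 * The ref starts at 1 when the page is queued, ->get() bumps it if
 * the pipe buffer is duplicated (e.g. by tee(2)), and ->release() and
 * buffer_spd_release() below drop it; the final drop hands the page
 * back via ring_buffer_free_read_page().
 */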
+
+/*
+ * Callback from splice_to_pipe(): releases any pages still left in
+ * the spd if we errored out while filling the pipe.
+ */
+static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
+{
+ struct buffer_ref *ref =
+ (struct buffer_ref *)spd->partial[i].private;
+
+ if (--ref->ref)
+ return;
+
+ ring_buffer_free_read_page(ref->buffer, ref->page);
+ kfree(ref);
+ spd->partial[i].private = 0;
+}
+
+static ssize_t
+tracing_buffers_splice_read(struct file *file, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags)
+{
+ struct ftrace_buffer_info *info = file->private_data;
+ struct partial_page partial[PIPE_BUFFERS];
+ struct page *pages[PIPE_BUFFERS];
+ struct splice_pipe_desc spd = {
+ .pages = pages,
+ .partial = partial,
+ .flags = flags,
+ .ops = &buffer_pipe_buf_ops,
+ .spd_release = buffer_spd_release,
+ };
+ struct buffer_ref *ref;
+ int size, i;
+ ssize_t ret;
+
+ /*
+ * We can't seek on a buffer input
+ */
+ if (unlikely(*ppos))
+ return -ESPIPE;
+
+ for (i = 0; i < PIPE_BUFFERS && len; i++, len -= size) {
+ struct page *page;
+ int r;
+
+ ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+ if (!ref)
+ break;
+
+ ref->ref = 1;
+ ref->buffer = info->tr->buffer;
+ ref->page = ring_buffer_alloc_read_page(ref->buffer);
+ if (!ref->page) {
+ kfree(ref);
+ break;
+ }
+
+ r = ring_buffer_read_page(ref->buffer, &ref->page,
+ len, info->cpu, 0);
+ if (r < 0) {
+ ring_buffer_free_read_page(ref->buffer,
+ ref->page);
+ kfree(ref);
+ break;
+ }
+
+ /*
+ * Zero out any leftover data; this page is going to
+ * user land.
+ */
+ size = ring_buffer_page_len(ref->page);
+ if (size < PAGE_SIZE)
+ memset(ref->page + size, 0, PAGE_SIZE - size);
+
+ page = virt_to_page(ref->page);
+
+ spd.pages[i] = page;
+ spd.partial[i].len = PAGE_SIZE;
+ spd.partial[i].offset = 0;
+ spd.partial[i].private = (unsigned long)ref;
+ spd.nr_pages++;
+ }
+
+ spd.nr_pages = i;
+
+ /* did we read anything? */
+ if (!spd.nr_pages) {
+ if (flags & SPLICE_F_NONBLOCK)
+ ret = -EAGAIN;
+ else
+ ret = 0;
+ /* TODO: block */
+ return ret;
+ }
+
+ ret = splice_to_pipe(pipe, &spd);
+
+ return ret;
+}
+
+static const struct file_operations tracing_buffers_fops = {
+ .open = tracing_buffers_open,
+ .read = tracing_buffers_read,
+ .release = tracing_buffers_release,
+ .splice_read = tracing_buffers_splice_read,
+ .llseek = no_llseek,
+};
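
/*
 * Illustrative userspace sketch (not part of this patch; the path
 * assumes debugfs is mounted at /sys/kernel/debug): trace_pipe_raw
 * exposes raw ring-buffer pages, so a reader can move them with
 * read() or zero-copy with splice():
 *
 *      int fds[2], fd;
 *
 *      fd = open("/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw",
 *                O_RDONLY);
 *      pipe(fds);
 *      splice(fd, NULL, fds[1], NULL, 4096, SPLICE_F_NONBLOCK);
 *      splice(fds[0], NULL, STDOUT_FILENO, NULL, 4096, 0);
 */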
+
#ifdef CONFIG_DYNAMIC_FTRACE
int __weak ftrace_arch_read_dyn_info(char *buf, int size)
return r;
}
-static struct file_operations tracing_dyn_info_fops = {
+static const struct file_operations tracing_dyn_info_fops = {
.open = tracing_open_generic,
.read = tracing_read_dyn_info,
};
(void *) cpu, &tracing_fops);
if (!entry)
pr_warning("Could not create debugfs 'trace' entry\n");
+
+ entry = debugfs_create_file("trace_pipe_raw", 0444, d_cpu,
+ (void *) cpu, &tracing_buffers_fops);
+ if (!entry)
+ pr_warning("Could not create debugfs 'trace_pipe_raw' entry\n");
}
#ifdef CONFIG_FTRACE_SELFTEST
#include "trace_selftest.c"
#endif
+struct trace_option_dentry {
+ struct tracer_opt *opt;
+ struct tracer_flags *flags;
+ struct dentry *entry;
+};
+
+static ssize_t
+trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
+ loff_t *ppos)
+{
+ struct trace_option_dentry *topt = filp->private_data;
+ char *buf;
+
+ if (topt->flags->val & topt->opt->bit)
+ buf = "1\n";
+ else
+ buf = "0\n";
+
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
+}
+
+static ssize_t
+trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
+ loff_t *ppos)
+{
+ struct trace_option_dentry *topt = filp->private_data;
+ unsigned long val;
+ char buf[64];
+ int ret;
+
+ if (cnt >= sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(&buf, ubuf, cnt))
+ return -EFAULT;
+
+ buf[cnt] = 0;
+
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ ret = 0;
+ switch (val) {
+ case 0:
+ /* do nothing if already cleared */
+ if (!(topt->flags->val & topt->opt->bit))
+ break;
+
+ mutex_lock(&trace_types_lock);
+ if (current_trace->set_flag)
+ ret = current_trace->set_flag(topt->flags->val,
+ topt->opt->bit, 0);
+ mutex_unlock(&trace_types_lock);
+ if (ret)
+ return ret;
+ topt->flags->val &= ~topt->opt->bit;
+ break;
+ case 1:
+ /* do nothing if already set */
+ if (topt->flags->val & topt->opt->bit)
+ break;
+
+ mutex_lock(&trace_types_lock);
+ if (current_trace->set_flag)
+ ret = current_trace->set_flag(topt->flags->val,
+ topt->opt->bit, 1);
+ mutex_unlock(&trace_types_lock);
+ if (ret)
+ return ret;
+ topt->flags->val |= topt->opt->bit;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ *ppos += cnt;
+
+ return cnt;
+}
+
+static const struct file_operations trace_options_fops = {
+ .open = tracing_open_generic,
+ .read = trace_options_read,
+ .write = trace_options_write,
+};
+
+static ssize_t
+trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
+ loff_t *ppos)
+{
+ long index = (long)filp->private_data;
+ char *buf;
+
+ if (trace_flags & (1 << index))
+ buf = "1\n";
+ else
+ buf = "0\n";
+
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
+}
+
+static ssize_t
+trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
+ loff_t *ppos)
+{
+ long index = (long)filp->private_data;
+ char buf[64];
+ unsigned long val;
+ int ret;
+
+ if (cnt >= sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(&buf, ubuf, cnt))
+ return -EFAULT;
+
+ buf[cnt] = 0;
+
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ switch (val) {
+ case 0:
+ trace_flags &= ~(1 << index);
+ break;
+ case 1:
+ trace_flags |= 1 << index;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ *ppos += cnt;
+
+ return cnt;
+}
+
+static const struct file_operations trace_options_core_fops = {
+ .open = tracing_open_generic,
+ .read = trace_options_core_read,
+ .write = trace_options_core_write,
+};
+
+static struct dentry *trace_options_init_dentry(void)
+{
+ struct dentry *d_tracer;
+ static struct dentry *t_options;
+
+ if (t_options)
+ return t_options;
+
+ d_tracer = tracing_init_dentry();
+ if (!d_tracer)
+ return NULL;
+
+ t_options = debugfs_create_dir("options", d_tracer);
+ if (!t_options) {
+ pr_warning("Could not create debugfs directory 'options'\n");
+ return NULL;
+ }
+
+ return t_options;
+}
+
+static void
+create_trace_option_file(struct trace_option_dentry *topt,
+ struct tracer_flags *flags,
+ struct tracer_opt *opt)
+{
+ struct dentry *t_options;
+ struct dentry *entry;
+
+ t_options = trace_options_init_dentry();
+ if (!t_options)
+ return;
+
+ topt->flags = flags;
+ topt->opt = opt;
+
+ entry = debugfs_create_file(opt->name, 0644, t_options, topt,
+ &trace_options_fops);
+
+ topt->entry = entry;
+}
+
+static struct trace_option_dentry *
+create_trace_option_files(struct tracer *tracer)
+{
+ struct trace_option_dentry *topts;
+ struct tracer_flags *flags;
+ struct tracer_opt *opts;
+ int cnt;
+
+ if (!tracer)
+ return NULL;
+
+ flags = tracer->flags;
+
+ if (!flags || !flags->opts)
+ return NULL;
+
+ opts = flags->opts;
+
+ for (cnt = 0; opts[cnt].name; cnt++)
+ ;
+
+ topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
+ if (!topts)
+ return NULL;
+
+ for (cnt = 0; opts[cnt].name; cnt++)
+ create_trace_option_file(&topts[cnt], flags,
+ &opts[cnt]);
+
+ return topts;
+}
+
+static void
+destroy_trace_option_files(struct trace_option_dentry *topts)
+{
+ int cnt;
+
+ if (!topts)
+ return;
+
+ for (cnt = 0; topts[cnt].opt; cnt++) {
+ if (topts[cnt].entry)
+ debugfs_remove(topts[cnt].entry);
+ }
+
+ kfree(topts);
+}
+
+static struct dentry *
+create_trace_option_core_file(const char *option, long index)
+{
+ struct dentry *t_options;
+ struct dentry *entry;
+
+ t_options = trace_options_init_dentry();
+ if (!t_options)
+ return NULL;
+
+ entry = debugfs_create_file(option, 0644, t_options, (void *)index,
+ &trace_options_core_fops);
+
+ return entry;
+}
+
+static __init void create_trace_options_dir(void)
+{
+ struct dentry *t_options;
+ struct dentry *entry;
+ int i;
+
+ t_options = trace_options_init_dentry();
+ if (!t_options)
+ return;
+
+ for (i = 0; trace_options[i]; i++) {
+ entry = create_trace_option_core_file(trace_options[i], i);
+ if (!entry)
+ pr_warning("Could not create debugfs %s entry\n",
+ trace_options[i]);
+ }
+}
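
/*
 * Illustrative usage (not part of this patch): every flag now also
 * appears as its own 0/1 debugfs file, so instead of parsing the
 * combined trace_options listing, a single option can be toggled and
 * queried directly (assuming debugfs at /sys/kernel/debug):
 *
 *      echo 1 > /sys/kernel/debug/tracing/options/latency-format
 *      cat /sys/kernel/debug/tracing/options/latency-format
 */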
+
static __init int tracer_init_debugfs(void)
{
struct dentry *d_tracer;
if (!entry)
pr_warning("Could not create debugfs 'trace_options' entry\n");
+ create_trace_options_dir();
+
entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer,
NULL, &tracing_cpumask_fops);
if (!entry)
return 0;
}
-int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
-{
- static DEFINE_SPINLOCK(trace_buf_lock);
- static char trace_buf[TRACE_BUF_SIZE];
-
- struct ring_buffer_event *event;
- struct trace_array *tr = &global_trace;
- struct trace_array_cpu *data;
- int cpu, len = 0, size, pc;
- struct print_entry *entry;
- unsigned long irq_flags;
-
- if (tracing_disabled || tracing_selftest_running)
- return 0;
-
- pc = preempt_count();
- preempt_disable_notrace();
- cpu = raw_smp_processor_id();
- data = tr->data[cpu];
-
- if (unlikely(atomic_read(&data->disabled)))
- goto out;
-
- pause_graph_tracing();
- spin_lock_irqsave(&trace_buf_lock, irq_flags);
- len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
-
- len = min(len, TRACE_BUF_SIZE-1);
- trace_buf[len] = 0;
-
- size = sizeof(*entry) + len + 1;
- event = trace_buffer_lock_reserve(tr, TRACE_PRINT, size, irq_flags, pc);
- if (!event)
- goto out_unlock;
- entry = ring_buffer_event_data(event);
- entry->ip = ip;
- entry->depth = depth;
-
- memcpy(&entry->buf, trace_buf, len);
- entry->buf[len] = 0;
- ring_buffer_unlock_commit(tr->buffer, event);
-
- out_unlock:
- spin_unlock_irqrestore(&trace_buf_lock, irq_flags);
- unpause_graph_tracing();
- out:
- preempt_enable_notrace();
-
- return len;
-}
-EXPORT_SYMBOL_GPL(trace_vprintk);
-
-int __ftrace_printk(unsigned long ip, const char *fmt, ...)
-{
- int ret;
- va_list ap;
-
- if (!(trace_flags & TRACE_ITER_PRINTK))
- return 0;
-
- va_start(ap, fmt);
- ret = trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap);
- va_end(ap);
- return ret;
-}
-EXPORT_SYMBOL_GPL(__ftrace_printk);
-
-int __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap)
-{
- if (!(trace_flags & TRACE_ITER_PRINTK))
- return 0;
-
- return trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap);
-}
-EXPORT_SYMBOL_GPL(__ftrace_vprintk);
-
static int trace_panic_handler(struct notifier_block *this,
unsigned long event, void *unused)
{
printk(KERN_TRACE "%s", s->buffer);
- trace_seq_reset(s);
+ trace_seq_init(s);
}
void ftrace_dump(void)
printk(KERN_TRACE "Dumping ftrace buffer:\n");
+ /* Simulate the iterator */
iter.tr = &global_trace;
iter.trace = current_trace;
+ iter.cpu_file = TRACE_PIPE_ALL_CPU;
/*
* We need to stop all tracing on all CPUS to read the
__init static int tracer_alloc_buffers(void)
{
struct trace_array_cpu *data;
+ int ring_buf_size;
int i;
int ret = -ENOMEM;
if (!alloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
goto out_free_tracing_cpumask;
+ /* To save memory, start the ring buffer at its minimum size */
+ if (ring_buffer_expanded)
+ ring_buf_size = trace_buf_size;
+ else
+ ring_buf_size = 1;
+
cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
cpumask_copy(tracing_cpumask, cpu_all_mask);
cpumask_clear(tracing_reader_cpumask);
/* TODO: make the number of buffers hot pluggable with CPUS */
- global_trace.buffer = ring_buffer_alloc(trace_buf_size,
+ global_trace.buffer = ring_buffer_alloc(ring_buf_size,
TRACE_BUFFER_FLAGS);
if (!global_trace.buffer) {
printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
#ifdef CONFIG_TRACER_MAX_TRACE
- max_tr.buffer = ring_buffer_alloc(trace_buf_size,
+ max_tr.buffer = ring_buffer_alloc(ring_buf_size,
TRACE_BUFFER_FLAGS);
if (!max_tr.buffer) {
printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
&trace_panic_notifier);
register_die_notifier(&trace_die_notifier);
- ret = 0;
+
+ return 0;
out_free_cpumask:
free_cpumask_var(tracing_reader_cpumask);