+#include <trace/syscall.h>
#include <linux/kernel.h>
-#include <linux/ftrace.h>
#include <asm/syscall.h>
#include "trace_output.h"
#include "trace.h"
-static atomic_t refcount;
+/* Count of syscall tracing users, serialized by syscall_trace_lock */
+static int refcount;
-/* Our two options */
+/* Protect thread flag toggling from races */
+static DEFINE_MUTEX(syscall_trace_lock);
+
+/* Option to display the parameter types */
enum {
TRACE_SYSCALLS_OPT_TYPES = 0x1,
};
static struct tracer_flags syscalls_flags = {
- .val = 0, /* By default: no args types */
+ .val = 0, /* By default: don't display parameter types */
.opts = syscalls_opts
};
unsigned long flags;
struct task_struct *g, *t;
+ mutex_lock(&syscall_trace_lock);
+
/* Don't enable the flag on the tasks twice */
- if (atomic_inc_return(&refcount) != 1)
- return;
+ if (++refcount != 1)
+ goto unlock;
arch_init_ftrace_syscalls();
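+ /* Set the syscall trace flag on every existing thread */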
read_lock_irqsave(&tasklist_lock, flags);
} while_each_thread(g, t);
read_unlock_irqrestore(&tasklist_lock, flags);
+
+unlock:
+ mutex_unlock(&syscall_trace_lock);
}
void stop_ftrace_syscalls(void)
unsigned long flags;
struct task_struct *g, *t;
+ mutex_lock(&syscall_trace_lock);
+
/* There are perhaps still some users */
- if (atomic_dec_return(&refcount))
- return;
+ if (--refcount)
+ goto unlock;
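+ /* Last user gone: clear the syscall trace flag on every thread */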
read_lock_irqsave(&tasklist_lock, flags);
} while_each_thread(g, t);
read_unlock_irqrestore(&tasklist_lock, flags);
+
+unlock:
+ mutex_unlock(&syscall_trace_lock);
}
void ftrace_syscall_enter(struct pt_regs *regs)
struct ring_buffer_event *event;
int size;
int syscall_nr;
- int cpu;
syscall_nr = syscall_get_nr(current, regs);
- cpu = raw_smp_processor_id();
-
sys_data = syscall_nr_to_meta(syscall_nr);
if (!sys_data)
return;
struct syscall_metadata *sys_data;
struct ring_buffer_event *event;
int syscall_nr;
- int cpu;
syscall_nr = syscall_get_nr(current, regs);
- cpu = raw_smp_processor_id();
-
sys_data = syscall_nr_to_meta(syscall_nr);
if (!sys_data)
return;