perf, trace: Optimize tracepoints by using per-tracepoint-per-cpu hlist to track...
/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"

EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs);

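/*
 * Per-cpu scratch buffers, one per perf software-event recursion
 * context (task, softirq, hardirq and NMI), hence the fixed size of 4.
 */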
static char *perf_trace_buf[4];

/*
 * Force it to be aligned to unsigned long to avoid misaligned access
 * surprises.
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
	perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int	total_ref_count;

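/*
 * Set up perf state for @tp_event on behalf of @p_event.  Only the
 * first event attached to a given tracepoint does real work: it
 * allocates the per-cpu hlist heads that track the tracepoint's
 * active events, allocates the shared scratch buffers if no other
 * trace event is using them yet, and enables the tracepoint.  Later
 * events just take a reference.
 */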
static int perf_trace_event_init(struct ftrace_event_call *tp_event,
				 struct perf_event *p_event)
{
	struct hlist_head *list;
	int ret = -ENOMEM;
	int cpu;

	p_event->tp_event = tp_event;
	if (tp_event->perf_refcount++ > 0)
		return 0;

	list = alloc_percpu(struct hlist_head);
	if (!list)
		goto fail;

	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

	tp_event->perf_events = list;

	if (!total_ref_count) {
		char *buf;
		int i;

		for (i = 0; i < 4; i++) {
			buf = (char *)alloc_percpu(perf_trace_t);
			if (!buf)
				goto fail;

			perf_trace_buf[i] = buf;
		}
	}

	ret = tp_event->perf_event_enable(tp_event);
	if (ret)
		goto fail;

	total_ref_count++;
	return 0;

fail:
	if (!total_ref_count) {
		int i;

		for (i = 0; i < 4; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}

	if (!--tp_event->perf_refcount) {
		free_percpu(tp_event->perf_events);
		tp_event->perf_events = NULL;
	}

	return ret;
}

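/*
 * Look up the trace event matching p_event->attr.config (holding
 * event_mutex and pinning the owning module) and initialize its
 * perf state.
 */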
int perf_trace_init(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event;
	int event_id = p_event->attr.config;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(tp_event, &ftrace_events, list) {
		if (tp_event->id == event_id && tp_event->perf_event_enable &&
		    try_module_get(tp_event->mod)) {
			ret = perf_trace_event_init(tp_event, p_event);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}

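/*
 * Called when the event is scheduled in on this CPU: add it to the
 * tracepoint's hlist for the current CPU, so the tracepoint handler
 * only visits events that are active here.
 */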
int perf_trace_enable(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	struct hlist_head *list;

	list = tp_event->perf_events;
	if (WARN_ON_ONCE(!list))
		return -EINVAL;

	list = per_cpu_ptr(list, smp_processor_id());
	hlist_add_head_rcu(&p_event->hlist_entry, list);

	return 0;
}

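/* Called when the event is scheduled out: unlink it from its per-cpu hlist. */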
void perf_trace_disable(struct perf_event *p_event)
{
	hlist_del_rcu(&p_event->hlist_entry);
}

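/*
 * Drop the tracepoint's perf reference; the last event to go away
 * disables the tracepoint, frees its per-cpu hlists and, once no
 * trace event is in use at all, the shared scratch buffers.
 */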
void perf_trace_destroy(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	int i;

	if (--tp_event->perf_refcount > 0)
		return;

	tp_event->perf_event_disable(tp_event);

	free_percpu(tp_event->perf_events);
	tp_event->perf_events = NULL;

	if (!--total_ref_count) {
		for (i = 0; i < 4; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}
}

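/*
 * Claim the per-cpu scratch buffer for the current perf recursion
 * context, zero the trailing alignment padding within @size so no
 * stack data can leak, and fill in the common trace entry header.
 * Returns NULL if recursion is detected; on success, the context
 * stored in *rctxp must be released once the entry is submitted.
 */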
__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
				       struct pt_regs *regs, int *rctxp)
{
	struct trace_entry *entry;
	char *raw_data;
	int pc;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

	pc = preempt_count();

	*rctxp = perf_swevent_get_recursion_context();
	if (*rctxp < 0)
		return NULL;

	raw_data = per_cpu_ptr(perf_trace_buf[*rctxp], smp_processor_id());

	/* zero the dead bytes from alignment to avoid leaking stack to userspace */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));

	entry = (struct trace_entry *)raw_data;
	tracing_generic_entry_update(entry, regs->flags, pc);
	entry->type = type;

	return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
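
/*
 * Illustrative sketch (not part of this file): roughly how a generated
 * perf handler for a tracepoint would drive the API above.  The event
 * name "foo", its entry struct, "proto" and the size computation are
 * hypothetical placeholders; perf_trace_buf_prepare() and
 * perf_trace_buf_submit() are the real entry points.
 *
 *	static notrace void perf_trace_foo(proto)
 *	{
 *		struct ftrace_event_call *event_call = &event_foo;
 *		struct ftrace_raw_foo *entry;
 *		struct hlist_head *head;
 *		struct pt_regs regs;
 *		int rctx;
 *
 *		perf_fetch_caller_regs(&regs);
 *
 *		entry = perf_trace_buf_prepare(size, event_call->id,
 *					       &regs, &rctx);
 *		if (!entry)
 *			return;
 *
 *		// fill in the tracepoint-specific fields of *entry
 *
 *		head = per_cpu_ptr(event_call->perf_events,
 *				   smp_processor_id());
 *		perf_trace_buf_submit(entry, size, rctx, 0, 1, &regs, head);
 *	}
 */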