tools/perf/util/evsel.c
/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include "evsel.h"
#include "evlist.h"
#include "util.h"
#include "cpumap.h"
#include "thread_map.h"

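/*
 * Each evsel keeps a 2D array of file descriptors, one per (cpu, thread)
 * pair being counted; FD() is the accessor for one slot in that xyarray.
 */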
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))

void perf_evsel__init(struct perf_evsel *evsel,
                      struct perf_event_attr *attr, int idx)
{
        evsel->idx         = idx;
        evsel->attr        = *attr;
        INIT_LIST_HEAD(&evsel->node);
}

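/*
 * Allocate and initialize one evsel. A minimal usage sketch (real callers
 * live in the builtin-*.c tools; the attr setup here is illustrative):
 *
 *      struct perf_event_attr attr = { .type = PERF_TYPE_HARDWARE, };
 *      struct perf_evsel *evsel = perf_evsel__new(&attr, 0);
 *
 *      if (evsel == NULL)
 *              return -ENOMEM;
 */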
struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
{
        struct perf_evsel *evsel = zalloc(sizeof(*evsel));

        if (evsel != NULL)
                perf_evsel__init(evsel, attr, idx);

        return evsel;
}

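/*
 * Allocation helpers: fd and id are ncpus x nthreads matrices, while
 * counts is one header struct followed by a perf_counts_values slot per
 * cpu. All three are created on demand by their users; fd and id are
 * released again in perf_evsel__exit() below.
 */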
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
        evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
        return evsel->fd != NULL ? 0 : -ENOMEM;
}

int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
        evsel->id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
        return evsel->id != NULL ? 0 : -ENOMEM;
}

int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
{
        evsel->counts = zalloc((sizeof(*evsel->counts) +
                                (ncpus * sizeof(struct perf_counts_values))));
        return evsel->counts != NULL ? 0 : -ENOMEM;
}

void perf_evsel__free_fd(struct perf_evsel *evsel)
{
        xyarray__delete(evsel->fd);
        evsel->fd = NULL;
}

void perf_evsel__free_id(struct perf_evsel *evsel)
{
        xyarray__delete(evsel->id);
        evsel->id = NULL;
}

void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
        int cpu, thread;

        for (cpu = 0; cpu < ncpus; cpu++)
                for (thread = 0; thread < nthreads; ++thread) {
                        close(FD(evsel, cpu, thread));
                        FD(evsel, cpu, thread) = -1;
                }
}

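/*
 * perf_evsel__exit() releases what this file allocated but leaves the
 * evsel itself alone, so it can be called on evsels embedded in other
 * structures; perf_evsel__delete() additionally drops the cgroup
 * reference and frees the evsel.
 */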
void perf_evsel__exit(struct perf_evsel *evsel)
{
        assert(list_empty(&evsel->node));
        xyarray__delete(evsel->fd);
        xyarray__delete(evsel->id);
}

void perf_evsel__delete(struct perf_evsel *evsel)
{
        perf_evsel__exit(evsel);
        close_cgroup(evsel->cgrp);
        free(evsel);
}

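/*
 * Read one value for (cpu, thread). With scale set, this assumes the
 * counter was opened with PERF_FORMAT_TOTAL_TIME_ENABLED and
 * PERF_FORMAT_TOTAL_TIME_RUNNING, so a read returns { value, enabled,
 * running } and a counter that was scheduled in for only part of the
 * time is scaled up by ena/run to estimate the full-time value.
 */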
int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
                              int cpu, int thread, bool scale)
{
        struct perf_counts_values count;
        size_t nv = scale ? 3 : 1;

        if (FD(evsel, cpu, thread) < 0)
                return -EINVAL;

        if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
                return -ENOMEM;

        if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
                return -errno;

        if (scale) {
                if (count.run == 0)
                        count.val = 0;
                else if (count.run < count.ena)
                        count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
        } else
                count.ena = count.run = 0;

        evsel->counts->cpu[cpu] = count;
        return 0;
}

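/*
 * Sum the counter over all (cpu, thread) pairs into counts->aggr and
 * scale the total once at the end. counts->scaled ends up 0 when no
 * scaling was needed, 1 when the value was scaled, and -1 when the
 * counter never ran.
 */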
int __perf_evsel__read(struct perf_evsel *evsel,
                       int ncpus, int nthreads, bool scale)
{
        size_t nv = scale ? 3 : 1;
        int cpu, thread;
        struct perf_counts_values *aggr = &evsel->counts->aggr, count;

        aggr->val = aggr->ena = aggr->run = 0;

        for (cpu = 0; cpu < ncpus; cpu++) {
                for (thread = 0; thread < nthreads; thread++) {
                        if (FD(evsel, cpu, thread) < 0)
                                continue;

                        if (readn(FD(evsel, cpu, thread),
                                  &count, nv * sizeof(u64)) < 0)
                                return -errno;

                        aggr->val += count.val;
                        if (scale) {
                                aggr->ena += count.ena;
                                aggr->run += count.run;
                        }
                }
        }

        evsel->counts->scaled = 0;
        if (scale) {
                if (aggr->run == 0) {
                        evsel->counts->scaled = -1;
                        aggr->val = 0;
                        return 0;
                }

                if (aggr->run < aggr->ena) {
                        evsel->counts->scaled = 1;
                        aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
                }
        } else
                aggr->ena = aggr->run = 0;

        return 0;
}

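/*
 * Open one perf event per (cpu, thread) pair. With group set, the first
 * fd opened on each cpu becomes the group leader for the remaining
 * threads on that cpu. When the evsel is bound to a cgroup, the cgroup
 * fd is passed in place of a pid together with PERF_FLAG_PID_CGROUP. On
 * any failure, every fd opened so far is closed again.
 */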
static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
                              struct thread_map *threads, bool group, bool inherit)
{
        int cpu, thread;
        unsigned long flags = 0;
        int pid = -1;

        if (evsel->fd == NULL &&
            perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
                return -1;

        if (evsel->cgrp) {
                flags = PERF_FLAG_PID_CGROUP;
                pid = evsel->cgrp->fd;
        }

        for (cpu = 0; cpu < cpus->nr; cpu++) {
                int group_fd = -1;

                evsel->attr.inherit = (cpus->map[cpu] < 0) && inherit;

                for (thread = 0; thread < threads->nr; thread++) {

                        if (!evsel->cgrp)
                                pid = threads->map[thread];

                        FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
                                                                     pid,
                                                                     cpus->map[cpu],
                                                                     group_fd, flags);
                        if (FD(evsel, cpu, thread) < 0)
                                goto out_close;

                        if (group && group_fd == -1)
                                group_fd = FD(evsel, cpu, thread);
                }
        }

        return 0;

out_close:
        do {
                while (--thread >= 0) {
                        close(FD(evsel, cpu, thread));
                        FD(evsel, cpu, thread) = -1;
                }
                thread = threads->nr;
        } while (--cpu >= 0);
        return -1;
}

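/*
 * Dummy single-entry maps for callers that pass no cpu or thread map:
 * in sys_perf_event_open(), cpu -1 means "this pid on any cpu" and
 * pid -1 with a real cpu means "all threads on that cpu".
 */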
static struct {
        struct cpu_map map;
        int cpus[1];
} empty_cpu_map = {
        .map.nr = 1,
        .cpus   = { -1, },
};

static struct {
        struct thread_map map;
        int threads[1];
} empty_thread_map = {
        .map.nr  = 1,
        .threads = { -1, },
};

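/*
 * Convenience wrappers around __perf_evsel__open(). A per-thread open
 * might look like this (a sketch; thread map creation and error
 * handling are simplified):
 *
 *      struct thread_map *threads = thread_map__new(-1, getpid());
 *
 *      if (perf_evsel__open_per_thread(evsel, threads, false, false) < 0)
 *              die("cannot open counter");
 */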
int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
                     struct thread_map *threads, bool group, bool inherit)
{
        if (cpus == NULL) {
                /* Work around old compiler warnings about strict aliasing */
                cpus = &empty_cpu_map.map;
        }

        if (threads == NULL)
                threads = &empty_thread_map.map;

        return __perf_evsel__open(evsel, cpus, threads, group, inherit);
}

int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
                             struct cpu_map *cpus, bool group, bool inherit)
{
        return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group, inherit);
}

int perf_evsel__open_per_thread(struct perf_evsel *evsel,
                                struct thread_map *threads, bool group, bool inherit)
{
        return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group, inherit);
}

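/*
 * For non-sample records carrying sample_id_all data, the id fields are
 * appended at the end of the record in the same order as in a sample,
 * so they are parsed backwards starting from the last u64 of the event.
 */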
static int perf_event__parse_id_sample(const union perf_event *event, u64 type,
                                       struct perf_sample *sample)
{
        const u64 *array = event->sample.array;

        array += ((event->header.size -
                   sizeof(event->header)) / sizeof(u64)) - 1;

        if (type & PERF_SAMPLE_CPU) {
                u32 *p = (u32 *)array;
                sample->cpu = *p;
                array--;
        }

        if (type & PERF_SAMPLE_STREAM_ID) {
                sample->stream_id = *array;
                array--;
        }

        if (type & PERF_SAMPLE_ID) {
                sample->id = *array;
                array--;
        }

        if (type & PERF_SAMPLE_TIME) {
                sample->time = *array;
                array--;
        }

        if (type & PERF_SAMPLE_TID) {
                u32 *p = (u32 *)array;
                sample->pid = p[0];
                sample->tid = p[1];
        }

        return 0;
}

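/*
 * Parse a PERF_RECORD_SAMPLE according to the fields selected in
 * attr.sample_type. The fields appear in the record in the fixed order
 * defined by the perf ABI, so each test below advances the cursor past
 * the field it consumed.
 */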
int perf_event__parse_sample(const union perf_event *event, u64 type,
                             bool sample_id_all, struct perf_sample *data)
{
        const u64 *array;

        data->cpu = data->pid = data->tid = -1;
        data->stream_id = data->id = data->time = -1ULL;

        if (event->header.type != PERF_RECORD_SAMPLE) {
                if (!sample_id_all)
                        return 0;
                return perf_event__parse_id_sample(event, type, data);
        }

        array = event->sample.array;

        if (type & PERF_SAMPLE_IP) {
                data->ip = event->ip.ip;
                array++;
        }

        if (type & PERF_SAMPLE_TID) {
                u32 *p = (u32 *)array;
                data->pid = p[0];
                data->tid = p[1];
                array++;
        }

        if (type & PERF_SAMPLE_TIME) {
                data->time = *array;
                array++;
        }

        if (type & PERF_SAMPLE_ADDR) {
                data->addr = *array;
                array++;
        }

        if (type & PERF_SAMPLE_ID) {
                data->id = *array;
                array++;
        }

        if (type & PERF_SAMPLE_STREAM_ID) {
                data->stream_id = *array;
                array++;
        }

        if (type & PERF_SAMPLE_CPU) {
                u32 *p = (u32 *)array;
                data->cpu = *p;
                array++;
        }

        if (type & PERF_SAMPLE_PERIOD) {
                data->period = *array;
                array++;
        }

        if (type & PERF_SAMPLE_READ) {
                fprintf(stderr, "PERF_SAMPLE_READ is unsupported for now\n");
                return -1;
        }

        if (type & PERF_SAMPLE_CALLCHAIN) {
                data->callchain = (struct ip_callchain *)array;
                array += 1 + data->callchain->nr;
        }

        if (type & PERF_SAMPLE_RAW) {
                u32 *p = (u32 *)array;
                data->raw_size = *p;
                p++;
                data->raw_data = p;
        }

        return 0;
}