tools/perf/util/evlist.c
/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include "debugfs.h"
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include <unistd.h>

#include "parse-events.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

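/*
 * Each evsel carries xyarray matrices indexed by (cpu, thread); FD yields
 * the perf event file descriptor and SID the struct perf_sample_id slot
 * for one such pair.
 */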
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
                       struct thread_map *threads)
{
        int i;

        for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
                INIT_HLIST_HEAD(&evlist->heads[i]);
        INIT_LIST_HEAD(&evlist->entries);
        perf_evlist__set_maps(evlist, cpus, threads);
        evlist->workload.pid = -1;
}

struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
                                     struct thread_map *threads)
{
        struct perf_evlist *evlist = zalloc(sizeof(*evlist));

        if (evlist != NULL)
                perf_evlist__init(evlist, cpus, threads);

        return evlist;
}

void perf_evlist__config_attrs(struct perf_evlist *evlist,
                               struct perf_record_opts *opts)
{
        struct perf_evsel *evsel, *first;

        if (evlist->cpus->map[0] < 0)
                opts->no_inherit = true;

        first = perf_evlist__first(evlist);

        list_for_each_entry(evsel, &evlist->entries, node) {
                perf_evsel__config(evsel, opts, first);

                if (evlist->nr_entries > 1)
                        evsel->attr.sample_type |= PERF_SAMPLE_ID;
        }
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
        struct perf_evsel *pos, *n;

        list_for_each_entry_safe(pos, n, &evlist->entries, node) {
                list_del_init(&pos->node);
                perf_evsel__delete(pos);
        }

        evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
        free(evlist->mmap);
        free(evlist->pollfd);
        evlist->mmap = NULL;
        evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
        perf_evlist__purge(evlist);
        perf_evlist__exit(evlist);
        free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
        list_add_tail(&entry->node, &evlist->entries);
        ++evlist->nr_entries;
}

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
                                   struct list_head *list,
                                   int nr_entries)
{
        list_splice_tail(list, &evlist->entries);
        evlist->nr_entries += nr_entries;
}

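/*
 * The first evsel on a list acts as group leader: its ->leader stays NULL
 * and every other member points back at it.
 */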
void __perf_evlist__set_leader(struct list_head *list)
{
        struct perf_evsel *evsel, *leader;

        leader = list_entry(list->next, struct perf_evsel, node);
        leader->leader = NULL;

        list_for_each_entry(evsel, list, node) {
                if (evsel != leader)
                        evsel->leader = leader;
        }
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
        if (evlist->nr_entries)
                __perf_evlist__set_leader(&evlist->entries);
}

int perf_evlist__add_default(struct perf_evlist *evlist)
{
        struct perf_event_attr attr = {
                .type = PERF_TYPE_HARDWARE,
                .config = PERF_COUNT_HW_CPU_CYCLES,
        };
        struct perf_evsel *evsel;

        event_attr_init(&attr);

        evsel = perf_evsel__new(&attr, 0);
        if (evsel == NULL)
                goto error;

        /* use strdup() because free(evsel) assumes name is allocated */
        evsel->name = strdup("cycles");
        if (!evsel->name)
                goto error_free;

        perf_evlist__add(evlist, evsel);
        return 0;
error_free:
        perf_evsel__delete(evsel);
error:
        return -ENOMEM;
}

int perf_evlist__add_attrs(struct perf_evlist *evlist,
                           struct perf_event_attr *attrs, size_t nr_attrs)
{
        struct perf_evsel *evsel, *n;
        LIST_HEAD(head);
        size_t i;

        for (i = 0; i < nr_attrs; i++) {
                evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
                if (evsel == NULL)
                        goto out_delete_partial_list;
                list_add_tail(&evsel->node, &head);
        }

        perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

        return 0;

out_delete_partial_list:
        list_for_each_entry_safe(evsel, n, &head, node)
                perf_evsel__delete(evsel);
        return -1;
}

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
                                     struct perf_event_attr *attrs, size_t nr_attrs)
{
        size_t i;

        for (i = 0; i < nr_attrs; i++)
                event_attr_init(attrs + i);

        return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}

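/*
 * Map a "sys:name" tracepoint to its numeric id by reading
 * <tracing_events_path>/sys/name/id from debugfs.
 */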
static int trace_event__id(const char *evname)
{
        char *filename, *colon;
        int err = -1, fd;

        if (asprintf(&filename, "%s/%s/id", tracing_events_path, evname) < 0)
                return -1;

        colon = strrchr(filename, ':');
        if (colon != NULL)
                *colon = '/';

        fd = open(filename, O_RDONLY);
        if (fd >= 0) {
                char id[16];
                ssize_t n = read(fd, id, sizeof(id) - 1);

                if (n > 0) {
                        id[n] = '\0'; /* NUL-terminate before atoi() */
                        err = atoi(id);
                }
                close(fd);
        }

        free(filename);
        return err;
}

int perf_evlist__add_tracepoints(struct perf_evlist *evlist,
                                 const char *tracepoints[],
                                 size_t nr_tracepoints)
{
        int err;
        size_t i;
        struct perf_event_attr *attrs = zalloc(nr_tracepoints * sizeof(*attrs));

        if (attrs == NULL)
                return -1;

        for (i = 0; i < nr_tracepoints; i++) {
                err = trace_event__id(tracepoints[i]);

                if (err < 0)
                        goto out_free_attrs;

                attrs[i].type          = PERF_TYPE_TRACEPOINT;
                attrs[i].config        = err;
                attrs[i].sample_type   = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
                                          PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD);
                attrs[i].sample_period = 1;
        }

        err = perf_evlist__add_attrs(evlist, attrs, nr_tracepoints);
out_free_attrs:
        free(attrs);
        return err;
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
        struct perf_evsel *evsel;

        list_for_each_entry(evsel, &evlist->entries, node) {
                if (evsel->attr.type   == PERF_TYPE_TRACEPOINT &&
                    (int)evsel->attr.config == id)
                        return evsel;
        }

        return NULL;
}

int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist,
                                          const struct perf_evsel_str_handler *assocs,
                                          size_t nr_assocs)
{
        struct perf_evsel *evsel;
        int err;
        size_t i;

        for (i = 0; i < nr_assocs; i++) {
                err = trace_event__id(assocs[i].name);
                if (err < 0)
                        goto out;

                evsel = perf_evlist__find_tracepoint_by_id(evlist, err);
                if (evsel == NULL)
                        continue;

                err = -EEXIST;
                if (evsel->handler.func != NULL)
                        goto out;
                evsel->handler.func = assocs[i].handler;
        }

        err = 0;
out:
        return err;
}

void perf_evlist__disable(struct perf_evlist *evlist)
{
        int cpu, thread;
        struct perf_evsel *pos;

        for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
                list_for_each_entry(pos, &evlist->entries, node) {
                        for (thread = 0; thread < evlist->threads->nr; thread++)
                                ioctl(FD(pos, cpu, thread),
                                      PERF_EVENT_IOC_DISABLE, 0);
                }
        }
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
        int cpu, thread;
        struct perf_evsel *pos;

        for (cpu = 0; cpu < cpu_map__nr(evlist->cpus); cpu++) {
                list_for_each_entry(pos, &evlist->entries, node) {
                        for (thread = 0; thread < evlist->threads->nr; thread++)
                                ioctl(FD(pos, cpu, thread),
                                      PERF_EVENT_IOC_ENABLE, 0);
                }
        }
}

static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
        int nfds = cpu_map__nr(evlist->cpus) * evlist->threads->nr * evlist->nr_entries;
        evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
        return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
        fcntl(fd, F_SETFL, O_NONBLOCK);
        evlist->pollfd[evlist->nr_fds].fd = fd;
        evlist->pollfd[evlist->nr_fds].events = POLLIN;
        evlist->nr_fds++;
}

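/*
 * Sample ids are hashed into evlist->heads so that perf_evlist__id2evsel()
 * can map the id carried in a sample back to the evsel that produced it.
 */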
static void perf_evlist__id_hash(struct perf_evlist *evlist,
                                 struct perf_evsel *evsel,
                                 int cpu, int thread, u64 id)
{
        int hash;
        struct perf_sample_id *sid = SID(evsel, cpu, thread);

        sid->id = id;
        sid->evsel = evsel;
        hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
        hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
                         int cpu, int thread, u64 id)
{
        perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
        evsel->id[evsel->ids++] = id;
}

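/*
 * With PERF_FORMAT_ID set, a read() on the event fd returns
 * { value, [time_enabled], [time_running], id }, so skip the optional
 * timing fields to reach the id.
 */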
static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
                                  struct perf_evsel *evsel,
                                  int cpu, int thread, int fd)
{
        u64 read_data[4] = { 0, };
        int id_idx = 1; /* The first entry is the counter value */

        if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
            read(fd, &read_data, sizeof(read_data)) == -1)
                return -1;

        if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                ++id_idx;
        if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                ++id_idx;

        perf_evlist__id_add(evlist, evsel, cpu, thread, read_data[id_idx]);
        return 0;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
        struct hlist_head *head;
        struct hlist_node *pos;
        struct perf_sample_id *sid;
        int hash;

        if (evlist->nr_entries == 1)
                return perf_evlist__first(evlist);

        hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
        head = &evlist->heads[hash];

        hlist_for_each_entry(sid, pos, head, node)
                if (sid->id == id)
                        return sid->evsel;

        if (!perf_evlist__sample_id_all(evlist))
                return perf_evlist__first(evlist);

        return NULL;
}

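/*
 * The kernel writes events into a power-of-2 ring buffer that starts one
 * page past md->base (the first page is the perf_event_mmap_page control
 * page holding the head/tail pointers); md->prev is our read cursor and
 * md->mask is the buffer size minus one.
 */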
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
        /* XXX Move this to perf.c, making it generally available */
        unsigned int page_size = sysconf(_SC_PAGE_SIZE);
        struct perf_mmap *md = &evlist->mmap[idx];
        unsigned int head = perf_mmap__read_head(md);
        unsigned int old = md->prev;
        unsigned char *data = md->base + page_size;
        union perf_event *event = NULL;

        if (evlist->overwrite) {
                /*
                 * If we're further behind than half the buffer, there's a chance
                 * the writer will bite our tail and mess up the samples under us.
                 *
                 * If we somehow ended up ahead of the head, we got messed up.
                 *
                 * In either case, truncate and restart at head.
                 */
                int diff = head - old;
                if (diff > md->mask / 2 || diff < 0) {
                        fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

                        /*
                         * head points to a known good entry, start there.
                         */
                        old = head;
                }
        }

        if (old != head) {
                size_t size;

                event = (union perf_event *)&data[old & md->mask];
                size = event->header.size;

                /*
                 * Event straddles the mmap boundary -- header should always
                 * be inside due to u64 alignment of output.
                 */
                if ((old & md->mask) + size != ((old + size) & md->mask)) {
                        unsigned int offset = old;
                        unsigned int len = min(sizeof(*event), size), cpy;
                        void *dst = &evlist->event_copy;

                        do {
                                cpy = min(md->mask + 1 - (offset & md->mask), len);
                                memcpy(dst, &data[offset & md->mask], cpy);
                                offset += cpy;
                                dst += cpy;
                                len -= cpy;
                        } while (len);

                        event = &evlist->event_copy;
                }

                old += size;
        }

        md->prev = old;

        if (!evlist->overwrite)
                perf_mmap__write_tail(md, old);

        return event;
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
        int i;

        for (i = 0; i < evlist->nr_mmaps; i++) {
                if (evlist->mmap[i].base != NULL) {
                        munmap(evlist->mmap[i].base, evlist->mmap_len);
                        evlist->mmap[i].base = NULL;
                }
        }

        free(evlist->mmap);
        evlist->mmap = NULL;
}

static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
        evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
        if (cpu_map__all(evlist->cpus))
                evlist->nr_mmaps = evlist->threads->nr;
        evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
        return evlist->mmap != NULL ? 0 : -ENOMEM;
}

static int __perf_evlist__mmap(struct perf_evlist *evlist,
                               int idx, int prot, int mask, int fd)
{
        evlist->mmap[idx].prev = 0;
        evlist->mmap[idx].mask = mask;
        evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
                                      MAP_SHARED, fd, 0);
        if (evlist->mmap[idx].base == MAP_FAILED) {
                evlist->mmap[idx].base = NULL;
                return -1;
        }

        perf_evlist__add_pollfd(evlist, fd);
        return 0;
}

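/*
 * Only the first event per cpu gets its own ring buffer; every other fd on
 * that cpu is redirected into it with PERF_EVENT_IOC_SET_OUTPUT, so one
 * mmap per cpu suffices no matter how many events are opened.
 */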
static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
        struct perf_evsel *evsel;
        int cpu, thread;

        for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
                int output = -1;

                for (thread = 0; thread < evlist->threads->nr; thread++) {
                        list_for_each_entry(evsel, &evlist->entries, node) {
                                int fd = FD(evsel, cpu, thread);

                                if (output == -1) {
                                        output = fd;
                                        if (__perf_evlist__mmap(evlist, cpu,
                                                                prot, mask, output) < 0)
                                                goto out_unmap;
                                } else {
                                        if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
                                                goto out_unmap;
                                }

                                if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
                                    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
                                        goto out_unmap;
                        }
                }
        }

        return 0;

out_unmap:
        for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
                if (evlist->mmap[cpu].base != NULL) {
                        munmap(evlist->mmap[cpu].base, evlist->mmap_len);
                        evlist->mmap[cpu].base = NULL;
                }
        }
        return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
        struct perf_evsel *evsel;
        int thread;

        for (thread = 0; thread < evlist->threads->nr; thread++) {
                int output = -1;

                list_for_each_entry(evsel, &evlist->entries, node) {
                        int fd = FD(evsel, 0, thread);

                        if (output == -1) {
                                output = fd;
                                if (__perf_evlist__mmap(evlist, thread,
                                                        prot, mask, output) < 0)
                                        goto out_unmap;
                        } else {
                                if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
                                        goto out_unmap;
                        }

                        if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
                            perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
                                goto out_unmap;
                }
        }

        return 0;

out_unmap:
        for (thread = 0; thread < evlist->threads->nr; thread++) {
                if (evlist->mmap[thread].base != NULL) {
                        munmap(evlist->mmap[thread].base, evlist->mmap_len);
                        evlist->mmap[thread].base = NULL;
                }
        }
        return -1;
}

/**
 * perf_evlist__mmap - Create per cpu maps to receive events
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 *      struct perf_mmap *m = &evlist->mmap[cpu];
 *      unsigned int head = perf_mmap__read_head(m);
 *
 *      perf_mmap__write_tail(m, head);
 *
 * Using perf_evlist__mmap_read() does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
                      bool overwrite)
{
        unsigned int page_size = sysconf(_SC_PAGE_SIZE);
        struct perf_evsel *evsel;
        const struct cpu_map *cpus = evlist->cpus;
        const struct thread_map *threads = evlist->threads;
        int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;

        /* 512 kiB: default amount of unprivileged mlocked memory */
        if (pages == UINT_MAX)
                pages = (512 * 1024) / page_size;
        else if (!is_power_of_2(pages))
                return -EINVAL;

        mask = pages * page_size - 1;

        if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
                return -ENOMEM;

        if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
                return -ENOMEM;

        evlist->overwrite = overwrite;
        evlist->mmap_len = (pages + 1) * page_size;

        list_for_each_entry(evsel, &evlist->entries, node) {
                if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
                    evsel->sample_id == NULL &&
                    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
                        return -ENOMEM;
        }

        if (cpu_map__all(cpus))
                return perf_evlist__mmap_per_thread(evlist, prot, mask);

        return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}
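
/*
 * Typical consumer loop (a sketch; error handling omitted and
 * process_event() is a placeholder, not a function in this file):
 *
 *      perf_evlist__mmap(evlist, opts->mmap_pages, false);
 *      while (!done) {
 *              union perf_event *event;
 *              int i;
 *
 *              poll(evlist->pollfd, evlist->nr_fds, -1);
 *              for (i = 0; i < evlist->nr_mmaps; i++)
 *                      while ((event = perf_evlist__mmap_read(evlist, i)))
 *                              process_event(event);
 *      }
 */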

int perf_evlist__create_maps(struct perf_evlist *evlist,
                             struct perf_target *target)
{
        evlist->threads = thread_map__new_str(target->pid, target->tid,
                                              target->uid);

        if (evlist->threads == NULL)
                return -1;

        if (perf_target__has_task(target))
                evlist->cpus = cpu_map__dummy_new();
        else if (!perf_target__has_cpu(target) && !target->uses_mmap)
                evlist->cpus = cpu_map__dummy_new();
        else
                evlist->cpus = cpu_map__new(target->cpu_list);

        if (evlist->cpus == NULL)
                goto out_delete_threads;

        return 0;

out_delete_threads:
        thread_map__delete(evlist->threads);
        return -1;
}

void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
        cpu_map__delete(evlist->cpus);
        thread_map__delete(evlist->threads);
        evlist->cpus    = NULL;
        evlist->threads = NULL;
}

int perf_evlist__set_filters(struct perf_evlist *evlist)
{
        const struct thread_map *threads = evlist->threads;
        const struct cpu_map *cpus = evlist->cpus;
        struct perf_evsel *evsel;
        char *filter;
        int thread;
        int cpu;
        int err;
        int fd;

        list_for_each_entry(evsel, &evlist->entries, node) {
                filter = evsel->filter;
                if (!filter)
                        continue;
                for (cpu = 0; cpu < cpus->nr; cpu++) {
                        for (thread = 0; thread < threads->nr; thread++) {
                                fd = FD(evsel, cpu, thread);
                                err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter);
                                if (err)
                                        return err;
                        }
                }
        }

        return 0;
}

bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
        struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

        list_for_each_entry_continue(pos, &evlist->entries, node) {
                if (first->attr.sample_type != pos->attr.sample_type)
                        return false;
        }

        return true;
}

u64 perf_evlist__sample_type(struct perf_evlist *evlist)
{
        struct perf_evsel *first = perf_evlist__first(evlist);
        return first->attr.sample_type;
}

u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
        struct perf_evsel *first = perf_evlist__first(evlist);
        struct perf_sample *data;
        u64 sample_type;
        u16 size = 0;

        if (!first->attr.sample_id_all)
                goto out;

        sample_type = first->attr.sample_type;

        if (sample_type & PERF_SAMPLE_TID)
                size += sizeof(data->tid) * 2;

        if (sample_type & PERF_SAMPLE_TIME)
                size += sizeof(data->time);

        if (sample_type & PERF_SAMPLE_ID)
                size += sizeof(data->id);

        if (sample_type & PERF_SAMPLE_STREAM_ID)
                size += sizeof(data->stream_id);

        if (sample_type & PERF_SAMPLE_CPU)
                size += sizeof(data->cpu) * 2;
out:
        return size;
}

bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
        struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

        list_for_each_entry_continue(pos, &evlist->entries, node) {
                if (first->attr.sample_id_all != pos->attr.sample_id_all)
                        return false;
        }

        return true;
}

bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
        struct perf_evsel *first = perf_evlist__first(evlist);
        return first->attr.sample_id_all;
}

void perf_evlist__set_selected(struct perf_evlist *evlist,
                               struct perf_evsel *evsel)
{
        evlist->selected = evsel;
}

int perf_evlist__open(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;
        int err, ncpus, nthreads;

        list_for_each_entry(evsel, &evlist->entries, node) {
                err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
                if (err < 0)
                        goto out_err;
        }

        return 0;
out_err:
        ncpus = evlist->cpus ? evlist->cpus->nr : 1;
        nthreads = evlist->threads ? evlist->threads->nr : 1;

        list_for_each_entry_reverse(evsel, &evlist->entries, node)
                perf_evsel__close(evsel, ncpus, nthreads);

        errno = -err;
        return err;
}

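/*
 * Fork the workload but keep it "corked": the child blocks reading
 * go_pipe until perf_evlist__start_workload() closes the write end, so
 * the counters can be set up before the real execvp() runs.
 */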
int perf_evlist__prepare_workload(struct perf_evlist *evlist,
                                  struct perf_record_opts *opts,
                                  const char *argv[])
{
        int child_ready_pipe[2], go_pipe[2];
        char bf;

        if (pipe(child_ready_pipe) < 0) {
                perror("failed to create 'ready' pipe");
                return -1;
        }

        if (pipe(go_pipe) < 0) {
                perror("failed to create 'go' pipe");
                goto out_close_ready_pipe;
        }

        evlist->workload.pid = fork();
        if (evlist->workload.pid < 0) {
                perror("failed to fork");
                goto out_close_pipes;
        }

        if (!evlist->workload.pid) {
                if (opts->pipe_output)
                        dup2(2, 1);

                close(child_ready_pipe[0]);
                close(go_pipe[1]);
                fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

                /*
                 * Do a dummy execvp to get the PLT entry resolved,
                 * so we avoid the resolver overhead on the real
                 * execvp call.
                 */
                execvp("", (char **)argv);

                /*
                 * Tell the parent we're ready to go
                 */
                close(child_ready_pipe[1]);

                /*
                 * Wait until the parent tells us to go.
                 */
                if (read(go_pipe[0], &bf, 1) == -1)
                        perror("unable to read pipe");

                execvp(argv[0], (char **)argv);

                perror(argv[0]);
                kill(getppid(), SIGUSR1);
                exit(-1);
        }

        if (perf_target__none(&opts->target))
                evlist->threads->map[0] = evlist->workload.pid;

        close(child_ready_pipe[1]);
        close(go_pipe[0]);
        /*
         * wait for child to settle
         */
        if (read(child_ready_pipe[0], &bf, 1) == -1) {
                perror("unable to read pipe");
                goto out_close_pipes;
        }

        evlist->workload.cork_fd = go_pipe[1];
        close(child_ready_pipe[0]);
        return 0;

out_close_pipes:
        close(go_pipe[0]);
        close(go_pipe[1]);
out_close_ready_pipe:
        close(child_ready_pipe[0]);
        close(child_ready_pipe[1]);
        return -1;
}

int perf_evlist__start_workload(struct perf_evlist *evlist)
{
        if (evlist->workload.cork_fd > 0) {
                /*
                 * Remove the cork, let it rip!
                 */
                return close(evlist->workload.cork_fd);
        }

        return 0;
}

int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
                              struct perf_sample *sample)
{
        struct perf_evsel *evsel = perf_evlist__first(evlist);
        return perf_evsel__parse_sample(evsel, event, sample);
}

size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
        struct perf_evsel *evsel;
        size_t printed = 0;

        list_for_each_entry(evsel, &evlist->entries, node) {
                printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
                                   perf_evsel__name(evsel));
        }

        return printed + fprintf(fp, "\n");
}