/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <byteswap.h>
#include <linux/bitops.h>
#include "event-parse.h"
#include "evsel.h"
#include "util.h"
#include "cpumap.h"
#include "thread_map.h"
#include "../../../include/linux/hw_breakpoint.h"
#include "../../include/linux/perf_event.h"
#include "perf_regs.h"
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
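/*
 * evsel->fd is an xyarray indexed by (cpu, thread): each cell holds the
 * perf_event_open() file descriptor for that cpu/thread pair, or -1 when
 * the event is not (yet) open there. FD() is just the lvalue accessor.
 */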
static int __perf_evsel__sample_size(u64 sample_type)
{
	u64 mask = sample_type & PERF_SAMPLE_MASK;
	int i, size = 0;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i))
			size++;
	}

	return size * sizeof(u64);
}
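/*
 * Example (sketch): for sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 * PERF_SAMPLE_TIME, three bits of PERF_SAMPLE_MASK are set, so the fixed
 * part of each sample is 3 * sizeof(u64) = 24 bytes.
 */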
void hists__init(struct hists *hists)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
}
void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx)
{
	evsel->idx  = idx;
	evsel->attr = *attr;
	INIT_LIST_HEAD(&evsel->node);
	hists__init(&evsel->hists);
	evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
}
struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL)
		perf_evsel__init(evsel, attr, idx);

	return evsel;
}
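/*
 * Usage sketch (illustrative, not part of the original file): allocate an
 * evsel counting hardware cycles. event_attr_init() is assumed to fill in
 * the usual defaults before the attr is handed over.
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_HARDWARE,
 *		.config	= PERF_COUNT_HW_CPU_CYCLES,
 *	};
 *
 *	event_attr_init(&attr);
 *	evsel = perf_evsel__new(&attr, 0);
 */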
static struct event_format *event_format__new(const char *sys, const char *name)
{
	int fd, n;
	char *filename;
	void *bf = NULL, *nbf;
	size_t size = 0, alloc_size = 0;
	struct event_format *format = NULL;

	if (asprintf(&filename, "%s/%s/%s/format", tracing_events_path, sys, name) < 0)
		return NULL;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		goto out_free_filename;

	do {
		if (size == alloc_size) {
			alloc_size += BUFSIZ;
			nbf = realloc(bf, alloc_size);
			if (nbf == NULL)
				goto out_free_bf;
			bf = nbf;
		}

		n = read(fd, bf + size, BUFSIZ);
		if (n < 0)
			goto out_free_bf;
		size += n;
	} while (n > 0);

	pevent_parse_format(&format, bf, size, sys);

out_free_bf:
	free(bf);
	close(fd);
out_free_filename:
	free(filename);
	return format;
}
struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name, int idx)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL) {
		struct perf_event_attr attr = {
			.type	     = PERF_TYPE_TRACEPOINT,
			.sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
		};

		evsel->tp_format = event_format__new(sys, name);
		if (evsel->tp_format == NULL)
			goto out_free;

		event_attr_init(&attr);
		attr.config = evsel->tp_format->id;
		attr.sample_period = 1;
		perf_evsel__init(evsel, &attr, idx);
		evsel->name = evsel->tp_format->name;
	}

	return evsel;

out_free:
	free(evsel);
	return NULL;
}
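/*
 * Usage sketch (illustrative): hook up the sched:sched_switch tracepoint,
 * assuming debugfs is mounted so that event_format__new() can read
 * <tracing_events_path>/sched/sched_switch/format.
 *
 *	struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch", 0);
 *	if (evsel == NULL)
 *		return -ENOMEM;
 */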
const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
	"cycles",
	"instructions",
	"cache-references",
	"cache-misses",
	"branches",
	"branch-misses",
	"bus-cycles",
	"stalled-cycles-frontend",
	"stalled-cycles-backend",
	"ref-cycles",
};

static const char *__perf_evsel__hw_name(u64 config)
{
	if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
		return perf_evsel__hw_names[config];

	return "unknown-hardware";
}
static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)
{
	int colon = 0, r = 0;
	struct perf_event_attr *attr = &evsel->attr;
	bool exclude_guest_default = false;

#define MOD_PRINT(context, mod)	do {					\
		if (!attr->exclude_##context) {				\
			if (!colon) colon = ++r;			\
			r += scnprintf(bf + r, size - r, "%c", mod);	\
		} } while (0)

	if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
		MOD_PRINT(kernel, 'k');
		MOD_PRINT(user, 'u');
		MOD_PRINT(hv, 'h');
		exclude_guest_default = true;
	}

	if (attr->precise_ip) {
		if (!colon)
			colon = ++r;
		r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
		exclude_guest_default = true;
	}

	if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
		MOD_PRINT(host, 'H');
		MOD_PRINT(guest, 'G');
	}
#undef MOD_PRINT
	if (colon)
		bf[colon - 1] = ':';
	return r;
}
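/*
 * Example (sketch): for an attr with exclude_kernel = exclude_hv = 1 and
 * exclude_user = 0, MOD_PRINT appends only 'u', turning e.g. "cycles" into
 * "cycles:u"; adding precise_ip = 2 on top of that yields "cycles:upp".
 */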
static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));

	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}
const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
	"cpu-clock",
	"task-clock",
	"page-faults",
	"context-switches",
	"cpu-migrations",
	"minor-faults",
	"major-faults",
	"alignment-faults",
	"emulation-faults",
};

static const char *__perf_evsel__sw_name(u64 config)
{
	if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
		return perf_evsel__sw_names[config];

	return "unknown-software";
}
static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));

	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}
static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
{
	int r;

	r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);

	if (type & HW_BREAKPOINT_R)
		r += scnprintf(bf + r, size - r, "r");

	if (type & HW_BREAKPOINT_W)
		r += scnprintf(bf + r, size - r, "w");

	if (type & HW_BREAKPOINT_X)
		r += scnprintf(bf + r, size - r, "x");

	return r;
}
static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	struct perf_event_attr *attr = &evsel->attr;
	int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);

	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}
const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
				[PERF_EVSEL__MAX_ALIASES] = {
	{ "L1-dcache",	"l1-d",		"l1d",		"L1-data",		},
	{ "L1-icache",	"l1-i",		"l1i",		"L1-instruction",	},
	{ "LLC",	"L2",							},
	{ "dTLB",	"d-tlb",	"Data-TLB",				},
	{ "iTLB",	"i-tlb",	"Instruction-TLB",			},
	{ "branch",	"branches",	"bpu",		"btb",	"bpc",		},
	{ "node",								},
};
const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
				   [PERF_EVSEL__MAX_ALIASES] = {
	{ "load",	"loads",	"read",						},
	{ "store",	"stores",	"write",					},
	{ "prefetch",	"prefetches",	"speculative-read",	"speculative-load",	},
};
const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
				       [PERF_EVSEL__MAX_ALIASES] = {
	{ "refs",	"Reference",	"ops",	"access",	},
	{ "misses",	"miss",					},
};
#define C(x)		PERF_COUNT_HW_CACHE_##x
#define CACHE_READ	(1 << C(OP_READ))
#define CACHE_WRITE	(1 << C(OP_WRITE))
#define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
#define COP(x)		(1 << x)
/*
 * cache operation stat
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 */
static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
	[C(L1D)]  = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
	[C(L1I)]  = (CACHE_READ | CACHE_PREFETCH),
	[C(LL)]   = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
	[C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
	[C(ITLB)] = (CACHE_READ),
	[C(BPU)]  = (CACHE_READ),
	[C(NODE)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
};
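/*
 * Example (sketch): perf_evsel__is_cache_op_valid(C(L1I), C(OP_WRITE))
 * returns false, since the L1I mask above only has CACHE_READ and
 * CACHE_PREFETCH set; the same check for C(L1D) returns true.
 */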
bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
{
	if (perf_evsel__hw_cache_stat[type] & COP(op))
		return true;	/* valid */
	else
		return false;	/* invalid */
}
int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
					    char *bf, size_t size)
{
	if (result) {
		return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
				 perf_evsel__hw_cache_op[op][0],
				 perf_evsel__hw_cache_result[result][0]);
	}

	return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
			 perf_evsel__hw_cache_op[op][1]);
}
static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
{
	u8 op, result, type = (config >>  0) & 0xff;
	const char *err = "unknown-ext-hardware-cache-type";

	if (type >= PERF_COUNT_HW_CACHE_MAX)
		goto out_err;

	op = (config >>  8) & 0xff;
	err = "unknown-ext-hardware-cache-op";
	if (op >= PERF_COUNT_HW_CACHE_OP_MAX)
		goto out_err;

	result = (config >> 16) & 0xff;
	err = "unknown-ext-hardware-cache-result";
	if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		goto out_err;

	err = "invalid-cache";
	if (!perf_evsel__is_cache_op_valid(type, op))
		goto out_err;

	return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
out_err:
	return scnprintf(bf, size, "%s", err);
}
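/*
 * Worked example (sketch): config encodes type | (op << 8) | (result << 16).
 * For config = 0x10000: type = L1D, op = READ, result = MISS, so the name
 * comes out as "L1-dcache-load-misses"; config = 0x0 (result = ACCESS)
 * instead takes the two-part form and yields "L1-dcache-loads".
 */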
static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size);

	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}
static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);

	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}
const char *perf_evsel__name(struct perf_evsel *evsel)
{
	char bf[128];

	if (evsel->name)
		return evsel->name;

	switch (evsel->attr.type) {
	case PERF_TYPE_RAW:
		perf_evsel__raw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HARDWARE:
		perf_evsel__hw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HW_CACHE:
		perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_SOFTWARE:
		perf_evsel__sw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_TRACEPOINT:
		scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
		break;

	case PERF_TYPE_BREAKPOINT:
		perf_evsel__bp_name(evsel, bf, sizeof(bf));
		break;

	default:
		scnprintf(bf, sizeof(bf), "unknown attr type: %d",
			  evsel->attr.type);
		break;
	}

	evsel->name = strdup(bf);

	return evsel->name ?: "unknown";
}
void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts,
			struct perf_evsel *first)
{
	struct perf_event_attr *attr = &evsel->attr;
	int track = !evsel->idx; /* only the first counter needs these */

	attr->sample_id_all = opts->sample_id_all_missing ? 0 : 1;
	attr->inherit	    = !opts->no_inherit;
	attr->read_format   = PERF_FORMAT_TOTAL_TIME_ENABLED |
			      PERF_FORMAT_TOTAL_TIME_RUNNING |
			      PERF_FORMAT_ID;

	attr->sample_type  |= PERF_SAMPLE_IP | PERF_SAMPLE_TID;

	/*
	 * We default some events to a 1 default interval. But keep
	 * it a weak assumption overridable by the user.
	 */
	if (!attr->sample_period || (opts->user_freq != UINT_MAX &&
				     opts->user_interval != ULLONG_MAX)) {
		if (opts->freq) {
			attr->sample_type	|= PERF_SAMPLE_PERIOD;
			attr->freq		= 1;
			attr->sample_freq	= opts->freq;
		} else {
			attr->sample_period = opts->default_interval;
		}
	}

	if (opts->no_samples)
		attr->sample_freq = 0;

	if (opts->inherit_stat)
		attr->inherit_stat = 1;

	if (opts->sample_address) {
		attr->sample_type	|= PERF_SAMPLE_ADDR;
		attr->mmap_data = track;
	}

	if (opts->call_graph) {
		attr->sample_type	|= PERF_SAMPLE_CALLCHAIN;

		if (opts->call_graph == CALLCHAIN_DWARF) {
			attr->sample_type |= PERF_SAMPLE_REGS_USER |
					     PERF_SAMPLE_STACK_USER;
			attr->sample_regs_user = PERF_REGS_MASK;
			attr->sample_stack_user = opts->stack_dump_size;
			attr->exclude_callchain_user = 1;
		}
	}

	if (perf_target__has_cpu(&opts->target))
		attr->sample_type	|= PERF_SAMPLE_CPU;

	if (opts->period)
		attr->sample_type	|= PERF_SAMPLE_PERIOD;

	if (!opts->sample_id_all_missing &&
	    (opts->sample_time || !opts->no_inherit ||
	     perf_target__has_cpu(&opts->target)))
		attr->sample_type	|= PERF_SAMPLE_TIME;

	if (opts->raw_samples) {
		attr->sample_type	|= PERF_SAMPLE_TIME;
		attr->sample_type	|= PERF_SAMPLE_RAW;
		attr->sample_type	|= PERF_SAMPLE_CPU;
	}

	if (opts->no_delay) {
		attr->watermark = 0;
		attr->wakeup_events = 1;
	}

	if (opts->branch_stack) {
		attr->sample_type	|= PERF_SAMPLE_BRANCH_STACK;
		attr->branch_sample_type = opts->branch_stack;
	}

	attr->mmap = track;
	attr->comm = track;

	if (perf_target__none(&opts->target) &&
	    (!opts->group || evsel == first)) {
		attr->enable_on_exec = 1;
	}
}
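/*
 * Example (sketch): with typical record options (opts->freq = 4000,
 * user_freq == UINT_MAX, attr->sample_period == 0), the interval block
 * above switches the event into frequency mode: attr->freq = 1,
 * attr->sample_freq = 4000, and PERF_SAMPLE_PERIOD is added so every
 * sample carries the period the kernel derived for it.
 */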
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

	if (evsel->fd) {
		for (cpu = 0; cpu < ncpus; cpu++) {
			for (thread = 0; thread < nthreads; thread++) {
				FD(evsel, cpu, thread) = -1;
			}
		}
	}

	return evsel->fd != NULL ? 0 : -ENOMEM;
}
int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
			   const char *filter)
{
	int cpu, thread;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			int fd = FD(evsel, cpu, thread),
			    err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter);

			if (err)
				return err;
		}
	}

	return 0;
}
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}
int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
{
	evsel->counts = zalloc((sizeof(*evsel->counts) +
				(ncpus * sizeof(struct perf_counts_values))));
	return evsel->counts != NULL ? 0 : -ENOMEM;
}
void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}
void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	free(evsel->id);
	evsel->id = NULL;
}
void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	for (cpu = 0; cpu < ncpus; cpu++)
		for (thread = 0; thread < nthreads; ++thread) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
}
void perf_evsel__exit(struct perf_evsel *evsel)
{
	assert(list_empty(&evsel->node));
	xyarray__delete(evsel->fd);
	xyarray__delete(evsel->sample_id);
	free(evsel->id);
}
void perf_evsel__delete(struct perf_evsel *evsel)
{
	perf_evsel__exit(evsel);
	close_cgroup(evsel->cgrp);
	free(evsel->group_name);
	if (evsel->tp_format && evsel->name == evsel->tp_format->name) {
		evsel->name = NULL;	/* owned by the tracepoint format */
		pevent_free_format(evsel->tp_format);
	}
	free(evsel->name);
	free(evsel);
}
int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
		return -errno;

	if (scale) {
		if (count.run == 0)
			count.val = 0;
		else if (count.run < count.ena)
			count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
	} else
		count.ena = count.run = 0;

	evsel->counts->cpu[cpu] = count;
	return 0;
}
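/*
 * Worked example (sketch): if the counter was scheduled on the PMU for only
 * half of the time it was enabled, say val = 1000, ena = 200, run = 100,
 * the scaling above estimates val * ena / run + 0.5 = 2000, i.e. what the
 * count would have been with exclusive access to the hardware.
 */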
int __perf_evsel__read(struct perf_evsel *evsel,
		       int ncpus, int nthreads, bool scale)
{
	size_t nv = scale ? 3 : 1;
	int cpu, thread;
	struct perf_counts_values *aggr = &evsel->counts->aggr, count;

	aggr->val = aggr->ena = aggr->run = 0;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			if (FD(evsel, cpu, thread) < 0)
				continue;

			if (readn(FD(evsel, cpu, thread),
				  &count, nv * sizeof(u64)) < 0)
				return -errno;

			aggr->val += count.val;
			if (scale) {
				aggr->ena += count.ena;
				aggr->run += count.run;
			}
		}
	}

	evsel->counts->scaled = 0;
	if (scale) {
		if (aggr->run == 0) {
			evsel->counts->scaled = -1;
			aggr->val = 0;
			return 0;
		}

		if (aggr->run < aggr->ena) {
			evsel->counts->scaled = 1;
			aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
		}
	} else
		aggr->ena = aggr->run = 0;

	return 0;
}
static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)
{
	struct perf_evsel *leader = evsel->leader;
	int fd;

	if (!leader)
		return -1;

	/*
	 * Leader must be already processed/open,
	 * if not it's a bug.
	 */
	BUG_ON(!leader->fd);

	fd = FD(leader, cpu, thread);
	BUG_ON(fd == -1);

	return fd;
}
static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
			      struct thread_map *threads)
{
	int cpu, thread;
	unsigned long flags = 0;
	int pid = -1, err;

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
		return -ENOMEM;

	if (evsel->cgrp) {
		flags = PERF_FLAG_PID_CGROUP;
		pid = evsel->cgrp->fd;
	}

	for (cpu = 0; cpu < cpus->nr; cpu++) {
		for (thread = 0; thread < threads->nr; thread++) {
			int group_fd;

			if (!evsel->cgrp)
				pid = threads->map[thread];

			group_fd = get_group_fd(evsel, cpu, thread);

			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
								     pid,
								     cpus->map[cpu],
								     group_fd, flags);
			if (FD(evsel, cpu, thread) < 0) {
				err = -errno;
				goto out_close;
			}
		}
	}

	return 0;

out_close:
	do {
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
		thread = threads->nr;
	} while (--cpu >= 0);
	return err;
}
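/*
 * Usage sketch (illustrative): open an evsel on every online cpu for one
 * thread, assuming cpu_map__new(NULL) builds a map of all online cpus and
 * thread_map__new_by_tid() wraps a single tid.
 *
 *	struct cpu_map *cpus = cpu_map__new(NULL);
 *	struct thread_map *threads = thread_map__new_by_tid(getpid());
 *
 *	if (perf_evsel__open(evsel, cpus, threads) < 0)
 *		return -1;
 */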
void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd(evsel, ncpus, nthreads);
	perf_evsel__free_fd(evsel);
}
static struct {
	struct cpu_map map;
	int cpus[1];
} empty_cpu_map = {
	.map.nr	= 1,
	.cpus	= { -1, },
};

static struct {
	struct thread_map map;
	int threads[1];
} empty_thread_map = {
	.map.nr	 = 1,
	.threads = { -1, },
};
int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads)
{
	if (cpus == NULL) {
		/* Work around old compiler warnings about strict aliasing */
		cpus = &empty_cpu_map.map;
	}

	if (threads == NULL)
		threads = &empty_thread_map.map;

	return __perf_evsel__open(evsel, cpus, threads);
}
int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus)
{
	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map);
}

int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads)
{
	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads);
}
static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
				       const union perf_event *event,
				       struct perf_sample *sample)
{
	u64 type = evsel->attr.sample_type;
	const u64 *array = event->sample.array;
	bool swapped = evsel->needs_swap;
	union u64_swap u;

	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		sample->cpu = u.val32[0];
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		sample->pid = u.val32[0];
		sample->tid = u.val32[1];
	}

	return 0;
}
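/*
 * Note on the backward walk above (sketch): with sample_id_all set, the
 * kernel appends the selected id fields to the *end* of non-sample records,
 * in the order TID, TIME, ID, STREAM_ID, CPU. Starting from the last u64 of
 * the record and decrementing therefore visits CPU first and TID last,
 * matching the order of the if-blocks above.
 */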
static bool sample_overlap(const union perf_event *event,
			   const void *offset, u64 size)
{
	const void *base = event;

	if (offset + size > base + event->header.size)
		return true;

	return false;
}
int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
			     struct perf_sample *data)
{
	u64 type = evsel->attr.sample_type;
	u64 regs_user = evsel->attr.sample_regs_user;
	bool swapped = evsel->needs_swap;
	const u64 *array;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	memset(data, 0, sizeof(*data));
	data->cpu = data->pid = data->tid = -1;
	data->stream_id = data->id = data->time = -1ULL;
	data->period = 1;

	if (event->header.type != PERF_RECORD_SAMPLE) {
		if (!evsel->attr.sample_id_all)
			return 0;
		return perf_evsel__parse_id_sample(evsel, event, data);
	}

	array = event->sample.array;

	if (evsel->sample_size + sizeof(event->header) > event->header.size)
		return -EFAULT;

	if (type & PERF_SAMPLE_IP) {
		data->ip = event->ip.ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		data->pid = u.val32[0];
		data->tid = u.val32[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		data->cpu = u.val32[0];
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		fprintf(stderr, "PERF_SAMPLE_READ is unsupported for now\n");
		return -1;
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		if (sample_overlap(event, array, sizeof(data->callchain->nr)))
			return -EFAULT;

		data->callchain = (struct ip_callchain *)array;

		if (sample_overlap(event, array, data->callchain->nr))
			return -EFAULT;

		array += 1 + data->callchain->nr;
	}

	if (type & PERF_SAMPLE_RAW) {
		const u64 *pdata;

		u.val64 = *array;
		if (WARN_ONCE(swapped,
			      "Endianness of raw data not corrected!\n")) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		if (sample_overlap(event, array, sizeof(u32)))
			return -EFAULT;

		data->raw_size = u.val32[0];
		pdata = (void *) array + sizeof(u32);

		if (sample_overlap(event, pdata, data->raw_size))
			return -EFAULT;

		data->raw_data = (void *) pdata;

		array = (void *)array + data->raw_size + sizeof(u32);
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		u64 sz;

		data->branch_stack = (struct branch_stack *)array;
		array++; /* the nr field itself */

		sz = data->branch_stack->nr * sizeof(struct branch_entry);
		sz /= sizeof(u64);
		array += sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		/* First u64 tells us if we have any regs in sample. */
		u64 avail = *array++;

		if (avail) {
			data->user_regs.regs = (u64 *)array;
			array += hweight_long(regs_user);
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		u64 size = *array++;

		data->user_stack.offset = ((char *)(array - 1)
					  - (char *) event);

		if (!size) {
			data->user_stack.size = 0;
		} else {
			data->user_stack.data = (char *)array;
			array += size / sizeof(*array);
			data->user_stack.size = *array;
		}
	}

	return 0;
}
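/*
 * Usage sketch (illustrative): decode one mmap'ed event into a perf_sample
 * before handing it to session code.
 *
 *	struct perf_sample sample;
 *
 *	if (perf_evsel__parse_sample(evsel, event, &sample) < 0)
 *		return -EFAULT;
 *	printf("ip %#" PRIx64 " pid %d\n", sample.ip, sample.pid);
 */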
int perf_event__synthesize_sample(union perf_event *event, u64 type,
				  const struct perf_sample *sample,
				  bool swapped)
{
	u64 *array;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	array = event->sample.array;

	if (type & PERF_SAMPLE_IP) {
		event->ip.ip = sample->ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val32[0] = sample->pid;
		u.val32[1] = sample->tid;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
			u.val64 = bswap_64(u.val64);
		}

		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		*array = sample->time;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		*array = sample->addr;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		*array = sample->stream_id;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val32[0] = sample->cpu;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val64 = bswap_64(u.val64);
		}
		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		*array = sample->period;
		array++;
	}

	return 0;
}
struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name)
{
	return pevent_find_field(evsel->tp_format, name);
}
char *perf_evsel__strval(struct perf_evsel *evsel, struct perf_sample *sample,
			 const char *name)
{
	struct format_field *field = perf_evsel__field(evsel, name);
	int offset;

	if (!field)
		return NULL;

	offset = field->offset;

	if (field->flags & FIELD_IS_DYNAMIC) {
		offset = *(int *)(sample->raw_data + field->offset);
		offset &= 0xffff;
	}

	return sample->raw_data + offset;
}
u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
		       const char *name)
{
	struct format_field *field = perf_evsel__field(evsel, name);
	void *ptr;
	u64 value;

	if (!field)
		return 0;

	ptr = sample->raw_data + field->offset;

	switch (field->size) {
	case 1:
		return *(u8 *)ptr;
	case 2:
		value = *(u16 *)ptr;
		break;
	case 4:
		value = *(u32 *)ptr;
		break;
	case 8:
		value = *(u64 *)ptr;
		break;
	default:
		return 0;
	}

	if (!evsel->needs_swap)
		return value;

	switch (field->size) {
	case 2:
		return bswap_16(value);
	case 4:
		return bswap_32(value);
	case 8:
		return bswap_64(value);
	default:
		return 0;
	}
}
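/*
 * Usage sketch (illustrative): pull typed fields out of a tracepoint
 * sample, e.g. for sched:sched_switch. The field names depend on the
 * event's format file.
 *
 *	u64 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid");
 *	char *comm   = perf_evsel__strval(evsel, sample, "prev_comm");
 */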