#include "thread_map.h"

#include <linux/bitops.h>
#include <linux/hash.h>

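/*
 * Each evsel keeps per-event state in cpu x thread tables (struct xyarray):
 * FD() below yields the lvalue holding the fd returned by
 * sys_perf_event_open() for a given (cpu, thread) pair, and SID() yields
 * the matching struct perf_sample_id slot that perf_evlist__id_hash()
 * fills in and links into evlist->heads[].
 */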
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->id, x, y)

struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL) {
		evsel->idx  = idx;
		evsel->attr = *attr;
		INIT_LIST_HEAD(&evsel->node);
	}

	return evsel;
}

int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
	return evsel->fd != NULL ? 0 : -ENOMEM;
}

int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	return evsel->id != NULL ? 0 : -ENOMEM;
}

int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
{
	evsel->counts = zalloc((sizeof(*evsel->counts) +
				(ncpus * sizeof(struct perf_counts_values))));
	return evsel->counts != NULL ? 0 : -ENOMEM;
}

void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}

void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->id);
	evsel->id = NULL;
}

void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	for (cpu = 0; cpu < ncpus; cpu++)
		for (thread = 0; thread < nthreads; ++thread) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
}

void perf_evlist__munmap(struct perf_evlist *evlist, int ncpus)
{
	int cpu;

	for (cpu = 0; cpu < ncpus; cpu++) {
		if (evlist->mmap[cpu].base != NULL) {
			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
			evlist->mmap[cpu].base = NULL;
		}
	}
}

int perf_evlist__alloc_mmap(struct perf_evlist *evlist, int ncpus)
{
	evlist->mmap = zalloc(ncpus * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

void perf_evsel__delete(struct perf_evsel *evsel)
{
	assert(list_empty(&evsel->node));
	xyarray__delete(evsel->fd);
	xyarray__delete(evsel->id);
	free(evsel);
}

int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
		return -errno;

	if (scale) {
		if (count.run == 0)
			count.val = 0;
		else if (count.run < count.ena)
			count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
	} else
		count.ena = count.run = 0;

	evsel->counts->cpu[cpu] = count;
	return 0;
}

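/*
 * Scaling note for the readers above and below: with
 * PERF_FORMAT_TOTAL_TIME_ENABLED/RUNNING the kernel also reports how long
 * the event was enabled (ena) and how long it was actually on the PMU
 * (run).  When events are multiplexed run < ena, so the raw count is
 * extrapolated by ena/run: e.g. a count of 1000 with run at half of ena
 * is reported as 2000.  run == 0 means the event never got to count.
 */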
int __perf_evsel__read(struct perf_evsel *evsel,
		       int ncpus, int nthreads, bool scale)
{
	size_t nv = scale ? 3 : 1;
	int cpu, thread;
	struct perf_counts_values *aggr = &evsel->counts->aggr, count;

	aggr->val = aggr->ena = aggr->run = 0;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			if (FD(evsel, cpu, thread) < 0)
				continue;

			if (readn(FD(evsel, cpu, thread),
				  &count, nv * sizeof(u64)) < 0)
				return -errno;

			aggr->val += count.val;
			if (scale) {
				aggr->ena += count.ena;
				aggr->run += count.run;
			}
		}
	}

	evsel->counts->scaled = 0;
	if (scale) {
		if (aggr->run == 0) {
			evsel->counts->scaled = -1;
			aggr->val = 0;
			return 0;
		}

		if (aggr->run < aggr->ena) {
			evsel->counts->scaled = 1;
			aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
		}
	} else
		aggr->ena = aggr->run = 0;

	return 0;
}

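/*
 * After __perf_evsel__read(), counts->scaled is 0 when the aggregate
 * needed no adjustment, 1 when it was extrapolated because the event was
 * multiplexed, and -1 when the event never ran (the value is forced to 0
 * in that case).
 */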
static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
			      struct thread_map *threads, bool group, bool inherit)
{
	int cpu, thread;

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
		return -1;

	for (cpu = 0; cpu < cpus->nr; cpu++) {
		int group_fd = -1;

		evsel->attr.inherit = (cpus->map[cpu] < 0) && inherit;

		for (thread = 0; thread < threads->nr; thread++) {
			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
								     threads->map[thread],
								     cpus->map[cpu],
								     group_fd, 0);
			if (FD(evsel, cpu, thread) < 0)
				goto out_close;

			if (group && group_fd == -1)
				group_fd = FD(evsel, cpu, thread);
		}
	}

	return 0;

out_close:
	do {
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
		thread = threads->nr;
	} while (--cpu >= 0);
	return -1;
}

static struct {
	struct thread_map map;
	int threads[1];
} empty_thread_map = {
	.map.nr	 = 1,
	.threads = { -1, },
};

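/*
 * The empty_cpu_map used below is the analogous one-entry cpu_map holding
 * -1.  In the perf_event_open() ABI, cpu == -1 means "any cpu" and
 * pid == -1 combined with a real cpu means "all threads on that cpu", so
 * these dummy maps let the per-cpu and per-thread open helpers below reuse
 * __perf_evsel__open() when the caller passes no map of their own.
 */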
int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads, bool group, bool inherit)
{
	if (cpus == NULL) {
		/* Work around old compiler warnings about strict aliasing */
		cpus = &empty_cpu_map.map;
	}

	if (threads == NULL)
		threads = &empty_thread_map.map;

	return __perf_evsel__open(evsel, cpus, threads, group, inherit);
}

int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus, bool group, bool inherit)
{
	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group, inherit);
}

int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads, bool group, bool inherit)
{
	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group, inherit);
}

static int __perf_evlist__mmap(struct perf_evlist *evlist, int cpu, int prot,
			       int mask, int fd)
{
	evlist->mmap[cpu].prev = 0;
	evlist->mmap[cpu].mask = mask;
	evlist->mmap[cpu].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[cpu].base == MAP_FAILED)
		return -1;

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}

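/*
 * The mapping is MAP_SHARED over the event fd: the first page is the
 * perf_event_mmap_page control header (data_head/data_tail) and the rest
 * is the ring buffer.  That is why perf_evlist__mmap() sizes mmap_len one
 * page larger than the data area and keeps mask (data size minus one,
 * with pages a power of two) around for wrapping offsets.  When overwrite
 * is off the mapping is also PROT_WRITE so the consumer can advance the
 * tail.
 */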
static int perf_evlist__id_hash(struct perf_evlist *evlist, struct perf_evsel *evsel,
				int cpu, int thread, int fd)
{
	struct perf_sample_id *sid;
	u64 read_data[4] = { 0, };
	int hash, id_idx = 1; /* The first entry is the counter value */

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	sid = SID(evsel, cpu, thread);
	sid->id = read_data[id_idx];

	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
	return 0;
}

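/*
 * With PERF_FORMAT_ID a read() on the event fd returns
 * { value, time_enabled?, time_running?, id }, the optional fields being
 * present only when the matching read_format bits are set.  That is why
 * id_idx above starts at 1 and is bumped past each enabled field before
 * the id is picked out and hashed into evlist->heads[].
 */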
/** perf_evlist__mmap - Create per cpu maps to receive events
 *
 * @evlist - list of events
 * @cpus - cpu map being monitored
 * @threads - threads map being monitored
 * @pages - map length in pages
 * @overwrite - overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 *	struct perf_mmap *m = &evlist->mmap[cpu];
 *	unsigned int head = perf_mmap__read_head(m);
 *
 *	perf_mmap__write_tail(m, head);
 */
int perf_evlist__mmap(struct perf_evlist *evlist, struct cpu_map *cpus,
		      struct thread_map *threads, int pages, bool overwrite)
{
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	int mask = pages * page_size - 1, cpu;
	struct perf_evsel *first_evsel, *evsel;
	int thread, prot = PROT_READ | (overwrite ? 0 : PROT_WRITE);

	if (evlist->mmap == NULL &&
	    perf_evlist__alloc_mmap(evlist, cpus->nr) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL &&
	    perf_evlist__alloc_pollfd(evlist, cpus->nr, threads->nr) < 0)
		return -ENOMEM;

	evlist->mmap_len = (pages + 1) * page_size;
	first_evsel = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->id == NULL &&
		    perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0)
			return -ENOMEM;

		for (cpu = 0; cpu < cpus->nr; cpu++) {
			for (thread = 0; thread < threads->nr; thread++) {
				int fd = FD(evsel, cpu, thread);

				if (evsel->idx || thread) {
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT,
						  FD(first_evsel, cpu, 0)) != 0)
						goto out_unmap;
				} else if (__perf_evlist__mmap(evlist, cpu, prot, mask, fd) < 0)
					goto out_unmap;

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_hash(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < cpus->nr; cpu++) {
		if (evlist->mmap[cpu].base != NULL) {
			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
			evlist->mmap[cpu].base = NULL;
		}
	}
	return -1;
}
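
/*
 * Illustrative sketch only, not part of the original file: one way a
 * caller could drain a per-cpu buffer set up by perf_evlist__mmap() when
 * overwrite is false, following the protocol from the comment above
 * perf_evlist__mmap().  The hypothetical drain_one_cpu() assumes the data
 * area starts one page past the mapping base (after the control page)
 * and, for brevity, that records never wrap around the end of the ring;
 * a real consumer also has to handle wrapped records.
 */
#if 0
static void drain_one_cpu(struct perf_evlist *evlist, int cpu, unsigned int page_size)
{
	struct perf_mmap *m = &evlist->mmap[cpu];
	unsigned char *data = (unsigned char *)m->base + page_size;
	unsigned int head = perf_mmap__read_head(m);
	unsigned int old = m->prev;

	while (old != head) {
		struct perf_event_header *eh;

		eh = (struct perf_event_header *)&data[old & m->mask];
		/* hand the record off to whoever wants it here */
		old += eh->size;
	}

	m->prev = old;
	perf_mmap__write_tail(m, old);	/* tell the kernel the data was consumed */
}
#endif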