#define _FILE_OFFSET_BITS 64

#include <linux/kernel.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>

#include "session.h"
#include "sort.h"
#include "util.h"
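
/*
 * Open the perf data file read-only, verify that it is owned by the
 * current user or root (unless 'force' is set), is non-empty, and has
 * a readable perf header.
 */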
static int perf_session__open(struct perf_session *self, bool force)
	struct stat input_stat;

	self->fd = open(self->filename, O_RDONLY);
		pr_err("failed to open file: %s", self->filename);
		if (!strcmp(self->filename, "perf.data"))
			pr_err(" (try 'perf record' first)");

	if (fstat(self->fd, &input_stat) < 0)

	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
		pr_err("file %s not owned by current user or root\n",

	if (!input_stat.st_size) {
		pr_info("zero-sized file (%s), nothing to do!\n",

	if (perf_header__read(&self->header, self->fd) < 0) {
		pr_err("incompatible file format");

	self->size = input_stat.st_size;
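
/* Build the kernel maps for this session's map groups. */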
static inline int perf_session__create_kernel_maps(struct perf_session *self)
	return map_groups__create_kernel_maps(&self->kmaps, self->vmlinux_maps);
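
/*
 * Allocate and initialise a perf_session.  In O_RDONLY mode the existing
 * perf.data file is opened; in O_WRONLY mode the kernel maps are created
 * up front instead.
 */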
struct perf_session *perf_session__new(const char *filename, int mode, bool force)
	size_t len = filename ? strlen(filename) + 1 : 0;
	struct perf_session *self = zalloc(sizeof(*self) + len);

	if (perf_header__init(&self->header) < 0)

	memcpy(self->filename, filename, len);
	self->threads = RB_ROOT;
	self->stats_by_id = RB_ROOT;
	self->last_match = NULL;
	self->mmap_window = 32;
	self->unknown_events = 0;
	map_groups__init(&self->kmaps);

	if (mode == O_RDONLY) {
		if (perf_session__open(self, force) < 0)
	} else if (mode == O_WRONLY) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(self) < 0)

	self->sample_type = perf_header__sample_type(&self->header);

	perf_session__delete(self);
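
/* Tear down a session created by perf_session__new(). */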
void perf_session__delete(struct perf_session *self)
	perf_header__exit(&self->header);

static bool symbol__match_parent_regex(struct symbol *sym)
	if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
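
/*
 * Walk a sampled callchain, switching cpumode on the PERF_CONTEXT_*
 * markers, resolving each address to a map/symbol pair and noting the
 * first symbol that matches the parent regex.
 */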
struct map_symbol *perf_session__resolve_callchain(struct perf_session *self,
						   struct thread *thread,
						   struct ip_callchain *chain,
						   struct symbol **parent)
	u8 cpumode = PERF_RECORD_MISC_USER;
	struct map_symbol *syms = NULL;

	if (symbol_conf.use_callchain) {
		syms = calloc(chain->nr, sizeof(*syms));
			fprintf(stderr, "Can't allocate memory for symbols\n");

	for (i = 0; i < chain->nr; i++) {
		u64 ip = chain->ips[i];
		struct addr_location al;

		if (ip >= PERF_CONTEXT_MAX) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR; break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL; break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER; break;

		thread__find_addr_location(thread, self, cpumode,
					   MAP__FUNCTION, ip, &al, NULL);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_parent_regex(al.sym))
			if (!symbol_conf.use_callchain)
			syms[i].map = al.map;
			syms[i].sym = al.sym;
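
/* Default handler for event types the caller did not provide. */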
static int process_event_stub(event_t *event __used,
			      struct perf_session *session __used)
	dump_printf(": unhandled!\n");
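
/* Point every unset callback at the stub so dispatch never hits NULL. */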
static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
	if (handler->sample == NULL)
		handler->sample = process_event_stub;
	if (handler->mmap == NULL)
		handler->mmap = process_event_stub;
	if (handler->comm == NULL)
		handler->comm = process_event_stub;
	if (handler->fork == NULL)
		handler->fork = process_event_stub;
	if (handler->exit == NULL)
		handler->exit = process_event_stub;
	if (handler->lost == NULL)
		handler->lost = process_event_stub;
	if (handler->read == NULL)
		handler->read = process_event_stub;
	if (handler->throttle == NULL)
		handler->throttle = process_event_stub;
	if (handler->unthrottle == NULL)
		handler->unthrottle = process_event_stub;
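
/* Human-readable record names, plus per-type totals printed by event__print_totals(). */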
static const char *event__name[] = {
	[PERF_RECORD_MMAP] = "MMAP",
	[PERF_RECORD_LOST] = "LOST",
	[PERF_RECORD_COMM] = "COMM",
	[PERF_RECORD_EXIT] = "EXIT",
	[PERF_RECORD_THROTTLE] = "THROTTLE",
	[PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE",
	[PERF_RECORD_FORK] = "FORK",
	[PERF_RECORD_READ] = "READ",
	[PERF_RECORD_SAMPLE] = "SAMPLE",
};

unsigned long event__total[PERF_RECORD_MAX];

void event__print_totals(void)
	for (i = 0; i < PERF_RECORD_MAX; ++i)
		pr_info("%10s events: %10ld\n",
			event__name[i], event__total[i]);
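
/* Byte-swap a buffer of u64s in place (for cross-endian data files). */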
void mem_bswap_64(void *src, int byte_size)
	while (byte_size > 0) {
		byte_size -= sizeof(u64);
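
/*
 * Per-record-type byte swappers: fix up the fields of each event when the
 * data file was written on a machine with different endianness.
 */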
static void event__all64_swap(event_t *self)
	struct perf_event_header *hdr = &self->header;
	mem_bswap_64(hdr + 1, self->header.size - sizeof(*hdr));

static void event__comm_swap(event_t *self)
	self->comm.pid = bswap_32(self->comm.pid);
	self->comm.tid = bswap_32(self->comm.tid);

static void event__mmap_swap(event_t *self)
	self->mmap.pid = bswap_32(self->mmap.pid);
	self->mmap.tid = bswap_32(self->mmap.tid);
	self->mmap.start = bswap_64(self->mmap.start);
	self->mmap.len = bswap_64(self->mmap.len);
	self->mmap.pgoff = bswap_64(self->mmap.pgoff);

static void event__task_swap(event_t *self)
	self->fork.pid = bswap_32(self->fork.pid);
	self->fork.tid = bswap_32(self->fork.tid);
	self->fork.ppid = bswap_32(self->fork.ppid);
	self->fork.ptid = bswap_32(self->fork.ptid);
	self->fork.time = bswap_64(self->fork.time);

static void event__read_swap(event_t *self)
	self->read.pid = bswap_32(self->read.pid);
	self->read.tid = bswap_32(self->read.tid);
	self->read.value = bswap_64(self->read.value);
	self->read.time_enabled = bswap_64(self->read.time_enabled);
	self->read.time_running = bswap_64(self->read.time_running);
	self->read.id = bswap_64(self->read.id);

typedef void (*event__swap_op)(event_t *self);
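
/* Swap routine per record type, indexed by PERF_RECORD_*. */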
static event__swap_op event__swap_ops[] = {
	[PERF_RECORD_MMAP] = event__mmap_swap,
	[PERF_RECORD_COMM] = event__comm_swap,
	[PERF_RECORD_FORK] = event__task_swap,
	[PERF_RECORD_EXIT] = event__task_swap,
	[PERF_RECORD_LOST] = event__all64_swap,
	[PERF_RECORD_READ] = event__read_swap,
	[PERF_RECORD_SAMPLE] = event__all64_swap,
	[PERF_RECORD_MAX] = NULL,
};
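
/*
 * Dispatch one event: dump it, bump the counters, byte-swap it if the
 * header says so, then hand it to the matching perf_event_ops callback.
 */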
static int perf_session__process_event(struct perf_session *self,
				       event_t *event,
				       struct perf_event_ops *ops,
				       u64 offset, u64 head)
	if (event->header.type < PERF_RECORD_MAX) {
		dump_printf("%#Lx [%#x]: PERF_RECORD_%s",
			    offset + head, event->header.size,
			    event__name[event->header.type]);
		++event__total[event->header.type];

	if (self->header.needs_swap && event__swap_ops[event->header.type])
		event__swap_ops[event->header.type](event);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		return ops->sample(event, self);
	case PERF_RECORD_MMAP:
		return ops->mmap(event, self);
	case PERF_RECORD_COMM:
		return ops->comm(event, self);
	case PERF_RECORD_FORK:
		return ops->fork(event, self);
	case PERF_RECORD_EXIT:
		return ops->exit(event, self);
	case PERF_RECORD_LOST:
		return ops->lost(event, self);
	case PERF_RECORD_READ:
		return ops->read(event, self);
	case PERF_RECORD_THROTTLE:
		return ops->throttle(event, self);
	case PERF_RECORD_UNTHROTTLE:
		return ops->unthrottle(event, self);
		self->unknown_events++;
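
/* Byte-swap the fixed-size perf_event_header fields. */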
void perf_event_header__bswap(struct perf_event_header *self)
	self->type = bswap_32(self->type);
	self->misc = bswap_16(self->misc);
	self->size = bswap_16(self->size);
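
/*
 * Read the build-id table from the file header and attach each build id
 * to the corresponding DSO (kernel or user list, depending on the misc bits).
 */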
int perf_header__read_build_ids(struct perf_header *self,
				int input, u64 offset, u64 size)
	struct build_id_event bev;
	char filename[PATH_MAX];
	u64 limit = offset + size;

	while (offset < limit) {
		struct list_head *head = &dsos__user;

		if (read(input, &bev, sizeof(bev)) != sizeof(bev))

		if (self->needs_swap)
			perf_event_header__bswap(&bev.header);

		len = bev.header.size - sizeof(bev);
		if (read(input, filename, len) != len)

		if (bev.header.misc & PERF_RECORD_MISC_KERNEL)
			head = &dsos__kernel;

		dso = __dsos__findnew(head, filename);
			dso__set_build_id(dso, &bev.build_id);
			if (head == &dsos__kernel && filename[0] == '[')

		offset += bev.header.size;
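
/* Make sure pid 0 exists as the "swapper" (idle) thread. */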
static struct thread *perf_session__register_idle_thread(struct perf_session *self)
	struct thread *thread = perf_session__findnew(self, 0);

	if (thread == NULL || thread__set_comm(thread, "swapper")) {
		pr_err("problem inserting idle task.\n");
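
/*
 * mmap the data file self->mmap_window pages at a time and feed every
 * record to perf_session__process_event(), remapping when the cursor
 * crosses the window and skipping over records that cannot be parsed.
 */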
int __perf_session__process_events(struct perf_session *self,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_event_ops *ops)
	int err, mmap_prot, mmap_flags;

	struct ui_progress *progress = ui_progress__new("Processing events...",
	if (progress == NULL)

	perf_event_ops__fill_defaults(ops);

	page_size = sysconf(_SC_PAGESIZE);

	shift = page_size * (head / page_size);

	mmap_prot = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (self->header.needs_swap) {
		mmap_prot |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;

	buf = mmap(NULL, page_size * self->mmap_window, mmap_prot,
		   mmap_flags, self->fd, offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");

	event = (event_t *)(buf + head);
	ui_progress__update(progress, offset);

	if (self->header.needs_swap)
		perf_event_header__bswap(&event->header);
	size = event->header.size;

	if (head + event->header.size >= page_size * self->mmap_window) {
		shift = page_size * (head / page_size);

		munmap_ret = munmap(buf, page_size * self->mmap_window);
		assert(munmap_ret == 0);

	size = event->header.size;

	dump_printf("\n%#Lx [%#x]: event: %d\n",
		    offset + head, event->header.size, event->header.type);

	    perf_session__process_event(self, event, ops, offset, head) < 0) {
		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
			    offset + head, event->header.size,
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))

	if (offset + head >= data_offset + data_size)

	if (offset + head < file_size)

	ui_progress__delete(progress);
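
/*
 * Convenience wrapper: register the idle thread, remember the current
 * working directory unless symbol_conf.full_paths is set, then process
 * the data section of the file.
 */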
int perf_session__process_events(struct perf_session *self,
				 struct perf_event_ops *ops)
	if (perf_session__register_idle_thread(self) == NULL)

	if (!symbol_conf.full_paths) {
		if (getcwd(bf, sizeof(bf)) == NULL) {
			pr_err("failed to get the current directory\n");
		self->cwd = strdup(bf);
		if (self->cwd == NULL) {
		self->cwdlen = strlen(self->cwd);

	err = __perf_session__process_events(self, self->header.data_offset,
					     self->header.data_size,
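
/* Check that the file actually contains raw trace samples (PERF_SAMPLE_RAW). */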
bool perf_session__has_traces(struct perf_session *self, const char *msg)
	if (!(self->sample_type & PERF_SAMPLE_RAW)) {
		pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
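
/*
 * Record the kallsyms reference symbol and its address in the session and
 * point each vmlinux map's kmap at it, so the kernel maps can later be
 * relocated against the running kernel.
 */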
int perf_session__set_kallsyms_ref_reloc_sym(struct perf_session *self,
					     const char *symbol_name,
					     u64 addr)
	self->ref_reloc_sym.name = strdup(symbol_name);
	if (self->ref_reloc_sym.name == NULL)

	bracket = strchr(self->ref_reloc_sym.name, ']');

	self->ref_reloc_sym.addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(self->vmlinux_maps[i]);
		kmap->ref_reloc_sym = &self->ref_reloc_sym;