perf session: Embed the host machine data on perf_session
[pandora-kernel.git] / tools/perf/util/session.c
1 #define _FILE_OFFSET_BITS 64
2
3 #include <linux/kernel.h>
4
5 #include <byteswap.h>
6 #include <unistd.h>
7 #include <sys/types.h>
8
9 #include "session.h"
10 #include "sort.h"
11 #include "util.h"
12
13 static int perf_session__open(struct perf_session *self, bool force)
14 {
15         struct stat input_stat;
16
17         if (!strcmp(self->filename, "-")) {
18                 self->fd_pipe = true;
19                 self->fd = STDIN_FILENO;
20
21                 if (perf_header__read(self, self->fd) < 0)
22                         pr_err("incompatible file format");
23
24                 return 0;
25         }
26
27         self->fd = open(self->filename, O_RDONLY);
28         if (self->fd < 0) {
29                 pr_err("failed to open file: %s", self->filename);
30                 if (!strcmp(self->filename, "perf.data"))
31                         pr_err("  (try 'perf record' first)");
32                 pr_err("\n");
33                 return -errno;
34         }
35
36         if (fstat(self->fd, &input_stat) < 0)
37                 goto out_close;
38
39         if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
40                 pr_err("file %s not owned by current user or root\n",
41                        self->filename);
42                 goto out_close;
43         }
44
45         if (!input_stat.st_size) {
46                 pr_info("zero-sized file (%s), nothing to do!\n",
47                         self->filename);
48                 goto out_close;
49         }
50
51         if (perf_header__read(self, self->fd) < 0) {
52                 pr_err("incompatible file format");
53                 goto out_close;
54         }
55
56         self->size = input_stat.st_size;
57         return 0;
58
59 out_close:
60         close(self->fd);
61         self->fd = -1;
62         return -1;
63 }
64
65 void perf_session__update_sample_type(struct perf_session *self)
66 {
67         self->sample_type = perf_header__sample_type(&self->header);
68 }
69
70 int perf_session__create_kernel_maps(struct perf_session *self)
71 {
72         struct rb_root *machines = &self->machines;
73         int ret = machines__create_kernel_maps(machines, HOST_KERNEL_ID);
74
75         if (ret >= 0)
76                 ret = machines__create_guest_kernel_maps(machines);
77         return ret;
78 }
79
80 struct perf_session *perf_session__new(const char *filename, int mode, bool force, bool repipe)
81 {
82         size_t len = filename ? strlen(filename) + 1 : 0;
83         struct perf_session *self = zalloc(sizeof(*self) + len);
84
85         if (self == NULL)
86                 goto out;
87
88         if (perf_header__init(&self->header) < 0)
89                 goto out_free;
90
91         memcpy(self->filename, filename, len);
92         self->threads = RB_ROOT;
93         self->stats_by_id = RB_ROOT;
94         self->last_match = NULL;
95         self->mmap_window = 32;
96         self->cwd = NULL;
97         self->cwdlen = 0;
98         self->unknown_events = 0;
99         self->machines = RB_ROOT;
100         self->repipe = repipe;
101         self->ordered_samples.flush_limit = ULLONG_MAX;
102         INIT_LIST_HEAD(&self->ordered_samples.samples_head);
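	/* the host machine is embedded in the session; guest machines go in ->machines */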
103         machine__init(&self->host_machine, "", HOST_KERNEL_ID);
104
105         if (mode == O_RDONLY) {
106                 if (perf_session__open(self, force) < 0)
107                         goto out_delete;
108         } else if (mode == O_WRONLY) {
109                 /*
110                  * In O_RDONLY mode this will be performed when reading the
111                  * kernel MMAP event, in event__process_mmap().
112                  */
113                 if (perf_session__create_kernel_maps(self) < 0)
114                         goto out_delete;
115         }
116
117         perf_session__update_sample_type(self);
118 out:
119         return self;
120 out_free:
121         free(self);
122         return NULL;
123 out_delete:
124         perf_session__delete(self);
125         return NULL;
126 }
127
128 void perf_session__delete(struct perf_session *self)
129 {
130         perf_header__exit(&self->header);
131         close(self->fd);
132         free(self->cwd);
133         free(self);
134 }
135
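/*
 * Typical lifecycle from a builtin command (a sketch, not code from this
 * file; "my_event_ops" is a hypothetical perf_event_ops instance):
 *
 *	struct perf_session *session;
 *	int err;
 *
 *	session = perf_session__new("perf.data", O_RDONLY, false, false);
 *	if (session == NULL)
 *		return -ENOMEM;
 *	err = perf_session__process_events(session, &my_event_ops);
 *	perf_session__delete(session);
 *	return err;
 */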
136 static bool symbol__match_parent_regex(struct symbol *sym)
137 {
138         if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
139                 return 1;
140
141         return 0;
142 }
143
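/*
 * Walk @chain and resolve its entries to map/symbol pairs for @thread.
 * PERF_CONTEXT_* markers are not addresses: they only switch the cpumode
 * used to look up the entries that follow them.  The returned array has
 * chain->nr slots and is the caller's to free.
 */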
144 struct map_symbol *perf_session__resolve_callchain(struct perf_session *self,
145                                                    struct thread *thread,
146                                                    struct ip_callchain *chain,
147                                                    struct symbol **parent)
148 {
149         u8 cpumode = PERF_RECORD_MISC_USER;
150         unsigned int i;
151         struct map_symbol *syms = calloc(chain->nr, sizeof(*syms));
152
153         if (!syms)
154                 return NULL;
155
156         for (i = 0; i < chain->nr; i++) {
157                 u64 ip = chain->ips[i];
158                 struct addr_location al;
159
160                 if (ip >= PERF_CONTEXT_MAX) {
161                         switch (ip) {
162                         case PERF_CONTEXT_HV:
163                                 cpumode = PERF_RECORD_MISC_HYPERVISOR;  break;
164                         case PERF_CONTEXT_KERNEL:
165                                 cpumode = PERF_RECORD_MISC_KERNEL;      break;
166                         case PERF_CONTEXT_USER:
167                                 cpumode = PERF_RECORD_MISC_USER;        break;
168                         default:
169                                 break;
170                         }
171                         continue;
172                 }
173
174                 al.filtered = false;
175                 thread__find_addr_location(thread, self, cpumode,
176                                 MAP__FUNCTION, thread->pid, ip, &al, NULL);
177                 if (al.sym != NULL) {
178                         if (sort__has_parent && !*parent &&
179                             symbol__match_parent_regex(al.sym))
180                                 *parent = al.sym;
181                         if (!symbol_conf.use_callchain)
182                                 break;
183                         syms[i].map = al.map;
184                         syms[i].sym = al.sym;
185                 }
186         }
187
188         return syms;
189 }
190
191 static int process_event_stub(event_t *event __used,
192                               struct perf_session *session __used)
193 {
194         dump_printf(": unhandled!\n");
195         return 0;
196 }
197
198 static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
199 {
200         if (handler->sample == NULL)
201                 handler->sample = process_event_stub;
202         if (handler->mmap == NULL)
203                 handler->mmap = process_event_stub;
204         if (handler->comm == NULL)
205                 handler->comm = process_event_stub;
206         if (handler->fork == NULL)
207                 handler->fork = process_event_stub;
208         if (handler->exit == NULL)
209                 handler->exit = process_event_stub;
210         if (handler->lost == NULL)
211                 handler->lost = process_event_stub;
212         if (handler->read == NULL)
213                 handler->read = process_event_stub;
214         if (handler->throttle == NULL)
215                 handler->throttle = process_event_stub;
216         if (handler->unthrottle == NULL)
217                 handler->unthrottle = process_event_stub;
218         if (handler->attr == NULL)
219                 handler->attr = process_event_stub;
220         if (handler->event_type == NULL)
221                 handler->event_type = process_event_stub;
222         if (handler->tracing_data == NULL)
223                 handler->tracing_data = process_event_stub;
224         if (handler->build_id == NULL)
225                 handler->build_id = process_event_stub;
226 }
227
228 static const char *event__name[] = {
229         [0]                      = "TOTAL",
230         [PERF_RECORD_MMAP]       = "MMAP",
231         [PERF_RECORD_LOST]       = "LOST",
232         [PERF_RECORD_COMM]       = "COMM",
233         [PERF_RECORD_EXIT]       = "EXIT",
234         [PERF_RECORD_THROTTLE]   = "THROTTLE",
235         [PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE",
236         [PERF_RECORD_FORK]       = "FORK",
237         [PERF_RECORD_READ]       = "READ",
238         [PERF_RECORD_SAMPLE]     = "SAMPLE",
239         [PERF_RECORD_HEADER_ATTR]        = "ATTR",
240         [PERF_RECORD_HEADER_EVENT_TYPE]  = "EVENT_TYPE",
241         [PERF_RECORD_HEADER_TRACING_DATA]        = "TRACING_DATA",
242         [PERF_RECORD_HEADER_BUILD_ID]    = "BUILD_ID",
243 };
244
245 unsigned long event__total[PERF_RECORD_HEADER_MAX];
246
247 void event__print_totals(void)
248 {
249         int i;
250         for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
251                 if (!event__name[i])
252                         continue;
253                 pr_info("%10s events: %10ld\n",
254                         event__name[i], event__total[i]);
255         }
256 }
257
258 void mem_bswap_64(void *src, int byte_size)
259 {
260         u64 *m = src;
261
262         while (byte_size > 0) {
263                 *m = bswap_64(*m);
264                 byte_size -= sizeof(u64);
265                 ++m;
266         }
267 }
268
269 static void event__all64_swap(event_t *self)
270 {
271         struct perf_event_header *hdr = &self->header;
272         mem_bswap_64(hdr + 1, self->header.size - sizeof(*hdr));
273 }
274
275 static void event__comm_swap(event_t *self)
276 {
277         self->comm.pid = bswap_32(self->comm.pid);
278         self->comm.tid = bswap_32(self->comm.tid);
279 }
280
281 static void event__mmap_swap(event_t *self)
282 {
283         self->mmap.pid   = bswap_32(self->mmap.pid);
284         self->mmap.tid   = bswap_32(self->mmap.tid);
285         self->mmap.start = bswap_64(self->mmap.start);
286         self->mmap.len   = bswap_64(self->mmap.len);
287         self->mmap.pgoff = bswap_64(self->mmap.pgoff);
288 }
289
290 static void event__task_swap(event_t *self)
291 {
292         self->fork.pid  = bswap_32(self->fork.pid);
293         self->fork.tid  = bswap_32(self->fork.tid);
294         self->fork.ppid = bswap_32(self->fork.ppid);
295         self->fork.ptid = bswap_32(self->fork.ptid);
296         self->fork.time = bswap_64(self->fork.time);
297 }
298
299 static void event__read_swap(event_t *self)
300 {
301         self->read.pid          = bswap_32(self->read.pid);
302         self->read.tid          = bswap_32(self->read.tid);
303         self->read.value        = bswap_64(self->read.value);
304         self->read.time_enabled = bswap_64(self->read.time_enabled);
305         self->read.time_running = bswap_64(self->read.time_running);
306         self->read.id           = bswap_64(self->read.id);
307 }
308
309 static void event__attr_swap(event_t *self)
310 {
311         size_t size;
312
313         self->attr.attr.type            = bswap_32(self->attr.attr.type);
314         self->attr.attr.size            = bswap_32(self->attr.attr.size);
315         self->attr.attr.config          = bswap_64(self->attr.attr.config);
316         self->attr.attr.sample_period   = bswap_64(self->attr.attr.sample_period);
317         self->attr.attr.sample_type     = bswap_64(self->attr.attr.sample_type);
318         self->attr.attr.read_format     = bswap_64(self->attr.attr.read_format);
319         self->attr.attr.wakeup_events   = bswap_32(self->attr.attr.wakeup_events);
320         self->attr.attr.bp_type         = bswap_32(self->attr.attr.bp_type);
321         self->attr.attr.bp_addr         = bswap_64(self->attr.attr.bp_addr);
322         self->attr.attr.bp_len          = bswap_64(self->attr.attr.bp_len);
323
324         size = self->header.size;
325         size -= (void *)&self->attr.id - (void *)self;
326         mem_bswap_64(self->attr.id, size);
327 }
328
329 static void event__event_type_swap(event_t *self)
330 {
331         self->event_type.event_type.event_id =
332                 bswap_64(self->event_type.event_type.event_id);
333 }
334
335 static void event__tracing_data_swap(event_t *self)
336 {
337         self->tracing_data.size = bswap_32(self->tracing_data.size);
338 }
339
340 typedef void (*event__swap_op)(event_t *self);
341
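/*
 * Byte-swap handlers indexed by record type, used when the file was
 * recorded on a host with the opposite endianness; NULL entries are
 * passed through unchanged.
 */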
342 static event__swap_op event__swap_ops[] = {
343         [PERF_RECORD_MMAP]   = event__mmap_swap,
344         [PERF_RECORD_COMM]   = event__comm_swap,
345         [PERF_RECORD_FORK]   = event__task_swap,
346         [PERF_RECORD_EXIT]   = event__task_swap,
347         [PERF_RECORD_LOST]   = event__all64_swap,
348         [PERF_RECORD_READ]   = event__read_swap,
349         [PERF_RECORD_SAMPLE] = event__all64_swap,
350         [PERF_RECORD_HEADER_ATTR]   = event__attr_swap,
351         [PERF_RECORD_HEADER_EVENT_TYPE]   = event__event_type_swap,
352         [PERF_RECORD_HEADER_TRACING_DATA]   = event__tracing_data_swap,
353         [PERF_RECORD_HEADER_BUILD_ID]   = NULL,
354         [PERF_RECORD_HEADER_MAX]    = NULL,
355 };
356
357 struct sample_queue {
358         u64                     timestamp;
359         struct sample_event     *event;
360         struct list_head        list;
361 };
362
363 #define FLUSH_PERIOD    (2 * NSEC_PER_SEC)
364
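/*
 * Deliver queued samples in timestamp order, stopping at the first entry
 * newer than ordered_samples.flush_limit.  A no-op unless the caller asked
 * for ordered_samples in its perf_event_ops.
 */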
365 static void flush_sample_queue(struct perf_session *s,
366                                struct perf_event_ops *ops)
367 {
368         struct list_head *head = &s->ordered_samples.samples_head;
369         u64 limit = s->ordered_samples.flush_limit;
370         struct sample_queue *tmp, *iter;
371
372         if (!ops->ordered_samples)
373                 return;
374
375         list_for_each_entry_safe(iter, tmp, head, list) {
376                 if (iter->timestamp > limit)
377                         return;
378
379                 if (iter == s->ordered_samples.last_inserted)
380                         s->ordered_samples.last_inserted = NULL;
381
382                 ops->sample((event_t *)iter->event, s);
383
384                 s->ordered_samples.last_flush = iter->timestamp;
385                 list_del(&iter->list);
386                 free(iter->event);
387                 free(iter);
388         }
389 }
390
391 static void __queue_sample_end(struct sample_queue *new, struct list_head *head)
392 {
393         struct sample_queue *iter;
394
395         list_for_each_entry_reverse(iter, head, list) {
396                 if (iter->timestamp < new->timestamp) {
397                         list_add(&new->list, &iter->list);
398                         return;
399                 }
400         }
401
402         list_add(&new->list, head);
403 }
404
405 static void __queue_sample_before(struct sample_queue *new,
406                                   struct sample_queue *iter,
407                                   struct list_head *head)
408 {
409         list_for_each_entry_continue_reverse(iter, head, list) {
410                 if (iter->timestamp < new->timestamp) {
411                         list_add(&new->list, &iter->list);
412                         return;
413                 }
414         }
415
416         list_add(&new->list, head);
417 }
418
419 static void __queue_sample_after(struct sample_queue *new,
420                                  struct sample_queue *iter,
421                                  struct list_head *head)
422 {
423         list_for_each_entry_continue(iter, head, list) {
424                 if (iter->timestamp > new->timestamp) {
425                         list_add_tail(&new->list, &iter->list);
426                         return;
427                 }
428         }
429         list_add_tail(&new->list, head);
430 }
431
432 /* The queue is ordered by time */
433 static void __queue_sample_event(struct sample_queue *new,
434                                  struct perf_session *s)
435 {
436         struct sample_queue *last_inserted = s->ordered_samples.last_inserted;
437         struct list_head *head = &s->ordered_samples.samples_head;
438
439
440         if (!last_inserted) {
441                 __queue_sample_end(new, head);
442                 return;
443         }
444
445         /*
446          * Most of the time the current event has a timestamp
447          * very close to the last event inserted, unless we just switched
448          * to another event buffer. Sorting the list around the last
449          * inserted event, which is close to the current one, is probably
450          * more efficient than an rbtree based sort.
451          */
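	/*
	 * Concretely: a timestamp at or before last_inserted's walks the list
	 * backwards from it (__queue_sample_before()), a newer one walks
	 * forwards (__queue_sample_after()); with no reference point yet, the
	 * list is scanned from its tail (__queue_sample_end(), above).
	 */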
452         if (last_inserted->timestamp >= new->timestamp)
453                 __queue_sample_before(new, last_inserted, head);
454         else
455                 __queue_sample_after(new, last_inserted, head);
456 }
457
458 static int queue_sample_event(event_t *event, struct sample_data *data,
459                               struct perf_session *s,
460                               struct perf_event_ops *ops)
461 {
462         u64 timestamp = data->time;
463         struct sample_queue *new;
464         u64 flush_limit;
465
466
467         if (s->ordered_samples.flush_limit == ULLONG_MAX)
468                 s->ordered_samples.flush_limit = timestamp + FLUSH_PERIOD;
469
470         if (timestamp < s->ordered_samples.last_flush) {
471                 printf("Warning: Timestamp below last timeslice flush\n");
472                 return -EINVAL;
473         }
474
475         new = malloc(sizeof(*new));
476         if (!new)
477                 return -ENOMEM;
478
479         new->timestamp = timestamp;
480
481         new->event = malloc(event->header.size);
482         if (!new->event) {
483                 free(new);
484                 return -ENOMEM;
485         }
486
487         memcpy(new->event, event, event->header.size);
488
489         __queue_sample_event(new, s);
490         s->ordered_samples.last_inserted = new;
491
492         /*
493          * We want to have a slice of events covering 2 * FLUSH_PERIOD.
494          * If FLUSH_PERIOD is big enough, it ensures that every event that
495          * occurred in the first half of the timeslice has been buffered and
496          * that none remain (we need that because of the weakly ordered
497          * event recording we have). Then, once we reach the 2 * FLUSH_PERIOD
498          * timeslice, we flush the first half to be gentle with the memory
499          * (the second half can still get new events in the middle, so wait
500          * another period to flush it).
501          */
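	/*
	 * Illustrative numbers (an assumption, not from this file): with
	 * FLUSH_PERIOD = 2s and the first sample stamped t = 1s, flush_limit
	 * starts at 3s.  Queueing continues until a sample arrives stamped
	 * later than 3s + FLUSH_PERIOD = 5s; flush_limit then advances to 5s
	 * and flush_sample_queue() emits every queued sample stamped at or
	 * before that new limit.
	 */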
502         flush_limit = s->ordered_samples.flush_limit;
503
504         if (new->timestamp > flush_limit &&
505                 new->timestamp - flush_limit > FLUSH_PERIOD) {
506                 s->ordered_samples.flush_limit += FLUSH_PERIOD;
507                 flush_sample_queue(s, ops);
508         }
509
510         return 0;
511 }
512
513 static int perf_session__process_sample(event_t *event, struct perf_session *s,
514                                         struct perf_event_ops *ops)
515 {
516         struct sample_data data;
517
518         if (!ops->ordered_samples)
519                 return ops->sample(event, s);
520
521         bzero(&data, sizeof(struct sample_data));
522         event__parse_sample(event, s->sample_type, &data);
523
524         queue_sample_event(event, &data, s, ops);
525
526         return 0;
527 }
528
529 static int perf_session__process_event(struct perf_session *self,
530                                        event_t *event,
531                                        struct perf_event_ops *ops,
532                                        u64 offset, u64 head)
533 {
534         trace_event(event);
535
536         if (event->header.type < PERF_RECORD_HEADER_MAX) {
537                 dump_printf("%#Lx [%#x]: PERF_RECORD_%s",
538                             offset + head, event->header.size,
539                             event__name[event->header.type]);
540                 ++event__total[0];
541                 ++event__total[event->header.type];
542         }
543
544         if (self->header.needs_swap && event__swap_ops[event->header.type])
545                 event__swap_ops[event->header.type](event);
546
547         switch (event->header.type) {
548         case PERF_RECORD_SAMPLE:
549                 return perf_session__process_sample(event, self, ops);
550         case PERF_RECORD_MMAP:
551                 return ops->mmap(event, self);
552         case PERF_RECORD_COMM:
553                 return ops->comm(event, self);
554         case PERF_RECORD_FORK:
555                 return ops->fork(event, self);
556         case PERF_RECORD_EXIT:
557                 return ops->exit(event, self);
558         case PERF_RECORD_LOST:
559                 return ops->lost(event, self);
560         case PERF_RECORD_READ:
561                 return ops->read(event, self);
562         case PERF_RECORD_THROTTLE:
563                 return ops->throttle(event, self);
564         case PERF_RECORD_UNTHROTTLE:
565                 return ops->unthrottle(event, self);
566         case PERF_RECORD_HEADER_ATTR:
567                 return ops->attr(event, self);
568         case PERF_RECORD_HEADER_EVENT_TYPE:
569                 return ops->event_type(event, self);
570         case PERF_RECORD_HEADER_TRACING_DATA:
571                 /* setup for reading amidst mmap */
572                 lseek(self->fd, offset + head, SEEK_SET);
573                 return ops->tracing_data(event, self);
574         case PERF_RECORD_HEADER_BUILD_ID:
575                 return ops->build_id(event, self);
576         default:
577                 self->unknown_events++;
578                 return -1;
579         }
580 }
581
582 void perf_event_header__bswap(struct perf_event_header *self)
583 {
584         self->type = bswap_32(self->type);
585         self->misc = bswap_16(self->misc);
586         self->size = bswap_16(self->size);
587 }
588
589 static struct thread *perf_session__register_idle_thread(struct perf_session *self)
590 {
591         struct thread *thread = perf_session__findnew(self, 0);
592
593         if (thread == NULL || thread__set_comm(thread, "swapper")) {
594                 pr_err("problem inserting idle task.\n");
595                 thread = NULL;
596         }
597
598         return thread;
599 }
600
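/*
 * Loop until @size bytes have been read from @fd.  On a complete read the
 * full byte count is returned; if read() fails or hits EOF first, its
 * return value (0 or negative) is propagated instead.
 */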
601 int do_read(int fd, void *buf, size_t size)
602 {
603         void *buf_start = buf;
604
605         while (size) {
606                 int ret = read(fd, buf, size);
607
608                 if (ret <= 0)
609                         return ret;
610
611                 size -= ret;
612                 buf += ret;
613         }
614
615         return buf - buf_start;
616 }
617
618 #define session_done()  (*(volatile int *)(&session_done))
619 volatile int session_done;
620
621 static int __perf_session__process_pipe_events(struct perf_session *self,
622                                                struct perf_event_ops *ops)
623 {
624         event_t event;
625         uint32_t size;
626         int skip = 0;
627         u64 head;
628         int err;
629         void *p;
630
631         perf_event_ops__fill_defaults(ops);
632
633         head = 0;
634 more:
635         err = do_read(self->fd, &event, sizeof(struct perf_event_header));
636         if (err <= 0) {
637                 if (err == 0)
638                         goto done;
639
640                 pr_err("failed to read event header\n");
641                 goto out_err;
642         }
643
644         if (self->header.needs_swap)
645                 perf_event_header__bswap(&event.header);
646
647         size = event.header.size;
648         if (size == 0)
649                 size = 8;
650
651         p = &event;
652         p += sizeof(struct perf_event_header);
653
654         err = do_read(self->fd, p, size - sizeof(struct perf_event_header));
655         if (err <= 0) {
656                 if (err == 0) {
657                         pr_err("unexpected end of event stream\n");
658                         goto done;
659                 }
660
661                 pr_err("failed to read event data\n");
662                 goto out_err;
663         }
664
665         if (size == 0 ||
666             (skip = perf_session__process_event(self, &event, ops,
667                                                 0, head)) < 0) {
668                 dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
669                             head, event.header.size, event.header.type);
670                 /*
671                  * assume we lost track of the stream, check alignment, and
672                  * increment a single u64 in the hope of catching on again 'soon'.
673                  */
674                 if (unlikely(head & 7))
675                         head &= ~7ULL;
676
677                 size = 8;
678         }
679
680         head += size;
681
682         dump_printf("\n%#Lx [%#x]: event: %d\n",
683                     head, event.header.size, event.header.type);
684
685         if (skip > 0)
686                 head += skip;
687
688         if (!session_done())
689                 goto more;
690 done:
691         err = 0;
692 out_err:
693         return err;
694 }
695
696 int __perf_session__process_events(struct perf_session *self,
697                                    u64 data_offset, u64 data_size,
698                                    u64 file_size, struct perf_event_ops *ops)
699 {
700         int err, mmap_prot, mmap_flags;
701         u64 head, shift;
702         u64 offset = 0;
703         size_t  page_size;
704         event_t *event;
705         uint32_t size;
706         char *buf;
707         struct ui_progress *progress = ui_progress__new("Processing events...",
708                                                         self->size);
709         if (progress == NULL)
710                 return -1;
711
712         perf_event_ops__fill_defaults(ops);
713
714         page_size = sysconf(_SC_PAGESIZE);
715
716         head = data_offset;
717         shift = page_size * (head / page_size);
718         offset += shift;
719         head -= shift;
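	/*
	 * Worked example (assuming 4 KiB pages, made-up offsets): with
	 * data_offset = 0x1310, shift is 0x1000, so the mapping starts at
	 * file offset 0x1000 and head is left at 0x310 inside it.  The same
	 * page-aligned rebasing happens at the "remap" label whenever the
	 * next event would cross the end of the mmap window.
	 */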
720
721         mmap_prot  = PROT_READ;
722         mmap_flags = MAP_SHARED;
723
724         if (self->header.needs_swap) {
725                 mmap_prot  |= PROT_WRITE;
726                 mmap_flags = MAP_PRIVATE;
727         }
728 remap:
729         buf = mmap(NULL, page_size * self->mmap_window, mmap_prot,
730                    mmap_flags, self->fd, offset);
731         if (buf == MAP_FAILED) {
732                 pr_err("failed to mmap file\n");
733                 err = -errno;
734                 goto out_err;
735         }
736
737 more:
738         event = (event_t *)(buf + head);
739         ui_progress__update(progress, offset);
740
741         if (self->header.needs_swap)
742                 perf_event_header__bswap(&event->header);
743         size = event->header.size;
744         if (size == 0)
745                 size = 8;
746
747         if (head + event->header.size >= page_size * self->mmap_window) {
748                 int munmap_ret;
749
750                 shift = page_size * (head / page_size);
751
752                 munmap_ret = munmap(buf, page_size * self->mmap_window);
753                 assert(munmap_ret == 0);
754
755                 offset += shift;
756                 head -= shift;
757                 goto remap;
758         }
759
760         size = event->header.size;
761
762         dump_printf("\n%#Lx [%#x]: event: %d\n",
763                     offset + head, event->header.size, event->header.type);
764
765         if (size == 0 ||
766             perf_session__process_event(self, event, ops, offset, head) < 0) {
767                 dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
768                             offset + head, event->header.size,
769                             event->header.type);
770                 /*
771                  * assume we lost track of the stream, check alignment, and
772                  * increment a single u64 in the hope of catching on again 'soon'.
773                  */
774                 if (unlikely(head & 7))
775                         head &= ~7ULL;
776
777                 size = 8;
778         }
779
780         head += size;
781
782         if (offset + head >= data_offset + data_size)
783                 goto done;
784
785         if (offset + head < file_size)
786                 goto more;
787 done:
788         err = 0;
789         /* do the final flush for ordered samples */
790         self->ordered_samples.flush_limit = ULLONG_MAX;
791         flush_sample_queue(self, ops);
792 out_err:
793         ui_progress__delete(progress);
794         return err;
795 }
796
797 int perf_session__process_events(struct perf_session *self,
798                                  struct perf_event_ops *ops)
799 {
800         int err;
801
802         if (perf_session__register_idle_thread(self) == NULL)
803                 return -ENOMEM;
804
805         if (!symbol_conf.full_paths) {
806                 char bf[PATH_MAX];
807
808                 if (getcwd(bf, sizeof(bf)) == NULL) {
809                         err = -errno;
810 out_getcwd_err:
811                         pr_err("failed to get the current directory\n");
812                         goto out_err;
813                 }
814                 self->cwd = strdup(bf);
815                 if (self->cwd == NULL) {
816                         err = -ENOMEM;
817                         goto out_getcwd_err;
818                 }
819                 self->cwdlen = strlen(self->cwd);
820         }
821
822         if (!self->fd_pipe)
823                 err = __perf_session__process_events(self,
824                                                      self->header.data_offset,
825                                                      self->header.data_size,
826                                                      self->size, ops);
827         else
828                 err = __perf_session__process_pipe_events(self, ops);
829 out_err:
830         return err;
831 }
832
833 bool perf_session__has_traces(struct perf_session *self, const char *msg)
834 {
835         if (!(self->sample_type & PERF_SAMPLE_RAW)) {
836                 pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
837                 return false;
838         }
839
840         return true;
841 }
842
843 int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps,
844                                              const char *symbol_name,
845                                              u64 addr)
846 {
847         char *bracket;
848         enum map_type i;
849         struct ref_reloc_sym *ref;
850
851         ref = zalloc(sizeof(struct ref_reloc_sym));
852         if (ref == NULL)
853                 return -ENOMEM;
854
855         ref->name = strdup(symbol_name);
856         if (ref->name == NULL) {
857                 free(ref);
858                 return -ENOMEM;
859         }
860
861         bracket = strchr(ref->name, ']');
862         if (bracket)
863                 *bracket = '\0';
864
865         ref->addr = addr;
866
867         for (i = 0; i < MAP__NR_TYPES; ++i) {
868                 struct kmap *kmap = map__kmap(maps[i]);
869                 kmap->ref_reloc_sym = ref;
870         }
871
872         return 0;
873 }
874
875 size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
876 {
877         return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) +
878                __dsos__fprintf(&self->host_machine.user_dsos, fp) +
879                machines__fprintf_dsos(&self->machines, fp);
880 }