#define _FILE_OFFSET_BITS 64

#include <linux/kernel.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>

#include "session.h"
#include "sort.h"
#include "util.h"

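/*
 * Open the session input.  A filename of "-" means a live pipe on
 * stdin; anything else is opened as a perf.data-style file, which must
 * be owned by the current user (or root) unless 'force' is set, be
 * non-empty, and start with a compatible header.
 */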
static int perf_session__open(struct perf_session *self, bool force)
{
        struct stat input_stat;

        if (!strcmp(self->filename, "-")) {
                self->fd_pipe = true;
                self->fd = STDIN_FILENO;

                if (perf_header__read(self, self->fd) < 0) {
                        pr_err("incompatible file format\n");
                        return -1;
                }

                return 0;
        }

        self->fd = open(self->filename, O_RDONLY);
        if (self->fd < 0) {
                pr_err("failed to open file: %s", self->filename);
                if (!strcmp(self->filename, "perf.data"))
                        pr_err("  (try 'perf record' first)");
                pr_err("\n");
                return -errno;
        }

        if (fstat(self->fd, &input_stat) < 0)
                goto out_close;

        if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
                pr_err("file %s not owned by current user or root\n",
                       self->filename);
                goto out_close;
        }

        if (!input_stat.st_size) {
                pr_info("zero-sized file (%s), nothing to do!\n",
                        self->filename);
                goto out_close;
        }

        if (perf_header__read(self, self->fd) < 0) {
                pr_err("incompatible file format\n");
                goto out_close;
        }

        self->size = input_stat.st_size;
        return 0;

out_close:
        close(self->fd);
        self->fd = -1;
        return -1;
}

void perf_session__update_sample_type(struct perf_session *self)
{
        self->sample_type = perf_header__sample_type(&self->header);
}

int perf_session__create_kernel_maps(struct perf_session *self)
{
        struct rb_root *machines = &self->machines;
        int ret = machines__create_kernel_maps(machines, HOST_KERNEL_ID);

        if (ret >= 0)
                ret = machines__create_guest_kernel_maps(machines);
        return ret;
}

struct perf_session *perf_session__new(const char *filename, int mode, bool force, bool repipe)
{
        size_t len = filename ? strlen(filename) + 1 : 0;
        struct perf_session *self = zalloc(sizeof(*self) + len);

        if (self == NULL)
                goto out;

        if (perf_header__init(&self->header) < 0)
                goto out_free;

        memcpy(self->filename, filename, len);
        self->threads = RB_ROOT;
        self->stats_by_id = RB_ROOT;
        self->last_match = NULL;
        self->mmap_window = 32;
        self->cwd = NULL;
        self->cwdlen = 0;
        self->unknown_events = 0;
        self->machines = RB_ROOT;
        self->repipe = repipe;
        self->ordered_samples.flush_limit = ULLONG_MAX;
        INIT_LIST_HEAD(&self->ordered_samples.samples_head);

        if (mode == O_RDONLY) {
                if (perf_session__open(self, force) < 0)
                        goto out_delete;
        } else if (mode == O_WRONLY) {
                /*
                 * In O_RDONLY mode this will be performed when reading the
                 * kernel MMAP event, in event__process_mmap().
                 */
                if (perf_session__create_kernel_maps(self) < 0)
                        goto out_delete;
        }

        perf_session__update_sample_type(self);
out:
        return self;
out_free:
        free(self);
        return NULL;
out_delete:
        perf_session__delete(self);
        return NULL;
}

void perf_session__delete(struct perf_session *self)
{
        perf_header__exit(&self->header);
        close(self->fd);
        free(self->cwd);
        free(self);
}

static bool symbol__match_parent_regex(struct symbol *sym)
{
        if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
                return true;

        return false;
}

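/*
 * Resolve every entry of a callchain to a map/symbol pair.  Entries at
 * or above PERF_CONTEXT_MAX are not addresses but markers that switch
 * the cpumode used to look up the entries that follow them.  Returns a
 * calloc()'ed array of chain->nr map_symbol slots that the caller must
 * free, or NULL if the allocation fails.
 */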
struct map_symbol *perf_session__resolve_callchain(struct perf_session *self,
                                                   struct thread *thread,
                                                   struct ip_callchain *chain,
                                                   struct symbol **parent)
{
        u8 cpumode = PERF_RECORD_MISC_USER;
        unsigned int i;
        struct map_symbol *syms = calloc(chain->nr, sizeof(*syms));

        if (!syms)
                return NULL;

        for (i = 0; i < chain->nr; i++) {
                u64 ip = chain->ips[i];
                struct addr_location al;

                if (ip >= PERF_CONTEXT_MAX) {
                        switch (ip) {
                        case PERF_CONTEXT_HV:
                                cpumode = PERF_RECORD_MISC_HYPERVISOR;  break;
                        case PERF_CONTEXT_KERNEL:
                                cpumode = PERF_RECORD_MISC_KERNEL;      break;
                        case PERF_CONTEXT_USER:
                                cpumode = PERF_RECORD_MISC_USER;        break;
                        default:
                                break;
                        }
                        continue;
                }

                al.filtered = false;
                thread__find_addr_location(thread, self, cpumode,
                                MAP__FUNCTION, thread->pid, ip, &al, NULL);
                if (al.sym != NULL) {
                        if (sort__has_parent && !*parent &&
                            symbol__match_parent_regex(al.sym))
                                *parent = al.sym;
                        if (!symbol_conf.use_callchain)
                                break;
                        syms[i].map = al.map;
                        syms[i].sym = al.sym;
                }
        }

        return syms;
}

static int process_event_stub(event_t *event __used,
                              struct perf_session *session __used)
{
        dump_printf(": unhandled!\n");
        return 0;
}

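/*
 * Point every handler the caller left NULL at process_event_stub(), so
 * the dispatcher in perf_session__process_event() can invoke each ops
 * member unconditionally.
 */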
static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
{
        if (handler->sample == NULL)
                handler->sample = process_event_stub;
        if (handler->mmap == NULL)
                handler->mmap = process_event_stub;
        if (handler->comm == NULL)
                handler->comm = process_event_stub;
        if (handler->fork == NULL)
                handler->fork = process_event_stub;
        if (handler->exit == NULL)
                handler->exit = process_event_stub;
        if (handler->lost == NULL)
                handler->lost = process_event_stub;
        if (handler->read == NULL)
                handler->read = process_event_stub;
        if (handler->throttle == NULL)
                handler->throttle = process_event_stub;
        if (handler->unthrottle == NULL)
                handler->unthrottle = process_event_stub;
        if (handler->attr == NULL)
                handler->attr = process_event_stub;
        if (handler->event_type == NULL)
                handler->event_type = process_event_stub;
        if (handler->tracing_data == NULL)
                handler->tracing_data = process_event_stub;
        if (handler->build_id == NULL)
                handler->build_id = process_event_stub;
}

static const char *event__name[] = {
        [0]                               = "TOTAL",
        [PERF_RECORD_MMAP]                = "MMAP",
        [PERF_RECORD_LOST]                = "LOST",
        [PERF_RECORD_COMM]                = "COMM",
        [PERF_RECORD_EXIT]                = "EXIT",
        [PERF_RECORD_THROTTLE]            = "THROTTLE",
        [PERF_RECORD_UNTHROTTLE]          = "UNTHROTTLE",
        [PERF_RECORD_FORK]                = "FORK",
        [PERF_RECORD_READ]                = "READ",
        [PERF_RECORD_SAMPLE]              = "SAMPLE",
        [PERF_RECORD_HEADER_ATTR]         = "ATTR",
        [PERF_RECORD_HEADER_EVENT_TYPE]   = "EVENT_TYPE",
        [PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA",
        [PERF_RECORD_HEADER_BUILD_ID]     = "BUILD_ID",
};

unsigned long event__total[PERF_RECORD_HEADER_MAX];

void event__print_totals(void)
{
        int i;
        for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
                if (!event__name[i])
                        continue;
                pr_info("%10s events: %10lu\n",
                        event__name[i], event__total[i]);
        }
}

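/*
 * Byte-swap a buffer in place, treating it as an array of u64s.
 * byte_size is expected to be a multiple of sizeof(u64).
 */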
void mem_bswap_64(void *src, int byte_size)
{
        u64 *m = src;

        while (byte_size > 0) {
                *m = bswap_64(*m);
                byte_size -= sizeof(u64);
                ++m;
        }
}

static void event__all64_swap(event_t *self)
{
        struct perf_event_header *hdr = &self->header;
        mem_bswap_64(hdr + 1, self->header.size - sizeof(*hdr));
}

static void event__comm_swap(event_t *self)
{
        self->comm.pid = bswap_32(self->comm.pid);
        self->comm.tid = bswap_32(self->comm.tid);
}

static void event__mmap_swap(event_t *self)
{
        self->mmap.pid   = bswap_32(self->mmap.pid);
        self->mmap.tid   = bswap_32(self->mmap.tid);
        self->mmap.start = bswap_64(self->mmap.start);
        self->mmap.len   = bswap_64(self->mmap.len);
        self->mmap.pgoff = bswap_64(self->mmap.pgoff);
}

static void event__task_swap(event_t *self)
{
        self->fork.pid  = bswap_32(self->fork.pid);
        self->fork.tid  = bswap_32(self->fork.tid);
        self->fork.ppid = bswap_32(self->fork.ppid);
        self->fork.ptid = bswap_32(self->fork.ptid);
        self->fork.time = bswap_64(self->fork.time);
}

static void event__read_swap(event_t *self)
{
        self->read.pid          = bswap_32(self->read.pid);
        self->read.tid          = bswap_32(self->read.tid);
        self->read.value        = bswap_64(self->read.value);
        self->read.time_enabled = bswap_64(self->read.time_enabled);
        self->read.time_running = bswap_64(self->read.time_running);
        self->read.id           = bswap_64(self->read.id);
}

static void event__attr_swap(event_t *self)
{
        size_t size;

        self->attr.attr.type            = bswap_32(self->attr.attr.type);
        self->attr.attr.size            = bswap_32(self->attr.attr.size);
        self->attr.attr.config          = bswap_64(self->attr.attr.config);
        self->attr.attr.sample_period   = bswap_64(self->attr.attr.sample_period);
        self->attr.attr.sample_type     = bswap_64(self->attr.attr.sample_type);
        self->attr.attr.read_format     = bswap_64(self->attr.attr.read_format);
        self->attr.attr.wakeup_events   = bswap_32(self->attr.attr.wakeup_events);
        self->attr.attr.bp_type         = bswap_32(self->attr.attr.bp_type);
        self->attr.attr.bp_addr         = bswap_64(self->attr.attr.bp_addr);
        self->attr.attr.bp_len          = bswap_64(self->attr.attr.bp_len);

        size = self->header.size;
        size -= (void *)&self->attr.id - (void *)self;
        mem_bswap_64(self->attr.id, size);
}

static void event__event_type_swap(event_t *self)
{
        self->event_type.event_type.event_id =
                bswap_64(self->event_type.event_type.event_id);
}

static void event__tracing_data_swap(event_t *self)
{
        self->tracing_data.size = bswap_32(self->tracing_data.size);
}

typedef void (*event__swap_op)(event_t *self);

static event__swap_op event__swap_ops[] = {
        [PERF_RECORD_MMAP]                = event__mmap_swap,
        [PERF_RECORD_COMM]                = event__comm_swap,
        [PERF_RECORD_FORK]                = event__task_swap,
        [PERF_RECORD_EXIT]                = event__task_swap,
        [PERF_RECORD_LOST]                = event__all64_swap,
        [PERF_RECORD_READ]                = event__read_swap,
        [PERF_RECORD_SAMPLE]              = event__all64_swap,
        [PERF_RECORD_HEADER_ATTR]         = event__attr_swap,
        [PERF_RECORD_HEADER_EVENT_TYPE]   = event__event_type_swap,
        [PERF_RECORD_HEADER_TRACING_DATA] = event__tracing_data_swap,
        [PERF_RECORD_HEADER_BUILD_ID]     = NULL,
        [PERF_RECORD_HEADER_MAX]          = NULL,
};

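/*
 * When a tool sets ops->ordered_samples, samples are not delivered as
 * they are read but buffered in a time-sorted list of sample_queue
 * nodes, and only flushed once it is safe to assume that no sample
 * with an earlier timestamp can still arrive from another buffer.
 */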
struct sample_queue {
        u64                     timestamp;
        struct sample_event     *event;
        struct list_head        list;
};

#define FLUSH_PERIOD    (2 * NSEC_PER_SEC)

static void flush_sample_queue(struct perf_session *s,
                               struct perf_event_ops *ops)
{
        struct list_head *head = &s->ordered_samples.samples_head;
        u64 limit = s->ordered_samples.flush_limit;
        struct sample_queue *tmp, *iter;

        if (!ops->ordered_samples)
                return;

        list_for_each_entry_safe(iter, tmp, head, list) {
                if (iter->timestamp > limit)
                        return;

                if (iter == s->ordered_samples.last_inserted)
                        s->ordered_samples.last_inserted = NULL;

                ops->sample((event_t *)iter->event, s);

                s->ordered_samples.last_flush = iter->timestamp;
                list_del(&iter->list);
                free(iter->event);
                free(iter);
        }
}

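/*
 * The three insertion helpers below keep the queue sorted by
 * timestamp.  __queue_sample_end() scans backwards from the tail,
 * while the _before()/_after() variants continue the scan from the
 * last inserted node, which is usually close to the new one.
 */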
static void __queue_sample_end(struct sample_queue *new, struct list_head *head)
{
        struct sample_queue *iter;

        list_for_each_entry_reverse(iter, head, list) {
                if (iter->timestamp < new->timestamp) {
                        list_add(&new->list, &iter->list);
                        return;
                }
        }

        list_add(&new->list, head);
}

static void __queue_sample_before(struct sample_queue *new,
                                  struct sample_queue *iter,
                                  struct list_head *head)
{
        list_for_each_entry_continue_reverse(iter, head, list) {
                if (iter->timestamp < new->timestamp) {
                        list_add(&new->list, &iter->list);
                        return;
                }
        }

        list_add(&new->list, head);
}

static void __queue_sample_after(struct sample_queue *new,
                                 struct sample_queue *iter,
                                 struct list_head *head)
{
        list_for_each_entry_continue(iter, head, list) {
                if (iter->timestamp > new->timestamp) {
                        list_add_tail(&new->list, &iter->list);
                        return;
                }
        }
        list_add_tail(&new->list, head);
}

/* The queue is ordered by time */
static void __queue_sample_event(struct sample_queue *new,
                                 struct perf_session *s)
{
        struct sample_queue *last_inserted = s->ordered_samples.last_inserted;
        struct list_head *head = &s->ordered_samples.samples_head;

        if (!last_inserted) {
                __queue_sample_end(new, head);
                return;
        }

        /*
         * Most of the time the current event has a timestamp very
         * close to the last event inserted, unless we just switched to
         * another event buffer.  A sort based on a list and on the
         * last inserted event is therefore probably more efficient
         * than an rbtree-based one.
         */
        if (last_inserted->timestamp >= new->timestamp)
                __queue_sample_before(new, last_inserted, head);
        else
                __queue_sample_after(new, last_inserted, head);
}

static int queue_sample_event(event_t *event, struct sample_data *data,
                              struct perf_session *s,
                              struct perf_event_ops *ops)
{
        u64 timestamp = data->time;
        struct sample_queue *new;
        u64 flush_limit;

        if (s->ordered_samples.flush_limit == ULLONG_MAX)
                s->ordered_samples.flush_limit = timestamp + FLUSH_PERIOD;

        if (timestamp < s->ordered_samples.last_flush) {
                printf("Warning: Timestamp below last timeslice flush\n");
                return -EINVAL;
        }

        new = malloc(sizeof(*new));
        if (!new)
                return -ENOMEM;

        new->timestamp = timestamp;

        new->event = malloc(event->header.size);
        if (!new->event) {
                free(new);
                return -ENOMEM;
        }

        memcpy(new->event, event, event->header.size);

        __queue_sample_event(new, s);
        s->ordered_samples.last_inserted = new;

        /*
         * We want to have a slice of events covering 2 * FLUSH_PERIOD.
         * If FLUSH_PERIOD is big enough, it ensures every event that
         * occurred in the first half of the timeslice has been buffered
         * and that none remain (we need that because of the weakly
         * ordered event recording we have).  Once we reach the
         * 2 * FLUSH_PERIOD timeslice, we flush the first half to be
         * gentle with memory (the second half can still get new events
         * in the middle, so wait another period to flush it).
         */
        flush_limit = s->ordered_samples.flush_limit;

        if (new->timestamp > flush_limit &&
                new->timestamp - flush_limit > FLUSH_PERIOD) {
                s->ordered_samples.flush_limit += FLUSH_PERIOD;
                flush_sample_queue(s, ops);
        }

        return 0;
}

static int perf_session__process_sample(event_t *event, struct perf_session *s,
                                        struct perf_event_ops *ops)
{
        struct sample_data data;

        if (!ops->ordered_samples)
                return ops->sample(event, s);

        bzero(&data, sizeof(struct sample_data));
        event__parse_sample(event, s->sample_type, &data);

        return queue_sample_event(event, &data, s, ops);
}

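/*
 * Central dispatcher: account the event, byte-swap it if the file was
 * recorded on a host of the opposite endianness, and hand it to the
 * matching ops callback.  Unknown types are counted and reported as an
 * error so the caller can try to resynchronize the stream.
 */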
static int perf_session__process_event(struct perf_session *self,
                                       event_t *event,
                                       struct perf_event_ops *ops,
                                       u64 offset, u64 head)
{
        trace_event(event);

        if (event->header.type < PERF_RECORD_HEADER_MAX) {
                dump_printf("%#Lx [%#x]: PERF_RECORD_%s",
                            offset + head, event->header.size,
                            event__name[event->header.type]);
                ++event__total[0];
                ++event__total[event->header.type];
        }

        if (self->header.needs_swap &&
            event->header.type < PERF_RECORD_HEADER_MAX &&
            event__swap_ops[event->header.type])
                event__swap_ops[event->header.type](event);

        switch (event->header.type) {
        case PERF_RECORD_SAMPLE:
                return perf_session__process_sample(event, self, ops);
        case PERF_RECORD_MMAP:
                return ops->mmap(event, self);
        case PERF_RECORD_COMM:
                return ops->comm(event, self);
        case PERF_RECORD_FORK:
                return ops->fork(event, self);
        case PERF_RECORD_EXIT:
                return ops->exit(event, self);
        case PERF_RECORD_LOST:
                return ops->lost(event, self);
        case PERF_RECORD_READ:
                return ops->read(event, self);
        case PERF_RECORD_THROTTLE:
                return ops->throttle(event, self);
        case PERF_RECORD_UNTHROTTLE:
                return ops->unthrottle(event, self);
        case PERF_RECORD_HEADER_ATTR:
                return ops->attr(event, self);
        case PERF_RECORD_HEADER_EVENT_TYPE:
                return ops->event_type(event, self);
        case PERF_RECORD_HEADER_TRACING_DATA:
                /* setup for reading amidst mmap */
                lseek(self->fd, offset + head, SEEK_SET);
                return ops->tracing_data(event, self);
        case PERF_RECORD_HEADER_BUILD_ID:
                return ops->build_id(event, self);
        default:
                self->unknown_events++;
                return -1;
        }
}

void perf_event_header__bswap(struct perf_event_header *self)
{
        self->type = bswap_32(self->type);
        self->misc = bswap_16(self->misc);
        self->size = bswap_16(self->size);
}

static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
        struct thread *thread = perf_session__findnew(self, 0);

        if (thread == NULL || thread__set_comm(thread, "swapper")) {
                pr_err("problem inserting idle task.\n");
                thread = NULL;
        }

        return thread;
}

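/*
 * Read exactly 'size' bytes, looping over short reads.  Returns the
 * number of bytes read, 0 on end of file, or a negative value when
 * read() fails.
 */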
int do_read(int fd, void *buf, size_t size)
{
        void *buf_start = buf;

        while (size) {
                int ret = read(fd, buf, size);

                if (ret <= 0)
                        return ret;

                size -= ret;
                buf += ret;
        }

        return buf - buf_start;
}

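/*
 * session_done can be set asynchronously (e.g. from a signal handler),
 * so read it through a volatile lvalue to force a reload on every
 * check.
 */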
#define session_done()  (*(volatile int *)(&session_done))
volatile int session_done;

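/*
 * Pipe mode: the input cannot be mmap()'ed or seeked, so read one
 * perf_event_header at a time, followed by the payload it announces.
 * If a record cannot be parsed, advance 8 bytes at a time in the hope
 * of resynchronizing with the stream.
 */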
static int __perf_session__process_pipe_events(struct perf_session *self,
                                               struct perf_event_ops *ops)
{
        event_t event;
        uint32_t size;
        int skip = 0;
        u64 head;
        int err;
        void *p;

        perf_event_ops__fill_defaults(ops);

        head = 0;
more:
        err = do_read(self->fd, &event, sizeof(struct perf_event_header));
        if (err <= 0) {
                if (err == 0)
                        goto done;

                pr_err("failed to read event header\n");
                goto out_err;
        }

        if (self->header.needs_swap)
                perf_event_header__bswap(&event.header);

        size = event.header.size;
        if (size == 0)
                size = 8;

        p = &event;
        p += sizeof(struct perf_event_header);

        err = do_read(self->fd, p, size - sizeof(struct perf_event_header));
        if (err <= 0) {
                if (err == 0) {
                        pr_err("unexpected end of event stream\n");
                        goto done;
                }

                pr_err("failed to read event data\n");
                goto out_err;
        }

        if (size == 0 ||
            (skip = perf_session__process_event(self, &event, ops,
                                                0, head)) < 0) {
                dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
                            head, event.header.size, event.header.type);
                /*
                 * assume we lost track of the stream, check alignment, and
                 * increment a single u64 in the hope of catching on again
                 * 'soon'.
                 */
                if (unlikely(head & 7))
                        head &= ~7ULL;

                size = 8;
        }

        head += size;

        dump_printf("\n%#Lx [%#x]: event: %d\n",
                    head, event.header.size, event.header.type);

        if (skip > 0)
                head += skip;

        if (!session_done())
                goto more;
done:
        err = 0;
out_err:
        return err;
}

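/*
 * File mode: map a self->mmap_window-page window of the file and walk
 * the events inside it.  When the next event would cross the end of
 * the window, slide the window forward to a page boundary and remap.
 */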
int __perf_session__process_events(struct perf_session *self,
                                   u64 data_offset, u64 data_size,
                                   u64 file_size, struct perf_event_ops *ops)
{
        int err, mmap_prot, mmap_flags;
        u64 head, shift;
        u64 offset = 0;
        size_t page_size;
        event_t *event;
        uint32_t size;
        char *buf;
        struct ui_progress *progress = ui_progress__new("Processing events...",
                                                        self->size);
        if (progress == NULL)
                return -1;

        perf_event_ops__fill_defaults(ops);

        page_size = sysconf(_SC_PAGESIZE);

        head = data_offset;
        shift = page_size * (head / page_size);
        offset += shift;
        head -= shift;

        mmap_prot  = PROT_READ;
        mmap_flags = MAP_SHARED;

        /*
         * Byte-swapping is done in place, so map a private, writable
         * copy when the file needs it.
         */
        if (self->header.needs_swap) {
                mmap_prot  |= PROT_WRITE;
                mmap_flags = MAP_PRIVATE;
        }
remap:
        buf = mmap(NULL, page_size * self->mmap_window, mmap_prot,
                   mmap_flags, self->fd, offset);
        if (buf == MAP_FAILED) {
                pr_err("failed to mmap file\n");
                err = -errno;
                goto out_err;
        }

more:
        event = (event_t *)(buf + head);
        ui_progress__update(progress, offset);

        if (self->header.needs_swap)
                perf_event_header__bswap(&event->header);
        size = event->header.size;
        if (size == 0)
                size = 8;

        if (head + event->header.size >= page_size * self->mmap_window) {
                int munmap_ret;

                shift = page_size * (head / page_size);

                munmap_ret = munmap(buf, page_size * self->mmap_window);
                assert(munmap_ret == 0);

                offset += shift;
                head -= shift;
                goto remap;
        }

        size = event->header.size;

        dump_printf("\n%#Lx [%#x]: event: %d\n",
                    offset + head, event->header.size, event->header.type);

        if (size == 0 ||
            perf_session__process_event(self, event, ops, offset, head) < 0) {
                dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
                            offset + head, event->header.size,
                            event->header.type);
                /*
                 * assume we lost track of the stream, check alignment, and
                 * increment a single u64 in the hope of catching on again
                 * 'soon'.
                 */
                if (unlikely(head & 7))
                        head &= ~7ULL;

                size = 8;
        }

        head += size;

        if (offset + head >= data_offset + data_size)
                goto done;

        if (offset + head < file_size)
                goto more;
done:
        err = 0;
        /* do the final flush for ordered samples */
        self->ordered_samples.flush_limit = ULLONG_MAX;
        flush_sample_queue(self, ops);
out_err:
        ui_progress__delete(progress);
        return err;
}

int perf_session__process_events(struct perf_session *self,
                                 struct perf_event_ops *ops)
{
        int err;

        if (perf_session__register_idle_thread(self) == NULL)
                return -ENOMEM;

        if (!symbol_conf.full_paths) {
                char bf[PATH_MAX];

                if (getcwd(bf, sizeof(bf)) == NULL) {
                        err = -errno;
out_getcwd_err:
                        pr_err("failed to get the current directory\n");
                        goto out_err;
                }
                self->cwd = strdup(bf);
                if (self->cwd == NULL) {
                        err = -ENOMEM;
                        goto out_getcwd_err;
                }
                self->cwdlen = strlen(self->cwd);
        }

        if (!self->fd_pipe)
                err = __perf_session__process_events(self,
                                                     self->header.data_offset,
                                                     self->header.data_size,
                                                     self->size, ops);
        else
                err = __perf_session__process_pipe_events(self, ops);
out_err:
        return err;
}

bool perf_session__has_traces(struct perf_session *self, const char *msg)
{
        if (!(self->sample_type & PERF_SAMPLE_RAW)) {
                pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
                return false;
        }

        return true;
}

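/*
 * Remember which symbol (typically "_text") and which address to use
 * as the reference for computing kernel relocation on the given maps.
 * The copied name is truncated at the first ']', if any.
 */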
int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps,
                                             const char *symbol_name,
                                             u64 addr)
{
        char *bracket;
        enum map_type i;
        struct ref_reloc_sym *ref;

        ref = zalloc(sizeof(struct ref_reloc_sym));
        if (ref == NULL)
                return -ENOMEM;

        ref->name = strdup(symbol_name);
        if (ref->name == NULL) {
                free(ref);
                return -ENOMEM;
        }

        bracket = strchr(ref->name, ']');
        if (bracket)
                *bracket = '\0';

        ref->addr = addr;

        for (i = 0; i < MAP__NR_TYPES; ++i) {
                struct kmap *kmap = map__kmap(maps[i]);
                kmap->ref_reloc_sym = ref;
        }

        return 0;
}