/* tools/perf/util/session.c */
#define _FILE_OFFSET_BITS 64

#include <linux/kernel.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "evlist.h"
#include "evsel.h"
#include "session.h"
#include "sort.h"
#include "util.h"

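/*
 * Open the input for a session.  A filename of "-" means a pipe on stdin,
 * where only the header can be validated up front.  For regular files the
 * ownership, size and header are all checked before the session is used.
 */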
static int perf_session__open(struct perf_session *self, bool force)
{
        struct stat input_stat;

        if (!strcmp(self->filename, "-")) {
                self->fd_pipe = true;
                self->fd = STDIN_FILENO;

                if (perf_session__read_header(self, self->fd) < 0) {
                        pr_err("incompatible file format\n");
                        return -1;
                }

                return 0;
        }

        self->fd = open(self->filename, O_RDONLY);
        if (self->fd < 0) {
                int err = errno;

                pr_err("failed to open %s: %s", self->filename, strerror(err));
                if (err == ENOENT && !strcmp(self->filename, "perf.data"))
                        pr_err("  (try 'perf record' first)");
                pr_err("\n");
                /* use the saved errno: pr_err() may have clobbered it */
                return -err;
        }

        if (fstat(self->fd, &input_stat) < 0)
                goto out_close;

        if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
                pr_err("file %s not owned by current user or root\n",
                       self->filename);
                goto out_close;
        }

        if (!input_stat.st_size) {
                pr_info("zero-sized file (%s), nothing to do!\n",
                        self->filename);
                goto out_close;
        }

        if (perf_session__read_header(self, self->fd) < 0) {
                pr_err("incompatible file format\n");
                goto out_close;
        }

        self->size = input_stat.st_size;
        return 0;

out_close:
        close(self->fd);
        self->fd = -1;
        return -1;
}

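/*
 * Compute the size of the sample_id_all trailer the kernel appends to every
 * non-sample event when perf_event_attr.sample_id_all is set.  Only the id
 * fields selected in sample_type contribute; TID and CPU each carry a pair
 * of 32-bit values, hence the "* 2".
 */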
static void perf_session__id_header_size(struct perf_session *session)
{
        struct perf_sample *data;
        u64 sample_type = session->sample_type;
        u16 size = 0;

        if (!session->sample_id_all)
                goto out;

        if (sample_type & PERF_SAMPLE_TID)
                size += sizeof(data->tid) * 2;

        if (sample_type & PERF_SAMPLE_TIME)
                size += sizeof(data->time);

        if (sample_type & PERF_SAMPLE_ID)
                size += sizeof(data->id);

        if (sample_type & PERF_SAMPLE_STREAM_ID)
                size += sizeof(data->stream_id);

        if (sample_type & PERF_SAMPLE_CPU)
                size += sizeof(data->cpu) * 2;
out:
        session->id_hdr_size = size;
}

void perf_session__update_sample_type(struct perf_session *self)
{
        self->sample_type = perf_evlist__sample_type(self->evlist);
        self->sample_id_all = perf_evlist__sample_id_all(self->evlist);
        perf_session__id_header_size(self);
}

int perf_session__create_kernel_maps(struct perf_session *self)
{
        int ret = machine__create_kernel_maps(&self->host_machine);

        if (ret >= 0)
                ret = machines__create_guest_kernel_maps(&self->machines);
        return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *self)
{
        machine__destroy_kernel_maps(&self->host_machine);
        machines__destroy_guest_kernel_maps(&self->machines);
}

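/*
 * Allocate and initialize a session.  The filename is stored in the
 * flexible array at the end of the struct.  A minimal usage sketch
 * (illustrative only; the ops callbacks are supplied by the caller):
 *
 *      struct perf_session *s = perf_session__new("perf.data", O_RDONLY,
 *                                                  false, false, &ops);
 *      if (s != NULL) {
 *              perf_session__process_events(s, &ops);
 *              perf_session__delete(s);
 *      }
 */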
struct perf_session *perf_session__new(const char *filename, int mode,
                                       bool force, bool repipe,
                                       struct perf_event_ops *ops)
{
        size_t len = filename ? strlen(filename) + 1 : 0;
        struct perf_session *self = zalloc(sizeof(*self) + len);

        if (self == NULL)
                goto out;

        if (filename)
                memcpy(self->filename, filename, len);
        self->threads = RB_ROOT;
        INIT_LIST_HEAD(&self->dead_threads);
        self->last_match = NULL;
        /*
         * On 64-bit we can mmap the data file in one go. No need for tiny mmap
         * slices. On 32-bit we use 32MB.
         */
#if BITS_PER_LONG == 64
        self->mmap_window = ULLONG_MAX;
#else
        self->mmap_window = 32 * 1024 * 1024ULL;
#endif
        self->machines = RB_ROOT;
        self->repipe = repipe;
        INIT_LIST_HEAD(&self->ordered_samples.samples);
        INIT_LIST_HEAD(&self->ordered_samples.sample_cache);
        INIT_LIST_HEAD(&self->ordered_samples.to_free);
        machine__init(&self->host_machine, "", HOST_KERNEL_ID);

        if (mode == O_RDONLY) {
                if (perf_session__open(self, force) < 0)
                        goto out_delete;
                perf_session__update_sample_type(self);
        } else if (mode == O_WRONLY) {
                /*
                 * In O_RDONLY mode this will be performed when reading the
                 * kernel MMAP event, in perf_event__process_mmap().
                 */
                if (perf_session__create_kernel_maps(self) < 0)
                        goto out_delete;
        }

        if (ops && ops->ordering_requires_timestamps &&
            ops->ordered_samples && !self->sample_id_all) {
                dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
                ops->ordered_samples = false;
        }

out:
        return self;
out_delete:
        perf_session__delete(self);
        return NULL;
}

static void perf_session__delete_dead_threads(struct perf_session *self)
{
        struct thread *n, *t;

        list_for_each_entry_safe(t, n, &self->dead_threads, node) {
                list_del(&t->node);
                thread__delete(t);
        }
}

static void perf_session__delete_threads(struct perf_session *self)
{
        struct rb_node *nd = rb_first(&self->threads);

        while (nd) {
                struct thread *t = rb_entry(nd, struct thread, rb_node);

                /* advance before erasing: rb_erase() leaves t's links stale */
                nd = rb_next(nd);
                rb_erase(&t->rb_node, &self->threads);
                thread__delete(t);
        }
}

void perf_session__delete(struct perf_session *self)
{
        perf_session__destroy_kernel_maps(self);
        perf_session__delete_dead_threads(self);
        perf_session__delete_threads(self);
        machine__exit(&self->host_machine);
        close(self->fd);
        free(self);
}

void perf_session__remove_thread(struct perf_session *self, struct thread *th)
{
        self->last_match = NULL;
        rb_erase(&th->rb_node, &self->threads);
        /*
         * We may have references to this thread, for instance in some hist_entry
         * instances, so just move them to a separate list.
         */
        list_add_tail(&th->node, &self->dead_threads);
}

static bool symbol__match_parent_regex(struct symbol *sym)
{
        if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
                return true;

        return false;
}

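/*
 * Walk a raw kernel callchain and resolve each ip to a map/symbol pair on
 * the given thread, appending the result to the session's callchain cursor.
 * Entries at or above PERF_CONTEXT_MAX are not addresses but context markers
 * that switch the cpumode used to resolve the ips that follow them.
 */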
int perf_session__resolve_callchain(struct perf_session *self,
                                    struct thread *thread,
                                    struct ip_callchain *chain,
                                    struct symbol **parent)
{
        u8 cpumode = PERF_RECORD_MISC_USER;
        unsigned int i;
        int err;

        callchain_cursor_reset(&self->callchain_cursor);

        for (i = 0; i < chain->nr; i++) {
                u64 ip = chain->ips[i];
                struct addr_location al;

                if (ip >= PERF_CONTEXT_MAX) {
                        switch (ip) {
                        case PERF_CONTEXT_HV:
                                cpumode = PERF_RECORD_MISC_HYPERVISOR;  break;
                        case PERF_CONTEXT_KERNEL:
                                cpumode = PERF_RECORD_MISC_KERNEL;      break;
                        case PERF_CONTEXT_USER:
                                cpumode = PERF_RECORD_MISC_USER;        break;
                        default:
                                break;
                        }
                        continue;
                }

                al.filtered = false;
                thread__find_addr_location(thread, self, cpumode,
                                MAP__FUNCTION, thread->pid, ip, &al, NULL);
                if (al.sym != NULL) {
                        if (sort__has_parent && !*parent &&
                            symbol__match_parent_regex(al.sym))
                                *parent = al.sym;
                        if (!symbol_conf.use_callchain)
                                break;
                }

                err = callchain_cursor_append(&self->callchain_cursor,
                                              ip, al.map, al.sym);
                if (err)
                        return err;
        }

        return 0;
}

static int process_event_synth_stub(union perf_event *event __used,
                                    struct perf_session *session __used)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_sample_stub(union perf_event *event __used,
                                     struct perf_sample *sample __used,
                                     struct perf_evsel *evsel __used,
                                     struct perf_session *session __used)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_stub(union perf_event *event __used,
                              struct perf_sample *sample __used,
                              struct perf_session *session __used)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_finished_round_stub(union perf_event *event __used,
                                       struct perf_session *session __used,
                                       struct perf_event_ops *ops __used)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_finished_round(union perf_event *event,
                                  struct perf_session *session,
                                  struct perf_event_ops *ops);

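/*
 * Fill every callback a tool did not set with a stub so the dispatch code
 * below can call through the ops table unconditionally.  LOST events get a
 * real default handler, and finished_round is only wired up to the flushing
 * logic when the tool asked for ordered samples.
 */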
static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
{
        if (handler->sample == NULL)
                handler->sample = process_event_sample_stub;
        if (handler->mmap == NULL)
                handler->mmap = process_event_stub;
        if (handler->comm == NULL)
                handler->comm = process_event_stub;
        if (handler->fork == NULL)
                handler->fork = process_event_stub;
        if (handler->exit == NULL)
                handler->exit = process_event_stub;
        if (handler->lost == NULL)
                handler->lost = perf_event__process_lost;
        if (handler->read == NULL)
                handler->read = process_event_stub;
        if (handler->throttle == NULL)
                handler->throttle = process_event_stub;
        if (handler->unthrottle == NULL)
                handler->unthrottle = process_event_stub;
        if (handler->attr == NULL)
                handler->attr = process_event_synth_stub;
        if (handler->event_type == NULL)
                handler->event_type = process_event_synth_stub;
        if (handler->tracing_data == NULL)
                handler->tracing_data = process_event_synth_stub;
        if (handler->build_id == NULL)
                handler->build_id = process_event_synth_stub;
        if (handler->finished_round == NULL) {
                if (handler->ordered_samples)
                        handler->finished_round = process_finished_round;
                else
                        handler->finished_round = process_finished_round_stub;
        }
}

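/*
 * Byte-swap a buffer as a sequence of u64s.  byte_size is expected to be a
 * multiple of sizeof(u64): a trailing partial word would still be swapped
 * whole, reading past the logical end of the buffer.
 */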
void mem_bswap_64(void *src, int byte_size)
{
        u64 *m = src;

        while (byte_size > 0) {
                *m = bswap_64(*m);
                byte_size -= sizeof(u64);
                ++m;
        }
}

static void perf_event__all64_swap(union perf_event *event)
{
        struct perf_event_header *hdr = &event->header;
        mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event)
{
        event->comm.pid = bswap_32(event->comm.pid);
        event->comm.tid = bswap_32(event->comm.tid);
}

static void perf_event__mmap_swap(union perf_event *event)
{
        event->mmap.pid   = bswap_32(event->mmap.pid);
        event->mmap.tid   = bswap_32(event->mmap.tid);
        event->mmap.start = bswap_64(event->mmap.start);
        event->mmap.len   = bswap_64(event->mmap.len);
        event->mmap.pgoff = bswap_64(event->mmap.pgoff);
}

static void perf_event__task_swap(union perf_event *event)
{
        event->fork.pid  = bswap_32(event->fork.pid);
        event->fork.tid  = bswap_32(event->fork.tid);
        event->fork.ppid = bswap_32(event->fork.ppid);
        event->fork.ptid = bswap_32(event->fork.ptid);
        event->fork.time = bswap_64(event->fork.time);
}

static void perf_event__read_swap(union perf_event *event)
{
        event->read.pid          = bswap_32(event->read.pid);
        event->read.tid          = bswap_32(event->read.tid);
        event->read.value        = bswap_64(event->read.value);
        event->read.time_enabled = bswap_64(event->read.time_enabled);
        event->read.time_running = bswap_64(event->read.time_running);
        event->read.id           = bswap_64(event->read.id);
}

static void perf_event__attr_swap(union perf_event *event)
{
        size_t size;

        event->attr.attr.type           = bswap_32(event->attr.attr.type);
        event->attr.attr.size           = bswap_32(event->attr.attr.size);
        event->attr.attr.config         = bswap_64(event->attr.attr.config);
        event->attr.attr.sample_period  = bswap_64(event->attr.attr.sample_period);
        event->attr.attr.sample_type    = bswap_64(event->attr.attr.sample_type);
        event->attr.attr.read_format    = bswap_64(event->attr.attr.read_format);
        event->attr.attr.wakeup_events  = bswap_32(event->attr.attr.wakeup_events);
        event->attr.attr.bp_type        = bswap_32(event->attr.attr.bp_type);
        event->attr.attr.bp_addr        = bswap_64(event->attr.attr.bp_addr);
        event->attr.attr.bp_len         = bswap_64(event->attr.attr.bp_len);

        size = event->header.size;
        size -= (void *)&event->attr.id - (void *)event;
        mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_type_swap(union perf_event *event)
{
        event->event_type.event_type.event_id =
                bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event)
{
        event->tracing_data.size = bswap_32(event->tracing_data.size);
}

typedef void (*perf_event__swap_op)(union perf_event *event);

static perf_event__swap_op perf_event__swap_ops[] = {
        [PERF_RECORD_MMAP]                = perf_event__mmap_swap,
        [PERF_RECORD_COMM]                = perf_event__comm_swap,
        [PERF_RECORD_FORK]                = perf_event__task_swap,
        [PERF_RECORD_EXIT]                = perf_event__task_swap,
        [PERF_RECORD_LOST]                = perf_event__all64_swap,
        [PERF_RECORD_READ]                = perf_event__read_swap,
        [PERF_RECORD_SAMPLE]              = perf_event__all64_swap,
        [PERF_RECORD_HEADER_ATTR]         = perf_event__attr_swap,
        [PERF_RECORD_HEADER_EVENT_TYPE]   = perf_event__event_type_swap,
        [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
        [PERF_RECORD_HEADER_BUILD_ID]     = NULL,
        [PERF_RECORD_HEADER_MAX]          = NULL,
};

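/*
 * Ordered-samples machinery: events are queued as sample_queue entries on a
 * time-sorted list and only delivered once a FINISHED_ROUND pseudo event
 * guarantees that no earlier timestamp can still show up (see the comment
 * above process_finished_round() below).
 */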
struct sample_queue {
        u64                     timestamp;
        u64                     file_offset;
        union perf_event        *event;
        struct list_head        list;
};

static void perf_session_free_sample_buffers(struct perf_session *session)
{
        struct ordered_samples *os = &session->ordered_samples;

        while (!list_empty(&os->to_free)) {
                struct sample_queue *sq;

                sq = list_entry(os->to_free.next, struct sample_queue, list);
                list_del(&sq->list);
                free(sq);
        }
}

static int perf_session_deliver_event(struct perf_session *session,
                                      union perf_event *event,
                                      struct perf_sample *sample,
                                      struct perf_event_ops *ops,
                                      u64 file_offset);

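/*
 * Deliver every queued event with a timestamp at or below os->next_flush and
 * recycle the flushed entries onto the sample_cache free list.  If the
 * remembered last_sample was among the flushed entries, re-point it at the
 * tail of what remains.
 */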
static void flush_sample_queue(struct perf_session *s,
                               struct perf_event_ops *ops)
{
        struct ordered_samples *os = &s->ordered_samples;
        struct list_head *head = &os->samples;
        struct sample_queue *tmp, *iter;
        struct perf_sample sample;
        u64 limit = os->next_flush;
        u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;

        if (!ops->ordered_samples || !limit)
                return;

        list_for_each_entry_safe(iter, tmp, head, list) {
                if (iter->timestamp > limit)
                        break;

                perf_session__parse_sample(s, iter->event, &sample);
                perf_session_deliver_event(s, iter->event, &sample, ops,
                                           iter->file_offset);

                os->last_flush = iter->timestamp;
                list_del(&iter->list);
                list_add(&iter->list, &os->sample_cache);
        }

        if (list_empty(head)) {
                os->last_sample = NULL;
        } else if (last_ts <= limit) {
                os->last_sample =
                        list_entry(head->prev, struct sample_queue, list);
        }
}

/*
 * When perf record finishes a pass over every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in the pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and read in the pass n + 1.
 * Hence when we start to read the pass n + 2, we can safely flush all
 * events with timestamps below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush all events below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush all events below timestamp 7
 *      etc...
 */
static int process_finished_round(union perf_event *event __used,
                                  struct perf_session *session,
                                  struct perf_event_ops *ops)
{
        flush_sample_queue(session, ops);
        session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;

        return 0;
}

/* The queue is ordered by time */
static void __queue_event(struct sample_queue *new, struct perf_session *s)
{
        struct ordered_samples *os = &s->ordered_samples;
        struct sample_queue *sample = os->last_sample;
        u64 timestamp = new->timestamp;
        struct list_head *p;

        os->last_sample = new;

        if (!sample) {
                list_add(&new->list, &os->samples);
                os->max_timestamp = timestamp;
                return;
        }

        /*
         * last_sample might point to some random place in the list as it's
         * the last queued event. We expect that the new event is close to
         * this.
         */
        if (sample->timestamp <= timestamp) {
                while (sample->timestamp <= timestamp) {
                        p = sample->list.next;
                        if (p == &os->samples) {
                                list_add_tail(&new->list, &os->samples);
                                os->max_timestamp = timestamp;
                                return;
                        }
                        sample = list_entry(p, struct sample_queue, list);
                }
                list_add_tail(&new->list, &sample->list);
        } else {
                while (sample->timestamp > timestamp) {
                        p = sample->list.prev;
                        if (p == &os->samples) {
                                list_add(&new->list, &os->samples);
                                return;
                        }
                        sample = list_entry(p, struct sample_queue, list);
                }
                list_add(&new->list, &sample->list);
        }
}

#define MAX_SAMPLE_BUFFER       (64 * 1024 / sizeof(struct sample_queue))

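/*
 * Queue one event for ordered delivery.  Entries come from the sample_cache
 * free list when possible, otherwise from a 64KB slab of sample_queue slots
 * allocated on demand.  Slot 0 of each slab is sacrificed: its embedded list
 * node links the slab itself onto to_free for later bulk freeing, which is
 * why allocation starts at index 1.
 */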
static int perf_session_queue_event(struct perf_session *s, union perf_event *event,
                                    struct perf_sample *sample, u64 file_offset)
{
        struct ordered_samples *os = &s->ordered_samples;
        struct list_head *sc = &os->sample_cache;
        u64 timestamp = sample->time;
        struct sample_queue *new;

        if (!timestamp || timestamp == ~0ULL)
                return -ETIME;

        if (timestamp < s->ordered_samples.last_flush) {
                printf("Warning: Timestamp below last timeslice flush\n");
                return -EINVAL;
        }

        if (!list_empty(sc)) {
                new = list_entry(sc->next, struct sample_queue, list);
                list_del(&new->list);
        } else if (os->sample_buffer) {
                new = os->sample_buffer + os->sample_buffer_idx;
                if (++os->sample_buffer_idx == MAX_SAMPLE_BUFFER)
                        os->sample_buffer = NULL;
        } else {
                os->sample_buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new));
                if (!os->sample_buffer)
                        return -ENOMEM;
                list_add(&os->sample_buffer->list, &os->to_free);
                os->sample_buffer_idx = 2;
                new = os->sample_buffer + 1;
        }

        new->timestamp = timestamp;
        new->file_offset = file_offset;
        new->event = event;

        __queue_event(new, s);

        return 0;
}

static void callchain__printf(struct perf_sample *sample)
{
        unsigned int i;

        printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr);

        for (i = 0; i < sample->callchain->nr; i++)
                printf("..... %2u: %016" PRIx64 "\n",
                       i, sample->callchain->ips[i]);
}

static void perf_session__print_tstamp(struct perf_session *session,
                                       union perf_event *event,
                                       struct perf_sample *sample)
{
        if (event->header.type != PERF_RECORD_SAMPLE &&
            !session->sample_id_all) {
                fputs("-1 -1 ", stdout);
                return;
        }

        if (session->sample_type & PERF_SAMPLE_CPU)
                printf("%u ", sample->cpu);

        if (session->sample_type & PERF_SAMPLE_TIME)
                printf("%" PRIu64 " ", sample->time);
}

static void dump_event(struct perf_session *session, union perf_event *event,
                       u64 file_offset, struct perf_sample *sample)
{
        if (!dump_trace)
                return;

        printf("\n%#" PRIx64 " [%#x]: event: %d\n",
               file_offset, event->header.size, event->header.type);

        trace_event(event);

        if (sample)
                perf_session__print_tstamp(session, event, sample);

        printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
               event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct perf_session *session, union perf_event *event,
                        struct perf_sample *sample)
{
        if (!dump_trace)
                return;

        printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 "\n",
               event->header.misc, sample->pid, sample->tid, sample->ip,
               sample->period);

        if (session->sample_type & PERF_SAMPLE_CALLCHAIN)
                callchain__printf(sample);
}

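/*
 * Demultiplex one kernel event to the matching ops callback.  SAMPLE events
 * are additionally mapped to their evsel via the sample id; events with an
 * unknown id or type are counted in the session stats and rejected.
 */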
static int perf_session_deliver_event(struct perf_session *session,
                                      union perf_event *event,
                                      struct perf_sample *sample,
                                      struct perf_event_ops *ops,
                                      u64 file_offset)
{
        struct perf_evsel *evsel;

        dump_event(session, event, file_offset, sample);

        switch (event->header.type) {
        case PERF_RECORD_SAMPLE:
                dump_sample(session, event, sample);
                evsel = perf_evlist__id2evsel(session->evlist, sample->id);
                if (evsel == NULL) {
                        ++session->hists.stats.nr_unknown_id;
                        return -1;
                }
                return ops->sample(event, sample, evsel, session);
        case PERF_RECORD_MMAP:
                return ops->mmap(event, sample, session);
        case PERF_RECORD_COMM:
                return ops->comm(event, sample, session);
        case PERF_RECORD_FORK:
                return ops->fork(event, sample, session);
        case PERF_RECORD_EXIT:
                return ops->exit(event, sample, session);
        case PERF_RECORD_LOST:
                return ops->lost(event, sample, session);
        case PERF_RECORD_READ:
                return ops->read(event, sample, session);
        case PERF_RECORD_THROTTLE:
                return ops->throttle(event, sample, session);
        case PERF_RECORD_UNTHROTTLE:
                return ops->unthrottle(event, sample, session);
        default:
                ++session->hists.stats.nr_unknown_events;
                return -1;
        }
}

static int perf_session__preprocess_sample(struct perf_session *session,
                                           union perf_event *event, struct perf_sample *sample)
{
        if (event->header.type != PERF_RECORD_SAMPLE ||
            !(session->sample_type & PERF_SAMPLE_CALLCHAIN))
                return 0;

        if (!ip_callchain__valid(sample->callchain, event)) {
                pr_debug("call-chain problem with event, skipping it.\n");
                ++session->hists.stats.nr_invalid_chains;
                session->hists.stats.total_invalid_chains += sample->period;
                return -EINVAL;
        }
        return 0;
}

static int perf_session__process_user_event(struct perf_session *session, union perf_event *event,
                                            struct perf_event_ops *ops, u64 file_offset)
{
        dump_event(session, event, file_offset, NULL);

        /* These events are processed right away */
        switch (event->header.type) {
        case PERF_RECORD_HEADER_ATTR:
                return ops->attr(event, session);
        case PERF_RECORD_HEADER_EVENT_TYPE:
                return ops->event_type(event, session);
        case PERF_RECORD_HEADER_TRACING_DATA:
                /* setup for reading amidst mmap */
                lseek(session->fd, file_offset, SEEK_SET);
                return ops->tracing_data(event, session);
        case PERF_RECORD_HEADER_BUILD_ID:
                return ops->build_id(event, session);
        case PERF_RECORD_FINISHED_ROUND:
                return ops->finished_round(event, session, ops);
        default:
                return -EINVAL;
        }
}

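/*
 * Per-event pipeline: byte-swap if the file was recorded with the opposite
 * endianness, bump the event-type stats, hand user (synthesized) events off
 * immediately, then parse the sample data and either queue the event for
 * ordered delivery or deliver it straight away.
 */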
static int perf_session__process_event(struct perf_session *session,
                                       union perf_event *event,
                                       struct perf_event_ops *ops,
                                       u64 file_offset)
{
        struct perf_sample sample;
        int ret;

        /* validate the type before using it to index perf_event__swap_ops[] */
        if (event->header.type >= PERF_RECORD_HEADER_MAX)
                return -EINVAL;

        if (session->header.needs_swap &&
            perf_event__swap_ops[event->header.type])
                perf_event__swap_ops[event->header.type](event);

        hists__inc_nr_events(&session->hists, event->header.type);

        if (event->header.type >= PERF_RECORD_USER_TYPE_START)
                return perf_session__process_user_event(session, event, ops, file_offset);

        /*
         * For all kernel events we get the sample data
         */
        perf_session__parse_sample(session, event, &sample);

        /* Preprocess sample records - precheck callchains */
        if (perf_session__preprocess_sample(session, event, &sample))
                return 0;

        if (ops->ordered_samples) {
                ret = perf_session_queue_event(session, event, &sample,
                                               file_offset);
                if (ret != -ETIME)
                        return ret;
        }

        return perf_session_deliver_event(session, event, &sample, ops,
                                          file_offset);
}

void perf_event_header__bswap(struct perf_event_header *self)
{
        self->type = bswap_32(self->type);
        self->misc = bswap_16(self->misc);
        self->size = bswap_16(self->size);
}

static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
        struct thread *thread = perf_session__findnew(self, 0);

        if (thread == NULL || thread__set_comm(thread, "swapper")) {
                pr_err("problem inserting idle task.\n");
                thread = NULL;
        }

        return thread;
}

static void perf_session__warn_about_errors(const struct perf_session *session,
                                            const struct perf_event_ops *ops)
{
        if (ops->lost == perf_event__process_lost &&
            session->hists.stats.total_lost != 0) {
                ui__warning("Processed %" PRIu64 " events and LOST %" PRIu64
                            "!\n\nCheck IO/CPU overload!\n\n",
                            session->hists.stats.total_period,
                            session->hists.stats.total_lost);
        }

        if (session->hists.stats.nr_unknown_events != 0) {
                ui__warning("Found %u unknown events!\n\n"
                            "Is this an older tool processing a perf.data "
                            "file generated by a more recent tool?\n\n"
                            "If that is not the case, consider "
                            "reporting to linux-kernel@vger.kernel.org.\n\n",
                            session->hists.stats.nr_unknown_events);
        }

        if (session->hists.stats.nr_unknown_id != 0) {
                ui__warning("%u samples with id not present in the header\n",
                            session->hists.stats.nr_unknown_id);
        }

        if (session->hists.stats.nr_invalid_chains != 0) {
                ui__warning("Found invalid callchains!\n\n"
                            "%u out of %u events were discarded for this reason.\n\n"
                            "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
                            session->hists.stats.nr_invalid_chains,
                            session->hists.stats.nr_events[PERF_RECORD_SAMPLE]);
        }
}

#define session_done()  (*(volatile int *)(&session_done))
volatile int session_done;

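/*
 * Read events from a pipe (stdin).  Since we cannot seek, the header and
 * payload of each event are read incrementally; on an unparseable event we
 * realign to a u64 boundary and skip one u64 at a time in the hope of
 * resynchronizing with the stream.
 */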
static int __perf_session__process_pipe_events(struct perf_session *self,
                                               struct perf_event_ops *ops)
{
        union perf_event event;
        uint32_t size;
        int skip = 0;
        u64 head;
        int err;
        void *p;

        perf_event_ops__fill_defaults(ops);

        head = 0;
more:
        err = readn(self->fd, &event, sizeof(struct perf_event_header));
        if (err <= 0) {
                if (err == 0)
                        goto done;

                pr_err("failed to read event header\n");
                goto out_err;
        }

        if (self->header.needs_swap)
                perf_event_header__bswap(&event.header);

        size = event.header.size;
        if (size == 0)
                size = 8;

        p = &event;
        p += sizeof(struct perf_event_header);

        /* compare instead of subtracting: a bogus size < header size
         * must not underflow into a huge read length */
        if (size > sizeof(struct perf_event_header)) {
                err = readn(self->fd, p, size - sizeof(struct perf_event_header));
                if (err <= 0) {
                        if (err == 0) {
                                pr_err("unexpected end of event stream\n");
                                goto done;
                        }

                        pr_err("failed to read event data\n");
                        goto out_err;
                }
        }

        if (size == 0 ||
            (skip = perf_session__process_event(self, &event, ops, head)) < 0) {
                dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n",
                            head, event.header.size, event.header.type);
                /*
                 * assume we lost track of the stream, check alignment, and
                 * increment a single u64 in the hope of catching on again 'soon'.
                 */
                if (unlikely(head & 7))
                        head &= ~7ULL;

                size = 8;
        }

        head += size;

        if (skip > 0)
                head += skip;

        if (!session_done())
                goto more;
done:
        err = 0;
out_err:
        perf_session__warn_about_errors(self, ops);
        perf_session_free_sample_buffers(self);
        return err;
}

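/*
 * Process events from a regular file by mmaping it in mmap_window-sized
 * slices.  When an event straddles the end of the current slice, the window
 * is re-mapped at the page containing the current head.  Up to 8 old maps
 * are kept around so events still sitting in the ordered-samples queue
 * remain addressable until they are flushed.
 */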
int __perf_session__process_events(struct perf_session *session,
                                   u64 data_offset, u64 data_size,
                                   u64 file_size, struct perf_event_ops *ops)
{
        u64 head, page_offset, file_offset, file_pos, progress_next;
        int err, mmap_prot, mmap_flags, map_idx = 0;
        struct ui_progress *progress;
        size_t  page_size, mmap_size;
        char *buf, *mmaps[8];
        union perf_event *event;
        uint32_t size;

        perf_event_ops__fill_defaults(ops);

        page_size = sysconf(_SC_PAGESIZE);

        page_offset = page_size * (data_offset / page_size);
        file_offset = page_offset;
        head = data_offset - page_offset;

        if (data_offset + data_size < file_size)
                file_size = data_offset + data_size;

        progress_next = file_size / 16;
        progress = ui_progress__new("Processing events...", file_size);
        if (progress == NULL)
                return -1;

        mmap_size = session->mmap_window;
        if (mmap_size > file_size)
                mmap_size = file_size;

        memset(mmaps, 0, sizeof(mmaps));

        mmap_prot  = PROT_READ;
        mmap_flags = MAP_SHARED;

        if (session->header.needs_swap) {
                /* swapping is done in place, so map a private, writable copy */
                mmap_prot  |= PROT_WRITE;
                mmap_flags = MAP_PRIVATE;
        }
remap:
        buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, session->fd,
                   file_offset);
        if (buf == MAP_FAILED) {
                pr_err("failed to mmap file\n");
                err = -errno;
                goto out_err;
        }
        mmaps[map_idx] = buf;
        map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
        file_pos = file_offset + head;

more:
        event = (union perf_event *)(buf + head);

        if (session->header.needs_swap)
                perf_event_header__bswap(&event->header);

        if (head + event->header.size > mmap_size) {
                if (mmaps[map_idx]) {
                        munmap(mmaps[map_idx], mmap_size);
                        mmaps[map_idx] = NULL;
                }

                page_offset = page_size * (head / page_size);
                file_offset += page_offset;
                head -= page_offset;
                goto remap;
        }

        size = event->header.size;

        if (size == 0 ||
            perf_session__process_event(session, event, ops, file_pos) < 0) {
                dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n",
                            file_offset + head, event->header.size,
                            event->header.type);
                /*
                 * assume we lost track of the stream, check alignment, and
                 * increment a single u64 in the hope of catching on again 'soon'.
                 */
                if (unlikely(head & 7))
                        head &= ~7ULL;

                size = 8;
        }

        head += size;
        file_pos += size;

        if (file_pos >= progress_next) {
                progress_next += file_size / 16;
                ui_progress__update(progress, file_pos);
        }

        if (file_pos < file_size)
                goto more;

        err = 0;
        /* do the final flush for ordered samples */
        session->ordered_samples.next_flush = ULLONG_MAX;
        flush_sample_queue(session, ops);
out_err:
        ui_progress__delete(progress);
        perf_session__warn_about_errors(session, ops);
        perf_session_free_sample_buffers(session);
        return err;
}

int perf_session__process_events(struct perf_session *self,
                                 struct perf_event_ops *ops)
{
        int err;

        if (perf_session__register_idle_thread(self) == NULL)
                return -ENOMEM;

        if (!self->fd_pipe)
                err = __perf_session__process_events(self,
                                                     self->header.data_offset,
                                                     self->header.data_size,
                                                     self->size, ops);
        else
                err = __perf_session__process_pipe_events(self, ops);

        return err;
}

bool perf_session__has_traces(struct perf_session *self, const char *msg)
{
        if (!(self->sample_type & PERF_SAMPLE_RAW)) {
                pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
                return false;
        }

        return true;
}

int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps,
                                             const char *symbol_name,
                                             u64 addr)
{
        char *bracket;
        enum map_type i;
        struct ref_reloc_sym *ref;

        ref = zalloc(sizeof(struct ref_reloc_sym));
        if (ref == NULL)
                return -ENOMEM;

        ref->name = strdup(symbol_name);
        if (ref->name == NULL) {
                free(ref);
                return -ENOMEM;
        }

        bracket = strchr(ref->name, ']');
        if (bracket)
                *bracket = '\0';

        ref->addr = addr;

        for (i = 0; i < MAP__NR_TYPES; ++i) {
                struct kmap *kmap = map__kmap(maps[i]);
                kmap->ref_reloc_sym = ref;
        }

        return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
{
        return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) +
               __dsos__fprintf(&self->host_machine.user_dsos, fp) +
               machines__fprintf_dsos(&self->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
                                          bool with_hits)
{
        size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits);
        return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits);
}

size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
        struct perf_evsel *pos;
        size_t ret = fprintf(fp, "Aggregated stats:\n");

        ret += hists__fprintf_nr_events(&session->hists, fp);

        list_for_each_entry(pos, &session->evlist->entries, node) {
                ret += fprintf(fp, "%s stats:\n", event_name(pos));
                ret += hists__fprintf_nr_events(&pos->hists, fp);
        }

        return ret;
}

struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
                                                   unsigned int type)
{
        struct perf_evsel *pos;

        list_for_each_entry(pos, &session->evlist->entries, node) {
                if (pos->attr.type == type)
                        return pos;
        }
        return NULL;
}

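/*
 * Print the resolved symbol (and, with callchains enabled, the whole
 * resolved chain) for one sample, one "address symbol (dso)" line per
 * entry, falling back to empty strings when resolution failed.
 */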
void perf_session__print_symbols(union perf_event *event,
                                 struct perf_sample *sample,
                                 struct perf_session *session)
{
        struct addr_location al;
        const char *symname, *dsoname;
        struct callchain_cursor *cursor = &session->callchain_cursor;
        struct callchain_cursor_node *node;

        if (perf_event__preprocess_sample(event, session, &al, sample,
                                          NULL) < 0) {
                error("problem processing %d event, skipping it.\n",
                      event->header.type);
                return;
        }

        if (symbol_conf.use_callchain && sample->callchain) {

                if (perf_session__resolve_callchain(session, al.thread,
                                                    sample->callchain, NULL) != 0) {
                        if (verbose)
                                error("Failed to resolve callchain. Skipping\n");
                        return;
                }
                callchain_cursor_commit(cursor);

                while (1) {
                        node = callchain_cursor_current(cursor);
                        if (!node)
                                break;

                        if (node->sym && node->sym->name)
                                symname = node->sym->name;
                        else
                                symname = "";

                        if (node->map && node->map->dso && node->map->dso->name)
                                dsoname = node->map->dso->name;
                        else
                                dsoname = "";

                        printf("\t%16" PRIx64 " %s (%s)\n", node->ip, symname, dsoname);

                        callchain_cursor_advance(cursor);
                }

        } else {
                if (al.sym && al.sym->name)
                        symname = al.sym->name;
                else
                        symname = "";

                if (al.map && al.map->dso && al.map->dso->name)
                        dsoname = al.map->dso->name;
                else
                        dsoname = "";

                printf("%16" PRIx64 " %s (%s)", al.addr, symname, dsoname);
        }
}