diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index b7aade2184b2..8f83a1835766 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -5,6 +5,7 @@
 #include <byteswap.h>
 #include <unistd.h>
 #include <sys/types.h>
+#include <sys/mman.h>
 
 #include "session.h"
 #include "sort.h"
@@ -69,16 +70,14 @@ void perf_session__update_sample_type(struct perf_session *self)
 
 int perf_session__create_kernel_maps(struct perf_session *self)
 {
-	int ret;
-	struct rb_root *root = &self->kerninfo_root;
+	int ret = machine__create_kernel_maps(&self->host_machine);
 
-	ret = map_groups__create_kernel_maps(root, HOST_KERNEL_ID);
 	if (ret >= 0)
-		ret = map_groups__create_guest_kernel_maps(root);
+		ret = machines__create_guest_kernel_maps(&self->machines);
 	return ret;
 }
 
-struct perf_session *perf_session__new(const char *filename, int mode, bool force)
+struct perf_session *perf_session__new(const char *filename, int mode, bool force, bool repipe)
 {
 	size_t len = filename ? strlen(filename) + 1 : 0;
 	struct perf_session *self = zalloc(sizeof(*self) + len);
@@ -91,15 +90,15 @@ struct perf_session *perf_session__new(const char *filename, int mode, bool forc
 	memcpy(self->filename, filename, len);
 	self->threads = RB_ROOT;
-	self->stats_by_id = RB_ROOT;
+	self->hists_tree = RB_ROOT;
 	self->last_match = NULL;
 	self->mmap_window = 32;
 	self->cwd = NULL;
 	self->cwdlen = 0;
-	self->unknown_events = 0;
-	self->kerninfo_root = RB_ROOT;
-	self->ordered_samples.flush_limit = ULLONG_MAX;
+	self->machines = RB_ROOT;
+	self->repipe = repipe;
 	INIT_LIST_HEAD(&self->ordered_samples.samples_head);
+	machine__init(&self->host_machine, "", HOST_KERNEL_ID);
 
 	if (mode == O_RDONLY) {
 		if (perf_session__open(self, force) < 0)
 			goto out_delete;
@@ -194,6 +193,18 @@ static int process_event_stub(event_t *event __used,
 	return 0;
 }
 
+static int process_finished_round_stub(event_t *event __used,
+				       struct perf_session *session __used,
+				       struct perf_event_ops *ops __used)
+{
+	dump_printf(": unhandled!\n");
+	return 0;
+}
+
+static int process_finished_round(event_t *event,
+				  struct perf_session *session,
+				  struct perf_event_ops *ops);
+
 static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
 {
 	if (handler->sample == NULL)
@@ -222,35 +233,11 @@ static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
 		handler->tracing_data = process_event_stub;
 	if (handler->build_id == NULL)
 		handler->build_id = process_event_stub;
-}
-
-static const char *event__name[] = {
-	[0] = "TOTAL",
-	[PERF_RECORD_MMAP] = "MMAP",
-	[PERF_RECORD_LOST] = "LOST",
-	[PERF_RECORD_COMM] = "COMM",
-	[PERF_RECORD_EXIT] = "EXIT",
-	[PERF_RECORD_THROTTLE] = "THROTTLE",
-	[PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE",
-	[PERF_RECORD_FORK] = "FORK",
-	[PERF_RECORD_READ] = "READ",
-	[PERF_RECORD_SAMPLE] = "SAMPLE",
-	[PERF_RECORD_HEADER_ATTR] = "ATTR",
-	[PERF_RECORD_HEADER_EVENT_TYPE] = "EVENT_TYPE",
-	[PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA",
-	[PERF_RECORD_HEADER_BUILD_ID] = "BUILD_ID",
-};
-
-unsigned long event__total[PERF_RECORD_HEADER_MAX];
-
-void event__print_totals(void)
-{
-	int i;
-
-	for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
-		if (!event__name[i])
-			continue;
-		pr_info("%10s events: %10ld\n",
-			event__name[i], event__total[i]);
+
+	if (handler->finished_round == NULL) {
+		if (handler->ordered_samples)
+			handler->finished_round = process_finished_round;
+		else
+			handler->finished_round = process_finished_round_stub;
 	}
 }
 
@@ -359,16 +346,14 @@ struct sample_queue {
 	struct list_head list;
 };
 
-#define FLUSH_PERIOD (2 * NSEC_PER_SEC)
-
 static void flush_sample_queue(struct perf_session *s,
 			       struct perf_event_ops *ops)
 {
 	struct list_head *head = &s->ordered_samples.samples_head;
-	u64 limit = s->ordered_samples.flush_limit;
+	u64 limit = s->ordered_samples.next_flush;
 	struct sample_queue *tmp, *iter;
 
-	if (!ops->ordered_samples)
+	if (!ops->ordered_samples || !limit)
 		return;
 
 	list_for_each_entry_safe(iter, tmp, head, list) {
@@ -387,6 +372,55 @@ static void flush_sample_queue(struct perf_session *s,
 	}
 }
 
+/*
+ * When perf record finishes a pass over all buffers, it records this pseudo
+ * event.
+ * We record the max timestamp t found in pass n.
+ * Assuming these timestamps are monotonic across cpus, we know that if
+ * a buffer still has events with timestamps below t, they will all be
+ * available and read in pass n + 1.
+ * Hence when we start to read pass n + 2, we can safely flush all
+ * events with timestamps below t.
+ *
+ * ============ PASS n =================
+ *    CPU 0         |   CPU 1
+ *                  |
+ * cnt1 timestamps  |   cnt2 timestamps
+ *   1              |         2
+ *   2              |         3
+ *   -              |         4  <--- max recorded
+ *
+ * ============ PASS n + 1 ==============
+ *    CPU 0         |   CPU 1
+ *                  |
+ * cnt1 timestamps  |   cnt2 timestamps
+ *   3              |         5
+ *   4              |         6
+ *   5              |         7 <---- max recorded
+ *
+ * Flush all events below timestamp 4
+ *
+ * ============ PASS n + 2 ==============
+ *    CPU 0         |   CPU 1
+ *                  |
+ * cnt1 timestamps  |   cnt2 timestamps
+ *   6              |         8
+ *   7              |         9
+ *   -              |         10
+ *
+ * Flush all events below timestamp 7
+ * etc...
+ */
+static int process_finished_round(event_t *event __used,
+				  struct perf_session *session,
+				  struct perf_event_ops *ops)
+{
+	flush_sample_queue(session, ops);
+	session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;
+
+	return 0;
+}
+
 static void __queue_sample_end(struct sample_queue *new, struct list_head *head)
 {
 	struct sample_queue *iter;
@@ -455,17 +489,12 @@ static void __queue_sample_event(struct sample_queue *new,
 }
 
 static int queue_sample_event(event_t *event, struct sample_data *data,
-			      struct perf_session *s,
-			      struct perf_event_ops *ops)
+			      struct perf_session *s)
 {
 	u64 timestamp = data->time;
 	struct sample_queue *new;
-	u64 flush_limit;
-
-	if (s->ordered_samples.flush_limit == ULLONG_MAX)
-		s->ordered_samples.flush_limit = timestamp + FLUSH_PERIOD;
 
 	if (timestamp < s->ordered_samples.last_flush) {
 		printf("Warning: Timestamp below last timeslice flush\n");
 		return -EINVAL;
 	}
@@ -488,23 +517,8 @@ static int queue_sample_event(event_t *event, struct sample_data *data,
 	__queue_sample_event(new, s);
 	s->ordered_samples.last_inserted = new;
 
-	/*
-	 * We want to have a slice of events covering 2 * FLUSH_PERIOD
-	 * If FLUSH_PERIOD is big enough, it ensures every events that occured
-	 * in the first half of the timeslice have all been buffered and there
-	 * are none remaining (we need that because of the weakly ordered
-	 * event recording we have). Then once we reach the 2 * FLUSH_PERIOD
-	 * timeslice, we flush the first half to be gentle with the memory
-	 * (the second half can still get new events in the middle, so wait
-	 * another period to flush it)
-	 */
-	flush_limit = s->ordered_samples.flush_limit;
-
-	if (new->timestamp > flush_limit &&
-	    new->timestamp - flush_limit > FLUSH_PERIOD) {
-		s->ordered_samples.flush_limit += FLUSH_PERIOD;
-		flush_sample_queue(s, ops);
-	}
+	if (new->timestamp > s->ordered_samples.max_timestamp)
+		s->ordered_samples.max_timestamp = new->timestamp;
 
 	return 0;
 }
@@ -520,7 +534,7 @@ static int perf_session__process_sample(event_t *event, struct perf_session *s,
 	bzero(&data, sizeof(struct sample_data));
 	event__parse_sample(event, s->sample_type, &data);
 
-	queue_sample_event(event, &data, s, ops);
+	queue_sample_event(event, &data, s);
 
 	return 0;
 }
@@ -536,8 +550,7 @@ static int perf_session__process_event(struct perf_session *self,
 	if (event->header.type < PERF_RECORD_HEADER_MAX) {
 		dump_printf("%#Lx [%#x]: PERF_RECORD_%s", offset + head,
 			    event->header.size, event__name[event->header.type]);
-		++event__total[0];
-		++event__total[event->header.type];
+		hists__inc_nr_events(&self->hists, event->header.type);
 	}
 
 	if (self->header.needs_swap && event__swap_ops[event->header.type])
@@ -572,8 +585,10 @@ static int perf_session__process_event(struct perf_session *self,
 		return ops->tracing_data(event, self);
 	case PERF_RECORD_HEADER_BUILD_ID:
 		return ops->build_id(event, self);
+	case PERF_RECORD_FINISHED_ROUND:
+		return ops->finished_round(event, self, ops);
 	default:
-		self->unknown_events++;
+		++self->hists.stats.nr_unknown_events;
 		return -1;
 	}
 }
@@ -650,15 +665,18 @@ more:
 	p = &event;
 	p += sizeof(struct perf_event_header);
 
-	err = do_read(self->fd, p, size - sizeof(struct perf_event_header));
-	if (err <= 0) {
-		if (err == 0) {
-			pr_err("unexpected end of event stream\n");
-			goto done;
-		}
+	if (size - sizeof(struct perf_event_header)) {
+		err = do_read(self->fd, p,
+			      size - sizeof(struct perf_event_header));
+		if (err <= 0) {
+			if (err == 0) {
+				pr_err("unexpected end of event stream\n");
+				goto done;
+			}
 
-		pr_err("failed to read event data\n");
-		goto out_err;
+			pr_err("failed to read event data\n");
+			goto out_err;
+		}
 	}
 
 	if (size == 0 ||
@@ -786,7 +804,7 @@ more:
 done:
 	err = 0;
 	/* do the final flush for ordered samples */
-	self->ordered_samples.flush_limit = ULLONG_MAX;
+	self->ordered_samples.next_flush = ULLONG_MAX;
 	flush_sample_queue(self, ops);
 out_err:
 	ui_progress__delete(progress);
@@ -870,3 +888,17 @@ int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps,
 
 	return 0;
 }
+
+size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
+{
+	return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) +
+	       __dsos__fprintf(&self->host_machine.user_dsos, fp) +
+	       machines__fprintf_dsos(&self->machines, fp);
+}
+
+size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
+					  bool with_hits)
+{
+	size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits);
+
+	return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits);
+}
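
The heart of this patch is the move from time-based flushing (the old FLUSH_PERIOD slices) to round-based flushing: each PERF_RECORD_FINISHED_ROUND pseudo event lets the session flush everything up to the max timestamp seen in the round before last. What follows is a minimal, self-contained C sketch of that scheme, written for this page rather than taken from the kernel tree: struct session, queue_sample(), flush_queue() and finish_round() are illustrative stand-ins for perf's sample_queue/ordered_samples machinery, and a singly linked list stands in for the kernel's list_head.

/* round_flush_sketch.c -- illustrative sketch, not kernel code. */
#include <stdio.h>
#include <stdlib.h>

struct sample {
	unsigned long long timestamp;
	struct sample *next;
};

struct session {
	struct sample *head;              /* time-ordered queue of samples */
	unsigned long long last_flush;    /* everything below this was delivered */
	unsigned long long next_flush;    /* flush boundary for the current round */
	unsigned long long max_timestamp; /* max timestamp seen so far */
};

/* Queue one sample in timestamp order (a linear scan keeps the sketch short). */
static int queue_sample(struct session *s, unsigned long long ts)
{
	struct sample **p = &s->head;
	struct sample *new = malloc(sizeof(*new));

	if (new == NULL)
		return -1;
	if (ts < s->last_flush)
		fprintf(stderr, "Warning: timestamp below last flush\n");
	while (*p && (*p)->timestamp <= ts)
		p = &(*p)->next;
	new->timestamp = ts;
	new->next = *p;
	*p = new;
	if (ts > s->max_timestamp)
		s->max_timestamp = ts;
	return 0;
}

/* Deliver every queued sample with a timestamp <= next_flush, in order. */
static void flush_queue(struct session *s)
{
	while (s->head && s->head->timestamp <= s->next_flush) {
		struct sample *victim = s->head;

		s->head = victim->next;
		printf("deliver %llu\n", victim->timestamp);
		s->last_flush = victim->timestamp;
		free(victim);
	}
}

/*
 * The FINISHED_ROUND step: flush up to the boundary recorded at the end of
 * the previous round, then raise the boundary to the max timestamp of the
 * round that just ended, exactly as process_finished_round() does above.
 */
static void finish_round(struct session *s)
{
	flush_queue(s);
	s->next_flush = s->max_timestamp;
}

int main(void)
{
	struct session s = { NULL, 0, 0, 0 };

	/* Pass n: cpu0 saw 1,2; cpu1 saw 2,3,4 (max recorded = 4). */
	queue_sample(&s, 1); queue_sample(&s, 2);
	queue_sample(&s, 2); queue_sample(&s, 3); queue_sample(&s, 4);
	finish_round(&s);    /* next_flush was 0: nothing delivered yet */

	/* Pass n + 1: every event below 4 is now known to be queued. */
	queue_sample(&s, 3); queue_sample(&s, 5); queue_sample(&s, 7);
	finish_round(&s);    /* delivers 1 2 2 3 3 4, in order */

	/* End of input: unconditional final flush of whatever remains. */
	s.next_flush = ~0ULL;
	flush_queue(&s);     /* delivers 5 7 */
	return 0;
}

The invariant is the one the patch's comment states: by the time the FINISHED_ROUND of pass n + 1 has been read, every event older than pass n's maximum timestamp is already queued, so delivering everything below next_flush can never emit samples out of order. The closing flush with ~0ULL mirrors the final flush with next_flush = ULLONG_MAX at the done: label in this patch.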