/*
 * Builtin report command: Analyze the perf.data input file,
 * look up and read DSOs and symbol information and display
 * a histogram of results, along various sorting keys.
 */
#include "util/util.h"

#include "util/color.h"
#include "util/list.h"
#include "util/cache.h"
#include "util/rbtree.h"
#include "util/symbol.h"
#include "util/string.h"

#include "perf.h"

#include "util/parse-options.h"
#include "util/parse-events.h"
#define SHOW_KERNEL	1
#define SHOW_USER	2
#define SHOW_HV		4

static char		const *input_name = "perf.data";
static char		*vmlinux = NULL;

static char		default_sort_order[] = "comm,dso";
static char		*sort_order = default_sort_order;

static int		input;
static int		show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV;

static int		dump_trace = 0;
#define dprintf(x...)	do { if (dump_trace) printf(x); } while (0)
#define cdprintf(x...)	do { if (dump_trace) color_fprintf(stdout, color, x); } while (0)
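/*
 * Note that cdprintf() expands to color_fprintf(stdout, color, ...), so it
 * relies on a variable named 'color' being in scope at each call site (as
 * in trace_event() below) rather than taking the color as a parameter.
 */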
static int		verbose;
static int		full_paths;
static int		collapse_syscalls;

static unsigned long	page_size;
static unsigned long	mmap_window = 32;
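/*
 * The input file is not consumed with read(2): __cmd_report() below maps
 * it in a sliding window of mmap_window pages and parses events in place,
 * remapping whenever an event would cross the end of the window.
 */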
struct ip_chain_event {
	__u16 nr;
	__u16 hv;
	__u16 kernel;
	__u16 user;
	__u64 ips[];
};

struct ip_event {
	struct perf_event_header header;
	__u64 ip;
	__u32 pid, tid;
	unsigned char __more_data[];
};

struct mmap_event {
	struct perf_event_header header;
	__u32 pid, tid;
	__u64 start;
	__u64 len;
	__u64 pgoff;
	char filename[PATH_MAX];
};

struct comm_event {
	struct perf_event_header header;
	__u32 pid, tid;
	char comm[16];
};

struct fork_event {
	struct perf_event_header header;
	__u32 pid, ppid;
};

struct period_event {
	struct perf_event_header header;
	__u64 time;
	__u64 id;
	__u64 sample_period;
};

typedef union event_union {
	struct perf_event_header header;
	struct ip_event ip;
	struct mmap_event mmap;
	struct comm_event comm;
	struct fork_event fork;
	struct period_event period;
} event_t;
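/*
 * Every record in the perf.data stream begins with a struct
 * perf_event_header, so header.type and header.size can be inspected
 * before the record is interpreted as one of the layouts above.
 */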
static LIST_HEAD(dsos);
static struct dso *kernel_dso;
static struct dso *vdso;
static void dsos__add(struct dso *dso)
{
	list_add_tail(&dso->node, &dsos);
}
static struct dso *dsos__find(const char *name)
{
	struct dso *pos;

	list_for_each_entry(pos, &dsos, node)
		if (strcmp(pos->name, name) == 0)
			return pos;
	return NULL;
}
static struct dso *dsos__findnew(const char *name)
{
	struct dso *dso = dsos__find(name);
	int nr;

	if (dso)
		return dso;

	dso = dso__new(name, 0);
	if (!dso)
		goto out_delete_dso;

	nr = dso__load(dso, NULL, verbose);
	if (nr < 0) {
		fprintf(stderr, "Failed to open: %s\n", name);
		goto out_delete_dso;
	}
	if (!nr && verbose) {
		fprintf(stderr,
		"No symbols found in: %s, maybe install a debug package?\n",
				name);
	}

	dsos__add(dso);

	return dso;

out_delete_dso:
	dso__delete(dso);
	return NULL;
}
static void dsos__fprintf(FILE *fp)
{
	struct dso *pos;

	list_for_each_entry(pos, &dsos, node)
		dso__fprintf(pos, fp);
}
static struct symbol *vdso__find_symbol(struct dso *dso, __u64 ip)
{
	return dso__find_symbol(kernel_dso, ip);
}
static int load_kernel(void)
{
	int err;

	kernel_dso = dso__new("[kernel]", 0);
	if (!kernel_dso)
		return -1;

	err = dso__load_kernel(kernel_dso, vmlinux, NULL, verbose);
	if (err) {
		dso__delete(kernel_dso);
		kernel_dso = NULL;
	} else
		dsos__add(kernel_dso);

	vdso = dso__new("[vdso]", 0);
	if (!vdso)
		return -1;

	vdso->find_symbol = vdso__find_symbol;

	dsos__add(vdso);

	return err;
}
static char	__cwd[PATH_MAX];
static char	*cwd = __cwd;
static int	cwdlen;
static int strcommon(const char *pathname)
{
	int n = 0;

	while (pathname[n] == cwd[n] && n < cwdlen)
		++n;

	return n;
}
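/*
 * Example: with cwd == "/home/user" (cwdlen == 10), the pathname
 * "/home/user/bin/prog" shares a 10-character prefix, so strcommon()
 * returns 10 and map__new() below shortens it to "./bin/prog".
 */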
struct map {
	struct list_head node;
	__u64		 start;
	__u64		 end;
	__u64		 pgoff;
	__u64		 (*map_ip)(struct map *, __u64);
	struct dso	 *dso;
};

static __u64 map__map_ip(struct map *map, __u64 ip)
{
	return ip - map->start + map->pgoff;
}
static __u64 vdso__map_ip(struct map *map, __u64 ip)
{
	return ip;
}
static inline int is_anon_memory(const char *filename)
{
	return strcmp(filename, "//anon") == 0;
}
static struct map *map__new(struct mmap_event *event)
{
	struct map *self = malloc(sizeof(*self));

	if (self != NULL) {
		const char *filename = event->filename;
		char newfilename[PATH_MAX];
		int anon;

		if (cwd) {
			int n = strcommon(filename);

			if (n == cwdlen) {
				snprintf(newfilename, sizeof(newfilename),
					 ".%s", filename + n);
				filename = newfilename;
			}
		}

		anon = is_anon_memory(filename);

		if (anon) {
			snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", event->pid);
			filename = newfilename;
		}

		self->start = event->start;
		self->end   = event->start + event->len;
		self->pgoff = event->pgoff;

		self->dso = dsos__findnew(filename);
		if (self->dso == NULL)
			goto out_delete;

		if (self->dso == vdso || anon)
			self->map_ip = vdso__map_ip;
		else
			self->map_ip = map__map_ip;
	}
	return self;
out_delete:
	free(self);
	return NULL;
}
static struct map *map__clone(struct map *self)
{
	struct map *map = malloc(sizeof(*self));

	if (!map)
		return NULL;

	memcpy(map, self, sizeof(*self));

	return map;
}
static int map__overlap(struct map *l, struct map *r)
{
	if (l->start > r->start) {
		struct map *t = l;
		l = r;
		r = t;
	}

	if (l->end > r->start)
		return 1;

	return 0;
}
static size_t map__fprintf(struct map *self, FILE *fp)
{
	return fprintf(fp, " %Lx-%Lx %Lx %s\n",
		       self->start, self->end, self->pgoff, self->dso->name);
}
struct thread {
	struct rb_node	 rb_node;
	struct list_head maps;
	pid_t		 pid;
	char		 *comm;
};
static struct thread *thread__new(pid_t pid)
{
	struct thread *self = malloc(sizeof(*self));

	if (self != NULL) {
		self->pid = pid;
		self->comm = malloc(32);
		if (self->comm)
			snprintf(self->comm, 32, ":%d", self->pid);
		INIT_LIST_HEAD(&self->maps);
	}

	return self;
}
static int thread__set_comm(struct thread *self, const char *comm)
{
	if (self->comm)
		free(self->comm);
	self->comm = strdup(comm);
	return self->comm ? 0 : -ENOMEM;
}
static size_t thread__fprintf(struct thread *self, FILE *fp)
{
	struct map *pos;
	size_t ret = fprintf(fp, "Thread %d %s\n", self->pid, self->comm);

	list_for_each_entry(pos, &self->maps, node)
		ret += map__fprintf(pos, fp);

	return ret;
}
static struct rb_root threads;
static struct thread *last_match;
static struct thread *threads__findnew(pid_t pid)
{
	struct rb_node **p = &threads.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - PID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	if (last_match && last_match->pid == pid)
		return last_match;

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->pid == pid) {
			last_match = th;
			return th;
		}

		if (pid < th->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	th = thread__new(pid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &threads);
		last_match = th;
	}

	return th;
}
static void thread__insert_map(struct thread *self, struct map *map)
{
	struct map *pos, *tmp;

	list_for_each_entry_safe(pos, tmp, &self->maps, node) {
		if (map__overlap(pos, map)) {
			list_del_init(&pos->node);
			/* XXX map__delete(pos) */
		}
	}

	list_add_tail(&map->node, &self->maps);
}
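/*
 * Overlapping maps are dropped rather than split: a new mmap that
 * overlaps an existing one (e.g. after an exec replaces the address
 * space) simply evicts the stale map from the thread's list.
 */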
static int thread__fork(struct thread *self, struct thread *parent)
{
	struct map *map;

	if (self->comm)
		free(self->comm);
	self->comm = strdup(parent->comm);
	if (!self->comm)
		return -ENOMEM;

	list_for_each_entry(map, &parent->maps, node) {
		struct map *new = map__clone(map);
		if (!new)
			return -ENOMEM;
		thread__insert_map(self, new);
	}

	return 0;
}
static struct map *thread__find_map(struct thread *self, __u64 ip)
{
	struct map *pos;

	if (self == NULL)
		return NULL;

	list_for_each_entry(pos, &self->maps, node)
		if (ip >= pos->start && ip <= pos->end)
			return pos;

	return NULL;
}
static size_t threads__fprintf(FILE *fp)
{
	size_t ret = 0;
	struct rb_node *nd;

	for (nd = rb_first(&threads); nd; nd = rb_next(nd)) {
		struct thread *pos = rb_entry(nd, struct thread, rb_node);

		ret += thread__fprintf(pos, fp);
	}

	return ret;
}
/*
 * histogram, sorted on item, collects counts
 */

static struct rb_root hist;

struct hist_entry {
	struct rb_node	 rb_node;

	struct thread	 *thread;
	struct map	 *map;
	struct dso	 *dso;
	struct symbol	 *sym;
	__u64		 ip;
	char		 level;

	__u64		 count;
};
/*
 * configurable sorting bits
 */

struct sort_entry {
	struct list_head list;

	char *header;

	int64_t (*cmp)(struct hist_entry *, struct hist_entry *);
	int64_t (*collapse)(struct hist_entry *, struct hist_entry *);
	size_t	(*print)(FILE *fp, struct hist_entry *);
};
/* --sort pid */

static int64_t
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->thread->pid - left->thread->pid;
}

static size_t
sort__thread_print(FILE *fp, struct hist_entry *self)
{
	return fprintf(fp, "%16s:%5d", self->thread->comm ?: "", self->thread->pid);
}

static struct sort_entry sort_thread = {
	.header = "         Command:  Pid",
	.cmp	= sort__thread_cmp,
	.print	= sort__thread_print,
};
/* --sort comm */

static int64_t
sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->thread->pid - left->thread->pid;
}

static int64_t
sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
{
	char *comm_l = left->thread->comm;
	char *comm_r = right->thread->comm;

	if (!comm_l || !comm_r) {
		if (!comm_l && !comm_r)
			return 0;
		else if (!comm_l)
			return -1;
		else
			return 1;
	}

	return strcmp(comm_l, comm_r);
}
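/*
 * Note that sort__comm_cmp() above keys on the pid rather than the comm
 * string, which keeps insertion into the hist tree cheap; entries whose
 * comm strings compare equal are merged later by sort__comm_collapse()
 * when collapse__resort() runs (see sort__need_collapse below).
 */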
static size_t
sort__comm_print(FILE *fp, struct hist_entry *self)
{
	return fprintf(fp, "%16s", self->thread->comm);
}

static struct sort_entry sort_comm = {
	.header		= "         Command",
	.cmp		= sort__comm_cmp,
	.collapse	= sort__comm_collapse,
	.print		= sort__comm_print,
};
/* --sort dso */

static int64_t
sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct dso *dso_l = left->dso;
	struct dso *dso_r = right->dso;

	if (!dso_l || !dso_r) {
		if (!dso_l && !dso_r)
			return 0;
		else if (!dso_l)
			return -1;
		else
			return 1;
	}

	return strcmp(dso_l->name, dso_r->name);
}

static size_t
sort__dso_print(FILE *fp, struct hist_entry *self)
{
	if (self->dso)
		return fprintf(fp, "%-25s", self->dso->name);

	return fprintf(fp, "%016llx         ", (__u64)self->ip);
}

static struct sort_entry sort_dso = {
	.header = "Shared Object            ",
	.cmp	= sort__dso_cmp,
	.print	= sort__dso_print,
};
/* --sort symbol */

static int64_t
sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
{
	__u64 ip_l, ip_r;

	if (left->sym == right->sym)
		return 0;

	ip_l = left->sym ? left->sym->start : left->ip;
	ip_r = right->sym ? right->sym->start : right->ip;

	return (int64_t)(ip_r - ip_l);
}

static size_t
sort__sym_print(FILE *fp, struct hist_entry *self)
{
	size_t ret = 0;

	if (verbose)
		ret += fprintf(fp, "%#018llx  ", (__u64)self->ip);

	if (self->sym) {
		ret += fprintf(fp, "[%c] %s",
			self->dso == kernel_dso ? 'k' : '.', self->sym->name);
	} else {
		ret += fprintf(fp, "%#016llx", (__u64)self->ip);
	}

	return ret;
}

static struct sort_entry sort_sym = {
	.header = "Symbol",
	.cmp	= sort__sym_cmp,
	.print	= sort__sym_print,
};
static int sort__need_collapse = 0;

struct sort_dimension {
	char			*name;
	struct sort_entry	*entry;
	int			taken;
};

static struct sort_dimension sort_dimensions[] = {
	{ .name = "pid",	.entry = &sort_thread,	},
	{ .name = "comm",	.entry = &sort_comm,	},
	{ .name = "dso",	.entry = &sort_dso,	},
	{ .name = "symbol",	.entry = &sort_sym,	},
};

static LIST_HEAD(hist_entry__sort_list);
static int sort_dimension__add(char *tok)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) {
		struct sort_dimension *sd = &sort_dimensions[i];

		if (sd->taken)
			continue;

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sd->entry->collapse)
			sort__need_collapse = 1;

		list_add_tail(&sd->entry->list, &hist_entry__sort_list);
		sd->taken = 1;

		return 0;
	}

	return -ESRCH;
}
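/*
 * Because the match above is a strncasecmp() over strlen(tok) characters,
 * keys may be abbreviated: "--sort sym" selects the "symbol" dimension,
 * and "--sort comm,dso" reproduces the default ordering.
 */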
static int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		cmp = se->cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}
static int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int64_t (*f)(struct hist_entry *, struct hist_entry *);

		f = se->collapse ?: se->cmp;

		cmp = f(left, right);
		if (cmp)
			break;
	}

	return cmp;
}
static size_t
hist_entry__fprintf(FILE *fp, struct hist_entry *self, __u64 total_samples)
{
	struct sort_entry *se;
	size_t ret;

	if (total_samples) {
		double percent = self->count * 100.0 / total_samples;
		char *color = PERF_COLOR_NORMAL;

		/*
		 * We color high-overhead entries in red, mid-overhead
		 * entries in green - and keep the low overhead places
		 * normal:
		 */
		if (percent >= 5.0) {
			color = PERF_COLOR_RED;
		} else {
			if (percent >= 0.5)
				color = PERF_COLOR_GREEN;
		}

		ret = color_fprintf(fp, color, "   %6.2f%%",
				(self->count * 100.0) / total_samples);
	} else
		ret = fprintf(fp, "%12Ld ", self->count);

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		fprintf(fp, "  ");
		ret += se->print(fp, self);
	}

	ret += fprintf(fp, "\n");

	return ret;
}
/*
 * collect histogram counts
 */

static int
hist_entry__add(struct thread *thread, struct map *map, struct dso *dso,
		struct symbol *sym, __u64 ip, char level, __u64 count)
{
	struct rb_node **p = &hist.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	struct hist_entry entry = {
		.thread	= thread,
		.map	= map,
		.dso	= dso,
		.sym	= sym,
		.ip	= ip,
		.level	= level,
		.count	= count,
	};
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node);

		cmp = hist_entry__cmp(&entry, he);

		if (!cmp) {
			he->count += count;
			return 0;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = malloc(sizeof(*he));
	if (!he)
		return -ENOMEM;
	*he = entry;
	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, &hist);

	return 0;
}
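/*
 * Each sample is weighted by its sample period (the 'count' argument),
 * so entries accumulate approximate event counts rather than raw sample
 * counts, which keeps the reported percentages meaningful even when the
 * sampling period varies during the run.
 */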
static void hist_entry__free(struct hist_entry *he)
{
	free(he);
}
/*
 * collapse the histogram
 */

static struct rb_root collapse_hists;

static void collapse__insert_entry(struct hist_entry *he)
{
	struct rb_node **p = &collapse_hists.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			iter->count += he->count;
			hist_entry__free(he);
			return;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, &collapse_hists);
}
static void collapse__resort(void)
{
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	next = rb_first(&hist);
	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		rb_erase(&n->rb_node, &hist);
		collapse__insert_entry(n);
	}
}
/*
 * reverse the map, sort on count.
 */

static struct rb_root output_hists;

static void output__insert_entry(struct hist_entry *he)
{
	struct rb_node **p = &output_hists.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (he->count > iter->count)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, &output_hists);
}
static void output__resort(void)
{
	struct rb_node *next;
	struct hist_entry *n;
	struct rb_root *tree = &hist;

	if (sort__need_collapse)
		tree = &collapse_hists;

	next = rb_first(tree);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		rb_erase(&n->rb_node, tree);
		output__insert_entry(n);
	}
}
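/*
 * The report is thus built in up to three passes over rbtrees: entries
 * are first keyed by the sort criteria (hist), optionally merged under
 * the collapse rules (collapse_hists), and finally re-sorted by count
 * (output_hists) for printing.
 */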
static size_t output__fprintf(FILE *fp, __u64 total_samples)
{
	struct hist_entry *pos;
	struct sort_entry *se;
	struct rb_node *nd;
	size_t ret = 0;

	fprintf(fp, "#\n");
	fprintf(fp, "# (%Ld samples)\n", (__u64)total_samples);
	fprintf(fp, "#\n");

	fprintf(fp, "# Overhead");
	list_for_each_entry(se, &hist_entry__sort_list, list)
		fprintf(fp, "  %s", se->header);
	fprintf(fp, "\n");

	fprintf(fp, "# ........");
	list_for_each_entry(se, &hist_entry__sort_list, list) {
		unsigned int i;

		fprintf(fp, "  ");
		for (i = 0; i < strlen(se->header); i++)
			fprintf(fp, ".");
	}
	fprintf(fp, "\n");

	fprintf(fp, "#\n");

	for (nd = rb_first(&output_hists); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node);
		ret += hist_entry__fprintf(fp, pos, total_samples);
	}

	if (!strcmp(sort_order, default_sort_order)) {
		fprintf(fp, "#\n");
		fprintf(fp, "# (For more details, try: perf report --sort comm,dso,symbol)\n");
		fprintf(fp, "#\n");
	}
	fprintf(fp, "\n");

	return ret;
}
static void register_idle_thread(void)
{
	struct thread *thread = threads__findnew(0);

	if (thread == NULL ||
			thread__set_comm(thread, "[idle]")) {
		fprintf(stderr, "problem inserting idle task.\n");
		exit(-1);
	}
}
static unsigned long total = 0,
		     total_mmap = 0,
		     total_comm = 0,
		     total_fork = 0,
		     total_unknown = 0;
static int
process_overflow_event(event_t *event, unsigned long offset, unsigned long head)
{
	char level;
	int show;
	struct dso *dso = NULL;
	struct thread *thread = threads__findnew(event->ip.pid);
	__u64 ip = event->ip.ip;
	__u64 period = 1;
	struct map *map = NULL;
	void *more_data = event->ip.__more_data;
	struct ip_chain_event *chain;

	if (event->header.type & PERF_SAMPLE_PERIOD) {
		period = *(__u64 *)more_data;
		more_data += sizeof(__u64);
	}

	dprintf("%p [%p]: PERF_EVENT (IP, %d): %d: %p period: %Ld\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->header.misc,
		event->ip.pid,
		(void *)(long)ip,
		period);

	if (event->header.type & PERF_SAMPLE_CALLCHAIN) {
		int i;

		chain = (void *)more_data;

		dprintf("... chain: u:%d, k:%d, nr:%d\n",
			chain->user,
			chain->kernel,
			chain->nr);

		for (i = 0; i < chain->nr; i++)
			dprintf("..... %2d: %016Lx\n", i, chain->ips[i]);

		if (collapse_syscalls) {
			/*
			 * Find the all-but-last kernel entry
			 * amongst the call-chains - to get
			 * to the level of system calls:
			 */
			if (chain->kernel >= 2)
				ip = chain->ips[chain->kernel-2];
		}
	}

	if (thread == NULL) {
		fprintf(stderr, "problem processing %d event, skipping it.\n",
			event->header.type);
		return -1;
	}

	dprintf(" ... thread: %s:%d\n", thread->comm, thread->pid);

	if (event->header.misc & PERF_EVENT_MISC_KERNEL) {
		show = SHOW_KERNEL;
		level = 'k';

		dso = kernel_dso;

		dprintf(" ...... dso: %s\n", dso->name);

	} else if (event->header.misc & PERF_EVENT_MISC_USER) {

		show = SHOW_USER;
		level = '.';

		map = thread__find_map(thread, ip);
		if (map != NULL) {
			ip = map->map_ip(map, ip);
			dso = map->dso;
		} else {
			/*
			 * If this is outside of all known maps,
			 * and is a negative address, try to look it
			 * up in the kernel dso, as it might be a
			 * vsyscall (which executes in user-mode):
			 */
			if ((long long)ip < 0)
				dso = kernel_dso;
		}
		dprintf(" ...... dso: %s\n", dso ? dso->name : "<not found>");

	} else {
		show = SHOW_HV;
		level = 'H';
		dprintf(" ...... dso: [hypervisor]\n");
	}

	if (show & show_mask) {
		struct symbol *sym = NULL;

		if (dso)
			sym = dso->find_symbol(dso, ip);

		if (hist_entry__add(thread, map, dso, sym, ip, level, period)) {
			fprintf(stderr,
		"problem incrementing symbol count, skipping event\n");
			return -1;
		}
	}
	total += period;

	return 0;
}
static int
process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
{
	struct thread *thread = threads__findnew(event->mmap.pid);
	struct map *map = map__new(&event->mmap);

	dprintf("%p [%p]: PERF_EVENT_MMAP %d: [%p(%p) @ %p]: %s\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->mmap.pid,
		(void *)(long)event->mmap.start,
		(void *)(long)event->mmap.len,
		(void *)(long)event->mmap.pgoff,
		event->mmap.filename);

	if (thread == NULL || map == NULL) {
		dprintf("problem processing PERF_EVENT_MMAP, skipping event.\n");
		return 0;
	}

	thread__insert_map(thread, map);
	total_mmap++;

	return 0;
}
static int
process_comm_event(event_t *event, unsigned long offset, unsigned long head)
{
	struct thread *thread = threads__findnew(event->comm.pid);

	dprintf("%p [%p]: PERF_EVENT_COMM: %s:%d\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->comm.comm, event->comm.pid);

	if (thread == NULL ||
	    thread__set_comm(thread, event->comm.comm)) {
		dprintf("problem processing PERF_EVENT_COMM, skipping event.\n");
		return -1;
	}
	total_comm++;

	return 0;
}
static int
process_fork_event(event_t *event, unsigned long offset, unsigned long head)
{
	struct thread *thread = threads__findnew(event->fork.pid);
	struct thread *parent = threads__findnew(event->fork.ppid);

	dprintf("%p [%p]: PERF_EVENT_FORK: %d:%d\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->fork.pid, event->fork.ppid);

	if (!thread || !parent || thread__fork(thread, parent)) {
		dprintf("problem processing PERF_EVENT_FORK, skipping event.\n");
		return -1;
	}
	total_fork++;

	return 0;
}
static int
process_period_event(event_t *event, unsigned long offset, unsigned long head)
{
	dprintf("%p [%p]: PERF_EVENT_PERIOD: time:%Ld, id:%Ld: period:%Ld\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->period.time,
		event->period.id,
		event->period.sample_period);

	return 0;
}
static void trace_event(event_t *event)
{
	unsigned char *raw_event = (void *)event;
	char *color = PERF_COLOR_BLUE;
	int i, j;

	if (!dump_trace)
		return;

	dprintf(".");
	cdprintf("\n. ... raw event: size %d bytes\n", event->header.size);

	for (i = 0; i < event->header.size; i++) {
		if ((i & 15) == 0) {
			dprintf(".");
			cdprintf("  %04x: ", i);
		}

		cdprintf(" %02x", raw_event[i]);

		if (((i & 15) == 15) || i == event->header.size-1) {
			cdprintf("  ");
			for (j = 0; j < 15-(i & 15); j++)
				cdprintf("   ");
			for (j = 0; j < (i & 15); j++) {
				if (isprint(raw_event[i-15+j]))
					cdprintf("%c", raw_event[i-15+j]);
				else
					cdprintf(".");
			}
			cdprintf("\n");
		}
	}
	dprintf(".\n");
}
static int
process_event(event_t *event, unsigned long offset, unsigned long head)
{
	trace_event(event);

	if (event->header.misc & PERF_EVENT_MISC_OVERFLOW)
		return process_overflow_event(event, offset, head);

	switch (event->header.type) {
	case PERF_EVENT_MMAP:
		return process_mmap_event(event, offset, head);

	case PERF_EVENT_COMM:
		return process_comm_event(event, offset, head);

	case PERF_EVENT_FORK:
		return process_fork_event(event, offset, head);

	case PERF_EVENT_PERIOD:
		return process_period_event(event, offset, head);
	/*
	 * We don't process them right now but they are fine:
	 */

	case PERF_EVENT_THROTTLE:
	case PERF_EVENT_UNTHROTTLE:
		return 0;

	default:
		return -1;
	}

	return 0;
}
static int __cmd_report(void)
{
	int ret, rc = EXIT_FAILURE;
	unsigned long offset = 0;
	unsigned long head = 0;
	struct stat stat;
	event_t *event;
	uint32_t size;
	char *buf;

	register_idle_thread();

	input = open(input_name, O_RDONLY);
	if (input < 0) {
		fprintf(stderr, " failed to open file: %s", input_name);
		if (!strcmp(input_name, "perf.data"))
			fprintf(stderr, " (try 'perf record' first)");
		fprintf(stderr, "\n");
		exit(-1);
	}

	ret = fstat(input, &stat);
	if (ret < 0) {
		perror("failed to stat file");
		exit(-1);
	}

	if (!stat.st_size) {
		fprintf(stderr, "zero-sized file, nothing to do!\n");
		exit(0);
	}

	if (load_kernel() < 0) {
		perror("failed to load kernel symbols");
		return EXIT_FAILURE;
	}

	if (!full_paths) {
		if (getcwd(__cwd, sizeof(__cwd)) == NULL) {
			perror("failed to get the current directory");
			return EXIT_FAILURE;
		}
		cwdlen = strlen(cwd);
	} else {
		cwd = NULL;
		cwdlen = 0;
	}
remap:
	buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
			   MAP_SHARED, input, offset);
	if (buf == MAP_FAILED) {
		perror("failed to mmap file");
		exit(-1);
	}

more:
	event = (event_t *)(buf + head);

	size = event->header.size;
	if (!size)
		size = 8;

	if (head + event->header.size >= page_size * mmap_window) {
		unsigned long shift = page_size * (head / page_size);
		int ret;

		ret = munmap(buf, page_size * mmap_window);
		assert(ret == 0);

		offset += shift;
		head -= shift;
		goto remap;
	}

	size = event->header.size;
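	/*
	 * Example of the window shift above: with 4096-byte pages and
	 * head == 0x21234, shift == 4096 * (0x21234 / 4096) == 0x21000.
	 * head is rounded down to a page boundary so that the next mmap()
	 * offset stays page-aligned while the current event still lands
	 * inside the freshly mapped window.
	 */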
	dprintf("\n%p [%p]: event: %d\n",
		(void *)(offset + head),
		(void *)(long)event->header.size,
		event->header.type);

	if (!size || process_event(event, offset, head) < 0) {

		dprintf("%p [%p]: skipping unknown header type: %d\n",
			(void *)(offset + head),
			(void *)(long)(event->header.size),
			event->header.type);

		total_unknown++;

		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope of catching on again 'soon'.
		 */

		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (offset + head < stat.st_size)
		goto more;

	rc = EXIT_SUCCESS;
	close(input);

	dprintf("      IP events: %10ld\n", total);
	dprintf("    mmap events: %10ld\n", total_mmap);
	dprintf("    comm events: %10ld\n", total_comm);
	dprintf("    fork events: %10ld\n", total_fork);
	dprintf(" unknown events: %10ld\n", total_unknown);

	if (dump_trace)
		return 0;

	if (verbose >= 3)
		threads__fprintf(stdout);

	if (verbose >= 2)
		dsos__fprintf(stdout);

	collapse__resort();
	output__resort();
	output__fprintf(stdout, total);

	return rc;
}
static const char * const report_usage[] = {
	"perf report [<options>] <command>",
	NULL
};
static const struct option options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		    "input file name"),
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_STRING('k', "vmlinux", &vmlinux, "file", "vmlinux pathname"),
	OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
		   "sort by key(s): pid, comm, dso, symbol. Default: comm,dso"),
	OPT_BOOLEAN('P', "full-paths", &full_paths,
		    "Don't shorten the pathnames taking into account the cwd"),
	OPT_BOOLEAN('S', "syscalls", &collapse_syscalls,
		    "show per syscall summary overhead, using call graph"),
	OPT_END()
};
static void setup_sorting(void)
{
	char *tmp, *tok, *str = strdup(sort_order);

	for (tok = strtok_r(str, ", ", &tmp);
			tok; tok = strtok_r(NULL, ", ", &tmp)) {
		if (sort_dimension__add(tok) < 0) {
			error("Unknown --sort key: `%s'", tok);
			usage_with_options(report_usage, options);
		}
	}

	free(str);
}
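/*
 * E.g. "-s comm,dso,symbol" (or the spelled-out --sort form) builds
 * hist_entry__sort_list as sort_comm -> sort_dso -> sort_sym, which is
 * exactly the ordering the report hint in output__fprintf() recommends.
 */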
int cmd_report(int argc, const char **argv, const char *prefix)
{
	symbol__init();

	page_size = getpagesize();

	argc = parse_options(argc, argv, options, report_usage, 0);

	setup_sorting();

	/*
	 * Any (unrecognized) arguments left?
	 */
	if (argc)
		usage_with_options(report_usage, options);

	setup_pager();

	return __cmd_report();
}