perf evsel: Steal the counter reading routines from stat
tools/perf/builtin-stat.c
/*
 * builtin-stat.c
 *
 * Builtin stat command: Give a precise performance counters summary
 * overview about any workload, CPU or specific PID.
 *
 * Sample output:

   $ perf stat ~/hackbench 10
   Time: 0.104

    Performance counter stats for '/home/mingo/hackbench':

       1255.538611  task clock ticks     #      10.143 CPU utilization factor
             54011  context switches     #       0.043 M/sec
               385  CPU migrations       #       0.000 M/sec
             17755  pagefaults           #       0.014 M/sec
        3808323185  CPU cycles           #    3033.219 M/sec
        1575111190  instructions         #    1254.530 M/sec
          17367895  cache references     #      13.833 M/sec
           7674421  cache misses         #       6.112 M/sec

    Wall-clock time elapsed:   123.786620 msecs

 *
 * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *   Jaswinder Singh Rajput <jaswinder@kernel.org>
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include "perf.h"
#include "builtin.h"
#include "util/util.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/event.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/header.h"
#include "util/cpumap.h"
#include "util/thread.h"

#include <sys/prctl.h>
#include <math.h>
#include <locale.h>

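/*
 * Shorthand for the counter file descriptor stored at (cpu, thread) in
 * the evsel's fd xyarray:
 */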
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))

#define DEFAULT_SEPARATOR       " "

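/*
 * Default event list, used when no -e/--event option is given and this
 * is not a --null run: four software counters plus the common hardware
 * counters.
 */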
static struct perf_event_attr default_attrs[] = {

  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK              },
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES        },
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS          },
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS             },

  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES              },
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS            },
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS     },
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES           },
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_REFERENCES        },
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_MISSES            },

};

static bool                     system_wide                     =  false;
static int                      nr_cpus                         =  0;
static int                      run_idx                         =  0;

static int                      run_count                       =  1;
static bool                     no_inherit                      = false;
static bool                     scale                           =  true;
static bool                     no_aggr                         = false;
static pid_t                    target_pid                      = -1;
static pid_t                    target_tid                      = -1;
static pid_t                    *all_tids                       =  NULL;
static int                      thread_num                      =  0;
static pid_t                    child_pid                       = -1;
static bool                     null_run                        =  false;
static bool                     big_num                         =  true;
static int                      big_num_opt                     =  -1;
static const char               *cpu_list;
static const char               *csv_sep                        = NULL;
static bool                     csv_output                      = false;

static volatile int done = 0;

struct stats {
        double n, mean, M2;
};

struct perf_stat {
        struct stats      res_stats[3];
};

static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
{
        evsel->priv = zalloc(sizeof(struct perf_stat));
        return evsel->priv == NULL ? -ENOMEM : 0;
}

static void perf_evsel__free_stat_priv(struct perf_evsel *evsel)
{
        free(evsel->priv);
        evsel->priv = NULL;
}

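/*
 * Welford's online algorithm: update the running mean and the sum of
 * squared deviations (M2) one sample at a time, so repeated runs (-r)
 * do not need to be stored:
 */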
static void update_stats(struct stats *stats, u64 val)
{
        double delta;

        stats->n++;
        delta = val - stats->mean;
        stats->mean += delta / stats->n;
        stats->M2 += delta*(val - stats->mean);
}

static double avg_stats(struct stats *stats)
{
        return stats->mean;
}

/*
 * http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
 *
 *       (\Sum n_i^2) - ((\Sum n_i)^2)/n
 * s^2 = -------------------------------
 *                  n - 1
 *
 * http://en.wikipedia.org/wiki/Stddev
 *
 * The std dev of the mean is related to the std dev by:
 *
 *             s
 * s_mean = -------
 *          sqrt(n)
 *
 */
static double stddev_stats(struct stats *stats)
{
        double variance = stats->M2 / (stats->n - 1);
        double variance_mean = variance / stats->n;

        return sqrt(variance_mean);
}

struct stats                    runtime_nsecs_stats[MAX_NR_CPUS];
struct stats                    runtime_cycles_stats[MAX_NR_CPUS];
struct stats                    runtime_branches_stats[MAX_NR_CPUS];
struct stats                    walltime_nsecs_stats;

#define ERR_PERF_OPEN \
"counter %d, sys_perf_event_open() syscall returned with %d (%s).  /bin/dmesg may provide additional information."

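/*
 * Open one counter fd per CPU (system-wide) or per thread (per-task
 * workload), note EPERM/EACCES failures in *perm_err, and return the
 * number of fds successfully created:
 */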
static int create_perf_stat_counter(struct perf_evsel *evsel, bool *perm_err)
{
        struct perf_event_attr *attr = &evsel->attr;
        int thread;
        int ncreated = 0;

        if (scale)
                attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
                                    PERF_FORMAT_TOTAL_TIME_RUNNING;

        if (system_wide) {
                int cpu;

                for (cpu = 0; cpu < nr_cpus; cpu++) {
                        FD(evsel, cpu, 0) = sys_perf_event_open(attr,
                                        -1, cpumap[cpu], -1, 0);
                        if (FD(evsel, cpu, 0) < 0) {
                                if (errno == EPERM || errno == EACCES)
                                        *perm_err = true;
                                error(ERR_PERF_OPEN, evsel->idx,
                                        FD(evsel, cpu, 0), strerror(errno));
                        } else {
                                ++ncreated;
                        }
                }
        } else {
                attr->inherit = !no_inherit;
                if (target_pid == -1 && target_tid == -1) {
                        attr->disabled = 1;
                        attr->enable_on_exec = 1;
                }
                for (thread = 0; thread < thread_num; thread++) {
                        FD(evsel, 0, thread) = sys_perf_event_open(attr,
                                all_tids[thread], -1, -1, 0);
                        if (FD(evsel, 0, thread) < 0) {
                                if (errno == EPERM || errno == EACCES)
                                        *perm_err = true;
                                error(ERR_PERF_OPEN, evsel->idx,
                                        FD(evsel, 0, thread),
                                        strerror(errno));
                        } else {
                                ++ncreated;
                        }
                }
        }

        return ncreated;
}

/*
 * Does the counter have nsecs as a unit?
 */
static inline int nsec_counter(struct perf_evsel *evsel)
{
        if (perf_evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK) ||
            perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK))
                return 1;

        return 0;
}

/*
 * Read out the results of a single counter:
 * aggregate counts across CPUs in system-wide mode
 */
static int read_counter_aggr(struct perf_evsel *counter)
{
        struct perf_stat *ps = counter->priv;
        u64 *count = counter->counts->aggr.values;
        int i;

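        /*
         * aggr.values[] holds the event's summed value, time_enabled and
         * time_running across all CPUs/threads:
         */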
        if (__perf_evsel__read(counter, nr_cpus, thread_num, scale) < 0)
                return -1;

        for (i = 0; i < 3; i++)
                update_stats(&ps->res_stats[i], count[i]);

        if (verbose) {
                fprintf(stderr, "%s: %Ld %Ld %Ld\n", event_name(counter),
                                count[0], count[1], count[2]);
        }

        /*
         * Save the full runtime - to allow normalization during printout:
         */
        if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK))
                update_stats(&runtime_nsecs_stats[0], count[0]);
        if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
                update_stats(&runtime_cycles_stats[0], count[0]);
        if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
                update_stats(&runtime_branches_stats[0], count[0]);

        return 0;
}

/*
 * Read out the results of a single counter:
 * do not aggregate counts across CPUs in system-wide mode
 */
static int read_counter(struct perf_evsel *counter)
{
        u64 *count;
        int cpu;

        for (cpu = 0; cpu < nr_cpus; cpu++) {
                if (__perf_evsel__read_on_cpu(counter, cpu, 0, scale) < 0)
                        return -1;

                count = counter->counts->cpu[cpu].values;

                if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK))
                        update_stats(&runtime_nsecs_stats[cpu], count[0]);
                if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
                        update_stats(&runtime_cycles_stats[cpu], count[0]);
                if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
                        update_stats(&runtime_branches_stats[cpu], count[0]);
        }

        return 0;
}

static int run_perf_stat(int argc __used, const char **argv)
{
        unsigned long long t0, t1;
        struct perf_evsel *counter;
        int status = 0;
        int ncreated = 0;
        int child_ready_pipe[2], go_pipe[2];
        bool perm_err = false;
        const bool forks = (argc > 0);
        char buf;

        if (!system_wide)
                nr_cpus = 1;

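        /*
         * child_ready_pipe / go_pipe implement a handshake with the forked
         * workload: the child signals that it is ready to exec, then blocks
         * on go_pipe until all the counters have been created.
         */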
        if (forks && (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0)) {
                perror("failed to create pipes");
                exit(1);
        }

        if (forks) {
                if ((child_pid = fork()) < 0)
                        perror("failed to fork");

                if (!child_pid) {
                        close(child_ready_pipe[0]);
                        close(go_pipe[1]);
                        fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

                        /*
                         * Do a dummy execvp to get the PLT entry resolved,
                         * so we avoid the resolver overhead on the real
                         * execvp call.
                         */
                        execvp("", (char **)argv);

                        /*
                         * Tell the parent we're ready to go
                         */
                        close(child_ready_pipe[1]);

                        /*
                         * Wait until the parent tells us to go.
                         */
                        if (read(go_pipe[0], &buf, 1) == -1)
                                perror("unable to read pipe");

                        execvp(argv[0], (char **)argv);

                        perror(argv[0]);
                        exit(-1);
                }

                if (target_tid == -1 && target_pid == -1 && !system_wide)
                        all_tids[0] = child_pid;

                /*
                 * Wait for the child to be ready to exec.
                 */
                close(child_ready_pipe[1]);
                close(go_pipe[0]);
                if (read(child_ready_pipe[0], &buf, 1) == -1)
                        perror("unable to read pipe");
                close(child_ready_pipe[0]);
        }

        list_for_each_entry(counter, &evsel_list, node)
                ncreated += create_perf_stat_counter(counter, &perm_err);

        if (ncreated < nr_counters) {
                if (perm_err)
                        error("You may not have permission to collect %sstats.\n"
                              "\t Consider tweaking"
                              " /proc/sys/kernel/perf_event_paranoid or running as root.",
                              system_wide ? "system-wide " : "");
                if (child_pid != -1)
                        kill(child_pid, SIGTERM);
                die("Not all events could be opened.\n");
                return -1;
        }

        /*
         * Enable counters and exec the command:
         */
        t0 = rdclock();

        if (forks) {
                close(go_pipe[1]);
                wait(&status);
        } else {
                while (!done)
                        sleep(1);
        }

        t1 = rdclock();

        update_stats(&walltime_nsecs_stats, t1 - t0);

        if (no_aggr) {
                list_for_each_entry(counter, &evsel_list, node) {
                        read_counter(counter);
                        perf_evsel__close_fd(counter, nr_cpus, 1);
                }
        } else {
                list_for_each_entry(counter, &evsel_list, node) {
                        read_counter_aggr(counter);
                        perf_evsel__close_fd(counter, nr_cpus, thread_num);
                }
        }

        return WEXITSTATUS(status);
}

static void print_noise(struct perf_evsel *evsel, double avg)
{
        struct perf_stat *ps;

        if (run_count == 1)
                return;

        ps = evsel->priv;
        fprintf(stderr, "   ( +- %7.3f%% )",
                        100 * stddev_stats(&ps->res_stats[0]) / avg);
}

static void nsec_printout(int cpu, struct perf_evsel *evsel, double avg)
{
        double msecs = avg / 1e6;
        char cpustr[16] = { '\0', };
        const char *fmt = csv_output ? "%s%.6f%s%s" : "%s%18.6f%s%-24s";

        if (no_aggr)
                sprintf(cpustr, "CPU%*d%s",
                        csv_output ? 0 : -4,
                        cpumap[cpu], csv_sep);

        fprintf(stderr, fmt, cpustr, msecs, csv_sep, event_name(evsel));

        if (csv_output)
                return;

        if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK))
                fprintf(stderr, " # %10.3f CPUs ",
                                avg / avg_stats(&walltime_nsecs_stats));
}

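/*
 * Print a raw count plus a derived ratio: IPC for instructions, a miss
 * percentage for branch misses, or M/sec relative to the task-clock
 * runtime for everything else:
 */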
static void abs_printout(int cpu, struct perf_evsel *evsel, double avg)
{
        double total, ratio = 0.0;
        char cpustr[16] = { '\0', };
        const char *fmt;

        if (csv_output)
                fmt = "%s%.0f%s%s";
        else if (big_num)
                fmt = "%s%'18.0f%s%-24s";
        else
                fmt = "%s%18.0f%s%-24s";

        if (no_aggr)
                sprintf(cpustr, "CPU%*d%s",
                        csv_output ? 0 : -4,
                        cpumap[cpu], csv_sep);
        else
                cpu = 0;

        fprintf(stderr, fmt, cpustr, avg, csv_sep, event_name(evsel));

        if (csv_output)
                return;

        if (perf_evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) {
                total = avg_stats(&runtime_cycles_stats[cpu]);

                if (total)
                        ratio = avg / total;

                fprintf(stderr, " # %10.3f IPC  ", ratio);
        } else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES) &&
                        runtime_branches_stats[cpu].n != 0) {
                total = avg_stats(&runtime_branches_stats[cpu]);

                if (total)
                        ratio = avg * 100 / total;

                fprintf(stderr, " # %10.3f %%    ", ratio);

        } else if (runtime_nsecs_stats[cpu].n != 0) {
                total = avg_stats(&runtime_nsecs_stats[cpu]);

                if (total)
                        ratio = 1000.0 * avg / total;

                fprintf(stderr, " # %10.3f M/sec", ratio);
        }
}

/*
 * Print out the results of a single counter:
 * aggregated counts in system-wide mode
 */
static void print_counter_aggr(struct perf_evsel *counter)
{
        struct perf_stat *ps = counter->priv;
        double avg = avg_stats(&ps->res_stats[0]);
        int scaled = counter->counts->scaled;

        if (scaled == -1) {
                fprintf(stderr, "%*s%s%-24s\n",
                        csv_output ? 0 : 18,
                        "<not counted>", csv_sep, event_name(counter));
                return;
        }

        if (nsec_counter(counter))
                nsec_printout(-1, counter, avg);
        else
                abs_printout(-1, counter, avg);

        if (csv_output) {
                fputc('\n', stderr);
                return;
        }

        print_noise(counter, avg);

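        /*
         * If the event was time-multiplexed, report the fraction of the
         * run during which it was actually counting:
         */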
        if (scaled) {
                double avg_enabled, avg_running;

                avg_enabled = avg_stats(&ps->res_stats[1]);
                avg_running = avg_stats(&ps->res_stats[2]);

                fprintf(stderr, "  (scaled from %.2f%%)",
                                100 * avg_running / avg_enabled);
        }

        fprintf(stderr, "\n");
}

/*
 * Print out the results of a single counter:
 * does not use aggregated count in system-wide
 */
static void print_counter(struct perf_evsel *counter)
{
        u64 ena, run, val;
        int cpu;

        for (cpu = 0; cpu < nr_cpus; cpu++) {
                val = counter->counts->cpu[cpu].val;
                ena = counter->counts->cpu[cpu].ena;
                run = counter->counts->cpu[cpu].run;
                if (run == 0 || ena == 0) {
                        fprintf(stderr, "CPU%*d%s%*s%s%-24s",
                                csv_output ? 0 : -4,
                                cpumap[cpu], csv_sep,
                                csv_output ? 0 : 18,
                                "<not counted>", csv_sep,
                                event_name(counter));

                        fprintf(stderr, "\n");
                        continue;
                }

                if (nsec_counter(counter))
                        nsec_printout(cpu, counter, val);
                else
                        abs_printout(cpu, counter, val);

                if (!csv_output) {
                        print_noise(counter, 1.0);

                        if (run != ena) {
                                fprintf(stderr, "  (scaled from %.2f%%)",
                                        100.0 * run / ena);
                        }
                }
                fprintf(stderr, "\n");
        }
}

static void print_stat(int argc, const char **argv)
{
        struct perf_evsel *counter;
        int i;

        fflush(stdout);

        if (!csv_output) {
                fprintf(stderr, "\n");
                fprintf(stderr, " Performance counter stats for ");
                if (target_pid == -1 && target_tid == -1) {
                        fprintf(stderr, "\'%s", argv[0]);
                        for (i = 1; i < argc; i++)
                                fprintf(stderr, " %s", argv[i]);
                } else if (target_pid != -1)
                        fprintf(stderr, "process id \'%d", target_pid);
                else
                        fprintf(stderr, "thread id \'%d", target_tid);

                fprintf(stderr, "\'");
                if (run_count > 1)
                        fprintf(stderr, " (%d runs)", run_count);
                fprintf(stderr, ":\n\n");
        }

        if (no_aggr) {
                list_for_each_entry(counter, &evsel_list, node)
                        print_counter(counter);
        } else {
                list_for_each_entry(counter, &evsel_list, node)
                        print_counter_aggr(counter);
        }

        if (!csv_output) {
                fprintf(stderr, "\n");
                fprintf(stderr, " %18.9f  seconds time elapsed",
                                avg_stats(&walltime_nsecs_stats)/1e9);
                if (run_count > 1) {
                        fprintf(stderr, "   ( +- %7.3f%% )",
                                100*stddev_stats(&walltime_nsecs_stats) /
                                avg_stats(&walltime_nsecs_stats));
                }
                fprintf(stderr, "\n\n");
        }
}

static volatile int signr = -1;

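/*
 * perf stat itself mostly ignores SIGINT & co so that Ctrl-C reaches the
 * forked workload; the signal is only remembered so that sig_atexit() can
 * re-raise it with the default handler once the counts have been printed.
 * When there is no child, it also ends the counting loop:
 */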
static void skip_signal(int signo)
{
        if (child_pid == -1)
                done = 1;

        signr = signo;
}

static void sig_atexit(void)
{
        if (child_pid != -1)
                kill(child_pid, SIGTERM);

        if (signr == -1)
                return;

        signal(signr, SIG_DFL);
        kill(getpid(), signr);
}

static const char * const stat_usage[] = {
        "perf stat [<options>] [<command>]",
        NULL
};

static int stat__set_big_num(const struct option *opt __used,
                             const char *s __used, int unset)
{
        big_num_opt = unset ? 0 : 1;
        return 0;
}

static const struct option options[] = {
        OPT_CALLBACK('e', "event", NULL, "event",
                     "event selector. use 'perf list' to list available events",
                     parse_events),
        OPT_BOOLEAN('i', "no-inherit", &no_inherit,
                    "child tasks do not inherit counters"),
        OPT_INTEGER('p', "pid", &target_pid,
                    "stat events on existing process id"),
        OPT_INTEGER('t', "tid", &target_tid,
                    "stat events on existing thread id"),
        OPT_BOOLEAN('a', "all-cpus", &system_wide,
                    "system-wide collection from all CPUs"),
        OPT_BOOLEAN('c', "scale", &scale,
                    "scale/normalize counters"),
        OPT_INCR('v', "verbose", &verbose,
                    "be more verbose (show counter open errors, etc)"),
        OPT_INTEGER('r', "repeat", &run_count,
                    "repeat command and print average + stddev (max: 100)"),
        OPT_BOOLEAN('n', "null", &null_run,
                    "null run - don't start any counters"),
        OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL,
                           "print large numbers with thousands' separators",
                           stat__set_big_num),
        OPT_STRING('C', "cpu", &cpu_list, "cpu",
                    "list of cpus to monitor in system-wide"),
        OPT_BOOLEAN('A', "no-aggr", &no_aggr,
                    "disable CPU count aggregation"),
        OPT_STRING('x', "field-separator", &csv_sep, "separator",
                   "print counts with custom separator"),
        OPT_END()
};

int cmd_stat(int argc, const char **argv, const char *prefix __used)
{
        struct perf_evsel *pos;
        int status = -ENOMEM;

        setlocale(LC_ALL, "");

        argc = parse_options(argc, argv, options, stat_usage,
                PARSE_OPT_STOP_AT_NON_OPTION);

        if (csv_sep)
                csv_output = true;
        else
                csv_sep = DEFAULT_SEPARATOR;

        /*
         * let the spreadsheet do the pretty-printing
         */
        if (csv_output) {
                /* User explicitly passed -B? */
                if (big_num_opt == 1) {
                        fprintf(stderr, "-B option not supported with -x\n");
                        usage_with_options(stat_usage, options);
                } else /* Nope, so disable big number formatting */
                        big_num = false;
        } else if (big_num_opt == 0) /* User passed --no-big-num */
                big_num = false;

        if (!argc && target_pid == -1 && target_tid == -1)
                usage_with_options(stat_usage, options);
        if (run_count <= 0)
                usage_with_options(stat_usage, options);

        /* no_aggr is for system-wide only */
        if (no_aggr && !system_wide)
                usage_with_options(stat_usage, options);

        /* Set attrs and nr_counters if no event is selected and !null_run */
        if (!null_run && !nr_counters) {
                size_t c;

                nr_counters = ARRAY_SIZE(default_attrs);

                for (c = 0; c < ARRAY_SIZE(default_attrs); ++c) {
                        pos = perf_evsel__new(default_attrs[c].type,
                                              default_attrs[c].config,
                                              nr_counters);
                        if (pos == NULL)
                                goto out;
                        list_add(&pos->node, &evsel_list);
                }
        }

        if (system_wide)
                nr_cpus = read_cpu_map(cpu_list);
        else
                nr_cpus = 1;

        if (nr_cpus < 1)
                usage_with_options(stat_usage, options);

        if (target_pid != -1) {
                target_tid = target_pid;
                thread_num = find_all_tid(target_pid, &all_tids);
                if (thread_num <= 0) {
                        fprintf(stderr, "Can't find all threads of pid %d\n",
                                        target_pid);
                        usage_with_options(stat_usage, options);
                }
        } else {
                all_tids = malloc(sizeof(pid_t));
                if (!all_tids)
                        return -ENOMEM;

                all_tids[0] = target_tid;
                thread_num = 1;
        }

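        /*
         * Allocate per-event state: the stat private area, per-cpu counts
         * and an fd array sized nr_cpus x thread_num:
         */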
        list_for_each_entry(pos, &evsel_list, node) {
                if (perf_evsel__alloc_stat_priv(pos) < 0 ||
                    perf_evsel__alloc_counts(pos, nr_cpus) < 0 ||
                    perf_evsel__alloc_fd(pos, nr_cpus, thread_num) < 0)
                        goto out_free_fd;
        }

        /*
         * We don't want to block the signals - that would cause
         * child tasks to inherit that and Ctrl-C would not work.
         * What we want is for Ctrl-C to work in the exec()-ed
         * task, but being ignored by perf stat itself:
         */
        atexit(sig_atexit);
        signal(SIGINT,  skip_signal);
        signal(SIGALRM, skip_signal);
        signal(SIGABRT, skip_signal);

        status = 0;
        for (run_idx = 0; run_idx < run_count; run_idx++) {
                if (run_count != 1 && verbose)
                        fprintf(stderr, "[ perf stat: executing run #%d ... ]\n", run_idx + 1);
                status = run_perf_stat(argc, argv);
        }

        if (status != -1)
                print_stat(argc, argv);
out_free_fd:
        list_for_each_entry(pos, &evsel_list, node)
                perf_evsel__free_stat_priv(pos);
out:
        return status;
}