/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
#define _FILE_OFFSET_BITS 64

#include "builtin.h"
#include "perf.h"
#include "util/build-id.h"
#include "util/util.h"
#include "util/parse-options.h"
#include "util/parse-events.h"

#include "util/header.h"
#include "util/event.h"
#include "util/debug.h"
#include "util/session.h"
#include "util/symbol.h"
#include "util/cpumap.h"

#include <unistd.h>
#include <sched.h>
#include <sys/mman.h>
enum write_mode_t {
	WRITE_FORCE,
	WRITE_APPEND
};

static int *fd[MAX_NR_CPUS][MAX_COUNTERS];
static unsigned int user_interval = UINT_MAX;
static long default_interval = 0;

static int nr_cpus = 0;
static unsigned int page_size;
static unsigned int mmap_pages = 128;
static unsigned int user_freq = UINT_MAX;
static int freq = 1000;

static int output;
static int pipe_output = 0;
static const char *output_name = "perf.data";

static int group = 0;
static int group_fd = -1; /* group leader fd, reset per CPU in open_counters() */
static unsigned int realtime_prio = 0;
static bool raw_samples = false;
static bool system_wide = false;
static int profile_cpu = -1;
static pid_t target_pid = -1;
static pid_t target_tid = -1;
static pid_t *all_tids = NULL;
static int thread_num = 0;
static pid_t child_pid = -1;
static bool inherit = true;
static enum write_mode_t write_mode = WRITE_FORCE;
static bool call_graph = false;
static bool inherit_stat = false;
static bool no_samples = false;
static bool sample_address = false;
static bool multiplex = false;
static int multiplex_fd = -1;

static long samples = 0;
static struct timeval last_read;
static struct timeval this_read;

static u64 bytes_written = 0;

static struct pollfd *event_array;

static int nr_poll = 0;
static int nr_cpu = 0;

static int file_new = 1;
static off_t post_processing_offset;

static struct perf_session *session;
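/*
 * Counter bookkeeping is indexed as [cpu][counter][thread]: fd[][] holds
 * one array of thread_num perf_event file descriptors per cpu/counter
 * pair, and mmap_array[][] (below) the matching ring-buffer state. Both
 * are allocated in cmd_record() once thread_num is known.
 */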
struct mmap_data {
	int counter;
	void *base;
	unsigned int mask;
	unsigned int prev;
};

static struct mmap_data *mmap_array[MAX_NR_CPUS][MAX_COUNTERS];
static unsigned long mmap_read_head(struct mmap_data *md)
{
	struct perf_event_mmap_page *pc = md->base;
	long head;

	head = pc->data_head;
	rmb();

	return head;
}

static void mmap_write_tail(struct mmap_data *md, unsigned long tail)
{
	struct perf_event_mmap_page *pc = md->base;

	/*
	 * ensure all reads are done before we write the tail out.
	 */
	/* mb(); */
	pc->data_tail = tail;
}
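/*
 * Note on the ring-buffer protocol: the kernel advances data_head in the
 * perf_event_mmap_page as it writes samples, and user space advances
 * data_tail once it has consumed them. The rmb() in mmap_read_head()
 * pairs with the kernel's write barrier, so sample data is never read
 * ahead of the head value we loaded; storing data_tail tells the kernel
 * the space may be reused.
 */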
static void advance_output(size_t size)
{
	bytes_written += size;
}
static void write_output(void *buf, size_t size)
{
	while (size) {
		int ret = write(output, buf, size);

		if (ret < 0)
			die("failed to write");

		size -= ret;
		buf += ret;

		bytes_written += ret;
	}
}
static int process_synthesized_event(event_t *event,
				     struct perf_session *self __used)
{
	write_output(event, event->header.size);
	return 0;
}
static void mmap_read(struct mmap_data *md)
{
	unsigned int head = mmap_read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	unsigned long size;
	void *buf;
	int diff;

	gettimeofday(&this_read, NULL);

	/*
	 * If we're further behind than half the buffer, there's a chance
	 * the writer will bite our tail and mess up the samples under us.
	 *
	 * If we somehow ended up ahead of the head, we got messed up.
	 *
	 * In either case, truncate and restart at head.
	 */
	diff = head - old;
	if (diff < 0) {
		struct timeval iv;
		unsigned long msecs;

		timersub(&this_read, &last_read, &iv);
		msecs = iv.tv_sec*1000 + iv.tv_usec/1000;

		fprintf(stderr, "WARNING: failed to keep up with mmap data."
				" Last read %lu msecs ago.\n", msecs);

		/*
		 * head points to a known good entry, start there.
		 */
		old = head;
	}

	last_read = this_read;

	if (old != head)
		samples++;

	size = head - old;

	if ((old & md->mask) + size != (head & md->mask)) {
		buf = &data[old & md->mask];
		size = md->mask + 1 - (old & md->mask);
		old += size;

		write_output(buf, size);
	}

	buf = &data[old & md->mask];
	size = head - old;
	old += size;

	write_output(buf, size);

	md->prev = old;
	mmap_write_tail(md, old);
}
static volatile int done = 0;
static volatile int signr = -1;

static void sig_handler(int sig)
{
	done = 1;
	signr = sig;
}

static void sig_atexit(void)
{
	if (child_pid != -1)
		kill(child_pid, SIGTERM);

	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	kill(getpid(), signr);
}
static struct perf_header_attr *get_header_attr(struct perf_event_attr *a, int nr)
{
	struct perf_header_attr *h_attr;

	if (nr < session->header.attrs) {
		h_attr = session->header.attr[nr];
	} else {
		h_attr = perf_header_attr__new(a);
		if (h_attr != NULL)
			if (perf_header__add_attr(&session->header, h_attr) < 0) {
				perf_header_attr__delete(h_attr);
				h_attr = NULL;
			}
	}

	return h_attr;
}
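/*
 * When appending to an existing file (WRITE_APPEND), the attribute
 * entries already present in the header are reused; create_counter()
 * below then checks with memcmp() that the freshly opened counters match
 * them, and aborts with "incompatible append" if they do not.
 */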
static void create_counter(int counter, int cpu)
{
	char *filter = filters[counter];
	struct perf_event_attr *attr = attrs + counter;
	struct perf_header_attr *h_attr;
	int track = !counter; /* only the first counter needs these */
	int thread_index;
	int ret;
	struct {
		u64 count;
		u64 time_enabled;
		u64 time_running;
		u64 id;
	} read_data;

	attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
			    PERF_FORMAT_TOTAL_TIME_RUNNING |
			    PERF_FORMAT_ID;

	attr->sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID;

	if (nr_counters > 1)
		attr->sample_type |= PERF_SAMPLE_ID;
	/*
	 * We default some events to a sample period of 1. But keep
	 * it a weak assumption that the user can override.
	 */
	if (!attr->sample_period || (user_freq != UINT_MAX &&
				     user_interval != UINT_MAX)) {
		if (freq) {
			attr->sample_type |= PERF_SAMPLE_PERIOD;
			attr->freq = 1;
			attr->sample_freq = freq;
		} else {
			attr->sample_period = default_interval;
		}
	}

	if (no_samples)
		attr->sample_freq = 0;

	if (inherit_stat)
		attr->inherit_stat = 1;

	if (sample_address)
		attr->sample_type |= PERF_SAMPLE_ADDR;

	if (call_graph)
		attr->sample_type |= PERF_SAMPLE_CALLCHAIN;

	if (raw_samples) {
		attr->sample_type |= PERF_SAMPLE_TIME;
		attr->sample_type |= PERF_SAMPLE_RAW;
		attr->sample_type |= PERF_SAMPLE_CPU;
	}
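	/*
	 * When launching a workload (no -p/-t/-a given), the counter is
	 * created disabled with enable_on_exec set, so counting starts
	 * only when the child exec()s the target and perf's own fork/exec
	 * overhead is not profiled.
	 */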
	attr->mmap = track;
	attr->comm = track;
	attr->inherit = inherit;
	if (target_pid == -1 && !system_wide) {
		attr->disabled = 1;
		attr->enable_on_exec = 1;
	}

	for (thread_index = 0; thread_index < thread_num; thread_index++) {
try_again:
		fd[nr_cpu][counter][thread_index] = sys_perf_event_open(attr,
				all_tids[thread_index], cpu, group_fd, 0);
		if (fd[nr_cpu][counter][thread_index] < 0) {
			int err = errno;

			if (err == EPERM || err == EACCES)
				die("Permission error - are you root?\n"
					"\t Consider tweaking"
					" /proc/sys/kernel/perf_event_paranoid.\n");
			else if (err == ENODEV && profile_cpu != -1) {
				die("No such device - did you specify"
					" an out-of-range profile CPU?\n");
			}

			/*
			 * If it's cycles then fall back to hrtimer
			 * based cpu-clock-tick sw counter, which
			 * is always available even if no PMU support:
			 */
			if (attr->type == PERF_TYPE_HARDWARE
					&& attr->config == PERF_COUNT_HW_CPU_CYCLES) {

				if (verbose)
					warning(" ... trying to fall back to cpu-clock-ticks\n");
				attr->type = PERF_TYPE_SOFTWARE;
				attr->config = PERF_COUNT_SW_CPU_CLOCK;
				goto try_again;
			}
			printf("\n");
			error("perfcounter syscall returned with %d (%s)\n",
					fd[nr_cpu][counter][thread_index], strerror(err));

#if defined(__i386__) || defined(__x86_64__)
			if (attr->type == PERF_TYPE_HARDWARE && err == EOPNOTSUPP)
				die("No hardware sampling interrupt available."
				    " No APIC? If so then you can boot the kernel"
				    " with the \"lapic\" boot parameter to"
				    " force-enable it.\n");
#endif

			die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
		}
		h_attr = get_header_attr(attr, counter);
		if (h_attr == NULL)
			die("nomem\n");

		if (!file_new) {
			if (memcmp(&h_attr->attr, attr, sizeof(*attr))) {
				fprintf(stderr, "incompatible append\n");
				exit(-1);
			}
		}

		if (read(fd[nr_cpu][counter][thread_index], &read_data, sizeof(read_data)) == -1) {
			perror("Unable to read perf file descriptor");
			exit(-1);
		}

		if (perf_header_attr__add_id(h_attr, read_data.id) < 0) {
			pr_warning("Not enough memory to add id\n");
			exit(-1);
		}

		assert(fd[nr_cpu][counter][thread_index] >= 0);
		fcntl(fd[nr_cpu][counter][thread_index], F_SETFL, O_NONBLOCK);
		/*
		 * First counter acts as the group leader:
		 */
		if (group && group_fd == -1)
			group_fd = fd[nr_cpu][counter][thread_index];
		if (multiplex && multiplex_fd == -1)
			multiplex_fd = fd[nr_cpu][counter][thread_index];

		if (multiplex && fd[nr_cpu][counter][thread_index] != multiplex_fd) {
			ret = ioctl(fd[nr_cpu][counter][thread_index],
				    PERF_EVENT_IOC_SET_OUTPUT, multiplex_fd);
			assert(ret != -1);
		} else {
			event_array[nr_poll].fd = fd[nr_cpu][counter][thread_index];
			event_array[nr_poll].events = POLLIN;
			nr_poll++;
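			/*
			 * Map mmap_pages + 1 pages: the first page is the
			 * perf_event_mmap_page control page, the remaining
			 * mmap_pages (a power of two) form the data ring,
			 * hence mask = mmap_pages*page_size - 1.
			 */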
			mmap_array[nr_cpu][counter][thread_index].counter = counter;
			mmap_array[nr_cpu][counter][thread_index].prev = 0;
			mmap_array[nr_cpu][counter][thread_index].mask = mmap_pages*page_size - 1;
			mmap_array[nr_cpu][counter][thread_index].base = mmap(NULL, (mmap_pages+1)*page_size,
				PROT_READ|PROT_WRITE, MAP_SHARED, fd[nr_cpu][counter][thread_index], 0);
			if (mmap_array[nr_cpu][counter][thread_index].base == MAP_FAILED) {
				error("failed to mmap with %d (%s)\n", errno, strerror(errno));
				exit(-1);
			}
		}
		if (filter != NULL) {
			ret = ioctl(fd[nr_cpu][counter][thread_index],
				    PERF_EVENT_IOC_SET_FILTER, filter);
			if (ret) {
				error("failed to set filter with %d (%s)\n", errno,
					strerror(errno));
				exit(-1);
			}
		}
	}
}
static void open_counters(int cpu)
{
	int counter;

	group_fd = -1;
	for (counter = 0; counter < nr_counters; counter++)
		create_counter(counter, cpu);

	nr_cpu++;
}
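/*
 * Post-processing pass, run from atexit_header(): re-read the events just
 * written (starting at post_processing_offset) so DSOs that samples
 * actually hit get their build-ids marked for inclusion in the header.
 */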
static int process_buildids(void)
{
	u64 size = lseek(output, 0, SEEK_CUR);

	if (size == 0)
		return 0;

	session->fd = output;
	return __perf_session__process_events(session, post_processing_offset,
					      size - post_processing_offset,
					      size, &build_id__mark_dso_hit_ops);
}
static void atexit_header(void)
{
	if (!pipe_output) {
		session->header.data_size += bytes_written;

		process_buildids();
		perf_header__write(&session->header, output, true);
	} else {
		int err;

		err = event__synthesize_build_ids(process_synthesized_event,
						  session);
		if (err < 0)
			pr_err("Couldn't synthesize build ids.\n");
	}
}
static void event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	char *guest_kallsyms;
	char path[PATH_MAX];
	struct perf_session *psession = data;

	if (machine__is_host(machine))
		return;

	/*
	 * For guest kernels, when processing the record and report
	 * subcommands we emit the module mmap events before the guest
	 * kernel mmap and trigger a dso preload, because by default guest
	 * module symbols are loaded from guest kallsyms instead of
	 * /lib/modules/XXX/XXX. This avoids missing symbols when the
	 * first sampled address falls in a module rather than in the
	 * guest kernel.
	 */
	err = event__synthesize_modules(process_synthesized_event,
					psession, machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	if (machine__is_default_guest(machine))
		guest_kallsyms = (char *) symbol_conf.default_guest_kallsyms;
	else {
		sprintf(path, "%s/proc/kallsyms", machine->root_dir);
		guest_kallsyms = path;
	}

	/*
	 * We use _stext for the guest kernel because the guest kernel's
	 * /proc/kallsyms sometimes has no _text entry.
	 */
	err = event__synthesize_kernel_mmap(process_synthesized_event,
					    psession, machine, "_text");
	if (err < 0)
		err = event__synthesize_kernel_mmap(process_synthesized_event,
						    psession, machine, "_stext");
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}
static int __cmd_record(int argc, const char **argv)
{
	int i, counter;
	struct stat st;
	int flags;
	int err;
	unsigned long waking = 0;
	int child_ready_pipe[2], go_pipe[2];
	const bool forks = argc > 0;
	char buf;
	struct machine *machine;

	page_size = sysconf(_SC_PAGE_SIZE);

	atexit(sig_atexit);
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);
	if (forks && (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0)) {
		perror("failed to create pipes");
		exit(-1);
	}
	if (!strcmp(output_name, "-"))
		pipe_output = 1;
	else if (!stat(output_name, &st) && st.st_size) {
		if (write_mode == WRITE_FORCE) {
			char oldname[PATH_MAX];
			snprintf(oldname, sizeof(oldname), "%s.old",
				 output_name);
			unlink(oldname);
			rename(output_name, oldname);
		}
	} else if (write_mode == WRITE_APPEND) {
		write_mode = WRITE_FORCE;
	}

	flags = O_CREAT|O_RDWR;
	if (write_mode == WRITE_APPEND)
		file_new = 0;
	else
		flags |= O_TRUNC;

	if (pipe_output)
		output = STDOUT_FILENO;
	else
		output = open(output_name, flags, S_IRUSR | S_IWUSR);
	if (output < 0) {
		perror("failed to create output file");
		exit(-1);
	}
	session = perf_session__new(output_name, O_WRONLY,
				    write_mode == WRITE_FORCE);
	if (session == NULL) {
		pr_err("Not enough memory for reading perf file header\n");
		return -1;
	}

	if (!file_new) {
		err = perf_header__read(session, output);
		if (err < 0)
			return err;
	}

	if (raw_samples) {
		perf_header__set_feat(&session->header, HEADER_TRACE_INFO);
	} else {
		for (i = 0; i < nr_counters; i++) {
			if (attrs[i].sample_type & PERF_SAMPLE_RAW) {
				perf_header__set_feat(&session->header,
						      HEADER_TRACE_INFO);
				break;
			}
		}
	}

	atexit(atexit_header);
	if (forks) {
		child_pid = fork();
		if (child_pid < 0) {
			perror("failed to fork");
			exit(-1);
		}

		if (!child_pid) {
			if (pipe_output)
				dup2(2, 1);
			close(child_ready_pipe[0]);
			close(go_pipe[1]);
			fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

			/*
			 * Do a dummy execvp to get the PLT entry resolved,
			 * so we avoid the resolver overhead on the real
			 * execvp call.
			 */
			execvp("", (char **)argv);

			/*
			 * Tell the parent we're ready to go.
			 */
			close(child_ready_pipe[1]);

			/*
			 * Wait until the parent tells us to go.
			 */
			if (read(go_pipe[0], &buf, 1) == -1)
				perror("unable to read pipe");

			execvp(argv[0], (char **)argv);

			perror(argv[0]);
			exit(-1);
		}
		if (!system_wide && target_tid == -1 && target_pid == -1)
			all_tids[0] = child_pid;

		close(child_ready_pipe[1]);
		close(go_pipe[0]);
		/*
		 * wait for child to settle
		 */
		if (read(child_ready_pipe[0], &buf, 1) == -1) {
			perror("unable to read pipe");
			exit(-1);
		}
		close(child_ready_pipe[0]);
	}
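	/*
	 * At this point the child exists but is parked in read(go_pipe[0]),
	 * so the counters opened below can observe it before it exec()s;
	 * the write end of go_pipe is closed just before the event loop to
	 * let it run.
	 */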
	if ((!system_wide && !inherit) || profile_cpu != -1) {
		open_counters(profile_cpu);
	} else {
		nr_cpus = read_cpu_map();
		for (i = 0; i < nr_cpus; i++)
			open_counters(cpumap[i]);
	}
	if (pipe_output) {
		err = perf_header__write_pipe(output);
		if (err < 0)
			return err;
	} else if (file_new) {
		err = perf_header__write(&session->header, output, false);
		if (err < 0)
			return err;
	}

	post_processing_offset = lseek(output, 0, SEEK_CUR);

	if (pipe_output) {
		err = event__synthesize_attrs(&session->header,
					      process_synthesized_event,
					      session);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			return err;
		}

		err = event__synthesize_event_types(process_synthesized_event,
						    session);
		if (err < 0) {
			pr_err("Couldn't synthesize event_types.\n");
			return err;
		}

		err = event__synthesize_tracing_data(output, attrs,
						     nr_counters,
						     process_synthesized_event,
						     session);
		if (err <= 0) {
			pr_err("Couldn't record tracing data.\n");
			return err;
		}

		advance_output(err);
	}
	machine = perf_session__find_host_machine(session);
	if (!machine) {
		pr_err("Couldn't find native kernel information.\n");
		return -1;
	}

	err = event__synthesize_kernel_mmap(process_synthesized_event,
					    session, machine, "_text");
	if (err < 0)
		err = event__synthesize_kernel_mmap(process_synthesized_event,
						    session, machine, "_stext");
	if (err < 0) {
		pr_err("Couldn't record kernel reference relocation symbol.\n");
		return err;
	}

	err = event__synthesize_modules(process_synthesized_event,
					session, machine);
	if (err < 0) {
		pr_err("Couldn't record kernel module information.\n");
		return err;
	}

	if (perf_guest)
		perf_session__process_machines(session,
					       event__synthesize_guest_os);
	if (!system_wide && profile_cpu == -1)
		event__synthesize_thread(target_tid, process_synthesized_event,
					 session);
	else
		event__synthesize_threads(process_synthesized_event, session);
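	/*
	 * Samples only carry addresses, so the synthesized mmap/comm/thread
	 * events above give perf report the address-space layout it needs
	 * to resolve them; already-running threads are scanned from /proc
	 * because their original events predate the trace.
	 */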
	if (realtime_prio) {
		struct sched_param param;

		param.sched_priority = realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			pr_err("Could not set realtime priority.\n");
			exit(-1);
		}
	}
	/*
	 * Let the child rip.
	 */
	if (forks)
		close(go_pipe[1]);

	for (;;) {
		int hits = samples;
		int thread;

		for (i = 0; i < nr_cpu; i++) {
			for (counter = 0; counter < nr_counters; counter++) {
				for (thread = 0;
					thread < thread_num; thread++) {
					if (mmap_array[i][counter][thread].base)
						mmap_read(&mmap_array[i][counter][thread]);
				}
			}
		}

		if (hits == samples) {
			if (done)
				break;
			err = poll(event_array, nr_poll, -1);
			waking++;
		}

		if (done) {
			for (i = 0; i < nr_cpu; i++) {
				for (counter = 0;
					counter < nr_counters;
					counter++) {
					for (thread = 0;
						thread < thread_num;
						thread++)
						ioctl(fd[i][counter][thread],
							PERF_EVENT_IOC_DISABLE);
				}
			}
		}
	}

	fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);

	/*
	 * Approximate RIP event size: 24 bytes.
	 */
	fprintf(stderr,
		"[ perf record: Captured and wrote %.3f MB %s (~%lld samples) ]\n",
		(double)bytes_written / 1024.0 / 1024.0,
		output_name,
		bytes_written / 24);

	return 0;
}
static const char * const record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};
static bool force, append_file;
static const struct option options[] = {
	OPT_CALLBACK('e', "event", NULL, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events),
	OPT_CALLBACK(0, "filter", NULL, "filter",
		     "event filter", parse_filter),
	OPT_INTEGER('p', "pid", &target_pid,
		    "record events on existing process id"),
	OPT_INTEGER('t', "tid", &target_tid,
		    "record events on existing thread id"),
	OPT_INTEGER('r', "realtime", &realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_BOOLEAN('R', "raw-samples", &raw_samples,
		    "collect raw sample records from all opened counters"),
	OPT_BOOLEAN('a', "all-cpus", &system_wide,
		    "system-wide collection from all CPUs"),
	OPT_BOOLEAN('A', "append", &append_file,
		    "append to the output file to do incremental profiling"),
	OPT_INTEGER('C', "profile_cpu", &profile_cpu,
		    "CPU to profile on"),
	OPT_BOOLEAN('f', "force", &force,
		    "overwrite existing data file (deprecated)"),
	OPT_LONG('c', "count", &user_interval,
		 "event period to sample"),
	OPT_STRING('o', "output", &output_name, "file",
		   "output file name"),
	OPT_BOOLEAN('i', "inherit", &inherit,
		    "child tasks inherit counters"),
	OPT_INTEGER('F', "freq", &user_freq,
		    "profile at this frequency"),
	OPT_INTEGER('m', "mmap-pages", &mmap_pages,
		    "number of mmap data pages"),
	OPT_BOOLEAN('g', "call-graph", &call_graph,
		    "do call-graph (stack chain/backtrace) recording"),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show counter open errors, etc)"),
	OPT_BOOLEAN('s', "stat", &inherit_stat,
		    "per thread counts"),
	OPT_BOOLEAN('d', "data", &sample_address,
		    "Sample addresses"),
	OPT_BOOLEAN('n', "no-samples", &no_samples,
		    "don't sample"),
	OPT_BOOLEAN('M', "multiplex", &multiplex,
		    "multiplex counter output in a single channel"),
	OPT_END()
};
int cmd_record(int argc, const char **argv, const char *prefix __used)
{
	int i, j;

	argc = parse_options(argc, argv, options, record_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc && target_pid == -1 && target_tid == -1 &&
	    !system_wide && profile_cpu == -1)
		usage_with_options(record_usage, options);

	if (force && append_file) {
		fprintf(stderr, "Can't overwrite and append at the same time."
				" You need to choose between -f and -A");
		usage_with_options(record_usage, options);
	} else if (append_file) {
		write_mode = WRITE_APPEND;
	} else {
		write_mode = WRITE_FORCE;
	}

	symbol__init();

	if (!nr_counters) {
		nr_counters = 1;
		attrs[0].type = PERF_TYPE_HARDWARE;
		attrs[0].config = PERF_COUNT_HW_CPU_CYCLES;
	}

	if (target_pid != -1) {
		target_tid = target_pid;
		thread_num = find_all_tid(target_pid, &all_tids);
		if (thread_num <= 0) {
			fprintf(stderr, "Can't find all threads of pid %d\n",
				target_pid);
			usage_with_options(record_usage, options);
		}
	} else {
		all_tids = malloc(sizeof(pid_t));
		if (!all_tids)
			return -ENOMEM;

		all_tids[0] = target_tid;
		thread_num = 1;
	}

	for (i = 0; i < MAX_NR_CPUS; i++) {
		for (j = 0; j < MAX_COUNTERS; j++) {
			fd[i][j] = malloc(sizeof(int)*thread_num);
			mmap_array[i][j] = zalloc(
				sizeof(struct mmap_data)*thread_num);
			if (!fd[i][j] || !mmap_array[i][j])
				return -ENOMEM;
		}
	}
	event_array = malloc(
		sizeof(struct pollfd)*MAX_NR_CPUS*MAX_COUNTERS*thread_num);
	if (!event_array)
		return -ENOMEM;

	if (user_interval != UINT_MAX)
		default_interval = user_interval;
	if (user_freq != UINT_MAX)
		freq = user_freq;

	/*
	 * User specified count overrides default frequency.
	 */
	if (default_interval)
		freq = 0;
	else if (freq) {
		default_interval = freq;
	} else {
		fprintf(stderr, "frequency and count are zero, aborting\n");
		exit(EXIT_FAILURE);
	}

	return __cmd_record(argc, argv);
}