5 #include "util/cache.h"
6 #include "util/symbol.h"
7 #include "util/thread.h"
8 #include "util/header.h"
10 #include "util/parse-options.h"
11 #include "util/trace-event.h"
13 #include "util/debug.h"
14 #include "util/data_map.h"
16 #include <linux/rbtree.h>
struct alloc_stat;
typedef int (*sort_fn_t)(struct alloc_stat *, struct alloc_stat *);

static char const		*input_name = "perf.data";

static struct perf_header	*header;
static u64			sample_type;

static int			alloc_flag;
static int			caller_flag;

static int			alloc_lines = -1;
static int			caller_lines = -1;

static int			raw_ip;	/* --raw-ip: print addresses, not symbols */

static char			default_sort_order[] = "frag,hit,bytes";

static int			*cpunode_map;
static int			max_cpu_num;

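/*
 * The per-bucket statistics record.  Its definition is elided from
 * this listing; this is a reconstruction from the fields the code
 * below actually touches (one bucket per unique pointer in the alloc
 * tree, per unique call site in the caller tree):
 */
struct alloc_stat {
	u64	call_site;
	u64	ptr;
	u64	bytes_req;
	u64	bytes_alloc;

	u32	hit;
	u32	pingpong;

	short	alloc_cpu;

	struct rb_node node;
};
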
static struct rb_root root_alloc_stat;
static struct rb_root root_alloc_sorted;
static struct rb_root root_caller_stat;
static struct rb_root root_caller_sorted;

static unsigned long total_requested, total_allocated;
static unsigned long nr_allocs, nr_cross_allocs;

struct raw_event_sample {
	u32 size;
	char data[0];	/* raw tracepoint record follows the size word */
};

#define PATH_SYS_NODE	"/sys/devices/system/node"

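/*
 * Build the cpu -> NUMA-node lookup table used to detect cross-node
 * allocations.  sysfs' kernel_max is the highest possible CPU id, so
 * the map is sized one larger and every slot starts at -1 ("unknown").
 */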
static void init_cpunode_map(void)
{
	FILE *fp;
	int i;

	fp = fopen("/sys/devices/system/cpu/kernel_max", "r");
	if (!fp) {
		max_cpu_num = 4096;
		return;
	}

	if (fscanf(fp, "%d", &max_cpu_num) < 1)
		die("Failed to read 'kernel_max' from sysfs");

	max_cpu_num++;	/* kernel_max is an id; ids start at 0 */

	cpunode_map = calloc(max_cpu_num, sizeof(int));
	if (!cpunode_map)
		die("calloc");
	for (i = 0; i < max_cpu_num; i++)
		cpunode_map[i] = -1;
	fclose(fp);
}

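/*
 * Walk /sys/devices/system/node/nodeN/cpuM and record, for every cpuM
 * directory found, which node N it belongs to.
 */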
static void setup_cpunode_map(void)
{
	struct dirent *dent1, *dent2;
	DIR *dir1, *dir2;
	unsigned int cpu, mem;
	char buf[PATH_MAX];

	init_cpunode_map();

	dir1 = opendir(PATH_SYS_NODE);
	if (!dir1)
		return;

	while (true) {
		dent1 = readdir(dir1);
		if (!dent1)
			break;

		if (sscanf(dent1->d_name, "node%u", &mem) < 1)
			continue;

		snprintf(buf, PATH_MAX, "%s/%s", PATH_SYS_NODE, dent1->d_name);
		dir2 = opendir(buf);
		if (!dir2)
			continue;

		while (true) {
			dent2 = readdir(dir2);
			if (!dent2)
				break;
			if (sscanf(dent2->d_name, "cpu%u", &cpu) < 1)
				continue;
			cpunode_map[cpu] = mem;
		}
		closedir(dir2);
	}
	closedir(dir1);
}

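/*
 * Account one allocation in the per-pointer tree, keyed by ptr.  An
 * existing bucket is updated in place; otherwise a fresh node is
 * linked into root_alloc_stat.
 */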
static void insert_alloc_stat(unsigned long call_site, unsigned long ptr,
			      int bytes_req, int bytes_alloc, int cpu)
{
	struct rb_node **node = &root_alloc_stat.rb_node;
	struct rb_node *parent = NULL;
	struct alloc_stat *data = NULL;

	while (*node) {
		parent = *node;
		data = rb_entry(*node, struct alloc_stat, node);

		if (ptr > data->ptr)
			node = &(*node)->rb_right;
		else if (ptr < data->ptr)
			node = &(*node)->rb_left;
		else
			break;
	}

	if (data && data->ptr == ptr) {
		data->hit++;
		data->bytes_req += bytes_req;
		data->bytes_alloc += bytes_alloc;
	} else {
		data = malloc(sizeof(*data));
		if (!data)
			die("malloc");
		data->ptr = ptr;
		data->pingpong = 0;
		data->hit = 1;
		data->bytes_req = bytes_req;
		data->bytes_alloc = bytes_alloc;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &root_alloc_stat);
	}
	data->call_site = call_site;
	data->alloc_cpu = cpu;
}

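/*
 * Same bookkeeping as insert_alloc_stat(), but keyed by call site so
 * the report can aggregate per caller instead of per pointer.
 */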
static void insert_caller_stat(unsigned long call_site,
			       int bytes_req, int bytes_alloc)
{
	struct rb_node **node = &root_caller_stat.rb_node;
	struct rb_node *parent = NULL;
	struct alloc_stat *data = NULL;

	while (*node) {
		parent = *node;
		data = rb_entry(*node, struct alloc_stat, node);

		if (call_site > data->call_site)
			node = &(*node)->rb_right;
		else if (call_site < data->call_site)
			node = &(*node)->rb_left;
		else
			break;
	}

	if (data && data->call_site == call_site) {
		data->hit++;
		data->bytes_req += bytes_req;
		data->bytes_alloc += bytes_alloc;
	} else {
		data = malloc(sizeof(*data));
		if (!data)
			die("malloc");
		data->call_site = call_site;
		data->pingpong = 0;
		data->hit = 1;
		data->bytes_req = bytes_req;
		data->bytes_alloc = bytes_alloc;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &root_caller_stat);
	}
}

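/*
 * Handle one kmalloc/kmem_cache_alloc sample.  'node' is non-zero for
 * the *_node tracepoint variants, whose payload carries the target
 * node; a mismatch with the allocating CPU's node is counted as a
 * cross-node allocation.
 */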
static void process_alloc_event(struct raw_event_sample *raw,
				struct event *event,
				int cpu,
				u64 timestamp __used,
				struct thread *thread __used,
				int node)
{
	unsigned long call_site;
	unsigned long ptr;
	int bytes_req;
	int bytes_alloc;
	int node1, node2;

	ptr = raw_field_value(event, "ptr", raw->data);
	call_site = raw_field_value(event, "call_site", raw->data);
	bytes_req = raw_field_value(event, "bytes_req", raw->data);
	bytes_alloc = raw_field_value(event, "bytes_alloc", raw->data);

	insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, cpu);
	insert_caller_stat(call_site, bytes_req, bytes_alloc);

	total_requested += bytes_req;
	total_allocated += bytes_alloc;

	if (node) {
		node1 = cpunode_map[cpu];
		node2 = raw_field_value(event, "node", raw->data);
		if (node1 != node2)
			nr_cross_allocs++;
	}
	nr_allocs++;
}

static int ptr_cmp(struct alloc_stat *, struct alloc_stat *);
static int callsite_cmp(struct alloc_stat *, struct alloc_stat *);

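/*
 * Look up a bucket in one of the stat trees.  The comparator doubles
 * as the key ordering: ptr_cmp for the alloc tree, callsite_cmp for
 * the caller tree, so only the relevant field of 'key' matters.
 */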
static struct alloc_stat *search_alloc_stat(unsigned long ptr,
					    unsigned long call_site,
					    struct rb_root *root,
					    sort_fn_t sort_fn)
{
	struct rb_node *node = root->rb_node;
	struct alloc_stat key = { .ptr = ptr, .call_site = call_site };

	while (node) {
		struct alloc_stat *data;
		int cmp;

		data = rb_entry(node, struct alloc_stat, node);

		cmp = sort_fn(&key, data);
		if (cmp < 0)
			node = node->rb_left;
		else if (cmp > 0)
			node = node->rb_right;
		else
			return data;
	}
	return NULL;
}

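/*
 * On kfree/kmem_cache_free, find the bucket for the freed pointer.  A
 * free issued on a different CPU than the allocating one is a
 * "ping-pong" and is charged to both the pointer and its call site.
 */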
static void process_free_event(struct raw_event_sample *raw,
			       struct event *event,
			       int cpu,
			       u64 timestamp __used,
			       struct thread *thread __used)
{
	unsigned long ptr;
	struct alloc_stat *s_alloc, *s_caller;

	ptr = raw_field_value(event, "ptr", raw->data);

	s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp);
	if (!s_alloc)
		return;

	if (cpu != s_alloc->alloc_cpu) {
		s_alloc->pingpong++;

		s_caller = search_alloc_stat(0, s_alloc->call_site,
					     &root_caller_stat, callsite_cmp);
		assert(s_caller);
		s_caller->pingpong++;
	}
	s_alloc->alloc_cpu = -1;
}

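/*
 * Dispatch a raw tracepoint sample by event name: plain allocations,
 * node-aware allocations, then frees.
 */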
static void
process_raw_event(event_t *raw_event __used, void *more_data,
		  int cpu, u64 timestamp, struct thread *thread)
{
	struct raw_event_sample *raw = more_data;
	struct event *event;
	int type;

	type = trace_parse_common_type(raw->data);
	event = trace_find_event(type);

	if (!strcmp(event->name, "kmalloc") ||
	    !strcmp(event->name, "kmem_cache_alloc")) {
		process_alloc_event(raw, event, cpu, timestamp, thread, 0);
		return;
	}

	if (!strcmp(event->name, "kmalloc_node") ||
	    !strcmp(event->name, "kmem_cache_alloc_node")) {
		process_alloc_event(raw, event, cpu, timestamp, thread, 1);
		return;
	}

	if (!strcmp(event->name, "kfree") ||
	    !strcmp(event->name, "kmem_cache_free")) {
		process_free_event(raw, event, cpu, timestamp, thread);
		return;
	}
}

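/*
 * Decode the variable-length sample record.  The optional fields
 * (time, cpu, period) follow the fixed ip header in the order implied
 * by sample_type, so the tests below must mirror the layout written
 * by perf record.
 */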
static int process_sample_event(event_t *event)
{
	u64 ip = event->ip.ip;
	u64 timestamp = -1;
	u32 cpu = -1;
	u64 period = 1;
	void *more_data = event->ip.__more_data;
	struct thread *thread = threads__findnew(event->ip.pid);

	if (sample_type & PERF_SAMPLE_TIME) {
		timestamp = *(u64 *)more_data;
		more_data += sizeof(u64);
	}

	if (sample_type & PERF_SAMPLE_CPU) {
		cpu = *(u32 *)more_data;
		more_data += sizeof(u32);
		more_data += sizeof(u32); /* reserved */
	}

	if (sample_type & PERF_SAMPLE_PERIOD) {
		period = *(u64 *)more_data;
		more_data += sizeof(u64);
	}

	dump_printf("(IP, %d): %d/%d: %p period: %Ld\n",
		event->header.misc,
		event->ip.pid, event->ip.tid,
		(void *)(long)ip,
		(long long)period);

	if (thread == NULL) {
		pr_debug("problem processing %d event, skipping it.\n",
			 event->header.type);
		return -1;
	}

	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);

	process_raw_event(event, more_data, cpu, timestamp, thread);

	return 0;
}

static int sample_type_check(u64 type)
{
	sample_type = type;

	if (!(sample_type & PERF_SAMPLE_RAW)) {
		fprintf(stderr,
			"No trace sample to read. Did you call perf record "
			"without -R?");
		return -1;
	}

	return 0;
}

static struct perf_file_handler file_handler = {
	.process_sample_event	= process_sample_event,
	.process_comm_event	= event__process_comm,
	.sample_type_check	= sample_type_check,
};

static int read_events(void)
{
	register_idle_thread();
	register_perf_file_handler(&file_handler);

	return mmap_dispatch_perf_file(&header, input_name, 0, 0,
				       &event__cwdlen, &event__cwd);
}

static double fragmentation(unsigned long n_req, unsigned long n_alloc)
{
	if (n_alloc == 0)
		return 0.0;
	else
		return 100.0 - (100.0 * n_req / n_alloc);
}

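/*
 * Print one result table.  n_lines < 0 means no limit; when the line
 * limit is what ended the loop (n_lines lands exactly on -1), a "..."
 * row marks the truncated output.
 */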
static void __print_result(struct rb_root *root, int n_lines, int is_caller)
{
	struct rb_node *next;

	printf("%.102s\n", graph_dotted_line);
	printf(" %-34s |", is_caller ? "Callsite": "Alloc Ptr");
	printf(" Total_alloc/Per | Total_req/Per | Hit | Ping-pong | Frag\n");
	printf("%.102s\n", graph_dotted_line);

	next = rb_first(root);

	while (next && n_lines--) {
		struct alloc_stat *data = rb_entry(next, struct alloc_stat,
						   node);
		struct symbol *sym = NULL;
		char buf[BUFSIZ];
		u64 addr;

		if (is_caller) {
			addr = data->call_site;
			if (!raw_ip)
				sym = thread__find_function(kthread, addr, NULL);
		} else
			addr = data->ptr;

		if (sym != NULL)
			snprintf(buf, sizeof(buf), "%s+%Lx", sym->name,
				 addr - sym->start);
		else
			snprintf(buf, sizeof(buf), "%#Lx", addr);
		printf(" %-34s |", buf);

		printf(" %9llu/%-5lu | %9llu/%-5lu | %6lu | %8lu | %6.3f%%\n",
		       (unsigned long long)data->bytes_alloc,
		       (unsigned long)data->bytes_alloc / data->hit,
		       (unsigned long long)data->bytes_req,
		       (unsigned long)data->bytes_req / data->hit,
		       (unsigned long)data->hit,
		       (unsigned long)data->pingpong,
		       fragmentation(data->bytes_req, data->bytes_alloc));

		next = rb_next(next);
	}

	if (n_lines == -1)
		printf(" ... | ... | ... | ... | ... | ... \n");

	printf("%.102s\n", graph_dotted_line);
}

static void print_summary(void)
{
	printf("\nSUMMARY\n=======\n");
	printf("Total bytes requested: %lu\n", total_requested);
	printf("Total bytes allocated: %lu\n", total_allocated);
	printf("Total bytes wasted on internal fragmentation: %lu\n",
	       total_allocated - total_requested);
	printf("Internal fragmentation: %f%%\n",
	       fragmentation(total_requested, total_allocated));
	printf("Cross CPU allocations: %lu/%lu\n", nr_cross_allocs, nr_allocs);
}

static void print_result(void)
{
	if (caller_flag)
		__print_result(&root_caller_sorted, caller_lines, 1);
	if (alloc_flag)
		__print_result(&root_alloc_sorted, alloc_lines, 0);
	print_summary();
}

struct sort_dimension {
	const char		name[20];
	sort_fn_t		cmp;
	struct list_head	list;
};

static LIST_HEAD(caller_sort);
static LIST_HEAD(alloc_sort);

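/*
 * Insert into a sorted tree using the chained comparators: the first
 * key in the sort list that distinguishes two entries decides their
 * order; later keys only break ties.  cmp > 0 goes left, so higher
 * values print first.
 */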
static void sort_insert(struct rb_root *root, struct alloc_stat *data,
			struct list_head *sort_list)
{
	struct rb_node **new = &(root->rb_node);
	struct rb_node *parent = NULL;
	struct sort_dimension *sort;

	while (*new) {
		struct alloc_stat *this;
		int cmp = 0;

		this = rb_entry(*new, struct alloc_stat, node);
		parent = *new;

		list_for_each_entry(sort, sort_list, list) {
			cmp = sort->cmp(data, this);
			if (cmp)
				break;
		}

		if (cmp > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
}

static void __sort_result(struct rb_root *root, struct rb_root *root_sorted,
			  struct list_head *sort_list)
{
	struct rb_node *node;
	struct alloc_stat *data;

	for (;;) {
		node = rb_first(root);
		if (!node)
			break;

		rb_erase(node, root);
		data = rb_entry(node, struct alloc_stat, node);
		sort_insert(root_sorted, data, sort_list);
	}
}

static void sort_result(void)
{
	__sort_result(&root_alloc_stat, &root_alloc_sorted, &alloc_sort);
	__sort_result(&root_caller_stat, &root_caller_sorted, &caller_sort);
}

static int __cmd_kmem(void)
{
	setup_pager();
	read_events();
	sort_result();
	print_result();

	return 0;
}

static const char * const kmem_usage[] = {
	"perf kmem [<options>] {record}",
	NULL
};

static int ptr_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->ptr < r->ptr)
		return -1;
	else if (l->ptr > r->ptr)
		return 1;
	return 0;
}

static struct sort_dimension ptr_sort_dimension = {
	.name	= "ptr",
	.cmp	= ptr_cmp,
};

static int callsite_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->call_site < r->call_site)
		return -1;
	else if (l->call_site > r->call_site)
		return 1;
	return 0;
}

static struct sort_dimension callsite_sort_dimension = {
	.name	= "callsite",
	.cmp	= callsite_cmp,
};

static int hit_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->hit < r->hit)
		return -1;
	else if (l->hit > r->hit)
		return 1;
	return 0;
}

static struct sort_dimension hit_sort_dimension = {
	.name	= "hit",
	.cmp	= hit_cmp,
};

static int bytes_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->bytes_alloc < r->bytes_alloc)
		return -1;
	else if (l->bytes_alloc > r->bytes_alloc)
		return 1;
	return 0;
}

static struct sort_dimension bytes_sort_dimension = {
	.name	= "bytes",
	.cmp	= bytes_cmp,
};

static int frag_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	double x, y;

	x = fragmentation(l->bytes_req, l->bytes_alloc);
	y = fragmentation(r->bytes_req, r->bytes_alloc);

	if (x < y)
		return -1;
	else if (x > y)
		return 1;
	return 0;
}

static struct sort_dimension frag_sort_dimension = {
	.name	= "frag",
	.cmp	= frag_cmp,
};

static int pingpong_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->pingpong < r->pingpong)
		return -1;
	else if (l->pingpong > r->pingpong)
		return 1;
	return 0;
}

static struct sort_dimension pingpong_sort_dimension = {
	.name	= "pingpong",
	.cmp	= pingpong_cmp,
};

static struct sort_dimension *avail_sorts[] = {
	&ptr_sort_dimension,
	&callsite_sort_dimension,
	&hit_sort_dimension,
	&bytes_sort_dimension,
	&frag_sort_dimension,
	&pingpong_sort_dimension,
};

#define NUM_AVAIL_SORTS	\
	(int)(sizeof(avail_sorts) / sizeof(struct sort_dimension *))

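/*
 * Resolve one --sort token against the table above.  Each match is
 * copied, so the same dimension can sit on both the alloc and the
 * caller sort lists at once.
 */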
static int sort_dimension__add(const char *tok, struct list_head *list)
{
	struct sort_dimension *sort;
	int i;

	for (i = 0; i < NUM_AVAIL_SORTS; i++) {
		if (!strcmp(avail_sorts[i]->name, tok)) {
			sort = malloc(sizeof(*sort));
			if (!sort)
				die("malloc");
			memcpy(sort, avail_sorts[i], sizeof(*sort));
			list_add_tail(&sort->list, list);
			return 0;
		}
	}

	return -1;
}

static int setup_sorting(struct list_head *sort_list, const char *arg)
{
	char *tok;
	char *str = strdup(arg);

	if (!str)
		die("strdup");

	while (true) {
		tok = strsep(&str, ",");
		if (!tok)
			break;
		if (sort_dimension__add(tok, sort_list) < 0) {
			error("Unknown --sort key: '%s'", tok);
			return -1;
		}
	}

	free(str);
	return 0;
}

static int parse_sort_opt(const struct option *opt __used,
			  const char *arg, int unset __used)
{
	if (!arg)
		return -1;

	/* --sort applies to whichever of --stat alloc/caller came last */
	if (caller_flag > alloc_flag)
		return setup_sorting(&caller_sort, arg);
	else
		return setup_sorting(&alloc_sort, arg);
}

static int parse_stat_opt(const struct option *opt __used,
			  const char *arg, int unset __used)
{
	if (!arg)
		return -1;

	if (strcmp(arg, "alloc") == 0)
		alloc_flag = (caller_flag + 1);
	else if (strcmp(arg, "caller") == 0)
		caller_flag = (alloc_flag + 1);
	else
		return -1;
	return 0;
}

static int parse_line_opt(const struct option *opt __used,
			  const char *arg, int unset __used)
{
	int lines;

	if (!arg)
		return -1;

	lines = strtoul(arg, NULL, 10);

	if (caller_flag > alloc_flag)
		caller_lines = lines;
	else
		alloc_lines = lines;

	return 0;
}

static const struct option kmem_options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		   "input file name"),
	OPT_CALLBACK(0, "stat", NULL, "<alloc>|<caller>",
		     "stat selector, pass 'alloc' or 'caller'",
		     parse_stat_opt),
	OPT_CALLBACK('s', "sort", NULL, "key[,key2...]",
		     "sort by keys: ptr, callsite, bytes, hit, pingpong, frag",
		     parse_sort_opt),
	OPT_CALLBACK('l', "line", NULL, "num",
		     "show n lines",
		     parse_line_opt),
	OPT_BOOLEAN(0, "raw-ip", &raw_ip, "show raw ip instead of symbol"),
	OPT_END()
};

static const char *record_args[] = {
	"record",
	"-a",		/* system-wide */
	"-R",		/* raw samples, required by sample_type_check() */
	"-M",
	"-f",
	"-c", "1",	/* count every event */
	"-e", "kmem:kmalloc",
	"-e", "kmem:kmalloc_node",
	"-e", "kmem:kfree",
	"-e", "kmem:kmem_cache_alloc",
	"-e", "kmem:kmem_cache_alloc_node",
	"-e", "kmem:kmem_cache_free",
};

static int __cmd_record(int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));
	if (!rec_argv)
		die("calloc");

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	return cmd_record(i, rec_argv, NULL);
}

int cmd_kmem(int argc, const char **argv, const char *prefix __used)
{
	symbol__init(0);

	argc = parse_options(argc, argv, kmem_options, kmem_usage, 0);

	if (argc && !strncmp(argv[0], "rec", 3))
		return __cmd_record(argc, argv);
	else if (argc)
		usage_with_options(kmem_usage, kmem_options);

	if (list_empty(&caller_sort))
		setup_sorting(&caller_sort, default_sort_order);
	if (list_empty(&alloc_sort))
		setup_sorting(&alloc_sort, default_sort_order);

	setup_cpunode_map();

	return __cmd_kmem();
}