#include "builtin.h"
#include "perf.h"

#include "util/util.h"
#include "util/cache.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"

#include "util/parse-options.h"
#include "util/trace-event.h"

#include "util/debug.h"

#include <linux/rbtree.h>

struct alloc_stat;
typedef int (*sort_fn_t)(struct alloc_stat *, struct alloc_stat *);
static char const	*input_name = "perf.data";

static u64		sample_type;

static int		alloc_flag;
static int		caller_flag;

static int		alloc_lines = -1;
static int		caller_lines = -1;

static int		raw_ip;

static char		default_sort_order[] = "frag,hit,bytes";

static int		*cpunode_map;
static int		max_cpu_num;

struct alloc_stat {
	u64	call_site;
	u64	ptr;
	u64	bytes_req;
	u64	bytes_alloc;

	u32	hit;
	u32	pingpong;

	short	alloc_cpu;

	struct rb_node node;
};
static struct rb_root root_alloc_stat;
static struct rb_root root_alloc_sorted;
static struct rb_root root_caller_stat;
static struct rb_root root_caller_sorted;

static unsigned long total_requested, total_allocated;
static unsigned long nr_allocs, nr_cross_allocs;

#define PATH_SYS_NODE	"/sys/devices/system/node"
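
/*
 * Build a cpu -> memory-node lookup table from sysfs, so that each
 * allocation's CPU can be compared against the NUMA node the kernel
 * says it allocated from (for the cross-node statistics below).
 */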
static void init_cpunode_map(void)
{
	FILE *fp;
	int i;

	fp = fopen("/sys/devices/system/cpu/kernel_max", "r");
	if (!fp) {
		max_cpu_num = 4096;
		return;
	}

	if (fscanf(fp, "%d", &max_cpu_num) < 1)
		die("Failed to read 'kernel_max' from sysfs");

	/* kernel_max is the highest possible cpu id, so size is +1 */
	max_cpu_num++;

	cpunode_map = calloc(max_cpu_num, sizeof(int));
	if (!cpunode_map)
		die("calloc");
	for (i = 0; i < max_cpu_num; i++)
		cpunode_map[i] = -1;
	fclose(fp);
}
static void setup_cpunode_map(void)
{
	struct dirent *dent1, *dent2;
	DIR *dir1, *dir2;
	unsigned int cpu, mem;
	char buf[PATH_MAX];

	init_cpunode_map();

	dir1 = opendir(PATH_SYS_NODE);
	if (!dir1)
		return;

	/* /sys/devices/system/node/nodeN/cpuM means cpu M is on node N */
	while ((dent1 = readdir(dir1)) != NULL) {
		if (sscanf(dent1->d_name, "node%u", &mem) < 1)
			continue;

		snprintf(buf, PATH_MAX, "%s/%s", PATH_SYS_NODE, dent1->d_name);
		dir2 = opendir(buf);
		if (!dir2)
			continue;
		while ((dent2 = readdir(dir2)) != NULL) {
			if (sscanf(dent2->d_name, "cpu%u", &cpu) < 1)
				continue;
			cpunode_map[cpu] = mem;
		}
		closedir(dir2);
	}
	closedir(dir1);
}
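
/*
 * Allocation statistics live in two rbtrees: root_alloc_stat is keyed
 * by the returned pointer (so a later free event can find its matching
 * allocation), root_caller_stat by call site (for the per-caller
 * report). Re-inserting an existing key accumulates into that node.
 */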
static void insert_alloc_stat(unsigned long call_site, unsigned long ptr,
			      int bytes_req, int bytes_alloc, int cpu)
{
	struct rb_node **node = &root_alloc_stat.rb_node;
	struct rb_node *parent = NULL;
	struct alloc_stat *data = NULL;

	while (*node) {
		parent = *node;
		data = rb_entry(*node, struct alloc_stat, node);

		if (ptr > data->ptr)
			node = &(*node)->rb_right;
		else if (ptr < data->ptr)
			node = &(*node)->rb_left;
		else
			break;
	}

	if (data && data->ptr == ptr) {
		data->hit++;
		data->bytes_req += bytes_req;
		data->bytes_alloc += bytes_alloc;
	} else {
		data = malloc(sizeof(*data));
		if (!data)
			die("malloc");
		data->ptr = ptr;
		data->pingpong = 0;
		data->hit = 1;
		data->bytes_req = bytes_req;
		data->bytes_alloc = bytes_alloc;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &root_alloc_stat);
	}
	data->call_site = call_site;
	data->alloc_cpu = cpu;
}
static void insert_caller_stat(unsigned long call_site,
			       int bytes_req, int bytes_alloc)
{
	struct rb_node **node = &root_caller_stat.rb_node;
	struct rb_node *parent = NULL;
	struct alloc_stat *data = NULL;

	while (*node) {
		parent = *node;
		data = rb_entry(*node, struct alloc_stat, node);

		if (call_site > data->call_site)
			node = &(*node)->rb_right;
		else if (call_site < data->call_site)
			node = &(*node)->rb_left;
		else
			break;
	}

	if (data && data->call_site == call_site) {
		data->hit++;
		data->bytes_req += bytes_req;
		data->bytes_alloc += bytes_alloc;
	} else {
		data = malloc(sizeof(*data));
		if (!data)
			die("malloc");
		data->call_site = call_site;
		data->pingpong = 0;
		data->hit = 1;
		data->bytes_req = bytes_req;
		data->bytes_alloc = bytes_alloc;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &root_caller_stat);
	}
}
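
/*
 * The kmalloc/kmem_cache_alloc tracepoints carry the requested size,
 * the size actually handed out, the returned pointer and the call
 * site; the *_node variants additionally record the target NUMA node.
 */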
static void process_alloc_event(void *data,
				struct event *event,
				int cpu,
				u64 timestamp __used,
				struct thread *thread __used,
				int node)
{
	unsigned long call_site;
	unsigned long ptr;
	int bytes_req;
	int bytes_alloc;
	int node1, node2;

	ptr = raw_field_value(event, "ptr", data);
	call_site = raw_field_value(event, "call_site", data);
	bytes_req = raw_field_value(event, "bytes_req", data);
	bytes_alloc = raw_field_value(event, "bytes_alloc", data);

	insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, cpu);
	insert_caller_stat(call_site, bytes_req, bytes_alloc);

	total_requested += bytes_req;
	total_allocated += bytes_alloc;

	if (node) {
		/* cross allocation: requesting cpu and target node differ */
		node1 = cpunode_map[cpu];
		node2 = raw_field_value(event, "node", data);
		if (node1 != node2)
			nr_cross_allocs++;
	}
	nr_allocs++;
}
static int ptr_cmp(struct alloc_stat *, struct alloc_stat *);
static int callsite_cmp(struct alloc_stat *, struct alloc_stat *);
static struct alloc_stat *search_alloc_stat(unsigned long ptr,
					    unsigned long call_site,
					    struct rb_root *root,
					    sort_fn_t sort_fn)
{
	struct rb_node *node = root->rb_node;
	struct alloc_stat key = { .ptr = ptr, .call_site = call_site };

	while (node) {
		struct alloc_stat *data;
		int cmp;

		data = rb_entry(node, struct alloc_stat, node);

		cmp = sort_fn(&key, data);
		if (cmp < 0)
			node = node->rb_left;
		else if (cmp > 0)
			node = node->rb_right;
		else
			return data;
	}
	return NULL;
}
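
/*
 * A "ping-pong" is an object allocated on one CPU and freed on another;
 * it is counted against both the allocation entry and its call site.
 */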
static void process_free_event(void *data,
			       struct event *event,
			       int cpu,
			       u64 timestamp __used,
			       struct thread *thread __used)
{
	unsigned long ptr;
	struct alloc_stat *s_alloc, *s_caller;

	ptr = raw_field_value(event, "ptr", data);

	s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp);
	if (!s_alloc)
		return;

	if (cpu != s_alloc->alloc_cpu) {
		s_alloc->pingpong++;

		s_caller = search_alloc_stat(0, s_alloc->call_site,
					     &root_caller_stat, callsite_cmp);
		if (s_caller)
			s_caller->pingpong++;
	}
	s_alloc->alloc_cpu = -1;
}
static void
process_raw_event(event_t *raw_event __used, void *data,
		  int cpu, u64 timestamp, struct thread *thread)
{
	struct event *event;
	int type;

	type = trace_parse_common_type(data);
	event = trace_find_event(type);

	if (!strcmp(event->name, "kmalloc") ||
	    !strcmp(event->name, "kmem_cache_alloc")) {
		process_alloc_event(data, event, cpu, timestamp, thread, 0);
		return;
	}

	if (!strcmp(event->name, "kmalloc_node") ||
	    !strcmp(event->name, "kmem_cache_alloc_node")) {
		process_alloc_event(data, event, cpu, timestamp, thread, 1);
		return;
	}

	if (!strcmp(event->name, "kfree") ||
	    !strcmp(event->name, "kmem_cache_free")) {
		process_free_event(data, event, cpu, timestamp, thread);
		return;
	}
}
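
/*
 * Each PERF_RECORD_SAMPLE carries the raw tracepoint payload
 * (PERF_SAMPLE_RAW); resolve the emitting thread, then hand the raw
 * data to process_raw_event(), which dispatches by event name.
 */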
static int process_sample_event(event_t *event, struct perf_session *session)
{
	struct sample_data data;
	struct thread *thread;

	memset(&data, 0, sizeof(data));
	data.time = -1;
	data.cpu = -1;
	data.period = 1;

	event__parse_sample(event, sample_type, &data);

	dump_printf("(IP, %d): %d/%d: %p period: %Ld\n",
		    event->header.misc,
		    data.pid, data.tid,
		    (void *)(long)data.ip,
		    (long long)data.period);

	thread = perf_session__findnew(session, event->ip.pid);
	if (thread == NULL) {
		pr_debug("problem processing %d event, skipping it.\n",
			 event->header.type);
		return -1;
	}

	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);

	process_raw_event(event, data.raw_data, data.cpu,
			  data.time, thread);

	return 0;
}
static int sample_type_check(u64 type, struct perf_session *session __used)
{
	sample_type = type;

	if (!(sample_type & PERF_SAMPLE_RAW)) {
		fprintf(stderr,
			"No trace sample to read. Did you call perf record "
			"without -R?");
		return -1;
	}

	return 0;
}

static struct perf_event_ops event_ops = {
	.process_sample_event	= process_sample_event,
	.process_comm_event	= event__process_comm,
	.sample_type_check	= sample_type_check,
};
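
/*
 * Internal fragmentation of an allocation: the share of allocated
 * bytes the caller never asked for. E.g. a 24-byte request served
 * from a 32-byte slab object is 100 - 100 * 24 / 32 = 25% fragmented.
 */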
static double fragmentation(unsigned long n_req, unsigned long n_alloc)
{
	if (n_alloc == 0)
		return 0.0;
	else
		return 100.0 - (100.0 * n_req / n_alloc);
}
static void __print_result(struct rb_root *root, struct perf_session *session,
			   int n_lines, int is_caller)
{
	struct rb_node *next;

	printf("%.102s\n", graph_dotted_line);
	printf(" %-34s |", is_caller ? "Callsite": "Alloc Ptr");
	printf(" Total_alloc/Per | Total_req/Per | Hit | Ping-pong | Frag\n");
	printf("%.102s\n", graph_dotted_line);

	next = rb_first(root);

	while (next && n_lines--) {
		struct alloc_stat *data = rb_entry(next, struct alloc_stat,
						   node);
		struct symbol *sym = NULL;
		char buf[BUFSIZ];
		u64 addr;

		if (is_caller) {
			addr = data->call_site;
			if (!raw_ip)
				sym = map_groups__find_function(&session->kmaps, session, addr, NULL);
		} else
			addr = data->ptr;

		if (sym != NULL)
			snprintf(buf, sizeof(buf), "%s+%Lx", sym->name,
				 addr - sym->start);
		else
			snprintf(buf, sizeof(buf), "%#Lx", addr);
		printf(" %-34s |", buf);

		printf(" %9llu/%-5lu | %9llu/%-5lu | %6lu | %8lu | %6.3f%%\n",
		       (unsigned long long)data->bytes_alloc,
		       (unsigned long)data->bytes_alloc / data->hit,
		       (unsigned long long)data->bytes_req,
		       (unsigned long)data->bytes_req / data->hit,
		       (unsigned long)data->hit,
		       (unsigned long)data->pingpong,
		       fragmentation(data->bytes_req, data->bytes_alloc));

		next = rb_next(next);
	}

	if (n_lines == -1)
		printf(" ... | ... | ... | ... | ... | ... \n");

	printf("%.102s\n", graph_dotted_line);
}
static void print_summary(void)
{
	printf("\nSUMMARY\n=======\n");
	printf("Total bytes requested: %lu\n", total_requested);
	printf("Total bytes allocated: %lu\n", total_allocated);
	printf("Total bytes wasted on internal fragmentation: %lu\n",
	       total_allocated - total_requested);
	printf("Internal fragmentation: %f%%\n",
	       fragmentation(total_requested, total_allocated));
	printf("Cross CPU allocations: %lu/%lu\n", nr_cross_allocs, nr_allocs);
}
static void print_result(struct perf_session *session)
{
	if (caller_flag)
		__print_result(&root_caller_sorted, session, caller_lines, 1);
	if (alloc_flag)
		__print_result(&root_alloc_sorted, session, alloc_lines, 0);
	print_summary();
}
struct sort_dimension {
	const char		name[20];
	sort_fn_t		cmp;
	struct list_head	list;
};

static LIST_HEAD(caller_sort);
static LIST_HEAD(alloc_sort);
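
/*
 * For output, entries are moved into a second rbtree ordered by the
 * user's sort keys. Keys are tried in list order; the first comparator
 * that distinguishes two entries decides, and larger values sort
 * toward the front of the report.
 */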
static void sort_insert(struct rb_root *root, struct alloc_stat *data,
			struct list_head *sort_list)
{
	struct rb_node **new = &(root->rb_node);
	struct rb_node *parent = NULL;
	struct sort_dimension *sort;

	while (*new) {
		struct alloc_stat *this;
		int cmp = 0;

		this = rb_entry(*new, struct alloc_stat, node);
		parent = *new;

		list_for_each_entry(sort, sort_list, list) {
			cmp = sort->cmp(data, this);
			if (cmp)
				break;
		}

		if (cmp > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
}
static void __sort_result(struct rb_root *root, struct rb_root *root_sorted,
			  struct list_head *sort_list)
{
	struct rb_node *node;
	struct alloc_stat *data;

	for (;;) {
		node = rb_first(root);
		if (!node)
			break;

		rb_erase(node, root);
		data = rb_entry(node, struct alloc_stat, node);
		sort_insert(root_sorted, data, sort_list);
	}
}
static void sort_result(void)
{
	__sort_result(&root_alloc_stat, &root_alloc_sorted, &alloc_sort);
	__sort_result(&root_caller_stat, &root_caller_sorted, &caller_sort);
}
static int __cmd_kmem(void)
{
	int err = -EINVAL;
	struct perf_session *session = perf_session__new(input_name, O_RDONLY,
							 0);
	if (session == NULL)
		return -ENOMEM;

	setup_pager();
	err = perf_session__process_events(session, &event_ops);
	if (err != 0)
		goto out_delete;
	sort_result();
	print_result(session);
out_delete:
	perf_session__delete(session);
	return err;
}
static const char * const kmem_usage[] = {
	"perf kmem [<options>] {record|stat}",
	NULL
};
static int ptr_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->ptr < r->ptr)
		return -1;
	else if (l->ptr > r->ptr)
		return 1;
	return 0;
}

static struct sort_dimension ptr_sort_dimension = {
	.name	= "ptr",
	.cmp	= ptr_cmp,
};
static int callsite_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->call_site < r->call_site)
		return -1;
	else if (l->call_site > r->call_site)
		return 1;
	return 0;
}

static struct sort_dimension callsite_sort_dimension = {
	.name	= "call_site",	/* keep in sync with the --sort help text */
	.cmp	= callsite_cmp,
};
static int hit_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->hit < r->hit)
		return -1;
	else if (l->hit > r->hit)
		return 1;
	return 0;
}

static struct sort_dimension hit_sort_dimension = {
	.name	= "hit",
	.cmp	= hit_cmp,
};
static int bytes_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->bytes_alloc < r->bytes_alloc)
		return -1;
	else if (l->bytes_alloc > r->bytes_alloc)
		return 1;
	return 0;
}

static struct sort_dimension bytes_sort_dimension = {
	.name	= "bytes",
	.cmp	= bytes_cmp,
};
static int frag_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	double x, y;

	x = fragmentation(l->bytes_req, l->bytes_alloc);
	y = fragmentation(r->bytes_req, r->bytes_alloc);

	if (x < y)
		return -1;
	else if (x > y)
		return 1;
	return 0;
}

static struct sort_dimension frag_sort_dimension = {
	.name	= "frag",
	.cmp	= frag_cmp,
};
static int pingpong_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->pingpong < r->pingpong)
		return -1;
	else if (l->pingpong > r->pingpong)
		return 1;
	return 0;
}

static struct sort_dimension pingpong_sort_dimension = {
	.name	= "pingpong",
	.cmp	= pingpong_cmp,
};
static struct sort_dimension *avail_sorts[] = {
	&ptr_sort_dimension,
	&callsite_sort_dimension,
	&hit_sort_dimension,
	&bytes_sort_dimension,
	&frag_sort_dimension,
	&pingpong_sort_dimension,
};

#define NUM_AVAIL_SORTS	((int)ARRAY_SIZE(avail_sorts))
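
/*
 * Append a copy of the named dimension to sort_list. A copy is needed
 * because the embedded list_head would otherwise link one static
 * dimension into both the caller and the alloc sort lists.
 */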
static int sort_dimension__add(const char *tok, struct list_head *list)
{
	struct sort_dimension *sort;
	int i;

	for (i = 0; i < NUM_AVAIL_SORTS; i++) {
		if (!strcmp(avail_sorts[i]->name, tok)) {
			sort = malloc(sizeof(*sort));
			if (!sort)
				die("malloc");
			memcpy(sort, avail_sorts[i], sizeof(*sort));
			list_add_tail(&sort->list, list);
			return 0;
		}
	}

	return -1;
}
static int setup_sorting(struct list_head *sort_list, const char *arg)
{
	char *tok;
	char *str = strdup(arg);
	char *pos = str;

	if (!str)
		die("strdup");

	while ((tok = strsep(&pos, ",")) != NULL) {
		if (sort_dimension__add(tok, sort_list) < 0) {
			error("Unknown --sort key: '%s'", tok);
			free(str);
			return -1;
		}
	}

	/* free the original pointer: strsep() has advanced 'pos' */
	free(str);
	return 0;
}
static int parse_sort_opt(const struct option *opt __used,
			  const char *arg, int unset __used)
{
	if (!arg)
		return -1;

	/* --sort applies to whichever of --caller/--alloc came last */
	if (caller_flag > alloc_flag)
		return setup_sorting(&caller_sort, arg);
	else
		return setup_sorting(&alloc_sort, arg);
}
static int parse_caller_opt(const struct option *opt __used,
			    const char *arg __used, int unset __used)
{
	caller_flag = (alloc_flag + 1);
	return 0;
}

static int parse_alloc_opt(const struct option *opt __used,
			   const char *arg __used, int unset __used)
{
	alloc_flag = (caller_flag + 1);
	return 0;
}
static int parse_line_opt(const struct option *opt __used,
			  const char *arg, int unset __used)
{
	int lines;

	if (!arg)
		return -1;

	lines = strtoul(arg, NULL, 10);

	if (caller_flag > alloc_flag)
		caller_lines = lines;
	else
		alloc_lines = lines;

	return 0;
}
static const struct option kmem_options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		   "input file name"),
	OPT_CALLBACK_NOOPT(0, "caller", NULL, NULL,
			   "show per-callsite statistics",
			   parse_caller_opt),
	OPT_CALLBACK_NOOPT(0, "alloc", NULL, NULL,
			   "show per-allocation statistics",
			   parse_alloc_opt),
	OPT_CALLBACK('s', "sort", NULL, "key[,key2...]",
		     "sort by keys: ptr, call_site, bytes, hit, pingpong, frag",
		     parse_sort_opt),
	OPT_CALLBACK('l', "line", NULL, "num",
		     "show n lines",
		     parse_line_opt),
	OPT_BOOLEAN(0, "raw-ip", &raw_ip, "show raw ip instead of symbol"),
	OPT_END()
};
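
/*
 * 'perf kmem record' is a thin wrapper around 'perf record': it records
 * the kmem tracepoints below system-wide and forwards any extra
 * arguments (e.g. a workload) verbatim, e.g.:
 *
 *	perf kmem record sleep 1
 *	perf kmem stat --caller --sort frag,hit
 */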
static const char *record_args[] = {
	"record",
	"-a",
	"-R",
	"-f",
	"-c", "1",
	"-e", "kmem:kmalloc",
	"-e", "kmem:kmalloc_node",
	"-e", "kmem:kfree",
	"-e", "kmem:kmem_cache_alloc",
	"-e", "kmem:kmem_cache_alloc_node",
	"-e", "kmem:kmem_cache_free",
};
static int __cmd_record(int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));
	if (rec_argv == NULL)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	return cmd_record(i, rec_argv, NULL);
}
int cmd_kmem(int argc, const char **argv, const char *prefix __used)
{
	argc = parse_options(argc, argv, kmem_options, kmem_usage, 0);

	if (!argc)
		usage_with_options(kmem_usage, kmem_options);

	symbol__init();

	if (!strncmp(argv[0], "rec", 3)) {
		return __cmd_record(argc, argv);
	} else if (!strcmp(argv[0], "stat")) {
		setup_cpunode_map();

		if (list_empty(&caller_sort))
			setup_sorting(&caller_sort, default_sort_order);
		if (list_empty(&alloc_sort))
			setup_sorting(&alloc_sort, default_sort_order);

		return __cmd_kmem();
	} else
		usage_with_options(kmem_usage, kmem_options);

	return 0;
}