[pandora-kernel.git] tools/perf/builtin-sched.c
1 #include "builtin.h"
2 #include "perf.h"
3
4 #include "util/util.h"
5 #include "util/cache.h"
6 #include "util/symbol.h"
7 #include "util/thread.h"
8 #include "util/header.h"
9 #include "util/session.h"
10
11 #include "util/parse-options.h"
12 #include "util/trace-event.h"
13
14 #include "util/debug.h"
15
16 #include <sys/prctl.h>
17 #include <sys/resource.h>
18
19 #include <semaphore.h>
20 #include <pthread.h>
21 #include <math.h>
22
23 static char                     const *input_name = "perf.data";
24
25 static char                     default_sort_order[] = "avg, max, switch, runtime";
26 static const char               *sort_order = default_sort_order;
27
28 static int                      profile_cpu = -1;
29
30 #define PR_SET_NAME             15               /* Set process name */
31 #define MAX_CPUS                4096
32
33 static u64                      run_measurement_overhead;
34 static u64                      sleep_measurement_overhead;
35
36 #define COMM_LEN                20
37 #define SYM_LEN                 129
38
39 #define MAX_PID                 65536
40
41 static unsigned long            nr_tasks;
42
43 struct sched_atom;
44
45 struct task_desc {
46         unsigned long           nr;
47         unsigned long           pid;
48         char                    comm[COMM_LEN];
49
50         unsigned long           nr_events;
51         unsigned long           curr_event;
52         struct sched_atom       **atoms;
53
54         pthread_t               thread;
55         sem_t                   sleep_sem;
56
57         sem_t                   ready_for_work;
58         sem_t                   work_done_sem;
59
60         u64                     cpu_usage;
61 };
62
63 enum sched_event_type {
64         SCHED_EVENT_RUN,
65         SCHED_EVENT_SLEEP,
66         SCHED_EVENT_WAKEUP,
67         SCHED_EVENT_MIGRATION,
68 };
69
70 struct sched_atom {
71         enum sched_event_type   type;
72         int                     specific_wait;
73         u64                     timestamp;
74         u64                     duration;
75         unsigned long           nr;
76         sem_t                   *wait_sem;
77         struct task_desc        *wakee;
78 };
79
80 static struct task_desc         *pid_to_task[MAX_PID];
81
82 static struct task_desc         **tasks;
83
84 static pthread_mutex_t          start_work_mutex = PTHREAD_MUTEX_INITIALIZER;
85 static u64                      start_time;
86
87 static pthread_mutex_t          work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER;
88
89 static unsigned long            nr_run_events;
90 static unsigned long            nr_sleep_events;
91 static unsigned long            nr_wakeup_events;
92
93 static unsigned long            nr_sleep_corrections;
94 static unsigned long            nr_run_events_optimized;
95
96 static unsigned long            targetless_wakeups;
97 static unsigned long            multitarget_wakeups;
98
99 static u64                      cpu_usage;
100 static u64                      runavg_cpu_usage;
101 static u64                      parent_cpu_usage;
102 static u64                      runavg_parent_cpu_usage;
103
104 static unsigned long            nr_runs;
105 static u64                      sum_runtime;
106 static u64                      sum_fluct;
107 static u64                      run_avg;
108
109 static unsigned int             replay_repeat = 10;
110 static unsigned long            nr_timestamps;
111 static unsigned long            nr_unordered_timestamps;
112 static unsigned long            nr_state_machine_bugs;
113 static unsigned long            nr_context_switch_bugs;
114 static unsigned long            nr_events;
115 static unsigned long            nr_lost_chunks;
116 static unsigned long            nr_lost_events;
117
118 #define TASK_STATE_TO_CHAR_STR "RSDTtZX"
119
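/*
 * Per-atom state machine used by 'perf sched latency': an atom is
 * created when a task schedules out, moves to THREAD_WAIT_CPU once it
 * is woken up (or immediately, if it went out still runnable), and to
 * THREAD_SCHED_IN when it gets the CPU back - the wakeup-to-sched-in
 * delta is the latency we account.
 */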
120 enum thread_state {
121         THREAD_SLEEPING = 0,
122         THREAD_WAIT_CPU,
123         THREAD_SCHED_IN,
124         THREAD_IGNORE
125 };
126
127 struct work_atom {
128         struct list_head        list;
129         enum thread_state       state;
130         u64                     sched_out_time;
131         u64                     wake_up_time;
132         u64                     sched_in_time;
133         u64                     runtime;
134 };
135
136 struct work_atoms {
137         struct list_head        work_list;
138         struct thread           *thread;
139         struct rb_node          node;
140         u64                     max_lat;
141         u64                     max_lat_at;
142         u64                     total_lat;
143         u64                     nb_atoms;
144         u64                     total_runtime;
145 };
146
147 typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);
148
149 static struct rb_root           atom_root, sorted_atom_root;
150
151 static u64                      all_runtime;
152 static u64                      all_count;
153
154
155 static u64 get_nsecs(void)
156 {
157         struct timespec ts;
158
159         clock_gettime(CLOCK_MONOTONIC, &ts);
160
161         return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
162 }
163
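/*
 * Busy-loop for 'nsecs', stopping run_measurement_overhead early to
 * compensate for the calibrated cost of this function itself:
 */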
164 static void burn_nsecs(u64 nsecs)
165 {
166         u64 T0 = get_nsecs(), T1;
167
168         do {
169                 T1 = get_nsecs();
170         } while (T1 + run_measurement_overhead < T0 + nsecs);
171 }
172
173 static void sleep_nsecs(u64 nsecs)
174 {
175         struct timespec ts;
176
177         ts.tv_nsec = nsecs % 1000000000;
178         ts.tv_sec = nsecs / 1000000000;
179
180         nanosleep(&ts, NULL);
181 }
182
183 static void calibrate_run_measurement_overhead(void)
184 {
185         u64 T0, T1, delta, min_delta = 1000000000ULL;
186         int i;
187
188         for (i = 0; i < 10; i++) {
189                 T0 = get_nsecs();
190                 burn_nsecs(0);
191                 T1 = get_nsecs();
192                 delta = T1-T0;
193                 min_delta = min(min_delta, delta);
194         }
195         run_measurement_overhead = min_delta;
196
197         printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta);
198 }
199
200 static void calibrate_sleep_measurement_overhead(void)
201 {
202         u64 T0, T1, delta, min_delta = 1000000000ULL;
203         int i;
204
205         for (i = 0; i < 10; i++) {
206                 T0 = get_nsecs();
207                 sleep_nsecs(10000);
208                 T1 = get_nsecs();
209                 delta = T1-T0;
210                 min_delta = min(min_delta, delta);
211         }
212         min_delta -= 10000;
213         sleep_measurement_overhead = min_delta;
214
215         printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta);
216 }
217
218 static struct sched_atom *
219 get_new_event(struct task_desc *task, u64 timestamp)
220 {
221         struct sched_atom *event = zalloc(sizeof(*event));
222         unsigned long idx = task->nr_events;
223         size_t size;
224
225         event->timestamp = timestamp;
226         event->nr = idx;
227
228         task->nr_events++;
229         size = sizeof(struct sched_atom *) * task->nr_events;
230         task->atoms = realloc(task->atoms, size);
231         BUG_ON(!task->atoms);
232
233         task->atoms[idx] = event;
234
235         return event;
236 }
237
238 static struct sched_atom *last_event(struct task_desc *task)
239 {
240         if (!task->nr_events)
241                 return NULL;
242
243         return task->atoms[task->nr_events - 1];
244 }
245
246 static void
247 add_sched_event_run(struct task_desc *task, u64 timestamp, u64 duration)
248 {
249         struct sched_atom *event, *curr_event = last_event(task);
250
251         /*
252          * optimize an existing RUN event by merging this one
253          * to it:
254          */
255         if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
256                 nr_run_events_optimized++;
257                 curr_event->duration += duration;
258                 return;
259         }
260
261         event = get_new_event(task, timestamp);
262
263         event->type = SCHED_EVENT_RUN;
264         event->duration = duration;
265
266         nr_run_events++;
267 }
268
269 static void
270 add_sched_event_wakeup(struct task_desc *task, u64 timestamp,
271                        struct task_desc *wakee)
272 {
273         struct sched_atom *event, *wakee_event;
274
275         event = get_new_event(task, timestamp);
276         event->type = SCHED_EVENT_WAKEUP;
277         event->wakee = wakee;
278
279         wakee_event = last_event(wakee);
280         if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
281                 targetless_wakeups++;
282                 return;
283         }
284         if (wakee_event->wait_sem) {
285                 multitarget_wakeups++;
286                 return;
287         }
288
289         wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem));
290         sem_init(wakee_event->wait_sem, 0, 0);
291         wakee_event->specific_wait = 1;
292         event->wait_sem = wakee_event->wait_sem;
293
294         nr_wakeup_events++;
295 }
296
297 static void
298 add_sched_event_sleep(struct task_desc *task, u64 timestamp,
299                       u64 task_state __used)
300 {
301         struct sched_atom *event = get_new_event(task, timestamp);
302
303         event->type = SCHED_EVENT_SLEEP;
304
305         nr_sleep_events++;
306 }
307
308 static struct task_desc *register_pid(unsigned long pid, const char *comm)
309 {
310         struct task_desc *task;
311
312         BUG_ON(pid >= MAX_PID);
313
314         task = pid_to_task[pid];
315
316         if (task)
317                 return task;
318
319         task = zalloc(sizeof(*task));
320         task->pid = pid;
321         task->nr = nr_tasks;
322         strcpy(task->comm, comm);
323         /*
324          * every task starts in sleeping state - this gets ignored
325          * if there's no wakeup pointing to this sleep state:
326          */
327         add_sched_event_sleep(task, 0, 0);
328
329         pid_to_task[pid] = task;
330         nr_tasks++;
331         tasks = realloc(tasks, nr_tasks * sizeof(struct task_desc *));
332         BUG_ON(!tasks);
333         tasks[task->nr] = task;
334
335         if (verbose)
336                 printf("registered task #%ld, PID %ld (%s)\n", nr_tasks, pid, comm);
337
338         return task;
339 }
340
341
342 static void print_task_traces(void)
343 {
344         struct task_desc *task;
345         unsigned long i;
346
347         for (i = 0; i < nr_tasks; i++) {
348                 task = tasks[i];
349                 printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
350                         task->nr, task->comm, task->pid, task->nr_events);
351         }
352 }
353
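/*
 * Add an artificial wakeup from every task to the next one (wrapping
 * around at the end), so that tasks whose trace ends in a sleep still
 * have a waker during replay:
 */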
354 static void add_cross_task_wakeups(void)
355 {
356         struct task_desc *task1, *task2;
357         unsigned long i, j;
358
359         for (i = 0; i < nr_tasks; i++) {
360                 task1 = tasks[i];
361                 j = i + 1;
362                 if (j == nr_tasks)
363                         j = 0;
364                 task2 = tasks[j];
365                 add_sched_event_wakeup(task1, 0, task2);
366         }
367 }
368
369 static void
370 process_sched_event(struct task_desc *this_task __used, struct sched_atom *atom)
371 {
372         int ret = 0;
373
374         switch (atom->type) {
375                 case SCHED_EVENT_RUN:
376                         burn_nsecs(atom->duration);
377                         break;
378                 case SCHED_EVENT_SLEEP:
379                         if (atom->wait_sem)
380                                 ret = sem_wait(atom->wait_sem);
381                         BUG_ON(ret);
382                         break;
383                 case SCHED_EVENT_WAKEUP:
384                         if (atom->wait_sem)
385                                 ret = sem_post(atom->wait_sem);
386                         BUG_ON(ret);
387                         break;
388                 case SCHED_EVENT_MIGRATION:
389                         break;
390                 default:
391                         BUG_ON(1);
392         }
393 }
394
395 static u64 get_cpu_usage_nsec_parent(void)
396 {
397         struct rusage ru;
398         u64 sum;
399         int err;
400
401         err = getrusage(RUSAGE_SELF, &ru);
402         BUG_ON(err);
403
404         sum =  ru.ru_utime.tv_sec*1e9 + ru.ru_utime.tv_usec*1e3;
405         sum += ru.ru_stime.tv_sec*1e9 + ru.ru_stime.tv_usec*1e3;
406
407         return sum;
408 }
409
410 static int self_open_counters(void)
411 {
412         struct perf_event_attr attr;
413         int fd;
414
415         memset(&attr, 0, sizeof(attr));
416
417         attr.type = PERF_TYPE_SOFTWARE;
418         attr.config = PERF_COUNT_SW_TASK_CLOCK;
419
420         fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
421
422         if (fd < 0)
423                 die("Error: sys_perf_event_open() syscall returned "
424                     "with %d (%s)\n", fd, strerror(errno));
425         return fd;
426 }
427
428 static u64 get_cpu_usage_nsec_self(int fd)
429 {
430         u64 runtime;
431         int ret;
432
433         ret = read(fd, &runtime, sizeof(runtime));
434         BUG_ON(ret != sizeof(runtime));
435
436         return runtime;
437 }
438
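/*
 * Replay worker: each recorded task gets one of these threads.  It
 * signals ready_for_work, waits on start_work_mutex as a start
 * barrier, replays its sched atoms while measuring its own CPU time
 * via the task-clock counter, then signals work_done_sem and waits on
 * work_done_wait_mutex before the next iteration.
 */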
439 static void *thread_func(void *ctx)
440 {
441         struct task_desc *this_task = ctx;
442         u64 cpu_usage_0, cpu_usage_1;
443         unsigned long i, ret;
444         char comm2[22];
445         int fd;
446
447         sprintf(comm2, ":%s", this_task->comm);
448         prctl(PR_SET_NAME, comm2);
449         fd = self_open_counters();
450
451 again:
452         ret = sem_post(&this_task->ready_for_work);
453         BUG_ON(ret);
454         ret = pthread_mutex_lock(&start_work_mutex);
455         BUG_ON(ret);
456         ret = pthread_mutex_unlock(&start_work_mutex);
457         BUG_ON(ret);
458
459         cpu_usage_0 = get_cpu_usage_nsec_self(fd);
460
461         for (i = 0; i < this_task->nr_events; i++) {
462                 this_task->curr_event = i;
463                 process_sched_event(this_task, this_task->atoms[i]);
464         }
465
466         cpu_usage_1 = get_cpu_usage_nsec_self(fd);
467         this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;
468         ret = sem_post(&this_task->work_done_sem);
469         BUG_ON(ret);
470
471         ret = pthread_mutex_lock(&work_done_wait_mutex);
472         BUG_ON(ret);
473         ret = pthread_mutex_unlock(&work_done_wait_mutex);
474         BUG_ON(ret);
475
476         goto again;
477 }
478
479 static void create_tasks(void)
480 {
481         struct task_desc *task;
482         pthread_attr_t attr;
483         unsigned long i;
484         int err;
485
486         err = pthread_attr_init(&attr);
487         BUG_ON(err);
488         err = pthread_attr_setstacksize(&attr,
489                         (size_t) max(16 * 1024, PTHREAD_STACK_MIN));
490         BUG_ON(err);
491         err = pthread_mutex_lock(&start_work_mutex);
492         BUG_ON(err);
493         err = pthread_mutex_lock(&work_done_wait_mutex);
494         BUG_ON(err);
495         for (i = 0; i < nr_tasks; i++) {
496                 task = tasks[i];
497                 sem_init(&task->sleep_sem, 0, 0);
498                 sem_init(&task->ready_for_work, 0, 0);
499                 sem_init(&task->work_done_sem, 0, 0);
500                 task->curr_event = 0;
501                 err = pthread_create(&task->thread, &attr, thread_func, task);
502                 BUG_ON(err);
503         }
504 }
505
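/*
 * Parent side of one replay iteration: wait until all workers are
 * ready, drop start_work_mutex to let them run, then collect their
 * work_done semaphores and CPU usage, and keep decaying running
 * averages of child and parent CPU time.
 */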
506 static void wait_for_tasks(void)
507 {
508         u64 cpu_usage_0, cpu_usage_1;
509         struct task_desc *task;
510         unsigned long i, ret;
511
512         start_time = get_nsecs();
513         cpu_usage = 0;
514         pthread_mutex_unlock(&work_done_wait_mutex);
515
516         for (i = 0; i < nr_tasks; i++) {
517                 task = tasks[i];
518                 ret = sem_wait(&task->ready_for_work);
519                 BUG_ON(ret);
520                 sem_init(&task->ready_for_work, 0, 0);
521         }
522         ret = pthread_mutex_lock(&work_done_wait_mutex);
523         BUG_ON(ret);
524
525         cpu_usage_0 = get_cpu_usage_nsec_parent();
526
527         pthread_mutex_unlock(&start_work_mutex);
528
529         for (i = 0; i < nr_tasks; i++) {
530                 task = tasks[i];
531                 ret = sem_wait(&task->work_done_sem);
532                 BUG_ON(ret);
533                 sem_init(&task->work_done_sem, 0, 0);
534                 cpu_usage += task->cpu_usage;
535                 task->cpu_usage = 0;
536         }
537
538         cpu_usage_1 = get_cpu_usage_nsec_parent();
539         if (!runavg_cpu_usage)
540                 runavg_cpu_usage = cpu_usage;
541         runavg_cpu_usage = (runavg_cpu_usage*9 + cpu_usage)/10;
542
543         parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
544         if (!runavg_parent_cpu_usage)
545                 runavg_parent_cpu_usage = parent_cpu_usage;
546         runavg_parent_cpu_usage = (runavg_parent_cpu_usage*9 +
547                                    parent_cpu_usage)/10;
548
549         ret = pthread_mutex_lock(&start_work_mutex);
550         BUG_ON(ret);
551
552         for (i = 0; i < nr_tasks; i++) {
553                 task = tasks[i];
554                 sem_init(&task->sleep_sem, 0, 0);
555                 task->curr_event = 0;
556         }
557 }
558
559 static void run_one_test(void)
560 {
561         u64 T0, T1, delta, avg_delta, fluct;
562
563         T0 = get_nsecs();
564         wait_for_tasks();
565         T1 = get_nsecs();
566
567         delta = T1 - T0;
568         sum_runtime += delta;
569         nr_runs++;
570
571         avg_delta = sum_runtime / nr_runs;
572         if (delta < avg_delta)
573                 fluct = avg_delta - delta;
574         else
575                 fluct = delta - avg_delta;
576         sum_fluct += fluct;
577         if (!run_avg)
578                 run_avg = delta;
579         run_avg = (run_avg*9 + delta)/10;
580
581         printf("#%-3ld: %0.3f, ",
582                 nr_runs, (double)delta/1000000.0);
583
584         printf("ravg: %0.2f, ",
585                 (double)run_avg/1e6);
586
587         printf("cpu: %0.2f / %0.2f",
588                 (double)cpu_usage/1e6, (double)runavg_cpu_usage/1e6);
589
590 #if 0
591         /*
592          * rusage statistics done by the parent, these are less
593          * accurate than the sum_exec_runtime based statistics:
594          */
595         printf(" [%0.2f / %0.2f]",
596                 (double)parent_cpu_usage/1e6,
597                 (double)runavg_parent_cpu_usage/1e6);
598 #endif
599
600         printf("\n");
601
602         if (nr_sleep_corrections)
603                 printf(" (%ld sleep corrections)\n", nr_sleep_corrections);
604         nr_sleep_corrections = 0;
605 }
606
607 static void test_calibrations(void)
608 {
609         u64 T0, T1;
610
611         T0 = get_nsecs();
612         burn_nsecs(1e6);
613         T1 = get_nsecs();
614
615         printf("the run test took %" PRIu64 " nsecs\n", T1 - T0);
616
617         T0 = get_nsecs();
618         sleep_nsecs(1e6);
619         T1 = get_nsecs();
620
621         printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0);
622 }
623
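/*
 * Helpers to copy individual fields of a raw tracepoint record into
 * the local trace_*_event structures, looked up by field name:
 */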
624 #define FILL_FIELD(ptr, field, event, data)     \
625         ptr.field = (typeof(ptr.field)) raw_field_value(event, #field, data)
626
627 #define FILL_ARRAY(ptr, array, event, data)                     \
628 do {                                                            \
629         void *__array = raw_field_ptr(event, #array, data);     \
630         memcpy(ptr.array, __array, sizeof(ptr.array));  \
631 } while(0)
632
633 #define FILL_COMMON_FIELDS(ptr, event, data)                    \
634 do {                                                            \
635         FILL_FIELD(ptr, common_type, event, data);              \
636         FILL_FIELD(ptr, common_flags, event, data);             \
637         FILL_FIELD(ptr, common_preempt_count, event, data);     \
638         FILL_FIELD(ptr, common_pid, event, data);               \
639         FILL_FIELD(ptr, common_tgid, event, data);              \
640 } while (0)
641
642
643
644 struct trace_switch_event {
645         u32 size;
646
647         u16 common_type;
648         u8 common_flags;
649         u8 common_preempt_count;
650         u32 common_pid;
651         u32 common_tgid;
652
653         char prev_comm[16];
654         u32 prev_pid;
655         u32 prev_prio;
656         u64 prev_state;
657         char next_comm[16];
658         u32 next_pid;
659         u32 next_prio;
660 };
661
662 struct trace_runtime_event {
663         u32 size;
664
665         u16 common_type;
666         u8 common_flags;
667         u8 common_preempt_count;
668         u32 common_pid;
669         u32 common_tgid;
670
671         char comm[16];
672         u32 pid;
673         u64 runtime;
674         u64 vruntime;
675 };
676
677 struct trace_wakeup_event {
678         u32 size;
679
680         u16 common_type;
681         u8 common_flags;
682         u8 common_preempt_count;
683         u32 common_pid;
684         u32 common_tgid;
685
686         char comm[16];
687         u32 pid;
688
689         u32 prio;
690         u32 success;
691         u32 cpu;
692 };
693
694 struct trace_fork_event {
695         u32 size;
696
697         u16 common_type;
698         u8 common_flags;
699         u8 common_preempt_count;
700         u32 common_pid;
701         u32 common_tgid;
702
703         char parent_comm[16];
704         u32 parent_pid;
705         char child_comm[16];
706         u32 child_pid;
707 };
708
709 struct trace_migrate_task_event {
710         u32 size;
711
712         u16 common_type;
713         u8 common_flags;
714         u8 common_preempt_count;
715         u32 common_pid;
716         u32 common_tgid;
717
718         char comm[16];
719         u32 pid;
720
721         u32 prio;
722         u32 cpu;
723 };
724
725 struct trace_sched_handler {
726         void (*switch_event)(struct trace_switch_event *,
727                              struct perf_session *,
728                              struct event *,
729                              int cpu,
730                              u64 timestamp,
731                              struct thread *thread);
732
733         void (*runtime_event)(struct trace_runtime_event *,
734                               struct perf_session *,
735                               struct event *,
736                               int cpu,
737                               u64 timestamp,
738                               struct thread *thread);
739
740         void (*wakeup_event)(struct trace_wakeup_event *,
741                              struct perf_session *,
742                              struct event *,
743                              int cpu,
744                              u64 timestamp,
745                              struct thread *thread);
746
747         void (*fork_event)(struct trace_fork_event *,
748                            struct event *,
749                            int cpu,
750                            u64 timestamp,
751                            struct thread *thread);
752
753         void (*migrate_task_event)(struct trace_migrate_task_event *,
754                            struct perf_session *session,
755                            struct event *,
756                            int cpu,
757                            u64 timestamp,
758                            struct thread *thread);
759 };
760
761
762 static void
763 replay_wakeup_event(struct trace_wakeup_event *wakeup_event,
764                     struct perf_session *session __used,
765                     struct event *event,
766                     int cpu __used,
767                     u64 timestamp __used,
768                     struct thread *thread __used)
769 {
770         struct task_desc *waker, *wakee;
771
772         if (verbose) {
773                 printf("sched_wakeup event %p\n", event);
774
775                 printf(" ... pid %d woke up %s/%d\n",
776                         wakeup_event->common_pid,
777                         wakeup_event->comm,
778                         wakeup_event->pid);
779         }
780
781         waker = register_pid(wakeup_event->common_pid, "<unknown>");
782         wakee = register_pid(wakeup_event->pid, wakeup_event->comm);
783
784         add_sched_event_wakeup(waker, timestamp, wakee);
785 }
786
787 static u64 cpu_last_switched[MAX_CPUS];
788
789 static void
790 replay_switch_event(struct trace_switch_event *switch_event,
791                     struct perf_session *session __used,
792                     struct event *event,
793                     int cpu,
794                     u64 timestamp,
795                     struct thread *thread __used)
796 {
797         struct task_desc *prev, __used *next;
798         u64 timestamp0;
799         s64 delta;
800
801         if (verbose)
802                 printf("sched_switch event %p\n", event);
803
804         if (cpu >= MAX_CPUS || cpu < 0)
805                 return;
806
807         timestamp0 = cpu_last_switched[cpu];
808         if (timestamp0)
809                 delta = timestamp - timestamp0;
810         else
811                 delta = 0;
812
813         if (delta < 0)
814                 die("hm, delta: %" PRId64 " < 0 ?\n", delta);
815
816         if (verbose) {
817                 printf(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n",
818                         switch_event->prev_comm, switch_event->prev_pid,
819                         switch_event->next_comm, switch_event->next_pid,
820                         delta);
821         }
822
823         prev = register_pid(switch_event->prev_pid, switch_event->prev_comm);
824         next = register_pid(switch_event->next_pid, switch_event->next_comm);
825
826         cpu_last_switched[cpu] = timestamp;
827
828         add_sched_event_run(prev, timestamp, delta);
829         add_sched_event_sleep(prev, timestamp, switch_event->prev_state);
830 }
831
832
833 static void
834 replay_fork_event(struct trace_fork_event *fork_event,
835                   struct event *event,
836                   int cpu __used,
837                   u64 timestamp __used,
838                   struct thread *thread __used)
839 {
840         if (verbose) {
841                 printf("sched_fork event %p\n", event);
842                 printf("... parent: %s/%d\n", fork_event->parent_comm, fork_event->parent_pid);
843                 printf("...  child: %s/%d\n", fork_event->child_comm, fork_event->child_pid);
844         }
845         register_pid(fork_event->parent_pid, fork_event->parent_comm);
846         register_pid(fork_event->child_pid, fork_event->child_comm);
847 }
848
849 static struct trace_sched_handler replay_ops  = {
850         .wakeup_event           = replay_wakeup_event,
851         .switch_event           = replay_switch_event,
852         .fork_event             = replay_fork_event,
853 };
854
855 struct sort_dimension {
856         const char              *name;
857         sort_fn_t               cmp;
858         struct list_head        list;
859 };
860
861 static LIST_HEAD(cmp_pid);
862
863 static int
864 thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)
865 {
866         struct sort_dimension *sort;
867         int ret = 0;
868
869         BUG_ON(list_empty(list));
870
871         list_for_each_entry(sort, list, list) {
872                 ret = sort->cmp(l, r);
873                 if (ret)
874                         return ret;
875         }
876
877         return ret;
878 }
879
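/*
 * The per-thread work_atoms are kept in an rb-tree ordered by the
 * keys on the 'cmp_pid' list; search and insert below share the same
 * thread_lat_cmp() comparison chain:
 */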
880 static struct work_atoms *
881 thread_atoms_search(struct rb_root *root, struct thread *thread,
882                          struct list_head *sort_list)
883 {
884         struct rb_node *node = root->rb_node;
885         struct work_atoms key = { .thread = thread };
886
887         while (node) {
888                 struct work_atoms *atoms;
889                 int cmp;
890
891                 atoms = container_of(node, struct work_atoms, node);
892
893                 cmp = thread_lat_cmp(sort_list, &key, atoms);
894                 if (cmp > 0)
895                         node = node->rb_left;
896                 else if (cmp < 0)
897                         node = node->rb_right;
898                 else {
899                         BUG_ON(thread != atoms->thread);
900                         return atoms;
901                 }
902         }
903         return NULL;
904 }
905
906 static void
907 __thread_latency_insert(struct rb_root *root, struct work_atoms *data,
908                          struct list_head *sort_list)
909 {
910         struct rb_node **new = &(root->rb_node), *parent = NULL;
911
912         while (*new) {
913                 struct work_atoms *this;
914                 int cmp;
915
916                 this = container_of(*new, struct work_atoms, node);
917                 parent = *new;
918
919                 cmp = thread_lat_cmp(sort_list, data, this);
920
921                 if (cmp > 0)
922                         new = &((*new)->rb_left);
923                 else
924                         new = &((*new)->rb_right);
925         }
926
927         rb_link_node(&data->node, parent, new);
928         rb_insert_color(&data->node, root);
929 }
930
931 static void thread_atoms_insert(struct thread *thread)
932 {
933         struct work_atoms *atoms = zalloc(sizeof(*atoms));
934         if (!atoms)
935                 die("No memory");
936
937         atoms->thread = thread;
938         INIT_LIST_HEAD(&atoms->work_list);
939         __thread_latency_insert(&atom_root, atoms, &cmp_pid);
940 }
941
942 static void
943 latency_fork_event(struct trace_fork_event *fork_event __used,
944                    struct event *event __used,
945                    int cpu __used,
946                    u64 timestamp __used,
947                    struct thread *thread __used)
948 {
949         /* should insert the newcomer */
950 }
951
952 __used
953 static char sched_out_state(struct trace_switch_event *switch_event)
954 {
955         const char *str = TASK_STATE_TO_CHAR_STR;
956
957         return str[switch_event->prev_state];
958 }
959
960 static void
961 add_sched_out_event(struct work_atoms *atoms,
962                     char run_state,
963                     u64 timestamp)
964 {
965         struct work_atom *atom = zalloc(sizeof(*atom));
966         if (!atom)
967                 die("No memory");
968
969         atom->sched_out_time = timestamp;
970
971         if (run_state == 'R') {
972                 atom->state = THREAD_WAIT_CPU;
973                 atom->wake_up_time = atom->sched_out_time;
974         }
975
976         list_add_tail(&atom->list, &atoms->work_list);
977 }
978
979 static void
980 add_runtime_event(struct work_atoms *atoms, u64 delta, u64 timestamp __used)
981 {
982         struct work_atom *atom;
983
984         BUG_ON(list_empty(&atoms->work_list));
985
986         atom = list_entry(atoms->work_list.prev, struct work_atom, list);
987
988         atom->runtime += delta;
989         atoms->total_runtime += delta;
990 }
991
992 static void
993 add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
994 {
995         struct work_atom *atom;
996         u64 delta;
997
998         if (list_empty(&atoms->work_list))
999                 return;
1000
1001         atom = list_entry(atoms->work_list.prev, struct work_atom, list);
1002
1003         if (atom->state != THREAD_WAIT_CPU)
1004                 return;
1005
1006         if (timestamp < atom->wake_up_time) {
1007                 atom->state = THREAD_IGNORE;
1008                 return;
1009         }
1010
1011         atom->state = THREAD_SCHED_IN;
1012         atom->sched_in_time = timestamp;
1013
1014         delta = atom->sched_in_time - atom->wake_up_time;
1015         atoms->total_lat += delta;
1016         if (delta > atoms->max_lat) {
1017                 atoms->max_lat = delta;
1018                 atoms->max_lat_at = timestamp;
1019         }
1020         atoms->nb_atoms++;
1021 }
1022
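/*
 * 'perf sched latency' switch handler: open a new out-atom for the
 * task scheduling out, and close the wakeup-to-sched-in interval for
 * the task scheduling in.
 */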
1023 static void
1024 latency_switch_event(struct trace_switch_event *switch_event,
1025                      struct perf_session *session,
1026                      struct event *event __used,
1027                      int cpu,
1028                      u64 timestamp,
1029                      struct thread *thread __used)
1030 {
1031         struct work_atoms *out_events, *in_events;
1032         struct thread *sched_out, *sched_in;
1033         u64 timestamp0;
1034         s64 delta;
1035
1036         BUG_ON(cpu >= MAX_CPUS || cpu < 0);
1037
1038         timestamp0 = cpu_last_switched[cpu];
1039         cpu_last_switched[cpu] = timestamp;
1040         if (timestamp0)
1041                 delta = timestamp - timestamp0;
1042         else
1043                 delta = 0;
1044
1045         if (delta < 0)
1046                 die("hm, delta: %" PRId64 " < 0 ?\n", delta);
1047
1048
1049         sched_out = perf_session__findnew(session, switch_event->prev_pid);
1050         sched_in = perf_session__findnew(session, switch_event->next_pid);
1051
1052         out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
1053         if (!out_events) {
1054                 thread_atoms_insert(sched_out);
1055                 out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
1056                 if (!out_events)
1057                         die("out-event: Internal tree error");
1058         }
1059         add_sched_out_event(out_events, sched_out_state(switch_event), timestamp);
1060
1061         in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
1062         if (!in_events) {
1063                 thread_atoms_insert(sched_in);
1064                 in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
1065                 if (!in_events)
1066                         die("in-event: Internal tree error");
1067                 /*
1068                  * Task came in that we have not heard about yet,
1069                  * add in an initial atom in runnable state:
1070                  */
1071                 add_sched_out_event(in_events, 'R', timestamp);
1072         }
1073         add_sched_in_event(in_events, timestamp);
1074 }
1075
1076 static void
1077 latency_runtime_event(struct trace_runtime_event *runtime_event,
1078                      struct perf_session *session,
1079                      struct event *event __used,
1080                      int cpu,
1081                      u64 timestamp,
1082                      struct thread *this_thread __used)
1083 {
1084         struct thread *thread = perf_session__findnew(session, runtime_event->pid);
1085         struct work_atoms *atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
1086
1087         BUG_ON(cpu >= MAX_CPUS || cpu < 0);
1088         if (!atoms) {
1089                 thread_atoms_insert(thread);
1090                 atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
1091                 if (!atoms)
1092                         die("in-event: Internal tree error");
1093                 add_sched_out_event(atoms, 'R', timestamp);
1094         }
1095
1096         add_runtime_event(atoms, runtime_event->runtime, timestamp);
1097 }
1098
1099 static void
1100 latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
1101                      struct perf_session *session,
1102                      struct event *__event __used,
1103                      int cpu __used,
1104                      u64 timestamp,
1105                      struct thread *thread __used)
1106 {
1107         struct work_atoms *atoms;
1108         struct work_atom *atom;
1109         struct thread *wakee;
1110
1111         /* Note for later, it may be interesting to observe the failing cases */
1112         if (!wakeup_event->success)
1113                 return;
1114
1115         wakee = perf_session__findnew(session, wakeup_event->pid);
1116         atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
1117         if (!atoms) {
1118                 thread_atoms_insert(wakee);
1119                 atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
1120                 if (!atoms)
1121                         die("wakeup-event: Internal tree error");
1122                 add_sched_out_event(atoms, 'S', timestamp);
1123         }
1124
1125         BUG_ON(list_empty(&atoms->work_list));
1126
1127         atom = list_entry(atoms->work_list.prev, struct work_atom, list);
1128
1129         /*
1130          * You WILL be missing events if you've recorded only
1131          * one CPU, or are only looking at one, so don't
1132          * make useless noise.
1133          */
1134         if (profile_cpu == -1 && atom->state != THREAD_SLEEPING)
1135                 nr_state_machine_bugs++;
1136
1137         nr_timestamps++;
1138         if (atom->sched_out_time > timestamp) {
1139                 nr_unordered_timestamps++;
1140                 return;
1141         }
1142
1143         atom->state = THREAD_WAIT_CPU;
1144         atom->wake_up_time = timestamp;
1145 }
1146
1147 static void
1148 latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
1149                      struct perf_session *session,
1150                      struct event *__event __used,
1151                      int cpu __used,
1152                      u64 timestamp,
1153                      struct thread *thread __used)
1154 {
1155         struct work_atoms *atoms;
1156         struct work_atom *atom;
1157         struct thread *migrant;
1158
1159         /*
1160          * Only need to worry about migration when profiling one CPU.
1161          */
1162         if (profile_cpu == -1)
1163                 return;
1164
1165         migrant = perf_session__findnew(session, migrate_task_event->pid);
1166         atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
1167         if (!atoms) {
1168                 thread_atoms_insert(migrant);
1169                 register_pid(migrant->pid, migrant->comm);
1170                 atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
1171                 if (!atoms)
1172                         die("migration-event: Internal tree error");
1173                 add_sched_out_event(atoms, 'R', timestamp);
1174         }
1175
1176         BUG_ON(list_empty(&atoms->work_list));
1177
1178         atom = list_entry(atoms->work_list.prev, struct work_atom, list);
1179         atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;
1180
1181         nr_timestamps++;
1182
1183         if (atom->sched_out_time > timestamp)
1184                 nr_unordered_timestamps++;
1185 }
1186
1187 static struct trace_sched_handler lat_ops  = {
1188         .wakeup_event           = latency_wakeup_event,
1189         .switch_event           = latency_switch_event,
1190         .runtime_event          = latency_runtime_event,
1191         .fork_event             = latency_fork_event,
1192         .migrate_task_event     = latency_migrate_task_event,
1193 };
1194
1195 static void output_lat_thread(struct work_atoms *work_list)
1196 {
1197         int i;
1198         int ret;
1199         u64 avg;
1200
1201         if (!work_list->nb_atoms)
1202                 return;
1203         /*
1204          * Ignore idle threads:
1205          */
1206         if (!strcmp(work_list->thread->comm, "swapper"))
1207                 return;
1208
1209         all_runtime += work_list->total_runtime;
1210         all_count += work_list->nb_atoms;
1211
1212         ret = printf("  %s:%d ", work_list->thread->comm, work_list->thread->pid);
1213
1214         for (i = 0; i < 24 - ret; i++)
1215                 printf(" ");
1216
1217         avg = work_list->total_lat / work_list->nb_atoms;
1218
1219         printf("|%11.3f ms |%9" PRIu64 " | avg:%9.3f ms | max:%9.3f ms | max at: %9.6f s\n",
1220               (double)work_list->total_runtime / 1e6,
1221                  work_list->nb_atoms, (double)avg / 1e6,
1222                  (double)work_list->max_lat / 1e6,
1223                  (double)work_list->max_lat_at / 1e9);
1224 }
1225
1226 static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
1227 {
1228         if (l->thread->pid < r->thread->pid)
1229                 return -1;
1230         if (l->thread->pid > r->thread->pid)
1231                 return 1;
1232
1233         return 0;
1234 }
1235
1236 static struct sort_dimension pid_sort_dimension = {
1237         .name                   = "pid",
1238         .cmp                    = pid_cmp,
1239 };
1240
1241 static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
1242 {
1243         u64 avgl, avgr;
1244
1245         if (!l->nb_atoms)
1246                 return -1;
1247
1248         if (!r->nb_atoms)
1249                 return 1;
1250
1251         avgl = l->total_lat / l->nb_atoms;
1252         avgr = r->total_lat / r->nb_atoms;
1253
1254         if (avgl < avgr)
1255                 return -1;
1256         if (avgl > avgr)
1257                 return 1;
1258
1259         return 0;
1260 }
1261
1262 static struct sort_dimension avg_sort_dimension = {
1263         .name                   = "avg",
1264         .cmp                    = avg_cmp,
1265 };
1266
1267 static int max_cmp(struct work_atoms *l, struct work_atoms *r)
1268 {
1269         if (l->max_lat < r->max_lat)
1270                 return -1;
1271         if (l->max_lat > r->max_lat)
1272                 return 1;
1273
1274         return 0;
1275 }
1276
1277 static struct sort_dimension max_sort_dimension = {
1278         .name                   = "max",
1279         .cmp                    = max_cmp,
1280 };
1281
1282 static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
1283 {
1284         if (l->nb_atoms < r->nb_atoms)
1285                 return -1;
1286         if (l->nb_atoms > r->nb_atoms)
1287                 return 1;
1288
1289         return 0;
1290 }
1291
1292 static struct sort_dimension switch_sort_dimension = {
1293         .name                   = "switch",
1294         .cmp                    = switch_cmp,
1295 };
1296
1297 static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
1298 {
1299         if (l->total_runtime < r->total_runtime)
1300                 return -1;
1301         if (l->total_runtime > r->total_runtime)
1302                 return 1;
1303
1304         return 0;
1305 }
1306
1307 static struct sort_dimension runtime_sort_dimension = {
1308         .name                   = "runtime",
1309         .cmp                    = runtime_cmp,
1310 };
1311
1312 static struct sort_dimension *available_sorts[] = {
1313         &pid_sort_dimension,
1314         &avg_sort_dimension,
1315         &max_sort_dimension,
1316         &switch_sort_dimension,
1317         &runtime_sort_dimension,
1318 };
1319
1320 #define NB_AVAILABLE_SORTS      (int)(sizeof(available_sorts) / sizeof(struct sort_dimension *))
1321
1322 static LIST_HEAD(sort_list);
1323
1324 static int sort_dimension__add(const char *tok, struct list_head *list)
1325 {
1326         int i;
1327
1328         for (i = 0; i < NB_AVAILABLE_SORTS; i++) {
1329                 if (!strcmp(available_sorts[i]->name, tok)) {
1330                         list_add_tail(&available_sorts[i]->list, list);
1331
1332                         return 0;
1333                 }
1334         }
1335
1336         return -1;
1337 }
1338
1339 static void setup_sorting(void);
1340
1341 static void sort_lat(void)
1342 {
1343         struct rb_node *node;
1344
1345         for (;;) {
1346                 struct work_atoms *data;
1347                 node = rb_first(&atom_root);
1348                 if (!node)
1349                         break;
1350
1351                 rb_erase(node, &atom_root);
1352                 data = rb_entry(node, struct work_atoms, node);
1353                 __thread_latency_insert(&sorted_atom_root, data, &sort_list);
1354         }
1355 }
1356
1357 static struct trace_sched_handler *trace_handler;
1358
1359 static void
1360 process_sched_wakeup_event(void *data, struct perf_session *session,
1361                            struct event *event,
1362                            int cpu __used,
1363                            u64 timestamp __used,
1364                            struct thread *thread __used)
1365 {
1366         struct trace_wakeup_event wakeup_event;
1367
1368         FILL_COMMON_FIELDS(wakeup_event, event, data);
1369
1370         FILL_ARRAY(wakeup_event, comm, event, data);
1371         FILL_FIELD(wakeup_event, pid, event, data);
1372         FILL_FIELD(wakeup_event, prio, event, data);
1373         FILL_FIELD(wakeup_event, success, event, data);
1374         FILL_FIELD(wakeup_event, cpu, event, data);
1375
1376         if (trace_handler->wakeup_event)
1377                 trace_handler->wakeup_event(&wakeup_event, session, event,
1378                                             cpu, timestamp, thread);
1379 }
1380
1381 /*
1382  * Track the current task - that way we can know whether there's any
1383  * weird events, such as a task being switched away that is not current.
1384  */
1385 static int max_cpu;
1386
1387 static u32 curr_pid[MAX_CPUS] = { [0 ... MAX_CPUS-1] = -1 };
1388
1389 static struct thread *curr_thread[MAX_CPUS];
1390
1391 static char next_shortname1 = 'A';
1392 static char next_shortname2 = '0';
1393
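/*
 * 'perf sched map' output: one column per CPU, each running task is
 * shown by a two-character shortname (letter plus digit), '*' marks
 * the CPU on which this switch happened, '.' marks the idle task.
 */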
1394 static void
1395 map_switch_event(struct trace_switch_event *switch_event,
1396                  struct perf_session *session,
1397                  struct event *event __used,
1398                  int this_cpu,
1399                  u64 timestamp,
1400                  struct thread *thread __used)
1401 {
1402         struct thread *sched_out __used, *sched_in;
1403         int new_shortname;
1404         u64 timestamp0;
1405         s64 delta;
1406         int cpu;
1407
1408         BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0);
1409
1410         if (this_cpu > max_cpu)
1411                 max_cpu = this_cpu;
1412
1413         timestamp0 = cpu_last_switched[this_cpu];
1414         cpu_last_switched[this_cpu] = timestamp;
1415         if (timestamp0)
1416                 delta = timestamp - timestamp0;
1417         else
1418                 delta = 0;
1419
1420         if (delta < 0)
1421                 die("hm, delta: %" PRId64 " < 0 ?\n", delta);
1422
1423
1424         sched_out = perf_session__findnew(session, switch_event->prev_pid);
1425         sched_in = perf_session__findnew(session, switch_event->next_pid);
1426
1427         curr_thread[this_cpu] = sched_in;
1428
1429         printf("  ");
1430
1431         new_shortname = 0;
1432         if (!sched_in->shortname[0]) {
1433                 sched_in->shortname[0] = next_shortname1;
1434                 sched_in->shortname[1] = next_shortname2;
1435
1436                 if (next_shortname1 < 'Z') {
1437                         next_shortname1++;
1438                 } else {
1439                         next_shortname1='A';
1440                         if (next_shortname2 < '9') {
1441                                 next_shortname2++;
1442                         } else {
1443                                 next_shortname2='0';
1444                         }
1445                 }
1446                 new_shortname = 1;
1447         }
1448
1449         for (cpu = 0; cpu <= max_cpu; cpu++) {
1450                 if (cpu != this_cpu)
1451                         printf(" ");
1452                 else
1453                         printf("*");
1454
1455                 if (curr_thread[cpu]) {
1456                         if (curr_thread[cpu]->pid)
1457                                 printf("%2s ", curr_thread[cpu]->shortname);
1458                         else
1459                                 printf(".  ");
1460                 } else
1461                         printf("   ");
1462         }
1463
1464         printf("  %12.6f secs ", (double)timestamp/1e9);
1465         if (new_shortname) {
1466                 printf("%s => %s:%d\n",
1467                         sched_in->shortname, sched_in->comm, sched_in->pid);
1468         } else {
1469                 printf("\n");
1470         }
1471 }
1472
1473
1474 static void
1475 process_sched_switch_event(void *data, struct perf_session *session,
1476                            struct event *event,
1477                            int this_cpu,
1478                            u64 timestamp __used,
1479                            struct thread *thread __used)
1480 {
1481         struct trace_switch_event switch_event;
1482
1483         FILL_COMMON_FIELDS(switch_event, event, data);
1484
1485         FILL_ARRAY(switch_event, prev_comm, event, data);
1486         FILL_FIELD(switch_event, prev_pid, event, data);
1487         FILL_FIELD(switch_event, prev_prio, event, data);
1488         FILL_FIELD(switch_event, prev_state, event, data);
1489         FILL_ARRAY(switch_event, next_comm, event, data);
1490         FILL_FIELD(switch_event, next_pid, event, data);
1491         FILL_FIELD(switch_event, next_prio, event, data);
1492
1493         if (curr_pid[this_cpu] != (u32)-1) {
1494                 /*
1495                  * Are we trying to switch away a PID that is
1496                  * not current?
1497                  */
1498                 if (curr_pid[this_cpu] != switch_event.prev_pid)
1499                         nr_context_switch_bugs++;
1500         }
1501         if (trace_handler->switch_event)
1502                 trace_handler->switch_event(&switch_event, session, event,
1503                                             this_cpu, timestamp, thread);
1504
1505         curr_pid[this_cpu] = switch_event.next_pid;
1506 }
1507
1508 static void
1509 process_sched_runtime_event(void *data, struct perf_session *session,
1510                            struct event *event,
1511                            int cpu __used,
1512                            u64 timestamp __used,
1513                            struct thread *thread __used)
1514 {
1515         struct trace_runtime_event runtime_event;
1516
1517         FILL_ARRAY(runtime_event, comm, event, data);
1518         FILL_FIELD(runtime_event, pid, event, data);
1519         FILL_FIELD(runtime_event, runtime, event, data);
1520         FILL_FIELD(runtime_event, vruntime, event, data);
1521
1522         if (trace_handler->runtime_event)
1523                 trace_handler->runtime_event(&runtime_event, session, event, cpu, timestamp, thread);
1524 }
1525
1526 static void
1527 process_sched_fork_event(void *data,
1528                          struct event *event,
1529                          int cpu __used,
1530                          u64 timestamp __used,
1531                          struct thread *thread __used)
1532 {
1533         struct trace_fork_event fork_event;
1534
1535         FILL_COMMON_FIELDS(fork_event, event, data);
1536
1537         FILL_ARRAY(fork_event, parent_comm, event, data);
1538         FILL_FIELD(fork_event, parent_pid, event, data);
1539         FILL_ARRAY(fork_event, child_comm, event, data);
1540         FILL_FIELD(fork_event, child_pid, event, data);
1541
1542         if (trace_handler->fork_event)
1543                 trace_handler->fork_event(&fork_event, event,
1544                                           cpu, timestamp, thread);
1545 }
1546
1547 static void
1548 process_sched_exit_event(struct event *event,
1549                          int cpu __used,
1550                          u64 timestamp __used,
1551                          struct thread *thread __used)
1552 {
1553         if (verbose)
1554                 printf("sched_exit event %p\n", event);
1555 }
1556
1557 static void
1558 process_sched_migrate_task_event(void *data, struct perf_session *session,
1559                            struct event *event,
1560                            int cpu __used,
1561                            u64 timestamp __used,
1562                            struct thread *thread __used)
1563 {
1564         struct trace_migrate_task_event migrate_task_event;
1565
1566         FILL_COMMON_FIELDS(migrate_task_event, event, data);
1567
1568         FILL_ARRAY(migrate_task_event, comm, event, data);
1569         FILL_FIELD(migrate_task_event, pid, event, data);
1570         FILL_FIELD(migrate_task_event, prio, event, data);
1571         FILL_FIELD(migrate_task_event, cpu, event, data);
1572
1573         if (trace_handler->migrate_task_event)
1574                 trace_handler->migrate_task_event(&migrate_task_event, session,
1575                                                  event, cpu, timestamp, thread);
1576 }
1577
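/*
 * Demultiplex a raw sample by tracepoint name and hand it to the
 * matching process_sched_*_event() wrapper above:
 */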
1578 static void process_raw_event(union perf_event *raw_event __used,
1579                               struct perf_session *session, void *data, int cpu,
1580                               u64 timestamp, struct thread *thread)
1581 {
1582         struct event *event;
1583         int type;
1584
1585
1586         type = trace_parse_common_type(data);
1587         event = trace_find_event(type);
1588
1589         if (!strcmp(event->name, "sched_switch"))
1590                 process_sched_switch_event(data, session, event, cpu, timestamp, thread);
1591         if (!strcmp(event->name, "sched_stat_runtime"))
1592                 process_sched_runtime_event(data, session, event, cpu, timestamp, thread);
1593         if (!strcmp(event->name, "sched_wakeup"))
1594                 process_sched_wakeup_event(data, session, event, cpu, timestamp, thread);
1595         if (!strcmp(event->name, "sched_wakeup_new"))
1596                 process_sched_wakeup_event(data, session, event, cpu, timestamp, thread);
1597         if (!strcmp(event->name, "sched_process_fork"))
1598                 process_sched_fork_event(data, event, cpu, timestamp, thread);
1599         if (!strcmp(event->name, "sched_process_exit"))
1600                 process_sched_exit_event(event, cpu, timestamp, thread);
1601         if (!strcmp(event->name, "sched_migrate_task"))
1602                 process_sched_migrate_task_event(data, session, event, cpu, timestamp, thread);
1603 }
1604
1605 static int process_sample_event(union perf_event *event,
1606                                 struct perf_sample *sample,
1607                                 struct perf_evsel *evsel __used,
1608                                 struct perf_session *session)
1609 {
1610         struct thread *thread;
1611
1612         if (!(session->sample_type & PERF_SAMPLE_RAW))
1613                 return 0;
1614
1615         thread = perf_session__findnew(session, sample->pid);
1616         if (thread == NULL) {
1617                 pr_debug("problem processing %d event, skipping it.\n",
1618                          event->header.type);
1619                 return -1;
1620         }
1621
1622         dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
1623
1624         if (profile_cpu != -1 && profile_cpu != (int)sample->cpu)
1625                 return 0;
1626
1627         process_raw_event(event, session, sample->raw_data, sample->cpu,
1628                           sample->time, thread);
1629
1630         return 0;
1631 }
1632
1633 static struct perf_event_ops event_ops = {
1634         .sample                 = process_sample_event,
1635         .comm                   = perf_event__process_comm,
1636         .lost                   = perf_event__process_lost,
1637         .fork                   = perf_event__process_task,
1638         .ordered_samples        = true,
1639 };
1640
1641 static void read_events(bool destroy, struct perf_session **psession)
1642 {
1643         int err = -EINVAL;
1644         struct perf_session *session = perf_session__new(input_name, O_RDONLY,
1645                                                          0, false, &event_ops);
1646         if (session == NULL)
1647                 die("No Memory");
1648
1649         if (perf_session__has_traces(session, "record -R")) {
1650                 err = perf_session__process_events(session, &event_ops);
1651                 if (err)
1652                         die("Failed to process events, error %d", err);
1653
1654                 nr_events      = session->hists.stats.nr_events[0];
1655                 nr_lost_events = session->hists.stats.total_lost;
1656                 nr_lost_chunks = session->hists.stats.nr_events[PERF_RECORD_LOST];
1657         }
1658
1659         if (destroy)
1660                 perf_session__delete(session);
1661
1662         if (psession)
1663                 *psession = session;
1664 }
1665
1666 static void print_bad_events(void)
1667 {
1668         if (nr_unordered_timestamps && nr_timestamps) {
1669                 printf("  INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
1670                         (double)nr_unordered_timestamps/(double)nr_timestamps*100.0,
1671                         nr_unordered_timestamps, nr_timestamps);
1672         }
1673         if (nr_lost_events && nr_events) {
1674                 printf("  INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
1675                         (double)nr_lost_events/(double)nr_events*100.0,
1676                         nr_lost_events, nr_events, nr_lost_chunks);
1677         }
1678         if (nr_state_machine_bugs && nr_timestamps) {
1679                 printf("  INFO: %.3f%% state machine bugs (%ld out of %ld)",
1680                         (double)nr_state_machine_bugs/(double)nr_timestamps*100.0,
1681                         nr_state_machine_bugs, nr_timestamps);
1682                 if (nr_lost_events)
1683                         printf(" (due to lost events?)");
1684                 printf("\n");
1685         }
1686         if (nr_context_switch_bugs && nr_timestamps) {
1687                 printf("  INFO: %.3f%% context switch bugs (%ld out of %ld)",
1688                         (double)nr_context_switch_bugs/(double)nr_timestamps*100.0,
1689                         nr_context_switch_bugs, nr_timestamps);
1690                 if (nr_lost_events)
1691                         printf(" (due to lost events?)");
1692                 printf("\n");
1693         }
1694 }
1695
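/*
 * 'perf sched latency': read the trace, sort the per-task work atoms
 * according to --sort and print the latency table plus totals.
 */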
1696 static void __cmd_lat(void)
1697 {
1698         struct rb_node *next;
1699         struct perf_session *session;
1700
1701         setup_pager();
1702         read_events(false, &session);
1703         sort_lat();
1704
1705         printf("\n ---------------------------------------------------------------------------------------------------------------\n");
1706         printf("  Task                  |   Runtime ms  | Switches | Average delay ms | Maximum delay ms | Maximum delay at     |\n");
1707         printf(" ---------------------------------------------------------------------------------------------------------------\n");
1708
1709         next = rb_first(&sorted_atom_root);
1710
1711         while (next) {
1712                 struct work_atoms *work_list;
1713
1714                 work_list = rb_entry(next, struct work_atoms, node);
1715                 output_lat_thread(work_list);
1716                 next = rb_next(next);
1717         }
1718
1719         printf(" -----------------------------------------------------------------------------------------\n");
1720         printf("  TOTAL:                |%11.3f ms |%9" PRIu64 " |\n",
1721                 (double)all_runtime/1e6, all_count);
1722
1723         printf(" ---------------------------------------------------\n");
1724
1725         print_bad_events();
1726         printf("\n");
1727
1728         perf_session__delete(session);
1729 }
1730
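/* 'perf sched map' only consumes the switch events. */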
1731 static struct trace_sched_handler map_ops  = {
1732         .wakeup_event           = NULL,
1733         .switch_event           = map_switch_event,
1734         .runtime_event          = NULL,
1735         .fork_event             = NULL,
1736 };
1737
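/*
 * 'perf sched map': the map itself is emitted from map_switch_event()
 * while the events are being processed; only the summary of bad events
 * is printed afterwards.
 */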
1738 static void __cmd_map(void)
1739 {
1740         max_cpu = sysconf(_SC_NPROCESSORS_CONF);
1741
1742         setup_pager();
1743         read_events(true, NULL);
1744         print_bad_events();
1745 }
1746
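/*
 * 'perf sched replay': measure the run/sleep calibration overheads,
 * rebuild the recorded tasks and their cross-task wakeups, then replay
 * the workload replay_repeat times.
 */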
1747 static void __cmd_replay(void)
1748 {
1749         unsigned long i;
1750
1751         calibrate_run_measurement_overhead();
1752         calibrate_sleep_measurement_overhead();
1753
1754         test_calibrations();
1755
1756         read_events(true, NULL);
1757
1758         printf("nr_run_events:        %ld\n", nr_run_events);
1759         printf("nr_sleep_events:      %ld\n", nr_sleep_events);
1760         printf("nr_wakeup_events:     %ld\n", nr_wakeup_events);
1761
1762         if (targetless_wakeups)
1763                 printf("target-less wakeups:  %ld\n", targetless_wakeups);
1764         if (multitarget_wakeups)
1765                 printf("multi-target wakeups: %ld\n", multitarget_wakeups);
1766         if (nr_run_events_optimized)
1767                 printf("run atoms optimized: %ld\n",
1768                         nr_run_events_optimized);
1769
1770         print_task_traces();
1771         add_cross_task_wakeups();
1772
1773         create_tasks();
1774         printf("------------------------------------------------------------\n");
1775         for (i = 0; i < replay_repeat; i++)
1776                 run_one_test();
1777 }
1778
1779
1780 static const char * const sched_usage[] = {
1781         "perf sched [<options>] {record|latency|map|replay|script}",
1782         NULL
1783 };
1784
1785 static const struct option sched_options[] = {
1786         OPT_STRING('i', "input", &input_name, "file",
1787                     "input file name"),
1788         OPT_INCR('v', "verbose", &verbose,
1789                     "be more verbose (show symbol address, etc)"),
1790         OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
1791                     "dump raw trace in ASCII"),
1792         OPT_END()
1793 };
1794
1795 static const char * const latency_usage[] = {
1796         "perf sched latency [<options>]",
1797         NULL
1798 };
1799
1800 static const struct option latency_options[] = {
1801         OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
1802                    "sort by key(s): runtime, switch, avg, max"),
1803         OPT_INCR('v', "verbose", &verbose,
1804                     "be more verbose (show symbol address, etc)"),
1805         OPT_INTEGER('C', "CPU", &profile_cpu,
1806                     "CPU to profile on"),
1807         OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
1808                     "dump raw trace in ASCII"),
1809         OPT_END()
1810 };
1811
1812 static const char * const replay_usage[] = {
1813         "perf sched replay [<options>]",
1814         NULL
1815 };
1816
1817 static const struct option replay_options[] = {
1818         OPT_UINTEGER('r', "repeat", &replay_repeat,
1819                      "repeat the workload replay N times (-1: infinite)"),
1820         OPT_INCR('v', "verbose", &verbose,
1821                     "be more verbose (show symbol address, etc)"),
1822         OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
1823                     "dump raw trace in ASCII"),
1824         OPT_END()
1825 };
1826
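/*
 * Parse the comma-separated --sort keys into sort_list; 'pid' is always
 * added to the internal cmp_pid list as well.
 */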
1827 static void setup_sorting(void)
1828 {
1829         char *tmp, *tok, *str = strdup(sort_order);
1830
1831         for (tok = strtok_r(str, ", ", &tmp);
1832                         tok; tok = strtok_r(NULL, ", ", &tmp)) {
1833                 if (sort_dimension__add(tok, &sort_list) < 0) {
1834                         error("Unknown --sort key: `%s'", tok);
1835                         usage_with_options(latency_usage, latency_options);
1836                 }
1837         }
1838
1839         free(str);
1840
1841         sort_dimension__add("pid", &cmp_pid);
1842 }
1843
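/*
 * Fixed arguments prepended to 'perf record' by 'perf sched record':
 * system-wide (-a), raw samples (-R), one sample per event (-c 1) and
 * the scheduler tracepoints needed by the latency/map/replay commands.
 */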
1844 static const char *record_args[] = {
1845         "record",
1846         "-a",
1847         "-R",
1848         "-f",
1849         "-m", "1024",
1850         "-c", "1",
1851         "-e", "sched:sched_switch",
1852         "-e", "sched:sched_stat_wait",
1853         "-e", "sched:sched_stat_sleep",
1854         "-e", "sched:sched_stat_iowait",
1855         "-e", "sched:sched_stat_runtime",
1856         "-e", "sched:sched_process_exit",
1857         "-e", "sched:sched_process_fork",
1858         "-e", "sched:sched_wakeup",
1859         "-e", "sched:sched_migrate_task",
1860 };
1861
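/*
 * Build the argv for 'perf record' from record_args plus any extra
 * arguments supplied by the user, then run the record command.
 */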
1862 static int __cmd_record(int argc, const char **argv)
1863 {
1864         unsigned int rec_argc, i, j;
1865         const char **rec_argv;
1866
1867         rec_argc = ARRAY_SIZE(record_args) + argc - 1;
1868         rec_argv = calloc(rec_argc + 1, sizeof(char *));
1869
1870         if (rec_argv == NULL)
1871                 return -ENOMEM;
1872
1873         for (i = 0; i < ARRAY_SIZE(record_args); i++)
1874                 rec_argv[i] = strdup(record_args[i]);
1875
1876         for (j = 1; j < (unsigned int)argc; j++, i++)
1877                 rec_argv[i] = argv[j];
1878
1879         BUG_ON(i != rec_argc);
1880
1881         return cmd_record(i, rec_argv, NULL);
1882 }
1883
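/*
 * Entry point for 'perf sched': dispatch to the record/latency/map/replay
 * sub-commands ('script' is forwarded to 'perf script').
 *
 * Illustrative invocations (not an exhaustive list):
 *
 *   perf sched record -- sleep 1
 *   perf sched latency --sort max
 *   perf sched replay -r 10
 */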
1884 int cmd_sched(int argc, const char **argv, const char *prefix __used)
1885 {
1886         argc = parse_options(argc, argv, sched_options, sched_usage,
1887                              PARSE_OPT_STOP_AT_NON_OPTION);
1888         if (!argc)
1889                 usage_with_options(sched_usage, sched_options);
1890
1891         /*
1892          * Aliased to 'perf script' for now:
1893          */
1894         if (!strcmp(argv[0], "script"))
1895                 return cmd_script(argc, argv, prefix);
1896
1897         symbol__init();
1898         if (!strncmp(argv[0], "rec", 3)) {
1899                 return __cmd_record(argc, argv);
1900         } else if (!strncmp(argv[0], "lat", 3)) {
1901                 trace_handler = &lat_ops;
1902                 if (argc > 1) {
1903                         argc = parse_options(argc, argv, latency_options, latency_usage, 0);
1904                         if (argc)
1905                                 usage_with_options(latency_usage, latency_options);
1906                 }
1907                 setup_sorting();
1908                 __cmd_lat();
1909         } else if (!strcmp(argv[0], "map")) {
1910                 trace_handler = &map_ops;
1911                 setup_sorting();
1912                 __cmd_map();
1913         } else if (!strncmp(argv[0], "rep", 3)) {
1914                 trace_handler = &replay_ops;
1915                 if (argc) {
1916                         argc = parse_options(argc, argv, replay_options, replay_usage, 0);
1917                         if (argc)
1918                                 usage_with_options(replay_usage, replay_options);
1919                 }
1920                 __cmd_replay();
1921         } else {
1922                 usage_with_options(sched_usage, sched_options);
1923         }
1924
1925         return 0;
1926 }