/*
 * kernel/sched_debug.c
 *
 * Print the CFS rbtree
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/utsname.h>

/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...)                     \
 do {                                           \
        if (m)                                  \
                seq_printf(m, x);               \
        else                                    \
                printk(x);                      \
 } while (0)

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
        if ((long long)nsec < 0) {
                nsec = -nsec;
                do_div(nsec, 1000000);
                return -nsec;
        }
        do_div(nsec, 1000000);

        return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
        if ((long long)nsec < 0)
                nsec = -nsec;

        return do_div(nsec, 1000000);
}

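/*
 * SPLIT_NS() expands to two printf arguments matching a "%Ld.%06ld"
 * format: the value in milliseconds and the sub-millisecond remainder.
 */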
#define SPLIT_NS(x) nsec_high(x), nsec_low(x)

#ifdef CONFIG_FAIR_GROUP_SCHED
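/*
 * Print the scheduling-entity stats of a task group on one CPU.
 */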
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
        struct sched_entity *se = tg->se[cpu];
        if (!se)
                return;

#define P(F) \
        SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)F)
#define PN(F) \
        SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))

        PN(se->exec_start);
        PN(se->vruntime);
        PN(se->sum_exec_runtime);
#ifdef CONFIG_SCHEDSTATS
        PN(se->statistics.wait_start);
        PN(se->statistics.sleep_start);
        PN(se->statistics.block_start);
        PN(se->statistics.sleep_max);
        PN(se->statistics.block_max);
        PN(se->statistics.exec_max);
        PN(se->statistics.slice_max);
        PN(se->statistics.wait_max);
        PN(se->statistics.wait_sum);
        P(se->statistics.wait_count);
#endif
        P(se->load.weight);
#undef PN
#undef P
}
#endif

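/*
 * Print one line for a runnable task; the leading 'R' marks the task
 * currently running on this runqueue.
 */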
static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
        if (rq->curr == p)
                SEQ_printf(m, "R");
        else
                SEQ_printf(m, " ");

        SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
                p->comm, p->pid,
                SPLIT_NS(p->se.vruntime),
                (long long)(p->nvcsw + p->nivcsw),
                p->prio);
#ifdef CONFIG_SCHEDSTATS
        SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
                SPLIT_NS(p->se.statistics.wait_sum),
                SPLIT_NS(p->se.sum_exec_runtime),
                SPLIT_NS(p->se.statistics.sum_sleep_runtime));
#else
        SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld",
                0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L);
#endif

        SEQ_printf(m, "\n");
}

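/*
 * Walk the global task list and print every runnable task on this CPU.
 */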
static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
        struct task_struct *g, *p;
        unsigned long flags;

        SEQ_printf(m,
        "\nrunnable tasks:\n"
        "            task   PID         tree-key  switches  prio"
        "     exec-runtime         sum-exec        sum-sleep\n"
        "------------------------------------------------------"
        "----------------------------------------------------\n");

        read_lock_irqsave(&tasklist_lock, flags);

        do_each_thread(g, p) {
                if (!p->se.on_rq || task_cpu(p) != rq_cpu)
                        continue;

                print_task(m, rq, p);
        } while_each_thread(g, p);

        read_unlock_irqrestore(&tasklist_lock, flags);
}

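/*
 * Dump the state of one CFS runqueue: vruntime spread, load and
 * group-scheduling statistics.
 */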
void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
        s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
                spread, rq0_min_vruntime, spread0;
        struct rq *rq = cpu_rq(cpu);
        struct sched_entity *last;
        unsigned long flags;

        SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
                        SPLIT_NS(cfs_rq->exec_clock));

        raw_spin_lock_irqsave(&rq->lock, flags);
        if (cfs_rq->rb_leftmost)
                MIN_vruntime = (__pick_next_entity(cfs_rq))->vruntime;
        last = __pick_last_entity(cfs_rq);
        if (last)
                max_vruntime = last->vruntime;
        min_vruntime = cfs_rq->min_vruntime;
        rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
        raw_spin_unlock_irqrestore(&rq->lock, flags);
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
                        SPLIT_NS(MIN_vruntime));
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
                        SPLIT_NS(min_vruntime));
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
                        SPLIT_NS(max_vruntime));
        spread = max_vruntime - MIN_vruntime;
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
                        SPLIT_NS(spread));
        spread0 = min_vruntime - rq0_min_vruntime;
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
                        SPLIT_NS(spread0));
        SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
                        cfs_rq->nr_spread_over);
        SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
        SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_FAIR_GROUP_SCHED
#ifdef CONFIG_SMP
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "load_avg",
                        SPLIT_NS(cfs_rq->load_avg));
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "load_period",
                        SPLIT_NS(cfs_rq->load_period));
        SEQ_printf(m, "  .%-30s: %ld\n", "load_contrib",
                        cfs_rq->load_contribution);
        SEQ_printf(m, "  .%-30s: %d\n", "load_tg",
                        atomic_read(&cfs_rq->tg->load_weight));
#endif

        print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

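/*
 * Dump the state of one RT runqueue.
 */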
void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
        SEQ_printf(m, "\nrt_rq[%d]:\n", cpu);

#define P(x) \
        SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PN(x) \
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

        P(rt_nr_running);
        P(rt_throttled);
        PN(rt_time);
        PN(rt_runtime);

#undef PN
#undef P
}

extern __read_mostly int sched_clock_running;

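/*
 * Print the header, runqueue counters, schedstats and the runnable-task
 * table for one CPU.
 */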
static void print_cpu(struct seq_file *m, int cpu)
{
        struct rq *rq = cpu_rq(cpu);

#ifdef CONFIG_X86
        {
                unsigned int freq = cpu_khz ? : 1;

                SEQ_printf(m, "\ncpu#%d, %u.%03u MHz\n",
                           cpu, freq / 1000, (freq % 1000));
        }
#else
        SEQ_printf(m, "\ncpu#%d\n", cpu);
#endif

#define P(x) \
        SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x))
#define PN(x) \
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

        P(nr_running);
        SEQ_printf(m, "  .%-30s: %lu\n", "load",
                   rq->load.weight);
        P(nr_switches);
        P(nr_load_updates);
        P(nr_uninterruptible);
        PN(next_balance);
        P(curr->pid);
        PN(clock);
        P(cpu_load[0]);
        P(cpu_load[1]);
        P(cpu_load[2]);
        P(cpu_load[3]);
        P(cpu_load[4]);
#undef P
#undef PN

#ifdef CONFIG_SCHEDSTATS
#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, rq->n);
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);

        P(yld_count);

        P(sched_switch);
        P(sched_count);
        P(sched_goidle);
#ifdef CONFIG_SMP
        P64(avg_idle);
#endif

        P(ttwu_count);
        P(ttwu_local);

        P(bkl_count);

#undef P
#endif
        print_cfs_stats(m, cpu);
        print_rt_stats(m, cpu);

        print_rq(m, rq, cpu);
}

static const char *sched_tunable_scaling_names[] = {
        "none",
        "logarithmic",
        "linear"
};

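/*
 * Top-level seq_file handler: prints the clocks, the sysctl settings and
 * then the state of each online CPU. A NULL seq_file makes SEQ_printf()
 * fall back to printk(), so this also serves the console dump below.
 */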
static int sched_debug_show(struct seq_file *m, void *v)
{
        u64 ktime, sched_clk, cpu_clk;
        unsigned long flags;
        int cpu;

        local_irq_save(flags);
        ktime = ktime_to_ns(ktime_get());
        sched_clk = sched_clock();
        cpu_clk = local_clock();
        local_irq_restore(flags);

        SEQ_printf(m, "Sched Debug Version: v0.10, %s %.*s\n",
                init_utsname()->release,
                (int)strcspn(init_utsname()->version, " "),
                init_utsname()->version);

#define P(x) \
        SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
        SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
        PN(ktime);
        PN(sched_clk);
        PN(cpu_clk);
        P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
        P(sched_clock_stable);
#endif
#undef PN
#undef P

        SEQ_printf(m, "\n");
        SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
        SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
        SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
        PN(sysctl_sched_latency);
        PN(sysctl_sched_min_granularity);
        PN(sysctl_sched_wakeup_granularity);
        P(sysctl_sched_child_runs_first);
        P(sysctl_sched_features);
#undef PN
#undef P

        SEQ_printf(m, "  .%-40s: %d (%s)\n", "sysctl_sched_tunable_scaling",
                sysctl_sched_tunable_scaling,
                sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);

        for_each_online_cpu(cpu)
                print_cpu(m, cpu);

        SEQ_printf(m, "\n");

        return 0;
}

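/* Dump everything to the console (m == NULL). */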
static void sysrq_sched_debug_show(void)
{
        sched_debug_show(NULL, NULL);
}

static int sched_debug_open(struct inode *inode, struct file *filp)
{
        return single_open(filp, sched_debug_show, NULL);
}

static const struct file_operations sched_debug_fops = {
        .open           = sched_debug_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

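/* Register read-only /proc/sched_debug at boot. */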
static int __init init_sched_debug_procfs(void)
{
        struct proc_dir_entry *pe;

        pe = proc_create("sched_debug", 0444, NULL, &sched_debug_fops);
        if (!pe)
                return -ENOMEM;
        return 0;
}

__initcall(init_sched_debug_procfs);

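/*
 * Per-task statistics, shown in /proc/<pid>/sched.
 */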
void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
        unsigned long nr_switches;

        SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid,
                                                get_nr_threads(p));
        SEQ_printf(m,
                "---------------------------------------------------------\n");
#define __P(F) \
        SEQ_printf(m, "%-35s:%21Ld\n", #F, (long long)F)
#define P(F) \
        SEQ_printf(m, "%-35s:%21Ld\n", #F, (long long)p->F)
#define __PN(F) \
        SEQ_printf(m, "%-35s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
        SEQ_printf(m, "%-35s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))

        PN(se.exec_start);
        PN(se.vruntime);
        PN(se.sum_exec_runtime);

        nr_switches = p->nvcsw + p->nivcsw;

#ifdef CONFIG_SCHEDSTATS
        PN(se.statistics.wait_start);
        PN(se.statistics.sleep_start);
        PN(se.statistics.block_start);
        PN(se.statistics.sleep_max);
        PN(se.statistics.block_max);
        PN(se.statistics.exec_max);
        PN(se.statistics.slice_max);
        PN(se.statistics.wait_max);
        PN(se.statistics.wait_sum);
        P(se.statistics.wait_count);
        PN(se.statistics.iowait_sum);
        P(se.statistics.iowait_count);
        P(sched_info.bkl_count);
        P(se.nr_migrations);
        P(se.statistics.nr_migrations_cold);
        P(se.statistics.nr_failed_migrations_affine);
        P(se.statistics.nr_failed_migrations_running);
        P(se.statistics.nr_failed_migrations_hot);
        P(se.statistics.nr_forced_migrations);
        P(se.statistics.nr_wakeups);
        P(se.statistics.nr_wakeups_sync);
        P(se.statistics.nr_wakeups_migrate);
        P(se.statistics.nr_wakeups_local);
        P(se.statistics.nr_wakeups_remote);
        P(se.statistics.nr_wakeups_affine);
        P(se.statistics.nr_wakeups_affine_attempts);
        P(se.statistics.nr_wakeups_passive);
        P(se.statistics.nr_wakeups_idle);

        {
                u64 avg_atom, avg_per_cpu;

                avg_atom = p->se.sum_exec_runtime;
                if (nr_switches)
                        do_div(avg_atom, nr_switches);
                else
                        avg_atom = -1LL;

                avg_per_cpu = p->se.sum_exec_runtime;
                if (p->se.nr_migrations) {
                        avg_per_cpu = div64_u64(avg_per_cpu,
                                                p->se.nr_migrations);
                } else {
                        avg_per_cpu = -1LL;
                }

                __PN(avg_atom);
                __PN(avg_per_cpu);
        }
#endif
        __P(nr_switches);
        SEQ_printf(m, "%-35s:%21Ld\n",
                   "nr_voluntary_switches", (long long)p->nvcsw);
        SEQ_printf(m, "%-35s:%21Ld\n",
                   "nr_involuntary_switches", (long long)p->nivcsw);

        P(se.load.weight);
        P(policy);
        P(prio);
#undef PN
#undef __PN
#undef P
#undef __P

        {
                unsigned int this_cpu = raw_smp_processor_id();
                u64 t0, t1;

                t0 = cpu_clock(this_cpu);
                t1 = cpu_clock(this_cpu);
                SEQ_printf(m, "%-35s:%21Ld\n",
                           "clock-delta", (long long)(t1-t0));
        }
}

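/* Called when /proc/<pid>/sched is written to: reset the task's stats. */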
void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
        memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}