/*
2 * kernel/lockdep_proc.c
4 * Runtime locking correctness validator
6 * Started by Ingo Molnar:
8 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
9 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
11 * Code for /proc/lockdep and /proc/lockdep_stats:
 */
14 #include <linux/module.h>
15 #include <linux/proc_fs.h>
16 #include <linux/seq_file.h>
17 #include <linux/kallsyms.h>
18 #include <linux/debug_locks.h>
19 #include <linux/vmalloc.h>
20 #include <linux/sort.h>
21 #include <asm/uaccess.h>
22 #include <asm/div64.h>
24 #include "lockdep_internals.h"
/*
 * NOTE(review): this listing is fragmentary -- each line carries an embedded
 * original line number and many intermediate lines are missing, so the code
 * below does not compile as-is.  Comments describe only what is visible.
 *
 * seq_file iterator callbacks for /proc/lockdep.
 *
 * l_next(): advance from the current lock_class to its successor on the
 * global all_lock_classes list (SEQ_START_TOKEN marks the header element).
 * l_start(): position the iterator; returns SEQ_START_TOKEN for the header
 * and otherwise walks all_lock_classes -- presumably to the *pos-th entry
 * (TODO confirm; the indexing lines are not visible here).
 * l_stop(): iteration teardown hook (body not visible).
 */
26 static void *l_next(struct seq_file *m, void *v, loff_t *pos)
28 struct lock_class *class;
32 if (v == SEQ_START_TOKEN)
/* Stop at the list head: only advance while a real successor exists. */
37 if (class->lock_entry.next != &all_lock_classes)
38 class = list_entry(class->lock_entry.next,
39 struct lock_class, lock_entry);
47 static void *l_start(struct seq_file *m, loff_t *pos)
49 struct lock_class *class;
53 return SEQ_START_TOKEN;
55 list_for_each_entry(class, &all_lock_classes, lock_entry) {
62 static void l_stop(struct seq_file *m, void *v)
/*
 * Recursively count a lock class's transitive dependency tree.
 * count_forward_deps() walks class->locks_after, count_backward_deps()
 * walks class->locks_before; each counts the class itself as 1 and adds
 * the recursive count of every dependency.
 * NOTE(review): fragmentary listing -- the "return ret;" lines are not
 * visible, and there is no visible cycle/depth guard; unbounded recursion
 * on a cyclic graph would overflow the stack -- TODO confirm against the
 * full source.
 */
66 static unsigned long count_forward_deps(struct lock_class *class)
68 struct lock_list *entry;
69 unsigned long ret = 1;
72 * Recurse this class's dependency list:
74 list_for_each_entry(entry, &class->locks_after, entry)
75 ret += count_forward_deps(entry->class);
80 static unsigned long count_backward_deps(struct lock_class *class)
82 struct lock_list *entry;
83 unsigned long ret = 1;
86 * Recurse this class's dependency list:
88 list_for_each_entry(entry, &class->locks_before, entry)
89 ret += count_backward_deps(entry->class);
/*
 * Emit a lock class's human-readable name into the seq_file:
 * falls back to a name derived from the class key via __get_key_name()
 * when class->name is not usable (the guarding condition is not visible
 * in this fragment), then appends "#<name_version>" when there are
 * multiple classes with the same name, and "/<subclass>" for subclasses.
 */
94 static void print_name(struct seq_file *m, struct lock_class *class)
97 const char *name = class->name;
100 name = __get_key_name(class->key, str)
101 seq_printf(m, "%s", name);
103 seq_printf(m, "%s", name);
104 if (class->name_version > 1)
105 seq_printf(m, "#%d", class->name_version);
107 seq_printf(m, "/%d", class->subclass);
111 static int l_show(struct seq_file *m, void *v)
113 unsigned long nr_forward_deps, nr_backward_deps;
114 struct lock_class *class = v;
115 struct lock_list *entry;
118 if (v == SEQ_START_TOKEN) {
119 seq_printf(m, "all lock classes:\n");
123 seq_printf(m, "%p", class->key);
124 #ifdef CONFIG_DEBUG_LOCKDEP
125 seq_printf(m, " OPS:%8ld", class->ops);
127 nr_forward_deps = count_forward_deps(class);
128 seq_printf(m, " FD:%5ld", nr_forward_deps);
130 nr_backward_deps = count_backward_deps(class);
131 seq_printf(m, " BD:%5ld", nr_backward_deps);
133 get_usage_chars(class, &c1, &c2, &c3, &c4);
134 seq_printf(m, " %c%c%c%c", c1, c2, c3, c4);
137 print_name(m, class);
140 list_for_each_entry(entry, &class->locks_after, entry) {
141 if (entry->distance == 1) {
142 seq_printf(m, " -> [%p] ", entry->class);
143 print_name(m, entry->class);
/*
 * /proc/lockdep plumbing: the seq_operations table (entries not visible
 * in this fragment), the open() handler, and the file_operations table.
 * lockdep_open() starts a seq_file session and seeds m->private with the
 * first lock class on all_lock_classes (left untouched when the list is
 * empty).  NOTE(review): the "if (!res)" guard around the private setup
 * is not visible here -- confirm against the full source.
 */
152 static const struct seq_operations lockdep_ops = {
159 static int lockdep_open(struct inode *inode, struct file *file)
161 int res = seq_open(file, &lockdep_ops);
163 struct seq_file *m = file->private_data;
165 if (!list_empty(&all_lock_classes))
166 m->private = list_entry(all_lock_classes.next,
167 struct lock_class, lock_entry);
174 static const struct file_operations proc_lockdep_operations = {
175 .open = lockdep_open,
178 .release = seq_release,
/*
 * Print the CONFIG_DEBUG_LOCKDEP-only internal counters into
 * /proc/lockdep_stats: chain-lookup hit/miss counts, cyclic-check and
 * find-usage recursion counters, and the hardirq/softirq on/off event
 * counters (including the redundant-transition counts).
 * Compiled out entirely when CONFIG_DEBUG_LOCKDEP is not set (the #else/
 * #endif of the visible #ifdef is outside this fragment).
 */
181 static void lockdep_stats_debug_show(struct seq_file *m)
183 #ifdef CONFIG_DEBUG_LOCKDEP
/* Snapshot all eight irq-event counters up front so the report is
 * (roughly) self-consistent even while the counters keep changing. */
184 unsigned int hi1 = debug_atomic_read(&hardirqs_on_events),
185 hi2 = debug_atomic_read(&hardirqs_off_events),
186 hr1 = debug_atomic_read(&redundant_hardirqs_on),
187 hr2 = debug_atomic_read(&redundant_hardirqs_off),
188 si1 = debug_atomic_read(&softirqs_on_events),
189 si2 = debug_atomic_read(&softirqs_off_events),
190 sr1 = debug_atomic_read(&redundant_softirqs_on),
191 sr2 = debug_atomic_read(&redundant_softirqs_off);
193 seq_printf(m, " chain lookup misses: %11u\n",
194 debug_atomic_read(&chain_lookup_misses));
195 seq_printf(m, " chain lookup hits: %11u\n",
196 debug_atomic_read(&chain_lookup_hits));
197 seq_printf(m, " cyclic checks: %11u\n",
198 debug_atomic_read(&nr_cyclic_checks));
199 seq_printf(m, " cyclic-check recursions: %11u\n",
200 debug_atomic_read(&nr_cyclic_check_recursions));
201 seq_printf(m, " find-mask forwards checks: %11u\n",
202 debug_atomic_read(&nr_find_usage_forwards_checks));
203 seq_printf(m, " find-mask forwards recursions: %11u\n",
204 debug_atomic_read(&nr_find_usage_forwards_recursions));
205 seq_printf(m, " find-mask backwards checks: %11u\n",
206 debug_atomic_read(&nr_find_usage_backwards_checks));
207 seq_printf(m, " find-mask backwards recursions:%11u\n",
208 debug_atomic_read(&nr_find_usage_backwards_recursions));
210 seq_printf(m, " hardirq on events: %11u\n", hi1);
211 seq_printf(m, " hardirq off events: %11u\n", hi2);
212 seq_printf(m, " redundant hardirq ons: %11u\n", hr1);
213 seq_printf(m, " redundant hardirq offs: %11u\n", hr2);
214 seq_printf(m, " softirq on events: %11u\n", si1);
215 seq_printf(m, " softirq off events: %11u\n", si2);
216 seq_printf(m, " redundant softirq ons: %11u\n", sr1);
217 seq_printf(m, " redundant softirq offs: %11u\n", sr2);
/*
 * single_open() "show" callback for /proc/lockdep_stats.
 *
 * Pass 1: walk all_lock_classes once, bucketing every class by its
 * usage_mask bits (unused / uncategorized / {hard,soft,}irq safe, unsafe,
 * and the -read variants) and summing the forward-dependency counts.
 * Pass 2: print the totals alongside the global lockdep capacity limits
 * (MAX_LOCKDEP_KEYS, MAX_LOCKDEP_ENTRIES, MAX_LOCKDEP_CHAINS, ...).
 *
 * NOTE(review): fragmentary listing -- the increments for several of the
 * counters (lines between the visible "if" conditions), the divide-by-zero
 * guard around sum_forward_deps / nr_list_entries, and the final return
 * are not visible here.
 */
221 static int lockdep_stats_show(struct seq_file *m, void *v)
223 struct lock_class *class;
224 unsigned long nr_unused = 0, nr_uncategorized = 0,
225 nr_irq_safe = 0, nr_irq_unsafe = 0,
226 nr_softirq_safe = 0, nr_softirq_unsafe = 0,
227 nr_hardirq_safe = 0, nr_hardirq_unsafe = 0,
228 nr_irq_read_safe = 0, nr_irq_read_unsafe = 0,
229 nr_softirq_read_safe = 0, nr_softirq_read_unsafe = 0,
230 nr_hardirq_read_safe = 0, nr_hardirq_read_unsafe = 0,
231 sum_forward_deps = 0, factor = 0;
233 list_for_each_entry(class, &all_lock_classes, lock_entry) {
/* usage_mask == 0: never used; == LOCKF_USED: used but uncategorized. */
235 if (class->usage_mask == 0)
237 if (class->usage_mask == LOCKF_USED)
239 if (class->usage_mask & LOCKF_USED_IN_IRQ)
241 if (class->usage_mask & LOCKF_ENABLED_IRQS)
243 if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
245 if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS)
247 if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
249 if (class->usage_mask & LOCKF_ENABLED_HARDIRQS)
251 if (class->usage_mask & LOCKF_USED_IN_IRQ_READ)
253 if (class->usage_mask & LOCKF_ENABLED_IRQS_READ)
254 nr_irq_read_unsafe++;
255 if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ)
256 nr_softirq_read_safe++;
257 if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
258 nr_softirq_read_unsafe++;
259 if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ)
260 nr_hardirq_read_safe++;
261 if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
262 nr_hardirq_read_unsafe++;
264 sum_forward_deps += count_forward_deps(class);
/* Sanity-check lockdep's own unused-lock counter against our tally. */
266 #ifdef CONFIG_DEBUG_LOCKDEP
267 DEBUG_LOCKS_WARN_ON(debug_atomic_read(&nr_unused_locks) != nr_unused);
269 seq_printf(m, " lock-classes: %11lu [max: %lu]\n",
270 nr_lock_classes, MAX_LOCKDEP_KEYS);
271 seq_printf(m, " direct dependencies: %11lu [max: %lu]\n",
272 nr_list_entries, MAX_LOCKDEP_ENTRIES);
273 seq_printf(m, " indirect dependencies: %11lu\n",
277 * Total number of dependencies:
279 * All irq-safe locks may nest inside irq-unsafe locks,
280 * plus all the other known dependencies:
282 seq_printf(m, " all direct dependencies: %11lu\n",
283 nr_irq_unsafe * nr_irq_safe +
284 nr_hardirq_unsafe * nr_hardirq_safe +
288 * Estimated factor between direct and indirect
292 factor = sum_forward_deps / nr_list_entries;
294 #ifdef CONFIG_PROVE_LOCKING
295 seq_printf(m, " dependency chains: %11lu [max: %lu]\n",
296 nr_lock_chains, MAX_LOCKDEP_CHAINS);
299 #ifdef CONFIG_TRACE_IRQFLAGS
300 seq_printf(m, " in-hardirq chains: %11u\n",
302 seq_printf(m, " in-softirq chains: %11u\n",
305 seq_printf(m, " in-process chains: %11u\n",
307 seq_printf(m, " stack-trace entries: %11lu [max: %lu]\n",
308 nr_stack_trace_entries, MAX_STACK_TRACE_ENTRIES);
/* Worst-case chain-combination bound: product of per-context chains. */
309 seq_printf(m, " combined max dependencies: %11u\n",
310 (nr_hardirq_chains + 1) *
311 (nr_softirq_chains + 1) *
312 (nr_process_chains + 1)
314 seq_printf(m, " hardirq-safe locks: %11lu\n",
316 seq_printf(m, " hardirq-unsafe locks: %11lu\n",
318 seq_printf(m, " softirq-safe locks: %11lu\n",
320 seq_printf(m, " softirq-unsafe locks: %11lu\n",
322 seq_printf(m, " irq-safe locks: %11lu\n",
324 seq_printf(m, " irq-unsafe locks: %11lu\n",
327 seq_printf(m, " hardirq-read-safe locks: %11lu\n",
328 nr_hardirq_read_safe);
329 seq_printf(m, " hardirq-read-unsafe locks: %11lu\n",
330 nr_hardirq_read_unsafe);
331 seq_printf(m, " softirq-read-safe locks: %11lu\n",
332 nr_softirq_read_safe);
333 seq_printf(m, " softirq-read-unsafe locks: %11lu\n",
334 nr_softirq_read_unsafe);
335 seq_printf(m, " irq-read-safe locks: %11lu\n",
337 seq_printf(m, " irq-read-unsafe locks: %11lu\n",
340 seq_printf(m, " uncategorized locks: %11lu\n",
342 seq_printf(m, " unused locks: %11lu\n",
344 seq_printf(m, " max locking depth: %11u\n",
346 seq_printf(m, " max recursion depth: %11u\n",
347 max_recursion_depth);
348 lockdep_stats_debug_show(m);
349 seq_printf(m, " debug_locks: %11u\n",
/*
 * /proc/lockdep_stats: a single_open() file -- the whole report is
 * produced in one shot by lockdep_stats_show(), released with
 * single_release().
 */
355 static int lockdep_stats_open(struct inode *inode, struct file *file)
357 return single_open(file, lockdep_stats_show, NULL);
360 static const struct file_operations proc_lockdep_stats_operations = {
361 .open = lockdep_stats_open,
364 .release = single_release,
/*
 * CONFIG_LOCK_STAT support for /proc/lock_stat.
 *
 * lock_stat_data pairs a class with a snapshot of its statistics;
 * lock_stat_seq is the per-open iteration state: a fixed
 * MAX_LOCKDEP_KEYS-sized snapshot array plus [iter, iter_end) cursors.
 * Note: stats[MAX_LOCKDEP_KEYS] makes this a large vmalloc'd object
 * (see lock_stat_open()).
 */
367 #ifdef CONFIG_LOCK_STAT
369 struct lock_stat_data {
370 struct lock_class *class;
371 struct lock_class_stats stats;
374 struct lock_stat_seq {
375 struct lock_stat_data *iter;
376 struct lock_stat_data *iter_end;
377 struct lock_stat_data stats[MAX_LOCKDEP_KEYS];
381 * sort on absolute number of contentions
/*
 * sort() comparator: orders lock_stat_data entries by total contention
 * count (read + write waittime.nr).  NOTE(review): the actual comparison/
 * return lines are not visible in this fragment.
 */
383 static int lock_stat_cmp(const void *l, const void *r)
385 const struct lock_stat_data *dl = l, *dr = r;
386 unsigned long nl, nr;
388 nl = dl->stats.read_waittime.nr + dl->stats.write_waittime.nr;
389 nr = dr->stats.read_waittime.nr + dr->stats.write_waittime.nr;
/*
 * Small output helpers for the /proc/lock_stat table:
 *
 * seq_line()     - emit 'offset' leading characters (padding -- the padding
 *                  character itself is on a line not visible here) followed
 *                  by 'length' repetitions of 'c'.
 * snprint_time() - format a nanosecond-scale? s64 into "<n>.<2 digits>"
 *                  (units not visible here -- TODO confirm); divides by
 *                  1000 via do_div, rounding the remainder to 2 digits.
 *                  The XXX notes do_div is unsigned -- negative 'nr' is
 *                  not handled correctly.
 * seq_time()     - snprint_time() into a buffer, print right-aligned in
 *                  14 columns.
 * seq_lock_time()- print one lock_time as "count min max total".
 */
394 static void seq_line(struct seq_file *m, char c, int offset, int length)
398 for (i = 0; i < offset; i++)
400 for (i = 0; i < length; i++)
401 seq_printf(m, "%c", c);
405 static void snprint_time(char *buf, size_t bufsiz, s64 nr)
409 rem = do_div(nr, 1000); /* XXX: do_div_signed */
410 snprintf(buf, bufsiz, "%lld.%02d", (long long)nr, ((int)rem+5)/10);
413 static void seq_time(struct seq_file *m, s64 time)
417 snprint_time(num, sizeof(num), time);
418 seq_printf(m, " %14s", num);
421 static void seq_lock_time(struct seq_file *m, struct lock_time *lt)
423 seq_printf(m, "%14lu", lt->nr);
424 seq_time(m, lt->min);
425 seq_time(m, lt->max);
426 seq_time(m, lt->total);
/*
 * Print one class's full /proc/lock_stat record.
 *
 * Builds the display name (falling back to the key-derived name when the
 * class is anonymous), appending "#<version>" and "/<subclass>" suffixes
 * -- each suffix is snprintf'd with size 3, i.e. truncated to one digit
 * (the XXX below notes versions > 9 are cut off).  Then prints a "-W"
 * write-statistics row (suffix only used when a read row follows), an
 * optional "-R" read row, and for each recorded contention point a row
 * with its symbol name resolved by sprint_symbol().  Rows show bounce
 * counts plus waittime/holdtime lock_time quadruples.  A trailing dotted
 * seq_line() separates records.
 * NOTE(review): fragmentary listing -- the 'name' buffer declaration,
 * several braces and the early return after the zero-waittime check are
 * not visible here.
 */
429 static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
432 struct lock_class *class;
433 struct lock_class_stats *stats;
437 stats = &data->stats;
/* Reserve room in the name column for the "#v" / "/s" suffixes. */
440 if (class->name_version > 1)
441 namelen -= 2; /* XXX truncates versions > 9 */
446 char str[KSYM_NAME_LEN];
447 const char *key_name;
449 key_name = __get_key_name(class->key, str);
450 snprintf(name, namelen, "%s", key_name);
452 snprintf(name, namelen, "%s", class->name);
454 namelen = strlen(name);
455 if (class->name_version > 1) {
456 snprintf(name+namelen, 3, "#%d", class->name_version);
459 if (class->subclass) {
460 snprintf(name+namelen, 3, "/%d", class->subclass);
/* Write-side row; "-W" suffix only when a read row will follow. */
464 if (stats->write_holdtime.nr) {
465 if (stats->read_holdtime.nr)
466 seq_printf(m, "%38s-W:", name);
468 seq_printf(m, "%40s:", name);
470 seq_printf(m, "%14lu ", stats->bounces[bounce_contended_write]);
471 seq_lock_time(m, &stats->write_waittime);
472 seq_printf(m, " %14lu ", stats->bounces[bounce_acquired_write]);
473 seq_lock_time(m, &stats->write_holdtime);
477 if (stats->read_holdtime.nr) {
478 seq_printf(m, "%38s-R:", name);
479 seq_printf(m, "%14lu ", stats->bounces[bounce_contended_read]);
480 seq_lock_time(m, &stats->read_waittime);
481 seq_printf(m, " %14lu ", stats->bounces[bounce_acquired_read]);
482 seq_lock_time(m, &stats->read_holdtime);
/* Never contended: skip the contention-point breakdown entirely. */
486 if (stats->read_waittime.nr + stats->write_waittime.nr == 0)
489 if (stats->read_holdtime.nr)
492 for (i = 0; i < ARRAY_SIZE(class->contention_point); i++) {
493 char sym[KSYM_SYMBOL_LEN];
496 if (class->contention_point[i] == 0)
500 seq_line(m, '-', 40-namelen, namelen);
502 sprint_symbol(sym, class->contention_point[i]);
503 snprintf(ip, sizeof(ip), "[<%p>]",
504 (void *)class->contention_point[i]);
505 seq_printf(m, "%40s %14lu %29s %s\n", name,
506 stats->contention_point[i],
511 seq_line(m, '.', 0, 40 + 1 + 10 * (14 + 1));
/*
 * Print the /proc/lock_stat banner: version line, a dashed rule, the
 * column headings (the %14s arguments are on lines not visible here),
 * and a closing dashed rule.  Width 40 + 1 + 10*(14+1) matches the
 * 40-char name column plus ten 14-char data columns used by seq_stats().
 */
516 static void seq_header(struct seq_file *m)
518 seq_printf(m, "lock_stat version 0.2\n");
519 seq_line(m, '-', 0, 40 + 1 + 10 * (14 + 1));
520 seq_printf(m, "%40s %14s %14s %14s %14s %14s %14s %14s %14s "
533 seq_line(m, '-', 0, 40 + 1 + 10 * (14 + 1));
/*
 * seq_file iterator for /proc/lock_stat, walking the pre-sorted snapshot
 * array in m->private (a struct lock_stat_seq built by lock_stat_open()).
 *
 * ls_start(): SEQ_START_TOKEN for the header, otherwise index stats[*pos]
 * and stop at iter_end.
 * ls_next(): reset the cursor after the header token, then advance until
 * iter_end (the ++ and return lines are not visible in this fragment).
 * ls_stop(): no visible teardown.
 * ls_show(): header token prints via seq_header(); real entries presumably
 * go through seq_stats() -- TODO confirm, that call is not visible here.
 */
537 static void *ls_start(struct seq_file *m, loff_t *pos)
539 struct lock_stat_seq *data = m->private;
542 return SEQ_START_TOKEN;
544 data->iter = data->stats + *pos;
545 if (data->iter >= data->iter_end)
551 static void *ls_next(struct seq_file *m, void *v, loff_t *pos)
553 struct lock_stat_seq *data = m->private;
557 if (v == SEQ_START_TOKEN)
558 data->iter = data->stats;
564 if (data->iter == data->iter_end)
570 static void ls_stop(struct seq_file *m, void *v)
574 static int ls_show(struct seq_file *m, void *v)
576 if (v == SEQ_START_TOKEN)
584 static struct seq_operations lockstat_ops = {
/*
 * open() for /proc/lock_stat: vmalloc() a lock_stat_seq (large -- holds
 * MAX_LOCKDEP_KEYS stat slots), start a seq_file session, snapshot the
 * statistics of every class on all_lock_classes into the array, record
 * the end cursor, and sort the snapshot by contention count via
 * lock_stat_cmp().
 * NOTE(review): fragmentary listing -- the vmalloc() NULL check, the
 * assignment of 'data' to m->private, the error-path vfree(), and the
 * return are not visible here.  Also note the visible sort length is
 * "data->iter_end - data->iter", which depends on data->iter's value at
 * this point -- not checkable from this fragment.
 */
591 static int lock_stat_open(struct inode *inode, struct file *file)
594 struct lock_class *class;
595 struct lock_stat_seq *data = vmalloc(sizeof(struct lock_stat_seq));
600 res = seq_open(file, &lockstat_ops);
602 struct lock_stat_data *iter = data->stats;
603 struct seq_file *m = file->private_data;
606 list_for_each_entry(class, &all_lock_classes, lock_entry) {
608 iter->stats = lock_stats(class);
611 data->iter_end = iter;
613 sort(data->stats, data->iter_end - data->iter,
614 sizeof(struct lock_stat_data),
615 lock_stat_cmp, NULL);
/*
 * lock_stat_write(): writing to /proc/lock_stat clears the statistics of
 * every lock class (the first byte is fetched with get_user(); validation
 * of that byte, if any, is on lines not visible here).
 *
 * lock_stat_release(): free the per-open lock_stat_seq snapshot (the
 * vfree of seq->private is on a line not visible here) and finish the
 * seq_file session with seq_release().
 */
624 static ssize_t lock_stat_write(struct file *file, const char __user *buf,
625 size_t count, loff_t *ppos)
627 struct lock_class *class;
631 if (get_user(c, buf))
637 list_for_each_entry(class, &all_lock_classes, lock_entry)
638 clear_lock_stats(class);
643 static int lock_stat_release(struct inode *inode, struct file *file)
645 struct seq_file *seq = file->private_data;
649 return seq_release(inode, file);
/*
 * /proc/lock_stat file_operations: read via the seq_file machinery set up
 * in lock_stat_open(), write clears the stats, custom release frees the
 * snapshot.  (.read/.llseek entries are on lines not visible here.)
 */
652 static const struct file_operations proc_lock_stat_operations = {
653 .open = lock_stat_open,
654 .write = lock_stat_write,
657 .release = lock_stat_release,
659 #endif /* CONFIG_LOCK_STAT */
/*
 * Module init: create the root-readable (S_IRUSR) proc entries --
 * /proc/lockdep, /proc/lockdep_stats, and (when CONFIG_LOCK_STAT)
 * /proc/lock_stat.  Registered as a plain initcall.
 * NOTE(review): proc_create() return values are not checked in the
 * visible lines; the function's return statement is also not visible.
 */
661 static int __init lockdep_proc_init(void)
663 proc_create("lockdep", S_IRUSR, NULL, &proc_lockdep_operations);
664 proc_create("lockdep_stats", S_IRUSR, NULL,
665 &proc_lockdep_stats_operations);
667 #ifdef CONFIG_LOCK_STAT
668 proc_create("lock_stat", S_IRUSR, NULL, &proc_lock_stat_operations);
674 __initcall(lockdep_proc_init);