Merge git://git.kernel.org/pub/scm/linux/kernel/git/tglx/linux-2.6-hrt
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 70ca4db..55fe0c7 100644
@@ -5,7 +5,8 @@
  *
  * Started by Ingo Molnar:
  *
- *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
  *
  * this code maps all the lock dependencies as they occur in a live kernel
  * and will warn about the following classes of locking bugs:
@@ -37,6 +38,7 @@
 #include <linux/debug_locks.h>
 #include <linux/irqflags.h>
 #include <linux/utsname.h>
+#include <linux/hash.h>
 
 #include <asm/sections.h>
 
@@ -149,6 +151,53 @@ static void lock_time_inc(struct lock_time *lt, s64 time)
        lt->nr++;
 }
 
+static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
+{
+       dst->min += src->min;
+       dst->max += src->max;
+       dst->total += src->total;
+       dst->nr += src->nr;
+}
+
+struct lock_class_stats lock_stats(struct lock_class *class)
+{
+       struct lock_class_stats stats;
+       int cpu, i;
+
+       memset(&stats, 0, sizeof(struct lock_class_stats));
+       for_each_possible_cpu(cpu) {
+               struct lock_class_stats *pcs =
+                       &per_cpu(lock_stats, cpu)[class - lock_classes];
+
+               for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
+                       stats.contention_point[i] += pcs->contention_point[i];
+
+               lock_time_add(&pcs->read_waittime, &stats.read_waittime);
+               lock_time_add(&pcs->write_waittime, &stats.write_waittime);
+
+               lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
+               lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);
+
+               for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
+                       stats.bounces[i] += pcs->bounces[i];
+       }
+
+       return stats;
+}
+
+void clear_lock_stats(struct lock_class *class)
+{
+       int cpu;
+
+       for_each_possible_cpu(cpu) {
+               struct lock_class_stats *cpu_stats =
+                       &per_cpu(lock_stats, cpu)[class - lock_classes];
+
+               memset(cpu_stats, 0, sizeof(struct lock_class_stats));
+       }
+       memset(class->contention_point, 0, sizeof(class->contention_point));
+}
+
 static struct lock_class_stats *get_lock_stats(struct lock_class *class)
 {
        return &get_cpu_var(lock_stats)[class - lock_classes];
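
The lock_stats() helper added above folds each CPU's lock_class_stats slot for a class into one summary that the lock statistics code can report. A minimal userspace sketch of the same fold pattern follows; it is not kernel code, the per-CPU array, struct fields and the fold_class_stats() name are illustrative only, and min/max handling is left out to keep it short.

    #include <stdio.h>
    #include <string.h>

    #define NR_CPUS_SIM 4
    #define NR_POINTS   4

    /* Cut-down stand-in for struct lock_class_stats. */
    struct class_stats_sim {
        unsigned long contention_point[NR_POINTS];
        unsigned long long wait_total;  /* like lock_time.total */
        unsigned long wait_nr;          /* like lock_time.nr */
    };

    /* One slot per simulated CPU, playing the role of per_cpu(lock_stats, cpu). */
    static struct class_stats_sim percpu_stats[NR_CPUS_SIM];

    /* Fold every CPU's slot into one summary, as lock_stats() does per class. */
    static struct class_stats_sim fold_class_stats(void)
    {
        struct class_stats_sim sum;
        int cpu, i;

        memset(&sum, 0, sizeof(sum));
        for (cpu = 0; cpu < NR_CPUS_SIM; cpu++) {
            for (i = 0; i < NR_POINTS; i++)
                sum.contention_point[i] += percpu_stats[cpu].contention_point[i];
            sum.wait_total += percpu_stats[cpu].wait_total;
            sum.wait_nr += percpu_stats[cpu].wait_nr;
        }
        return sum;
    }

    int main(void)
    {
        percpu_stats[0].wait_nr = 2;
        percpu_stats[3].wait_nr = 5;
        printf("waits seen across all CPUs: %lu\n", fold_class_stats().wait_nr);
        return 0;
    }
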
@@ -194,8 +243,7 @@ LIST_HEAD(all_lock_classes);
  */
 #define CLASSHASH_BITS         (MAX_LOCKDEP_KEYS_BITS - 1)
 #define CLASSHASH_SIZE         (1UL << CLASSHASH_BITS)
-#define CLASSHASH_MASK         (CLASSHASH_SIZE - 1)
-#define __classhashfn(key)     ((((unsigned long)key >> CLASSHASH_BITS) + (unsigned long)key) & CLASSHASH_MASK)
+#define __classhashfn(key)     hash_long((unsigned long)key, CLASSHASH_BITS)
 #define classhashentry(key)    (classhash_table + __classhashfn((key)))
 
 static struct list_head classhash_table[CLASSHASH_SIZE];
@@ -206,9 +254,7 @@ static struct list_head classhash_table[CLASSHASH_SIZE];
  */
 #define CHAINHASH_BITS         (MAX_LOCKDEP_CHAINS_BITS-1)
 #define CHAINHASH_SIZE         (1UL << CHAINHASH_BITS)
-#define CHAINHASH_MASK         (CHAINHASH_SIZE - 1)
-#define __chainhashfn(chain) \
-               (((chain >> CHAINHASH_BITS) + chain) & CHAINHASH_MASK)
+#define __chainhashfn(chain)   hash_long(chain, CHAINHASH_BITS)
 #define chainhashentry(chain)  (chainhash_table + __chainhashfn((chain)))
 
 static struct list_head chainhash_table[CHAINHASH_SIZE];
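
Both open-coded hash functions above are replaced with hash_long() from <linux/hash.h>, which multiplies the key by a golden-ratio constant and keeps the top CLASSHASH_BITS/CHAINHASH_BITS bits, spreading keys across buckets better than the old shift-and-add. A rough userspace illustration of the idea; hash_long_sim() and its constant are a simplified stand-in, not the exact kernel implementation.

    #include <stdio.h>

    #define CLASSHASH_BITS_SIM  6
    #define CLASSHASH_SIZE_SIM  (1UL << CLASSHASH_BITS_SIM)

    /*
     * Simplified 64-bit golden-ratio hash: multiply, then keep the top
     * 'bits' bits.  <linux/hash.h> follows the same multiplicative scheme.
     */
    static unsigned long hash_long_sim(unsigned long long val, unsigned int bits)
    {
        return (unsigned long)((val * 0x9e37fffffffc0001ULL) >> (64 - bits));
    }

    int main(void)
    {
        static int dummy_key;   /* stands in for a static lock_class_key */
        unsigned long bucket;

        /* A key address becomes an index into classhash_table[]. */
        bucket = hash_long_sim((unsigned long long)(unsigned long)&dummy_key,
                               CLASSHASH_BITS_SIM);
        printf("key %p hashes to bucket %lu of %lu\n",
               (void *)&dummy_key, bucket, CLASSHASH_SIZE_SIM);
        return 0;
    }
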
@@ -329,6 +375,11 @@ unsigned int max_recursion_depth;
  * about it later on, in lockdep_info().
  */
 static int lockdep_init_error;
+static unsigned long lockdep_init_trace_data[20];
+static struct stack_trace lockdep_init_trace = {
+       .max_entries = ARRAY_SIZE(lockdep_init_trace_data),
+       .entries = lockdep_init_trace_data,
+};
 
 /*
  * Various lockdep statistics:
@@ -460,11 +511,11 @@ static void lockdep_print_held_locks(struct task_struct *curr)
        int i, depth = curr->lockdep_depth;
 
        if (!depth) {
-               printk("no locks held by %s/%d.\n", curr->comm, curr->pid);
+               printk("no locks held by %s/%d.\n", curr->comm, task_pid_nr(curr));
                return;
        }
        printk("%d lock%s held by %s/%d:\n",
-               depth, depth > 1 ? "s" : "", curr->comm, curr->pid);
+               depth, depth > 1 ? "s" : "", curr->comm, task_pid_nr(curr));
 
        for (i = 0; i < depth; i++) {
                printk(" #%d: ", i);
@@ -616,6 +667,7 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
        if (unlikely(!lockdep_initialized)) {
                lockdep_init();
                lockdep_init_error = 1;
+               save_stack_trace(&lockdep_init_trace);
        }
 #endif
 
@@ -632,7 +684,8 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
         * (or spin_lock_init()) call - which acts as the key. For static
         * locks we use the lock object itself as the key.
         */
-       BUILD_BUG_ON(sizeof(struct lock_class_key) > sizeof(struct lock_class));
+       BUILD_BUG_ON(sizeof(struct lock_class_key) >
+                       sizeof(struct lockdep_map));
 
        key = lock->key->subkeys + subclass;
 
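
The corrected BUILD_BUG_ON asserts at compile time that a lock_class_key fits inside a lockdep_map rather than inside a lock_class: as the comment above notes, for static locks the lock object itself serves as the key, so the key must never be larger than the map embedded in the lock. A freestanding sketch of that compile-time size check; the macro and struct names here are illustrative, not the kernel's.

    /* Fails to compile if cond is non-zero, via a negative-size array type. */
    #define COMPILE_ASSERT_ZERO(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

    struct key_sim { char subkeys[8]; };        /* one byte per subclass */
    struct map_sim { struct key_sim *key; const char *name; void *class_cache; };

    int main(void)
    {
        /* Mirrors: BUILD_BUG_ON(sizeof(struct lock_class_key) > sizeof(struct lockdep_map)) */
        COMPILE_ASSERT_ZERO(sizeof(struct key_sim) > sizeof(struct map_sim));
        return 0;
    }
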
@@ -642,9 +695,12 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
         * We can walk the hash lockfree, because the hash only
         * grows, and we are careful when adding entries to the end:
         */
-       list_for_each_entry(class, hash_head, hash_entry)
-               if (class->key == key)
+       list_for_each_entry(class, hash_head, hash_entry) {
+               if (class->key == key) {
+                       WARN_ON_ONCE(class->name != lock->name);
                        return class;
+               }
+       }
 
        return NULL;
 }
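
The lookup loop now also sanity-checks that the class found by key still carries the name recorded in the lockdep_map; a mismatch suggests the same key is being reused for a lock registered under a different name. A small userspace sketch of a lookup with that kind of sanity check, using a plain array instead of the hash list and pointer comparison for the name, as the kernel check does; all names are illustrative.

    #include <stdio.h>

    struct class_sim {
        const void *key;
        const char *name;
    };

    static struct class_sim classes_sim[] = {
        { .key = (void *)0x1000, .name = "dev->lock" },
        { .key = (void *)0x2000, .name = "q->queue_lock" },
    };

    /* Find a class by key and complain if it was registered under another name. */
    static struct class_sim *lookup_class_sim(const void *key, const char *name)
    {
        unsigned int i;

        for (i = 0; i < sizeof(classes_sim) / sizeof(classes_sim[0]); i++) {
            if (classes_sim[i].key == key) {
                if (classes_sim[i].name != name)
                    fprintf(stderr, "lockdep: name mismatch for key %p\n", key);
                return &classes_sim[i];
            }
        }
        return NULL;
    }

    int main(void)
    {
        /* Same key, different name string: triggers the warning path. */
        lookup_class_sim((void *)0x1000, "some_other_name");
        return 0;
    }
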
@@ -848,7 +904,7 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth)
        print_kernel_version();
        printk(  "-------------------------------------------------------\n");
        printk("%s/%d is trying to acquire lock:\n",
-               curr->comm, curr->pid);
+               curr->comm, task_pid_nr(curr));
        print_lock(check_source);
        printk("\nbut task is already holding lock:\n");
        print_lock(check_target);
@@ -1029,7 +1085,7 @@ print_bad_irq_dependency(struct task_struct *curr,
        print_kernel_version();
        printk(  "------------------------------------------------------\n");
        printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
-               curr->comm, curr->pid,
+               curr->comm, task_pid_nr(curr),
                curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
                curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
                curr->hardirqs_enabled,
@@ -1181,7 +1237,7 @@ print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
        print_kernel_version();
        printk(  "---------------------------------------------\n");
        printk("%s/%d is trying to acquire lock:\n",
-               curr->comm, curr->pid);
+               curr->comm, task_pid_nr(curr));
        print_lock(next);
        printk("\nbut task is already holding lock:\n");
        print_lock(prev);
@@ -1465,7 +1521,7 @@ cache_hit:
 }
 
 static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
-               struct held_lock *hlock, int chain_head)
+               struct held_lock *hlock, int chain_head, u64 chain_key)
 {
        /*
         * Trylock needs to maintain the stack of held locks, but it
@@ -1478,7 +1534,7 @@ static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
         * graph_lock for us)
         */
        if (!hlock->trylock && (hlock->check == 2) &&
-                       lookup_chain_cache(curr->curr_chain_key, hlock->class)) {
+                       lookup_chain_cache(chain_key, hlock->class)) {
                /*
                 * Check whether last held lock:
                 *
@@ -1520,7 +1576,7 @@ static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
 #else
 static inline int validate_chain(struct task_struct *curr,
                struct lockdep_map *lock, struct held_lock *hlock,
-               int chain_head)
+               int chain_head, u64 chain_key)
 {
        return 1;
 }
@@ -1585,7 +1641,7 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
                usage_str[prev_bit], usage_str[new_bit]);
 
        printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
-               curr->comm, curr->pid,
+               curr->comm, task_pid_nr(curr),
                trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
                trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
                trace_hardirqs_enabled(curr),
@@ -1638,7 +1694,7 @@ print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
        print_kernel_version();
        printk(  "---------------------------------------------------------\n");
        printk("%s/%d just changed the state of lock:\n",
-               curr->comm, curr->pid);
+               curr->comm, task_pid_nr(curr));
        print_lock(this);
        if (forwards)
                printk("but this lock took another, %s-irq-unsafe lock in the past:\n", irqclass);
@@ -2278,6 +2334,9 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
        lock->name = name;
        lock->key = key;
        lock->class_cache = NULL;
+#ifdef CONFIG_LOCK_STAT
+       lock->cpu = raw_smp_processor_id();
+#endif
        if (subclass)
                register_lock_class(lock, subclass, 1);
 }
@@ -2391,11 +2450,11 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
                chain_head = 1;
        }
        chain_key = iterate_chain_key(chain_key, id);
-       curr->curr_chain_key = chain_key;
 
-       if (!validate_chain(curr, lock, hlock, chain_head))
+       if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
                return 0;
 
+       curr->curr_chain_key = chain_key;
        curr->lockdep_depth++;
        check_chain_key(curr);
 #ifdef CONFIG_DEBUG_LOCKDEP
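
Together with the validate_chain() signature change above, the acquire path now computes the candidate chain key, hands it to validation, and only commits it to curr->curr_chain_key once the chain has been validated; bailing out of validation can therefore no longer leave curr_chain_key updated while lockdep_depth is not. A compact sketch of that compute/validate/commit ordering, in userspace, with hypothetical names and a trivial hash standing in for iterate_chain_key().

    #include <stdio.h>

    struct task_sim {
        unsigned long long curr_chain_key;
        int lockdep_depth;
    };

    static unsigned long long iterate_key_sim(unsigned long long key, int id)
    {
        return key * 31 + id;           /* stand-in for iterate_chain_key() */
    }

    static int validate_chain_sim(struct task_sim *t, unsigned long long chain_key)
    {
        /* Works on the candidate key only; t->curr_chain_key is untouched. */
        return chain_key != 0;
    }

    static int lock_acquire_sim(struct task_sim *t, int class_id)
    {
        unsigned long long chain_key = iterate_key_sim(t->curr_chain_key, class_id);

        if (!validate_chain_sim(t, chain_key))
            return 0;                   /* nothing committed on failure */

        /* Commit only after validation succeeded. */
        t->curr_chain_key = chain_key;
        t->lockdep_depth++;
        return 1;
    }

    int main(void)
    {
        struct task_sim t = { .curr_chain_key = 1 };

        lock_acquire_sim(&t, 7);
        printf("depth=%d key=%llu\n", t.lockdep_depth, t.curr_chain_key);
        return 0;
    }
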
@@ -2428,7 +2487,7 @@ print_unlock_inbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
        printk(  "[ BUG: bad unlock balance detected! ]\n");
        printk(  "-------------------------------------\n");
        printk("%s/%d is trying to release lock (",
-               curr->comm, curr->pid);
+               curr->comm, task_pid_nr(curr));
        print_lockdep_cache(lock);
        printk(") at:\n");
        print_ip_sym(ip);
@@ -2678,7 +2737,7 @@ print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
        printk(  "[ BUG: bad contention detected! ]\n");
        printk(  "---------------------------------\n");
        printk("%s/%d is trying to contend lock (",
-               curr->comm, curr->pid);
+               curr->comm, task_pid_nr(curr));
        print_lockdep_cache(lock);
        printk(") at:\n");
        print_ip_sym(ip);
@@ -2728,6 +2787,8 @@ found_it:
        stats = get_lock_stats(hlock->class);
        if (point < ARRAY_SIZE(stats->contention_point))
                stats->contention_point[i]++;
+       if (lock->cpu != smp_processor_id())
+               stats->bounces[bounce_contended + !!hlock->read]++;
        put_lock_stats(stats);
 }
 
@@ -2739,8 +2800,8 @@ __lock_acquired(struct lockdep_map *lock)
        struct lock_class_stats *stats;
        unsigned int depth;
        u64 now;
-       s64 waittime;
-       int i;
+       s64 waittime = 0;
+       int i, cpu;
 
        depth = curr->lockdep_depth;
        if (DEBUG_LOCKS_WARN_ON(!depth))
@@ -2762,19 +2823,25 @@ __lock_acquired(struct lockdep_map *lock)
        return;
 
 found_it:
-       if (!hlock->waittime_stamp)
-               return;
-
-       now = sched_clock();
-       waittime = now - hlock->waittime_stamp;
-       hlock->holdtime_stamp = now;
+       cpu = smp_processor_id();
+       if (hlock->waittime_stamp) {
+               now = sched_clock();
+               waittime = now - hlock->waittime_stamp;
+               hlock->holdtime_stamp = now;
+       }
 
        stats = get_lock_stats(hlock->class);
-       if (hlock->read)
-               lock_time_inc(&stats->read_waittime, waittime);
-       else
-               lock_time_inc(&stats->write_waittime, waittime);
+       if (waittime) {
+               if (hlock->read)
+                       lock_time_inc(&stats->read_waittime, waittime);
+               else
+                       lock_time_inc(&stats->write_waittime, waittime);
+       }
+       if (lock->cpu != cpu)
+               stats->bounces[bounce_acquired + !!hlock->read]++;
        put_lock_stats(stats);
+
+       lock->cpu = cpu;
 }
 
 void lock_contended(struct lockdep_map *lock, unsigned long ip)
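
The lock->cpu field set up in lockdep_init_map(), the bounce_contended count in __lock_contended() and the bounce_acquired count plus lock->cpu update above combine into a simple cache-line bounce heuristic: remember which CPU last acquired the lock, and count an event whenever a different CPU contends for or acquires it. Below is a userspace sketch of that bookkeeping together with the wait-time measurement; the names and the clock are stand-ins, not the kernel's.

    #include <stdio.h>
    #include <time.h>

    enum { BOUNCE_CONTENDED, BOUNCE_ACQUIRED, NR_BOUNCE };

    struct lock_sim {
        int cpu;                        /* CPU that last acquired the lock */
        unsigned long bounces[NR_BOUNCE];
        unsigned long long wait_total_ns;
    };

    static unsigned long long now_ns(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);    /* stand-in for sched_clock() */
        return (unsigned long long)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
    }

    static void contended_sim(struct lock_sim *l, int cpu, unsigned long long *stamp)
    {
        *stamp = now_ns();              /* like hlock->waittime_stamp */
        if (l->cpu != cpu)
            l->bounces[BOUNCE_CONTENDED]++;
    }

    static void acquired_sim(struct lock_sim *l, int cpu, unsigned long long stamp)
    {
        if (stamp)                      /* only if we actually waited */
            l->wait_total_ns += now_ns() - stamp;
        if (l->cpu != cpu)
            l->bounces[BOUNCE_ACQUIRED]++;
        l->cpu = cpu;                   /* new "home" CPU for the lock */
    }

    int main(void)
    {
        struct lock_sim l = { .cpu = 0 };
        unsigned long long stamp = 0;

        contended_sim(&l, 1, &stamp);   /* CPU 1 hits a lock last held on CPU 0 */
        acquired_sim(&l, 1, stamp);
        printf("contended bounces=%lu acquired bounces=%lu\n",
               l.bounces[BOUNCE_CONTENDED], l.bounces[BOUNCE_ACQUIRED]);
        return 0;
    }
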
@@ -2979,8 +3046,11 @@ void __init lockdep_info(void)
                sizeof(struct held_lock) * MAX_LOCK_DEPTH);
 
 #ifdef CONFIG_DEBUG_LOCKDEP
-       if (lockdep_init_error)
-               printk("WARNING: lockdep init error! Arch code didnt call lockdep_init() early enough?\n");
+       if (lockdep_init_error) {
+               printk("WARNING: lockdep init error! Arch code didn't call lockdep_init() early enough?\n");
+               printk("Call stack leading to lockdep invocation was:\n");
+               print_stack_trace(&lockdep_init_trace, 0);
+       }
 #endif
 }
 
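
The DEBUG_LOCKDEP branch now prints the call chain captured earlier by save_stack_trace() into lockdep_init_trace when lockdep was used before the architecture called lockdep_init(), so the offending early caller can be identified instead of merely reporting that it happened. A userspace analogy using glibc's backtrace facilities, capturing once at first-error time and reporting later; the helper names are invented for the sketch.

    #include <execinfo.h>
    #include <stdio.h>

    static void *init_trace[20];        /* like lockdep_init_trace_data[] */
    static int   init_trace_depth;
    static int   init_error;

    /* Called from the "too early" path: remember who got us here, once. */
    static void record_init_error(void)
    {
        if (!init_error) {
            init_error = 1;
            init_trace_depth = backtrace(init_trace, 20);
        }
    }

    /* Called later, from the equivalent of lockdep_info(). */
    static void report_init_error(void)
    {
        if (init_error) {
            fprintf(stderr, "WARNING: early lockdep use, called from:\n");
            backtrace_symbols_fd(init_trace, init_trace_depth, 2);
        }
    }

    int main(void)
    {
        record_init_error();
        report_init_error();
        return 0;
    }
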
@@ -3002,7 +3072,7 @@ print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
        printk(  "[ BUG: held lock freed! ]\n");
        printk(  "-------------------------\n");
        printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
-               curr->comm, curr->pid, mem_from, mem_to-1);
+               curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
        print_lock(hlock);
        lockdep_print_held_locks(curr);
 
@@ -3055,7 +3125,7 @@ static void print_held_locks_bug(struct task_struct *curr)
        printk(  "[ BUG: lock held at task exit time! ]\n");
        printk(  "-------------------------------------\n");
        printk("%s/%d is exiting with locks still held!\n",
-               curr->comm, curr->pid);
+               curr->comm, task_pid_nr(curr));
        lockdep_print_held_locks(curr);
 
        printk("\nstack backtrace:\n");
@@ -3129,3 +3199,19 @@ void debug_show_held_locks(struct task_struct *task)
 }
 
 EXPORT_SYMBOL_GPL(debug_show_held_locks);
+
+void lockdep_sys_exit(void)
+{
+       struct task_struct *curr = current;
+
+       if (unlikely(curr->lockdep_depth)) {
+               if (!debug_locks_off())
+                       return;
+               printk("\n================================================\n");
+               printk(  "[ BUG: lock held when returning to user space! ]\n");
+               printk(  "------------------------------------------------\n");
+               printk("%s/%d is leaving the kernel with locks still held!\n",
+                               curr->comm, curr->pid);
+               lockdep_print_held_locks(curr);
+       }
+}
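
lockdep_sys_exit() gives architectures a hook to call on the syscall-return path: if the task is about to re-enter user space while lockdep still tracks held locks, that is almost certainly a leaked lock, so the function shouts and disables lock debugging via debug_locks_off(). A tiny userspace model of the check; the syscall_return_sim() name and the depth counter are illustrative only.

    #include <stdio.h>

    static int lockdep_depth_sim;       /* incremented on lock, decremented on unlock */

    static void syscall_return_sim(const char *comm, int pid)
    {
        /* Mirrors lockdep_sys_exit(): locks still held at this point are leaks. */
        if (lockdep_depth_sim)
            fprintf(stderr,
                    "BUG: %s/%d is leaving the kernel with %d lock(s) still held!\n",
                    comm, pid, lockdep_depth_sim);
    }

    int main(void)
    {
        lockdep_depth_sim = 1;          /* "forgot" to unlock */
        syscall_return_sim("demo", 1234);
        return 0;
    }
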