[pandora-kernel.git] / kernel / lockdep.c
index f32ca78..c9fefdb 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -36,6 +36,7 @@
 #include <linux/stacktrace.h>
 #include <linux/debug_locks.h>
 #include <linux/irqflags.h>
+#include <linux/utsname.h>
 
 #include <asm/sections.h>
 
@@ -121,8 +122,8 @@ static struct list_head chainhash_table[CHAINHASH_SIZE];
  * unique.
  */
 #define iterate_chain_key(key1, key2) \
-       (((key1) << MAX_LOCKDEP_KEYS_BITS/2) ^ \
-       ((key1) >> (64-MAX_LOCKDEP_KEYS_BITS/2)) ^ \
+       (((key1) << MAX_LOCKDEP_KEYS_BITS) ^ \
+       ((key1) >> (64-MAX_LOCKDEP_KEYS_BITS)) ^ \
        (key2))
 
 void lockdep_off(void)
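
The patched macro rotates the running 64-bit chain key by the full MAX_LOCKDEP_KEYS_BITS instead of half of it before xor-ing in the next class key. A minimal user-space sketch of the mixing step, assuming a value of 13 for MAX_LOCKDEP_KEYS_BITS and made-up class ids:

```c
#include <stdint.h>
#include <stdio.h>

/* Assumed value for illustration; the real constant lives in the lockdep headers. */
#define MAX_LOCKDEP_KEYS_BITS 13

/* Same rotate-and-xor step as the patched macro: rotate the running
 * 64-bit chain key left by MAX_LOCKDEP_KEYS_BITS, then xor in the
 * next lock class key. */
static uint64_t iterate_chain_key(uint64_t key1, uint64_t key2)
{
	return (key1 << MAX_LOCKDEP_KEYS_BITS) ^
	       (key1 >> (64 - MAX_LOCKDEP_KEYS_BITS)) ^
	       key2;
}

int main(void)
{
	/* Hypothetical class ids of three locks taken in order. */
	uint64_t ids[] = { 3, 17, 42 };
	uint64_t chain_key = 0;

	for (int i = 0; i < 3; i++)
		chain_key = iterate_chain_key(chain_key, ids[i]);
	/* A different acquisition order yields a different chain key. */
	printf("chain key: %#llx\n", (unsigned long long)chain_key);
	return 0;
}
```
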
@@ -169,22 +170,17 @@ EXPORT_SYMBOL(lockdep_internal);
  */
 static int class_filter(struct lock_class *class)
 {
+#if 0
+       /* Example */
        if (class->name_version == 1 &&
-                       !strcmp(class->name, "&rl->lock"))
+                       !strcmp(class->name, "lockname"))
                return 1;
        if (class->name_version == 1 &&
-                       !strcmp(class->name, "&ni->mrec_lock"))
+                       !strcmp(class->name, "&struct->lockfield"))
                return 1;
-       if (class->name_version == 1 &&
-                       !strcmp(class->name, "mft_ni_runlist_lock"))
-               return 1;
-       if (class->name_version == 1 &&
-                       !strcmp(class->name, "mft_ni_mrec_lock"))
-               return 1;
-       if (class->name_version == 1 &&
-                       !strcmp(class->name, "&vol->lcnbmp_lock"))
-               return 1;
-       return 0;
+#endif
+       /* Allow everything else. Returning 0 here would filter everything else. */
+       return 1;
 }
 #endif
 
@@ -229,7 +225,14 @@ static int save_trace(struct stack_trace *trace)
        trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
        trace->entries = stack_trace + nr_stack_trace_entries;
 
-       save_stack_trace(trace, NULL, 0, 3);
+       trace->skip = 3;
+       trace->all_contexts = 0;
+
+       /* Make sure to not recurse in case the unwinder needs to take locks. */
+       lockdep_off();
+       save_stack_trace(trace, NULL);
+       lockdep_on();
 
        trace->max_entries = trace->nr_entries;
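
The stack unwinder may itself need to take locks, so the trace capture is now bracketed with lockdep_off()/lockdep_on() to keep the checker from recursing into itself. A simplified single-threaded sketch of that guard (the kernel keeps the recursion count per task):

```c
#include <stdio.h>

/* Simplified recursion guard; the kernel keeps this count per task. */
static int lockdep_recursion;

static void lockdep_off(void) { lockdep_recursion++; }
static void lockdep_on(void)  { lockdep_recursion--; }

/* Stand-in for the unwinder: any lock it takes while the guard is
 * held is ignored by the checker instead of recursing into it. */
static void save_stack_trace_stub(void)
{
	printf("tracking %s\n", lockdep_recursion ? "off" : "on");
}

int main(void)
{
	lockdep_off();
	save_stack_trace_stub();	/* prints "tracking off" */
	lockdep_on();
	return 0;
}
```
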
 
@@ -408,23 +411,12 @@ static void lockdep_print_held_locks(struct task_struct *curr)
                print_lock(curr->held_locks + i);
        }
 }
-/*
- * Helper to print a nice hierarchy of lock dependencies:
- */
-static void print_spaces(int nr)
-{
-       int i;
-
-       for (i = 0; i < nr; i++)
-               printk("  ");
-}
 
 static void print_lock_class_header(struct lock_class *class, int depth)
 {
        int bit;
 
-       print_spaces(depth);
-       printk("->");
+       printk("%*s->", depth, "");
        print_lock_name(class);
        printk(" ops: %lu", class->ops);
        printk(" {\n");
@@ -433,17 +425,14 @@ static void print_lock_class_header(struct lock_class *class, int depth)
                if (class->usage_mask & (1 << bit)) {
                        int len = depth;
 
-                       print_spaces(depth);
-                       len += printk("   %s", usage_str[bit]);
+                       len += printk("%*s   %s", depth, "", usage_str[bit]);
                        len += printk(" at:\n");
                        print_stack_trace(class->usage_traces + bit, len);
                }
        }
-       print_spaces(depth);
-       printk(" }\n");
+       printk("%*s }\n", depth, "");
 
-       print_spaces(depth);
-       printk(" ... key      at: ");
+       printk("%*s ... key      at: ",depth,"");
        print_ip_sym((unsigned long)class->key);
 }
 
@@ -463,8 +452,7 @@ static void print_lock_dependencies(struct lock_class *class, int depth)
                DEBUG_LOCKS_WARN_ON(!entry->class);
                print_lock_dependencies(entry->class, depth + 1);
 
-               print_spaces(depth);
-               printk(" ... acquired at:\n");
+               printk("%*s ... acquired at:\n",depth,"");
                print_stack_trace(&entry->trace, 2);
                printk("\n");
        }
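
The print_spaces() helper is replaced throughout by printf's dynamic field width: "%*s" applied to an empty string emits exactly `depth` padding spaces in a single call. A stand-alone demonstration, with made-up lock names:

```c
#include <stdio.h>

/* "%*s" reads the minimum field width from an int argument; applied
 * to an empty string it prints exactly that many spaces. */
static void print_indented(int depth, const char *name)
{
	printf("%*s->%s\n", depth, "", name);
}

int main(void)
{
	/* Hypothetical dependency hierarchy at increasing depth. */
	print_indented(0, "&inode->i_mutex");
	print_indented(2, "&mm->mmap_sem");
	print_indented(4, "&rq->lock");
	return 0;
}
```
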
@@ -528,6 +516,13 @@ print_circular_bug_entry(struct lock_list *target, unsigned int depth)
        return 0;
 }
 
+static void print_kernel_version(void)
+{
+       printk("%s %.*s\n", init_utsname()->release,
+               (int)strcspn(init_utsname()->version, " "),
+               init_utsname()->version);
+}
+
 /*
  * When a circular dependency is detected, print the
  * header first:
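
print_kernel_version() pairs strcspn(), which measures the version string up to its first space, with the "%.*s" precision specifier, which prints only that many characters. A user-space equivalent, substituting uname(2) for the kernel's init_utsname():

```c
#include <stdio.h>
#include <string.h>
#include <sys/utsname.h>

int main(void)
{
	struct utsname u;

	if (uname(&u) != 0)
		return 1;
	/* Prints e.g. "2.6.18 #1": the release, then the version string
	 * truncated at its first space via strcspn() and "%.*s". */
	printf("%s %.*s\n", u.release,
	       (int)strcspn(u.version, " "), u.version);
	return 0;
}
```
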
@@ -544,6 +539,7 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth)
 
        printk("\n=======================================================\n");
        printk(  "[ INFO: possible circular locking dependency detected ]\n");
+       print_kernel_version();
        printk(  "-------------------------------------------------------\n");
        printk("%s/%d is trying to acquire lock:\n",
                curr->comm, curr->pid);
@@ -579,6 +575,8 @@ static noinline int print_circular_bug_tail(void)
        return 0;
 }
 
+#define RECURSION_LIMIT 40
+
 static int noinline print_infinite_recursion_bug(void)
 {
        __raw_spin_unlock(&hash_lock);
@@ -599,7 +597,7 @@ check_noncircular(struct lock_class *source, unsigned int depth)
        debug_atomic_inc(&nr_cyclic_check_recursions);
        if (depth > max_recursion_depth)
                max_recursion_depth = depth;
-       if (depth >= 20)
+       if (depth >= RECURSION_LIMIT)
                return print_infinite_recursion_bug();
        /*
         * Check this lock's dependency list:
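
The three graph walkers shared a magic depth cutoff of 20; it becomes one named RECURSION_LIMIT, doubled to 40. A sketch of the depth-bounded walk, assuming a toy singly linked chain in place of the real lock_list graph:

```c
#include <stdio.h>

#define RECURSION_LIMIT 40

/* Toy dependency node standing in for struct lock_list. */
struct node {
	struct node *next;
};

/* Depth-bounded walk in the style of check_noncircular(): give up
 * once the chain exceeds RECURSION_LIMIT instead of recursing forever. */
static int walk(struct node *n, unsigned int depth)
{
	if (depth >= RECURSION_LIMIT)
		return -1;	/* analogue of print_infinite_recursion_bug() */
	if (!n)
		return 0;	/* reached the end: no bug */
	return walk(n->next, depth + 1);
}

int main(void)
{
	struct node a = { NULL }, b = { &a };

	printf("walk: %d\n", walk(&b, 0));	/* prints "walk: 0" */
	return 0;
}
```
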
@@ -649,7 +647,7 @@ find_usage_forwards(struct lock_class *source, unsigned int depth)
 
        if (depth > max_recursion_depth)
                max_recursion_depth = depth;
-       if (depth >= 20)
+       if (depth >= RECURSION_LIMIT)
                return print_infinite_recursion_bug();
 
        debug_atomic_inc(&nr_find_usage_forwards_checks);
@@ -688,7 +686,7 @@ find_usage_backwards(struct lock_class *source, unsigned int depth)
 
        if (depth > max_recursion_depth)
                max_recursion_depth = depth;
-       if (depth >= 20)
+       if (depth >= RECURSION_LIMIT)
                return print_infinite_recursion_bug();
 
        debug_atomic_inc(&nr_find_usage_backwards_checks);
@@ -725,6 +723,7 @@ print_bad_irq_dependency(struct task_struct *curr,
        printk("\n======================================================\n");
        printk(  "[ INFO: %s-safe -> %s-unsafe lock order detected ]\n",
                irqclass, irqclass);
+       print_kernel_version();
        printk(  "------------------------------------------------------\n");
        printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
                curr->comm, curr->pid,
@@ -806,6 +805,7 @@ print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
 
        printk("\n=============================================\n");
        printk(  "[ INFO: possible recursive locking detected ]\n");
+       print_kernel_version();
        printk(  "---------------------------------------------\n");
        printk("%s/%d is trying to acquire lock:\n",
                curr->comm, curr->pid);
@@ -1081,7 +1081,8 @@ static int static_obj(void *obj)
         */
        for_each_possible_cpu(i) {
                start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
-               end   = (unsigned long) &__per_cpu_end   + per_cpu_offset(i);
+               end   = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
+                                       + per_cpu_offset(i);
 
                if ((addr >= start) && (addr < end))
                        return 1;
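
The old upper bound used __per_cpu_end offset by each CPU's base, which only describes CPU 0's area; the fix derives every CPU's end as PERCPU_ENOUGH_ROOM past that CPU's own start. A sketch of the corrected half-open range test, with an assumed area size and base address:

```c
#include <stdio.h>

/* Stand-in for PERCPU_ENOUGH_ROOM; the real size is set per arch. */
#define PERCPU_AREA_SIZE (64 * 1024)

/* An address belongs to a CPU's static per-cpu area if it falls in
 * [start, start + PERCPU_AREA_SIZE). */
static int in_percpu_area(unsigned long addr, unsigned long start)
{
	return addr >= start && addr < start + PERCPU_AREA_SIZE;
}

int main(void)
{
	unsigned long base = 0x100000;	/* hypothetical per-cpu base */

	printf("%d %d\n",
	       in_percpu_area(base + 100, base),		/* 1 */
	       in_percpu_area(base + PERCPU_AREA_SIZE, base));	/* 0 */
	return 0;
}
```
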
@@ -1116,15 +1117,13 @@ static int count_matching_names(struct lock_class *new_class)
        return count + 1;
 }
 
-extern void __error_too_big_MAX_LOCKDEP_SUBCLASSES(void);
-
 /*
  * Register a lock's class in the hash-table, if the class is not present
  * yet. Otherwise we look it up. We cache the result in the lock object
  * itself, so actual lookup of the hash should be once per lock object.
  */
 static inline struct lock_class *
-register_lock_class(struct lockdep_map *lock, unsigned int subclass)
+look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 {
        struct lockdep_subclass_key *key;
        struct list_head *hash_head;
@@ -1155,8 +1154,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass)
         * (or spin_lock_init()) call - which acts as the key. For static
         * locks we use the lock object itself as the key.
         */
-       if (sizeof(struct lock_class_key) > sizeof(struct lock_class))
-               __error_too_big_MAX_LOCKDEP_SUBCLASSES();
+       BUILD_BUG_ON(sizeof(struct lock_class_key) > sizeof(struct lock_class));
 
        key = lock->key->subkeys + subclass;
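
The extern-function trick relied on the compiler eliminating a dead call; BUILD_BUG_ON() makes the size check an explicit compile-time assertion. A self-contained sketch with a compatible one-line definition and hypothetical structure sizes:

```c
#include <stdio.h>

/* One-line stand-in for the kernel's BUILD_BUG_ON(): a negative array
 * size is a compile error, so the build fails iff the condition is true. */
#define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

struct lock_class_key { char pad[8]; };	/* hypothetical sizes */
struct lock_class     { char pad[64]; };

int main(void)
{
	/* Compiles only while the key fits inside a lock class; swap the
	 * two sizes above and compilation fails right here. */
	BUILD_BUG_ON(sizeof(struct lock_class_key) > sizeof(struct lock_class));
	printf("sizes ok\n");
	return 0;
}
```
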
 
@@ -1168,7 +1166,26 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass)
         */
        list_for_each_entry(class, hash_head, hash_entry)
                if (class->key == key)
-                       goto out_set;
+                       return class;
+
+       return NULL;
+}
+
+/*
+ * Register a lock's class in the hash-table, if the class is not present
+ * yet. Otherwise we look it up. We cache the result in the lock object
+ * itself, so actual lookup of the hash should be once per lock object.
+ */
+static inline struct lock_class *
+register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
+{
+       struct lockdep_subclass_key *key;
+       struct list_head *hash_head;
+       struct lock_class *class;
+
+       class = look_up_lock_class(lock, subclass);
+       if (likely(class))
+               return class;
 
        /*
         * Debug-check: all keys must be persistent!
@@ -1183,6 +1200,9 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass)
                return NULL;
        }
 
+       key = lock->key->subkeys + subclass;
+       hash_head = classhashentry(key);
+
        __raw_spin_lock(&hash_lock);
        /*
         * We have to do the hash-walk again, to avoid races
@@ -1229,8 +1249,8 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass)
 out_unlock_set:
        __raw_spin_unlock(&hash_lock);
 
-out_set:
-       lock->class[subclass] = class;
+       if (!subclass || force)
+               lock->class_cache = class;
 
        DEBUG_LOCKS_WARN_ON(class->subclass != subclass);
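
Registration is now split in two: look_up_lock_class() does the read-only hash walk, and register_lock_class() retries that lookup before taking hash_lock and allocating a new entry. A toy illustration of the pattern, using a flat string table in place of the real hash table and omitting all locking (names hypothetical):

```c
#include <stdio.h>
#include <string.h>

#define TABLE_SIZE 8

static const char *table[TABLE_SIZE];

/* Read-only lookup, analogous to look_up_lock_class(). */
static const char *look_up(const char *key)
{
	for (int i = 0; i < TABLE_SIZE; i++)
		if (table[i] && !strcmp(table[i], key))
			return table[i];
	return NULL;	/* miss: caller must register */
}

/* Slow path, analogous to register_lock_class(): try the lookup
 * first, only then claim a free slot. */
static const char *register_key(const char *key)
{
	const char *hit = look_up(key);

	if (hit)
		return hit;	/* fast path: already present */
	for (int i = 0; i < TABLE_SIZE; i++)
		if (!table[i])
			return table[i] = key;
	return NULL;		/* table full */
}

int main(void)
{
	register_key("&inode->i_mutex");
	printf("%s\n", look_up("&inode->i_mutex"));
	return 0;
}
```
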
 
@@ -1366,6 +1386,7 @@ print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
 
        printk("\n=========================================================\n");
        printk(  "[ INFO: possible irq lock inversion dependency detected ]\n");
+       print_kernel_version();
        printk(  "---------------------------------------------------------\n");
        printk("%s/%d just changed the state of lock:\n",
                curr->comm, curr->pid);
@@ -1460,6 +1481,7 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
 
        printk("\n=================================\n");
        printk(  "[ INFO: inconsistent lock state ]\n");
+       print_kernel_version();
        printk(  "---------------------------------\n");
 
        printk("inconsistent {%s} -> {%s} usage.\n",
@@ -1915,7 +1937,7 @@ void trace_softirqs_off(unsigned long ip)
  * Initialize a lock instance's lock-class mapping info:
  */
 void lockdep_init_map(struct lockdep_map *lock, const char *name,
-                     struct lock_class_key *key)
+                     struct lock_class_key *key, int subclass)
 {
        if (unlikely(!debug_locks))
                return;
@@ -1934,7 +1956,9 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
        }
        lock->name = name;
        lock->key = key;
-       memset(lock->class, 0, sizeof(lock->class[0])*MAX_LOCKDEP_SUBCLASSES);
+       lock->class_cache = NULL;
+       if (subclass)
+               register_lock_class(lock, subclass, 1);
 }
 
 EXPORT_SYMBOL_GPL(lockdep_init_map);
@@ -1948,8 +1972,8 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
                          unsigned long ip)
 {
        struct task_struct *curr = current;
+       struct lock_class *class = NULL;
        struct held_lock *hlock;
-       struct lock_class *class;
        unsigned int depth, id;
        int chain_head = 0;
        u64 chain_key;
@@ -1967,10 +1991,13 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
                return 0;
        }
 
-       class = lock->class[subclass];
-       /* not cached yet? */
+       if (!subclass)
+               class = lock->class_cache;
+       /*
+        * Not cached yet, or a nonzero subclass?
+        */
        if (unlikely(!class)) {
-               class = register_lock_class(lock, subclass);
+               class = register_lock_class(lock, subclass, 0);
                if (!class)
                        return 0;
        }
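
With the per-subclass class[] array gone, __lock_acquire() consults the single class_cache pointer only for subclass 0 and falls back to registration otherwise. A toy sketch of that fast path, with the slow path stubbed out:

```c
#include <stdio.h>
#include <stddef.h>

struct lock_class { const char *name; };

/* Toy lockdep_map: one cache slot, holding only the subclass-0 class. */
struct lockdep_map {
	struct lock_class *class_cache;
};

static struct lock_class the_class = { "example" };

/* Stand-in for the slow path that walks the hash and registers. */
static struct lock_class *register_lock_class_stub(struct lockdep_map *lock,
						   unsigned int subclass)
{
	/* Like the real code, only fill the cache for subclass 0. */
	if (!subclass)
		lock->class_cache = &the_class;
	return &the_class;
}

static struct lock_class *acquire_class(struct lockdep_map *lock,
					unsigned int subclass)
{
	struct lock_class *class = NULL;

	/* Fast path: only subclass 0 can hit the cache; a nonzero
	 * subclass always takes the slow path. */
	if (!subclass)
		class = lock->class_cache;
	if (!class)
		class = register_lock_class_stub(lock, subclass);
	return class;
}

int main(void)
{
	struct lockdep_map map = { NULL };

	printf("%s\n", acquire_class(&map, 0)->name);
	return 0;
}
```
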
@@ -2469,48 +2496,44 @@ void lockdep_free_key_range(void *start, unsigned long size)
 
 void lockdep_reset_lock(struct lockdep_map *lock)
 {
-       struct lock_class *class, *next, *entry;
+       struct lock_class *class, *next;
        struct list_head *head;
        unsigned long flags;
        int i, j;
 
        raw_local_irq_save(flags);
-       __raw_spin_lock(&hash_lock);
 
        /*
-        * Remove all classes this lock has:
+        * Remove all classes this lock might have:
+        */
+       for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
+               /*
+                * If the class exists we look it up and zap it:
+                */
+               class = look_up_lock_class(lock, j);
+               if (class)
+                       zap_class(class);
+       }
+       /*
+        * Debug check: in the end all mapped classes should
+        * be gone.
         */
+       __raw_spin_lock(&hash_lock);
        for (i = 0; i < CLASSHASH_SIZE; i++) {
                head = classhash_table + i;
                if (list_empty(head))
                        continue;
                list_for_each_entry_safe(class, next, head, hash_entry) {
-                       for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
-                               entry = lock->class[j];
-                               if (class == entry) {
-                                       zap_class(class);
-                                       lock->class[j] = NULL;
-                                       break;
-                               }
+                       if (unlikely(class == lock->class_cache)) {
+                               __raw_spin_unlock(&hash_lock);
+                               DEBUG_LOCKS_WARN_ON(1);
+                               goto out_restore;
                        }
                }
        }
-
-       /*
-        * Debug check: in the end all mapped classes should
-        * be gone.
-        */
-       for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
-               entry = lock->class[j];
-               if (!entry)
-                       continue;
-               __raw_spin_unlock(&hash_lock);
-               DEBUG_LOCKS_WARN_ON(1);
-               raw_local_irq_restore(flags);
-               return;
-       }
-
        __raw_spin_unlock(&hash_lock);
+
+out_restore:
        raw_local_irq_restore(flags);
 }
 
@@ -2571,7 +2594,7 @@ static inline int in_range(const void *start, const void *addr, const void *end)
 
 static void
 print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
-                    const void *mem_to)
+                    const void *mem_to, struct held_lock *hlock)
 {
        if (!debug_locks_off())
                return;
@@ -2583,6 +2606,7 @@ print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
        printk(  "-------------------------\n");
        printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
                curr->comm, curr->pid, mem_from, mem_to-1);
+       print_lock(hlock);
        lockdep_print_held_locks(curr);
 
        printk("\nstack backtrace:\n");
@@ -2616,7 +2640,7 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
                                        !in_range(mem_from, lock_to, mem_to))
                        continue;
 
-               print_freed_lock_bug(curr, mem_from, mem_to);
+               print_freed_lock_bug(curr, mem_from, mem_to, hlock);
                break;
        }
        local_irq_restore(flags);
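
print_freed_lock_bug() now also receives the offending held lock so the report can name it. The surrounding check relies on in_range(), an inclusive bounds test applied from both directions to detect overlap between a held lock and the block being freed. A user-space sketch with a hypothetical lock placed inside the freed block:

```c
#include <stdio.h>

/* Same inclusive bounds test as lockdep's in_range(). */
static int in_range(const void *start, const void *addr, const void *end)
{
	return addr >= start && addr <= end;
}

int main(void)
{
	char mem[64];			/* block about to be freed */
	char *lock = mem + 16;		/* hypothetical lock inside it */

	/* Overlap if the lock's start (or end) lies within the block. */
	printf("overlap: %d\n", in_range(mem, lock, mem + sizeof(mem) - 1));
	return 0;
}
```
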