Merge branch 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 1e48f1c..e69434b 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -96,8 +96,13 @@ static int graph_lock(void)
 
 static inline int graph_unlock(void)
 {
-       if (debug_locks && !arch_spin_is_locked(&lockdep_lock))
+       if (debug_locks && !arch_spin_is_locked(&lockdep_lock)) {
+               /*
+                * The lockdep graph lock isn't locked while we expect it to
+                * be, we're confused now, bye!
+                */
                return DEBUG_LOCKS_WARN_ON(1);
+       }
 
        current->lockdep_recursion--;
        arch_spin_unlock(&lockdep_lock);
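Every comment added by this patch sits next to a DEBUG_LOCKS_WARN_ON() or WARN_ON() self-check. As a reminder of what the former does, here is a paraphrased sketch of the macro from include/linux/debug_locks.h (not the verbatim kernel source, which may differ slightly between versions):

    #define DEBUG_LOCKS_WARN_ON(c)                                      \
    ({                                                                  \
            int __ret = 0;                                              \
                                                                        \
            if (!oops_in_progress && unlikely(c)) {                     \
                    /* turn lockdep off so it cannot recurse or spam */ \
                    if (debug_locks_off() && !debug_locks_silent)       \
                            WARN_ON(1);                                 \
                    __ret = 1;      /* tell the caller to bail out */   \
            }                                                           \
            __ret;                                                      \
    })

So each DEBUG_LOCKS_WARN_ON() below both reports the inconsistency (once) and returns non-zero so the surrounding function can abandon the operation.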
@@ -134,6 +139,9 @@ static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
 static inline struct lock_class *hlock_class(struct held_lock *hlock)
 {
        if (!hlock->class_idx) {
+               /*
+                * Someone passed in garbage, we give up.
+                */
                DEBUG_LOCKS_WARN_ON(1);
                return NULL;
        }
@@ -687,6 +695,10 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
         */
        list_for_each_entry(class, hash_head, hash_entry) {
                if (class->key == key) {
+                       /*
+                        * Huh! same key, different name? Did someone trample
+                        * on some memory? We're most confused.
+                        */
                        WARN_ON_ONCE(class->name != lock->name);
                        return class;
                }
@@ -800,6 +812,10 @@ out_unlock_set:
        else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
                lock->class_cache[subclass] = class;
 
+       /*
+        * Hash collision, did we smoke some? We found a class with a matching
+        * hash but the subclass -- which is hashed in -- didn't match.
+        */
        if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
                return NULL;
 
@@ -926,7 +942,7 @@ static inline void mark_lock_accessed(struct lock_list *lock,
        unsigned long nr;
 
        nr = lock - list_entries;
-       WARN_ON(nr >= nr_list_entries);
+       WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
        lock->parent = parent;
        lock->class->dep_gen_id = lockdep_dependency_gen_id;
 }
@@ -936,7 +952,7 @@ static inline unsigned long lock_accessed(struct lock_list *lock)
        unsigned long nr;
 
        nr = lock - list_entries;
-       WARN_ON(nr >= nr_list_entries);
+       WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
        return lock->class->dep_gen_id == lockdep_dependency_gen_id;
 }
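The two "Out-of-bounds, input fail" checks above work because every struct lock_list handed to these helpers must point into one static array, so the pointer difference from the start of that array is the entry's index. A minimal user-space sketch of the same idiom (the array name and size here are illustrative, not lockdep's):

    #include <stdio.h>

    struct lock_list { int dummy; };

    #define MAX_ENTRIES 16
    static struct lock_list entries[MAX_ENTRIES];

    /* An entry is sane only if it points into entries[]. */
    static int entry_index_ok(const struct lock_list *e)
    {
            unsigned long nr = e - entries; /* pointer difference == array index */

            return nr < MAX_ENTRIES;        /* out of bounds => caller passed garbage */
    }

    int main(void)
    {
            printf("%d %d\n", entry_index_ok(&entries[3]),
                   entry_index_ok(&entries[MAX_ENTRIES])); /* prints: 1 0 */
            return 0;
    }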
 
@@ -1197,6 +1213,9 @@ static noinline int print_bfs_bug(int ret)
        if (!debug_locks_off_graph_unlock())
                return 0;
 
+       /*
+        * Breadth-first-search failed, graph got corrupted?
+        */
        WARN(1, "lockdep bfs error:%d\n", ret);
 
        return 0;
@@ -1947,6 +1966,11 @@ out_bug:
        if (!debug_locks_off_graph_unlock())
                return 0;
 
+       /*
+        * Clearly we shouldn't be here, but since we made it this far we
+        * can reliably say we messed up our state. See the two gotos above
+        * for the reasons why we could possibly end up here.
+        */
        WARN_ON(1);
 
        return 0;
@@ -1978,6 +2002,11 @@ static inline int lookup_chain_cache(struct task_struct *curr,
        struct held_lock *hlock_curr, *hlock_next;
        int i, j;
 
+       /*
+        * We might need to take the graph lock, so ensure we've got IRQs
+        * disabled to make this an IRQ-safe lock; for recursion reasons
+        * lockdep won't complain about its own locking errors.
+        */
        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
                return 0;
        /*
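For context on the irqs_disabled() expectation above: lockdep's entry points mask interrupts around the whole operation, precisely so an interrupt can never re-enter lockdep while the graph lock (an arch_spinlock_t) is held. A paraphrased sketch of that calling pattern, modelled on lock_acquire() from this era (not the verbatim source):

    void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
                      int trylock, int read, int check,
                      struct lockdep_map *nest_lock, unsigned long ip)
    {
            unsigned long flags;

            raw_local_irq_save(flags);      /* no IRQs => no re-entry into lockdep */
            check_flags(flags);

            current->lockdep_recursion = 1; /* lockdep ignores its own locking */
            __lock_acquire(lock, subclass, trylock, read, check,
                           irqs_disabled_flags(flags), nest_lock, ip, 0);
            current->lockdep_recursion = 0;
            raw_local_irq_restore(flags);
    }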
@@ -2129,6 +2158,10 @@ static void check_chain_key(struct task_struct *curr)
                hlock = curr->held_locks + i;
                if (chain_key != hlock->prev_chain_key) {
                        debug_locks_off();
+                       /*
+                        * We got mighty confused, our chain keys don't match
+                        * what we expect; did someone trample on our task state?
+                        */
                        WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
                                curr->lockdep_depth, i,
                                (unsigned long long)chain_key,
@@ -2136,6 +2169,9 @@ static void check_chain_key(struct task_struct *curr)
                        return;
                }
                id = hlock->class_idx - 1;
+               /*
+                * Whoops, ran out of static storage again?
+                */
                if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
                        return;
 
@@ -2147,6 +2183,10 @@ static void check_chain_key(struct task_struct *curr)
        }
        if (chain_key != curr->curr_chain_key) {
                debug_locks_off();
+               /*
+                * The chain key we just recomputed doesn't match the cached one;
+                * either we smoked the hash instead of calculating it, or a pink
+                * elephant stepped on my memory.
+                */
                WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
                        curr->lockdep_depth, i,
                        (unsigned long long)chain_key,
@@ -2530,12 +2570,24 @@ void trace_hardirqs_on_caller(unsigned long ip)
                return;
        }
 
+       /*
+        * We're enabling IRQs and according to our state above IRQs weren't
+        * already enabled, yet we find the hardware thinks they are in fact
+        * enabled; someone messed up their IRQ state tracing.
+        */
        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
                return;
 
+       /*
+        * See the fine text that goes along with this variable definition.
+        */
        if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
                return;
 
+       /*
+        * Can't allow enabling interrupts while in an interrupt handler;
+        * that's generally bad form (recursion, limited stack, etc.).
+        */
        if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
                return;
 
@@ -2563,6 +2615,10 @@ void trace_hardirqs_off_caller(unsigned long ip)
        if (unlikely(!debug_locks || current->lockdep_recursion))
                return;
 
+       /*
+        * So we're supposed to get called after you mask local IRQs, but for
+        * some reason the hardware doesn't quite think you did a proper job.
+        */
        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
                return;
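Both hardirq-tracing hunks above compare the hardware IRQ state against lockdep's software tracking. That comparison is meaningful because, with CONFIG_TRACE_IRQFLAGS, the local_irq_*() helpers invoke the tracing hooks while interrupts are still (or already) hard-disabled; paraphrased from include/linux/irqflags.h:

    #define local_irq_enable()  \
            do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)

    #define local_irq_disable() \
            do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0)

So trace_hardirqs_on_caller() should always observe IRQs still disabled, and trace_hardirqs_off_caller() should find them already disabled; anything else means someone used the raw_* variants without updating the tracing state.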
 
@@ -2595,6 +2651,10 @@ void trace_softirqs_on(unsigned long ip)
        if (unlikely(!debug_locks || current->lockdep_recursion))
                return;
 
+       /*
+        * We fancy IRQs being disabled here (see softirq.c); it avoids
+        * funny state and nesting issues.
+        */
        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
                return;
 
@@ -2631,6 +2691,9 @@ void trace_softirqs_off(unsigned long ip)
        if (unlikely(!debug_locks || current->lockdep_recursion))
                return;
 
+       /*
+        * We fancy IRQs being disabled here, see softirq.c
+        */
        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
                return;
 
@@ -2642,6 +2705,9 @@ void trace_softirqs_off(unsigned long ip)
                curr->softirq_disable_ip = ip;
                curr->softirq_disable_event = ++curr->irq_events;
                debug_atomic_inc(softirqs_off_events);
+               /*
+                * Whoops, we wanted softirqs off, so why aren't they?
+                */
                DEBUG_LOCKS_WARN_ON(!softirq_count());
        } else
                debug_atomic_inc(redundant_softirqs_off);
@@ -2666,6 +2732,9 @@ static void __lockdep_trace_alloc(gfp_t gfp_mask, unsigned long flags)
        if (!(gfp_mask & __GFP_FS))
                return;
 
+       /*
+        * Oi! Can't be having __GFP_FS allocations with IRQs disabled.
+        */
        if (DEBUG_LOCKS_WARN_ON(irqs_disabled_flags(flags)))
                return;
 
@@ -2778,13 +2847,13 @@ static int separate_irq_context(struct task_struct *curr,
        return 0;
 }
 
-#else
+#else /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
 
 static inline
 int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
                enum lock_usage_bit new_bit)
 {
-       WARN_ON(1);
+       WARN_ON(1); /* Impossible, innit? We don't have TRACE_IRQFLAGS here */
        return 1;
 }
 
@@ -2804,7 +2873,7 @@ void lockdep_trace_alloc(gfp_t gfp_mask)
 {
 }
 
-#endif
+#endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
 
 /*
  * Mark a lock with a usage bit, and validate the state transition:
@@ -2885,6 +2954,9 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
        lock->cpu = raw_smp_processor_id();
 #endif
 
+       /*
+        * Can't be having no nameless bastards around this place!
+        */
        if (DEBUG_LOCKS_WARN_ON(!name)) {
                lock->name = "NULL";
                return;
@@ -2892,6 +2964,9 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
 
        lock->name = name;
 
+       /*
+        * No key, no joy, we need to hash something.
+        */
        if (DEBUG_LOCKS_WARN_ON(!key))
                return;
        /*
@@ -2899,6 +2974,9 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
         */
        if (!static_obj(key)) {
                printk("BUG: key %p not in .data!\n", key);
+               /*
+                * What it says above ^^^^^, I suggest you read it.
+                */
                DEBUG_LOCKS_WARN_ON(1);
                return;
        }
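On the "key %p not in .data" check above: lockdep classifies locks by the address of their struct lock_class_key, so that address has to refer to a static object which outlives every instance of the lock. The standard initializers arrange this automatically by minting one static key per call site; a paraphrased sketch of the pattern used by the spinlock init macros (not verbatim):

    # define raw_spin_lock_init(lock)                               \
    do {                                                            \
            /* one static key per init call site (.data/.bss) */    \
            static struct lock_class_key __key;                     \
                                                                    \
            __raw_spin_lock_init((lock), #lock, &__key);            \
    } while (0)

A key sitting on the stack or in kmalloc()'d memory would disappear while the lock class built around its address lives on, which is exactly what static_obj() guards against.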
@@ -2937,6 +3015,11 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
        if (unlikely(!debug_locks))
                return 0;
 
+       /*
+        * Lockdep should run with IRQs disabled; otherwise we could
+        * get an interrupt which would want to take locks, which would
+        * end up in lockdep again. Got a headache already?
+        */
        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
                return 0;
 
@@ -2968,6 +3051,9 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
         * dependency checks are done)
         */
        depth = curr->lockdep_depth;
+       /*
+        * Ran out of static storage for our per-task lock stack again, have we?
+        */
        if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
                return 0;
 
@@ -2986,6 +3072,10 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
        }
 
        hlock = curr->held_locks + depth;
+       /*
+        * Plainly impossible: we just registered the class and checked that
+        * it wasn't NULL. I bet this mushroom I ate was good!
+        */
        if (DEBUG_LOCKS_WARN_ON(!class))
                return 0;
        hlock->class_idx = class_idx;
@@ -3020,11 +3110,17 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
         * the hash, not class->key.
         */
        id = class - lock_classes;
+       /*
+        * Whoops, we did it again: ran straight out of our static allocation.
+        */
        if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
                return 0;
 
        chain_key = curr->curr_chain_key;
        if (!depth) {
+               /*
+                * How can we have a chain hash when we ain't got no keys?!
+                */
                if (DEBUG_LOCKS_WARN_ON(chain_key != 0))
                        return 0;
                chain_head = 1;
@@ -3097,6 +3193,9 @@ static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
 {
        if (unlikely(!debug_locks))
                return 0;
+       /*
+        * Lockdep should run with IRQs disabled; recursion, headaches, etc.
+        */
        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
                return 0;
 
@@ -3126,6 +3225,11 @@ static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
                if (!class)
                        return 0;
 
+               /*
+                * References, but not a lock we're actually ref-counting?
+                * State got messed up, follow the sites that change ->references
+                * and try to make sense of it.
+                */
                if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
                        return 0;
 
@@ -3148,6 +3252,10 @@ __lock_set_class(struct lockdep_map *lock, const char *name,
        int i;
 
        depth = curr->lockdep_depth;
+       /*
+        * This function is about (re)setting the class of a held lock,
+        * yet we're not actually holding any locks. Naughty user!
+        */
        if (DEBUG_LOCKS_WARN_ON(!depth))
                return 0;
 
@@ -3183,6 +3291,10 @@ found_it:
                        return 0;
        }
 
+       /*
+        * I took it apart and put it back together again, except now I have
+        * these 'spare' parts... where shall I put them?
+        */
        if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
                return 0;
        return 1;
@@ -3207,6 +3319,10 @@ lock_release_non_nested(struct task_struct *curr,
         * of held locks:
         */
        depth = curr->lockdep_depth;
+       /*
+        * So we're all set to release this lock... wait, what lock? We don't
+        * own any locks; have you been drinking again?
+        */
        if (DEBUG_LOCKS_WARN_ON(!depth))
                return 0;
 
@@ -3259,6 +3375,10 @@ found_it:
                        return 0;
        }
 
+       /*
+        * We had N bottles of beer on the wall, we drank one, but now
+        * there aren't N-1 bottles of beer left on the wall...
+        */
        if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1))
                return 0;
        return 1;
@@ -3289,6 +3409,9 @@ static int lock_release_nested(struct task_struct *curr,
                return lock_release_non_nested(curr, lock, ip);
        curr->lockdep_depth--;
 
+       /*
+        * No more locks, but somehow we've got hash left over, who left it?
+        */
        if (DEBUG_LOCKS_WARN_ON(!depth && (hlock->prev_chain_key != 0)))
                return 0;
 
@@ -3371,10 +3494,13 @@ static void check_flags(unsigned long flags)
         * check if not in hardirq contexts:
         */
        if (!hardirq_count()) {
-               if (softirq_count())
+               if (softirq_count()) {
+                       /* like the above, but with softirqs */
                        DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
-               else
+               } else {
+                       /* like the above, inverted: softirqs should be enabled */
                        DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
+               }
        }
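The hardirq_count()/softirq_count() tests driving the block above come straight out of preempt_count(): the various context states are bit-fields packed into that counter, so "am I in hardirq/softirq context" is just a mask test. Roughly, paraphrased from include/linux/hardirq.h:

    #define hardirq_count() (preempt_count() & HARDIRQ_MASK)
    #define softirq_count() (preempt_count() & SOFTIRQ_MASK)
    #define in_interrupt()  (irq_count())   /* hardirq, softirq or NMI */

check_flags() then cross-checks those preempt_count-derived facts against the per-task hardirqs_enabled/softirqs_enabled bookkeeping maintained by the tracing hooks.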
 
        if (!debug_locks)
@@ -3513,6 +3639,10 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip)
        int i, contention_point, contending_point;
 
        depth = curr->lockdep_depth;
+       /*
+        * Whee, we contended on this lock, except it seems we're not
+        * actually trying to acquire anything much at all..
+        */
        if (DEBUG_LOCKS_WARN_ON(!depth))
                return;
 
@@ -3562,6 +3692,10 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip)
        int i, cpu;
 
        depth = curr->lockdep_depth;
+       /*
+        * Yay, we acquired ownership of this lock we didn't try to
+        * acquire, how the heck did that happen?
+        */
        if (DEBUG_LOCKS_WARN_ON(!depth))
                return;
 
@@ -3766,8 +3900,12 @@ void lockdep_reset_lock(struct lockdep_map *lock)
                                match |= class == lock->class_cache[j];
 
                        if (unlikely(match)) {
-                               if (debug_locks_off_graph_unlock())
+                               if (debug_locks_off_graph_unlock()) {
+                                       /*
+                                        * We all just reset everything, how did it match?
+                                        */
                                        WARN_ON(1);
+                               }
                                goto out_restore;
                        }
                }