softirq,rcu: Inform RCU of irq_exit() activity
author     Peter Zijlstra <a.p.zijlstra@chello.nl>
           Tue, 19 Jul 2011 22:32:00 +0000 (15:32 -0700)
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>
           Wed, 20 Jul 2011 17:50:12 +0000 (10:50 -0700)
The rcu_read_unlock_special() function relies on in_irq() to exclude
scheduler activity from interrupt level.  This fails because irq_exit()
can invoke the scheduler after clearing the preempt_count() bits that
in_irq() uses to determine that it is at interrupt level.  This situation
can result in failures as follows:

 $task                          IRQ             SoftIRQ

 rcu_read_lock()

 /* do stuff */

 <preempt> |= UNLOCK_BLOCKED

 rcu_read_unlock()
   --t->rcu_read_lock_nesting

                                irq_enter();
                                /* do stuff, don't use RCU */
                                irq_exit();
                                  sub_preempt_count(IRQ_EXIT_OFFSET);
                                  invoke_softirq()

                                                ttwu();
                                                  spin_lock_irq(&pi->lock)
                                                  rcu_read_lock();
                                                  /* do stuff */
                                                  rcu_read_unlock();
                                                    rcu_read_unlock_special()
                                                      rcu_report_exp_rnp()
                                                        ttwu()
                                                          spin_lock_irq(&pi->lock) /* deadlock */

   rcu_read_unlock_special(t);

Ed can trigger this easily because, with force_irqthreads,
invoke_softirq() immediately does a ttwu() of ksoftirqd/# instead of
running the softirqs in place first, but even without that the above
can happen.
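
To make the window concrete: in_irq() and in_serving_softirq() are just
tests on preempt_count bits, and irq_exit() drops the hardirq part of
preempt_count (via sub_preempt_count(IRQ_EXIT_OFFSET)) before it calls
invoke_softirq().  The following stand-alone model is illustrative only
(mask values are simplified from include/linux/hardirq.h, and it uses
HARDIRQ_OFFSET where irq_exit() really uses IRQ_EXIT_OFFSET); it is not
kernel code:

	/* model.c -- illustrative sketch, not kernel code */
	#include <stdio.h>

	#define SOFTIRQ_OFFSET   (1UL << 8)
	#define HARDIRQ_OFFSET   (1UL << 16)
	#define HARDIRQ_MASK     (0x3ffUL << 16)

	static unsigned long preempt_count;

	#define in_irq()             (!!(preempt_count & HARDIRQ_MASK))
	#define in_serving_softirq() (!!(preempt_count & SOFTIRQ_OFFSET))

	int main(void)
	{
		preempt_count += HARDIRQ_OFFSET;  /* irq_enter() */
		printf("hardirq handler: in_irq()=%d\n", in_irq());

		/* irq_exit() drops the hardirq bits first ... */
		preempt_count -= HARDIRQ_OFFSET;
		/* ... and only then calls invoke_softirq(): */
		printf("invoke_softirq: in_irq()=%d in_serving_softirq()=%d\n",
		       in_irq(), in_serving_softirq());

		/* what the patch below adds around the ksoftirqd wakeup: */
		preempt_count += SOFTIRQ_OFFSET;  /* __local_bh_disable() */
		printf("wakeup window: in_serving_softirq()=%d\n",
		       in_serving_softirq());
		preempt_count -= SOFTIRQ_OFFSET;  /* __local_bh_enable() */

		return 0;
	}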

Cure this by also excluding softirqs from the
rcu_read_unlock_special() handler and ensuring the force_irqthreads
ksoftirqd/# wakeup is done from full softirq context.

[ Alternatively, delaying the ->rcu_read_lock_nesting decrement
  until after the special handling would make the thing more robust
  in the face of interrupts as well.  And there is a separate patch
  for that. ]
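
[ For illustration only, a sketch of that alternative idea -- not the
  separate patch itself, and the INT_MIN sentinel is an assumption made
  here -- showing how the nesting count could be kept nonzero until the
  special handling is done:

	void __rcu_read_unlock(void)
	{
		struct task_struct *t = current;

		if (t->rcu_read_lock_nesting != 1) {
			--t->rcu_read_lock_nesting;
		} else {
			/*
			 * Park the counter on a sentinel so an interrupting
			 * handler still sees an active read-side critical
			 * section while the special handling runs.
			 */
			t->rcu_read_lock_nesting = INT_MIN;
			barrier();
			if (unlikely(t->rcu_read_unlock_special))
				rcu_read_unlock_special(t);
			barrier();
			t->rcu_read_lock_nesting = 0;
		}
	}
]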

Cc: Thomas Gleixner <tglx@linutronix.de>
Reported-and-tested-by: Ed Tomlinson <edt@aei.ca>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
kernel/rcutree_plugin.h
kernel/softirq.c

diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index d9d7a89..8aafbb8 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -318,7 +318,7 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
        }
 
        /* Hardware IRQ handlers cannot block. */
-       if (in_irq()) {
+       if (in_irq() || in_serving_softirq()) {
                local_irq_restore(flags);
                return;
        }
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 40cf63d..fca82c3 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -315,16 +315,24 @@ static inline void invoke_softirq(void)
 {
        if (!force_irqthreads)
                __do_softirq();
-       else
+       else {
+               __local_bh_disable((unsigned long)__builtin_return_address(0),
+                               SOFTIRQ_OFFSET);
                wakeup_softirqd();
+               __local_bh_enable(SOFTIRQ_OFFSET);
+       }
 }
 #else
 static inline void invoke_softirq(void)
 {
        if (!force_irqthreads)
                do_softirq();
-       else
+       else {
+               __local_bh_disable((unsigned long)__builtin_return_address(0),
+                               SOFTIRQ_OFFSET);
                wakeup_softirqd();
+               __local_bh_enable(SOFTIRQ_OFFSET);
+       }
 }
 #endif
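
For context, the __local_bh_disable()/__local_bh_enable() pair with
SOFTIRQ_OFFSET is the same accounting that __do_softirq() itself wraps
around softirq execution (abridged below from kernel/softirq.c of this
era), which is why the ksoftirqd/# wakeup window now counts as full
softirq context for the in_serving_softirq() test added above:

	asmlinkage void __do_softirq(void)
	{
		/* ... */
		__local_bh_disable((unsigned long)__builtin_return_address(0),
					SOFTIRQ_OFFSET);
		/* run the pending softirq handlers */
		__local_bh_enable(SOFTIRQ_OFFSET);
		/* ... */
	}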