Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index e336313..d443734 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -46,6 +46,7 @@
 #include <linux/cpu.h>
 #include <linux/mutex.h>
 #include <linux/time.h>
+#include <linux/kernel_stat.h>
 
 #include "rcutree.h"
 
@@ -53,8 +54,8 @@
 
 static struct lock_class_key rcu_node_class[NUM_RCU_LVLS];
 
-#define RCU_STATE_INITIALIZER(name) { \
-       .level = { &name.node[0] }, \
+#define RCU_STATE_INITIALIZER(structname) { \
+       .level = { &structname.node[0] }, \
        .levelcnt = { \
                NUM_RCU_LVL_0,  /* root of hierarchy. */ \
                NUM_RCU_LVL_1, \
@@ -65,13 +66,14 @@ static struct lock_class_key rcu_node_class[NUM_RCU_LVLS];
        .signaled = RCU_GP_IDLE, \
        .gpnum = -300, \
        .completed = -300, \
-       .onofflock = __RAW_SPIN_LOCK_UNLOCKED(&name.onofflock), \
+       .onofflock = __RAW_SPIN_LOCK_UNLOCKED(&structname.onofflock), \
        .orphan_cbs_list = NULL, \
-       .orphan_cbs_tail = &name.orphan_cbs_list, \
+       .orphan_cbs_tail = &structname.orphan_cbs_list, \
        .orphan_qlen = 0, \
-       .fqslock = __RAW_SPIN_LOCK_UNLOCKED(&name.fqslock), \
+       .fqslock = __RAW_SPIN_LOCK_UNLOCKED(&structname.fqslock), \
        .n_force_qs = 0, \
        .n_force_qs_ngp = 0, \
+       .name = #structname, \
 }
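
The macro parameter is renamed from name to structname so it cannot collide with the new .name member, which the added line fills in with the preprocessor's stringizing operator; each rcu_state thereby carries a human-readable label ("rcu_sched_state", "rcu_bh_state") that the reworked stall messages below print. A minimal userspace sketch of the same pattern (the struct and identifiers here are illustrative, not kernel code):

        #include <stdio.h>

        struct flavor {
                const char *name;       /* human-readable label for diagnostics */
        };

        /* '#' turns the macro argument into a string literal. */
        #define FLAVOR_INITIALIZER(structname) { .name = #structname }

        struct flavor demo_state = FLAVOR_INITIALIZER(demo_state);

        int main(void)
        {
                printf("%s\n", demo_state.name);        /* prints "demo_state" */
                return 0;
        }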
 
 struct rcu_state rcu_sched_state = RCU_STATE_INITIALIZER(rcu_sched_state);
@@ -80,6 +82,9 @@ DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);
 struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
 DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
 
+int rcu_scheduler_active __read_mostly;
+EXPORT_SYMBOL_GPL(rcu_scheduler_active);
+
 /*
  * Return true if an RCU grace period is in progress.  The ACCESS_ONCE()s
  * permit this function to be invoked without holding the root rcu_node
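
The comment above (cut off by the hunk) refers to the ACCESS_ONCE() reads that let the grace-period-in-progress check run without the root rcu_node lock. In kernels of this era ACCESS_ONCE() forces a single untorn access through a volatile lvalue; a self-contained sketch of why that makes the lockless snapshot well-defined (the demo struct is illustrative, only the field names mirror rcu_state):

        #include <stdio.h>

        /* Kernel-style ACCESS_ONCE(): route the access through a
         * volatile-qualified lvalue so the compiler emits exactly one
         * load and cannot re-fetch or tear it. */
        #define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

        struct state { long completed; long gpnum; };

        /* A grace period is in progress while ->gpnum has advanced
         * past ->completed; reading both with ACCESS_ONCE() keeps the
         * comparison meaningful even against concurrent updaters. */
        static int gp_in_progress(struct state *s)
        {
                return ACCESS_ONCE(s->completed) != ACCESS_ONCE(s->gpnum);
        }

        int main(void)
        {
                struct state s = { .completed = -300, .gpnum = -300 };
                printf("%d\n", gp_in_progress(&s));     /* 0: idle */
                s.gpnum++;
                printf("%d\n", gp_in_progress(&s));     /* 1: in progress */
                return 0;
        }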
@@ -445,6 +450,8 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 
 #ifdef CONFIG_RCU_CPU_STALL_DETECTOR
 
+int rcu_cpu_stall_panicking __read_mostly;
+
 static void record_gp_stall_check_time(struct rcu_state *rsp)
 {
        rsp->gp_start = jiffies;
@@ -477,7 +484,8 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
 
        /* OK, time to rat on our buddy... */
 
-       printk(KERN_ERR "INFO: RCU detected CPU stalls:");
+       printk(KERN_ERR "INFO: %s detected stalls on CPUs/tasks: {",
+              rsp->name);
        rcu_for_each_leaf_node(rsp, rnp) {
                raw_spin_lock_irqsave(&rnp->lock, flags);
                rcu_print_task_stall(rnp);
@@ -488,7 +496,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
                        if (rnp->qsmask & (1UL << cpu))
                                printk(" %d", rnp->grplo + cpu);
        }
-       printk(" (detected by %d, t=%ld jiffies)\n",
+       printk("} (detected by %d, t=%ld jiffies)\n",
               smp_processor_id(), (long)(jiffies - rsp->gp_start));
        trigger_all_cpu_backtrace();
 
@@ -504,8 +512,8 @@ static void print_cpu_stall(struct rcu_state *rsp)
        unsigned long flags;
        struct rcu_node *rnp = rcu_get_root(rsp);
 
-       printk(KERN_ERR "INFO: RCU detected CPU %d stall (t=%lu jiffies)\n",
-                       smp_processor_id(), jiffies - rsp->gp_start);
+       printk(KERN_ERR "INFO: %s detected stall on CPU %d (t=%lu jiffies)\n",
+              rsp->name, smp_processor_id(), jiffies - rsp->gp_start);
        trigger_all_cpu_backtrace();
 
        raw_spin_lock_irqsave(&rnp->lock, flags);
@@ -522,6 +530,8 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
        long delta;
        struct rcu_node *rnp;
 
+       if (rcu_cpu_stall_panicking)
+               return;
        delta = jiffies - rsp->jiffies_stall;
        rnp = rdp->mynode;
        if ((rnp->qsmask & rdp->grpmask) && delta >= 0) {
@@ -536,6 +546,21 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
        }
 }
 
+static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
+{
+       rcu_cpu_stall_panicking = 1;
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block rcu_panic_block = {
+       .notifier_call = rcu_panic,
+};
+
+static void __init check_cpu_stall_init(void)
+{
+       atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
+}
+
 #else /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
 
 static void record_gp_stall_check_time(struct rcu_state *rsp)
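
check_cpu_stall() now returns immediately once a panic is underway, keeping stall warnings from spamming (or wedging) a dying system; the flag is flipped by a callback on panic_notifier_list, registered at boot via check_cpu_stall_init(). A userspace mock of the notifier-chain mechanism (single-threaded registration; the kernel's atomic_notifier_chain_register() is of course concurrency-safe):

        #include <stdio.h>

        #define NOTIFY_DONE 0

        struct notifier_block {
                int (*notifier_call)(struct notifier_block *,
                                     unsigned long, void *);
                struct notifier_block *next;
        };

        static struct notifier_block *panic_chain;

        static void chain_register(struct notifier_block **chain,
                                   struct notifier_block *nb)
        {
                nb->next = *chain;      /* prepend, like the kernel chains */
                *chain = nb;
        }

        static void chain_call(struct notifier_block *nb, unsigned long ev)
        {
                for (; nb; nb = nb->next)
                        nb->notifier_call(nb, ev, NULL);
        }

        static int stall_panicking;     /* mirrors rcu_cpu_stall_panicking */

        static int demo_panic(struct notifier_block *nb, unsigned long ev,
                              void *ptr)
        {
                stall_panicking = 1;    /* later stall checks bail out */
                return NOTIFY_DONE;
        }

        static struct notifier_block demo_block = {
                .notifier_call = demo_panic,
        };

        int main(void)
        {
                chain_register(&panic_chain, &demo_block);
                chain_call(panic_chain, 0);     /* simulate panic() firing */
                printf("stall_panicking = %d\n", stall_panicking);
                return 0;
        }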
@@ -546,6 +571,10 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
 {
 }
 
+static void __init check_cpu_stall_init(void)
+{
+}
+
 #endif /* #else #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
 
 /*
@@ -1132,8 +1161,6 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
  */
 void rcu_check_callbacks(int cpu, int user)
 {
-       if (!rcu_pending(cpu))
-               return; /* if nothing for RCU to do. */
        if (user ||
            (idle_cpu(cpu) && rcu_scheduler_active &&
             !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
@@ -1165,7 +1192,8 @@ void rcu_check_callbacks(int cpu, int user)
                rcu_bh_qs(cpu);
        }
        rcu_preempt_check_callbacks(cpu);
-       raise_softirq(RCU_SOFTIRQ);
+       if (rcu_pending(cpu))
+               raise_softirq(RCU_SOFTIRQ);
 }
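
Previously the early 'if (!rcu_pending(cpu)) return;' skipped the whole function, including the rcu_sched_qs()/rcu_bh_qs() bookkeeping above; the fix makes the quiescent-state recording unconditional and gates only the softirq kick. A stub-level sketch of the new shape (every helper here is a hypothetical stand-in):

        #include <stdio.h>

        static int pending;             /* stand-in for rcu_pending(cpu) */

        static void note_quiescent(int cpu)     /* for rcu_sched_qs() etc. */
        {
                printf("cpu %d: quiescent state recorded\n", cpu);
        }

        static void kick_deferred(void)         /* for raise_softirq() */
        {
                printf("softirq raised\n");
        }

        static void tick(int cpu)
        {
                note_quiescent(cpu);    /* always runs now */
                if (pending)            /* the old early-out, relocated */
                        kick_deferred();
        }

        int main(void)
        {
                tick(0);                /* QS recorded even with no work */
                pending = 1;
                tick(0);                /* now also raises the softirq */
                return 0;
        }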
 
 #ifdef CONFIG_SMP
@@ -1456,11 +1484,13 @@ void synchronize_sched(void)
        if (rcu_blocking_is_gp())
                return;
 
+       init_rcu_head_on_stack(&rcu.head);
        init_completion(&rcu.completion);
        /* Will wake me after RCU finished. */
        call_rcu_sched(&rcu.head, wakeme_after_rcu);
        /* Wait for it. */
        wait_for_completion(&rcu.completion);
+       destroy_rcu_head_on_stack(&rcu.head);
 }
 EXPORT_SYMBOL_GPL(synchronize_sched);
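
Both here and in synchronize_rcu_bh() below, the new init_rcu_head_on_stack()/destroy_rcu_head_on_stack() pair tells CONFIG_DEBUG_OBJECTS_RCU_HEAD that this rcu_head lives on the stack rather than in static or heap storage, so the debug-objects checks do not misfire. The completion-based wait these functions rely on is implemented elsewhere (kernel/rcupdate.c in this era); a hedged reconstruction for context:

        /* Hedged reconstruction, not part of this diff; kernel context
         * (<linux/rcupdate.h>, <linux/completion.h>) assumed. */
        struct rcu_synchronize {
                struct rcu_head head;
                struct completion completion;
        };

        /* Invoked after a full grace period has elapsed; wakes the
         * task sleeping in wait_for_completion() above. */
        static void wakeme_after_rcu(struct rcu_head *head)
        {
                struct rcu_synchronize *rcu =
                        container_of(head, struct rcu_synchronize, head);

                complete(&rcu->completion);
        }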
 
@@ -1480,11 +1510,13 @@ void synchronize_rcu_bh(void)
        if (rcu_blocking_is_gp())
                return;
 
+       init_rcu_head_on_stack(&rcu.head);
        init_completion(&rcu.completion);
        /* Will wake me after RCU finished. */
        call_rcu_bh(&rcu.head, wakeme_after_rcu);
        /* Wait for it. */
        wait_for_completion(&rcu.completion);
+       destroy_rcu_head_on_stack(&rcu.head);
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
 
@@ -1505,18 +1537,20 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
        check_cpu_stall(rsp, rdp);
 
        /* Is the RCU core waiting for a quiescent state from this CPU? */
-       if (rdp->qs_pending) {
+       if (rdp->qs_pending && !rdp->passed_quiesc) {
 
                /*
                 * If force_quiescent_state() coming soon and this CPU
                 * needs a quiescent state, and this is either RCU-sched
                 * or RCU-bh, force a local reschedule.
                 */
+               rdp->n_rp_qs_pending++;
                if (!rdp->preemptable &&
                    ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs) - 1,
                                 jiffies))
                        set_need_resched();
-               rdp->n_rp_qs_pending++;
+       } else if (rdp->qs_pending && rdp->passed_quiesc) {
+               rdp->n_rp_report_qs++;
                return 1;
        }
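
Before this change, any CPU with ->qs_pending set counted as having core work even if it had not yet passed a quiescent state. The rewrite splits the cases: still waiting (bump n_rp_qs_pending, possibly force a reschedule, then fall through) versus passed and ready to report (bump the new n_rp_report_qs counter and return 1). A compact userspace restatement of the branch logic (locking and the remaining __rcu_pending() checks elided):

        struct qs_sketch {
                int qs_pending;         /* core wants a QS from this CPU */
                int passed_quiesc;      /* CPU has already passed one */
                unsigned long n_rp_qs_pending;
                unsigned long n_rp_report_qs;
        };

        /* Returns 1 when the RCU core has work; returning 0 stands in
         * for falling through to the later checks. */
        static int qs_check(struct qs_sketch *q)
        {
                if (q->qs_pending && !q->passed_quiesc) {
                        q->n_rp_qs_pending++;   /* still waiting */
                        return 0;
                } else if (q->qs_pending && q->passed_quiesc) {
                        q->n_rp_report_qs++;    /* ready to report */
                        return 1;
                }
                return 0;
        }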
 
@@ -1783,6 +1817,21 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
        return NOTIFY_OK;
 }
 
+/*
+ * This function is invoked towards the end of the scheduler's initialization
+ * process.  Before this is called, the idle task might contain
+ * RCU read-side critical sections (during which time, this idle
+ * task is booting the system).  After this function is called, the
+ * idle tasks are prohibited from containing RCU read-side critical
+ * sections.  This function also enables RCU lockdep checking.
+ */
+void rcu_scheduler_starting(void)
+{
+       WARN_ON(num_online_cpus() != 1);
+       WARN_ON(nr_context_switches() > 0);
+       rcu_scheduler_active = 1;
+}
+
 /*
  * Compute the per-level fanout, either using the exact fanout specified
  * or balancing the tree, depending on CONFIG_RCU_FANOUT_EXACT.
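
The comment above is the header of the fanout helper, truncated by the hunk boundary. For the balanced (non-CONFIG_RCU_FANOUT_EXACT) case the per-level spread is a ceiling division computed from the leaves upward; a hedged reconstruction of that computation (signature simplified to plain arrays):

        /* levelcnt[i] = number of rcu_node structures at level i,
         * level 0 being the root; ncpus = number of possible CPUs. */
        static void init_levelspread(int *levelspread, const int *levelcnt,
                                     int levels, int ncpus)
        {
                int cprv = ncpus;

                for (int i = levels - 1; i >= 0; i--) {
                        int ccur = levelcnt[i];

                        levelspread[i] = (cprv + ccur - 1) / ccur; /* ceil */
                        cprv = ccur;
                }
        }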
@@ -1869,7 +1918,7 @@ static void __init rcu_init_one(struct rcu_state *rsp)
 
        rnp = rsp->level[NUM_RCU_LVLS - 1];
        for_each_possible_cpu(i) {
-               if (i > rnp->grphi)
+               while (i > rnp->grphi)
                        rnp++;
                rsp->rda[i]->mynode = rnp;
                rcu_boot_init_percpu_data(i, rsp);
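
The if-to-while change fixes the CPU-to-leaf assignment: with sparse CPU numbering, consecutive values of i can skip past more than one leaf's ->grphi, and a single if advances rnp by at most one node, leaving rsp->rda[i]->mynode pointing at the wrong leaf. A small demo of the difference (ranges and stride are illustrative):

        #include <stdio.h>

        struct leaf { int grplo, grphi; };      /* CPU span of a leaf */

        int main(void)
        {
                struct leaf leaves[] = { {0, 1}, {2, 3}, {4, 5}, {6, 7} };
                struct leaf *rnp = leaves;

                for (int i = 0; i < 8; i += 3) {        /* CPUs 0, 3, 6 */
                        /* With 'if' instead of 'while', CPU 6 would stop
                         * at [4,5]; the walk must skip two leaves here. */
                        while (i > rnp->grphi)
                                rnp++;
                        printf("cpu %d -> leaf [%d,%d]\n",
                               i, rnp->grplo, rnp->grphi);
                }
                return 0;
        }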
@@ -1896,12 +1945,6 @@ void __init rcu_init(void)
        int cpu;
 
        rcu_bootup_announce();
-#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
-       printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n");
-#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
-#if NUM_RCU_LVL_4 != 0
-       printk(KERN_INFO "Experimental four-level hierarchy is enabled.\n");
-#endif /* #if NUM_RCU_LVL_4 != 0 */
        RCU_INIT_FLAVOR(&rcu_sched_state, rcu_sched_data);
        RCU_INIT_FLAVOR(&rcu_bh_state, rcu_bh_data);
        __rcu_init_preempt();
@@ -1915,6 +1958,7 @@ void __init rcu_init(void)
        cpu_notifier(rcu_cpu_notify, 0);
        for_each_online_cpu(cpu)
                rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
+       check_cpu_stall_init();
 }
 
 #include "rcutree_plugin.h"