Merge branches 'doc.2015.02.26a', 'earlycb.2015.03.03a', 'fixes.2015.03.03a', 'gpexp...
[pandora-kernel.git] / kernel/rcu/tree_plugin.h
index d45e961..8c0ec0f 100644
@@ -58,38 +58,33 @@ static bool __read_mostly rcu_nocb_poll;    /* Offload kthreads are to poll. */
  */
 static void __init rcu_bootup_announce_oddness(void)
 {
-#ifdef CONFIG_RCU_TRACE
-       pr_info("\tRCU debugfs-based tracing is enabled.\n");
-#endif
-#if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)
-       pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
-              CONFIG_RCU_FANOUT);
-#endif
-#ifdef CONFIG_RCU_FANOUT_EXACT
-       pr_info("\tHierarchical RCU autobalancing is disabled.\n");
-#endif
-#ifdef CONFIG_RCU_FAST_NO_HZ
-       pr_info("\tRCU dyntick-idle grace-period acceleration is enabled.\n");
-#endif
-#ifdef CONFIG_PROVE_RCU
-       pr_info("\tRCU lockdep checking is enabled.\n");
-#endif
-#ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
-       pr_info("\tRCU torture testing starts during boot.\n");
-#endif
-#if defined(CONFIG_RCU_CPU_STALL_INFO)
-       pr_info("\tAdditional per-CPU info printed with stalls.\n");
-#endif
-#if NUM_RCU_LVL_4 != 0
-       pr_info("\tFour-level hierarchy is enabled.\n");
-#endif
+       if (IS_ENABLED(CONFIG_RCU_TRACE))
+               pr_info("\tRCU debugfs-based tracing is enabled.\n");
+       if ((IS_ENABLED(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) ||
+           (!IS_ENABLED(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32))
+               pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
+                      CONFIG_RCU_FANOUT);
+       if (IS_ENABLED(CONFIG_RCU_FANOUT_EXACT))
+               pr_info("\tHierarchical RCU autobalancing is disabled.\n");
+       if (IS_ENABLED(CONFIG_RCU_FAST_NO_HZ))
+               pr_info("\tRCU dyntick-idle grace-period acceleration is enabled.\n");
+       if (IS_ENABLED(CONFIG_PROVE_RCU))
+               pr_info("\tRCU lockdep checking is enabled.\n");
+       if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_RUNNABLE))
+               pr_info("\tRCU torture testing starts during boot.\n");
+       if (IS_ENABLED(CONFIG_RCU_CPU_STALL_INFO))
+               pr_info("\tAdditional per-CPU info printed with stalls.\n");
+       if (NUM_RCU_LVL_4 != 0)
+               pr_info("\tFour-level hierarchy is enabled.\n");
+       if (CONFIG_RCU_FANOUT_LEAF != 16)
+               pr_info("\tBuild-time adjustment of leaf fanout to %d.\n",
+                       CONFIG_RCU_FANOUT_LEAF);
        if (rcu_fanout_leaf != CONFIG_RCU_FANOUT_LEAF)
                pr_info("\tBoot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
        if (nr_cpu_ids != NR_CPUS)
                pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids);
-#ifdef CONFIG_RCU_BOOST
-       pr_info("\tRCU kthread priority: %d.\n", kthread_prio);
-#endif
+       if (IS_ENABLED(CONFIG_RCU_BOOST))
+               pr_info("\tRCU kthread priority: %d.\n", kthread_prio);
 }
 
 #ifdef CONFIG_PREEMPT_RCU
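
The hunk above trades preprocessor conditionals for IS_ENABLED(), so every
branch is always parsed and type-checked, and the disabled ones are discarded
by constant folding rather than hidden from the compiler. A stripped-down
userspace sketch of the macro machinery, modeled on include/linux/kconfig.h
with shortened names (the real IS_ENABLED() additionally accepts =m options),
shows how "defined as 1" versus "not defined" becomes a constant usable in an
ordinary if statement:

    /*
     * If CONFIG_DEMO is defined as 1, PLACEHOLDER_##val pastes to
     * PLACEHOLDER_1, which expands to "0," and shifts the 1 into the
     * second-argument slot; if it is undefined, the paste produces an
     * unexpandable token and TAKE_SECOND() picks the trailing 0 instead.
     */
    #include <stdio.h>

    #define CONFIG_DEMO 1                       /* comment out to "disable" */

    #define PLACEHOLDER_1 0,
    #define TAKE_SECOND(ignored, val, ...) val
    #define IS_DEFINED_2(one_or_junk) TAKE_SECOND(one_or_junk 1, 0)
    #define IS_DEFINED_1(val) IS_DEFINED_2(PLACEHOLDER_##val)
    #define IS_DEFINED(x) IS_DEFINED_1(x)       /* expands x before pasting */
    #define MY_IS_ENABLED(option) IS_DEFINED(option)

    int main(void)
    {
            if (MY_IS_ENABLED(CONFIG_DEMO))
                    printf("\tdemo feature is enabled.\n");
            return 0;
    }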
@@ -296,7 +291,13 @@ void rcu_read_unlock_special(struct task_struct *t)
        }
 
        /* Hardware IRQ handlers cannot block, complain if they get here. */
-       if (WARN_ON_ONCE(in_irq() || in_serving_softirq())) {
+       if (in_irq() || in_serving_softirq()) {
+               lockdep_rcu_suspicious(__FILE__, __LINE__,
+                                      "rcu_read_unlock() from irq or softirq with blocking in critical section!!!\n");
+               pr_alert("->rcu_read_unlock_special: %#x (b: %d, nq: %d)\n",
+                        t->rcu_read_unlock_special.s,
+                        t->rcu_read_unlock_special.b.blocked,
+                        t->rcu_read_unlock_special.b.need_qs);
                local_irq_restore(flags);
                return;
        }
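
Rather than a bare WARN_ON_ONCE(), the rewritten check above reports through
lockdep_rcu_suspicious() and dumps the task's ->rcu_read_unlock_special word,
so the splat says why the deferred-unlock state was set. A userspace sketch
of that state word, modeled on the v4.0-era union rcu_special in
<linux/sched.h> (field layout approximated), shows the raw and decoded views
that the pr_alert() prints:

    #include <stdio.h>
    #include <stdbool.h>

    union rcu_special {
            struct {
                    bool blocked;   /* preempted within a read-side section */
                    bool need_qs;   /* this task owes a quiescent state */
            } b;                    /* per-bit view for the slow path */
            short s;                /* whole-word view for "any set?" tests */
    };

    int main(void)
    {
            union rcu_special rus = { .b = { .blocked = true, .need_qs = false } };

            /* Same format string as the pr_alert() added above. */
            printf("->rcu_read_unlock_special: %#x (b: %d, nq: %d)\n",
                   (unsigned int)rus.s, rus.b.blocked, rus.b.need_qs);
            return 0;
    }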
@@ -535,7 +536,7 @@ void synchronize_rcu(void)
                           "Illegal synchronize_rcu() in RCU read-side critical section");
        if (!rcu_scheduler_active)
                return;
-       if (rcu_expedited)
+       if (rcu_gp_is_expedited())
                synchronize_rcu_expedited();
        else
                wait_rcu_gp(call_rcu);
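
synchronize_rcu() now consults rcu_gp_is_expedited() instead of reading the
rcu_expedited knob directly, which lets in-kernel code temporarily force
expedited grace periods (for example around early boot) without touching the
user-visible setting. A sketch of the helper's shape, matching the API names
from this merge window but using C11 atomics as a stand-in for the kernel's
atomic_t:

    #include <stdatomic.h>
    #include <stdbool.h>

    static int rcu_expedited;                   /* boot/sysfs knob */
    static atomic_int rcu_expedited_nesting;    /* raised/lowered in pairs */

    static void rcu_expedite_gp(void)
    {
            atomic_fetch_add(&rcu_expedited_nesting, 1);
    }

    static void rcu_unexpedite_gp(void)
    {
            atomic_fetch_sub(&rcu_expedited_nesting, 1);
    }

    /* Expedite if either the knob or any in-kernel caller asks for it. */
    static bool rcu_gp_is_expedited(void)
    {
            return rcu_expedited || atomic_load(&rcu_expedited_nesting);
    }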
@@ -1940,7 +1941,8 @@ static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
                rhp = ACCESS_ONCE(rdp->nocb_follower_head);
 
        /* Having no rcuo kthread but CBs after scheduler starts is bad! */
-       if (!ACCESS_ONCE(rdp->nocb_kthread) && rhp) {
+       if (!ACCESS_ONCE(rdp->nocb_kthread) && rhp &&
+           rcu_scheduler_fully_active) {
                /* RCU callback enqueued before CPU first came online??? */
                pr_err("RCU: Never-onlined no-CBs CPU %d has CB %p\n",
                       cpu, rhp->func);
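
The extra rcu_scheduler_fully_active condition keeps this sanity check quiet
during early boot: with the early-callback handling added later in this diff,
callbacks can legitimately sit on a no-CBs list before the rcuo kthreads have
been spawned, so "kthread missing but callbacks present" is only a bug once
the scheduler is fully up. A minimal sketch of that milestone-gated check
pattern (stand-in types; only rcu_scheduler_fully_active is the real flag):

    #include <stdbool.h>
    #include <stdio.h>

    static bool rcu_scheduler_fully_active;     /* set once kthreads can run */

    static void check_orphan_cbs(bool have_kthread, bool have_cbs, int cpu)
    {
            /* Before the milestone, a missing kthread is expected, not a bug. */
            if (!have_kthread && have_cbs && rcu_scheduler_fully_active)
                    fprintf(stderr, "RCU: Never-onlined no-CBs CPU %d has CBs\n",
                            cpu);
    }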
@@ -2387,18 +2389,8 @@ void __init rcu_init_nohz(void)
                pr_info("\tPoll for callbacks from no-CBs CPUs.\n");
 
        for_each_rcu_flavor(rsp) {
-               for_each_cpu(cpu, rcu_nocb_mask) {
-                       struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
-
-                       /*
-                        * If there are early callbacks, they will need
-                        * to be moved to the nocb lists.
-                        */
-                       WARN_ON_ONCE(rdp->nxttail[RCU_NEXT_TAIL] !=
-                                    &rdp->nxtlist &&
-                                    rdp->nxttail[RCU_NEXT_TAIL] != NULL);
-                       init_nocb_callback_list(rdp);
-               }
+               for_each_cpu(cpu, rcu_nocb_mask)
+                       init_nocb_callback_list(per_cpu_ptr(rsp->rda, cpu));
                rcu_organize_nocb_kthreads(rsp);
        }
 }
@@ -2535,6 +2527,16 @@ static bool init_nocb_callback_list(struct rcu_data *rdp)
        if (!rcu_is_nocb_cpu(rdp->cpu))
                return false;
 
+       /* If there are early-boot callbacks, move them to nocb lists. */
+       if (rdp->nxtlist) {
+               rdp->nocb_head = rdp->nxtlist;
+               rdp->nocb_tail = rdp->nxttail[RCU_NEXT_TAIL];
+               atomic_long_set(&rdp->nocb_q_count, rdp->qlen);
+               atomic_long_set(&rdp->nocb_q_count_lazy, rdp->qlen_lazy);
+               rdp->nxtlist = NULL;
+               rdp->qlen = 0;
+               rdp->qlen_lazy = 0;
+       }
        rdp->nxttail[RCU_NEXT_TAIL] = NULL;
        return true;
 }
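
This hunk (together with the rcu_init_nohz() simplification above) replaces
the old "early callbacks are a bug" assertion with an actual handoff: anything
queued on ->nxtlist before the no-CBs machinery came up is spliced onto the
nocb list along with its counts. A userspace sketch of that splice, using
field names from struct rcu_data but simplified stand-in types:

    #include <stddef.h>

    struct rcu_head {
            struct rcu_head *next;
            void (*func)(struct rcu_head *);
    };

    struct rcu_data_sketch {
            /* Softirq-processed list: head plus tail pointer. */
            struct rcu_head *nxtlist;
            struct rcu_head **nxttail;  /* stands in for nxttail[RCU_NEXT_TAIL] */
            long qlen, qlen_lazy;
            /* Kthread-processed no-CBs list. */
            struct rcu_head *nocb_head;
            struct rcu_head **nocb_tail;
            long nocb_q_count, nocb_q_count_lazy;
    };

    static void move_early_cbs(struct rcu_data_sketch *rdp)
    {
            if (!rdp->nxtlist)
                    return;
            rdp->nocb_head = rdp->nxtlist;      /* splice the whole list */
            rdp->nocb_tail = rdp->nxttail;      /* tail pointer travels too */
            rdp->nocb_q_count = rdp->qlen;      /* ...as do the counts */
            rdp->nocb_q_count_lazy = rdp->qlen_lazy;
            rdp->nxtlist = NULL;                /* old list is now empty */
            rdp->qlen = 0;
            rdp->qlen_lazy = 0;
    }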
@@ -2758,7 +2760,8 @@ static void rcu_sysidle_exit(int irq)
 
 /*
  * Check to see if the current CPU is idle.  Note that usermode execution
- * does not count as idle.  The caller must have disabled interrupts.
+ * does not count as idle.  The caller must have disabled interrupts,
+ * and must be running on tick_do_timer_cpu.
  */
 static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
                                  unsigned long *maxj)
@@ -2779,8 +2782,8 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
        if (!*isidle || rdp->rsp != rcu_state_p ||
            cpu_is_offline(rdp->cpu) || rdp->cpu == tick_do_timer_cpu)
                return;
-       if (rcu_gp_in_progress(rdp->rsp))
-               WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
+       /* Verify affinity of current kthread. */
+       WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
 
        /* Pick up current idle and NMI-nesting counter and check. */
        cur = atomic_read(&rdtp->dynticks_idle);
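
With the grace-period kthread now bound unconditionally (next hunk), the
affinity assertion no longer needs the rcu_gp_in_progress() qualifier; it is
cheap to run every time because WARN_ON_ONCE() fires at most once. A sketch
of that one-shot semantic in userspace (GNU statement expression, as in the
kernel macro, which additionally dumps a stack trace):

    #include <stdbool.h>
    #include <stdio.h>

    #define warn_on_once(cond) ({                                       \
            static bool __warned;                                       \
            bool __c = (cond);                                          \
                                                                        \
            if (__c && !__warned) {                                     \
                    __warned = true;    /* later hits stay silent */    \
                    fprintf(stderr, "warning: %s\n", #cond);            \
            }                                                           \
            __c;                                                        \
    })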
@@ -3063,11 +3066,10 @@ static void rcu_bind_gp_kthread(void)
                return;
 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
        cpu = tick_do_timer_cpu;
-       if (cpu >= 0 && cpu < nr_cpu_ids && raw_smp_processor_id() != cpu)
+       if (cpu >= 0 && cpu < nr_cpu_ids)
                set_cpus_allowed_ptr(current, cpumask_of(cpu));
 #else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
-       if (!is_housekeeping_cpu(raw_smp_processor_id()))
-               housekeeping_affine(current);
+       housekeeping_affine(current);
 #endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
 }
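
The simplification above drops the "already running there" tests: being on
the right CPU at this instant guarantees nothing about where the scheduler
may place the kthread next, and both set_cpus_allowed_ptr() and
housekeeping_affine() are safe to call redundantly, so setting the affinity
mask unconditionally is both simpler and more robust. A userspace analogy
using pthreads (illustrative only; the kernel paths above operate on kthreads
directly):

    #define _GNU_SOURCE
    #include <pthread.h>
    #include <sched.h>

    static void bind_to_cpu(int cpu)
    {
            cpu_set_t mask;

            CPU_ZERO(&mask);
            CPU_SET(cpu, &mask);
            /* Idempotent: fine even if the thread is already bound here. */
            pthread_setaffinity_np(pthread_self(), sizeof(mask), &mask);
    }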