rcu: Kick rcuo kthreads after their CPU goes offline
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 133e472..07bf4aa 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -152,19 +152,6 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active);
  */
 static int rcu_scheduler_fully_active __read_mostly;
 
-#ifdef CONFIG_RCU_BOOST
-
-/*
- * Control variables for per-CPU and per-rcu_node kthreads.  These
- * handle all flavors of RCU.
- */
-static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
-DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
-DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
-DEFINE_PER_CPU(char, rcu_cpu_has_work);
-
-#endif /* #ifdef CONFIG_RCU_BOOST */
-
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
 static void invoke_rcu_core(void);
 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
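
The hunk above removes the CONFIG_RCU_BOOST-only definitions of the per-CPU
kthread control variables from tree.c. For readers new to the idiom, here is a
minimal sketch of how DEFINE_PER_CPU() state is declared and accessed;
demo_loops is a hypothetical stand-in for rcu_cpu_kthread_loops:

#include <linux/percpu.h>
#include <linux/smp.h>

/* One instance of the counter per possible CPU. */
static DEFINE_PER_CPU(unsigned int, demo_loops);

static void demo_bump_local(void)
{
	this_cpu_inc(demo_loops);		/* current CPU's instance */
}

static unsigned int demo_read_remote(int cpu)
{
	return per_cpu(demo_loops, cpu);	/* a named CPU's instance */
}
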
@@ -2963,6 +2950,9 @@ static int synchronize_sched_expedited_cpu_stop(void *data)
  */
 void synchronize_sched_expedited(void)
 {
+       cpumask_var_t cm;
+       bool cma = false;
+       int cpu;
        long firstsnap, s, snap;
        int trycount = 0;
        struct rcu_state *rsp = &rcu_sched_state;
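
The three locals added above support an optional cpumask that the expedited
path builds below; cma records whether the allocation succeeded, because the
function must still work when it fails. A minimal sketch of the
zalloc_cpumask_var()/free_cpumask_var() lifecycle it follows, using a
hypothetical helper demo_count_other_cpus():

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/smp.h>

static int demo_count_other_cpus(void)
{
	cpumask_var_t cm;
	int n;

	if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
		return -ENOMEM;	/* caller falls back, as the patch does when !cma */
	cpumask_copy(cm, cpu_online_mask);
	cpumask_clear_cpu(raw_smp_processor_id(), cm);
	n = cpumask_weight(cm);
	free_cpumask_var(cm);	/* must run on every exit path */
	return n;
}
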
@@ -2997,11 +2987,26 @@ void synchronize_sched_expedited(void)
        }
        WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
 
+       /* Offline CPUs, idle CPUs, and any CPU we run on are quiescent. */
+       cma = zalloc_cpumask_var(&cm, GFP_KERNEL);
+       if (cma) {
+               cpumask_copy(cm, cpu_online_mask);
+               cpumask_clear_cpu(raw_smp_processor_id(), cm);
+               for_each_cpu(cpu, cm) {
+                       struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+
+                       if (!(atomic_add_return(0, &rdtp->dynticks) & 0x1))
+                               cpumask_clear_cpu(cpu, cm);
+               }
+               if (cpumask_weight(cm) == 0)
+                       goto all_cpus_idle;
+       }
+
        /*
         * Each pass through the following loop attempts to force a
         * context switch on each CPU.
         */
-       while (try_stop_cpus(cpu_online_mask,
+       while (try_stop_cpus(cma ? cm : cpu_online_mask,
                             synchronize_sched_expedited_cpu_stop,
                             NULL) == -EAGAIN) {
                put_online_cpus();
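
The filter added above keys off the parity of the per-CPU dynticks counter:
the counter is even while a CPU is in dynticks-idle and odd otherwise, and
atomic_add_return(0, ...) serves as a fully ordered read. A sketch of just the
parity test under that convention, with demo_dynticks standing in for
rdtp->dynticks:

#include <linux/atomic.h>
#include <linux/types.h>

static atomic_t demo_dynticks = ATOMIC_INIT(0);	/* even: idle, odd: not */

static bool demo_cpu_is_quiescent(void)
{
	/*
	 * Fully ordered read; an even count means dynticks-idle, so the
	 * CPU needs no stop-machine visit to be seen as quiescent.
	 */
	return !(atomic_add_return(0, &demo_dynticks) & 0x1);
}

Any CPU that passes this test is cleared from cm, so try_stop_cpus() above
only interrupts CPUs that might actually be in an RCU read-side critical
section.
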
@@ -3013,6 +3018,7 @@ void synchronize_sched_expedited(void)
                        /* ensure test happens before caller kfree */
                        smp_mb__before_atomic(); /* ^^^ */
                        atomic_long_inc(&rsp->expedited_workdone1);
+                       free_cpumask_var(cm);
                        return;
                }
 
@@ -3022,6 +3028,7 @@ void synchronize_sched_expedited(void)
                } else {
                        wait_rcu_gp(call_rcu_sched);
                        atomic_long_inc(&rsp->expedited_normal);
+                       free_cpumask_var(cm);
                        return;
                }
 
@@ -3031,6 +3038,7 @@ void synchronize_sched_expedited(void)
                        /* ensure test happens before caller kfree */
                        smp_mb__before_atomic(); /* ^^^ */
                        atomic_long_inc(&rsp->expedited_workdone2);
+                       free_cpumask_var(cm);
                        return;
                }
 
@@ -3045,6 +3053,7 @@ void synchronize_sched_expedited(void)
                        /* CPU hotplug operation in flight, use normal GP. */
                        wait_rcu_gp(call_rcu_sched);
                        atomic_long_inc(&rsp->expedited_normal);
+                       free_cpumask_var(cm);
                        return;
                }
                snap = atomic_long_read(&rsp->expedited_start);
@@ -3052,6 +3061,9 @@ void synchronize_sched_expedited(void)
        }
        atomic_long_inc(&rsp->expedited_stoppedcpus);
 
+all_cpus_idle:
+       free_cpumask_var(cm);
+
        /*
         * Everyone up to our most recent fetch is covered by our grace
         * period.  Update the counter, but only if our work is still
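
Because cm is allocated up front, every early return in the retry loop above
must pair with free_cpumask_var(), and the all_cpus_idle label gives the fast
path a single exit through the same cleanup. free_cpumask_var() is safe even
when the allocation failed: zalloc_cpumask_var() leaves the pointer NULL in
that case (and the call is a no-op without CONFIG_CPUMASK_OFFSTACK). A
condensed sketch of the shape, with hypothetical demo_* names standing in for
the real work:

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/types.h>

static bool demo_all_quiescent(struct cpumask *cm) { return cpumask_empty(cm); }
static bool demo_must_retry(void) { return false; }
static bool demo_done_early(void) { return true; }

static void demo_cleanup_shape(void)
{
	cpumask_var_t cm;
	bool cma = zalloc_cpumask_var(&cm, GFP_KERNEL);

	if (cma && demo_all_quiescent(cm))
		goto all_idle;			/* fast path skips the loop */
	while (demo_must_retry()) {
		if (demo_done_early()) {
			free_cpumask_var(cm);	/* each early return frees */
			return;
		}
	}
all_idle:
	free_cpumask_var(cm);			/* NULL-safe when !cma */
}
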
@@ -3299,11 +3311,16 @@ static void _rcu_barrier(struct rcu_state *rsp)
                        continue;
                rdp = per_cpu_ptr(rsp->rda, cpu);
                if (rcu_is_nocb_cpu(cpu)) {
-                       _rcu_barrier_trace(rsp, "OnlineNoCB", cpu,
-                                          rsp->n_barrier_done);
-                       atomic_inc(&rsp->barrier_cpu_count);
-                       __call_rcu(&rdp->barrier_head, rcu_barrier_callback,
-                                  rsp, cpu, 0);
+                       if (!rcu_nocb_cpu_needs_barrier(rsp, cpu)) {
+                               _rcu_barrier_trace(rsp, "OfflineNoCB", cpu,
+                                                  rsp->n_barrier_done);
+                       } else {
+                               _rcu_barrier_trace(rsp, "OnlineNoCB", cpu,
+                                                  rsp->n_barrier_done);
+                               atomic_inc(&rsp->barrier_cpu_count);
+                               __call_rcu(&rdp->barrier_head,
+                                          rcu_barrier_callback, rsp, cpu, 0);
+                       }
                } else if (ACCESS_ONCE(rdp->qlen)) {
                        _rcu_barrier_trace(rsp, "OnlineQ", cpu,
                                           rsp->n_barrier_done);
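
The change above keeps _rcu_barrier() from posting a barrier callback to a
no-CBs CPU that has none queued: with nothing queued, there is no guarantee
that the corresponding rcuo kthread exists or will ever run the callback, and
the barrier would wait forever. rcu_nocb_cpu_needs_barrier() therefore gates
both the trace event and the count. A minimal sketch of the underlying
count-then-wait pattern, with demo_* names standing in for the rsp fields:

#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/rcupdate.h>

static atomic_t demo_barrier_count = ATOMIC_INIT(1);	/* +1 for the poster */
static DECLARE_COMPLETION(demo_barrier_done);

static void demo_barrier_cb(struct rcu_head *rhp)
{
	/*
	 * Each posted callback drops one reference; the last one in
	 * releases the waiter, as rcu_barrier_callback() does.
	 */
	if (atomic_dec_and_test(&demo_barrier_count))
		complete(&demo_barrier_done);
}

Only CPUs that really will run the callback may be counted; counting a CPU
whose callback never executes would leave the count above zero and the
wait_for_completion() in the barrier path stuck.
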
@@ -3480,8 +3497,10 @@ static int rcu_cpu_notify(struct notifier_block *self,
        case CPU_DEAD_FROZEN:
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
-               for_each_rcu_flavor(rsp)
+               for_each_rcu_flavor(rsp) {
                        rcu_cleanup_dead_cpu(cpu, rsp);
+                       do_nocb_deferred_wakeup(per_cpu_ptr(rsp->rda, cpu));
+               }
                break;
        default:
                break;
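
Finally, the notifier now performs a deferred no-CBs wakeup for each flavor
when a CPU goes down, which is the "kick" in the subject line: if the dying
CPU had flagged a wakeup for its rcuo kthread but never got to deliver it, the
callbacks it queued could otherwise sleep indefinitely. The idiom, sketched
with hypothetical demo_* names and the ACCESS_ONCE() style this tree uses:

#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static int demo_defer_wakeup;

/* Called where waking is unsafe (e.g. with interrupts disabled). */
static void demo_flag_wakeup(void)
{
	ACCESS_ONCE(demo_defer_wakeup) = 1;
}

/* Called later from a safe context, such as the CPU_DEAD notifier above. */
static void demo_do_deferred_wakeup(void)
{
	if (ACCESS_ONCE(demo_defer_wakeup)) {
		ACCESS_ONCE(demo_defer_wakeup) = 0;
		wake_up(&demo_wq);
	}
}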