diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index a5aabb1..e1cdf19 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
  *             to Suparna Bhattacharya for pushing me completely away
  *             from atomic instructions on the read side.
  *
+ *  - Added handling of Dynamic Ticks
+ *      Copyright 2007 - Paul E. McKenney <paulmck@us.ibm.com>
+ *                     - Steven Rostedt <srostedt@redhat.com>
+ *
  * Papers:  http://www.rdrop.com/users/paulmck/RCU
  *
  * Design Document: http://lwn.net/Articles/253651/
@@ -147,6 +151,8 @@ static char *rcu_try_flip_state_names[] =
        { "idle", "waitack", "waitzero", "waitmb" };
 #endif /* #ifdef CONFIG_RCU_TRACE */
 
+static cpumask_t rcu_cpu_online_map __read_mostly = CPU_MASK_NONE;
+
 /*
  * Enum and per-CPU flag to determine when each CPU has seen
  * the most recent counter flip.
@@ -407,6 +413,212 @@ static void __rcu_advance_callbacks(struct rcu_data *rdp)
        }
 }
 
+#ifdef CONFIG_NO_HZ
+
+DEFINE_PER_CPU(long, dynticks_progress_counter) = 1;
+static DEFINE_PER_CPU(long, rcu_dyntick_snapshot);
+static DEFINE_PER_CPU(int, rcu_update_flag);
+
+/**
+ * rcu_irq_enter - Called from hard irq handlers and NMI/SMI.
+ *
+ * If the CPU was idle with dynamic ticks active, this updates the
+ * dynticks_progress_counter to let the RCU handling know that the
+ * CPU is active.
+ */
+void rcu_irq_enter(void)
+{
+       int cpu = smp_processor_id();
+
+       if (per_cpu(rcu_update_flag, cpu))
+               per_cpu(rcu_update_flag, cpu)++;
+
+       /*
+        * Only update if we are coming from a stopped ticks mode
+        * (dynticks_progress_counter is even).
+        */
+       if (!in_interrupt() &&
+           (per_cpu(dynticks_progress_counter, cpu) & 0x1) == 0) {
+               /*
+                * The following might seem like we could have a race
+                * with NMI/SMIs. But this really isn't a problem.
+                * Here we do a read/modify/write, and the race happens
+                * when an NMI/SMI comes in after the read and before
+                * the write. But NMI/SMIs will increment this counter
+                * twice before returning, so the zero bit will not
+                * be corrupted by the NMI/SMI, which is the most important
+                * part.
+                *
+                * The only thing is that we would bring the counter back
+                * to a value it held during the NMI/SMI.
+                * But the zero bit would be set, so the rest of the
+                * counter would again be ignored.
+                *
+                * On return from the IRQ, the zero bit of the counter may
+                * be 0 and the counter value the same as it was when the
+                * NMI/SMI returned. If the state machine is unlucky enough to
+                * see that, it still doesn't matter, since all
+                * RCU read-side critical sections on this CPU would
+                * have already completed.
+                */
+               per_cpu(dynticks_progress_counter, cpu)++;
+               /*
+                * The following memory barrier ensures that any
+                * rcu_read_lock() primitives in the irq handler
+                * are seen by other CPUs to follow the above
+                * increment to dynticks_progress_counter. This is
+                * required in order for other CPUs to correctly
+                * determine when it is safe to advance the RCU
+                * grace-period state machine.
+                */
+               smp_mb(); /* see above block comment. */
+               /*
+                * Since we can't determine the dynamic tick mode from
+                * the dynticks_progress_counter after this routine,
+                * we use a second flag to acknowledge that we came
+                * from an idle state with ticks stopped.
+                */
+               per_cpu(rcu_update_flag, cpu)++;
+               /*
+                * If we take an NMI/SMI now, they will also increment
+                * the rcu_update_flag, and will not update the
+                * dynticks_progress_counter on exit. That is for
+                * this IRQ to do.
+                */
+       }
+}
+
+/**
+ * rcu_irq_exit - Called when exiting hard irq context.
+ *
+ * If the CPU was idle with dynamic ticks active, update the
+ * dynticks_progress_counter to let the RCU handling know that the
+ * CPU is going back to idle with no ticks.
+ */
+void rcu_irq_exit(void)
+{
+       int cpu = smp_processor_id();
+
+       /*
+        * rcu_update_flag is set if we interrupted the CPU
+        * when it was idle with ticks stopped.
+        * Once this occurs, we keep track of interrupt nesting
+        * because an NMI/SMI could also come in, and we still
+        * only want the IRQ that started the increment of the
+        * dynticks_progress_counter to be the one that modifies
+        * it on exit.
+        */
+       if (per_cpu(rcu_update_flag, cpu)) {
+               if (--per_cpu(rcu_update_flag, cpu))
+                       return;
+
+               /* This must match the interrupt nesting */
+               WARN_ON(in_interrupt());
+
+               /*
+                * If an NMI/SMI happens now we are still
+                * protected by the dynticks_progress_counter being odd.
+                */
+
+               /*
+                * The following memory barrier ensures that any
+                * rcu_read_unlock() primitives in the irq handler
+                * are seen by other CPUs to precede the following
+                * increment to dynticks_progress_counter. This
+                * is required in order for other CPUs to determine
+                * when it is safe to advance the RCU grace-period
+                * state machine.
+                */
+               smp_mb(); /* see above block comment. */
+               per_cpu(dynticks_progress_counter, cpu)++;
+               WARN_ON(per_cpu(dynticks_progress_counter, cpu) & 0x1);
+       }
+}
+
+static void dyntick_save_progress_counter(int cpu)
+{
+       per_cpu(rcu_dyntick_snapshot, cpu) =
+               per_cpu(dynticks_progress_counter, cpu);
+}
+
+static inline int
+rcu_try_flip_waitack_needed(int cpu)
+{
+       long curr;
+       long snap;
+
+       curr = per_cpu(dynticks_progress_counter, cpu);
+       snap = per_cpu(rcu_dyntick_snapshot, cpu);
+       smp_mb(); /* force ordering with cpu entering/leaving dynticks. */
+
+       /*
+        * If the CPU remained in dynticks mode for the entire time
+        * and didn't take any interrupts, NMIs, SMIs, or whatever,
+        * then it cannot be in the middle of an rcu_read_lock(), so
+        * the next rcu_read_lock() it executes must use the new value
+        * of the counter.  So we can safely pretend that this CPU
+        * already acknowledged the counter.
+        */
+
+       if ((curr == snap) && ((curr & 0x1) == 0))
+               return 0;
+
+       /*
+        * If the CPU passed through or entered a dynticks idle phase with
+        * no active irq handlers, then, as above, we can safely pretend
+        * that this CPU already acknowledged the counter.
+        */
+
+       if ((curr - snap) > 2 || (snap & 0x1) == 0)
+               return 0;
+
+       /* We need this CPU to explicitly acknowledge the counter flip. */
+
+       return 1;
+}
+
+static inline int
+rcu_try_flip_waitmb_needed(int cpu)
+{
+       long curr;
+       long snap;
+
+       curr = per_cpu(dynticks_progress_counter, cpu);
+       snap = per_cpu(rcu_dyntick_snapshot, cpu);
+       smp_mb(); /* force ordering with cpu entering/leaving dynticks. */
+
+       /*
+        * If the CPU remained in dynticks mode for the entire time
+        * and didn't take any interrupts, NMIs, SMIs, or whatever,
+        * then it cannot have executed an RCU read-side critical section
+        * during that time, so there is no need for it to execute a
+        * memory barrier.
+        */
+
+       if ((curr == snap) && ((curr & 0x1) == 0))
+               return 0;
+
+       /*
+        * If the CPU either entered or exited an outermost interrupt,
+        * SMI, NMI, or whatever handler, then we know that it executed
+        * a memory barrier when doing so.  So we don't need another one.
+        */
+       if (curr != snap)
+               return 0;
+
+       /* We need the CPU to execute a memory barrier. */
+
+       return 1;
+}
+
+#else /* !CONFIG_NO_HZ */
+
+# define dyntick_save_progress_counter(cpu)    do { } while (0)
+# define rcu_try_flip_waitack_needed(cpu)      (1)
+# define rcu_try_flip_waitmb_needed(cpu)       (1)
+
+#endif /* CONFIG_NO_HZ */
+
 /*
  * Get here when RCU is idle.  Decide whether we need to
  * move out of idle state, and return non-zero if so.
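
The comments in the hunk above describe the dynticks protocol: dynticks_progress_counter is kept even while a CPU has its ticks stopped and odd while it is active or inside an interrupt handler, and the grace-period code compares a per-CPU snapshot of the counter with its current value to decide whether the CPU may be skipped. The stand-alone user-space sketch below models that protocol on a single imaginary CPU; the model_* names and the main() scenario are invented for illustration and are not part of this patch.

/*
 * Minimal single-CPU model of the dynticks counter protocol, for
 * illustration only.  Even counter: ticks stopped, CPU idle.  Odd
 * counter: CPU active (here, inside an interrupt handler).
 */
#include <assert.h>
#include <stdio.h>

static long counter = 1;	/* starts odd: CPU considered active */
static long snapshot;		/* grace-period-time snapshot of counter */
static int update_flag;		/* irq nesting seen since ticks stopped */

static void model_enter_idle(void)
{
	counter++;		/* now even: no RCU readers possible */
}

static void model_irq_enter(void)
{
	if (update_flag)
		update_flag++;
	/* The in_interrupt() nesting test is omitted in this model. */
	if ((counter & 0x1) == 0) {
		counter++;	/* now odd: handler may use rcu_read_lock() */
		update_flag++;
	}
}

static void model_irq_exit(void)
{
	if (update_flag && --update_flag == 0)
		counter++;	/* outermost handler done: even again */
}

static void model_save_snapshot(void)
{
	snapshot = counter;
}

/* Mirror of the rcu_try_flip_waitack_needed() tests above. */
static int model_waitack_needed(void)
{
	long curr = counter, snap = snapshot;

	if (curr == snap && (curr & 0x1) == 0)
		return 0;	/* stayed in dynticks idle the whole time */
	if (curr - snap > 2 || (snap & 0x1) == 0)
		return 0;	/* passed through, or was in, dynticks idle */
	return 1;		/* must acknowledge the flip explicitly */
}

int main(void)
{
	model_enter_idle();
	model_save_snapshot();
	assert(!model_waitack_needed());	/* idle CPU may be skipped */

	model_irq_enter();
	assert(!model_waitack_needed());	/* snapshot was even: still safe */

	model_save_snapshot();			/* snapshot taken mid-handler */
	assert(model_waitack_needed());		/* explicit ack now required */

	model_irq_exit();
	printf("counter=%ld (even again: %s)\n",
	       counter, (counter & 0x1) ? "no" : "yes");
	return 0;
}

The first two assertions correspond to the "safe to skip" cases in rcu_try_flip_waitack_needed(); the third is the case where the grace period must still wait for an explicit acknowledgement from the CPU.
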
@@ -445,8 +657,10 @@ rcu_try_flip_idle(void)
 
        /* Now ask each CPU for acknowledgement of the flip. */
 
-       for_each_possible_cpu(cpu)
+       for_each_cpu_mask(cpu, rcu_cpu_online_map) {
                per_cpu(rcu_flip_flag, cpu) = rcu_flipped;
+               dyntick_save_progress_counter(cpu);
+       }
 
        return 1;
 }
@@ -461,8 +675,9 @@ rcu_try_flip_waitack(void)
        int cpu;
 
        RCU_TRACE_ME(rcupreempt_trace_try_flip_a1);
-       for_each_possible_cpu(cpu)
-               if (per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) {
+       for_each_cpu_mask(cpu, rcu_cpu_online_map)
+               if (rcu_try_flip_waitack_needed(cpu) &&
+                   per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) {
                        RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1);
                        return 0;
                }
@@ -492,7 +707,7 @@ rcu_try_flip_waitzero(void)
        /* Check to see if the sum of the "last" counters is zero. */
 
        RCU_TRACE_ME(rcupreempt_trace_try_flip_z1);
-       for_each_possible_cpu(cpu)
+       for_each_cpu_mask(cpu, rcu_cpu_online_map)
                sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx];
        if (sum != 0) {
                RCU_TRACE_ME(rcupreempt_trace_try_flip_ze1);
@@ -507,8 +722,10 @@ rcu_try_flip_waitzero(void)
        smp_mb();  /*  ^^^^^^^^^^^^ */
 
        /* Call for a memory barrier from each CPU. */
-       for_each_possible_cpu(cpu)
+       for_each_cpu_mask(cpu, rcu_cpu_online_map) {
                per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed;
+               dyntick_save_progress_counter(cpu);
+       }
 
        RCU_TRACE_ME(rcupreempt_trace_try_flip_z2);
        return 1;
@@ -525,8 +742,9 @@ rcu_try_flip_waitmb(void)
        int cpu;
 
        RCU_TRACE_ME(rcupreempt_trace_try_flip_m1);
-       for_each_possible_cpu(cpu)
-               if (per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) {
+       for_each_cpu_mask(cpu, rcu_cpu_online_map)
+               if (rcu_try_flip_waitmb_needed(cpu) &&
+                   per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) {
                        RCU_TRACE_ME(rcupreempt_trace_try_flip_me1);
                        return 0;
                }
@@ -637,13 +855,108 @@ void rcu_advance_callbacks(int cpu, int user)
        spin_unlock_irqrestore(&rdp->lock, flags);
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+#define rcu_offline_cpu_enqueue(srclist, srctail, dstlist, dsttail) do { \
+               *dsttail = srclist; \
+               if (srclist != NULL) { \
+                       dsttail = srctail; \
+                       srclist = NULL; \
+                       srctail = &srclist;\
+               } \
+       } while (0)
+
+void rcu_offline_cpu(int cpu)
+{
+       int i;
+       struct rcu_head *list = NULL;
+       unsigned long flags;
+       struct rcu_data *rdp = RCU_DATA_CPU(cpu);
+       struct rcu_head **tail = &list;
+
+       /*
+        * Remove all callbacks from the newly dead CPU, retaining order.
+        * Otherwise rcu_barrier() will fail.
+        */
+
+       spin_lock_irqsave(&rdp->lock, flags);
+       rcu_offline_cpu_enqueue(rdp->donelist, rdp->donetail, list, tail);
+       for (i = GP_STAGES - 1; i >= 0; i--)
+               rcu_offline_cpu_enqueue(rdp->waitlist[i], rdp->waittail[i],
+                                               list, tail);
+       rcu_offline_cpu_enqueue(rdp->nextlist, rdp->nexttail, list, tail);
+       spin_unlock_irqrestore(&rdp->lock, flags);
+       rdp->waitlistcount = 0;
+
+       /* Disengage the newly dead CPU from the grace-period computation. */
+
+       spin_lock_irqsave(&rcu_ctrlblk.fliplock, flags);
+       rcu_check_mb(cpu);
+       if (per_cpu(rcu_flip_flag, cpu) == rcu_flipped) {
+               smp_mb();  /* Subsequent counter accesses must see new value */
+               per_cpu(rcu_flip_flag, cpu) = rcu_flip_seen;
+               smp_mb();  /* Subsequent RCU read-side critical sections */
+                          /*  seen -after- acknowledgement. */
+       }
+
+       RCU_DATA_ME()->rcu_flipctr[0] += RCU_DATA_CPU(cpu)->rcu_flipctr[0];
+       RCU_DATA_ME()->rcu_flipctr[1] += RCU_DATA_CPU(cpu)->rcu_flipctr[1];
+
+       RCU_DATA_CPU(cpu)->rcu_flipctr[0] = 0;
+       RCU_DATA_CPU(cpu)->rcu_flipctr[1] = 0;
+
+       cpu_clear(cpu, rcu_cpu_online_map);
+
+       spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);
+
+       /*
+        * Place the removed callbacks on the current CPU's queue.
+        * Make them all start a new grace period: simple approach,
+        * in theory could starve a given set of callbacks, but
+        * you would need to be doing some serious CPU hotplugging
+        * to make this happen.  If this becomes a problem, adding
+        * a synchronize_rcu() to the hotplug path would be a simple
+        * fix.
+        */
+
+       local_irq_save(flags);
+       rdp = RCU_DATA_ME();
+       spin_lock(&rdp->lock);
+       *rdp->nexttail = list;
+       if (list)
+               rdp->nexttail = tail;
+       spin_unlock_irqrestore(&rdp->lock, flags);
+}
+
+void __devinit rcu_online_cpu(int cpu)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&rcu_ctrlblk.fliplock, flags);
+       cpu_set(cpu, rcu_cpu_online_map);
+       spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);
+}
+
+#else /* #ifdef CONFIG_HOTPLUG_CPU */
+
+void rcu_offline_cpu(int cpu)
+{
+}
+
+void __devinit rcu_online_cpu(int cpu)
+{
+}
+
+#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
+
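
rcu_offline_cpu() above drains the dead CPU's callback lists from oldest to newest (donelist, then the wait stages, then nextlist) so that rcu_barrier() still observes callbacks completing in order; the actual splice is the tail-pointer trick in rcu_offline_cpu_enqueue(). The user-space sketch below models only that trick; struct cb and splice_list() are invented names, and the kernel macro additionally takes (and ignores) the destination list head.

/*
 * User-space sketch of the rcu_offline_cpu_enqueue() tail-pointer splice.
 */
#include <assert.h>
#include <stddef.h>

struct cb {
	struct cb *next;
	int id;
};

/* Move the whole src list to the end of dst, preserving order. */
static void splice_list(struct cb **srclist, struct cb ***srctail,
			struct cb ***dsttail)
{
	**dsttail = *srclist;
	if (*srclist != NULL) {
		*dsttail = *srctail;	/* dst tail now ends where src ended */
		*srclist = NULL;	/* src list is left empty ... */
		*srctail = srclist;	/* ... with its tail reset to its head */
	}
}

int main(void)
{
	struct cb a = { .id = 1 }, b = { .id = 2 };
	struct cb *done = &a, *next = &b;		/* two one-element lists */
	struct cb **donetail = &a.next, **nexttail = &b.next;
	struct cb *list = NULL, **tail = &list;		/* destination, empty */

	/* Same order as rcu_offline_cpu(): oldest callbacks first. */
	splice_list(&done, &donetail, &tail);
	splice_list(&next, &nexttail, &tail);

	assert(list == &a && a.next == &b && b.next == NULL);
	assert(done == NULL && next == NULL);
	assert(donetail == &done && nexttail == &next);
	return 0;
}

After both splices the destination list holds the callbacks in their original order and each source list is left empty with its tail pointer reset to its own head, which is the invariant rcu_offline_cpu() relies on when it later grafts the collected list onto the current CPU's nextlist.
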
 static void rcu_process_callbacks(struct softirq_action *unused)
 {
        unsigned long flags;
        struct rcu_head *next, *list;
-       struct rcu_data *rdp = RCU_DATA_ME();
+       struct rcu_data *rdp;
 
-       spin_lock_irqsave(&rdp->lock, flags);
+       local_irq_save(flags);
+       rdp = RCU_DATA_ME();
+       spin_lock(&rdp->lock);
        list = rdp->donelist;
        if (list == NULL) {
                spin_unlock_irqrestore(&rdp->lock, flags);
@@ -694,10 +1007,10 @@ void __synchronize_sched(void)
        if (sched_getaffinity(0, &oldmask) < 0)
                oldmask = cpu_possible_map;
        for_each_online_cpu(cpu) {
-               sched_setaffinity(0, cpumask_of_cpu(cpu));
+               sched_setaffinity(0, &cpumask_of_cpu(cpu));
                schedule();
        }
-       sched_setaffinity(0, oldmask);
+       sched_setaffinity(0, &oldmask);
 }
 EXPORT_SYMBOL_GPL(__synchronize_sched);
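
The hunk above only adapts __synchronize_sched() to the pointer-taking sched_setaffinity() prototype; the mechanism is unchanged: migrate the current task to each online CPU in turn so that every CPU passes through a context switch. A rough user-space analogue, using the glibc sched_setaffinity() wrapper (which, unlike the in-kernel function, takes an explicit mask size), might look like the sketch below; the error handling and CPU-numbering assumptions are simplifications.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

/*
 * Rough user-space analogue of the __synchronize_sched() loop above:
 * visit every online CPU so that each one performs a context switch.
 */
int main(void)
{
	cpu_set_t old, mask;
	long ncpus = sysconf(_SC_NPROCESSORS_ONLN);
	long cpu;

	if (sched_getaffinity(0, sizeof(old), &old) < 0)
		return 1;

	for (cpu = 0; cpu < ncpus; cpu++) {
		CPU_ZERO(&mask);
		CPU_SET(cpu, &mask);
		if (sched_setaffinity(0, sizeof(mask), &mask) < 0)
			continue;	/* CPU may have gone offline */
		sched_yield();		/* stand-in for schedule() */
	}

	sched_setaffinity(0, sizeof(old), &old);
	printf("visited %ld cpus\n", ncpus);
	return 0;
}
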
 
@@ -746,6 +1059,32 @@ int rcu_pending(int cpu)
        return 0;
 }
 
+static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
+                               unsigned long action, void *hcpu)
+{
+       long cpu = (long)hcpu;
+
+       switch (action) {
+       case CPU_UP_PREPARE:
+       case CPU_UP_PREPARE_FROZEN:
+               rcu_online_cpu(cpu);
+               break;
+       case CPU_UP_CANCELED:
+       case CPU_UP_CANCELED_FROZEN:
+       case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
+               rcu_offline_cpu(cpu);
+               break;
+       default:
+               break;
+       }
+       return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata rcu_nb = {
+       .notifier_call = rcu_cpu_notify,
+};
+
 void __init __rcu_init(void)
 {
        int cpu;
@@ -769,6 +1108,23 @@ void __init __rcu_init(void)
                rdp->rcu_flipctr[0] = 0;
                rdp->rcu_flipctr[1] = 0;
        }
+       register_cpu_notifier(&rcu_nb);
+
+       /*
+        * We don't need protection against CPU hotplug here since
+        * a) If a CPU comes online while we are iterating over the
+        *    cpu_online_map below, we would only end up making a
+        *    duplicate call to rcu_online_cpu(), which merely sets the
+        *    corresponding CPU's bit in rcu_cpu_online_map.
+        *
+        * b) A CPU cannot go offline at this point in time since the user
+        *    does not have access to the sysfs interface, nor do we
+        *    suspend the system.
+        */
+       for_each_online_cpu(cpu)
+               rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE, (void *)(long) cpu);
+
        open_softirq(RCU_SOFTIRQ, rcu_process_callbacks, NULL);
 }