b43: bus: abstract bus and core operations
[pandora-kernel.git] / kernel / rcutree.c
index bb84dec..f07d2f0 100644
@@ -49,6 +49,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/wait.h>
 #include <linux/kthread.h>
+#include <linux/prefetch.h>
 
 #include "rcutree.h"
 
@@ -92,6 +93,8 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active);
  */
 static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
 DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
+DEFINE_PER_CPU(int, rcu_cpu_kthread_cpu);
+DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
 static DEFINE_PER_CPU(wait_queue_head_t, rcu_cpu_wq);
 DEFINE_PER_CPU(char, rcu_cpu_has_work);
 static char rcu_kthreads_spawnable;
@@ -155,11 +158,12 @@ void rcu_note_context_switch(int cpu)
        rcu_sched_qs(cpu);
        rcu_preempt_note_context_switch(cpu);
 }
+EXPORT_SYMBOL_GPL(rcu_note_context_switch);
 
 #ifdef CONFIG_NO_HZ
 DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
        .dynticks_nesting = 1,
-       .dynticks = ATOMIC_INIT(1),
+       .dynticks = 1,
 };
 #endif /* #ifdef CONFIG_NO_HZ */
 
@@ -288,8 +292,8 @@ static int rcu_implicit_offline_qs(struct rcu_data *rdp)
                return 1;
        }
 
-       /* If preemptable RCU, no point in sending reschedule IPI. */
-       if (rdp->preemptable)
+       /* If preemptible RCU, no point in sending reschedule IPI. */
+       if (rdp->preemptible)
                return 0;
 
        /* The CPU is online, so send it a reschedule IPI. */
@@ -318,25 +322,13 @@ void rcu_enter_nohz(void)
        unsigned long flags;
        struct rcu_dynticks *rdtp;
 
+       smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
        local_irq_save(flags);
        rdtp = &__get_cpu_var(rcu_dynticks);
-       if (--rdtp->dynticks_nesting) {
-               local_irq_restore(flags);
-               return;
-       }
-       /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
-       smp_mb__before_atomic_inc();  /* See above. */
-       atomic_inc(&rdtp->dynticks);
-       smp_mb__after_atomic_inc();  /* Force ordering with next sojourn. */
-       WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
+       rdtp->dynticks++;
+       rdtp->dynticks_nesting--;
+       WARN_ON_ONCE(rdtp->dynticks & 0x1);
        local_irq_restore(flags);
-
-       /* If the interrupt queued a callback, get out of dyntick mode. */
-       if (in_irq() &&
-           (__get_cpu_var(rcu_sched_data).nxtlist ||
-            __get_cpu_var(rcu_bh_data).nxtlist ||
-            rcu_preempt_needs_cpu(smp_processor_id())))
-               set_need_resched();
 }
 
 /*
@@ -352,16 +344,11 @@ void rcu_exit_nohz(void)
 
        local_irq_save(flags);
        rdtp = &__get_cpu_var(rcu_dynticks);
-       if (rdtp->dynticks_nesting++) {
-               local_irq_restore(flags);
-               return;
-       }
-       smp_mb__before_atomic_inc();  /* Force ordering w/previous sojourn. */
-       atomic_inc(&rdtp->dynticks);
-       /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
-       smp_mb__after_atomic_inc();  /* See above. */
-       WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
+       rdtp->dynticks++;
+       rdtp->dynticks_nesting++;
+       WARN_ON_ONCE(!(rdtp->dynticks & 0x1));
        local_irq_restore(flags);
+       smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
 }
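
The rcu_enter_nohz()/rcu_exit_nohz() pair above drops the atomic_t in favor of a plain counter whose low-order bit encodes the dyntick state: odd means the CPU is outside dyntick-idle, even means it is idle. A minimal stand-alone model of that parity protocol (memory barriers elided; the struct and the model_* names are illustrative, not from the patch):

#include <assert.h>

struct dynticks_model {
        int dynticks;           /* Odd: CPU non-idle; even: dyntick-idle. */
        int dynticks_nmi;       /* Odd only while an NMI from idle runs. */
        int dynticks_nesting;   /* Process-level nesting depth. */
};

static void model_enter_idle(struct dynticks_model *d)
{
        d->dynticks++;                  /* Odd -> even. */
        d->dynticks_nesting--;
        assert((d->dynticks & 0x1) == 0);
}

static void model_exit_idle(struct dynticks_model *d)
{
        d->dynticks++;                  /* Even -> odd. */
        d->dynticks_nesting++;
        assert(d->dynticks & 0x1);
}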
 
 /**
@@ -375,15 +362,11 @@ void rcu_nmi_enter(void)
 {
        struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
 
-       if (rdtp->dynticks_nmi_nesting == 0 &&
-           (atomic_read(&rdtp->dynticks) & 0x1))
+       if (rdtp->dynticks & 0x1)
                return;
-       rdtp->dynticks_nmi_nesting++;
-       smp_mb__before_atomic_inc();  /* Force delay from prior write. */
-       atomic_inc(&rdtp->dynticks);
-       /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
-       smp_mb__after_atomic_inc();  /* See above. */
-       WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
+       rdtp->dynticks_nmi++;
+       WARN_ON_ONCE(!(rdtp->dynticks_nmi & 0x1));
+       smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
 }
 
 /**
@@ -397,14 +380,11 @@ void rcu_nmi_exit(void)
 {
        struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
 
-       if (rdtp->dynticks_nmi_nesting == 0 ||
-           --rdtp->dynticks_nmi_nesting != 0)
+       if (rdtp->dynticks & 0x1)
                return;
-       /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
-       smp_mb__before_atomic_inc();  /* See above. */
-       atomic_inc(&rdtp->dynticks);
-       smp_mb__after_atomic_inc();  /* Force delay to next write. */
-       WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
+       smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
+       rdtp->dynticks_nmi++;
+       WARN_ON_ONCE(rdtp->dynticks_nmi & 0x1);
 }
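
rcu_nmi_enter()/rcu_nmi_exit() mirror the same parity trick on a separate dynticks_nmi counter: an NMI taken while the CPU is already non-idle (dynticks odd) needs no bookkeeping, while an NMI arriving from idle flips dynticks_nmi odd for its duration. Extending the model struct from the sketch above (again with barriers elided and hypothetical names):

static void model_nmi_enter(struct dynticks_model *d)
{
        if (d->dynticks & 0x1)
                return;                 /* Already non-idle: nothing to do. */
        d->dynticks_nmi++;              /* Even -> odd while the NMI runs. */
        assert(d->dynticks_nmi & 0x1);
}

static void model_nmi_exit(struct dynticks_model *d)
{
        if (d->dynticks & 0x1)
                return;
        d->dynticks_nmi++;              /* Odd -> even again. */
        assert((d->dynticks_nmi & 0x1) == 0);
}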
 
 /**
@@ -415,7 +395,13 @@ void rcu_nmi_exit(void)
  */
 void rcu_irq_enter(void)
 {
-       rcu_exit_nohz();
+       struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
+
+       if (rdtp->dynticks_nesting++)
+               return;
+       rdtp->dynticks++;
+       WARN_ON_ONCE(!(rdtp->dynticks & 0x1));
+       smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
 }
 
 /**
@@ -427,7 +413,18 @@ void rcu_irq_enter(void)
  */
 void rcu_irq_exit(void)
 {
-       rcu_enter_nohz();
+       struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
+
+       if (--rdtp->dynticks_nesting)
+               return;
+       smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
+       rdtp->dynticks++;
+       WARN_ON_ONCE(rdtp->dynticks & 0x1);
+
+       /* If the interrupt queued a callback, get out of dyntick mode. */
+       if (__this_cpu_read(rcu_sched_data.nxtlist) ||
+           __this_cpu_read(rcu_bh_data.nxtlist))
+               set_need_resched();
 }
 
 #ifdef CONFIG_SMP
@@ -439,8 +436,19 @@ void rcu_irq_exit(void)
  */
 static int dyntick_save_progress_counter(struct rcu_data *rdp)
 {
-       rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
-       return 0;
+       int ret;
+       int snap;
+       int snap_nmi;
+
+       snap = rdp->dynticks->dynticks;
+       snap_nmi = rdp->dynticks->dynticks_nmi;
+       smp_mb();       /* Order sampling of snap with end of grace period. */
+       rdp->dynticks_snap = snap;
+       rdp->dynticks_nmi_snap = snap_nmi;
+       ret = ((snap & 0x1) == 0) && ((snap_nmi & 0x1) == 0);
+       if (ret)
+               rdp->dynticks_fqs++;
+       return ret;
 }
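
With plain counters, the grace-period side can no longer lean on atomic_add_return(0, ...) for an implicitly ordered read; it samples both counters and pairs the samples with an explicit smp_mb(). If both samples are even, the CPU is idle right now and a quiescent state can be reported immediately. Roughly, reusing the model struct from above (illustrative, not the kernel code):

static int model_save_snapshot(struct dynticks_model *d,
                               int *snap, int *snap_nmi)
{
        *snap = d->dynticks;
        *snap_nmi = d->dynticks_nmi;
        /* Kernel: smp_mb() here orders the samples against the end
         * of the previous grace period. */
        return ((*snap & 0x1) == 0) && ((*snap_nmi & 0x1) == 0);
}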
 
 /*
@@ -451,11 +459,16 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp)
  */
 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 {
-       unsigned long curr;
-       unsigned long snap;
+       long curr;
+       long curr_nmi;
+       long snap;
+       long snap_nmi;
 
-       curr = (unsigned long)atomic_add_return(0, &rdp->dynticks->dynticks);
-       snap = (unsigned long)rdp->dynticks_snap;
+       curr = rdp->dynticks->dynticks;
+       snap = rdp->dynticks_snap;
+       curr_nmi = rdp->dynticks->dynticks_nmi;
+       snap_nmi = rdp->dynticks_nmi_snap;
+       smp_mb(); /* force ordering with cpu entering/leaving dynticks. */
 
        /*
         * If the CPU passed through or entered a dynticks idle phase with
@@ -465,7 +478,8 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
         * read-side critical section that started before the beginning
         * of the current RCU grace period.
         */
-       if ((curr & 0x1) == 0 || ULONG_CMP_GE(curr, snap + 2)) {
+       if ((curr != snap || (curr & 0x1) == 0) &&
+           (curr_nmi != snap_nmi || (curr_nmi & 0x1) == 0)) {
                rdp->dynticks_fqs++;
                return 1;
        }
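
The recheck condition reads more easily per counter: the CPU has passed through a quiescent state if the counter moved since the snapshot (it took at least one idle transition) or is currently even (it is idle right now), and both the irq and NMI counters must individually pass. A hypothetical helper plus worked values:

static int model_in_qs(int curr, int snap)
{
        return curr != snap || (curr & 0x1) == 0;
}

/*
 * Examples: snap=3, curr=3 -> 0 (same non-idle stretch, no QS);
 * snap=3, curr=4 -> 1 (entered idle); snap=3, curr=5 -> 1 (idled
 * and came back); snap=4, curr=4 -> 1 (was idle all along).
 */
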
@@ -579,21 +593,24 @@ static void print_cpu_stall(struct rcu_state *rsp)
 
 static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
 {
-       long delta;
+       unsigned long j;
+       unsigned long js;
        struct rcu_node *rnp;
 
        if (rcu_cpu_stall_suppress)
                return;
-       delta = jiffies - ACCESS_ONCE(rsp->jiffies_stall);
+       j = ACCESS_ONCE(jiffies);
+       js = ACCESS_ONCE(rsp->jiffies_stall);
        rnp = rdp->mynode;
-       if ((ACCESS_ONCE(rnp->qsmask) & rdp->grpmask) && delta >= 0) {
+       if ((ACCESS_ONCE(rnp->qsmask) & rdp->grpmask) && ULONG_CMP_GE(j, js)) {
 
                /* We haven't checked in, so go dump stack. */
                print_cpu_stall(rsp);
 
-       } else if (rcu_gp_in_progress(rsp) && delta >= RCU_STALL_RAT_DELAY) {
+       } else if (rcu_gp_in_progress(rsp) &&
+                  ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) {
 
-               /* They had two time units to dump stack, so complain. */
+               /* They had a few time units to dump stack, so complain. */
                print_other_cpu_stall(rsp);
        }
 }
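
The stall check switches from a signed jiffies delta to ULONG_CMP_GE(), which stays correct when the jiffies counter wraps. The kernel macro is defined along these lines (the MODEL_ prefix below marks this as an illustrative restatement):

#include <limits.h>

/* Wrap-safe "a is at or ahead of b" for free-running counters: the
 * unsigned difference a - b is small (<= ULONG_MAX / 2) exactly when
 * a has not fallen behind b, modulo wraparound. */
#define MODEL_ULONG_CMP_GE(a, b)  (ULONG_MAX / 2 >= (a) - (b))

/* E.g. with 32-bit unsigned long: b = 0xfffffffc just before wrap,
 * a = 3 just after; a - b wraps to 7, so a still compares as ahead. */
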
@@ -888,13 +905,12 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
        __releases(rcu_get_root(rsp)->lock)
 {
-       WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
+       unsigned long gp_duration;
 
-       /*
-        * Ensure that all grace-period and pre-grace-period activity
-        * is seen before the assignment to rsp->completed.
-        */
-       smp_mb(); /* See above block comment. */
+       WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
+       gp_duration = jiffies - rsp->gp_start;
+       if (gp_duration > rsp->gp_max)
+               rsp->gp_max = gp_duration;
        rsp->completed = rsp->gpnum;
        rsp->signaled = RCU_GP_IDLE;
        rcu_start_gp(rsp, flags);  /* releases root node's rnp->lock. */
@@ -1122,22 +1138,7 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
        if (need_report & RCU_OFL_TASKS_EXP_GP)
                rcu_report_exp_rnp(rsp, rnp);
-
-       /*
-        * If there are no more online CPUs for this rcu_node structure,
-        * kill the rcu_node structure's kthread.  Otherwise, adjust its
-        * affinity.
-        */
-       t = rnp->node_kthread_task;
-       if (t != NULL &&
-           rnp->qsmaskinit == 0) {
-               raw_spin_lock_irqsave(&rnp->lock, flags);
-               rnp->node_kthread_task = NULL;
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
-               kthread_stop(t);
-               rcu_stop_boost_kthread(rnp);
-       } else
-               rcu_node_kthread_setaffinity(rnp, -1);
+       rcu_node_kthread_setaffinity(rnp, -1);
 }
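
Because the per-node kthreads are now never stopped, CPU offlining reduces to rebuilding the kthread's affinity from the surviving bits of qsmaskinit, excluding the outgoing CPU. A simplified sketch of that mask walk (model_setaffinity is hypothetical; the real rcu_node_kthread_setaffinity() appears further down in this patch):

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/sched.h>

static void model_setaffinity(struct task_struct *t, unsigned long mask,
                              int grplo, int outgoingcpu)
{
        cpumask_var_t cm;
        int cpu;

        if (!alloc_cpumask_var(&cm, GFP_KERNEL))
                return;                 /* Keep the old affinity on OOM. */
        cpumask_clear(cm);
        for (cpu = grplo; mask; cpu++, mask >>= 1)
                if ((mask & 0x1) && cpu != outgoingcpu)
                        cpumask_set_cpu(cpu, cm);
        set_cpus_allowed_ptr(t, cm);
        free_cpumask_var(cm);
}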
 
 /*
@@ -1199,7 +1200,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
                next = list->next;
                prefetch(next);
                debug_rcu_head_unqueue(list);
-               list->func(list);
+               __rcu_reclaim(list);
                list = next;
                if (++count >= rdp->blimit)
                        break;
@@ -1309,8 +1310,7 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *))
                        return;
                }
                if (rnp->qsmask == 0) {
-                       rcu_initiate_boost(rnp);
-                       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+                       rcu_initiate_boost(rnp, flags); /* releases rnp->lock */
                        continue;
                }
                cpu = rnp->grplo;
@@ -1329,10 +1329,10 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *))
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
        }
        rnp = rcu_get_root(rsp);
-       raw_spin_lock_irqsave(&rnp->lock, flags);
-       if (rnp->qsmask == 0)
-               rcu_initiate_boost(rnp);
-       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+       if (rnp->qsmask == 0) {
+               raw_spin_lock_irqsave(&rnp->lock, flags);
+               rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
+       }
 }
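
rcu_initiate_boost() now takes the caller's flags and releases rnp->lock itself, so the callers above must not unlock afterwards. The handoff is conventionally documented with sparse's __releases() annotation, as in this sketch of the callee's shape (body elided, name hypothetical):

static void model_initiate_boost(struct rcu_node *rnp, unsigned long flags)
        __releases(rnp->lock)
{
        /* ... inspect rnp and queue priority boosting if needed ... */
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

Callers then read as: take the lock, test qsmask, and either enter the boost path (which unlocks) or unlock themselves.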
 
 /*
@@ -1455,11 +1455,25 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
  */
 static void rcu_process_callbacks(void)
 {
+       /*
+        * Memory references from any prior RCU read-side critical sections
+        * executed by the interrupted code must be seen before any RCU
+        * grace-period manipulations below.
+        */
+       smp_mb(); /* See above block comment. */
+
        __rcu_process_callbacks(&rcu_sched_state,
                                &__get_cpu_var(rcu_sched_data));
        __rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
        rcu_preempt_process_callbacks();
 
+       /*
+        * Memory references from any later RCU read-side critical sections
+        * executed by the interrupted code must be seen after any RCU
+        * grace-period manipulations above.
+        */
+       smp_mb(); /* See above block comment. */
+
        /* If we are last CPU on way to dyntick-idle mode, accelerate it. */
        rcu_needs_cpu_flush();
 }
@@ -1473,24 +1487,21 @@ static void rcu_process_callbacks(void)
 static void invoke_rcu_cpu_kthread(void)
 {
        unsigned long flags;
-       wait_queue_head_t *q;
-       int cpu;
 
        local_irq_save(flags);
-       cpu = smp_processor_id();
-       per_cpu(rcu_cpu_has_work, cpu) = 1;
-       if (per_cpu(rcu_cpu_kthread_task, cpu) == NULL) {
+       __this_cpu_write(rcu_cpu_has_work, 1);
+       if (__this_cpu_read(rcu_cpu_kthread_task) == NULL) {
                local_irq_restore(flags);
                return;
        }
-       q = &per_cpu(rcu_cpu_wq, cpu);
-       wake_up(q);
+       wake_up(&__get_cpu_var(rcu_cpu_wq));
        local_irq_restore(flags);
 }
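
Since interrupts are disabled across the accesses, the open-coded per_cpu(..., smp_processor_id()) pattern can become __this_cpu_read()/__this_cpu_write(), which avoids recomputing the CPU number and can compile to a single per-CPU-addressed instruction. The two forms side by side (a sketch only; real code would keep just one of the stores):

static void model_mark_work(void)
{
        unsigned long flags;

        local_irq_save(flags);
        /* Old form: explicit CPU-number lookup. */
        per_cpu(rcu_cpu_has_work, smp_processor_id()) = 1;
        /* New form: the same store, one per-CPU access. */
        __this_cpu_write(rcu_cpu_has_work, 1);
        local_irq_restore(flags);
}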
 
 /*
  * Wake up the specified per-rcu_node-structure kthread.
- * The caller must hold ->lock.
+ * Because the per-rcu_node kthreads are immortal, we don't need
+ * to do anything to keep them alive.
  */
 static void invoke_rcu_node_kthread(struct rcu_node *rnp)
 {
@@ -1539,8 +1550,8 @@ static void rcu_cpu_kthread_timer(unsigned long arg)
 
        raw_spin_lock_irqsave(&rnp->lock, flags);
        rnp->wakemask |= rdp->grpmask;
-       invoke_rcu_node_kthread(rnp);
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
+       invoke_rcu_node_kthread(rnp);
 }
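
Moving invoke_rcu_node_kthread() outside the lock follows the usual publish-then-wake pattern: update the shared state under the lock, drop it, then wake the consumer, so the woken kthread does not immediately block on the lock the waker still holds. The shape, as a sketch:

static void model_kick(struct rcu_node *rnp, unsigned long grpmask)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&rnp->lock, flags);
        rnp->wakemask |= grpmask;               /* Publish the work. */
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
        invoke_rcu_node_kthread(rnp);           /* Wake with lock dropped. */
}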
 
 /*
@@ -1558,6 +1569,7 @@ static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
        mod_timer(&yield_timer, jiffies + 2);
        sp.sched_priority = 0;
        sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
+       set_user_nice(current, 19);
        schedule();
        sp.sched_priority = RCU_KTHREAD_PRIO;
        sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
@@ -1583,12 +1595,15 @@ static int rcu_cpu_kthread_should_stop(int cpu)
               smp_processor_id() != cpu) {
                if (kthread_should_stop())
                        return 1;
+               per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
+               per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
                local_bh_enable();
                schedule_timeout_uninterruptible(1);
                if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
                        set_cpus_allowed_ptr(current, cpumask_of(cpu));
                local_bh_disable();
        }
+       per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
        return 0;
 }
 
@@ -1616,6 +1631,7 @@ static int rcu_cpu_kthread(void *arg)
                        break;
                }
                *statusp = RCU_KTHREAD_RUNNING;
+               per_cpu(rcu_cpu_kthread_loops, cpu)++;
                local_irq_save(flags);
                work = *workp;
                *workp = 0;
@@ -1656,6 +1672,7 @@ static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
        if (IS_ERR(t))
                return PTR_ERR(t);
        kthread_bind(t, cpu);
+       per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
        WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
        per_cpu(rcu_cpu_kthread_task, cpu) = t;
        wake_up_process(t);
@@ -1681,16 +1698,12 @@ static int rcu_node_kthread(void *arg)
 
        for (;;) {
                rnp->node_kthread_status = RCU_KTHREAD_WAITING;
-               wait_event_interruptible(rnp->node_wq, rnp->wakemask != 0 ||
-                                                      kthread_should_stop());
-               if (kthread_should_stop())
-                       break;
+               wait_event_interruptible(rnp->node_wq, rnp->wakemask != 0);
                rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
                raw_spin_lock_irqsave(&rnp->lock, flags);
                mask = rnp->wakemask;
                rnp->wakemask = 0;
-               rcu_initiate_boost(rnp);
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
                for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
                        if ((mask & 0x1) == 0)
                                continue;
@@ -1706,6 +1719,7 @@ static int rcu_node_kthread(void *arg)
                        preempt_enable();
                }
        }
+       /* NOTREACHED */
        rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
        return 0;
 }
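
With the kthread_should_stop() checks gone, the node kthread is an immortal event loop: sleep until wakemask is nonzero, snapshot and clear it under the lock, act on each set bit, and loop forever, which is why the trailing status store is marked /* NOTREACHED */. Reduced to its shape (a sketch; the real loop hands the lock to rcu_initiate_boost() rather than unlocking directly):

static int model_node_kthread(void *arg)
{
        struct rcu_node *rnp = arg;
        unsigned long flags, mask;

        for (;;) {
                wait_event_interruptible(rnp->node_wq, rnp->wakemask != 0);
                raw_spin_lock_irqsave(&rnp->lock, flags);
                mask = rnp->wakemask;
                rnp->wakemask = 0;
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
                /* ... act on each CPU bit in mask ... */
        }
        return 0;       /* NOTREACHED */
}
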
@@ -1725,7 +1739,7 @@ static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
        int cpu;
        unsigned long mask = rnp->qsmaskinit;
 
-       if (rnp->node_kthread_task == NULL || mask == 0)
+       if (rnp->node_kthread_task == NULL)
                return;
        if (!alloc_cpumask_var(&cm, GFP_KERNEL))
                return;
@@ -1830,6 +1844,13 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
        /* Add the callback to our list. */
        *rdp->nxttail[RCU_NEXT_TAIL] = head;
        rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
+       rdp->qlen++;
+
+       /* If interrupts were disabled, don't dive into RCU core. */
+       if (irqs_disabled_flags(flags)) {
+               local_irq_restore(flags);
+               return;
+       }
 
        /*
         * Force the grace period if too many callbacks or too long waiting.
@@ -1838,7 +1859,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
         * invoking force_quiescent_state() if the newly enqueued callback
         * is the only one waiting for a grace period to complete.
         */
-       if (unlikely(++rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
+       if (unlikely(rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
 
                /* Are we ignoring a completed grace period? */
                rcu_process_gp_end(rsp, rdp);
@@ -1974,7 +1995,7 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
                 * or RCU-bh, force a local reschedule.
                 */
                rdp->n_rp_qs_pending++;
-               if (!rdp->preemptable &&
+               if (!rdp->preemptible &&
                    ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs) - 1,
                                 jiffies))
                        set_need_resched();
@@ -2151,7 +2172,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
  * that this CPU cannot possibly have any RCU callbacks in flight yet.
  */
 static void __cpuinit
-rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
+rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
 {
        unsigned long flags;
        unsigned long mask;
@@ -2163,7 +2184,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
        rdp->passed_quiesc = 0;  /* We could be racing with new GP, */
        rdp->qs_pending = 1;     /*  so set up to respond to current GP. */
        rdp->beenonline = 1;     /* We have now been online. */
-       rdp->preemptable = preemptable;
+       rdp->preemptible = preemptible;
        rdp->qlen_last_fqs_check = 0;
        rdp->n_force_qs_snap = rsp->n_force_qs;
        rdp->blimit = blimit;