[pandora-kernel.git] kernel/rcutree_plugin.h
index 8aafbb8..4b9b9f8 100644
 #include <linux/delay.h>
 #include <linux/stop_machine.h>
 
+#define RCU_KTHREAD_PRIO 1
+
+#ifdef CONFIG_RCU_BOOST
+#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
+#else
+#define RCU_BOOST_PRIO RCU_KTHREAD_PRIO
+#endif
+
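The two macros above select the SCHED_FIFO priority for RCU's kthreads: boost kthreads use CONFIG_RCU_BOOST_PRIO when RCU_BOOST is configured, and everything else falls back to priority 1. As a minimal sketch of how the values get consumed (the helper name is illustrative, not from this patch; it mirrors the sched_setscheduler_nocheck() call in rcu_spawn_one_boost_kthread() further down in this diff):

	/* Illustrative only: apply one of the two priorities to an RCU kthread. */
	static void example_set_rcu_kthread_prio(struct task_struct *t, bool boost)
	{
		struct sched_param sp = {
			.sched_priority = boost ? RCU_BOOST_PRIO : RCU_KTHREAD_PRIO,
		};

		/* SCHED_FIFO, as in rcu_spawn_one_boost_kthread() below. */
		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	}
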
 /*
  * Check the RCU kernel configuration parameters and print informative
  * messages about anything out of the ordinary.  If you like #ifdef, you
@@ -64,7 +72,7 @@ static void __init rcu_bootup_announce_oddness(void)
 
 #ifdef CONFIG_TREE_PREEMPT_RCU
 
-struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state);
+struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt);
 DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
 static struct rcu_state *rcu_state = &rcu_preempt_state;
 
@@ -122,9 +130,11 @@ static void rcu_preempt_qs(int cpu)
 {
        struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
 
-       rdp->passed_quiesc_completed = rdp->gpnum - 1;
+       rdp->passed_quiesce_gpnum = rdp->gpnum;
        barrier();
-       rdp->passed_quiesc = 1;
+       if (rdp->passed_quiesce == 0)
+               trace_rcu_grace_period("rcu_preempt", rdp->gpnum, "cpuqs");
+       rdp->passed_quiesce = 1;
        current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
 }
 
@@ -190,6 +200,11 @@ static void rcu_preempt_note_context_switch(int cpu)
                        if (rnp->qsmask & rdp->grpmask)
                                rnp->gp_tasks = &t->rcu_node_entry;
                }
+               trace_rcu_preempt_task(rdp->rsp->name,
+                                      t->pid,
+                                      (rnp->qsmask & rdp->grpmask)
+                                      ? rnp->gpnum
+                                      : rnp->gpnum + 1);
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
        } else if (t->rcu_read_lock_nesting < 0 &&
                   t->rcu_read_unlock_special) {
@@ -299,6 +314,9 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
        int empty_exp;
        unsigned long flags;
        struct list_head *np;
+#ifdef CONFIG_RCU_BOOST
+       struct rt_mutex *rbmp = NULL;
+#endif /* #ifdef CONFIG_RCU_BOOST */
        struct rcu_node *rnp;
        int special;
 
@@ -344,6 +362,9 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
                smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
                np = rcu_next_node_entry(t, rnp);
                list_del_init(&t->rcu_node_entry);
+               t->rcu_blocked_node = NULL;
+               trace_rcu_unlock_preempted_task("rcu_preempt",
+                                               rnp->gpnum, t->pid);
                if (&t->rcu_node_entry == rnp->gp_tasks)
                        rnp->gp_tasks = np;
                if (&t->rcu_node_entry == rnp->exp_tasks)
@@ -351,30 +372,34 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
 #ifdef CONFIG_RCU_BOOST
                if (&t->rcu_node_entry == rnp->boost_tasks)
                        rnp->boost_tasks = np;
-               /* Snapshot and clear ->rcu_boosted with rcu_node lock held. */
-               if (t->rcu_boosted) {
-                       special |= RCU_READ_UNLOCK_BOOSTED;
-                       t->rcu_boosted = 0;
+               /* Snapshot/clear ->rcu_boost_mutex with rcu_node lock held. */
+               if (t->rcu_boost_mutex) {
+                       rbmp = t->rcu_boost_mutex;
+                       t->rcu_boost_mutex = NULL;
                }
 #endif /* #ifdef CONFIG_RCU_BOOST */
-               t->rcu_blocked_node = NULL;
 
                /*
                 * If this was the last task on the current list, and if
                 * we aren't waiting on any CPUs, report the quiescent state.
                 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock.
                 */
-               if (empty)
-                       raw_spin_unlock_irqrestore(&rnp->lock, flags);
-               else
+               if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {
+                       trace_rcu_quiescent_state_report("preempt_rcu",
+                                                        rnp->gpnum,
+                                                        0, rnp->qsmask,
+                                                        rnp->level,
+                                                        rnp->grplo,
+                                                        rnp->grphi,
+                                                        !!rnp->gp_tasks);
                        rcu_report_unblock_qs_rnp(rnp, flags);
+               } else
+                       raw_spin_unlock_irqrestore(&rnp->lock, flags);
 
 #ifdef CONFIG_RCU_BOOST
                /* Unboost if we were boosted. */
-               if (special & RCU_READ_UNLOCK_BOOSTED) {
-                       rt_mutex_unlock(t->rcu_boost_mutex);
-                       t->rcu_boost_mutex = NULL;
-               }
+               if (rbmp)
+                       rt_mutex_unlock(rbmp);
 #endif /* #ifdef CONFIG_RCU_BOOST */
 
                /*
@@ -399,10 +424,10 @@ void __rcu_read_unlock(void)
 {
        struct task_struct *t = current;
 
-       barrier();  /* needed if we ever invoke rcu_read_unlock in rcutree.c */
        if (t->rcu_read_lock_nesting != 1)
                --t->rcu_read_lock_nesting;
        else {
+               barrier();  /* critical section before exit code. */
                t->rcu_read_lock_nesting = INT_MIN;
                barrier();  /* assign before ->rcu_read_unlock_special load */
                if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
@@ -466,16 +491,20 @@ static void rcu_print_detail_task_stall(struct rcu_state *rsp)
  * Scan the current list of tasks blocked within RCU read-side critical
  * sections, printing out the tid of each.
  */
-static void rcu_print_task_stall(struct rcu_node *rnp)
+static int rcu_print_task_stall(struct rcu_node *rnp)
 {
        struct task_struct *t;
+       int ndetected = 0;
 
        if (!rcu_preempt_blocked_readers_cgp(rnp))
-               return;
+               return 0;
        t = list_entry(rnp->gp_tasks,
                       struct task_struct, rcu_node_entry);
-       list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
+       list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
                printk(" P%d", t->pid);
+               ndetected++;
+       }
+       return ndetected;
 }
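
Because rcu_print_task_stall() now reports how many blocked readers it printed, the stall-warning path in rcutree.c can total the results across leaf rcu_node structures and notice when nothing was found. A hypothetical caller might look like the sketch below (function name and message are illustrative only, not part of this patch):

	static void example_count_task_stalls(struct rcu_state *rsp)
	{
		unsigned long flags;
		struct rcu_node *rnp;
		int ndetected = 0;

		/* Total the preempted readers printed for each leaf rcu_node. */
		rcu_for_each_leaf_node(rsp, rnp) {
			raw_spin_lock_irqsave(&rnp->lock, flags);
			ndetected += rcu_print_task_stall(rnp);
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
		}
		if (ndetected == 0)
			printk(KERN_ERR "INFO: no stalled RCU readers detected\n");
	}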
 
 /*
@@ -656,18 +685,9 @@ EXPORT_SYMBOL_GPL(call_rcu);
  */
 void synchronize_rcu(void)
 {
-       struct rcu_synchronize rcu;
-
        if (!rcu_scheduler_active)
                return;
-
-       init_rcu_head_on_stack(&rcu.head);
-       init_completion(&rcu.completion);
-       /* Will wake me after RCU finished. */
-       call_rcu(&rcu.head, wakeme_after_rcu);
-       /* Wait for it. */
-       wait_for_completion(&rcu.completion);
-       destroy_rcu_head_on_stack(&rcu.head);
+       wait_rcu_gp(call_rcu);
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu);
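
The open-coded grace-period wait deleted above moves into a helper shared by all RCU flavors; synchronize_rcu() now just hands its call_rcu() to it. Judging from the removed lines, wait_rcu_gp() (in kernel/rcupdate.c) amounts to roughly:

	/* Roughly what the shared helper does, per the lines deleted above. */
	void wait_rcu_gp(call_rcu_func_t crf)
	{
		struct rcu_synchronize rcu;

		init_rcu_head_on_stack(&rcu.head);
		init_completion(&rcu.completion);
		/* Will wake us after the grace period elapses. */
		crf(&rcu.head, wakeme_after_rcu);
		wait_for_completion(&rcu.completion);
		destroy_rcu_head_on_stack(&rcu.head);
	}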
 
@@ -968,8 +988,9 @@ static void rcu_print_detail_task_stall(struct rcu_state *rsp)
  * Because preemptible RCU does not exist, we never have to check for
  * tasks blocked within RCU read-side critical sections.
  */
-static void rcu_print_task_stall(struct rcu_node *rnp)
+static int rcu_print_task_stall(struct rcu_node *rnp)
 {
+       return 0;
 }
 
 /*
@@ -1136,6 +1157,8 @@ static void rcu_initiate_boost_trace(struct rcu_node *rnp)
 
 #endif /* #else #ifdef CONFIG_RCU_TRACE */
 
+static struct lock_class_key rcu_boost_class;
+
 /*
  * Carry out RCU priority boosting on the task indicated by ->exp_tasks
  * or ->boost_tasks, advancing the pointer to the next task in the
@@ -1198,8 +1221,10 @@ static int rcu_boost(struct rcu_node *rnp)
         */
        t = container_of(tb, struct task_struct, rcu_node_entry);
        rt_mutex_init_proxy_locked(&mtx, t);
+       /* Avoid lockdep false positives.  This rt_mutex is its own thing. */
+       lockdep_set_class_and_name(&mtx.wait_lock, &rcu_boost_class,
+                                  "rcu_boost_mutex");
        t->rcu_boost_mutex = &mtx;
-       t->rcu_boosted = 1;
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
        rt_mutex_lock(&mtx);  /* Side effect: boosts task t's priority. */
        rt_mutex_unlock(&mtx);  /* Keep lockdep happy. */
@@ -1228,9 +1253,12 @@ static int rcu_boost_kthread(void *arg)
        int spincnt = 0;
        int more2boost;
 
+       trace_rcu_utilization("Start boost kthread@init");
        for (;;) {
                rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
+               trace_rcu_utilization("End boost kthread@rcu_wait");
                rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
+               trace_rcu_utilization("Start boost kthread@rcu_wait");
                rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
                more2boost = rcu_boost(rnp);
                if (more2boost)
@@ -1238,11 +1266,14 @@ static int rcu_boost_kthread(void *arg)
                else
                        spincnt = 0;
                if (spincnt > 10) {
+                       trace_rcu_utilization("End boost kthread@rcu_yield");
                        rcu_yield(rcu_boost_kthread_timer, (unsigned long)rnp);
+                       trace_rcu_utilization("Start boost kthread@rcu_yield");
                        spincnt = 0;
                }
        }
        /* NOTREACHED */
+       trace_rcu_utilization("End boost kthread@notreached");
        return 0;
 }
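
The trace_rcu_utilization() calls added to the boost kthread come in matched "Start ..."/"End ..." pairs around each point where the kthread may stop running, so a trace post-processor can attribute CPU time to RCU. A hedged sketch of the same pattern around a hypothetical wait (illustrative only, not from this patch):

	static void example_traced_wait(struct rcu_node *rnp)
	{
		/* "End" just before blocking, "Start" right after resuming. */
		trace_rcu_utilization("End boost kthread@example_wait");
		rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
		trace_rcu_utilization("Start boost kthread@example_wait");
	}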
 
@@ -1291,11 +1322,9 @@ static void invoke_rcu_callbacks_kthread(void)
 
        local_irq_save(flags);
        __this_cpu_write(rcu_cpu_has_work, 1);
-       if (__this_cpu_read(rcu_cpu_kthread_task) == NULL) {
-               local_irq_restore(flags);
-               return;
-       }
-       wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
+       if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
+           current != __this_cpu_read(rcu_cpu_kthread_task))
+               wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
        local_irq_restore(flags);
 }
 
@@ -1343,13 +1372,13 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
        if (rnp->boost_kthread_task != NULL)
                return 0;
        t = kthread_create(rcu_boost_kthread, (void *)rnp,
-                          "rcub%d", rnp_index);
+                          "rcub/%d", rnp_index);
        if (IS_ERR(t))
                return PTR_ERR(t);
        raw_spin_lock_irqsave(&rnp->lock, flags);
        rnp->boost_kthread_task = t;
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
-       sp.sched_priority = RCU_KTHREAD_PRIO;
+       sp.sched_priority = RCU_BOOST_PRIO;
        sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
        wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
        return 0;
@@ -1444,6 +1473,7 @@ static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
 {
        struct sched_param sp;
        struct timer_list yield_timer;
+       int prio = current->rt_priority;
 
        setup_timer_on_stack(&yield_timer, f, arg);
        mod_timer(&yield_timer, jiffies + 2);
@@ -1451,7 +1481,8 @@ static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
        sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
        set_user_nice(current, 19);
        schedule();
-       sp.sched_priority = RCU_KTHREAD_PRIO;
+       set_user_nice(current, 0);
+       sp.sched_priority = prio;
        sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
        del_timer(&yield_timer);
 }
@@ -1489,7 +1520,8 @@ static int rcu_cpu_kthread_should_stop(int cpu)
 
 /*
  * Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
- * earlier RCU softirq.
+ * RCU softirq used in flavors and configurations of RCU that do not
+ * support RCU priority boosting.
  */
 static int rcu_cpu_kthread(void *arg)
 {
@@ -1500,9 +1532,12 @@ static int rcu_cpu_kthread(void *arg)
        char work;
        char *workp = &per_cpu(rcu_cpu_has_work, cpu);
 
+       trace_rcu_utilization("Start CPU kthread@init");
        for (;;) {
                *statusp = RCU_KTHREAD_WAITING;
+               trace_rcu_utilization("End CPU kthread@rcu_wait");
                rcu_wait(*workp != 0 || kthread_should_stop());
+               trace_rcu_utilization("Start CPU kthread@rcu_wait");
                local_bh_disable();
                if (rcu_cpu_kthread_should_stop(cpu)) {
                        local_bh_enable();
@@ -1523,11 +1558,14 @@ static int rcu_cpu_kthread(void *arg)
                        spincnt = 0;
                if (spincnt > 10) {
                        *statusp = RCU_KTHREAD_YIELDING;
+                       trace_rcu_utilization("End CPU kthread@rcu_yield");
                        rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
+                       trace_rcu_utilization("Start CPU kthread@rcu_yield");
                        spincnt = 0;
                }
        }
        *statusp = RCU_KTHREAD_STOPPED;
+       trace_rcu_utilization("End CPU kthread@term");
        return 0;
 }
 
@@ -1560,7 +1598,10 @@ static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
        if (!rcu_scheduler_fully_active ||
            per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
                return 0;
-       t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu);
+       t = kthread_create_on_node(rcu_cpu_kthread,
+                                  (void *)(long)cpu,
+                                  cpu_to_node(cpu),
+                                  "rcuc/%d", cpu);
        if (IS_ERR(t))
                return PTR_ERR(t);
        if (cpu_online(cpu))
@@ -1669,7 +1710,7 @@ static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
                return 0;
        if (rnp->node_kthread_task == NULL) {
                t = kthread_create(rcu_node_kthread, (void *)rnp,
-                                  "rcun%d", rnp_index);
+                                  "rcun/%d", rnp_index);
                if (IS_ERR(t))
                        return PTR_ERR(t);
                raw_spin_lock_irqsave(&rnp->lock, flags);
@@ -1907,15 +1948,6 @@ int rcu_needs_cpu(int cpu)
        return rcu_needs_cpu_quick_check(cpu);
 }
 
-/*
- * Check to see if we need to continue a callback-flush operations to
- * allow the last CPU to enter dyntick-idle mode.  But fast dyntick-idle
- * entry is not configured, so we never do need to.
- */
-static void rcu_needs_cpu_flush(void)
-{
-}
-
 #else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
 
 #define RCU_NEEDS_CPU_FLUSHES 5
@@ -1991,20 +2023,4 @@ int rcu_needs_cpu(int cpu)
        return c;
 }
 
-/*
- * Check to see if we need to continue a callback-flush operations to
- * allow the last CPU to enter dyntick-idle mode.
- */
-static void rcu_needs_cpu_flush(void)
-{
-       int cpu = smp_processor_id();
-       unsigned long flags;
-
-       if (per_cpu(rcu_dyntick_drain, cpu) <= 0)
-               return;
-       local_irq_save(flags);
-       (void)rcu_needs_cpu(cpu);
-       local_irq_restore(flags);
-}
-
 #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */