diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 2576648..7b9a08b 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
  * Dynticks per-CPU state.
  */
 struct rcu_dynticks {
-       int dynticks_nesting;   /* Track nesting level, sort of. */
-       int dynticks;           /* Even value for dynticks-idle, else odd. */
-       int dynticks_nmi;       /* Even value for either dynticks-idle or */
-                               /*  not in nmi handler, else odd.  So this */
-                               /*  remains even for nmi from irq handler. */
+       int dynticks_nesting;   /* Track irq/process nesting level. */
+       int dynticks_nmi_nesting; /* Track NMI nesting level. */
+       atomic_t dynticks;      /* Even value for dynticks-idle, else odd. */
 };
 
 /* RCU's kthread states for tracing. */
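The separate dynticks and dynticks_nmi counters above merge into a single atomic_t whose low-order bit encodes dynticks-idle: even means idle, odd means non-idle. A minimal sketch, with hypothetical helper names, of how a grace-period detector could snapshot and test that counter; atomic_add_return(0, ...) is used only because it reads the value with full memory barriers on either side:

/*
 * Sketch only: sample_dynticks() and snap_is_idle() are illustrative
 * names, not part of the actual RCU implementation.
 */
static int sample_dynticks(struct rcu_dynticks *rdtp)
{
	/* Read ->dynticks with full barriers before and after. */
	return atomic_add_return(0, &rdtp->dynticks);
}

static int snap_is_idle(int snap)
{
	return (snap & 0x1) == 0;	/* Even value => dynticks-idle. */
}

The grace-period machinery can then report a quiescent state for a CPU whose counter is currently even, or has advanced far enough since the snapshot to imply a full pass through dynticks-idle.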
@@ -121,7 +119,9 @@ struct rcu_node {
                                /*  elements that need to drain to allow the */
                                /*  current expedited grace period to */
                                /*  complete (only for TREE_PREEMPT_RCU). */
-       unsigned long wakemask; /* CPUs whose kthread needs to be awakened. */
+       atomic_t wakemask;      /* CPUs whose kthread needs to be awakened. */
+                               /*  Since this has meaning only for leaf */
+                               /*  rcu_node structures, 32 bits suffices. */
        unsigned long qsmaskinit;
                                /* Per-GP initial value for qsmask & expmask. */
        unsigned long grpmask;  /* Mask to apply to parent qsmask. */
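With ->wakemask now an atomic_t, the per-node kthread can fetch and clear the pending-CPU bits in a single step instead of under a lock. A rough sketch of that pattern, using an illustrative helper name; only the atomic_xchg() idiom is the point here:

/* Sketch only: consume_wakemask() is an illustrative name. */
static unsigned int consume_wakemask(struct rcu_node *rnp)
{
	/* Atomically grab the pending CPU bits and reset the mask to zero. */
	return atomic_xchg(&rnp->wakemask, 0);
}

The caller would then wake the per-CPU kthread for each bit set in the returned mask.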
@@ -159,9 +159,6 @@ struct rcu_node {
        struct task_struct *boost_kthread_task;
                                /* kthread that takes care of priority */
                                /*  boosting for this rcu_node structure. */
-       wait_queue_head_t boost_wq;
-                               /* Wait queue on which to park the boost */
-                               /*  kthread. */
        unsigned int boost_kthread_status;
                                /* State of boost_kthread_task for tracing. */
        unsigned long n_tasks_boosted;
@@ -188,9 +185,6 @@ struct rcu_node {
                                /* kthread that takes care of this rcu_node */
                                /*  structure, for example, awakening the */
                                /*  per-CPU kthreads as needed. */
-       wait_queue_head_t node_wq;
-                               /* Wait queue on which to park the per-node */
-                               /*  kthread. */
        unsigned int node_kthread_status;
                                /* State of node_kthread_task for tracing. */
 } ____cacheline_internodealigned_in_smp;
@@ -284,7 +278,6 @@ struct rcu_data {
        /* 3) dynticks interface. */
        struct rcu_dynticks *dynticks;  /* Shared per-CPU dynticks state. */
        int dynticks_snap;              /* Per-GP tracking for dynticks. */
-       int dynticks_nmi_snap;          /* Per-GP tracking for dynticks_nmi. */
 #endif /* #ifdef CONFIG_NO_HZ */
 
        /* 4) reasons this CPU needed to be kicked by force_quiescent_state */
@@ -337,6 +330,16 @@ struct rcu_data {
                                                /*  scheduling clock irq */
                                                /*  before ratting on them. */
 
+#define rcu_wait(cond)                                                 \
+do {                                                                   \
+       for (;;) {                                                      \
+               set_current_state(TASK_INTERRUPTIBLE);                  \
+               if (cond)                                               \
+                       break;                                          \
+               schedule();                                             \
+       }                                                               \
+       __set_current_state(TASK_RUNNING);                              \
+} while (0)
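The rcu_wait() macro is what replaces the wait queues removed above: a kthread now parks itself on an arbitrary condition and needs nothing beyond a wake_up_process() from whoever makes that condition true. A hedged usage sketch (the function name and loop body are illustrative; the status values are the kthread tracing states declared earlier in this header):

/* Sketch only: a per-node kthread loop parking on rcu_wait(). */
static int example_node_kthread(void *arg)
{
	struct rcu_node *rnp = arg;

	for (;;) {
		rnp->node_kthread_status = RCU_KTHREAD_WAITING;
		rcu_wait(atomic_read(&rnp->wakemask) != 0);
		rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
		/* ... consume ->wakemask and wake per-CPU kthreads ... */
	}
	return 0;
}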
 
 /*
  * RCU global state, including node hierarchy.  This hierarchy is
@@ -446,7 +449,6 @@ static void __cpuinit rcu_preempt_init_percpu_data(int cpu);
 static void rcu_preempt_send_cbs_to_online(void);
 static void __init __rcu_init_preempt(void);
 static void rcu_needs_cpu_flush(void);
-static void __init rcu_init_boost_waitqueue(struct rcu_node *rnp);
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
                                          cpumask_var_t cm);