diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 9ca4f5f..64b2a37 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -183,6 +183,14 @@ static inline u64 sched_rt_period(struct rt_rq *rt_rq)
        return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
 }
 
+typedef struct task_group *rt_rq_iter_t;
+
+#define for_each_rt_rq(rt_rq, iter, rq) \
+       for (iter = list_entry_rcu(task_groups.next, typeof(*iter), list); \
+            (&iter->list != &task_groups) && \
+            (rt_rq = iter->rt_rq[cpu_of(rq)]); \
+            iter = list_entry_rcu(iter->list.next, typeof(*iter), list))
+
 static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
 {
        list_add_rcu(&rt_rq->leaf_rt_rq_list,
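The group-scheduling iterator added above walks the global task_groups list under RCU and picks out each group's rt_rq for the runqueue's CPU, so it reaches every rt_rq, not just the ones currently on the leaf list that for_each_leaf_rt_rq() visits. A minimal usage sketch; show_rt_rqs() and its printk body are hypothetical and not part of the patch:

	static void show_rt_rqs(struct rq *rq)
	{
		rt_rq_iter_t iter;
		struct rt_rq *rt_rq;

		/* task_groups is traversed with list_entry_rcu(), so the
		 * walk must sit inside an RCU read-side critical section. */
		rcu_read_lock();
		for_each_rt_rq(rt_rq, iter, rq)
			printk(KERN_DEBUG "rt_time=%llu throttled=%d\n",
			       (unsigned long long)rt_rq->rt_time,
			       rt_rq->rt_throttled);
		rcu_read_unlock();
	}
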
@@ -288,6 +296,11 @@ static inline u64 sched_rt_period(struct rt_rq *rt_rq)
        return ktime_to_ns(def_rt_bandwidth.rt_period);
 }
 
+typedef struct rt_rq *rt_rq_iter_t;
+
+#define for_each_rt_rq(rt_rq, iter, rq) \
+       for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
+
 static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
 {
 }
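With !CONFIG_RT_GROUP_SCHED there is only the one rt_rq embedded in the runqueue, so the same call sites degenerate to a single pass. A sketch of what a for_each_rt_rq() use expands to in this configuration; the wrapper function and its body are hypothetical:

	static void visit_the_rt_rq(struct rq *rq)
	{
		rt_rq_iter_t iter;	/* struct rt_rq * here, never dereferenced */
		struct rt_rq *rt_rq;

		/* "(void) iter" merely references the variable so the compiler
		 * does not warn about it being unused; rt_rq is pointed at the
		 * single embedded rt_rq and the loop runs exactly once. */
		for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
			do_something_with(rt_rq);	/* hypothetical body */
	}

The dummy iterator keeps the call sites identical across both configurations, which is what lets __disable_runtime(), __enable_runtime() and print_rt_stats() below declare an rt_rq_iter_t unconditionally.
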
@@ -402,12 +415,13 @@ next:
 static void __disable_runtime(struct rq *rq)
 {
        struct root_domain *rd = rq->rd;
+       rt_rq_iter_t iter;
        struct rt_rq *rt_rq;
 
        if (unlikely(!scheduler_running))
                return;
 
-       for_each_leaf_rt_rq(rt_rq, rq) {
+       for_each_rt_rq(rt_rq, iter, rq) {
                struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
                s64 want;
                int i;
@@ -487,6 +501,7 @@ static void disable_runtime(struct rq *rq)
 
 static void __enable_runtime(struct rq *rq)
 {
+       rt_rq_iter_t iter;
        struct rt_rq *rt_rq;
 
        if (unlikely(!scheduler_running))
@@ -495,7 +510,7 @@ static void __enable_runtime(struct rq *rq)
        /*
         * Reset each runqueue's bandwidth settings
         */
-       for_each_leaf_rt_rq(rt_rq, rq) {
+       for_each_rt_rq(rt_rq, iter, rq) {
                struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
 
                raw_spin_lock(&rt_b->rt_runtime_lock);
@@ -562,6 +577,13 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
                        if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
                                rt_rq->rt_throttled = 0;
                                enqueue = 1;
+
+                               /*
+                                * Force a clock update if the CPU was idle,
+                                * lest wakeup -> unthrottle time accumulate.
+                                */
+                               if (rt_rq->rt_nr_running && rq->curr == rq->idle)
+                                       rq->skip_clock_update = -1;
                        }
                        if (rt_rq->rt_time || rt_rq->rt_nr_running)
                                idle = 0;
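Setting skip_clock_update to -1 only helps if the core scheduler distinguishes negative values from the usual "skip once" positive value. A hedged sketch of the assumed companion logic in kernel/sched.c of the same era; the exact tests are an assumption, paraphrased from memory rather than taken from this patch:

	/* Assumed behaviour: only a positive value skips the update,
	 * so -1 falls through and rq->clock is refreshed. */
	static void update_rq_clock(struct rq *rq)
	{
		s64 delta;

		if (rq->skip_clock_update > 0)
			return;

		delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
		rq->clock += delta;
		update_rq_clock_task(rq, delta);
	}

	static void put_prev_task(struct rq *rq, struct task_struct *prev)
	{
		/* a negative skip_clock_update forces a fresh clock before
		 * the newly unthrottled RT task starts being charged */
		if (prev->on_rq || rq->skip_clock_update < 0)
			update_rq_clock(rq);
		prev->sched_class->put_prev_task(rq, prev);
	}
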
@@ -977,13 +999,23 @@ static void yield_task_rt(struct rq *rq)
 static int find_lowest_rq(struct task_struct *task);
 
 static int
-select_task_rq_rt(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
+select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
 {
+       struct task_struct *curr;
+       struct rq *rq;
+       int cpu;
+
        if (sd_flag != SD_BALANCE_WAKE)
                return smp_processor_id();
 
+       cpu = task_cpu(p);
+       rq = cpu_rq(cpu);
+
+       rcu_read_lock();
+       curr = ACCESS_ONCE(rq->curr); /* unlocked access */
+
        /*
-        * If the current task is an RT task, then
+        * If the current task on @p's runqueue is an RT task, then
         * try to see if we can wake this RT task up on another
         * runqueue. Otherwise simply start this RT task
         * on its current runqueue.
@@ -997,21 +1029,25 @@ select_task_rq_rt(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
         * lock?
         *
         * For equal prio tasks, we just let the scheduler sort it out.
+        *
+        * Otherwise, just let it ride on the affined RQ and the
+        * post-schedule router will push the preempted task away
+        *
+        * This test is optimistic, if we get it wrong the load-balancer
+        * will have to sort it out.
         */
-       if (unlikely(rt_task(rq->curr)) &&
-           (rq->curr->rt.nr_cpus_allowed < 2 ||
-            rq->curr->prio < p->prio) &&
+       if (curr && unlikely(rt_task(curr)) &&
+           (curr->rt.nr_cpus_allowed < 2 ||
+            curr->prio < p->prio) &&
            (p->rt.nr_cpus_allowed > 1)) {
-               int cpu = find_lowest_rq(p);
+               int target = find_lowest_rq(p);
 
-               return (cpu == -1) ? task_cpu(p) : cpu;
+               if (target != -1)
+                       cpu = target;
        }
+       rcu_read_unlock();
 
-       /*
-        * Otherwise, just let it ride on the affined RQ and the
-        * post-schedule router will push the preempted task away
-        */
-       return task_cpu(p);
+       return cpu;
 }
 
 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
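select_task_rq_rt() is now entered without the target runqueue's lock, so rq->curr is read locklessly: ACCESS_ONCE() keeps the compiler from re-reading the pointer, and rcu_read_lock() keeps the task_struct from being freed while it is dereferenced, even though the value may already be stale. A standalone sketch of the same pattern; peek_remote_curr_prio() is an illustrative helper, not part of the patch:

	static int peek_remote_curr_prio(int cpu)
	{
		struct rq *rq = cpu_rq(cpu);
		struct task_struct *curr;
		int prio = MAX_PRIO;

		rcu_read_lock();
		curr = ACCESS_ONCE(rq->curr);	/* unlocked, possibly stale */
		if (curr)
			prio = curr->prio;
		rcu_read_unlock();

		return prio;	/* only a hint: curr may have changed already */
	}

This is why the new test reads "if (curr && ..." and why the comment calls the check optimistic: a wrong guess is tolerable because the load-balancer and the post-schedule push logic correct it later.
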
@@ -1796,10 +1832,11 @@ extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
 
 static void print_rt_stats(struct seq_file *m, int cpu)
 {
+       rt_rq_iter_t iter;
        struct rt_rq *rt_rq;
 
        rcu_read_lock();
-       for_each_leaf_rt_rq(rt_rq, cpu_rq(cpu))
+       for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
                print_rt_rq(m, cpu, rt_rq);
        rcu_read_unlock();
 }