Merge branch 'kvm-updates/2.6.27' of git://git.kernel.org/pub/scm/linux/kernel/git...

diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 47ceac9..1113157 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -199,6 +199,8 @@ static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
 
 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 {
+       if (rt_rq->rt_nr_running)
+               resched_task(rq_of_rt_rq(rt_rq)->curr);
 }
 
 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
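
This is the !CONFIG_RT_GROUP_SCHED variant, previously an empty stub. The caller that
matters is the period timer: once it has refilled rt_runtime and cleared the throttle
it re-enqueues the rt_rq, and with the hunk above that now also forces the currently
running task to reschedule, so unthrottled RT tasks get the CPU right away instead of
waiting for the next tick. A rough, simplified sketch of that caller
(do_sched_rt_period_timer(), not part of this diff):

	if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
		rt_rq->rt_throttled = 0;	/* period refill lifts the throttle */
		enqueue = 1;
	}
	/* ... */
	if (enqueue)
		sched_rt_rq_enqueue(rt_rq);	/* now also reschedules rq->curr */
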
@@ -240,7 +242,7 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
 
        spin_lock(&rt_b->rt_runtime_lock);
        rt_period = ktime_to_ns(rt_b->rt_period);
-       for_each_cpu_mask(i, rd->span) {
+       for_each_cpu_mask_nr(i, rd->span) {
                struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
                s64 diff;
 
@@ -253,7 +255,7 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
 
                diff = iter->rt_runtime - iter->rt_time;
                if (diff > 0) {
-                       do_div(diff, weight);
+                       diff = div_u64((u64)diff, weight);
                        if (rt_rq->rt_runtime + diff > rt_period)
                                diff = rt_period - rt_rq->rt_runtime;
                        iter->rt_runtime -= diff;
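
Two small changes in do_balance_runtime(): the iteration switches to
for_each_cpu_mask_nr(), which stops at nr_cpu_ids instead of scanning all NR_CPUS bits
of the root-domain span, and the division switches from do_div() to div_u64().
do_div() is an architecture macro that divides a u64 lvalue in place by a 32-bit
divisor and evaluates to the remainder, so applying it to the signed local 'diff' was
subtle at best; div_u64() from <linux/math64.h> simply returns the quotient, and the
cast is safe because the 'diff > 0' test has already been made. A hypothetical helper,
for illustration only:

	#include <linux/math64.h>

	/* Split a non-negative runtime surplus evenly across 'weight' CPUs. */
	static u64 per_cpu_share(s64 surplus, u32 weight)
	{
		if (surplus <= 0 || !weight)
			return 0;

		/*
		 * Quotient only; do_div() would instead rewrite its first
		 * argument and return the remainder.
		 */
		return div_u64((u64)surplus, weight);
	}
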
@@ -298,7 +300,7 @@ static void __disable_runtime(struct rq *rq)
                        struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
                        s64 diff;
 
-                       if (iter == rt_rq)
+                       if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
                                continue;
 
                        spin_lock(&iter->rt_runtime_lock);
@@ -348,6 +350,7 @@ static void __enable_runtime(struct rq *rq)
                spin_lock(&rt_rq->rt_runtime_lock);
                rt_rq->rt_runtime = rt_b->rt_runtime;
                rt_rq->rt_time = 0;
+               rt_rq->rt_throttled = 0;
                spin_unlock(&rt_rq->rt_runtime_lock);
                spin_unlock(&rt_b->rt_runtime_lock);
        }
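
Clearing rt_throttled here matters for CPU hotplug and root-domain changes:
__disable_runtime() may have left the rt_rq throttled with its runtime borrowed away,
and since rt_time is being zeroed anyway, a stale throttle flag would only keep the
queued RT tasks off the CPU until the next period tick. For context, the hotplug hook
that drives this (elsewhere in sched_rt.c, not part of this diff) presumably looks
roughly like:

	/* Assumes rq->lock is held. */
	static void rq_online_rt(struct rq *rq)
	{
		if (rq->rt.overloaded)
			rt_set_overload(rq);

		__enable_runtime(rq);	/* restore per-rq runtime, drop stale throttle */

		cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio);
	}
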
@@ -438,9 +441,6 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
 {
        u64 runtime = sched_rt_runtime(rt_rq);
 
-       if (runtime == RUNTIME_INF)
-               return 0;
-
        if (rt_rq->rt_throttled)
                return rt_rq_throttled(rt_rq);
 
@@ -491,9 +491,11 @@ static void update_curr_rt(struct rq *rq)
                rt_rq = rt_rq_of_se(rt_se);
 
                spin_lock(&rt_rq->rt_runtime_lock);
-               rt_rq->rt_time += delta_exec;
-               if (sched_rt_runtime_exceeded(rt_rq))
-                       resched_task(curr);
+               if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
+                       rt_rq->rt_time += delta_exec;
+                       if (sched_rt_runtime_exceeded(rt_rq))
+                               resched_task(curr);
+               }
                spin_unlock(&rt_rq->rt_runtime_lock);
        }
 }
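
The RUNTIME_INF early return that the previous hunk removed from
sched_rt_runtime_exceeded() reappears here in its caller, and now sits under
rt_rq->rt_runtime_lock, so the check cannot race with the runtime-borrowing and
__disable_runtime() paths that rewrite rt_runtime from other CPUs. Groups with
unlimited runtime also stop accumulating rt_time altogether. The accessor being tested
is assumed to be a thin wrapper along these lines (simplified; the RT_GROUP_SCHED
details are omitted):

	static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
	{
		/* RUNTIME_INF means "no throttling for this rt_rq" */
		return rt_rq->rt_runtime;
	}
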
@@ -505,7 +507,9 @@ void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
        rt_rq->rt_nr_running++;
 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
        if (rt_se_prio(rt_se) < rt_rq->highest_prio) {
+#ifdef CONFIG_SMP
                struct rq *rq = rq_of_rt_rq(rt_rq);
+#endif
 
                rt_rq->highest_prio = rt_se_prio(rt_se);
 #ifdef CONFIG_SMP
@@ -599,11 +603,7 @@ static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
        if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
                return;
 
-       if (rt_se->nr_cpus_allowed == 1)
-               list_add(&rt_se->run_list, queue);
-       else
-               list_add_tail(&rt_se->run_list, queue);
-
+       list_add_tail(&rt_se->run_list, queue);
        __set_bit(rt_se_prio(rt_se), array->bitmap);
 
        inc_rt_tasks(rt_se, rt_rq);
@@ -688,32 +688,34 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
  * Put task to the end of the run list without the overhead of dequeue
  * followed by enqueue.
  */
-static
-void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
+static void
+requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
 {
-       struct rt_prio_array *array = &rt_rq->active;
-
        if (on_rt_rq(rt_se)) {
-               list_del_init(&rt_se->run_list);
-               list_add_tail(&rt_se->run_list,
-                             array->queue + rt_se_prio(rt_se));
+               struct rt_prio_array *array = &rt_rq->active;
+               struct list_head *queue = array->queue + rt_se_prio(rt_se);
+
+               if (head)
+                       list_move(&rt_se->run_list, queue);
+               else
+                       list_move_tail(&rt_se->run_list, queue);
        }
 }
 
-static void requeue_task_rt(struct rq *rq, struct task_struct *p)
+static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
 {
        struct sched_rt_entity *rt_se = &p->rt;
        struct rt_rq *rt_rq;
 
        for_each_sched_rt_entity(rt_se) {
                rt_rq = rt_rq_of_se(rt_se);
-               requeue_rt_entity(rt_rq, rt_se);
+               requeue_rt_entity(rt_rq, rt_se, head);
        }
 }
 
 static void yield_task_rt(struct rq *rq)
 {
-       requeue_task_rt(rq, rq->curr);
+       requeue_task_rt(rq, rq->curr, 0);
 }
 
 #ifdef CONFIG_SMP
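
requeue_rt_entity() grows a 'head' argument so callers can choose which end of the
priority queue the entity lands on: yield and tick-driven round-robin keep using the
tail, while the new check_preempt_equal_prio() path below pushes a stuck waking task
to the head. The open-coded list_del_init()/list_add_tail() pair also collapses into
the standard list helpers, which (paraphrasing include/linux/list.h) do the delete and
re-insert in one call:

	static inline void list_move(struct list_head *list, struct list_head *head)
	{
		__list_del(list->prev, list->next);
		list_add(list, head);		/* re-insert right after 'head' */
	}

	static inline void list_move_tail(struct list_head *list, struct list_head *head)
	{
		__list_del(list->prev, list->next);
		list_add_tail(list, head);	/* re-insert just before 'head' */
	}
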
@@ -753,6 +755,30 @@ static int select_task_rq_rt(struct task_struct *p, int sync)
         */
        return task_cpu(p);
 }
+
+static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
+{
+       cpumask_t mask;
+
+       if (rq->curr->rt.nr_cpus_allowed == 1)
+               return;
+
+       if (p->rt.nr_cpus_allowed != 1
+           && cpupri_find(&rq->rd->cpupri, p, &mask))
+               return;
+
+       if (!cpupri_find(&rq->rd->cpupri, rq->curr, &mask))
+               return;
+
+       /*
+        * There appear to be other CPUs that can accept
+        * current, and none that can run 'p', so let's
+        * reschedule to try and push current away:
+        */
+       requeue_task_rt(rq, p, 1);
+       resched_task(rq->curr);
+}
+
 #endif /* CONFIG_SMP */
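
The new helper refines the equal-priority preemption heuristic: current is only
bounced if the waking task 'p' is effectively stuck on this CPU (pinned, or with no
lower-priority CPU available to it) while current itself does have somewhere else to
go; 'p' is then requeued at the head of its priority list so it runs as soon as
current has been pushed away. Both decisions rely on cpupri_find(), whose assumed
prototype (from kernel/sched_cpupri.h) is:

	/*
	 * Returns 1 and fills *lowest_mask with the CPUs currently running at a
	 * lower priority than 'p' (and able to take it), 0 if there are none.
	 */
	int cpupri_find(struct cpupri *cp, struct task_struct *p,
			cpumask_t *lowest_mask);
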
 
 /*
@@ -778,18 +804,8 @@ static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
         * to move current somewhere else, making room for our non-migratable
         * task.
         */
-       if((p->prio == rq->curr->prio)
-          && p->rt.nr_cpus_allowed == 1
-          && rq->curr->rt.nr_cpus_allowed != 1) {
-               cpumask_t mask;
-
-               if (cpupri_find(&rq->rd->cpupri, rq->curr, &mask))
-                       /*
-                        * There appears to be other cpus that can accept
-                        * current, so lets reschedule to try and push it away
-                        */
-                       resched_task(rq->curr);
-       }
+       if (p->prio == rq->curr->prio && !need_resched())
+               check_preempt_equal_prio(rq, p);
 #endif
 }
 
@@ -847,6 +863,8 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 #define RT_MAX_TRIES 3
 
 static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
+static void double_unlock_balance(struct rq *this_rq, struct rq *busiest);
+
 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
 
 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
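
double_unlock_balance() is new in kernel/sched.c and pairs with double_lock_balance():
because the lock side may drop and re-take this_rq->lock to acquire the two runqueue
locks in order, the unlock side is routed through a helper as well so the lockdep
annotations stay balanced. Its presumed definition (shown for context only, not part
of this diff) is roughly:

	static void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
		__releases(busiest->lock)
	{
		spin_unlock(&busiest->lock);
		/* restore this_rq's lockdep subclass after the nested locking */
		lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
	}
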
@@ -921,6 +939,13 @@ static int find_lowest_rq(struct task_struct *task)
        if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
                return -1; /* No targets found */
 
+       /*
+        * Only consider CPUs that are usable for migration.
+        * I guess we might want to change cpupri_find() to ignore
+        * inactive CPUs in the first place.
+        */
+       cpus_and(*lowest_mask, *lowest_mask, cpu_active_map);
+
        /*
         * At this point we have built a mask of cpus representing the
         * lowest priority tasks in the system.  Now we want to elect
@@ -1001,7 +1026,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
                        break;
 
                /* try again */
-               spin_unlock(&lowest_rq->lock);
+               double_unlock_balance(rq, lowest_rq);
                lowest_rq = NULL;
        }
 
@@ -1070,7 +1095,7 @@ static int push_rt_task(struct rq *rq)
 
        resched_task(lowest_rq->curr);
 
-       spin_unlock(&lowest_rq->lock);
+       double_unlock_balance(rq, lowest_rq);
 
        ret = 1;
 out:
@@ -1107,7 +1132,7 @@ static int pull_rt_task(struct rq *this_rq)
 
        next = pick_next_task_rt(this_rq);
 
-       for_each_cpu_mask(cpu, this_rq->rd->rto_mask) {
+       for_each_cpu_mask_nr(cpu, this_rq->rd->rto_mask) {
                if (this_cpu == cpu)
                        continue;
 
@@ -1176,7 +1201,7 @@ static int pull_rt_task(struct rq *this_rq)
 
                }
  skip:
-               spin_unlock(&src_rq->lock);
+               double_unlock_balance(this_rq, src_rq);
        }
 
        return ret;
@@ -1415,7 +1440,7 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
         * on the queue:
         */
        if (p->rt.run_list.prev != p->rt.run_list.next) {
-               requeue_task_rt(rq, p);
+               requeue_task_rt(rq, p, 0);
                set_tsk_need_resched(p);
        }
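
Note that p->rt.run_list.prev != p->rt.run_list.next is simply a cheap "is there
anyone else on my priority queue?" test: for a list entry that is the only element,
both pointers refer to the list head. Written as a hypothetical helper for clarity:

	/* Hypothetical helper: true if rt_se shares its priority queue. */
	static inline int rt_se_has_company(struct sched_rt_entity *rt_se)
	{
		return rt_se->run_list.prev != rt_se->run_list.next;
	}
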
 }