diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 89fa32b..e24ecd3 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -662,10 +662,15 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
        if (!initial) {
                /* sleeps up to a single latency don't count. */
                if (sched_feat(NEW_FAIR_SLEEPERS)) {
+                       unsigned long thresh = sysctl_sched_latency;
+
+                       /*
+                        * convert the sleeper threshold into weight-scaled virtual time
+                        */
                        if (sched_feat(NORMALIZED_SLEEPER))
-                               vruntime -= calc_delta_weight(sysctl_sched_latency, se);
-                       else
-                               vruntime -= sysctl_sched_latency;
+                               thresh = calc_delta_fair(thresh, se);
+
+                       vruntime -= thresh;
                }
 
                /* ensure we never gain time by being placed backwards. */
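
This hunk computes the sleeper threshold once and, only when NORMALIZED_SLEEPER is set, converts it from wall-clock time into virtual time with calc_delta_fair() before subtracting it from vruntime. Below is a minimal user-space sketch of that scaling, not the kernel implementation: calc_delta_fair_sketch() and the 20ms figure are illustrative assumptions, and the real calc_delta_fair() uses precomputed inverse weights and fixed-point shifts rather than a plain division.

#include <stdio.h>

#define NICE_0_LOAD 1024ULL     /* load weight of a nice-0 task */

/*
 * Simplified stand-in for the kernel's calc_delta_fair():
 * scale delta by NICE_0_LOAD / weight, so heavier entities
 * receive a smaller virtual-time credit.
 */
static unsigned long long calc_delta_fair_sketch(unsigned long long delta,
                                                 unsigned long long weight)
{
        if (weight != NICE_0_LOAD)
                delta = delta * NICE_0_LOAD / weight;
        return delta;
}

int main(void)
{
        unsigned long long thresh = 20000000ULL;        /* 20ms in ns */

        /* nice 0 (weight 1024): threshold passes through unchanged */
        printf("nice  0: %llu ns\n", calc_delta_fair_sketch(thresh, 1024));
        /* nice -5 (weight 3121): heavier task, smaller sleeper credit */
        printf("nice -5: %llu ns\n", calc_delta_fair_sketch(thresh, 3121));
        return 0;
}
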
@@ -682,6 +687,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
         * Update run-time statistics of the 'current'.
         */
        update_curr(cfs_rq);
+       account_entity_enqueue(cfs_rq, se);
 
        if (wakeup) {
                place_entity(cfs_rq, se, 0);
@@ -692,7 +698,6 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
        check_spread(cfs_rq, se);
        if (se != cfs_rq->curr)
                __enqueue_entity(cfs_rq, se);
-       account_entity_enqueue(cfs_rq, se);
 }
 
 static void update_avg(u64 *avg, u64 sample)
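
The two hunks above reorder enqueue_entity() so that account_entity_enqueue() runs before place_entity() and __enqueue_entity(), meaning the entity's weight is already reflected in the queue's load when placement happens. A simplified sketch of the bookkeeping that moves, with abbreviated field names (not the kernel structures; the real helper does further bookkeeping such as list maintenance):

struct cfs_rq_sketch {
        unsigned long load_weight;      /* sum of queued entities' weights */
        unsigned long nr_running;       /* number of runnable entities */
};

struct sched_entity_sketch {
        unsigned long load_weight;
};

/* Sketch of the two fields that matter for the ordering change. */
static void account_enqueue_sketch(struct cfs_rq_sketch *cfs_rq,
                                   struct sched_entity_sketch *se)
{
        cfs_rq->load_weight += se->load_weight;
        cfs_rq->nr_running++;
}

int main(void)
{
        struct cfs_rq_sketch rq = { 0, 0 };
        struct sched_entity_sketch se = { 1024 };

        account_enqueue_sketch(&rq, &se);
        /* rq.load_weight == 1024, rq.nr_running == 1 before placement */
        return 0;
}
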
@@ -841,8 +846,10 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
         * queued ticks are scheduled to match the slice, so don't bother
         * validating it and just reschedule.
         */
-       if (queued)
-               return resched_task(rq_of(cfs_rq)->curr);
+       if (queued) {
+               resched_task(rq_of(cfs_rq)->curr);
+               return;
+       }
        /*
         * don't let the period tick interfere with the hrtick preemption
         */
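
The removed line returned the (void) result of resched_task() from a void function; that is valid C++ and accepted by GCC as an extension, but ISO C forbids a return statement with an expression in a function returning void, so the cleanup splits the call from the return. A small illustration with hypothetical stand-in names:

/* resched_sketch() stands in for resched_task(); both return void. */
static void resched_sketch(void)
{
}

static void tick_sketch(int queued)
{
        if (queued) {
                resched_sketch();       /* do the work... */
                return;                 /* ...then return explicitly */
        }
        /* remaining tick processing runs only when !queued */
}

int main(void)
{
        tick_sketch(1);         /* takes the early-return path */
        tick_sketch(0);         /* falls through to normal processing */
        return 0;
}
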
@@ -957,7 +964,7 @@ static void yield_task_fair(struct rq *rq)
                return;
 
        if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) {
-               __update_rq_clock(rq);
+               update_rq_clock(rq);
                /*
                 * Update run-time statistics of the 'current'.
                 */
@@ -1007,7 +1014,7 @@ static int wake_idle(int cpu, struct task_struct *p)
         * sibling runqueue info. This will avoid the checks and cache miss
         * penalties associated with that.
         */
-       if (idle_cpu(cpu) || cpu_rq(cpu)->nr_running > 1)
+       if (idle_cpu(cpu) || cpu_rq(cpu)->cfs.nr_running > 1)
                return cpu;
 
        for_each_domain(cpu, sd) {
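
This hunk narrows the check from the runqueue-wide task count to the fair-class count: rq->nr_running covers tasks of every scheduling class, while cfs.nr_running counts only CFS entities, which is what a fair-class wakeup decision should consult. A trimmed sketch of the distinction (two fields only, not the kernel definitions):

#include <stdio.h>

/*
 * With one RT task and one fair task runnable, nr_running is 2 but
 * cfs.nr_running is 1, so the old test could treat the CPU as busy
 * with fair tasks when it is not.
 */
struct rq_sketch {
        unsigned long nr_running;       /* runnable tasks, all classes */
        struct {
                unsigned long nr_running;       /* fair-class only */
        } cfs;
};

static int stay_put_sketch(const struct rq_sketch *rq, int cpu_is_idle)
{
        return cpu_is_idle || rq->cfs.nr_running > 1;
}

int main(void)
{
        struct rq_sketch rq = { .nr_running = 2, .cfs = { .nr_running = 1 } };

        /* 0: only one fair task here, so go look for an idle sibling */
        printf("stay: %d\n", stay_put_sketch(&rq, 0));
        return 0;
}
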
@@ -1611,30 +1618,6 @@ static const struct sched_class fair_sched_class = {
 };
 
 #ifdef CONFIG_SCHED_DEBUG
-static void
-print_cfs_rq_tasks(struct seq_file *m, struct cfs_rq *cfs_rq, int depth)
-{
-       struct sched_entity *se;
-
-       if (!cfs_rq)
-               return;
-
-       list_for_each_entry_rcu(se, &cfs_rq->tasks, group_node) {
-               int i;
-
-               for (i = depth; i; i--)
-                       seq_puts(m, "  ");
-
-               seq_printf(m, "%lu %s %lu\n",
-                               se->load.weight,
-                               entity_is_task(se) ? "T" : "G",
-                               calc_delta_weight(SCHED_LOAD_SCALE, se)
-                               );
-               if (!entity_is_task(se))
-                       print_cfs_rq_tasks(m, group_cfs_rq(se), depth + 1);
-       }
-}
-
 static void print_cfs_stats(struct seq_file *m, int cpu)
 {
        struct cfs_rq *cfs_rq;
@@ -1642,9 +1625,6 @@ static void print_cfs_stats(struct seq_file *m, int cpu)
        rcu_read_lock();
        for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
                print_cfs_rq(m, cpu, cfs_rq);
-
-       seq_printf(m, "\nWeight tree:\n");
-       print_cfs_rq_tasks(m, &cpu_rq(cpu)->cfs, 1);
        rcu_read_unlock();
 }
 #endif