Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index c62ebae..3547699 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -699,7 +699,8 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
        cfs_rq->nr_running--;
 }
 
-#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_FAIR_GROUP_SCHED
+# ifdef CONFIG_SMP
 static void update_cfs_rq_load_contribution(struct cfs_rq *cfs_rq,
                                            int global_update)
 {
@@ -762,6 +763,51 @@ static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
                list_del_leaf_cfs_rq(cfs_rq);
 }
 
+static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg,
+                               long weight_delta)
+{
+       long load_weight, load, shares;
+
+       load = cfs_rq->load.weight + weight_delta;
+
+       load_weight = atomic_read(&tg->load_weight);
+       load_weight -= cfs_rq->load_contribution;
+       load_weight += load;
+
+       shares = (tg->shares * load);
+       if (load_weight)
+               shares /= load_weight;
+
+       if (shares < MIN_SHARES)
+               shares = MIN_SHARES;
+       if (shares > tg->shares)
+               shares = tg->shares;
+
+       return shares;
+}
+
+static void update_entity_shares_tick(struct cfs_rq *cfs_rq)
+{
+       if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
+               update_cfs_load(cfs_rq, 0);
+               update_cfs_shares(cfs_rq, 0);
+       }
+}
+# else /* CONFIG_SMP */
+static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
+{
+}
+
+static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg,
+                               long weight_delta)
+{
+       return tg->shares;
+}
+
+static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq)
+{
+}
+# endif /* CONFIG_SMP */
 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
                            unsigned long weight)
 {
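
The calc_cfs_shares() helper introduced above hands a group's shares to this runqueue in proportion to its load: shares = tg->shares * local_load / group_load, where group_load is the global tg->load_weight with this cpu's stale load_contribution replaced by its instantaneous load, clamped to [MIN_SHARES, tg->shares]. Below is a minimal userspace sketch of the same arithmetic; the struct names, fields and weights are made up for illustration, and MIN_SHARES is assumed to be 2.

#include <stdio.h>

#define MIN_SHARES 2	/* assumed value, for illustration only */

/* Illustrative stand-ins for the kernel structures (not the real layouts). */
struct tg_sketch   { long shares; long load_weight; };           /* task group   */
struct cfsq_sketch { long load_weight; long load_contribution; };/* per-cpu rq   */

/* Same arithmetic as the new calc_cfs_shares(): proportional, then clamped. */
static long calc_shares_sketch(const struct cfsq_sketch *cfs,
			       const struct tg_sketch *tg, long weight_delta)
{
	long load = cfs->load_weight + weight_delta;	/* local load          */
	long load_weight = tg->load_weight		/* global group load   */
			 - cfs->load_contribution	/* drop stale local    */
			 + load;			/* add current local   */
	long shares = tg->shares * load;

	if (load_weight)
		shares /= load_weight;

	if (shares < MIN_SHARES)
		shares = MIN_SHARES;
	if (shares > tg->shares)
		shares = tg->shares;
	return shares;
}

int main(void)
{
	struct tg_sketch   tg  = { .shares = 1024, .load_weight = 3072 };
	struct cfsq_sketch cfs = { .load_weight = 1024, .load_contribution = 1024 };

	/* This cpu holds 1024 of 3072 total load -> one third of 1024 shares. */
	printf("shares = %ld\n", calc_shares_sketch(&cfs, &tg, 0));	/* 341 */
	return 0;
}

With these made-up numbers the cpu carries a third of the group's load and so receives roughly a third of the 1024 shares; a cpu holding all of the group's load would be clamped to the full tg->shares.
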
@@ -782,7 +828,7 @@ static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta)
 {
        struct task_group *tg;
        struct sched_entity *se;
-       long load_weight, load, shares;
+       long shares;
 
        if (!cfs_rq)
                return;
@@ -791,32 +837,14 @@ static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta)
        se = tg->se[cpu_of(rq_of(cfs_rq))];
        if (!se)
                return;
-
-       load = cfs_rq->load.weight + weight_delta;
-
-       load_weight = atomic_read(&tg->load_weight);
-       load_weight -= cfs_rq->load_contribution;
-       load_weight += load;
-
-       shares = (tg->shares * load);
-       if (load_weight)
-               shares /= load_weight;
-
-       if (shares < MIN_SHARES)
-               shares = MIN_SHARES;
-       if (shares > tg->shares)
-               shares = tg->shares;
+#ifndef CONFIG_SMP
+       if (likely(se->load.weight == tg->shares))
+               return;
+#endif
+       shares = calc_cfs_shares(cfs_rq, tg, weight_delta);
 
        reweight_entity(cfs_rq_of(se), se, shares);
 }
-
-static void update_entity_shares_tick(struct cfs_rq *cfs_rq)
-{
-       if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
-               update_cfs_load(cfs_rq, 0);
-               update_cfs_shares(cfs_rq, 0);
-       }
-}
 #else /* CONFIG_FAIR_GROUP_SCHED */
 static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
 {
@@ -1062,6 +1090,9 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
                struct sched_entity *se = __pick_next_entity(cfs_rq);
                s64 delta = curr->vruntime - se->vruntime;
 
+               if (delta < 0)
+                       return;
+
                if (delta > ideal_runtime)
                        resched_task(rq_of(cfs_rq)->curr);
        }
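
The added early return guards the comparison that follows it: delta is an s64 while ideal_runtime is an unsigned long, so on 64-bit the usual arithmetic conversions turn a negative delta into a huge unsigned value and resched_task() fires spuriously. A tiny standalone illustration of that conversion rule follows; the types and numbers are made up, not the scheduler's.

#include <stdio.h>

int main(void)
{
	long long delta = -5;			/* stands in for s64 delta            */
	unsigned long ideal_runtime = 4000000;	/* stands in for the slice, made up   */

	/*
	 * Usual arithmetic conversions: on LP64 both operands become unsigned,
	 * so -5 compares as a huge value and the branch is taken even though
	 * delta is really negative.
	 */
	if (delta > ideal_runtime)
		printf("spurious: delta compares as %lu\n", (unsigned long)delta);

	/* The guard added in the hunk above catches this case first. */
	if (delta < 0)
		printf("guard: delta is negative, no preemption needed\n");

	return 0;
}
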
@@ -1362,27 +1393,27 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
                return wl;
 
        for_each_sched_entity(se) {
-               long S, rw, s, a, b;
+               long lw, w;
 
-               S = se->my_q->tg->shares;
-               s = se->load.weight;
-               rw = se->my_q->load.weight;
+               tg = se->my_q->tg;
+               w = se->my_q->load.weight;
 
-               a = S*(rw + wl);
-               b = S*rw + s*wg;
+               /* use this cpu's instantaneous contribution */
+               lw = atomic_read(&tg->load_weight);
+               lw -= se->my_q->load_contribution;
+               lw += w + wg;
 
-               wl = s*(a-b);
+               wl += w;
 
-               if (likely(b))
-                       wl /= b;
+               if (lw > 0 && wl < lw)
+                       wl = (wl * tg->shares) / lw;
+               else
+                       wl = tg->shares;
 
-               /*
-                * Assume the group is already running and will
-                * thus already be accounted for in the weight.
-                *
-                * That is, moving shares between CPUs, does not
-                * alter the group weight.
-                */
+               /* zero point is MIN_SHARES */
+               if (wl < MIN_SHARES)
+                       wl = MIN_SHARES;
+               wl -= se->load.weight;
                wg = 0;
        }
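
With this rework, effective_load() walks up the hierarchy estimating, per level, how this cpu's group entity weight would change if wl extra weight ran here: lw is the group's global weight with this cpu's instantaneous contribution substituted in, the entity's prospective weight is tg->shares scaled by (local weight + wl) / lw, and the difference from the entity's current weight becomes the wl seen at the next level (wg is counted only once, at the bottom). Below is a standalone sketch of a single level with made-up weights; all names and values here are illustrative.

#include <stdio.h>

#define MIN_SHARES 2	/* assumed value, for illustration only */

/* One level of the new effective_load() arithmetic, with made-up inputs. */
int main(void)
{
	long tg_shares         = 1024;	/* tg->shares                           */
	long tg_load_weight    = 3072;	/* atomic_read(&tg->load_weight)        */
	long load_contribution = 1024;	/* this cpu's last published share      */
	long w                 = 1024;	/* se->my_q->load.weight (local queue)  */
	long se_weight         = 341;	/* se->load.weight (current entity)     */

	long wl = 512;			/* weight of the task we might move here */
	long wg = 512;			/* group weight added along with it      */

	/* use this cpu's instantaneous contribution */
	long lw = tg_load_weight - load_contribution + w + wg;	/* 3584 */

	wl += w;						/* 1536 */

	if (lw > 0 && wl < lw)
		wl = (wl * tg_shares) / lw;			/* 438  */
	else
		wl = tg_shares;

	/* zero point is MIN_SHARES */
	if (wl < MIN_SHARES)
		wl = MIN_SHARES;

	wl -= se_weight;					/* +97  */

	printf("parent-level weight delta: %ld\n", wl);
	return 0;
}

So moving a 512-weight task to this cpu raises the group entity's weight from 341 to 438 in this sketch, and the parent level sees a delta of 97 rather than the full 512.
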