Pull asus into release branch
diff --git a/kernel/sched.c b/kernel/sched.c
index 1fd67e1..b9a6837 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1853,6 +1853,13 @@ context_switch(struct rq *rq, struct task_struct *prev,
        struct mm_struct *mm = next->mm;
        struct mm_struct *oldmm = prev->active_mm;
 
+       /*
+        * For paravirt, this is coupled with an exit in switch_to to
+        * combine the page table reload and the switch backend into
+        * one hypercall.
+        */
+       arch_enter_lazy_cpu_mode();
+
        if (!mm) {
                next->active_mm = oldmm;
                atomic_inc(&oldmm->mm_count);
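
The comment added above describes the paravirt lazy-mode batching: work issued between arch_enter_lazy_cpu_mode() and the matching exit in switch_to can be queued and flushed as a single hypercall instead of trapping per operation. A rough standalone sketch of that idea (toy names and a stand-in hypercall, not the kernel's paravirt interface):

/*
 * Toy model of lazy-mode batching (not the kernel's paravirt API): while
 * "lazy", privileged operations are queued instead of trapping into the
 * hypervisor one by one; leaving lazy mode flushes the whole batch with a
 * single stand-in hypercall.
 */
#include <stdio.h>

static int lazy;
static int batched;

static void hypercall(int nr_ops)        /* stand-in for a real hypercall */
{
        printf("hypercall: flushing %d op(s)\n", nr_ops);
}

static void lazy_enter(void)             /* cf. arch_enter_lazy_cpu_mode() */
{
        lazy = 1;
}

static void lazy_leave(void)             /* cf. the exit in switch_to */
{
        if (batched)
                hypercall(batched);
        lazy = 0;
        batched = 0;
}

static void privileged_op(const char *what)
{
        if (lazy) {
                batched++;               /* deferred, no trap yet */
                printf("queued: %s\n", what);
        } else {
                hypercall(1);            /* immediate, one trap per op */
        }
}

int main(void)
{
        lazy_enter();
        privileged_op("page table reload");
        privileged_op("cpu state switch");
        lazy_leave();                    /* both ops leave in one hypercall */
        return 0;
}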
@@ -2897,14 +2904,16 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
 static void update_load(struct rq *this_rq)
 {
        unsigned long this_load;
-       int i, scale;
+       unsigned int i, scale;
 
        this_load = this_rq->raw_weighted_load;
 
        /* Update our load: */
-       for (i = 0, scale = 1; i < 3; i++, scale <<= 1) {
+       for (i = 0, scale = 1; i < 3; i++, scale += scale) {
                unsigned long old_load, new_load;
 
+               /* scale is effectively 1 << i now, and >> i divides by scale */
+
                old_load = this_rq->cpu_load[i];
                new_load = this_load;
                /*
@@ -2914,7 +2923,7 @@ static void update_load(struct rq *this_rq)
                 */
                if (new_load > old_load)
                        new_load += scale-1;
-               this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) / scale;
+               this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i;
        }
 }
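
For reference, the arithmetic in the rewritten loop is an exponentially decaying average: with scale == 1 << i the old value keeps weight (scale-1)/scale, the new sample gets weight 1/scale, and the right shift by i is exactly the division by scale. A small user-space sketch (made-up values, not kernel code) showing how the round-up keeps the average from sticking just below a constant load:

#include <stdio.h>

/* Same formula as the cpu_load[i] update above, with scale == 1UL << i. */
static unsigned long decay(unsigned long old_load, unsigned long new_load,
                           unsigned int i)
{
        unsigned long scale = 1UL << i;

        if (new_load > old_load)         /* round up while load is rising */
                new_load += scale - 1;
        return (old_load * (scale - 1) + new_load) >> i;
}

int main(void)
{
        unsigned long load = 0;
        int tick;

        /* i == 2 (scale == 4): a jump from 0 to a steady load of 10 is
         * absorbed over a few ticks and actually reaches 10 thanks to the
         * round-up (3, 5, 7, 8, 9, 10). */
        for (tick = 0; tick < 6; tick++) {
                load = decay(load, 10, 2);
                printf("tick %d: load = %lu\n", tick, load);
        }
        return 0;
}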
 
@@ -2997,23 +3006,6 @@ static inline void idle_balance(int cpu, struct rq *rq)
 }
 #endif
 
-static inline void wake_priority_sleeper(struct rq *rq)
-{
-#ifdef CONFIG_SCHED_SMT
-       if (!rq->nr_running)
-               return;
-
-       spin_lock(&rq->lock);
-       /*
-        * If an SMT sibling task has been put to sleep for priority
-        * reasons reschedule the idle task to see if it can now run.
-        */
-       if (rq->nr_running)
-               resched_task(rq->idle);
-       spin_unlock(&rq->lock);
-#endif
-}
-
 DEFINE_PER_CPU(struct kernel_stat, kstat);
 
 EXPORT_PER_CPU_SYMBOL(kstat);
@@ -3230,10 +3222,7 @@ void scheduler_tick(void)
 
        update_cpu_clock(p, rq, now);
 
-       if (p == rq->idle)
-               /* Task on the idle queue */
-               wake_priority_sleeper(rq);
-       else
+       if (p != rq->idle)
                task_running_tick(rq, p);
 #ifdef CONFIG_SMP
        update_load(rq);
@@ -3242,136 +3231,6 @@ void scheduler_tick(void)
 #endif
 }
 
-#ifdef CONFIG_SCHED_SMT
-static inline void wakeup_busy_runqueue(struct rq *rq)
-{
-       /* If an SMT runqueue is sleeping due to priority reasons wake it up */
-       if (rq->curr == rq->idle && rq->nr_running)
-               resched_task(rq->idle);
-}
-
-/*
- * Called with interrupt disabled and this_rq's runqueue locked.
- */
-static void wake_sleeping_dependent(int this_cpu)
-{
-       struct sched_domain *tmp, *sd = NULL;
-       int i;
-
-       for_each_domain(this_cpu, tmp) {
-               if (tmp->flags & SD_SHARE_CPUPOWER) {
-                       sd = tmp;
-                       break;
-               }
-       }
-
-       if (!sd)
-               return;
-
-       for_each_cpu_mask(i, sd->span) {
-               struct rq *smt_rq = cpu_rq(i);
-
-               if (i == this_cpu)
-                       continue;
-               if (unlikely(!spin_trylock(&smt_rq->lock)))
-                       continue;
-
-               wakeup_busy_runqueue(smt_rq);
-               spin_unlock(&smt_rq->lock);
-       }
-}
-
-/*
- * number of 'lost' timeslices this task wont be able to fully
- * utilize, if another task runs on a sibling. This models the
- * slowdown effect of other tasks running on siblings:
- */
-static inline unsigned long
-smt_slice(struct task_struct *p, struct sched_domain *sd)
-{
-       return p->time_slice * (100 - sd->per_cpu_gain) / 100;
-}
-
-/*
- * To minimise lock contention and not have to drop this_rq's runlock we only
- * trylock the sibling runqueues and bypass those runqueues if we fail to
- * acquire their lock. As we only trylock the normal locking order does not
- * need to be obeyed.
- */
-static int
-dependent_sleeper(int this_cpu, struct rq *this_rq, struct task_struct *p)
-{
-       struct sched_domain *tmp, *sd = NULL;
-       int ret = 0, i;
-
-       /* kernel/rt threads do not participate in dependent sleeping */
-       if (!p->mm || rt_task(p))
-               return 0;
-
-       for_each_domain(this_cpu, tmp) {
-               if (tmp->flags & SD_SHARE_CPUPOWER) {
-                       sd = tmp;
-                       break;
-               }
-       }
-
-       if (!sd)
-               return 0;
-
-       for_each_cpu_mask(i, sd->span) {
-               struct task_struct *smt_curr;
-               struct rq *smt_rq;
-
-               if (i == this_cpu)
-                       continue;
-
-               smt_rq = cpu_rq(i);
-               if (unlikely(!spin_trylock(&smt_rq->lock)))
-                       continue;
-
-               smt_curr = smt_rq->curr;
-
-               if (!smt_curr->mm)
-                       goto unlock;
-
-               /*
-                * If a user task with lower static priority than the
-                * running task on the SMT sibling is trying to schedule,
-                * delay it till there is proportionately less timeslice
-                * left of the sibling task to prevent a lower priority
-                * task from using an unfair proportion of the
-                * physical cpu's resources. -ck
-                */
-               if (rt_task(smt_curr)) {
-                       /*
-                        * With real time tasks we run non-rt tasks only
-                        * per_cpu_gain% of the time.
-                        */
-                       if ((jiffies % DEF_TIMESLICE) >
-                               (sd->per_cpu_gain * DEF_TIMESLICE / 100))
-                                       ret = 1;
-               } else {
-                       if (smt_curr->static_prio < p->static_prio &&
-                               !TASK_PREEMPTS_CURR(p, smt_rq) &&
-                               smt_slice(smt_curr, sd) > task_timeslice(p))
-                                       ret = 1;
-               }
-unlock:
-               spin_unlock(&smt_rq->lock);
-       }
-       return ret;
-}
-#else
-static inline void wake_sleeping_dependent(int this_cpu)
-{
-}
-static inline int
-dependent_sleeper(int this_cpu, struct rq *this_rq, struct task_struct *p)
-{
-       return 0;
-}
-#endif
-
 #if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT)
 
 void fastcall add_preempt_count(int val)
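
The arithmetic behind the removed SMT-nice logic is worth spelling out: smt_slice() estimated the number of timeslice ticks a task cannot fully use while a sibling thread is busy, as time_slice * (100 - per_cpu_gain) / 100. A standalone illustration (the 100-tick slice and per_cpu_gain of 25 are example values only, not asserted kernel defaults):

#include <stdio.h>

/* Same arithmetic as the removed smt_slice(): the share of a timeslice a
 * task effectively loses to a busy SMT sibling. */
static unsigned long smt_slice(unsigned long time_slice,
                               unsigned int per_cpu_gain)
{
        return time_slice * (100 - per_cpu_gain) / 100;
}

int main(void)
{
        /* With a 100-tick slice and per_cpu_gain of 25, 75 ticks count as
         * "lost" to the sibling; dependent_sleeper() compared that against
         * the wakee's own timeslice before letting it run. */
        printf("lost ticks: %lu\n", smt_slice(100, 25));
        return 0;
}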
@@ -3498,7 +3357,6 @@ need_resched_nonpreemptible:
                if (!rq->nr_running) {
                        next = rq->idle;
                        rq->expired_timestamp = 0;
-                       wake_sleeping_dependent(cpu);
                        goto switch_tasks;
                }
        }
@@ -3538,8 +3396,6 @@ need_resched_nonpreemptible:
                }
        }
        next->sleep_type = SLEEP_NORMAL;
-       if (dependent_sleeper(cpu, rq, next))
-               next = rq->idle;
 switch_tasks:
        if (next == rq->idle)
                schedstat_inc(rq, sched_goidle);
@@ -3557,7 +3413,7 @@ switch_tasks:
 
        sched_info_switch(prev, next);
        if (likely(prev != next)) {
-               next->timestamp = now;
+               next->timestamp = next->last_ran = now;
                rq->nr_switches++;
                rq->curr = next;
                ++*switch_count;
@@ -4831,32 +4687,10 @@ out_unlock:
        return retval;
 }
 
-static inline struct task_struct *eldest_child(struct task_struct *p)
-{
-       if (list_empty(&p->children))
-               return NULL;
-       return list_entry(p->children.next,struct task_struct,sibling);
-}
-
-static inline struct task_struct *older_sibling(struct task_struct *p)
-{
-       if (p->sibling.prev==&p->parent->children)
-               return NULL;
-       return list_entry(p->sibling.prev,struct task_struct,sibling);
-}
-
-static inline struct task_struct *younger_sibling(struct task_struct *p)
-{
-       if (p->sibling.next==&p->parent->children)
-               return NULL;
-       return list_entry(p->sibling.next,struct task_struct,sibling);
-}
-
 static const char stat_nam[] = "RSDTtZX";
 
 static void show_task(struct task_struct *p)
 {
-       struct task_struct *relative;
        unsigned long free = 0;
        unsigned state;
 
@@ -4882,19 +4716,7 @@ static void show_task(struct task_struct *p)
                free = (unsigned long)n - (unsigned long)end_of_stack(p);
        }
 #endif
-       printk("%5lu %5d %6d ", free, p->pid, p->parent->pid);
-       if ((relative = eldest_child(p)))
-               printk("%5d ", relative->pid);
-       else
-               printk("      ");
-       if ((relative = younger_sibling(p)))
-               printk("%7d", relative->pid);
-       else
-               printk("       ");
-       if ((relative = older_sibling(p)))
-               printk(" %5d", relative->pid);
-       else
-               printk("      ");
+       printk("%5lu %5d %6d", free, p->pid, p->parent->pid);
        if (!p->mm)
                printk(" (L-TLB)\n");
        else