Merge commit 'v2.6.30-rc1' into sched/urgent
author    Ingo Molnar <mingo@elte.hu>
          Wed, 8 Apr 2009 15:25:42 +0000 (17:25 +0200)
committer Ingo Molnar <mingo@elte.hu>
          Wed, 8 Apr 2009 15:26:00 +0000 (17:26 +0200)
Merge reason: update to latest upstream to queue up fix

Signed-off-by: Ingo Molnar <mingo@elte.hu>
kernel/posix-cpu-timers.c
kernel/sched.c
kernel/sched_rt.c

diff --combined kernel/posix-cpu-timers.c
@@@ -224,12 -224,77 +224,77 @@@ static int cpu_clock_sample(const clock
                cpu->cpu = virt_ticks(p);
                break;
        case CPUCLOCK_SCHED:
 -              cpu->sched = p->se.sum_exec_runtime + task_delta_exec(p);
 +              cpu->sched = task_sched_runtime(p);
                break;
        }
        return 0;
  }
  
+ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
+ {
+       struct sighand_struct *sighand;
+       struct signal_struct *sig;
+       struct task_struct *t;
+       *times = INIT_CPUTIME;
+       rcu_read_lock();
+       sighand = rcu_dereference(tsk->sighand);
+       if (!sighand)
+               goto out;
+       sig = tsk->signal;
+       t = tsk;
+       do {
+               times->utime = cputime_add(times->utime, t->utime);
+               times->stime = cputime_add(times->stime, t->stime);
+               times->sum_exec_runtime += t->se.sum_exec_runtime;
+               t = next_thread(t);
+       } while (t != tsk);
+       times->utime = cputime_add(times->utime, sig->utime);
+       times->stime = cputime_add(times->stime, sig->stime);
+       times->sum_exec_runtime += sig->sum_sched_runtime;
+ out:
+       rcu_read_unlock();
+ }
+ static void update_gt_cputime(struct task_cputime *a, struct task_cputime *b)
+ {
+       if (cputime_gt(b->utime, a->utime))
+               a->utime = b->utime;
+       if (cputime_gt(b->stime, a->stime))
+               a->stime = b->stime;
+       if (b->sum_exec_runtime > a->sum_exec_runtime)
+               a->sum_exec_runtime = b->sum_exec_runtime;
+ }
+ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
+ {
+       struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
+       struct task_cputime sum;
+       unsigned long flags;
+       spin_lock_irqsave(&cputimer->lock, flags);
+       if (!cputimer->running) {
+               cputimer->running = 1;
+               /*
+                * The POSIX timer interface allows for absolute time expiry
+                * values through the TIMER_ABSTIME flag, therefore we have
+                * to synchronize the timer to the clock every time we start
+                * it.
+                */
+               thread_group_cputime(tsk, &sum);
+               update_gt_cputime(&cputimer->cputime, &sum);
+       }
+       *times = cputimer->cputime;
+       spin_unlock_irqrestore(&cputimer->lock, flags);
+ }
  /*
   * Sample a process (thread group) clock for the given group_leader task.
   * Must be called with tasklist_lock held for reading.
@@@ -240,19 -305,18 +305,19 @@@ static int cpu_clock_sample_group(cons
  {
        struct task_cputime cputime;
  
 -      thread_group_cputime(p, &cputime);
        switch (CPUCLOCK_WHICH(which_clock)) {
        default:
                return -EINVAL;
        case CPUCLOCK_PROF:
 +              thread_group_cputime(p, &cputime);
                cpu->cpu = cputime_add(cputime.utime, cputime.stime);
                break;
        case CPUCLOCK_VIRT:
 +              thread_group_cputime(p, &cputime);
                cpu->cpu = cputime.utime;
                break;
        case CPUCLOCK_SCHED:
 -              cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p);
 +              cpu->sched = thread_group_sched_runtime(p);
                break;
        }
        return 0;
@@@ -458,7 -522,7 +523,7 @@@ void posix_cpu_timers_exit_group(struc
  {
        struct task_cputime cputime;
  
-       thread_group_cputime(tsk, &cputime);
+       thread_group_cputimer(tsk, &cputime);
        cleanup_timers(tsk->signal->cpu_timers,
                       cputime.utime, cputime.stime, cputime.sum_exec_runtime);
  }
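
Illustration, not part of the patch: the invariant the sampling code above relies on (a process, i.e. thread-group, clock being the sum of its threads' clocks) is visible from user space through the POSIX CPU clocks. A minimal sketch, assuming pthreads and clock_gettime():

#include <pthread.h>
#include <stdio.h>
#include <time.h>

static void *spin(void *arg)
{
	struct timespec ts;

	(void)arg;
	/* burn a little CPU in this thread */
	for (volatile long i = 0; i < 50000000L; i++)
		;
	clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts);
	printf("thread  cputime: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
	return NULL;
}

int main(void)
{
	pthread_t t;
	struct timespec ts;

	pthread_create(&t, NULL, spin, NULL);
	pthread_join(t, NULL);

	/* the process clock aggregates all threads, as thread_group_cputime() does */
	clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);
	printf("process cputime: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}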
@@@ -616,6 -680,33 +681,33 @@@ static void cpu_timer_fire(struct k_iti
        }
  }
  
+ /*
+  * Sample a process (thread group) timer for the given group_leader task.
+  * Must be called with tasklist_lock held for reading.
+  */
+ static int cpu_timer_sample_group(const clockid_t which_clock,
+                                 struct task_struct *p,
+                                 union cpu_time_count *cpu)
+ {
+       struct task_cputime cputime;
+       thread_group_cputimer(p, &cputime);
+       switch (CPUCLOCK_WHICH(which_clock)) {
+       default:
+               return -EINVAL;
+       case CPUCLOCK_PROF:
+               cpu->cpu = cputime_add(cputime.utime, cputime.stime);
+               break;
+       case CPUCLOCK_VIRT:
+               cpu->cpu = cputime.utime;
+               break;
+       case CPUCLOCK_SCHED:
+               cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p);
+               break;
+       }
+       return 0;
+ }
  /*
   * Guts of sys_timer_settime for CPU timers.
   * This is called with the timer locked and interrupts disabled.
@@@ -677,7 -768,7 +769,7 @@@ int posix_cpu_timer_set(struct k_itime
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &val);
        } else {
-               cpu_clock_sample_group(timer->it_clock, p, &val);
+               cpu_timer_sample_group(timer->it_clock, p, &val);
        }
  
        if (old) {
@@@ -825,7 -916,7 +917,7 @@@ void posix_cpu_timer_get(struct k_itime
                        read_unlock(&tasklist_lock);
                        goto dead;
                } else {
-                       cpu_clock_sample_group(timer->it_clock, p, &now);
+                       cpu_timer_sample_group(timer->it_clock, p, &now);
                        clear_dead = (unlikely(p->exit_state) &&
                                      thread_group_empty(p));
                }
@@@ -965,6 -1056,19 +1057,19 @@@ static void check_thread_timers(struct 
        }
  }
  
+ static void stop_process_timers(struct task_struct *tsk)
+ {
+       struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
+       unsigned long flags;
+       if (!cputimer->running)
+               return;
+       spin_lock_irqsave(&cputimer->lock, flags);
+       cputimer->running = 0;
+       spin_unlock_irqrestore(&cputimer->lock, flags);
+ }
  /*
   * Check for any per-thread CPU timers that have fired and move them
   * off the tsk->*_timers list onto the firing list.  Per-thread timers
@@@ -988,13 -1092,15 +1093,15 @@@ static void check_process_timers(struc
            sig->rlim[RLIMIT_CPU].rlim_cur == RLIM_INFINITY &&
            list_empty(&timers[CPUCLOCK_VIRT]) &&
            cputime_eq(sig->it_virt_expires, cputime_zero) &&
-           list_empty(&timers[CPUCLOCK_SCHED]))
+           list_empty(&timers[CPUCLOCK_SCHED])) {
+               stop_process_timers(tsk);
                return;
+       }
  
        /*
         * Collect the current process totals.
         */
-       thread_group_cputime(tsk, &cputime);
+       thread_group_cputimer(tsk, &cputime);
        utime = cputime.utime;
        ptime = cputime_add(utime, cputime.stime);
        sum_sched_runtime = cputime.sum_exec_runtime;
@@@ -1165,7 -1271,7 +1272,7 @@@ void posix_cpu_timer_schedule(struct k_
                        clear_dead_task(timer, now);
                        goto out_unlock;
                }
-               cpu_clock_sample_group(timer->it_clock, p, &now);
+               cpu_timer_sample_group(timer->it_clock, p, &now);
                bump_cpu_timer(timer, now);
                /* Leave the tasklist_lock locked for the call below.  */
        }
@@@ -1260,11 -1366,12 +1367,12 @@@ static inline int fastpath_timer_check(
        if (!task_cputime_zero(&sig->cputime_expires)) {
                struct task_cputime group_sample;
  
-               thread_group_cputime(tsk, &group_sample);
+               thread_group_cputimer(tsk, &group_sample);
                if (task_cputime_expired(&group_sample, &sig->cputime_expires))
                        return 1;
        }
-       return 0;
+       return sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY;
  }
  
  /*
@@@ -1342,7 -1449,7 +1450,7 @@@ void set_process_cpu_timer(struct task_
        struct list_head *head;
  
        BUG_ON(clock_idx == CPUCLOCK_SCHED);
-       cpu_clock_sample_group(clock_idx, tsk, &now);
+       cpu_timer_sample_group(clock_idx, tsk, &now);
  
        if (oldval) {
                if (!cputime_eq(*oldval, cputime_zero)) {
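
Illustration, not part of the patch: a simplified user-space sketch of the caching pattern thread_group_cputimer() introduces above (a per-group lock, a "running" flag, and a monotonic merge on first use). The pthread mutex and the get_group_totals() helper are stand-ins for illustration, not kernel interfaces:

#include <pthread.h>
#include <string.h>

struct cputime_sample {
	unsigned long long utime, stime, sum_exec;
};

struct group_cputimer {
	pthread_mutex_t lock;
	int running;			/* is a CPU timer armed for this group? */
	struct cputime_sample cache;	/* incrementally maintained totals */
};

/* Stand-in for the expensive full walk that thread_group_cputime() performs. */
static void get_group_totals(struct cputime_sample *out)
{
	memset(out, 0, sizeof(*out));	/* placeholder only */
}

/* Keep the cache monotonic, in the spirit of update_gt_cputime() above. */
static void merge_gt(struct cputime_sample *a, const struct cputime_sample *b)
{
	if (b->utime > a->utime)
		a->utime = b->utime;
	if (b->stime > a->stime)
		a->stime = b->stime;
	if (b->sum_exec > a->sum_exec)
		a->sum_exec = b->sum_exec;
}

static void group_cputimer_sample(struct group_cputimer *ct,
				  struct cputime_sample *out)
{
	pthread_mutex_lock(&ct->lock);
	if (!ct->running) {
		struct cputime_sample sum;

		/* first use: resynchronize the cache with a full pass */
		ct->running = 1;
		get_group_totals(&sum);
		merge_gt(&ct->cache, &sum);
	}
	*out = ct->cache;		/* cheap path while the timer stays armed */
	pthread_mutex_unlock(&ct->lock);
}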
diff --combined kernel/sched.c
@@@ -223,7 -223,7 +223,7 @@@ static void start_rt_bandwidth(struct r
  {
        ktime_t now;
  
-       if (rt_bandwidth_enabled() && rt_b->rt_runtime == RUNTIME_INF)
+       if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
                return;
  
        if (hrtimer_active(&rt_b->rt_period_timer))
  
        spin_lock(&rt_b->rt_runtime_lock);
        for (;;) {
+               unsigned long delta;
+               ktime_t soft, hard;
                if (hrtimer_active(&rt_b->rt_period_timer))
                        break;
  
                now = hrtimer_cb_get_time(&rt_b->rt_period_timer);
                hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period);
-               hrtimer_start_expires(&rt_b->rt_period_timer,
-                               HRTIMER_MODE_ABS);
+               soft = hrtimer_get_softexpires(&rt_b->rt_period_timer);
+               hard = hrtimer_get_expires(&rt_b->rt_period_timer);
+               delta = ktime_to_ns(ktime_sub(hard, soft));
+               __hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
+                               HRTIMER_MODE_ABS, 0);
        }
        spin_unlock(&rt_b->rt_runtime_lock);
  }
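
Illustration, not part of the patch: the range-start call above passes a soft expiry plus a slack delta instead of a single deadline. A trivial sketch of that arithmetic, with a plain nanosecond count standing in for ktime_t:

#include <stdint.h>

typedef int64_t ns_t;	/* stand-in for ktime_t, in nanoseconds */

struct timer_range {
	ns_t soft;	/* earliest acceptable expiry */
	ns_t delta;	/* slack: hard - soft, as passed to the start call */
};

static struct timer_range make_range(ns_t soft, ns_t hard)
{
	struct timer_range r = { soft, hard - soft };
	return r;
}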
@@@ -331,6 -338,13 +338,13 @@@ static DEFINE_PER_CPU(struct rt_rq, ini
   */
  static DEFINE_SPINLOCK(task_group_lock);
  
+ #ifdef CONFIG_SMP
+ static int root_task_group_empty(void)
+ {
+       return list_empty(&root_task_group.children);
+ }
+ #endif
  #ifdef CONFIG_FAIR_GROUP_SCHED
  #ifdef CONFIG_USER_SCHED
  # define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD)
@@@ -391,6 -405,13 +405,13 @@@ static inline void set_task_rq(struct t
  
  #else
  
+ #ifdef CONFIG_SMP
+ static int root_task_group_empty(void)
+ {
+       return 1;
+ }
+ #endif
  static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
  static inline struct task_group *task_group(struct task_struct *p)
  {
@@@ -467,11 -488,17 +488,17 @@@ struct rt_rq 
        struct rt_prio_array active;
        unsigned long rt_nr_running;
  #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
-       int highest_prio; /* highest queued rt task prio */
+       struct {
+               int curr; /* highest queued rt task prio */
+ #ifdef CONFIG_SMP
+               int next; /* next highest */
+ #endif
+       } highest_prio;
  #endif
  #ifdef CONFIG_SMP
        unsigned long rt_nr_migratory;
        int overloaded;
+       struct plist_head pushable_tasks;
  #endif
        int rt_throttled;
        u64 rt_time;
@@@ -549,7 -576,6 +576,6 @@@ struct rq 
        unsigned long nr_running;
        #define CPU_LOAD_IDX_MAX 5
        unsigned long cpu_load[CPU_LOAD_IDX_MAX];
-       unsigned char idle_at_tick;
  #ifdef CONFIG_NO_HZ
        unsigned long last_tick_seen;
        unsigned char in_nohz_recently;
        struct root_domain *rd;
        struct sched_domain *sd;
  
+       unsigned char idle_at_tick;
        /* For active balancing */
        int active_balance;
        int push_cpu;
        /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
  
        /* sys_sched_yield() stats */
-       unsigned int yld_exp_empty;
-       unsigned int yld_act_empty;
-       unsigned int yld_both_empty;
        unsigned int yld_count;
  
        /* schedule() stats */
@@@ -1093,7 -1117,7 +1117,7 @@@ static void hrtick_start(struct rq *rq
        if (rq == this_rq()) {
                hrtimer_restart(timer);
        } else if (!rq->hrtick_csd_pending) {
-               __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd);
+               __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
                rq->hrtick_csd_pending = 1;
        }
  }
@@@ -1129,7 -1153,8 +1153,8 @@@ static __init void init_hrtick(void
   */
  static void hrtick_start(struct rq *rq, u64 delay)
  {
-       hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay), HRTIMER_MODE_REL);
+       __hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
+                       HRTIMER_MODE_REL, 0);
  }
  
  static inline void init_hrtick(void)
@@@ -1183,10 -1208,10 +1208,10 @@@ static void resched_task(struct task_st
  
        assert_spin_locked(&task_rq(p)->lock);
  
-       if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED)))
+       if (test_tsk_need_resched(p))
                return;
  
-       set_tsk_thread_flag(p, TIF_NEED_RESCHED);
+       set_tsk_need_resched(p);
  
        cpu = task_cpu(p);
        if (cpu == smp_processor_id())
@@@ -1242,7 -1267,7 +1267,7 @@@ void wake_up_idle_cpu(int cpu
         * lockless. The worst case is that the other CPU runs the
         * idle task through an additional NOOP schedule()
         */
-       set_tsk_thread_flag(rq->idle, TIF_NEED_RESCHED);
+       set_tsk_need_resched(rq->idle);
  
        /* NEED_RESCHED must be visible before we test polling */
        smp_mb();
@@@ -1393,22 -1418,10 +1418,22 @@@ iter_move_one_task(struct rq *this_rq, 
                   struct rq_iterator *iterator);
  #endif
  
 +/* Time spent by the tasks of the cpu accounting group executing in ... */
 +enum cpuacct_stat_index {
 +      CPUACCT_STAT_USER,      /* ... user mode */
 +      CPUACCT_STAT_SYSTEM,    /* ... kernel mode */
 +
 +      CPUACCT_STAT_NSTATS,
 +};
 +
  #ifdef CONFIG_CGROUP_CPUACCT
  static void cpuacct_charge(struct task_struct *tsk, u64 cputime);
 +static void cpuacct_update_stats(struct task_struct *tsk,
 +              enum cpuacct_stat_index idx, cputime_t val);
  #else
  static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
 +static inline void cpuacct_update_stats(struct task_struct *tsk,
 +              enum cpuacct_stat_index idx, cputime_t val) {}
  #endif
  
  static inline void inc_cpu_load(struct rq *rq, unsigned long load)
@@@ -1622,21 -1635,42 +1647,42 @@@ static inline void update_shares_locked
  
  #endif
  
+ #ifdef CONFIG_PREEMPT
  /*
-  * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
+  * fair double_lock_balance: Safely acquires both rq->locks in a fair
+  * way at the expense of forcing extra atomic operations in all
+  * invocations.  This assures that the double_lock is acquired using the
+  * same underlying policy as the spinlock_t on this architecture, which
+  * reduces latency compared to the unfair variant below.  However, it
+  * also adds more overhead and therefore may reduce throughput.
   */
- static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
+ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
+       __releases(this_rq->lock)
+       __acquires(busiest->lock)
+       __acquires(this_rq->lock)
+ {
+       spin_unlock(&this_rq->lock);
+       double_rq_lock(this_rq, busiest);
+       return 1;
+ }
+ #else
+ /*
+  * Unfair double_lock_balance: Optimizes throughput at the expense of
+  * latency by eliminating extra atomic operations when the locks are
+  * already in proper order on entry.  This favors lower cpu-ids and will
+  * grant the double lock to lower cpus over higher ids under contention,
+  * regardless of entry order into the function.
+  */
+ static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
        __releases(this_rq->lock)
        __acquires(busiest->lock)
        __acquires(this_rq->lock)
  {
        int ret = 0;
  
-       if (unlikely(!irqs_disabled())) {
-               /* printk() doesn't work good under rq->lock */
-               spin_unlock(&this_rq->lock);
-               BUG_ON(1);
-       }
        if (unlikely(!spin_trylock(&busiest->lock))) {
                if (busiest < this_rq) {
                        spin_unlock(&this_rq->lock);
        return ret;
  }
  
+ #endif /* CONFIG_PREEMPT */
+ /*
+  * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
+  */
+ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
+ {
+       if (unlikely(!irqs_disabled())) {
+               /* printk() doesn't work good under rq->lock */
+               spin_unlock(&this_rq->lock);
+               BUG_ON(1);
+       }
+       return _double_lock_balance(this_rq, busiest);
+ }
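
Illustration, not part of the patch: a user-space analogy of the lock-ordering trick the unfair _double_lock_balance() above uses, with pthread mutexes standing in for the runqueue spinlocks:

#include <pthread.h>

/*
 * Take "busiest" while already holding this_lock, without deadlocking:
 * if the trylock fails and the locks are in the wrong address order,
 * drop ours and retake both in ascending order.  Returns 1 when
 * this_lock was released and reacquired (the caller's state may be stale).
 */
static int double_lock(pthread_mutex_t *this_lock, pthread_mutex_t *busiest)
{
	int ret = 0;

	if (pthread_mutex_trylock(busiest) != 0) {
		if (busiest < this_lock) {
			pthread_mutex_unlock(this_lock);
			pthread_mutex_lock(busiest);
			pthread_mutex_lock(this_lock);
			ret = 1;
		} else {
			pthread_mutex_lock(busiest);
		}
	}
	return ret;
}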
  static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
        __releases(busiest->lock)
  {
@@@ -1717,6 -1767,9 +1779,9 @@@ static void update_avg(u64 *avg, u64 sa
  
  static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup)
  {
+       if (wakeup)
+               p->se.start_runtime = p->se.sum_exec_runtime;
        sched_info_queued(p);
        p->sched_class->enqueue_task(rq, p, wakeup);
        p->se.on_rq = 1;
  
  static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep)
  {
-       if (sleep && p->se.last_wakeup) {
-               update_avg(&p->se.avg_overlap,
-                          p->se.sum_exec_runtime - p->se.last_wakeup);
-               p->se.last_wakeup = 0;
+       if (sleep) {
+               if (p->se.last_wakeup) {
+                       update_avg(&p->se.avg_overlap,
+                               p->se.sum_exec_runtime - p->se.last_wakeup);
+                       p->se.last_wakeup = 0;
+               } else {
+                       update_avg(&p->se.avg_wakeup,
+                               sysctl_sched_wakeup_granularity);
+               }
        }
  
        sched_info_dequeued(p);
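
Illustration, not part of the patch: avg_overlap and avg_wakeup are folded through update_avg(), whose body lies outside this hunk. A hedged sketch of that kind of running average; the 1/8 weighting here is an assumption for illustration only:

#include <stdint.h>

static void running_avg(int64_t *avg, int64_t sample)
{
	/* move a fraction of the way toward the new sample */
	*avg += (sample - *avg) / 8;
}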
@@@ -2029,7 -2087,7 +2099,7 @@@ unsigned long wait_task_inactive(struc
                 * it must be off the runqueue _entirely_, and not
                 * preempted!
                 *
-                * So if it wa still runnable (but just not actively
+                * So if it was still runnable (but just not actively
                 * running right now), it's preempted, and we should
                 * yield - it could be a while.
                 */
@@@ -2278,18 -2336,8 +2348,8 @@@ static int try_to_wake_up(struct task_s
        if (!sched_feat(SYNC_WAKEUPS))
                sync = 0;
  
-       if (!sync) {
-               if (current->se.avg_overlap < sysctl_sched_migration_cost &&
-                         p->se.avg_overlap < sysctl_sched_migration_cost)
-                       sync = 1;
-       } else {
-               if (current->se.avg_overlap >= sysctl_sched_migration_cost ||
-                         p->se.avg_overlap >= sysctl_sched_migration_cost)
-                       sync = 0;
-       }
  #ifdef CONFIG_SMP
-       if (sched_feat(LB_WAKEUP_UPDATE)) {
+       if (sched_feat(LB_WAKEUP_UPDATE) && !root_task_group_empty()) {
                struct sched_domain *sd;
  
                this_cpu = raw_smp_processor_id();
@@@ -2367,6 -2415,22 +2427,22 @@@ out_activate
        activate_task(rq, p, 1);
        success = 1;
  
+       /*
+        * Only attribute actual wakeups done by this task.
+        */
+       if (!in_interrupt()) {
+               struct sched_entity *se = &current->se;
+               u64 sample = se->sum_exec_runtime;
+               if (se->last_wakeup)
+                       sample -= se->last_wakeup;
+               else
+                       sample -= se->start_runtime;
+               update_avg(&se->avg_wakeup, sample);
+               se->last_wakeup = se->sum_exec_runtime;
+       }
  out_running:
        trace_sched_wakeup(rq, p, success);
        check_preempt_curr(rq, p, sync);
                p->sched_class->task_wake_up(rq, p);
  #endif
  out:
-       current->se.last_wakeup = current->se.sum_exec_runtime;
        task_rq_unlock(rq, &flags);
  
        return success;
@@@ -2408,6 -2470,8 +2482,8 @@@ static void __sched_fork(struct task_st
        p->se.prev_sum_exec_runtime     = 0;
        p->se.last_wakeup               = 0;
        p->se.avg_overlap               = 0;
+       p->se.start_runtime             = 0;
+       p->se.avg_wakeup                = sysctl_sched_wakeup_granularity;
  
  #ifdef CONFIG_SCHEDSTATS
        p->se.wait_start                = 0;
@@@ -2470,6 -2534,8 +2546,8 @@@ void sched_fork(struct task_struct *p, 
        /* Want to start with kernel preemption disabled. */
        task_thread_info(p)->preempt_count = 1;
  #endif
+       plist_node_init(&p->pushable_tasks, MAX_PRIO);
        put_cpu();
  }
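
Illustration, not part of the patch: pushable_tasks is kept priority-ordered. A plain-C sketch of the idea using a sorted singly-linked list (not the kernel's plist implementation):

struct pnode {
	int prio;			/* lower value = higher priority */
	struct pnode *next;
};

/* Insert so the list stays sorted; equal-prio entries keep FIFO order. */
static void pushable_insert(struct pnode **head, struct pnode *node)
{
	struct pnode **pp = head;

	while (*pp && (*pp)->prio <= node->prio)
		pp = &(*pp)->next;

	node->next = *pp;
	*pp = node;
}

/* The head is always the highest-priority pushable entry. */
static struct pnode *pushable_first(struct pnode *head)
{
	return head;
}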
  
@@@ -2513,7 -2579,7 +2591,7 @@@ void wake_up_new_task(struct task_struc
  #ifdef CONFIG_PREEMPT_NOTIFIERS
  
  /**
-  * preempt_notifier_register - tell me when current is being being preempted & rescheduled
+  * preempt_notifier_register - tell me when current is being preempted & rescheduled
   * @notifier: notifier struct to register
   */
  void preempt_notifier_register(struct preempt_notifier *notifier)
@@@ -2610,6 -2676,12 +2688,12 @@@ static void finish_task_switch(struct r
  {
        struct mm_struct *mm = rq->prev_mm;
        long prev_state;
+ #ifdef CONFIG_SMP
+       int post_schedule = 0;
+       if (current->sched_class->needs_post_schedule)
+               post_schedule = current->sched_class->needs_post_schedule(rq);
+ #endif
  
        rq->prev_mm = NULL;
  
        finish_arch_switch(prev);
        finish_lock_switch(rq, prev);
  #ifdef CONFIG_SMP
-       if (current->sched_class->post_schedule)
+       if (post_schedule)
                current->sched_class->post_schedule(rq);
  #endif
  
@@@ -2935,6 -3007,7 +3019,7 @@@ int can_migrate_task(struct task_struc
                     struct sched_domain *sd, enum cpu_idle_type idle,
                     int *all_pinned)
  {
+       int tsk_cache_hot = 0;
        /*
         * We do not migrate tasks that are:
         * 1) running (obviously), or
         * 2) too many balance attempts have failed.
         */
  
-       if (!task_hot(p, rq->clock, sd) ||
-                       sd->nr_balance_failed > sd->cache_nice_tries) {
+       tsk_cache_hot = task_hot(p, rq->clock, sd);
+       if (!tsk_cache_hot ||
+               sd->nr_balance_failed > sd->cache_nice_tries) {
  #ifdef CONFIG_SCHEDSTATS
-               if (task_hot(p, rq->clock, sd)) {
+               if (tsk_cache_hot) {
                        schedstat_inc(sd, lb_hot_gained[idle]);
                        schedstat_inc(p, se.nr_forced_migrations);
                }
                return 1;
        }
  
-       if (task_hot(p, rq->clock, sd)) {
+       if (tsk_cache_hot) {
                schedstat_inc(p, se.nr_failed_migrations_hot);
                return 0;
        }
@@@ -3009,6 -3083,16 +3095,16 @@@ next
        pulled++;
        rem_load_move -= p->se.load.weight;
  
+ #ifdef CONFIG_PREEMPT
+       /*
+        * NEWIDLE balancing is a source of latency, so preemptible kernels
+        * will stop after the first task is pulled to minimize the critical
+        * section.
+        */
+       if (idle == CPU_NEWLY_IDLE)
+               goto out;
+ #endif
        /*
         * We only want to steal up to the prescribed amount of weighted load.
         */
@@@ -3055,9 -3139,15 +3151,15 @@@ static int move_tasks(struct rq *this_r
                                sd, idle, all_pinned, &this_best_prio);
                class = class->next;
  
+ #ifdef CONFIG_PREEMPT
+               /*
+                * NEWIDLE balancing is a source of latency, so preemptible
+                * kernels will stop after the first task is pulled to minimize
+                * the critical section.
+                */
                if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
                        break;
+ #endif
        } while (class && max_load_move > total_load_moved);
  
        return total_load_moved > 0;
@@@ -3107,246 -3197,480 +3209,480 @@@ static int move_one_task(struct rq *thi
  
        return 0;
  }
+ /********** Helpers for find_busiest_group ************************/
  /*
-  * find_busiest_group finds and returns the busiest CPU group within the
-  * domain. It calculates and returns the amount of weighted load which
-  * should be moved to restore balance via the imbalance parameter.
+  * sd_lb_stats - Structure to store the statistics of a sched_domain
+  *            during load balancing.
   */
- static struct sched_group *
- find_busiest_group(struct sched_domain *sd, int this_cpu,
-                  unsigned long *imbalance, enum cpu_idle_type idle,
-                  int *sd_idle, const struct cpumask *cpus, int *balance)
- {
-       struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
-       unsigned long max_load, avg_load, total_load, this_load, total_pwr;
-       unsigned long max_pull;
-       unsigned long busiest_load_per_task, busiest_nr_running;
-       unsigned long this_load_per_task, this_nr_running;
-       int load_idx, group_imb = 0;
+ struct sd_lb_stats {
+       struct sched_group *busiest; /* Busiest group in this sd */
+       struct sched_group *this;  /* Local group in this sd */
+       unsigned long total_load;  /* Total load of all groups in sd */
+       unsigned long total_pwr;   /*   Total power of all groups in sd */
+       unsigned long avg_load;    /* Average load across all groups in sd */
+       /** Statistics of this group */
+       unsigned long this_load;
+       unsigned long this_load_per_task;
+       unsigned long this_nr_running;
+       /* Statistics of the busiest group */
+       unsigned long max_load;
+       unsigned long busiest_load_per_task;
+       unsigned long busiest_nr_running;
+       int group_imb; /* Is there imbalance in this sd */
  #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
-       int power_savings_balance = 1;
-       unsigned long leader_nr_running = 0, min_load_per_task = 0;
-       unsigned long min_nr_running = ULONG_MAX;
-       struct sched_group *group_min = NULL, *group_leader = NULL;
+       int power_savings_balance; /* Is powersave balance needed for this sd */
+       struct sched_group *group_min; /* Least loaded group in sd */
+       struct sched_group *group_leader; /* Group which relieves group_min */
+       unsigned long min_load_per_task; /* load_per_task in group_min */
+       unsigned long leader_nr_running; /* Nr running of group_leader */
+       unsigned long min_nr_running; /* Nr running of group_min */
  #endif
+ };
  
-       max_load = this_load = total_load = total_pwr = 0;
-       busiest_load_per_task = busiest_nr_running = 0;
-       this_load_per_task = this_nr_running = 0;
+ /*
+  * sg_lb_stats - stats of a sched_group required for load_balancing
+  */
+ struct sg_lb_stats {
+       unsigned long avg_load; /*Avg load across the CPUs of the group */
+       unsigned long group_load; /* Total load over the CPUs of the group */
+       unsigned long sum_nr_running; /* Nr tasks running in the group */
+       unsigned long sum_weighted_load; /* Weighted load of group's tasks */
+       unsigned long group_capacity;
+       int group_imb; /* Is there an imbalance in the group ? */
+ };
  
-       if (idle == CPU_NOT_IDLE)
+ /**
+  * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
+  * @group: The group whose first cpu is to be returned.
+  */
+ static inline unsigned int group_first_cpu(struct sched_group *group)
+ {
+       return cpumask_first(sched_group_cpus(group));
+ }
+ /**
+  * get_sd_load_idx - Obtain the load index for a given sched domain.
+  * @sd: The sched_domain whose load_idx is to be obtained.
+  * @idle: The idle status of the CPU for which the sd's load_idx is obtained.
+  */
+ static inline int get_sd_load_idx(struct sched_domain *sd,
+                                       enum cpu_idle_type idle)
+ {
+       int load_idx;
+       switch (idle) {
+       case CPU_NOT_IDLE:
                load_idx = sd->busy_idx;
-       else if (idle == CPU_NEWLY_IDLE)
+               break;
+       case CPU_NEWLY_IDLE:
                load_idx = sd->newidle_idx;
-       else
+               break;
+       default:
                load_idx = sd->idle_idx;
+               break;
+       }
  
-       do {
-               unsigned long load, group_capacity, max_cpu_load, min_cpu_load;
-               int local_group;
-               int i;
-               int __group_imb = 0;
-               unsigned int balance_cpu = -1, first_idle_cpu = 0;
-               unsigned long sum_nr_running, sum_weighted_load;
-               unsigned long sum_avg_load_per_task;
-               unsigned long avg_load_per_task;
+       return load_idx;
+ }
  
-               local_group = cpumask_test_cpu(this_cpu,
-                                              sched_group_cpus(group));
  
-               if (local_group)
-                       balance_cpu = cpumask_first(sched_group_cpus(group));
+ #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
+ /**
+  * init_sd_power_savings_stats - Initialize power savings statistics for
+  * the given sched_domain, during load balancing.
+  *
+  * @sd: Sched domain whose power-savings statistics are to be initialized.
+  * @sds: Variable containing the statistics for sd.
+  * @idle: Idle status of the CPU at which we're performing load-balancing.
+  */
+ static inline void init_sd_power_savings_stats(struct sched_domain *sd,
+       struct sd_lb_stats *sds, enum cpu_idle_type idle)
+ {
+       /*
+        * Busy processors will not participate in power savings
+        * balance.
+        */
+       if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
+               sds->power_savings_balance = 0;
+       else {
+               sds->power_savings_balance = 1;
+               sds->min_nr_running = ULONG_MAX;
+               sds->leader_nr_running = 0;
+       }
+ }
  
-               /* Tally up the load of all CPUs in the group */
-               sum_weighted_load = sum_nr_running = avg_load = 0;
-               sum_avg_load_per_task = avg_load_per_task = 0;
+ /**
+  * update_sd_power_savings_stats - Update the power saving stats for a
+  * sched_domain while performing load balancing.
+  *
+  * @group: sched_group belonging to the sched_domain under consideration.
+  * @sds: Variable containing the statistics of the sched_domain
+  * @local_group: Does group contain the CPU for which we're performing
+  *            load balancing ?
+  * @sgs: Variable containing the statistics of the group.
+  */
+ static inline void update_sd_power_savings_stats(struct sched_group *group,
+       struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
+ {
  
-               max_cpu_load = 0;
-               min_cpu_load = ~0UL;
+       if (!sds->power_savings_balance)
+               return;
  
-               for_each_cpu_and(i, sched_group_cpus(group), cpus) {
-                       struct rq *rq = cpu_rq(i);
+       /*
+        * If the local group is idle or completely loaded
+        * no need to do power savings balance at this domain
+        */
+       if (local_group && (sds->this_nr_running >= sgs->group_capacity ||
+                               !sds->this_nr_running))
+               sds->power_savings_balance = 0;
  
-                       if (*sd_idle && rq->nr_running)
-                               *sd_idle = 0;
+       /*
+        * If a group is already running at full capacity or idle,
+        * don't include that group in power savings calculations
+        */
+       if (!sds->power_savings_balance ||
+               sgs->sum_nr_running >= sgs->group_capacity ||
+               !sgs->sum_nr_running)
+               return;
  
-                       /* Bias balancing toward cpus of our domain */
-                       if (local_group) {
-                               if (idle_cpu(i) && !first_idle_cpu) {
-                                       first_idle_cpu = 1;
-                                       balance_cpu = i;
-                               }
+       /*
+        * Calculate the group which has the least non-idle load.
+        * This is the group from where we need to pick up the load
+        * for saving power
+        */
+       if ((sgs->sum_nr_running < sds->min_nr_running) ||
+           (sgs->sum_nr_running == sds->min_nr_running &&
+            group_first_cpu(group) > group_first_cpu(sds->group_min))) {
+               sds->group_min = group;
+               sds->min_nr_running = sgs->sum_nr_running;
+               sds->min_load_per_task = sgs->sum_weighted_load /
+                                               sgs->sum_nr_running;
+       }
  
-                               load = target_load(i, load_idx);
-                       } else {
-                               load = source_load(i, load_idx);
-                               if (load > max_cpu_load)
-                                       max_cpu_load = load;
-                               if (min_cpu_load > load)
-                                       min_cpu_load = load;
-                       }
+       /*
+        * Calculate the group which is almost near its
+        * capacity but still has some space to pick up some load
+        * from other group and save more power
+        */
+       if (sgs->sum_nr_running > sgs->group_capacity - 1)
+               return;
  
-                       avg_load += load;
-                       sum_nr_running += rq->nr_running;
-                       sum_weighted_load += weighted_cpuload(i);
+       if (sgs->sum_nr_running > sds->leader_nr_running ||
+           (sgs->sum_nr_running == sds->leader_nr_running &&
+            group_first_cpu(group) < group_first_cpu(sds->group_leader))) {
+               sds->group_leader = group;
+               sds->leader_nr_running = sgs->sum_nr_running;
+       }
+ }
  
-                       sum_avg_load_per_task += cpu_avg_load_per_task(i);
-               }
+ /**
+  * check_power_save_busiest_group - see if there is potential for some power-savings balance
+  * @sds: Variable containing the statistics of the sched_domain
+  *    under consideration.
+  * @this_cpu: Cpu at which we're currently performing load-balancing.
+  * @imbalance: Variable to store the imbalance.
+  *
+  * Description:
+  * Check if we have potential to perform some power-savings balance.
+  * If yes, set the busiest group to be the least loaded group in the
+  * sched_domain, so that its CPUs can be put to idle.
+  *
+  * Returns 1 if there is potential to perform power-savings balance.
+  * Else returns 0.
+  */
+ static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
+                                       int this_cpu, unsigned long *imbalance)
+ {
+       if (!sds->power_savings_balance)
+               return 0;
  
-               /*
-                * First idle cpu or the first cpu(busiest) in this sched group
-                * is eligible for doing load balancing at this and above
-                * domains. In the newly idle case, we will allow all the cpu's
-                * to do the newly idle load balance.
-                */
-               if (idle != CPU_NEWLY_IDLE && local_group &&
-                   balance_cpu != this_cpu && balance) {
-                       *balance = 0;
-                       goto ret;
-               }
+       if (sds->this != sds->group_leader ||
+                       sds->group_leader == sds->group_min)
+               return 0;
  
-               total_load += avg_load;
-               total_pwr += group->__cpu_power;
+       *imbalance = sds->min_load_per_task;
+       sds->busiest = sds->group_min;
  
-               /* Adjust by relative CPU power of the group */
-               avg_load = sg_div_cpu_power(group,
-                               avg_load * SCHED_LOAD_SCALE);
+       if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP) {
+               cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu =
+                       group_first_cpu(sds->group_leader);
+       }
+       return 1;
  
+ }
+ #else /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
+ static inline void init_sd_power_savings_stats(struct sched_domain *sd,
+       struct sd_lb_stats *sds, enum cpu_idle_type idle)
+ {
+       return;
+ }
  
-               /*
-                * Consider the group unbalanced when the imbalance is larger
-                * than the average weight of two tasks.
-                *
-                * APZ: with cgroup the avg task weight can vary wildly and
-                *      might not be a suitable number - should we keep a
-                *      normalized nr_running number somewhere that negates
-                *      the hierarchy?
-                */
-               avg_load_per_task = sg_div_cpu_power(group,
-                               sum_avg_load_per_task * SCHED_LOAD_SCALE);
+ static inline void update_sd_power_savings_stats(struct sched_group *group,
+       struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
+ {
+       return;
+ }
+ static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
+                                       int this_cpu, unsigned long *imbalance)
+ {
+       return 0;
+ }
+ #endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
+ /**
+  * update_sg_lb_stats - Update sched_group's statistics for load balancing.
+  * @group: sched_group whose statistics are to be updated.
+  * @this_cpu: Cpu for which load balance is currently performed.
+  * @idle: Idle status of this_cpu
+  * @load_idx: Load index of sched_domain of this_cpu for load calc.
+  * @sd_idle: Idle status of the sched_domain containing group.
+  * @local_group: Does group contain this_cpu.
+  * @cpus: Set of cpus considered for load balancing.
+  * @balance: Should we balance.
+  * @sgs: variable to hold the statistics for this group.
+  */
+ static inline void update_sg_lb_stats(struct sched_group *group, int this_cpu,
+                       enum cpu_idle_type idle, int load_idx, int *sd_idle,
+                       int local_group, const struct cpumask *cpus,
+                       int *balance, struct sg_lb_stats *sgs)
+ {
+       unsigned long load, max_cpu_load, min_cpu_load;
+       int i;
+       unsigned int balance_cpu = -1, first_idle_cpu = 0;
+       unsigned long sum_avg_load_per_task;
+       unsigned long avg_load_per_task;
+       if (local_group)
+               balance_cpu = group_first_cpu(group);
+       /* Tally up the load of all CPUs in the group */
+       sum_avg_load_per_task = avg_load_per_task = 0;
+       max_cpu_load = 0;
+       min_cpu_load = ~0UL;
  
-               if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
-                       __group_imb = 1;
+       for_each_cpu_and(i, sched_group_cpus(group), cpus) {
+               struct rq *rq = cpu_rq(i);
  
-               group_capacity = group->__cpu_power / SCHED_LOAD_SCALE;
+               if (*sd_idle && rq->nr_running)
+                       *sd_idle = 0;
  
+               /* Bias balancing toward cpus of our domain */
                if (local_group) {
-                       this_load = avg_load;
-                       this = group;
-                       this_nr_running = sum_nr_running;
-                       this_load_per_task = sum_weighted_load;
-               } else if (avg_load > max_load &&
-                          (sum_nr_running > group_capacity || __group_imb)) {
-                       max_load = avg_load;
-                       busiest = group;
-                       busiest_nr_running = sum_nr_running;
-                       busiest_load_per_task = sum_weighted_load;
-                       group_imb = __group_imb;
+                       if (idle_cpu(i) && !first_idle_cpu) {
+                               first_idle_cpu = 1;
+                               balance_cpu = i;
+                       }
+                       load = target_load(i, load_idx);
+               } else {
+                       load = source_load(i, load_idx);
+                       if (load > max_cpu_load)
+                               max_cpu_load = load;
+                       if (min_cpu_load > load)
+                               min_cpu_load = load;
                }
  
- #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
-               /*
-                * Busy processors will not participate in power savings
-                * balance.
-                */
-               if (idle == CPU_NOT_IDLE ||
-                               !(sd->flags & SD_POWERSAVINGS_BALANCE))
-                       goto group_next;
+               sgs->group_load += load;
+               sgs->sum_nr_running += rq->nr_running;
+               sgs->sum_weighted_load += weighted_cpuload(i);
  
-               /*
-                * If the local group is idle or completely loaded
-                * no need to do power savings balance at this domain
-                */
-               if (local_group && (this_nr_running >= group_capacity ||
-                                   !this_nr_running))
-                       power_savings_balance = 0;
+               sum_avg_load_per_task += cpu_avg_load_per_task(i);
+       }
  
-               /*
-                * If a group is already running at full capacity or idle,
-                * don't include that group in power savings calculations
-                */
-               if (!power_savings_balance || sum_nr_running >= group_capacity
-                   || !sum_nr_running)
-                       goto group_next;
+       /*
+        * First idle cpu or the first cpu(busiest) in this sched group
+        * is eligible for doing load balancing at this and above
+        * domains. In the newly idle case, we will allow all the cpu's
+        * to do the newly idle load balance.
+        */
+       if (idle != CPU_NEWLY_IDLE && local_group &&
+           balance_cpu != this_cpu && balance) {
+               *balance = 0;
+               return;
+       }
  
-               /*
-                * Calculate the group which has the least non-idle load.
-                * This is the group from where we need to pick up the load
-                * for saving power
-                */
-               if ((sum_nr_running < min_nr_running) ||
-                   (sum_nr_running == min_nr_running &&
-                    cpumask_first(sched_group_cpus(group)) >
-                    cpumask_first(sched_group_cpus(group_min)))) {
-                       group_min = group;
-                       min_nr_running = sum_nr_running;
-                       min_load_per_task = sum_weighted_load /
-                                               sum_nr_running;
-               }
+       /* Adjust by relative CPU power of the group */
+       sgs->avg_load = sg_div_cpu_power(group,
+                       sgs->group_load * SCHED_LOAD_SCALE);
  
-               /*
-                * Calculate the group which is almost near its
-                * capacity but still has some space to pick up some load
-                * from other group and save more power
-                */
-               if (sum_nr_running <= group_capacity - 1) {
-                       if (sum_nr_running > leader_nr_running ||
-                           (sum_nr_running == leader_nr_running &&
-                            cpumask_first(sched_group_cpus(group)) <
-                            cpumask_first(sched_group_cpus(group_leader)))) {
-                               group_leader = group;
-                               leader_nr_running = sum_nr_running;
-                       }
+       /*
+        * Consider the group unbalanced when the imbalance is larger
+        * than the average weight of two tasks.
+        *
+        * APZ: with cgroup the avg task weight can vary wildly and
+        *      might not be a suitable number - should we keep a
+        *      normalized nr_running number somewhere that negates
+        *      the hierarchy?
+        */
+       avg_load_per_task = sg_div_cpu_power(group,
+                       sum_avg_load_per_task * SCHED_LOAD_SCALE);
+       if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
+               sgs->group_imb = 1;
+       sgs->group_capacity = group->__cpu_power / SCHED_LOAD_SCALE;
+ }
+ /**
+  * update_sd_lb_stats - Update sched_group's statistics for load balancing.
+  * @sd: sched_domain whose statistics are to be updated.
+  * @this_cpu: Cpu for which load balance is currently performed.
+  * @idle: Idle status of this_cpu
+  * @sd_idle: Idle status of the sched_domain containing group.
+  * @cpus: Set of cpus considered for load balancing.
+  * @balance: Should we balance.
+  * @sds: variable to hold the statistics for this sched_domain.
+  */
+ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
+                       enum cpu_idle_type idle, int *sd_idle,
+                       const struct cpumask *cpus, int *balance,
+                       struct sd_lb_stats *sds)
+ {
+       struct sched_group *group = sd->groups;
+       struct sg_lb_stats sgs;
+       int load_idx;
+       init_sd_power_savings_stats(sd, sds, idle);
+       load_idx = get_sd_load_idx(sd, idle);
+       do {
+               int local_group;
+               local_group = cpumask_test_cpu(this_cpu,
+                                              sched_group_cpus(group));
+               memset(&sgs, 0, sizeof(sgs));
+               update_sg_lb_stats(group, this_cpu, idle, load_idx, sd_idle,
+                               local_group, cpus, balance, &sgs);
+               if (local_group && balance && !(*balance))
+                       return;
+               sds->total_load += sgs.group_load;
+               sds->total_pwr += group->__cpu_power;
+               if (local_group) {
+                       sds->this_load = sgs.avg_load;
+                       sds->this = group;
+                       sds->this_nr_running = sgs.sum_nr_running;
+                       sds->this_load_per_task = sgs.sum_weighted_load;
+               } else if (sgs.avg_load > sds->max_load &&
+                          (sgs.sum_nr_running > sgs.group_capacity ||
+                               sgs.group_imb)) {
+                       sds->max_load = sgs.avg_load;
+                       sds->busiest = group;
+                       sds->busiest_nr_running = sgs.sum_nr_running;
+                       sds->busiest_load_per_task = sgs.sum_weighted_load;
+                       sds->group_imb = sgs.group_imb;
                }
- group_next:
- #endif
+               update_sd_power_savings_stats(group, sds, local_group, &sgs);
                group = group->next;
        } while (group != sd->groups);
  
-       if (!busiest || this_load >= max_load || busiest_nr_running == 0)
-               goto out_balanced;
-       avg_load = (SCHED_LOAD_SCALE * total_load) / total_pwr;
+ }
  
-       if (this_load >= avg_load ||
-                       100*max_load <= sd->imbalance_pct*this_load)
-               goto out_balanced;
+ /**
+  * fix_small_imbalance - Calculate the minor imbalance that exists
+  *                    amongst the groups of a sched_domain, during
+  *                    load balancing.
+  * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
+  * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
+  * @imbalance: Variable to store the imbalance.
+  */
+ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
+                               int this_cpu, unsigned long *imbalance)
+ {
+       unsigned long tmp, pwr_now = 0, pwr_move = 0;
+       unsigned int imbn = 2;
+       if (sds->this_nr_running) {
+               sds->this_load_per_task /= sds->this_nr_running;
+               if (sds->busiest_load_per_task >
+                               sds->this_load_per_task)
+                       imbn = 1;
+       } else
+               sds->this_load_per_task =
+                       cpu_avg_load_per_task(this_cpu);
  
-       busiest_load_per_task /= busiest_nr_running;
-       if (group_imb)
-               busiest_load_per_task = min(busiest_load_per_task, avg_load);
+       if (sds->max_load - sds->this_load + sds->busiest_load_per_task >=
+                       sds->busiest_load_per_task * imbn) {
+               *imbalance = sds->busiest_load_per_task;
+               return;
+       }
  
        /*
-        * We're trying to get all the cpus to the average_load, so we don't
-        * want to push ourselves above the average load, nor do we wish to
-        * reduce the max loaded cpu below the average load, as either of these
-        * actions would just result in more rebalancing later, and ping-pong
-        * tasks around. Thus we look for the minimum possible imbalance.
-        * Negative imbalances (*we* are more loaded than anyone else) will
-        * be counted as no imbalance for these purposes -- we can't fix that
-        * by pulling tasks to us. Be careful of negative numbers as they'll
-        * appear as very large values with unsigned longs.
+        * OK, we don't have enough imbalance to justify moving tasks,
+        * however we may be able to increase total CPU power used by
+        * moving them.
         */
-       if (max_load <= busiest_load_per_task)
-               goto out_balanced;
  
+       pwr_now += sds->busiest->__cpu_power *
+                       min(sds->busiest_load_per_task, sds->max_load);
+       pwr_now += sds->this->__cpu_power *
+                       min(sds->this_load_per_task, sds->this_load);
+       pwr_now /= SCHED_LOAD_SCALE;
+       /* Amount of load we'd subtract */
+       tmp = sg_div_cpu_power(sds->busiest,
+                       sds->busiest_load_per_task * SCHED_LOAD_SCALE);
+       if (sds->max_load > tmp)
+               pwr_move += sds->busiest->__cpu_power *
+                       min(sds->busiest_load_per_task, sds->max_load - tmp);
+       /* Amount of load we'd add */
+       if (sds->max_load * sds->busiest->__cpu_power <
+               sds->busiest_load_per_task * SCHED_LOAD_SCALE)
+               tmp = sg_div_cpu_power(sds->this,
+                       sds->max_load * sds->busiest->__cpu_power);
+       else
+               tmp = sg_div_cpu_power(sds->this,
+                       sds->busiest_load_per_task * SCHED_LOAD_SCALE);
+       pwr_move += sds->this->__cpu_power *
+                       min(sds->this_load_per_task, sds->this_load + tmp);
+       pwr_move /= SCHED_LOAD_SCALE;
+       /* Move if we gain throughput */
+       if (pwr_move > pwr_now)
+               *imbalance = sds->busiest_load_per_task;
+ }
+ /**
+  * calculate_imbalance - Calculate the amount of imbalance present within the
+  *                     groups of a given sched_domain during load balance.
+  * @sds: statistics of the sched_domain whose imbalance is to be calculated.
+  * @this_cpu: Cpu for which currently load balance is being performed.
+  * @imbalance: The variable to store the imbalance.
+  */
+ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
+               unsigned long *imbalance)
+ {
+       unsigned long max_pull;
        /*
         * In the presence of smp nice balancing, certain scenarios can have
         * max load less than avg load(as we skip the groups at or below
         * its cpu_power, while calculating max_load..)
         */
-       if (max_load < avg_load) {
+       if (sds->max_load < sds->avg_load) {
                *imbalance = 0;
-               goto small_imbalance;
+               return fix_small_imbalance(sds, this_cpu, imbalance);
        }
  
        /* Don't want to pull so many tasks that a group would go idle */
-       max_pull = min(max_load - avg_load, max_load - busiest_load_per_task);
+       max_pull = min(sds->max_load - sds->avg_load,
+                       sds->max_load - sds->busiest_load_per_task);
  
        /* How much load to actually move to equalise the imbalance */
-       *imbalance = min(max_pull * busiest->__cpu_power,
-                               (avg_load - this_load) * this->__cpu_power)
+       *imbalance = min(max_pull * sds->busiest->__cpu_power,
+               (sds->avg_load - sds->this_load) * sds->this->__cpu_power)
                        / SCHED_LOAD_SCALE;
  
        /*
         * a think about bumping its value to force at least one task to be
         * moved
         */
-       if (*imbalance < busiest_load_per_task) {
-               unsigned long tmp, pwr_now, pwr_move;
-               unsigned int imbn;
- small_imbalance:
-               pwr_move = pwr_now = 0;
-               imbn = 2;
-               if (this_nr_running) {
-                       this_load_per_task /= this_nr_running;
-                       if (busiest_load_per_task > this_load_per_task)
-                               imbn = 1;
-               } else
-                       this_load_per_task = cpu_avg_load_per_task(this_cpu);
+       if (*imbalance < sds->busiest_load_per_task)
+               return fix_small_imbalance(sds, this_cpu, imbalance);
  
-               if (max_load - this_load + busiest_load_per_task >=
-                                       busiest_load_per_task * imbn) {
-                       *imbalance = busiest_load_per_task;
-                       return busiest;
-               }
+ }
+ /******* find_busiest_group() helpers end here *********************/
  
-               /*
-                * OK, we don't have enough imbalance to justify moving tasks,
-                * however we may be able to increase total CPU power used by
-                * moving them.
-                */
+ /**
+  * find_busiest_group - Returns the busiest group within the sched_domain
+  * if there is an imbalance. If there isn't an imbalance, and
+  * the user has opted for power-savings, it returns a group whose
+  * CPUs can be put to idle by rebalancing those tasks elsewhere, if
+  * such a group exists.
+  *
+  * Also calculates the amount of weighted load which should be moved
+  * to restore balance.
+  *
+  * @sd: The sched_domain whose busiest group is to be returned.
+  * @this_cpu: The cpu for which load balancing is currently being performed.
+  * @imbalance: Variable which stores amount of weighted load which should
+  *            be moved to restore balance/put a group to idle.
+  * @idle: The idle status of this_cpu.
+  * @sd_idle: The idleness of sd
+  * @cpus: The set of CPUs under consideration for load-balancing.
+  * @balance: Pointer to a variable indicating if this_cpu
+  *    is the appropriate cpu to perform load balancing at this_level.
+  *
+  * Returns:   - the busiest group if imbalance exists.
+  *            - If no imbalance and user has opted for power-savings balance,
+  *               return the least loaded group whose CPUs can be
+  *               put to idle by rebalancing its tasks onto our group.
+  */
+ static struct sched_group *
+ find_busiest_group(struct sched_domain *sd, int this_cpu,
+                  unsigned long *imbalance, enum cpu_idle_type idle,
+                  int *sd_idle, const struct cpumask *cpus, int *balance)
+ {
+       struct sd_lb_stats sds;
  
-               pwr_now += busiest->__cpu_power *
-                               min(busiest_load_per_task, max_load);
-               pwr_now += this->__cpu_power *
-                               min(this_load_per_task, this_load);
-               pwr_now /= SCHED_LOAD_SCALE;
-               /* Amount of load we'd subtract */
-               tmp = sg_div_cpu_power(busiest,
-                               busiest_load_per_task * SCHED_LOAD_SCALE);
-               if (max_load > tmp)
-                       pwr_move += busiest->__cpu_power *
-                               min(busiest_load_per_task, max_load - tmp);
-               /* Amount of load we'd add */
-               if (max_load * busiest->__cpu_power <
-                               busiest_load_per_task * SCHED_LOAD_SCALE)
-                       tmp = sg_div_cpu_power(this,
-                                       max_load * busiest->__cpu_power);
-               else
-                       tmp = sg_div_cpu_power(this,
-                               busiest_load_per_task * SCHED_LOAD_SCALE);
-               pwr_move += this->__cpu_power *
-                               min(this_load_per_task, this_load + tmp);
-               pwr_move /= SCHED_LOAD_SCALE;
+       memset(&sds, 0, sizeof(sds));
  
-               /* Move if we gain throughput */
-               if (pwr_move > pwr_now)
-                       *imbalance = busiest_load_per_task;
-       }
+       /*
+        * Compute the various statistics relevant for load balancing at
+        * this level.
+        */
+       update_sd_lb_stats(sd, this_cpu, idle, sd_idle, cpus,
+                                       balance, &sds);
+       /* Cases where imbalance does not exist from POV of this_cpu */
+       /* 1) this_cpu is not the appropriate cpu to perform load balancing
+        *    at this level.
+        * 2) There is no busy sibling group to pull from.
+        * 3) This group is the busiest group.
+        * 4) This group is busier than the average busyness at this
+        *    sched_domain.
+        * 5) The imbalance is within the specified limit.
+        * 6) Any rebalance would lead to ping-pong.
+        */
+       if (balance && !(*balance))
+               goto ret;
  
-       return busiest;
+       if (!sds.busiest || sds.busiest_nr_running == 0)
+               goto out_balanced;
  
- out_balanced:
- #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
-       if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
-               goto ret;
+       if (sds.this_load >= sds.max_load)
+               goto out_balanced;
  
-       if (this == group_leader && group_leader != group_min) {
-               *imbalance = min_load_per_task;
-               if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP) {
-                       cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu =
-                               cpumask_first(sched_group_cpus(group_leader));
-               }
-               return group_min;
-       }
- #endif
+       sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
+       if (sds.this_load >= sds.avg_load)
+               goto out_balanced;
+       if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
+               goto out_balanced;
+       sds.busiest_load_per_task /= sds.busiest_nr_running;
+       if (sds.group_imb)
+               sds.busiest_load_per_task =
+                       min(sds.busiest_load_per_task, sds.avg_load);
+       /*
+        * We're trying to get all the cpus to the average_load, so we don't
+        * want to push ourselves above the average load, nor do we wish to
+        * reduce the max loaded cpu below the average load, as either of these
+        * actions would just result in more rebalancing later, and ping-pong
+        * tasks around. Thus we look for the minimum possible imbalance.
+        * Negative imbalances (*we* are more loaded than anyone else) will
+        * be counted as no imbalance for these purposes -- we can't fix that
+        * by pulling tasks to us. Be careful of negative numbers as they'll
+        * appear as very large values with unsigned longs.
+        */
+       if (sds.max_load <= sds.busiest_load_per_task)
+               goto out_balanced;
+       /* Looks like there is an imbalance. Compute it */
+       calculate_imbalance(&sds, this_cpu, imbalance);
+       return sds.busiest;
+ out_balanced:
+       /*
+        * There is no obvious imbalance. But check if we can do some balancing
+        * to save power.
+        */
+       if (check_power_save_busiest_group(&sds, this_cpu, imbalance))
+               return sds.busiest;
  ret:
        *imbalance = 0;
        return NULL;
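
A standalone sketch of the balance decision above (illustrative only; struct lb_stats and looks_balanced() are hypothetical stand-ins for sd_lb_stats and the checks in find_busiest_group(), not kernel code):

#include <stdio.h>

#define SCHED_LOAD_SCALE 1024UL

struct lb_stats {
        unsigned long total_load;               /* weighted load in the domain */
        unsigned long total_pwr;                /* summed cpu power */
        unsigned long this_load;                /* load of the local group */
        unsigned long max_load;                 /* load of the busiest group */
        unsigned long busiest_load_per_task;    /* avg load per busiest-group task */
        unsigned int busiest_nr_running;
        unsigned int imbalance_pct;             /* e.g. 125 == 25% slack */
};

/* Return 1 if the checks say "balanced", 0 if pulling looks worthwhile. */
static int looks_balanced(const struct lb_stats *s)
{
        unsigned long avg_load;

        if (!s->busiest_nr_running)
                return 1;       /* no busy sibling group to pull from */
        if (s->this_load >= s->max_load)
                return 1;       /* we are the busiest group ourselves */

        avg_load = (SCHED_LOAD_SCALE * s->total_load) / s->total_pwr;
        if (s->this_load >= avg_load)
                return 1;       /* already above the domain average */
        if (100 * s->max_load <= s->imbalance_pct * s->this_load)
                return 1;       /* imbalance within the allowed slack */

        /* pulling less than one task's worth would only ping-pong tasks */
        if (s->max_load <= s->busiest_load_per_task)
                return 1;

        return 0;
}

int main(void)
{
        struct lb_stats s = {
                .total_load = 3072, .total_pwr = 2048,
                .this_load = 512, .max_load = 2560,
                .busiest_load_per_task = 1280, .busiest_nr_running = 2,
                .imbalance_pct = 125,
        };

        printf("balanced: %d\n", looks_balanced(&s));
        return 0;
}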
@@@ -3470,19 -3826,23 +3838,23 @@@ find_busiest_queue(struct sched_group *
   */
  #define MAX_PINNED_INTERVAL   512
  
+ /* Working cpumask for load_balance and load_balance_newidle. */
+ static DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
  /*
   * Check this_cpu to ensure it is balanced within domain. Attempt to move
   * tasks if there is an imbalance.
   */
  static int load_balance(int this_cpu, struct rq *this_rq,
                        struct sched_domain *sd, enum cpu_idle_type idle,
-                       int *balance, struct cpumask *cpus)
+                       int *balance)
  {
        int ld_moved, all_pinned = 0, active_balance = 0, sd_idle = 0;
        struct sched_group *group;
        unsigned long imbalance;
        struct rq *busiest;
        unsigned long flags;
+       struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
  
        cpumask_setall(cpus);
  
@@@ -3637,8 -3997,7 +4009,7 @@@ out
   * this_rq is locked.
   */
  static int
- load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd,
-                       struct cpumask *cpus)
+ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
  {
        struct sched_group *group;
        struct rq *busiest = NULL;
        int ld_moved = 0;
        int sd_idle = 0;
        int all_pinned = 0;
+       struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
  
        cpumask_setall(cpus);
  
@@@ -3786,10 -4146,6 +4158,6 @@@ static void idle_balance(int this_cpu, 
        struct sched_domain *sd;
        int pulled_task = 0;
        unsigned long next_balance = jiffies + HZ;
-       cpumask_var_t tmpmask;
-       if (!alloc_cpumask_var(&tmpmask, GFP_ATOMIC))
-               return;
  
        for_each_domain(this_cpu, sd) {
                unsigned long interval;
                if (sd->flags & SD_BALANCE_NEWIDLE)
                        /* If we've pulled tasks over stop searching: */
                        pulled_task = load_balance_newidle(this_cpu, this_rq,
-                                                          sd, tmpmask);
+                                                          sd);
  
                interval = msecs_to_jiffies(sd->balance_interval);
                if (time_after(next_balance, sd->last_balance + interval))
                 */
                this_rq->next_balance = next_balance;
        }
-       free_cpumask_var(tmpmask);
  }
  
  /*
@@@ -3902,19 -4257,24 +4269,24 @@@ int select_nohz_load_balancer(int stop_
        int cpu = smp_processor_id();
  
        if (stop_tick) {
-               cpumask_set_cpu(cpu, nohz.cpu_mask);
                cpu_rq(cpu)->in_nohz_recently = 1;
  
-               /*
-                * If we are going offline and still the leader, give up!
-                */
-               if (!cpu_active(cpu) &&
-                   atomic_read(&nohz.load_balancer) == cpu) {
+               if (!cpu_active(cpu)) {
+                       if (atomic_read(&nohz.load_balancer) != cpu)
+                               return 0;
+                       /*
+                        * If we are going offline and still the leader,
+                        * give up!
+                        */
                        if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
                                BUG();
                        return 0;
                }
  
+               cpumask_set_cpu(cpu, nohz.cpu_mask);
                /* time for ilb owner also to sleep */
                if (cpumask_weight(nohz.cpu_mask) == num_online_cpus()) {
                        if (atomic_read(&nohz.load_balancer) == cpu)
@@@ -3960,11 -4320,6 +4332,6 @@@ static void rebalance_domains(int cpu, 
        unsigned long next_balance = jiffies + 60*HZ;
        int update_next_balance = 0;
        int need_serialize;
-       cpumask_var_t tmp;
-       /* Fails alloc?  Rebalancing probably not a priority right now. */
-       if (!alloc_cpumask_var(&tmp, GFP_ATOMIC))
-               return;
  
        for_each_domain(cpu, sd) {
                if (!(sd->flags & SD_LOAD_BALANCE))
                }
  
                if (time_after_eq(jiffies, sd->last_balance + interval)) {
-                       if (load_balance(cpu, rq, sd, idle, &balance, tmp)) {
+                       if (load_balance(cpu, rq, sd, idle, &balance)) {
                                /*
                                 * We've pulled tasks over so either we're no
                                 * longer idle, or one of our SMT siblings is
@@@ -4023,8 -4378,6 +4390,6 @@@ out
         */
        if (likely(update_next_balance))
                rq->next_balance = next_balance;
-       free_cpumask_var(tmp);
  }
  
  /*
@@@ -4074,6 -4427,11 +4439,11 @@@ static void run_rebalance_domains(struc
  #endif
  }
  
+ static inline int on_null_domain(int cpu)
+ {
+       return !rcu_dereference(cpu_rq(cpu)->sd);
+ }
  /*
   * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
   *
@@@ -4131,7 -4489,9 +4501,9 @@@ static inline void trigger_load_balance
            cpumask_test_cpu(cpu, nohz.cpu_mask))
                return;
  #endif
-       if (time_after_eq(jiffies, rq->next_balance))
+       /* Don't need to rebalance while attached to NULL domain */
+       if (time_after_eq(jiffies, rq->next_balance) &&
+           likely(!on_null_domain(cpu)))
                raise_softirq(SCHED_SOFTIRQ);
  }
  
@@@ -4151,25 -4511,9 +4523,25 @@@ DEFINE_PER_CPU(struct kernel_stat, ksta
  EXPORT_PER_CPU_SYMBOL(kstat);
  
  /*
 - * Return any ns on the sched_clock that have not yet been banked in
 + * Return any ns on the sched_clock that have not yet been accounted in
   * @p in case that task is currently running.
 + *
 + * Called with task_rq_lock() held on @rq.
   */
 +static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
 +{
 +      u64 ns = 0;
 +
 +      if (task_current(rq, p)) {
 +              update_rq_clock(rq);
 +              ns = rq->clock - p->se.exec_start;
 +              if ((s64)ns < 0)
 +                      ns = 0;
 +      }
 +
 +      return ns;
 +}
 +
  unsigned long long task_delta_exec(struct task_struct *p)
  {
        unsigned long flags;
        u64 ns = 0;
  
        rq = task_rq_lock(p, &flags);
 +      ns = do_task_delta_exec(p, rq);
 +      task_rq_unlock(rq, &flags);
  
 -      if (task_current(rq, p)) {
 -              u64 delta_exec;
 +      return ns;
 +}
  
 -              update_rq_clock(rq);
 -              delta_exec = rq->clock - p->se.exec_start;
 -              if ((s64)delta_exec > 0)
 -                      ns = delta_exec;
 -      }
 +/*
 + * Return accounted runtime for the task.
 + * In case the task is currently running, return the runtime plus current's
 + * pending runtime that has not been accounted yet.
 + */
 +unsigned long long task_sched_runtime(struct task_struct *p)
 +{
 +      unsigned long flags;
 +      struct rq *rq;
 +      u64 ns = 0;
 +
 +      rq = task_rq_lock(p, &flags);
 +      ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
 +      task_rq_unlock(rq, &flags);
 +
 +      return ns;
 +}
 +
 +/*
 + * Return sum_exec_runtime for the thread group.
 + * In case the task is currently running, return the sum plus current's
 + * pending runtime that has not been accounted yet.
 + *
 + * Note that the thread group might have other running tasks as well,
 + * so the return value does not include pending runtime that other
 + * running tasks might have.
 + */
 +unsigned long long thread_group_sched_runtime(struct task_struct *p)
 +{
 +      struct task_cputime totals;
 +      unsigned long flags;
 +      struct rq *rq;
 +      u64 ns;
  
 +      rq = task_rq_lock(p, &flags);
 +      thread_group_cputime(p, &totals);
 +      ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq);
        task_rq_unlock(rq, &flags);
  
        return ns;
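
The helpers above share one pattern: runtime already accounted plus the delta the currently running task has not banked yet. A minimal userspace model of that pattern (illustrative only; struct fake_task is hypothetical):

#include <stdio.h>

typedef unsigned long long u64;

struct fake_task {
        u64 sum_exec_runtime;   /* runtime already accounted */
        u64 exec_start;         /* clock stamp taken when it went on cpu */
        int running;
};

/* ns of runtime not yet folded into sum_exec_runtime */
static u64 delta_exec(const struct fake_task *t, u64 now)
{
        if (!t->running || now < t->exec_start)
                return 0;
        return now - t->exec_start;
}

static u64 sched_runtime(const struct fake_task *t, u64 now)
{
        return t->sum_exec_runtime + delta_exec(t, now);
}

int main(void)
{
        struct fake_task t = {
                .sum_exec_runtime = 5000000, .exec_start = 12000, .running = 1
        };

        /* 5000000 ns accounted + 3000 ns still pending */
        printf("%llu\n", sched_runtime(&t, 15000));
        return 0;
}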
@@@ -4248,8 -4559,6 +4620,8 @@@ void account_user_time(struct task_stru
                cpustat->nice = cputime64_add(cpustat->nice, tmp);
        else
                cpustat->user = cputime64_add(cpustat->user, tmp);
 +
 +      cpuacct_update_stats(p, CPUACCT_STAT_USER, cputime);
        /* Account for user time used */
        acct_update_integrals(p);
  }
@@@ -4311,8 -4620,6 +4683,8 @@@ void account_system_time(struct task_st
        else
                cpustat->system = cputime64_add(cpustat->system, tmp);
  
 +      cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime);
 +
        /* Account for system time used */
        acct_update_integrals(p);
  }
@@@ -4474,10 -4781,7 +4846,7 @@@ void scheduler_tick(void
  #endif
  }
  
- #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
-                               defined(CONFIG_PREEMPT_TRACER))
- static inline unsigned long get_parent_ip(unsigned long addr)
+ unsigned long get_parent_ip(unsigned long addr)
  {
        if (in_lock_functions(addr)) {
                addr = CALLER_ADDR2;
        return addr;
  }
  
+ #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
+                               defined(CONFIG_PREEMPT_TRACER))
  void __kprobes add_preempt_count(int val)
  {
  #ifdef CONFIG_DEBUG_PREEMPT
@@@ -4578,11 -4885,33 +4950,33 @@@ static inline void schedule_debug(struc
  #endif
  }
  
+ static void put_prev_task(struct rq *rq, struct task_struct *prev)
+ {
+       if (prev->state == TASK_RUNNING) {
+               u64 runtime = prev->se.sum_exec_runtime;
+               runtime -= prev->se.prev_sum_exec_runtime;
+               runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);
+               /*
+                * In order to avoid avg_overlap growing stale when we are
+                * indeed overlapping and hence not getting put to sleep, grow
+                * the avg_overlap on preemption.
+                *
+                * We use the average preemption runtime because that
+                * correlates to the amount of cache footprint a task can
+                * build up.
+                */
+               update_avg(&prev->se.avg_overlap, runtime);
+       }
+       prev->sched_class->put_prev_task(rq, prev);
+ }
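
The comment above relies on update_avg(), which in this kernel is assumed to be a simple 1/8-weight moving average. A standalone sketch of that damping behaviour (illustrative only, not the kernel implementation):

#include <stdio.h>

typedef unsigned long long u64;
typedef long long s64;

/* 1/8-weight exponential moving average, as used for se.avg_overlap */
static void update_avg(u64 *avg, u64 sample)
{
        s64 diff = (s64)(sample - *avg);

        *avg += diff >> 3;
}

int main(void)
{
        u64 avg = 0;
        u64 samples[] = { 800, 800, 100, 800 };

        for (int i = 0; i < 4; i++) {
                update_avg(&avg, samples[i]);
                printf("sample %llu -> avg %llu\n", samples[i], avg);
        }
        return 0;
}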
  /*
   * Pick up the highest-prio task:
   */
  static inline struct task_struct *
- pick_next_task(struct rq *rq, struct task_struct *prev)
+ pick_next_task(struct rq *rq)
  {
        const struct sched_class *class;
        struct task_struct *p;
@@@ -4654,8 -4983,8 +5048,8 @@@ need_resched_nonpreemptible
        if (unlikely(!rq->nr_running))
                idle_balance(cpu, rq);
  
-       prev->sched_class->put_prev_task(rq, prev);
-       next = pick_next_task(rq, prev);
+       put_prev_task(rq, prev);
+       next = pick_next_task(rq);
  
        if (likely(prev != next)) {
                sched_info_switch(prev, next);
@@@ -4777,7 -5106,7 +5171,7 @@@ asmlinkage void __sched preempt_schedul
                 * between schedule and now.
                 */
                barrier();
-       } while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
+       } while (need_resched());
  }
  EXPORT_SYMBOL(preempt_schedule);
  
@@@ -4806,7 -5135,7 +5200,7 @@@ asmlinkage void __sched preempt_schedul
                 * between schedule and now.
                 */
                barrier();
-       } while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
+       } while (need_resched());
  }
  
  #endif /* CONFIG_PREEMPT */
@@@ -4867,11 -5196,17 +5261,17 @@@ void __wake_up_locked(wait_queue_head_
        __wake_up_common(q, mode, 1, 0, NULL);
  }
  
+ void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
+ {
+       __wake_up_common(q, mode, 1, 0, key);
+ }
  /**
-  * __wake_up_sync - wake up threads blocked on a waitqueue.
+  * __wake_up_sync_key - wake up threads blocked on a waitqueue.
   * @q: the waitqueue
   * @mode: which threads
   * @nr_exclusive: how many wake-one or wake-many threads to wake up
+  * @key: opaque value to be passed to wakeup targets
   *
   * The sync wakeup differs that the waker knows that it will schedule
   * away soon, so while the target thread will be woken up, it will not
   * be migrated to another CPU - ie. the two threads are 'synchronized'
   * with each other. This can prevent needless bouncing between CPUs.
   *
   * On UP it can prevent extra preemption.
   */
- void
- __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
+ void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
                      int nr_exclusive, void *key)
  {
        unsigned long flags;
        int sync = 1;
                sync = 0;
  
        spin_lock_irqsave(&q->lock, flags);
-       __wake_up_common(q, mode, nr_exclusive, sync, NULL);
+       __wake_up_common(q, mode, nr_exclusive, sync, key);
        spin_unlock_irqrestore(&q->lock, flags);
  }
+ EXPORT_SYMBOL_GPL(__wake_up_sync_key);
+ /*
+  * __wake_up_sync - see __wake_up_sync_key()
+  */
+ void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
+ {
+       __wake_up_sync_key(q, mode, nr_exclusive, NULL);
+ }
  EXPORT_SYMBOL_GPL(__wake_up_sync);    /* For internal use only */
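
The new *_key entry points thread an opaque pointer through to the wakeup callbacks while the old names are kept as thin wrappers. The wrapper shape in isolation (illustrative only; the wake_* names below are hypothetical, not the waitqueue API):

#include <stdio.h>
#include <stddef.h>

static void wake_common(const char *q, unsigned int mode,
                        int nr_exclusive, void *key)
{
        printf("wake %s mode=%u nr=%d key=%p\n", q, mode, nr_exclusive, key);
}

static void wake_sync_key(const char *q, unsigned int mode,
                          int nr_exclusive, void *key)
{
        wake_common(q, mode, nr_exclusive, key);
}

/* old API preserved as a wrapper that simply passes a NULL key */
static void wake_sync(const char *q, unsigned int mode, int nr_exclusive)
{
        wake_sync_key(q, mode, nr_exclusive, NULL);
}

int main(void)
{
        wake_sync("demo_queue", 1, 1);
        return 0;
}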
  
  /**
@@@ -5280,7 -5624,7 +5689,7 @@@ SYSCALL_DEFINE1(nice, int, increment
        if (increment > 40)
                increment = 40;
  
-       nice = PRIO_TO_NICE(current->static_prio) + increment;
+       nice = TASK_NICE(current) + increment;
        if (nice < -20)
                nice = -20;
        if (nice > 19)
@@@ -6079,12 -6423,7 +6488,7 @@@ void sched_show_task(struct task_struc
                printk(KERN_CONT " %016lx ", thread_saved_pc(p));
  #endif
  #ifdef CONFIG_DEBUG_STACK_USAGE
-       {
-               unsigned long *n = end_of_stack(p);
-               while (!*n)
-                       n++;
-               free = (unsigned long)n - (unsigned long)end_of_stack(p);
-       }
+       free = stack_not_used(p);
  #endif
        printk(KERN_CONT "%5lu %5d %6d\n", free,
                task_pid_nr(p), task_pid_nr(p->real_parent));
@@@ -6558,7 -6897,7 +6962,7 @@@ static void migrate_dead_tasks(unsigne
                if (!rq->nr_running)
                        break;
                update_rq_clock(rq);
-               next = pick_next_task(rq, rq->curr);
+               next = pick_next_task(rq);
                if (!next)
                        break;
                next->sched_class->put_prev_task(rq, next);
@@@ -6963,8 -7302,7 +7367,8 @@@ static int sched_domain_debug_one(struc
                cpumask_or(groupmask, groupmask, sched_group_cpus(group));
  
                cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
 -              printk(KERN_CONT " %s", str);
 +              printk(KERN_CONT " %s (__cpu_power = %d)", str,
 +                                              group->__cpu_power);
  
                group = group->next;
        } while (group != sd->groups);
@@@ -7080,20 -7418,26 +7484,26 @@@ static void free_rootdomain(struct root
  
  static void rq_attach_root(struct rq *rq, struct root_domain *rd)
  {
+       struct root_domain *old_rd = NULL;
        unsigned long flags;
  
        spin_lock_irqsave(&rq->lock, flags);
  
        if (rq->rd) {
-               struct root_domain *old_rd = rq->rd;
+               old_rd = rq->rd;
  
                if (cpumask_test_cpu(rq->cpu, old_rd->online))
                        set_rq_offline(rq);
  
                cpumask_clear_cpu(rq->cpu, old_rd->span);
  
-               if (atomic_dec_and_test(&old_rd->refcount))
-                       free_rootdomain(old_rd);
+               /*
+                * If we don't want to free the old_rd yet then
+                * set old_rd to NULL to skip the freeing later
+                * in this function:
+                */
+               if (!atomic_dec_and_test(&old_rd->refcount))
+                       old_rd = NULL;
        }
  
        atomic_inc(&rd->refcount);
                set_rq_online(rq);
  
        spin_unlock_irqrestore(&rq->lock, flags);
+       if (old_rd)
+               free_rootdomain(old_rd);
  }
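
The rq_attach_root() change above is the classic "remember the stale object under the lock, free it after the unlock" pattern. The same idea with a plain pthread mutex (illustrative only; not kernel code):

#include <pthread.h>
#include <stdlib.h>

struct domain {
        int refcount;
        /* ... */
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct domain *current_domain;

static void attach_domain(struct domain *new_dom)
{
        struct domain *old = NULL;

        pthread_mutex_lock(&lock);
        if (current_domain && --current_domain->refcount == 0)
                old = current_domain;   /* remember it, don't free it yet */
        new_dom->refcount++;
        current_domain = new_dom;
        pthread_mutex_unlock(&lock);

        free(old);                      /* free() outside the critical section */
}

int main(void)
{
        struct domain *a = calloc(1, sizeof(*a));
        struct domain *b = calloc(1, sizeof(*b));

        attach_domain(a);
        attach_domain(b);       /* drops the last reference to a, frees it */
        return 0;
}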
  
  static int __init_refok init_rootdomain(struct root_domain *rd, bool bootmem)
@@@ -7381,7 -7728,7 +7794,7 @@@ cpu_to_core_group(int cpu, const struc
  {
        int group;
  
-       cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map);
+       cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
        group = cpumask_first(mask);
        if (sg)
                *sg = &per_cpu(sched_group_core, group).sg;
@@@ -7410,7 -7757,7 +7823,7 @@@ cpu_to_phys_group(int cpu, const struc
        cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
        group = cpumask_first(mask);
  #elif defined(CONFIG_SCHED_SMT)
-       cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map);
+       cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
        group = cpumask_first(mask);
  #else
        group = cpu;
@@@ -7753,7 -8100,7 +8166,7 @@@ static int __build_sched_domains(const 
                SD_INIT(sd, SIBLING);
                set_domain_attribute(sd, attr);
                cpumask_and(sched_domain_span(sd),
-                           &per_cpu(cpu_sibling_map, i), cpu_map);
+                           topology_thread_cpumask(i), cpu_map);
                sd->parent = p;
                p->child = sd;
                cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask);
        /* Set up CPU (sibling) groups */
        for_each_cpu(i, cpu_map) {
                cpumask_and(this_sibling_map,
-                           &per_cpu(cpu_sibling_map, i), cpu_map);
+                           topology_thread_cpumask(i), cpu_map);
                if (i != cpumask_first(this_sibling_map))
                        continue;
  
@@@ -8345,11 -8692,15 +8758,15 @@@ static void init_rt_rq(struct rt_rq *rt
        __set_bit(MAX_RT_PRIO, array->bitmap);
  
  #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
-       rt_rq->highest_prio = MAX_RT_PRIO;
+       rt_rq->highest_prio.curr = MAX_RT_PRIO;
+ #ifdef CONFIG_SMP
+       rt_rq->highest_prio.next = MAX_RT_PRIO;
+ #endif
  #endif
  #ifdef CONFIG_SMP
        rt_rq->rt_nr_migratory = 0;
        rt_rq->overloaded = 0;
+       plist_head_init(&rq->rt.pushable_tasks, &rq->lock);
  #endif
  
        rt_rq->rt_time = 0;
@@@ -8435,6 -8786,9 +8852,9 @@@ void __init sched_init(void
  #endif
  #ifdef CONFIG_USER_SCHED
        alloc_size *= 2;
+ #endif
+ #ifdef CONFIG_CPUMASK_OFFSTACK
+       alloc_size += num_possible_cpus() * cpumask_size();
  #endif
        /*
         * As sched_init() is called before page_alloc is setup,
                ptr += nr_cpu_ids * sizeof(void **);
  #endif /* CONFIG_USER_SCHED */
  #endif /* CONFIG_RT_GROUP_SCHED */
+ #ifdef CONFIG_CPUMASK_OFFSTACK
+               for_each_possible_cpu(i) {
+                       per_cpu(load_balance_tmpmask, i) = (void *)ptr;
+                       ptr += cpumask_size();
+               }
+ #endif /* CONFIG_CPUMASK_OFFSTACK */
        }
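
With CONFIG_CPUMASK_OFFSTACK the per-cpu working masks are carved out of the single boot-time allocation above instead of being allocated on every balance pass. Carving N fixed-size chunks out of one block looks roughly like this (illustrative only; sizes are made up):

#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS         8
#define CPUMASK_BYTES   16      /* stand-in for cpumask_size() */

static void *per_cpu_mask[NR_CPUS];

int main(void)
{
        /* one allocation sized for every possible cpu ... */
        char *ptr = calloc(NR_CPUS, CPUMASK_BYTES);

        if (!ptr)
                return 1;

        /* ... carved into per-cpu chunks, as sched_init() does with ptr */
        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
                per_cpu_mask[cpu] = ptr;
                ptr += CPUMASK_BYTES;
        }

        /* the block lives for the lifetime of the program, never freed */
        printf("cpu3 mask at %p\n", per_cpu_mask[3]);
        return 0;
}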
  
  #ifdef CONFIG_SMP
@@@ -9351,6 -9711,16 +9777,16 @@@ static int sched_rt_global_constraints(
  
        return ret;
  }
+ int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
+ {
+       /* Don't accept realtime tasks when there is no way for them to run */
+       if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
+               return 0;
+       return 1;
+ }
  #else /* !CONFIG_RT_GROUP_SCHED */
  static int sched_rt_global_constraints(void)
  {
@@@ -9444,8 -9814,7 +9880,7 @@@ cpu_cgroup_can_attach(struct cgroup_sub
                      struct task_struct *tsk)
  {
  #ifdef CONFIG_RT_GROUP_SCHED
-       /* Don't accept realtime tasks when there is no way for them to run */
-       if (rt_task(tsk) && cgroup_tg(cgrp)->rt_bandwidth.rt_runtime == 0)
+       if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
                return -EINVAL;
  #else
        /* We don't support RT-tasks being in separate groups */
@@@ -9556,7 -9925,6 +9991,7 @@@ struct cpuacct 
        struct cgroup_subsys_state css;
        /* cpuusage holds pointer to a u64-type object on every cpu */
        u64 *cpuusage;
 +      struct percpu_counter cpustat[CPUACCT_STAT_NSTATS];
        struct cpuacct *parent;
  };
  
@@@ -9581,32 -9949,20 +10016,32 @@@ static struct cgroup_subsys_state *cpua
        struct cgroup_subsys *ss, struct cgroup *cgrp)
  {
        struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
 +      int i;
  
        if (!ca)
 -              return ERR_PTR(-ENOMEM);
 +              goto out;
  
        ca->cpuusage = alloc_percpu(u64);
 -      if (!ca->cpuusage) {
 -              kfree(ca);
 -              return ERR_PTR(-ENOMEM);
 -      }
 +      if (!ca->cpuusage)
 +              goto out_free_ca;
 +
 +      for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
 +              if (percpu_counter_init(&ca->cpustat[i], 0))
 +                      goto out_free_counters;
  
        if (cgrp->parent)
                ca->parent = cgroup_ca(cgrp->parent);
  
        return &ca->css;
 +
 +out_free_counters:
 +      while (--i >= 0)
 +              percpu_counter_destroy(&ca->cpustat[i]);
 +      free_percpu(ca->cpuusage);
 +out_free_ca:
 +      kfree(ca);
 +out:
 +      return ERR_PTR(-ENOMEM);
  }
  
  /* destroy an existing cpu accounting group */
@@@ -9614,17 -9970,14 +10049,17 @@@ static voi
  cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
  {
        struct cpuacct *ca = cgroup_ca(cgrp);
 +      int i;
  
 +      for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
 +              percpu_counter_destroy(&ca->cpustat[i]);
        free_percpu(ca->cpuusage);
        kfree(ca);
  }
  
  static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
  {
-       u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu);
+       u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
        u64 data;
  
  #ifndef CONFIG_64BIT
  
  static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
  {
-       u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu);
+       u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
  
  #ifndef CONFIG_64BIT
        /*
@@@ -9704,25 -10057,6 +10139,25 @@@ static int cpuacct_percpu_seq_read(stru
        return 0;
  }
  
 +static const char *cpuacct_stat_desc[] = {
 +      [CPUACCT_STAT_USER] = "user",
 +      [CPUACCT_STAT_SYSTEM] = "system",
 +};
 +
 +static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft,
 +              struct cgroup_map_cb *cb)
 +{
 +      struct cpuacct *ca = cgroup_ca(cgrp);
 +      int i;
 +
 +      for (i = 0; i < CPUACCT_STAT_NSTATS; i++) {
 +              s64 val = percpu_counter_read(&ca->cpustat[i]);
 +              val = cputime64_to_clock_t(val);
 +              cb->fill(cb, cpuacct_stat_desc[i], val);
 +      }
 +      return 0;
 +}
 +
  static struct cftype files[] = {
        {
                .name = "usage",
                .name = "usage_percpu",
                .read_seq_string = cpuacct_percpu_seq_read,
        },
 -
 +      {
 +              .name = "stat",
 +              .read_map = cpuacct_stats_show,
 +      },
  };
  
  static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
@@@ -9754,42 -10085,16 +10189,42 @@@ static void cpuacct_charge(struct task_
        struct cpuacct *ca;
        int cpu;
  
-       if (!cpuacct_subsys.active)
+       if (unlikely(!cpuacct_subsys.active))
                return;
  
        cpu = task_cpu(tsk);
 +
 +      rcu_read_lock();
 +
        ca = task_ca(tsk);
  
        for (; ca; ca = ca->parent) {
-               u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu);
+               u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
                *cpuusage += cputime;
        }
 +
 +      rcu_read_unlock();
 +}
 +
 +/*
 + * Charge the system/user time to the task's accounting group.
 + */
 +static void cpuacct_update_stats(struct task_struct *tsk,
 +              enum cpuacct_stat_index idx, cputime_t val)
 +{
 +      struct cpuacct *ca;
 +
 +      if (unlikely(!cpuacct_subsys.active))
 +              return;
 +
 +      rcu_read_lock();
 +      ca = task_ca(tsk);
 +
 +      do {
 +              percpu_counter_add(&ca->cpustat[idx], val);
 +              ca = ca->parent;
 +      } while (ca);
 +      rcu_read_unlock();
  }
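
cpuacct_update_stats() charges the value to the task's group and to every ancestor group. The parent walk in isolation (illustrative only; struct acct_group is a hypothetical stand-in for struct cpuacct):

#include <stdio.h>

enum { STAT_USER, STAT_SYSTEM, STAT_NSTATS };

struct acct_group {
        long long stat[STAT_NSTATS];
        struct acct_group *parent;
};

static void charge(struct acct_group *ca, int idx, long long val)
{
        /* walk from the task's group up to the root, charging each level */
        do {
                ca->stat[idx] += val;
                ca = ca->parent;
        } while (ca);
}

int main(void)
{
        struct acct_group root  = { { 0, 0 }, NULL };
        struct acct_group child = { { 0, 0 }, &root };

        charge(&child, STAT_USER, 10);
        printf("child=%lld root=%lld\n",
               child.stat[STAT_USER], root.stat[STAT_USER]);
        return 0;
}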
  
  struct cgroup_subsys cpuacct_subsys = {
diff --combined kernel/sched_rt.c
@@@ -3,6 -3,40 +3,40 @@@
   * policies)
   */
  
+ static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
+ {
+       return container_of(rt_se, struct task_struct, rt);
+ }
+ #ifdef CONFIG_RT_GROUP_SCHED
+ static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
+ {
+       return rt_rq->rq;
+ }
+ static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
+ {
+       return rt_se->rt_rq;
+ }
+ #else /* CONFIG_RT_GROUP_SCHED */
+ static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
+ {
+       return container_of(rt_rq, struct rq, rt);
+ }
+ static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
+ {
+       struct task_struct *p = rt_task_of(rt_se);
+       struct rq *rq = task_rq(p);
+       return &rq->rt;
+ }
+ #endif /* CONFIG_RT_GROUP_SCHED */
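
rt_task_of() and the !RT_GROUP_SCHED flavour of rq_of_rt_rq() both lean on container_of(), which recovers the enclosing structure from a pointer to one of its embedded members. A self-contained illustration (the demo structs are made up):

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct rt_entity_demo {
        int prio;
};

struct task_demo {
        char comm[16];
        struct rt_entity_demo rt;       /* embedded member */
};

int main(void)
{
        struct task_demo t = { "demo-task", { 42 } };
        struct rt_entity_demo *rt_se = &t.rt;

        /* recover the task from the embedded entity, as rt_task_of() does */
        struct task_demo *p = container_of(rt_se, struct task_demo, rt);

        printf("%s prio=%d\n", p->comm, p->rt.prio);
        return 0;
}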
  #ifdef CONFIG_SMP
  
  static inline int rt_overloaded(struct rq *rq)
@@@ -37,25 -71,69 +71,69 @@@ static inline void rt_clear_overload(st
        cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
  }
  
- static void update_rt_migration(struct rq *rq)
+ static void update_rt_migration(struct rt_rq *rt_rq)
  {
-       if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) {
-               if (!rq->rt.overloaded) {
-                       rt_set_overload(rq);
-                       rq->rt.overloaded = 1;
+       if (rt_rq->rt_nr_migratory && (rt_rq->rt_nr_running > 1)) {
+               if (!rt_rq->overloaded) {
+                       rt_set_overload(rq_of_rt_rq(rt_rq));
+                       rt_rq->overloaded = 1;
                }
-       } else if (rq->rt.overloaded) {
-               rt_clear_overload(rq);
-               rq->rt.overloaded = 0;
+       } else if (rt_rq->overloaded) {
+               rt_clear_overload(rq_of_rt_rq(rt_rq));
+               rt_rq->overloaded = 0;
        }
  }
- #endif /* CONFIG_SMP */
  
- static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
+ static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+ {
+       if (rt_se->nr_cpus_allowed > 1)
+               rt_rq->rt_nr_migratory++;
+       update_rt_migration(rt_rq);
+ }
+ static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+ {
+       if (rt_se->nr_cpus_allowed > 1)
+               rt_rq->rt_nr_migratory--;
+       update_rt_migration(rt_rq);
+ }
+ static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
+ {
+       plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
+       plist_node_init(&p->pushable_tasks, p->prio);
+       plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
+ }
+ static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
+ {
+       plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
+ }
+ #else
+ static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
  {
-       return container_of(rt_se, struct task_struct, rt);
  }
  
+ static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
+ {
+ }
+ static inline
+ void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+ {
+ }
+ static inline
+ void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+ {
+ }
+ #endif /* CONFIG_SMP */
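
The pushable_tasks plist keeps migratable tasks sorted by priority so that pick_next_pushable_task() can take the head in O(1). A minimal sorted-insert list with the same ordering property (illustrative only; the kernel uses the generic plist implementation, not this):

#include <stdio.h>

struct pushable {
        int prio;                       /* lower value == higher priority */
        struct pushable *next;
};

/* insert keeping the list sorted by ascending prio value */
static void enqueue_pushable(struct pushable **head, struct pushable *p)
{
        while (*head && (*head)->prio <= p->prio)
                head = &(*head)->next;
        p->next = *head;
        *head = p;
}

static struct pushable *pick_next_pushable(struct pushable **head)
{
        struct pushable *p = *head;

        if (p)
                *head = p->next;        /* O(1): head is the best candidate */
        return p;
}

int main(void)
{
        struct pushable *head = NULL;
        struct pushable a = { 30 }, b = { 10 }, c = { 20 };

        enqueue_pushable(&head, &a);
        enqueue_pushable(&head, &b);
        enqueue_pushable(&head, &c);

        for (struct pushable *p; (p = pick_next_pushable(&head)); )
                printf("prio %d\n", p->prio);
        return 0;
}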
  static inline int on_rt_rq(struct sched_rt_entity *rt_se)
  {
        return !list_empty(&rt_se->run_list);
@@@ -79,16 -157,6 +157,6 @@@ static inline u64 sched_rt_period(struc
  #define for_each_leaf_rt_rq(rt_rq, rq) \
        list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
  
- static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
- {
-       return rt_rq->rq;
- }
- static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
- {
-       return rt_se->rt_rq;
- }
  #define for_each_sched_rt_entity(rt_se) \
        for (; rt_se; rt_se = rt_se->parent)
  
@@@ -108,7 -176,7 +176,7 @@@ static void sched_rt_rq_enqueue(struct 
        if (rt_rq->rt_nr_running) {
                if (rt_se && !on_rt_rq(rt_se))
                        enqueue_rt_entity(rt_se);
-               if (rt_rq->highest_prio < curr->prio)
+               if (rt_rq->highest_prio.curr < curr->prio)
                        resched_task(curr);
        }
  }
@@@ -176,19 -244,6 +244,6 @@@ static inline u64 sched_rt_period(struc
  #define for_each_leaf_rt_rq(rt_rq, rq) \
        for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
  
- static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
- {
-       return container_of(rt_rq, struct rq, rt);
- }
- static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
- {
-       struct task_struct *p = rt_task_of(rt_se);
-       struct rq *rq = task_rq(p);
-       return &rq->rt;
- }
  #define for_each_sched_rt_entity(rt_se) \
        for (; rt_se; rt_se = NULL)
  
@@@ -473,7 -528,7 +528,7 @@@ static inline int rt_se_prio(struct sch
        struct rt_rq *rt_rq = group_rt_rq(rt_se);
  
        if (rt_rq)
-               return rt_rq->highest_prio;
+               return rt_rq->highest_prio.curr;
  #endif
  
        return rt_task_of(rt_se)->prio;
@@@ -547,91 -602,174 +602,174 @@@ static void update_curr_rt(struct rq *r
        }
  }
  
- static inline
- void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+ #if defined CONFIG_SMP
+ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu);
+ static inline int next_prio(struct rq *rq)
  {
-       WARN_ON(!rt_prio(rt_se_prio(rt_se)));
-       rt_rq->rt_nr_running++;
- #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
-       if (rt_se_prio(rt_se) < rt_rq->highest_prio) {
- #ifdef CONFIG_SMP
-               struct rq *rq = rq_of_rt_rq(rt_rq);
- #endif
+       struct task_struct *next = pick_next_highest_task_rt(rq, rq->cpu);
+       if (next && rt_prio(next->prio))
+               return next->prio;
+       else
+               return MAX_RT_PRIO;
+ }
+ static void
+ inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
+ {
+       struct rq *rq = rq_of_rt_rq(rt_rq);
+       if (prio < prev_prio) {
+               /*
+                * If the new task is higher in priority than anything on the
+                * run-queue, we know that the previous high becomes our
+                * next-highest.
+                */
+               rt_rq->highest_prio.next = prev_prio;
  
-               rt_rq->highest_prio = rt_se_prio(rt_se);
- #ifdef CONFIG_SMP
                if (rq->online)
-                       cpupri_set(&rq->rd->cpupri, rq->cpu,
-                                  rt_se_prio(rt_se));
- #endif
-       }
- #endif
- #ifdef CONFIG_SMP
-       if (rt_se->nr_cpus_allowed > 1) {
-               struct rq *rq = rq_of_rt_rq(rt_rq);
+                       cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
  
-               rq->rt.rt_nr_migratory++;
-       }
+       } else if (prio == rt_rq->highest_prio.curr)
+               /*
+                * If the next task is equal in priority to the highest on
+                * the run-queue, then we implicitly know that the next highest
+                * task cannot be any lower than current
+                */
+               rt_rq->highest_prio.next = prio;
+       else if (prio < rt_rq->highest_prio.next)
+               /*
+                * Otherwise, we need to recompute next-highest
+                */
+               rt_rq->highest_prio.next = next_prio(rq);
+ }
  
-       update_rt_migration(rq_of_rt_rq(rt_rq));
- #endif
- #ifdef CONFIG_RT_GROUP_SCHED
-       if (rt_se_boosted(rt_se))
-               rt_rq->rt_nr_boosted++;
+ static void
+ dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
+ {
+       struct rq *rq = rq_of_rt_rq(rt_rq);
  
-       if (rt_rq->tg)
-               start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
- #else
-       start_rt_bandwidth(&def_rt_bandwidth);
- #endif
+       if (rt_rq->rt_nr_running && (prio <= rt_rq->highest_prio.next))
+               rt_rq->highest_prio.next = next_prio(rq);
+       if (rq->online && rt_rq->highest_prio.curr != prev_prio)
+               cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
  }
  
+ #else /* CONFIG_SMP */
  static inline
- void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
- {
- #ifdef CONFIG_SMP
-       int highest_prio = rt_rq->highest_prio;
- #endif
+ void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
+ static inline
+ void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
+ #endif /* CONFIG_SMP */
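
highest_prio now caches both the best (curr) and second-best (next) queued priority so pull decisions can peek at a remote runqueue without taking its lock. A simplified model that recomputes the pair from per-priority counts (illustrative only; the kernel updates these incrementally via inc_rt_prio()/dec_rt_prio() and next_prio()):

#include <stdio.h>

#define MAX_RT_PRIO 100

static unsigned int nr_at_prio[MAX_RT_PRIO];
static int prio_curr = MAX_RT_PRIO;     /* best queued priority */
static int prio_next = MAX_RT_PRIO;     /* second best, used for pulls */

static void recompute(void)
{
        int found = 0;

        prio_curr = prio_next = MAX_RT_PRIO;
        for (int p = 0; p < MAX_RT_PRIO && found < 2; p++) {
                for (unsigned int n = 0; n < nr_at_prio[p] && found < 2; n++) {
                        if (found++ == 0)
                                prio_curr = p;
                        else
                                prio_next = p;
                }
        }
}

static void enqueue(int prio) { nr_at_prio[prio]++; recompute(); }
static void dequeue(int prio) { nr_at_prio[prio]--; recompute(); }

int main(void)
{
        enqueue(50);
        enqueue(10);
        enqueue(10);
        printf("curr=%d next=%d\n", prio_curr, prio_next);      /* 10 10 */

        dequeue(10);
        printf("curr=%d next=%d\n", prio_curr, prio_next);      /* 10 50 */
        return 0;
}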
  
-       WARN_ON(!rt_prio(rt_se_prio(rt_se)));
-       WARN_ON(!rt_rq->rt_nr_running);
-       rt_rq->rt_nr_running--;
  #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
+ static void
+ inc_rt_prio(struct rt_rq *rt_rq, int prio)
+ {
+       int prev_prio = rt_rq->highest_prio.curr;
+       if (prio < prev_prio)
+               rt_rq->highest_prio.curr = prio;
+       inc_rt_prio_smp(rt_rq, prio, prev_prio);
+ }
+ static void
+ dec_rt_prio(struct rt_rq *rt_rq, int prio)
+ {
+       int prev_prio = rt_rq->highest_prio.curr;
        if (rt_rq->rt_nr_running) {
-               struct rt_prio_array *array;
  
-               WARN_ON(rt_se_prio(rt_se) < rt_rq->highest_prio);
-               if (rt_se_prio(rt_se) == rt_rq->highest_prio) {
-                       /* recalculate */
-                       array = &rt_rq->active;
-                       rt_rq->highest_prio =
+               WARN_ON(prio < prev_prio);
+               /*
+                * This may have been our highest task, and therefore
+                * we may have some recomputation to do
+                */
+               if (prio == prev_prio) {
+                       struct rt_prio_array *array = &rt_rq->active;
+                       rt_rq->highest_prio.curr =
                                sched_find_first_bit(array->bitmap);
-               } /* otherwise leave rq->highest prio alone */
+               }
        } else
-               rt_rq->highest_prio = MAX_RT_PRIO;
- #endif
- #ifdef CONFIG_SMP
-       if (rt_se->nr_cpus_allowed > 1) {
-               struct rq *rq = rq_of_rt_rq(rt_rq);
-               rq->rt.rt_nr_migratory--;
-       }
+               rt_rq->highest_prio.curr = MAX_RT_PRIO;
  
-       if (rt_rq->highest_prio != highest_prio) {
-               struct rq *rq = rq_of_rt_rq(rt_rq);
+       dec_rt_prio_smp(rt_rq, prio, prev_prio);
+ }
  
-               if (rq->online)
-                       cpupri_set(&rq->rd->cpupri, rq->cpu,
-                                  rt_rq->highest_prio);
-       }
+ #else
+ static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
+ static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
+ #endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
  
-       update_rt_migration(rq_of_rt_rq(rt_rq));
- #endif /* CONFIG_SMP */
  #ifdef CONFIG_RT_GROUP_SCHED
+ static void
+ inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+ {
+       if (rt_se_boosted(rt_se))
+               rt_rq->rt_nr_boosted++;
+       if (rt_rq->tg)
+               start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
+ }
+ static void
+ dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+ {
        if (rt_se_boosted(rt_se))
                rt_rq->rt_nr_boosted--;
  
        WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
- #endif
+ }
+ #else /* CONFIG_RT_GROUP_SCHED */
+ static void
+ inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+ {
+       start_rt_bandwidth(&def_rt_bandwidth);
+ }
+ static inline
+ void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
+ #endif /* CONFIG_RT_GROUP_SCHED */
+ static inline
+ void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+ {
+       int prio = rt_se_prio(rt_se);
+       WARN_ON(!rt_prio(prio));
+       rt_rq->rt_nr_running++;
+       inc_rt_prio(rt_rq, prio);
+       inc_rt_migration(rt_se, rt_rq);
+       inc_rt_group(rt_se, rt_rq);
+ }
+ static inline
+ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+ {
+       WARN_ON(!rt_prio(rt_se_prio(rt_se)));
+       WARN_ON(!rt_rq->rt_nr_running);
+       rt_rq->rt_nr_running--;
+       dec_rt_prio(rt_rq, rt_se_prio(rt_se));
+       dec_rt_migration(rt_se, rt_rq);
+       dec_rt_group(rt_se, rt_rq);
  }
  
  static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
@@@ -718,6 -856,9 +856,9 @@@ static void enqueue_task_rt(struct rq *
  
        enqueue_rt_entity(rt_se);
  
+       if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
+               enqueue_pushable_task(rq, p);
        inc_cpu_load(rq, p->se.load.weight);
  }
  
@@@ -728,6 -869,8 +869,8 @@@ static void dequeue_task_rt(struct rq *
        update_curr_rt(rq);
        dequeue_rt_entity(rt_se);
  
+       dequeue_pushable_task(rq, p);
        dec_cpu_load(rq, p->se.load.weight);
  }
  
@@@ -805,15 -948,20 +948,15 @@@ static int select_task_rq_rt(struct tas
  
  static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
  {
 -      cpumask_var_t mask;
 -
        if (rq->curr->rt.nr_cpus_allowed == 1)
                return;
  
 -      if (!alloc_cpumask_var(&mask, GFP_ATOMIC))
 -              return;
 -
        if (p->rt.nr_cpus_allowed != 1
 -          && cpupri_find(&rq->rd->cpupri, p, mask))
 -              goto free;
 +          && cpupri_find(&rq->rd->cpupri, p, NULL))
 +              return;
  
 -      if (!cpupri_find(&rq->rd->cpupri, rq->curr, mask))
 -              goto free;
 +      if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
 +              return;
  
        /*
         * There appears to be other cpus that can accept
         */
        requeue_task_rt(rq, p, 1);
        resched_task(rq->curr);
 -free:
 -      free_cpumask_var(mask);
  }
  
  #endif /* CONFIG_SMP */
@@@ -871,7 -1021,7 +1014,7 @@@ static struct sched_rt_entity *pick_nex
        return next;
  }
  
- static struct task_struct *pick_next_task_rt(struct rq *rq)
+ static struct task_struct *_pick_next_task_rt(struct rq *rq)
  {
        struct sched_rt_entity *rt_se;
        struct task_struct *p;
  
        p = rt_task_of(rt_se);
        p->se.exec_start = rq->clock;
+       return p;
+ }
+ static struct task_struct *pick_next_task_rt(struct rq *rq)
+ {
+       struct task_struct *p = _pick_next_task_rt(rq);
+       /* The running task is never eligible for pushing */
+       if (p)
+               dequeue_pushable_task(rq, p);
        return p;
  }
  
@@@ -900,6 -1062,13 +1055,13 @@@ static void put_prev_task_rt(struct rq 
  {
        update_curr_rt(rq);
        p->se.exec_start = 0;
+       /*
+        * The previous task needs to be made eligible for pushing
+        * if it is still active
+        */
+       if (p->se.on_rq && p->rt.nr_cpus_allowed > 1)
+               enqueue_pushable_task(rq, p);
  }
  
  #ifdef CONFIG_SMP
@@@ -953,12 -1122,13 +1115,13 @@@ static struct task_struct *pick_next_hi
  
  static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
  
- static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
+ static inline int pick_optimal_cpu(int this_cpu,
+                                  const struct cpumask *mask)
  {
        int first;
  
        /* "this_cpu" is cheaper to preempt than a remote processor */
-       if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
+       if ((this_cpu != -1) && cpumask_test_cpu(this_cpu, mask))
                return this_cpu;
  
        first = cpumask_first(mask);
@@@ -974,6 -1144,7 +1137,7 @@@ static int find_lowest_rq(struct task_s
        struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
        int this_cpu = smp_processor_id();
        int cpu      = task_cpu(task);
+       cpumask_var_t domain_mask;
  
        if (task->rt.nr_cpus_allowed == 1)
                return -1; /* No other targets possible */
        if (this_cpu == cpu)
                this_cpu = -1; /* Skip this_cpu opt if the same */
  
-       for_each_domain(cpu, sd) {
-               if (sd->flags & SD_WAKE_AFFINE) {
-                       cpumask_t domain_mask;
-                       int       best_cpu;
+       if (alloc_cpumask_var(&domain_mask, GFP_ATOMIC)) {
+               for_each_domain(cpu, sd) {
+                       if (sd->flags & SD_WAKE_AFFINE) {
+                               int best_cpu;
+                               cpumask_and(domain_mask,
+                                           sched_domain_span(sd),
+                                           lowest_mask);
  
-                       cpumask_and(&domain_mask, sched_domain_span(sd),
-                                   lowest_mask);
+                               best_cpu = pick_optimal_cpu(this_cpu,
+                                                           domain_mask);
  
-                       best_cpu = pick_optimal_cpu(this_cpu,
-                                                   &domain_mask);
-                       if (best_cpu != -1)
-                               return best_cpu;
+                               if (best_cpu != -1) {
+                                       free_cpumask_var(domain_mask);
+                                       return best_cpu;
+                               }
+                       }
                }
+               free_cpumask_var(domain_mask);
        }
  
        /*
@@@ -1065,7 -1242,7 +1235,7 @@@ static struct rq *find_lock_lowest_rq(s
                }
  
                /* If this rq is still suitable use it. */
-               if (lowest_rq->rt.highest_prio > task->prio)
+               if (lowest_rq->rt.highest_prio.curr > task->prio)
                        break;
  
                /* try again */
        return lowest_rq;
  }
  
+ static inline int has_pushable_tasks(struct rq *rq)
+ {
+       return !plist_head_empty(&rq->rt.pushable_tasks);
+ }
+ static struct task_struct *pick_next_pushable_task(struct rq *rq)
+ {
+       struct task_struct *p;
+       if (!has_pushable_tasks(rq))
+               return NULL;
+       p = plist_first_entry(&rq->rt.pushable_tasks,
+                             struct task_struct, pushable_tasks);
+       BUG_ON(rq->cpu != task_cpu(p));
+       BUG_ON(task_current(rq, p));
+       BUG_ON(p->rt.nr_cpus_allowed <= 1);
+       BUG_ON(!p->se.on_rq);
+       BUG_ON(!rt_task(p));
+       return p;
+ }
  /*
   * If the current CPU has more than one RT task, see if the non
   * running task can migrate over to a CPU that is running a task
@@@ -1085,13 -1287,11 +1280,11 @@@ static int push_rt_task(struct rq *rq
  {
        struct task_struct *next_task;
        struct rq *lowest_rq;
-       int ret = 0;
-       int paranoid = RT_MAX_TRIES;
  
        if (!rq->rt.overloaded)
                return 0;
  
-       next_task = pick_next_highest_task_rt(rq, -1);
+       next_task = pick_next_pushable_task(rq);
        if (!next_task)
                return 0;
  
                struct task_struct *task;
                /*
                 * find_lock_lowest_rq releases rq->lock
-                * so it is possible that next_task has changed.
-                * If it has, then try again.
+                * so it is possible that next_task has migrated.
+                *
+                * We need to make sure that the task is still on the same
+                * run-queue and is also still the next task eligible for
+                * pushing.
                 */
-               task = pick_next_highest_task_rt(rq, -1);
-               if (unlikely(task != next_task) && task && paranoid--) {
-                       put_task_struct(next_task);
-                       next_task = task;
-                       goto retry;
+               task = pick_next_pushable_task(rq);
+               if (task_cpu(next_task) == rq->cpu && task == next_task) {
+                       /*
+                        * If we get here, the task hasn't moved at all, but
+                        * it has failed to push.  We will not try again,
+                        * since the other cpus will pull from us when they
+                        * are ready.
+                        */
+                       dequeue_pushable_task(rq, next_task);
+                       goto out;
                }
-               goto out;
+               if (!task)
+                       /* No more tasks, just exit */
+                       goto out;
+               /*
+                * Something has shifted, try again.
+                */
+               put_task_struct(next_task);
+               next_task = task;
+               goto retry;
        }
  
        deactivate_task(rq, next_task, 0);
  
        double_unlock_balance(rq, lowest_rq);
  
-       ret = 1;
  out:
        put_task_struct(next_task);
  
-       return ret;
+       return 1;
  }
  
- /*
-  * TODO: Currently we just use the second highest prio task on
-  *       the queue, and stop when it can't migrate (or there's
-  *       no more RT tasks).  There may be a case where a lower
-  *       priority RT task has a different affinity than the
-  *       higher RT task. In this case the lower RT task could
-  *       possibly be able to migrate where as the higher priority
-  *       RT task could not.  We currently ignore this issue.
-  *       Enhancements are welcome!
-  */
  static void push_rt_tasks(struct rq *rq)
  {
        /* push_rt_task will return true if it moved an RT */
  static int pull_rt_task(struct rq *this_rq)
  {
        int this_cpu = this_rq->cpu, ret = 0, cpu;
-       struct task_struct *p, *next;
+       struct task_struct *p;
        struct rq *src_rq;
  
        if (likely(!rt_overloaded(this_rq)))
                return 0;
  
-       next = pick_next_task_rt(this_rq);
        for_each_cpu(cpu, this_rq->rd->rto_mask) {
                if (this_cpu == cpu)
                        continue;
  
                src_rq = cpu_rq(cpu);
+               /*
+                * Don't bother taking the src_rq->lock if the next highest
+                * task is known to be lower-priority than our current task.
+                * This may look racy, but if this value is about to go
+                * logically higher, the src_rq will push this task away.
+                * And if it's going logically lower, we do not care.
+                */
+               if (src_rq->rt.highest_prio.next >=
+                   this_rq->rt.highest_prio.curr)
+                       continue;
                /*
                 * We can potentially drop this_rq's lock in
                 * double_lock_balance, and another CPU could
-                * steal our next task - hence we must cause
-                * the caller to recalculate the next task
-                * in that case:
+                * alter this_rq
                 */
-               if (double_lock_balance(this_rq, src_rq)) {
-                       struct task_struct *old_next = next;
-                       next = pick_next_task_rt(this_rq);
-                       if (next != old_next)
-                               ret = 1;
-               }
+               double_lock_balance(this_rq, src_rq);
  
                /*
                 * Are there still pullable RT tasks?
                 * Do we have an RT task that preempts
                 * the to-be-scheduled task?
                 */
-               if (p && (!next || (p->prio < next->prio))) {
+               if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
                        WARN_ON(p == src_rq->curr);
                        WARN_ON(!p->se.on_rq);
  
                         * This is just that p is waking up and hasn't
                         * had a chance to schedule. We only pull
                         * p if it is lower in priority than the
-                        * current task on the run queue or
-                        * this_rq next task is lower in prio than
-                        * the current task on that rq.
+                        * current task on the run queue
                         */
-                       if (p->prio < src_rq->curr->prio ||
-                           (next && next->prio < src_rq->curr->prio))
+                       if (p->prio < src_rq->curr->prio)
                                goto skip;
  
                        ret = 1;
                         * case there's an even higher prio task
                         * in another runqueue. (low likelyhood
                         * but possible)
-                        *
-                        * Update next so that we won't pick a task
-                        * on another cpu with a priority lower (or equal)
-                        * than the one we just picked.
                         */
-                       next = p;
                }
   skip:
                double_unlock_balance(this_rq, src_rq);
  static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
  {
        /* Try to pull RT tasks here if we lower this rq's prio */
-       if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio)
+       if (unlikely(rt_task(prev)) && rq->rt.highest_prio.curr > prev->prio)
                pull_rt_task(rq);
  }
  
+ /*
+  * assumes rq->lock is held
+  */
+ static int needs_post_schedule_rt(struct rq *rq)
+ {
+       return has_pushable_tasks(rq);
+ }
  static void post_schedule_rt(struct rq *rq)
  {
        /*
-        * If we have more than one rt_task queued, then
-        * see if we can push the other rt_tasks off to other CPUS.
-        * Note we may release the rq lock, and since
-        * the lock was owned by prev, we need to release it
-        * first via finish_lock_switch and then reaquire it here.
+        * This is only called if needs_post_schedule_rt() indicates that
+        * we need to push tasks away
         */
-       if (unlikely(rq->rt.overloaded)) {
-               spin_lock_irq(&rq->lock);
-               push_rt_tasks(rq);
-               spin_unlock_irq(&rq->lock);
-       }
+       spin_lock_irq(&rq->lock);
+       push_rt_tasks(rq);
+       spin_unlock_irq(&rq->lock);
  }
  
  /*
@@@ -1281,7 -1484,8 +1477,8 @@@ static void task_wake_up_rt(struct rq *
  {
        if (!task_running(rq, p) &&
            !test_tsk_need_resched(rq->curr) &&
-           rq->rt.overloaded)
+           has_pushable_tasks(rq) &&
+           p->rt.nr_cpus_allowed > 1)
                push_rt_tasks(rq);
  }
  
@@@ -1317,6 -1521,24 +1514,24 @@@ static void set_cpus_allowed_rt(struct 
        if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
                struct rq *rq = task_rq(p);
  
+               if (!task_current(rq, p)) {
+                       /*
+                        * Make sure we dequeue this task from the pushable list
+                        * before going further.  It will either remain off of
+                        * the list because we are no longer pushable, or it
+                        * will be requeued.
+                        */
+                       if (p->rt.nr_cpus_allowed > 1)
+                               dequeue_pushable_task(rq, p);
+                       /*
+                        * Requeue if our weight is changing and still > 1
+                        */
+                       if (weight > 1)
+                               enqueue_pushable_task(rq, p);
+               }
                if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
                        rq->rt.rt_nr_migratory++;
                } else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
                        rq->rt.rt_nr_migratory--;
                }
  
-               update_rt_migration(rq);
+               update_rt_migration(&rq->rt);
        }
  
        cpumask_copy(&p->cpus_allowed, new_mask);
@@@ -1339,7 -1561,7 +1554,7 @@@ static void rq_online_rt(struct rq *rq
  
        __enable_runtime(rq);
  
-       cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio);
+       cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
  }
  
  /* Assumes rq->lock is held */
@@@ -1431,7 -1653,7 +1646,7 @@@ static void prio_changed_rt(struct rq *
                 * can release the rq lock and p could migrate.
                 * Only reschedule if p is still on the same runqueue.
                 */
-               if (p->prio > rq->rt.highest_prio && rq->curr == p)
+               if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
                        resched_task(p);
  #else
                /* For UP simply resched on drop of prio */
@@@ -1502,6 -1724,9 +1717,9 @@@ static void set_curr_task_rt(struct rq 
        struct task_struct *p = rq->curr;
  
        p->se.exec_start = rq->clock;
+       /* The running task is never eligible for pushing */
+       dequeue_pushable_task(rq, p);
  }
  
  static const struct sched_class rt_sched_class = {
        .rq_online              = rq_online_rt,
        .rq_offline             = rq_offline_rt,
        .pre_schedule           = pre_schedule_rt,
+       .needs_post_schedule    = needs_post_schedule_rt,
        .post_schedule          = post_schedule_rt,
        .task_wake_up           = task_wake_up_rt,
        .switched_from          = switched_from_rt,