Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f235c41..4499950 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -317,9 +317,12 @@ static inline struct rq *__task_rq_lock(struct task_struct *p)
        for (;;) {
                rq = task_rq(p);
                raw_spin_lock(&rq->lock);
-               if (likely(rq == task_rq(p)))
+               if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
                        return rq;
                raw_spin_unlock(&rq->lock);
+
+               while (unlikely(task_on_rq_migrating(p)))
+                       cpu_relax();
        }
 }
 
@@ -336,10 +339,13 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
                raw_spin_lock_irqsave(&p->pi_lock, *flags);
                rq = task_rq(p);
                raw_spin_lock(&rq->lock);
-               if (likely(rq == task_rq(p)))
+               if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
                        return rq;
                raw_spin_unlock(&rq->lock);
                raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
+
+               while (unlikely(task_on_rq_migrating(p)))
+                       cpu_relax();
        }
 }
 
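
Note on the two hunks above: lockers must now also check for a new TASK_ON_RQ_MIGRATING state and, when they catch a task mid-migration, drop the runqueue lock and spin outside it until the move finishes, so that task_rq(p) is stable once the function returns. Below is a minimal user-space model of that lock/recheck/spin-outside-the-lock pattern; it assumes C11 atomics and pthreads, and struct task / struct rq only mirror the kernel names, they are not the kernel types.

#include <pthread.h>
#include <sched.h>          /* sched_yield() stands in for cpu_relax() */
#include <stdatomic.h>

enum { TASK_ON_RQ_QUEUED = 1, TASK_ON_RQ_MIGRATING = 2 };

struct rq { pthread_mutex_t lock; };

struct task {
        _Atomic(struct rq *) rq;   /* runqueue the task currently belongs to */
        atomic_int on_rq;          /* 0, TASK_ON_RQ_QUEUED or TASK_ON_RQ_MIGRATING */
};

/* Return the task's runqueue, locked, once the task is stably attached to it. */
static struct rq *task_rq_lock_model(struct task *p)
{
        for (;;) {
                struct rq *rq = atomic_load(&p->rq);

                pthread_mutex_lock(&rq->lock);
                /* Recheck under the lock: the task may have moved, or may be
                 * in the middle of a move, since the load above. */
                if (rq == atomic_load(&p->rq) &&
                    atomic_load(&p->on_rq) != TASK_ON_RQ_MIGRATING)
                        return rq;
                pthread_mutex_unlock(&rq->lock);

                /* Wait outside the lock so the migrating side can finish. */
                while (atomic_load(&p->on_rq) == TASK_ON_RQ_MIGRATING)
                        sched_yield();
        }
}
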
@@ -433,7 +439,15 @@ static void __hrtick_start(void *arg)
 void hrtick_start(struct rq *rq, u64 delay)
 {
        struct hrtimer *timer = &rq->hrtick_timer;
-       ktime_t time = ktime_add_ns(timer->base->get_time(), delay);
+       ktime_t time;
+       s64 delta;
+
+       /*
+        * Don't schedule slices shorter than 10000ns, that just
+        * doesn't make sense and can cause timer DoS.
+        */
+       delta = max_t(s64, delay, 10000LL);
+       time = ktime_add_ns(timer->base->get_time(), delta);
 
        hrtimer_set_expires(timer, time);
 
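
The clamp added above bounds how often the hrtick timer can fire: with a 10000 ns floor this path arms at most roughly 100000 expirations per second per CPU (1e9 / 10000), whereas an arbitrarily short slice could drive that unbounded. A standalone sketch of the same clamp; clamp_hrtick_delay_ns() is a made-up name for illustration, the hunk itself uses max_t(s64, delay, 10000LL) directly.

#include <stdint.h>

static inline int64_t clamp_hrtick_delay_ns(int64_t delay_ns)
{
        const int64_t min_slice_ns = 10000;     /* 10 us floor, as above */
        return delay_ns > min_slice_ns ? delay_ns : min_slice_ns;
}
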
@@ -1027,7 +1041,7 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
         * A queue event has occurred, and we're going to schedule.  In
         * this case, we can save a useless back to back clock update.
         */
-       if (rq->curr->on_rq && test_tsk_need_resched(rq->curr))
+       if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
                rq->skip_clock_update = 1;
 }
 
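
This and the remaining conversions in this patch replace raw p->on_rq tests with the task_on_rq_queued()/task_on_rq_migrating() wrappers. The wrappers are introduced in kernel/sched/sched.h by the same series rather than in this file; they are modeled here on a stripped-down structure so the conversions are easier to follow (task_struct_model is a stand-in, not the kernel type).

#define TASK_ON_RQ_QUEUED       1       /* queued on a runqueue */
#define TASK_ON_RQ_MIGRATING    2       /* in flight between runqueues */

struct task_struct_model { int on_rq; };

static inline int task_on_rq_queued(const struct task_struct_model *p)
{
        return p->on_rq == TASK_ON_RQ_QUEUED;
}

static inline int task_on_rq_migrating(const struct task_struct_model *p)
{
        return p->on_rq == TASK_ON_RQ_MIGRATING;
}
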
@@ -1072,7 +1086,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 
 static void __migrate_swap_task(struct task_struct *p, int cpu)
 {
-       if (p->on_rq) {
+       if (task_on_rq_queued(p)) {
                struct rq *src_rq, *dst_rq;
 
                src_rq = task_rq(p);
@@ -1198,7 +1212,7 @@ static int migration_cpu_stop(void *data);
 unsigned long wait_task_inactive(struct task_struct *p, long match_state)
 {
        unsigned long flags;
-       int running, on_rq;
+       int running, queued;
        unsigned long ncsw;
        struct rq *rq;
 
@@ -1236,7 +1250,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
                rq = task_rq_lock(p, &flags);
                trace_sched_wait_task(p);
                running = task_running(rq, p);
-               on_rq = p->on_rq;
+               queued = task_on_rq_queued(p);
                ncsw = 0;
                if (!match_state || p->state == match_state)
                        ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
@@ -1268,7 +1282,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
                 * running right now), it's preempted, and we should
                 * yield - it could be a while.
                 */
-               if (unlikely(on_rq)) {
+               if (unlikely(queued)) {
                        ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);
 
                        set_current_state(TASK_UNINTERRUPTIBLE);
@@ -1462,7 +1476,7 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
 static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
 {
        activate_task(rq, p, en_flags);
-       p->on_rq = 1;
+       p->on_rq = TASK_ON_RQ_QUEUED;
 
        /* if a worker is waking up, notify workqueue */
        if (p->flags & PF_WQ_WORKER)
@@ -1521,7 +1535,7 @@ static int ttwu_remote(struct task_struct *p, int wake_flags)
        int ret = 0;
 
        rq = __task_rq_lock(p);
-       if (p->on_rq) {
+       if (task_on_rq_queued(p)) {
                /* check_preempt_curr() may use rq clock */
                update_rq_clock(rq);
                ttwu_do_wakeup(rq, p, wake_flags);
@@ -1604,6 +1618,25 @@ static void ttwu_queue_remote(struct task_struct *p, int cpu)
        }
 }
 
+void wake_up_if_idle(int cpu)
+{
+       struct rq *rq = cpu_rq(cpu);
+       unsigned long flags;
+
+       if (!is_idle_task(rq->curr))
+               return;
+
+       if (set_nr_if_polling(rq->idle)) {
+               trace_sched_wake_idle_without_ipi(cpu);
+       } else {
+               raw_spin_lock_irqsave(&rq->lock, flags);
+               if (is_idle_task(rq->curr))
+                       smp_send_reschedule(cpu);
+               /* Else CPU is not idle, do nothing here */
+               raw_spin_unlock_irqrestore(&rq->lock, flags);
+       }
+}
+
 bool cpus_share_cache(int this_cpu, int that_cpu)
 {
        return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
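
wake_up_if_idle(), added above, kicks a remote CPU only when it is actually idle, and prefers the cheap set_nr_if_polling() path over sending an IPI. A plausible caller is sketched below purely to show the intended usage; wake_up_all_idle_cpus_sketch() is illustrative and not part of this file.

/* Kick every idle CPU except the local one. */
static void wake_up_all_idle_cpus_sketch(void)
{
        int cpu;

        preempt_disable();
        for_each_online_cpu(cpu) {
                if (cpu == smp_processor_id())
                        continue;
                wake_up_if_idle(cpu);
        }
        preempt_enable();
}
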
@@ -1726,7 +1759,7 @@ static void try_to_wake_up_local(struct task_struct *p)
        if (!(p->state & TASK_NORMAL))
                goto out;
 
-       if (!p->on_rq)
+       if (!task_on_rq_queued(p))
                ttwu_activate(rq, p, ENQUEUE_WAKEUP);
 
        ttwu_do_wakeup(rq, p, 0);
@@ -1759,6 +1792,20 @@ int wake_up_state(struct task_struct *p, unsigned int state)
        return try_to_wake_up(p, state, 0);
 }
 
+/*
+ * This function clears the sched_dl_entity static params.
+ */
+void __dl_clear_params(struct task_struct *p)
+{
+       struct sched_dl_entity *dl_se = &p->dl;
+
+       dl_se->dl_runtime = 0;
+       dl_se->dl_deadline = 0;
+       dl_se->dl_period = 0;
+       dl_se->flags = 0;
+       dl_se->dl_bw = 0;
+}
+
 /*
  * Perform scheduler related setup for a newly forked process p.
  * p is forked by current.
@@ -1783,10 +1830,7 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
 
        RB_CLEAR_NODE(&p->dl.rb_node);
        hrtimer_init(&p->dl.dl_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-       p->dl.dl_runtime = p->dl.runtime = 0;
-       p->dl.dl_deadline = p->dl.deadline = 0;
-       p->dl.dl_period = 0;
-       p->dl.flags = 0;
+       __dl_clear_params(p);
 
        INIT_LIST_HEAD(&p->rt.run_list);
 
@@ -1961,6 +2005,8 @@ unsigned long to_ratio(u64 period, u64 runtime)
 #ifdef CONFIG_SMP
 inline struct dl_bw *dl_bw_of(int i)
 {
+       rcu_lockdep_assert(rcu_read_lock_sched_held(),
+                          "sched RCU must be held");
        return &cpu_rq(i)->rd->dl_bw;
 }
 
@@ -1969,6 +2015,8 @@ static inline int dl_bw_cpus(int i)
        struct root_domain *rd = cpu_rq(i)->rd;
        int cpus = 0;
 
+       rcu_lockdep_assert(rcu_read_lock_sched_held(),
+                          "sched RCU must be held");
        for_each_cpu_and(i, rd->span, cpu_active_mask)
                cpus++;
 
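
The two assertions above document that dl_bw_of() and dl_bw_cpus() dereference the RCU-protected root domain, so callers must sit inside a sched-RCU read-side section. The later hunks in this patch convert the callers to exactly that shape; sketched here for reference, with cpu, flags and cpus assumed to come from the surrounding caller.

        struct dl_bw *dl_b;

        rcu_read_lock_sched();
        dl_b = dl_bw_of(cpu);

        raw_spin_lock_irqsave(&dl_b->lock, flags);
        cpus = dl_bw_cpus(cpu);
        /* ... inspect or update dl_b->total_bw / dl_b->bw under the lock ... */
        raw_spin_unlock_irqrestore(&dl_b->lock, flags);

        rcu_read_unlock_sched();
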
@@ -2079,7 +2127,7 @@ void wake_up_new_task(struct task_struct *p)
        init_task_runnable_average(p);
        rq = __task_rq_lock(p);
        activate_task(rq, p, 0);
-       p->on_rq = 1;
+       p->on_rq = TASK_ON_RQ_QUEUED;
        trace_sched_wakeup_new(p, true);
        check_preempt_curr(rq, p, WF_FORK);
 #ifdef CONFIG_SMP
@@ -2271,10 +2319,6 @@ asmlinkage __visible void schedule_tail(struct task_struct *prev)
         */
        post_schedule(rq);
 
-#ifdef __ARCH_WANT_UNLOCKED_CTXSW
-       /* In this case, finish_task_switch does not reenable preemption */
-       preempt_enable();
-#endif
        if (current->set_child_tid)
                put_user(task_pid_vnr(current), current->set_child_tid);
 }
@@ -2317,9 +2361,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
         * of the scheduler it's an obvious special-case), so we
         * do an early lockdep release here:
         */
-#ifndef __ARCH_WANT_UNLOCKED_CTXSW
        spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
-#endif
 
        context_tracking_task_switch(prev, next);
        /* Here we just switch the register state and the stack. */
@@ -2447,7 +2489,7 @@ static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
         * project cycles that may never be accounted to this
         * thread, breaking clock_gettime().
         */
-       if (task_current(rq, p) && p->on_rq) {
+       if (task_current(rq, p) && task_on_rq_queued(p)) {
                update_rq_clock(rq);
                ns = rq_clock_task(rq) - p->se.exec_start;
                if ((s64)ns < 0)
@@ -2493,7 +2535,7 @@ unsigned long long task_sched_runtime(struct task_struct *p)
         * If we see ->on_cpu without ->on_rq, the task is leaving, and has
         * been accounted, so we're correct here as well.
         */
-       if (!p->on_cpu || !p->on_rq)
+       if (!p->on_cpu || !task_on_rq_queued(p))
                return p->se.sum_exec_runtime;
 #endif
 
@@ -2656,6 +2698,9 @@ static noinline void __schedule_bug(struct task_struct *prev)
  */
 static inline void schedule_debug(struct task_struct *prev)
 {
+#ifdef CONFIG_SCHED_STACK_END_CHECK
+       BUG_ON(unlikely(task_stack_end_corrupted(prev)));
+#endif
        /*
         * Test if we are atomic. Since do_exit() needs to call into
         * schedule() atomically, we ignore that path. Otherwise whine
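
With CONFIG_SCHED_STACK_END_CHECK enabled, the hunk above makes every schedule() verify the magic word at the far end of the previous task's stack and BUG if it has been overwritten. A user-space model of the check; STACK_END_MAGIC matches the kernel's sentinel value, the rest is illustrative.

#include <stdbool.h>

#define STACK_END_MAGIC 0x57AC6E9DUL

/* The sentinel sits at the lowest address of the stack area, so an overflow
 * that runs off the end of the stack tramples it first. */
static bool stack_end_corrupted(const unsigned long *stack_end)
{
        return *stack_end != STACK_END_MAGIC;
}
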
@@ -2797,7 +2842,7 @@ need_resched:
                switch_count = &prev->nvcsw;
        }
 
-       if (prev->on_rq || rq->skip_clock_update < 0)
+       if (task_on_rq_queued(prev) || rq->skip_clock_update < 0)
                update_rq_clock(rq);
 
        next = pick_next_task(rq, prev);
@@ -2962,7 +3007,7 @@ EXPORT_SYMBOL(default_wake_function);
  */
 void rt_mutex_setprio(struct task_struct *p, int prio)
 {
-       int oldprio, on_rq, running, enqueue_flag = 0;
+       int oldprio, queued, running, enqueue_flag = 0;
        struct rq *rq;
        const struct sched_class *prev_class;
 
@@ -2991,12 +3036,12 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
        trace_sched_pi_setprio(p, prio);
        oldprio = p->prio;
        prev_class = p->sched_class;
-       on_rq = p->on_rq;
+       queued = task_on_rq_queued(p);
        running = task_current(rq, p);
-       if (on_rq)
+       if (queued)
                dequeue_task(rq, p, 0);
        if (running)
-               p->sched_class->put_prev_task(rq, p);
+               put_prev_task(rq, p);
 
        /*
         * Boosting condition are:
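
Here and in the later hunks the open-coded p->sched_class->put_prev_task(rq, p) call is replaced by a put_prev_task() wrapper. The wrapper lives in kernel/sched/sched.h in the same series, not in this file; roughly:

static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
{
        prev->sched_class->put_prev_task(rq, prev);
}
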
@@ -3033,7 +3078,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 
        if (running)
                p->sched_class->set_curr_task(rq);
-       if (on_rq)
+       if (queued)
                enqueue_task(rq, p, enqueue_flag);
 
        check_class_changed(rq, p, prev_class, oldprio);
@@ -3044,7 +3089,7 @@ out_unlock:
 
 void set_user_nice(struct task_struct *p, long nice)
 {
-       int old_prio, delta, on_rq;
+       int old_prio, delta, queued;
        unsigned long flags;
        struct rq *rq;
 
@@ -3065,8 +3110,8 @@ void set_user_nice(struct task_struct *p, long nice)
                p->static_prio = NICE_TO_PRIO(nice);
                goto out_unlock;
        }
-       on_rq = p->on_rq;
-       if (on_rq)
+       queued = task_on_rq_queued(p);
+       if (queued)
                dequeue_task(rq, p, 0);
 
        p->static_prio = NICE_TO_PRIO(nice);
@@ -3075,7 +3120,7 @@ void set_user_nice(struct task_struct *p, long nice)
        p->prio = effective_prio(p);
        delta = p->prio - old_prio;
 
-       if (on_rq) {
+       if (queued) {
                enqueue_task(rq, p, 0);
                /*
                 * If the task increased its priority or is running and
@@ -3347,7 +3392,7 @@ static int __sched_setscheduler(struct task_struct *p,
 {
        int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
                      MAX_RT_PRIO - 1 - attr->sched_priority;
-       int retval, oldprio, oldpolicy = -1, on_rq, running;
+       int retval, oldprio, oldpolicy = -1, queued, running;
        int policy = attr->sched_policy;
        unsigned long flags;
        const struct sched_class *prev_class;
@@ -3544,19 +3589,19 @@ change:
                return 0;
        }
 
-       on_rq = p->on_rq;
+       queued = task_on_rq_queued(p);
        running = task_current(rq, p);
-       if (on_rq)
+       if (queued)
                dequeue_task(rq, p, 0);
        if (running)
-               p->sched_class->put_prev_task(rq, p);
+               put_prev_task(rq, p);
 
        prev_class = p->sched_class;
        __setscheduler(rq, p, attr);
 
        if (running)
                p->sched_class->set_curr_task(rq);
-       if (on_rq) {
+       if (queued) {
                /*
                 * We enqueue to tail when the priority of a task is
                 * increased (user space view).
@@ -3980,14 +4025,14 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
                rcu_read_lock();
                if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
                        rcu_read_unlock();
-                       goto out_unlock;
+                       goto out_free_new_mask;
                }
                rcu_read_unlock();
        }
 
        retval = security_task_setscheduler(p);
        if (retval)
-               goto out_unlock;
+               goto out_free_new_mask;
 
 
        cpuset_cpus_allowed(p, cpus_allowed);
@@ -4000,13 +4045,14 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
         * root_domain.
         */
 #ifdef CONFIG_SMP
-       if (task_has_dl_policy(p)) {
-               const struct cpumask *span = task_rq(p)->rd->span;
-
-               if (dl_bandwidth_enabled() && !cpumask_subset(span, new_mask)) {
+       if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
+               rcu_read_lock();
+               if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) {
                        retval = -EBUSY;
-                       goto out_unlock;
+                       rcu_read_unlock();
+                       goto out_free_new_mask;
                }
+               rcu_read_unlock();
        }
 #endif
 again:
@@ -4024,7 +4070,7 @@ again:
                        goto again;
                }
        }
-out_unlock:
+out_free_new_mask:
        free_cpumask_var(new_mask);
 out_free_cpus_allowed:
        free_cpumask_var(cpus_allowed);
@@ -4508,7 +4554,7 @@ void show_state_filter(unsigned long state_filter)
                "  task                        PC stack   pid father\n");
 #endif
        rcu_read_lock();
-       do_each_thread(g, p) {
+       for_each_process_thread(g, p) {
                /*
                 * reset the NMI-timeout, listing all files on a slow
                 * console might take a lot of time:
@@ -4516,7 +4562,7 @@ void show_state_filter(unsigned long state_filter)
                touch_nmi_watchdog();
                if (!state_filter || (p->state & state_filter))
                        sched_show_task(p);
-       } while_each_thread(g, p);
+       }
 
        touch_all_softlockup_watchdogs();
 
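
This hunk, together with several below, replaces the old do_each_thread()/while_each_thread() pair with for_each_process_thread(), which reads as a single loop and needs no closing macro. The macro is defined in include/linux/sched.h rather than in this file; approximately:

#define for_each_process_thread(p, t)   \
        for_each_process(p) for_each_thread(p, t)
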
@@ -4571,7 +4617,7 @@ void init_idle(struct task_struct *idle, int cpu)
        rcu_read_unlock();
 
        rq->curr = rq->idle = idle;
-       idle->on_rq = 1;
+       idle->on_rq = TASK_ON_RQ_QUEUED;
 #if defined(CONFIG_SMP)
        idle->on_cpu = 1;
 #endif
@@ -4592,6 +4638,33 @@ void init_idle(struct task_struct *idle, int cpu)
 }
 
 #ifdef CONFIG_SMP
+/*
+ * move_queued_task - move a queued task to a new rq.
+ *
+ * Returns (locked) new rq. Old rq's lock is released.
+ */
+static struct rq *move_queued_task(struct task_struct *p, int new_cpu)
+{
+       struct rq *rq = task_rq(p);
+
+       lockdep_assert_held(&rq->lock);
+
+       dequeue_task(rq, p, 0);
+       p->on_rq = TASK_ON_RQ_MIGRATING;
+       set_task_cpu(p, new_cpu);
+       raw_spin_unlock(&rq->lock);
+
+       rq = cpu_rq(new_cpu);
+
+       raw_spin_lock(&rq->lock);
+       BUG_ON(task_cpu(p) != new_cpu);
+       p->on_rq = TASK_ON_RQ_QUEUED;
+       enqueue_task(rq, p, 0);
+       check_preempt_curr(rq, p, 0);
+
+       return rq;
+}
+
 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 {
        if (p->sched_class && p->sched_class->set_cpus_allowed)
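
move_queued_task() above is the writer side of the protocol that the earlier __task_rq_lock()/task_rq_lock() changes spin on: the task is flagged TASK_ON_RQ_MIGRATING for exactly the window in which neither runqueue lock covers it. Continuing the user-space model from the sketch after those hunks (same illustrative struct task / struct rq, not kernel code):

/* Caller holds the source runqueue lock, as in the hunk above. */
static struct rq *move_queued_task_model(struct task *p, struct rq *dst_rq)
{
        struct rq *src_rq = atomic_load(&p->rq);

        atomic_store(&p->on_rq, TASK_ON_RQ_MIGRATING);
        atomic_store(&p->rq, dst_rq);           /* set_task_cpu() analogue */
        pthread_mutex_unlock(&src_rq->lock);

        pthread_mutex_lock(&dst_rq->lock);
        atomic_store(&p->on_rq, TASK_ON_RQ_QUEUED);

        return dst_rq;                          /* returned locked */
}
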
@@ -4648,14 +4721,15 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
                goto out;
 
        dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
-       if (p->on_rq) {
+       if (task_running(rq, p) || p->state == TASK_WAKING) {
                struct migration_arg arg = { p, dest_cpu };
                /* Need help from migration thread: drop lock and wait. */
                task_rq_unlock(rq, p, &flags);
                stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
                tlb_migrate_finish(p->mm);
                return 0;
-       }
+       } else if (task_on_rq_queued(p))
+               rq = move_queued_task(p, dest_cpu);
 out:
        task_rq_unlock(rq, p, &flags);
 
@@ -4676,20 +4750,20 @@ EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
  */
 static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 {
-       struct rq *rq_dest, *rq_src;
+       struct rq *rq;
        int ret = 0;
 
        if (unlikely(!cpu_active(dest_cpu)))
                return ret;
 
-       rq_src = cpu_rq(src_cpu);
-       rq_dest = cpu_rq(dest_cpu);
+       rq = cpu_rq(src_cpu);
 
        raw_spin_lock(&p->pi_lock);
-       double_rq_lock(rq_src, rq_dest);
+       raw_spin_lock(&rq->lock);
        /* Already moved. */
        if (task_cpu(p) != src_cpu)
                goto done;
+
        /* Affinity changed (again). */
        if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
                goto fail;
@@ -4698,16 +4772,12 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
         * If we're not on a rq, the next wake-up will ensure we're
         * placed properly.
         */
-       if (p->on_rq) {
-               dequeue_task(rq_src, p, 0);
-               set_task_cpu(p, dest_cpu);
-               enqueue_task(rq_dest, p, 0);
-               check_preempt_curr(rq_dest, p, 0);
-       }
+       if (task_on_rq_queued(p))
+               rq = move_queued_task(p, dest_cpu);
 done:
        ret = 1;
 fail:
-       double_rq_unlock(rq_src, rq_dest);
+       raw_spin_unlock(&rq->lock);
        raw_spin_unlock(&p->pi_lock);
        return ret;
 }
@@ -4739,22 +4809,22 @@ void sched_setnuma(struct task_struct *p, int nid)
 {
        struct rq *rq;
        unsigned long flags;
-       bool on_rq, running;
+       bool queued, running;
 
        rq = task_rq_lock(p, &flags);
-       on_rq = p->on_rq;
+       queued = task_on_rq_queued(p);
        running = task_current(rq, p);
 
-       if (on_rq)
+       if (queued)
                dequeue_task(rq, p, 0);
        if (running)
-               p->sched_class->put_prev_task(rq, p);
+               put_prev_task(rq, p);
 
        p->numa_preferred_nid = nid;
 
        if (running)
                p->sched_class->set_curr_task(rq);
-       if (on_rq)
+       if (queued)
                enqueue_task(rq, p, 0);
        task_rq_unlock(rq, p, &flags);
 }
@@ -4774,6 +4844,12 @@ static int migration_cpu_stop(void *data)
         * be on another cpu but it doesn't matter.
         */
        local_irq_disable();
+       /*
+        * We need to explicitly wake pending tasks before running
+        * __migrate_task() such that we will not miss enforcing cpus_allowed
+        * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
+        */
+       sched_ttwu_pending();
        __migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
        local_irq_enable();
        return 0;
@@ -5184,6 +5260,7 @@ static int sched_cpu_inactive(struct notifier_block *nfb,
 {
        unsigned long flags;
        long cpu = (long)hcpu;
+       struct dl_bw *dl_b;
 
        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_DOWN_PREPARE:
@@ -5191,15 +5268,19 @@ static int sched_cpu_inactive(struct notifier_block *nfb,
 
                /* explicitly allow suspend */
                if (!(action & CPU_TASKS_FROZEN)) {
-                       struct dl_bw *dl_b = dl_bw_of(cpu);
                        bool overflow;
                        int cpus;
 
+                       rcu_read_lock_sched();
+                       dl_b = dl_bw_of(cpu);
+
                        raw_spin_lock_irqsave(&dl_b->lock, flags);
                        cpus = dl_bw_cpus(cpu);
                        overflow = __dl_overflow(dl_b, cpus, 0, 0);
                        raw_spin_unlock_irqrestore(&dl_b->lock, flags);
 
+                       rcu_read_unlock_sched();
+
                        if (overflow)
                                return notifier_from_errno(-EBUSY);
                }
@@ -5742,7 +5823,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
        const struct cpumask *span = sched_domain_span(sd);
        struct cpumask *covered = sched_domains_tmpmask;
        struct sd_data *sdd = sd->private;
-       struct sched_domain *child;
+       struct sched_domain *sibling;
        int i;
 
        cpumask_clear(covered);
@@ -5753,10 +5834,10 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
                if (cpumask_test_cpu(i, covered))
                        continue;
 
-               child = *per_cpu_ptr(sdd->sd, i);
+               sibling = *per_cpu_ptr(sdd->sd, i);
 
                /* See the comment near build_group_mask(). */
-               if (!cpumask_test_cpu(i, sched_domain_span(child)))
+               if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
                        continue;
 
                sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
@@ -5766,10 +5847,9 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
                        goto fail;
 
                sg_span = sched_group_cpus(sg);
-               if (child->child) {
-                       child = child->child;
-                       cpumask_copy(sg_span, sched_domain_span(child));
-               } else
+               if (sibling->child)
+                       cpumask_copy(sg_span, sched_domain_span(sibling->child));
+               else
                        cpumask_set_cpu(i, sg_span);
 
                cpumask_or(covered, covered, sg_span);
@@ -7120,13 +7200,13 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
                .sched_policy = SCHED_NORMAL,
        };
        int old_prio = p->prio;
-       int on_rq;
+       int queued;
 
-       on_rq = p->on_rq;
-       if (on_rq)
+       queued = task_on_rq_queued(p);
+       if (queued)
                dequeue_task(rq, p, 0);
        __setscheduler(rq, p, &attr);
-       if (on_rq) {
+       if (queued) {
                enqueue_task(rq, p, 0);
                resched_curr(rq);
        }
@@ -7140,12 +7220,12 @@ void normalize_rt_tasks(void)
        unsigned long flags;
        struct rq *rq;
 
-       read_lock_irqsave(&tasklist_lock, flags);
-       do_each_thread(g, p) {
+       read_lock(&tasklist_lock);
+       for_each_process_thread(g, p) {
                /*
                 * Only normalize user tasks:
                 */
-               if (!p->mm)
+               if (p->flags & PF_KTHREAD)
                        continue;
 
                p->se.exec_start                = 0;
@@ -7160,21 +7240,16 @@ void normalize_rt_tasks(void)
                         * Renice negative nice level userspace
                         * tasks back to 0:
                         */
-                       if (task_nice(p) < 0 && p->mm)
+                       if (task_nice(p) < 0)
                                set_user_nice(p, 0);
                        continue;
                }
 
-               raw_spin_lock(&p->pi_lock);
-               rq = __task_rq_lock(p);
-
+               rq = task_rq_lock(p, &flags);
                normalize_task(rq, p);
-
-               __task_rq_unlock(rq);
-               raw_spin_unlock(&p->pi_lock);
-       } while_each_thread(g, p);
-
-       read_unlock_irqrestore(&tasklist_lock, flags);
+               task_rq_unlock(rq, p, &flags);
+       }
+       read_unlock(&tasklist_lock);
 }
 
 #endif /* CONFIG_MAGIC_SYSRQ */
@@ -7314,19 +7389,19 @@ void sched_offline_group(struct task_group *tg)
 void sched_move_task(struct task_struct *tsk)
 {
        struct task_group *tg;
-       int on_rq, running;
+       int queued, running;
        unsigned long flags;
        struct rq *rq;
 
        rq = task_rq_lock(tsk, &flags);
 
        running = task_current(rq, tsk);
-       on_rq = tsk->on_rq;
+       queued = task_on_rq_queued(tsk);
 
-       if (on_rq)
+       if (queued)
                dequeue_task(rq, tsk, 0);
        if (unlikely(running))
-               tsk->sched_class->put_prev_task(rq, tsk);
+               put_prev_task(rq, tsk);
 
        tg = container_of(task_css_check(tsk, cpu_cgrp_id,
                                lockdep_is_held(&tsk->sighand->siglock)),
@@ -7336,14 +7411,14 @@ void sched_move_task(struct task_struct *tsk)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
        if (tsk->sched_class->task_move_group)
-               tsk->sched_class->task_move_group(tsk, on_rq);
+               tsk->sched_class->task_move_group(tsk, queued);
        else
 #endif
                set_task_rq(tsk, task_cpu(tsk));
 
        if (unlikely(running))
                tsk->sched_class->set_curr_task(rq);
-       if (on_rq)
+       if (queued)
                enqueue_task(rq, tsk, 0);
 
        task_rq_unlock(rq, tsk, &flags);
@@ -7361,10 +7436,10 @@ static inline int tg_has_rt_tasks(struct task_group *tg)
 {
        struct task_struct *g, *p;
 
-       do_each_thread(g, p) {
-               if (rt_task(p) && task_rq(p)->rt.tg == tg)
+       for_each_process_thread(g, p) {
+               if (rt_task(p) && task_group(p) == tg)
                        return 1;
-       } while_each_thread(g, p);
+       }
 
        return 0;
 }
@@ -7573,6 +7648,7 @@ static int sched_dl_global_constraints(void)
        u64 runtime = global_rt_runtime();
        u64 period = global_rt_period();
        u64 new_bw = to_ratio(period, runtime);
+       struct dl_bw *dl_b;
        int cpu, ret = 0;
        unsigned long flags;
 
@@ -7586,13 +7662,16 @@ static int sched_dl_global_constraints(void)
         * solutions is welcome!
         */
        for_each_possible_cpu(cpu) {
-               struct dl_bw *dl_b = dl_bw_of(cpu);
+               rcu_read_lock_sched();
+               dl_b = dl_bw_of(cpu);
 
                raw_spin_lock_irqsave(&dl_b->lock, flags);
                if (new_bw < dl_b->total_bw)
                        ret = -EBUSY;
                raw_spin_unlock_irqrestore(&dl_b->lock, flags);
 
+               rcu_read_unlock_sched();
+
                if (ret)
                        break;
        }
@@ -7603,6 +7682,7 @@ static int sched_dl_global_constraints(void)
 static void sched_dl_do_global(void)
 {
        u64 new_bw = -1;
+       struct dl_bw *dl_b;
        int cpu;
        unsigned long flags;
 
@@ -7616,11 +7696,14 @@ static void sched_dl_do_global(void)
         * FIXME: As above...
         */
        for_each_possible_cpu(cpu) {
-               struct dl_bw *dl_b = dl_bw_of(cpu);
+               rcu_read_lock_sched();
+               dl_b = dl_bw_of(cpu);
 
                raw_spin_lock_irqsave(&dl_b->lock, flags);
                dl_b->bw = new_bw;
                raw_spin_unlock_irqrestore(&dl_b->lock, flags);
+
+               rcu_read_unlock_sched();
        }
 }
 
@@ -8001,7 +8084,7 @@ static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
                struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
 
                quota = normalize_cfs_quota(tg, d);
-               parent_quota = parent_b->hierarchal_quota;
+               parent_quota = parent_b->hierarchical_quota;
 
                /*
                 * ensure max(child_quota) <= parent_quota, inherit when no
@@ -8012,7 +8095,7 @@ static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
                else if (parent_quota != RUNTIME_INF && quota > parent_quota)
                        return -EINVAL;
        }
-       cfs_b->hierarchal_quota = quota;
+       cfs_b->hierarchical_quota = quota;
 
        return 0;
 }
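
Beyond the hierarchal -> hierarchical spelling fix, the surrounding logic in tg_cfs_schedulable_down() enforces that a child group's normalized quota never exceeds its parent's, and that an unlimited child inherits the parent's value. A small standalone model of that rule; RUNTIME_INF and the -EINVAL convention mirror the kernel, check_child_quota() itself is made up for illustration.

#include <errno.h>
#include <stdint.h>

#define RUNTIME_INF ((uint64_t)~0ULL)

static int check_child_quota(uint64_t parent_quota, uint64_t *child_quota)
{
        if (*child_quota == RUNTIME_INF)
                *child_quota = parent_quota;    /* inherit when no limit is set */
        else if (parent_quota != RUNTIME_INF && *child_quota > parent_quota)
                return -EINVAL;                 /* child must not exceed parent */
        return 0;
}
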