/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

#ifdef CONFIG_RT_GROUP_SCHED

#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_SCHED_DEBUG
	WARN_ON_ONCE(!rt_entity_is_task(rt_se));
#endif
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	return rt_se->rt_rq;
}

#else /* CONFIG_RT_GROUP_SCHED */

#define rt_entity_is_task(rt_se) (1)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return container_of(rt_rq, struct rq, rt);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	struct task_struct *p = rt_task_of(rt_se);
	struct rq *rq = task_rq(p);

	return &rq->rt;
}

#endif /* CONFIG_RT_GROUP_SCHED */
#ifdef CONFIG_SMP

static inline int rt_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
	/*
	 * Make sure the mask is visible before we set
	 * the overload count. That is checked to determine
	 * if we should look at the mask. It would be a shame
	 * if we looked at the mask, but the mask was not
	 * updated yet.
	 */
	wmb();
	atomic_inc(&rq->rd->rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	/* the order here really doesn't matter */
	atomic_dec(&rq->rd->rto_count);
	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
}
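/*
 * Editor's illustration (hedged sketch, not part of the kernel source):
 * the rto_mask/rto_count pair behaves like a publish/subscribe handshake,
 * where the mask must be visible before the count that advertises it.
 * A minimal user-space analogue, assuming C11 atomics:
 */
#if 0
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int rto_count;
static atomic_bool rto_mask_bit;	/* stands in for one bit of rto_mask */

static void set_overload(void)
{
	atomic_store_explicit(&rto_mask_bit, true, memory_order_relaxed);
	/* publish the mask before bumping the count (mirrors the wmb()) */
	atomic_fetch_add_explicit(&rto_count, 1, memory_order_release);
}

static bool overloaded(void)
{
	/* readers check the count first, then go look at the mask */
	return atomic_load_explicit(&rto_count, memory_order_acquire) > 0;
}
#endif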
static void update_rt_migration(struct rt_rq *rt_rq)
{
	if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
		if (!rt_rq->overloaded) {
			rt_set_overload(rq_of_rt_rq(rt_rq));
			rt_rq->overloaded = 1;
		}
	} else if (rt_rq->overloaded) {
		rt_clear_overload(rq_of_rt_rq(rt_rq));
		rt_rq->overloaded = 0;
	}
}

static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (!rt_entity_is_task(rt_se))
		return;

	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total++;
	if (rt_se->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory++;

	update_rt_migration(rt_rq);
}

static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (!rt_entity_is_task(rt_se))
		return;

	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total--;
	if (rt_se->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory--;

	update_rt_migration(rt_rq);
}
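/*
 * Editor's note (illustrative): a runqueue becomes "overloaded" only when
 * it holds more than one RT task (rt_nr_total > 1) and at least one of
 * them may run elsewhere (rt_nr_migratory). A lone RT task, or a set of
 * tasks all pinned to this CPU, is never worth advertising to peers.
 */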
static inline int has_pushable_tasks(struct rq *rq)
{
	return !plist_head_empty(&rq->rt.pushable_tasks);
}

static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
	plist_node_init(&p->pushable_tasks, p->prio);
	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);

	/* Update the highest prio pushable task */
	if (p->prio < rq->rt.highest_prio.next)
		rq->rt.highest_prio.next = p->prio;
}

static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);

	/* Update the new highest prio pushable task */
	if (has_pushable_tasks(rq)) {
		p = plist_first_entry(&rq->rt.pushable_tasks,
				      struct task_struct, pushable_tasks);
		rq->rt.highest_prio.next = p->prio;
	} else
		rq->rt.highest_prio.next = MAX_RT_PRIO;
}
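/*
 * Editor's illustration (hedged sketch): pushable_tasks is a "plist", a
 * priority-sorted list in which a lower numeric prio sorts first, so the
 * highest-priority pushable task is always the first entry. Schematic use
 * of the kernel plist API, assuming this kernel era's signatures:
 */
#if 0
static void plist_sketch(void)
{
	struct plist_head head;
	struct plist_node node;

	plist_head_init(&head);
	plist_node_init(&node, 10);	/* key: prio 10, lower sorts first */
	plist_add(&node, &head);	/* lands at its sorted position */
	plist_del(&node, &head);
}
#endif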
#else

static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline
void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

#endif /* CONFIG_SMP */

static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
	return !list_empty(&rt_se->run_list);
}
#ifdef CONFIG_RT_GROUP_SCHED

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	if (!rt_rq->tg)
		return RUNTIME_INF;

	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}

typedef struct task_group *rt_rq_iter_t;

static inline struct task_group *next_task_group(struct task_group *tg)
{
	do {
		tg = list_entry_rcu(tg->list.next,
			typeof(struct task_group), list);
	} while (&tg->list != &task_groups && task_group_is_autogroup(tg));

	if (&tg->list == &task_groups)
		tg = NULL;

	return tg;
}

#define for_each_rt_rq(rt_rq, iter, rq)					\
	for (iter = container_of(&task_groups, typeof(*iter), list);	\
		(iter = next_task_group(iter)) &&			\
		(rt_rq = iter->rt_rq[cpu_of(rq)]);)

static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
{
	list_add_rcu(&rt_rq->leaf_rt_rq_list,
			&rq_of_rt_rq(rt_rq)->leaf_rt_rq_list);
}

static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
{
	list_del_rcu(&rt_rq->leaf_rt_rq_list);
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
	list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->my_q;
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se);

static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
	struct sched_rt_entity *rt_se;

	int cpu = cpu_of(rq_of_rt_rq(rt_rq));

	rt_se = rt_rq->tg->rt_se[cpu];

	if (rt_rq->rt_nr_running) {
		if (rt_se && !on_rt_rq(rt_se))
			enqueue_rt_entity(rt_se, false);
		if (rt_rq->highest_prio.curr < curr->prio)
			resched_task(curr);
	}
}

static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
	struct sched_rt_entity *rt_se;
	int cpu = cpu_of(rq_of_rt_rq(rt_rq));

	rt_se = rt_rq->tg->rt_se[cpu];

	if (rt_se && on_rt_rq(rt_se))
		dequeue_rt_entity(rt_se);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = group_rt_rq(rt_se);
	struct task_struct *p;

	if (rt_rq)
		return !!rt_rq->rt_nr_boosted;

	p = rt_task_of(rt_se);
	return p->prio != p->normal_prio;
}

#ifdef CONFIG_SMP
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_rq(smp_processor_id())->rd->span;
}
#else
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}
#endif

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &rt_rq->tg->rt_bandwidth;
}
#else /* !CONFIG_RT_GROUP_SCHED */

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(def_rt_bandwidth.rt_period);
}

typedef struct rt_rq *rt_rq_iter_t;

#define for_each_rt_rq(rt_rq, iter, rq) \
	for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
{
}

static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
{
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
	for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return NULL;
}

static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	if (rt_rq->rt_nr_running)
		resched_task(rq_of_rt_rq(rt_rq)->curr);
}

static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled;
}

static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return &cpu_rq(cpu)->rt;
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &def_rt_bandwidth;
}

#endif /* CONFIG_RT_GROUP_SCHED */
#ifdef CONFIG_SMP
/*
 * We ran out of runtime, see if we can borrow some from our neighbours.
 */
static int do_balance_runtime(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
	struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
	int i, weight, more = 0;
	u64 rt_period;

	weight = cpumask_weight(rd->span);

	raw_spin_lock(&rt_b->rt_runtime_lock);
	rt_period = ktime_to_ns(rt_b->rt_period);
	for_each_cpu(i, rd->span) {
		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
		s64 diff;

		if (iter == rt_rq)
			continue;

		raw_spin_lock(&iter->rt_runtime_lock);
		/*
		 * Either all rqs have inf runtime and there's nothing to steal
		 * or __disable_runtime() below sets a specific rq to inf to
		 * indicate it's been disabled and disallow stealing.
		 */
		if (iter->rt_runtime == RUNTIME_INF)
			goto next;

		/*
		 * From runqueues with spare time, take 1/n part of their
		 * spare time, but no more than our period.
		 */
		diff = iter->rt_runtime - iter->rt_time;
		if (diff > 0) {
			diff = div_u64((u64)diff, weight);
			if (rt_rq->rt_runtime + diff > rt_period)
				diff = rt_period - rt_rq->rt_runtime;
			iter->rt_runtime -= diff;
			rt_rq->rt_runtime += diff;
			more = 1;
			if (rt_rq->rt_runtime == rt_period) {
				raw_spin_unlock(&iter->rt_runtime_lock);
				break;
			}
		}
next:
		raw_spin_unlock(&iter->rt_runtime_lock);
	}
	raw_spin_unlock(&rt_b->rt_runtime_lock);

	return more;
}
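/*
 * Editor's worked example (illustrative numbers): with a 1s period and
 * 0.95s runtime on a 4-CPU root domain (weight = 4), a neighbour that has
 * consumed 0.55s of its 0.95s has 0.4s spare, so we may borrow
 * 0.4s / 4 = 0.1s from it, capped so that our own rt_runtime never
 * exceeds rt_period.
 */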
/*
 * Ensure this RQ takes back all the runtime it lent to its neighbours.
 */
static void __disable_runtime(struct rq *rq)
{
	struct root_domain *rd = rq->rd;
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	for_each_rt_rq(rt_rq, iter, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
		s64 want;
		int i;

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * Either we're all inf and nobody needs to borrow, or we're
		 * already disabled and thus have nothing to do, or we have
		 * exactly the right amount of runtime to take out.
		 */
		if (rt_rq->rt_runtime == RUNTIME_INF ||
				rt_rq->rt_runtime == rt_b->rt_runtime)
			goto balanced;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);

		/*
		 * Calculate the difference between what we started out with
		 * and what we currently have, that's the amount of runtime
		 * we lent and now have to reclaim.
		 */
		want = rt_b->rt_runtime - rt_rq->rt_runtime;

		/*
		 * Greedy reclaim, take back as much as we can.
		 */
		for_each_cpu(i, rd->span) {
			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
			s64 diff;

			/*
			 * Can't reclaim from ourselves or disabled runqueues.
			 */
			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
				continue;

			raw_spin_lock(&iter->rt_runtime_lock);
			if (want > 0) {
				diff = min_t(s64, iter->rt_runtime, want);
				iter->rt_runtime -= diff;
				want -= diff;
			} else {
				iter->rt_runtime -= want;
				want -= want;
			}
			raw_spin_unlock(&iter->rt_runtime_lock);

			if (!want)
				break;
		}

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * We cannot be left wanting - that would mean some runtime
		 * leaked out of the system.
		 */
		BUG_ON(want);
balanced:
		/*
		 * Disable all the borrow logic by pretending we have inf
		 * runtime - in which case borrowing doesn't make sense.
		 */
		rt_rq->rt_runtime = RUNTIME_INF;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);
	}
}

static void disable_runtime(struct rq *rq)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);
	__disable_runtime(rq);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

static void __enable_runtime(struct rq *rq)
{
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	/*
	 * Reset each runqueue's bandwidth settings
	 */
	for_each_rt_rq(rt_rq, iter, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = rt_b->rt_runtime;
		rt_rq->rt_time = 0;
		rt_rq->rt_throttled = 0;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);
	}
}

static void enable_runtime(struct rq *rq)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);
	__enable_runtime(rq);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

static int balance_runtime(struct rt_rq *rt_rq)
{
	int more = 0;

	if (!sched_feat(RT_RUNTIME_SHARE))
		return more;

	if (rt_rq->rt_time > rt_rq->rt_runtime) {
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		more = do_balance_runtime(rt_rq);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
	}

	return more;
}
#else /* !CONFIG_SMP */
static inline int balance_runtime(struct rt_rq *rt_rq)
{
	return 0;
}
#endif /* CONFIG_SMP */
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
	int i, idle = 1;
	const struct cpumask *span;

	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
		return 1;

	span = sched_rt_period_mask();
#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * FIXME: isolated CPUs should really leave the root task group,
	 * whether they are isolcpus or were isolated via cpusets, lest
	 * the timer run on a CPU which does not service all runqueues,
	 * potentially leaving other CPUs indefinitely throttled. If
	 * isolation is really required, the user will turn the throttle
	 * off to kill the perturbations it causes anyway. Meanwhile,
	 * this maintains functionality for boot and/or troubleshooting.
	 */
	if (rt_b == &root_task_group.rt_bandwidth)
		span = cpu_online_mask;
#endif
	for_each_cpu(i, span) {
		int enqueue = 0;
		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
		struct rq *rq = rq_of_rt_rq(rt_rq);

		raw_spin_lock(&rq->lock);
		if (rt_rq->rt_time) {
			u64 runtime;

			raw_spin_lock(&rt_rq->rt_runtime_lock);
			if (rt_rq->rt_throttled)
				balance_runtime(rt_rq);
			runtime = rt_rq->rt_runtime;
			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
				rt_rq->rt_throttled = 0;
				enqueue = 1;

				/*
				 * Force a clock update if the CPU was idle,
				 * lest wakeup -> unthrottle time accumulate.
				 */
				if (rt_rq->rt_nr_running && rq->curr == rq->idle)
					rq->skip_clock_update = -1;
			}
			if (rt_rq->rt_time || rt_rq->rt_nr_running)
				idle = 0;
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
		} else if (rt_rq->rt_nr_running) {
			idle = 0;
			if (!rt_rq_throttled(rt_rq))
				enqueue = 1;
		}

		if (enqueue)
			sched_rt_rq_enqueue(rt_rq);
		raw_spin_unlock(&rq->lock);
	}

	return idle;
}

static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_RT_GROUP_SCHED
	struct rt_rq *rt_rq = group_rt_rq(rt_se);

	if (rt_rq)
		return rt_rq->highest_prio.curr;
#endif

	return rt_task_of(rt_se)->prio;
}
static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
	u64 runtime = sched_rt_runtime(rt_rq);

	if (rt_rq->rt_throttled)
		return rt_rq_throttled(rt_rq);

	if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
		return 0;

	balance_runtime(rt_rq);
	runtime = sched_rt_runtime(rt_rq);
	if (runtime == RUNTIME_INF)
		return 0;

	if (rt_rq->rt_time > runtime) {
		rt_rq->rt_throttled = 1;
		printk_once(KERN_WARNING "sched: RT throttling activated\n");
		if (rt_rq_throttled(rt_rq)) {
			sched_rt_rq_dequeue(rt_rq);
			return 1;
		}
	}

	return 0;
}
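/*
 * Editor's illustration (hedged, user-space sketch): the throttle above is
 * driven by the sched_rt_period_us / sched_rt_runtime_us sysctls (defaults
 * 1000000 and 950000, i.e. RT tasks may use at most 0.95s of each 1s
 * period). A process can inspect the setting via procfs:
 */
#if 0
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/sched_rt_runtime_us", "r");
	long runtime_us;

	if (f && fscanf(f, "%ld", &runtime_us) == 1)
		printf("RT runtime per period: %ld us\n", runtime_us);
	if (f)
		fclose(f);
	return 0;
}
#endif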
/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_rt_entity *rt_se = &curr->rt;
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	u64 delta_exec;

	if (curr->sched_class != &rt_sched_class)
		return;

	delta_exec = rq->clock_task - curr->se.exec_start;
	if (unlikely((s64)delta_exec < 0))
		delta_exec = 0;

	schedstat_set(curr->se.statistics.exec_max, max(curr->se.statistics.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = rq->clock_task;
	cpuacct_charge(curr, delta_exec);

	sched_rt_avg_update(rq, delta_exec);

	if (!rt_bandwidth_enabled())
		return;

	for_each_sched_rt_entity(rt_se) {
		rt_rq = rt_rq_of_se(rt_se);

		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
			raw_spin_lock(&rt_rq->rt_runtime_lock);
			rt_rq->rt_time += delta_exec;
			if (sched_rt_runtime_exceeded(rt_rq))
				resched_task(curr);
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
		}
	}
}
#if defined CONFIG_SMP

static void
inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * Change rq's cpupri only if rt_rq is the top queue.
	 */
	if (&rq->rt != rt_rq)
		return;
#endif
	if (rq->online && prio < prev_prio)
		cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
}

static void
dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * Change rq's cpupri only if rt_rq is the top queue.
	 */
	if (&rq->rt != rt_rq)
		return;
#endif
	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
}

#else /* CONFIG_SMP */

static inline
void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
static inline
void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}

#endif /* CONFIG_SMP */

#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
static void
inc_rt_prio(struct rt_rq *rt_rq, int prio)
{
	int prev_prio = rt_rq->highest_prio.curr;

	if (prio < prev_prio)
		rt_rq->highest_prio.curr = prio;

	inc_rt_prio_smp(rt_rq, prio, prev_prio);
}

static void
dec_rt_prio(struct rt_rq *rt_rq, int prio)
{
	int prev_prio = rt_rq->highest_prio.curr;

	if (rt_rq->rt_nr_running) {

		WARN_ON(prio < prev_prio);

		/*
		 * This may have been our highest task, and therefore
		 * we may have some recomputation to do
		 */
		if (prio == prev_prio) {
			struct rt_prio_array *array = &rt_rq->active;

			rt_rq->highest_prio.curr =
				sched_find_first_bit(array->bitmap);
		}

	} else
		rt_rq->highest_prio.curr = MAX_RT_PRIO;

	dec_rt_prio_smp(rt_rq, prio, prev_prio);
}

#else

static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}

#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted++;

	if (rt_rq->tg)
		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
}

static void
dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted--;

	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
}

#else /* CONFIG_RT_GROUP_SCHED */

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	start_rt_bandwidth(&def_rt_bandwidth);
}

static inline
void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}

#endif /* CONFIG_RT_GROUP_SCHED */

static inline
void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	int prio = rt_se_prio(rt_se);

	WARN_ON(!rt_prio(prio));
	rt_rq->rt_nr_running++;

	inc_rt_prio(rt_rq, prio);
	inc_rt_migration(rt_se, rt_rq);
	inc_rt_group(rt_se, rt_rq);
}

static inline
void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
	WARN_ON(!rt_rq->rt_nr_running);
	rt_rq->rt_nr_running--;

	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
	dec_rt_migration(rt_se, rt_rq);
	dec_rt_group(rt_se, rt_rq);
}
static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;
	struct rt_rq *group_rq = group_rt_rq(rt_se);
	struct list_head *queue = array->queue + rt_se_prio(rt_se);

	/*
	 * Don't enqueue the group if it's throttled, or when empty.
	 * The latter is a consequence of the former when a child group
	 * gets throttled and the current group doesn't have any other
	 * active members.
	 */
	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
		return;

	if (!rt_rq->rt_nr_running)
		list_add_leaf_rt_rq(rt_rq);

	if (head)
		list_add(&rt_se->run_list, queue);
	else
		list_add_tail(&rt_se->run_list, queue);
	__set_bit(rt_se_prio(rt_se), array->bitmap);

	inc_rt_tasks(rt_se, rt_rq);
}

static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;

	list_del_init(&rt_se->run_list);
	if (list_empty(array->queue + rt_se_prio(rt_se)))
		__clear_bit(rt_se_prio(rt_se), array->bitmap);

	dec_rt_tasks(rt_se, rt_rq);
	if (!rt_rq->rt_nr_running)
		list_del_leaf_rt_rq(rt_rq);
}

/*
 * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top - down.
 */
static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
{
	struct sched_rt_entity *back = NULL;

	for_each_sched_rt_entity(rt_se) {
		rt_se->back = back;
		back = rt_se;
	}

	for (rt_se = back; rt_se; rt_se = rt_se->back) {
		if (on_rt_rq(rt_se))
			__dequeue_rt_entity(rt_se);
	}
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
{
	dequeue_rt_stack(rt_se);
	for_each_sched_rt_entity(rt_se)
		__enqueue_rt_entity(rt_se, head);
}

static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
	dequeue_rt_stack(rt_se);

	for_each_sched_rt_entity(rt_se) {
		struct rt_rq *rt_rq = group_rt_rq(rt_se);

		if (rt_rq && rt_rq->rt_nr_running)
			__enqueue_rt_entity(rt_se, false);
	}
}
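/*
 * Editor's note (illustrative): for a task T in group G, the first loop of
 * dequeue_rt_stack() records back-pointers while walking T -> G up the
 * hierarchy; the second loop then dequeues from the top down (G's entity
 * before T's), since a parent entity's priority is derived from its
 * children and must be recomputed after they change.
 */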
/*
 * Adding/removing a task to/from a priority array:
 */
static void
enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
	struct sched_rt_entity *rt_se = &p->rt;

	if (flags & ENQUEUE_WAKEUP)
		rt_se->timeout = 0;

	enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);

	if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
		enqueue_pushable_task(rq, p);

	inc_nr_running(rq);
}

static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
	struct sched_rt_entity *rt_se = &p->rt;

	update_curr_rt(rq);
	dequeue_rt_entity(rt_se);

	dequeue_pushable_task(rq, p);

	dec_nr_running(rq);
}

/*
 * Put task to the end of the run list without the overhead of dequeue
 * followed by enqueue.
 */
static void
requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
{
	if (on_rt_rq(rt_se)) {
		struct rt_prio_array *array = &rt_rq->active;
		struct list_head *queue = array->queue + rt_se_prio(rt_se);

		if (head)
			list_move(&rt_se->run_list, queue);
		else
			list_move_tail(&rt_se->run_list, queue);
	}
}

static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
{
	struct sched_rt_entity *rt_se = &p->rt;
	struct rt_rq *rt_rq;

	for_each_sched_rt_entity(rt_se) {
		rt_rq = rt_rq_of_se(rt_se);
		requeue_rt_entity(rt_rq, rt_se, head);
	}
}

static void yield_task_rt(struct rq *rq)
{
	requeue_task_rt(rq, rq->curr, 0);
}
#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);

static int
select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
{
	struct task_struct *curr;
	struct rq *rq;
	int cpu;

	cpu = task_cpu(p);

	/* For anything but wake ups, just return the task_cpu */
	if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
		goto out;

	rq = cpu_rq(cpu);

	rcu_read_lock();
	curr = ACCESS_ONCE(rq->curr); /* unlocked access */

	/*
	 * If the current task on @p's runqueue is an RT task, then
	 * try to see if we can wake this RT task up on another
	 * runqueue. Otherwise simply start this RT task
	 * on its current runqueue.
	 *
	 * We want to avoid overloading runqueues. If the woken
	 * task is a higher priority, then it will stay on this CPU
	 * and the lower prio task should be moved to another CPU.
	 * Even though this will probably make the lower prio task
	 * lose its cache, we do not want to bounce a higher task
	 * around just because it gave up its CPU, perhaps for a
	 * lock?
	 *
	 * For equal prio tasks, we just let the scheduler sort it out.
	 *
	 * Otherwise, just let it ride on the affined RQ and the
	 * post-schedule router will push the preempted task away.
	 *
	 * This test is optimistic, if we get it wrong the load-balancer
	 * will have to sort it out.
	 */
	if (curr && unlikely(rt_task(curr)) &&
	    (curr->rt.nr_cpus_allowed < 2 ||
	     curr->prio <= p->prio) &&
	    (p->rt.nr_cpus_allowed > 1)) {
		int target = find_lowest_rq(p);

		if (target != -1)
			cpu = target;
	}
	rcu_read_unlock();

out:
	return cpu;
}

static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
{
	if (rq->curr->rt.nr_cpus_allowed == 1)
		return;

	if (p->rt.nr_cpus_allowed != 1
	    && cpupri_find(&rq->rd->cpupri, p, NULL))
		return;

	if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
		return;

	/*
	 * There appears to be other cpus that can accept
	 * current and none to run 'p', so let's reschedule
	 * to try and push current away:
	 */
	requeue_task_rt(rq, p, 1);
	resched_task(rq->curr);
}

#endif /* CONFIG_SMP */
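/*
 * Editor's worked example for check_preempt_equal_prio() above
 * (illustrative): CPU 0 runs task A (prio 10, allowed on CPUs 0-3) when
 * task B (prio 10, pinned to CPU 0) wakes up. Neither preempts the other
 * on priority, and B cannot go anywhere else; but cpupri can place A, so
 * we requeue B at the head and resched, letting the push logic move A and
 * both tasks run.
 */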
/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
{
	if (p->prio < rq->curr->prio) {
		resched_task(rq->curr);
		return;
	}

#ifdef CONFIG_SMP
	/*
	 * If:
	 *
	 * - the newly woken task is of equal priority to the current task
	 * - the newly woken task is non-migratable while current is migratable
	 * - current will be preempted on the next reschedule
	 *
	 * we should check to see if current can readily move to a different
	 * cpu. If so, we will reschedule to allow the push logic to try
	 * to move current somewhere else, making room for our non-migratable
	 * task.
	 */
	if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
		check_preempt_equal_prio(rq, p);
#endif
}

static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
						   struct rt_rq *rt_rq)
{
	struct rt_prio_array *array = &rt_rq->active;
	struct sched_rt_entity *next = NULL;
	struct list_head *queue;
	int idx;

	idx = sched_find_first_bit(array->bitmap);
	BUG_ON(idx >= MAX_RT_PRIO);

	queue = array->queue + idx;
	next = list_entry(queue->next, struct sched_rt_entity, run_list);

	return next;
}

static struct task_struct *_pick_next_task_rt(struct rq *rq)
{
	struct sched_rt_entity *rt_se;
	struct task_struct *p;
	struct rt_rq *rt_rq;

	rt_rq = &rq->rt;

	if (!rt_rq->rt_nr_running)
		return NULL;

	if (rt_rq_throttled(rt_rq))
		return NULL;

	do {
		rt_se = pick_next_rt_entity(rq, rt_rq);
		BUG_ON(!rt_se);
		rt_rq = group_rt_rq(rt_se);
	} while (rt_rq);

	p = rt_task_of(rt_se);
	p->se.exec_start = rq->clock_task;

	return p;
}
static struct task_struct *pick_next_task_rt(struct rq *rq)
{
	struct task_struct *p = _pick_next_task_rt(rq);

	/* The running task is never eligible for pushing */
	if (p)
		dequeue_pushable_task(rq, p);

#ifdef CONFIG_SMP
	/*
	 * We detect this state here so that we can avoid taking the RQ
	 * lock again later if there is no need to push
	 */
	rq->post_schedule = has_pushable_tasks(rq);
#endif

	return p;
}

static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
	update_curr_rt(rq);

	/*
	 * The previous task needs to be made eligible for pushing
	 * if it is still active
	 */
	if (on_rt_rq(&p->rt) && p->rt.nr_cpus_allowed > 1)
		enqueue_pushable_task(rq, p);
}

#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define RT_MAX_TRIES 3

static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);

static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
	if (!task_running(rq, p) &&
	    (cpu < 0 || cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) &&
	    (p->rt.nr_cpus_allowed > 1))
		return 1;
	return 0;
}

/* Return the second highest RT task, NULL otherwise */
static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
{
	struct task_struct *next = NULL;
	struct sched_rt_entity *rt_se;
	struct rt_prio_array *array;
	struct rt_rq *rt_rq;
	int idx;

	for_each_leaf_rt_rq(rt_rq, rq) {
		array = &rt_rq->active;
		idx = sched_find_first_bit(array->bitmap);
next_idx:
		if (idx >= MAX_RT_PRIO)
			continue;
		if (next && next->prio < idx)
			continue;
		list_for_each_entry(rt_se, array->queue + idx, run_list) {
			struct task_struct *p;

			if (!rt_entity_is_task(rt_se))
				continue;

			p = rt_task_of(rt_se);
			if (pick_rt_task(rq, p, cpu)) {
				next = p;
				break;
			}
		}
		if (!next) {
			idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
			goto next_idx;
		}
	}

	return next;
}
static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);

static int find_lowest_rq(struct task_struct *task)
{
	struct sched_domain *sd;
	struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
	int this_cpu = smp_processor_id();
	int cpu      = task_cpu(task);

	/* Make sure the mask is initialized first */
	if (unlikely(!lowest_mask))
		return -1;

	if (task->rt.nr_cpus_allowed == 1)
		return -1; /* No other targets possible */

	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
		return -1; /* No targets found */

	/*
	 * At this point we have built a mask of cpus representing the
	 * lowest priority tasks in the system. Now we want to elect
	 * the best one based on our affinity and topology.
	 *
	 * We prioritize the last cpu that the task executed on since
	 * it is most likely cache-hot in that location.
	 */
	if (cpumask_test_cpu(cpu, lowest_mask))
		return cpu;

	/*
	 * Otherwise, we consult the sched_domains span maps to figure
	 * out which cpu is logically closest to our hot cache data.
	 */
	if (!cpumask_test_cpu(this_cpu, lowest_mask))
		this_cpu = -1; /* Skip this_cpu opt if not among lowest */

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		if (sd->flags & SD_WAKE_AFFINE) {
			int best_cpu;

			/*
			 * "this_cpu" is cheaper to preempt than a
			 * remote processor.
			 */
			if (this_cpu != -1 &&
			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
				rcu_read_unlock();
				return this_cpu;
			}

			best_cpu = cpumask_first_and(lowest_mask,
						     sched_domain_span(sd));
			if (best_cpu < nr_cpu_ids) {
				rcu_read_unlock();
				return best_cpu;
			}
		}
	}
	rcu_read_unlock();

	/*
	 * And finally, if there were no matches within the domains
	 * just give the caller *something* to work with from the compatible
	 * locations.
	 */
	if (this_cpu != -1)
		return this_cpu;

	cpu = cpumask_any(lowest_mask);
	if (cpu < nr_cpu_ids)
		return cpu;
	return -1;
}
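/*
 * Editor's worked example (illustrative): the task last ran on CPU 2 and
 * cpupri reports {1, 2, 5} as the lowest-priority CPUs. CPU 2 is in the
 * mask, so it wins outright; had the mask been {1, 5}, the domain walk
 * would prefer whichever of those shares the closest sched_domain with
 * CPU 2, falling back to any member of the mask.
 */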
/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
{
	struct rq *lowest_rq = NULL;
	int tries;
	int cpu;

	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
		cpu = find_lowest_rq(task);

		if ((cpu == -1) || (cpu == rq->cpu))
			break;

		lowest_rq = cpu_rq(cpu);

		/* if the prio of this runqueue changed, try again */
		if (double_lock_balance(rq, lowest_rq)) {
			/*
			 * We had to unlock the run queue. In
			 * the mean time, task could have
			 * migrated already or had its affinity changed.
			 * Also make sure that it wasn't scheduled on its rq.
			 */
			if (unlikely(task_rq(task) != rq ||
				     !cpumask_test_cpu(lowest_rq->cpu,
						       tsk_cpus_allowed(task)) ||
				     task_running(rq, task) ||
				     !task->on_rq)) {

				raw_spin_unlock(&lowest_rq->lock);
				lowest_rq = NULL;
				break;
			}
		}

		/* If this rq is still suitable use it. */
		if (lowest_rq->rt.highest_prio.curr > task->prio)
			break;

		/* try again */
		double_unlock_balance(rq, lowest_rq);
		lowest_rq = NULL;
	}

	return lowest_rq;
}

static struct task_struct *pick_next_pushable_task(struct rq *rq)
{
	struct task_struct *p;

	if (!has_pushable_tasks(rq))
		return NULL;

	p = plist_first_entry(&rq->rt.pushable_tasks,
			      struct task_struct, pushable_tasks);

	BUG_ON(rq->cpu != task_cpu(p));
	BUG_ON(task_current(rq, p));
	BUG_ON(p->rt.nr_cpus_allowed <= 1);

	BUG_ON(!p->on_rq);
	BUG_ON(!rt_task(p));

	return p;
}
/*
 * If the current CPU has more than one RT task, see if the non
 * running task can migrate over to a CPU that is running a task
 * of lesser priority.
 */
static int push_rt_task(struct rq *rq)
{
	struct task_struct *next_task;
	struct rq *lowest_rq;
	int ret = 0;

	if (!rq->rt.overloaded)
		return 0;

	next_task = pick_next_pushable_task(rq);
	if (!next_task)
		return 0;

#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	if (unlikely(task_running(rq, next_task)))
		return 0;
#endif

retry:
	if (unlikely(next_task == rq->curr)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * It's possible that the next_task slipped in of
	 * higher priority than current. If that's the case
	 * just reschedule current.
	 */
	if (unlikely(next_task->prio < rq->curr->prio)) {
		resched_task(rq->curr);
		return 0;
	}

	/* We might release rq lock */
	get_task_struct(next_task);

	/* find_lock_lowest_rq locks the rq if found */
	lowest_rq = find_lock_lowest_rq(next_task, rq);
	if (!lowest_rq) {
		struct task_struct *task;
		/*
		 * find_lock_lowest_rq releases rq->lock
		 * so it is possible that next_task has migrated.
		 *
		 * We need to make sure that the task is still on the same
		 * run-queue and is also still the next task eligible for
		 * pushing.
		 */
		task = pick_next_pushable_task(rq);
		if (task_cpu(next_task) == rq->cpu && task == next_task) {
			/*
			 * The task hasn't migrated, and is still the next
			 * eligible task, but we failed to find a run-queue
			 * to push it to. Do not retry in this case, since
			 * other cpus will pull from us when ready.
			 */
			goto out;
		}

		if (!task)
			/* No more tasks, just exit */
			goto out;

		/*
		 * Something has shifted, try again.
		 */
		put_task_struct(next_task);
		next_task = task;
		goto retry;
	}

	deactivate_task(rq, next_task, 0);
	set_task_cpu(next_task, lowest_rq->cpu);
	activate_task(lowest_rq, next_task, 0);
	ret = 1;

	resched_task(lowest_rq->curr);

	double_unlock_balance(rq, lowest_rq);

out:
	put_task_struct(next_task);

	return ret;
}

static void push_rt_tasks(struct rq *rq)
{
	/* push_rt_task will return true if it moved an RT */
	while (push_rt_task(rq))
		;
}
static int pull_rt_task(struct rq *this_rq)
{
	int this_cpu = this_rq->cpu, ret = 0, cpu;
	struct task_struct *p;
	struct rq *src_rq;

	if (likely(!rt_overloaded(this_rq)))
		return 0;

	for_each_cpu(cpu, this_rq->rd->rto_mask) {
		if (this_cpu == cpu)
			continue;

		src_rq = cpu_rq(cpu);

		/*
		 * Don't bother taking the src_rq->lock if the next highest
		 * task is known to be lower-priority than our current task.
		 * This may look racy, but if this value is about to go
		 * logically higher, the src_rq will push this task away.
		 * And if it's going logically lower, we do not care.
		 */
		if (src_rq->rt.highest_prio.next >=
		    this_rq->rt.highest_prio.curr)
			continue;

		/*
		 * We can potentially drop this_rq's lock in
		 * double_lock_balance, and another CPU could
		 * alter this_rq
		 */
		double_lock_balance(this_rq, src_rq);

		/*
		 * Are there still pullable RT tasks?
		 */
		if (src_rq->rt.rt_nr_running <= 1)
			goto skip;

		p = pick_next_highest_task_rt(src_rq, this_cpu);

		/*
		 * Do we have an RT task that preempts
		 * the to-be-scheduled task?
		 */
		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
			WARN_ON(p == src_rq->curr);
			WARN_ON(!p->on_rq);

			/*
			 * There's a chance that p is higher in priority
			 * than what's currently running on its cpu.
			 * This is just that p is waking up and hasn't
			 * had a chance to schedule. We only pull
			 * p if it is lower in priority than the
			 * current task on the run queue
			 */
			if (p->prio < src_rq->curr->prio)
				goto skip;

			ret = 1;

			deactivate_task(src_rq, p, 0);
			set_task_cpu(p, this_cpu);
			activate_task(this_rq, p, 0);
			/*
			 * We continue with the search, just in
			 * case there's an even higher prio task
			 * in another runqueue. (low likelihood
			 * but possible)
			 */
		}
skip:
		double_unlock_balance(this_rq, src_rq);
	}

	return ret;
}
static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
{
	/* Try to pull RT tasks here if we lower this rq's prio */
	if (rq->rt.highest_prio.curr > prev->prio)
		pull_rt_task(rq);
}

static void post_schedule_rt(struct rq *rq)
{
	push_rt_tasks(rq);
}

/*
 * If we are not running and we are not going to reschedule soon, we should
 * try to push tasks away now
 */
static void task_woken_rt(struct rq *rq, struct task_struct *p)
{
	if (!task_running(rq, p) &&
	    !test_tsk_need_resched(rq->curr) &&
	    has_pushable_tasks(rq) &&
	    p->rt.nr_cpus_allowed > 1 &&
	    rt_task(rq->curr) &&
	    (rq->curr->rt.nr_cpus_allowed < 2 ||
	     rq->curr->prio <= p->prio))
		push_rt_tasks(rq);
}

static void set_cpus_allowed_rt(struct task_struct *p,
				const struct cpumask *new_mask)
{
	int weight = cpumask_weight(new_mask);

	BUG_ON(!rt_task(p));

	/*
	 * Update the migration status of the RQ if we have an RT task
	 * which is running AND changing its weight value.
	 */
	if (p->on_rq && (weight != p->rt.nr_cpus_allowed)) {
		struct rq *rq = task_rq(p);

		if (!task_current(rq, p)) {
			/*
			 * Make sure we dequeue this task from the pushable list
			 * before going further. It will either remain off of
			 * the list because we are no longer pushable, or it
			 * will be requeued.
			 */
			if (p->rt.nr_cpus_allowed > 1)
				dequeue_pushable_task(rq, p);

			/*
			 * Requeue if our weight is changing and still > 1
			 */
			if (weight > 1)
				enqueue_pushable_task(rq, p);

		}

		if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
			rq->rt.rt_nr_migratory++;
		} else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
			BUG_ON(!rq->rt.rt_nr_migratory);
			rq->rt.rt_nr_migratory--;
		}

		update_rt_migration(&rq->rt);
	}
}
/* Assumes rq->lock is held */
static void rq_online_rt(struct rq *rq)
{
	if (rq->rt.overloaded)
		rt_set_overload(rq);

	__enable_runtime(rq);

	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
}

/* Assumes rq->lock is held */
static void rq_offline_rt(struct rq *rq)
{
	if (rq->rt.overloaded)
		rt_clear_overload(rq);

	__disable_runtime(rq);

	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
}

/*
 * When switching from the rt queue, we bring ourselves to a position
 * that we might want to pull RT tasks from other runqueues.
 */
static void switched_from_rt(struct rq *rq, struct task_struct *p)
{
	/*
	 * If there are other RT tasks then we will reschedule
	 * and the scheduling of the other RT tasks will handle
	 * the balancing. But if we are the last RT task
	 * we may need to handle the pulling of RT tasks
	 * now.
	 */
	if (p->on_rq && !rq->rt.rt_nr_running)
		pull_rt_task(rq);
}

static inline void init_sched_rt_class(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
					GFP_KERNEL, cpu_to_node(i));
}
#endif /* CONFIG_SMP */
/*
 * When switching a task to RT, we may overload the runqueue
 * with RT tasks. In this case we try to push them off to
 * other runqueues.
 */
static void switched_to_rt(struct rq *rq, struct task_struct *p)
{
	int check_resched = 1;

	/*
	 * If we are already running, then there's nothing
	 * that needs to be done. But if we are not running
	 * we may need to preempt the current running task.
	 * If that current running task is also an RT task
	 * then see if we can move to another run queue.
	 */
	if (p->on_rq && rq->curr != p) {
#ifdef CONFIG_SMP
		if (rq->rt.overloaded && push_rt_task(rq) &&
		    /* Don't resched if we changed runqueues */
		    rq != task_rq(p))
			check_resched = 0;
#endif /* CONFIG_SMP */
		if (check_resched && p->prio < rq->curr->prio)
			resched_task(rq->curr);
	}
}

/*
 * Priority of the task has changed. This may cause
 * us to initiate a push or pull.
 */
static void
prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
{
	if (!p->on_rq)
		return;

	if (rq->curr == p) {
#ifdef CONFIG_SMP
		/*
		 * If our priority decreases while running, we
		 * may need to pull tasks to this runqueue.
		 */
		if (oldprio < p->prio)
			pull_rt_task(rq);
		/*
		 * If there's a higher priority task waiting to run
		 * then reschedule. Note, the above pull_rt_task
		 * can release the rq lock and p could migrate.
		 * Only reschedule if p is still on the same runqueue.
		 */
		if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
			resched_task(p);
#else
		/* For UP simply resched on drop of prio */
		if (oldprio < p->prio)
			resched_task(p);
#endif /* CONFIG_SMP */
	} else {
		/*
		 * This task is not running, but if it is
		 * greater than the current running task
		 * then reschedule.
		 */
		if (p->prio < rq->curr->prio)
			resched_task(rq->curr);
	}
}
static void watchdog(struct rq *rq, struct task_struct *p)
{
	unsigned long soft, hard;

	/* max may change after cur was read, this will be fixed next tick */
	soft = task_rlimit(p, RLIMIT_RTTIME);
	hard = task_rlimit_max(p, RLIMIT_RTTIME);

	if (soft != RLIM_INFINITY) {
		unsigned long next;

		p->rt.timeout++;
		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
		if (p->rt.timeout > next)
			p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
	}
}
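/*
 * Editor's illustration (hedged, user-space sketch): RLIMIT_RTTIME caps
 * the CPU time, in microseconds, that an RT task may consume without
 * sleeping before it is sent SIGXCPU (and eventually SIGKILL at the hard
 * limit). A process could opt in like this:
 */
#if 0
#include <sys/resource.h>
#include <sched.h>
#include <string.h>

static int limit_rt_cpu_time(void)
{
	struct sched_param sp;
	struct rlimit rl = { .rlim_cur = 500000, .rlim_max = 1000000 };

	/* allow at most 0.5s (soft) / 1s (hard) of unbroken RT CPU time */
	if (setrlimit(RLIMIT_RTTIME, &rl))
		return -1;

	memset(&sp, 0, sizeof(sp));
	sp.sched_priority = 10;
	return sched_setscheduler(0, SCHED_FIFO, &sp);
}
#endif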
static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
{
	struct sched_rt_entity *rt_se = &p->rt;

	update_curr_rt(rq);

	watchdog(rq, p);

	/*
	 * RR tasks need a special form of timeslice management.
	 * FIFO tasks have no timeslices.
	 */
	if (p->policy != SCHED_RR)
		return;

	if (--p->rt.time_slice)
		return;

	p->rt.time_slice = DEF_TIMESLICE;

	/*
	 * Requeue to the end of queue if we (and all of our ancestors) are the
	 * only element on the queue
	 */
	for_each_sched_rt_entity(rt_se) {
		if (rt_se->run_list.prev != rt_se->run_list.next) {
			requeue_task_rt(rq, p, 0);
			set_tsk_need_resched(p);
			return;
		}
	}
}
static void set_curr_task_rt(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	p->se.exec_start = rq->clock_task;

	/* The running task is never eligible for pushing */
	dequeue_pushable_task(rq, p);
}

static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
{
	/*
	 * Time slice is 0 for SCHED_FIFO tasks
	 */
	if (task->policy == SCHED_RR)
		return DEF_TIMESLICE;
	else
		return 0;
}
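/*
 * Editor's illustration (hedged, user-space sketch): the RR timeslice
 * reported above (DEF_TIMESLICE, typically 100ms in this kernel era) is
 * visible to user space through sched_rr_get_interval(); SCHED_FIFO tasks
 * report a zero interval:
 */
#if 0
#include <stdio.h>
#include <sched.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	if (sched_rr_get_interval(0, &ts) == 0)
		printf("RR timeslice: %ld.%09ld s\n",
		       (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}
#endif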
static const struct sched_class rt_sched_class = {
	.next			= &fair_sched_class,
	.enqueue_task		= enqueue_task_rt,
	.dequeue_task		= dequeue_task_rt,
	.yield_task		= yield_task_rt,

	.check_preempt_curr	= check_preempt_curr_rt,

	.pick_next_task		= pick_next_task_rt,
	.put_prev_task		= put_prev_task_rt,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_rt,

	.set_cpus_allowed	= set_cpus_allowed_rt,
	.rq_online		= rq_online_rt,
	.rq_offline		= rq_offline_rt,
	.pre_schedule		= pre_schedule_rt,
	.post_schedule		= post_schedule_rt,
	.task_woken		= task_woken_rt,
	.switched_from		= switched_from_rt,
#endif

	.set_curr_task		= set_curr_task_rt,
	.task_tick		= task_tick_rt,

	.get_rr_interval	= get_rr_interval_rt,

	.prio_changed		= prio_changed_rt,
	.switched_to		= switched_to_rt,
};

#ifdef CONFIG_SCHED_DEBUG
extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);

static void print_rt_stats(struct seq_file *m, int cpu)
{
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	rcu_read_lock();
	for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
		print_rt_rq(m, cpu, rt_rq);
	rcu_read_unlock();
}
#endif /* CONFIG_SCHED_DEBUG */