uint64_t action;
/* Load the mailbox register to figure out what we're supposed to do */
-- action = cvmx_read_csr(CVMX_CIU_MBOX_CLRX(coreid));
++ action = cvmx_read_csr(CVMX_CIU_MBOX_CLRX(coreid)) & 0xffff;
/* Clear the mailbox to clear the interrupt */
cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action);
if (action & SMP_CALL_FUNCTION)
smp_call_function_interrupt();
++ if (action & SMP_RESCHEDULE_YOURSELF)
++ scheduler_ipi();
/* Check if we've been told to flush the icache */
if (action & SMP_ICACHE_FLUSH)
if (labi->labi_signature != LABI_SIGNATURE)
panic("The bootloader version on this board is incorrect.");
#endif
--
-- cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffffffff);
++ /*
++ * Only the low order mailbox bits are used for IPIs, leave
++ * the other bits alone.
++ */
++ cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffff);
if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt, IRQF_DISABLED,
-- "mailbox0", mailbox_interrupt)) {
++ "SMP-IPI", mailbox_interrupt)) {
panic("Cannot request_irq(OCTEON_IRQ_MBOX0)\n");
}
-- if (request_irq(OCTEON_IRQ_MBOX1, mailbox_interrupt, IRQF_DISABLED,
-- "mailbox1", mailbox_interrupt)) {
-- panic("Cannot request_irq(OCTEON_IRQ_MBOX1)\n");
-- }
}
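/*
 * For context, a minimal sketch of the sending side of this mailbox IPI
 * scheme (illustration only, not part of the hunks above; the
 * CVMX_CIU_MBOX_SETX register name and the octeon_send_ipi_single() shape
 * are assumptions): the sender sets action bits in the target core's
 * mailbox, which raises OCTEON_IRQ_MBOX0 there, and mailbox_interrupt()
 * above then reads and clears exactly those bits.
 */
static void octeon_send_ipi_single(int cpu, unsigned int action)
{
	int coreid = cpu_logical_map(cpu);

	/* Each low-order mailbox bit requests one IPI action on 'coreid'. */
	cvmx_write_csr(CVMX_CIU_MBOX_SETX(coreid), action);
}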
/**
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
/*
-- * Reschedule call back. Nothing to do,
-- * all the work is done automatically when
-- * we return from the interrupt.
++ * Reschedule call back.
*/
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
inc_irq_stat(irq_resched_count);
++ scheduler_ipi();
return IRQ_HANDLED;
}
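/*
 * Illustrative note (not part of the patch): scheduler_ipi() is the
 * scheduler's hook for the reschedule IPI, e.g. to process remotely
 * queued wakeups, so the handler now does real work on top of bumping
 * irq_resched_count.
 */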
--static __cpuinit void cpu_bringup(void)
++static void __cpuinit cpu_bringup(void)
{
int cpu = smp_processor_id();
wmb(); /* make sure everything is out */
}
--static __cpuinit void cpu_bringup_and_idle(void)
++static void __cpuinit cpu_bringup_and_idle(void)
{
cpu_bringup();
cpu_idle();
}
}
--static __cpuinit int
++static int __cpuinit
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
struct vcpu_guest_context *ctxt;
return IRQ_HANDLED;
}
--static const struct smp_ops xen_smp_ops __initdata = {
++static const struct smp_ops xen_smp_ops __initconst = {
.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
.smp_prepare_cpus = xen_smp_prepare_cpus,
.smp_cpus_done = xen_smp_cpus_done,
return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}
++ typedef struct task_group *rt_rq_iter_t;
++
++ #define for_each_rt_rq(rt_rq, iter, rq) \
++ for (iter = list_entry_rcu(task_groups.next, typeof(*iter), list); \
++ (&iter->list != &task_groups) && \
++ (rt_rq = iter->rt_rq[cpu_of(rq)]); \
++ iter = list_entry_rcu(iter->list.next, typeof(*iter), list))
++
static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
{
list_add_rcu(&rt_rq->leaf_rt_rq_list,
return ktime_to_ns(def_rt_bandwidth.rt_period);
}
++ typedef struct rt_rq *rt_rq_iter_t;
++
++ #define for_each_rt_rq(rt_rq, iter, rq) \
++ for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
++
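/*
 * Illustrative note (not part of the patch): with CONFIG_RT_GROUP_SCHED the
 * for_each_rt_rq() variant above walks this CPU's rt_rq of every task group;
 * without it only the root rq->rt exists, and the "(void) iter" expression
 * merely keeps both variants callable with the same three arguments.
 */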
static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
{
}
static void __disable_runtime(struct rq *rq)
{
struct root_domain *rd = rq->rd;
++ rt_rq_iter_t iter;
struct rt_rq *rt_rq;
if (unlikely(!scheduler_running))
return;
-- for_each_leaf_rt_rq(rt_rq, rq) {
++ for_each_rt_rq(rt_rq, iter, rq) {
struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
s64 want;
int i;
static void __enable_runtime(struct rq *rq)
{
++ rt_rq_iter_t iter;
struct rt_rq *rt_rq;
if (unlikely(!scheduler_running))
/*
* Reset each runqueue's bandwidth settings
*/
-- for_each_leaf_rt_rq(rt_rq, rq) {
++ for_each_rt_rq(rt_rq, iter, rq) {
struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
raw_spin_lock(&rt_b->rt_runtime_lock);
if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
rt_rq->rt_throttled = 0;
enqueue = 1;
++
++ /*
++ * Force a clock update if the CPU was idle,
++ * lest wakeup -> unthrottle time accumulate.
++ */
++ if (rt_rq->rt_nr_running && rq->curr == rq->idle)
++ rq->skip_clock_update = -1;
}
if (rt_rq->rt_time || rt_rq->rt_nr_running)
idle = 0;
static int find_lowest_rq(struct task_struct *task);
static int
--select_task_rq_rt(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
++select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
{
++ struct task_struct *curr;
++ struct rq *rq;
++ int cpu;
++
if (sd_flag != SD_BALANCE_WAKE)
return smp_processor_id();
++ cpu = task_cpu(p);
++ rq = cpu_rq(cpu);
++
++ rcu_read_lock();
++ curr = ACCESS_ONCE(rq->curr); /* unlocked access */
++
/*
-- * If the current task is an RT task, then
++ * If the current task on @p's runqueue is an RT task, then
* try to see if we can wake this RT task up on another
* runqueue. Otherwise simply start this RT task
* on its current runqueue.
* lock?
*
* For equal prio tasks, we just let the scheduler sort it out.
++ *
++ * Otherwise, just let it ride on the affined RQ and the
++ * post-schedule router will push the preempted task away
++ *
++ * This test is optimistic, if we get it wrong the load-balancer
++ * will have to sort it out.
*/
-- if (unlikely(rt_task(rq->curr)) &&
-- (rq->curr->rt.nr_cpus_allowed < 2 ||
-- rq->curr->prio < p->prio) &&
++ if (curr && unlikely(rt_task(curr)) &&
++ (curr->rt.nr_cpus_allowed < 2 ||
++ curr->prio < p->prio) &&
(p->rt.nr_cpus_allowed > 1)) {
-- int cpu = find_lowest_rq(p);
++ int target = find_lowest_rq(p);
-- return (cpu == -1) ? task_cpu(p) : cpu;
++ if (target != -1)
++ cpu = target;
}
++ rcu_read_unlock();
-- /*
-- * Otherwise, just let it ride on the affined RQ and the
-- * post-schedule router will push the preempted task away
-- */
-- return task_cpu(p);
++ return cpu;
}
static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
* The previous task needs to be made eligible for pushing
* if it is still active
*/
-- if (p->se.on_rq && p->rt.nr_cpus_allowed > 1)
++ if (on_rt_rq(&p->rt) && p->rt.nr_cpus_allowed > 1)
enqueue_pushable_task(rq, p);
}
!cpumask_test_cpu(lowest_rq->cpu,
&task->cpus_allowed) ||
task_running(rq, task) ||
-- !task->se.on_rq)) {
++ !task->on_rq)) {
raw_spin_unlock(&lowest_rq->lock);
lowest_rq = NULL;
BUG_ON(task_current(rq, p));
BUG_ON(p->rt.nr_cpus_allowed <= 1);
-- BUG_ON(!p->se.on_rq);
++ BUG_ON(!p->on_rq);
BUG_ON(!rt_task(p));
return p;
*/
if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
WARN_ON(p == src_rq->curr);
-- WARN_ON(!p->se.on_rq);
++ WARN_ON(!p->on_rq);
/*
* There's a chance that p is higher in priority
* Update the migration status of the RQ if we have an RT task
* which is running AND changing its weight value.
*/
-- if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
++ if (p->on_rq && (weight != p->rt.nr_cpus_allowed)) {
struct rq *rq = task_rq(p);
if (!task_current(rq, p)) {
* we may need to handle the pulling of RT tasks
* now.
*/
-- if (p->se.on_rq && !rq->rt.rt_nr_running)
++ if (p->on_rq && !rq->rt.rt_nr_running)
pull_rt_task(rq);
}
* If that current running task is also an RT task
* then see if we can move to another run queue.
*/
-- if (p->se.on_rq && rq->curr != p) {
++ if (p->on_rq && rq->curr != p) {
#ifdef CONFIG_SMP
if (rq->rt.overloaded && push_rt_task(rq) &&
/* Don't resched if we changed runqueues */
static void
prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
{
-- if (!p->se.on_rq)
++ if (!p->on_rq)
return;
if (rq->curr == p) {
static void print_rt_stats(struct seq_file *m, int cpu)
{
++ rt_rq_iter_t iter;
struct rt_rq *rt_rq;
rcu_read_lock();
-- for_each_leaf_rt_rq(rt_rq, cpu_rq(cpu))
++ for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
print_rt_rq(m, cpu, rt_rq);
rcu_read_unlock();
}