unsigned long cpu_capacity;
unsigned long cpu_capacity_orig;
+ struct callback_head *balance_callback;
+
unsigned char idle_balance;
/* For active balancing */
- int post_schedule;
int active_balance;
int push_cpu;
struct cpu_stop_work active_balance_work;
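
The per-class post_schedule hook becomes a per-rq list of deferred balance work. The list is singly linked through struct callback_head, the generic deferred-call node from include/linux/types.h:

struct callback_head {
        struct callback_head *next;
        void (*func)(struct callback_head *head);
};

queue_balance_callback() below stores a function taking a struct rq * in ->func, hence the cast when queueing.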
#ifdef CONFIG_SMP
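+/*
+ * Queue a deferred balance callback; it runs with rq->lock held once
+ * the current scheduling operation completes.
+ */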
+static inline void
+queue_balance_callback(struct rq *rq,
+ struct callback_head *head,
+ void (*func)(struct rq *rq))
+{
+ lockdep_assert_held(&rq->lock);
+
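+ /* Bail if this head is already queued. */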
+ if (unlikely(head->next))
+ return;
+
+ head->func = (void (*)(struct callback_head *))func;
+ head->next = rq->balance_callback;
+ rq->balance_callback = head;
+}
+
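
For illustration, a policy class can arm its push path from under rq->lock roughly the way the RT class does later in this series (the per-CPU head and helper names here are a sketch, not verbatim):

static DEFINE_PER_CPU(struct callback_head, rt_push_head);

static void push_rt_tasks(struct rq *rq);

static inline void queue_push_tasks(struct rq *rq)
{
        /* Nothing to push; don't queue an empty callback. */
        if (!has_pushable_tasks(rq))
                return;

        queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu),
                               push_rt_tasks);
}

Because queue_balance_callback() asserts rq->lock, the head->next test and the list link cannot race with another queueing of the same head.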
extern void sched_ttwu_pending(void);
#define rcu_dereference_check_sched_domain(p) \
int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
void (*migrate_task_rq)(struct task_struct *p, int next_cpu);
- void (*post_schedule) (struct rq *this_rq);
void (*task_waking) (struct task_struct *task);
void (*task_woken) (struct rq *this_rq, struct task_struct *task);
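
With the hook removed from sched_class, the core drains the list itself once the switch completes. A sketch of the shape of that drain in kernel/sched/core.c:

static void __balance_callback(struct rq *rq)
{
        struct callback_head *head, *next;
        void (*func)(struct rq *rq);
        unsigned long flags;

        raw_spin_lock_irqsave(&rq->lock, flags);
        head = rq->balance_callback;
        rq->balance_callback = NULL;
        while (head) {
                /* Undo the cast made in queue_balance_callback(). */
                func = (void (*)(struct rq *))head->func;
                next = head->next;
                /* Clear ->next so the callback may re-queue itself. */
                head->next = NULL;
                head = next;

                func(rq);
        }
        raw_spin_unlock_irqrestore(&rq->lock, flags);
}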
for (;;) {
rq = task_rq(p);
raw_spin_lock(&rq->lock);
- if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
+ if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
+ lockdep_pin_lock(&rq->lock);
return rq;
+ }
raw_spin_unlock(&rq->lock);
while (unlikely(task_on_rq_migrating(p)))
* If we observe the new cpu in task_rq_lock, the acquire will
* pair with the WMB to ensure we must then also see migrating.
*/
- if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
+ if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
+ lockdep_pin_lock(&rq->lock);
return rq;
+ }
raw_spin_unlock(&rq->lock);
raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
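
The new lockdep_pin_lock() call marks rq->lock as pinned for the duration of the task_rq_lock() section: any unlock of a pinned lock makes lockdep complain, which catches helpers that drop and retake rq->lock behind the caller's back. Code that must legitimately release the lock inside such a section has to unpin around the unlock; a sketch, not a call site from this patch:

        /* Tell lockdep we are knowingly dropping the pinned lock. */
        lockdep_unpin_lock(&rq->lock);
        raw_spin_unlock(&rq->lock);

        /* ... do work that must not hold rq->lock ... */

        raw_spin_lock(&rq->lock);
        lockdep_pin_lock(&rq->lock);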
static inline void __task_rq_unlock(struct rq *rq)
__releases(rq->lock)
{
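+ /* Pairs with the lockdep_pin_lock() in __task_rq_lock(). */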
+ lockdep_unpin_lock(&rq->lock);
raw_spin_unlock(&rq->lock);
}
__releases(rq->lock)
__releases(p->pi_lock)
{
+ lockdep_unpin_lock(&rq->lock);
raw_spin_unlock(&rq->lock);
raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
}