#else
static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
+static inline struct task_group *task_group(struct task_struct *p)
+{
+ return NULL;
+}
#endif /* CONFIG_GROUP_SCHED */
struct task_group *tg; /* group that "owns" this runqueue */
#ifdef CONFIG_SMP
+ /*
+ * the part of load.weight contributed by tasks
+ */
unsigned long task_weight;
- unsigned long shares;
+
/*
- * We need space to build a sched_domain wide view of the full task
- * group tree, in order to avoid depending on dynamic memory allocation
- * during the load balancing we place this in the per cpu task group
- * hierarchy. This limits the load balancing to one instance per cpu,
- * but more should not be needed anyway.
+ * h_load = weight * f(tg)
+ *
+ * Where f(tg) is the recursive weight fraction assigned to
+ * this group.
*/
- struct aggregate_struct {
- /*
- * load = weight(cpus) * f(tg)
- *
- * Where f(tg) is the recursive weight fraction assigned to
- * this group.
- */
- unsigned long load;
+ unsigned long h_load;
- /*
- * part of the group weight distributed to this span.
- */
- unsigned long shares;
+ /*
+ * this cpu's part of tg->shares
+ */
+ unsigned long shares;
- /*
- * The sum of all runqueue weights within this span.
- */
- unsigned long rq_weight;
- } aggregate;
+ /*
+ * load.weight at the time we set shares
+ */
+ unsigned long rq_weight;
#endif
#endif
};
int cpu;
int online;
+ unsigned long avg_load_per_task;
+
struct task_struct *migration_thread;
struct list_head migration_queue;
#endif
*/
const_debug unsigned int sysctl_sched_nr_migrate = 32;
+/*
+ * ratelimit for updating the group shares; the value is in nanoseconds.
+ * default: 0.5ms
+ */
+const_debug unsigned int sysctl_sched_shares_ratelimit = 500000;
+
/*
* period over which we measure -rt task cpu usage in us.
* default: 1s
#ifdef CONFIG_SMP
static unsigned long source_load(int cpu, int type);
static unsigned long target_load(int cpu, int type);
-static unsigned long cpu_avg_load_per_task(int cpu);
static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
-#ifdef CONFIG_FAIR_GROUP_SCHED
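+/*
+ * Return the average load per task on the cpu's run queue; the result is
+ * cached in rq->avg_load_per_task, so an idle cpu keeps reporting the last
+ * value it computed while it still had tasks.
+ */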
+static unsigned long cpu_avg_load_per_task(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
-/*
- * Group load balancing.
- *
- * We calculate a few balance domain wide aggregate numbers; load and weight.
- * Given the pictures below, and assuming each item has equal weight:
- *
- * root 1 - thread
- * / | \ A - group
- * A 1 B
- * /|\ / \
- * C 2 D 3 4
- * | |
- * 5 6
- *
- * load:
- * A and B get 1/3-rd of the total load. C and D get 1/3-rd of A's 1/3-rd,
- * which equals 1/9-th of the total load.
- *
- * shares:
- * The weight of this group on the selected cpus.
- *
- * rq_weight:
- * Direct sum of all the cpu's their rq weight, e.g. A would get 3 while
- * B would get 2.
- */
+ if (rq->nr_running)
+ rq->avg_load_per_task = rq->load.weight / rq->nr_running;
-static inline struct aggregate_struct *
-aggregate(struct task_group *tg, int cpu)
-{
- return &tg->cfs_rq[cpu]->aggregate;
+ return rq->avg_load_per_task;
}
-typedef void (*aggregate_func)(struct task_group *, int, struct sched_domain *);
+#ifdef CONFIG_FAIR_GROUP_SCHED
+
+typedef void (*tg_visitor)(struct task_group *, int, struct sched_domain *);
/*
* Iterate the full tree, calling @down when first entering a node and @up when
* leaving it for the final time.
*/
-static
-void aggregate_walk_tree(aggregate_func down, aggregate_func up,
- int cpu, struct sched_domain *sd)
+static void
+walk_tg_tree(tg_visitor down, tg_visitor up, int cpu, struct sched_domain *sd)
{
struct task_group *parent, *child;
rcu_read_unlock();
}
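Most of walk_tg_tree()'s body falls outside this hunk. As a rough sketch of the traversal it performs (walk_tg_tree_sketch() is illustrative only, written recursively for clarity and assuming the task_group children/siblings list members; the in-tree version iterates those lists without recursion):

static void walk_tg_tree_sketch(tg_visitor down, tg_visitor up,
				int cpu, struct sched_domain *sd,
				struct task_group *tg)
{
	struct task_group *child;

	/* caller holds rcu_read_lock(), as walk_tg_tree() proper does */
	(*down)(tg, cpu, sd);
	list_for_each_entry_rcu(child, &tg->children, siblings)
		walk_tg_tree_sketch(down, up, cpu, sd, child);
	/* visit the group again on the way up, after all its children */
	(*up)(tg, cpu, sd);
}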
-/*
- * Calculate the aggregate runqueue weight.
- */
-static void
-aggregate_group_weight(struct task_group *tg, int cpu, struct sched_domain *sd)
-{
- unsigned long rq_weight = 0;
- int i;
-
- for_each_cpu_mask(i, sd->span)
- rq_weight += tg->cfs_rq[i]->load.weight;
-
- aggregate(tg, cpu)->rq_weight = rq_weight;
-}
-
-/*
- * Compute the weight of this group on the given cpus.
- */
-static void
-aggregate_group_shares(struct task_group *tg, int cpu, struct sched_domain *sd)
-{
- unsigned long shares = 0;
- int i;
-
- for_each_cpu_mask(i, sd->span)
- shares += tg->cfs_rq[i]->shares;
-
- if ((!shares && aggregate(tg, cpu)->rq_weight) || shares > tg->shares)
- shares = tg->shares;
-
- if (!sd->parent || !(sd->parent->flags & SD_LOAD_BALANCE))
- shares = tg->shares;
-
- aggregate(tg, cpu)->shares = shares;
-}
-
-/*
- * Compute the load fraction assigned to this group, relies on the aggregate
- * weight and this group's parent's load, i.e. top-down.
- */
-static void
-aggregate_group_load(struct task_group *tg, int cpu, struct sched_domain *sd)
-{
- unsigned long load;
-
- if (!tg->parent) {
- int i;
-
- load = 0;
- for_each_cpu_mask(i, sd->span)
- load += cpu_rq(i)->load.weight;
-
- } else {
- load = aggregate(tg->parent, cpu)->load;
-
- /*
- * shares is our weight in the parent's rq so
- * shares/parent->rq_weight gives our fraction of the load
- */
- load *= aggregate(tg, cpu)->shares;
- load /= aggregate(tg->parent, cpu)->rq_weight + 1;
- }
-
- aggregate(tg, cpu)->load = load;
-}
-
static void __set_se_shares(struct sched_entity *se, unsigned long shares);
/*
*/
static void
__update_group_shares_cpu(struct task_group *tg, int cpu,
- struct sched_domain *sd, int tcpu)
+ unsigned long sd_shares, unsigned long sd_rq_weight)
{
int boost = 0;
unsigned long shares;
unsigned long rq_weight;
- if (!tg->se[tcpu])
+ if (!tg->se[cpu])
return;
- rq_weight = tg->cfs_rq[tcpu]->load.weight;
+ rq_weight = tg->cfs_rq[cpu]->load.weight;
/*
* If there are currently no tasks on the cpu pretend there is one of
rq_weight = NICE_0_LOAD;
}
+ if (unlikely(rq_weight > sd_rq_weight))
+ rq_weight = sd_rq_weight;
+
/*
* \Sum shares * rq_weight
* shares = -----------------------
* \Sum rq_weight
*
*/
- shares = aggregate(tg, cpu)->shares * rq_weight;
- shares /= aggregate(tg, cpu)->rq_weight + 1;
+ shares = (sd_shares * rq_weight) / (sd_rq_weight + 1);
/*
* record the actual number of shares, not the boosted amount.
*/
- tg->cfs_rq[tcpu]->shares = boost ? 0 : shares;
+ tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
+ tg->cfs_rq[cpu]->rq_weight = rq_weight;
if (shares < MIN_SHARES)
shares = MIN_SHARES;
else if (shares > MAX_SHARES)
shares = MAX_SHARES;
- __set_se_shares(tg->se[tcpu], shares);
+ __set_se_shares(tg->se[cpu], shares);
}
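A quick numeric check of the formula above (purely illustrative values; shares_example() is not part of the patch): a group with tg->shares == 1024 on a two-cpu domain whose group runqueues weigh 2048 and 1024 splits its shares roughly 2:1:

static void shares_example(void)
{
	unsigned long sd_shares = 1024;            /* tg->shares */
	unsigned long sd_rq_weight = 2048 + 1024;  /* \Sum rq_weight over the span */

	/* cpu 0: (1024 * 2048) / 3073 == 682 */
	unsigned long cpu0 = (sd_shares * 2048) / (sd_rq_weight + 1);
	/* cpu 1: (1024 * 1024) / 3073 == 341 */
	unsigned long cpu1 = (sd_shares * 1024) / (sd_rq_weight + 1);

	(void)cpu0; (void)cpu1;
}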
/*
- * Re-adjust the weights on the cpu the task came from and on the cpu the
- * task went to.
+ * Re-compute the per-cpu shares of a task group over the given domain.
+ * This needs to be done in a bottom-up fashion because the rq weight of a
+ * parent group depends on the shares of its child groups.
*/
static void
-__move_group_shares(struct task_group *tg, int cpu, struct sched_domain *sd,
- int scpu, int dcpu)
+tg_shares_up(struct task_group *tg, int cpu, struct sched_domain *sd)
{
- __update_group_shares_cpu(tg, cpu, sd, scpu);
- __update_group_shares_cpu(tg, cpu, sd, dcpu);
-}
+ unsigned long rq_weight = 0;
+ unsigned long shares = 0;
+ int i;
-/*
- * Because changing a group's shares changes the weight of the super-group
- * we need to walk up the tree and change all shares until we hit the root.
- */
-static void
-move_group_shares(struct task_group *tg, int cpu, struct sched_domain *sd,
- int scpu, int dcpu)
-{
- while (tg) {
- __move_group_shares(tg, cpu, sd, scpu, dcpu);
- tg = tg->parent;
+ for_each_cpu_mask(i, sd->span) {
+ rq_weight += tg->cfs_rq[i]->load.weight;
+ shares += tg->cfs_rq[i]->shares;
}
-}
-static void
-aggregate_group_set_shares(struct task_group *tg, int cpu, struct sched_domain *sd)
-{
- int i;
+ if ((!shares && rq_weight) || shares > tg->shares)
+ shares = tg->shares;
+
+ if (!sd->parent || !(sd->parent->flags & SD_LOAD_BALANCE))
+ shares = tg->shares;
+
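+ /*
+ * If the span carries no group load at all, pretend every cpu in it
+ * runs a single nice-0 task so the shares still get spread evenly.
+ */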
+ if (!rq_weight)
+ rq_weight = cpus_weight(sd->span) * NICE_0_LOAD;
for_each_cpu_mask(i, sd->span) {
struct rq *rq = cpu_rq(i);
unsigned long flags;
spin_lock_irqsave(&rq->lock, flags);
- __update_group_shares_cpu(tg, cpu, sd, i);
+ __update_group_shares_cpu(tg, i, shares, rq_weight);
spin_unlock_irqrestore(&rq->lock, flags);
}
-
- aggregate_group_shares(tg, cpu, sd);
}
/*
- * Calculate the accumulative weight and recursive load of each task group
- * while walking down the tree.
+ * Compute the cpu's hierarchical load factor for each task group.
+ * This needs to be done in a top-down fashion because the load of a child
+ * group is a fraction of its parent's load.
*/
static void
-aggregate_get_down(struct task_group *tg, int cpu, struct sched_domain *sd)
+tg_load_down(struct task_group *tg, int cpu, struct sched_domain *sd)
{
- aggregate_group_weight(tg, cpu, sd);
- aggregate_group_shares(tg, cpu, sd);
- aggregate_group_load(tg, cpu, sd);
-}
+ unsigned long load;
-/*
- * Rebalance the cpu shares while walking back up the tree.
- */
-static void
-aggregate_get_up(struct task_group *tg, int cpu, struct sched_domain *sd)
-{
- aggregate_group_set_shares(tg, cpu, sd);
-}
+ if (!tg->parent) {
+ load = cpu_rq(cpu)->load.weight;
+ } else {
+ load = tg->parent->cfs_rq[cpu]->h_load;
+ load *= tg->cfs_rq[cpu]->shares;
+ load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
+ }
-static void
-aggregate_get_nop(struct task_group *tg, int cpu, struct sched_domain *sd)
-{
+ tg->cfs_rq[cpu]->h_load = load;
}
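A small worked example of the top-down rule (illustrative values; h_load_example() is not part of the patch, and it assumes the root group's cfs_rq weight equals the raw rq weight): with cpu 0's runqueue weighing 3072, of which group A contributes 2048 through its per-cpu shares, A is charged just under two thirds of the cpu's load:

static void h_load_example(void)
{
	unsigned long root_weight = 3072;  /* cpu_rq(0)->load.weight */
	unsigned long a_shares = 2048;     /* A->cfs_rq[0]->shares */

	unsigned long root_h_load = root_weight;
	/* 3072 * 2048 / (3072 + 1) == 2047 */
	unsigned long a_h_load = (root_h_load * a_shares) / (root_weight + 1);

	(void)a_h_load;
}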
-static DEFINE_PER_CPU(spinlock_t, aggregate_lock);
-
-static void __init init_aggregate(void)
+static void
+tg_nop(struct task_group *tg, int cpu, struct sched_domain *sd)
{
- int i;
-
- for_each_possible_cpu(i)
- spin_lock_init(&per_cpu(aggregate_lock, i));
}
-static int get_aggregate(int cpu, struct sched_domain *sd)
+static void update_shares(struct sched_domain *sd)
{
- if (!spin_trylock(&per_cpu(aggregate_lock, cpu)))
- return 0;
+ u64 now = cpu_clock(raw_smp_processor_id());
+ s64 elapsed = now - sd->last_update;
- aggregate_walk_tree(aggregate_get_down, aggregate_get_up, cpu, sd);
- return 1;
+ if (elapsed >= (s64)(u64)sysctl_sched_shares_ratelimit) {
+ sd->last_update = now;
+ walk_tg_tree(tg_nop, tg_shares_up, 0, sd);
+ }
}
-static void update_aggregate(int cpu, struct sched_domain *sd)
+static void update_shares_locked(struct rq *rq, struct sched_domain *sd)
{
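+ /*
+ * tg_shares_up() takes each rq->lock in the domain span itself, so the
+ * caller's rq->lock must be dropped around the update.
+ */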
- aggregate_walk_tree(aggregate_get_down, aggregate_get_nop, cpu, sd);
+ spin_unlock(&rq->lock);
+ update_shares(sd);
+ spin_lock(&rq->lock);
}
-static void put_aggregate(int cpu, struct sched_domain *sd)
+static void update_h_load(int cpu)
{
- spin_unlock(&per_cpu(aggregate_lock, cpu));
-}
-
-static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
-{
- cfs_rq->shares = shares;
+ walk_tg_tree(tg_load_down, tg_nop, cpu, NULL);
}
#else
-static inline void init_aggregate(void)
+static inline void update_shares(struct sched_domain *sd)
{
}
-static inline int get_aggregate(int cpu, struct sched_domain *sd)
+static inline void update_shares_locked(struct rq *rq, struct sched_domain *sd)
{
- return 0;
}
-static inline void update_aggregate(int cpu, struct sched_domain *sd)
-{
-}
+#endif
-static inline void put_aggregate(int cpu, struct sched_domain *sd)
-{
-}
#endif
+#ifdef CONFIG_FAIR_GROUP_SCHED
+static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
+{
+#ifdef CONFIG_SMP
+ cfs_rq->shares = shares;
+#endif
+}
#endif
#include "sched_stats.h"
p->se.load.inv_weight = prio_to_wmult[p->static_prio - MAX_RT_PRIO];
}
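+/*
+ * Fold a new sample into a running average with 1/8th gain, e.g. with
+ * *avg == 1000 and sample == 2000 the average moves to 1125.
+ */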
+static void update_avg(u64 *avg, u64 sample)
+{
+ s64 diff = sample - *avg;
+ *avg += diff >> 3;
+}
+
static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup)
{
sched_info_queued(p);
static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep)
{
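+ /*
+ * When a task goes to sleep, sample how long it kept running after it
+ * last woke another task; see last_wakeup in try_to_wake_up().
+ */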
+ if (sleep && p->se.last_wakeup) {
+ update_avg(&p->se.avg_overlap,
+ p->se.sum_exec_runtime - p->se.last_wakeup);
+ p->se.last_wakeup = 0;
+ }
+
+ sched_info_dequeued(p);
p->sched_class->dequeue_task(rq, p, sleep);
p->se.on_rq = 0;
}
struct rq *rq = cpu_rq(cpu);
unsigned long total = weighted_cpuload(cpu);
- if (type == 0)
+ if (type == 0 || !sched_feat(LB_BIAS))
return total;
return min(rq->cpu_load[type-1], total);
struct rq *rq = cpu_rq(cpu);
unsigned long total = weighted_cpuload(cpu);
- if (type == 0)
+ if (type == 0 || !sched_feat(LB_BIAS))
return total;
return max(rq->cpu_load[type-1], total);
}
-/*
- * Return the average load per task on the cpu's run queue
- */
-static unsigned long cpu_avg_load_per_task(int cpu)
-{
- struct rq *rq = cpu_rq(cpu);
- unsigned long total = weighted_cpuload(cpu);
- unsigned long n = rq->nr_running;
-
- return n ? total / n : SCHED_LOAD_SCALE;
-}
-
/*
* find_idlest_group finds and returns the least busy CPU group within the
* domain.
int load_idx = sd->forkexec_idx;
int imbalance = 100 + (sd->imbalance_pct-100)/2;
- /*
- * now that we have both rqs locked the rq weight won't change
- * anymore - so update the stats.
- */
- update_aggregate(this_cpu, sd);
-
do {
unsigned long load, avg_load;
int local_group;
sd = tmp;
}
+ if (sd)
+ update_shares(sd);
+
while (sd) {
cpumask_t span, tmpmask;
struct sched_group *group;
if (!sched_feat(SYNC_WAKEUPS))
sync = 0;
+#ifdef CONFIG_SMP
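+ /*
+ * Before deciding where to run the wakee, refresh the group shares of
+ * the lowest domain that spans both this cpu and the task's old cpu.
+ */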
+ if (sched_feat(LB_WAKEUP_UPDATE)) {
+ struct sched_domain *sd;
+
+ this_cpu = raw_smp_processor_id();
+ cpu = task_cpu(p);
+
+ for_each_domain(this_cpu, sd) {
+ if (cpu_isset(cpu, sd->span)) {
+ update_shares(sd);
+ break;
+ }
+ }
+ }
+#endif
+
smp_wmb();
rq = task_rq_lock(p, &flags);
old_state = p->state;
p->sched_class->task_wake_up(rq, p);
#endif
out:
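+ /* the waker stamps when it issued the wakeup; dequeue_task() uses it for avg_overlap */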
+ current->se.last_wakeup = current->se.sum_exec_runtime;
+
task_rq_unlock(rq, &flags);
return success;
enum cpu_idle_type idle, int *all_pinned,
int *this_best_prio, struct rq_iterator *iterator)
{
- int loops = 0, pulled = 0, pinned = 0, skip_for_load;
+ int loops = 0, pulled = 0, pinned = 0;
struct task_struct *p;
long rem_load_move = max_load_move;
next:
if (!p || loops++ > sysctl_sched_nr_migrate)
goto out;
- /*
- * To help distribute high priority tasks across CPUs we don't
- * skip a task if it will be the highest priority task (i.e. smallest
- * prio value) on its new queue regardless of its load weight
- */
- skip_for_load = (p->se.load.weight >> 1) > rem_load_move +
- SCHED_LOAD_SCALE_FUZZ;
- if ((skip_for_load && p->prio >= *this_best_prio) ||
+
+ if ((p->se.load.weight >> 1) > rem_load_move ||
!can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) {
p = iterator->next(iterator->arg);
goto next;
max_load_move - total_load_moved,
sd, idle, all_pinned, &this_best_prio);
class = class->next;
+
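+ /*
+ * A newly idle cpu only needs a single task to become busy again;
+ * stop iterating the classes once it has pulled something.
+ */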
+ if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
+ break;
+
} while (class && max_load_move > total_load_moved);
return total_load_moved > 0;
max_load = this_load = total_load = total_pwr = 0;
busiest_load_per_task = busiest_nr_running = 0;
this_load_per_task = this_nr_running = 0;
+
if (idle == CPU_NOT_IDLE)
load_idx = sd->busy_idx;
else if (idle == CPU_NEWLY_IDLE)
int __group_imb = 0;
unsigned int balance_cpu = -1, first_idle_cpu = 0;
unsigned long sum_nr_running, sum_weighted_load;
+ unsigned long sum_avg_load_per_task;
+ unsigned long avg_load_per_task;
local_group = cpu_isset(this_cpu, group->cpumask);
/* Tally up the load of all CPUs in the group */
sum_weighted_load = sum_nr_running = avg_load = 0;
+ sum_avg_load_per_task = avg_load_per_task = 0;
+
max_cpu_load = 0;
min_cpu_load = ~0UL;
avg_load += load;
sum_nr_running += rq->nr_running;
sum_weighted_load += weighted_cpuload(i);
+
+ sum_avg_load_per_task += cpu_avg_load_per_task(i);
}
/*
avg_load = sg_div_cpu_power(group,
avg_load * SCHED_LOAD_SCALE);
- if ((max_cpu_load - min_cpu_load) > SCHED_LOAD_SCALE)
+
+ /*
+ * Consider the group unbalanced when the imbalance is larger
+ * than the average weight of two tasks.
+ *
+ * APZ: with cgroup the avg task weight can vary wildly and
+ * might not be a suitable number - should we keep a
+ * normalized nr_running number somewhere that negates
+ * the hierarchy?
+ */
+ avg_load_per_task = sg_div_cpu_power(group,
+ sum_avg_load_per_task * SCHED_LOAD_SCALE);
+
+ if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
__group_imb = 1;
group_capacity = group->__cpu_power / SCHED_LOAD_SCALE;
if (busiest_load_per_task > this_load_per_task)
imbn = 1;
} else
- this_load_per_task = SCHED_LOAD_SCALE;
+ this_load_per_task = cpu_avg_load_per_task(this_cpu);
- if (max_load - this_load + SCHED_LOAD_SCALE_FUZZ >=
+ if (max_load - this_load + 2*busiest_load_per_task >=
busiest_load_per_task * imbn) {
*imbalance = busiest_load_per_task;
return busiest;
unsigned long imbalance;
struct rq *busiest;
unsigned long flags;
- int unlock_aggregate;
cpus_setall(*cpus);
- unlock_aggregate = get_aggregate(this_cpu, sd);
-
/*
* When power savings policy is enabled for the parent domain, idle
* sibling can pick up load irrespective of busy siblings. In this case,
schedstat_inc(sd, lb_count[idle]);
redo:
+ update_shares(sd);
group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle,
cpus, balance);
else
ld_moved = 0;
out:
- if (unlock_aggregate)
- put_aggregate(this_cpu, sd);
+ if (ld_moved)
+ update_shares(sd);
return ld_moved;
}
schedstat_inc(sd, lb_count[CPU_NEWLY_IDLE]);
redo:
+ update_shares_locked(this_rq, sd);
group = find_busiest_group(sd, this_cpu, &imbalance, CPU_NEWLY_IDLE,
&sd_idle, cpus, NULL);
if (!group) {
} else
sd->nr_balance_failed = 0;
+ update_shares_locked(this_rq, sd);
return ld_moved;
out_balanced:
double_rq_lock(rq_src, rq_dest);
/* Already moved. */
if (task_cpu(p) != src_cpu)
- goto out;
+ goto done;
/* Affinity changed (again). */
if (!cpu_isset(dest_cpu, p->cpus_allowed))
- goto out;
+ goto fail;
on_rq = p->se.on_rq;
if (on_rq)
activate_task(rq_dest, p, 0);
check_preempt_curr(rq_dest, p);
}
+done:
ret = 1;
-out:
+fail:
double_rq_unlock(rq_src, rq_dest);
return ret;
}
next = pick_next_task(rq, rq->curr);
if (!next)
break;
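+ /* picked tasks must be "put" again before they can be migrated away */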
+ next->sched_class->put_prev_task(rq, next);
migrate_dead(dead_cpu, next);
}
min_val = INT_MAX;
- for (i = 0; i < MAX_NUMNODES; i++) {
+ for (i = 0; i < nr_node_ids; i++) {
/* Start at @node */
- n = (node + i) % MAX_NUMNODES;
+ n = (node + i) % nr_node_ids;
if (!nr_cpus_node(n))
continue;
if (!sched_group_nodes)
continue;
- for (i = 0; i < MAX_NUMNODES; i++) {
+ for (i = 0; i < nr_node_ids; i++) {
struct sched_group *oldsg, *sg = sched_group_nodes[i];
*nodemask = node_to_cpumask(i);
/*
* Allocate the per-node list of sched groups
*/
- sched_group_nodes = kcalloc(MAX_NUMNODES, sizeof(struct sched_group *),
+ sched_group_nodes = kcalloc(nr_node_ids, sizeof(struct sched_group *),
GFP_KERNEL);
if (!sched_group_nodes) {
printk(KERN_WARNING "Can not alloc sched group node list\n");
#endif
/* Set up physical groups */
- for (i = 0; i < MAX_NUMNODES; i++) {
+ for (i = 0; i < nr_node_ids; i++) {
SCHED_CPUMASK_VAR(nodemask, allmasks);
SCHED_CPUMASK_VAR(send_covered, allmasks);
send_covered, tmpmask);
}
- for (i = 0; i < MAX_NUMNODES; i++) {
+ for (i = 0; i < nr_node_ids; i++) {
/* Set up node groups */
struct sched_group *sg, *prev;
SCHED_CPUMASK_VAR(nodemask, allmasks);
cpus_or(*covered, *covered, *nodemask);
prev = sg;
- for (j = 0; j < MAX_NUMNODES; j++) {
+ for (j = 0; j < nr_node_ids; j++) {
SCHED_CPUMASK_VAR(notcovered, allmasks);
- int n = (i + j) % MAX_NUMNODES;
+ int n = (i + j) % nr_node_ids;
node_to_cpumask_ptr(pnodemask, n);
cpus_complement(*notcovered, *covered);
}
#ifdef CONFIG_NUMA
- for (i = 0; i < MAX_NUMNODES; i++)
+ for (i = 0; i < nr_node_ids; i++)
init_numa_sched_groups_power(sched_group_nodes[i]);
if (sd_allnodes) {
}
#ifdef CONFIG_SMP
- init_aggregate();
init_defrootdomain();
#endif
rt_period = (u64)rt_period_us * NSEC_PER_USEC;
rt_runtime = tg->rt_bandwidth.rt_runtime;
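+ /* a zero period would make the runtime/period ratio undefined */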
+ if (rt_period == 0)
+ return -EINVAL;
+
return tg_set_bandwidth(tg, rt_period, rt_runtime);
}