sched: fix cpu hotplug, cleanup
[pandora-kernel.git] / kernel / sched.c
diff --git a/kernel/sched.c b/kernel/sched.c
index eaf6751..4e2f603 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1127,6 +1127,7 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
 	return HRTIMER_NORESTART;
 }
 
+#ifdef CONFIG_SMP
 static void hotplug_hrtick_disable(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
@@ -1182,6 +1183,7 @@ static void init_hrtick(void)
 {
 	hotcpu_notifier(hotplug_hrtick, 0);
 }
+#endif /* CONFIG_SMP */
 
 static void init_rq_hrtick(struct rq *rq)
 {
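The two hunks above bracket the hrtick hotplug helpers, from hotplug_hrtick_disable() through the init_hrtick() that registers the hotcpu notifier, in #ifdef CONFIG_SMP, presumably so that uniprocessor builds, which have no CPU hotplug, do not carry defined-but-unused notifier code.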
@@ -4396,22 +4398,20 @@ do_wait_for_common(struct completion *x, long timeout, int state)
 			    signal_pending(current)) ||
 			    (state == TASK_KILLABLE &&
 			     fatal_signal_pending(current))) {
-				__remove_wait_queue(&x->wait, &wait);
-				return -ERESTARTSYS;
+				timeout = -ERESTARTSYS;
+				break;
 			}
 			__set_current_state(state);
 			spin_unlock_irq(&x->wait.lock);
 			timeout = schedule_timeout(timeout);
 			spin_lock_irq(&x->wait.lock);
-			if (!timeout) {
-				__remove_wait_queue(&x->wait, &wait);
-				return timeout;
-			}
-		} while (!x->done);
+		} while (!x->done && timeout);
 		__remove_wait_queue(&x->wait, &wait);
+		if (!x->done)
+			return timeout;
 	}
 	x->done--;
-	return timeout;
+	return timeout ?: 1;
 }
 
 static long __sched
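Reassembled, the wait loop in do_wait_for_common() now has a single exit path: a pending signal records -ERESTARTSYS and breaks out, an expired timeout ends the do/while via the new && timeout condition, and the wait-queue entry is removed exactly once after the loop. A rough sketch of the resulting loop, pieced together from the hunk (the enclosing if (!x->done) block and the wait-queue entry setup are not part of this patch and are assumed from the surrounding function):

	do {
		if ((state == TASK_INTERRUPTIBLE &&
		     signal_pending(current)) ||
		    (state == TASK_KILLABLE &&
		     fatal_signal_pending(current))) {
			timeout = -ERESTARTSYS;	/* remember the error ... */
			break;			/* ... and take the common exit */
		}
		__set_current_state(state);
		spin_unlock_irq(&x->wait.lock);
		timeout = schedule_timeout(timeout);
		spin_lock_irq(&x->wait.lock);
	} while (!x->done && timeout);
	__remove_wait_queue(&x->wait, &wait);	/* sole removal point */
	if (!x->done)
		return timeout;		/* 0 on timeout, -ERESTARTSYS on signal */

The final return timeout ?: 1 presumably keeps the success case nonzero even when the remaining timeout has just dropped to 0 as the completion was posted.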
@@ -5622,10 +5622,10 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 	double_rq_lock(rq_src, rq_dest);
 	/* Already moved. */
 	if (task_cpu(p) != src_cpu)
-		goto out;
+		goto done;
 	/* Affinity changed (again). */
 	if (!cpu_isset(dest_cpu, p->cpus_allowed))
-		goto out;
+		goto fail;
 
 	on_rq = p->se.on_rq;
 	if (on_rq)
@@ -5636,8 +5636,9 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 		activate_task(rq_dest, p, 0);
 		check_preempt_curr(rq_dest, p);
 	}
+done:
 	ret = 1;
-out:
+fail:
 	double_rq_unlock(rq_src, rq_dest);
 	return ret;
 }
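The two __migrate_task() hunks split the old catch-all out: label in two: done: (the task already sits on the destination CPU, which is now reported as success) and fail: (the affinity mask no longer allows the move). The tail of the function then reads roughly as follows, pieced together from the hunks and assuming ret is initialised to 0 earlier in the function, as the old goto out path implied:

	/* Already moved. */
	if (task_cpu(p) != src_cpu)
		goto done;		/* count it as success */
	/* Affinity changed (again). */
	if (!cpu_isset(dest_cpu, p->cpus_allowed))
		goto fail;		/* ret stays 0 */
	...
done:
	ret = 1;
fail:
	double_rq_unlock(rq_src, rq_dest);
	return ret;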
@@ -5887,6 +5888,7 @@ static void migrate_dead_tasks(unsigned int dead_cpu)
 		next = pick_next_task(rq, rq->curr);
 		if (!next)
 			break;
+		next->sched_class->put_prev_task(rq, next);
 		migrate_dead(dead_cpu, next);
 
 	}
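The added put_prev_task() call presumably restores the pick_next_task()/put_prev_task() pairing the scheduler classes rely on: migrate_dead_tasks() picks the next task off the dead CPU's runqueue, so it has to be put back through its class before migrate_dead() moves it elsewhere.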
@@ -6877,7 +6879,12 @@ static int default_relax_domain_level = -1;
 
 static int __init setup_relax_domain_level(char *str)
 {
-	default_relax_domain_level = simple_strtoul(str, NULL, 0);
+	unsigned long val;
+
+	val = simple_strtoul(str, NULL, 0);
+	if (val < SD_LV_MAX)
+		default_relax_domain_level = val;
+
 	return 1;
 }
 __setup("relax_domain_level=", setup_relax_domain_level);
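For reference, the __setup() hook means the value comes from the kernel command line, e.g. relax_domain_level=2. With the added bound check, a value of SD_LV_MAX or larger is now silently ignored and default_relax_domain_level keeps its prior value (-1, i.e. no override) instead of storing an out-of-range level.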
@@ -7235,6 +7242,18 @@ void __attribute__((weak)) arch_update_cpu_topology(void)
 {
 }
 
+/*
+ * Free current domain masks.
+ * Called after all cpus are attached to NULL domain.
+ */
+static void free_sched_domains(void)
+{
+	ndoms_cur = 0;
+	if (doms_cur != &fallback_doms)
+		kfree(doms_cur);
+	doms_cur = &fallback_doms;
+}
+
 /*
  * Set up scheduler domains and groups. Callers must hold the hotplug lock.
  * For now this just excludes isolated cpus, but could be used to
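The doms_cur != &fallback_doms guard matters because fallback_doms is the scheduler's statically allocated fallback partition, so only dynamically allocated partitions may be kfree()d. Resetting ndoms_cur and doms_cur here keeps the bookkeeping consistent for the two detach_destroy_domains() call sites below, which each gain a free_sched_domains() call.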
@@ -7382,6 +7401,7 @@ int arch_reinit_sched_domains(void)
 	get_online_cpus();
 	mutex_lock(&sched_domains_mutex);
 	detach_destroy_domains(&cpu_online_map);
+	free_sched_domains();
 	err = arch_init_sched_domains(&cpu_online_map);
 	mutex_unlock(&sched_domains_mutex);
 	put_online_cpus();
@@ -7467,6 +7487,7 @@ static int update_sched_domains(struct notifier_block *nfb,
 	case CPU_DOWN_PREPARE:
 	case CPU_DOWN_PREPARE_FROZEN:
 		detach_destroy_domains(&cpu_online_map);
+		free_sched_domains();
 		return NOTIFY_OK;
 
 	case CPU_UP_CANCELED:
@@ -7485,8 +7506,16 @@ static int update_sched_domains(struct notifier_block *nfb,
 		return NOTIFY_DONE;
 	}
 
+#ifndef CONFIG_CPUSETS
+	/*
+	 * Create default domain partitioning if cpusets are disabled.
+	 * Otherwise we let cpusets rebuild the domains based on the
+	 * current setup.
+	 */
+
 	/* The hotplug lock is already held by cpu_up/cpu_down */
 	arch_init_sched_domains(&cpu_online_map);
+#endif
 
 	return NOTIFY_OK;
 }
@@ -7626,7 +7655,6 @@ static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
 	else
 		rt_se->rt_rq = parent->my_q;
 
-	rt_se->rt_rq = &rq->rt;
 	rt_se->my_q = rt_rq;
 	rt_se->parent = parent;
 	INIT_LIST_HEAD(&rt_se->run_list);
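The deleted line unconditionally overwrote rt_se->rt_rq with &rq->rt, defeating the parent-based selection immediately above it; with it gone, a child group's rt entity is queued on its parent's my_q as intended.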
@@ -8348,7 +8376,7 @@ static unsigned long to_ratio(u64 period, u64 runtime)
 #ifdef CONFIG_CGROUP_SCHED
 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
 {
-	struct task_group *tgi, *parent = tg->parent;
+	struct task_group *tgi, *parent = tg ? tg->parent : NULL;
 	unsigned long total = 0;
 
 	if (!parent) {
@@ -8475,6 +8503,9 @@ int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
 	rt_period = (u64)rt_period_us * NSEC_PER_USEC;
 	rt_runtime = tg->rt_bandwidth.rt_runtime;
 
+	if (rt_period == 0)
+		return -EINVAL;
+
 	return tg_set_bandwidth(tg, rt_period, rt_runtime);
 }
 
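Rejecting a zero rt_period up front avoids handing tg_set_bandwidth() a 0 ns period; the runtime/period ratio computed there via to_ratio() (visible in the hunk header above) would otherwise presumably divide by zero.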