cpumask fallout: Initialize irq_default_affinity earlier
[pandora-kernel.git] / kernel / sched.c
diff --git a/kernel/sched.c b/kernel/sched.c
index deb5ac8..52bbf1c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -125,6 +125,9 @@ DEFINE_TRACE(sched_switch);
 DEFINE_TRACE(sched_migrate_task);
 
 #ifdef CONFIG_SMP
+
+static void double_rq_lock(struct rq *rq1, struct rq *rq2);
+
 /*
  * Divide a load by a sched group cpu_power : (load / sg->__cpu_power)
  * Since cpu_power is a 'constant', we can use a reciprocal divide.
@@ -1320,8 +1323,8 @@ static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
  * slice expiry etc.
  */
 
-#define WEIGHT_IDLEPRIO		2
-#define WMULT_IDLEPRIO		(1 << 31)
+#define WEIGHT_IDLEPRIO		3
+#define WMULT_IDLEPRIO		1431655765
 
 /*
  * Nice levels are multiplicative, with a gentle 10% change for every
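Note on the constants: the WMULT_* values are precomputed inverses that let the scheduler turn a division by the weight into a multiply and a 32-bit shift. The new WMULT_IDLEPRIO matches the new weight, since 2^32 / 3 rounds down to 1431655765. A minimal userspace sketch of that reciprocal trick (the names here are illustrative, not the kernel's):

	#include <stdint.h>
	#include <stdio.h>

	#define WMULT_SHIFT	32

	/* Approximate delta / weight as (delta * inv_weight) >> 32, where
	 * inv_weight = 2^32 / weight was computed once ahead of time. */
	static uint64_t scale_load(uint32_t delta, uint32_t inv_weight)
	{
		return ((uint64_t)delta * inv_weight) >> WMULT_SHIFT;
	}

	int main(void)
	{
		uint32_t wmult_idleprio = (uint32_t)((1ULL << 32) / 3);

		printf("%u\n", wmult_idleprio);	/* 1431655765 */
		printf("%llu\n", (unsigned long long)
		       scale_load(3000, wmult_idleprio));	/* 999, i.e. ~3000/3 */
		return 0;
	}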
@@ -4437,7 +4440,7 @@ void __kprobes sub_preempt_count(int val)
 	/*
 	 * Underflow?
 	 */
-	if (DEBUG_LOCKS_WARN_ON(val > preempt_count() - (!!kernel_locked())))
+	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
 		return;
 	/*
 	 * Is the spinlock portion underflowing?
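This hunk reverts a BKL-aware variant of the underflow test, restoring the plain comparison. The rule it enforces is simply that sub_preempt_count() must never be asked to subtract more than is currently accounted. A toy userspace model of that rule (illustrative only, not kernel code):

	#include <stdio.h>

	static int preempt_count_val;	/* stands in for preempt_count() */

	static void add_preempt_count(int val)
	{
		preempt_count_val += val;
	}

	static void sub_preempt_count(int val)
	{
		/* Underflow? The check this hunk restores to its simple form. */
		if (val > preempt_count_val) {
			fprintf(stderr, "preempt count underflow!\n");
			return;
		}
		preempt_count_val -= val;
	}

	int main(void)
	{
		add_preempt_count(1);
		sub_preempt_count(1);
		sub_preempt_count(1);	/* would underflow: warns and bails out */
		return 0;
	}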
@@ -5123,7 +5126,7 @@ int can_nice(const struct task_struct *p, const int nice)
  * sys_setpriority is a more generic, but much slower function that
  * does similar things.
  */
-asmlinkage long sys_nice(int increment)
+SYSCALL_DEFINE1(nice, int, increment)
 {
 	long nice, retval;
 
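This and the remaining syscall hunks are one mechanical conversion: each open-coded asmlinkage prototype becomes a SYSCALL_DEFINEn macro. On most configurations the macro expands back to the old signature; with CONFIG_SYSCALL_WRAPPERS (the CVE-2009-0029 hardening for 64-bit architectures such as s390) it instead emits a stub that takes every argument as a long and narrows it explicitly, so stale upper register bits cannot leak into the handler. Roughly, as a standalone sketch rather than the exact preprocessor output:

	#define asmlinkage	/* empty here; a calling-convention tag in the kernel */

	static long nice_body(int increment)
	{
		return increment;	/* stand-in for the real sys_nice body */
	}

	/* Without CONFIG_SYSCALL_WRAPPERS the macro yields the old shape: */
	asmlinkage long sys_nice(int increment)
	{
		return nice_body(increment);
	}

	/* With CONFIG_SYSCALL_WRAPPERS it yields a long-taking stub: */
	asmlinkage long SyS_nice(long increment)
	{
		return nice_body((int)increment);	/* explicit narrowing */
	}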
@@ -5430,8 +5433,8 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
  * @policy: new policy.
  * @param: structure containing the new RT priority.
  */
-asmlinkage long
-sys_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
+SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
+		struct sched_param __user *, param)
 {
 	/* negative values for policy are not valid */
 	if (policy < 0)
@@ -5445,7 +5448,7 @@ sys_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
  * @pid: the pid in question.
  * @param: structure containing the new RT priority.
  */
-asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param)
+SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
 {
 	return do_sched_setscheduler(pid, -1, param);
 }
@@ -5454,7 +5457,7 @@ asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param)
  * sys_sched_getscheduler - get the policy (scheduling class) of a thread
  * @pid: the pid in question.
  */
-asmlinkage long sys_sched_getscheduler(pid_t pid)
+SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
 {
 	struct task_struct *p;
 	int retval;
@@ -5479,7 +5482,7 @@ asmlinkage long sys_sched_getscheduler(pid_t pid)
  * @pid: the pid in question.
  * @param: structure containing the RT priority.
  */
-asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param)
+SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
 {
 	struct sched_param lp;
 	struct task_struct *p;
@@ -5597,8 +5600,8 @@ static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
  * @len: length in bytes of the bitmask pointed to by user_mask_ptr
  * @user_mask_ptr: user-space pointer to the new cpu mask
  */
-asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
-				      unsigned long __user *user_mask_ptr)
+SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
+		unsigned long __user *, user_mask_ptr)
 {
 	cpumask_var_t new_mask;
 	int retval;
@@ -5645,8 +5648,8 @@ out_unlock:
  * @len: length in bytes of the bitmask pointed to by user_mask_ptr
  * @user_mask_ptr: user-space pointer to hold the current cpu mask
  */
-asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
-				      unsigned long __user *user_mask_ptr)
+SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
+		unsigned long __user *, user_mask_ptr)
 {
 	int ret;
 	cpumask_var_t mask;
@@ -5675,7 +5678,7 @@ asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
  * This function yields the current CPU to other tasks. If there are no
  * other threads running on this CPU then this function will return.
  */
-asmlinkage long sys_sched_yield(void)
+SYSCALL_DEFINE0(sched_yield)
 {
 	struct rq *rq = this_rq_lock();
 
@@ -5816,7 +5819,7 @@ long __sched io_schedule_timeout(long timeout)
  * this syscall returns the maximum rt_priority that can be used
  * by a given scheduling class.
  */
-asmlinkage long sys_sched_get_priority_max(int policy)
+SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
 {
 	int ret = -EINVAL;
 
@@ -5841,7 +5844,7 @@ asmlinkage long sys_sched_get_priority_max(int policy)
  * this syscall returns the minimum rt_priority that can be used
  * by a given scheduling class.
  */
-asmlinkage long sys_sched_get_priority_min(int policy)
+SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
 {
 	int ret = -EINVAL;
 
@@ -5866,8 +5869,8 @@ asmlinkage long sys_sched_get_priority_min(int policy)
  * this syscall writes the default timeslice value of a given process
  * into the user-space timespec buffer. A value of '0' means infinity.
  */
-asmlinkage
-long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
+SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
+		struct timespec __user *, interval)
 {
 	struct task_struct *p;
 	unsigned int time_slice;
@@ -7282,10 +7285,10 @@ cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
  * groups, so roll our own. Now each node has its own list of groups which
  * gets dynamically allocated.
  */
-static DEFINE_PER_CPU(struct sched_domain, node_domains);
+static DEFINE_PER_CPU(struct static_sched_domain, node_domains);
 static struct sched_group ***sched_group_nodes_bycpu;
 
-static DEFINE_PER_CPU(struct sched_domain, allnodes_domains);
+static DEFINE_PER_CPU(struct static_sched_domain, allnodes_domains);
 static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes);
 
 static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map,
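The two NUMA domain arrays move to the static_sched_domain wrapper already used for the other per-cpu domains in this file, which is why the following hunks append .sd to each per_cpu() lookup. The wrapper pairs the domain with a statically sized span bitmap; paraphrased as a standalone sketch, with stub macros standing in for the kernel's:

	#define CONFIG_NR_CPUS	64	/* stub; normally a Kconfig value */
	#define DECLARE_BITMAP(name, bits) \
		unsigned long name[((bits) + 8 * sizeof(long) - 1) / (8 * sizeof(long))]

	struct sched_domain { int stub; };	/* the real struct is much larger */

	struct static_sched_domain {
		struct sched_domain sd;	/* embedded, hence the ".sd" accessors */
		DECLARE_BITMAP(span, CONFIG_NR_CPUS);
	};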
@@ -7560,7 +7563,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 #ifdef CONFIG_NUMA
 	if (cpumask_weight(cpu_map) >
 			SD_NODES_PER_DOMAIN*cpumask_weight(nodemask)) {
-		sd = &per_cpu(allnodes_domains, i);
+		sd = &per_cpu(allnodes_domains, i).sd;
 		SD_INIT(sd, ALLNODES);
 		set_domain_attribute(sd, attr);
 		cpumask_copy(sched_domain_span(sd), cpu_map);
@@ -7570,7 +7573,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 		} else
 			p = NULL;
 
-		sd = &per_cpu(node_domains, i);
+		sd = &per_cpu(node_domains, i).sd;
 		SD_INIT(sd, NODE);
 		set_domain_attribute(sd, attr);
 		sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
@@ -7688,7 +7691,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 		for_each_cpu(j, nodemask) {
 			struct sched_domain *sd;
 
-			sd = &per_cpu(node_domains, j);
+			sd = &per_cpu(node_domains, j).sd;
 			sd->groups = sg;
 		}
 		sg->__cpu_power = 0;
@@ -9047,6 +9050,13 @@ static int tg_schedulable(struct task_group *tg, void *data)
 		runtime = d->rt_runtime;
 	}
 
+#ifdef CONFIG_USER_SCHED
+	if (tg == &root_task_group) {
+		period = global_rt_period();
+		runtime = global_rt_runtime();
+	}
+#endif
+
 	/*
 	 * Cannot have more runtime than the period.
 	 */
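With CONFIG_USER_SCHED the root task group's own rt_bandwidth is not what the kernel.sched_rt_period_us / kernel.sched_rt_runtime_us sysctls control, so tg_schedulable() now substitutes the global period and runtime when validating the root group, before applying the "no more runtime than the period" rule that follows the hunk. A small illustrative check using the usual defaults (1 s period, 0.95 s runtime):

	#include <stdio.h>

	int main(void)
	{
		/* Defaults, converted from microseconds to nanoseconds the way
		 * global_rt_period() and global_rt_runtime() do. */
		unsigned long long period  = 1000000ULL * 1000;
		unsigned long long runtime =  950000ULL * 1000;

		/* Cannot have more runtime than the period. */
		if (runtime > period) {
			puts("-EINVAL");
			return 1;
		}
		printf("ok: RT tasks may use at most %.0f%% of each period\n",
		       100.0 * (double)runtime / (double)period);
		return 0;
	}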