static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
{
struct signal_struct *sig;
--- int ret;
if (clone_flags & CLONE_THREAD) {
--- ret = thread_group_cputime_clone_thread(current);
--- if (likely(!ret)) {
--- atomic_inc(&current->signal->count);
--- atomic_inc(&current->signal->live);
--- }
--- return ret;
+++ atomic_inc(&current->signal->count);
+++ atomic_inc(&current->signal->live);
+++ return 0;
}
sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
+++
+++ if (sig)
+++ posix_cpu_timers_init_group(sig);
+++
tsk->signal = sig;
if (!sig)
return -ENOMEM;
sig->tty_old_pgrp = NULL;
sig->tty = NULL;
---- sig->cutime = sig->cstime = cputime_zero;
++++ sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
sig->gtime = cputime_zero;
sig->cgtime = cputime_zero;
sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0;
task_io_accounting_init(&sig->ioac);
++++ sig->sum_sched_runtime = 0;
taskstats_tgid_init(sig);
task_lock(current->group_leader);
memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
task_unlock(current->group_leader);
--- posix_cpu_timers_init_group(sig);
---
acct_init_pacct(&sig->pacct);
tty_audit_fork(sig);
clear_freeze_flag(p);
}
- asmlinkage long sys_set_tid_address(int __user *tidptr)
+ SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
{
current->clear_child_tid = tidptr;
* triggers too late. This doesn't hurt, the check is only there
* to stop root fork bombs.
*/
++++ retval = -EAGAIN;
if (nr_threads >= max_threads)
goto bad_fork_cleanup_count;
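Side note (not part of the hunk): the -EAGAIN set just above is what eventually reaches userspace when the max_threads ceiling (or RLIMIT_NPROC) is hit, so a fork() caller sees it as a plain EAGAIN failure. A minimal illustrative sketch:

	#include <errno.h>
	#include <stdio.h>
	#include <sys/types.h>
	#include <unistd.h>

	int main(void)
	{
		pid_t pid = fork();

		if (pid < 0 && errno == EAGAIN)
			fprintf(stderr, "fork: hit max_threads or RLIMIT_NPROC\n");
		else if (pid == 0)
			_exit(0);	/* child exits immediately */
		return 0;
	}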
* constructed. Here we are modifying the current, active,
* task_struct.
*/
- asmlinkage long sys_unshare(unsigned long unshare_flags)
+ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
{
int err = 0;
struct fs_struct *fs, *new_fs = NULL;
DEFINE_TRACE(sched_migrate_task);
#ifdef CONFIG_SMP
+
+ static void double_rq_lock(struct rq *rq1, struct rq *rq2);
+
/*
* Divide a load by a sched group cpu_power : (load / sg->__cpu_power)
* Since cpu_power is a 'constant', we can use a reciprocal divide.
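Background (a sketch from memory, not part of this hunk): reciprocal_divide() from <linux/reciprocal_div.h> replaces the runtime division with a multiply and a 32-bit shift against a precomputed inverse, so the helper this comment describes is essentially:

	static inline u32 sg_div_cpu_power(const struct sched_group *sg, u32 load)
	{
		/* reciprocal_divide(A, R) == (u32)(((u64)A * R) >> 32) */
		return reciprocal_divide(load, sg->reciprocal_cpu_power);
	}

where sg->reciprocal_cpu_power is refreshed with reciprocal_value(sg->__cpu_power) whenever __cpu_power changes.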
* slice expiry etc.
*/
- #define WEIGHT_IDLEPRIO 2
- #define WMULT_IDLEPRIO (1 << 31)
+ #define WEIGHT_IDLEPRIO 3
+ #define WMULT_IDLEPRIO 1431655765
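For reference, the WMULT_* constants are the fixed-point inverses used when scaling by a task weight (roughly 2^32 / weight, applied as a multiply plus a 32-bit shift instead of a divide). With the new idle weight of 3: 4294967296 / 3 = 1431655765 (rounded down), which matches the constant above; the old pair followed the same rule, since 2^32 / 2 = 1 << 31.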
/*
* Nice levels are multiplicative, with a gentle 10% change for every
if (!sched_feat(SYNC_WAKEUPS))
sync = 0;
+++ if (!sync) {
+++ if (current->se.avg_overlap < sysctl_sched_migration_cost &&
+++ p->se.avg_overlap < sysctl_sched_migration_cost)
+++ sync = 1;
+++ } else {
+++ if (current->se.avg_overlap >= sysctl_sched_migration_cost ||
+++ p->se.avg_overlap >= sysctl_sched_migration_cost)
+++ sync = 0;
+++ }
+++
#ifdef CONFIG_SMP
if (sched_feat(LB_WAKEUP_UPDATE)) {
struct sched_domain *sd;
int cpu = smp_processor_id();
if (stop_tick) {
---- cpumask_set_cpu(cpu, nohz.cpu_mask);
cpu_rq(cpu)->in_nohz_recently = 1;
---- /*
---- * If we are going offline and still the leader, give up!
---- */
---- if (!cpu_active(cpu) &&
---- atomic_read(&nohz.load_balancer) == cpu) {
++++ if (!cpu_active(cpu)) {
++++ if (atomic_read(&nohz.load_balancer) != cpu)
++++ return 0;
++++
++++ /*
++++ * If we are going offline and still the leader,
++++ * give up!
++++ */
if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
BUG();
++++
return 0;
}
++++ cpumask_set_cpu(cpu, nohz.cpu_mask);
++++
/* time for ilb owner also to sleep */
if (cpumask_weight(nohz.cpu_mask) == num_online_cpus()) {
if (atomic_read(&nohz.load_balancer) == cpu)
/*
* Underflow?
*/
- if (DEBUG_LOCKS_WARN_ON(val > preempt_count() - (!!kernel_locked())))
+ if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
return;
/*
* Is the spinlock portion underflowing?
* started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
* zero in this (rare) case, and we handle it by continuing to scan the queue.
*/
----static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
---- int nr_exclusive, int sync, void *key)
++++void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
++++ int nr_exclusive, int sync, void *key)
{
wait_queue_t *curr, *next;
* sys_setpriority is a more generic, but much slower function that
* does similar things.
*/
- asmlinkage long sys_nice(int increment)
+ SYSCALL_DEFINE1(nice, int, increment)
{
long nice, retval;
* @policy: new policy.
* @param: structure containing the new RT priority.
*/
- asmlinkage long
- sys_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
+ SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
+ struct sched_param __user *, param)
{
/* negative values for policy are not valid */
if (policy < 0)
* @pid: the pid in question.
* @param: structure containing the new RT priority.
*/
- asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param)
+ SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
{
return do_sched_setscheduler(pid, -1, param);
}
* sys_sched_getscheduler - get the policy (scheduling class) of a thread
* @pid: the pid in question.
*/
- asmlinkage long sys_sched_getscheduler(pid_t pid)
+ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
{
struct task_struct *p;
int retval;
* @pid: the pid in question.
* @param: structure containing the RT priority.
*/
- asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param)
+ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
{
struct sched_param lp;
struct task_struct *p;
* @len: length in bytes of the bitmask pointed to by user_mask_ptr
* @user_mask_ptr: user-space pointer to the new cpu mask
*/
- asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
- unsigned long __user *user_mask_ptr)
+ SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
+ unsigned long __user *, user_mask_ptr)
{
cpumask_var_t new_mask;
int retval;
* @len: length in bytes of the bitmask pointed to by user_mask_ptr
* @user_mask_ptr: user-space pointer to hold the current cpu mask
*/
- asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
- unsigned long __user *user_mask_ptr)
+ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
+ unsigned long __user *, user_mask_ptr)
{
int ret;
cpumask_var_t mask;
* This function yields the current CPU to other tasks. If there are no
* other threads running on this CPU then this function will return.
*/
- asmlinkage long sys_sched_yield(void)
+ SYSCALL_DEFINE0(sched_yield)
{
struct rq *rq = this_rq_lock();
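Usage note (not part of the patch): userspace reaches this through the sched_yield(2) wrapper, e.g.

	#include <sched.h>

	void busy_poll_step(void)
	{
		/* let any other runnable task on this CPU go first */
		sched_yield();
	}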
* this syscall returns the maximum rt_priority that can be used
* by a given scheduling class.
*/
- asmlinkage long sys_sched_get_priority_max(int policy)
+ SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
{
int ret = -EINVAL;
* this syscall returns the minimum rt_priority that can be used
* by a given scheduling class.
*/
- asmlinkage long sys_sched_get_priority_min(int policy)
+ SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
{
int ret = -EINVAL;
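Usage note (not part of the patch): the max/min pair is normally queried through the glibc wrappers; a minimal sketch of the expected values on Linux:

	#include <sched.h>
	#include <stdio.h>

	int main(void)
	{
		int max = sched_get_priority_max(SCHED_FIFO);	/* 99 on Linux */
		int min = sched_get_priority_min(SCHED_FIFO);	/* 1 on Linux */

		printf("SCHED_FIFO rt_priority range: %d..%d\n", min, max);
		return 0;
	}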
* this syscall writes the default timeslice value of a given process
* into the user-space timespec buffer. A value of '0' means infinity.
*/
- asmlinkage
- long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
+ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
+ struct timespec __user *, interval)
{
struct task_struct *p;
unsigned int time_slice;
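Usage note (not part of the patch): from userspace the timeslice is read via the sched_rr_get_interval(2) wrapper; a minimal sketch, using pid 0 for the calling process:

	#include <sched.h>
	#include <stdio.h>
	#include <time.h>

	int main(void)
	{
		struct timespec ts;

		if (sched_rr_get_interval(0, &ts) == 0)	/* pid 0 = self */
			printf("timeslice: %ld.%09ld s (0 means infinity)\n",
			       (long)ts.tv_sec, (long)ts.tv_nsec);
		return 0;
	}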
* groups, so roll our own. Now each node has its own list of groups which
* gets dynamically allocated.
*/
- static DEFINE_PER_CPU(struct sched_domain, node_domains);
+ static DEFINE_PER_CPU(struct static_sched_domain, node_domains);
static struct sched_group ***sched_group_nodes_bycpu;
- static DEFINE_PER_CPU(struct sched_domain, allnodes_domains);
+ static DEFINE_PER_CPU(struct static_sched_domain, allnodes_domains);
static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes);
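For context (recalled from the surrounding cpumask conversion, so treat as a sketch): static_sched_domain bundles a sched_domain with static storage for its span, roughly

	struct static_sched_domain {
		struct sched_domain sd;
		DECLARE_BITMAP(span, CONFIG_NR_CPUS);
	};

which is why the per-cpu NUMA domains below are now dereferenced through .sd.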
static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map,
#ifdef CONFIG_NUMA
if (cpumask_weight(cpu_map) >
SD_NODES_PER_DOMAIN*cpumask_weight(nodemask)) {
- sd = &per_cpu(allnodes_domains, i);
+ sd = &per_cpu(allnodes_domains, i).sd;
SD_INIT(sd, ALLNODES);
set_domain_attribute(sd, attr);
cpumask_copy(sched_domain_span(sd), cpu_map);
} else
p = NULL;
- sd = &per_cpu(node_domains, i);
+ sd = &per_cpu(node_domains, i).sd;
SD_INIT(sd, NODE);
set_domain_attribute(sd, attr);
sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
for_each_cpu(j, nodemask) {
struct sched_domain *sd;
- sd = &per_cpu(node_domains, j);
+ sd = &per_cpu(node_domains, j).sd;
sd->groups = sg;
}
sg->__cpu_power = 0;
runtime = d->rt_runtime;
}
+ #ifdef CONFIG_USER_SCHED
+ if (tg == &root_task_group) {
+ period = global_rt_period();
+ runtime = global_rt_runtime();
+ }
+ #endif
+
/*
* Cannot have more runtime than the period.
*/
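For reference (a sketch from memory, not shown in this hunk), the guard that follows this comment is along the lines of:

	if (runtime > period && runtime != RUNTIME_INF)
		return -EINVAL;

With the default sysctls (sched_rt_period_us = 1000000, sched_rt_runtime_us = 950000) realtime tasks may consume at most 0.95 s of every 1 s period, so asking for more runtime than the period is rejected.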