Merge branch 'timers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
diff --git a/kernel/sched.c b/kernel/sched.c
index 5049d95..2f76e06 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -39,7 +39,7 @@
 #include <linux/completion.h>
 #include <linux/kernel_stat.h>
 #include <linux/debug_locks.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <linux/security.h>
 #include <linux/notifier.h>
 #include <linux/profile.h>
@@ -293,12 +293,12 @@ struct task_group root_task_group;
 /* Default task group's sched entity on each cpu */
 static DEFINE_PER_CPU(struct sched_entity, init_sched_entity);
 /* Default task group's cfs_rq on each cpu */
-static DEFINE_PER_CPU(struct cfs_rq, init_tg_cfs_rq) ____cacheline_aligned_in_smp;
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct cfs_rq, init_tg_cfs_rq);
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
 #ifdef CONFIG_RT_GROUP_SCHED
 static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
-static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp;
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq);
 #endif /* CONFIG_RT_GROUP_SCHED */
 #else /* !CONFIG_USER_SCHED */
 #define root_task_group init_task_group
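
Note: DEFINE_PER_CPU_SHARED_ALIGNED() replaces the open-coded ____cacheline_aligned_in_smp: besides aligning the variable it also places it in the dedicated "shared aligned" percpu section, so the alignment padding no longer sits in the middle of the ordinary percpu area. Rough shape of the macro, recalled from include/linux/percpu-defs.h of this period and not part of this diff:

        #define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)                       \
                DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
                ____cacheline_aligned_in_smp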
@@ -681,15 +681,9 @@ inline void update_rq_clock(struct rq *rq)
  * This interface allows printk to be called with the runqueue lock
  * held and know whether or not it is OK to wake up the klogd.
  */
-int runqueue_is_locked(void)
+int runqueue_is_locked(int cpu)
 {
-       int cpu = get_cpu();
-       struct rq *rq = cpu_rq(cpu);
-       int ret;
-
-       ret = spin_is_locked(&rq->lock);
-       put_cpu();
-       return ret;
+       return spin_is_locked(&cpu_rq(cpu)->lock);
 }
 
 /*
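
Note: runqueue_is_locked() now takes the CPU to check instead of implicitly pinning and querying the local CPU, so callers that already know which CPU they care about skip the get_cpu()/put_cpu() pair. Hypothetical caller sketch, assuming preemption is already disabled; wake_up_klogd() and smp_processor_id() are assumed context, not part of this hunk:

        if (!runqueue_is_locked(smp_processor_id()))
                wake_up_klogd();        /* this CPU's rq->lock is free */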
@@ -2059,7 +2053,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
                if (task_hot(p, old_rq->clock, NULL))
                        schedstat_inc(p, se.nr_forced2_migrations);
 #endif
-               perf_swcounter_event(PERF_COUNT_SW_CPU_MIGRATIONS,
+               perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS,
                                     1, 1, NULL, 0);
        }
        p->se.vruntime -= old_cfsrq->min_vruntime -
@@ -2343,7 +2337,11 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
        /*
         * In order to handle concurrent wakeups and release the rq->lock
         * we put the task in TASK_WAKING state.
+        *
+        * First fix up the nr_uninterruptible count:
         */
+       if (task_contributes_to_load(p))
+               rq->nr_uninterruptible--;
        p->state = TASK_WAKING;
        task_rq_unlock(rq, &flags);
 
@@ -2454,6 +2452,7 @@ static void __sched_fork(struct task_struct *p)
        p->se.avg_overlap               = 0;
        p->se.start_runtime             = 0;
        p->se.avg_wakeup                = sysctl_sched_wakeup_granularity;
+       p->se.avg_running               = 0;
 
 #ifdef CONFIG_SCHEDSTATS
        p->se.wait_start                        = 0;
@@ -2719,7 +2718,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
         */
        prev_state = prev->state;
        finish_arch_switch(prev);
-       perf_counter_task_sched_in(current, cpu_of(rq));
+       perf_event_task_sched_in(current, cpu_of(rq));
        finish_lock_switch(rq, prev);
 
        fire_sched_in_preempt_notifiers(current);
@@ -2905,6 +2904,19 @@ unsigned long nr_iowait(void)
        return sum;
 }
 
+unsigned long nr_iowait_cpu(void)
+{
+       struct rq *this = this_rq();
+       return atomic_read(&this->nr_iowait);
+}
+
+unsigned long this_cpu_load(void)
+{
+       struct rq *this = this_rq();
+       return this->cpu_load[0];
+}
+
+
 /* Variables and functions for calc_load */
 static atomic_long_t calc_load_tasks;
 static unsigned long calc_load_update;
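
Note: nr_iowait_cpu() and this_cpu_load() export per-CPU state (the number of tasks currently blocked on I/O against this runqueue, and the most recent cpu_load[0] sample) for use outside the scheduler, e.g. by a cpuidle governor weighting its idle-length prediction. Illustrative consumer sketch; the function name and weights are made up for the example:

        static unsigned int idle_penalty(void)
        {
                unsigned int mult = 1;

                /* busier CPUs and pending I/O both argue for shallower idle states */
                mult += this_cpu_load() / 10;
                mult += 10 * nr_iowait_cpu();
                return mult;
        }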
@@ -5080,17 +5092,16 @@ void account_idle_time(cputime_t cputime)
  */
 void account_process_tick(struct task_struct *p, int user_tick)
 {
-       cputime_t one_jiffy = jiffies_to_cputime(1);
-       cputime_t one_jiffy_scaled = cputime_to_scaled(one_jiffy);
+       cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
        struct rq *rq = this_rq();
 
        if (user_tick)
-               account_user_time(p, one_jiffy, one_jiffy_scaled);
+               account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
        else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
-               account_system_time(p, HARDIRQ_OFFSET, one_jiffy,
+               account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
                                    one_jiffy_scaled);
        else
-               account_idle_time(one_jiffy);
+               account_idle_time(cputime_one_jiffy);
 }
 
 /*
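
Note: cputime_one_jiffy caches the value of jiffies_to_cputime(1), so the tick path no longer converts one jiffy to cputime on every invocation. On architectures with a trivial cputime representation it is expected to be just a constant, roughly:

        #define cputime_one_jiffy       jiffies_to_cputime(1)

while architectures with an expensive conversion can compute it once at boot instead.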
@@ -5194,7 +5205,7 @@ void scheduler_tick(void)
        curr->sched_class->task_tick(rq, curr, 0);
        spin_unlock(&rq->lock);
 
-       perf_counter_task_tick(curr, cpu);
+       perf_event_task_tick(curr, cpu);
 
 #ifdef CONFIG_SMP
        rq->idle_at_tick = idle_cpu(cpu);
@@ -5306,14 +5317,13 @@ static inline void schedule_debug(struct task_struct *prev)
 #endif
 }
 
-static void put_prev_task(struct rq *rq, struct task_struct *prev)
+static void put_prev_task(struct rq *rq, struct task_struct *p)
 {
-       if (prev->state == TASK_RUNNING) {
-               u64 runtime = prev->se.sum_exec_runtime;
+       u64 runtime = p->se.sum_exec_runtime - p->se.prev_sum_exec_runtime;
 
-               runtime -= prev->se.prev_sum_exec_runtime;
-               runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);
+       update_avg(&p->se.avg_running, runtime);
 
+       if (p->state == TASK_RUNNING) {
                /*
                 * In order to avoid avg_overlap growing stale when we are
                 * indeed overlapping and hence not getting put to sleep, grow
@@ -5323,9 +5333,12 @@ static void put_prev_task(struct rq *rq, struct task_struct *prev)
                 * correlates to the amount of cache footprint a task can
                 * build up.
                 */
-               update_avg(&prev->se.avg_overlap, runtime);
+               runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);
+               update_avg(&p->se.avg_overlap, runtime);
+       } else {
+               update_avg(&p->se.avg_running, 0);
        }
-       prev->sched_class->put_prev_task(rq, prev);
+       p->sched_class->put_prev_task(rq, p);
 }
 
 /*
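
Note: avg_running is a per-entity estimate of how long the task runs each time it is scheduled; it is fed the latest slice length here and pulled toward zero when the task blocks. update_avg() elsewhere in sched.c is a plain exponential moving average, approximately as follows (recalled sketch, not part of this diff):

        static void update_avg(u64 *avg, u64 sample)
        {
                s64 diff = sample - *avg;

                *avg += diff >> 3;      /* new = old + (sample - old) / 8 */
        }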
@@ -5408,7 +5421,7 @@ need_resched_nonpreemptible:
 
        if (likely(prev != next)) {
                sched_info_switch(prev, next);
-               perf_counter_task_sched_out(prev, next, cpu);
+               perf_event_task_sched_out(prev, next, cpu);
 
                rq->nr_switches++;
                rq->curr = next;
@@ -6818,23 +6831,8 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
        if (retval)
                goto out_unlock;
 
-       /*
-        * Time slice is 0 for SCHED_FIFO tasks and for SCHED_OTHER
-        * tasks that are on an otherwise idle runqueue:
-        */
-       time_slice = 0;
-       if (p->policy == SCHED_RR) {
-               time_slice = DEF_TIMESLICE;
-       } else if (p->policy != SCHED_FIFO) {
-               struct sched_entity *se = &p->se;
-               unsigned long flags;
-               struct rq *rq;
+       time_slice = p->sched_class->get_rr_interval(p);
 
-               rq = task_rq_lock(p, &flags);
-               if (rq->cfs.load.weight)
-                       time_slice = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
-               task_rq_unlock(rq, &flags);
-       }
        read_unlock(&tasklist_lock);
        jiffies_to_timespec(time_slice, &t);
        retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
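
Note: the per-policy timeslice logic moves behind the new sched_class->get_rr_interval() hook, so sys_sched_rr_get_interval() no longer special-cases SCHED_RR/SCHED_FIFO/SCHED_OTHER inline. From userspace the result is still observed through the usual wrapper; minimal probe using the standard glibc/POSIX API, not part of this diff:

        #include <sched.h>
        #include <stdio.h>
        #include <time.h>

        int main(void)
        {
                struct timespec ts;

                if (sched_rr_get_interval(0, &ts))      /* 0 = calling process */
                        return 1;
                printf("timeslice: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
                return 0;
        }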
@@ -7685,7 +7683,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 /*
  * Register at high priority so that task migration (migrate_all_tasks)
  * happens before everything else.  This has to be lower priority than
- * the notifier in the perf_counter subsystem, though.
+ * the notifier in the perf_event subsystem, though.
  */
 static struct notifier_block __cpuinitdata migration_notifier = {
        .notifier_call = migration_call,
@@ -9164,6 +9162,7 @@ void __init sched_init_smp(void)
        cpumask_var_t non_isolated_cpus;
 
        alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
+       alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
 
 #if defined(CONFIG_NUMA)
        sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **),
@@ -9195,7 +9194,6 @@ void __init sched_init_smp(void)
        sched_init_granularity();
        free_cpumask_var(non_isolated_cpus);
 
-       alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
        init_sched_rt_class();
 }
 #else
@@ -9542,7 +9540,7 @@ void __init sched_init(void)
        alloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
 #endif /* SMP */
 
-       perf_counter_init();
+       perf_event_init();
 
        scheduler_running = 1;
 }