Merge branch 'for_paulus' of master.kernel.org:/pub/scm/linux/kernel/git/galak/powerpc
[pandora-kernel.git] / kernel / sched.c
index 4c64f85..a856040 100644
@@ -664,48 +664,6 @@ static int effective_prio(task_t *p)
        return prio;
 }
 
-/*
- * We place interactive tasks back into the active array, if possible.
- *
- * To guarantee that this does not starve expired tasks we ignore the
- * interactivity of a task if the first expired task had to wait more
- * than a 'reasonable' amount of time. This deadline timeout is
- * load-dependent, as the frequency of array switched decreases with
- * increasing number of running tasks. We also ignore the interactivity
- * if a better static_prio task has expired, and switch periodically
- * regardless, to ensure that highly interactive tasks do not starve
- * the less fortunate for unreasonably long periods.
- */
-static inline int expired_starving(runqueue_t *rq)
-{
-       int limit;
-
-       /*
-        * Arrays were recently switched, all is well
-        */
-       if (!rq->expired_timestamp)
-               return 0;
-
-       limit = STARVATION_LIMIT * rq->nr_running;
-
-       /*
-        * It's time to switch arrays
-        */
-       if (jiffies - rq->expired_timestamp >= limit)
-               return 1;
-
-       /*
-        * There's a better selection in the expired array
-        */
-       if (rq->curr->static_prio > rq->best_expired_prio)
-               return 1;
-
-       /*
-        * All is well
-        */
-       return 0;
-}
-
 /*
  * __activate_task - move a task to the runqueue.
  */
@@ -713,7 +671,7 @@ static void __activate_task(task_t *p, runqueue_t *rq)
 {
        prio_array_t *target = rq->active;
 
-       if (unlikely(batch_task(p) || (expired_starving(rq) && !rt_task(p))))
+       if (batch_task(p))
                target = rq->expired;
        enqueue_task(p, target);
        rq->nr_running++;
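
With expired_starving() gone from the wakeup path, __activate_task() only diverts SCHED_BATCH tasks to the expired array; everything else starts on the active array. A minimal sketch of the effect, assuming batch_task() is the policy == SCHED_BATCH test this file uses elsewhere:

	/*
	 * Sketch: where a freshly activated task lands under the new rule.
	 * A batch task goes to rq->expired, so it cannot preempt the tasks
	 * currently cycling through rq->active; everyone else competes for
	 * the CPU immediately.
	 */
	static prio_array_t *activation_target(task_t *p, runqueue_t *rq)
	{
		return batch_task(p) ? rq->expired : rq->active;
	}
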
@@ -860,6 +818,11 @@ static void deactivate_task(struct task_struct *p, runqueue_t *rq)
  * the target CPU.
  */
 #ifdef CONFIG_SMP
+
+#ifndef tsk_is_polling
+#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
+#endif
+
 static void resched_task(task_t *p)
 {
        int cpu;
@@ -875,9 +838,9 @@ static void resched_task(task_t *p)
        if (cpu == smp_processor_id())
                return;
 
-       /* NEED_RESCHED must be visible before we test POLLING_NRFLAG */
+       /* NEED_RESCHED must be visible before we test polling */
        smp_mb();
-       if (!test_tsk_thread_flag(p, TIF_POLLING_NRFLAG))
+       if (!tsk_is_polling(p))
                smp_send_reschedule(cpu);
 }
 #else
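
The #ifndef wrapper lets an architecture supply its own tsk_is_polling() (for one that keeps its polling state outside the thread flags), while everyone else falls back to the TIF_POLLING_NRFLAG test. The smp_mb() pairs with the idle loop on the target CPU, which advertises that it polls need_resched() and therefore needs no IPI. A minimal sketch of that remote side, assuming a generic polling idle loop rather than any particular architecture's code:

	/*
	 * Sketch: the idle loop resched_task() pairs with.  The idle task
	 * sets TIF_POLLING_NRFLAG ("I watch need_resched() myself"), spins
	 * on need_resched(), and only then schedules.  resched_task() may
	 * therefore skip the IPI whenever it sees the flag set -- the
	 * smp_mb() above guarantees NEED_RESCHED is visible before the
	 * flag is tested.
	 */
	static void polling_idle_sketch(void)
	{
		set_thread_flag(TIF_POLLING_NRFLAG);
		while (!need_resched())
			cpu_relax();		/* poll, no IPI required */
		clear_thread_flag(TIF_POLLING_NRFLAG);
		schedule();
	}
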
@@ -2531,6 +2494,22 @@ unsigned long long current_sched_time(const task_t *tsk)
        return ns;
 }
 
+/*
+ * We place interactive tasks back into the active array, if possible.
+ *
+ * To guarantee that this does not starve expired tasks we ignore the
+ * interactivity of a task if the first expired task had to wait more
+ * than a 'reasonable' amount of time. This deadline timeout is
+ * load-dependent, as the frequency of array switches decreases with
+ * increasing number of running tasks. We also ignore the interactivity
+ * if a better static_prio task has expired:
+ */
+#define EXPIRED_STARVING(rq) \
+       ((STARVATION_LIMIT && ((rq)->expired_timestamp && \
+               (jiffies - (rq)->expired_timestamp >= \
+                       STARVATION_LIMIT * ((rq)->nr_running) + 1))) || \
+                       ((rq)->curr->static_prio > (rq)->best_expired_prio))
+
 /*
  * Account user cpu time to a process.
  * @p: the process that the cpu time gets accounted to
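
The macro packs three conditions into one expression; unlike the removed expired_starving() function, it also short-circuits when STARVATION_LIMIT is zero. A readable restatement, as a sketch that is logically equivalent to the macro but not code from this file:

	/*
	 * Sketch: EXPIRED_STARVING() unrolled.  The deadline scales with
	 * the number of runnable tasks, since array switches naturally
	 * become rarer as the load grows.  Lower static_prio is better,
	 * so '>' means a better task is waiting in the expired array.
	 */
	static inline int expired_starving_sketch(runqueue_t *rq)
	{
		if (STARVATION_LIMIT && rq->expired_timestamp &&
		    jiffies - rq->expired_timestamp >=
				STARVATION_LIMIT * rq->nr_running + 1)
			return 1;	/* first expired task waited too long */

		return rq->curr->static_prio > rq->best_expired_prio;
	}
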
@@ -2666,7 +2645,7 @@ void scheduler_tick(void)
 
                if (!rq->expired_timestamp)
                        rq->expired_timestamp = jiffies;
-               if (!TASK_INTERACTIVE(p) || expired_starving(rq)) {
+               if (!TASK_INTERACTIVE(p) || EXPIRED_STARVING(rq)) {
                        enqueue_task(p, rq->expired);
                        if (p->static_prio < rq->best_expired_prio)
                                rq->best_expired_prio = p->static_prio;
@@ -3912,6 +3891,10 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask)
                        !capable(CAP_SYS_NICE))
                goto out_unlock;
 
+       retval = security_task_setscheduler(p, 0, NULL);
+       if (retval)
+               goto out_unlock;
+
        cpus_allowed = cpuset_cpus_allowed(p);
        cpus_and(new_mask, new_mask, cpus_allowed);
        retval = set_cpus_allowed(p, new_mask);
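
sched_setaffinity() now consults the security module before touching the mask; this tree has no dedicated affinity hook, so the setscheduler hook is reused with policy 0 and no parameters, and sched_getaffinity() below gains the matching read-side check. From userspace the visible change is one more source of -EPERM. A sketch, with a hypothetical helper and trimmed error handling:

	#define _GNU_SOURCE
	#include <sched.h>
	#include <stdio.h>

	/* Hypothetical helper: pin a task to CPU 0. */
	static int pin_to_cpu0(pid_t pid)
	{
		cpu_set_t mask;

		CPU_ZERO(&mask);
		CPU_SET(0, &mask);
		/* EPERM can now also come from the LSM hook, not only
		 * from the euid/CAP_SYS_NICE checks above. */
		if (sched_setaffinity(pid, sizeof(mask), &mask) < 0) {
			perror("sched_setaffinity");
			return -1;
		}
		return 0;
	}
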
@@ -3980,7 +3963,10 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask)
        if (!p)
                goto out_unlock;
 
-       retval = 0;
+       retval = security_task_getscheduler(p);
+       if (retval)
+               goto out_unlock;
+
        cpus_and(*mask, p->cpus_allowed, cpu_online_map);
 
 out_unlock:
@@ -4072,6 +4058,9 @@ asmlinkage long sys_sched_yield(void)
 
 static inline void __cond_resched(void)
 {
+#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
+       __might_sleep(__FILE__, __LINE__);
+#endif
        /*
         * The BKS might be reacquired before we have dropped
         * PREEMPT_ACTIVE, which could trigger a second
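
With CONFIG_DEBUG_SPINLOCK_SLEEP, __cond_resched() now runs the same "sleeping function called from invalid context" check as other sleeping primitives, so a cond_resched() buried under a spinlock is reported even when need_resched() happens to be clear and no reschedule occurs. The typical call site it protects, as a sketch (process_item() is a hypothetical stand-in):

	/*
	 * Sketch: the usual pattern __cond_resched() serves -- a long
	 * kernel loop yielding the CPU at safe points.  Calling this with
	 * a spinlock held now warns in debug builds instead of silently
	 * passing.
	 */
	for (i = 0; i < nr_items; i++) {
		process_item(&items[i]);	/* hypothetical work */
		cond_resched();			/* may sleep: must not hold locks */
	}
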
@@ -4168,7 +4157,7 @@ EXPORT_SYMBOL(yield);
  */
 void __sched io_schedule(void)
 {
-       struct runqueue *rq = &per_cpu(runqueues, raw_smp_processor_id());
+       struct runqueue *rq = &__raw_get_cpu_var(runqueues);
 
        atomic_inc(&rq->nr_iowait);
        schedule();
@@ -4179,7 +4168,7 @@ EXPORT_SYMBOL(io_schedule);
 
 long __sched io_schedule_timeout(long timeout)
 {
-       struct runqueue *rq = &per_cpu(runqueues, raw_smp_processor_id());
+       struct runqueue *rq = &__raw_get_cpu_var(runqueues);
        long ret;
 
        atomic_inc(&rq->nr_iowait);
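
Both io_schedule() variants switch from spelling out per_cpu(..., raw_smp_processor_id()) to __raw_get_cpu_var(), which resolves to the same current-CPU copy without the preemption debug check. That is tolerable here because nr_iowait is statistical: the sleep may be charged to whichever CPU the task was on when it entered, and since the inc and dec use the same captured rq pointer the counter itself stays balanced. Sketch of the intended equivalence:

	/*
	 * Sketch: the two spellings name the same object on the CPU this
	 * runs on; the raw form merely skips debug_smp_processor_id().
	 */
	struct runqueue *a = &per_cpu(runqueues, raw_smp_processor_id());
	struct runqueue *b = &__raw_get_cpu_var(runqueues);	/* a == b */
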
@@ -4263,7 +4252,7 @@ long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
        if (retval)
                goto out_unlock;
 
-       jiffies_to_timespec(p->policy & SCHED_FIFO ?
+       jiffies_to_timespec(p->policy == SCHED_FIFO ?
                                0 : task_timeslice(p), &t);
        read_unlock(&tasklist_lock);
        retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
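
The '&' test was a real bug, not style: with this tree's policy constants (SCHED_NORMAL 0, SCHED_FIFO 1, SCHED_RR 2, SCHED_BATCH 3), `policy & SCHED_FIFO` is also true for SCHED_BATCH, so batch tasks were reported as having the zero round-robin interval reserved for FIFO. A sketch of the two classifications:

	/*
	 * Sketch: why '&' misclassified SCHED_BATCH.
	 *   SCHED_FIFO  == 1 (binary 01)
	 *   SCHED_BATCH == 3 (binary 11)
	 */
	static int fifo_old(int policy) { return policy & SCHED_FIFO; }  /* 1 and 3 match */
	static int fifo_new(int policy) { return policy == SCHED_FIFO; } /* only 1 matches */
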
@@ -4772,6 +4761,8 @@ static int migration_call(struct notifier_block *nfb, unsigned long action,
                break;
 #ifdef CONFIG_HOTPLUG_CPU
        case CPU_UP_CANCELED:
+               if (!cpu_rq(cpu)->migration_thread)
+                       break;
                /* Unbind it from offline cpu so it can run.  Fall thru. */
                kthread_bind(cpu_rq(cpu)->migration_thread,
                             any_online_cpu(cpu_online_map));
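
CPU_UP_CANCELED is broadcast to every notifier whenever any CPU_UP_PREPARE callback fails, including to notifiers whose own UP_PREPARE never ran; migration_call() must therefore tolerate migration_thread still being NULL. The general shape of a safe handler, as a sketch with hypothetical per-CPU state helpers:

	/*
	 * Sketch: the defensive pattern the hunk above adopts.  Any
	 * CPU_UP_CANCELED branch must cope with its own CPU_UP_PREPARE
	 * never having run, because the cancel is sent to all notifiers
	 * as soon as one of them fails.
	 */
	static int example_hotplug_call(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
	{
		int cpu = (long)hcpu;

		switch (action) {
		case CPU_UP_PREPARE:
			if (setup_percpu_state(cpu))	/* hypothetical, may fail */
				return NOTIFY_BAD;
			break;
		case CPU_UP_CANCELED:
			if (!my_percpu_state(cpu))	/* hypothetical accessor */
				break;			/* our UP_PREPARE never ran */
			teardown_percpu_state(cpu);	/* hypothetical cleanup */
			break;
		}
		return NOTIFY_OK;
	}
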