Merge branch 'for_paulus' of master.kernel.org:/pub/scm/linux/kernel/git/galak/powerpc
diff --git a/kernel/sched.c b/kernel/sched.c
index c13f1bd..a856040 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -818,6 +818,11 @@ static void deactivate_task(struct task_struct *p, runqueue_t *rq)
  * the target CPU.
  */
 #ifdef CONFIG_SMP
+
+#ifndef tsk_is_polling
+#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
+#endif
+
 static void resched_task(task_t *p)
 {
        int cpu;
@@ -833,9 +838,9 @@ static void resched_task(task_t *p)
        if (cpu == smp_processor_id())
                return;
 
-       /* NEED_RESCHED must be visible before we test POLLING_NRFLAG */
+       /* NEED_RESCHED must be visible before we test polling */
        smp_mb();
-       if (!test_tsk_thread_flag(p, TIF_POLLING_NRFLAG))
+       if (!tsk_is_polling(p))
                smp_send_reschedule(cpu);
 }
 #else
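
These two hunks let an architecture supply its own tsk_is_polling(); the #ifndef only installs the TIF_POLLING_NRFLAG test as a default. resched_task() can then skip the cross-CPU reschedule IPI whenever the target is already polling on NEED_RESCHED, and the smp_mb() orders the store of TIF_NEED_RESCHED before the read of the polling flag, pairing with the idle loop on the remote CPU. A simplified sketch of that remote side (hypothetical code, not any particular architecture's idle loop) shows why the IPI can be elided:

    /* Hypothetical polling idle loop: while TIF_POLLING_NRFLAG is set,
     * this CPU is spinning on need_resched(), so setting
     * TIF_NEED_RESCHED is enough to wake it and no IPI is required. */
    static void poll_idle_loop(void)
    {
            set_thread_flag(TIF_POLLING_NRFLAG);    /* advertise: no IPI needed */
            while (!need_resched())
                    cpu_relax();                    /* spin until NEED_RESCHED is set */
            clear_thread_flag(TIF_POLLING_NRFLAG);  /* wakeups need the IPI again */
            schedule();
    }

Without the barrier, resched_task() could read a stale polling flag and skip the IPI while the remote CPU, having missed the NEED_RESCHED store, goes to sleep: both sides would skip the wakeup.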
@@ -3886,6 +3891,10 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask)
                        !capable(CAP_SYS_NICE))
                goto out_unlock;
 
+       retval = security_task_setscheduler(p, 0, NULL);
+       if (retval)
+               goto out_unlock;
+
        cpus_allowed = cpuset_cpus_allowed(p);
        cpus_and(new_mask, new_mask, cpus_allowed);
        retval = set_cpus_allowed(p, new_mask);
@@ -3954,7 +3963,10 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask)
        if (!p)
                goto out_unlock;
 
-       retval = 0;
+       retval = security_task_getscheduler(p);
+       if (retval)
+               goto out_unlock;
+
        cpus_and(*mask, p->cpus_allowed, cpu_online_map);
 
 out_unlock:
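
Both affinity paths gain security coverage here: sched_setaffinity() now consults the LSM before modifying cpus_allowed, and sched_getaffinity() replaces its unconditional "retval = 0" with a security_task_getscheduler() check. The set side reuses the scheduler-policy hook with policy == 0 and a NULL sched_param to mean "affinity change", so a module's callback has to tolerate that combination. A hypothetical pair of module callbacks (names invented for illustration, using the three-argument hook signature of this era):

    static int example_task_setscheduler(struct task_struct *p, int policy,
                                         struct sched_param *lp)
    {
            if (!lp)                /* policy == 0, lp == NULL: affinity change */
                    return 0;       /* permit */
            /* real policy changes would be checked here */
            return 0;
    }

    static int example_task_getscheduler(struct task_struct *p)
    {
            return 0;               /* read side: only the target task to inspect */
    }

A nonzero return from either hook propagates out through retval and aborts the syscall with that error.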
@@ -4046,6 +4058,9 @@ asmlinkage long sys_sched_yield(void)
 
 static inline void __cond_resched(void)
 {
+#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
+       __might_sleep(__FILE__, __LINE__);
+#endif
        /*
         * The BKS might be reacquired before we have dropped
         * PREEMPT_ACTIVE, which could trigger a second
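
Wiring __might_sleep() into __cond_resched() makes CONFIG_DEBUG_SPINLOCK_SLEEP catch callers that invoke cond_resched() from atomic context; previously the misuse went unnoticed whenever no reschedule actually happened. A sketch of the bug class this now reports (the driver and all its names are hypothetical):

    #include <linux/spinlock.h>
    #include <linux/list.h>
    #include <linux/sched.h>

    struct example_dev {
            spinlock_t lock;
            struct list_head queue;
    };

    extern void example_process_one(struct example_dev *dev); /* hypothetical */

    static void example_flush_queue(struct example_dev *dev)
    {
            spin_lock(&dev->lock);
            while (!list_empty(&dev->queue)) {
                    example_process_one(dev);
                    /* Wrong: may sleep while holding dev->lock.  With this
                     * patch, __might_sleep() prints "BUG: sleeping function
                     * called from invalid context" right here. */
                    cond_resched();
            }
            spin_unlock(&dev->lock);
    }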
@@ -4142,7 +4157,7 @@ EXPORT_SYMBOL(yield);
  */
 void __sched io_schedule(void)
 {
-       struct runqueue *rq = &per_cpu(runqueues, raw_smp_processor_id());
+       struct runqueue *rq = &__raw_get_cpu_var(runqueues);
 
        atomic_inc(&rq->nr_iowait);
        schedule();
@@ -4153,7 +4168,7 @@ EXPORT_SYMBOL(io_schedule);
 
 long __sched io_schedule_timeout(long timeout)
 {
-       struct runqueue *rq = &per_cpu(runqueues, raw_smp_processor_id());
+       struct runqueue *rq = &__raw_get_cpu_var(runqueues);
        long ret;
 
        atomic_inc(&rq->nr_iowait);
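
Both io_schedule() variants now spell the local-runqueue lookup as __raw_get_cpu_var(runqueues). On the generic fallback this expands to exactly the expression it replaces, so behaviour is unchanged; the gain is that an architecture keeping its per-CPU base in a register can override the macro and avoid computing raw_smp_processor_id() at all. The generic definitions of this period look roughly like the following (quoted from memory of asm-generic/percpu.h, so treat as illustrative):

    /* checked variant: debug builds warn if preemption could migrate us */
    #define __get_cpu_var(var)      per_cpu(var, smp_processor_id())
    /* raw variant: caller accepts a possibly-stale CPU, as io_schedule() does */
    #define __raw_get_cpu_var(var)  per_cpu(var, raw_smp_processor_id())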
@@ -4237,7 +4252,7 @@ long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
        if (retval)
                goto out_unlock;
 
-       jiffies_to_timespec(p->policy & SCHED_FIFO ?
+       jiffies_to_timespec(p->policy == SCHED_FIFO ?
                                0 : task_timeslice(p), &t);
        read_unlock(&tasklist_lock);
        retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
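
The one-character fix above matters because the scheduling policies are enumerated values, not bit flags: SCHED_NORMAL is 0, SCHED_FIFO is 1, SCHED_RR is 2, and SCHED_BATCH is 3. Testing with '&' therefore also matched SCHED_BATCH (3 & 1 == 1), so SCHED_BATCH tasks wrongly reported a zero round-robin interval; only SCHED_FIFO, which has no timeslice, should. A stand-alone user-space demonstration of the two tests:

    #include <stdio.h>

    /* policy constants as in include/linux/sched.h of this era */
    #define SCHED_NORMAL    0
    #define SCHED_FIFO      1
    #define SCHED_RR        2
    #define SCHED_BATCH     3

    int main(void)
    {
            int policy;

            /* the two tests disagree only for SCHED_BATCH */
            for (policy = SCHED_NORMAL; policy <= SCHED_BATCH; policy++)
                    printf("policy %d: '&' -> %d, '==' -> %d\n", policy,
                           (policy & SCHED_FIFO) != 0, policy == SCHED_FIFO);
            return 0;
    }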
@@ -4746,6 +4761,8 @@ static int migration_call(struct notifier_block *nfb, unsigned long action,
                break;
 #ifdef CONFIG_HOTPLUG_CPU
        case CPU_UP_CANCELED:
+               if (!cpu_rq(cpu)->migration_thread)
+                       break;
                /* Unbind it from offline cpu so it can run.  Fall thru. */
                kthread_bind(cpu_rq(cpu)->migration_thread,
                             any_online_cpu(cpu_online_map));
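
The new guard covers the case where CPU_UP_PREPARE failed before the migration thread was created, whether in this notifier or an earlier one in the chain: CPU_UP_CANCELED is still broadcast to every registered notifier, and kthread_bind(NULL, ...) would oops. The rule generalises to any hotplug callback; a hypothetical subsystem observing it (all example_* names invented):

    #include <linux/notifier.h>
    #include <linux/cpu.h>

    static void *example_state[NR_CPUS];

    static int example_cpu_callback(struct notifier_block *nfb,
                                    unsigned long action, void *hcpu)
    {
            int cpu = (long)hcpu;

            switch (action) {
            case CPU_UP_PREPARE:
                    example_state[cpu] = example_alloc_for(cpu);
                    if (!example_state[cpu])
                            return NOTIFY_BAD;      /* core then sends CPU_UP_CANCELED */
                    break;
            case CPU_UP_CANCELED:
                    if (!example_state[cpu])        /* our own UP_PREPARE failed */
                            break;                  /* nothing to tear down */
                    example_free(example_state[cpu]);
                    example_state[cpu] = NULL;
                    break;
            }
            return NOTIFY_OK;
    }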