sched: fix cpu hotplug, cleanup
[pandora-kernel.git] kernel/sched.c
index b048ad8..4e2f603 100644
@@ -4398,22 +4398,20 @@ do_wait_for_common(struct completion *x, long timeout, int state)
                             signal_pending(current)) ||
                            (state == TASK_KILLABLE &&
                             fatal_signal_pending(current))) {
-                               __remove_wait_queue(&x->wait, &wait);
-                               return -ERESTARTSYS;
+                               timeout = -ERESTARTSYS;
+                               break;
                        }
                        __set_current_state(state);
                        spin_unlock_irq(&x->wait.lock);
                        timeout = schedule_timeout(timeout);
                        spin_lock_irq(&x->wait.lock);
-                       if (!timeout) {
-                               __remove_wait_queue(&x->wait, &wait);
-                               return timeout;
-                       }
-               } while (!x->done);
+               } while (!x->done && timeout);
                __remove_wait_queue(&x->wait, &wait);
+               if (!x->done)
+                       return timeout;
        }
        x->done--;
-       return timeout;
+       return timeout ?: 1;
 }
 
 static long __sched
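
Note on the hunk above: the old code removed the wait-queue entry and returned from two separate places (signal delivery and timeout expiry). The rewrite funnels both through a single exit: the signal case stores -ERESTARTSYS in timeout and breaks, timeout expiry is picked up by the loop condition, and __remove_wait_queue() therefore runs exactly once. The final "return timeout ?: 1" keeps a wait that completed just as the timeout reached zero from being reported as a timeout. Below is a minimal userspace sketch of that return-value shape; wait_result() is an invented name, and "a ?: b" is the GNU C conditional with the middle operand omitted (it yields a when a is non-zero, otherwise b, evaluating a only once):

    #include <stdio.h>

    /* Invented helper mirroring the exit logic above: if the wait did not
     * complete, pass the raw timeout through (0 on expiry, negative on a
     * signal); if it did complete, never collapse the result to 0. */
    static long wait_result(long timeout, int done)
    {
            if (!done)
                    return timeout;
            return timeout ?: 1;
    }

    int main(void)
    {
            printf("%ld\n", wait_result(25, 1));  /* 25: completed with time left  */
            printf("%ld\n", wait_result(0, 1));   /* 1:  completed at the deadline */
            printf("%ld\n", wait_result(0, 0));   /* 0:  plain timeout             */
            return 0;
    }
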
@@ -5624,10 +5622,10 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
        double_rq_lock(rq_src, rq_dest);
        /* Already moved. */
        if (task_cpu(p) != src_cpu)
-               goto out;
+               goto done;
        /* Affinity changed (again). */
        if (!cpu_isset(dest_cpu, p->cpus_allowed))
-               goto out;
+               goto fail;
 
        on_rq = p->se.on_rq;
        if (on_rq)
@@ -5638,8 +5636,9 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
                activate_task(rq_dest, p, 0);
                check_preempt_curr(rq_dest, p);
        }
+done:
        ret = 1;
-out:
+fail:
        double_rq_unlock(rq_src, rq_dest);
        return ret;
 }
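
Note on the two __migrate_task() hunks above: the old single "out:" label sat after the skipped "ret = 1", so both early exits returned 0, even though a task that has already left the source cpu is not a migration failure. Splitting the label lets the already-moved case fall through to "ret = 1" while an affinity mismatch still returns 0. A toy illustration of the pattern, with invented names:

    #include <stdio.h>

    /* "Already satisfied" falls through to the success value; only a
     * genuine failure jumps past it. */
    static int migrate_one(int already_there, int dest_allowed)
    {
            int ret = 0;

            if (already_there)
                    goto done;      /* nothing to do, still a success */
            if (!dest_allowed)
                    goto fail;      /* real failure: affinity changed */

            /* the actual move would happen here */
    done:
            ret = 1;
    fail:
            return ret;
    }

    int main(void)
    {
            printf("%d\n", migrate_one(1, 0));  /* 1: already moved           */
            printf("%d\n", migrate_one(0, 0));  /* 0: destination not allowed */
            printf("%d\n", migrate_one(0, 1));  /* 1: moved normally          */
            return 0;
    }
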
@@ -5889,6 +5888,7 @@ static void migrate_dead_tasks(unsigned int dead_cpu)
                next = pick_next_task(rq, rq->curr);
                if (!next)
                        break;
+               next->sched_class->put_prev_task(rq, next);
                migrate_dead(dead_cpu, next);
 
        }
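
Note on the migrate_dead_tasks() hunk above: pick_next_task() hands out a task that the scheduling class still treats as in flight until its put_prev_task() hook runs; the added call balances each pick before migrate_dead() moves the task off the dead cpu, so class-internal state is not left pointing at it. A self-contained toy of that pick/put pairing invariant (all types and names below are invented, not the kernel API):

    #include <stdio.h>

    struct rq { int outstanding; };     /* picks not yet put back */
    struct task;

    struct sched_class {
            void (*put_prev_task)(struct rq *rq, struct task *p);
    };

    struct task { const struct sched_class *sched_class; };

    static void toy_put_prev_task(struct rq *rq, struct task *p)
    {
            (void)p;
            rq->outstanding--;
    }

    static const struct sched_class toy_class = {
            .put_prev_task = toy_put_prev_task,
    };

    static struct task *pick_next(struct rq *rq, struct task *pool, int n, int *i)
    {
            if (*i >= n)
                    return NULL;        /* runqueue drained */
            rq->outstanding++;
            return &pool[(*i)++];
    }

    int main(void)
    {
            struct task pool[3] = { { &toy_class }, { &toy_class }, { &toy_class } };
            struct rq rq = { 0 };
            int i = 0;
            struct task *next;

            while ((next = pick_next(&rq, pool, 3, &i)) != NULL) {
                    next->sched_class->put_prev_task(&rq, next);  /* pair the pick */
                    /* migrate_dead() would move the task away here */
            }
            printf("unbalanced picks: %d\n", rq.outstanding);     /* prints 0 */
            return 0;
    }
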
@@ -8503,6 +8503,9 @@ int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
        rt_period = (u64)rt_period_us * NSEC_PER_USEC;
        rt_runtime = tg->rt_bandwidth.rt_runtime;
 
+       if (rt_period == 0)
+               return -EINVAL;
+
        return tg_set_bandwidth(tg, rt_period, rt_runtime);
 }
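
Note on the hunk above: rt_period ends up as the divisor in the group bandwidth checks further down (the runtime/period ratio), so a zero value written by userspace has to be rejected before tg_set_bandwidth() runs. A userspace sketch of the guard (the function name is invented and the shift only mimics the kernel's fixed-point scaling):

    #include <stdio.h>
    #include <errno.h>

    /* Reject a zero period up front; it is the divisor of the ratio. */
    static long long rt_ratio(unsigned long long period_ns,
                              unsigned long long runtime_ns)
    {
            if (period_ns == 0)
                    return -EINVAL;

            return (long long)((runtime_ns << 20) / period_ns);
    }

    int main(void)
    {
            printf("%lld\n", rt_ratio(0, 950000));                    /* -22 (-EINVAL) */
            printf("%lld\n", rt_ratio(1000000000ULL, 950000000ULL));  /* 996147 */
            return 0;
    }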