diff --git a/kernel/sched.c b/kernel/sched.c
index b44b9a4..5c848fd 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -238,6 +238,7 @@ struct rq {
        /* For active balancing */
        int active_balance;
        int push_cpu;
+       int cpu;                /* cpu of this runqueue */
 
        struct task_struct *migration_thread;
        struct list_head migration_queue;
@@ -267,6 +268,15 @@ struct rq {
 
 static DEFINE_PER_CPU(struct rq, runqueues);
 
+static inline int cpu_of(struct rq *rq)
+{
+#ifdef CONFIG_SMP
+       return rq->cpu;
+#else
+       return 0;
+#endif
+}
+
 /*
  * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
  * See detach_destroy_domains: synchronize_sched for details.
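
The new rq->cpu field and the cpu_of() helper let the balancing code map a runqueue pointer back to the CPU that owns it; on !CONFIG_SMP builds there is only one runqueue, so 0 is returned unconditionally. A minimal sketch of the intended use, with a hypothetical cpu_of_example() inside this file (the real callers are in the load_balance() hunks further down):

    static void cpu_of_example(void)
    {
            cpumask_t candidates = CPU_MASK_ALL;
            struct rq *busiest = cpu_rq(3);         /* hypothetical remote runqueue */

            /* Drop the runqueue's own CPU from a candidate mask; this is
             * exactly what the load_balance() retry below does.  On UP
             * builds cpu_of() always yields 0. */
            cpu_clear(cpu_of(busiest), candidates);
    }
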
@@ -2211,7 +2221,8 @@ out:
  */
 static struct sched_group *
 find_busiest_group(struct sched_domain *sd, int this_cpu,
-                  unsigned long *imbalance, enum idle_type idle, int *sd_idle)
+                  unsigned long *imbalance, enum idle_type idle, int *sd_idle,
+                  cpumask_t *cpus)
 {
        struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
        unsigned long max_load, avg_load, total_load, this_load, total_pwr;
@@ -2248,7 +2259,12 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
                sum_weighted_load = sum_nr_running = avg_load = 0;
 
                for_each_cpu_mask(i, group->cpumask) {
-                       struct rq *rq = cpu_rq(i);
+                       struct rq *rq;
+
+                       if (!cpu_isset(i, *cpus))
+                               continue;
+
+                       rq = cpu_rq(i);
 
                        if (*sd_idle && !idle_cpu(i))
                                *sd_idle = 0;
@@ -2466,13 +2482,17 @@ ret:
  */
 static struct rq *
 find_busiest_queue(struct sched_group *group, enum idle_type idle,
-                  unsigned long imbalance)
+                  unsigned long imbalance, cpumask_t *cpus)
 {
        struct rq *busiest = NULL, *rq;
        unsigned long max_load = 0;
        int i;
 
        for_each_cpu_mask(i, group->cpumask) {
+
+               if (!cpu_isset(i, *cpus))
+                       continue;
+
                rq = cpu_rq(i);
 
                if (rq->nr_running == 1 && rq->raw_weighted_load > imbalance)
@@ -2511,6 +2531,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
        struct sched_group *group;
        unsigned long imbalance;
        struct rq *busiest;
+       cpumask_t cpus = CPU_MASK_ALL;
 
        if (idle != NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER &&
            !sched_smt_power_savings)
@@ -2518,13 +2539,15 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 
        schedstat_inc(sd, lb_cnt[idle]);
 
-       group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle);
+redo:
+       group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle,
+                                                       &cpus);
        if (!group) {
                schedstat_inc(sd, lb_nobusyg[idle]);
                goto out_balanced;
        }
 
-       busiest = find_busiest_queue(group, idle, imbalance);
+       busiest = find_busiest_queue(group, idle, imbalance, &cpus);
        if (!busiest) {
                schedstat_inc(sd, lb_nobusyq[idle]);
                goto out_balanced;
@@ -2549,8 +2572,12 @@ static int load_balance(int this_cpu, struct rq *this_rq,
                double_rq_unlock(this_rq, busiest);
 
                /* All tasks on this runqueue were pinned by CPU affinity */
-               if (unlikely(all_pinned))
+               if (unlikely(all_pinned)) {
+                       cpu_clear(cpu_of(busiest), cpus);
+                       if (!cpus_empty(cpus))
+                               goto redo;
                        goto out_balanced;
+               }
        }
 
        if (!nr_moved) {
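
Taken together with the cpus argument threaded through find_busiest_group() and find_busiest_queue() above, this turns load_balance() into a retry loop: the candidate mask starts as CPU_MASK_ALL, and whenever the selected busiest runqueue turns out to hold only pinned tasks, its CPU is cleared from the mask and the selection is redone until either something is moved or the mask is exhausted. A condensed control-flow sketch, in which try_to_pull_tasks() is a stand-in for the double_rq_lock()/move_tasks()/all_pinned handling and is not a real kernel function:

    static int try_to_pull_tasks(struct rq *busiest);  /* stand-in only */

    static int balance_sketch(struct sched_domain *sd, int this_cpu,
                              enum idle_type idle)
    {
            cpumask_t cpus = CPU_MASK_ALL;
            unsigned long imbalance;
            int sd_idle = 0;

            for (;;) {
                    struct sched_group *group;
                    struct rq *busiest;

                    group = find_busiest_group(sd, this_cpu, &imbalance,
                                               idle, &sd_idle, &cpus);
                    if (!group)
                            return 0;       /* nothing left to pull from */

                    busiest = find_busiest_queue(group, idle, imbalance, &cpus);
                    if (!busiest)
                            return 0;

                    if (try_to_pull_tasks(busiest))
                            return 1;       /* moved at least one task */

                    /* Every task on 'busiest' was pinned by affinity:
                     * drop its CPU from the mask and retry until the
                     * mask runs out. */
                    cpu_clear(cpu_of(busiest), cpus);
                    if (cpus_empty(cpus))
                            return 0;
            }
    }

The actual code expresses this with the redo label rather than an explicit loop, but the termination argument is the same: each retry removes at least one CPU from cpus.
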
@@ -2639,18 +2666,22 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
        unsigned long imbalance;
        int nr_moved = 0;
        int sd_idle = 0;
+       cpumask_t cpus = CPU_MASK_ALL;
 
        if (sd->flags & SD_SHARE_CPUPOWER && !sched_smt_power_savings)
                sd_idle = 1;
 
        schedstat_inc(sd, lb_cnt[NEWLY_IDLE]);
-       group = find_busiest_group(sd, this_cpu, &imbalance, NEWLY_IDLE, &sd_idle);
+redo:
+       group = find_busiest_group(sd, this_cpu, &imbalance, NEWLY_IDLE,
+                               &sd_idle, &cpus);
        if (!group) {
                schedstat_inc(sd, lb_nobusyg[NEWLY_IDLE]);
                goto out_balanced;
        }
 
-       busiest = find_busiest_queue(group, NEWLY_IDLE, imbalance);
+       busiest = find_busiest_queue(group, NEWLY_IDLE, imbalance,
+                               &cpus);
        if (!busiest) {
                schedstat_inc(sd, lb_nobusyq[NEWLY_IDLE]);
                goto out_balanced;
@@ -2668,6 +2699,12 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
                                        minus_1_or_zero(busiest->nr_running),
                                        imbalance, sd, NEWLY_IDLE, NULL);
                spin_unlock(&busiest->lock);
+
+               if (!nr_moved) {
+                       cpu_clear(cpu_of(busiest), cpus);
+                       if (!cpus_empty(cpus))
+                               goto redo;
+               }
        }
 
        if (!nr_moved) {
@@ -4162,10 +4199,8 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
                read_unlock_irq(&tasklist_lock);
                return -ESRCH;
        }
-       get_task_struct(p);
-       read_unlock_irq(&tasklist_lock);
        retval = sched_setscheduler(p, policy, &lparam);
-       put_task_struct(p);
+       read_unlock_irq(&tasklist_lock);
 
        return retval;
 }
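
The hunk above changes how do_sched_setscheduler() keeps the target task valid: rather than taking a temporary reference and dropping tasklist_lock before the call, it now holds the read lock across sched_setscheduler() and releases it afterwards. Side by side, the two idioms look like this (fragment only; p, policy and lparam are the locals already present in the function):

    /* old: pin the task with a reference, release the lock early */
    get_task_struct(p);
    read_unlock_irq(&tasklist_lock);
    retval = sched_setscheduler(p, policy, &lparam);
    put_task_struct(p);

    /* new: rely on tasklist_lock to keep p valid around the call */
    retval = sched_setscheduler(p, policy, &lparam);
    read_unlock_irq(&tasklist_lock);
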
@@ -4456,9 +4491,9 @@ asmlinkage long sys_sched_yield(void)
        return 0;
 }
 
-static inline int __resched_legal(void)
+static inline int __resched_legal(int expected_preempt_count)
 {
-       if (unlikely(preempt_count()))
+       if (unlikely(preempt_count() != expected_preempt_count))
                return 0;
        if (unlikely(system_state != SYSTEM_RUNNING))
                return 0;
@@ -4484,7 +4519,7 @@ static void __cond_resched(void)
 
 int __sched cond_resched(void)
 {
-       if (need_resched() && __resched_legal()) {
+       if (need_resched() && __resched_legal(0)) {
                __cond_resched();
                return 1;
        }
@@ -4510,7 +4545,7 @@ int cond_resched_lock(spinlock_t *lock)
                ret = 1;
                spin_lock(lock);
        }
-       if (need_resched() && __resched_legal()) {
+       if (need_resched() && __resched_legal(1)) {
                spin_release(&lock->dep_map, 1, _THIS_IP_);
                _raw_spin_unlock(lock);
                preempt_enable_no_resched();
@@ -4526,7 +4561,7 @@ int __sched cond_resched_softirq(void)
 {
        BUG_ON(!in_softirq());
 
-       if (need_resched() && __resched_legal()) {
+       if (need_resched() && __resched_legal(0)) {
                raw_local_irq_disable();
                _local_bh_enable();
                raw_local_irq_enable();
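
With the expected_preempt_count parameter added above, each cond_resched variant passes the preempt_count() value it legitimately expects at that point: cond_resched() and cond_resched_softirq() pass 0, while cond_resched_lock(), which is entered with a spinlock held, passes 1 to account for that lock. A hypothetical caller of cond_resched_lock(), for illustration; table_lock, TABLE_SIZE, table[] and scan_bucket() do not exist in the kernel:

    #define TABLE_SIZE 1024                         /* hypothetical */
    static DEFINE_SPINLOCK(table_lock);
    static struct hlist_head table[TABLE_SIZE];

    static void scan_bucket(struct hlist_head *bucket)
    {
            /* hypothetical per-bucket work */
    }

    static void scan_table_example(void)
    {
            int i;

            spin_lock(&table_lock);
            for (i = 0; i < TABLE_SIZE; i++) {
                    scan_bucket(&table[i]);
                    /* May drop table_lock, schedule and re-acquire it;
                     * returns nonzero if it did.  On preemptible kernels
                     * the held lock shows up as the one preempt_count
                     * level that __resched_legal(1) expects. */
                    cond_resched_lock(&table_lock);
            }
            spin_unlock(&table_lock);
    }
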
@@ -6494,7 +6529,12 @@ static int build_sched_domains(const cpumask_t *cpu_map)
        for (i = 0; i < MAX_NUMNODES; i++)
                init_numa_sched_groups_power(sched_group_nodes[i]);
 
-       init_numa_sched_groups_power(sched_group_allnodes);
+       if (sched_group_allnodes) {
+               int group = cpu_to_allnodes_group(first_cpu(*cpu_map));
+               struct sched_group *sg = &sched_group_allnodes[group];
+
+               init_numa_sched_groups_power(sg);
+       }
 #endif
 
        /* Attach the domains */
@@ -6744,6 +6784,7 @@ void __init sched_init(void)
                        rq->cpu_load[j] = 0;
                rq->active_balance = 0;
                rq->push_cpu = 0;
+               rq->cpu = i;
                rq->migration_thread = NULL;
                INIT_LIST_HEAD(&rq->migration_queue);
 #endif
@@ -6761,6 +6802,11 @@ void __init sched_init(void)
        }
 
        set_load_weight(&init_task);
+
+#ifdef CONFIG_RT_MUTEXES
+       plist_head_init(&init_task.pi_waiters, &init_task.pi_lock);
+#endif
+
        /*
         * The boot idle thread does lazy MMU switching as well:
         */
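
init_task is assembled statically and never goes through the regular copy_process() path, so with CONFIG_RT_MUTEXES its priority-inheritance waiter list has to be set up here by hand before the boot thread can take any rt_mutex. For reference, a minimal sketch of the plist API as used above (my_lock, my_head and my_node are hypothetical; in this kernel the spinlock passed to plist_head_init() is the lock protecting the list, recorded for list-debugging checks):

    static DEFINE_SPINLOCK(my_lock);
    static struct plist_head my_head;
    static struct plist_node my_node;

    static void plist_example(void)
    {
            plist_head_init(&my_head, &my_lock);
            plist_node_init(&my_node, 10);          /* priority 10 */

            spin_lock(&my_lock);
            plist_add(&my_node, &my_head);          /* kept sorted by prio */
            spin_unlock(&my_lock);
    }
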