Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 9fceb97..d573891 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -564,7 +564,7 @@ update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
  *     partition_sched_domains().
  */
 
-static void rebuild_sched_domains(void)
+void rebuild_sched_domains(void)
 {
        struct kfifo *q;        /* queue of cpusets to be scanned */
        struct cpuset *cp;      /* scans q */
@@ -679,7 +679,9 @@ restart:
                                if (apn == b->pn) {
                                        cpus_or(*dp, *dp, b->cpus_allowed);
                                        b->pn = -1;
-                                       update_domain_attr(dattr, b);
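+                                       /* dattr may be NULL; it holds
+                                        * one entry per domain slot */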
+                                       if (dattr)
+                                               update_domain_attr(dattr
+                                                                  + nslot, b);
                                }
                        }
                        nslot++;
@@ -1194,6 +1196,15 @@ static int cpuset_can_attach(struct cgroup_subsys *ss,
 
        if (cpus_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
                return -ENOSPC;
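+       /*
+        * A task flagged PF_THREAD_BOUND is pinned to a single CPU and its
+        * affinity may not be changed, so only allow the attach when the
+        * cpuset's CPUs match the task's current mask.
+        */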
+       if (tsk->flags & PF_THREAD_BOUND) {
+               cpumask_t mask;
+
+               mutex_lock(&callback_mutex);
+               mask = cs->cpus_allowed;
+               mutex_unlock(&callback_mutex);
+               if (!cpus_equal(tsk->cpus_allowed, mask))
+                       return -EINVAL;
+       }
 
        return security_task_setscheduler(tsk, 0, NULL);
 }
@@ -1207,11 +1218,14 @@ static void cpuset_attach(struct cgroup_subsys *ss,
        struct mm_struct *mm;
        struct cpuset *cs = cgroup_cs(cont);
        struct cpuset *oldcs = cgroup_cs(oldcont);
+       int err;
 
        mutex_lock(&callback_mutex);
        guarantee_online_cpus(cs, &cpus);
-       set_cpus_allowed_ptr(tsk, &cpus);
+       err = set_cpus_allowed_ptr(tsk, &cpus);
        mutex_unlock(&callback_mutex);
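+       /* Don't touch memory placement if the affinity change failed. */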
+       if (err)
+               return;
 
        from = oldcs->mems_allowed;
        to = cs->mems_allowed;
@@ -1882,7 +1896,7 @@ static void scan_for_empty_cpusets(const struct cpuset *root)
  * in order to minimize text size.
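+ *
+ * 'rebuild_sd' requests a sched domain rebuild as well; it is set for
+ * CPU hotplug but not for memory hotplug.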
  */
 
-static void common_cpu_mem_hotplug_unplug(void)
+static void common_cpu_mem_hotplug_unplug(int rebuild_sd)
 {
        cgroup_lock();
 
@@ -1894,7 +1908,8 @@ static void common_cpu_mem_hotplug_unplug(void)
         * Scheduler destroys domains on hotplug events.
         * Rebuild them based on the current settings.
         */
-       rebuild_sched_domains();
+       if (rebuild_sd)
+               rebuild_sched_domains();
 
        cgroup_unlock();
 }
@@ -1912,11 +1927,22 @@ static void common_cpu_mem_hotplug_unplug(void)
 static int cpuset_handle_cpuhp(struct notifier_block *unused_nb,
                                unsigned long phase, void *unused_cpu)
 {
-       if (phase == CPU_DYING || phase == CPU_DYING_FROZEN)
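+       /*
+        * Only rebuild once a hotplug operation has completed or been
+        * rolled back; transitional phases are ignored.
+        */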
+       switch (phase) {
+       case CPU_UP_CANCELED:
+       case CPU_UP_CANCELED_FROZEN:
+       case CPU_DOWN_FAILED:
+       case CPU_DOWN_FAILED_FROZEN:
+       case CPU_ONLINE:
+       case CPU_ONLINE_FROZEN:
+       case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
+               common_cpu_mem_hotplug_unplug(1);
+               break;
+       default:
                return NOTIFY_DONE;
+       }
 
-       common_cpu_mem_hotplug_unplug();
-       return 0;
+       return NOTIFY_OK;
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
@@ -1929,7 +1955,7 @@ static int cpuset_handle_cpuhp(struct notifier_block *unused_nb,
 
 void cpuset_track_online_nodes(void)
 {
-       common_cpu_mem_hotplug_unplug();
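+       /* Memory hotplug does not require a sched domain rebuild. */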
+       common_cpu_mem_hotplug_unplug(0);
 }
 #endif