Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6
[pandora-kernel.git] / kernel / cgroup.c
index 97194ba..7ccba4b 100644 (file)
@@ -50,6 +50,7 @@
 #include <linux/smp_lock.h>
 #include <linux/pid_namespace.h>
 #include <linux/idr.h>
+#include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */
 
 #include <asm/atomic.h>
 
@@ -266,6 +267,12 @@ static struct hlist_head *css_set_hash(struct cgroup_subsys_state *css[])
        return &css_set_table[index];
 }
 
+static void free_css_set_rcu(struct rcu_head *obj)
+{
+       struct css_set *cg = container_of(obj, struct css_set, rcu_head);
+       kfree(cg);
+}
+
 /* We don't maintain the lists running through each css_set to its
  * task until after the first call to cgroup_iter_start(). This
  * reduces the fork()/exit() overhead for people who have cgroups
@@ -309,7 +316,7 @@ static void __put_css_set(struct css_set *cg, int taskexit)
        }
 
        write_unlock(&css_set_lock);
-       kfree(cg);
+       call_rcu(&cg->rcu_head, free_css_set_rcu);
 }
 
 /*
@@ -1545,7 +1552,7 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 
        for_each_subsys(root, ss) {
                if (ss->can_attach) {
-                       retval = ss->can_attach(ss, cgrp, tsk);
+                       retval = ss->can_attach(ss, cgrp, tsk, false);
                        if (retval)
                                return retval;
                }
@@ -1583,7 +1590,7 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 
        for_each_subsys(root, ss) {
                if (ss->attach)
-                       ss->attach(ss, cgrp, oldcgrp, tsk);
+                       ss->attach(ss, cgrp, oldcgrp, tsk, false);
        }
        set_bit(CGRP_RELEASABLE, &oldcgrp->flags);
        synchronize_rcu();
@@ -2350,6 +2357,42 @@ int cgroup_scan_tasks(struct cgroup_scanner *scan)
  *
  */
 
+/*
+ * The following two functions "fix" the issue where there are more pids
+ * than kmalloc will give memory for; in such cases, we use vmalloc/vfree.
+ * TODO: replace with a kernel-wide solution to this problem
+ */
+#define PIDLIST_TOO_LARGE(c) ((c) * sizeof(pid_t) > (PAGE_SIZE * 2))
+static void *pidlist_allocate(int count)
+{
+       if (PIDLIST_TOO_LARGE(count))
+               return vmalloc(count * sizeof(pid_t));
+       else
+               return kmalloc(count * sizeof(pid_t), GFP_KERNEL);
+}
/*
 * Release a buffer obtained from pidlist_allocate(), routing it back to
 * whichever allocator (vmalloc or kmalloc) originally provided it.
 */
static void pidlist_free(void *p)
{
	if (is_vmalloc_addr(p)) {
		vfree(p);
		return;
	}
	kfree(p);
}
+static void *pidlist_resize(void *p, int newcount)
+{
+       void *newlist;
+       /* note: if new alloc fails, old p will still be valid either way */
+       if (is_vmalloc_addr(p)) {
+               newlist = vmalloc(newcount * sizeof(pid_t));
+               if (!newlist)
+                       return NULL;
+               memcpy(newlist, p, newcount * sizeof(pid_t));
+               vfree(p);
+       } else {
+               newlist = krealloc(p, newcount * sizeof(pid_t), GFP_KERNEL);
+       }
+       return newlist;
+}
+
 /*
  * pidlist_uniq - given a kmalloc()ed list, strip out all duplicate entries
  * If the new stripped list is sufficiently smaller and there's enough memory
@@ -2389,7 +2432,7 @@ after:
         * we'll just stay with what we've got.
         */
        if (PIDLIST_REALLOC_DIFFERENCE(length, dest)) {
-               newlist = krealloc(list, dest * sizeof(pid_t), GFP_KERNEL);
+               newlist = pidlist_resize(list, dest);
                if (newlist)
                        *p = newlist;
        }
@@ -2470,7 +2513,7 @@ static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
         * show up until sometime later on.
         */
        length = cgroup_task_count(cgrp);
-       array = kmalloc(length * sizeof(pid_t), GFP_KERNEL);
+       array = pidlist_allocate(length);
        if (!array)
                return -ENOMEM;
        /* now, populate the array */
@@ -2494,11 +2537,11 @@ static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
                length = pidlist_uniq(&array, length);
        l = cgroup_pidlist_find(cgrp, type);
        if (!l) {
-               kfree(array);
+               pidlist_free(array);
                return -ENOMEM;
        }
        /* store array, freeing old if necessary - lock already held */
-       kfree(l->list);
+       pidlist_free(l->list);
        l->list = array;
        l->length = length;
        l->use_count++;
@@ -2659,7 +2702,7 @@ static void cgroup_release_pid_array(struct cgroup_pidlist *l)
                /* we're the last user if refcount is 0; remove and free */
                list_del(&l->links);
                mutex_unlock(&l->owner->pidlist_mutex);
-               kfree(l->list);
+               pidlist_free(l->list);
                put_pid_ns(l->key.ns);
                up_write(&l->mutex);
                kfree(l);