memcg: new force_empty to free pages under group
[pandora-kernel.git] / mm / memcontrol.c
index e00f25e..decace3 100644 (file)
@@ -1062,21 +1062,27 @@ static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
  * make mem_cgroup's charge to be 0 if there is no task.
  * This enables deleting this mem_cgroup.
  */
-static int mem_cgroup_force_empty(struct mem_cgroup *mem)
+static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
 {
        int ret;
        int node, zid, shrink;
        int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
+       struct cgroup *cgrp = mem->css.cgroup;
 
        css_get(&mem->css);
 
        shrink = 0;
+       /* should we free all pages? */
+       if (free_all)
+               goto try_to_free;
 move_account:
        while (mem->res.usage > 0) {
                ret = -EBUSY;
-               if (atomic_read(&mem->css.cgroup->count) > 0)
+               if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
+                       goto out;
+               ret = -EINTR;
+               if (signal_pending(current))
                        goto out;
-
                /* This is for making all *used* pages to be on LRU. */
                lru_add_drain_all();
                ret = 0;
@@ -1106,19 +1112,29 @@ out:
        return ret;
 
 try_to_free:
-       /* returns EBUSY if we come here twice. */
-       if (shrink)  {
+       /* returns EBUSY if there is a task or if we come here twice. */
+       if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
                ret = -EBUSY;
                goto out;
        }
+       /* we call try-to-free pages to make this cgroup empty */
+       lru_add_drain_all();
        /* try to free all pages in this cgroup */
        shrink = 1;
        while (nr_retries && mem->res.usage > 0) {
                int progress;
+
+               if (signal_pending(current)) {
+                       ret = -EINTR;
+                       goto out;
+               }
                progress = try_to_free_mem_cgroup_pages(mem,
                                                  GFP_HIGHUSER_MOVABLE);
-               if (!progress)
+               if (!progress) {
                        nr_retries--;
+                       /* maybe some writeback is necessary */
+                       congestion_wait(WRITE, HZ/10);
+               }
 
        }
        /* try move_account...there may be some *locked* pages. */
@@ -1128,6 +1144,12 @@ try_to_free:
        goto out;
 }
 
+int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
+{
+       return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
+}
+
+
 static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
 {
        return res_counter_read_u64(&mem_cgroup_from_cont(cont)->res,
@@ -1225,6 +1247,7 @@ static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
        return 0;
 }
 
+
 static struct cftype mem_cgroup_files[] = {
        {
                .name = "usage_in_bytes",
@@ -1253,6 +1276,10 @@ static struct cftype mem_cgroup_files[] = {
                .name = "stat",
                .read_map = mem_control_stat_show,
        },
+       {
+               .name = "force_empty",
+               .trigger = mem_cgroup_force_empty_write,
+       },
 };
 
 static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
@@ -1350,7 +1377,7 @@ static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
                                        struct cgroup *cont)
 {
        struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
-       mem_cgroup_force_empty(mem);
+       mem_cgroup_force_empty(mem, false);
 }
 
 static void mem_cgroup_destroy(struct cgroup_subsys *ss,