Merge branch 'cgroup/for-3.14-fixes' into cgroup/for-3.15
author    Tejun Heo <tj@kernel.org>
          Tue, 25 Feb 2014 14:56:49 +0000 (09:56 -0500)
committer Tejun Heo <tj@kernel.org>
          Tue, 25 Feb 2014 14:56:49 +0000 (09:56 -0500)
Pull in for-3.14-fixes to receive 532de3fc72ad ("cgroup: update
cgroup_enable_task_cg_lists() to grab siglock") which conflicts with
afeb0f9fd425 ("cgroup: relocate cgroup_enable_task_cg_lists()") and
the following cg_lists updates.  This is likely to cause further
conflicts down the line too, so let's merge it early.

As cgroup_enable_task_cg_lists() is relocated in for-3.15, this merge
causes a conflict at the function's original position.  It's resolved by
applying the siglock changes to the updated version in its new location,
as sketched below.
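The net effect on the relocated helper is small; a condensed sketch of
the resolved hunk (the surrounding css_set_rwsem/tasklist_lock context is
elided here, see the combined diff below for the full version):

	/* take siglock so the PF_EXITING check can't race cgroup_exit() */
	spin_lock_irq(&p->sighand->siglock);
	if (!(p->flags & PF_EXITING))
		list_add(&p->cg_list, &task_css_set(p)->tasks);
	spin_unlock_irq(&p->sighand->siglock);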

Conflicts:
kernel/cgroup.c

Signed-off-by: Tejun Heo <tj@kernel.org>

diff --cc kernel/cgroup.c
@@@ -1294,53 -1336,12 +1294,57 @@@ static int cgroup_remount(struct kernfs
        return ret;
  }
  
 -static const struct super_operations cgroup_ops = {
 -      .statfs = simple_statfs,
 -      .drop_inode = generic_delete_inode,
 -      .show_options = cgroup_show_options,
 -      .remount_fs = cgroup_remount,
 -};
 +/*
 + * To reduce the fork() overhead for systems that are not actually using
 + * their cgroups capability, we don't maintain the lists running through
 + * each css_set to its tasks until we see the list actually used - in other
 + * words after the first mount.
 + */
 +static bool use_task_css_set_links __read_mostly;
 +
 +static void cgroup_enable_task_cg_lists(void)
 +{
 +      struct task_struct *p, *g;
 +
 +      down_write(&css_set_rwsem);
 +
 +      if (use_task_css_set_links)
 +              goto out_unlock;
 +
 +      use_task_css_set_links = true;
 +
 +      /*
 +       * We need tasklist_lock because RCU is not safe against
 +       * while_each_thread(). Besides, a forking task that has passed
 +       * cgroup_post_fork() without seeing use_task_css_set_links = 1
 +       * is not guaranteed to have its child immediately visible in the
 +       * tasklist if we walk through it with RCU.
 +       */
 +      read_lock(&tasklist_lock);
 +      do_each_thread(g, p) {
 +              task_lock(p);
 +
 +              WARN_ON_ONCE(!list_empty(&p->cg_list) ||
 +                           task_css_set(p) != &init_css_set);
 +
 +              /*
 +               * We should check if the process is exiting, otherwise
 +               * it will race with cgroup_exit() in that the list
 +               * entry won't be deleted though the process has exited.
++               * Do it while holding siglock so that we don't end up
++               * racing against cgroup_exit().
 +               */
++              spin_lock_irq(&p->sighand->siglock);
 +              if (!(p->flags & PF_EXITING))
 +                      list_add(&p->cg_list, &task_css_set(p)->tasks);
++              spin_unlock_irq(&p->sighand->siglock);
 +
 +              task_unlock(p);
 +      } while_each_thread(g, p);
 +      read_unlock(&tasklist_lock);
 +out_unlock:
 +      up_write(&css_set_rwsem);
 +}
  
  static void init_cgroup_housekeeping(struct cgroup *cgrp)
  {