path: root/kernel
author	Li Zefan <lizefan@huawei.com>	2014-06-29 23:49:58 -0400
committer	Tejun Heo <tj@kernel.org>	2014-06-30 10:16:25 -0400
commit	970317aa48c6ef66cd023c039c2650c897bad927 (patch)
tree	6308e957b9690395717c0894dd339505d1071d9a /kernel
parent	391acf970d21219a2a5446282d3b20eace0c0d7a (diff)
cgroup: fix mount failure in a corner case
# cat test.sh
#! /bin/bash

mount -t cgroup -o cpu xxx /cgroup
umount /cgroup

mount -t cgroup -o cpu,cpuacct xxx /cgroup
umount /cgroup

# ./test.sh
mount: xxx already mounted or /cgroup busy
mount: according to mtab, xxx is already mounted on /cgroup

It's because the cgroupfs_root of the first mount was under destruction
asynchronously.

Fix this by delaying and then retrying mount for this case.

v3:
- put the refcnt immediately after getting it. (Tejun)

v2:
- use percpu_ref_tryget_live() rather than introducing percpu_ref_alive().
  (Tejun)
- adjust comment.

tj: Updated the comment a bit.

Cc: <stable@vger.kernel.org> # 3.15
Signed-off-by: Li Zefan <lizefan@huawei.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
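To make the control flow of the fix easier to follow, here is a commented restatement of the drain-and-retry loop the patch adds to cgroup_mount(). This is only a sketch: it reuses the cgroup-internal names that appear in the diff below (for_each_subsys, cgrp_dfl_root, cgroup_put) and is not a standalone, buildable unit.

	/* Assumed headers: <linux/delay.h> for msleep(), <linux/sched.h> for
	 * restart_syscall(), <linux/percpu-refcount.h> for
	 * percpu_ref_tryget_live().  Runs inside cgroup_mount() with
	 * cgroup_mutex held. */
	for_each_subsys(ss, i) {
		/* Only roots bound to subsystems requested by this mount can
		 * collide; subsystems still on the default hierarchy are fine. */
		if (!(opts.subsys_mask & (1 << i)) ||
		    ss->root == &cgrp_dfl_root)
			continue;

		/* A "live" tryget fails once the root has begun dying, which is
		 * exactly the state left behind by the previous umount. */
		if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt)) {
			mutex_unlock(&cgroup_mutex);	/* don't sleep with the mutex held */
			msleep(10);			/* give destruction time to finish */
			ret = restart_syscall();	/* retry the mount instead of failing */
			goto out_free;
		}
		/* The root is alive; only the liveness test was needed, so drop
		 * the temporary reference immediately. */
		cgroup_put(&ss->root->cgrp);
	}

The mutex is dropped before sleeping so the backoff doesn't hold up other cgroup operations, and restart_syscall() causes the original mount(2) call to be restarted instead of returning a transient error to userspace.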
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/cgroup.c	23
1 file changed, 23 insertions, 0 deletions
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index d9a8be911f5b..64068667be84 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1648,10 +1648,12 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
 			 int flags, const char *unused_dev_name,
 			 void *data)
 {
+	struct cgroup_subsys *ss;
 	struct cgroup_root *root;
 	struct cgroup_sb_opts opts;
 	struct dentry *dentry;
 	int ret;
+	int i;
 	bool new_sb;
 
 	/*
@@ -1677,6 +1679,27 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
 		goto out_unlock;
 	}
 
+	/*
+	 * Destruction of cgroup root is asynchronous, so subsystems may
+	 * still be dying after the previous unmount.  Let's drain the
+	 * dying subsystems.  We just need to ensure that the ones
+	 * unmounted previously finish dying and don't care about new ones
+	 * starting.  Testing ref liveliness is good enough.
+	 */
+	for_each_subsys(ss, i) {
+		if (!(opts.subsys_mask & (1 << i)) ||
+		    ss->root == &cgrp_dfl_root)
+			continue;
+
+		if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt)) {
+			mutex_unlock(&cgroup_mutex);
+			msleep(10);
+			ret = restart_syscall();
+			goto out_free;
+		}
+		cgroup_put(&ss->root->cgrp);
+	}
+
 	for_each_root(root) {
 		bool name_match = false;
 