author          Tejun Heo <tj@kernel.org>       2014-05-13 12:19:23 -0400
committer       Tejun Heo <tj@kernel.org>       2014-05-13 12:19:23 -0400
commit          8353da1f91f12a3079ecc849226f371242d2807c (patch)
tree            8df00b7bc0b46b850099e7198bdecbc02f36a9ab /kernel/cgroup.c
parent          01f6474ce04fffd6282b569ac0a31f4b98d4c82a (diff)
cgroup: remove cgroup_tree_mutex
cgroup_tree_mutex was introduced to work around the circular
dependency between cgroup_mutex and kernfs active protection: some
kernfs file and directory operations need cgroup_mutex, which puts
cgroup_mutex under active protection, but cgroup also needs to be able
to access cgroup hierarchies and cftypes to determine which
kernfs_nodes need to be removed. cgroup_tree_mutex nested above both
cgroup_mutex and kernfs active protection and was used to protect the
hierarchy and cftypes. While this worked, it added a lot of double
locking and was generally cumbersome.
kernfs provides a mechanism to opt out of active protection, and cgroup
was already using it for removal and subtree_control. There's no
reason to mix two different methods of avoiding the circular locking
dependency, and the preceding cgroup_kn_lock_live() changes applied the
opt-out to all relevant cgroup kernfs operations, making it unnecessary
to nest cgroup_mutex under kernfs active protection. The previous
patch then reversed the original lock ordering and put cgroup_mutex
above kernfs active protection.
After these changes, every remaining cgroup_tree_mutex usage is
accompanied by cgroup_mutex, making the former completely redundant.
This patch removes cgroup_tree_mutex and all its usages.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Li Zefan <lizefan@huawei.com>
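The kernfs opt-out described above is centralized in cgroup_kn_lock_live() and cgroup_kn_unlock(), whose relevant lines appear in the hunks below. The following is only a simplified sketch abridged from those hunks, not the full kernel/cgroup.c code; the real functions also distinguish a cgroup's own kernfs_node from that of one of its files (kn->priv vs kn->parent->priv) and handle a few more details:

/* Sketch only: abridged from the hunks in this patch. */
static void cgroup_kn_unlock(struct kernfs_node *kn)
{
        struct cgroup *cgrp = kn->priv;  /* real code may use kn->parent->priv */

        mutex_unlock(&cgroup_mutex);

        /* restore kernfs active protection and drop the pinning reference */
        kernfs_unbreak_active_protection(kn);
        cgroup_put(cgrp);
}

static struct cgroup *cgroup_kn_lock_live(struct kernfs_node *kn)
{
        struct cgroup *cgrp = kn->priv;  /* real code may use kn->parent->priv */

        /*
         * Pin the cgroup and drop out of kernfs active protection first,
         * so that taking cgroup_mutex (which now nests outside the kernfs
         * active ref) cannot deadlock against kernfs node removal.
         */
        cgroup_get(cgrp);
        kernfs_break_active_protection(kn);

        mutex_lock(&cgroup_mutex);

        if (!cgroup_is_dead(cgrp))
                return cgrp;

        cgroup_kn_unlock(kn);
        return NULL;
}

Because every relevant cgroup kernfs operation funnels through this pair, cgroup_mutex is never acquired while an active reference is still held, and the outer tree mutex has nothing left to order.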
Diffstat (limited to 'kernel/cgroup.c')
-rw-r--r--      kernel/cgroup.c     64
1 file changed, 9 insertions(+), 55 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index bf1d7ce250ac..457e52705f56 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -71,15 +71,6 @@
                                          MAX_CFTYPE_NAME + 2)
 
 /*
- * cgroup_tree_mutex nests above cgroup_mutex and protects cftypes, file
- * creation/removal and hierarchy changing operations including cgroup
- * creation, removal, css association and controller rebinding. This outer
- * lock is needed mainly to resolve the circular dependency between kernfs
- * active ref and cgroup_mutex. cgroup_tree_mutex nests above both.
- */
-static DEFINE_MUTEX(cgroup_tree_mutex);
-
-/*
  * cgroup_mutex is the master lock. Any modification to cgroup or its
  * hierarchy must be performed while holding it.
  *
@@ -111,11 +102,10 @@ static DEFINE_SPINLOCK(cgroup_idr_lock);
  */
 static DEFINE_SPINLOCK(release_agent_path_lock);
 
-#define cgroup_assert_mutexes_or_rcu_locked()                          \
+#define cgroup_assert_mutex_or_rcu_locked()                            \
        rcu_lockdep_assert(rcu_read_lock_held() ||                      \
-                          lockdep_is_held(&cgroup_tree_mutex) ||       \
                           lockdep_is_held(&cgroup_mutex),              \
-                          "cgroup_[tree_]mutex or RCU read lock required");
+                          "cgroup_mutex or RCU read lock required");
 
 /*
  * cgroup destruction makes heavy use of work items and there can be a lot
@@ -243,7 +233,6 @@ static struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp,
 {
        if (ss)
                return rcu_dereference_check(cgrp->subsys[ss->id],
-                                       lockdep_is_held(&cgroup_tree_mutex) ||
                                        lockdep_is_held(&cgroup_mutex));
        else
                return &cgrp->dummy_css;
@@ -347,7 +336,6 @@ static int notify_on_release(const struct cgroup *cgrp)
        for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++)        \
                if (!((css) = rcu_dereference_check(                    \
                                (cgrp)->subsys[(ssid)],                 \
-                               lockdep_is_held(&cgroup_tree_mutex) ||  \
                                lockdep_is_held(&cgroup_mutex)))) { }   \
                else
 
@@ -381,7 +369,7 @@ static int notify_on_release(const struct cgroup *cgrp)
 /* iterate over child cgrps, lock should be held throughout iteration */
 #define cgroup_for_each_live_child(child, cgrp)                        \
        list_for_each_entry((child), &(cgrp)->children, sibling)        \
-               if (({ lockdep_assert_held(&cgroup_tree_mutex);         \
+               if (({ lockdep_assert_held(&cgroup_mutex);              \
                       cgroup_is_dead(child); }))                       \
                        ;                                               \
                else
@@ -869,7 +857,6 @@ static void cgroup_destroy_root(struct cgroup_root *root)
        struct cgroup *cgrp = &root->cgrp;
        struct cgrp_cset_link *link, *tmp_link;
 
-       mutex_lock(&cgroup_tree_mutex);
        mutex_lock(&cgroup_mutex);
 
        BUG_ON(atomic_read(&root->nr_cgrps));
@@ -899,7 +886,6 @@ static void cgroup_destroy_root(struct cgroup_root *root)
        cgroup_exit_root_id(root);
 
        mutex_unlock(&cgroup_mutex);
-       mutex_unlock(&cgroup_tree_mutex);
 
        kernfs_destroy_root(root->kf_root);
        cgroup_free_root(root);
@@ -1096,7 +1082,6 @@ static void cgroup_kn_unlock(struct kernfs_node *kn)
                cgrp = kn->parent->priv;
 
        mutex_unlock(&cgroup_mutex);
-       mutex_unlock(&cgroup_tree_mutex);
 
        kernfs_unbreak_active_protection(kn);
        cgroup_put(cgrp);
@@ -1135,7 +1120,6 @@ static struct cgroup *cgroup_kn_lock_live(struct kernfs_node *kn)
        cgroup_get(cgrp);
        kernfs_break_active_protection(kn);
 
-       mutex_lock(&cgroup_tree_mutex);
        mutex_lock(&cgroup_mutex);
 
        if (!cgroup_is_dead(cgrp))
@@ -1149,7 +1133,6 @@ static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
 {
        char name[CGROUP_FILE_NAME_MAX];
 
-       lockdep_assert_held(&cgroup_tree_mutex);
        lockdep_assert_held(&cgroup_mutex);
        kernfs_remove_by_name(cgrp->kn, cgroup_file_name(cgrp, cft, name));
 }
@@ -1179,7 +1162,6 @@ static int rebind_subsystems(struct cgroup_root *dst_root, unsigned int ss_mask)
        struct cgroup_subsys *ss;
        int ssid, i, ret;
 
-       lockdep_assert_held(&cgroup_tree_mutex);
        lockdep_assert_held(&cgroup_mutex);
 
        for_each_subsys(ss, ssid) {
@@ -1457,7 +1439,6 @@ static int cgroup_remount(struct kernfs_root *kf_root, int *flags, char *data)
                return -EINVAL;
        }
 
-       mutex_lock(&cgroup_tree_mutex);
        mutex_lock(&cgroup_mutex);
 
        /* See what subsystems are wanted */
@@ -1503,7 +1484,6 @@ static int cgroup_remount(struct kernfs_root *kf_root, int *flags, char *data)
        kfree(opts.release_agent);
        kfree(opts.name);
        mutex_unlock(&cgroup_mutex);
-       mutex_unlock(&cgroup_tree_mutex);
        return ret;
 }
 
@@ -1606,7 +1586,6 @@ static int cgroup_setup_root(struct cgroup_root *root, unsigned int ss_mask)
        struct css_set *cset;
        int i, ret;
 
-       lockdep_assert_held(&cgroup_tree_mutex);
        lockdep_assert_held(&cgroup_mutex);
 
        ret = cgroup_idr_alloc(&root->cgroup_idr, root_cgrp, 1, 2, GFP_NOWAIT);
@@ -1696,7 +1675,6 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
        if (!use_task_css_set_links)
                cgroup_enable_task_cg_lists();
 
-       mutex_lock(&cgroup_tree_mutex);
        mutex_lock(&cgroup_mutex);
 
        /* First find the desired set of subsystems */
@@ -1761,9 +1739,7 @@ retry:
         */
        if (!atomic_inc_not_zero(&root->cgrp.refcnt)) {
                mutex_unlock(&cgroup_mutex);
-               mutex_unlock(&cgroup_tree_mutex);
                msleep(10);
-               mutex_lock(&cgroup_tree_mutex);
                mutex_lock(&cgroup_mutex);
                goto retry;
        }
@@ -1796,7 +1772,6 @@ retry:
 
 out_unlock:
        mutex_unlock(&cgroup_mutex);
-       mutex_unlock(&cgroup_tree_mutex);
 
        kfree(opts.release_agent);
        kfree(opts.name);
@@ -2507,7 +2482,6 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
        struct css_set *src_cset;
        int ret;
 
-       lockdep_assert_held(&cgroup_tree_mutex);
        lockdep_assert_held(&cgroup_mutex);
 
        /* look up all csses currently attached to @cgrp's subtree */
@@ -2866,20 +2840,18 @@ static int cgroup_rename(struct kernfs_node *kn, struct kernfs_node *new_parent,
                return -EPERM;
 
        /*
-        * We're gonna grab cgroup_tree_mutex which nests outside kernfs
+        * We're gonna grab cgroup_mutex which nests outside kernfs
         * active_ref. kernfs_rename() doesn't require active_ref
-        * protection. Break them before grabbing cgroup_tree_mutex.
+        * protection. Break them before grabbing cgroup_mutex.
         */
        kernfs_break_active_protection(new_parent);
        kernfs_break_active_protection(kn);
 
-       mutex_lock(&cgroup_tree_mutex);
        mutex_lock(&cgroup_mutex);
 
        ret = kernfs_rename(kn, new_parent, new_name_str);
 
        mutex_unlock(&cgroup_mutex);
-       mutex_unlock(&cgroup_tree_mutex);
 
        kernfs_unbreak_active_protection(kn);
        kernfs_unbreak_active_protection(new_parent);
@@ -2944,7 +2916,6 @@ static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
        struct cftype *cft;
        int ret;
 
-       lockdep_assert_held(&cgroup_tree_mutex);
        lockdep_assert_held(&cgroup_mutex);
 
        for (cft = cfts; cft->name[0] != '\0'; cft++) {
@@ -2980,7 +2951,6 @@ static int cgroup_apply_cftypes(struct cftype *cfts, bool is_add)
        struct cgroup_subsys_state *css;
        int ret = 0;
 
-       lockdep_assert_held(&cgroup_tree_mutex);
        lockdep_assert_held(&cgroup_mutex);
 
        /* add/rm files for all cgroups created before */
@@ -3049,7 +3019,6 @@ static int cgroup_init_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
 
 static int cgroup_rm_cftypes_locked(struct cftype *cfts)
 {
-       lockdep_assert_held(&cgroup_tree_mutex);
        lockdep_assert_held(&cgroup_mutex);
 
        if (!cfts || !cfts[0].ss)
@@ -3076,11 +3045,9 @@ int cgroup_rm_cftypes(struct cftype *cfts)
 {
        int ret;
 
-       mutex_lock(&cgroup_tree_mutex);
        mutex_lock(&cgroup_mutex);
        ret = cgroup_rm_cftypes_locked(cfts);
        mutex_unlock(&cgroup_mutex);
-       mutex_unlock(&cgroup_tree_mutex);
        return ret;
 }
 
@@ -3109,7 +3076,6 @@ int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
        if (ret)
                return ret;
 
-       mutex_lock(&cgroup_tree_mutex);
        mutex_lock(&cgroup_mutex);
 
        list_add_tail(&cfts->node, &ss->cfts);
@@ -3118,7 +3084,6 @@ int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
                cgroup_rm_cftypes_locked(cfts);
 
        mutex_unlock(&cgroup_mutex);
-       mutex_unlock(&cgroup_tree_mutex);
        return ret;
 }
 
@@ -3158,7 +3123,7 @@ css_next_child(struct cgroup_subsys_state *pos_css,
        struct cgroup *cgrp = parent_css->cgroup;
        struct cgroup *next;
 
-       cgroup_assert_mutexes_or_rcu_locked();
+       cgroup_assert_mutex_or_rcu_locked();
 
        /*
         * @pos could already have been removed. Once a cgroup is removed,
@@ -3224,7 +3189,7 @@ css_next_descendant_pre(struct cgroup_subsys_state *pos,
 {
        struct cgroup_subsys_state *next;
 
-       cgroup_assert_mutexes_or_rcu_locked();
+       cgroup_assert_mutex_or_rcu_locked();
 
        /* if first iteration, visit @root */
        if (!pos)
@@ -3264,7 +3229,7 @@ css_rightmost_descendant(struct cgroup_subsys_state *pos)
 {
        struct cgroup_subsys_state *last, *tmp;
 
-       cgroup_assert_mutexes_or_rcu_locked();
+       cgroup_assert_mutex_or_rcu_locked();
 
        do {
                last = pos;
@@ -3311,7 +3276,7 @@ css_next_descendant_post(struct cgroup_subsys_state *pos,
 {
        struct cgroup_subsys_state *next;
 
-       cgroup_assert_mutexes_or_rcu_locked();
+       cgroup_assert_mutex_or_rcu_locked();
 
        /* if first iteration, visit leftmost descendant which may be @root */
        if (!pos)
@@ -4178,7 +4143,6 @@ static int online_css(struct cgroup_subsys_state *css)
        struct cgroup_subsys *ss = css->ss;
        int ret = 0;
 
-       lockdep_assert_held(&cgroup_tree_mutex);
        lockdep_assert_held(&cgroup_mutex);
 
        if (ss->css_online)
@@ -4196,7 +4160,6 @@ static void offline_css(struct cgroup_subsys_state *css)
 {
        struct cgroup_subsys *ss = css->ss;
 
-       lockdep_assert_held(&cgroup_tree_mutex);
        lockdep_assert_held(&cgroup_mutex);
 
        if (!(css->flags & CSS_ONLINE))
@@ -4399,7 +4362,6 @@ static void css_killed_work_fn(struct work_struct *work)
                container_of(work, struct cgroup_subsys_state, destroy_work);
        struct cgroup *cgrp = css->cgroup;
 
-       mutex_lock(&cgroup_tree_mutex);
        mutex_lock(&cgroup_mutex);
 
        /*
@@ -4417,7 +4379,6 @@ static void css_killed_work_fn(struct work_struct *work)
                cgroup_destroy_css_killed(cgrp);
 
        mutex_unlock(&cgroup_mutex);
-       mutex_unlock(&cgroup_tree_mutex);
 
        /*
         * Put the css refs from kill_css(). Each css holds an extra
@@ -4450,7 +4411,6 @@ static void css_killed_ref_fn(struct percpu_ref *ref)
  */
 static void kill_css(struct cgroup_subsys_state *css)
 {
-       lockdep_assert_held(&cgroup_tree_mutex);
        lockdep_assert_held(&cgroup_mutex);
 
        /*
@@ -4510,7 +4470,6 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
        bool empty;
        int ssid;
 
-       lockdep_assert_held(&cgroup_tree_mutex);
        lockdep_assert_held(&cgroup_mutex);
 
        /*
@@ -4593,7 +4552,6 @@ static void cgroup_destroy_css_killed(struct cgroup *cgrp)
 {
        struct cgroup *parent = cgrp->parent;
 
-       lockdep_assert_held(&cgroup_tree_mutex);
        lockdep_assert_held(&cgroup_mutex);
 
        /* delete this cgroup from parent->children */
@@ -4647,7 +4605,6 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early)
 
        printk(KERN_INFO "Initializing cgroup subsys %s\n", ss->name);
 
-       mutex_lock(&cgroup_tree_mutex);
        mutex_lock(&cgroup_mutex);
 
        idr_init(&ss->css_idr);
@@ -4685,7 +4642,6 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early)
        cgrp_dfl_root.subsys_mask |= 1 << ss->id;
 
        mutex_unlock(&cgroup_mutex);
-       mutex_unlock(&cgroup_tree_mutex);
 }
 
 /**
@@ -4735,7 +4691,6 @@ int __init cgroup_init(void)
 
        BUG_ON(cgroup_init_cftypes(NULL, cgroup_base_files));
 
-       mutex_lock(&cgroup_tree_mutex);
        mutex_lock(&cgroup_mutex);
 
        /* Add init_css_set to the hash table */
@@ -4745,7 +4700,6 @@ int __init cgroup_init(void)
        BUG_ON(cgroup_setup_root(&cgrp_dfl_root, 0));
 
        mutex_unlock(&cgroup_mutex);
-       mutex_unlock(&cgroup_tree_mutex);
 
        for_each_subsys(ss, ssid) {
                if (ss->early_init) {