about summary refs log tree commit diff stats
path: root/kernel/cgroup_freezer.c
diff options
context:
space:
mode:
authorTejun Heo <tj@kernel.org>2012-10-16 18:03:15 -0400
committerTejun Heo <tj@kernel.org>2012-10-20 19:33:12 -0400
commitead5c473712eb26db792b18a4dc98fdb312883fe (patch)
treeb1700298660d970e7e09d9ec60cc49c36c64e695 /kernel/cgroup_freezer.c
parentb4d18311d37b0b1b370a1ef3e4de92b97930f0a8 (diff)
cgroup_freezer: don't use cgroup_lock_live_group()
freezer_read/write() used cgroup_lock_live_group() to synchronize against task migration into and out of the target cgroup. cgroup_lock_live_group() grabs the internal cgroup lock and using it from outside cgroup core leads to complex and fragile locking dependency issues which are difficult to resolve. Now that freezer_can_attach() is replaced with freezer_attach() and update_if_frozen() updated, nothing requires excluding migration against freezer state reads and changes. This patch removes cgroup_lock_live_group() and the matching cgroup_unlock() usages. The prone-to-bitrot, already outdated and unnecessary global lock hierarchy documentation is replaced with documentation in local scope. Signed-off-by: Tejun Heo <tj@kernel.org> Cc: Oleg Nesterov <oleg@redhat.com> Cc: Rafael J. Wysocki <rjw@sisk.pl> Cc: Li Zefan <lizefan@huawei.com>
Diffstat (limited to 'kernel/cgroup_freezer.c')
-rw-r--r-- kernel/cgroup_freezer.c | 66
1 file changed, 10 insertions, 56 deletions
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index 3d45503a21a2..8a92b0e52099 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -84,50 +84,6 @@ static const char *freezer_state_strs[] = {
84 84
85struct cgroup_subsys freezer_subsys; 85struct cgroup_subsys freezer_subsys;
86 86
87/* Locks taken and their ordering
88 * ------------------------------
89 * cgroup_mutex (AKA cgroup_lock)
90 * freezer->lock
91 * css_set_lock
92 * task->alloc_lock (AKA task_lock)
93 * task->sighand->siglock
94 *
95 * cgroup code forces css_set_lock to be taken before task->alloc_lock
96 *
97 * freezer_create(), freezer_destroy():
98 * cgroup_mutex [ by cgroup core ]
99 *
100 * freezer_can_attach():
101 * cgroup_mutex (held by caller of can_attach)
102 *
103 * freezer_fork() (preserving fork() performance means can't take cgroup_mutex):
104 * freezer->lock
105 * sighand->siglock (if the cgroup is freezing)
106 *
107 * freezer_read():
108 * cgroup_mutex
109 * freezer->lock
110 * write_lock css_set_lock (cgroup iterator start)
111 * task->alloc_lock
112 * read_lock css_set_lock (cgroup iterator start)
113 *
114 * freezer_write() (freeze):
115 * cgroup_mutex
116 * freezer->lock
117 * write_lock css_set_lock (cgroup iterator start)
118 * task->alloc_lock
119 * read_lock css_set_lock (cgroup iterator start)
120 * sighand->siglock (fake signal delivery inside freeze_task())
121 *
122 * freezer_write() (unfreeze):
123 * cgroup_mutex
124 * freezer->lock
125 * write_lock css_set_lock (cgroup iterator start)
126 * task->alloc_lock
127 * read_lock css_set_lock (cgroup iterator start)
128 * task->alloc_lock (inside __thaw_task(), prevents race with refrigerator())
129 * sighand->siglock
130 */
131static struct cgroup_subsys_state *freezer_create(struct cgroup *cgroup) 87static struct cgroup_subsys_state *freezer_create(struct cgroup *cgroup)
132{ 88{
133 struct freezer *freezer; 89 struct freezer *freezer;
@@ -151,9 +107,13 @@ static void freezer_destroy(struct cgroup *cgroup)
151} 107}
152 108
153/* 109/*
154 * The call to cgroup_lock() in the freezer.state write method prevents 110 * Tasks can be migrated into a different freezer anytime regardless of its
155 * a write to that file racing against an attach, and hence we don't need 111 * current state. freezer_attach() is responsible for making new tasks
156 * to worry about racing against migration. 112 * conform to the current state.
113 *
114 * Freezer state changes and task migration are synchronized via
115 * @freezer->lock. freezer_attach() makes the new tasks conform to the
116 * current state and all following state changes can see the new tasks.
157 */ 117 */
158static void freezer_attach(struct cgroup *new_cgrp, struct cgroup_taskset *tset) 118static void freezer_attach(struct cgroup *new_cgrp, struct cgroup_taskset *tset)
159{ 119{
@@ -217,8 +177,8 @@ out:
217 * partially frozen when we exited write. Caller must hold freezer->lock. 177 * partially frozen when we exited write. Caller must hold freezer->lock.
218 * 178 *
219 * Task states and freezer state might disagree while tasks are being 179 * Task states and freezer state might disagree while tasks are being
220 * migrated into @cgroup, so we can't verify task states against @freezer 180 * migrated into or out of @cgroup, so we can't verify task states against
221 * state here. See freezer_attach() for details. 181 * @freezer state here. See freezer_attach() for details.
222 */ 182 */
223static void update_if_frozen(struct cgroup *cgroup, struct freezer *freezer) 183static void update_if_frozen(struct cgroup *cgroup, struct freezer *freezer)
224{ 184{
@@ -255,15 +215,11 @@ static int freezer_read(struct cgroup *cgroup, struct cftype *cft,
255 struct freezer *freezer; 215 struct freezer *freezer;
256 enum freezer_state state; 216 enum freezer_state state;
257 217
258 if (!cgroup_lock_live_group(cgroup))
259 return -ENODEV;
260
261 freezer = cgroup_freezer(cgroup); 218 freezer = cgroup_freezer(cgroup);
262 spin_lock_irq(&freezer->lock); 219 spin_lock_irq(&freezer->lock);
263 update_if_frozen(cgroup, freezer); 220 update_if_frozen(cgroup, freezer);
264 state = freezer->state; 221 state = freezer->state;
265 spin_unlock_irq(&freezer->lock); 222 spin_unlock_irq(&freezer->lock);
266 cgroup_unlock();
267 223
268 seq_puts(m, freezer_state_strs[state]); 224 seq_puts(m, freezer_state_strs[state]);
269 seq_putc(m, '\n'); 225 seq_putc(m, '\n');
@@ -297,6 +253,7 @@ static void freezer_change_state(struct cgroup *cgroup,
297{ 253{
298 struct freezer *freezer = cgroup_freezer(cgroup); 254 struct freezer *freezer = cgroup_freezer(cgroup);
299 255
256 /* also synchronizes against task migration, see freezer_attach() */
300 spin_lock_irq(&freezer->lock); 257 spin_lock_irq(&freezer->lock);
301 258
302 switch (goal_state) { 259 switch (goal_state) {
@@ -332,10 +289,7 @@ static int freezer_write(struct cgroup *cgroup,
332 else 289 else
333 return -EINVAL; 290 return -EINVAL;
334 291
335 if (!cgroup_lock_live_group(cgroup))
336 return -ENODEV;
337 freezer_change_state(cgroup, goal_state); 292 freezer_change_state(cgroup, goal_state);
338 cgroup_unlock();
339 return 0; 293 return 0;
340} 294}
341 295