Diffstat (limited to 'kernel')
-rw-r--r--	kernel/cgroup_freezer.c	43
1 file changed, 17 insertions, 26 deletions
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index 0b0e10545ef0..3d45503a21a2 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -213,41 +213,39 @@ out:
 }
 
 /*
- * caller must hold freezer->lock
+ * We change from FREEZING to FROZEN lazily if the cgroup was only
+ * partially frozen when we exitted write.  Caller must hold freezer->lock.
+ *
+ * Task states and freezer state might disagree while tasks are being
+ * migrated into @cgroup, so we can't verify task states against @freezer
+ * state here.  See freezer_attach() for details.
  */
-static void update_if_frozen(struct cgroup *cgroup,
-			     struct freezer *freezer)
+static void update_if_frozen(struct cgroup *cgroup, struct freezer *freezer)
 {
 	struct cgroup_iter it;
 	struct task_struct *task;
-	unsigned int nfrozen = 0, ntotal = 0;
-	enum freezer_state old_state = freezer->state;
+
+	if (freezer->state != CGROUP_FREEZING)
+		return;
 
 	cgroup_iter_start(cgroup, &it);
+
 	while ((task = cgroup_iter_next(cgroup, &it))) {
 		if (freezing(task)) {
-			ntotal++;
 			/*
 			 * freezer_should_skip() indicates that the task
 			 * should be skipped when determining freezing
 			 * completion.  Consider it frozen in addition to
 			 * the usual frozen condition.
 			 */
-			if (frozen(task) || task_is_stopped_or_traced(task) ||
-			    freezer_should_skip(task))
-				nfrozen++;
+			if (!frozen(task) && !task_is_stopped_or_traced(task) &&
+			    !freezer_should_skip(task))
+				goto notyet;
 		}
 	}
 
-	if (old_state == CGROUP_THAWED) {
-		BUG_ON(nfrozen > 0);
-	} else if (old_state == CGROUP_FREEZING) {
-		if (nfrozen == ntotal)
-			freezer->state = CGROUP_FROZEN;
-	} else { /* old_state == CGROUP_FROZEN */
-		BUG_ON(nfrozen != ntotal);
-	}
-
+	freezer->state = CGROUP_FROZEN;
+notyet:
 	cgroup_iter_end(cgroup, &it);
 }
 
@@ -262,13 +260,8 @@ static int freezer_read(struct cgroup *cgroup, struct cftype *cft,
 
 	freezer = cgroup_freezer(cgroup);
 	spin_lock_irq(&freezer->lock);
+	update_if_frozen(cgroup, freezer);
 	state = freezer->state;
-	if (state == CGROUP_FREEZING) {
-		/* We change from FREEZING to FROZEN lazily if the cgroup was
-		 * only partially frozen when we exitted write. */
-		update_if_frozen(cgroup, freezer);
-		state = freezer->state;
-	}
 	spin_unlock_irq(&freezer->lock);
 	cgroup_unlock();
 
@@ -306,8 +299,6 @@ static void freezer_change_state(struct cgroup *cgroup,
 
 	spin_lock_irq(&freezer->lock);
 
-	update_if_frozen(cgroup, freezer);
-
 	switch (goal_state) {
 	case CGROUP_THAWED:
 		if (freezer->state != CGROUP_THAWED)
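
For reference, a consolidated sketch of update_if_frozen() as it would read with the patch above applied, reassembled from the first hunk; the struct freezer type, the cgroup iterator, and the freezing()/frozen()/freezer_should_skip() helpers are assumed to be the ones kernel/cgroup_freezer.c and the freezer core already provide:

/*
 * We change from FREEZING to FROZEN lazily if the cgroup was only
 * partially frozen when we exitted write.  Caller must hold freezer->lock.
 */
static void update_if_frozen(struct cgroup *cgroup, struct freezer *freezer)
{
	struct cgroup_iter it;
	struct task_struct *task;

	/* Only a FREEZING cgroup can lazily transition to FROZEN. */
	if (freezer->state != CGROUP_FREEZING)
		return;

	cgroup_iter_start(cgroup, &it);

	while ((task = cgroup_iter_next(cgroup, &it))) {
		if (freezing(task)) {
			/*
			 * A task that is stopped, traced, or marked by
			 * freezer_should_skip() counts as frozen when
			 * deciding whether freezing has completed.
			 */
			if (!frozen(task) && !task_is_stopped_or_traced(task) &&
			    !freezer_should_skip(task))
				goto notyet;	/* at least one task not frozen yet */
		}
	}

	freezer->state = CGROUP_FROZEN;
notyet:
	cgroup_iter_end(cgroup, &it);
}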