about summary refs log tree commit diff stats
path: root/kernel/cgroup_freezer.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/cgroup_freezer.c')
-rw-r--r--  kernel/cgroup_freezer.c  88
1 file changed, 37 insertions(+), 51 deletions(-)
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index ce71ed53e88f..e691818d7e45 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -48,20 +48,19 @@ static inline struct freezer *task_freezer(struct task_struct *task)
48 struct freezer, css); 48 struct freezer, css);
49} 49}
50 50
51int cgroup_freezing_or_frozen(struct task_struct *task) 51static inline int __cgroup_freezing_or_frozen(struct task_struct *task)
52{ 52{
53 struct freezer *freezer; 53 enum freezer_state state = task_freezer(task)->state;
54 enum freezer_state state; 54 return (state == CGROUP_FREEZING) || (state == CGROUP_FROZEN);
55}
55 56
57int cgroup_freezing_or_frozen(struct task_struct *task)
58{
59 int result;
56 task_lock(task); 60 task_lock(task);
57 freezer = task_freezer(task); 61 result = __cgroup_freezing_or_frozen(task);
58 if (!freezer->css.cgroup->parent)
59 state = CGROUP_THAWED; /* root cgroup can't be frozen */
60 else
61 state = freezer->state;
62 task_unlock(task); 62 task_unlock(task);
63 63 return result;
64 return (state == CGROUP_FREEZING) || (state == CGROUP_FROZEN);
65} 64}
66 65
67/* 66/*
@@ -154,13 +153,6 @@ static void freezer_destroy(struct cgroup_subsys *ss,
154 kfree(cgroup_freezer(cgroup)); 153 kfree(cgroup_freezer(cgroup));
155} 154}
156 155
157/* Task is frozen or will freeze immediately when next it gets woken */
158static bool is_task_frozen_enough(struct task_struct *task)
159{
160 return frozen(task) ||
161 (task_is_stopped_or_traced(task) && freezing(task));
162}
163
164/* 156/*
165 * The call to cgroup_lock() in the freezer.state write method prevents 157 * The call to cgroup_lock() in the freezer.state write method prevents
166 * a write to that file racing against an attach, and hence the 158 * a write to that file racing against an attach, and hence the
@@ -168,37 +160,29 @@ static bool is_task_frozen_enough(struct task_struct *task)
168 */ 160 */
169static int freezer_can_attach(struct cgroup_subsys *ss, 161static int freezer_can_attach(struct cgroup_subsys *ss,
170 struct cgroup *new_cgroup, 162 struct cgroup *new_cgroup,
171 struct task_struct *task, bool threadgroup) 163 struct task_struct *task)
172{ 164{
173 struct freezer *freezer; 165 struct freezer *freezer;
174 166
175 /* 167 /*
176 * Anything frozen can't move or be moved to/from. 168 * Anything frozen can't move or be moved to/from.
177 *
178 * Since orig_freezer->state == FROZEN means that @task has been
179 * frozen, so it's sufficient to check the latter condition.
180 */ 169 */
181 170
182 if (is_task_frozen_enough(task))
183 return -EBUSY;
184
185 freezer = cgroup_freezer(new_cgroup); 171 freezer = cgroup_freezer(new_cgroup);
186 if (freezer->state == CGROUP_FROZEN) 172 if (freezer->state != CGROUP_THAWED)
187 return -EBUSY; 173 return -EBUSY;
188 174
189 if (threadgroup) { 175 return 0;
190 struct task_struct *c; 176}
191 177
192 rcu_read_lock(); 178static int freezer_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
193 list_for_each_entry_rcu(c, &task->thread_group, thread_group) { 179{
194 if (is_task_frozen_enough(c)) { 180 rcu_read_lock();
195 rcu_read_unlock(); 181 if (__cgroup_freezing_or_frozen(tsk)) {
196 return -EBUSY;
197 }
198 }
199 rcu_read_unlock(); 182 rcu_read_unlock();
183 return -EBUSY;
200 } 184 }
201 185 rcu_read_unlock();
202 return 0; 186 return 0;
203} 187}
204 188
@@ -236,31 +220,30 @@ static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
236/* 220/*
237 * caller must hold freezer->lock 221 * caller must hold freezer->lock
238 */ 222 */
239static void update_freezer_state(struct cgroup *cgroup, 223static void update_if_frozen(struct cgroup *cgroup,
240 struct freezer *freezer) 224 struct freezer *freezer)
241{ 225{
242 struct cgroup_iter it; 226 struct cgroup_iter it;
243 struct task_struct *task; 227 struct task_struct *task;
244 unsigned int nfrozen = 0, ntotal = 0; 228 unsigned int nfrozen = 0, ntotal = 0;
229 enum freezer_state old_state = freezer->state;
245 230
246 cgroup_iter_start(cgroup, &it); 231 cgroup_iter_start(cgroup, &it);
247 while ((task = cgroup_iter_next(cgroup, &it))) { 232 while ((task = cgroup_iter_next(cgroup, &it))) {
248 ntotal++; 233 ntotal++;
249 if (is_task_frozen_enough(task)) 234 if (frozen(task))
250 nfrozen++; 235 nfrozen++;
251 } 236 }
252 237
253 /* 238 if (old_state == CGROUP_THAWED) {
254 * Transition to FROZEN when no new tasks can be added ensures 239 BUG_ON(nfrozen > 0);
255 * that we never exist in the FROZEN state while there are unfrozen 240 } else if (old_state == CGROUP_FREEZING) {
256 * tasks. 241 if (nfrozen == ntotal)
257 */ 242 freezer->state = CGROUP_FROZEN;
258 if (nfrozen == ntotal) 243 } else { /* old_state == CGROUP_FROZEN */
259 freezer->state = CGROUP_FROZEN; 244 BUG_ON(nfrozen != ntotal);
260 else if (nfrozen > 0) 245 }
261 freezer->state = CGROUP_FREEZING; 246
262 else
263 freezer->state = CGROUP_THAWED;
264 cgroup_iter_end(cgroup, &it); 247 cgroup_iter_end(cgroup, &it);
265} 248}
266 249
@@ -279,7 +262,7 @@ static int freezer_read(struct cgroup *cgroup, struct cftype *cft,
279 if (state == CGROUP_FREEZING) { 262 if (state == CGROUP_FREEZING) {
280 /* We change from FREEZING to FROZEN lazily if the cgroup was 263 /* We change from FREEZING to FROZEN lazily if the cgroup was
281 * only partially frozen when we exitted write. */ 264 * only partially frozen when we exitted write. */
282 update_freezer_state(cgroup, freezer); 265 update_if_frozen(cgroup, freezer);
283 state = freezer->state; 266 state = freezer->state;
284 } 267 }
285 spin_unlock_irq(&freezer->lock); 268 spin_unlock_irq(&freezer->lock);
@@ -301,7 +284,7 @@ static int try_to_freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
301 while ((task = cgroup_iter_next(cgroup, &it))) { 284 while ((task = cgroup_iter_next(cgroup, &it))) {
302 if (!freeze_task(task, true)) 285 if (!freeze_task(task, true))
303 continue; 286 continue;
304 if (is_task_frozen_enough(task)) 287 if (frozen(task))
305 continue; 288 continue;
306 if (!freezing(task) && !freezer_should_skip(task)) 289 if (!freezing(task) && !freezer_should_skip(task))
307 num_cant_freeze_now++; 290 num_cant_freeze_now++;
@@ -335,7 +318,7 @@ static int freezer_change_state(struct cgroup *cgroup,
335 318
336 spin_lock_irq(&freezer->lock); 319 spin_lock_irq(&freezer->lock);
337 320
338 update_freezer_state(cgroup, freezer); 321 update_if_frozen(cgroup, freezer);
339 if (goal_state == freezer->state) 322 if (goal_state == freezer->state)
340 goto out; 323 goto out;
341 324
@@ -398,6 +381,9 @@ struct cgroup_subsys freezer_subsys = {
398 .populate = freezer_populate, 381 .populate = freezer_populate,
399 .subsys_id = freezer_subsys_id, 382 .subsys_id = freezer_subsys_id,
400 .can_attach = freezer_can_attach, 383 .can_attach = freezer_can_attach,
384 .can_attach_task = freezer_can_attach_task,
385 .pre_attach = NULL,
386 .attach_task = NULL,
401 .attach = NULL, 387 .attach = NULL,
402 .fork = freezer_fork, 388 .fork = freezer_fork,
403 .exit = NULL, 389 .exit = NULL,