about | summary | refs | log | tree | commit | diff | stats
path: root/kernel/cgroup.c
diff options
context:
space:
mode:
author: Tejun Heo <tj@kernel.org> 2013-04-07 12:29:51 -0400
committer: Tejun Heo <tj@kernel.org> 2013-04-07 12:29:51 -0400
commit: 47cfcd0922454e49f4923b1e2d31a5bf199237c3 (patch)
tree: 2b257e98a90b584fb8bd04a1ff1619cb27f394ae /kernel/cgroup.c
parent: b9777cf8d7c7854c3c38bd6621d993b85c2afcdf (diff)
cgroup: kill cgroup_[un]lock()
Now that locking interface is unexported, there's no reason to keep around these thin wrappers. Kill them and use mutex operations directly. Signed-off-by: Tejun Heo <tj@kernel.org> Acked-by: Li Zefan <lizefan@huawei.com>
Diffstat (limited to 'kernel/cgroup.c')
-rw-r--r-- kernel/cgroup.c | 41
1 file changed, 11 insertions, 30 deletions
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 32ca0304452f..1a65958c1a0b 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -333,8 +333,8 @@ static inline struct cftype *__d_cft(struct dentry *dentry)
  * cgroup_lock_live_group - take cgroup_mutex and check that cgrp is alive.
  * @cgrp: the cgroup to be checked for liveness
  *
- * On success, returns true; the lock should be later released with
- * cgroup_unlock(). On failure returns false with no lock held.
+ * On success, returns true; the mutex should be later unlocked. On
+ * failure returns false with no lock held.
  */
 static bool cgroup_lock_live_group(struct cgroup *cgrp)
 {
@@ -819,25 +819,6 @@ static struct cgroup *task_cgroup_from_root(struct task_struct *task,
  * update of a tasks cgroup pointer by cgroup_attach_task()
  */
 
-/**
- * cgroup_lock - lock out any changes to cgroup structures
- *
- */
-static void cgroup_lock(void)
-{
-	mutex_lock(&cgroup_mutex);
-}
-
-/**
- * cgroup_unlock - release lock on cgroup changes
- *
- * Undo the lock taken in a previous cgroup_lock() call.
- */
-static void cgroup_unlock(void)
-{
-	mutex_unlock(&cgroup_mutex);
-}
-
 /*
  * A couple of forward declarations required, due to cyclic reference loop:
  * cgroup_mkdir -> cgroup_create -> cgroup_populate_dir ->
@@ -1967,8 +1948,8 @@ static void cgroup_task_migrate(struct cgroup *oldcgrp,
  * Call holding cgroup_mutex and the group_rwsem of the leader. Will take
  * task_lock of @tsk or each thread in the threadgroup individually in turn.
  */
-int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
-		       bool threadgroup)
+static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
+			      bool threadgroup)
 {
 	int retval, i, group_size;
 	struct cgroup_subsys *ss, *failed_ss = NULL;
@@ -2191,7 +2172,7 @@ retry_find_task:
 
 	put_task_struct(tsk);
 out_unlock_cgroup:
-	cgroup_unlock();
+	mutex_unlock(&cgroup_mutex);
 	return ret;
 }
 
@@ -2205,7 +2186,7 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
 	struct cgroupfs_root *root;
 	int retval = 0;
 
-	cgroup_lock();
+	mutex_lock(&cgroup_mutex);
 	for_each_active_root(root) {
 		struct cgroup *from_cg = task_cgroup_from_root(from, root);
 
@@ -2213,7 +2194,7 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
 		if (retval)
 			break;
 	}
-	cgroup_unlock();
+	mutex_unlock(&cgroup_mutex);
 
 	return retval;
 }
@@ -2240,7 +2221,7 @@ static int cgroup_release_agent_write(struct cgroup *cgrp, struct cftype *cft,
 	mutex_lock(&cgroup_root_mutex);
 	strcpy(cgrp->root->release_agent_path, buffer);
 	mutex_unlock(&cgroup_root_mutex);
-	cgroup_unlock();
+	mutex_unlock(&cgroup_mutex);
 	return 0;
 }
 
@@ -2251,7 +2232,7 @@ static int cgroup_release_agent_show(struct cgroup *cgrp, struct cftype *cft,
 		return -ENODEV;
 	seq_puts(seq, cgrp->root->release_agent_path);
 	seq_putc(seq, '\n');
-	cgroup_unlock();
+	mutex_unlock(&cgroup_mutex);
 	return 0;
 }
 
@@ -3271,9 +3252,9 @@ static void cgroup_transfer_one_task(struct task_struct *task,
 {
 	struct cgroup *new_cgroup = scan->data;
 
-	cgroup_lock();
+	mutex_lock(&cgroup_mutex);
 	cgroup_attach_task(new_cgroup, task, false);
-	cgroup_unlock();
+	mutex_unlock(&cgroup_mutex);
 }
 
 /**