author	Li Zefan <lizefan@huawei.com>	2013-07-31 04:18:36 -0400
committer	Tejun Heo <tj@kernel.org>	2013-07-31 06:20:18 -0400
commit	6f4b7e632d78c2d91502211c430722cc66428492 (patch)
tree	51f64de7cce9b50ee120599dc976a8694c3ed8fa
parent	e0798ce27346edb8aa369b5b39af5a47fdf2b25c (diff)
cgroup: more naming cleanups
Consistently use @cset for css_set variables and @cgrp for cgroup variables.

Signed-off-by: Li Zefan <lizefan@huawei.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
-rw-r--r--	include/linux/cgroup.h	6
-rw-r--r--	kernel/cgroup.c	26
-rw-r--r--	kernel/cpuset.c	16
3 files changed, 24 insertions(+), 24 deletions(-)
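The rule applied throughout the patch: any local pointing to a struct css_set is named cset and any local pointing to a struct cgroup is named cgrp, replacing the ambiguous "cg". A minimal illustrative sketch of that convention follows; the function below is hypothetical and not part of this patch (only task_css_set() is a real helper that also appears in the diff), and callers of task_css_set() normally hold rcu_read_lock() or cgroup_mutex.

/* Hypothetical sketch, not kernel code: the naming rule this patch enforces. */
static void naming_convention_sketch(struct task_struct *tsk)
{
	struct css_set *cset = task_css_set(tsk);	/* css_set pointers are @cset, never "cg" */
	struct cgroup *cgrp = NULL;			/* cgroup pointers are @cgrp, never "cg" */

	(void)cset;	/* silence unused-variable warnings in this sketch */
	(void)cgrp;
}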
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 297462b9f41a..00a7e07a1567 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -394,8 +394,8 @@ struct cgroup_map_cb {
 
 /* cftype->flags */
 enum {
-	CFTYPE_ONLY_ON_ROOT	= (1 << 0),	/* only create on root cg */
-	CFTYPE_NOT_ON_ROOT	= (1 << 1),	/* don't create on root cg */
+	CFTYPE_ONLY_ON_ROOT	= (1 << 0),	/* only create on root cgrp */
+	CFTYPE_NOT_ON_ROOT	= (1 << 1),	/* don't create on root cgrp */
 	CFTYPE_INSANE		= (1 << 2),	/* don't create if sane_behavior */
 };
 
@@ -513,7 +513,7 @@ struct cftype_set {
 };
 
 struct cgroup_scanner {
-	struct cgroup *cg;
+	struct cgroup *cgrp;
 	int (*test_task)(struct task_struct *p, struct cgroup_scanner *scan);
 	void (*process_task)(struct task_struct *p,
 			struct cgroup_scanner *scan);
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index ed2104304833..9577bebe2546 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -466,7 +466,7 @@ static inline void put_css_set_taskexit(struct css_set *cset)
  * @new_cgrp: cgroup that's being entered by the task
  * @template: desired set of css pointers in css_set (pre-calculated)
  *
- * Returns true if "cg" matches "old_cg" except for the hierarchy
+ * Returns true if "cset" matches "old_cset" except for the hierarchy
  * which "new_cgrp" belongs to, for which it should match "new_cgrp".
  */
 static bool compare_css_sets(struct css_set *cset,
@@ -1839,7 +1839,7 @@ EXPORT_SYMBOL_GPL(task_cgroup_path_from_hierarchy);
 struct task_and_cgroup {
 	struct task_struct	*task;
 	struct cgroup		*cgrp;
-	struct css_set		*cg;
+	struct css_set		*cset;
 };
 
 struct cgroup_taskset {
@@ -2057,8 +2057,8 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
 
 		tc = flex_array_get(group, i);
 		old_cset = task_css_set(tc->task);
-		tc->cg = find_css_set(old_cset, cgrp);
-		if (!tc->cg) {
+		tc->cset = find_css_set(old_cset, cgrp);
+		if (!tc->cset) {
 			retval = -ENOMEM;
 			goto out_put_css_set_refs;
 		}
@@ -2071,7 +2071,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
 	 */
 	for (i = 0; i < group_size; i++) {
 		tc = flex_array_get(group, i);
-		cgroup_task_migrate(tc->cgrp, tc->task, tc->cg);
+		cgroup_task_migrate(tc->cgrp, tc->task, tc->cset);
 	}
 	/* nothing is sensitive to fork() after this point. */
 
@@ -2091,9 +2091,9 @@ out_put_css_set_refs:
 	if (retval) {
 		for (i = 0; i < group_size; i++) {
 			tc = flex_array_get(group, i);
-			if (!tc->cg)
+			if (!tc->cset)
 				break;
-			put_css_set(tc->cg);
+			put_css_set(tc->cset);
 		}
 	}
 out_cancel_attach:
@@ -2203,9 +2203,9 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
 
 	mutex_lock(&cgroup_mutex);
 	for_each_active_root(root) {
-		struct cgroup *from_cg = task_cgroup_from_root(from, root);
+		struct cgroup *from_cgrp = task_cgroup_from_root(from, root);
 
-		retval = cgroup_attach_task(from_cg, tsk, false);
+		retval = cgroup_attach_task(from_cgrp, tsk, false);
 		if (retval)
 			break;
 	}
@@ -3305,8 +3305,8 @@ int cgroup_scan_tasks(struct cgroup_scanner *scan)
 	 * guarantees forward progress and that we don't miss any tasks.
 	 */
 	heap->size = 0;
-	cgroup_iter_start(scan->cg, &it);
-	while ((p = cgroup_iter_next(scan->cg, &it))) {
+	cgroup_iter_start(scan->cgrp, &it);
+	while ((p = cgroup_iter_next(scan->cgrp, &it))) {
 		/*
 		 * Only affect tasks that qualify per the caller's callback,
 		 * if he provided one
@@ -3339,7 +3339,7 @@ int cgroup_scan_tasks(struct cgroup_scanner *scan)
 		 * the heap and wasn't inserted
 		 */
 	}
-	cgroup_iter_end(scan->cg, &it);
+	cgroup_iter_end(scan->cgrp, &it);
 
 	if (heap->size) {
 		for (i = 0; i < heap->size; i++) {
@@ -3385,7 +3385,7 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
 {
 	struct cgroup_scanner scan;
 
-	scan.cg = from;
+	scan.cgrp = from;
 	scan.test_task = NULL; /* select all tasks in cgroup */
 	scan.process_task = cgroup_transfer_one_task;
 	scan.heap = NULL;
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 703bfd5a32a9..1b9c31549797 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -845,7 +845,7 @@ static void cpuset_change_cpumask(struct task_struct *tsk,
 {
 	struct cpuset *cpus_cs;
 
-	cpus_cs = effective_cpumask_cpuset(cgroup_cs(scan->cg));
+	cpus_cs = effective_cpumask_cpuset(cgroup_cs(scan->cgrp));
 	set_cpus_allowed_ptr(tsk, cpus_cs->cpus_allowed);
 }
 
@@ -866,7 +866,7 @@ static void update_tasks_cpumask(struct cpuset *cs, struct ptr_heap *heap)
 {
 	struct cgroup_scanner scan;
 
-	scan.cg = cs->css.cgroup;
+	scan.cgrp = cs->css.cgroup;
 	scan.test_task = NULL;
 	scan.process_task = cpuset_change_cpumask;
 	scan.heap = heap;
@@ -1062,7 +1062,7 @@ static void cpuset_change_task_nodemask(struct task_struct *tsk,
 static void cpuset_change_nodemask(struct task_struct *p,
 				   struct cgroup_scanner *scan)
 {
-	struct cpuset *cs = cgroup_cs(scan->cg);
+	struct cpuset *cs = cgroup_cs(scan->cgrp);
 	struct mm_struct *mm;
 	int migrate;
 	nodemask_t *newmems = scan->data;
@@ -1102,7 +1102,7 @@ static void update_tasks_nodemask(struct cpuset *cs, struct ptr_heap *heap)
 
 	guarantee_online_mems(mems_cs, &newmems);
 
-	scan.cg = cs->css.cgroup;
+	scan.cgrp = cs->css.cgroup;
 	scan.test_task = NULL;
 	scan.process_task = cpuset_change_nodemask;
 	scan.heap = heap;
@@ -1275,7 +1275,7 @@ static int update_relax_domain_level(struct cpuset *cs, s64 val)
 static void cpuset_change_flag(struct task_struct *tsk,
 			       struct cgroup_scanner *scan)
 {
-	cpuset_update_task_spread_flag(cgroup_cs(scan->cg), tsk);
+	cpuset_update_task_spread_flag(cgroup_cs(scan->cgrp), tsk);
 }
 
 /*
@@ -1295,7 +1295,7 @@ static void update_tasks_flags(struct cpuset *cs, struct ptr_heap *heap)
 {
 	struct cgroup_scanner scan;
 
-	scan.cg = cs->css.cgroup;
+	scan.cgrp = cs->css.cgroup;
 	scan.test_task = NULL;
 	scan.process_task = cpuset_change_flag;
 	scan.heap = heap;
@@ -1971,7 +1971,7 @@ static int cpuset_css_online(struct cgroup *cgrp)
 	struct cpuset *cs = cgroup_cs(cgrp);
 	struct cpuset *parent = parent_cs(cs);
 	struct cpuset *tmp_cs;
-	struct cgroup *pos_cg;
+	struct cgroup *pos_cgrp;
 
 	if (!parent)
 		return 0;
@@ -2003,7 +2003,7 @@ static int cpuset_css_online(struct cgroup *cgrp)
 	 * (and likewise for mems) to the new cgroup.
 	 */
 	rcu_read_lock();
-	cpuset_for_each_child(tmp_cs, pos_cg, parent) {
+	cpuset_for_each_child(tmp_cs, pos_cgrp, parent) {
 		if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) {
 			rcu_read_unlock();
 			goto out_unlock;