author		Tejun Heo <tj@kernel.org>	2014-02-13 06:58:43 -0500
committer	Tejun Heo <tj@kernel.org>	2014-02-13 06:58:43 -0500
commit		9db8de3722d184b8a431afd6bef803d6867ac889
tree		eeb23ad425d8bad43382e53ac61287092b0a8530 /kernel/cgroup.c
parent		bc668c7519ff8b4681af80e92f463bec7bf7cf9e
cgroup: cosmetic updates to cgroup_attach_task()
cgroup_attach_task() is slated for restructuring. Let's tidy it up a
bit in preparation.
* Update cgroup_attach_task() to receive the target task argument in
@leader instead of @tsk.
* Rename @tsk to @task.
* Rename @retval to @ret.
This is purely cosmetic.
v2: get_nr_threads() was using uninitialized @task instead of @leader.
Fixed. Reported by Dan Carpenter.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Li Zefan <lizefan@huawei.com>
Cc: Dan Carpenter <dan.carpenter@oracle.com>
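
The rename pairs naturally with how while_each_thread(leader, task) is
used: the first argument is a fixed anchor on the thread-group leader,
the second a cursor that walks the circular thread list. That is also
why get_nr_threads() must be asked about @leader, and why the cursor has
to be seeded (task = leader) before the loop -- the v2 bug above. Below
is a toy userspace analogue of that shape; struct toy_task and the names
in it are made up for illustration, a sketch rather than kernel code:

#include <stdio.h>

/* Hypothetical stand-in for the kernel's circular thread_group list. */
struct toy_task {
	int pid;
	struct toy_task *next;	/* last thread links back to the leader */
};

int main(void)
{
	struct toy_task t3 = { 103, NULL };
	struct toy_task t2 = { 102, &t3 };
	struct toy_task leader = { 101, &t2 };
	struct toy_task *task;

	t3.next = &leader;	/* close the circle */

	/*
	 * Seed the cursor from the fixed anchor before the do/while body
	 * runs; using @task any earlier is the uninitialized-use bug the
	 * v2 note above describes.
	 */
	task = &leader;
	do {
		printf("visiting pid %d\n", task->pid);
		task = task->next;
	} while (task != &leader);	/* cf. while_each_thread(leader, task) */

	return 0;
}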
Diffstat (limited to 'kernel/cgroup.c')
 kernel/cgroup.c | 45 +++++++++++++++++++++++----------------------
 1 file changed, 23 insertions(+), 22 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index a9d9bbb12310..9a890a2e58fc 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1728,20 +1728,20 @@ static void cgroup_task_migrate(struct cgroup *old_cgrp,
 /**
  * cgroup_attach_task - attach a task or a whole threadgroup to a cgroup
  * @cgrp: the cgroup to attach to
- * @tsk: the task or the leader of the threadgroup to be attached
+ * @leader: the task or the leader of the threadgroup to be attached
  * @threadgroup: attach the whole threadgroup?
  *
  * Call holding cgroup_mutex and the group_rwsem of the leader. Will take
  * task_lock of @tsk or each thread in the threadgroup individually in turn.
  */
-static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
+static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *leader,
 			      bool threadgroup)
 {
-	int retval, i, group_size;
+	int ret, i, group_size;
 	struct cgroupfs_root *root = cgrp->root;
 	struct cgroup_subsys_state *css, *failed_css = NULL;
 	/* threadgroup list cursor and array */
-	struct task_struct *leader = tsk;
+	struct task_struct *task;
 	struct task_and_cgroup *tc;
 	struct flex_array *group;
 	struct cgroup_taskset tset = { };
@@ -1754,7 +1754,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
 	 * threads exit, this will just be an over-estimate.
 	 */
 	if (threadgroup)
-		group_size = get_nr_threads(tsk);
+		group_size = get_nr_threads(leader);
 	else
 		group_size = 1;
 	/* flex_array supports very large thread-groups better than kmalloc. */
@@ -1762,8 +1762,8 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
 	if (!group)
 		return -ENOMEM;
 	/* pre-allocate to guarantee space while iterating in rcu read-side. */
-	retval = flex_array_prealloc(group, 0, group_size, GFP_KERNEL);
-	if (retval)
+	ret = flex_array_prealloc(group, 0, group_size, GFP_KERNEL);
+	if (ret)
 		goto out_free_group_list;
 
 	i = 0;
@@ -1774,17 +1774,18 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
 	 */
 	down_read(&css_set_rwsem);
 	rcu_read_lock();
+	task = leader;
 	do {
 		struct task_and_cgroup ent;
 
-		/* @tsk either already exited or can't exit until the end */
-		if (tsk->flags & PF_EXITING)
+		/* @task either already exited or can't exit until the end */
+		if (task->flags & PF_EXITING)
 			goto next;
 
 		/* as per above, nr_threads may decrease, but not increase. */
 		BUG_ON(i >= group_size);
-		ent.task = tsk;
-		ent.cgrp = task_cgroup_from_root(tsk, root);
+		ent.task = task;
+		ent.cgrp = task_cgroup_from_root(task, root);
 		/* nothing to do if this task is already in the cgroup */
 		if (ent.cgrp == cgrp)
 			goto next;
@@ -1792,13 +1793,13 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
 		 * saying GFP_ATOMIC has no effect here because we did prealloc
 		 * earlier, but it's good form to communicate our expectations.
 		 */
-		retval = flex_array_put(group, i, &ent, GFP_ATOMIC);
-		BUG_ON(retval != 0);
+		ret = flex_array_put(group, i, &ent, GFP_ATOMIC);
+		BUG_ON(ret != 0);
 		i++;
 	next:
 		if (!threadgroup)
 			break;
-	} while_each_thread(leader, tsk);
+	} while_each_thread(leader, task);
 	rcu_read_unlock();
 	up_read(&css_set_rwsem);
 	/* remember the number of threads in the array for later. */
@@ -1807,7 +1808,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
 	tset.tc_array_len = group_size;
 
 	/* methods shouldn't be called if no task is actually migrating */
-	retval = 0;
+	ret = 0;
 	if (!group_size)
 		goto out_free_group_list;
 
@@ -1816,8 +1817,8 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
 	 */
 	for_each_css(css, i, cgrp) {
 		if (css->ss->can_attach) {
-			retval = css->ss->can_attach(css, &tset);
-			if (retval) {
+			ret = css->ss->can_attach(css, &tset);
+			if (ret) {
 				failed_css = css;
 				goto out_cancel_attach;
 			}
@@ -1835,7 +1836,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
 		old_cset = task_css_set(tc->task);
 		tc->cset = find_css_set(old_cset, cgrp);
 		if (!tc->cset) {
-			retval = -ENOMEM;
+			ret = -ENOMEM;
 			goto out_put_css_set_refs;
 		}
 	}
@@ -1863,9 +1864,9 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
 	/*
 	 * step 5: success! and cleanup
 	 */
-	retval = 0;
+	ret = 0;
 out_put_css_set_refs:
-	if (retval) {
+	if (ret) {
 		for (i = 0; i < group_size; i++) {
 			tc = flex_array_get(group, i);
 			if (!tc->cset)
@@ -1874,7 +1875,7 @@ out_put_css_set_refs:
 		}
 	}
 out_cancel_attach:
-	if (retval) {
+	if (ret) {
 		for_each_css(css, i, cgrp) {
 			if (css == failed_css)
 				break;
@@ -1884,7 +1885,7 @@ out_cancel_attach:
 	}
 out_free_group_list:
 	flex_array_free(group);
-	return retval;
+	return ret;
 }
 
 /*
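
The exit path the patch keeps threading @ret through is the common
kernel goto-ladder: set the error once where it happens, then fall
through labels that each undo one more stage, guarding failure-only
unwinds with "if (ret)". A condensed, compilable sketch of that shape;
the acquire/release helpers are hypothetical stand-ins for the cgroup
calls, not the cgroup API itself:

#include <stdio.h>

/* Hypothetical stand-ins -- not cgroup API. */
static int acquire_a(void) { return 0; }	/* cf. flex_array_prealloc() */
static int acquire_b(void) { return 0; }	/* cf. ->can_attach() round */
static void cancel_b(void) { }			/* cf. ->cancel_attach() */
static void release_a(void) { }			/* cf. flex_array_free() */

static int attach_like(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		goto out_free;

	ret = acquire_b();
	if (ret)
		goto out_cancel;

	ret = 0;		/* success falls through the labels */
out_cancel:
	if (ret)
		cancel_b();	/* failure-only unwind */
out_free:
	release_a();		/* unconditional, like flex_array_free() */
	return ret;
}

int main(void)
{
	printf("attach_like() = %d\n", attach_like());
	return 0;
}

Cleanup that must run only on failure sits behind the "if (ret)" guards;
cleanup that always runs (the flex_array in the real function) sits
below every guard, so the success path and every error path share one
return statement.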