path: root/kernel/cpuset.c
author     Tejun Heo <tj@kernel.org>  2013-01-07 11:51:07 -0500
committer  Tejun Heo <tj@kernel.org>  2013-01-07 11:51:07 -0500
commit     c8f699bb56aeae951e02fe2a46c9ada022535770 (patch)
tree       bd1099ce72de75b4fcd2c9e763eabf51e27e3f7d /kernel/cpuset.c
parent     0772324ae669f787b42fdb9fc5ac2c3d1c0df509 (diff)
cpuset: introduce ->css_on/offline()
Add cpuset_css_on/offline() and rearrange css init/exit such that:

* Allocation and clearing to the default values happen in css_alloc().
  Allocation now uses kzalloc().

* Config inheritance and registration happen in css_online().

* css_offline() undoes what css_online() did.

* css_free() frees.

This doesn't introduce any visible behavior changes. This will help
clean up locking.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Li Zefan <lizefan@huawei.com>
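To make the split concrete, the lifecycle described above can be modeled with a minimal, self-contained userspace sketch. This is not kernel code: the demo_* names and ncss are invented for illustration, and the sketch only mirrors the division of responsibility the patch establishes (alloc sets defaults, online inherits and registers, offline undoes online, free only frees).

/*
 * Illustrative userspace model only -- not kernel code.  The demo_*
 * names are hypothetical; they mirror the responsibilities the patch
 * gives to cpuset_css_alloc/online/offline/free.
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_css {
        struct demo_css *parent;
        int spread_page;        /* inherited from the parent in online() */
        int load_balance;       /* default set in alloc(), torn down in offline() */
};

static int ncss;                /* stands in for number_of_cpusets */

/* ->css_alloc(): allocate zeroed memory (kzalloc-style) and set defaults only. */
static struct demo_css *demo_alloc(struct demo_css *parent)
{
        struct demo_css *cs = calloc(1, sizeof(*cs));

        if (!cs)
                return NULL;
        cs->load_balance = 1;   /* default flag, like CS_SCHED_LOAD_BALANCE */
        cs->parent = parent;
        return cs;
}

/* ->css_online(): inherit configuration from the parent and register. */
static void demo_online(struct demo_css *cs)
{
        if (cs->parent)
                cs->spread_page = cs->parent->spread_page;
        ncss++;                 /* like number_of_cpusets++ */
}

/* ->css_offline(): tear down what must not survive past offline. */
static void demo_offline(struct demo_css *cs)
{
        cs->load_balance = 0;   /* like update_flag(CS_SCHED_LOAD_BALANCE, cs, 0) */
        ncss--;                 /* like number_of_cpusets-- */
}

/* ->css_free(): release memory and nothing else. */
static void demo_free(struct demo_css *cs)
{
        free(cs);
}

int main(void)
{
        struct demo_css *root = demo_alloc(NULL);
        struct demo_css *child;

        demo_online(root);
        root->spread_page = 1;  /* configure the root after it is online */

        child = demo_alloc(root);
        demo_online(child);     /* child picks up spread_page here, not in alloc */
        printf("child spread_page=%d, registered css count=%d\n",
               child->spread_page, ncss);

        demo_offline(child);
        demo_free(child);
        demo_offline(root);
        demo_free(root);
        return 0;
}

Keeping inheritance and registration out of the allocation path is presumably what enables the locking cleanup mentioned above: css_online()/css_offline() can be serialized separately from css_alloc()/css_free().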
Diffstat (limited to 'kernel/cpuset.c')
-rw-r--r--  kernel/cpuset.c  66
1 file changed, 44 insertions, 22 deletions
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 5372b6f5e5b3..1d7a611ff771 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1790,15 +1790,12 @@ static struct cftype files[] = {
 
 static struct cgroup_subsys_state *cpuset_css_alloc(struct cgroup *cont)
 {
-        struct cgroup *parent_cg = cont->parent;
-        struct cgroup *tmp_cg;
-        struct cpuset *parent, *cs;
+        struct cpuset *cs;
 
-        if (!parent_cg)
+        if (!cont->parent)
                 return &top_cpuset.css;
-        parent = cgroup_cs(parent_cg);
 
-        cs = kmalloc(sizeof(*cs), GFP_KERNEL);
+        cs = kzalloc(sizeof(*cs), GFP_KERNEL);
         if (!cs)
                 return ERR_PTR(-ENOMEM);
         if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL)) {
@@ -1806,22 +1803,34 @@ static struct cgroup_subsys_state *cpuset_css_alloc(struct cgroup *cont)
                 return ERR_PTR(-ENOMEM);
         }
 
-        cs->flags = 0;
-        if (is_spread_page(parent))
-                set_bit(CS_SPREAD_PAGE, &cs->flags);
-        if (is_spread_slab(parent))
-                set_bit(CS_SPREAD_SLAB, &cs->flags);
         set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
         cpumask_clear(cs->cpus_allowed);
         nodes_clear(cs->mems_allowed);
         fmeter_init(&cs->fmeter);
         cs->relax_domain_level = -1;
+        cs->parent = cgroup_cs(cont->parent);
+
+        return &cs->css;
+}
+
+static int cpuset_css_online(struct cgroup *cgrp)
+{
+        struct cpuset *cs = cgroup_cs(cgrp);
+        struct cpuset *parent = cs->parent;
+        struct cgroup *tmp_cg;
+
+        if (!parent)
+                return 0;
+
+        if (is_spread_page(parent))
+                set_bit(CS_SPREAD_PAGE, &cs->flags);
+        if (is_spread_slab(parent))
+                set_bit(CS_SPREAD_SLAB, &cs->flags);
 
-        cs->parent = parent;
         number_of_cpusets++;
 
-        if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &cont->flags))
-                goto skip_clone;
+        if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags))
+                return 0;
 
         /*
          * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is
@@ -1836,19 +1845,34 @@ static struct cgroup_subsys_state *cpuset_css_alloc(struct cgroup *cont)
          * changed to grant parent->cpus_allowed-sibling_cpus_exclusive
          * (and likewise for mems) to the new cgroup.
          */
-        list_for_each_entry(tmp_cg, &parent_cg->children, sibling) {
+        list_for_each_entry(tmp_cg, &cgrp->parent->children, sibling) {
                 struct cpuset *tmp_cs = cgroup_cs(tmp_cg);
 
                 if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs))
-                        goto skip_clone;
+                        return 0;
         }
 
         mutex_lock(&callback_mutex);
         cs->mems_allowed = parent->mems_allowed;
         cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
         mutex_unlock(&callback_mutex);
-skip_clone:
-        return &cs->css;
+
+        return 0;
+}
+
+static void cpuset_css_offline(struct cgroup *cgrp)
+{
+        struct cpuset *cs = cgroup_cs(cgrp);
+
+        /* css_offline is called w/o cgroup_mutex, grab it */
+        cgroup_lock();
+
+        if (is_sched_load_balance(cs))
+                update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
+
+        number_of_cpusets--;
+
+        cgroup_unlock();
 }
 
 /*
@@ -1861,10 +1885,6 @@ static void cpuset_css_free(struct cgroup *cont)
 {
         struct cpuset *cs = cgroup_cs(cont);
 
-        if (is_sched_load_balance(cs))
-                update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
-
-        number_of_cpusets--;
         free_cpumask_var(cs->cpus_allowed);
         kfree(cs);
 }
@@ -1872,6 +1892,8 @@ static void cpuset_css_free(struct cgroup *cont)
 struct cgroup_subsys cpuset_subsys = {
         .name = "cpuset",
         .css_alloc = cpuset_css_alloc,
+        .css_online = cpuset_css_online,
+        .css_offline = cpuset_css_offline,
         .css_free = cpuset_css_free,
         .can_attach = cpuset_can_attach,
         .attach = cpuset_attach,