author		Li Zefan <lizf@cn.fujitsu.com>	2009-01-07 21:08:44 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-01-08 11:31:11 -0500
commit		300ed6cbb70718872cb4936d1d22ef295f9ba44d (patch)
tree		56a1dd86be2d6e35b329131ef353a44b929bd53c /kernel/cpuset.c
parent		645fcc9d2f6946f97a41c8d00edee38f8a6f0060 (diff)
cpuset: convert cpuset->cpus_allowed to cpumask_var_t
Impact: use new cpumask API

This patch mainly does the following things:
- change cs->cpus_allowed from cpumask_t to cpumask_var_t
- call alloc_bootmem_cpumask_var() for top_cpuset in cpuset_init_early()
- call alloc_cpumask_var() for other cpusets
- replace cpus_xxx() with cpumask_xxx()

Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Acked-by: Mike Travis <travis@sgi.com>
Cc: Paul Menage <menage@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel/cpuset.c')
-rw-r--r--	kernel/cpuset.c	100
1 file changed, 60 insertions(+), 40 deletions(-)
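For reference, the cpumask_var_t pattern this patch adopts looks roughly like the sketch below (hypothetical struct and helper names, not code from this diff). With CONFIG_CPUMASK_OFFSTACK=y the mask gets its own heap allocation, which is why every kmalloc() of a cpuset now needs a matching alloc_cpumask_var()/free_cpumask_var() pair; without that option, cpumask_var_t is an ordinary array and the alloc/free calls compile to no-ops.

#include <linux/cpumask.h>
#include <linux/slab.h>

/* Hypothetical container, standing in for struct cpuset. */
struct foo {
	cpumask_var_t mask;		/* was: cpumask_t mask; */
};

static struct foo *foo_alloc(void)
{
	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;
	/* Allocates separate storage only when CONFIG_CPUMASK_OFFSTACK=y. */
	if (!alloc_cpumask_var(&f->mask, GFP_KERNEL)) {
		kfree(f);
		return NULL;
	}
	cpumask_copy(f->mask, cpu_online_mask);
	return f;
}

static void foo_free(struct foo *f)
{
	free_cpumask_var(f->mask);	/* must pair with alloc_cpumask_var() */
	kfree(f);
}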
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index f66527bfd216..fc294aa9a97a 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -84,7 +84,7 @@ struct cpuset {
 	struct cgroup_subsys_state css;
 
 	unsigned long flags;		/* "unsigned long" so bitops work */
-	cpumask_t cpus_allowed;		/* CPUs allowed to tasks in cpuset */
+	cpumask_var_t cpus_allowed;	/* CPUs allowed to tasks in cpuset */
 	nodemask_t mems_allowed;	/* Memory Nodes allowed to tasks */
 
 	struct cpuset *parent;		/* my parent */
@@ -195,8 +195,6 @@ static int cpuset_mems_generation;
 
 static struct cpuset top_cpuset = {
 	.flags = ((1 << CS_CPU_EXCLUSIVE) | (1 << CS_MEM_EXCLUSIVE)),
-	.cpus_allowed = CPU_MASK_ALL,
-	.mems_allowed = NODE_MASK_ALL,
 };
 
 /*
@@ -278,7 +276,7 @@ static struct file_system_type cpuset_fs_type = {
 };
 
 /*
- * Return in *pmask the portion of a cpusets's cpus_allowed that
+ * Return in pmask the portion of a cpusets's cpus_allowed that
  * are online.  If none are online, walk up the cpuset hierarchy
  * until we find one that does have some online cpus.  If we get
  * all the way to the top and still haven't found any online cpus,
@@ -293,13 +291,13 @@ static struct file_system_type cpuset_fs_type = {
 
 static void guarantee_online_cpus(const struct cpuset *cs, cpumask_t *pmask)
 {
-	while (cs && !cpus_intersects(cs->cpus_allowed, cpu_online_map))
+	while (cs && !cpumask_intersects(cs->cpus_allowed, cpu_online_mask))
 		cs = cs->parent;
 	if (cs)
-		cpus_and(*pmask, cs->cpus_allowed, cpu_online_map);
+		cpumask_and(pmask, cs->cpus_allowed, cpu_online_mask);
 	else
-		*pmask = cpu_online_map;
-	BUG_ON(!cpus_intersects(*pmask, cpu_online_map));
+		cpumask_copy(pmask, cpu_online_mask);
+	BUG_ON(!cpumask_intersects(pmask, cpu_online_mask));
 }
 
 /*
@@ -409,7 +407,7 @@ void cpuset_update_task_memory_state(void)
 
 static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
 {
-	return	cpus_subset(p->cpus_allowed, q->cpus_allowed) &&
+	return	cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
 		nodes_subset(p->mems_allowed, q->mems_allowed) &&
 		is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
 		is_mem_exclusive(p) <= is_mem_exclusive(q);
@@ -421,7 +419,19 @@ static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
  */
 static struct cpuset *alloc_trial_cpuset(const struct cpuset *cs)
 {
-	return kmemdup(cs, sizeof(*cs), GFP_KERNEL);
+	struct cpuset *trial;
+
+	trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
+	if (!trial)
+		return NULL;
+
+	if (!alloc_cpumask_var(&trial->cpus_allowed, GFP_KERNEL)) {
+		kfree(trial);
+		return NULL;
+	}
+	cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
+
+	return trial;
 }
 
 /**
@@ -430,6 +440,7 @@ static struct cpuset *alloc_trial_cpuset(const struct cpuset *cs)
  */
 static void free_trial_cpuset(struct cpuset *trial)
 {
+	free_cpumask_var(trial->cpus_allowed);
 	kfree(trial);
 }
 
@@ -482,7 +493,7 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
 		c = cgroup_cs(cont);
 		if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
 		    c != cur &&
-		    cpus_intersects(trial->cpus_allowed, c->cpus_allowed))
+		    cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
 			return -EINVAL;
 		if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
 		    c != cur &&
@@ -492,7 +503,7 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
 
 	/* Cpusets with tasks can't have empty cpus_allowed or mems_allowed */
 	if (cgroup_task_count(cur->css.cgroup)) {
-		if (cpus_empty(trialcs->cpus_allowed) ||
+		if (cpumask_empty(trialcs->cpus_allowed) ||
 		    nodes_empty(trialcs->mems_allowed)) {
 			return -ENOSPC;
 		}
@@ -507,7 +518,7 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
  */
 static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
 {
-	return cpus_intersects(a->cpus_allowed, b->cpus_allowed);
+	return cpumask_intersects(a->cpus_allowed, b->cpus_allowed);
 }
 
 static void
@@ -532,7 +543,7 @@ update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c)
 		cp = list_first_entry(&q, struct cpuset, stack_list);
 		list_del(q.next);
 
-		if (cpus_empty(cp->cpus_allowed))
+		if (cpumask_empty(cp->cpus_allowed))
 			continue;
 
 		if (is_sched_load_balance(cp))
@@ -627,7 +638,7 @@ static int generate_sched_domains(cpumask_t **domains,
 			*dattr = SD_ATTR_INIT;
 			update_domain_attr_tree(dattr, &top_cpuset);
 		}
-		*doms = top_cpuset.cpus_allowed;
+		cpumask_copy(doms, top_cpuset.cpus_allowed);
 
 		ndoms = 1;
 		goto done;
@@ -646,7 +657,7 @@ static int generate_sched_domains(cpumask_t **domains,
 		cp = list_first_entry(&q, struct cpuset, stack_list);
 		list_del(q.next);
 
-		if (cpus_empty(cp->cpus_allowed))
+		if (cpumask_empty(cp->cpus_allowed))
 			continue;
 
 		/*
@@ -739,7 +750,7 @@ restart:
 			struct cpuset *b = csa[j];
 
 			if (apn == b->pn) {
-				cpus_or(*dp, *dp, b->cpus_allowed);
+				cpumask_or(dp, dp, b->cpus_allowed);
 				if (dattr)
 					update_domain_attr_tree(dattr + nslot, b);
 
@@ -848,7 +859,7 @@ void rebuild_sched_domains(void)
 static int cpuset_test_cpumask(struct task_struct *tsk,
 			       struct cgroup_scanner *scan)
 {
-	return !cpus_equal(tsk->cpus_allowed,
+	return !cpumask_equal(&tsk->cpus_allowed,
 			   (cgroup_cs(scan->cg))->cpus_allowed);
 }
 
@@ -866,7 +877,7 @@ static int cpuset_test_cpumask(struct task_struct *tsk,
 static void cpuset_change_cpumask(struct task_struct *tsk,
 				  struct cgroup_scanner *scan)
 {
-	set_cpus_allowed_ptr(tsk, &((cgroup_cs(scan->cg))->cpus_allowed));
+	set_cpus_allowed_ptr(tsk, ((cgroup_cs(scan->cg))->cpus_allowed));
 }
 
 /**
@@ -916,13 +927,13 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
 	 * with tasks have cpus.
 	 */
 	if (!*buf) {
-		cpus_clear(trialcs->cpus_allowed);
+		cpumask_clear(trialcs->cpus_allowed);
 	} else {
-		retval = cpulist_parse(buf, &trialcs->cpus_allowed);
+		retval = cpulist_parse(buf, trialcs->cpus_allowed);
 		if (retval < 0)
 			return retval;
 
-		if (!cpus_subset(trialcs->cpus_allowed, cpu_online_map))
+		if (!cpumask_subset(trialcs->cpus_allowed, cpu_online_mask))
 			return -EINVAL;
 	}
 	retval = validate_change(cs, trialcs);
@@ -930,7 +941,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
 		return retval;
 
 	/* Nothing to do if the cpus didn't change */
-	if (cpus_equal(cs->cpus_allowed, trialcs->cpus_allowed))
+	if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
 		return 0;
 
 	retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
@@ -940,7 +951,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
 	is_load_balanced = is_sched_load_balance(trialcs);
 
 	mutex_lock(&callback_mutex);
-	cs->cpus_allowed = trialcs->cpus_allowed;
+	cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
 	mutex_unlock(&callback_mutex);
 
 	/*
@@ -1028,7 +1039,7 @@ static int update_tasks_nodemask(struct cpuset *cs, const nodemask_t *oldmem)
 	cpuset_being_rebound = cs;		/* causes mpol_dup() rebind */
 
 	fudge = 10;				/* spare mmarray[] slots */
-	fudge += cpus_weight(cs->cpus_allowed);	/* imagine one fork-bomb/cpu */
+	fudge += cpumask_weight(cs->cpus_allowed);/* imagine 1 fork-bomb/cpu */
 	retval = -ENOMEM;
 
 	/*
@@ -1176,7 +1187,8 @@ static int update_relax_domain_level(struct cpuset *cs, s64 val)
 
 	if (val != cs->relax_domain_level) {
 		cs->relax_domain_level = val;
-		if (!cpus_empty(cs->cpus_allowed) && is_sched_load_balance(cs))
+		if (!cpumask_empty(cs->cpus_allowed) &&
+		    is_sched_load_balance(cs))
 			async_rebuild_sched_domains();
 	}
 
@@ -1219,7 +1231,7 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
 	cs->flags = trialcs->flags;
 	mutex_unlock(&callback_mutex);
 
-	if (!cpus_empty(trialcs->cpus_allowed) && balance_flag_changed)
+	if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
 		async_rebuild_sched_domains();
 
 out:
@@ -1335,12 +1347,12 @@ static int cpuset_can_attach(struct cgroup_subsys *ss,
 	struct cpuset *cs = cgroup_cs(cont);
 	int ret = 0;
 
-	if (cpus_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
+	if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
 		return -ENOSPC;
 
 	if (tsk->flags & PF_THREAD_BOUND) {
 		mutex_lock(&callback_mutex);
-		if (!cpus_equal(tsk->cpus_allowed, cs->cpus_allowed))
+		if (!cpumask_equal(&tsk->cpus_allowed, cs->cpus_allowed))
 			ret = -EINVAL;
 		mutex_unlock(&callback_mutex);
 	}
@@ -1516,7 +1528,7 @@ static int cpuset_sprintf_cpulist(char *page, struct cpuset *cs)
 	int ret;
 
 	mutex_lock(&callback_mutex);
-	ret = cpulist_scnprintf(page, PAGE_SIZE, &cs->cpus_allowed);
+	ret = cpulist_scnprintf(page, PAGE_SIZE, cs->cpus_allowed);
 	mutex_unlock(&callback_mutex);
 
 	return ret;
@@ -1755,7 +1767,7 @@ static void cpuset_post_clone(struct cgroup_subsys *ss,
 	parent_cs = cgroup_cs(parent);
 
 	cs->mems_allowed = parent_cs->mems_allowed;
-	cs->cpus_allowed = parent_cs->cpus_allowed;
+	cpumask_copy(cs->cpus_allowed, parent_cs->cpus_allowed);
 	return;
 }
 
@@ -1781,6 +1793,10 @@ static struct cgroup_subsys_state *cpuset_create(
 	cs = kmalloc(sizeof(*cs), GFP_KERNEL);
 	if (!cs)
 		return ERR_PTR(-ENOMEM);
+	if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL)) {
+		kfree(cs);
+		return ERR_PTR(-ENOMEM);
+	}
 
 	cpuset_update_task_memory_state();
 	cs->flags = 0;
@@ -1789,7 +1805,7 @@ static struct cgroup_subsys_state *cpuset_create(
 	if (is_spread_slab(parent))
 		set_bit(CS_SPREAD_SLAB, &cs->flags);
 	set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
-	cpus_clear(cs->cpus_allowed);
+	cpumask_clear(cs->cpus_allowed);
 	nodes_clear(cs->mems_allowed);
 	cs->mems_generation = cpuset_mems_generation++;
 	fmeter_init(&cs->fmeter);
@@ -1816,6 +1832,7 @@ static void cpuset_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
 	update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
 
 	number_of_cpusets--;
+	free_cpumask_var(cs->cpus_allowed);
 	kfree(cs);
 }
 
@@ -1839,6 +1856,8 @@ struct cgroup_subsys cpuset_subsys = {
 
 int __init cpuset_init_early(void)
 {
+	alloc_bootmem_cpumask_var(&top_cpuset.cpus_allowed);
+
 	top_cpuset.mems_generation = cpuset_mems_generation++;
 	return 0;
 }
@@ -1854,7 +1873,7 @@ int __init cpuset_init(void)
 {
 	int err = 0;
 
-	cpus_setall(top_cpuset.cpus_allowed);
+	cpumask_setall(top_cpuset.cpus_allowed);
 	nodes_setall(top_cpuset.mems_allowed);
 
 	fmeter_init(&top_cpuset.fmeter);
@@ -1943,7 +1962,7 @@ static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
 	 * has online cpus, so can't be empty).
 	 */
 	parent = cs->parent;
-	while (cpus_empty(parent->cpus_allowed) ||
+	while (cpumask_empty(parent->cpus_allowed) ||
 			nodes_empty(parent->mems_allowed))
 		parent = parent->parent;
 
@@ -1984,7 +2003,7 @@ static void scan_for_empty_cpusets(struct cpuset *root)
 		}
 
 		/* Continue past cpusets with all cpus, mems online */
-		if (cpus_subset(cp->cpus_allowed, cpu_online_map) &&
+		if (cpumask_subset(cp->cpus_allowed, cpu_online_mask) &&
 		    nodes_subset(cp->mems_allowed, node_states[N_HIGH_MEMORY]))
 			continue;
 
@@ -1992,13 +2011,14 @@ static void scan_for_empty_cpusets(struct cpuset *root)
 
 		/* Remove offline cpus and mems from this cpuset. */
 		mutex_lock(&callback_mutex);
-		cpus_and(cp->cpus_allowed, cp->cpus_allowed, cpu_online_map);
+		cpumask_and(cp->cpus_allowed, cp->cpus_allowed,
+			    cpu_online_mask);
 		nodes_and(cp->mems_allowed, cp->mems_allowed,
 				node_states[N_HIGH_MEMORY]);
 		mutex_unlock(&callback_mutex);
 
 		/* Move tasks from the empty cpuset to a parent */
-		if (cpus_empty(cp->cpus_allowed) ||
+		if (cpumask_empty(cp->cpus_allowed) ||
 			nodes_empty(cp->mems_allowed))
 			remove_tasks_in_empty_cpuset(cp);
 		else {
@@ -2039,7 +2059,7 @@ static int cpuset_track_online_cpus(struct notifier_block *unused_nb,
 	}
 
 	cgroup_lock();
-	top_cpuset.cpus_allowed = cpu_online_map;
+	cpumask_copy(top_cpuset.cpus_allowed, cpu_online_mask);
 	scan_for_empty_cpusets(&top_cpuset);
 	ndoms = generate_sched_domains(&doms, &attr);
 	cgroup_unlock();
@@ -2084,7 +2104,7 @@ static int cpuset_track_online_nodes(struct notifier_block *self,
 
 void __init cpuset_init_smp(void)
 {
-	top_cpuset.cpus_allowed = cpu_online_map;
+	cpumask_copy(top_cpuset.cpus_allowed, cpu_online_mask);
 	top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
 
 	hotcpu_notifier(cpuset_track_online_cpus, 0);
@@ -2096,7 +2116,7 @@ void __init cpuset_init_smp(void)
  * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
  * @pmask: pointer to cpumask_t variable to receive cpus_allowed set.
  *
- * Description: Returns the cpumask_t cpus_allowed of the cpuset
+ * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
  * attached to the specified @tsk.  Guaranteed to return some non-empty
  * subset of cpu_online_map, even if this means going outside the
  * tasks cpuset.