Diffstat (limited to 'kernel/cpuset.c')
-rw-r--r--	kernel/cpuset.c	94
1 file changed, 66 insertions(+), 28 deletions(-)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 5d38bd74483c..d10946748ec2 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
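Every hunk below applies the same conversion: an on-stack nodemask_t becomes a pointer obtained with NODEMASK_ALLOC() and released with NODEMASK_FREE(). As a rough sketch (not the verbatim <linux/nodemask.h> text; the size threshold and exact spelling may differ between kernel versions), the macro pair behaves like this:

	/* Sketch only: approximates the <linux/nodemask.h> helpers. */
	#if NODES_SHIFT > 8			/* large nodemask_t: allocate it on the heap */
	#define NODEMASK_ALLOC(type, name, gfp_flags)	\
			type *name = kmalloc(sizeof(*name), gfp_flags)
	#define NODEMASK_FREE(m)		kfree(m)
	#else					/* small nodemask_t: keep it on the stack */
	#define NODEMASK_ALLOC(type, name, gfp_flags)	type _##name, *name = &_##name
	#define NODEMASK_FREE(m)		do {} while (0)
	#endif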
@@ -970,15 +970,20 @@ static void cpuset_change_nodemask(struct task_struct *p,
 	struct cpuset *cs;
 	int migrate;
 	const nodemask_t *oldmem = scan->data;
-	nodemask_t newmems;
+	NODEMASK_ALLOC(nodemask_t, newmems, GFP_KERNEL);
+
+	if (!newmems)
+		return;
 
 	cs = cgroup_cs(scan->cg);
-	guarantee_online_mems(cs, &newmems);
+	guarantee_online_mems(cs, newmems);
 
 	task_lock(p);
-	cpuset_change_task_nodemask(p, &newmems);
+	cpuset_change_task_nodemask(p, newmems);
 	task_unlock(p);
 
+	NODEMASK_FREE(newmems);
+
 	mm = get_task_mm(p);
 	if (!mm)
 		return;
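In compact form, the conversion applied in cpuset_change_nodemask() above (and repeated in the later hunks) looks like the following sketch, with the cpuset-specific work elided:

	NODEMASK_ALLOC(nodemask_t, newmems, GFP_KERNEL);	/* was: nodemask_t newmems; */

	if (!newmems)			/* the allocation may now fail */
		return;			/* the task's nodemask is simply left unchanged */

	guarantee_online_mems(cs, newmems);	/* callees already take a pointer, so '&' is dropped */
	cpuset_change_task_nodemask(p, newmems);

	NODEMASK_FREE(newmems);		/* every NODEMASK_ALLOC() is paired with a FREE */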
@@ -1048,16 +1053,21 @@ static void update_tasks_nodemask(struct cpuset *cs, const nodemask_t *oldmem,
 static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
 			   const char *buf)
 {
-	nodemask_t oldmem;
+	NODEMASK_ALLOC(nodemask_t, oldmem, GFP_KERNEL);
 	int retval;
 	struct ptr_heap heap;
 
+	if (!oldmem)
+		return -ENOMEM;
+
 	/*
 	 * top_cpuset.mems_allowed tracks node_stats[N_HIGH_MEMORY];
 	 * it's read-only
 	 */
-	if (cs == &top_cpuset)
-		return -EACCES;
+	if (cs == &top_cpuset) {
+		retval = -EACCES;
+		goto done;
+	}
 
 	/*
 	 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
@@ -1073,11 +1083,13 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
 			goto done;
 
 		if (!nodes_subset(trialcs->mems_allowed,
-				node_states[N_HIGH_MEMORY]))
-			return -EINVAL;
+				node_states[N_HIGH_MEMORY])) {
+			retval = -EINVAL;
+			goto done;
+		}
 	}
-	oldmem = cs->mems_allowed;
-	if (nodes_equal(oldmem, trialcs->mems_allowed)) {
+	*oldmem = cs->mems_allowed;
+	if (nodes_equal(*oldmem, trialcs->mems_allowed)) {
 		retval = 0;		/* Too easy - nothing to do */
 		goto done;
 	}
@@ -1093,10 +1105,11 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
 	cs->mems_allowed = trialcs->mems_allowed;
 	mutex_unlock(&callback_mutex);
 
-	update_tasks_nodemask(cs, &oldmem, &heap);
+	update_tasks_nodemask(cs, oldmem, &heap);
 
 	heap_free(&heap);
 done:
+	NODEMASK_FREE(oldmem);
 	return retval;
 }
 
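The three update_nodemask() hunks above belong together: once oldmem lives on the heap, the early 'return -EACCES' and 'return -EINVAL' exits have to become 'retval = ...; goto done;' so the single exit point can release the allocation. A condensed sketch of the resulting flow:

	NODEMASK_ALLOC(nodemask_t, oldmem, GFP_KERNEL);
	int retval;

	if (!oldmem)
		return -ENOMEM;		/* nothing to free yet, a direct return is still fine */

	if (cs == &top_cpuset) {
		retval = -EACCES;	/* formerly 'return -EACCES;' */
		goto done;
	}
	/* ... validate and apply trialcs->mems_allowed ... */
done:
	NODEMASK_FREE(oldmem);		/* reached on every path after the allocation */
	return retval;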
@@ -1381,39 +1394,47 @@ static void cpuset_attach(struct cgroup_subsys *ss, struct cgroup *cont,
 			  struct cgroup *oldcont, struct task_struct *tsk,
 			  bool threadgroup)
 {
-	nodemask_t from, to;
 	struct mm_struct *mm;
 	struct cpuset *cs = cgroup_cs(cont);
 	struct cpuset *oldcs = cgroup_cs(oldcont);
+	NODEMASK_ALLOC(nodemask_t, from, GFP_KERNEL);
+	NODEMASK_ALLOC(nodemask_t, to, GFP_KERNEL);
+
+	if (from == NULL || to == NULL)
+		goto alloc_fail;
 
 	if (cs == &top_cpuset) {
 		cpumask_copy(cpus_attach, cpu_possible_mask);
 	} else {
 		guarantee_online_cpus(cs, cpus_attach);
 	}
-	guarantee_online_mems(cs, &to);
+	guarantee_online_mems(cs, to);
 
 	/* do per-task migration stuff possibly for each in the threadgroup */
-	cpuset_attach_task(tsk, &to, cs);
+	cpuset_attach_task(tsk, to, cs);
 	if (threadgroup) {
 		struct task_struct *c;
 		rcu_read_lock();
 		list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
-			cpuset_attach_task(c, &to, cs);
+			cpuset_attach_task(c, to, cs);
 		}
 		rcu_read_unlock();
 	}
 
 	/* change mm; only needs to be done once even if threadgroup */
-	from = oldcs->mems_allowed;
-	to = cs->mems_allowed;
+	*from = oldcs->mems_allowed;
+	*to = cs->mems_allowed;
 	mm = get_task_mm(tsk);
 	if (mm) {
-		mpol_rebind_mm(mm, &to);
+		mpol_rebind_mm(mm, to);
 		if (is_memory_migrate(cs))
-			cpuset_migrate_mm(mm, &from, &to);
+			cpuset_migrate_mm(mm, from, to);
 		mmput(mm);
 	}
+
+alloc_fail:
+	NODEMASK_FREE(from);
+	NODEMASK_FREE(to);
 }
 
 /* The various types of files and directories in a cpuset file system */
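cpuset_attach() allocates two masks and funnels the failure case through a single alloc_fail label that frees both. This relies on NODEMASK_FREE() being safe on a NULL pointer (in the heap variant it maps to kfree(), and kfree(NULL) is a no-op), so the sketch below frees unconditionally even when only one allocation succeeded:

	NODEMASK_ALLOC(nodemask_t, from, GFP_KERNEL);
	NODEMASK_ALLOC(nodemask_t, to, GFP_KERNEL);

	if (from == NULL || to == NULL)
		goto alloc_fail;	/* skip the attach work, fall into the cleanup */

	/* ... rebind the tasks and the mm to the new cpuset ... */

alloc_fail:
	NODEMASK_FREE(from);		/* safe even if this allocation failed */
	NODEMASK_FREE(to);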
@@ -1558,13 +1579,21 @@ static int cpuset_sprintf_cpulist(char *page, struct cpuset *cs)
 
 static int cpuset_sprintf_memlist(char *page, struct cpuset *cs)
 {
-	nodemask_t mask;
+	NODEMASK_ALLOC(nodemask_t, mask, GFP_KERNEL);
+	int retval;
+
+	if (mask == NULL)
+		return -ENOMEM;
 
 	mutex_lock(&callback_mutex);
-	mask = cs->mems_allowed;
+	*mask = cs->mems_allowed;
 	mutex_unlock(&callback_mutex);
 
-	return nodelist_scnprintf(page, PAGE_SIZE, mask);
+	retval = nodelist_scnprintf(page, PAGE_SIZE, *mask);
+
+	NODEMASK_FREE(mask);
+
+	return retval;
 }
 
 static ssize_t cpuset_common_file_read(struct cgroup *cont,
@@ -1993,7 +2022,10 @@ static void scan_for_empty_cpusets(struct cpuset *root)
 	struct cpuset *cp;	/* scans cpusets being updated */
 	struct cpuset *child;	/* scans child cpusets of cp */
 	struct cgroup *cont;
-	nodemask_t oldmems;
+	NODEMASK_ALLOC(nodemask_t, oldmems, GFP_KERNEL);
+
+	if (oldmems == NULL)
+		return;
 
 	list_add_tail((struct list_head *)&root->stack_list, &queue);
 
@@ -2010,7 +2042,7 @@ static void scan_for_empty_cpusets(struct cpuset *root)
 		    nodes_subset(cp->mems_allowed, node_states[N_HIGH_MEMORY]))
 			continue;
 
-		oldmems = cp->mems_allowed;
+		*oldmems = cp->mems_allowed;
 
 		/* Remove offline cpus and mems from this cpuset. */
 		mutex_lock(&callback_mutex);
@@ -2026,9 +2058,10 @@ static void scan_for_empty_cpusets(struct cpuset *root)
 			remove_tasks_in_empty_cpuset(cp);
 		else {
 			update_tasks_cpumask(cp, NULL);
-			update_tasks_nodemask(cp, &oldmems, NULL);
+			update_tasks_nodemask(cp, oldmems, NULL);
 		}
 	}
+	NODEMASK_FREE(oldmems);
 }
 
 /*
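scan_for_empty_cpusets() needs oldmems only as per-iteration scratch space, so the three hunks above allocate it once before the hierarchy walk, overwrite it with '*oldmems = cp->mems_allowed' on each cpuset, and free it after the loop; if the allocation fails the function returns early and the hotplug-driven rebinding is silently skipped. Roughly:

	NODEMASK_ALLOC(nodemask_t, oldmems, GFP_KERNEL);

	if (oldmems == NULL)
		return;				/* nothing is rebound on allocation failure */

	while (/* more cpusets queued for the walk */) {
		*oldmems = cp->mems_allowed;	/* snapshot before trimming offline nodes */
		/* ... */
		update_tasks_nodemask(cp, oldmems, NULL);
	}
	NODEMASK_FREE(oldmems);			/* one allocation serves the whole walk */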
@@ -2086,16 +2119,19 @@ static int cpuset_track_online_cpus(struct notifier_block *unused_nb,
 static int cpuset_track_online_nodes(struct notifier_block *self,
 				unsigned long action, void *arg)
 {
-	nodemask_t oldmems;
+	NODEMASK_ALLOC(nodemask_t, oldmems, GFP_KERNEL);
+
+	if (oldmems == NULL)
+		return NOTIFY_DONE;
 
 	cgroup_lock();
 	switch (action) {
 	case MEM_ONLINE:
-		oldmems = top_cpuset.mems_allowed;
+		*oldmems = top_cpuset.mems_allowed;
 		mutex_lock(&callback_mutex);
 		top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
 		mutex_unlock(&callback_mutex);
-		update_tasks_nodemask(&top_cpuset, &oldmems, NULL);
+		update_tasks_nodemask(&top_cpuset, oldmems, NULL);
 		break;
 	case MEM_OFFLINE:
 		/*
@@ -2108,6 +2144,8 @@ static int cpuset_track_online_nodes(struct notifier_block *self,
 		break;
 	}
 	cgroup_unlock();
+
+	NODEMASK_FREE(oldmems);
 	return NOTIFY_OK;
 }
 #endif
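In the memory-hotplug notifier, an allocation failure is reported as NOTIFY_DONE (the event is ignored rather than treated as an error), and the mask is released after cgroup_unlock() on the normal path; condensed:

	NODEMASK_ALLOC(nodemask_t, oldmems, GFP_KERNEL);

	if (oldmems == NULL)
		return NOTIFY_DONE;	/* drop the event rather than report a failure */

	cgroup_lock();
	/* ... handle MEM_ONLINE / MEM_OFFLINE ... */
	cgroup_unlock();

	NODEMASK_FREE(oldmems);
	return NOTIFY_OK;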