author		Li Zefan <lizefan@huawei.com>		2014-07-09 04:49:04 -0400
committer	Tejun Heo <tj@kernel.org>		2014-07-09 15:56:17 -0400
commit		be4c9dd7aee5ecf3e748da68c27b38bdca70d444 (patch)
tree		0db06f07a41e43275123f29771580d00872ce372 /kernel/cpuset.c
parent		390a36aadf39e241c83035469aae48ed1a144088 (diff)
cpuset: enable onlined cpu/node in effective masks
First, take cpu1 offline:
# echo 0-1 > cpuset.cpus
# echo 0 > /sys/devices/system/cpu/cpu1/online
# cat cpuset.cpus
0-1
# cat cpuset.effective_cpus
0
Then online it:
# echo 1 > /sys/devices/system/cpu/cpu1/online
# cat cpuset.cpus
0-1
# cat cpuset.effective_cpus
0-1
And cpuset will bring it back into the effective mask.
The implementation is quite straightforward. Instead of calculating the
offlined cpus/mems and doing updates, we just set the new effective_mask
to online_mask & configured_mask.
This is a behavior change for the default hierarchy only; the legacy
hierarchy is not affected.
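To make that rule concrete, here is a minimal user-space sketch of the mask
computation (plain unsigned long bitmasks stand in for the kernel's
cpumask/nodemask types; effective_mask() is a hypothetical helper, not a
kernel function):

/* Hypothetical sketch: new effective mask = configured & parent's effective,
 * with the default hierarchy's fallback to the parent when the result is
 * empty. For the top cpuset, parent_effective is simply the online mask. */
#include <stdio.h>

static unsigned long effective_mask(unsigned long configured,
				    unsigned long parent_effective)
{
	unsigned long eff = configured & parent_effective;

	/* On the default hierarchy, an empty intersection falls back to
	 * the parent's effective mask. */
	return eff ? eff : parent_effective;
}

int main(void)
{
	unsigned long configured = 0x3;	/* cpuset.cpus = 0-1 */

	/* cpu1 offline: parent's effective mask covers cpu0 only. */
	printf("%#lx\n", effective_mask(configured, 0x1));	/* 0x1 */

	/* cpu1 back online: the configured cpu reappears automatically. */
	printf("%#lx\n", effective_mask(configured, 0x3));	/* 0x3 */
	return 0;
}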
v2:
- make the refactoring of cpuset_hotplug_update_tasks() a separate patch,
  as suggested by Tejun.
- make hotplug_update_tasks_insane() use @new_cpus and @new_mems as
  hotplug_update_tasks_sane() does.
Signed-off-by: Li Zefan <lizefan@huawei.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'kernel/cpuset.c')
-rw-r--r--	kernel/cpuset.c	65
1 file changed, 36 insertions(+), 29 deletions(-)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 41822e2027c1..c47cb940712e 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2080,26 +2080,27 @@ static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
 	}
 }
 
-static void hotplug_update_tasks_legacy(struct cpuset *cs,
-					struct cpumask *off_cpus,
-					nodemask_t *off_mems)
+static void
+hotplug_update_tasks_legacy(struct cpuset *cs,
+			    struct cpumask *new_cpus, nodemask_t *new_mems,
+			    bool cpus_updated, bool mems_updated)
 {
 	bool is_empty;
 
 	mutex_lock(&callback_mutex);
-	cpumask_andnot(cs->cpus_allowed, cs->cpus_allowed, off_cpus);
-	cpumask_andnot(cs->effective_cpus, cs->effective_cpus, off_cpus);
-	nodes_andnot(cs->mems_allowed, cs->mems_allowed, *off_mems);
-	nodes_andnot(cs->effective_mems, cs->effective_mems, *off_mems);
+	cpumask_copy(cs->cpus_allowed, new_cpus);
+	cpumask_copy(cs->effective_cpus, new_cpus);
+	cs->mems_allowed = *new_mems;
+	cs->effective_mems = *new_mems;
 	mutex_unlock(&callback_mutex);
 
 	/*
 	 * Don't call update_tasks_cpumask() if the cpuset becomes empty,
 	 * as the tasks will be migratecd to an ancestor.
 	 */
-	if (!cpumask_empty(off_cpus) && !cpumask_empty(cs->cpus_allowed))
+	if (cpus_updated && !cpumask_empty(cs->cpus_allowed))
 		update_tasks_cpumask(cs);
-	if (!nodes_empty(*off_mems) && !nodes_empty(cs->mems_allowed))
+	if (mems_updated && !nodes_empty(cs->mems_allowed))
 		update_tasks_nodemask(cs);
 
 	is_empty = cpumask_empty(cs->cpus_allowed) ||
@@ -2118,24 +2119,24 @@ static void hotplug_update_tasks_legacy(struct cpuset *cs,
 	mutex_lock(&cpuset_mutex);
 }
 
-static void hotplug_update_tasks(struct cpuset *cs,
-				 struct cpumask *off_cpus,
-				 nodemask_t *off_mems)
+static void
+hotplug_update_tasks(struct cpuset *cs,
+		     struct cpumask *new_cpus, nodemask_t *new_mems,
+		     bool cpus_updated, bool mems_updated)
 {
+	if (cpumask_empty(new_cpus))
+		cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus);
+	if (nodes_empty(*new_mems))
+		*new_mems = parent_cs(cs)->effective_mems;
+
 	mutex_lock(&callback_mutex);
-	cpumask_andnot(cs->effective_cpus, cs->effective_cpus, off_cpus);
-	if (cpumask_empty(cs->effective_cpus))
-		cpumask_copy(cs->effective_cpus,
-			     parent_cs(cs)->effective_cpus);
-
-	nodes_andnot(cs->effective_mems, cs->effective_mems, *off_mems);
-	if (nodes_empty(cs->effective_mems))
-		cs->effective_mems = parent_cs(cs)->effective_mems;
+	cpumask_copy(cs->effective_cpus, new_cpus);
+	cs->effective_mems = *new_mems;
 	mutex_unlock(&callback_mutex);
 
-	if (!cpumask_empty(off_cpus))
+	if (cpus_updated)
 		update_tasks_cpumask(cs);
-	if (!nodes_empty(*off_mems))
+	if (mems_updated)
 		update_tasks_nodemask(cs);
 }
 
@@ -2149,8 +2150,10 @@ static void hotplug_update_tasks(struct cpuset *cs,
  */
 static void cpuset_hotplug_update_tasks(struct cpuset *cs)
 {
-	static cpumask_t off_cpus;
-	static nodemask_t off_mems;
+	static cpumask_t new_cpus;
+	static nodemask_t new_mems;
+	bool cpus_updated;
+	bool mems_updated;
 retry:
 	wait_event(cpuset_attach_wq, cs->attach_in_progress == 0);
 
@@ -2165,14 +2168,18 @@ retry:
 		goto retry;
 	}
 
-	cpumask_andnot(&off_cpus, cs->effective_cpus,
-		       top_cpuset.effective_cpus);
-	nodes_andnot(off_mems, cs->effective_mems, top_cpuset.effective_mems);
+	cpumask_and(&new_cpus, cs->cpus_allowed, parent_cs(cs)->effective_cpus);
+	nodes_and(new_mems, cs->mems_allowed, parent_cs(cs)->effective_mems);
+
+	cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);
+	mems_updated = !nodes_equal(new_mems, cs->effective_mems);
 
 	if (cgroup_on_dfl(cs->css.cgroup))
-		hotplug_update_tasks(cs, &off_cpus, &off_mems);
+		hotplug_update_tasks(cs, &new_cpus, &new_mems,
+				     cpus_updated, mems_updated);
 	else
-		hotplug_update_tasks_legacy(cs, &off_cpus, &off_mems);
+		hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems,
+					    cpus_updated, mems_updated);
 
 	mutex_unlock(&cpuset_mutex);
 }