diff options
author | Li Zefan <lizefan@huawei.com> | 2014-07-09 04:48:42 -0400 |
---|---|---|
committer | Tejun Heo <tj@kernel.org> | 2014-07-09 15:56:17 -0400 |
commit | 7e88291beefbb758fa3b27e500ee2e0c888d6e44 (patch) | |
tree | 2ab62e8ec6c4cfe295c1050ef5762d5dd145ecd7 /kernel/cpuset.c | |
parent | ae1c802382f7af60aa54879fb4f5920a9df1ff48 (diff) |
cpuset: make cs->{cpus, mems}_allowed as user-configured masks
Now that we've used effective cpumasks to enforce the hierarchical manner,
we can use cs->{cpus,mems}_allowed as configured masks.
Configured masks can be changed by writing cpuset.cpus and cpuset.mems
only. The new behaviors are:
- They won't be changed by hotplug anymore.
- They won't be limited by its parent's masks.
This is a behavior change, but won't take effect unless mounted with
sane_behavior.
v2:
- Add comments to explain the differences between configured masks and
effective masks.
Signed-off-by: Li Zefan <lizefan@huawei.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'kernel/cpuset.c')
-rw-r--r-- | kernel/cpuset.c | 35 |
1 files changed, 29 insertions, 6 deletions
diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 820870a715f8..4b409d2ecbb9 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c | |||
@@ -77,6 +77,26 @@ struct cpuset { | |||
77 | 77 | ||
78 | unsigned long flags; /* "unsigned long" so bitops work */ | 78 | unsigned long flags; /* "unsigned long" so bitops work */ |
79 | 79 | ||
80 | /* | ||
81 | * On default hierarchy: | ||
82 | * | ||
83 | * The user-configured masks can only be changed by writing to | ||
84 | * cpuset.cpus and cpuset.mems, and won't be limited by the | ||
85 | * parent masks. | ||
86 | * | ||
87 | * The effective masks are the real masks that apply to the tasks | ||
88 | * in the cpuset. They may be changed if the configured masks are | ||
89 | * changed or hotplug happens. | ||
90 | * | ||
91 | * effective_mask == configured_mask & parent's effective_mask, | ||
92 | * and if it ends up empty, it will inherit the parent's mask. | ||
93 | * | ||
94 | * | ||
95 | * On legacy hierarchy: | ||
96 | * | ||
97 | * The user-configured masks are always the same with effective masks. | ||
98 | */ | ||
99 | |||
80 | /* user-configured CPUs and Memory Nodes allow to tasks */ | 100 | /* user-configured CPUs and Memory Nodes allow to tasks */ |
81 | cpumask_var_t cpus_allowed; | 101 | cpumask_var_t cpus_allowed; |
82 | nodemask_t mems_allowed; | 102 | nodemask_t mems_allowed; |
@@ -450,9 +470,9 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial) | |||
450 | 470 | ||
451 | par = parent_cs(cur); | 471 | par = parent_cs(cur); |
452 | 472 | ||
453 | /* We must be a subset of our parent cpuset */ | 473 | /* On legacy hierarchy, we must be a subset of our parent cpuset. */
454 | ret = -EACCES; | 474 | ret = -EACCES; |
455 | if (!is_cpuset_subset(trial, par)) | 475 | if (!cgroup_on_dfl(cur->css.cgroup) && !is_cpuset_subset(trial, par)) |
456 | goto out; | 476 | goto out; |
457 | 477 | ||
458 | /* | 478 | /* |
@@ -2167,6 +2187,7 @@ static void cpuset_hotplug_workfn(struct work_struct *work) | |||
2167 | static cpumask_t new_cpus; | 2187 | static cpumask_t new_cpus; |
2168 | static nodemask_t new_mems; | 2188 | static nodemask_t new_mems; |
2169 | bool cpus_updated, mems_updated; | 2189 | bool cpus_updated, mems_updated; |
2190 | bool on_dfl = cgroup_on_dfl(top_cpuset.css.cgroup); | ||
2170 | 2191 | ||
2171 | mutex_lock(&cpuset_mutex); | 2192 | mutex_lock(&cpuset_mutex); |
2172 | 2193 | ||
@@ -2174,13 +2195,14 @@ static void cpuset_hotplug_workfn(struct work_struct *work) | |||
2174 | cpumask_copy(&new_cpus, cpu_active_mask); | 2195 | cpumask_copy(&new_cpus, cpu_active_mask); |
2175 | new_mems = node_states[N_MEMORY]; | 2196 | new_mems = node_states[N_MEMORY]; |
2176 | 2197 | ||
2177 | cpus_updated = !cpumask_equal(top_cpuset.cpus_allowed, &new_cpus); | 2198 | cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus); |
2178 | mems_updated = !nodes_equal(top_cpuset.mems_allowed, new_mems); | 2199 | mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems); |
2179 | 2200 | ||
2180 | /* synchronize cpus_allowed to cpu_active_mask */ | 2201 | /* synchronize cpus_allowed to cpu_active_mask */ |
2181 | if (cpus_updated) { | 2202 | if (cpus_updated) { |
2182 | mutex_lock(&callback_mutex); | 2203 | mutex_lock(&callback_mutex); |
2183 | cpumask_copy(top_cpuset.cpus_allowed, &new_cpus); | 2204 | if (!on_dfl) |
2205 | cpumask_copy(top_cpuset.cpus_allowed, &new_cpus); | ||
2184 | cpumask_copy(top_cpuset.effective_cpus, &new_cpus); | 2206 | cpumask_copy(top_cpuset.effective_cpus, &new_cpus); |
2185 | mutex_unlock(&callback_mutex); | 2207 | mutex_unlock(&callback_mutex); |
2186 | /* we don't mess with cpumasks of tasks in top_cpuset */ | 2208 | /* we don't mess with cpumasks of tasks in top_cpuset */ |
@@ -2189,7 +2211,8 @@ static void cpuset_hotplug_workfn(struct work_struct *work) | |||
2189 | /* synchronize mems_allowed to N_MEMORY */ | 2211 | /* synchronize mems_allowed to N_MEMORY */ |
2190 | if (mems_updated) { | 2212 | if (mems_updated) { |
2191 | mutex_lock(&callback_mutex); | 2213 | mutex_lock(&callback_mutex); |
2192 | top_cpuset.mems_allowed = new_mems; | 2214 | if (!on_dfl) |
2215 | top_cpuset.mems_allowed = new_mems; | ||
2193 | top_cpuset.effective_mems = new_mems; | 2216 | top_cpuset.effective_mems = new_mems; |
2194 | mutex_unlock(&callback_mutex); | 2217 | mutex_unlock(&callback_mutex); |
2195 | update_tasks_nodemask(&top_cpuset); | 2218 | update_tasks_nodemask(&top_cpuset); |