author		Li Zefan <lizefan@huawei.com>	2014-07-09 04:47:16 -0400
committer	Tejun Heo <tj@kernel.org>	2014-07-09 15:56:15 -0400
commit		1344ab9c2991b45bacfd2e26a8800a62663ae427
tree		310953e680ff3536034cfb982bc3afa32e300f5d
parent		e2b9a3d7d8f4ab2f3491b8ed2ac6af692a2269b2
cpuset: update cpuset->effective_{cpus,mems} at hotplug
We're going to have separate user-configured masks and effective masks.
Eventually the configured masks can only be changed by writing cpuset.cpus
and cpuset.mems, and they won't be restricted by the parent cpuset, while
the effective masks reflect cpu/memory hotplug and hierarchical
restriction. The effective masks are the real masks that apply to the
tasks in the cpuset.
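For orientation, a trimmed sketch of the two mask pairs in struct cpuset
(the field names match kernel/cpuset.c as of the parent commit; everything
else in the real struct is omitted):

	struct cpuset {
		cpumask_var_t cpus_allowed;	/* user-configured CPUs */
		nodemask_t mems_allowed;	/* user-configured memory nodes */
		cpumask_var_t effective_cpus;	/* CPUs actually applied to tasks */
		nodemask_t effective_mems;	/* nodes actually applied to tasks */
	};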
We calculate the effective masks this way (see the sketch after this list):
- top cpuset's effective_mask == online_mask, otherwise
- cpuset's effective_mask == configured_mask & parent's effective_mask;
  if the result is empty, it inherits the parent's effective mask.
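In kernel terms the rule would look roughly like this sketch (the helper
is illustrative and not part of this patch; the real propagation code
arrives later in the series):

	/* Sketch: derive a cpuset's effective CPU mask per the rule above. */
	static void compute_effective_cpumask(struct cpumask *new_cpus,
					      struct cpuset *cs,
					      struct cpuset *parent)
	{
		/* configured & parent's effective ... */
		cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus);
		/* ... but never leave the effective mask empty */
		if (cpumask_empty(new_cpus))
			cpumask_copy(new_cpus, parent->effective_cpus);
	}

For the top cpuset the effective mask simply tracks the online mask, which
is what the hunks below do with cpumask_copy(top_cpuset.effective_cpus,
&new_cpus).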
These behavior changes are for the default hierarchy only. For the legacy
hierarchy, effective_mask and configured_mask are the same, so we won't
break old interfaces.
To make cs->effective_{cpus,mems} the effective masks, we need to:
- update the effective masks at hotplug
- update the effective masks at config change
- take on the ancestor's mask when the effective mask is empty

The first item is done here.
This won't introduce any behavior change, since nothing consumes the
effective masks yet at this point in the series.
Signed-off-by: Li Zefan <lizefan@huawei.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
 kernel/cpuset.c | 4 ++++
 1 file changed, 4 insertions(+)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index ef0974c73b4b..94f651d2eee5 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2082,6 +2082,7 @@ retry:
 
 	mutex_lock(&callback_mutex);
 	cpumask_andnot(cs->cpus_allowed, cs->cpus_allowed, &off_cpus);
+	cpumask_andnot(cs->effective_cpus, cs->effective_cpus, &off_cpus);
 	mutex_unlock(&callback_mutex);
 
 	/*
@@ -2096,6 +2097,7 @@ retry:
 
 	mutex_lock(&callback_mutex);
 	nodes_andnot(cs->mems_allowed, cs->mems_allowed, off_mems);
+	nodes_andnot(cs->effective_mems, cs->effective_mems, off_mems);
 	mutex_unlock(&callback_mutex);
 
 	/*
@@ -2159,6 +2161,7 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
 	if (cpus_updated) {
 		mutex_lock(&callback_mutex);
 		cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
+		cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
 		mutex_unlock(&callback_mutex);
 		/* we don't mess with cpumasks of tasks in top_cpuset */
 	}
@@ -2167,6 +2170,7 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
 	if (mems_updated) {
 		mutex_lock(&callback_mutex);
 		top_cpuset.mems_allowed = new_mems;
+		top_cpuset.effective_mems = new_mems;
 		mutex_unlock(&callback_mutex);
 		update_tasks_nodemask(&top_cpuset);
 	}