diff options
author | Li Zefan <lizefan@huawei.com> | 2014-07-09 04:47:41 -0400 |
---|---|---|
committer | Tejun Heo <tj@kernel.org> | 2014-07-09 15:56:16 -0400 |
commit | 554b0d1c845e42ef01d7f6f5f24b3e4c6129ce8f (patch) | |
tree | f887cbfb24c0f7e2a8c904913a368abb6d28f11c /kernel/cpuset.c | |
parent | 734d45130cb4f668fb33d182f6943523628582ef (diff) |
cpuset: inherit ancestor's masks if effective_{cpus, mems} becomes empty
We're going to have separate user-configured masks and effective ones.
Eventually configured masks can only be changed by writing cpuset.cpus
and cpuset.mems, and they won't be restricted by the parent cpuset. Meanwhile,
effective masks reflect cpu/memory hotplug and hierarchical restriction;
these are the real masks that apply to the tasks in the cpuset.
We calculate effective mask this way:
- top cpuset's effective_mask == online_mask, otherwise
- cpuset's effective_mask == configured_mask & parent effective_mask;
if the result is empty, it inherits the parent's effective mask.
Those behavior changes are for default hierarchy only. For legacy
hierarchy, effective_mask and configured_mask are the same, so we won't
break old interfaces.
To make cs->effective_{cpus,mems} to be effective masks, we need to
- update the effective masks at hotplug
- update the effective masks at config change
- take on ancestor's mask when the effective mask is empty
The last item is done here.
This won't introduce any behavior change.
Signed-off-by: Li Zefan <lizefan@huawei.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'kernel/cpuset.c')
-rw-r--r-- | kernel/cpuset.c | 22 |
1 files changed, 22 insertions, 0 deletions
diff --git a/kernel/cpuset.c b/kernel/cpuset.c index da766c3736c4..f8340026d01c 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c | |||
@@ -877,6 +877,13 @@ static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus) | |||
877 | 877 | ||
878 | cpumask_and(new_cpus, cp->cpus_allowed, parent->effective_cpus); | 878 | cpumask_and(new_cpus, cp->cpus_allowed, parent->effective_cpus); |
879 | 879 | ||
880 | /* | ||
881 | * If it becomes empty, inherit the effective mask of the | ||
882 | * parent, which is guaranteed to have some CPUs. | ||
883 | */ | ||
884 | if (cpumask_empty(new_cpus)) | ||
885 | cpumask_copy(new_cpus, parent->effective_cpus); | ||
886 | |||
880 | /* Skip the whole subtree if the cpumask remains the same. */ | 887 | /* Skip the whole subtree if the cpumask remains the same. */ |
881 | if (cpumask_equal(new_cpus, cp->effective_cpus)) { | 888 | if (cpumask_equal(new_cpus, cp->effective_cpus)) { |
882 | pos_css = css_rightmost_descendant(pos_css); | 889 | pos_css = css_rightmost_descendant(pos_css); |
@@ -1123,6 +1130,13 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems) | |||
1123 | 1130 | ||
1124 | nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems); | 1131 | nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems); |
1125 | 1132 | ||
1133 | /* | ||
1134 | * If it becomes empty, inherit the effective mask of the | ||
1135 | * parent, which is guaranteed to have some MEMs. | ||
1136 | */ | ||
1137 | if (nodes_empty(*new_mems)) | ||
1138 | *new_mems = parent->effective_mems; | ||
1139 | |||
1126 | /* Skip the whole subtree if the nodemask remains the same. */ | 1140 | /* Skip the whole subtree if the nodemask remains the same. */ |
1127 | if (nodes_equal(*new_mems, cp->effective_mems)) { | 1141 | if (nodes_equal(*new_mems, cp->effective_mems)) { |
1128 | pos_css = css_rightmost_descendant(pos_css); | 1142 | pos_css = css_rightmost_descendant(pos_css); |
@@ -2102,7 +2116,11 @@ retry: | |||
2102 | 2116 | ||
2103 | mutex_lock(&callback_mutex); | 2117 | mutex_lock(&callback_mutex); |
2104 | cpumask_andnot(cs->cpus_allowed, cs->cpus_allowed, &off_cpus); | 2118 | cpumask_andnot(cs->cpus_allowed, cs->cpus_allowed, &off_cpus); |
2119 | |||
2120 | /* Inherit the effective mask of the parent, if it becomes empty. */ | ||
2105 | cpumask_andnot(cs->effective_cpus, cs->effective_cpus, &off_cpus); | 2121 | cpumask_andnot(cs->effective_cpus, cs->effective_cpus, &off_cpus); |
2122 | if (on_dfl && cpumask_empty(cs->effective_cpus)) | ||
2123 | cpumask_copy(cs->effective_cpus, parent_cs(cs)->effective_cpus); | ||
2106 | mutex_unlock(&callback_mutex); | 2124 | mutex_unlock(&callback_mutex); |
2107 | 2125 | ||
2108 | /* | 2126 | /* |
@@ -2117,7 +2135,11 @@ retry: | |||
2117 | 2135 | ||
2118 | mutex_lock(&callback_mutex); | 2136 | mutex_lock(&callback_mutex); |
2119 | nodes_andnot(cs->mems_allowed, cs->mems_allowed, off_mems); | 2137 | nodes_andnot(cs->mems_allowed, cs->mems_allowed, off_mems); |
2138 | |||
2139 | /* Inherit the effective mask of the parent, if it becomes empty */ | ||
2120 | nodes_andnot(cs->effective_mems, cs->effective_mems, off_mems); | 2140 | nodes_andnot(cs->effective_mems, cs->effective_mems, off_mems); |
2141 | if (on_dfl && nodes_empty(cs->effective_mems)) | ||
2142 | cs->effective_mems = parent_cs(cs)->effective_mems; | ||
2121 | mutex_unlock(&callback_mutex); | 2143 | mutex_unlock(&callback_mutex); |
2122 | 2144 | ||
2123 | /* | 2145 | /* |