-rw-r--r--   include/linux/cpuset.h |  6
-rw-r--r--   kernel/cpuset.c        | 20
-rw-r--r--   kernel/sched/core.c    | 62
3 files changed, 52 insertions(+), 36 deletions(-)
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index e9eaec522655..e0ffaf061ab7 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -22,7 +22,7 @@ extern int cpuset_init(void);
 extern void cpuset_init_smp(void);
 extern void cpuset_update_active_cpus(void);
 extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
-extern int cpuset_cpus_allowed_fallback(struct task_struct *p);
+extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
 extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
 #define cpuset_current_mems_allowed (current->mems_allowed)
 void cpuset_init_current_mems_allowed(void);
@@ -144,10 +144,8 @@ static inline void cpuset_cpus_allowed(struct task_struct *p,
 	cpumask_copy(mask, cpu_possible_mask);
 }
 
-static inline int cpuset_cpus_allowed_fallback(struct task_struct *p)
+static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
 {
-	do_set_cpus_allowed(p, cpu_possible_mask);
-	return cpumask_any(cpu_active_mask);
 }
 
 static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index a09ac2b9a661..c9837b74ab96 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2195,7 +2195,7 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
 	mutex_unlock(&callback_mutex);
 }
 
-int cpuset_cpus_allowed_fallback(struct task_struct *tsk)
+void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
 {
 	const struct cpuset *cs;
 	int cpu;
@@ -2219,22 +2219,10 @@ int cpuset_cpus_allowed_fallback(struct task_struct *tsk)
 	 * changes in tsk_cs()->cpus_allowed. Otherwise we can temporary
 	 * set any mask even if it is not right from task_cs() pov,
 	 * the pending set_cpus_allowed_ptr() will fix things.
+	 *
+	 * select_fallback_rq() will fix things ups and set cpu_possible_mask
+	 * if required.
 	 */
-
-	cpu = cpumask_any_and(&tsk->cpus_allowed, cpu_active_mask);
-	if (cpu >= nr_cpu_ids) {
-		/*
-		 * Either tsk->cpus_allowed is wrong (see above) or it
-		 * is actually empty. The latter case is only possible
-		 * if we are racing with remove_tasks_in_empty_cpuset().
-		 * Like above we can temporary set any mask and rely on
-		 * set_cpus_allowed_ptr() as synchronization point.
-		 */
-		do_set_cpus_allowed(tsk, cpu_possible_mask);
-		cpu = cpumask_any(cpu_active_mask);
-	}
-
-	return cpu;
 }
 
 void cpuset_init_current_mems_allowed(void)
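The net effect of this hunk is a narrower contract: cpuset_cpus_allowed_fallback() now only installs the cpuset's mask and no longer picks or returns a CPU; escalating to cpu_possible_mask moves into the caller, select_fallback_rq(). As a rough illustration, here is a standalone userspace C model of that two-step escalation; the mask_t type, the bitmask values, and the cpuset_fallback() helper are invented for the example and are not kernel APIs:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t mask_t;			/* stand-in for struct cpumask */

struct task { mask_t cpus_allowed; };

static const mask_t cpuset_mask   = 0x0c;	/* CPUs 2-3: the task's cpuset */
static const mask_t possible_mask = 0xff;	/* CPUs 0-7: cpu_possible_mask */

/*
 * Step 1, mirroring the patched kernel function: apply the cpuset's
 * mask and return nothing; the caller decides whether that helped.
 */
static void cpuset_fallback(struct task *t)
{
	t->cpus_allowed = cpuset_mask;
}

int main(void)
{
	struct task t = { .cpus_allowed = 0x04 };	/* was affine to CPU 2 only */
	mask_t active = 0xf0;				/* CPUs 0-3 have gone offline */

	cpuset_fallback(&t);				/* step 1: cpuset mask, still no overlap */
	if (!(t.cpus_allowed & active))
		t.cpus_allowed = possible_mask;		/* step 2: last resort, now the caller's job */

	printf("runnable on mask %#llx\n",
	       (unsigned long long)(t.cpus_allowed & active));
	return 0;
}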
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e3ccc13c4caa..9c1629c90b2d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1263,29 +1263,59 @@ EXPORT_SYMBOL_GPL(kick_process);
  */
 static int select_fallback_rq(int cpu, struct task_struct *p)
 {
-	int dest_cpu;
 	const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));
+	enum { cpuset, possible, fail } state = cpuset;
+	int dest_cpu;
 
 	/* Look for allowed, online CPU in same node. */
-	for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
+	for_each_cpu_mask(dest_cpu, *nodemask) {
+		if (!cpu_online(dest_cpu))
+			continue;
+		if (!cpu_active(dest_cpu))
+			continue;
 		if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
 			return dest_cpu;
+	}
 
-	/* Any allowed, online CPU? */
-	dest_cpu = cpumask_any_and(tsk_cpus_allowed(p), cpu_active_mask);
-	if (dest_cpu < nr_cpu_ids)
-		return dest_cpu;
+	for (;;) {
+		/* Any allowed, online CPU? */
+		for_each_cpu_mask(dest_cpu, *tsk_cpus_allowed(p)) {
+			if (!cpu_online(dest_cpu))
+				continue;
+			if (!cpu_active(dest_cpu))
+				continue;
+			goto out;
+		}
 
-	/* No more Mr. Nice Guy. */
-	dest_cpu = cpuset_cpus_allowed_fallback(p);
-	/*
-	 * Don't tell them about moving exiting tasks or
-	 * kernel threads (both mm NULL), since they never
-	 * leave kernel.
-	 */
-	if (p->mm && printk_ratelimit()) {
-		printk_sched("process %d (%s) no longer affine to cpu%d\n",
-				task_pid_nr(p), p->comm, cpu);
+		switch (state) {
+		case cpuset:
+			/* No more Mr. Nice Guy. */
+			cpuset_cpus_allowed_fallback(p);
+			state = possible;
+			break;
+
+		case possible:
+			do_set_cpus_allowed(p, cpu_possible_mask);
+			state = fail;
+			break;
+
+		case fail:
+			BUG();
+			break;
+		}
+	}
+
+out:
+	if (state != cpuset) {
+		/*
+		 * Don't tell them about moving exiting tasks or
+		 * kernel threads (both mm NULL), since they never
+		 * leave kernel.
+		 */
+		if (p->mm && printk_ratelimit()) {
+			printk_sched("process %d (%s) no longer affine to cpu%d\n",
+					task_pid_nr(p), p->comm, cpu);
+		}
 	}
 
 	return dest_cpu;
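To make the control flow of the rewritten select_fallback_rq() easier to follow outside the kernel tree, here is a compilable userspace sketch of the same three-state escalation: retry with the cpuset's mask, then with cpu_possible_mask, and treat a third miss as a hard bug. The mask_t bit-twiddling, the pick() helper, and all constants are hypothetical stand-ins for the kernel's cpumask machinery, not real kernel APIs:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef uint64_t mask_t;

enum state { st_cpuset, st_possible, st_fail };

/* Lowest set bit index, or -1 if the mask is empty. */
static int pick(mask_t m)
{
	for (int i = 0; i < 64; i++)
		if (m & (1ULL << i))
			return i;
	return -1;
}

/*
 * Mirror of the new loop: retry the pick after each mask widening,
 * and treat an empty intersection with cpu_possible_mask as fatal.
 */
static int select_fallback(mask_t *allowed, mask_t active,
			   mask_t cpuset_mask, mask_t possible_mask)
{
	enum state state = st_cpuset;

	for (;;) {
		int cpu = pick(*allowed & active);
		if (cpu >= 0)
			return cpu;		/* found an allowed, active CPU */

		switch (state) {
		case st_cpuset:			/* widen to the cpuset's mask */
			*allowed = cpuset_mask;
			state = st_possible;
			break;
		case st_possible:		/* widen to every possible CPU */
			*allowed = possible_mask;
			state = st_fail;
			break;
		case st_fail:			/* no active CPU exists at all */
			abort();		/* the kernel's BUG() */
		}
	}
}

int main(void)
{
	mask_t allowed = 0x01;			/* task was affine to CPU 0 only */

	/* CPUs 2-3 active, cpuset allows only CPU 1: both escalations fire. */
	int cpu = select_fallback(&allowed, 0x0c, 0x02, 0x0f);
	printf("fell back to CPU %d\n", cpu);	/* prints: fell back to CPU 2 */
	return 0;
}

The loop terminates for the same reason the kernel version does: each miss strictly widens the task's mask, and once the mask reaches cpu_possible_mask, a still-empty intersection with the active mask means no CPU in the system is usable, which the kernel treats as an unrecoverable BUG().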