author		Peter Zijlstra <peterz@infradead.org>	2017-09-07 05:13:38 -0400
committer	Ingo Molnar <mingo@kernel.org>		2017-09-07 05:45:21 -0400
commit		50e76632339d4655859523a39249dd95ee5e93e7
tree		79a94a384c25cadbd341f89f6729bcd86224856a
parent		a731ebe6f17bff9e7ca12ef227f9da4d5bdf8425
sched/cpuset/pm: Fix cpuset vs. suspend-resume bugs
Cpusets vs. suspend-resume is _completely_ broken. And it got noticed because it now results in non-cpuset usage breaking too.

On suspend, cpuset_cpu_inactive() doesn't call into cpuset_update_active_cpus() because it doesn't want to move tasks about; there is no need, since all tasks are frozen and won't run again until after we've resumed everything.

But this means that when we finally do call into cpuset_update_active_cpus() after resuming the last frozen CPU in cpuset_cpu_active(), the top_cpuset will not have any difference with the cpu_active_mask, and thus it will not in fact do _anything_.

So the cpuset configuration will not be restored. This was largely hidden because we would unconditionally create identity domains, and mobile users do not in fact use cpusets much, while servers that do use cpusets tend not to suspend-resume much.

An additional problem is that we did not in fact wait for the cpuset work to finish before resuming the tasks, allowing spurious migrations outside of the specified domains.

Fix the rebuild by introducing cpuset_force_rebuild() and fix the ordering with cpuset_wait_for_hotplug().

Reported-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: <stable@vger.kernel.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rafael J. Wysocki <rjw@rjwysocki.net>
Cc: Tejun Heo <tj@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Fixes: deb7aa308ea2 ("cpuset: reorganize CPU / memory hotplug handling")
Link: http://lkml.kernel.org/r/20170907091338.orwxrqkbfkki3c24@hirez.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
 include/linux/cpuset.h |  6
 kernel/cgroup/cpuset.c | 16
 kernel/power/process.c |  5
 kernel/sched/core.c    |  7
 4 files changed, 28 insertions(+), 6 deletions(-)
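In outline, the two new hooks pair up across the resume path. A condensed sketch of the intended flow, abridged from the hunks below (unrelated detail elided):

	/* kernel/sched/core.c -- runs as each previously frozen CPU comes online */
	static void cpuset_cpu_active(void)
	{
		if (cpuhp_tasks_frozen) {
			/* intermediate CPUs: a single identity domain suffices */
			partition_sched_domains(1, NULL, NULL);
			if (--num_cpus_frozen)
				return;
			/* last CPU of the resume: demand a full cpuset rebuild */
			cpuset_force_rebuild();
		}
		cpuset_update_active_cpus();	/* queues cpuset_hotplug_work */
	}

	/* kernel/power/process.c -- runs before the frozen tasks are woken */
	void thaw_processes(void)
	{
		/* ... */
		cpuset_wait_for_hotplug();	/* flush_work(&cpuset_hotplug_work) */
		/* ... only now are the thawed tasks allowed to run ... */
	}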
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index e74655d941b7..a1e6a33a4b03 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -51,7 +51,9 @@ static inline void cpuset_dec(void)
 
 extern int cpuset_init(void);
 extern void cpuset_init_smp(void);
+extern void cpuset_force_rebuild(void);
 extern void cpuset_update_active_cpus(void);
+extern void cpuset_wait_for_hotplug(void);
 extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
 extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
 extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
@@ -164,11 +166,15 @@ static inline bool cpusets_enabled(void) { return false; }
 static inline int cpuset_init(void) { return 0; }
 static inline void cpuset_init_smp(void) {}
 
+static inline void cpuset_force_rebuild(void) { }
+
 static inline void cpuset_update_active_cpus(void)
 {
 	partition_sched_domains(1, NULL, NULL);
 }
 
+static inline void cpuset_wait_for_hotplug(void) { }
+
 static inline void cpuset_cpus_allowed(struct task_struct *p,
 				       struct cpumask *mask)
 {
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 2f4039bafebb..0513ee39698b 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -2267,6 +2267,13 @@ retry:
 	mutex_unlock(&cpuset_mutex);
 }
 
+static bool force_rebuild;
+
+void cpuset_force_rebuild(void)
+{
+	force_rebuild = true;
+}
+
 /**
  * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
  *
@@ -2341,8 +2348,10 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
 	}
 
 	/* rebuild sched domains if cpus_allowed has changed */
-	if (cpus_updated)
+	if (cpus_updated || force_rebuild) {
+		force_rebuild = false;
 		rebuild_sched_domains();
+	}
 }
 
 void cpuset_update_active_cpus(void)
@@ -2355,6 +2364,11 @@ void cpuset_update_active_cpus(void)
 	schedule_work(&cpuset_hotplug_work);
 }
 
+void cpuset_wait_for_hotplug(void)
+{
+	flush_work(&cpuset_hotplug_work);
+}
+
 /*
  * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
  * Call this routine anytime after node_states[N_MEMORY] changes.
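Two details make this arrangement work. force_rebuild needs no dedicated locking here: it is set before the hotplug work is queued, and the queue/flush operations provide the needed ordering between the flag write and cpuset_hotplug_workfn() reading it. And flush_work() blocks until any queued instance of cpuset_hotplug_work has finished, returning immediately when none is pending. A minimal userspace analogue of this set-flag-then-queue / flush pattern (pthreads stand in for the kernel workqueue; every name below is illustrative, not kernel API):

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t  done = PTHREAD_COND_INITIALIZER;
	static bool force_rebuild;	/* set before the work is queued */
	static bool work_pending;	/* models the "queued" state of the work item */

	/* models cpuset_hotplug_workfn() */
	static void *hotplug_worker(void *arg)
	{
		(void)arg;
		pthread_mutex_lock(&lock);
		if (force_rebuild) {
			force_rebuild = false;
			printf("rebuilding sched domains\n");	/* rebuild_sched_domains() */
		}
		work_pending = false;
		pthread_cond_broadcast(&done);	/* wake any flusher */
		pthread_mutex_unlock(&lock);
		return NULL;
	}

	/* models cpuset_wait_for_hotplug() / flush_work() */
	static void wait_for_hotplug(void)
	{
		pthread_mutex_lock(&lock);
		while (work_pending)
			pthread_cond_wait(&done, &lock);
		pthread_mutex_unlock(&lock);
	}

	int main(void)
	{
		pthread_t worker;

		/* resume path: set the flag, then "queue" the work */
		pthread_mutex_lock(&lock);
		force_rebuild = true;
		work_pending = true;
		pthread_mutex_unlock(&lock);
		pthread_create(&worker, NULL, hotplug_worker, NULL);

		/* thaw path: don't wake tasks until the rebuild has finished */
		wait_for_hotplug();
		printf("safe to thaw tasks\n");

		pthread_join(worker, NULL);
		return 0;
	}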
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 78672d324a6e..50f25cb370c6 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -20,8 +20,9 @@
 #include <linux/workqueue.h>
 #include <linux/kmod.h>
 #include <trace/events/power.h>
+#include <linux/cpuset.h>
 
 /*
  * Timeout for stopping processes
  */
 unsigned int __read_mostly freeze_timeout_msecs = 20 * MSEC_PER_SEC;
@@ -202,6 +203,8 @@ void thaw_processes(void)
 	__usermodehelper_set_disable_depth(UMH_FREEZING);
 	thaw_workqueues();
 
+	cpuset_wait_for_hotplug();
+
 	read_lock(&tasklist_lock);
 	for_each_process_thread(g, p) {
 		/* No other threads should have PF_SUSPEND_TASK set */
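The placement is deliberate: thaw_workqueues() has to run first so that cpuset_hotplug_work can execute at all, and the wait must complete before the loop below wakes the frozen tasks. Without it, resume could interleave roughly like this (an illustrative timeline, not a captured trace):

	/*
	 * resume path                              thaw_processes()
	 * -----------                              ----------------
	 * cpuset_cpu_active()
	 *   cpuset_update_active_cpus()
	 *     schedule_work(&cpuset_hotplug_work)
	 *                                           __thaw_task(p)
	 *                                           ... p runs on the identity
	 *                                               domain, possibly on CPUs
	 *                                               outside its cpuset ...
	 * cpuset_hotplug_workfn()
	 *   rebuild_sched_domains()
	 *     ... too late: p may already have strayed ...
	 */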
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 6d2c7ff9ba98..136a76d80dbf 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5556,16 +5556,15 @@ static void cpuset_cpu_active(void)
 		 * operation in the resume sequence, just build a single sched
 		 * domain, ignoring cpusets.
 		 */
-		num_cpus_frozen--;
-		if (likely(num_cpus_frozen)) {
-			partition_sched_domains(1, NULL, NULL);
+		partition_sched_domains(1, NULL, NULL);
+		if (--num_cpus_frozen)
 			return;
-		}
 		/*
 		 * This is the last CPU online operation. So fall through and
 		 * restore the original sched domains by considering the
 		 * cpuset configurations.
 		 */
+		cpuset_force_rebuild();
 	}
 	cpuset_update_active_cpus();
 }
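Note the restructuring: the identity domain is now built for every frozen CPU, including the last one (harmless, since the forced rebuild immediately replaces it), and the likely() branch collapses into a plain pre-decrement test. For a machine that took three CPUs down during suspend (num_cpus_frozen == 3), resume now proceeds as follows (an illustrative walk-through, not a trace):

	/*
	 * CPU1 up: partition_sched_domains(1, NULL, NULL); num_cpus_frozen -> 2; return
	 * CPU2 up: partition_sched_domains(1, NULL, NULL); num_cpus_frozen -> 1; return
	 * CPU3 up: partition_sched_domains(1, NULL, NULL); num_cpus_frozen -> 0;
	 *          cpuset_force_rebuild();
	 *          cpuset_update_active_cpus();
	 *            -> cpuset_hotplug_workfn() now calls rebuild_sched_domains()
	 *               even though top_cpuset matches cpu_active_mask
	 */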