author		Tejun Heo <tj@kernel.org>	2017-12-04 17:41:11 -0500
committer	Tejun Heo <tj@kernel.org>	2017-12-04 17:41:11 -0500
commit		11db855c3d06e82f432cb1bafd73296586d5ceec (patch)
tree		0b3f09d6e7da0c4bf5fdcae5b617ce046e6b8aa8
parent		52cf373c37a684f8fc279d541307fad39d206376 (diff)
Revert "cpuset: Make cpuset hotplug synchronous"
This reverts commit 1599a185f0e6113be185b9fb809c621c73865829.

This and the previous commit led to another circular locking scenario, and the scenario that the reverted commit fixed no longer exists after e8b3f8db7aad ("workqueue/hotplug: simplify workqueue_offline_cpu()"), which removed work item flushing from the hotplug path.

Revert it for now.

Signed-off-by: Tejun Heo <tj@kernel.org>
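For reference, the asynchronous pattern this revert restores looks roughly like the sketch below. It is a minimal sketch only, assuming a simplified standalone context: the example_* names are hypothetical stand-ins for the cpuset symbols in the diff that follows, and the real work function also reevaluates CPU and memory masks before rebuilding sched domains.

#include <linux/workqueue.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_mutex);	/* stand-in for cpuset_mutex */
static bool force_rebuild;

/* Runs from the system workqueue, outside the hotplug critical region. */
static void example_hotplug_workfn(struct work_struct *work)
{
	mutex_lock(&example_mutex);
	if (force_rebuild) {
		force_rebuild = false;
		/* rebuild_sched_domains() would run here */
	}
	mutex_unlock(&example_mutex);
}

static DECLARE_WORK(example_hotplug_work, example_hotplug_workfn);

/* Hotplug path: cpu_hotplug_lock is held, so only queue the work item. */
static void example_update_active_cpus(void)
{
	schedule_work(&example_hotplug_work);
}

/* Resume path: no hotplug locks are held, so it is safe to wait here. */
static void example_wait_for_hotplug(void)
{
	flush_work(&example_hotplug_work);
}

Bouncing the processing to a work item keeps the hotplug path from taking cgroup-side locks while cpu_hotplug_lock is held, which is the reverse locking order the restored in-tree comment warns about.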
-rw-r--r--	include/linux/cpuset.h	 6
-rw-r--r--	kernel/cgroup/cpuset.c	41
-rw-r--r--	kernel/power/process.c	 2
-rw-r--r--	kernel/sched/core.c	 1
4 files changed, 30 insertions(+), 20 deletions(-)
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 2ab910f85154..1b8e41597ef5 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -52,7 +52,9 @@ static inline void cpuset_dec(void)
 
 extern int cpuset_init(void);
 extern void cpuset_init_smp(void);
+extern void cpuset_force_rebuild(void);
 extern void cpuset_update_active_cpus(void);
+extern void cpuset_wait_for_hotplug(void);
 extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
 extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
 extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
@@ -165,11 +167,15 @@ static inline bool cpusets_enabled(void) { return false; }
 static inline int cpuset_init(void) { return 0; }
 static inline void cpuset_init_smp(void) {}
 
+static inline void cpuset_force_rebuild(void) { }
+
 static inline void cpuset_update_active_cpus(void)
 {
 	partition_sched_domains(1, NULL, NULL);
 }
 
+static inline void cpuset_wait_for_hotplug(void) { }
+
 static inline void cpuset_cpus_allowed(struct task_struct *p,
 				       struct cpumask *mask)
 {
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 227bc25d951d..cab5fd1ee767 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -2277,8 +2277,15 @@ retry:
 	mutex_unlock(&cpuset_mutex);
 }
 
+static bool force_rebuild;
+
+void cpuset_force_rebuild(void)
+{
+	force_rebuild = true;
+}
+
 /**
- * cpuset_hotplug - handle CPU/memory hotunplug for a cpuset
+ * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
  *
  * This function is called after either CPU or memory configuration has
  * changed and updates cpuset accordingly.  The top_cpuset is always
@@ -2293,7 +2300,7 @@ retry:
  * Note that CPU offlining during suspend is ignored.  We don't modify
  * cpusets across suspend/resume cycles at all.
  */
-static void cpuset_hotplug(bool use_cpu_hp_lock)
+static void cpuset_hotplug_workfn(struct work_struct *work)
 {
 	static cpumask_t new_cpus;
 	static nodemask_t new_mems;
@@ -2351,31 +2358,25 @@ static void cpuset_hotplug(bool use_cpu_hp_lock)
 	}
 
 	/* rebuild sched domains if cpus_allowed has changed */
-	if (cpus_updated) {
-		if (use_cpu_hp_lock)
-			rebuild_sched_domains();
-		else {
-			/* Acquiring cpu_hotplug_lock is not required.
-			 * When cpuset_hotplug() is called in hotplug path,
-			 * cpu_hotplug_lock is held by the hotplug context
-			 * which is waiting for cpuhp_thread_fun to indicate
-			 * completion of callback.
-			 */
-			mutex_lock(&cpuset_mutex);
-			rebuild_sched_domains_cpuslocked();
-			mutex_unlock(&cpuset_mutex);
-		}
+	if (cpus_updated || force_rebuild) {
+		force_rebuild = false;
+		rebuild_sched_domains();
 	}
 }
 
-static void cpuset_hotplug_workfn(struct work_struct *work)
+void cpuset_update_active_cpus(void)
 {
-	cpuset_hotplug(true);
-}
-
-void cpuset_update_active_cpus(void)
-{
-	cpuset_hotplug(false);
+	/*
+	 * We're inside cpu hotplug critical region which usually nests
+	 * inside cgroup synchronization.  Bounce actual hotplug processing
+	 * to a work item to avoid reverse locking order.
+	 */
+	schedule_work(&cpuset_hotplug_work);
+}
+
+void cpuset_wait_for_hotplug(void)
+{
+	flush_work(&cpuset_hotplug_work);
 }
 
 /*
diff --git a/kernel/power/process.c b/kernel/power/process.c
index c326d7235c5f..7381d49a44db 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -204,6 +204,8 @@ void thaw_processes(void)
 	__usermodehelper_set_disable_depth(UMH_FREEZING);
 	thaw_workqueues();
 
+	cpuset_wait_for_hotplug();
+
 	read_lock(&tasklist_lock);
 	for_each_process_thread(g, p) {
 		/* No other threads should have PF_SUSPEND_TASK set */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 88b3450b29ab..75554f366fd3 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5624,6 +5624,7 @@ static void cpuset_cpu_active(void)
 		 * restore the original sched domains by considering the
 		 * cpuset configurations.
 		 */
+		cpuset_force_rebuild();
 	}
 	cpuset_update_active_cpus();
 }
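Taken together, the restored call sites reconstruct the suspend/resume flow: kernel/sched/core.c flags a full rebuild and queues the work when the last frozen CPU comes back online, and kernel/power/process.c waits for that work before thawing user-space tasks. A condensed sketch; the example_* wrappers are hypothetical, while the cpuset_* calls are the ones this patch restores:

#include <linux/cpuset.h>

/* Last CPU-online step of resume (cf. cpuset_cpu_active() above). */
static void example_resume_last_cpu_online(void)
{
	cpuset_force_rebuild();		/* request a full sched-domain rebuild */
	cpuset_update_active_cpus();	/* queues cpuset_hotplug_work */
}

/* Thaw path (cf. thaw_processes() above). */
static void example_thaw_processes(void)
{
	cpuset_wait_for_hotplug();	/* flush cpuset_hotplug_work */
	/* ...then thaw user-space tasks... */
}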