summaryrefslogtreecommitdiffstats
path: root/kernel/cgroup
diff options
context:
space:
mode:
authorTejun Heo <tj@kernel.org>2017-12-04 17:41:11 -0500
committerTejun Heo <tj@kernel.org>2017-12-04 17:41:11 -0500
commit11db855c3d06e82f432cb1bafd73296586d5ceec (patch)
tree0b3f09d6e7da0c4bf5fdcae5b617ce046e6b8aa8 /kernel/cgroup
parent52cf373c37a684f8fc279d541307fad39d206376 (diff)
Revert "cpuset: Make cpuset hotplug synchronous"
This reverts commit 1599a185f0e6113be185b9fb809c621c73865829. This and the previous commit led to another circular locking scenario and the scenario which is fixed by this commit no longer exists after e8b3f8db7aad ("workqueue/hotplug: simplify workqueue_offline_cpu()") which removes work item flushing from hotplug path. Revert it for now. Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'kernel/cgroup')
-rw-r--r--	kernel/cgroup/cpuset.c	41
1 file changed, 21 insertions, 20 deletions
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 227bc25d951d..cab5fd1ee767 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -2277,8 +2277,15 @@ retry:
2277 mutex_unlock(&cpuset_mutex); 2277 mutex_unlock(&cpuset_mutex);
2278} 2278}
2279 2279
2280static bool force_rebuild;
2281
2282void cpuset_force_rebuild(void)
2283{
2284 force_rebuild = true;
2285}
2286
2280/** 2287/**
2281 * cpuset_hotplug - handle CPU/memory hotunplug for a cpuset 2288 * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
2282 * 2289 *
2283 * This function is called after either CPU or memory configuration has 2290 * This function is called after either CPU or memory configuration has
2284 * changed and updates cpuset accordingly. The top_cpuset is always 2291 * changed and updates cpuset accordingly. The top_cpuset is always
@@ -2293,7 +2300,7 @@ retry:
2293 * Note that CPU offlining during suspend is ignored. We don't modify 2300 * Note that CPU offlining during suspend is ignored. We don't modify
2294 * cpusets across suspend/resume cycles at all. 2301 * cpusets across suspend/resume cycles at all.
2295 */ 2302 */
2296static void cpuset_hotplug(bool use_cpu_hp_lock) 2303static void cpuset_hotplug_workfn(struct work_struct *work)
2297{ 2304{
2298 static cpumask_t new_cpus; 2305 static cpumask_t new_cpus;
2299 static nodemask_t new_mems; 2306 static nodemask_t new_mems;
@@ -2351,31 +2358,25 @@ static void cpuset_hotplug(bool use_cpu_hp_lock)
2351 } 2358 }
2352 2359
2353 /* rebuild sched domains if cpus_allowed has changed */ 2360 /* rebuild sched domains if cpus_allowed has changed */
2354 if (cpus_updated) { 2361 if (cpus_updated || force_rebuild) {
2355 if (use_cpu_hp_lock) 2362 force_rebuild = false;
2356 rebuild_sched_domains(); 2363 rebuild_sched_domains();
2357 else {
2358 /* Acquiring cpu_hotplug_lock is not required.
2359 * When cpuset_hotplug() is called in hotplug path,
2360 * cpu_hotplug_lock is held by the hotplug context
2361 * which is waiting for cpuhp_thread_fun to indicate
2362 * completion of callback.
2363 */
2364 mutex_lock(&cpuset_mutex);
2365 rebuild_sched_domains_cpuslocked();
2366 mutex_unlock(&cpuset_mutex);
2367 }
2368 } 2364 }
2369} 2365}
2370 2366
2371static void cpuset_hotplug_workfn(struct work_struct *work) 2367void cpuset_update_active_cpus(void)
2372{ 2368{
2373 cpuset_hotplug(true); 2369 /*
2370 * We're inside cpu hotplug critical region which usually nests
2371 * inside cgroup synchronization. Bounce actual hotplug processing
2372 * to a work item to avoid reverse locking order.
2373 */
2374 schedule_work(&cpuset_hotplug_work);
2374} 2375}
2375 2376
2376void cpuset_update_active_cpus(void) 2377void cpuset_wait_for_hotplug(void)
2377{ 2378{
2378 cpuset_hotplug(false); 2379 flush_work(&cpuset_hotplug_work);
2379} 2380}
2380 2381
2381/* 2382/*