author    Prateek Sood <prsood@codeaurora.org>  2017-11-15 09:20:15 -0500
committer Tejun Heo <tj@kernel.org>             2017-11-27 11:48:10 -0500
commit    1599a185f0e6113be185b9fb809c621c73865829 (patch)
tree      dd2f4646ce247a91b3b531c32f3331f354a024de /kernel/cgroup
parent    aa24163b2ee5c92120e32e99b5a93143a0f4258e (diff)
cpuset: Make cpuset hotplug synchronous
Convert cpuset_hotplug_workfn() into a synchronous call for the cpu
hotplug path. For the memory hotplug path it still gets queued as a
work item.

Since cpuset_hotplug_workfn() can be made synchronous for the cpu
hotplug path, it is no longer necessary to wait for cpuset hotplug
while thawing processes.

Signed-off-by: Prateek Sood <prsood@codeaurora.org>
Signed-off-by: Tejun Heo <tj@kernel.org>
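The shape of the change, in brief: one shared function takes a flag saying
whether it must acquire the hotplug lock itself (the still-queued memory
path) or may assume its caller already holds it (the now-synchronous cpu
path). Below is a minimal userspace sketch of that pattern using pthreads;
hp_lock, state_mutex, do_hotplug() and the other names are invented for
illustration and are not kernel APIs.

/*
 * Userspace sketch of the locking pattern this commit applies.
 * hp_lock stands in for cpu_hotplug_lock, state_mutex for cpuset_mutex.
 * Illustrative only, not kernel code.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t hp_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t state_mutex = PTHREAD_MUTEX_INITIALIZER;

static void rebuild_domains_locked(void)
{
	/* precondition: caller holds hp_lock */
	printf("rebuild: hotplug lock held by caller\n");
}

static void do_hotplug(bool take_hp_lock)
{
	if (take_hp_lock) {
		/* deferred path: nobody holds hp_lock yet, take it here */
		pthread_mutex_lock(&hp_lock);
		pthread_mutex_lock(&state_mutex);
		rebuild_domains_locked();
		pthread_mutex_unlock(&state_mutex);
		pthread_mutex_unlock(&hp_lock);
	} else {
		/* synchronous path: the hotplug context already holds
		 * hp_lock; retaking it here would self-deadlock */
		pthread_mutex_lock(&state_mutex);
		rebuild_domains_locked();
		pthread_mutex_unlock(&state_mutex);
	}
}

/* memory-hotplug analogue: still runs deferred, so it locks itself */
static void *hotplug_worker(void *arg)
{
	(void)arg;
	do_hotplug(true);
	return NULL;
}

int main(void)
{
	pthread_t t;

	/* cpu-hotplug analogue: called synchronously with hp_lock held */
	pthread_mutex_lock(&hp_lock);
	do_hotplug(false);
	pthread_mutex_unlock(&hp_lock);

	/* memory-hotplug analogue: bounce to a worker */
	pthread_create(&t, NULL, hotplug_worker, NULL);
	pthread_join(&t, NULL);
	return 0;
}

The design point is that the lock discipline follows the caller's context
instead of being duplicated: the deferred path owns its locking, the
synchronous path inherits it.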
Diffstat (limited to 'kernel/cgroup')
-rw-r--r--  kernel/cgroup/cpuset.c | 41
1 file changed, 20 insertions(+), 21 deletions(-)
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index cab5fd1ee767..227bc25d951d 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -2277,15 +2277,8 @@ retry:
 	mutex_unlock(&cpuset_mutex);
 }
 
-static bool force_rebuild;
-
-void cpuset_force_rebuild(void)
-{
-	force_rebuild = true;
-}
-
 /**
- * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
+ * cpuset_hotplug - handle CPU/memory hotunplug for a cpuset
  *
  * This function is called after either CPU or memory configuration has
  * changed and updates cpuset accordingly. The top_cpuset is always
@@ -2300,7 +2293,7 @@ void cpuset_force_rebuild(void)
  * Note that CPU offlining during suspend is ignored. We don't modify
  * cpusets across suspend/resume cycles at all.
  */
-static void cpuset_hotplug_workfn(struct work_struct *work)
+static void cpuset_hotplug(bool use_cpu_hp_lock)
 {
 	static cpumask_t new_cpus;
 	static nodemask_t new_mems;
@@ -2358,25 +2351,31 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
 	}
 
 	/* rebuild sched domains if cpus_allowed has changed */
-	if (cpus_updated || force_rebuild) {
-		force_rebuild = false;
-		rebuild_sched_domains();
+	if (cpus_updated) {
+		if (use_cpu_hp_lock)
+			rebuild_sched_domains();
+		else {
+			/* Acquiring cpu_hotplug_lock is not required.
+			 * When cpuset_hotplug() is called in hotplug path,
+			 * cpu_hotplug_lock is held by the hotplug context
+			 * which is waiting for cpuhp_thread_fun to indicate
+			 * completion of callback.
+			 */
+			mutex_lock(&cpuset_mutex);
+			rebuild_sched_domains_cpuslocked();
+			mutex_unlock(&cpuset_mutex);
+		}
 	}
 }
 
-void cpuset_update_active_cpus(void)
+static void cpuset_hotplug_workfn(struct work_struct *work)
 {
-	/*
-	 * We're inside cpu hotplug critical region which usually nests
-	 * inside cgroup synchronization. Bounce actual hotplug processing
-	 * to a work item to avoid reverse locking order.
-	 */
-	schedule_work(&cpuset_hotplug_work);
+	cpuset_hotplug(true);
 }
 
-void cpuset_wait_for_hotplug(void)
+void cpuset_update_active_cpus(void)
 {
-	flush_work(&cpuset_hotplug_work);
+	cpuset_hotplug(false);
 }
 
 /*
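The comment added in the else branch above carries the key invariant: on
the cpu hotplug path, cpu_hotplug_lock is already held by the hotplug
context, so the synchronous call must use rebuild_sched_domains_cpuslocked()
rather than re-acquire the lock. A minimal userspace sketch of the
self-deadlock that re-acquisition would risk, with a pthreads rwlock
standing in for cpu_hotplug_lock (illustrative only, not kernel code):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_rwlock_t hp_lock = PTHREAD_RWLOCK_INITIALIZER;

int main(void)
{
	/* the hotplug context holds the lock while running callbacks */
	pthread_rwlock_wrlock(&hp_lock);

	/* A now-synchronous callback that re-acquired the lock, as a
	 * plain rebuild_sched_domains() effectively would, blocks on
	 * its own writer. A trylock makes the refusal visible without
	 * hanging: */
	int err = pthread_rwlock_tryrdlock(&hp_lock);
	if (err)
		printf("re-acquire refused: %s\n", strerror(err));
	else
		puts("unexpectedly succeeded");

	pthread_rwlock_unlock(&hp_lock);
	return 0;
}

With tryrdlock the failure is reported immediately (EBUSY); a blocking
rdlock in the same position would simply hang, which is the deadlock the
_cpuslocked variant exists to avoid.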