author	Linus Torvalds <torvalds@linux-foundation.org>	2017-09-12 14:30:56 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-09-12 14:30:56 -0400
commit	040b9d7ccff40e1fbd48029e3c769db188a3ba2c (patch)
tree	43ab6312d5108302c4b67753597850275a9ee4ed
parent	e6328a7abe7f8fcd32e9d3bcbd14ff2161bf71c9 (diff)
parent	46123355af729514e6fa8b8a9dd1e645e61a6466 (diff)
Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Ingo Molnar:
 "Three fixes:

   - fix a suspend/resume cpusets bug

   - fix a !CONFIG_NUMA_BALANCING bug

   - fix a kerneldoc warning"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/fair: Fix nuisance kernel-doc warning
  sched/cpuset/pm: Fix cpuset vs. suspend-resume bugs
  sched/fair: Fix wake_affine_llc() balancing rules
-rw-r--r--	include/linux/cpuset.h	|  6
-rw-r--r--	kernel/cgroup/cpuset.c	| 16
-rw-r--r--	kernel/power/process.c	|  5
-rw-r--r--	kernel/sched/core.c	|  7
-rw-r--r--	kernel/sched/fair.c	|  4
5 files changed, 30 insertions, 8 deletions
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index e74655d941b7..a1e6a33a4b03 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -51,7 +51,9 @@ static inline void cpuset_dec(void)
 
 extern int cpuset_init(void);
 extern void cpuset_init_smp(void);
+extern void cpuset_force_rebuild(void);
 extern void cpuset_update_active_cpus(void);
+extern void cpuset_wait_for_hotplug(void);
 extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
 extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
 extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
@@ -164,11 +166,15 @@ static inline bool cpusets_enabled(void) { return false; }
 static inline int cpuset_init(void) { return 0; }
 static inline void cpuset_init_smp(void) {}
 
+static inline void cpuset_force_rebuild(void) { }
+
 static inline void cpuset_update_active_cpus(void)
 {
 	partition_sched_domains(1, NULL, NULL);
 }
 
+static inline void cpuset_wait_for_hotplug(void) { }
+
 static inline void cpuset_cpus_allowed(struct task_struct *p,
 				       struct cpumask *mask)
 {
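
The two declarations added above also grow no-op stubs for !CONFIG_CPUSETS, so callers need no #ifdef. A minimal sketch of how they are meant to pair up across suspend/resume, mirroring the callers added in kernel/sched/core.c and kernel/power/process.c below (resume_rebuild_sketch() is a hypothetical caller, not kernel code):

#include <linux/cpuset.h>

/* Sketch only: combines both call sites for illustration. */
static void resume_rebuild_sketch(void)
{
	/*
	 * Last CPU online after resume: force the next cpuset hotplug
	 * pass to rebuild sched domains even if cpus_allowed appears
	 * unchanged, then kick the asynchronous rebuild.
	 */
	cpuset_force_rebuild();
	cpuset_update_active_cpus();

	/*
	 * Before thawing user space, wait for the asynchronous hotplug
	 * work so tasks resume with up-to-date cpumasks.
	 */
	cpuset_wait_for_hotplug();
}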
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 67230ecf2ce1..4657e2924ecb 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -2275,6 +2275,13 @@ retry:
 	mutex_unlock(&cpuset_mutex);
 }
 
+static bool force_rebuild;
+
+void cpuset_force_rebuild(void)
+{
+	force_rebuild = true;
+}
+
 /**
  * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
  *
@@ -2349,8 +2356,10 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
 	}
 
 	/* rebuild sched domains if cpus_allowed has changed */
-	if (cpus_updated)
+	if (cpus_updated || force_rebuild) {
+		force_rebuild = false;
 		rebuild_sched_domains();
+	}
 }
 
 void cpuset_update_active_cpus(void)
@@ -2363,6 +2372,11 @@ void cpuset_update_active_cpus(void)
 	schedule_work(&cpuset_hotplug_work);
 }
 
+void cpuset_wait_for_hotplug(void)
+{
+	flush_work(&cpuset_hotplug_work);
+}
+
 /*
  * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
 * Call this routine anytime after node_states[N_MEMORY] changes.
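
cpuset_update_active_cpus() only schedules cpuset_hotplug_work, so the domain rebuild is asynchronous; the new cpuset_wait_for_hotplug() lets a caller synchronize with it through flush_work(), which blocks until any queued instance of the work item has finished running. A standalone sketch of that producer/consumer pattern (demo_* names are hypothetical, not the kernel's actual work item):

#include <linux/workqueue.h>

static void demo_hotplug_fn(struct work_struct *work)
{
	/* rebuild whatever state depends on the set of online CPUs */
}
static DECLARE_WORK(demo_hotplug_work, demo_hotplug_fn);

static void demo_update(void)
{
	schedule_work(&demo_hotplug_work);	/* returns immediately */
}

static void demo_wait(void)
{
	flush_work(&demo_hotplug_work);		/* blocks until the handler completes */
}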
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 78672d324a6e..50f25cb370c6 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -20,8 +20,9 @@
 #include <linux/workqueue.h>
 #include <linux/kmod.h>
 #include <trace/events/power.h>
+#include <linux/cpuset.h>
 
 /*
  * Timeout for stopping processes
 */
 unsigned int __read_mostly freeze_timeout_msecs = 20 * MSEC_PER_SEC;
@@ -202,6 +203,8 @@ void thaw_processes(void)
 	__usermodehelper_set_disable_depth(UMH_FREEZING);
 	thaw_workqueues();
 
+	cpuset_wait_for_hotplug();
+
 	read_lock(&tasklist_lock);
 	for_each_process_thread(g, p) {
 		/* No other threads should have PF_SUSPEND_TASK set */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 6d2c7ff9ba98..136a76d80dbf 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5556,16 +5556,15 @@ static void cpuset_cpu_active(void)
 		 * operation in the resume sequence, just build a single sched
 		 * domain, ignoring cpusets.
 		 */
-		num_cpus_frozen--;
-		if (likely(num_cpus_frozen)) {
-			partition_sched_domains(1, NULL, NULL);
+		partition_sched_domains(1, NULL, NULL);
+		if (--num_cpus_frozen)
 			return;
-		}
 		/*
 		 * This is the last CPU online operation. So fall through and
 		 * restore the original sched domains by considering the
 		 * cpuset configurations.
 		 */
+		cpuset_force_rebuild();
 	}
 	cpuset_update_active_cpus();
 }
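
The restructured branch folds the decrement into the test and hoists the single-domain build: every CPU brought up while cpusets are still frozen gets the one-domain setup, and only the last one falls through, now tagging the deferred rebuild via cpuset_force_rebuild() because cpuset_update_active_cpus() no longer rebuilds domains synchronously. A condensed before/after view of just the control flow (sketch, not the literal kernel context):

/* before: the last CPU online skipped the single-domain build */
num_cpus_frozen--;
if (likely(num_cpus_frozen)) {
	partition_sched_domains(1, NULL, NULL);
	return;
}

/* after: always build the single domain, then have the last CPU
 * additionally force a full (asynchronous) cpuset rebuild */
partition_sched_domains(1, NULL, NULL);
if (--num_cpus_frozen)
	return;
cpuset_force_rebuild();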
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index a5d83ed8dd82..0a85641e62ce 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5424,7 +5424,7 @@ wake_affine_llc(struct sched_domain *sd, struct task_struct *p,
 		return false;
 
 	/* if this cache has capacity, come here */
-	if (this_stats.has_capacity && this_stats.nr_running < prev_stats.nr_running+1)
+	if (this_stats.has_capacity && this_stats.nr_running+1 < prev_stats.nr_running)
 		return true;
 
 	/*
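
The fix moves the +1 to the waking side of the comparison. With illustrative counts prev_stats.nr_running = 2 and this_stats.nr_running = 1: the old test (1 < 2+1) pulls the task and merely equalizes both LLCs at 2 tasks each, a bias the load balancer would then try to undo; the new test (1+1 < 2) declines, pulling only when this LLC stays strictly less loaded than prev after counting the waking task. A standalone check of both predicates (bare counters, not the kernel's llc_stats):

#include <stdbool.h>
#include <stdio.h>

static bool old_rule(unsigned int this_nr, unsigned int prev_nr)
{
	return this_nr < prev_nr + 1;	/* buggy: +1 on the wrong side */
}

static bool new_rule(unsigned int this_nr, unsigned int prev_nr)
{
	return this_nr + 1 < prev_nr;	/* count the waking task here */
}

int main(void)
{
	/* prev LLC runs 2 tasks, this LLC runs 1 */
	printf("old rule pulls: %d\n", old_rule(1, 2));	/* prints 1 */
	printf("new rule pulls: %d\n", new_rule(1, 2));	/* prints 0 */
	return 0;
}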
@@ -7708,7 +7708,7 @@ next_group:
  * number.
  *
  * Return: 1 when packing is required and a task should be moved to
- * this CPU. The amount of the imbalance is returned in *imbalance.
+ * this CPU. The amount of the imbalance is returned in env->imbalance.
  *
  * @env: The load balancing environment.
 * @sds: Statistics of the sched_domain which is to be packed