author    Rohit Jain <rohit.k.jain@oracle.com>    2018-05-09 12:39:48 -0400
committer Ingo Molnar <mingo@kernel.org>          2018-05-14 03:12:26 -0400
commit    943d355d7feef380e15a95892be3dff1095ef54b (patch)
tree      08e5e8bedcdbb72da1359f9211887cab25e2604c /kernel/sched
parent    a59a68fee05d46cccc4279ab6609421f7270398e (diff)
sched/core: Distinguish between idle_cpu() calls based on desired effect, introduce available_idle_cpu()
In the following commit:

  247f2f6f3c70 ("sched/core: Don't schedule threads on pre-empted vCPUs")

... we distinguish between idle_cpu() when the vCPU is not running for
scheduling threads.

However, the idle_cpu() function is used in other places for actually
checking whether the state of the CPU is idle or not. Hence split the use
of that function based on the desired return value, by introducing the
available_idle_cpu() function.

This fixes a (slight) regression in that initial vCPU commit, because some
code paths (like the load-balancer) don't care and shouldn't care if the
vCPU is preempted or not, they just want to know if there are any tasks
on the CPU.

Signed-off-by: Rohit Jain <rohit.k.jain@oracle.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: dhaval.giani@oracle.com
Cc: linux-kernel@vger.kernel.org
Cc: matt@codeblueprint.co.uk
Cc: steven.sistare@oracle.com
Cc: subhra.mazumdar@oracle.com
Link: http://lkml.kernel.org/r/1525883988-10356-1-git-send-email-rohit.k.jain@oracle.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
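[Editorial note] To make the intended split concrete, below is a minimal, self-contained
sketch of the distinction after this patch; it is illustrative only, not kernel code. The
array-backed per-CPU state, NR_CPUS value and the stub bodies are invented for the demo.
idle_cpu() answers "does this CPU currently have nothing to run?", which is what pure
state-inspection paths such as the load balancer want, while available_idle_cpu() also
rejects CPUs whose vCPU has been preempted by the host, which is what task-placement
paths want before enqueuing work.

/* Illustrative stand-alone sketch, not the kernel implementation. */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

/* Invented per-CPU state for the demo. */
static bool cpu_has_no_tasks[NR_CPUS] = { true, true, false, true };
static bool vcpu_preempted[NR_CPUS]   = { false, true, false, false };

/* "Is the CPU's state idle?" -- pure state check, as the load balancer wants. */
static int idle_cpu(int cpu)
{
	return cpu_has_no_tasks[cpu];
}

/* Stub for the paravirt hook; on bare metal this would always be false. */
static int vcpu_is_preempted(int cpu)
{
	return vcpu_preempted[cpu];
}

/* "Is the CPU idle *and* actually able to run work right now?" */
static int available_idle_cpu(int cpu)
{
	if (!idle_cpu(cpu))
		return 0;
	if (vcpu_is_preempted(cpu))
		return 0;
	return 1;
}

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d: idle_cpu=%d available_idle_cpu=%d\n",
		       cpu, idle_cpu(cpu), available_idle_cpu(cpu));
	/* cpu1 is idle but its vCPU is preempted: idle, yet not "available". */
	return 0;
}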
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/core.c | 14 ++++++++++++++
-rw-r--r--  kernel/sched/fair.c | 20 ++++++++++----------
2 files changed, 24 insertions(+), 10 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 102c36c317dc..d1555185c054 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4009,6 +4009,20 @@ int idle_cpu(int cpu)
 		return 0;
 #endif
 
+	return 1;
+}
+
+/**
+ * available_idle_cpu - is a given CPU idle for enqueuing work.
+ * @cpu: the CPU in question.
+ *
+ * Return: 1 if the CPU is currently idle. 0 otherwise.
+ */
+int available_idle_cpu(int cpu)
+{
+	if (!idle_cpu(cpu))
+		return 0;
+
 	if (vcpu_is_preempted(cpu))
 		return 0;
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f32b97d4c63b..748cb054fefd 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5899,8 +5899,8 @@ wake_affine_idle(int this_cpu, int prev_cpu, int sync)
 	 * a cpufreq perspective, it's better to have higher utilisation
 	 * on one CPU.
 	 */
-	if (idle_cpu(this_cpu) && cpus_share_cache(this_cpu, prev_cpu))
-		return idle_cpu(prev_cpu) ? prev_cpu : this_cpu;
+	if (available_idle_cpu(this_cpu) && cpus_share_cache(this_cpu, prev_cpu))
+		return available_idle_cpu(prev_cpu) ? prev_cpu : this_cpu;
 
 	if (sync && cpu_rq(this_cpu)->nr_running == 1)
 		return this_cpu;
@@ -6143,7 +6143,7 @@ find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this
 
 	/* Traverse only the allowed CPUs */
 	for_each_cpu_and(i, sched_group_span(group), &p->cpus_allowed) {
-		if (idle_cpu(i)) {
+		if (available_idle_cpu(i)) {
 			struct rq *rq = cpu_rq(i);
 			struct cpuidle_state *idle = idle_get_state(rq);
 			if (idle && idle->exit_latency < min_exit_latency) {
@@ -6272,7 +6272,7 @@ void __update_idle_core(struct rq *rq)
 		if (cpu == core)
 			continue;
 
-		if (!idle_cpu(cpu))
+		if (!available_idle_cpu(cpu))
 			goto unlock;
 	}
 
@@ -6304,7 +6304,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int
 
 		for_each_cpu(cpu, cpu_smt_mask(core)) {
 			cpumask_clear_cpu(cpu, cpus);
-			if (!idle_cpu(cpu))
+			if (!available_idle_cpu(cpu))
 				idle = false;
 		}
 
@@ -6333,7 +6333,7 @@ static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int t
 	for_each_cpu(cpu, cpu_smt_mask(target)) {
 		if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
 			continue;
-		if (idle_cpu(cpu))
+		if (available_idle_cpu(cpu))
 			return cpu;
 	}
 
@@ -6396,7 +6396,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
 			return -1;
 		if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
 			continue;
-		if (idle_cpu(cpu))
+		if (available_idle_cpu(cpu))
 			break;
 	}
 
@@ -6416,13 +6416,13 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 	struct sched_domain *sd;
 	int i, recent_used_cpu;
 
-	if (idle_cpu(target))
+	if (available_idle_cpu(target))
 		return target;
 
 	/*
 	 * If the previous CPU is cache affine and idle, don't be stupid:
 	 */
-	if (prev != target && cpus_share_cache(prev, target) && idle_cpu(prev))
+	if (prev != target && cpus_share_cache(prev, target) && available_idle_cpu(prev))
 		return prev;
 
 	/* Check a recently used CPU as a potential idle candidate: */
@@ -6430,7 +6430,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 	if (recent_used_cpu != prev &&
 	    recent_used_cpu != target &&
 	    cpus_share_cache(recent_used_cpu, target) &&
-	    idle_cpu(recent_used_cpu) &&
+	    available_idle_cpu(recent_used_cpu) &&
 	    cpumask_test_cpu(p->recent_used_cpu, &p->cpus_allowed)) {
 		/*
 		 * Replace recent_used_cpu with prev as it is a potential