about summary refs log tree commit diff stats
path: root/kernel/sched
diff options
context:
space:
mode:
authorSuresh Siddha <suresh.b.siddha@intel.com>2011-11-17 14:08:23 -0500
committerIngo Molnar <mingo@elte.hu>2011-12-06 02:51:25 -0500
commit77e81365e0b7d7479fc444a21cea0cd4def70b45 (patch)
treeedb0aa41e7ca436f3e1097ea5ea9da6ebee4ec81 /kernel/sched
parentb781a602ac745ee3d5d745276f1e1905a2c101f9 (diff)
sched: Clean up domain traversal in select_idle_sibling()
Instead of going through the scheduler domain hierarchy multiple times (for giving priority to an idle core over an idle SMT sibling in a busy core), start with the highest scheduler domain with the SD_SHARE_PKG_RESOURCES flag and traverse the domain hierarchy down till we find an idle group. This cleanup also addresses an issue reported by Mike where the recent changes returned the busy thread even in the presence of an idle SMT sibling in single socket platforms. Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com> Tested-by: Mike Galbraith <efault@gmx.de> Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Link: http://lkml.kernel.org/r/1321556904.15339.25.camel@sbsiddha-desk.sc.intel.com Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched')
-rw-r--r--kernel/sched/fair.c38
-rw-r--r--kernel/sched/sched.h2
2 files changed, 27 insertions, 13 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7c62e2bf234f..96a9ecec699b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2644,6 +2644,28 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
2644 return idlest; 2644 return idlest;
2645} 2645}
2646 2646
2647/**
2648 * highest_flag_domain - Return highest sched_domain containing flag.
2649 * @cpu: The cpu whose highest level of sched domain is to
2650 * be returned.
2651 * @flag: The flag to check for the highest sched_domain
2652 * for the given cpu.
2653 *
2654 * Returns the highest sched_domain of a cpu which contains the given flag.
2655 */
2656static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
2657{
2658 struct sched_domain *sd, *hsd = NULL;
2659
2660 for_each_domain(cpu, sd) {
2661 if (!(sd->flags & flag))
2662 break;
2663 hsd = sd;
2664 }
2665
2666 return hsd;
2667}
2668
2647/* 2669/*
2648 * Try and locate an idle CPU in the sched_domain. 2670 * Try and locate an idle CPU in the sched_domain.
2649 */ 2671 */
@@ -2653,7 +2675,7 @@ static int select_idle_sibling(struct task_struct *p, int target)
2653 int prev_cpu = task_cpu(p); 2675 int prev_cpu = task_cpu(p);
2654 struct sched_domain *sd; 2676 struct sched_domain *sd;
2655 struct sched_group *sg; 2677 struct sched_group *sg;
2656 int i, smt = 0; 2678 int i;
2657 2679
2658 /* 2680 /*
2659 * If the task is going to be woken-up on this cpu and if it is 2681 * If the task is going to be woken-up on this cpu and if it is
@@ -2673,19 +2695,9 @@ static int select_idle_sibling(struct task_struct *p, int target)
2673 * Otherwise, iterate the domains and find an elegible idle cpu. 2695 * Otherwise, iterate the domains and find an elegible idle cpu.
2674 */ 2696 */
2675 rcu_read_lock(); 2697 rcu_read_lock();
2676again:
2677 for_each_domain(target, sd) {
2678 if (!smt && (sd->flags & SD_SHARE_CPUPOWER))
2679 continue;
2680
2681 if (!(sd->flags & SD_SHARE_PKG_RESOURCES)) {
2682 if (!smt) {
2683 smt = 1;
2684 goto again;
2685 }
2686 break;
2687 }
2688 2698
2699 sd = highest_flag_domain(target, SD_SHARE_PKG_RESOURCES);
2700 for_each_lower_domain(sd) {
2689 sg = sd->groups; 2701 sg = sd->groups;
2690 do { 2702 do {
2691 if (!cpumask_intersects(sched_group_cpus(sg), 2703 if (!cpumask_intersects(sched_group_cpus(sg),
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c2e780234c31..8715055979d1 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -501,6 +501,8 @@ DECLARE_PER_CPU(struct rq, runqueues);
501#define for_each_domain(cpu, __sd) \ 501#define for_each_domain(cpu, __sd) \
502 for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent) 502 for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
503 503
504#define for_each_lower_domain(sd) for (; sd; sd = sd->child)
505
504#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu))) 506#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
505#define this_rq() (&__get_cpu_var(runqueues)) 507#define this_rq() (&__get_cpu_var(runqueues))
506#define task_rq(p) cpu_rq(task_cpu(p)) 508#define task_rq(p) cpu_rq(task_cpu(p))