author    Linus Torvalds <torvalds@linux-foundation.org>  2012-09-16 15:29:43 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-09-16 15:29:43 -0400
commit    37407ea7f93864c2cfc03edf8f37872ec539ea2b
tree      7c07e7adadd40fc94cebfe816f1c65a4a630b147
parent    3f0c3c8fe30c725c1264fb6db8cc4b69db3a658a
Revert "sched: Improve scalability via 'CPU buddies', which withstand random perturbations"
This reverts commit 970e178985cadbca660feb02f4d2ee3a09f7fdda.

Nikolay Ulyanitsky reported that the 3.6-rc5 kernel has a 15-20%
performance drop on PostgreSQL 9.2 on his machine (running "pgbench").

Borislav Petkov was able to reproduce this, and bisected it to this
commit 970e178985ca ("sched: Improve scalability via 'CPU buddies' ...")
apparently because the new single-idle-buddy model simply doesn't find
idle CPUs to reschedule on aggressively enough.

Mike Galbraith suspects that it is likely due to the user-mode spinlocks
in PostgreSQL not reacting well to preemption, but we don't really know
the details - I'll just revert the commit for now.

There are hopefully other approaches to improve scheduler scalability
without it causing these kinds of downsides.

Reported-by: Nikolay Ulyanitsky <lystor@gmail.com>
Bisected-by: Borislav Petkov <bp@alien8.de>
Acked-by: Mike Galbraith <efault@gmx.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
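The failure mode Mike Galbraith describes is worth spelling out: a
user-mode spinlock busy-waits in user space, so if the kernel preempts
the lock holder, every waiter burns its timeslice on a lock that cannot
be released until the holder runs again. Throughput then hinges on how
quickly the scheduler finds idle CPUs for woken tasks. Below is a
minimal illustrative sketch of such a lock in C11 (this is not
PostgreSQL's actual s_lock implementation):

/*
 * Illustrative user-mode test-and-set spinlock (a sketch only, not
 * PostgreSQL's actual code).  While the lock is held, every waiter
 * spins in user space; a preempted holder leaves them burning cycles
 * on a lock that cannot be released until the holder runs again.
 */
#include <stdatomic.h>

typedef atomic_flag slock_t;

static void s_lock(slock_t *lock)
{
	/* atomic_flag_test_and_set() returns the old value: spin while set. */
	while (atomic_flag_test_and_set(lock))
		;	/* busy-wait; no help from the kernel here */
}

static void s_unlock(slock_t *lock)
{
	atomic_flag_clear(lock);
}

int main(void)
{
	slock_t lock = ATOMIC_FLAG_INIT;

	s_lock(&lock);
	/* ... critical section ... */
	s_unlock(&lock);
	return 0;
}

Real implementations typically add backoff and eventually sleep, but
the basic sensitivity to holder preemption is the same.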
-rw-r--r--  include/linux/sched.h |  1
-rw-r--r--  kernel/sched/core.c   | 39
-rw-r--r--  kernel/sched/fair.c   | 28
3 files changed, 22 insertions(+), 46 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b8c86648a2f9..23bddac4bad8 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -954,7 +954,6 @@ struct sched_domain {
 	unsigned int smt_gain;
 	int flags;			/* See SD_* */
 	int level;
-	int idle_buddy;			/* cpu assigned to select_idle_sibling() */
 
 	/* Runtime fields. */
 	unsigned long last_balance;	/* init to jiffies. units in jiffies */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a4ea245f3d85..649c9f876cb1 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6014,11 +6014,6 @@ static void destroy_sched_domains(struct sched_domain *sd, int cpu)
  * SD_SHARE_PKG_RESOURCE set (Last Level Cache Domain) for this
  * allows us to avoid some pointer chasing select_idle_sibling().
  *
- * Iterate domains and sched_groups downward, assigning CPUs to be
- * select_idle_sibling() hw buddy.  Cross-wiring hw makes bouncing
- * due to random perturbation self canceling, ie sw buddies pull
- * their counterpart to their CPU's hw counterpart.
- *
  * Also keep a unique ID per domain (we use the first cpu number in
  * the cpumask of the domain), this allows us to quickly tell if
  * two cpus are in the same cache domain, see cpus_share_cache().
@@ -6032,40 +6027,8 @@ static void update_top_cache_domain(int cpu)
 	int id = cpu;
 
 	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
-	if (sd) {
-		struct sched_domain *tmp = sd;
-		struct sched_group *sg, *prev;
-		bool right;
-
-		/*
-		 * Traverse to first CPU in group, and count hops
-		 * to cpu from there, switching direction on each
-		 * hop, never ever pointing the last CPU rightward.
-		 */
-		do {
-			id = cpumask_first(sched_domain_span(tmp));
-			prev = sg = tmp->groups;
-			right = 1;
-
-			while (cpumask_first(sched_group_cpus(sg)) != id)
-				sg = sg->next;
-
-			while (!cpumask_test_cpu(cpu, sched_group_cpus(sg))) {
-				prev = sg;
-				sg = sg->next;
-				right = !right;
-			}
-
-			/* A CPU went down, never point back to domain start. */
-			if (right && cpumask_first(sched_group_cpus(sg->next)) == id)
-				right = false;
-
-			sg = right ? sg->next : prev;
-			tmp->idle_buddy = cpumask_first(sched_group_cpus(sg));
-		} while ((tmp = tmp->child));
-
+	if (sd)
 		id = cpumask_first(sched_domain_span(sd));
-	}
 
 	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
 	per_cpu(sd_llc_id, cpu) = id;
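With the buddy wiring reverted, update_top_cache_domain() again simply
records the first CPU of the highest cache-sharing domain in sd_llc_id.
That per-CPU id is what lets cpus_share_cache() answer in a single
comparison. A self-contained userspace model of the check, with the
kernel's per_cpu() machinery replaced by a plain array and a
hypothetical topology:

/*
 * Userspace model of the cpus_share_cache() fast path that sd_llc_id
 * enables.  The topology below (two 2-CPU LLC domains) is made up
 * for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

/* sd_llc_id[cpu] = first CPU of cpu's last-level-cache domain. */
static const int sd_llc_id[] = { 0, 0, 2, 2 };

static bool cpus_share_cache(int this_cpu, int that_cpu)
{
	return sd_llc_id[this_cpu] == sd_llc_id[that_cpu];
}

int main(void)
{
	printf("0,1 share: %d\n", cpus_share_cache(0, 1));	/* 1 */
	printf("1,2 share: %d\n", cpus_share_cache(1, 2));	/* 0 */
	return 0;
}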
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 42d9df6a5ca4..96e2b18b6283 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2637,6 +2637,8 @@ static int select_idle_sibling(struct task_struct *p, int target)
 	int cpu = smp_processor_id();
 	int prev_cpu = task_cpu(p);
 	struct sched_domain *sd;
+	struct sched_group *sg;
+	int i;
 
 	/*
 	 * If the task is going to be woken-up on this cpu and if it is
@@ -2653,17 +2655,29 @@ static int select_idle_sibling(struct task_struct *p, int target)
 		return prev_cpu;
 
 	/*
-	 * Otherwise, check assigned siblings to find an elegible idle cpu.
+	 * Otherwise, iterate the domains and find an elegible idle cpu.
 	 */
 	sd = rcu_dereference(per_cpu(sd_llc, target));
-
 	for_each_lower_domain(sd) {
-		if (!cpumask_test_cpu(sd->idle_buddy, tsk_cpus_allowed(p)))
-			continue;
-		if (idle_cpu(sd->idle_buddy))
-			return sd->idle_buddy;
-	}
-
+		sg = sd->groups;
+		do {
+			if (!cpumask_intersects(sched_group_cpus(sg),
+						tsk_cpus_allowed(p)))
+				goto next;
+
+			for_each_cpu(i, sched_group_cpus(sg)) {
+				if (!idle_cpu(i))
+					goto next;
+			}
+
+			target = cpumask_first_and(sched_group_cpus(sg),
+					tsk_cpus_allowed(p));
+			goto done;
+next:
+			sg = sg->next;
+		} while (sg != sd->groups);
+	}
+done:
 	return target;
 }
 
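The restored select_idle_sibling() walks every sched_group of each
lower domain and settles on the first group whose CPUs all are idle
and intersect the task's affinity mask, instead of probing one
precomputed buddy CPU per domain. A self-contained userspace model of
that scan, using plain 64-bit masks in place of struct cpumask and an
array in place of the sched_group ring (names and the demo topology
are illustrative, not kernel API):

/*
 * Userspace model of the restored select_idle_sibling() group scan.
 */
#include <stdint.h>
#include <stdio.h>

static int first_set_bit(uint64_t m)
{
	return m ? __builtin_ctzll(m) : -1;
}

/*
 * Walk the groups of one domain; return a CPU from the first group
 * that intersects the allowed mask and is entirely idle, or -1.
 */
static int pick_idle_group_cpu(const uint64_t *groups, int ngroups,
			       uint64_t idle, uint64_t allowed)
{
	for (int g = 0; g < ngroups; g++) {
		if (!(groups[g] & allowed))
			continue;	/* no allowed CPU in this group */
		if ((groups[g] & idle) != groups[g])
			continue;	/* some CPU in the group is busy */
		return first_set_bit(groups[g] & allowed);
	}
	return -1;
}

int main(void)
{
	/* Two SMT pairs {0,1} and {2,3}; only CPUs 2 and 3 are idle. */
	const uint64_t groups[] = { 0x3, 0xc };

	printf("%d\n", pick_idle_group_cpu(groups, 2, 0xc, 0xf)); /* 2 */
	return 0;
}

Compared with the single idle_buddy probe, this scan inspects more CPUs
per wakeup, which is the scalability cost the reverted commit tried to
avoid; per the commit message above, it is also what finds idle CPUs
aggressively enough for workloads like pgbench.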