author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2010-05-31 06:37:30 -0400
committer  Ingo Molnar <mingo@elte.hu>                2010-06-01 03:27:16 -0400
commit     e51fd5e22e12b39f49b1bb60b37b300b17378a43
tree       391500ff509dc30991db38e3d54eaccfe385d1cb /kernel
parent     54e88fad223c4e1d94289611a90c7fe3ebe5631b
sched: Fix wake_affine() vs RT tasks
Mike reports that since e9e9250b ("sched: Scale down cpu_power due to RT
tasks"), wake_affine() goes funny on RT tasks: they still carry a non-zero
weight, and wake_affine() still subtracts that weight from the rq weight.

Since nobody should be using se->load.weight for RT tasks, set the value to
zero. Also, since we now use ->cpu_power to normalize rq weights to account
for RT cpu usage, add that factor into the imbalance computation.

Reported-by: Mike Galbraith <efault@gmx.de>
Tested-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1275316109.27810.22969.camel@twins>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
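For illustration only, here is a minimal user-space sketch of the balance
test this patch introduces in wake_affine(). It is not the kernel code:
the effective_load() terms are folded into the load arguments, the helper
name and all numbers are hypothetical, and the cross-multiplication by the
other side's cpu_power stands in for dividing each rq's load by its own
power without integer division. (A second sketch after the diff shows the
wrap-around that the zero RT weight avoids.)

	#include <stdbool.h>
	#include <stdio.h>

	/*
	 * balanced() mirrors the new comparison:
	 *   100 * this_load / this_power <= imb * prev_load / prev_power
	 * rewritten without division as
	 *   100 * prev_power * this_load <= imb * this_power * prev_load
	 */
	static bool balanced(unsigned long this_load, unsigned long this_power,
	                     unsigned long prev_load, unsigned long prev_power,
	                     unsigned int imbalance_pct)
	{
		unsigned long this_eff_load, prev_eff_load;

		if (!this_load)
			return true;

		this_eff_load = 100;
		this_eff_load *= prev_power;
		this_eff_load *= this_load;

		prev_eff_load = 100 + (imbalance_pct - 100) / 2;
		prev_eff_load *= this_power;
		prev_eff_load *= prev_load;

		return this_eff_load <= prev_eff_load;
	}

	int main(void)
	{
		/*
		 * Hypothetical case: prev_cpu at half power (an RT task ate
		 * ~50% of it), equal raw loads. Normalized by cpu_power,
		 * prev_cpu is twice as busy, so waking on this_cpu is fine:
		 * 100*512*1024 = 52428800 <= 112*1024*1024 = 117440512.
		 */
		printf("%d\n", balanced(1024, 1024, 1024, 512, 125)); /* prints 1 */
		return 0;
	}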
Diffstat (limited to 'kernel')
 kernel/sched.c      | 24 ++++++------------------
 kernel/sched_fair.c | 22 ++++++++++++++++------
 2 files changed, 22 insertions(+), 24 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index d48408142503..f8b8996228dd 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -544,6 +544,8 @@ struct rq {
 	struct root_domain *rd;
 	struct sched_domain *sd;
 
+	unsigned long cpu_power;
+
 	unsigned char idle_at_tick;
 	/* For active balancing */
 	int post_schedule;
@@ -1499,24 +1501,9 @@ static unsigned long target_load(int cpu, int type)
 	return max(rq->cpu_load[type-1], total);
 }
 
-static struct sched_group *group_of(int cpu)
-{
-	struct sched_domain *sd = rcu_dereference_sched(cpu_rq(cpu)->sd);
-
-	if (!sd)
-		return NULL;
-
-	return sd->groups;
-}
-
 static unsigned long power_of(int cpu)
 {
-	struct sched_group *group = group_of(cpu);
-
-	if (!group)
-		return SCHED_LOAD_SCALE;
-
-	return group->cpu_power;
+	return cpu_rq(cpu)->cpu_power;
 }
 
 static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
@@ -1854,8 +1841,8 @@ static void dec_nr_running(struct rq *rq)
 static void set_load_weight(struct task_struct *p)
 {
 	if (task_has_rt_policy(p)) {
-		p->se.load.weight = prio_to_weight[0] * 2;
-		p->se.load.inv_weight = prio_to_wmult[0] >> 1;
+		p->se.load.weight = 0;
+		p->se.load.inv_weight = WMULT_CONST;
 		return;
 	}
 
@@ -7605,6 +7592,7 @@ void __init sched_init(void)
 #ifdef CONFIG_SMP
 		rq->sd = NULL;
 		rq->rd = NULL;
+		rq->cpu_power = SCHED_LOAD_SCALE;
 		rq->post_schedule = 0;
 		rq->active_balance = 0;
 		rq->next_balance = jiffies;
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 217e4a9393e4..eed35eded602 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1225,7 +1225,6 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 	unsigned long this_load, load;
 	int idx, this_cpu, prev_cpu;
 	unsigned long tl_per_task;
-	unsigned int imbalance;
 	struct task_group *tg;
 	unsigned long weight;
 	int balanced;
@@ -1252,8 +1251,6 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 	tg = task_group(p);
 	weight = p->se.load.weight;
 
-	imbalance = 100 + (sd->imbalance_pct - 100) / 2;
-
 	/*
 	 * In low-load situations, where prev_cpu is idle and this_cpu is idle
 	 * due to the sync cause above having dropped this_load to 0, we'll
@@ -1263,9 +1260,21 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 	 * Otherwise check if either cpus are near enough in load to allow this
 	 * task to be woken on this_cpu.
 	 */
-	balanced = !this_load ||
-		100*(this_load + effective_load(tg, this_cpu, weight, weight)) <=
-		imbalance*(load + effective_load(tg, prev_cpu, 0, weight));
+	if (this_load) {
+		unsigned long this_eff_load, prev_eff_load;
+
+		this_eff_load = 100;
+		this_eff_load *= power_of(prev_cpu);
+		this_eff_load *= this_load +
+			effective_load(tg, this_cpu, weight, weight);
+
+		prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
+		prev_eff_load *= power_of(this_cpu);
+		prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
+
+		balanced = this_eff_load <= prev_eff_load;
+	} else
+		balanced = true;
 
 	/*
 	 * If the currently running task will sleep within
@@ -2298,6 +2307,7 @@ static void update_cpu_power(struct sched_domain *sd, int cpu)
 	if (!power)
 		power = 1;
 
+	cpu_rq(cpu)->cpu_power = power;
 	sdg->cpu_power = power;
 }
 
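And, for context, a hedged illustration (not code from the patch) of why a
non-zero RT weight made wake_affine() go funny: the rq load is unsigned, so
subtracting the old RT placeholder weight (prio_to_weight[0] * 2, i.e.
88761 * 2) from a typical load wraps around, and every later comparison
sees an absurdly large value. The numbers below are hypothetical:

	#include <stdio.h>

	int main(void)
	{
		unsigned long this_load = 1024;        /* one nice-0 task */
		unsigned long rt_weight = 88761UL * 2; /* old RT weight */

		this_load -= rt_weight; /* unsigned subtraction wraps */
		printf("load after subtraction: %lu\n", this_load);

		/* With this patch, RT tasks have weight 0: a no-op here. */
		return 0;
	}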