author     Linus Torvalds <torvalds@linux-foundation.org>  2011-01-27 15:45:04 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-01-27 15:45:04 -0500
commit     bffb276fffc93000e05a19ee0bdee844dff6a88d (patch)
tree       57e0a6e59f0487e096d362d3150cbba3e536109f
parent     f7b548fa3d74f0db9af4fd41fbef973231d384fd (diff)
parent     05ca62c6ca17f39b88fa956d5ebc1fa6e93ad5e3 (diff)
Merge branch 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
sched: Use rq->clock_task instead of rq->clock for correctly maintaining load averages
sched: Fix/remove redundant cfs_rq checks
sched: Fix sign under-flows in wake_affine
-rw-r--r--  kernel/sched_fair.c | 13 +++++--------
1 file changed, 5 insertions(+), 8 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 354769979c02..0c26e2df450e 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -722,10 +722,10 @@ static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
 	u64 now, delta;
 	unsigned long load = cfs_rq->load.weight;
 
-	if (!cfs_rq)
+	if (cfs_rq->tg == &root_task_group)
 		return;
 
-	now = rq_of(cfs_rq)->clock;
+	now = rq_of(cfs_rq)->clock_task;
 	delta = now - cfs_rq->load_stamp;
 
 	/* truncate load history at 4 idle periods */
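
The rq->clock to rq->clock_task switch in the hunk above matters because clock_task (where irq time accounting is available) excludes time spent servicing interrupts, so load-average decay windows are measured in time the cpu was actually available to tasks. A rough standalone sketch with made-up numbers (clock, irq_time, and load_stamp here are illustrative stand-ins, not the kernel's fields):

#include <stdio.h>

int main(void)
{
	/* Hypothetical snapshot of one runqueue, in nanoseconds. */
	unsigned long long clock = 1000000;	/* total wall time elapsed  */
	unsigned long long irq_time = 300000;	/* spent in irq handlers    */
	unsigned long long clock_task = clock - irq_time;
	unsigned long long load_stamp = 0;	/* time of last load update */

	/* Decaying against rq->clock over-counts the window by the irq
	 * time; rq->clock_task measures task time only. */
	printf("delta via clock:      %llu ns\n", clock - load_stamp);
	printf("delta via clock_task: %llu ns\n", clock_task - load_stamp);
	return 0;
}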
@@ -830,9 +830,6 @@ static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta)
 	struct sched_entity *se;
 	long shares;
 
-	if (!cfs_rq)
-		return;
-
 	tg = cfs_rq->tg;
 	se = tg->se[cpu_of(rq_of(cfs_rq))];
 	if (!se)
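
Both `if (!cfs_rq)` tests touched above were dead: in update_cfs_load() the pointer is dereferenced one line earlier to read cfs_rq->load.weight, and the removal in update_cfs_shares() rests on callers always passing a valid cfs_rq. A minimal standalone sketch of the deref-before-check pattern (stand-in types, not the kernel's):

#include <stdio.h>

struct load_weight { unsigned long weight; };
struct cfs_rq { struct load_weight load; };

static void update_load(struct cfs_rq *cfs_rq)
{
	/* The pointer is dereferenced here first ... */
	unsigned long load = cfs_rq->load.weight;

	/* ... so a NULL check afterwards can never help: a NULL
	 * pointer would already have faulted above, and the compiler
	 * may drop the branch as provably redundant. */
	if (!cfs_rq)
		return;

	printf("load = %lu\n", load);
}

int main(void)
{
	struct cfs_rq rq = { .load = { .weight = 1024 } };
	update_load(&rq);
	return 0;
}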
@@ -1432,7 +1429,7 @@ static inline unsigned long effective_load(struct task_group *tg, int cpu,
 
 static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 {
-	unsigned long this_load, load;
+	s64 this_load, load;
 	int idx, this_cpu, prev_cpu;
 	unsigned long tl_per_task;
 	struct task_group *tg;
@@ -1471,8 +1468,8 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 	 * Otherwise check if either cpus are near enough in load to allow this
 	 * task to be woken on this_cpu.
 	 */
-	if (this_load) {
-		unsigned long this_eff_load, prev_eff_load;
+	if (this_load > 0) {
+		s64 this_eff_load, prev_eff_load;
 
 		this_eff_load = 100;
 		this_eff_load *= power_of(prev_cpu);
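
The two wake_affine() hunks are the "sign under-flows" fix from the merge log: this_load accumulates adjustments from effective_load(), which can be negative, and with unsigned long arithmetic a negative result wraps to a huge positive value, so the old `if (this_load)` test took the wrong branch. Declaring the variables s64 and testing `> 0` preserves the sign. A small standalone demonstration of the wraparound:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	long adjustment = -150;	/* e.g. a negative effective_load() result */

	/* Before: unsigned arithmetic wraps below zero ... */
	unsigned long this_load = 100;
	this_load += adjustment;	/* wraps to a huge positive value */
	printf("unsigned: %lu (nonzero? %d)\n", this_load, this_load != 0);

	/* After: signed 64-bit keeps the sign, and the explicit > 0
	 * comparison only fires for a genuinely positive load. */
	int64_t this_load_s64 = 100;
	this_load_s64 += adjustment;
	printf("s64: %lld (positive? %d)\n",
	       (long long)this_load_s64, this_load_s64 > 0);
	return 0;
}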