author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2012-05-22 08:04:28 -0400
committer	Ingo Molnar <mingo@kernel.org>	2012-05-30 08:02:24 -0400
commit	b654f7de41b0e3903ee2b51d3b8db77fe52ce728 (patch)
tree	cdcd46a04d8b59d75045f1d3ae753e4843dbe2c0 /kernel/sched
parent	74a5ce20e6eeeb3751340b390e7ac1d1d07bbf55 (diff)
sched: Make sure to not re-read variables after validation
We could re-read rq->rt_avg after we validated it was smaller than
total, invalidating the check and resulting in an unintended negative.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: David Rientjes <rientjes@google.com>
Link: http://lkml.kernel.org/r/1337688268.9698.29.camel@twins
Signed-off-by: Ingo Molnar <mingo@kernel.org>
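The failure mode is easier to see outside the scheduler. Below is a hypothetical user-space reduction of the pattern, not kernel code: shared_avg plays the role of rq->rt_avg, and a relaxed GCC atomic load stands in for the ACCESS_ONCE() used in the patch; all names are illustrative.

/*
 * Minimal user-space sketch of the re-read hazard this patch fixes.
 * "shared_avg" stands in for rq->rt_avg: another thread may bump it at
 * any time, and nothing stops the compiler from loading it twice, once
 * for the comparison and once again for the subtraction.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t shared_avg;	/* concurrently updated elsewhere */

static uint64_t available_racy(uint64_t total)
{
	/*
	 * Racy: if shared_avg grows past 'total' between the check and
	 * the subtraction, the result underflows to a huge unsigned
	 * value -- the "unintended negative" from the changelog.
	 */
	if (total < shared_avg)
		return 0;
	return total - shared_avg;
}

static uint64_t available_fixed(uint64_t total)
{
	/*
	 * Read once (a relaxed atomic load stands in for the kernel's
	 * ACCESS_ONCE()), then run both the check and the arithmetic on
	 * that single snapshot.
	 */
	uint64_t avg = __atomic_load_n(&shared_avg, __ATOMIC_RELAXED);

	if (total < avg)
		return 0;
	return total - avg;
}

int main(void)
{
	shared_avg = 100;
	printf("racy:  %llu\n", (unsigned long long)available_racy(150));
	printf("fixed: %llu\n", (unsigned long long)available_fixed(150));
	return 0;
}

Compiled with gcc, both variants print 50 here; the difference only matters when another thread races the update, at which point only the racy variant can return a wrapped-around value.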
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/fair.c	15
1 file changed, 11 insertions, 4 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f0380d4987b3..2b449a762074 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3503,15 +3503,22 @@ unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
 unsigned long scale_rt_power(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
-	u64 total, available;
+	u64 total, available, age_stamp, avg;
 
-	total = sched_avg_period() + (rq->clock - rq->age_stamp);
+	/*
+	 * Since we're reading these variables without serialization make sure
+	 * we read them once before doing sanity checks on them.
+	 */
+	age_stamp = ACCESS_ONCE(rq->age_stamp);
+	avg = ACCESS_ONCE(rq->rt_avg);
+
+	total = sched_avg_period() + (rq->clock - age_stamp);
 
-	if (unlikely(total < rq->rt_avg)) {
+	if (unlikely(total < avg)) {
 		/* Ensures that power won't end up being negative */
 		available = 0;
 	} else {
-		available = total - rq->rt_avg;
+		available = total - avg;
 	}
 
 	if (unlikely((s64)total < SCHED_POWER_SCALE))
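For reference, ACCESS_ONCE() in kernels of this vintage is essentially a volatile cast, defined in include/linux/compiler.h along the lines of:

#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

It adds no locking or memory ordering; it only forces the compiler to emit a single load. That is all this fix needs: rq->age_stamp and rq->rt_avg may be slightly stale, but the comparison and the subtraction now operate on the same snapshot, so total - avg can no longer go "negative". Later kernels express the same idiom with READ_ONCE().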