diff options
author:    Peter Zijlstra <peterz@infradead.org>  2015-01-05 05:18:10 -0500
committer: Ingo Molnar <mingo@kernel.org>         2015-01-14 07:34:19 -0500
commit:    cebde6d681aa45f96111cfcffc1544cf2a0454ff (patch)
tree:      87cfce57adc5dde39b37ee7bc2e665d572f8117a /kernel
parent:    1b537c7d1e58c761212a193085f9049b58f672e6 (diff)
sched/core: Validate rq_clock*() serialization
rq->clock{,_task} are serialized by rq->lock, verify this.
One immediate fail is the usage in scale_rt_capability, so 'annotate'
that for now, there's more 'funny' there. Maybe change rq->lock into a
raw_seqlock_t?
(Only 32-bit is affected)
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20150105103554.361872747@infradead.org
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: umgwanakikbuti@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
 kernel/sched/fair.c  | 2 +-
 kernel/sched/sched.h | 7 ++++++-
 2 files changed, 8 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2a0b302e51de..50ff90289293 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5948,8 +5948,8 @@ static unsigned long scale_rt_capacity(int cpu)
 	 */
 	age_stamp = ACCESS_ONCE(rq->age_stamp);
 	avg = ACCESS_ONCE(rq->rt_avg);
+	delta = __rq_clock_broken(rq) - age_stamp;
 
-	delta = rq_clock(rq) - age_stamp;
 	if (unlikely(delta < 0))
 		delta = 0;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 9a2a45c970e7..bd2373273a9e 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -687,13 +687,20 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
 #define raw_rq()		raw_cpu_ptr(&runqueues)
 
+static inline u64 __rq_clock_broken(struct rq *rq)
+{
+	return ACCESS_ONCE(rq->clock);
+}
+
 static inline u64 rq_clock(struct rq *rq)
 {
+	lockdep_assert_held(&rq->lock);
 	return rq->clock;
 }
 
 static inline u64 rq_clock_task(struct rq *rq)
 {
+	lockdep_assert_held(&rq->lock);
 	return rq->clock_task;
 }
 