author     Matt Fleming <matt@codeblueprint.co.uk>   2017-02-17 07:07:31 -0500
committer  Ingo Molnar <mingo@kernel.org>            2017-03-16 04:21:01 -0400
commit     caeb5882979bc6f3c8766fcf59c6269b38f521bc (patch)
tree       ee5e0c30b3ecfd99651f74ffd1403db8ac530280 /kernel/sched/loadavg.c
parent     6e5f32f7a43f45ee55c401c0b9585eb01f9629a8 (diff)
sched/loadavg: Use {READ,WRITE}_ONCE() for sample window
'calc_load_update' is accessed without any kind of locking and there's
a clear assumption in the code that only a single value is read or
written.
Make this explicit by using READ_ONCE() and WRITE_ONCE(), and avoid
unintentionally seeing multiple values or having the loads/stores
split.
Technically the loads in calc_global_*() don't require this since
those are the only functions that update 'calc_load_update', but I've
added the READ_ONCE() for consistency.
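
For illustration, a minimal userspace sketch of the access pattern this patch adopts: take one READ_ONCE() snapshot of the shared sample window into a local, do every comparison and calculation against that local, then publish the new window with a single WRITE_ONCE(). The volatile-cast definitions of READ_ONCE()/WRITE_ONCE(), the LOAD_FREQ value and the sample_window_demo() helper below are simplified stand-ins for this sketch, not the kernel's implementations.

#include <stdio.h>

/*
 * Simplified stand-ins for the kernel's READ_ONCE()/WRITE_ONCE(): the
 * volatile cast forces the compiler to emit exactly one load or store,
 * so the access cannot be repeated, elided or (for word-sized types)
 * torn.  The real kernel macros are more involved.
 */
#define READ_ONCE(x)        (*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, val)  (*(volatile __typeof__(x) *)&(x) = (val))

#define LOAD_FREQ  1251     /* illustrative ~5 sec window in ticks */

static unsigned long calc_load_update;  /* shared, unlocked sample window */

/* Hypothetical demo of the pattern used in calc_global_load(). */
static void sample_window_demo(unsigned long now)
{
        unsigned long sample_window;

        /* One snapshot; every later use sees the same value. */
        sample_window = READ_ONCE(calc_load_update);

        if (now < sample_window + 10)
                return;         /* still before the pending window */

        /* ... fold the load averages here ... */

        /* One store; readers never observe a half-written value. */
        WRITE_ONCE(calc_load_update, sample_window + LOAD_FREQ);
}

int main(void)
{
        calc_load_update = 1000;
        sample_window_demo(1020);
        printf("calc_load_update = %lu\n", calc_load_update);
        return 0;
}

Without the single snapshot, the compiler would be free to reload calc_load_update between the check and the update, so the two uses could observe different values.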
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Matt Fleming <matt@codeblueprint.co.uk>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Mike Galbraith <umgwanakikbuti@gmail.com>
Cc: Morten Rasmussen <morten.rasmussen@arm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Link: http://lkml.kernel.org/r/20170217120731.11868-3-matt@codeblueprint.co.uk
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched/loadavg.c')
 kernel/sched/loadavg.c | 18 +++++++++++-------
 1 file changed, 11 insertions(+), 7 deletions(-)
diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c
index 3a55f3f9ffe4..f15fb2bdbc0d 100644
--- a/kernel/sched/loadavg.c
+++ b/kernel/sched/loadavg.c
@@ -169,7 +169,7 @@ static inline int calc_load_write_idx(void)
         * If the folding window started, make sure we start writing in the
         * next idle-delta.
         */
-       if (!time_before(jiffies, calc_load_update))
+       if (!time_before(jiffies, READ_ONCE(calc_load_update)))
                idx++;
 
        return idx & 1;
@@ -204,7 +204,7 @@ void calc_load_exit_idle(void)
        /*
         * If we're still before the pending sample window, we're done.
         */
-       this_rq->calc_load_update = calc_load_update;
+       this_rq->calc_load_update = READ_ONCE(calc_load_update);
        if (time_before(jiffies, this_rq->calc_load_update))
                return;
 
@@ -308,13 +308,15 @@ calc_load_n(unsigned long load, unsigned long exp,
  */
 static void calc_global_nohz(void)
 {
+       unsigned long sample_window;
        long delta, active, n;
 
-       if (!time_before(jiffies, calc_load_update + 10)) {
+       sample_window = READ_ONCE(calc_load_update);
+       if (!time_before(jiffies, sample_window + 10)) {
                /*
                 * Catch-up, fold however many we are behind still
                 */
-               delta = jiffies - calc_load_update - 10;
+               delta = jiffies - sample_window - 10;
                n = 1 + (delta / LOAD_FREQ);
 
                active = atomic_long_read(&calc_load_tasks);
@@ -324,7 +326,7 @@ static void calc_global_nohz(void)
                avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
                avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
 
-               calc_load_update += n * LOAD_FREQ;
+               WRITE_ONCE(calc_load_update, sample_window + n * LOAD_FREQ);
        }
 
        /*
@@ -352,9 +354,11 @@ static inline void calc_global_nohz(void) { }
  */
 void calc_global_load(unsigned long ticks)
 {
+       unsigned long sample_window;
        long active, delta;
 
-       if (time_before(jiffies, calc_load_update + 10))
+       sample_window = READ_ONCE(calc_load_update);
+       if (time_before(jiffies, sample_window + 10))
                return;
 
        /*
@@ -371,7 +375,7 @@ void calc_global_load(unsigned long ticks)
        avenrun[1] = calc_load(avenrun[1], EXP_5, active);
        avenrun[2] = calc_load(avenrun[2], EXP_15, active);
 
-       calc_load_update += LOAD_FREQ;
+       WRITE_ONCE(calc_load_update, sample_window + LOAD_FREQ);
 
        /*
         * In case we idled for multiple LOAD_FREQ intervals, catch up in bulk.
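
As a quick worked check of the catch-up arithmetic in calc_global_nohz() above (the HZ and LOAD_FREQ values here are assumptions for the example, not taken from this diff): waking up a little more than three full windows past the last scheduled update gives delta / LOAD_FREQ == 3, so n = 4 windows are folded and the rewritten window lands back in the future.

#include <assert.h>

#define HZ         250              /* assumed tick rate */
#define LOAD_FREQ  (5 * HZ + 1)     /* ~5 sec sample interval (illustrative) */

int main(void)
{
        unsigned long sample_window = 10000;    /* last scheduled update */
        unsigned long now = sample_window + 10 + 3 * LOAD_FREQ + 42;
        unsigned long delta, n;

        delta = now - sample_window - 10;       /* ticks we are behind */
        n = 1 + delta / LOAD_FREQ;              /* windows to fold: 4 */
        assert(n == 4);

        /* The rewritten window is ahead of 'now' again. */
        assert(now < sample_window + n * LOAD_FREQ + 10);
        return 0;
}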