author     Davidlohr Bueso <dave@stgolabs.net>  2015-02-22 22:31:41 -0500
committer  Ingo Molnar <mingo@kernel.org>       2015-02-24 02:44:16 -0500
commit     4d3199e4ca8e6670b54dc5ee070ffd54385988e9 (patch)
tree       5529bcb16c3217c02416e0d17d7c28f277c63581 /kernel/locking/rwsem-xadd.c
parent     2ae79026818e7d49fead82b79b1a543e3b9c8a23 (diff)
locking: Remove ACCESS_ONCE() usage
With the new standardized functions, we can replace all
ACCESS_ONCE() calls across the relevant locking code; while
at it, this also covers lockref and seqlock.
ACCESS_ONCE() does not work reliably on non-scalar types.
For example, gcc 4.6 and 4.7 might remove the volatile
qualifier for such accesses during the SRA (scalar
replacement of aggregates) pass:
https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145
Use the new calls regardless of whether the type is scalar;
this is cleaner than having three alternatives.
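For reference, a minimal userspace sketch of why the conversion matters. The macro bodies below are simplified stand-ins for the <linux/compiler.h> definitions (the real __read_once_size() switches on the size and uses volatile loads), not the kernel's actual code:

#include <string.h>

#define ACCESS_ONCE(x)  (*(volatile typeof(x) *)&(x))

/* simplified stand-in: copies the object through a volatile-qualified pointer */
static inline void __read_once_size(const volatile void *p, void *res, int size)
{
        memcpy(res, (const void *)p, size);
}

#define READ_ONCE(x)                                            \
({                                                              \
        union { typeof(x) __val; char __c[sizeof(x)]; } __u;    \
        __read_once_size(&(x), __u.__c, sizeof(x));             \
        __u.__val;                                              \
})

struct pair { int a, b; };      /* a non-scalar (aggregate) type */

long read_scalar(long *count)
{
        return READ_ONCE(*count);       /* the pattern this patch converts to */
}

struct pair read_aggregate(struct pair *p)
{
        /*
         * With ACCESS_ONCE(), gcc 4.6/4.7 could drop the volatile cast here
         * during SRA; READ_ONCE() sidesteps that by going through the helper.
         */
        return READ_ONCE(*p);
}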
Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/r/1424662301.6539.18.camel@stgolabs.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/locking/rwsem-xadd.c')
-rw-r--r--  kernel/locking/rwsem-xadd.c  10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index e4ad019e23f5..06e2214edf98 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -279,7 +279,7 @@ static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
  */
 static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
 {
-        long old, count = ACCESS_ONCE(sem->count);
+        long old, count = READ_ONCE(sem->count);
 
         while (true) {
                 if (!(count == 0 || count == RWSEM_WAITING_BIAS))
@@ -304,9 +304,9 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
                 return false;
 
         rcu_read_lock();
-        owner = ACCESS_ONCE(sem->owner);
+        owner = READ_ONCE(sem->owner);
         if (!owner) {
-                long count = ACCESS_ONCE(sem->count);
+                long count = READ_ONCE(sem->count);
                 /*
                  * If sem->owner is not set, yet we have just recently entered the
                  * slowpath with the lock being active, then there is a possibility
@@ -385,7 +385,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
                 goto done;
 
         while (true) {
-                owner = ACCESS_ONCE(sem->owner);
+                owner = READ_ONCE(sem->owner);
                 if (owner && !rwsem_spin_on_owner(sem, owner))
                         break;
 
@@ -459,7 +459,7 @@ struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
 
         /* we're now waiting on the lock, but no longer actively locking */
         if (waiting) {
-                count = ACCESS_ONCE(sem->count);
+                count = READ_ONCE(sem->count);
 
                 /*
                  * If there were already threads queued before us and there are
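For context, the READ_ONCE() in the first hunk only takes an initial snapshot of sem->count; the rest of that function (outside the hunk's context) retries with cmpxchg() until the write bias is installed or the count shows the lock cannot be taken. A minimal userspace sketch of that snapshot-and-retry shape, using C11 atomics and hypothetical names (my_sem, MY_WAITING_BIAS, MY_WRITE_BIAS) rather than the kernel's types:

#include <stdatomic.h>
#include <stdbool.h>

#define MY_WAITING_BIAS (-1L)   /* hypothetical stand-ins for the kernel's */
#define MY_WRITE_BIAS   (1L)    /* RWSEM_* bias constants */

struct my_sem { _Atomic long count; };

/*
 * Same shape as rwsem_try_write_lock_unqueued() above: snapshot the count
 * once, then retry the compare-and-swap until it succeeds or the observed
 * value shows the lock cannot be taken.
 */
static bool try_write_lock_unqueued(struct my_sem *sem)
{
        long count = atomic_load_explicit(&sem->count, memory_order_relaxed);

        while (true) {
                if (!(count == 0 || count == MY_WAITING_BIAS))
                        return false;

                if (atomic_compare_exchange_weak(&sem->count, &count,
                                                 count + MY_WRITE_BIAS))
                        return true;
                /* CAS failed: 'count' now holds the fresh value; loop and recheck */
        }
}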