Diffstat (limited to 'kernel/locking/rwsem-xadd.c')
-rw-r--r--	kernel/locking/rwsem-xadd.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index e4ad019e23f5..06e2214edf98 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -279,7 +279,7 @@ static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
  */
 static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
 {
-	long old, count = ACCESS_ONCE(sem->count);
+	long old, count = READ_ONCE(sem->count);
 
 	while (true) {
 		if (!(count == 0 || count == RWSEM_WAITING_BIAS))
@@ -304,9 +304,9 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 		return false;
 
 	rcu_read_lock();
-	owner = ACCESS_ONCE(sem->owner);
+	owner = READ_ONCE(sem->owner);
 	if (!owner) {
-		long count = ACCESS_ONCE(sem->count);
+		long count = READ_ONCE(sem->count);
 		/*
 		 * If sem->owner is not set, yet we have just recently entered the
 		 * slowpath with the lock being active, then there is a possibility
@@ -385,7 +385,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
 		goto done;
 
 	while (true) {
-		owner = ACCESS_ONCE(sem->owner);
+		owner = READ_ONCE(sem->owner);
 		if (owner && !rwsem_spin_on_owner(sem, owner))
 			break;
 
@@ -459,7 +459,7 @@ struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
 
 	/* we're now waiting on the lock, but no longer actively locking */
 	if (waiting) {
-		count = ACCESS_ONCE(sem->count);
+		count = READ_ONCE(sem->count);
 
 		/*
 		 * If there were already threads queued before us and there are