path: root/kernel/locking/mutex.c
author    Davidlohr Bueso <dave@stgolabs.net>    2015-02-22 22:31:41 -0500
committer Ingo Molnar <mingo@kernel.org>         2015-02-24 02:44:16 -0500
commit    4d3199e4ca8e6670b54dc5ee070ffd54385988e9
tree      5529bcb16c3217c02416e0d17d7c28f277c63581 /kernel/locking/mutex.c
parent    2ae79026818e7d49fead82b79b1a543e3b9c8a23
locking: Remove ACCESS_ONCE() usage
With the new standardized functions, we can replace all ACCESS_ONCE() calls
across the relevant locking code; this includes lockref and seqlock while
at it.

ACCESS_ONCE() does not work reliably on non-scalar types. For example, gcc
4.6 and 4.7 might remove the volatile tag for such accesses during the SRA
(scalar replacement of aggregates) step:

  https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145

Update to the new calls regardless of whether the type is scalar; this is
cleaner than having three alternatives.

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/r/1424662301.6539.18.camel@stgolabs.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
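For context, the difference between the two macros can be illustrated with a
simplified user-space sketch. This is not the kernel's exact compiler.h code,
only a minimal model of the READ_ONCE() implementation of that era:
ACCESS_ONCE() is just a volatile-qualified cast, which gcc 4.6/4.7 could
effectively undo for aggregate types during SRA, while READ_ONCE() funnels
scalar-sized reads through volatile scalar pointers and falls back to a byte
copy for everything else, so it never relies on a volatile struct access.

#include <stdint.h>
#include <string.h>

/*
 * The old macro: a volatile-qualified cast. Reliable for scalars, but
 * for struct/union types gcc 4.6/4.7 could drop the volatile qualifier
 * during SRA (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145),
 * breaking the "read exactly once" expectation.
 */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

/* Compiler barrier, as the kernel defines it for gcc. */
#define barrier() __asm__ __volatile__("" : : : "memory")

/*
 * Sketch of a size-dispatching helper: scalar-sized objects are read
 * through a volatile pointer of a matching scalar type; anything larger
 * is byte-copied between compiler barriers, so no volatile aggregate
 * access is ever emitted.
 */
static inline void __read_once_size(const volatile void *p, void *res, int size)
{
        switch (size) {
        case 1: *(uint8_t  *)res = *(volatile uint8_t  *)p; break;
        case 2: *(uint16_t *)res = *(volatile uint16_t *)p; break;
        case 4: *(uint32_t *)res = *(volatile uint32_t *)p; break;
        case 8: *(uint64_t *)res = *(volatile uint64_t *)p; break;
        default:
                barrier();
                memcpy(res, (const void *)p, size);
                barrier();
        }
}

/*
 * READ_ONCE(): evaluates to the value of x, read exactly once,
 * regardless of whether x is a scalar or an aggregate.
 */
#define READ_ONCE(x)                                            \
({                                                              \
        union { typeof(x) __val; char __c[1]; } __u;            \
        __read_once_size(&(x), __u.__c, sizeof(x));             \
        __u.__val;                                              \
})

With that in place, a call like owner = READ_ONCE(lock->owner) in the hunks
below reads the pointer exactly once, and the same macro also covers the
non-scalar cases in lockref and seqlock mentioned above.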
Diffstat (limited to 'kernel/locking/mutex.c')
-rw-r--r--  kernel/locking/mutex.c | 8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 43bf25ef3c81..16b2d3cc88b0 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -266,7 +266,7 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
                 return 0;
 
         rcu_read_lock();
-        owner = ACCESS_ONCE(lock->owner);
+        owner = READ_ONCE(lock->owner);
         if (owner)
                 retval = owner->on_cpu;
         rcu_read_unlock();
@@ -340,7 +340,7 @@ static bool mutex_optimistic_spin(struct mutex *lock,
                          * As such, when deadlock detection needs to be
                          * performed the optimistic spinning cannot be done.
                          */
-                        if (ACCESS_ONCE(ww->ctx))
+                        if (READ_ONCE(ww->ctx))
                                 break;
                 }
 
@@ -348,7 +348,7 @@ static bool mutex_optimistic_spin(struct mutex *lock,
                  * If there's an owner, wait for it to either
                  * release the lock or go to sleep.
                  */
-                owner = ACCESS_ONCE(lock->owner);
+                owner = READ_ONCE(lock->owner);
                 if (owner && !mutex_spin_on_owner(lock, owner))
                         break;
 
@@ -487,7 +487,7 @@ static inline int __sched
 __ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
 {
         struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
-        struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);
+        struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
 
         if (!hold_ctx)
                 return 0;