path: root/kernel/locking/mcs_spinlock.h
author		Davidlohr Bueso <dave@stgolabs.net>	2015-02-22 22:31:41 -0500
committer	Ingo Molnar <mingo@kernel.org>		2015-02-24 02:44:16 -0500
commit		4d3199e4ca8e6670b54dc5ee070ffd54385988e9
tree		5529bcb16c3217c02416e0d17d7c28f277c63581 /kernel/locking/mcs_spinlock.h
parent		2ae79026818e7d49fead82b79b1a543e3b9c8a23
locking: Remove ACCESS_ONCE() usage
With the new standardized functions, we can replace all ACCESS_ONCE() calls
across relevant locking - this includes lockref and seqlock while at it.

ACCESS_ONCE() does not work reliably on non-scalar types. For example,
gcc 4.6 and 4.7 might remove the volatile tag for such accesses during
the SRA (scalar replacement of aggregates) step:

  https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145

Use the new calls regardless of whether the access is to a scalar type;
this is cleaner than having three alternatives.

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/r/1424662301.6539.18.camel@stgolabs.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
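For readers unfamiliar with the conversion, the sketch below illustrates the
before/after pattern at a call site like the one touched in this file. The
macro bodies here are simplified stand-ins for illustration only, not the
kernel's actual definitions (the real READ_ONCE()/WRITE_ONCE() dispatch on
access size); the usage at call sites, however, is as shown.

	#include <stdio.h>

	struct mcs_node {
		struct mcs_node *next;
		int locked;
	};

	/* Simplified stand-ins, NOT the kernel implementations. */
	#define ACCESS_ONCE(x)		(*(volatile typeof(x) *)&(x))
	#define READ_ONCE(x)		(*(const volatile typeof(x) *)&(x))
	#define WRITE_ONCE(x, val)	do { ACCESS_ONCE(x) = (val); } while (0)

	static void link_after(struct mcs_node *prev, struct mcs_node *node)
	{
		/* Old style: volatile-cast the lvalue and assign to it. */
		/* ACCESS_ONCE(prev->next) = node; */

		/* New style: explicit one-shot store. */
		WRITE_ONCE(prev->next, node);
	}

	static struct mcs_node *load_next(struct mcs_node *node)
	{
		/* New style: explicit one-shot load. */
		return READ_ONCE(node->next);
	}

	int main(void)
	{
		struct mcs_node a = { .next = NULL, .locked = 0 };
		struct mcs_node b = { .next = NULL, .locked = 0 };

		link_after(&a, &b);
		printf("a.next == &b: %d\n", load_next(&a) == &b);
		return 0;
	}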
Diffstat (limited to 'kernel/locking/mcs_spinlock.h')
 kernel/locking/mcs_spinlock.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
index d1fe2ba5bac9..75e114bdf3f2 100644
--- a/kernel/locking/mcs_spinlock.h
+++ b/kernel/locking/mcs_spinlock.h
@@ -78,7 +78,7 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
 		 */
 		return;
 	}
-	ACCESS_ONCE(prev->next) = node;
+	WRITE_ONCE(prev->next, node);
 
 	/* Wait until the lock holder passes the lock down. */
 	arch_mcs_spin_lock_contended(&node->locked);
@@ -91,7 +91,7 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
 static inline
 void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
 {
-	struct mcs_spinlock *next = ACCESS_ONCE(node->next);
+	struct mcs_spinlock *next = READ_ONCE(node->next);
 
 	if (likely(!next)) {
 		/*
@@ -100,7 +100,7 @@ void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
 		if (likely(cmpxchg(lock, node, NULL) == node))
 			return;
 		/* Wait until the next pointer is set */
-		while (!(next = ACCESS_ONCE(node->next)))
+		while (!(next = READ_ONCE(node->next)))
 			cpu_relax_lowlatency();
 	}
 
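For context on how the primitives touched above are used, here is a hedged
caller sketch: each locker queues its own mcs_spinlock node (normally on its
stack), and the unlock path either clears the tail pointer with cmpxchg() or
waits for the next waiter's node to become visible through the next pointer
that WRITE_ONCE()/READ_ONCE() now publish and observe. The demo_lock and
demo_critical_section names and the surrounding code are illustrative, not
part of the patch.

	#include "mcs_spinlock.h"	/* kernel-internal header this patch touches */

	static struct mcs_spinlock *demo_lock;	/* tail pointer; NULL means unlocked */

	static void demo_critical_section(void)
	{
		struct mcs_spinlock node;	/* one queue node per contending CPU/task */

		mcs_spin_lock(&demo_lock, &node);
		/* ... protected work ... */
		mcs_spin_unlock(&demo_lock, &node);
	}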