aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/include/asm/spinlock.h
diff options
context:
space:
mode:
authorChristian Borntraeger <borntraeger@de.ibm.com>2014-11-24 04:53:46 -0500
committerChristian Borntraeger <borntraeger@de.ibm.com>2014-12-18 03:54:38 -0500
commit4f9d1382e6f80dcfa891b2c02d5a35c53be485f1 (patch)
treed4d95efc68ca6661661e08fe78b1ff3538baa5de /arch/x86/include/asm/spinlock.h
parente37c698270633327245beb0fbd8699db8a4b65b4 (diff)
x86/spinlock: Replace ACCESS_ONCE with READ_ONCE
ACCESS_ONCE does not work reliably on non-scalar types. For example, gcc 4.6 and 4.7 might remove the volatile tag for such accesses during the SRA (scalar replacement of aggregates) step (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145). Change the spinlock code to replace ACCESS_ONCE with READ_ONCE. Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com> Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'arch/x86/include/asm/spinlock.h')
-rw-r--r--arch/x86/include/asm/spinlock.h8
1 file changed, 4 insertions, 4 deletions
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 9295016485c9..12a69b406d40 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -92,7 +92,7 @@ static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
92 unsigned count = SPIN_THRESHOLD; 92 unsigned count = SPIN_THRESHOLD;
93 93
94 do { 94 do {
95 if (ACCESS_ONCE(lock->tickets.head) == inc.tail) 95 if (READ_ONCE(lock->tickets.head) == inc.tail)
96 goto out; 96 goto out;
97 cpu_relax(); 97 cpu_relax();
98 } while (--count); 98 } while (--count);
@@ -105,7 +105,7 @@ static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
105{ 105{
106 arch_spinlock_t old, new; 106 arch_spinlock_t old, new;
107 107
108 old.tickets = ACCESS_ONCE(lock->tickets); 108 old.tickets = READ_ONCE(lock->tickets);
109 if (old.tickets.head != (old.tickets.tail & ~TICKET_SLOWPATH_FLAG)) 109 if (old.tickets.head != (old.tickets.tail & ~TICKET_SLOWPATH_FLAG))
110 return 0; 110 return 0;
111 111
@@ -162,14 +162,14 @@ static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
162 162
163static inline int arch_spin_is_locked(arch_spinlock_t *lock) 163static inline int arch_spin_is_locked(arch_spinlock_t *lock)
164{ 164{
165 struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets); 165 struct __raw_tickets tmp = READ_ONCE(lock->tickets);
166 166
167 return tmp.tail != tmp.head; 167 return tmp.tail != tmp.head;
168} 168}
169 169
170static inline int arch_spin_is_contended(arch_spinlock_t *lock) 170static inline int arch_spin_is_contended(arch_spinlock_t *lock)
171{ 171{
172 struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets); 172 struct __raw_tickets tmp = READ_ONCE(lock->tickets);
173 173
174 return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC; 174 return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC;
175} 175}