author    Linus Torvalds <torvalds@linux-foundation.org>  2014-12-20 19:48:59 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-12-20 19:48:59 -0500
commit    60815cf2e05057db5b78e398d9734c493560b11e
tree      23d7f55df13cc5a0c072cc8a6f361f8e7050b825 /arch/x86/include
parent    bfc7249cc293deac8f2678b7ec3d2407b68c0a33
parent    5de72a2247ac05bde7c89039631b3d0c6186fafb
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/borntraeger/linux
Pull ACCESS_ONCE cleanup preparation from Christian Borntraeger:
 "kernel: Provide READ_ONCE and ASSIGN_ONCE

  As discussed on LKML (http://marc.info/?i=54611D86.4040306%40de.ibm.com),
  ACCESS_ONCE might fail with specific compilers for non-scalar accesses.
  Here is a set of patches to tackle that problem.

  The first patch introduces READ_ONCE and ASSIGN_ONCE.  If the data
  structure is larger than the machine word size, memcpy is used and a
  warning is emitted.  The next patches fix up several in-tree users of
  ACCESS_ONCE on non-scalar types.

  This does not yet contain a patch that forces ACCESS_ONCE to work only
  on scalar types.  That is targeted for the next merge window, as
  linux-next already contains new offenders regarding ACCESS_ONCE vs.
  non-scalar types"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/borntraeger/linux:
  s390/kvm: REPLACE barrier fixup with READ_ONCE
  arm/spinlock: Replace ACCESS_ONCE with READ_ONCE
  arm64/spinlock: Replace ACCESS_ONCE READ_ONCE
  mips/gup: Replace ACCESS_ONCE with READ_ONCE
  x86/gup: Replace ACCESS_ONCE with READ_ONCE
  x86/spinlock: Replace ACCESS_ONCE with READ_ONCE
  mm: replace ACCESS_ONCE with READ_ONCE or barriers
  kernel: Provide READ_ONCE and ASSIGN_ONCE
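As background for the diff below, here is a minimal user-space sketch of the
READ_ONCE mechanism the message describes.  It is not the in-tree definition
(that lives in include/linux/compiler.h and differs in detail): word-sized
accesses become a single volatile load, while larger objects fall back to a
memcpy between compiler barriers, which is where the kernel version emits the
warning mentioned above.

#include <stdint.h>
#include <string.h>

/* Compiler barrier, standing in for the kernel's barrier(). */
#define barrier() __asm__ __volatile__("" ::: "memory")

/* Scalar sizes compile to one volatile load; anything bigger cannot
 * be read atomically, so it is copied between compiler barriers (the
 * kernel additionally emits a compile-time warning in that case). */
static inline void __read_once_size(const volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(uint8_t  *)res = *(const volatile uint8_t  *)p; break;
	case 2: *(uint16_t *)res = *(const volatile uint16_t *)p; break;
	case 4: *(uint32_t *)res = *(const volatile uint32_t *)p; break;
	case 8: *(uint64_t *)res = *(const volatile uint64_t *)p; break;
	default:
		barrier();
		memcpy(res, (const void *)p, size);
		barrier();
	}
}

#define READ_ONCE(x) \
	({ typeof(x) __val; __read_once_size(&(x), &__val, sizeof(__val)); __val; })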
Diffstat (limited to 'arch/x86/include')
-rw-r--r--  arch/x86/include/asm/spinlock.h | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index a4efe477ceab..625660f8a2fc 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -92,7 +92,7 @@ static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
 	unsigned count = SPIN_THRESHOLD;
 
 	do {
-		if (ACCESS_ONCE(lock->tickets.head) == inc.tail)
+		if (READ_ONCE(lock->tickets.head) == inc.tail)
 			goto out;
 		cpu_relax();
 	} while (--count);
@@ -105,7 +105,7 @@ static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	arch_spinlock_t old, new;
 
-	old.tickets = ACCESS_ONCE(lock->tickets);
+	old.tickets = READ_ONCE(lock->tickets);
 	if (old.tickets.head != (old.tickets.tail & ~TICKET_SLOWPATH_FLAG))
 		return 0;
 
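For background on why these struct-sized reads had to move off ACCESS_ONCE:
the macro is a plain volatile cast, and per the LKML thread cited in the
merge message, certain GCC versions drop the volatile qualifier when the
operand is an aggregate rather than a scalar.  A sketch of the failure mode,
using simplified stand-ins for the kernel's ticket-lock types:

#include <stdint.h>

/* The classic definition: a volatile cast, nothing more. */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

/* Simplified stand-ins for the kernel's ticket-lock types. */
struct __raw_tickets { uint16_t head, tail; };
typedef struct { struct __raw_tickets tickets; } arch_spinlock_t;

static int is_locked(arch_spinlock_t *lock)
{
	/* Aggregate operand: an affected compiler may discard the
	 * volatile qualifier on this struct copy, so the load can be
	 * torn or re-read later -- the bug READ_ONCE closes. */
	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);

	return tmp.tail != tmp.head;
}

static int head_sample(arch_spinlock_t *lock)
{
	/* Scalar operand: here the volatile cast works as intended
	 * and forces exactly one 16-bit load. */
	return ACCESS_ONCE(lock->tickets.head);
}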
@@ -162,14 +162,14 @@ static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
 
 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
-	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
+	struct __raw_tickets tmp = READ_ONCE(lock->tickets);
 
 	return tmp.tail != tmp.head;
 }
 
 static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 {
-	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
+	struct __raw_tickets tmp = READ_ONCE(lock->tickets);
 
 	return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC;
 }
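This diff only needed the read side, but the merge message also names
ASSIGN_ONCE for stores.  A companion sketch under the same assumptions as
the READ_ONCE sketch earlier; as merged, the macro took the new value as its
first argument:

#include <stdint.h>
#include <string.h>

#define barrier() __asm__ __volatile__("" ::: "memory")

/* Mirror image of __read_once_size: word-sized stores become one
 * volatile store, larger objects fall back to memcpy between
 * compiler barriers. */
static inline void __assign_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile uint8_t  *)p = *(uint8_t  *)res; break;
	case 2: *(volatile uint16_t *)p = *(uint16_t *)res; break;
	case 4: *(volatile uint32_t *)p = *(uint32_t *)res; break;
	case 8: *(volatile uint64_t *)p = *(uint64_t *)res; break;
	default:
		barrier();
		memcpy((void *)p, res, size);
		barrier();
	}
}

#define ASSIGN_ONCE(val, x) \
	({ typeof(x) __val = (val); __assign_once_size(&(x), &__val, sizeof(__val)); __val; })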