author     Linus Torvalds <torvalds@linux-foundation.org>   2014-12-20 19:48:59 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2014-12-20 19:48:59 -0500
commit     60815cf2e05057db5b78e398d9734c493560b11e (patch)
tree       23d7f55df13cc5a0c072cc8a6f361f8e7050b825 /arch/arm64
parent     bfc7249cc293deac8f2678b7ec3d2407b68c0a33 (diff)
parent     5de72a2247ac05bde7c89039631b3d0c6186fafb (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/borntraeger/linux
Pull ACCESS_ONCE cleanup preparation from Christian Borntraeger:
"kernel: Provide READ_ONCE and ASSIGN_ONCE
As discussed on LKML http://marc.info/?i=54611D86.4040306%40de.ibm.com
ACCESS_ONCE might fail with specific compilers for non-scalar
accesses.
Here is a set of patches to tackle that problem.
The first patch introduces READ_ONCE and ASSIGN_ONCE. If the data
structure is larger than the machine word size, memcpy is used and a
warning is emitted. The next patches fix up several in-tree users of
ACCESS_ONCE on non-scalar types.
This does not yet contain a patch that forces ACCESS_ONCE to work only
on scalar types. This is targeted for the next merge window, as
linux-next already contains new offenders regarding ACCESS_ONCE vs.
non-scalar types"
* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/borntraeger/linux:
s390/kvm: REPLACE barrier fixup with READ_ONCE
arm/spinlock: Replace ACCESS_ONCE with READ_ONCE
arm64/spinlock: Replace ACCESS_ONCE READ_ONCE
mips/gup: Replace ACCESS_ONCE with READ_ONCE
x86/gup: Replace ACCESS_ONCE with READ_ONCE
x86/spinlock: Replace ACCESS_ONCE with READ_ONCE
mm: replace ACCESS_ONCE with READ_ONCE or barriers
kernel: Provide READ_ONCE and ASSIGN_ONCE
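To make the description in the pull message concrete, here is a standalone userspace sketch of the idea, not the kernel's include/linux/compiler.h: the names __read_once_size and READ_ONCE are kept for readability, but the helper body, the macro body, and the toy struct ticket are illustrative assumptions. The sketch only mirrors the stated behaviour that word-sized objects are read through a volatile cast while larger objects fall back to memcpy between compiler barriers (the kernel additionally emits a warning in that case); ASSIGN_ONCE, the store-side counterpart introduced by the same patch, is omitted for brevity.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define barrier() __asm__ __volatile__("" ::: "memory")

/* Word-sized objects are read through a volatile cast; anything larger
 * falls back to a memcpy bracketed by compiler barriers (the kernel
 * additionally warns for such sizes). */
static inline void __read_once_size(const volatile void *p, void *res, int size)
{
        switch (size) {
        case 1: *(uint8_t  *)res = *(const volatile uint8_t  *)p; break;
        case 2: *(uint16_t *)res = *(const volatile uint16_t *)p; break;
        case 4: *(uint32_t *)res = *(const volatile uint32_t *)p; break;
        case 8: *(uint64_t *)res = *(const volatile uint64_t *)p; break;
        default:
                barrier();
                memcpy(res, (const void *)p, size);
                barrier();
        }
}

#define READ_ONCE(x)                                            \
({                                                              \
        union { typeof(x) __val; char __c[1]; } __u;            \
        __read_once_size(&(x), __u.__c, sizeof(x));             \
        __u.__val;                                              \
})

/* Toy stand-in for a small lock structure: a 4-byte struct still takes a
 * word-sized path, while larger aggregates hit the memcpy fallback. */
struct ticket { uint16_t owner, next; };

int main(void)
{
        struct ticket t = { .owner = 2, .next = 3 };
        struct ticket snap = READ_ONCE(t);
        printf("owner=%u next=%u\n", snap.owner, snap.next);
        return 0;
}

Under GCC or Clang this builds as-is (typeof and statement expressions are GNU extensions); the 4-byte struct above goes through the scalar-sized path, and a struct wider than 8 bytes would take the memcpy branch.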
Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/include/asm/spinlock.h  4
1 file changed, 2 insertions, 2 deletions
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index c45b7b1b7197..cee128732435 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -99,12 +99,12 @@ static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 
 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
-	return !arch_spin_value_unlocked(ACCESS_ONCE(*lock));
+	return !arch_spin_value_unlocked(READ_ONCE(*lock));
 }
 
 static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 {
-	arch_spinlock_t lockval = ACCESS_ONCE(*lock);
+	arch_spinlock_t lockval = READ_ONCE(*lock);
 	return (lockval.next - lockval.owner) > 1;
 }
 #define arch_spin_is_contended	arch_spin_is_contended
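For context on the two hunks above, a minimal userspace sketch of the pattern being replaced: the lock layout and the contended check are simplified stand-ins, not the arm64 headers. ACCESS_ONCE relies on a volatile cast of the whole aggregate, which is the construct the pull message says specific compilers mishandle for non-scalar types.

#include <stdio.h>

/* The old macro: take the object's address, cast to a volatile-qualified
 * pointer of the same type, and dereference it. */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

/* Simplified stand-in for arm64's ticket lock (two 16-bit fields). */
typedef struct { unsigned short owner, next; } arch_spinlock_t;

static int arch_spin_is_contended(arch_spinlock_t *lock)
{
        /* A volatile access to an aggregate: per the GCC issue referenced
         * in the pull message, this copy is not guaranteed to behave as a
         * single "once" access, which is why the hunk above switches the
         * call site to READ_ONCE. */
        arch_spinlock_t lockval = ACCESS_ONCE(*lock);
        return (lockval.next - lockval.owner) > 1;
}

int main(void)
{
        arch_spinlock_t lock = { .owner = 1, .next = 4 };
        printf("contended=%d\n", arch_spin_is_contended(&lock));
        return 0;
}

READ_ONCE keeps the call sites unchanged in shape (a struct is still returned by value) but routes the copy through a size switch instead of depending on a volatile-qualified aggregate access.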