author	Linus Torvalds <torvalds@linux-foundation.org>	2014-06-03 15:57:53 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-06-03 15:57:53 -0400
commit	776edb59317ada867dfcddde40b55648beeb0078 (patch)
tree	f6a6136374642323cfefd7d6399ea429f9018ade /arch/arm64
parent	59a3d4c3631e553357b7305dc09db1990aa6757c (diff)
parent	3cf2f34e1a3d4d5ff209d087925cf950e52f4805 (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip into next
Pull core locking updates from Ingo Molnar:
 "The main changes in this cycle were:

   - reduced/streamlined smp_mb__*() interface that allows more
     usecases and makes the existing ones less buggy, especially in
     rarer architectures

   - add rwsem implementation comments

   - bump up lockdep limits"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (33 commits)
  rwsem: Add comments to explain the meaning of the rwsem's count field
  lockdep: Increase static allocations
  arch: Mass conversion of smp_mb__*()
  arch,doc: Convert smp_mb__*()
  arch,xtensa: Convert smp_mb__*()
  arch,x86: Convert smp_mb__*()
  arch,tile: Convert smp_mb__*()
  arch,sparc: Convert smp_mb__*()
  arch,sh: Convert smp_mb__*()
  arch,score: Convert smp_mb__*()
  arch,s390: Convert smp_mb__*()
  arch,powerpc: Convert smp_mb__*()
  arch,parisc: Convert smp_mb__*()
  arch,openrisc: Convert smp_mb__*()
  arch,mn10300: Convert smp_mb__*()
  arch,mips: Convert smp_mb__*()
  arch,metag: Convert smp_mb__*()
  arch,m68k: Convert smp_mb__*()
  arch,m32r: Convert smp_mb__*()
  arch,ia64: Convert smp_mb__*()
  ...
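For context, the caller-side conversion performed by the 'arch: Mass conversion of smp_mb__*()' commit collapses the old per-operation barrier macros into one generic pair. A minimal sketch of the before/after pattern (the refcount and flag names here are illustrative, not taken from the diffs below):

	/* Before: one barrier macro per operation family */
	smp_mb__before_atomic_dec();
	atomic_dec(&obj->refcnt);

	smp_mb__before_clear_bit();
	clear_bit(OBJ_BUSY, &obj->flags);

	/* After: a single generic pair covers all RMW atomics and bitops */
	smp_mb__before_atomic();
	atomic_dec(&obj->refcnt);

	smp_mb__before_atomic();
	clear_bit(OBJ_BUSY, &obj->flags);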
Diffstat (limited to 'arch/arm64')
-rw-r--r--	arch/arm64/include/asm/atomic.h		5
-rw-r--r--	arch/arm64/include/asm/barrier.h	3
-rw-r--r--	arch/arm64/include/asm/bitops.h		9

3 files changed, 3 insertions(+), 14 deletions(-)
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 0237f0867e37..57e8cb49824c 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -152,11 +152,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 
 #define atomic_add_negative(i,v)	(atomic_add_return(i, v) < 0)
 
-#define smp_mb__before_atomic_dec()	smp_mb()
-#define smp_mb__after_atomic_dec()	smp_mb()
-#define smp_mb__before_atomic_inc()	smp_mb()
-#define smp_mb__after_atomic_inc()	smp_mb()
-
 /*
  * 64-bit atomic operations.
  */
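The four operation-specific macros removed above get no replacement in this file; the generic pair added to asm/barrier.h in the next hunk subsumes them. Roughly, the mapping is:

	/* old (asm/atomic.h)                 new (asm/barrier.h)     */
	smp_mb__before_atomic_dec()    ->     smp_mb__before_atomic()
	smp_mb__after_atomic_dec()     ->     smp_mb__after_atomic()
	smp_mb__before_atomic_inc()    ->     smp_mb__before_atomic()
	smp_mb__after_atomic_inc()     ->     smp_mb__after_atomic()

Both the old and new variants expand to a full smp_mb() on arm64, so generated code is unchanged.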
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 66eb7648043b..48b9e704af7c 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -98,6 +98,9 @@ do { \
 #define set_mb(var, value)	do { var = value; smp_mb(); } while (0)
 #define nop()		asm volatile("nop");
 
+#define smp_mb__before_atomic()	smp_mb()
+#define smp_mb__after_atomic()	smp_mb()
+
 #endif	/* __ASSEMBLY__ */
 
 #endif	/* __ASM_BARRIER_H */
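arm64 defines both new macros as a full smp_mb() because its non-value-returning atomics and bitops are built from load/store-exclusive loops that provide no ordering on their own. A sketch of the kind of pairing these barriers support, in the style of Documentation/memory-barriers.txt (READY, obj, and consume() are hypothetical names):

	/* CPU 0: publish the data, then set the flag */
	obj->data = 42;
	smp_mb__before_atomic();	/* order the store before set_bit() */
	set_bit(READY, &obj->flags);

	/* CPU 1: observe the flag, then safely read the data */
	if (test_bit(READY, &obj->flags)) {
		smp_rmb();		/* pairs with the writer's barrier */
		consume(obj->data);
	}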
diff --git a/arch/arm64/include/asm/bitops.h b/arch/arm64/include/asm/bitops.h
index aa5b59d6ba43..9c19594ce7cb 100644
--- a/arch/arm64/include/asm/bitops.h
+++ b/arch/arm64/include/asm/bitops.h
@@ -17,17 +17,8 @@
 #define __ASM_BITOPS_H
 
 #include <linux/compiler.h>
-
 #include <asm/barrier.h>
 
-/*
- * clear_bit may not imply a memory barrier
- */
-#ifndef smp_mb__before_clear_bit
-#define smp_mb__before_clear_bit()	smp_mb()
-#define smp_mb__after_clear_bit()	smp_mb()
-#endif
-
 #ifndef _LINUX_BITOPS_H
 #error only <linux/bitops.h> can be included directly
 #endif
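Callers that previously relied on the bit-specific barriers removed from this header are converted elsewhere in the same series; for example, the classic clear-and-wake pattern becomes (IN_USE and obj are hypothetical names):

	/* was: smp_mb__before_clear_bit(); */
	smp_mb__before_atomic();
	clear_bit(IN_USE, &obj->flags);
	/* was: smp_mb__after_clear_bit(); */
	smp_mb__after_atomic();
	wake_up_bit(&obj->flags, IN_USE);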