author		Linus Torvalds <torvalds@linux-foundation.org>	2014-06-03 15:57:53 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-06-03 15:57:53 -0400
commit		776edb59317ada867dfcddde40b55648beeb0078 (patch)
tree		f6a6136374642323cfefd7d6399ea429f9018ade /arch/arm/include/asm
parent		59a3d4c3631e553357b7305dc09db1990aa6757c (diff)
parent		3cf2f34e1a3d4d5ff209d087925cf950e52f4805 (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip into next
Pull core locking updates from Ingo Molnar:
"The main changes in this cycle were:
- reduced/streamlined smp_mb__*() interface that allows more use cases
  and makes the existing ones less buggy, especially on rarer
  architectures
- add rwsem implementation comments
- bump up lockdep limits"
* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (33 commits)
rwsem: Add comments to explain the meaning of the rwsem's count field
lockdep: Increase static allocations
arch: Mass conversion of smp_mb__*()
arch,doc: Convert smp_mb__*()
arch,xtensa: Convert smp_mb__*()
arch,x86: Convert smp_mb__*()
arch,tile: Convert smp_mb__*()
arch,sparc: Convert smp_mb__*()
arch,sh: Convert smp_mb__*()
arch,score: Convert smp_mb__*()
arch,s390: Convert smp_mb__*()
arch,powerpc: Convert smp_mb__*()
arch,parisc: Convert smp_mb__*()
arch,openrisc: Convert smp_mb__*()
arch,mn10300: Convert smp_mb__*()
arch,mips: Convert smp_mb__*()
arch,metag: Convert smp_mb__*()
arch,m68k: Convert smp_mb__*()
arch,m32r: Convert smp_mb__*()
arch,ia64: Convert smp_mb__*()
...
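The heart of the series is the interface collapse named in the shortlog above: the old per-operation barrier macros (smp_mb__before_atomic_dec(), smp_mb__after_atomic_inc(), smp_mb__after_clear_bit(), ...) become a single generic pair. A minimal caller-side sketch of the conversion follows; the obj pointer and its pending counter are hypothetical, for illustration only:

	/* Before this series: a differently named macro per operation. */
	smp_mb__before_atomic_dec();
	atomic_dec(&obj->pending);

	/* After: one pair covers all atomic RMW ops and bitops. */
	smp_mb__before_atomic();
	atomic_dec(&obj->pending);

On ARM both spellings expand to a full smp_mb() (see the barrier.h hunk below); the gain is a smaller interface that is harder to misuse, since callers no longer pick from a grab-bag of per-operation names.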
Diffstat (limited to 'arch/arm/include/asm')
-rw-r--r--	arch/arm/include/asm/atomic.h	| 5 -----
-rw-r--r--	arch/arm/include/asm/barrier.h	| 3 +++
-rw-r--r--	arch/arm/include/asm/bitops.h	| 4 +---
3 files changed, 4 insertions(+), 8 deletions(-)
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 9a92fd7864a8..3040359094d9 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -241,11 +241,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 
 #define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
 
-#define smp_mb__before_atomic_dec() smp_mb()
-#define smp_mb__after_atomic_dec() smp_mb()
-#define smp_mb__before_atomic_inc() smp_mb()
-#define smp_mb__after_atomic_inc() smp_mb()
-
 #ifndef CONFIG_GENERIC_ATOMIC64
 typedef struct {
 	long long counter;
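The context line kept in this hunk hints at why the hooks exist at all: atomic_add_negative() is built on atomic_add_return(), and on ARM the value-returning atomics already imply full ordering, while the void ones do not. A sketch of that split, with v a hypothetical atomic_t and do_cleanup() a placeholder, not code from this tree:

	atomic_inc(&v);			/* void op: no implicit ordering on ARM */
	smp_mb__after_atomic();		/* formerly smp_mb__after_atomic_inc() */

	if (atomic_dec_return(&v) == 0)	/* value-returning op: fully ordered, */
		do_cleanup();		/* no extra barrier macro needed */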
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index 2f59f7443396..c6a3e73a6e24 100644
--- a/arch/arm/include/asm/barrier.h
+++ b/arch/arm/include/asm/barrier.h
@@ -79,5 +79,8 @@ do { \
 
 #define set_mb(var, value) do { var = value; smp_mb(); } while (0)
 
+#define smp_mb__before_atomic() smp_mb()
+#define smp_mb__after_atomic() smp_mb()
+
 #endif /* !__ASSEMBLY__ */
 #endif /* __ASM_BARRIER_H */
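ARM defines the new pair as real barriers because its atomic RMW instructions carry no implicit ordering. For contrast, an architecture whose atomics already imply full ordering (x86, converted elsewhere in this same series) only needs to stop compiler reordering; roughly, as a sketch from memory rather than a quote of that patch:

	/* ARM (this hunk): the hooks are full memory barriers. */
	#define smp_mb__before_atomic() smp_mb()
	#define smp_mb__after_atomic() smp_mb()

	/* x86-style (for comparison): atomics already order memory,
	 * so a compiler barrier suffices. */
	#define smp_mb__before_atomic() barrier()
	#define smp_mb__after_atomic() barrier()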
diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index b2e298a90d76..56380995f4c3 100644
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -25,9 +25,7 @@
 
 #include <linux/compiler.h>
 #include <linux/irqflags.h>
-
-#define smp_mb__before_clear_bit() smp_mb()
-#define smp_mb__after_clear_bit() smp_mb()
+#include <asm/barrier.h>
 
 /*
  * These functions are the basis of our bit ops.
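Note that bitops.h now pulls in asm/barrier.h, so users of the bit operations still see barrier definitions even though the macros no longer live here. The classic caller of the removed macros is the unlock-then-wake pattern; under the new names it reads roughly as below (MY_FLAG_BUSY and dev->flags are hypothetical, for illustration only):

	clear_bit(MY_FLAG_BUSY, &dev->flags);	/* atomic, but unordered on ARM */
	smp_mb__after_atomic();			/* formerly smp_mb__after_clear_bit() */
	wake_up_bit(&dev->flags, MY_FLAG_BUSY);	/* waiter must observe the clear */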