author		Linus Torvalds <torvalds@linux-foundation.org>	2014-06-03 15:57:53 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-06-03 15:57:53 -0400
commit		776edb59317ada867dfcddde40b55648beeb0078 (patch)
tree		f6a6136374642323cfefd7d6399ea429f9018ade /include/asm-generic
parent		59a3d4c3631e553357b7305dc09db1990aa6757c (diff)
parent		3cf2f34e1a3d4d5ff209d087925cf950e52f4805 (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip into next
Pull core locking updates from Ingo Molnar:
 "The main changes in this cycle were:

   - reduced/streamlined smp_mb__*() interface that allows more usecases
     and makes the existing ones less buggy, especially in rarer
     architectures

   - add rwsem implementation comments

   - bump up lockdep limits"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (33 commits)
  rwsem: Add comments to explain the meaning of the rwsem's count field
  lockdep: Increase static allocations
  arch: Mass conversion of smp_mb__*()
  arch,doc: Convert smp_mb__*()
  arch,xtensa: Convert smp_mb__*()
  arch,x86: Convert smp_mb__*()
  arch,tile: Convert smp_mb__*()
  arch,sparc: Convert smp_mb__*()
  arch,sh: Convert smp_mb__*()
  arch,score: Convert smp_mb__*()
  arch,s390: Convert smp_mb__*()
  arch,powerpc: Convert smp_mb__*()
  arch,parisc: Convert smp_mb__*()
  arch,openrisc: Convert smp_mb__*()
  arch,mn10300: Convert smp_mb__*()
  arch,mips: Convert smp_mb__*()
  arch,metag: Convert smp_mb__*()
  arch,m68k: Convert smp_mb__*()
  arch,m32r: Convert smp_mb__*()
  arch,ia64: Convert smp_mb__*()
  ...
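To make the interface change concrete, here is a minimal sketch of what the "Mass conversion of smp_mb__*()" means at a call site, assuming a hypothetical structure, field, and bit name (none of these identifiers come from the patch itself):

#include <linux/atomic.h>
#include <linux/bitops.h>

struct obj {				/* hypothetical example structure */
	atomic_t	refcnt;
	unsigned long	flags;
};
#define OBJ_PENDING	0		/* hypothetical flag bit */

static void obj_complete(struct obj *p)
{
	/*
	 * Old interface: one barrier name per operation family, e.g.
	 * smp_mb__before_clear_bit() or smp_mb__before_atomic_dec().
	 * New interface: a single smp_mb__{before,after}_atomic() pair
	 * covers all non-value-returning RMW atomics and bitops.
	 */
	smp_mb__before_atomic();	/* order earlier stores before the RMW */
	clear_bit(OBJ_PENDING, &p->flags);
	smp_mb__after_atomic();		/* order the RMW before later accesses */

	smp_mb__before_atomic();
	atomic_dec(&p->refcnt);
}

The point of the unification is that callers no longer need to remember which barrier family matches which atomic operation; one pair works for all of them.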
Diffstat (limited to 'include/asm-generic')
-rw-r--r--	include/asm-generic/atomic.h		| 7 +------
-rw-r--r--	include/asm-generic/barrier.h		| 8 ++++++++
-rw-r--r--	include/asm-generic/bitops.h		| 9 +--------
-rw-r--r--	include/asm-generic/bitops/atomic.h	| 2 +-
-rw-r--r--	include/asm-generic/bitops/lock.h	| 2 +-
5 files changed, 12 insertions(+), 16 deletions(-)
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 33bd2de3bc1e..9c79e7603459 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -16,6 +16,7 @@
 #define __ASM_GENERIC_ATOMIC_H
 
 #include <asm/cmpxchg.h>
+#include <asm/barrier.h>
 
 #ifdef CONFIG_SMP
 /* Force people to define core atomics */
@@ -182,11 +183,5 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
 }
 #endif
 
-/* Assume that atomic operations are already serializing */
-#define smp_mb__before_atomic_dec()	barrier()
-#define smp_mb__after_atomic_dec()	barrier()
-#define smp_mb__before_atomic_inc()	barrier()
-#define smp_mb__after_atomic_inc()	barrier()
-
 #endif /* __KERNEL__ */
 #endif /* __ASM_GENERIC_ATOMIC_H */
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index 6f692f8ac664..1402fa855388 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -62,6 +62,14 @@
 #define set_mb(var, value)  do { (var) = (value); mb(); } while (0)
 #endif
 
+#ifndef smp_mb__before_atomic
+#define smp_mb__before_atomic()	smp_mb()
+#endif
+
+#ifndef smp_mb__after_atomic
+#define smp_mb__after_atomic()	smp_mb()
+#endif
+
 #define smp_store_release(p, v)						\
 do {									\
 	compiletime_assert_atomic_type(*p);				\
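The #ifndef guards here follow the usual asm-generic override pattern: an architecture whose atomic RMW instructions are already fully ordered can pre-define the macros as plain compiler barriers, and the generic smp_mb() fallbacks drop away. A hedged sketch of what such an architecture header might contain (the file path and contents are illustrative, not quoted from the series):

/* arch/<arch>/include/asm/barrier.h (illustrative sketch) */

/*
 * Atomic RMW operations serialize on this architecture, so only a
 * compiler barrier is needed around them; asm-generic's #ifndef
 * guards then skip the heavier smp_mb() defaults.
 */
#define smp_mb__before_atomic()	barrier()
#define smp_mb__after_atomic()	barrier()

#include <asm-generic/barrier.h>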
diff --git a/include/asm-generic/bitops.h b/include/asm-generic/bitops.h
index 280ca7a96f75..dcdcacf2fd2b 100644
--- a/include/asm-generic/bitops.h
+++ b/include/asm-generic/bitops.h
@@ -11,14 +11,7 @@
 
 #include <linux/irqflags.h>
 #include <linux/compiler.h>
-
-/*
- * clear_bit may not imply a memory barrier
- */
-#ifndef smp_mb__before_clear_bit
-#define smp_mb__before_clear_bit()	smp_mb()
-#define smp_mb__after_clear_bit()	smp_mb()
-#endif
+#include <asm/barrier.h>
 
 #include <asm-generic/bitops/__ffs.h>
 #include <asm-generic/bitops/ffz.h>
diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h
index 9ae6c34dc191..49673510b484 100644
--- a/include/asm-generic/bitops/atomic.h
+++ b/include/asm-generic/bitops/atomic.h
@@ -80,7 +80,7 @@ static inline void set_bit(int nr, volatile unsigned long *addr)
  *
  * clear_bit() is atomic and may not be reordered.  However, it does
  * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
  * in order to ensure changes are visible on other processors.
  */
 static inline void clear_bit(int nr, volatile unsigned long *addr)
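The comment's advice is unchanged; only the barrier names are. A minimal sketch of the locking-style pattern it describes, assuming a hypothetical flag word and bit number:

unsigned long flag_word;		/* hypothetical */
#define DATA_READY	0		/* hypothetical bit */

/*
 * clear_bit() is atomic but implies no ordering, so add the
 * barriers by hand when the bit publishes or consumes other stores.
 */
smp_mb__before_atomic();		/* earlier stores visible before the clear */
clear_bit(DATA_READY, &flag_word);
smp_mb__after_atomic();			/* the clear visible before later accesses */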
diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h
index 308a9e22c802..c30266e94806 100644
--- a/include/asm-generic/bitops/lock.h
+++ b/include/asm-generic/bitops/lock.h
@@ -20,7 +20,7 @@
  */
 #define clear_bit_unlock(nr, addr)	\
 do {					\
-	smp_mb__before_clear_bit();	\
+	smp_mb__before_atomic();	\
 	clear_bit(nr, addr);		\
 } while (0)
 
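For completeness, a hedged usage sketch of a bit-based lock built from the helpers in this header; the lock word and bit number are hypothetical:

static unsigned long lock_word;		/* hypothetical lock word */
#define MY_LOCK_BIT	0		/* hypothetical lock bit */

/* acquire: test_and_set_bit_lock() returns nonzero while the bit is held */
while (test_and_set_bit_lock(MY_LOCK_BIT, &lock_word))
	cpu_relax();

/* ... critical section ... */

/* release: clear_bit_unlock() supplies smp_mb__before_atomic() itself */
clear_bit_unlock(MY_LOCK_BIT, &lock_word);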