Diffstat (limited to 'include/asm-generic/bitops/lock.h')
-rw-r--r--	include/asm-generic/bitops/lock.h | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h
index c30266e94806..8ef0ccbf8167 100644
--- a/include/asm-generic/bitops/lock.h
+++ b/include/asm-generic/bitops/lock.h
@@ -29,16 +29,16 @@ do {					\
  * @nr: the bit to set
  * @addr: the address to start counting from
  *
- * This operation is like clear_bit_unlock, however it is not atomic.
- * It does provide release barrier semantics so it can be used to unlock
- * a bit lock, however it would only be used if no other CPU can modify
- * any bits in the memory until the lock is released (a good example is
- * if the bit lock itself protects access to the other bits in the word).
+ * A weaker form of clear_bit_unlock() as used by __bit_lock_unlock(). If all
+ * the bits in the word are protected by this lock some archs can use weaker
+ * ops to safely unlock.
+ *
+ * See for example x86's implementation.
  */
 #define __clear_bit_unlock(nr, addr)	\
 do {					\
-	smp_mb();			\
-	__clear_bit(nr, addr);		\
+	smp_mb__before_atomic();	\
+	clear_bit(nr, addr);		\
 } while (0)
 
 #endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */
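For readers outside the kernel, below is a minimal sketch of the bit-lock pattern the new comment describes. test_and_set_bit_lock(), __clear_bit_unlock() and cpu_relax() are the real kernel APIs; the lock word my_word, the bit layout, and the my_lock()/my_unlock() helpers are hypothetical, purely for illustration:

/*
 * Sketch only: one word holds both the lock bit and the data it
 * protects, so only the lock holder may touch the other bits.
 */
#include <linux/bitops.h>
#include <asm/processor.h>	/* cpu_relax() */

#define MY_LOCK_BIT	0	/* hypothetical: bit 0 is the lock */

static unsigned long my_word;	/* bit 0 = lock, bits 1..N = data */

static void my_lock(void)
{
	/* Acquire: spin until we atomically set the lock bit. */
	while (test_and_set_bit_lock(MY_LOCK_BIT, &my_word))
		cpu_relax();
}

static void my_unlock(void)
{
	/*
	 * Release: because every other bit in my_word is protected
	 * by this lock, the weaker __clear_bit_unlock() suffices.
	 */
	__clear_bit_unlock(MY_LOCK_BIT, &my_word);
}

The body of the change swaps the non-atomic __clear_bit() for the atomic clear_bit(), paired with smp_mb__before_atomic() rather than a full smp_mb(). The point is that __clear_bit_unlock() pairs with atomic acquire operations such as test_and_set_bit_lock(), so a plain non-atomic read-modify-write of the lock word could lose a concurrent atomic update to one of the word's other bits; the generic fallback therefore must stay atomic, while architectures where a weaker store is safe (such as x86, which the comment points to) can provide their own implementation.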