path: root/include/asm-x86/bitops_32.h
author	Nick Piggin <npiggin@suse.de>	2007-10-19 01:13:02 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2007-10-23 16:37:22 -0400
commit	418ccbe37f70f5021c4cd1cdcb0ce7f98d05f2dd (patch)
tree	d5b968d92b0051ae18b32940d4d7d4da15bcf031 /include/asm-x86/bitops_32.h
parent	ea5806559f92a3e7439bc7a4f2c0d04692e68931 (diff)
x86: lock bitops
I missed an obvious one!

x86 CPUs are defined not to reorder stores past earlier loads, so there is
no hardware memory barrier required to implement a release-consistent store
(all stores are, by definition).

So ditch the generic lock bitops, and implement optimised versions for x86,
which removes the mfence from __clear_bit_unlock (which is already a useful
primitive for SLUB).

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
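As a rough illustration of the pattern these primitives serve, here is a
minimal sketch (not part of the patch) of a bit spinlock built on
test_and_set_bit_lock() and clear_bit_unlock(); the names MY_LOCK_BIT,
my_flags, my_lock() and my_unlock() are hypothetical:

#include <linux/bitops.h>
#include <asm/processor.h>	/* cpu_relax() */

#define MY_LOCK_BIT	0

static unsigned long my_flags;

static void my_lock(void)
{
	/* Spin until the old bit value was 0; the lock-prefixed bts
	 * inside test_and_set_bit provides the acquire semantics. */
	while (test_and_set_bit_lock(MY_LOCK_BIT, &my_flags))
		cpu_relax();
}

static void my_unlock(void)
{
	/* On x86 the clearing store itself acts as the release; the
	 * barrier() in clear_bit_unlock only constrains the compiler. */
	clear_bit_unlock(MY_LOCK_BIT, &my_flags);
}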
Diffstat (limited to 'include/asm-x86/bitops_32.h')
-rw-r--r--	include/asm-x86/bitops_32.h	43
1 file changed, 42 insertions(+), 1 deletion(-)
diff --git a/include/asm-x86/bitops_32.h b/include/asm-x86/bitops_32.h
index 3268a341cf49..36ebb5b02b4f 100644
--- a/include/asm-x86/bitops_32.h
+++ b/include/asm-x86/bitops_32.h
@@ -80,6 +80,20 @@ static inline void clear_bit(int nr, volatile unsigned long * addr)
 		:"Ir" (nr));
 }
 
+/*
+ * clear_bit_unlock - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit() is atomic and implies release semantics before the memory
+ * operation. It can be used for an unlock.
+ */
+static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
+{
+	barrier();
+	clear_bit(nr, addr);
+}
+
 static inline void __clear_bit(int nr, volatile unsigned long * addr)
 {
 	__asm__ __volatile__(
@@ -87,6 +101,25 @@ static inline void __clear_bit(int nr, volatile unsigned long * addr)
 		:"+m" (ADDR)
 		:"Ir" (nr));
 }
+
+/*
+ * __clear_bit_unlock - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * __clear_bit() is non-atomic and implies release semantics before the memory
+ * operation. It can be used for an unlock if no other CPUs can concurrently
+ * modify other bits in the word.
+ *
+ * No memory barrier is required here, because x86 cannot reorder stores past
+ * older loads. Same principle as spin_unlock.
+ */
+static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
+{
+	barrier();
+	__clear_bit(nr, addr);
+}
+
 #define smp_mb__before_clear_bit()	barrier()
 #define smp_mb__after_clear_bit()	barrier()
 
@@ -146,6 +179,15 @@ static inline int test_and_set_bit(int nr, volatile unsigned long * addr)
 }
 
 /**
+ * test_and_set_bit_lock - Set a bit and return its old value for lock
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This is the same as test_and_set_bit on x86
+ */
+#define test_and_set_bit_lock	test_and_set_bit
+
+/**
  * __test_and_set_bit - Set a bit and return its old value
  * @nr: Bit to set
  * @addr: Address to count from
@@ -406,7 +448,6 @@ static inline int fls(int x)
 }
 
 #include <asm-generic/bitops/hweight.h>
-#include <asm-generic/bitops/lock.h>
 
 #endif /* __KERNEL__ */
 
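The comment on __clear_bit_unlock() in the hunk above carries a subtle
constraint worth spelling out: unlike clear_bit_unlock(), the underlying btr
is a plain (non-lock-prefixed) read-modify-write of the whole word, so it is
only safe when no other CPU can modify other bits of that word concurrently,
e.g. when the lock bit serialises all writers to the word (the SLUB case the
changelog mentions). A hypothetical sketch, with my_state and
my_update_and_unlock() invented for illustration:

static unsigned long my_state;	/* bit 0: lock; higher bits: holder-only data */

static void my_update_and_unlock(void)
{
	/* Safe: only the lock holder writes this word, so the non-atomic
	 * read-modify-write in __clear_bit cannot lose another CPU's update.
	 * The barrier() in __clear_bit_unlock keeps the compiler from
	 * sinking the __set_bit below the unlocking clear. */
	__set_bit(1, &my_state);
	__clear_bit_unlock(0, &my_state);
}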