aboutsummaryrefslogtreecommitdiffstats
path: root/include
diff options
context:
space:
mode:
authorNick Piggin <npiggin@suse.de>2007-10-19 01:13:02 -0400
committerThomas Gleixner <tglx@linutronix.de>2007-10-23 16:37:22 -0400
commit418ccbe37f70f5021c4cd1cdcb0ce7f98d05f2dd (patch)
treed5b968d92b0051ae18b32940d4d7d4da15bcf031 /include
parentea5806559f92a3e7439bc7a4f2c0d04692e68931 (diff)
x86: lock bitops
I missed an obvious one! x86 CPUs are defined not to reorder stores past earlier loads, so there is no hardware memory barrier required to implement a release-consistent store (all stores are, by definition). So ditch the generic lock bitops, and implement optimised versions for x86, which removes the mfence from __clear_bit_unlock (which is already a useful primitive for SLUB). Signed-off-by: Nick Piggin <npiggin@suse.de> Signed-off-by: Ingo Molnar <mingo@elte.hu> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'include')
-rw-r--r--include/asm-x86/bitops_32.h43
-rw-r--r--include/asm-x86/bitops_64.h42
2 files changed, 83 insertions, 2 deletions
diff --git a/include/asm-x86/bitops_32.h b/include/asm-x86/bitops_32.h
index 3268a341cf49..36ebb5b02b4f 100644
--- a/include/asm-x86/bitops_32.h
+++ b/include/asm-x86/bitops_32.h
@@ -80,6 +80,20 @@ static inline void clear_bit(int nr, volatile unsigned long * addr)
80 :"Ir" (nr));
81}
82
83/*
84 * clear_bit_unlock - Clears a bit in memory
85 * @nr: Bit to clear
86 * @addr: Address to start counting from
87 *
88 * clear_bit() is atomic and implies release semantics before the memory
89 * operation. It can be used for an unlock.
90 */
91static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
92{
93 barrier();
94 clear_bit(nr, addr);
95}
96
97static inline void __clear_bit(int nr, volatile unsigned long * addr)
98{
99 __asm__ __volatile__(
@@ -87,6 +101,25 @@ static inline void __clear_bit(int nr, volatile unsigned long * addr)
101 :"+m" (ADDR)
102 :"Ir" (nr));
103}
104
105/*
106 * __clear_bit_unlock - Clears a bit in memory
107 * @nr: Bit to clear
108 * @addr: Address to start counting from
109 *
110 * __clear_bit() is non-atomic and implies release semantics before the memory
111 * operation. It can be used for an unlock if no other CPUs can concurrently
112 * modify other bits in the word.
113 *
114 * No memory barrier is required here, because x86 cannot reorder stores past
115 * older loads. Same principle as spin_unlock.
116 */
117static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
118{
119 barrier();
120 __clear_bit(nr, addr);
121}
122
123#define smp_mb__before_clear_bit() barrier()
124#define smp_mb__after_clear_bit() barrier()
125
@@ -146,6 +179,15 @@ static inline int test_and_set_bit(int nr, volatile unsigned long * addr)
179}
180
181/**
182 * test_and_set_bit_lock - Set a bit and return its old value for lock
183 * @nr: Bit to set
184 * @addr: Address to count from
185 *
186 * This is the same as test_and_set_bit on x86
187 */
188#define test_and_set_bit_lock test_and_set_bit
189
190/**
191 * __test_and_set_bit - Set a bit and return its old value
192 * @nr: Bit to set
193 * @addr: Address to count from
@@ -406,7 +448,6 @@ static inline int fls(int x)
448}
449
450#include <asm-generic/bitops/hweight.h>
409#include <asm-generic/bitops/lock.h>
451
452#endif /* __KERNEL__ */
453
diff --git a/include/asm-x86/bitops_64.h b/include/asm-x86/bitops_64.h
index dacaa5f1febc..b4d47940b959 100644
--- a/include/asm-x86/bitops_64.h
+++ b/include/asm-x86/bitops_64.h
@@ -72,6 +72,20 @@ static __inline__ void clear_bit(int nr, volatile void * addr)
72 :"dIr" (nr));
73}
74
75/*
76 * clear_bit_unlock - Clears a bit in memory
77 * @nr: Bit to clear
78 * @addr: Address to start counting from
79 *
80 * clear_bit() is atomic and implies release semantics before the memory
81 * operation. It can be used for an unlock.
82 */
83static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
84{
85 barrier();
86 clear_bit(nr, addr);
87}
88
89static __inline__ void __clear_bit(int nr, volatile void * addr)
90{
91 __asm__ __volatile__(
@@ -80,6 +94,24 @@ static __inline__ void __clear_bit(int nr, volatile void * addr)
94 :"dIr" (nr));
95}
96
97/*
98 * __clear_bit_unlock - Clears a bit in memory
99 * @nr: Bit to clear
100 * @addr: Address to start counting from
101 *
102 * __clear_bit() is non-atomic and implies release semantics before the memory
103 * operation. It can be used for an unlock if no other CPUs can concurrently
104 * modify other bits in the word.
105 *
106 * No memory barrier is required here, because x86 cannot reorder stores past
107 * older loads. Same principle as spin_unlock.
108 */
109static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
110{
111 barrier();
112 __clear_bit(nr, addr);
113}
114
115#define smp_mb__before_clear_bit() barrier()
116#define smp_mb__after_clear_bit() barrier()
117
@@ -137,6 +169,15 @@ static __inline__ int test_and_set_bit(int nr, volatile void * addr)
169}
170
171/**
172 * test_and_set_bit_lock - Set a bit and return its old value for lock
173 * @nr: Bit to set
174 * @addr: Address to count from
175 *
176 * This is the same as test_and_set_bit on x86
177 */
178#define test_and_set_bit_lock test_and_set_bit
179
180/**
181 * __test_and_set_bit - Set a bit and return its old value
182 * @nr: Bit to set
183 * @addr: Address to count from
@@ -412,7 +453,6 @@ static __inline__ int fls(int x)
453#define ARCH_HAS_FAST_MULTIPLIER 1
454
455#include <asm-generic/bitops/hweight.h>
415#include <asm-generic/bitops/lock.h>
456
457#endif /* __KERNEL__ */
458