author	Nick Piggin <npiggin@suse.de>	2007-10-19 01:13:02 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2007-10-23 16:37:22 -0400
commit	418ccbe37f70f5021c4cd1cdcb0ce7f98d05f2dd (patch)
tree	d5b968d92b0051ae18b32940d4d7d4da15bcf031 /include/asm-x86/bitops_64.h
parent	ea5806559f92a3e7439bc7a4f2c0d04692e68931 (diff)
x86: lock bitops
I missed an obvious one! x86 CPUs are defined not to reorder stores past
earlier loads, so there is no hardware memory barrier required to implement
a release-consistent store (all stores are, by definition).

So ditch the generic lock bitops, and implement optimised versions for x86,
which removes the mfence from __clear_bit_unlock (which is already a useful
primitive for SLUB).

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
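For illustration, here is a minimal, hypothetical sketch (not part of the patch) of the lock pattern these primitives serve. The helper names my_bit_spin_lock and my_bit_spin_unlock are invented for this example; the kernel's real helpers live in <linux/bit_spinlock.h>.

	#include <linux/bitops.h>
	#include <asm/processor.h>	/* cpu_relax() */

	static inline void my_bit_spin_lock(int nr, unsigned long *word)
	{
		/* Acquire: a successful test_and_set_bit_lock() orders
		 * the critical section after the locking operation. */
		while (test_and_set_bit_lock(nr, word))
			cpu_relax();	/* spin until the bit clears */
	}

	static inline void my_bit_spin_unlock(int nr, unsigned long *word)
	{
		/* Release: on x86 this is just barrier() plus the
		 * clearing store, with no mfence, because stores are
		 * not reordered past earlier loads. */
		clear_bit_unlock(nr, word);
	}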
Diffstat (limited to 'include/asm-x86/bitops_64.h')
-rw-r--r--	include/asm-x86/bitops_64.h	42
1 file changed, 41 insertions(+), 1 deletion(-)
diff --git a/include/asm-x86/bitops_64.h b/include/asm-x86/bitops_64.h
index dacaa5f1febc..b4d47940b959 100644
--- a/include/asm-x86/bitops_64.h
+++ b/include/asm-x86/bitops_64.h
@@ -72,6 +72,20 @@ static __inline__ void clear_bit(int nr, volatile void * addr)
72 :"dIr" (nr)); 72 :"dIr" (nr));
73} 73}
74 74
75/*
76 * clear_bit_unlock - Clears a bit in memory
77 * @nr: Bit to clear
78 * @addr: Address to start counting from
79 *
80 * clear_bit() is atomic and implies release semantics before the memory
81 * operation. It can be used for an unlock.
82 */
83static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
84{
85 barrier();
86 clear_bit(nr, addr);
87}
88
75static __inline__ void __clear_bit(int nr, volatile void * addr) 89static __inline__ void __clear_bit(int nr, volatile void * addr)
76{ 90{
77 __asm__ __volatile__( 91 __asm__ __volatile__(
@@ -80,6 +94,24 @@ static __inline__ void __clear_bit(int nr, volatile void * addr)
80 :"dIr" (nr)); 94 :"dIr" (nr));
81} 95}
82 96
97/*
98 * __clear_bit_unlock - Clears a bit in memory
99 * @nr: Bit to clear
100 * @addr: Address to start counting from
101 *
102 * __clear_bit() is non-atomic and implies release semantics before the memory
103 * operation. It can be used for an unlock if no other CPUs can concurrently
104 * modify other bits in the word.
105 *
106 * No memory barrier is required here, because x86 cannot reorder stores past
107 * older loads. Same principle as spin_unlock.
108 */
109static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
110{
111 barrier();
112 __clear_bit(nr, addr);
113}
114
83#define smp_mb__before_clear_bit() barrier() 115#define smp_mb__before_clear_bit() barrier()
84#define smp_mb__after_clear_bit() barrier() 116#define smp_mb__after_clear_bit() barrier()
85 117
@@ -137,6 +169,15 @@ static __inline__ int test_and_set_bit(int nr, volatile void * addr)
 }
 
 /**
+ * test_and_set_bit_lock - Set a bit and return its old value for lock
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This is the same as test_and_set_bit on x86
+ */
+#define test_and_set_bit_lock test_and_set_bit
+
+/**
  * __test_and_set_bit - Set a bit and return its old value
  * @nr: Bit to set
  * @addr: Address to count from
@@ -412,7 +453,6 @@ static __inline__ int fls(int x)
 #define ARCH_HAS_FAST_MULTIPLIER 1
 
 #include <asm-generic/bitops/hweight.h>
-#include <asm-generic/bitops/lock.h>
 
 #endif /* __KERNEL__ */
 
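A brief hypothetical sketch (not from the kernel) of why barrier() alone suffices in __clear_bit_unlock() on x86. The names guard and shared_counter are invented for this example.

	#include <linux/bitops.h>

	static unsigned long guard;	/* bit 0 acts as the lock bit */
	static int shared_counter;	/* protected by bit 0 of guard */

	static void release_example(void)
	{
		shared_counter++;	/* critical-section work */

		/*
		 * barrier() inside __clear_bit_unlock() keeps the compiler
		 * from sinking the increment below the clearing store; the
		 * CPU side is covered by x86's rule that stores are not
		 * reordered past earlier loads, so no mfence is needed.
		 */
		__clear_bit_unlock(0, &guard);
	}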