author	Jeremy Fitzhardinge <jeremy@goop.org>	2008-01-30 07:30:55 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-01-30 07:30:55 -0500
commit	1c54d77078056cde0f195b1a982cb681850efc08 (patch)
tree	2acf18bdf1cd7ff38f79ea09dd75025aa6f60a65 /include/asm-x86/bitops_32.h
parent	7bf0c23ed24b0d95a2a717f86dce1f210e16f8a5 (diff)
x86: partial unification of asm-x86/bitops.h
This unifies the set/clear/test bit functions of asm/bitops.h. I have not
attempted to merge the bit-finding functions, since they rely on the machine
word size and can't be easily restructured to work generically without a lot
of #ifdefs. In particular, the 64-bit code can assume the presence of
conditional move instructions, whereas 32-bit needs to be more careful.

The inline assembly for the bit operations has been changed to remove explicit
sizing hints on the instructions, so the assembler will pick the appropriate
instruction forms depending on the architecture and the context.

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Andi Kleen <ak@suse.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
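[Editor's note: a minimal sketch of the sizing change described above, not a
hunk from this commit; the helper names set_bit_sized()/set_bit_unsized() are
hypothetical. With an explicit "l" suffix the instruction is pinned to 32-bit
operands; without it, the assembler derives the operand size from the register
operand, so the same source assembles correctly for 32-bit and 64-bit builds.]

#include <asm/alternative.h>	/* LOCK_PREFIX */

/* Before: explicit 32-bit size suffix ("btsl"). */
static inline void set_bit_sized(int nr, volatile unsigned long *addr)
{
	__asm__ __volatile__(LOCK_PREFIX "btsl %1,%0"
			     : "+m" (*(volatile long *)addr)
			     : "Ir" (nr));
}

/* After: no suffix; the assembler picks the form from the size of %1. */
static inline void set_bit_unsized(int nr, volatile unsigned long *addr)
{
	__asm__ __volatile__(LOCK_PREFIX "bts %1,%0"
			     : "+m" (*(volatile long *)addr)
			     : "Ir" (nr));
}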
Diffstat (limited to 'include/asm-x86/bitops_32.h')
-rw-r--r--	include/asm-x86/bitops_32.h	308
1 file changed, 0 insertions(+), 308 deletions(-)
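[Editor's note: as orientation for the 308 deleted lines below, a minimal
usage sketch of the interface they implement. The bitop calls are the
documented API from this header; demo_flags and demo() are hypothetical, and
BITS_TO_LONGS() is assumed to come from linux/bitops.h.]

#include <linux/bitops.h>	/* the only permitted include, per the #error guard */

static unsigned long demo_flags[BITS_TO_LONGS(64)];	/* 64-bit bitmap */

static void demo(void)
{
	set_bit(0, demo_flags);		/* atomic (LOCK-prefixed bts): SMP-safe */
	__set_bit(1, demo_flags);	/* non-atomic: caller must serialize */

	if (test_and_set_bit(2, demo_flags))
		return;			/* bit 2 was already set */

	if (test_bit(1, demo_flags))	/* constant nr: compiles to constant_test_bit() */
		change_bit(1, demo_flags);	/* atomic toggle */
}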
diff --git a/include/asm-x86/bitops_32.h b/include/asm-x86/bitops_32.h
index 5a29cce6a91..e4d75fcf9c0 100644
--- a/include/asm-x86/bitops_32.h
+++ b/include/asm-x86/bitops_32.h
@@ -5,314 +5,6 @@
  * Copyright 1992, Linus Torvalds.
  */
 
-#ifndef _LINUX_BITOPS_H
-#error only <linux/bitops.h> can be included directly
-#endif
-
-#include <linux/compiler.h>
-#include <asm/alternative.h>
-
-/*
- * These have to be done with inline assembly: that way the bit-setting
- * is guaranteed to be atomic. All bit operations return 0 if the bit
- * was cleared before the operation and != 0 if it was not.
- *
- * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
- */
-
-#define ADDR (*(volatile long *) addr)
-
-/**
- * set_bit - Atomically set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * This function is atomic and may not be reordered. See __set_bit()
- * if you do not require the atomic guarantees.
- *
- * Note: there are no guarantees that this function will not be reordered
- * on non x86 architectures, so if you are writing portable code,
- * make sure not to rely on its reordering guarantees.
- *
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static inline void set_bit(int nr, volatile unsigned long *addr)
-{
-	__asm__ __volatile__( LOCK_PREFIX
-		"btsl %1,%0"
-		:"+m" (ADDR)
-		:"Ir" (nr));
-}
-
-/**
- * __set_bit - Set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * Unlike set_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static inline void __set_bit(int nr, volatile unsigned long *addr)
-{
-	__asm__(
-		"btsl %1,%0"
-		:"+m" (ADDR)
-		:"Ir" (nr));
-}
-
-/**
- * clear_bit - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * clear_bit() is atomic and may not be reordered. However, it does
- * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
- * in order to ensure changes are visible on other processors.
- */
-static inline void clear_bit(int nr, volatile unsigned long *addr)
-{
-	__asm__ __volatile__( LOCK_PREFIX
-		"btrl %1,%0"
-		:"+m" (ADDR)
-		:"Ir" (nr));
-}
-
-/*
- * clear_bit_unlock - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * clear_bit() is atomic and implies release semantics before the memory
- * operation. It can be used for an unlock.
- */
-static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
-{
-	barrier();
-	clear_bit(nr, addr);
-}
-
-static inline void __clear_bit(int nr, volatile unsigned long *addr)
-{
-	__asm__ __volatile__(
-		"btrl %1,%0"
-		:"+m" (ADDR)
-		:"Ir" (nr));
-}
-
-/*
- * __clear_bit_unlock - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * __clear_bit() is non-atomic and implies release semantics before the memory
- * operation. It can be used for an unlock if no other CPUs can concurrently
- * modify other bits in the word.
- *
- * No memory barrier is required here, because x86 cannot reorder stores past
- * older loads. Same principle as spin_unlock.
- */
-static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
-{
-	barrier();
-	__clear_bit(nr, addr);
-}
-
-#define smp_mb__before_clear_bit()	barrier()
-#define smp_mb__after_clear_bit()	barrier()
-
-/**
- * __change_bit - Toggle a bit in memory
- * @nr: the bit to change
- * @addr: the address to start counting from
- *
- * Unlike change_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static inline void __change_bit(int nr, volatile unsigned long *addr)
-{
-	__asm__ __volatile__(
-		"btcl %1,%0"
-		:"+m" (ADDR)
-		:"Ir" (nr));
-}
-
-/**
- * change_bit - Toggle a bit in memory
- * @nr: Bit to change
- * @addr: Address to start counting from
- *
- * change_bit() is atomic and may not be reordered. It may be
- * reordered on architectures other than x86.
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static inline void change_bit(int nr, volatile unsigned long *addr)
-{
-	__asm__ __volatile__( LOCK_PREFIX
-		"btcl %1,%0"
-		:"+m" (ADDR)
-		:"Ir" (nr));
-}
-
-/**
- * test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It may be reordered on architectures other than x86.
- * It also implies a memory barrier.
- */
-static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
-{
-	int oldbit;
-
-	__asm__ __volatile__( LOCK_PREFIX
-		"btsl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit),"+m" (ADDR)
-		:"Ir" (nr) : "memory");
-	return oldbit;
-}
-
-/**
- * test_and_set_bit_lock - Set a bit and return its old value for lock
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This is the same as test_and_set_bit on x86.
- */
-static inline int test_and_set_bit_lock(int nr, volatile unsigned long *addr)
-{
-	return test_and_set_bit(nr, addr);
-}
-
-/**
- * __test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail. You must protect multiple accesses with a lock.
- */
-static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
-{
-	int oldbit;
-
-	__asm__(
-		"btsl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit),"+m" (ADDR)
-		:"Ir" (nr));
-	return oldbit;
-}
-
-/**
- * test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It can be reordered on architectures other than x86.
- * It also implies a memory barrier.
- */
-static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
-{
-	int oldbit;
-
-	__asm__ __volatile__( LOCK_PREFIX
-		"btrl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit),"+m" (ADDR)
-		:"Ir" (nr) : "memory");
-	return oldbit;
-}
-
-/**
- * __test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail. You must protect multiple accesses with a lock.
- */
-static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
-{
-	int oldbit;
-
-	__asm__(
-		"btrl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit),"+m" (ADDR)
-		:"Ir" (nr));
-	return oldbit;
-}
-
-/* WARNING: non atomic and it can be reordered! */
-static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
-{
-	int oldbit;
-
-	__asm__ __volatile__(
-		"btcl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit),"+m" (ADDR)
-		:"Ir" (nr) : "memory");
-	return oldbit;
-}
-
-/**
- * test_and_change_bit - Change a bit and return its old value
- * @nr: Bit to change
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
-{
-	int oldbit;
-
-	__asm__ __volatile__( LOCK_PREFIX
-		"btcl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit),"+m" (ADDR)
-		:"Ir" (nr) : "memory");
-	return oldbit;
-}
-
-#if 0 /* Fool kernel-doc since it doesn't do macros yet */
-/**
- * test_bit - Determine whether a bit is set
- * @nr: bit number to test
- * @addr: Address to start counting from
- */
-static int test_bit(int nr, const volatile void *addr);
-#endif
-
-static __always_inline int constant_test_bit(int nr, const volatile unsigned long *addr)
-{
-	return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0;
-}
-
-static inline int variable_test_bit(int nr, const volatile unsigned long *addr)
-{
-	int oldbit;
-
-	__asm__ __volatile__(
-		"btl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit)
-		:"m" (ADDR),"Ir" (nr));
-	return oldbit;
-}
-
-#define test_bit(nr, addr) \
-	(__builtin_constant_p(nr) ? \
-	 constant_test_bit((nr), (addr)) : \
-	 variable_test_bit((nr), (addr)))
-
-#undef ADDR
-
 /**
  * find_first_zero_bit - find the first zero bit in a memory region
  * @addr: The address to start the search at