author     Jeremy Fitzhardinge <jeremy@goop.org>  2008-01-30 07:30:55 -0500
committer  Ingo Molnar <mingo@elte.hu>            2008-01-30 07:30:55 -0500
commit     1c54d77078056cde0f195b1a982cb681850efc08 (patch)
tree       2acf18bdf1cd7ff38f79ea09dd75025aa6f60a65
parent     7bf0c23ed24b0d95a2a717f86dce1f210e16f8a5 (diff)
x86: partial unification of asm-x86/bitops.h
This unifies the set/clear/test bit functions of asm/bitops.h.  I have
not attempted to merge the bit-finding functions, since they rely on
the machine word size and can't be easily restructured to work
generically without a lot of #ifdefs.  In particular, the 64-bit code
can assume the presence of conditional move instructions, whereas
32-bit needs to be more careful.

The inline assembly for the bit operations has been changed to remove
explicit sizing hints on the instructions, so the assembler will pick
the appropriate instruction forms depending on the architecture and
the context.

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Andi Kleen <ak@suse.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--  include/asm-x86/bitops.h     | 315
-rw-r--r--  include/asm-x86/bitops_32.h  | 308
-rw-r--r--  include/asm-x86/bitops_64.h  | 297
3 files changed, 315 insertions(+), 605 deletions(-)
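
The "explicit sizing hints" mentioned in the commit message are the "l"
suffixes on the bit instructions (btsl, btrl, btcl, btl); the unified header
drops them so the assembler derives the operand size from the operands
themselves. A minimal stand-alone sketch of the before/after, with invented
*_demo names and a literal "lock; " prefix standing in for the kernel's
LOCK_PREFIX macro:

/* Before unification (bitops_32.h style): operand size spelled out. */
static inline void set_bit_sized_demo(int nr, volatile unsigned long *addr)
{
	asm volatile("lock; btsl %1,%0"
		     : "+m" (*addr)
		     : "Ir" (nr) : "memory");
}

/* After unification: no suffix; the assembler picks the instruction
 * form that matches the operands, so the same source assembles for
 * both 32-bit and 64-bit kernels. */
static inline void set_bit_unsized_demo(int nr, volatile unsigned long *addr)
{
	asm volatile("lock; bts %1,%0"
		     : "+m" (*addr)
		     : "Ir" (nr) : "memory");
}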
diff --git a/include/asm-x86/bitops.h b/include/asm-x86/bitops.h
index 07e3f6d4fe47..c6dd7e259b46 100644
--- a/include/asm-x86/bitops.h
+++ b/include/asm-x86/bitops.h
@@ -1,5 +1,320 @@
+#ifndef _ASM_X86_BITOPS_H
+#define _ASM_X86_BITOPS_H
+
+/*
+ * Copyright 1992, Linus Torvalds.
+ */
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#include <linux/compiler.h>
+#include <asm/alternative.h>
+
+/*
+ * These have to be done with inline assembly: that way the bit-setting
+ * is guaranteed to be atomic. All bit operations return 0 if the bit
+ * was cleared before the operation and != 0 if it was not.
+ *
+ * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
+ */
+
+#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
+/* Technically wrong, but this avoids compilation errors on some gcc
+   versions. */
+#define ADDR "=m" (*(volatile long *) addr)
+#else
+#define ADDR "+m" (*(volatile long *) addr)
+#endif
+
+/**
+ * set_bit - Atomically set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This function is atomic and may not be reordered. See __set_bit()
+ * if you do not require the atomic guarantees.
+ *
+ * Note: there are no guarantees that this function will not be reordered
+ * on non x86 architectures, so if you are writing portable code,
+ * make sure not to rely on its reordering guarantees.
+ *
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static inline void set_bit(int nr, volatile unsigned long *addr)
+{
+	asm volatile(LOCK_PREFIX "bts %1,%0"
+		     : ADDR
+		     : "Ir" (nr) : "memory");
+}
+
+/**
+ * __set_bit - Set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * Unlike set_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static inline void __set_bit(int nr, volatile unsigned long *addr)
+{
+	asm volatile("bts %1,%0"
+		     : ADDR
+		     : "Ir" (nr) : "memory");
+}
+
+
+/**
+ * clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit() is atomic and may not be reordered. However, it does
+ * not contain a memory barrier, so if it is used for locking purposes,
+ * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * in order to ensure changes are visible on other processors.
+ */
+static inline void clear_bit(int nr, volatile unsigned long *addr)
+{
+	asm volatile(LOCK_PREFIX "btr %1,%0"
+		     : ADDR
+		     : "Ir" (nr));
+}
+
+/*
+ * clear_bit_unlock - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit() is atomic and implies release semantics before the memory
+ * operation. It can be used for an unlock.
+ */
+static inline void clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
+{
+	barrier();
+	clear_bit(nr, addr);
+}
+
+static inline void __clear_bit(int nr, volatile unsigned long *addr)
+{
+	asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
+}
+
+/*
+ * __clear_bit_unlock - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * __clear_bit() is non-atomic and implies release semantics before the memory
+ * operation. It can be used for an unlock if no other CPUs can concurrently
+ * modify other bits in the word.
+ *
+ * No memory barrier is required here, because x86 cannot reorder stores past
+ * older loads. Same principle as spin_unlock.
+ */
+static inline void __clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
+{
+	barrier();
+	__clear_bit(nr, addr);
+}
+
+#define smp_mb__before_clear_bit()	barrier()
+#define smp_mb__after_clear_bit()	barrier()
+
+/**
+ * __change_bit - Toggle a bit in memory
+ * @nr: the bit to change
+ * @addr: the address to start counting from
+ *
+ * Unlike change_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static inline void __change_bit(int nr, volatile unsigned long *addr)
+{
+	asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
+}
+
+/**
+ * change_bit - Toggle a bit in memory
+ * @nr: Bit to change
+ * @addr: Address to start counting from
+ *
+ * change_bit() is atomic and may not be reordered.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static inline void change_bit(int nr, volatile unsigned long *addr)
+{
+	asm volatile(LOCK_PREFIX "btc %1,%0"
+		     : ADDR : "Ir" (nr));
+}
+
+/**
+ * test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
+{
+	int oldbit;
+
+	asm volatile(LOCK_PREFIX "bts %2,%1\n\t"
+		     "sbb %0,%0"
+		     : "=r" (oldbit), ADDR
+		     : "Ir" (nr) : "memory");
+
+	return oldbit;
+}
+
+/**
+ * test_and_set_bit_lock - Set a bit and return its old value for lock
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This is the same as test_and_set_bit on x86.
+ */
+static inline int test_and_set_bit_lock(int nr, volatile unsigned long *addr)
+{
+	return test_and_set_bit(nr, addr);
+}
+
+/**
+ * __test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two examples of this operation race, one can appear to succeed
+ * but actually fail. You must protect multiple accesses with a lock.
+ */
+static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
+{
+	int oldbit;
+
+	asm("bts %2,%1\n\t"
+	    "sbb %0,%0"
+	    : "=r" (oldbit), ADDR
+	    : "Ir" (nr));
+	return oldbit;
+}
+
+/**
+ * test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
+{
+	int oldbit;
+
+	asm volatile(LOCK_PREFIX "btr %2,%1\n\t"
+		     "sbb %0,%0"
+		     : "=r" (oldbit), ADDR
+		     : "Ir" (nr) : "memory");
+
+	return oldbit;
+}
+
+/**
+ * __test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two examples of this operation race, one can appear to succeed
+ * but actually fail. You must protect multiple accesses with a lock.
+ */
+static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
+{
+	int oldbit;
+
+	asm volatile("btr %2,%1\n\t"
+		     "sbb %0,%0"
+		     : "=r" (oldbit), ADDR
+		     : "Ir" (nr));
+	return oldbit;
+}
+
+/* WARNING: non atomic and it can be reordered! */
+static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
+{
+	int oldbit;
+
+	asm volatile("btc %2,%1\n\t"
+		     "sbb %0,%0"
+		     : "=r" (oldbit), ADDR
+		     : "Ir" (nr) : "memory");
+
+	return oldbit;
+}
+
+/**
+ * test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
+{
+	int oldbit;
+
+	asm volatile(LOCK_PREFIX "btc %2,%1\n\t"
+		     "sbb %0,%0"
+		     : "=r" (oldbit), ADDR
+		     : "Ir" (nr) : "memory");
+
+	return oldbit;
+}
+
+static inline int constant_test_bit(int nr, const volatile unsigned long *addr)
+{
+	return ((1UL << (nr % BITS_PER_LONG)) & (addr[nr / BITS_PER_LONG])) != 0;
+}
+
+static inline int variable_test_bit(int nr, volatile const unsigned long *addr)
+{
+	int oldbit;
+
+	asm volatile("bt %2,%1\n\t"
+		     "sbb %0,%0"
+		     : "=r" (oldbit)
+		     : "m" (*addr), "Ir" (nr));
+
+	return oldbit;
+}
+
+#if 0 /* Fool kernel-doc since it doesn't do macros yet */
+/**
+ * test_bit - Determine whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static int test_bit(int nr, const volatile unsigned long *addr);
+#endif
+
+#define test_bit(nr,addr) \
+	(__builtin_constant_p(nr) ? \
+	 constant_test_bit((nr),(addr)) : \
+	 variable_test_bit((nr),(addr)))
+
+#undef ADDR
+
 #ifdef CONFIG_X86_32
 # include "bitops_32.h"
 #else
 # include "bitops_64.h"
 #endif
+
+#endif /* _ASM_X86_BITOPS_H */
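
Every test_and_* helper in the unified header above relies on the same
two-instruction pattern: the bt/bts/btr/btc instructions copy the bit's old
value into the carry flag, and "sbb %0,%0" (subtract-with-borrow of a register
from itself) then turns CF into 0 or -1. A hedged user-space sketch of the
pattern, with an invented *_demo name and a literal "lock; " prefix standing
in for LOCK_PREFIX:

#include <stdio.h>

static inline int test_and_set_bit_demo(int nr, volatile unsigned long *addr)
{
	int oldbit;

	/* bts leaves the old bit in CF; sbb broadcasts CF into oldbit. */
	asm volatile("lock; bts %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit), "+m" (*addr)
		     : "Ir" (nr) : "memory");
	return oldbit;
}

int main(void)
{
	unsigned long word = 0;

	printf("%d\n", test_and_set_bit_demo(3, &word));  /* 0: bit was clear */
	printf("%d\n", test_and_set_bit_demo(3, &word));  /* -1: bit was set */
	printf("%#lx\n", word);                           /* 0x8 */
	return 0;
}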
diff --git a/include/asm-x86/bitops_32.h b/include/asm-x86/bitops_32.h
index 5a29cce6a91d..e4d75fcf9c03 100644
--- a/include/asm-x86/bitops_32.h
+++ b/include/asm-x86/bitops_32.h
@@ -5,314 +5,6 @@
  * Copyright 1992, Linus Torvalds.
  */
 
-#ifndef _LINUX_BITOPS_H
-#error only <linux/bitops.h> can be included directly
-#endif
-
-#include <linux/compiler.h>
-#include <asm/alternative.h>
-
-/*
- * These have to be done with inline assembly: that way the bit-setting
- * is guaranteed to be atomic. All bit operations return 0 if the bit
- * was cleared before the operation and != 0 if it was not.
- *
- * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
- */
-
-#define ADDR (*(volatile long *) addr)
-
-/**
- * set_bit - Atomically set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * This function is atomic and may not be reordered. See __set_bit()
- * if you do not require the atomic guarantees.
- *
- * Note: there are no guarantees that this function will not be reordered
- * on non x86 architectures, so if you are writing portable code,
- * make sure not to rely on its reordering guarantees.
- *
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static inline void set_bit(int nr, volatile unsigned long *addr)
-{
-	__asm__ __volatile__( LOCK_PREFIX
-		"btsl %1,%0"
-		:"+m" (ADDR)
-		:"Ir" (nr));
-}
-
-/**
- * __set_bit - Set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * Unlike set_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static inline void __set_bit(int nr, volatile unsigned long *addr)
-{
-	__asm__(
-		"btsl %1,%0"
-		:"+m" (ADDR)
-		:"Ir" (nr));
-}
-
-/**
- * clear_bit - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * clear_bit() is atomic and may not be reordered. However, it does
- * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
- * in order to ensure changes are visible on other processors.
- */
-static inline void clear_bit(int nr, volatile unsigned long *addr)
-{
-	__asm__ __volatile__( LOCK_PREFIX
-		"btrl %1,%0"
-		:"+m" (ADDR)
-		:"Ir" (nr));
-}
-
-/*
- * clear_bit_unlock - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * clear_bit() is atomic and implies release semantics before the memory
- * operation. It can be used for an unlock.
- */
-static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
-{
-	barrier();
-	clear_bit(nr, addr);
-}
-
-static inline void __clear_bit(int nr, volatile unsigned long *addr)
-{
-	__asm__ __volatile__(
-		"btrl %1,%0"
-		:"+m" (ADDR)
-		:"Ir" (nr));
-}
-
-/*
- * __clear_bit_unlock - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * __clear_bit() is non-atomic and implies release semantics before the memory
- * operation. It can be used for an unlock if no other CPUs can concurrently
- * modify other bits in the word.
- *
- * No memory barrier is required here, because x86 cannot reorder stores past
- * older loads. Same principle as spin_unlock.
- */
-static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
-{
-	barrier();
-	__clear_bit(nr, addr);
-}
-
-#define smp_mb__before_clear_bit()	barrier()
-#define smp_mb__after_clear_bit()	barrier()
-
-/**
- * __change_bit - Toggle a bit in memory
- * @nr: the bit to change
- * @addr: the address to start counting from
- *
- * Unlike change_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static inline void __change_bit(int nr, volatile unsigned long *addr)
-{
-	__asm__ __volatile__(
-		"btcl %1,%0"
-		:"+m" (ADDR)
-		:"Ir" (nr));
-}
-
-/**
- * change_bit - Toggle a bit in memory
- * @nr: Bit to change
- * @addr: Address to start counting from
- *
- * change_bit() is atomic and may not be reordered. It may be
- * reordered on other architectures than x86.
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static inline void change_bit(int nr, volatile unsigned long *addr)
-{
-	__asm__ __volatile__( LOCK_PREFIX
-		"btcl %1,%0"
-		:"+m" (ADDR)
-		:"Ir" (nr));
-}
-
-/**
- * test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It may be reordered on other architectures than x86.
- * It also implies a memory barrier.
- */
-static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
-{
-	int oldbit;
-
-	__asm__ __volatile__( LOCK_PREFIX
-		"btsl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit),"+m" (ADDR)
-		:"Ir" (nr) : "memory");
-	return oldbit;
-}
-
-/**
- * test_and_set_bit_lock - Set a bit and return its old value for lock
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This is the same as test_and_set_bit on x86.
- */
-static inline int test_and_set_bit_lock(int nr, volatile unsigned long *addr)
-{
-	return test_and_set_bit(nr, addr);
-}
-
-/**
- * __test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail. You must protect multiple accesses with a lock.
- */
-static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
-{
-	int oldbit;
-
-	__asm__(
-		"btsl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit),"+m" (ADDR)
-		:"Ir" (nr));
-	return oldbit;
-}
-
-/**
- * test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It can be reorderdered on other architectures other than x86.
- * It also implies a memory barrier.
- */
-static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
-{
-	int oldbit;
-
-	__asm__ __volatile__( LOCK_PREFIX
-		"btrl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit),"+m" (ADDR)
-		:"Ir" (nr) : "memory");
-	return oldbit;
-}
-
-/**
- * __test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail. You must protect multiple accesses with a lock.
- */
-static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
-{
-	int oldbit;
-
-	__asm__(
-		"btrl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit),"+m" (ADDR)
-		:"Ir" (nr));
-	return oldbit;
-}
-
-/* WARNING: non atomic and it can be reordered! */
-static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
-{
-	int oldbit;
-
-	__asm__ __volatile__(
-		"btcl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit),"+m" (ADDR)
-		:"Ir" (nr) : "memory");
-	return oldbit;
-}
-
-/**
- * test_and_change_bit - Change a bit and return its old value
- * @nr: Bit to change
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
-{
-	int oldbit;
-
-	__asm__ __volatile__( LOCK_PREFIX
-		"btcl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit),"+m" (ADDR)
-		:"Ir" (nr) : "memory");
-	return oldbit;
-}
-
-#if 0 /* Fool kernel-doc since it doesn't do macros yet */
-/**
- * test_bit - Determine whether a bit is set
- * @nr: bit number to test
- * @addr: Address to start counting from
- */
-static int test_bit(int nr, const volatile void *addr);
-#endif
-
-static __always_inline int constant_test_bit(int nr, const volatile unsigned long *addr)
-{
-	return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0;
-}
-
-static inline int variable_test_bit(int nr, const volatile unsigned long *addr)
-{
-	int oldbit;
-
-	__asm__ __volatile__(
-		"btl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit)
-		:"m" (ADDR),"Ir" (nr));
-	return oldbit;
-}
-
-#define test_bit(nr, addr) \
-	(__builtin_constant_p(nr) ? \
-	 constant_test_bit((nr), (addr)) : \
-	 variable_test_bit((nr), (addr)))
-
-#undef ADDR
-
 /**
  * find_first_zero_bit - find the first zero bit in a memory region
  * @addr: The address to start the search at
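
One thing the unification keeps from this removed 32-bit code is the
test_bit() dispatch: __builtin_constant_p(nr) routes compile-time-constant bit
numbers to a plain C mask-and-load that the compiler can fold, and run-time
values to the bt instruction. A stand-alone sketch of that dispatch under
invented *_demo names (the real headers use BITS_PER_LONG and the ADDR macro
instead):

#define DEMO_BITS_PER_LONG (8 * sizeof(unsigned long))

static inline int constant_test_bit_demo(int nr,
					 const volatile unsigned long *addr)
{
	/* Pure C: a constant nr folds to a single mask-and-compare. */
	return ((1UL << (nr % DEMO_BITS_PER_LONG)) &
		addr[nr / DEMO_BITS_PER_LONG]) != 0;
}

static inline int variable_test_bit_demo(int nr,
					 const volatile unsigned long *addr)
{
	int oldbit;

	/* bt copies the tested bit into CF; sbb materializes CF. */
	asm volatile("bt %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit)
		     : "m" (*addr), "Ir" (nr));
	return oldbit;
}

#define test_bit_demo(nr, addr)				\
	(__builtin_constant_p(nr) ?			\
	 constant_test_bit_demo((nr), (addr)) :		\
	 variable_test_bit_demo((nr), (addr)))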
diff --git a/include/asm-x86/bitops_64.h b/include/asm-x86/bitops_64.h
index 766bcc0470a6..48adbf56ca60 100644
--- a/include/asm-x86/bitops_64.h
+++ b/include/asm-x86/bitops_64.h
@@ -5,303 +5,6 @@
  * Copyright 1992, Linus Torvalds.
  */
 
-#ifndef _LINUX_BITOPS_H
-#error only <linux/bitops.h> can be included directly
-#endif
-
-#include <asm/alternative.h>
-
-#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
-/* Technically wrong, but this avoids compilation errors on some gcc
-   versions. */
-#define ADDR "=m" (*(volatile long *) addr)
-#else
-#define ADDR "+m" (*(volatile long *) addr)
-#endif
-
-/**
- * set_bit - Atomically set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * This function is atomic and may not be reordered. See __set_bit()
- * if you do not require the atomic guarantees.
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static inline void set_bit(int nr, volatile void *addr)
-{
-	__asm__ __volatile__( LOCK_PREFIX
-		"btsl %1,%0"
-		:ADDR
-		:"dIr" (nr) : "memory");
-}
-
-/**
- * __set_bit - Set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * Unlike set_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static inline void __set_bit(int nr, volatile void *addr)
-{
-	__asm__ volatile(
-		"btsl %1,%0"
-		:ADDR
-		:"dIr" (nr) : "memory");
-}
-
-/**
- * clear_bit - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * clear_bit() is atomic and may not be reordered. However, it does
- * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
- * in order to ensure changes are visible on other processors.
- */
-static inline void clear_bit(int nr, volatile void *addr)
-{
-	__asm__ __volatile__( LOCK_PREFIX
-		"btrl %1,%0"
-		:ADDR
-		:"dIr" (nr));
-}
-
-/*
- * clear_bit_unlock - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * clear_bit() is atomic and implies release semantics before the memory
- * operation. It can be used for an unlock.
- */
-static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
-{
-	barrier();
-	clear_bit(nr, addr);
-}
-
-static inline void __clear_bit(int nr, volatile void *addr)
-{
-	__asm__ __volatile__(
-		"btrl %1,%0"
-		:ADDR
-		:"dIr" (nr));
-}
-
-/*
- * __clear_bit_unlock - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * __clear_bit() is non-atomic and implies release semantics before the memory
- * operation. It can be used for an unlock if no other CPUs can concurrently
- * modify other bits in the word.
- *
- * No memory barrier is required here, because x86 cannot reorder stores past
- * older loads. Same principle as spin_unlock.
- */
-static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
-{
-	barrier();
-	__clear_bit(nr, addr);
-}
-
-#define smp_mb__before_clear_bit()	barrier()
-#define smp_mb__after_clear_bit()	barrier()
-
-/**
- * __change_bit - Toggle a bit in memory
- * @nr: the bit to change
- * @addr: the address to start counting from
- *
- * Unlike change_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static inline void __change_bit(int nr, volatile void *addr)
-{
-	__asm__ __volatile__(
-		"btcl %1,%0"
-		:ADDR
-		:"dIr" (nr));
-}
-
-/**
- * change_bit - Toggle a bit in memory
- * @nr: Bit to change
- * @addr: Address to start counting from
- *
- * change_bit() is atomic and may not be reordered.
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static inline void change_bit(int nr, volatile void *addr)
-{
-	__asm__ __volatile__( LOCK_PREFIX
-		"btcl %1,%0"
-		:ADDR
-		:"dIr" (nr));
-}
-
-/**
- * test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static inline int test_and_set_bit(int nr, volatile void *addr)
-{
-	int oldbit;
-
-	__asm__ __volatile__( LOCK_PREFIX
-		"btsl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit),ADDR
-		:"dIr" (nr) : "memory");
-	return oldbit;
-}
-
-/**
- * test_and_set_bit_lock - Set a bit and return its old value for lock
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This is the same as test_and_set_bit on x86.
- */
-static inline int test_and_set_bit_lock(int nr, volatile void *addr)
-{
-	return test_and_set_bit(nr, addr);
-}
-
-/**
- * __test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail. You must protect multiple accesses with a lock.
- */
-static inline int __test_and_set_bit(int nr, volatile void *addr)
-{
-	int oldbit;
-
-	__asm__(
-		"btsl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit),ADDR
-		:"dIr" (nr));
-	return oldbit;
-}
-
-/**
- * test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static inline int test_and_clear_bit(int nr, volatile void *addr)
-{
-	int oldbit;
-
-	__asm__ __volatile__( LOCK_PREFIX
-		"btrl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit),ADDR
-		:"dIr" (nr) : "memory");
-	return oldbit;
-}
-
-/**
- * __test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail. You must protect multiple accesses with a lock.
- */
-static inline int __test_and_clear_bit(int nr, volatile void *addr)
-{
-	int oldbit;
-
-	__asm__(
-		"btrl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit),ADDR
-		:"dIr" (nr));
-	return oldbit;
-}
-
-/* WARNING: non atomic and it can be reordered! */
-static inline int __test_and_change_bit(int nr, volatile void *addr)
-{
-	int oldbit;
-
-	__asm__ __volatile__(
-		"btcl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit),ADDR
-		:"dIr" (nr) : "memory");
-	return oldbit;
-}
-
-/**
- * test_and_change_bit - Change a bit and return its old value
- * @nr: Bit to change
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static inline int test_and_change_bit(int nr, volatile void *addr)
-{
-	int oldbit;
-
-	__asm__ __volatile__( LOCK_PREFIX
-		"btcl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit),ADDR
-		:"dIr" (nr) : "memory");
-	return oldbit;
-}
-
-#if 0 /* Fool kernel-doc since it doesn't do macros yet */
-/**
- * test_bit - Determine whether a bit is set
- * @nr: bit number to test
- * @addr: Address to start counting from
- */
-static int test_bit(int nr, const volatile void *addr);
-#endif
-
-static inline int constant_test_bit(int nr, const volatile void *addr)
-{
-	return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
-}
-
-static inline int variable_test_bit(int nr, volatile const void *addr)
-{
-	int oldbit;
-
-	__asm__ __volatile__(
-		"btl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit)
-		:"m" (*(volatile long *)addr),"dIr" (nr));
-	return oldbit;
-}
-
-#define test_bit(nr,addr) \
-(__builtin_constant_p(nr) ? \
- constant_test_bit((nr),(addr)) : \
- variable_test_bit((nr),(addr)))
-
-#undef ADDR
-
 extern long find_first_zero_bit(const unsigned long *addr, unsigned long size);
 extern long find_next_zero_bit(const unsigned long *addr, long size, long offset);
 extern long find_first_bit(const unsigned long *addr, unsigned long size);
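
A closing note on the ADDR macro shared by the old 64-bit header and the new
unified one: "+m" is the accurate constraint, since the bit instructions both
read and modify the addressed word, but the headers' own comment records that
it produced compilation errors on some gcc releases before 4.1, which
therefore get the self-admittedly "technically wrong" write-only "=m". A
sketch of the same version gate, with invented DEMO_* names:

#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
/* Old gcc: accept a write-only description to keep it compiling. */
#define DEMO_ADDR "=m" (*(volatile long *) addr)
#else
/* gcc >= 4.1: describe the operand accurately as read-write. */
#define DEMO_ADDR "+m" (*(volatile long *) addr)
#endif

static inline void clear_bit_demo(int nr, volatile unsigned long *addr)
{
	asm volatile("lock; btr %1,%0" : DEMO_ADDR : "Ir" (nr));
}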