Diffstat (limited to 'include/asm-x86/bitops_32.h')
-rw-r--r--  include/asm-x86/bitops_32.h | 324
1 file changed, 8 insertions(+), 316 deletions(-)
diff --git a/include/asm-x86/bitops_32.h b/include/asm-x86/bitops_32.h
index 0b40f6d20bea..e4d75fcf9c03 100644
--- a/include/asm-x86/bitops_32.h
+++ b/include/asm-x86/bitops_32.h
@@ -5,320 +5,12 @@
  * Copyright 1992, Linus Torvalds.
  */
 
-#ifndef _LINUX_BITOPS_H
-#error only <linux/bitops.h> can be included directly
-#endif
-
-#include <linux/compiler.h>
-#include <asm/alternative.h>
-
-/*
- * These have to be done with inline assembly: that way the bit-setting
- * is guaranteed to be atomic. All bit operations return 0 if the bit
- * was cleared before the operation and != 0 if it was not.
- *
- * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
- */
-
-#define ADDR (*(volatile long *) addr)
-
-/**
- * set_bit - Atomically set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * This function is atomic and may not be reordered. See __set_bit()
- * if you do not require the atomic guarantees.
- *
- * Note: there are no guarantees that this function will not be reordered
- * on non x86 architectures, so if you are writing portable code,
- * make sure not to rely on its reordering guarantees.
- *
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static inline void set_bit(int nr, volatile unsigned long * addr)
-{
-	__asm__ __volatile__( LOCK_PREFIX
-		"btsl %1,%0"
-		:"+m" (ADDR)
-		:"Ir" (nr));
-}
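
The LOCK prefix is what makes this read-modify-write atomic: without it, another CPU could slip in between the load and the store of the btsl. As a rough illustration only (not kernel code), the same guarantee can be written with the GCC __atomic builtins; the helper name and memory-ordering choice here are invented:

#include <limits.h>

/* Illustrative stand-in for set_bit(): atomically OR a single bit into
 * the word holding bit nr, as "lock btsl" does. */
static inline void sketch_set_bit(int nr, volatile unsigned long *addr)
{
        const int bits = sizeof(unsigned long) * CHAR_BIT;  /* 32 on i386 */
        __atomic_fetch_or(addr + nr / bits, 1UL << (nr % bits),
                          __ATOMIC_RELAXED);
}
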
-
-/**
- * __set_bit - Set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * Unlike set_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static inline void __set_bit(int nr, volatile unsigned long * addr)
-{
-	__asm__(
-		"btsl %1,%0"
-		:"+m" (ADDR)
-		:"Ir" (nr));
-}
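
Why the non-atomic version can lose updates: without the LOCK prefix the btsl is a plain load-modify-store, so two CPUs can interleave. A sketch of the race the comment alludes to (illustrative trace, not real code):

/*
 * CPU 0: __set_bit(0, &w)        CPU 1: __set_bit(1, &w)
 *   load  w       (sees 0)         load  w       (sees 0)
 *   set bit 0     (temp = 1)       set bit 1     (temp = 2)
 *   store w = 1                    store w = 2   <- bit 0 is lost
 *
 * With set_bit()'s LOCK prefix the two read-modify-writes serialize
 * and the final value is 3.
 */
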
-
-/**
- * clear_bit - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * clear_bit() is atomic and may not be reordered. However, it does
- * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
- * in order to ensure changes are visible on other processors.
- */
-static inline void clear_bit(int nr, volatile unsigned long * addr)
-{
-	__asm__ __volatile__( LOCK_PREFIX
-		"btrl %1,%0"
-		:"+m" (ADDR)
-		:"Ir" (nr));
-}
-
-/*
- * clear_bit_unlock - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * clear_bit() is atomic and implies release semantics before the memory
- * operation. It can be used for an unlock.
- */
-static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
-{
-	barrier();
-	clear_bit(nr, addr);
-}
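
clear_bit_unlock() pairs with test_and_set_bit_lock() below to form a simple bit lock; the barrier() supplies the compiler-side release ordering, and x86's refusal to reorder stores past earlier memory operations does the rest. A hedged usage sketch, with the state word and bit number invented for illustration:

static unsigned long frobber_state;     /* hypothetical state word */
#define FROBBER_BUSY 0                  /* hypothetical lock bit   */

static void frobber_do_locked(void)
{
        /* spin until we observe the bit clear and atomically set it */
        while (test_and_set_bit_lock(FROBBER_BUSY, &frobber_state))
                cpu_relax();

        /* ... critical section protected by the bit ... */

        clear_bit_unlock(FROBBER_BUSY, &frobber_state);
}
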
-
-static inline void __clear_bit(int nr, volatile unsigned long * addr)
-{
-	__asm__ __volatile__(
-		"btrl %1,%0"
-		:"+m" (ADDR)
-		:"Ir" (nr));
-}
-
-/*
- * __clear_bit_unlock - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * __clear_bit() is non-atomic and implies release semantics before the memory
- * operation. It can be used for an unlock if no other CPUs can concurrently
- * modify other bits in the word.
- *
- * No memory barrier is required here, because x86 cannot reorder stores past
- * older loads. Same principle as spin_unlock.
- */
-static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
-{
-	barrier();
-	__clear_bit(nr, addr);
-}
-
-#define smp_mb__before_clear_bit() barrier()
-#define smp_mb__after_clear_bit() barrier()
-
-/**
- * __change_bit - Toggle a bit in memory
- * @nr: the bit to change
- * @addr: the address to start counting from
- *
- * Unlike change_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static inline void __change_bit(int nr, volatile unsigned long * addr)
-{
-	__asm__ __volatile__(
-		"btcl %1,%0"
-		:"+m" (ADDR)
-		:"Ir" (nr));
-}
-
-/**
- * change_bit - Toggle a bit in memory
- * @nr: Bit to change
- * @addr: Address to start counting from
- *
- * change_bit() is atomic and may not be reordered. It may be
- * reordered on other architectures than x86.
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static inline void change_bit(int nr, volatile unsigned long * addr)
-{
-	__asm__ __volatile__( LOCK_PREFIX
-		"btcl %1,%0"
-		:"+m" (ADDR)
-		:"Ir" (nr));
-}
-
-/**
- * test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It may be reordered on other architectures than x86.
- * It also implies a memory barrier.
- */
-static inline int test_and_set_bit(int nr, volatile unsigned long * addr)
-{
-	int oldbit;
-
-	__asm__ __volatile__( LOCK_PREFIX
-		"btsl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit),"+m" (ADDR)
-		:"Ir" (nr) : "memory");
-	return oldbit;
-}
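
The two-instruction body is a classic flags trick: btsl copies the old bit into the carry flag, and sbbl %0,%0 subtracts a register from itself with borrow, leaving 0 if CF was clear and -1 (all ones) if it was set. That is why the header comment promises 0 or != 0, not 0 or 1. A plain-C rendering of the same semantics, using a GCC builtin instead of the asm (illustrative only; the name is invented):

/* What "lock btsl; sbbl" computes: atomically set bit nr and report
 * its previous value as 0 / -1 (any non-zero means "was set"). */
static inline int sketch_test_and_set_bit(int nr, volatile unsigned long *addr)
{
        unsigned long mask = 1UL << (nr & 31);
        unsigned long old = __atomic_fetch_or(addr + (nr >> 5), mask,
                                              __ATOMIC_SEQ_CST);
        return (old & mask) ? -1 : 0;   /* sbb-style 0 / -1 result */
}
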
-
-/**
- * test_and_set_bit_lock - Set a bit and return its old value for lock
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This is the same as test_and_set_bit on x86.
- */
-static inline int test_and_set_bit_lock(int nr, volatile unsigned long *addr)
-{
-	return test_and_set_bit(nr, addr);
-}
-
-/**
- * __test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail. You must protect multiple accesses with a lock.
- */
-static inline int __test_and_set_bit(int nr, volatile unsigned long * addr)
-{
-	int oldbit;
-
-	__asm__(
-		"btsl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit),"+m" (ADDR)
-		:"Ir" (nr));
-	return oldbit;
-}
-
-/**
- * test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It can be reorderdered on other architectures other than x86.
- * It also implies a memory barrier.
- */
-static inline int test_and_clear_bit(int nr, volatile unsigned long * addr)
-{
-	int oldbit;
-
-	__asm__ __volatile__( LOCK_PREFIX
-		"btrl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit),"+m" (ADDR)
-		:"Ir" (nr) : "memory");
-	return oldbit;
-}
-
-/**
- * __test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail. You must protect multiple accesses with a lock.
- */
-static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
-{
-	int oldbit;
-
-	__asm__(
-		"btrl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit),"+m" (ADDR)
-		:"Ir" (nr));
-	return oldbit;
-}
-
-/* WARNING: non atomic and it can be reordered! */
-static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
-{
-	int oldbit;
-
-	__asm__ __volatile__(
-		"btcl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit),"+m" (ADDR)
-		:"Ir" (nr) : "memory");
-	return oldbit;
-}
-
-/**
- * test_and_change_bit - Change a bit and return its old value
- * @nr: Bit to change
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static inline int test_and_change_bit(int nr, volatile unsigned long* addr)
-{
-	int oldbit;
-
-	__asm__ __volatile__( LOCK_PREFIX
-		"btcl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit),"+m" (ADDR)
-		:"Ir" (nr) : "memory");
-	return oldbit;
-}
-
-#if 0 /* Fool kernel-doc since it doesn't do macros yet */
-/**
- * test_bit - Determine whether a bit is set
- * @nr: bit number to test
- * @addr: Address to start counting from
- */
-static int test_bit(int nr, const volatile void * addr);
-#endif
-
-static __always_inline int constant_test_bit(int nr, const volatile unsigned long *addr)
-{
-	return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0;
-}
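
Worked example of the constant path: for nr = 37, nr >> 5 selects word 1 and nr & 31 selects bit 5 within it, so constant_test_bit(37, addr) reduces to (addr[1] & 0x20) != 0, which the compiler folds to a single test against an immediate when nr is a literal.
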
-
-static inline int variable_test_bit(int nr, const volatile unsigned long * addr)
-{
-	int oldbit;
-
-	__asm__ __volatile__(
-		"btl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit)
-		:"m" (ADDR),"Ir" (nr));
-	return oldbit;
-}
-
-#define test_bit(nr,addr) \
-	(__builtin_constant_p(nr) ? \
-	 constant_test_bit((nr),(addr)) : \
-	 variable_test_bit((nr),(addr)))
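
test_bit() thus dispatches at compile time: __builtin_constant_p(nr) is true for literal bit numbers, which take the C path above, while runtime values go through the btl instruction. A usage sketch, with the flag word and helpers invented:

unsigned long dev_flags;                /* hypothetical flag word */

/* literal nr: __builtin_constant_p(3) is true, constant_test_bit() used */
if (test_bit(3, &dev_flags))
        handle_ready();                 /* hypothetical */

/* variable nr: falls through to variable_test_bit() and btl */
int n = next_bit_to_poll();             /* hypothetical */
if (test_bit(n, &dev_flags))
        handle_bit(n);                  /* hypothetical */
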
-
-#undef ADDR
-
 /**
  * find_first_zero_bit - find the first zero bit in a memory region
  * @addr: The address to start the search at
  * @size: The maximum size to search
  *
- * Returns the bit-number of the first zero bit, not the number of the byte
+ * Returns the bit number of the first zero bit, not the number of the byte
  * containing a bit.
  */
 static inline int find_first_zero_bit(const unsigned long *addr, unsigned size)
@@ -348,7 +40,7 @@ static inline int find_first_zero_bit(const unsigned long *addr, unsigned size)
 /**
  * find_next_zero_bit - find the first zero bit in a memory region
  * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
+ * @offset: The bit number to start searching at
  * @size: The maximum size to search
  */
 int find_next_zero_bit(const unsigned long *addr, int size, int offset);
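
Callers typically combine the two to walk every zero bit of a bitmap; a minimal sketch, with the 64-bit example map and the consumer invented (unsigned long is 32 bits on i386, so two words cover 64 bits):

/* Walk every clear bit in a 64-bit bitmap (illustrative only). */
unsigned long map[2] = { ~0UL, 0xffff0000UL };  /* bits 32..47 clear */
int bit;

for (bit = find_first_zero_bit(map, 64);
     bit < 64;
     bit = find_next_zero_bit(map, 64, bit + 1))
        claim_free_slot(bit);                   /* hypothetical */
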
@@ -372,7 +64,7 @@ static inline unsigned long __ffs(unsigned long word)
  * @addr: The address to start the search at
  * @size: The maximum size to search
  *
- * Returns the bit-number of the first set bit, not the number of the byte
+ * Returns the bit number of the first set bit, not the number of the byte
  * containing a bit.
  */
 static inline unsigned find_first_bit(const unsigned long *addr, unsigned size)
@@ -391,7 +83,7 @@ static inline unsigned find_first_bit(const unsigned long *addr, unsigned size)
 /**
  * find_next_bit - find the first set bit in a memory region
  * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
+ * @offset: The bit number to start searching at
  * @size: The maximum size to search
  */
 int find_next_bit(const unsigned long *addr, int size, int offset);
@@ -460,10 +152,10 @@ static inline int fls(int x)
 
 #include <asm-generic/bitops/ext2-non-atomic.h>
 
-#define ext2_set_bit_atomic(lock,nr,addr) \
-	test_and_set_bit((nr),(unsigned long*)addr)
-#define ext2_clear_bit_atomic(lock,nr, addr) \
-	test_and_clear_bit((nr),(unsigned long*)addr)
+#define ext2_set_bit_atomic(lock, nr, addr) \
+	test_and_set_bit((nr), (unsigned long *)addr)
+#define ext2_clear_bit_atomic(lock, nr, addr) \
+	test_and_clear_bit((nr), (unsigned long *)addr)
 
 #include <asm-generic/bitops/minix.h>
 
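Note that the lock argument to ext2_set_bit_atomic() and ext2_clear_bit_atomic() is deliberately unused in these expansions: the LOCK-prefixed test_and_set_bit()/test_and_clear_bit() are already atomic on x86, so the spinlock that other architectures take around the non-atomic bitops can be skipped.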