Diffstat (limited to 'include/asm-x86/bitops_32.h')
 include/asm-x86/bitops_32.h | 423 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 423 insertions(+), 0 deletions(-)
diff --git a/include/asm-x86/bitops_32.h b/include/asm-x86/bitops_32.h
new file mode 100644
index 000000000000..a20fe9822f60
--- /dev/null
+++ b/include/asm-x86/bitops_32.h
@@ -0,0 +1,423 @@
#ifndef _I386_BITOPS_H
#define _I386_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#include <linux/compiler.h>
#include <asm/alternative.h>

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */

#define ADDR (*(volatile long *) addr)

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non-x86 architectures, so if you are writing portable code,
 * make sure not to rely on its ordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(int nr, volatile unsigned long *addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btsl %1,%0"
		:"+m" (ADDR)
		:"Ir" (nr));
}
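
/*
 * Illustrative sketch (not part of the original header): typical use of
 * set_bit() on a multi-word bitmap.  The names example_flags and
 * EXAMPLE_NR are hypothetical.
 */
#if 0
static unsigned long example_flags[2];	/* 64 bits on i386 */
#define EXAMPLE_NR 40			/* falls in example_flags[1] */

static void example_mark(void)
{
	/* atomic even against concurrent bit operations on other CPUs */
	set_bit(EXAMPLE_NR, example_flags);
}
#endif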

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __set_bit(int nr, volatile unsigned long *addr)
{
	__asm__(
		"btsl %1,%0"
		:"+m" (ADDR)
		:"Ir" (nr));
}
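
/*
 * Illustrative sketch (hypothetical example): __set_bit() is safe when
 * the bitmap is not yet visible to other CPUs, e.g. while initialising
 * a private structure, so the LOCK prefix can be avoided.
 */
#if 0
static void example_init(unsigned long *map)
{
	__set_bit(0, map);	/* no LOCK prefix needed: map is private */
	__set_bit(3, map);
}
#endif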

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(int nr, volatile unsigned long *addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btrl %1,%0"
		:"+m" (ADDR)
		:"Ir" (nr));
}

static inline void __clear_bit(int nr, volatile unsigned long *addr)
{
	__asm__ __volatile__(
		"btrl %1,%0"
		:"+m" (ADDR)
		:"Ir" (nr));
}
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
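
/*
 * Illustrative sketch (hypothetical lock-bit protocol): when clear_bit()
 * releases a lock bit, the caller must supply the barrier so that prior
 * stores are visible before the bit appears clear to other CPUs.
 */
#if 0
static void example_unlock(unsigned long *flags)
{
	smp_mb__before_clear_bit();	/* order prior stores before release */
	clear_bit(0, flags);		/* atomic, but no implicit barrier */
}
#endif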

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __change_bit(int nr, volatile unsigned long *addr)
{
	__asm__ __volatile__(
		"btcl %1,%0"
		:"+m" (ADDR)
		:"Ir" (nr));
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered on x86; on other
 * architectures it may be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(int nr, volatile unsigned long *addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btcl %1,%0"
		:"+m" (ADDR)
		:"Ir" (nr));
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered on x86; it may be
 * reordered on other architectures.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btsl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}
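
/*
 * Illustrative sketch: test_and_set_bit() as a try-lock.  It returns
 * nonzero if the bit was already set, so negating it gives the usual
 * trylock convention.  The function name is hypothetical.
 */
#if 0
static int example_trylock(unsigned long *flags)
{
	/* implies a full memory barrier, so no extra fence is needed */
	return !test_and_set_bit(0, flags);	/* 1 if we acquired it */
}
#endif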

/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	__asm__(
		"btsl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"Ir" (nr));
	return oldbit;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered on x86; it can be
 * reordered on architectures other than x86.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	__asm__(
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"Ir" (nr));
	return oldbit;
}

/* WARNING: non-atomic and it can be reordered! */
static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	__asm__ __volatile__(
		"btcl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btcl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static int test_bit(int nr, const volatile void *addr);
#endif

static __always_inline int constant_test_bit(int nr, const volatile unsigned long *addr)
{
	return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0;
}

static inline int variable_test_bit(int nr, const volatile unsigned long *addr)
{
	int oldbit;

	__asm__ __volatile__(
		"btl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit)
		:"m" (ADDR),"Ir" (nr));
	return oldbit;
}

#define test_bit(nr,addr) \
	(__builtin_constant_p(nr) ? \
	 constant_test_bit((nr),(addr)) : \
	 variable_test_bit((nr),(addr)))
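
/*
 * Illustrative sketch: the test_bit() macro dispatches at compile time.
 * A literal bit number is resolved via the shift-and-mask in
 * constant_test_bit(); a runtime value goes through the btl instruction
 * in variable_test_bit().  The function name is hypothetical.
 */
#if 0
static int example_probe(const volatile unsigned long *flags, int nr)
{
	int a = test_bit(5, flags);	/* constant: compiles to a mask test */
	int b = test_bit(nr, flags);	/* variable: compiles to btl */
	return a | b;
}
#endif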

#undef ADDR

/**
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static inline int find_first_zero_bit(const unsigned long *addr, unsigned size)
{
	int d0, d1, d2;
	int res;

	if (!size)
		return 0;
	/* This looks at memory. Mark it volatile to tell gcc not to move it around */
	__asm__ __volatile__(
		"movl $-1,%%eax\n\t"
		"xorl %%edx,%%edx\n\t"
		"repe; scasl\n\t"
		"je 1f\n\t"
		"xorl -4(%%edi),%%eax\n\t"
		"subl $4,%%edi\n\t"
		"bsfl %%eax,%%edx\n"
		"1:\tsubl %%ebx,%%edi\n\t"
		"shll $3,%%edi\n\t"
		"addl %%edi,%%edx"
		:"=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2)
		:"1" ((size + 31) >> 5), "2" (addr), "b" (addr) : "memory");
	return res;
}
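
/*
 * Illustrative sketch (hypothetical allocator): find and claim the first
 * free slot in a fixed-size bitmap.  Note the window between the search
 * and the claim; a real caller would hold a lock, or claim the slot with
 * test_and_set_bit() and retry on failure.
 */
#if 0
#define EXAMPLE_SLOTS 128

static int example_alloc_slot(unsigned long *map)
{
	int slot = find_first_zero_bit(map, EXAMPLE_SLOTS);

	if (slot >= EXAMPLE_SLOTS)
		return -1;		/* bitmap full */
	set_bit(slot, map);		/* claim it */
	return slot;
}
#endif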

/**
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
int find_next_zero_bit(const unsigned long *addr, int size, int offset);

/**
 * __ffs - find first set bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	__asm__("bsfl %1,%0"
		:"=r" (word)
		:"rm" (word));
	return word;
}
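
/*
 * Illustrative worked example: __ffs(0x60) == 5, since bit 5 is the
 * lowest set bit of 0x60 (binary 0110 0000).  __ffs(0) is undefined:
 * bsfl leaves the destination unspecified when the source is zero.
 */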

/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first set bit, not the number of the byte
 * containing a bit.
 */
static inline unsigned find_first_bit(const unsigned long *addr, unsigned size)
{
	unsigned x = 0;

	while (x < size) {
		unsigned long val = *addr++;
		if (val)
			return __ffs(val) + x;
		x += (sizeof(*addr)<<3);
	}
	return x;
}

/**
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
int find_next_bit(const unsigned long *addr, int size, int offset);

/**
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	__asm__("bsfl %1,%0"
		:"=r" (word)
		:"r" (~word));
	return word;
}
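
/*
 * Illustrative worked example: ffz(0x000000ff) == 8, because the
 * complement 0xffffff00 has its lowest set bit at position 8.
 */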

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>

/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz() (man ffs).
 */
static inline int ffs(int x)
{
	int r;

	__asm__("bsfl %1,%0\n\t"
		"jnz 1f\n\t"
		"movl $-1,%0\n"
		"1:" : "=r" (r) : "rm" (x));
	return r+1;
}
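
/*
 * Illustrative worked examples of the libc-style convention:
 * ffs(0) == 0, ffs(1) == 1, ffs(0x8000) == 16.  The asm maps the
 * "no bit found" case to -1 so that the final r+1 yields 0.
 */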

/**
 * fls - find last bit set
 * @x: the word to search
 *
 * This is defined the same way as ffs().
 */
static inline int fls(int x)
{
	int r;

	__asm__("bsrl %1,%0\n\t"
		"jnz 1f\n\t"
		"movl $-1,%0\n"
		"1:" : "=r" (r) : "rm" (x));
	return r+1;
}
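
/*
 * Illustrative worked examples: fls(0) == 0, fls(1) == 1,
 * fls(0x8000) == 16.  bsrl finds the highest set bit (15 for 0x8000)
 * and the final r+1 converts to the 1-based convention.
 */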

#include <asm-generic/bitops/hweight.h>

#endif /* __KERNEL__ */

#include <asm-generic/bitops/fls64.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/ext2-non-atomic.h>

#define ext2_set_bit_atomic(lock, nr, addr) \
	test_and_set_bit((nr), (unsigned long *)addr)
#define ext2_clear_bit_atomic(lock, nr, addr) \
	test_and_clear_bit((nr), (unsigned long *)addr)
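
/*
 * Illustrative note: on x86 these ext2 helpers ignore the lock argument
 * entirely, since test_and_set_bit()/test_and_clear_bit() are already
 * atomic; the parameter exists for architectures whose generic fallback
 * serialises the bit operation with that spinlock instead.  The caller
 * sketch below uses made-up names ("sb_lock", "bh_data").
 */
#if 0
	ext2_set_bit_atomic(&sb_lock, 12, bh_data);
#endif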

#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#endif /* _I386_BITOPS_H */