author		Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
commit		1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree		0bba044c4ce775e45a88a51686b5d9f90697ea9d /include/asm-i386/bitops.h
Linux-2.6.12-rc2 (v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'include/asm-i386/bitops.h')
-rw-r--r--	include/asm-i386/bitops.h	462
1 file changed, 462 insertions, 0 deletions
diff --git a/include/asm-i386/bitops.h b/include/asm-i386/bitops.h
new file mode 100644
index 000000000000..9db0b712d57a
--- /dev/null
+++ b/include/asm-i386/bitops.h
@@ -0,0 +1,462 @@
#ifndef _I386_BITOPS_H
#define _I386_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#include <linux/config.h>
#include <linux/compiler.h>

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */
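
/*
 * Illustrative example (editor's sketch, not part of the original file):
 * with 32-bit longs, a bit index simply keeps counting into the next
 * word, so
 *
 *	set_bit(32, addr);	is equivalent to	set_bit(0, addr + 1);
 *
 * and a bitmap of any length can be addressed with a single index.
 */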

#ifdef CONFIG_SMP
#define LOCK_PREFIX "lock ; "
#else
#define LOCK_PREFIX ""
#endif

#define ADDR (*(volatile long *) addr)

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non-x86 architectures, so if you are writing portable code,
 * make sure not to rely on its ordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(int nr, volatile unsigned long * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btsl %1,%0"
		:"=m" (ADDR)
		:"Ir" (nr));
}
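
/*
 * Illustrative usage sketch (editor's example; inuse_map is a
 * hypothetical caller-defined bitmap, not part of the original file):
 * atomically marking slot 5 of an allocation map as used, safe against
 * concurrent setters on other CPUs:
 *
 *	static unsigned long inuse_map[4];	hypothetical 128-bit map
 *
 *	set_bit(5, inuse_map);			sets bit 5 of inuse_map[0]
 */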

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __set_bit(int nr, volatile unsigned long * addr)
{
	__asm__(
		"btsl %1,%0"
		:"=m" (ADDR)
		:"Ir" (nr));
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(int nr, volatile unsigned long * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btrl %1,%0"
		:"=m" (ADDR)
		:"Ir" (nr));
}

static inline void __clear_bit(int nr, volatile unsigned long * addr)
{
	__asm__ __volatile__(
		"btrl %1,%0"
		:"=m" (ADDR)
		:"Ir" (nr));
}
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
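
/*
 * Illustrative sketch (editor's example; some_flags is a hypothetical
 * flag word, not part of the original file): using clear_bit() to
 * release a lock-like flag. The explicit barrier keeps the critical
 * section's stores from being reordered past the releasing clear:
 *
 *	... critical section ...
 *	smp_mb__before_clear_bit();
 *	clear_bit(0, &some_flags);
 */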

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __change_bit(int nr, volatile unsigned long * addr)
{
	__asm__ __volatile__(
		"btcl %1,%0"
		:"=m" (ADDR)
		:"Ir" (nr));
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered on x86; it may be
 * reordered on other architectures.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(int nr, volatile unsigned long * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btcl %1,%0"
		:"=m" (ADDR)
		:"Ir" (nr));
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered on x86, though it
 * may be reordered on other architectures.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(int nr, volatile unsigned long * addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btsl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}
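
/*
 * Illustrative try-lock sketch (editor's example; some_flags is a
 * hypothetical flag word, not part of the original file): because
 * test_and_set_bit() atomically returns the old value, exactly one of
 * several racing CPUs sees 0 and wins:
 *
 *	if (!test_and_set_bit(0, &some_flags)) {
 *		... we won the race; do the work ...
 *		smp_mb__before_clear_bit();
 *		clear_bit(0, &some_flags);
 *	}
 */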

/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static inline int __test_and_set_bit(int nr, volatile unsigned long * addr)
{
	int oldbit;

	__asm__(
		"btsl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"Ir" (nr));
	return oldbit;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered on x86, though it
 * may be reordered on other architectures.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(int nr, volatile unsigned long * addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}
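
/*
 * Illustrative sketch (editor's example; pending is a hypothetical flag
 * word, not part of the original file): consuming a "work pending" flag
 * exactly once even if several CPUs race to clear it:
 *
 *	if (test_and_clear_bit(0, &pending))
 *		... only one CPU gets here per setting of the bit ...
 */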

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	__asm__(
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"Ir" (nr));
	return oldbit;
}

/* WARNING: non-atomic and it can be reordered! */
static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	__asm__ __volatile__(
		"btcl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(int nr, volatile unsigned long* addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btcl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static int test_bit(int nr, const volatile void * addr);
#endif

static inline int constant_test_bit(int nr, const volatile unsigned long *addr)
{
	return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0;
}

static inline int variable_test_bit(int nr, const volatile unsigned long * addr)
{
	int oldbit;

	__asm__ __volatile__(
		"btl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit)
		:"m" (ADDR),"Ir" (nr));
	return oldbit;
}

#define test_bit(nr,addr) \
	(__builtin_constant_p(nr) ? \
	 constant_test_bit((nr),(addr)) : \
	 variable_test_bit((nr),(addr)))
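
/*
 * Illustrative note (editor's example, not part of the original file):
 * the macro picks an implementation at compile time, e.g.
 *
 *	test_bit(3, map)	constant nr -> constant_test_bit(), plain C
 *	test_bit(i, map)	runtime nr  -> variable_test_bit(), btl insn
 */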

#undef ADDR

/**
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static inline int find_first_zero_bit(const unsigned long *addr, unsigned size)
{
	int d0, d1, d2;
	int res;

	if (!size)
		return 0;
	/* This looks at memory. Mark it volatile to tell gcc not to move it around */
	__asm__ __volatile__(
		"movl $-1,%%eax\n\t"
		"xorl %%edx,%%edx\n\t"
		"repe; scasl\n\t"
		"je 1f\n\t"
		"xorl -4(%%edi),%%eax\n\t"
		"subl $4,%%edi\n\t"
		"bsfl %%eax,%%edx\n"
		"1:\tsubl %%ebx,%%edi\n\t"
		"shll $3,%%edi\n\t"
		"addl %%edi,%%edx"
		:"=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2)
		:"1" ((size + 31) >> 5), "2" (addr), "b" (addr) : "memory");
	return res;
}
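
/*
 * Illustrative example (editor's sketch, not part of the original file),
 * with 32-bit longs:
 *
 *	unsigned long map[2] = { ~0UL, 0x0f };
 *
 *	find_first_zero_bit(map, 64) returns 36: bits 0-31 and 32-35 are
 *	set, so the first clear bit is bit 4 of the second word.
 */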

/**
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bit number to start searching at
 * @size: The maximum size to search
 */
int find_next_zero_bit(const unsigned long *addr, int size, int offset);

/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first set bit, not the number of the byte
 * containing a bit.
 */
static inline int find_first_bit(const unsigned long *addr, unsigned size)
{
	int d0, d1;
	int res;

	/* This looks at memory. Mark it volatile to tell gcc not to move it around */
	__asm__ __volatile__(
		"xorl %%eax,%%eax\n\t"
		"repe; scasl\n\t"
		"jz 1f\n\t"
		"leal -4(%%edi),%%edi\n\t"
		"bsfl (%%edi),%%eax\n"
		"1:\tsubl %%ebx,%%edi\n\t"
		"shll $3,%%edi\n\t"
		"addl %%edi,%%eax"
		:"=a" (res), "=&c" (d0), "=&D" (d1)
		:"1" ((size + 31) >> 5), "2" (addr), "b" (addr) : "memory");
	return res;
}
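
/*
 * Illustrative example (editor's sketch, not part of the original file),
 * with 32-bit longs:
 *
 *	unsigned long map[2] = { 0, 0x100 };
 *
 *	find_first_bit(map, 64) returns 40: the first word is empty and
 *	bit 8 of the second word is the first bit set.
 */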

/**
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bit number to start searching at
 * @size: The maximum size to search
 */
int find_next_bit(const unsigned long *addr, int size, int offset);

/**
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	__asm__("bsfl %1,%0"
		:"=r" (word)
		:"r" (~word));
	return word;
}
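
/*
 * Illustrative example (editor's sketch, not part of the original file):
 *
 *	ffz(0x0000ffff) == 16	bits 0-15 are set; bit 16 is the first zero
 *	ffz(~0UL)		undefined: no zero bit, so check first
 */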

/**
 * __ffs - find first set bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	__asm__("bsfl %1,%0"
		:"=r" (word)
		:"rm" (word));
	return word;
}
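
/*
 * Illustrative example (editor's sketch, not part of the original file):
 *
 *	__ffs(0x100) == 8	bit 8 is the lowest bit set
 *	__ffs(0)		undefined: bsfl on zero input, so check first
 */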

/*
 * fls: find last bit set.
 */

#define fls(x) generic_fls(x)

#ifdef __KERNEL__

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is set.
 */
static inline int sched_find_first_bit(const unsigned long *b)
{
	if (unlikely(b[0]))
		return __ffs(b[0]);
	if (unlikely(b[1]))
		return __ffs(b[1]) + 32;
	if (unlikely(b[2]))
		return __ffs(b[2]) + 64;
	if (b[3])
		return __ffs(b[3]) + 96;
	return __ffs(b[4]) + 128;
}
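
/*
 * Illustrative example (editor's sketch, not part of the original file):
 * the 140-bit priority bitmap occupies five 32-bit words, so e.g.
 *
 *	b[0] == 0, b[1] == 0x1
 *
 * makes sched_find_first_bit(b) return 32, the index of the first set
 * bit, without scanning the remaining words.
 */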

/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(int x)
{
	int r;

	__asm__("bsfl %1,%0\n\t"
		"jnz 1f\n\t"
		"movl $-1,%0\n"
		"1:" : "=r" (r) : "rm" (x));
	return r+1;
}
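
/*
 * Illustrative example (editor's sketch, not part of the original file):
 *
 *	ffs(0) == 0		no bits set
 *	ffs(0x8000) == 16	libc convention: bits are numbered from 1
 */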

/**
 * hweightN - returns the hamming weight of an N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)

#endif /* __KERNEL__ */

#ifdef __KERNEL__

#define ext2_set_bit(nr,addr) \
	__test_and_set_bit((nr),(unsigned long*)addr)
#define ext2_set_bit_atomic(lock,nr,addr) \
	test_and_set_bit((nr),(unsigned long*)addr)
#define ext2_clear_bit(nr, addr) \
	__test_and_clear_bit((nr),(unsigned long*)addr)
#define ext2_clear_bit_atomic(lock,nr, addr) \
	test_and_clear_bit((nr),(unsigned long*)addr)
#define ext2_test_bit(nr, addr)      test_bit((nr),(unsigned long*)addr)
#define ext2_find_first_zero_bit(addr, size) \
	find_first_zero_bit((unsigned long*)addr, size)
#define ext2_find_next_zero_bit(addr, size, off) \
	find_next_zero_bit((unsigned long*)addr, size, off)

/* Bitmap functions for the minix filesystem. */
#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,(void*)addr)
#define minix_set_bit(nr,addr) __set_bit(nr,(void*)addr)
#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,(void*)addr)
#define minix_test_bit(nr,addr) test_bit(nr,(void*)addr)
#define minix_find_first_zero_bit(addr,size) \
	find_first_zero_bit((void*)addr,size)

#endif /* __KERNEL__ */

#endif /* _I386_BITOPS_H */