Diffstat (limited to 'include/asm-sparc/bitops.h')
-rw-r--r--  include/asm-sparc/bitops.h | 388
1 files changed, 12 insertions, 376 deletions
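The hunk below drops the sparc32 header's open-coded bit operations and pulls the shared implementations in from asm-generic. For orientation, the arithmetic behind the non-atomic helpers is just word indexing plus a mask. Here is a minimal userspace sketch of that idea (assuming BITS_PER_LONG-bit words; sketch_set_bit() and sketch_test_and_clear_bit() are hypothetical names for illustration, not the kernel header's code):

/*
 * Userspace sketch of non-atomic bitop arithmetic, not the kernel header.
 * The removed sparc32 code hard-codes 32-bit words (nr >> 5, nr & 0x1f);
 * the generic header expresses the same thing via the native word size.
 */
#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

static void sketch_set_bit(int nr, unsigned long *addr)
{
	unsigned long mask = 1UL << (nr % BITS_PER_LONG);
	unsigned long *p = addr + nr / BITS_PER_LONG;

	*p |= mask;			/* same effect as the removed __set_bit() */
}

static int sketch_test_and_clear_bit(int nr, unsigned long *addr)
{
	unsigned long mask = 1UL << (nr % BITS_PER_LONG);
	unsigned long *p = addr + nr / BITS_PER_LONG;
	unsigned long old = *p;

	*p = old & ~mask;
	return (old & mask) != 0;	/* previous value of the bit */
}

int main(void)
{
	unsigned long map[4] = { 0 };

	sketch_set_bit(70, map);
	printf("bit 70 was %d\n", sketch_test_and_clear_bit(70, map));
	printf("bit 70 is now %d\n", sketch_test_and_clear_bit(70, map));
	return 0;
}

Because the generic versions are written against the native word size rather than a hard-coded 32 bits, the per-architecture copies removed below add nothing and can go.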
diff --git a/include/asm-sparc/bitops.h b/include/asm-sparc/bitops.h
index f25109d62032..04aa3318f76a 100644
--- a/include/asm-sparc/bitops.h
+++ b/include/asm-sparc/bitops.h
@@ -152,386 +152,22 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 	: "memory", "cc");
 }
 
-/*
- * non-atomic versions
- */
-static inline void __set_bit(int nr, volatile unsigned long *addr)
-{
-	unsigned long mask = 1UL << (nr & 0x1f);
-	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
-
-	*p |= mask;
-}
-
-static inline void __clear_bit(int nr, volatile unsigned long *addr)
-{
-	unsigned long mask = 1UL << (nr & 0x1f);
-	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
-
-	*p &= ~mask;
-}
-
-static inline void __change_bit(int nr, volatile unsigned long *addr)
-{
-	unsigned long mask = 1UL << (nr & 0x1f);
-	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
-
-	*p ^= mask;
-}
-
-static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
-{
-	unsigned long mask = 1UL << (nr & 0x1f);
-	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
-	unsigned long old = *p;
-
-	*p = old | mask;
-	return (old & mask) != 0;
-}
-
-static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
-{
-	unsigned long mask = 1UL << (nr & 0x1f);
-	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
-	unsigned long old = *p;
-
-	*p = old & ~mask;
-	return (old & mask) != 0;
-}
-
-static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
-{
-	unsigned long mask = 1UL << (nr & 0x1f);
-	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
-	unsigned long old = *p;
-
-	*p = old ^ mask;
-	return (old & mask) != 0;
-}
+#include <asm-generic/bitops/non-atomic.h>
 
 #define smp_mb__before_clear_bit()	do { } while(0)
 #define smp_mb__after_clear_bit()	do { } while(0)
 
-/* The following routine need not be atomic. */
-static inline int test_bit(int nr, __const__ volatile unsigned long *addr)
-{
-	return (1UL & (((unsigned long *)addr)[nr >> 5] >> (nr & 31))) != 0UL;
-}
-
-/* The easy/cheese version for now. */
-static inline unsigned long ffz(unsigned long word)
-{
-	unsigned long result = 0;
-
-	while(word & 1) {
-		result++;
-		word >>= 1;
-	}
-	return result;
-}
-
-/**
- * __ffs - find first bit in word.
- * @word: The word to search
- *
- * Undefined if no bit exists, so code should check against 0 first.
- */
-static inline int __ffs(unsigned long word)
-{
-	int num = 0;
-
-	if ((word & 0xffff) == 0) {
-		num += 16;
-		word >>= 16;
-	}
-	if ((word & 0xff) == 0) {
-		num += 8;
-		word >>= 8;
-	}
-	if ((word & 0xf) == 0) {
-		num += 4;
-		word >>= 4;
-	}
-	if ((word & 0x3) == 0) {
-		num += 2;
-		word >>= 2;
-	}
-	if ((word & 0x1) == 0)
-		num += 1;
-	return num;
-}
-
-/*
- * Every architecture must define this function. It's the fastest
- * way of searching a 140-bit bitmap where the first 100 bits are
- * unlikely to be set. It's guaranteed that at least one of the 140
- * bits is cleared.
- */
-static inline int sched_find_first_bit(unsigned long *b)
-{
-
-	if (unlikely(b[0]))
-		return __ffs(b[0]);
-	if (unlikely(b[1]))
-		return __ffs(b[1]) + 32;
-	if (unlikely(b[2]))
-		return __ffs(b[2]) + 64;
-	if (b[3])
-		return __ffs(b[3]) + 96;
-	return __ffs(b[4]) + 128;
-}
-
-/*
- * ffs: find first bit set. This is defined the same way as
- * the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
- */
-static inline int ffs(int x)
-{
-	if (!x)
-		return 0;
-	return __ffs((unsigned long)x) + 1;
-}
-
-/*
- * fls: find last (most-significant) bit set.
- * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
- */
-#define fls(x) generic_fls(x)
-#define fls64(x) generic_fls64(x)
-
-/*
- * hweightN: returns the hamming weight (i.e. the number
- * of bits set) of a N-bit word
- */
-#define hweight32(x) generic_hweight32(x)
-#define hweight16(x) generic_hweight16(x)
-#define hweight8(x) generic_hweight8(x)
-
-/*
- * find_next_zero_bit() finds the first zero bit in a bit string of length
- * 'size' bits, starting the search at bit 'offset'. This is largely based
- * on Linus's ALPHA routines, which are pretty portable BTW.
- */
-static inline unsigned long find_next_zero_bit(const unsigned long *addr,
-	unsigned long size, unsigned long offset)
-{
-	const unsigned long *p = addr + (offset >> 5);
-	unsigned long result = offset & ~31UL;
-	unsigned long tmp;
-
-	if (offset >= size)
-		return size;
-	size -= result;
-	offset &= 31UL;
-	if (offset) {
-		tmp = *(p++);
-		tmp |= ~0UL >> (32-offset);
-		if (size < 32)
-			goto found_first;
-		if (~tmp)
-			goto found_middle;
-		size -= 32;
-		result += 32;
-	}
-	while (size & ~31UL) {
-		if (~(tmp = *(p++)))
-			goto found_middle;
-		result += 32;
-		size -= 32;
-	}
-	if (!size)
-		return result;
-	tmp = *p;
-
-found_first:
-	tmp |= ~0UL << size;
-	if (tmp == ~0UL)	/* Are any bits zero? */
-		return result + size; /* Nope. */
-found_middle:
-	return result + ffz(tmp);
-}
-
-/*
- * Linus sez that gcc can optimize the following correctly, we'll see if this
- * holds on the Sparc as it does for the ALPHA.
- */
-#define find_first_zero_bit(addr, size) \
-	find_next_zero_bit((addr), (size), 0)
-
-/**
- * find_next_bit - find the first set bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
- *
- * Scheduler induced bitop, do not use.
- */
-static inline int find_next_bit(const unsigned long *addr, int size, int offset)
-{
-	const unsigned long *p = addr + (offset >> 5);
-	int num = offset & ~0x1f;
-	unsigned long word;
-
-	word = *p++;
-	word &= ~((1 << (offset & 0x1f)) - 1);
-	while (num < size) {
-		if (word != 0) {
-			return __ffs(word) + num;
-		}
-		word = *p++;
-		num += 0x20;
-	}
-	return num;
-}
-
-/**
- * find_first_bit - find the first set bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum size to search
- *
- * Returns the bit-number of the first set bit, not the number of the byte
- * containing a bit.
- */
-#define find_first_bit(addr, size) \
-	find_next_bit((addr), (size), 0)
-
-/*
- */
-static inline int test_le_bit(int nr, __const__ unsigned long * addr)
-{
-	__const__ unsigned char *ADDR = (__const__ unsigned char *) addr;
-	return (ADDR[nr >> 3] >> (nr & 7)) & 1;
-}
-
-/*
- * non-atomic versions
- */
-static inline void __set_le_bit(int nr, unsigned long *addr)
-{
-	unsigned char *ADDR = (unsigned char *)addr;
-
-	ADDR += nr >> 3;
-	*ADDR |= 1 << (nr & 0x07);
-}
-
-static inline void __clear_le_bit(int nr, unsigned long *addr)
-{
-	unsigned char *ADDR = (unsigned char *)addr;
-
-	ADDR += nr >> 3;
-	*ADDR &= ~(1 << (nr & 0x07));
-}
-
-static inline int __test_and_set_le_bit(int nr, unsigned long *addr)
-{
-	int mask, retval;
-	unsigned char *ADDR = (unsigned char *)addr;
-
-	ADDR += nr >> 3;
-	mask = 1 << (nr & 0x07);
-	retval = (mask & *ADDR) != 0;
-	*ADDR |= mask;
-	return retval;
-}
-
-static inline int __test_and_clear_le_bit(int nr, unsigned long *addr)
-{
-	int mask, retval;
-	unsigned char *ADDR = (unsigned char *)addr;
-
-	ADDR += nr >> 3;
-	mask = 1 << (nr & 0x07);
-	retval = (mask & *ADDR) != 0;
-	*ADDR &= ~mask;
-	return retval;
-}
-
-static inline unsigned long find_next_zero_le_bit(const unsigned long *addr,
-	unsigned long size, unsigned long offset)
-{
-	const unsigned long *p = addr + (offset >> 5);
-	unsigned long result = offset & ~31UL;
-	unsigned long tmp;
-
-	if (offset >= size)
-		return size;
-	size -= result;
-	offset &= 31UL;
-	if(offset) {
-		tmp = *(p++);
-		tmp |= __swab32(~0UL >> (32-offset));
-		if(size < 32)
-			goto found_first;
-		if(~tmp)
-			goto found_middle;
-		size -= 32;
-		result += 32;
-	}
-	while(size & ~31UL) {
-		if(~(tmp = *(p++)))
-			goto found_middle;
-		result += 32;
-		size -= 32;
-	}
-	if(!size)
-		return result;
-	tmp = *p;
-
-found_first:
-	tmp = __swab32(tmp) | (~0UL << size);
-	if (tmp == ~0UL)	/* Are any bits zero? */
-		return result + size; /* Nope. */
-	return result + ffz(tmp);
-
-found_middle:
-	return result + ffz(__swab32(tmp));
-}
-
-#define find_first_zero_le_bit(addr, size) \
-	find_next_zero_le_bit((addr), (size), 0)
-
-#define ext2_set_bit(nr,addr) \
-	__test_and_set_le_bit((nr),(unsigned long *)(addr))
-#define ext2_clear_bit(nr,addr) \
-	__test_and_clear_le_bit((nr),(unsigned long *)(addr))
-
-#define ext2_set_bit_atomic(lock, nr, addr)		\
-	({						\
-		int ret;				\
-		spin_lock(lock);			\
-		ret = ext2_set_bit((nr), (unsigned long *)(addr)); \
-		spin_unlock(lock);			\
-		ret;					\
-	})
-
-#define ext2_clear_bit_atomic(lock, nr, addr)		\
-	({						\
-		int ret;				\
-		spin_lock(lock);			\
-		ret = ext2_clear_bit((nr), (unsigned long *)(addr)); \
-		spin_unlock(lock);			\
-		ret;					\
-	})
-
-#define ext2_test_bit(nr,addr) \
-	test_le_bit((nr),(unsigned long *)(addr))
-#define ext2_find_first_zero_bit(addr, size) \
-	find_first_zero_le_bit((unsigned long *)(addr), (size))
-#define ext2_find_next_zero_bit(addr, size, off) \
-	find_next_zero_le_bit((unsigned long *)(addr), (size), (off))
-
-/* Bitmap functions for the minix filesystem. */
-#define minix_test_and_set_bit(nr,addr) \
-	__test_and_set_bit((nr),(unsigned long *)(addr))
-#define minix_set_bit(nr,addr) \
-	__set_bit((nr),(unsigned long *)(addr))
-#define minix_test_and_clear_bit(nr,addr) \
-	__test_and_clear_bit((nr),(unsigned long *)(addr))
-#define minix_test_bit(nr,addr) \
-	test_bit((nr),(unsigned long *)(addr))
-#define minix_find_first_zero_bit(addr,size) \
-	find_first_zero_bit((unsigned long *)(addr),(size))
+#include <asm-generic/bitops/ffz.h>
+#include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/ffs.h>
+#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/find.h>
+#include <asm-generic/bitops/ext2-non-atomic.h>
+#include <asm-generic/bitops/ext2-atomic.h>
+#include <asm-generic/bitops/minix.h>
 
 #endif /* __KERNEL__ */
 
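A note on the search helpers being replaced: the open-coded ffz() above counts trailing one bits, which is the same as finding the first set bit of the complemented word, and the generic ffz is typically expressed that way in terms of __ffs(). A small userspace sketch of that identity (sketch___ffs() and sketch_ffz() are illustrative names mirroring the deleted 32-bit routine, not a verbatim copy of asm-generic):

/*
 * Sketch only: sketch___ffs() is the 32-bit binary-narrowing search
 * removed above; sketch_ffz() shows the identity ffz(x) == __ffs(~x)
 * that the generic header relies on.
 */
#include <assert.h>
#include <stdio.h>

static int sketch___ffs(unsigned long word)
{
	int num = 0;

	if ((word & 0xffff) == 0) { num += 16; word >>= 16; }
	if ((word & 0xff) == 0)   { num += 8;  word >>= 8;  }
	if ((word & 0xf) == 0)    { num += 4;  word >>= 4;  }
	if ((word & 0x3) == 0)    { num += 2;  word >>= 2;  }
	if ((word & 0x1) == 0)    num += 1;
	return num;
}

static unsigned long sketch_ffz(unsigned long word)
{
	return sketch___ffs(~word);	/* first zero == first set bit of ~word */
}

int main(void)
{
	/* 0x17 = ...10111: lowest set bit is 0, lowest zero bit is 3 */
	assert(sketch___ffs(0x17UL) == 0);
	assert(sketch_ffz(0x17UL) == 3);
	printf("__ffs(0x17)=%d ffz(0x17)=%lu\n",
	       sketch___ffs(0x17UL), sketch_ffz(0x17UL));
	return 0;
}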