author		Akinobu Mita <mita@miraclelinux.com>	2006-03-26 04:39:26 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-03-26 11:57:12 -0500
commit		6d9f937b559d664b6f222cb91eca9c6802bfe89a (patch)
tree		f431c052f0c0d9f2d29bb6e39e3ff142f23e10c9
parent		2875aef8bd0e42367a66a78ef7abe10f3bba27b5 (diff)
[PATCH] bitops: m32r: use generic bitops
- remove __{,test_and_}{set,clear,change}_bit() and test_bit()
- remove ffz()
- remove find_{next,first}{,_zero}_bit()
- remove __ffs()
- remove generic_fls()
- remove generic_fls64()
- remove sched_find_first_bit()
- remove generic_ffs()
- remove generic_hweight{32,16,8}()
- remove ext2_{set,clear,test,find_first_zero,find_next_zero}_bit()
- remove ext2_{set,clear}_bit_atomic()
- remove minix_{test_and_set,set,test_and_clear,test,find_first_zero}_bit()
Signed-off-by: Akinobu Mita <mita@miraclelinux.com>
Cc: Hirokazu Takata <takata@linux-m32r.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--	arch/m32r/Kconfig		|   8
-rw-r--r--	include/asm-m32r/bitops.h	| 457
2 files changed, 20 insertions(+), 445 deletions(-)
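None of the helpers listed above go away for m32r; they are now picked up from the asm-generic/bitops/*.h headers that the rewritten bitops.h includes (see the final hunk below). A paraphrased sketch of what a generic non-atomic bitop looks like, assuming a 32-bit word size (not the verbatim asm-generic code):

static inline void sketch___set_bit(int nr, volatile unsigned long *addr)
{
	/* Bit nr lives in word nr / 32, at position nr % 32. */
	volatile unsigned long *word = addr + (nr >> 5);
	unsigned long mask = 1UL << (nr & 0x1f);

	*word |= mask;	/* plain read-modify-write: callers must serialize */
}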
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index a3dcc3fab4b7..05c864c6c2d9 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -214,6 +214,14 @@ config RWSEM_XCHGADD_ALGORITHM
 	bool
 	default n
 
+config GENERIC_FIND_NEXT_BIT
+	bool
+	default y
+
+config GENERIC_HWEIGHT
+	bool
+	default y
+
 config GENERIC_CALIBRATE_DELAY
 	bool
 	default y
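GENERIC_FIND_NEXT_BIT and GENERIC_HWEIGHT switch m32r to the kernel's library implementations of the bit-searching and population-count helpers that the bitops.h hunks below delete. The generic hweight32() is essentially the classic parallel bit count, sketched here for reference (a minimal standalone version, not the verbatim lib/ code):

static inline unsigned int hweight32_sketch(unsigned int w)
{
	w = w - ((w >> 1) & 0x55555555);		/* sums of bit pairs */
	w = (w & 0x33333333) + ((w >> 2) & 0x33333333);	/* 4-bit sums */
	w = (w + (w >> 4)) & 0x0f0f0f0f;		/* 8-bit sums */
	return (w * 0x01010101) >> 24;			/* add the four bytes */
}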
diff --git a/include/asm-m32r/bitops.h b/include/asm-m32r/bitops.h
index f8e993e0bbc0..902a366101a5 100644
--- a/include/asm-m32r/bitops.h
+++ b/include/asm-m32r/bitops.h
@@ -63,25 +63,6 @@ static __inline__ void set_bit(int nr, volatile void * addr)
 }
 
 /**
- * __set_bit - Set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * Unlike set_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static __inline__ void __set_bit(int nr, volatile void * addr)
-{
-	__u32 mask;
-	volatile __u32 *a = addr;
-
-	a += (nr >> 5);
-	mask = (1 << (nr & 0x1F));
-	*a |= mask;
-}
-
-/**
  * clear_bit - Clears a bit in memory
  * @nr: Bit to clear
  * @addr: Address to start counting from
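The removed comment's warning that "only one operation succeeds" describes a lost-update race: the non-atomic helpers compile to a separate load, modify, and store. A hypothetical interleaving (values invented for illustration):

static inline void nonatomic_or(volatile unsigned long *a, unsigned long mask)
{
	*a |= mask;	/* load *a, OR in mask, store back */
}
/*
 * Two CPUs, starting from *a == 0:
 *   CPU0 loads  *a        -> 0
 *   CPU1 loads  *a        -> 0
 *   CPU0 stores 0 | 0x1   -> *a == 0x1
 *   CPU1 stores 0 | 0x2   -> *a == 0x2   (CPU0's bit is lost)
 * The atomic set_bit()/clear_bit() kept by this patch avoid this by
 * updating with interrupts disabled, as in the clear_bit() context
 * visible in the next hunk.
 */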
@@ -118,39 +99,10 @@ static __inline__ void clear_bit(int nr, volatile void * addr)
 	local_irq_restore(flags);
 }
 
-static __inline__ void __clear_bit(int nr, volatile unsigned long * addr)
-{
-	unsigned long mask;
-	volatile unsigned long *a = addr;
-
-	a += (nr >> 5);
-	mask = (1 << (nr & 0x1F));
-	*a &= ~mask;
-}
-
 #define smp_mb__before_clear_bit()	barrier()
 #define smp_mb__after_clear_bit()	barrier()
 
 /**
- * __change_bit - Toggle a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * Unlike change_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static __inline__ void __change_bit(int nr, volatile void * addr)
-{
-	__u32 mask;
-	volatile __u32 *a = addr;
-
-	a += (nr >> 5);
-	mask = (1 << (nr & 0x1F));
-	*a ^= mask;
-}
-
-/**
  * change_bit - Toggle a bit in memory
  * @nr: Bit to clear
  * @addr: Address to start counting from
@@ -221,28 +173,6 @@ static __inline__ int test_and_set_bit(int nr, volatile void * addr)
 }
 
 /**
- * __test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail.  You must protect multiple accesses with a lock.
- */
-static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
-{
-	__u32 mask, oldbit;
-	volatile __u32 *a = addr;
-
-	a += (nr >> 5);
-	mask = (1 << (nr & 0x1F));
-	oldbit = (*a & mask);
-	*a |= mask;
-
-	return (oldbit != 0);
-}
-
-/**
  * test_and_clear_bit - Clear a bit and return its old value
  * @nr: Bit to set
  * @addr: Address to count from
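For contrast with the removed non-atomic variant, the atomic test_and_set_bit() kept above is the usual claim-once primitive. A hypothetical usage sketch (the flag word and slot numbering are invented):

static unsigned long slot_map[2];	/* bits 0..63, one per slot */

static int claim_slot(int slot)
{
	/* test_and_set_bit() returns the bit's previous value, so a
	 * zero return means this caller set the bit and owns the slot. */
	return !test_and_set_bit(slot, slot_map);
}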
@@ -280,42 +210,6 @@ static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
 }
 
 /**
- * __test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail.  You must protect multiple accesses with a lock.
- */
-static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
-{
-	__u32 mask, oldbit;
-	volatile __u32 *a = addr;
-
-	a += (nr >> 5);
-	mask = (1 << (nr & 0x1F));
-	oldbit = (*a & mask);
-	*a &= ~mask;
-
-	return (oldbit != 0);
-}
-
-/* WARNING: non atomic and it can be reordered! */
-static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
-{
-	__u32 mask, oldbit;
-	volatile __u32 *a = addr;
-
-	a += (nr >> 5);
-	mask = (1 << (nr & 0x1F));
-	oldbit = (*a & mask);
-	*a ^= mask;
-
-	return (oldbit != 0);
-}
-
-/**
  * test_and_change_bit - Change a bit and return its old value
  * @nr: Bit to set
  * @addr: Address to count from
@@ -350,353 +244,26 @@ static __inline__ int test_and_change_bit(int nr, volatile void * addr)
 	return (oldbit != 0);
 }
 
-/**
- * test_bit - Determine whether a bit is set
- * @nr: bit number to test
- * @addr: Address to start counting from
- */
-static __inline__ int test_bit(int nr, const volatile void * addr)
-{
-	__u32 mask;
-	const volatile __u32 *a = addr;
-
-	a += (nr >> 5);
-	mask = (1 << (nr & 0x1F));
-
-	return ((*a & mask) != 0);
-}
-
-/**
- * ffz - find first zero in word.
- * @word: The word to search
- *
- * Undefined if no zero exists, so code should check against ~0UL first.
- */
-static __inline__ unsigned long ffz(unsigned long word)
-{
-	int k;
-
-	word = ~word;
-	k = 0;
-	if (!(word & 0x0000ffff)) { k += 16; word >>= 16; }
-	if (!(word & 0x000000ff)) { k += 8; word >>= 8; }
-	if (!(word & 0x0000000f)) { k += 4; word >>= 4; }
-	if (!(word & 0x00000003)) { k += 2; word >>= 2; }
-	if (!(word & 0x00000001)) { k += 1; }
-
-	return k;
-}
-
-/**
- * find_first_zero_bit - find the first zero bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum size to search
- *
- * Returns the bit-number of the first zero bit, not the number of the byte
- * containing a bit.
- */
-
-#define find_first_zero_bit(addr, size) \
-	find_next_zero_bit((addr), (size), 0)
-
-/**
- * find_next_zero_bit - find the first zero bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
- */
-static __inline__ int find_next_zero_bit(const unsigned long *addr,
-					 int size, int offset)
-{
-	const unsigned long *p = addr + (offset >> 5);
-	unsigned long result = offset & ~31UL;
-	unsigned long tmp;
-
-	if (offset >= size)
-		return size;
-	size -= result;
-	offset &= 31UL;
-	if (offset) {
-		tmp = *(p++);
-		tmp |= ~0UL >> (32-offset);
-		if (size < 32)
-			goto found_first;
-		if (~tmp)
-			goto found_middle;
-		size -= 32;
-		result += 32;
-	}
-	while (size & ~31UL) {
-		if (~(tmp = *(p++)))
-			goto found_middle;
-		result += 32;
-		size -= 32;
-	}
-	if (!size)
-		return result;
-	tmp = *p;
-
-found_first:
-	tmp |= ~0UL << size;
-found_middle:
-	return result + ffz(tmp);
-}
-
-/**
- * __ffs - find first bit in word.
- * @word: The word to search
- *
- * Undefined if no bit exists, so code should check against 0 first.
- */
-static __inline__ unsigned long __ffs(unsigned long word)
-{
-	int k = 0;
-
-	if (!(word & 0x0000ffff)) { k += 16; word >>= 16; }
-	if (!(word & 0x000000ff)) { k += 8; word >>= 8; }
-	if (!(word & 0x0000000f)) { k += 4; word >>= 4; }
-	if (!(word & 0x00000003)) { k += 2; word >>= 2; }
-	if (!(word & 0x00000001)) { k += 1;}
-
-	return k;
-}
-
-/*
- * fls: find last bit set.
- */
-#define fls(x) generic_fls(x)
-#define fls64(x) generic_fls64(x)
+#include <asm-generic/bitops/non-atomic.h>
+#include <asm-generic/bitops/ffz.h>
+#include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/fls64.h>
 
 #ifdef __KERNEL__
 
-/*
- * Every architecture must define this function. It's the fastest
- * way of searching a 140-bit bitmap where the first 100 bits are
- * unlikely to be set. It's guaranteed that at least one of the 140
- * bits is cleared.
- */
-static inline int sched_find_first_bit(unsigned long *b)
-{
-	if (unlikely(b[0]))
-		return __ffs(b[0]);
-	if (unlikely(b[1]))
-		return __ffs(b[1]) + 32;
-	if (unlikely(b[2]))
-		return __ffs(b[2]) + 64;
-	if (b[3])
-		return __ffs(b[3]) + 96;
-	return __ffs(b[4]) + 128;
-}
-
-/**
- * find_next_bit - find the first set bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
- */
-static inline unsigned long find_next_bit(const unsigned long *addr,
-	unsigned long size, unsigned long offset)
-{
-	unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
-	unsigned int result = offset & ~31UL;
-	unsigned int tmp;
-
-	if (offset >= size)
-		return size;
-	size -= result;
-	offset &= 31UL;
-	if (offset) {
-		tmp = *p++;
-		tmp &= ~0UL << offset;
-		if (size < 32)
-			goto found_first;
-		if (tmp)
-			goto found_middle;
-		size -= 32;
-		result += 32;
-	}
-	while (size >= 32) {
-		if ((tmp = *p++) != 0)
-			goto found_middle;
-		result += 32;
-		size -= 32;
-	}
-	if (!size)
-		return result;
-	tmp = *p;
-
-found_first:
-	tmp &= ~0UL >> (32 - size);
-	if (tmp == 0UL)		/* Are any bits set? */
-		return result + size;	/* Nope. */
-found_middle:
-	return result + __ffs(tmp);
-}
-
-/**
- * find_first_bit - find the first set bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum size to search
- *
- * Returns the bit-number of the first set bit, not the number of the byte
- * containing a bit.
- */
-#define find_first_bit(addr, size) \
-	find_next_bit((addr), (size), 0)
-
-/**
- * ffs - find first bit set
- * @x: the word to search
- *
- * This is defined the same way as
- * the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
- */
-#define ffs(x) generic_ffs(x)
-
-/**
- * hweightN - returns the hamming weight of a N-bit word
- * @x: the word to weigh
- *
- * The Hamming Weight of a number is the total number of bits set in it.
- */
-
-#define hweight32(x) generic_hweight32(x)
-#define hweight16(x) generic_hweight16(x)
-#define hweight8(x) generic_hweight8(x)
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/find.h>
+#include <asm-generic/bitops/ffs.h>
+#include <asm-generic/bitops/hweight.h>
 
 #endif /* __KERNEL__ */
 
 #ifdef __KERNEL__
 
-/*
- * ext2_XXXX function
- * orig: include/asm-sh/bitops.h
- */
-
-#ifdef __LITTLE_ENDIAN__
-#define ext2_set_bit			__test_and_set_bit
-#define ext2_clear_bit			__test_and_clear_bit
-#define ext2_test_bit			test_bit
-#define ext2_find_first_zero_bit	find_first_zero_bit
-#define ext2_find_next_zero_bit		find_next_zero_bit
-#else
-static inline int ext2_set_bit(int nr, volatile void * addr)
-{
-	__u8 mask, oldbit;
-	volatile __u8 *a = addr;
-
-	a += (nr >> 3);
-	mask = (1 << (nr & 0x07));
-	oldbit = (*a & mask);
-	*a |= mask;
-
-	return (oldbit != 0);
-}
-
-static inline int ext2_clear_bit(int nr, volatile void * addr)
-{
-	__u8 mask, oldbit;
-	volatile __u8 *a = addr;
-
-	a += (nr >> 3);
-	mask = (1 << (nr & 0x07));
-	oldbit = (*a & mask);
-	*a &= ~mask;
-
-	return (oldbit != 0);
-}
-
-static inline int ext2_test_bit(int nr, const volatile void * addr)
-{
-	__u32 mask;
-	const volatile __u8 *a = addr;
-
-	a += (nr >> 3);
-	mask = (1 << (nr & 0x07));
-
-	return ((mask & *a) != 0);
-}
-
-#define ext2_find_first_zero_bit(addr, size) \
-	ext2_find_next_zero_bit((addr), (size), 0)
-
-static inline unsigned long ext2_find_next_zero_bit(void *addr,
-	unsigned long size, unsigned long offset)
-{
-	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
-	unsigned long result = offset & ~31UL;
-	unsigned long tmp;
-
-	if (offset >= size)
-		return size;
-	size -= result;
-	offset &= 31UL;
-	if(offset) {
-		/* We hold the little endian value in tmp, but then the
-		 * shift is illegal. So we could keep a big endian value
-		 * in tmp, like this:
-		 *
-		 * tmp = __swab32(*(p++));
-		 * tmp |= ~0UL >> (32-offset);
-		 *
-		 * but this would decrease preformance, so we change the
-		 * shift:
-		 */
-		tmp = *(p++);
-		tmp |= __swab32(~0UL >> (32-offset));
-		if(size < 32)
-			goto found_first;
-		if(~tmp)
-			goto found_middle;
-		size -= 32;
-		result += 32;
-	}
-	while(size & ~31UL) {
-		if(~(tmp = *(p++)))
-			goto found_middle;
-		result += 32;
-		size -= 32;
-	}
-	if(!size)
-		return result;
-	tmp = *p;
-
-found_first:
-	/* tmp is little endian, so we would have to swab the shift,
-	 * see above. But then we have to swab tmp below for ffz, so
-	 * we might as well do this here.
-	 */
-	return result + ffz(__swab32(tmp) | (~0UL << size));
-found_middle:
-	return result + ffz(__swab32(tmp));
-}
-#endif
-
-#define ext2_set_bit_atomic(lock, nr, addr)		\
-	({						\
-		int ret;				\
-		spin_lock(lock);			\
-		ret = ext2_set_bit((nr), (addr));	\
-		spin_unlock(lock);			\
-		ret;					\
-	})
-
-#define ext2_clear_bit_atomic(lock, nr, addr)		\
-	({						\
-		int ret;				\
-		spin_lock(lock);			\
-		ret = ext2_clear_bit((nr), (addr));	\
-		spin_unlock(lock);			\
-		ret;					\
-	})
-
-/* Bitmap functions for the minix filesystem.  */
-#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,addr)
-#define minix_set_bit(nr,addr) __set_bit(nr,addr)
-#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,addr)
-#define minix_test_bit(nr,addr) test_bit(nr,addr)
-#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
+#include <asm-generic/bitops/ext2-non-atomic.h>
+#include <asm-generic/bitops/ext2-atomic.h>
+#include <asm-generic/bitops/minix.h>
 
 #endif /* __KERNEL__ */
 
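The big-endian #else branch deleted in the last hunk shows the constraint the ext2 helpers work under: ext2 bitmaps are little-endian on disk, so a big-endian host must address bits byte-wise (or byte-swap each word) to reach the same storage bit. A standalone sketch of the byte-wise approach, with a hypothetical name (the real replacement is supplied by asm-generic/bitops/ext2-non-atomic.h):

static inline int le_test_and_set_sketch(int nr, volatile void *addr)
{
	/* Byte addressing puts bit nr in the same storage byte on any
	 * host, regardless of word endianness. */
	volatile unsigned char *b = (volatile unsigned char *)addr + (nr >> 3);
	unsigned char mask = 1U << (nr & 7);
	unsigned char old = *b;

	*b = old | mask;
	return (old & mask) != 0;
}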