 arch/mips/Kconfig         |   8 ++
 include/asm-mips/bitops.h | 465 ++----------------------------------------
 2 files changed, 24 insertions(+), 449 deletions(-)
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index ac2012f033d6..5080ea1799a4 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -801,6 +801,14 @@ config RWSEM_GENERIC_SPINLOCK
 config RWSEM_XCHGADD_ALGORITHM
 	bool
 
+config GENERIC_FIND_NEXT_BIT
+	bool
+	default y
+
+config GENERIC_HWEIGHT
+	bool
+	default y
+
 config GENERIC_CALIBRATE_DELAY
 	bool
 	default y
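
The two new symbols opt MIPS into the generic library implementations: GENERIC_FIND_NEXT_BIT builds the out-of-line find_next_bit()/find_next_zero_bit() from lib/, and GENERIC_HWEIGHT enables the generic Hamming-weight routines behind the hweight*() interfaces. For reference, the population count used by generic_hweight32() is the usual parallel reduction; the following is a sketch of the idea, not the in-tree code verbatim:

/*
 * Sketch of the parallel popcount behind generic_hweight32().
 * Each step sums adjacent groups of bits in place.
 */
static inline unsigned int hweight32_sketch(unsigned int w)
{
	w = w - ((w >> 1) & 0x55555555);		/* 2-bit sums */
	w = (w & 0x33333333) + ((w >> 2) & 0x33333333);	/* 4-bit sums */
	w = (w + (w >> 4)) & 0x0f0f0f0f;		/* 8-bit sums */
	return (w * 0x01010101) >> 24;			/* total lands in top byte */
}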
diff --git a/include/asm-mips/bitops.h b/include/asm-mips/bitops.h
index 0e83abc829d4..a1728f8c0705 100644
--- a/include/asm-mips/bitops.h
+++ b/include/asm-mips/bitops.h
@@ -105,22 +105,6 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 }
 
 /*
- * __set_bit - Set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * Unlike set_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static inline void __set_bit(unsigned long nr, volatile unsigned long * addr)
-{
-	unsigned long * m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
-
-	*m |= 1UL << (nr & SZLONG_MASK);
-}
-
-/*
  * clear_bit - Clears a bit in memory
  * @nr: Bit to clear
  * @addr: Address to start counting from
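
With __set_bit() gone here (and __clear_bit()/__change_bit() removed by the next two hunks), the non-atomic single-bit operations now come from <asm-generic/bitops/non-atomic.h>, included further down. The generic versions are the same plain read-modify-write, written against BITS_PER_LONG instead of the MIPS-private SZLONG_LOG/SZLONG_MASK; roughly (a sketch, not the header verbatim):

/* Sketch of the generic non-atomic __set_bit(); details may differ. */
static inline void __set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long mask = 1UL << (nr % BITS_PER_LONG);
	unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG;

	*p |= mask;	/* plain store: callers must provide their own locking */
}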
@@ -169,22 +153,6 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
 }
 
 /*
- * __clear_bit - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * Unlike clear_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static inline void __clear_bit(unsigned long nr, volatile unsigned long * addr)
-{
-	unsigned long * m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
-
-	*m &= ~(1UL << (nr & SZLONG_MASK));
-}
-
-/*
  * change_bit - Toggle a bit in memory
  * @nr: Bit to change
  * @addr: Address to start counting from
@@ -235,22 +203,6 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 }
 
 /*
- * __change_bit - Toggle a bit in memory
- * @nr: the bit to change
- * @addr: the address to start counting from
- *
- * Unlike change_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static inline void __change_bit(unsigned long nr, volatile unsigned long * addr)
-{
-	unsigned long * m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
-
-	*m ^= 1UL << (nr & SZLONG_MASK);
-}
-
-/*
  * test_and_set_bit - Set a bit and return its old value
  * @nr: Bit to set
  * @addr: Address to count from
@@ -321,30 +273,6 @@ static inline int test_and_set_bit(unsigned long nr,
 }
 
 /*
- * __test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail. You must protect multiple accesses with a lock.
- */
-static inline int __test_and_set_bit(unsigned long nr,
-	volatile unsigned long *addr)
-{
-	volatile unsigned long *a = addr;
-	unsigned long mask;
-	int retval;
-
-	a += nr >> SZLONG_LOG;
-	mask = 1UL << (nr & SZLONG_MASK);
-	retval = (mask & *a) != 0;
-	*a |= mask;
-
-	return retval;
-}
-
-/*
  * test_and_clear_bit - Clear a bit and return its old value
  * @nr: Bit to clear
  * @addr: Address to count from
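
The __test_and_* helpers removed here and in the next hunk follow one pattern: read the word once, modify it, report the bit's old value. A sketch of the generic non-atomic test-and-set, under the same BITS_PER_LONG assumptions as above:

/* Sketch of the generic __test_and_set_bit(); not the header verbatim. */
static inline int __test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	unsigned long mask = 1UL << (nr % BITS_PER_LONG);
	unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG;
	unsigned long old = *p;

	*p = old | mask;
	return (old & mask) != 0;	/* previous value of the bit */
}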
@@ -417,30 +345,6 @@ static inline int test_and_clear_bit(unsigned long nr,
 }
 
 /*
- * __test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail. You must protect multiple accesses with a lock.
- */
-static inline int __test_and_clear_bit(unsigned long nr,
-	volatile unsigned long * addr)
-{
-	volatile unsigned long *a = addr;
-	unsigned long mask;
-	int retval;
-
-	a += (nr >> SZLONG_LOG);
-	mask = 1UL << (nr & SZLONG_MASK);
-	retval = ((mask & *a) != 0);
-	*a &= ~mask;
-
-	return retval;
-}
-
-/*
  * test_and_change_bit - Change a bit and return its old value
  * @nr: Bit to change
  * @addr: Address to count from
@@ -509,43 +413,11 @@ static inline int test_and_change_bit(unsigned long nr,
 	}
 }
 
-/*
- * __test_and_change_bit - Change a bit and return its old value
- * @nr: Bit to change
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail. You must protect multiple accesses with a lock.
- */
-static inline int __test_and_change_bit(unsigned long nr,
-	volatile unsigned long *addr)
-{
-	volatile unsigned long *a = addr;
-	unsigned long mask;
-	int retval;
-
-	a += (nr >> SZLONG_LOG);
-	mask = 1UL << (nr & SZLONG_MASK);
-	retval = ((mask & *a) != 0);
-	*a ^= mask;
-
-	return retval;
-}
-
 #undef __bi_flags
 #undef __bi_local_irq_save
 #undef __bi_local_irq_restore
 
-/*
- * test_bit - Determine whether a bit is set
- * @nr: bit number to test
- * @addr: Address to start counting from
- */
-static inline int test_bit(unsigned long nr, const volatile unsigned long *addr)
-{
-	return 1UL & (addr[nr >> SZLONG_LOG] >> (nr & SZLONG_MASK));
-}
+#include <asm-generic/bitops/non-atomic.h>
 
 /*
  * Return the bit position (0..63) of the most significant 1 bit in a word
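
This hunk also retires the MIPS test_bit(); the copy in <asm-generic/bitops/non-atomic.h> is the same shift-and-mask expressed with generic constants, so behaviour is unchanged. Roughly:

/* Sketch of the generic test_bit(); equivalent to the deleted MIPS one. */
static inline int test_bit(unsigned long nr, const volatile unsigned long *addr)
{
	return 1UL & (addr[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG));
}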
@@ -580,6 +452,8 @@ static inline int __ilog2(unsigned long x)
 	return 63 - lz;
 }
 
+#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
+
 /*
  * __ffs - find first bit in word.
  * @word: The word to search
@@ -589,31 +463,7 @@ static inline int __ilog2(unsigned long x)
  */
 static inline unsigned long __ffs(unsigned long word)
 {
-#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
 	return __ilog2(word & -word);
-#else
-	int b = 0, s;
-
-#ifdef CONFIG_32BIT
-	s = 16; if (word << 16 != 0) s = 0; b += s; word >>= s;
-	s =  8; if (word << 24 != 0) s = 0; b += s; word >>= s;
-	s =  4; if (word << 28 != 0) s = 0; b += s; word >>= s;
-	s =  2; if (word << 30 != 0) s = 0; b += s; word >>= s;
-	s =  1; if (word << 31 != 0) s = 0; b += s;
-
-	return b;
-#endif
-#ifdef CONFIG_64BIT
-	s = 32; if (word << 32 != 0) s = 0; b += s; word >>= s;
-	s = 16; if (word << 48 != 0) s = 0; b += s; word >>= s;
-	s =  8; if (word << 56 != 0) s = 0; b += s; word >>= s;
-	s =  4; if (word << 60 != 0) s = 0; b += s; word >>= s;
-	s =  2; if (word << 62 != 0) s = 0; b += s; word >>= s;
-	s =  1; if (word << 63 != 0) s = 0; b += s;
-
-	return b;
-#endif
-#endif
 }
 
 /*
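
The surviving one-liner works because word & -word isolates the lowest set bit in two's complement, and __ilog2() (clz/dclz-based, defined above) then returns its position. A hypothetical stand-alone demo of the trick, using GCC's __builtin_ctzl only as a cross-check:

#include <assert.h>

int main(void)
{
	unsigned long word = 0x68;		/* 0110 1000 binary */
	unsigned long lsb = word & -word;	/* 0000 1000: lowest set bit */

	assert(lsb == 0x8);
	assert(__builtin_ctzl(word) == 3);	/* the position __ffs() returns */
	return 0;
}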
@@ -652,321 +502,38 @@ static inline unsigned long ffz(unsigned long word)
  */
 static inline unsigned long fls(unsigned long word)
 {
-#ifdef CONFIG_32BIT
 #ifdef CONFIG_CPU_MIPS32
 	__asm__ ("clz %0, %1" : "=r" (word) : "r" (word));
 
 	return 32 - word;
-#else
-	{
-	int r = 32, s;
-
-	if (word == 0)
-		return 0;
-
-	s = 16; if ((word & 0xffff0000)) s = 0; r -= s; word <<= s;
-	s =  8; if ((word & 0xff000000)) s = 0; r -= s; word <<= s;
-	s =  4; if ((word & 0xf0000000)) s = 0; r -= s; word <<= s;
-	s =  2; if ((word & 0xc0000000)) s = 0; r -= s; word <<= s;
-	s =  1; if ((word & 0x80000000)) s = 0; r -= s;
-
-	return r;
-	}
 #endif
-#endif /* CONFIG_32BIT */
 
-#ifdef CONFIG_64BIT
 #ifdef CONFIG_CPU_MIPS64
-
 	__asm__ ("dclz %0, %1" : "=r" (word) : "r" (word));
 
 	return 64 - word;
-#else
-	{
-	int r = 64, s;
-
-	if (word == 0)
-		return 0;
-
-	s = 32; if ((word & 0xffffffff00000000UL)) s = 0; r -= s; word <<= s;
-	s = 16; if ((word & 0xffff000000000000UL)) s = 0; r -= s; word <<= s;
-	s =  8; if ((word & 0xff00000000000000UL)) s = 0; r -= s; word <<= s;
-	s =  4; if ((word & 0xf000000000000000UL)) s = 0; r -= s; word <<= s;
-	s =  2; if ((word & 0xc000000000000000UL)) s = 0; r -= s; word <<= s;
-	s =  1; if ((word & 0x8000000000000000UL)) s = 0; r -= s;
-
-	return r;
-	}
-#endif
 #endif
-#endif /* CONFIG_64BIT */
 }
 
-#define fls64(x) generic_fls64(x)
-
-/*
- * find_next_zero_bit - find the first zero bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
- */
-static inline unsigned long find_next_zero_bit(const unsigned long *addr,
-	unsigned long size, unsigned long offset)
-{
-	const unsigned long *p = addr + (offset >> SZLONG_LOG);
-	unsigned long result = offset & ~SZLONG_MASK;
-	unsigned long tmp;
-
-	if (offset >= size)
-		return size;
-	size -= result;
-	offset &= SZLONG_MASK;
-	if (offset) {
-		tmp = *(p++);
-		tmp |= ~0UL >> (_MIPS_SZLONG-offset);
-		if (size < _MIPS_SZLONG)
-			goto found_first;
-		if (~tmp)
-			goto found_middle;
-		size -= _MIPS_SZLONG;
-		result += _MIPS_SZLONG;
-	}
-	while (size & ~SZLONG_MASK) {
-		if (~(tmp = *(p++)))
-			goto found_middle;
-		result += _MIPS_SZLONG;
-		size -= _MIPS_SZLONG;
-	}
-	if (!size)
-		return result;
-	tmp = *p;
-
-found_first:
-	tmp |= ~0UL << size;
-	if (tmp == ~0UL)		/* Are any bits zero? */
-		return result + size;	/* Nope. */
-found_middle:
-	return result + ffz(tmp);
-}
+#else
 
-#define find_first_zero_bit(addr, size) \
-	find_next_zero_bit((addr), (size), 0)
+#include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/ffs.h>
+#include <asm-generic/bitops/ffz.h>
+#include <asm-generic/bitops/fls.h>
 
-/*
- * find_next_bit - find the next set bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
- */
-static inline unsigned long find_next_bit(const unsigned long *addr,
-	unsigned long size, unsigned long offset)
-{
-	const unsigned long *p = addr + (offset >> SZLONG_LOG);
-	unsigned long result = offset & ~SZLONG_MASK;
-	unsigned long tmp;
-
-	if (offset >= size)
-		return size;
-	size -= result;
-	offset &= SZLONG_MASK;
-	if (offset) {
-		tmp = *(p++);
-		tmp &= ~0UL << offset;
-		if (size < _MIPS_SZLONG)
-			goto found_first;
-		if (tmp)
-			goto found_middle;
-		size -= _MIPS_SZLONG;
-		result += _MIPS_SZLONG;
-	}
-	while (size & ~SZLONG_MASK) {
-		if ((tmp = *(p++)))
-			goto found_middle;
-		result += _MIPS_SZLONG;
-		size -= _MIPS_SZLONG;
-	}
-	if (!size)
-		return result;
-	tmp = *p;
-
-found_first:
-	tmp &= ~0UL >> (_MIPS_SZLONG - size);
-	if (tmp == 0UL)			/* Are any bits set? */
-		return result + size;	/* Nope. */
-found_middle:
-	return result + __ffs(tmp);
-}
+#endif /*defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) */
 
-/*
- * find_first_bit - find the first set bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum size to search
- *
- * Returns the bit-number of the first set bit, not the number of the byte
- * containing a bit.
- */
-#define find_first_bit(addr, size) \
-	find_next_bit((addr), (size), 0)
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/find.h>
 
 #ifdef __KERNEL__
 
-/*
- * Every architecture must define this function. It's the fastest
- * way of searching a 140-bit bitmap where the first 100 bits are
- * unlikely to be set. It's guaranteed that at least one of the 140
- * bits is cleared.
- */
-static inline int sched_find_first_bit(const unsigned long *b)
-{
-#ifdef CONFIG_32BIT
-	if (unlikely(b[0]))
-		return __ffs(b[0]);
-	if (unlikely(b[1]))
-		return __ffs(b[1]) + 32;
-	if (unlikely(b[2]))
-		return __ffs(b[2]) + 64;
-	if (b[3])
-		return __ffs(b[3]) + 96;
-	return __ffs(b[4]) + 128;
-#endif
-#ifdef CONFIG_64BIT
-	if (unlikely(b[0]))
-		return __ffs(b[0]);
-	if (unlikely(b[1]))
-		return __ffs(b[1]) + 64;
-	return __ffs(b[2]) + 128;
-#endif
-}
-
-/*
- * hweightN - returns the hamming weight of a N-bit word
- * @x: the word to weigh
- *
- * The Hamming Weight of a number is the total number of bits set in it.
- */
-
-#define hweight64(x) generic_hweight64(x)
-#define hweight32(x) generic_hweight32(x)
-#define hweight16(x) generic_hweight16(x)
-#define hweight8(x) generic_hweight8(x)
-
-static inline int __test_and_set_le_bit(unsigned long nr, unsigned long *addr)
-{
-	unsigned char *ADDR = (unsigned char *) addr;
-	int mask, retval;
-
-	ADDR += nr >> 3;
-	mask = 1 << (nr & 0x07);
-	retval = (mask & *ADDR) != 0;
-	*ADDR |= mask;
-
-	return retval;
-}
-
-static inline int __test_and_clear_le_bit(unsigned long nr, unsigned long *addr)
-{
-	unsigned char *ADDR = (unsigned char *) addr;
-	int mask, retval;
-
-	ADDR += nr >> 3;
-	mask = 1 << (nr & 0x07);
-	retval = (mask & *ADDR) != 0;
-	*ADDR &= ~mask;
-
-	return retval;
-}
-
-static inline int test_le_bit(unsigned long nr, const unsigned long * addr)
-{
-	const unsigned char *ADDR = (const unsigned char *) addr;
-	int mask;
-
-	ADDR += nr >> 3;
-	mask = 1 << (nr & 0x07);
-
-	return ((mask & *ADDR) != 0);
-}
-
-static inline unsigned long find_next_zero_le_bit(unsigned long *addr,
-	unsigned long size, unsigned long offset)
-{
-	unsigned long *p = ((unsigned long *) addr) + (offset >> SZLONG_LOG);
-	unsigned long result = offset & ~SZLONG_MASK;
-	unsigned long tmp;
-
-	if (offset >= size)
-		return size;
-	size -= result;
-	offset &= SZLONG_MASK;
-	if (offset) {
-		tmp = cpu_to_lelongp(p++);
-		tmp |= ~0UL >> (_MIPS_SZLONG-offset); /* bug or feature ? */
-		if (size < _MIPS_SZLONG)
-			goto found_first;
-		if (~tmp)
-			goto found_middle;
-		size -= _MIPS_SZLONG;
-		result += _MIPS_SZLONG;
-	}
-	while (size & ~SZLONG_MASK) {
-		if (~(tmp = cpu_to_lelongp(p++)))
-			goto found_middle;
-		result += _MIPS_SZLONG;
-		size -= _MIPS_SZLONG;
-	}
-	if (!size)
-		return result;
-	tmp = cpu_to_lelongp(p);
-
-found_first:
-	tmp |= ~0UL << size;
-	if (tmp == ~0UL)		/* Are any bits zero? */
-		return result + size;	/* Nope. */
-
-found_middle:
-	return result + ffz(tmp);
-}
-
-#define find_first_zero_le_bit(addr, size) \
-	find_next_zero_le_bit((addr), (size), 0)
-
-#define ext2_set_bit(nr,addr) \
-	__test_and_set_le_bit((nr),(unsigned long*)addr)
-#define ext2_clear_bit(nr, addr) \
-	__test_and_clear_le_bit((nr),(unsigned long*)addr)
-#define ext2_set_bit_atomic(lock, nr, addr)		\
-({							\
-	int ret;					\
-	spin_lock(lock);				\
-	ret = ext2_set_bit((nr), (addr));		\
-	spin_unlock(lock);				\
-	ret;						\
-})
-
-#define ext2_clear_bit_atomic(lock, nr, addr)		\
-({							\
-	int ret;					\
-	spin_lock(lock);				\
-	ret = ext2_clear_bit((nr), (addr));		\
-	spin_unlock(lock);				\
-	ret;						\
-})
-#define ext2_test_bit(nr, addr) test_le_bit((nr),(unsigned long*)addr)
-#define ext2_find_first_zero_bit(addr, size) \
-	find_first_zero_le_bit((unsigned long*)addr, size)
-#define ext2_find_next_zero_bit(addr, size, off) \
-	find_next_zero_le_bit((unsigned long*)addr, size, off)
-
-/*
- * Bitmap functions for the minix filesystem.
- *
- * FIXME: These assume that Minix uses the native byte/bitorder.
- * This limits the Minix filesystem's value for data exchange very much.
- */
-#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,addr)
-#define minix_set_bit(nr,addr) __set_bit(nr,addr)
-#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,addr)
-#define minix_test_bit(nr,addr) test_bit(nr,addr)
-#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/ext2-non-atomic.h>
+#include <asm-generic/bitops/ext2-atomic.h>
+#include <asm-generic/bitops/minix.h>
 
 #endif /* __KERNEL__ */
 
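On CONFIG_CPU_MIPS32/CONFIG_CPU_MIPS64 the retained clz/dclz path keeps fls() branch-free: the MIPS clz instruction is architecturally defined to return 32 for a zero input, so fls(0) == 0 falls out of 32 - clz(word) with no special case. A user-space sketch of the same shape, where GCC's __builtin_clz stands in for the instruction (the builtin, unlike the instruction, is undefined for 0, hence the guard):

/* Sketch only: __builtin_clz is a stand-in for the MIPS clz instruction. */
static inline int fls_sketch(unsigned int word)
{
	return word ? 32 - __builtin_clz(word) : 0;
}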