-rw-r--r--  arch/powerpc/kernel/ppc_ksyms.c                                     |   9
-rw-r--r--  arch/powerpc/lib/Makefile                                           |   9
-rw-r--r--  arch/powerpc/lib/bitops.c (renamed from arch/ppc64/kernel/bitops.c) |  97
-rw-r--r--  arch/ppc/Makefile                                                   |   3
-rw-r--r--  arch/ppc/kernel/bitops.c                                            | 126
-rw-r--r--  arch/ppc64/kernel/Makefile                                          |   2
-rw-r--r--  include/asm-powerpc/bitops.h                                        | 437
-rw-r--r--  include/asm-ppc/bitops.h                                            | 460
-rw-r--r--  include/asm-ppc64/bitops.h                                          | 360
-rw-r--r--  include/asm-ppc64/mmu_context.h                                     |  15
10 files changed, 495 insertions(+), 1023 deletions(-)
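
The core of the merge is replacing the hard-coded 32-bit (nr >> 5, nr & 0x1f) and 64-bit (nr >> 6, nr & 0x3f) word/mask arithmetic with BITS_PER_LONG-based macros, so one header serves both word sizes. A minimal user-space sketch of that decomposition, using the macro names from the new include/asm-powerpc/bitops.h (the demo main() is illustrative only, not kernel code):

#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))
#define BITOP_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)

int main(void)
{
	unsigned long map[4] = { 0 };
	unsigned int nr = 70;

	/* bit 70 lands in word 1 on a 64-bit build, word 2 on 32-bit */
	map[BITOP_WORD(nr)] |= BITOP_MASK(nr);
	printf("word %zu, mask %#lx\n",
	       (size_t)BITOP_WORD(nr), BITOP_MASK(nr));
	return 0;
}
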
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 8bc540337ba0..47d6f7e2ea9f 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -81,15 +81,6 @@ EXPORT_SYMBOL(_prep_type);
81 | EXPORT_SYMBOL(ucSystemType); | 81 | EXPORT_SYMBOL(ucSystemType); |
82 | #endif | 82 | #endif |
83 | 83 | ||
84 | #if !defined(__INLINE_BITOPS) | ||
85 | EXPORT_SYMBOL(set_bit); | ||
86 | EXPORT_SYMBOL(clear_bit); | ||
87 | EXPORT_SYMBOL(change_bit); | ||
88 | EXPORT_SYMBOL(test_and_set_bit); | ||
89 | EXPORT_SYMBOL(test_and_clear_bit); | ||
90 | EXPORT_SYMBOL(test_and_change_bit); | ||
91 | #endif /* __INLINE_BITOPS */ | ||
92 | |||
93 | EXPORT_SYMBOL(strcpy); | 84 | EXPORT_SYMBOL(strcpy); |
94 | EXPORT_SYMBOL(strncpy); | 85 | EXPORT_SYMBOL(strncpy); |
95 | EXPORT_SYMBOL(strcat); | 86 | EXPORT_SYMBOL(strcat); |
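
These exports were only needed for the !__INLINE_BITOPS configuration, where arch/ppc/kernel/bitops.c (deleted below) provided out-of-line copies of the bitops; the merged header makes every bitop a static inline, so there is no symbol left to export. A hedged sketch of the pattern being retired (gcc's __atomic_fetch_or stands in for the hand-written lwarx/or/stwcx. loop of the deleted file):

#include <linux/module.h>
#include <linux/bitops.h>

/* Out-of-line pattern being retired: exactly one copy of the function
 * exists in the kernel image, and EXPORT_SYMBOL makes it visible to
 * loadable modules. */
void set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = 1UL << (nr & (BITS_PER_LONG - 1));

	__atomic_fetch_or(&addr[nr / BITS_PER_LONG], mask, __ATOMIC_RELAXED);
}
EXPORT_SYMBOL(set_bit);

/* Merged replacement: a static inline in <asm/bitops.h> is emitted
 * directly into each caller, so neither the .c file nor the export
 * is needed. */
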
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index dfb33915ad61..34f5c2e074c9 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -3,13 +3,14 @@
3 | # | 3 | # |
4 | 4 | ||
5 | ifeq ($(CONFIG_PPC_MERGE),y) | 5 | ifeq ($(CONFIG_PPC_MERGE),y) |
6 | obj-y := string.o | 6 | obj-y := string.o strcase.o |
7 | obj-$(CONFIG_PPC32) += div64.o copy_32.o checksum_32.o | ||
7 | endif | 8 | endif |
8 | 9 | ||
9 | obj-y += strcase.o | 10 | obj-y += bitops.o |
10 | obj-$(CONFIG_PPC32) += div64.o copy_32.o checksum_32.o | ||
11 | obj-$(CONFIG_PPC64) += checksum_64.o copypage_64.o copyuser_64.o \ | 11 | obj-$(CONFIG_PPC64) += checksum_64.o copypage_64.o copyuser_64.o \ |
12 | memcpy_64.o usercopy_64.o mem_64.o string.o | 12 | memcpy_64.o usercopy_64.o mem_64.o string.o \ |
13 | strcase.o | ||
13 | obj-$(CONFIG_PPC_ISERIES) += e2a.o | 14 | obj-$(CONFIG_PPC_ISERIES) += e2a.o |
14 | obj-$(CONFIG_XMON) += sstep.o | 15 | obj-$(CONFIG_XMON) += sstep.o |
15 | 16 | ||
diff --git a/arch/ppc64/kernel/bitops.c b/arch/powerpc/lib/bitops.c
index ae329e8b4acb..b67ce3004ebf 100644
--- a/arch/ppc64/kernel/bitops.c
+++ b/arch/powerpc/lib/bitops.c
@@ -1,93 +1,97 @@
1 | /* | 1 | #include <linux/types.h> |
2 | * These are too big to be inlined. | ||
3 | */ | ||
4 | |||
5 | #include <linux/kernel.h> | ||
6 | #include <linux/module.h> | 2 | #include <linux/module.h> |
7 | #include <linux/bitops.h> | ||
8 | #include <asm/byteorder.h> | 3 | #include <asm/byteorder.h> |
4 | #include <asm/bitops.h> | ||
9 | 5 | ||
10 | unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, | 6 | /** |
11 | unsigned long offset) | 7 | * find_next_bit - find the next set bit in a memory region |
8 | * @addr: The address to base the search on | ||
9 | * @offset: The bitnumber to start searching at | ||
10 | * @size: The maximum size to search | ||
11 | */ | ||
12 | unsigned long find_next_bit(const unsigned long *addr, unsigned long size, | ||
13 | unsigned long offset) | ||
12 | { | 14 | { |
13 | const unsigned long *p = addr + (offset >> 6); | 15 | const unsigned long *p = addr + BITOP_WORD(offset); |
14 | unsigned long result = offset & ~63UL; | 16 | unsigned long result = offset & ~(BITS_PER_LONG-1); |
15 | unsigned long tmp; | 17 | unsigned long tmp; |
16 | 18 | ||
17 | if (offset >= size) | 19 | if (offset >= size) |
18 | return size; | 20 | return size; |
19 | size -= result; | 21 | size -= result; |
20 | offset &= 63UL; | 22 | offset %= BITS_PER_LONG; |
21 | if (offset) { | 23 | if (offset) { |
22 | tmp = *(p++); | 24 | tmp = *(p++); |
23 | tmp |= ~0UL >> (64 - offset); | 25 | tmp &= (~0UL << offset); |
24 | if (size < 64) | 26 | if (size < BITS_PER_LONG) |
25 | goto found_first; | 27 | goto found_first; |
26 | if (~tmp) | 28 | if (tmp) |
27 | goto found_middle; | 29 | goto found_middle; |
28 | size -= 64; | 30 | size -= BITS_PER_LONG; |
29 | result += 64; | 31 | result += BITS_PER_LONG; |
30 | } | 32 | } |
31 | while (size & ~63UL) { | 33 | while (size & ~(BITS_PER_LONG-1)) { |
32 | if (~(tmp = *(p++))) | 34 | if ((tmp = *(p++))) |
33 | goto found_middle; | 35 | goto found_middle; |
34 | result += 64; | 36 | result += BITS_PER_LONG; |
35 | size -= 64; | 37 | size -= BITS_PER_LONG; |
36 | } | 38 | } |
37 | if (!size) | 39 | if (!size) |
38 | return result; | 40 | return result; |
39 | tmp = *p; | 41 | tmp = *p; |
40 | 42 | ||
41 | found_first: | 43 | found_first: |
42 | tmp |= ~0UL << size; | 44 | tmp &= (~0UL >> (BITS_PER_LONG - size)); |
43 | if (tmp == ~0UL) /* Are any bits zero? */ | 45 | if (tmp == 0UL) /* Are any bits set? */ |
44 | return result + size; /* Nope. */ | 46 | return result + size; /* Nope. */ |
45 | found_middle: | 47 | found_middle: |
46 | return result + ffz(tmp); | 48 | return result + __ffs(tmp); |
47 | } | 49 | } |
50 | EXPORT_SYMBOL(find_next_bit); | ||
48 | 51 | ||
49 | EXPORT_SYMBOL(find_next_zero_bit); | 52 | /* |
50 | 53 | * This implementation of find_{first,next}_zero_bit was stolen from | |
51 | unsigned long find_next_bit(const unsigned long *addr, unsigned long size, | 54 | * Linus' asm-alpha/bitops.h. |
52 | unsigned long offset) | 55 | */ |
56 | unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, | ||
57 | unsigned long offset) | ||
53 | { | 58 | { |
54 | const unsigned long *p = addr + (offset >> 6); | 59 | const unsigned long *p = addr + BITOP_WORD(offset); |
55 | unsigned long result = offset & ~63UL; | 60 | unsigned long result = offset & ~(BITS_PER_LONG-1); |
56 | unsigned long tmp; | 61 | unsigned long tmp; |
57 | 62 | ||
58 | if (offset >= size) | 63 | if (offset >= size) |
59 | return size; | 64 | return size; |
60 | size -= result; | 65 | size -= result; |
61 | offset &= 63UL; | 66 | offset %= BITS_PER_LONG; |
62 | if (offset) { | 67 | if (offset) { |
63 | tmp = *(p++); | 68 | tmp = *(p++); |
64 | tmp &= (~0UL << offset); | 69 | tmp |= ~0UL >> (BITS_PER_LONG - offset); |
65 | if (size < 64) | 70 | if (size < BITS_PER_LONG) |
66 | goto found_first; | 71 | goto found_first; |
67 | if (tmp) | 72 | if (~tmp) |
68 | goto found_middle; | 73 | goto found_middle; |
69 | size -= 64; | 74 | size -= BITS_PER_LONG; |
70 | result += 64; | 75 | result += BITS_PER_LONG; |
71 | } | 76 | } |
72 | while (size & ~63UL) { | 77 | while (size & ~(BITS_PER_LONG-1)) { |
73 | if ((tmp = *(p++))) | 78 | if (~(tmp = *(p++))) |
74 | goto found_middle; | 79 | goto found_middle; |
75 | result += 64; | 80 | result += BITS_PER_LONG; |
76 | size -= 64; | 81 | size -= BITS_PER_LONG; |
77 | } | 82 | } |
78 | if (!size) | 83 | if (!size) |
79 | return result; | 84 | return result; |
80 | tmp = *p; | 85 | tmp = *p; |
81 | 86 | ||
82 | found_first: | 87 | found_first: |
83 | tmp &= (~0UL >> (64 - size)); | 88 | tmp |= ~0UL << size; |
84 | if (tmp == 0UL) /* Are any bits set? */ | 89 | if (tmp == ~0UL) /* Are any bits zero? */ |
85 | return result + size; /* Nope. */ | 90 | return result + size; /* Nope. */ |
86 | found_middle: | 91 | found_middle: |
87 | return result + __ffs(tmp); | 92 | return result + ffz(tmp); |
88 | } | 93 | } |
89 | 94 | EXPORT_SYMBOL(find_next_zero_bit); | |
90 | EXPORT_SYMBOL(find_next_bit); | ||
91 | 95 | ||
92 | static inline unsigned int ext2_ilog2(unsigned int x) | 96 | static inline unsigned int ext2_ilog2(unsigned int x) |
93 | { | 97 | { |
@@ -106,8 +110,8 @@ static inline unsigned int ext2_ffz(unsigned int x)
106 | return rc; | 110 | return rc; |
107 | } | 111 | } |
108 | 112 | ||
109 | unsigned long find_next_zero_le_bit(const unsigned long *addr, unsigned long size, | 113 | unsigned long find_next_zero_le_bit(const unsigned long *addr, |
110 | unsigned long offset) | 114 | unsigned long size, unsigned long offset) |
111 | { | 115 | { |
112 | const unsigned int *p = ((const unsigned int *)addr) + (offset >> 5); | 116 | const unsigned int *p = ((const unsigned int *)addr) + (offset >> 5); |
113 | unsigned int result = offset & ~31; | 117 | unsigned int result = offset & ~31; |
@@ -143,5 +147,4 @@ found_first:
143 | found_middle: | 147 | found_middle: |
144 | return result + ext2_ffz(tmp); | 148 | return result + ext2_ffz(tmp); |
145 | } | 149 | } |
146 | |||
147 | EXPORT_SYMBOL(find_next_zero_le_bit); | 150 | EXPORT_SYMBOL(find_next_zero_le_bit); |
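
Both search routines scan a word at a time: mask off the bits below offset in the first partial word, walk whole words, then mask the bits above size in the trailing word; BITOP_WORD and BITS_PER_LONG keep the same code correct at 32 and 64 bits. A hedged user-space restatement of find_next_bit's contract, deliberately bit-at-a-time for clarity rather than the word-at-a-time kernel version:

#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

/* Same contract as the kernel's find_next_bit: return the index of the
 * first set bit at or after off, or size if none exists. */
static unsigned long next_bit(const unsigned long *addr,
			      unsigned long size, unsigned long off)
{
	while (off < size) {
		unsigned long word = addr[off / BITS_PER_LONG];

		if (word & (1UL << (off % BITS_PER_LONG)))
			return off;
		off++;
	}
	return size;
}

int main(void)
{
	unsigned long map[2] = { 0x90UL, 0x1UL }; /* bits 4, 7, BITS_PER_LONG */
	unsigned long size = 2 * BITS_PER_LONG;

	for (unsigned long b = next_bit(map, size, 0);
	     b < size; b = next_bit(map, size, b + 1))
		printf("set bit at %lu\n", b);
	return 0;
}
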
diff --git a/arch/ppc/Makefile b/arch/ppc/Makefile
index 94d5716fa7c3..e719a4933af1 100644
--- a/arch/ppc/Makefile
+++ b/arch/ppc/Makefile
@@ -66,7 +66,8 @@ head-$(CONFIG_PPC_FPU) += arch/powerpc/kernel/fpu.o
66 | core-y += arch/ppc/kernel/ arch/powerpc/kernel/ \ | 66 | core-y += arch/ppc/kernel/ arch/powerpc/kernel/ \ |
67 | arch/ppc/platforms/ \ | 67 | arch/ppc/platforms/ \ |
68 | arch/ppc/mm/ arch/ppc/lib/ \ | 68 | arch/ppc/mm/ arch/ppc/lib/ \ |
69 | arch/ppc/syslib/ arch/powerpc/sysdev/ | 69 | arch/ppc/syslib/ arch/powerpc/sysdev/ \ |
70 | arch/powerpc/lib/ | ||
70 | core-$(CONFIG_4xx) += arch/ppc/platforms/4xx/ | 71 | core-$(CONFIG_4xx) += arch/ppc/platforms/4xx/ |
71 | core-$(CONFIG_83xx) += arch/ppc/platforms/83xx/ | 72 | core-$(CONFIG_83xx) += arch/ppc/platforms/83xx/ |
72 | core-$(CONFIG_85xx) += arch/ppc/platforms/85xx/ | 73 | core-$(CONFIG_85xx) += arch/ppc/platforms/85xx/ |
diff --git a/arch/ppc/kernel/bitops.c b/arch/ppc/kernel/bitops.c
deleted file mode 100644
index 7f53d193968b..000000000000
--- a/arch/ppc/kernel/bitops.c
+++ /dev/null
@@ -1,126 +0,0 @@
1 | /* | ||
2 | * Copyright (C) 1996 Paul Mackerras. | ||
3 | */ | ||
4 | |||
5 | #include <linux/kernel.h> | ||
6 | #include <linux/bitops.h> | ||
7 | |||
8 | /* | ||
9 | * If the bitops are not inlined in bitops.h, they are defined here. | ||
10 | * -- paulus | ||
11 | */ | ||
12 | #if !__INLINE_BITOPS | ||
13 | void set_bit(int nr, volatile void * addr) | ||
14 | { | ||
15 | unsigned long old; | ||
16 | unsigned long mask = 1 << (nr & 0x1f); | ||
17 | unsigned long *p = ((unsigned long *)addr) + (nr >> 5); | ||
18 | |||
19 | __asm__ __volatile__(SMP_WMB "\n\ | ||
20 | 1: lwarx %0,0,%3 \n\ | ||
21 | or %0,%0,%2 \n" | ||
22 | PPC405_ERR77(0,%3) | ||
23 | " stwcx. %0,0,%3 \n\ | ||
24 | bne 1b" | ||
25 | SMP_MB | ||
26 | : "=&r" (old), "=m" (*p) | ||
27 | : "r" (mask), "r" (p), "m" (*p) | ||
28 | : "cc" ); | ||
29 | } | ||
30 | |||
31 | void clear_bit(int nr, volatile void *addr) | ||
32 | { | ||
33 | unsigned long old; | ||
34 | unsigned long mask = 1 << (nr & 0x1f); | ||
35 | unsigned long *p = ((unsigned long *)addr) + (nr >> 5); | ||
36 | |||
37 | __asm__ __volatile__(SMP_WMB "\n\ | ||
38 | 1: lwarx %0,0,%3 \n\ | ||
39 | andc %0,%0,%2 \n" | ||
40 | PPC405_ERR77(0,%3) | ||
41 | " stwcx. %0,0,%3 \n\ | ||
42 | bne 1b" | ||
43 | SMP_MB | ||
44 | : "=&r" (old), "=m" (*p) | ||
45 | : "r" (mask), "r" (p), "m" (*p) | ||
46 | : "cc"); | ||
47 | } | ||
48 | |||
49 | void change_bit(int nr, volatile void *addr) | ||
50 | { | ||
51 | unsigned long old; | ||
52 | unsigned long mask = 1 << (nr & 0x1f); | ||
53 | unsigned long *p = ((unsigned long *)addr) + (nr >> 5); | ||
54 | |||
55 | __asm__ __volatile__(SMP_WMB "\n\ | ||
56 | 1: lwarx %0,0,%3 \n\ | ||
57 | xor %0,%0,%2 \n" | ||
58 | PPC405_ERR77(0,%3) | ||
59 | " stwcx. %0,0,%3 \n\ | ||
60 | bne 1b" | ||
61 | SMP_MB | ||
62 | : "=&r" (old), "=m" (*p) | ||
63 | : "r" (mask), "r" (p), "m" (*p) | ||
64 | : "cc"); | ||
65 | } | ||
66 | |||
67 | int test_and_set_bit(int nr, volatile void *addr) | ||
68 | { | ||
69 | unsigned int old, t; | ||
70 | unsigned int mask = 1 << (nr & 0x1f); | ||
71 | volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5); | ||
72 | |||
73 | __asm__ __volatile__(SMP_WMB "\n\ | ||
74 | 1: lwarx %0,0,%4 \n\ | ||
75 | or %1,%0,%3 \n" | ||
76 | PPC405_ERR77(0,%4) | ||
77 | " stwcx. %1,0,%4 \n\ | ||
78 | bne 1b" | ||
79 | SMP_MB | ||
80 | : "=&r" (old), "=&r" (t), "=m" (*p) | ||
81 | : "r" (mask), "r" (p), "m" (*p) | ||
82 | : "cc"); | ||
83 | |||
84 | return (old & mask) != 0; | ||
85 | } | ||
86 | |||
87 | int test_and_clear_bit(int nr, volatile void *addr) | ||
88 | { | ||
89 | unsigned int old, t; | ||
90 | unsigned int mask = 1 << (nr & 0x1f); | ||
91 | volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5); | ||
92 | |||
93 | __asm__ __volatile__(SMP_WMB "\n\ | ||
94 | 1: lwarx %0,0,%4 \n\ | ||
95 | andc %1,%0,%3 \n" | ||
96 | PPC405_ERR77(0,%4) | ||
97 | " stwcx. %1,0,%4 \n\ | ||
98 | bne 1b" | ||
99 | SMP_MB | ||
100 | : "=&r" (old), "=&r" (t), "=m" (*p) | ||
101 | : "r" (mask), "r" (p), "m" (*p) | ||
102 | : "cc"); | ||
103 | |||
104 | return (old & mask) != 0; | ||
105 | } | ||
106 | |||
107 | int test_and_change_bit(int nr, volatile void *addr) | ||
108 | { | ||
109 | unsigned int old, t; | ||
110 | unsigned int mask = 1 << (nr & 0x1f); | ||
111 | volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5); | ||
112 | |||
113 | __asm__ __volatile__(SMP_WMB "\n\ | ||
114 | 1: lwarx %0,0,%4 \n\ | ||
115 | xor %1,%0,%3 \n" | ||
116 | PPC405_ERR77(0,%4) | ||
117 | " stwcx. %1,0,%4 \n\ | ||
118 | bne 1b" | ||
119 | SMP_MB | ||
120 | : "=&r" (old), "=&r" (t), "=m" (*p) | ||
121 | : "r" (mask), "r" (p), "m" (*p) | ||
122 | : "cc"); | ||
123 | |||
124 | return (old & mask) != 0; | ||
125 | } | ||
126 | #endif /* !__INLINE_BITOPS */ | ||
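
Every routine in this deleted file has the same shape: lwarx load-reserve, modify, stwcx. store-conditional, branch back on failure, with the eieio/sync pair (SMP_WMB/SMP_MB) bracketing the test_and_* forms to give them their barrier semantics. In portable C11 that whole loop collapses into one atomic read-modify-write; a hedged sketch of test_and_set_bit's semantics (an illustrative equivalent, not the kernel implementation):

#include <stdatomic.h>
#include <stdbool.h>

/* C11 equivalent of test_and_set_bit() on a 32-bit word:
 * atomic_fetch_or plays the lwarx/or/stwcx. retry loop, and
 * memory_order_seq_cst stands in for the eieio/sync barrier pair. */
static bool test_and_set_bit32(int nr, _Atomic unsigned int *word)
{
	unsigned int mask = 1u << (nr & 0x1f);
	unsigned int old = atomic_fetch_or(word, mask);

	return (old & mask) != 0;
}

On PowerPC a compiler lowers atomic_fetch_or to exactly such a lwarx/stwcx. retry loop, which is why the out-of-line copies could disappear without changing behaviour.
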
diff --git a/arch/ppc64/kernel/Makefile b/arch/ppc64/kernel/Makefile
index 247d2fc6a8ed..990df0905c87 100644
--- a/arch/ppc64/kernel/Makefile
+++ b/arch/ppc64/kernel/Makefile
@@ -13,7 +13,7 @@ endif
13 | 13 | ||
14 | obj-y += irq.o idle.o dma.o \ | 14 | obj-y += irq.o idle.o dma.o \ |
15 | signal.o \ | 15 | signal.o \ |
16 | align.o bitops.o pacaData.o \ | 16 | align.o pacaData.o \ |
17 | udbg.o ioctl32.o \ | 17 | udbg.o ioctl32.o \ |
18 | rtc.o \ | 18 | rtc.o \ |
19 | cpu_setup_power4.o \ | 19 | cpu_setup_power4.o \ |
diff --git a/include/asm-powerpc/bitops.h b/include/asm-powerpc/bitops.h
new file mode 100644
index 000000000000..dc25c53704d5
--- /dev/null
+++ b/include/asm-powerpc/bitops.h
@@ -0,0 +1,437 @@
1 | /* | ||
2 | * PowerPC atomic bit operations. | ||
3 | * | ||
4 | * Merged version by David Gibson <david@gibson.dropbear.id.au>. | ||
5 | * Based on ppc64 versions by: Dave Engebretsen, Todd Inglett, Don | ||
6 | * Reed, Pat McCarthy, Peter Bergner, Anton Blanchard. They | ||
7 | * originally took it from the ppc32 code. | ||
8 | * | ||
9 | * Within a word, bits are numbered LSB first. Lots of places make | ||
10 | * this assumption by directly testing bits with (val & (1<<nr)). | ||
11 | * This can cause confusion for large (> 1 word) bitmaps on a | ||
12 | * big-endian system because, unlike little endian, the number of each | ||
13 | * bit depends on the word size. | ||
14 | * | ||
15 | * The bitop functions are defined to work on unsigned longs, so for a | ||
16 | * ppc64 system the bits end up numbered: | ||
17 | * |63..............0|127............64|191...........128|255...........192| | ||
18 | * and on ppc32: | ||
19 | * |31.....0|63....32|95....64|127...96|159..128|191..160|223..192|255..224| | ||
20 | * | ||
21 | * There are a few little-endian macros used mostly for filesystem | ||
22 | * bitmaps; these work on similar bit array layouts, but | ||
23 | * byte-oriented: | ||
24 | * |7...0|15...8|23...16|31...24|39...32|47...40|55...48|63...56| | ||
25 | * | ||
26 | * The main difference is that bit 3-5 (64b) or 3-4 (32b) in the bit | ||
27 | * number field needs to be reversed compared to the big-endian bit | ||
28 | * fields. This can be achieved by XOR with 0x38 (64b) or 0x18 (32b). | ||
29 | * | ||
30 | * This program is free software; you can redistribute it and/or | ||
31 | * modify it under the terms of the GNU General Public License | ||
32 | * as published by the Free Software Foundation; either version | ||
33 | * 2 of the License, or (at your option) any later version. | ||
34 | */ | ||
35 | |||
36 | #ifndef _ASM_POWERPC_BITOPS_H | ||
37 | #define _ASM_POWERPC_BITOPS_H | ||
38 | |||
39 | #ifdef __KERNEL__ | ||
40 | |||
41 | #include <linux/compiler.h> | ||
42 | #include <asm/atomic.h> | ||
43 | #include <asm/synch.h> | ||
44 | |||
45 | /* | ||
46 | * clear_bit doesn't imply a memory barrier | ||
47 | */ | ||
48 | #define smp_mb__before_clear_bit() smp_mb() | ||
49 | #define smp_mb__after_clear_bit() smp_mb() | ||
50 | |||
51 | #define BITOP_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) | ||
52 | #define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) | ||
53 | #define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7) | ||
54 | |||
55 | #ifdef CONFIG_PPC64 | ||
56 | #define LARXL "ldarx" | ||
57 | #define STCXL "stdcx." | ||
58 | #define CNTLZL "cntlzd" | ||
59 | #else | ||
60 | #define LARXL "lwarx" | ||
61 | #define STCXL "stwcx." | ||
62 | #define CNTLZL "cntlzw" | ||
63 | #endif | ||
64 | |||
65 | static __inline__ void set_bit(int nr, volatile unsigned long *addr) | ||
66 | { | ||
67 | unsigned long old; | ||
68 | unsigned long mask = BITOP_MASK(nr); | ||
69 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
70 | |||
71 | __asm__ __volatile__( | ||
72 | "1:" LARXL " %0,0,%3 # set_bit\n" | ||
73 | "or %0,%0,%2\n" | ||
74 | PPC405_ERR77(0,%3) | ||
75 | STCXL " %0,0,%3\n" | ||
76 | "bne- 1b" | ||
77 | : "=&r"(old), "=m"(*p) | ||
78 | : "r"(mask), "r"(p), "m"(*p) | ||
79 | : "cc" ); | ||
80 | } | ||
81 | |||
82 | static __inline__ void clear_bit(int nr, volatile unsigned long *addr) | ||
83 | { | ||
84 | unsigned long old; | ||
85 | unsigned long mask = BITOP_MASK(nr); | ||
86 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
87 | |||
88 | __asm__ __volatile__( | ||
89 | "1:" LARXL " %0,0,%3 # clear_bit\n" | ||
90 | "andc %0,%0,%2\n" | ||
91 | PPC405_ERR77(0,%3) | ||
92 | STCXL " %0,0,%3\n" | ||
93 | "bne- 1b" | ||
94 | : "=&r"(old), "=m"(*p) | ||
95 | : "r"(mask), "r"(p), "m"(*p) | ||
96 | : "cc" ); | ||
97 | } | ||
98 | |||
99 | static __inline__ void change_bit(int nr, volatile unsigned long *addr) | ||
100 | { | ||
101 | unsigned long old; | ||
102 | unsigned long mask = BITOP_MASK(nr); | ||
103 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
104 | |||
105 | __asm__ __volatile__( | ||
106 | "1:" LARXL " %0,0,%3 # change_bit\n" | ||
107 | "xor %0,%0,%2\n" | ||
108 | PPC405_ERR77(0,%3) | ||
109 | STCXL " %0,0,%3\n" | ||
110 | "bne- 1b" | ||
111 | : "=&r"(old), "=m"(*p) | ||
112 | : "r"(mask), "r"(p), "m"(*p) | ||
113 | : "cc" ); | ||
114 | } | ||
115 | |||
116 | static __inline__ int test_and_set_bit(unsigned long nr, | ||
117 | volatile unsigned long *addr) | ||
118 | { | ||
119 | unsigned long old, t; | ||
120 | unsigned long mask = BITOP_MASK(nr); | ||
121 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
122 | |||
123 | __asm__ __volatile__( | ||
124 | EIEIO_ON_SMP | ||
125 | "1:" LARXL " %0,0,%3 # test_and_set_bit\n" | ||
126 | "or %1,%0,%2 \n" | ||
127 | PPC405_ERR77(0,%3) | ||
128 | STCXL " %1,0,%3 \n" | ||
129 | "bne- 1b" | ||
130 | ISYNC_ON_SMP | ||
131 | : "=&r" (old), "=&r" (t) | ||
132 | : "r" (mask), "r" (p) | ||
133 | : "cc", "memory"); | ||
134 | |||
135 | return (old & mask) != 0; | ||
136 | } | ||
137 | |||
138 | static __inline__ int test_and_clear_bit(unsigned long nr, | ||
139 | volatile unsigned long *addr) | ||
140 | { | ||
141 | unsigned long old, t; | ||
142 | unsigned long mask = BITOP_MASK(nr); | ||
143 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
144 | |||
145 | __asm__ __volatile__( | ||
146 | EIEIO_ON_SMP | ||
147 | "1:" LARXL " %0,0,%3 # test_and_clear_bit\n" | ||
148 | "andc %1,%0,%2 \n" | ||
149 | PPC405_ERR77(0,%3) | ||
150 | STCXL " %1,0,%3 \n" | ||
151 | "bne- 1b" | ||
152 | ISYNC_ON_SMP | ||
153 | : "=&r" (old), "=&r" (t) | ||
154 | : "r" (mask), "r" (p) | ||
155 | : "cc", "memory"); | ||
156 | |||
157 | return (old & mask) != 0; | ||
158 | } | ||
159 | |||
160 | static __inline__ int test_and_change_bit(unsigned long nr, | ||
161 | volatile unsigned long *addr) | ||
162 | { | ||
163 | unsigned long old, t; | ||
164 | unsigned long mask = BITOP_MASK(nr); | ||
165 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
166 | |||
167 | __asm__ __volatile__( | ||
168 | EIEIO_ON_SMP | ||
169 | "1:" LARXL " %0,0,%3 # test_and_change_bit\n" | ||
170 | "xor %1,%0,%2 \n" | ||
171 | PPC405_ERR77(0,%3) | ||
172 | STCXL " %1,0,%3 \n" | ||
173 | "bne- 1b" | ||
174 | ISYNC_ON_SMP | ||
175 | : "=&r" (old), "=&r" (t) | ||
176 | : "r" (mask), "r" (p) | ||
177 | : "cc", "memory"); | ||
178 | |||
179 | return (old & mask) != 0; | ||
180 | } | ||
181 | |||
182 | static __inline__ void set_bits(unsigned long mask, unsigned long *addr) | ||
183 | { | ||
184 | unsigned long old; | ||
185 | |||
186 | __asm__ __volatile__( | ||
187 | "1:" LARXL " %0,0,%3 # set_bits\n" | ||
188 | "or %0,%0,%2\n" | ||
189 | STCXL " %0,0,%3\n" | ||
190 | "bne- 1b" | ||
191 | : "=&r" (old), "=m" (*addr) | ||
192 | : "r" (mask), "r" (addr), "m" (*addr) | ||
193 | : "cc"); | ||
194 | } | ||
195 | |||
196 | /* Non-atomic versions */ | ||
197 | static __inline__ int test_bit(unsigned long nr, | ||
198 | __const__ volatile unsigned long *addr) | ||
199 | { | ||
200 | return 1UL & (addr[BITOP_WORD(nr)] >> (nr & (BITS_PER_LONG-1))); | ||
201 | } | ||
202 | |||
203 | static __inline__ void __set_bit(unsigned long nr, | ||
204 | volatile unsigned long *addr) | ||
205 | { | ||
206 | unsigned long mask = BITOP_MASK(nr); | ||
207 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
208 | |||
209 | *p |= mask; | ||
210 | } | ||
211 | |||
212 | static __inline__ void __clear_bit(unsigned long nr, | ||
213 | volatile unsigned long *addr) | ||
214 | { | ||
215 | unsigned long mask = BITOP_MASK(nr); | ||
216 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
217 | |||
218 | *p &= ~mask; | ||
219 | } | ||
220 | |||
221 | static __inline__ void __change_bit(unsigned long nr, | ||
222 | volatile unsigned long *addr) | ||
223 | { | ||
224 | unsigned long mask = BITOP_MASK(nr); | ||
225 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
226 | |||
227 | *p ^= mask; | ||
228 | } | ||
229 | |||
230 | static __inline__ int __test_and_set_bit(unsigned long nr, | ||
231 | volatile unsigned long *addr) | ||
232 | { | ||
233 | unsigned long mask = BITOP_MASK(nr); | ||
234 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
235 | unsigned long old = *p; | ||
236 | |||
237 | *p = old | mask; | ||
238 | return (old & mask) != 0; | ||
239 | } | ||
240 | |||
241 | static __inline__ int __test_and_clear_bit(unsigned long nr, | ||
242 | volatile unsigned long *addr) | ||
243 | { | ||
244 | unsigned long mask = BITOP_MASK(nr); | ||
245 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
246 | unsigned long old = *p; | ||
247 | |||
248 | *p = old & ~mask; | ||
249 | return (old & mask) != 0; | ||
250 | } | ||
251 | |||
252 | static __inline__ int __test_and_change_bit(unsigned long nr, | ||
253 | volatile unsigned long *addr) | ||
254 | { | ||
255 | unsigned long mask = BITOP_MASK(nr); | ||
256 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
257 | unsigned long old = *p; | ||
258 | |||
259 | *p = old ^ mask; | ||
260 | return (old & mask) != 0; | ||
261 | } | ||
262 | |||
263 | /* | ||
264 | * Return the zero-based bit position (LE, not IBM bit numbering) of | ||
265 | * the most significant 1-bit in a double word. | ||
266 | */ | ||
267 | static __inline__ int __ilog2(unsigned long x) | ||
268 | { | ||
269 | int lz; | ||
270 | |||
271 | asm (CNTLZL " %0,%1" : "=r" (lz) : "r" (x)); | ||
272 | return BITS_PER_LONG - 1 - lz; | ||
273 | } | ||
274 | |||
275 | /* | ||
276 | * Determines the bit position of the least significant 0 bit in the | ||
277 | * specified double word. The returned bit position will be | ||
278 | * zero-based, starting from the right side (63/31 - 0). | ||
279 | */ | ||
280 | static __inline__ unsigned long ffz(unsigned long x) | ||
281 | { | ||
282 | /* no zero exists anywhere in the 8 byte area. */ | ||
283 | if ((x = ~x) == 0) | ||
284 | return BITS_PER_LONG; | ||
285 | |||
286 | /* | ||
287 | * Calculate the bit position of the least significant '1' bit in x | ||
288 | * (since x has been changed this will actually be the least significant | ||
289 | * '0' bit in the original x). Note: (x & -x) gives us a mask that | ||
290 | * is the least significant (RIGHT-most) 1-bit of the value in x. | ||
291 | */ | ||
292 | return __ilog2(x & -x); | ||
293 | } | ||
294 | |||
295 | static __inline__ int __ffs(unsigned long x) | ||
296 | { | ||
297 | return __ilog2(x & -x); | ||
298 | } | ||
299 | |||
300 | /* | ||
301 | * ffs: find first bit set. This is defined the same way as | ||
302 | * the libc and compiler builtin ffs routines, therefore | ||
303 | * differs in spirit from the above ffz (man ffs). | ||
304 | */ | ||
305 | static __inline__ int ffs(int x) | ||
306 | { | ||
307 | unsigned long i = (unsigned long)x; | ||
308 | return __ilog2(i & -i) + 1; | ||
309 | } | ||
310 | |||
311 | /* | ||
312 | * fls: find last (most-significant) bit set. | ||
313 | * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. | ||
314 | */ | ||
315 | static __inline__ int fls(unsigned int x) | ||
316 | { | ||
317 | int lz; | ||
318 | |||
319 | asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x)); | ||
320 | return 32 - lz; | ||
321 | } | ||
322 | |||
323 | /* | ||
324 | * hweightN: returns the hamming weight (i.e. the number | ||
325 | * of bits set) of a N-bit word | ||
326 | */ | ||
327 | #define hweight64(x) generic_hweight64(x) | ||
328 | #define hweight32(x) generic_hweight32(x) | ||
329 | #define hweight16(x) generic_hweight16(x) | ||
330 | #define hweight8(x) generic_hweight8(x) | ||
331 | |||
332 | #define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0) | ||
333 | unsigned long find_next_zero_bit(const unsigned long *addr, | ||
334 | unsigned long size, unsigned long offset); | ||
335 | /** | ||
336 | * find_first_bit - find the first set bit in a memory region | ||
337 | * @addr: The address to start the search at | ||
338 | * @size: The maximum size to search | ||
339 | * | ||
340 | * Returns the bit-number of the first set bit, not the number of the byte | ||
341 | * containing a bit. | ||
342 | */ | ||
343 | #define find_first_bit(addr, size) find_next_bit((addr), (size), 0) | ||
344 | unsigned long find_next_bit(const unsigned long *addr, | ||
345 | unsigned long size, unsigned long offset); | ||
346 | |||
347 | /* Little-endian versions */ | ||
348 | |||
349 | static __inline__ int test_le_bit(unsigned long nr, | ||
350 | __const__ unsigned long *addr) | ||
351 | { | ||
352 | __const__ unsigned char *tmp = (__const__ unsigned char *) addr; | ||
353 | return (tmp[nr >> 3] >> (nr & 7)) & 1; | ||
354 | } | ||
355 | |||
356 | #define __set_le_bit(nr, addr) \ | ||
357 | __set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) | ||
358 | #define __clear_le_bit(nr, addr) \ | ||
359 | __clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) | ||
360 | |||
361 | #define test_and_set_le_bit(nr, addr) \ | ||
362 | test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) | ||
363 | #define test_and_clear_le_bit(nr, addr) \ | ||
364 | test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) | ||
365 | |||
366 | #define __test_and_set_le_bit(nr, addr) \ | ||
367 | __test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) | ||
368 | #define __test_and_clear_le_bit(nr, addr) \ | ||
369 | __test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) | ||
370 | |||
371 | #define find_first_zero_le_bit(addr, size) find_next_zero_le_bit((addr), (size), 0) | ||
372 | unsigned long find_next_zero_le_bit(const unsigned long *addr, | ||
373 | unsigned long size, unsigned long offset); | ||
374 | |||
375 | /* Bitmap functions for the ext2 filesystem */ | ||
376 | |||
377 | #define ext2_set_bit(nr,addr) \ | ||
378 | __test_and_set_le_bit((nr), (unsigned long*)addr) | ||
379 | #define ext2_clear_bit(nr, addr) \ | ||
380 | __test_and_clear_le_bit((nr), (unsigned long*)addr) | ||
381 | |||
382 | #define ext2_set_bit_atomic(lock, nr, addr) \ | ||
383 | test_and_set_le_bit((nr), (unsigned long*)addr) | ||
384 | #define ext2_clear_bit_atomic(lock, nr, addr) \ | ||
385 | test_and_clear_le_bit((nr), (unsigned long*)addr) | ||
386 | |||
387 | #define ext2_test_bit(nr, addr) test_le_bit((nr),(unsigned long*)addr) | ||
388 | |||
389 | #define ext2_find_first_zero_bit(addr, size) \ | ||
390 | find_first_zero_le_bit((unsigned long*)addr, size) | ||
391 | #define ext2_find_next_zero_bit(addr, size, off) \ | ||
392 | find_next_zero_le_bit((unsigned long*)addr, size, off) | ||
393 | |||
394 | /* Bitmap functions for the minix filesystem. */ | ||
395 | |||
396 | #define minix_test_and_set_bit(nr,addr) \ | ||
397 | __test_and_set_le_bit(nr, (unsigned long *)addr) | ||
398 | #define minix_set_bit(nr,addr) \ | ||
399 | __set_le_bit(nr, (unsigned long *)addr) | ||
400 | #define minix_test_and_clear_bit(nr,addr) \ | ||
401 | __test_and_clear_le_bit(nr, (unsigned long *)addr) | ||
402 | #define minix_test_bit(nr,addr) \ | ||
403 | test_le_bit(nr, (unsigned long *)addr) | ||
404 | |||
405 | #define minix_find_first_zero_bit(addr,size) \ | ||
406 | find_first_zero_le_bit((unsigned long *)addr, size) | ||
407 | |||
408 | /* | ||
409 | * Every architecture must define this function. It's the fastest | ||
410 | * way of searching a 140-bit bitmap where the first 100 bits are | ||
411 | * unlikely to be set. It's guaranteed that at least one of the 140 | ||
412 | * bits is cleared. | ||
413 | */ | ||
414 | static inline int sched_find_first_bit(const unsigned long *b) | ||
415 | { | ||
416 | #ifdef CONFIG_PPC64 | ||
417 | if (unlikely(b[0])) | ||
418 | return __ffs(b[0]); | ||
419 | if (unlikely(b[1])) | ||
420 | return __ffs(b[1]) + 64; | ||
421 | return __ffs(b[2]) + 128; | ||
422 | #else | ||
423 | if (unlikely(b[0])) | ||
424 | return __ffs(b[0]); | ||
425 | if (unlikely(b[1])) | ||
426 | return __ffs(b[1]) + 32; | ||
427 | if (unlikely(b[2])) | ||
428 | return __ffs(b[2]) + 64; | ||
429 | if (b[3]) | ||
430 | return __ffs(b[3]) + 96; | ||
431 | return __ffs(b[4]) + 128; | ||
432 | #endif | ||
433 | } | ||
434 | |||
435 | #endif /* __KERNEL__ */ | ||
436 | |||
437 | #endif /* _ASM_POWERPC_BITOPS_H */ | ||
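
The BITOP_LE_SWIZZLE constant, (BITS_PER_LONG-1) & ~0x7, evaluates to 0x38 on 64-bit and 0x18 on 32-bit: XOR-ing it into a bit number swaps between the byte-oriented little-endian layout and the native big-endian unsigned-long layout, exactly as the header comment above describes. A small sketch of the mapping (BITS_PER_LONG pinned to 64 for the example, since the identity is about a big-endian 64-bit word):

#include <stdio.h>

#define BITS_PER_LONG 64	/* assume ppc64 for this example */
#define BITOP_LE_SWIZZLE ((BITS_PER_LONG - 1) & ~0x7)

int main(void)
{
	/* On a 64-bit big-endian machine, LE bit 0 lives in the most
	 * significant byte of the word, so its native bit number is 56;
	 * LE bit 8 is bit 0 of byte 1, i.e. native bit 48, and so on. */
	for (int nr = 0; nr < 16; nr++)
		printf("le bit %2d -> native bit %2d\n",
		       nr, nr ^ BITOP_LE_SWIZZLE);
	return 0;
}
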
diff --git a/include/asm-ppc/bitops.h b/include/asm-ppc/bitops.h
deleted file mode 100644
index e30f536fd830..000000000000
--- a/include/asm-ppc/bitops.h
+++ /dev/null
@@ -1,460 +0,0 @@
1 | /* | ||
2 | * bitops.h: Bit string operations on the ppc | ||
3 | */ | ||
4 | |||
5 | #ifdef __KERNEL__ | ||
6 | #ifndef _PPC_BITOPS_H | ||
7 | #define _PPC_BITOPS_H | ||
8 | |||
9 | #include <linux/config.h> | ||
10 | #include <linux/compiler.h> | ||
11 | #include <asm/byteorder.h> | ||
12 | #include <asm/atomic.h> | ||
13 | |||
14 | /* | ||
15 | * The test_and_*_bit operations are taken to imply a memory barrier | ||
16 | * on SMP systems. | ||
17 | */ | ||
18 | #ifdef CONFIG_SMP | ||
19 | #define SMP_WMB "eieio\n" | ||
20 | #define SMP_MB "\nsync" | ||
21 | #else | ||
22 | #define SMP_WMB | ||
23 | #define SMP_MB | ||
24 | #endif /* CONFIG_SMP */ | ||
25 | |||
26 | static __inline__ void set_bit(int nr, volatile unsigned long * addr) | ||
27 | { | ||
28 | unsigned long old; | ||
29 | unsigned long mask = 1 << (nr & 0x1f); | ||
30 | unsigned long *p = ((unsigned long *)addr) + (nr >> 5); | ||
31 | |||
32 | __asm__ __volatile__("\n\ | ||
33 | 1: lwarx %0,0,%3 \n\ | ||
34 | or %0,%0,%2 \n" | ||
35 | PPC405_ERR77(0,%3) | ||
36 | " stwcx. %0,0,%3 \n\ | ||
37 | bne- 1b" | ||
38 | : "=&r" (old), "=m" (*p) | ||
39 | : "r" (mask), "r" (p), "m" (*p) | ||
40 | : "cc" ); | ||
41 | } | ||
42 | |||
43 | /* | ||
44 | * non-atomic version | ||
45 | */ | ||
46 | static __inline__ void __set_bit(int nr, volatile unsigned long *addr) | ||
47 | { | ||
48 | unsigned long mask = 1 << (nr & 0x1f); | ||
49 | unsigned long *p = ((unsigned long *)addr) + (nr >> 5); | ||
50 | |||
51 | *p |= mask; | ||
52 | } | ||
53 | |||
54 | /* | ||
55 | * clear_bit doesn't imply a memory barrier | ||
56 | */ | ||
57 | #define smp_mb__before_clear_bit() smp_mb() | ||
58 | #define smp_mb__after_clear_bit() smp_mb() | ||
59 | |||
60 | static __inline__ void clear_bit(int nr, volatile unsigned long *addr) | ||
61 | { | ||
62 | unsigned long old; | ||
63 | unsigned long mask = 1 << (nr & 0x1f); | ||
64 | unsigned long *p = ((unsigned long *)addr) + (nr >> 5); | ||
65 | |||
66 | __asm__ __volatile__("\n\ | ||
67 | 1: lwarx %0,0,%3 \n\ | ||
68 | andc %0,%0,%2 \n" | ||
69 | PPC405_ERR77(0,%3) | ||
70 | " stwcx. %0,0,%3 \n\ | ||
71 | bne- 1b" | ||
72 | : "=&r" (old), "=m" (*p) | ||
73 | : "r" (mask), "r" (p), "m" (*p) | ||
74 | : "cc"); | ||
75 | } | ||
76 | |||
77 | /* | ||
78 | * non-atomic version | ||
79 | */ | ||
80 | static __inline__ void __clear_bit(int nr, volatile unsigned long *addr) | ||
81 | { | ||
82 | unsigned long mask = 1 << (nr & 0x1f); | ||
83 | unsigned long *p = ((unsigned long *)addr) + (nr >> 5); | ||
84 | |||
85 | *p &= ~mask; | ||
86 | } | ||
87 | |||
88 | static __inline__ void change_bit(int nr, volatile unsigned long *addr) | ||
89 | { | ||
90 | unsigned long old; | ||
91 | unsigned long mask = 1 << (nr & 0x1f); | ||
92 | unsigned long *p = ((unsigned long *)addr) + (nr >> 5); | ||
93 | |||
94 | __asm__ __volatile__("\n\ | ||
95 | 1: lwarx %0,0,%3 \n\ | ||
96 | xor %0,%0,%2 \n" | ||
97 | PPC405_ERR77(0,%3) | ||
98 | " stwcx. %0,0,%3 \n\ | ||
99 | bne- 1b" | ||
100 | : "=&r" (old), "=m" (*p) | ||
101 | : "r" (mask), "r" (p), "m" (*p) | ||
102 | : "cc"); | ||
103 | } | ||
104 | |||
105 | /* | ||
106 | * non-atomic version | ||
107 | */ | ||
108 | static __inline__ void __change_bit(int nr, volatile unsigned long *addr) | ||
109 | { | ||
110 | unsigned long mask = 1 << (nr & 0x1f); | ||
111 | unsigned long *p = ((unsigned long *)addr) + (nr >> 5); | ||
112 | |||
113 | *p ^= mask; | ||
114 | } | ||
115 | |||
116 | /* | ||
117 | * test_and_*_bit do imply a memory barrier (?) | ||
118 | */ | ||
119 | static __inline__ int test_and_set_bit(int nr, volatile unsigned long *addr) | ||
120 | { | ||
121 | unsigned int old, t; | ||
122 | unsigned int mask = 1 << (nr & 0x1f); | ||
123 | volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5); | ||
124 | |||
125 | __asm__ __volatile__(SMP_WMB "\n\ | ||
126 | 1: lwarx %0,0,%4 \n\ | ||
127 | or %1,%0,%3 \n" | ||
128 | PPC405_ERR77(0,%4) | ||
129 | " stwcx. %1,0,%4 \n\ | ||
130 | bne 1b" | ||
131 | SMP_MB | ||
132 | : "=&r" (old), "=&r" (t), "=m" (*p) | ||
133 | : "r" (mask), "r" (p), "m" (*p) | ||
134 | : "cc", "memory"); | ||
135 | |||
136 | return (old & mask) != 0; | ||
137 | } | ||
138 | |||
139 | /* | ||
140 | * non-atomic version | ||
141 | */ | ||
142 | static __inline__ int __test_and_set_bit(int nr, volatile unsigned long *addr) | ||
143 | { | ||
144 | unsigned long mask = 1 << (nr & 0x1f); | ||
145 | unsigned long *p = ((unsigned long *)addr) + (nr >> 5); | ||
146 | unsigned long old = *p; | ||
147 | |||
148 | *p = old | mask; | ||
149 | return (old & mask) != 0; | ||
150 | } | ||
151 | |||
152 | static __inline__ int test_and_clear_bit(int nr, volatile unsigned long *addr) | ||
153 | { | ||
154 | unsigned int old, t; | ||
155 | unsigned int mask = 1 << (nr & 0x1f); | ||
156 | volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5); | ||
157 | |||
158 | __asm__ __volatile__(SMP_WMB "\n\ | ||
159 | 1: lwarx %0,0,%4 \n\ | ||
160 | andc %1,%0,%3 \n" | ||
161 | PPC405_ERR77(0,%4) | ||
162 | " stwcx. %1,0,%4 \n\ | ||
163 | bne 1b" | ||
164 | SMP_MB | ||
165 | : "=&r" (old), "=&r" (t), "=m" (*p) | ||
166 | : "r" (mask), "r" (p), "m" (*p) | ||
167 | : "cc", "memory"); | ||
168 | |||
169 | return (old & mask) != 0; | ||
170 | } | ||
171 | |||
172 | /* | ||
173 | * non-atomic version | ||
174 | */ | ||
175 | static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long *addr) | ||
176 | { | ||
177 | unsigned long mask = 1 << (nr & 0x1f); | ||
178 | unsigned long *p = ((unsigned long *)addr) + (nr >> 5); | ||
179 | unsigned long old = *p; | ||
180 | |||
181 | *p = old & ~mask; | ||
182 | return (old & mask) != 0; | ||
183 | } | ||
184 | |||
185 | static __inline__ int test_and_change_bit(int nr, volatile unsigned long *addr) | ||
186 | { | ||
187 | unsigned int old, t; | ||
188 | unsigned int mask = 1 << (nr & 0x1f); | ||
189 | volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5); | ||
190 | |||
191 | __asm__ __volatile__(SMP_WMB "\n\ | ||
192 | 1: lwarx %0,0,%4 \n\ | ||
193 | xor %1,%0,%3 \n" | ||
194 | PPC405_ERR77(0,%4) | ||
195 | " stwcx. %1,0,%4 \n\ | ||
196 | bne 1b" | ||
197 | SMP_MB | ||
198 | : "=&r" (old), "=&r" (t), "=m" (*p) | ||
199 | : "r" (mask), "r" (p), "m" (*p) | ||
200 | : "cc", "memory"); | ||
201 | |||
202 | return (old & mask) != 0; | ||
203 | } | ||
204 | |||
205 | /* | ||
206 | * non-atomic version | ||
207 | */ | ||
208 | static __inline__ int __test_and_change_bit(int nr, volatile unsigned long *addr) | ||
209 | { | ||
210 | unsigned long mask = 1 << (nr & 0x1f); | ||
211 | unsigned long *p = ((unsigned long *)addr) + (nr >> 5); | ||
212 | unsigned long old = *p; | ||
213 | |||
214 | *p = old ^ mask; | ||
215 | return (old & mask) != 0; | ||
216 | } | ||
217 | |||
218 | static __inline__ int test_bit(int nr, __const__ volatile unsigned long *addr) | ||
219 | { | ||
220 | return ((addr[nr >> 5] >> (nr & 0x1f)) & 1) != 0; | ||
221 | } | ||
222 | |||
223 | /* Return the bit position of the most significant 1 bit in a word */ | ||
224 | static __inline__ int __ilog2(unsigned long x) | ||
225 | { | ||
226 | int lz; | ||
227 | |||
228 | asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x)); | ||
229 | return 31 - lz; | ||
230 | } | ||
231 | |||
232 | static __inline__ int ffz(unsigned long x) | ||
233 | { | ||
234 | if ((x = ~x) == 0) | ||
235 | return 32; | ||
236 | return __ilog2(x & -x); | ||
237 | } | ||
238 | |||
239 | static inline int __ffs(unsigned long x) | ||
240 | { | ||
241 | return __ilog2(x & -x); | ||
242 | } | ||
243 | |||
244 | /* | ||
245 | * ffs: find first bit set. This is defined the same way as | ||
246 | * the libc and compiler builtin ffs routines, therefore | ||
247 | * differs in spirit from the above ffz (man ffs). | ||
248 | */ | ||
249 | static __inline__ int ffs(int x) | ||
250 | { | ||
251 | return __ilog2(x & -x) + 1; | ||
252 | } | ||
253 | |||
254 | /* | ||
255 | * fls: find last (most-significant) bit set. | ||
256 | * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. | ||
257 | */ | ||
258 | static __inline__ int fls(unsigned int x) | ||
259 | { | ||
260 | int lz; | ||
261 | |||
262 | asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x)); | ||
263 | return 32 - lz; | ||
264 | } | ||
265 | |||
266 | /* | ||
267 | * hweightN: returns the hamming weight (i.e. the number | ||
268 | * of bits set) of a N-bit word | ||
269 | */ | ||
270 | |||
271 | #define hweight32(x) generic_hweight32(x) | ||
272 | #define hweight16(x) generic_hweight16(x) | ||
273 | #define hweight8(x) generic_hweight8(x) | ||
274 | |||
275 | /* | ||
276 | * Find the first bit set in a 140-bit bitmap. | ||
277 | * The first 100 bits are unlikely to be set. | ||
278 | */ | ||
279 | static inline int sched_find_first_bit(const unsigned long *b) | ||
280 | { | ||
281 | if (unlikely(b[0])) | ||
282 | return __ffs(b[0]); | ||
283 | if (unlikely(b[1])) | ||
284 | return __ffs(b[1]) + 32; | ||
285 | if (unlikely(b[2])) | ||
286 | return __ffs(b[2]) + 64; | ||
287 | if (b[3]) | ||
288 | return __ffs(b[3]) + 96; | ||
289 | return __ffs(b[4]) + 128; | ||
290 | } | ||
291 | |||
292 | /** | ||
293 | * find_next_bit - find the next set bit in a memory region | ||
294 | * @addr: The address to base the search on | ||
295 | * @offset: The bitnumber to start searching at | ||
296 | * @size: The maximum size to search | ||
297 | */ | ||
298 | static __inline__ unsigned long find_next_bit(const unsigned long *addr, | ||
299 | unsigned long size, unsigned long offset) | ||
300 | { | ||
301 | unsigned int *p = ((unsigned int *) addr) + (offset >> 5); | ||
302 | unsigned int result = offset & ~31UL; | ||
303 | unsigned int tmp; | ||
304 | |||
305 | if (offset >= size) | ||
306 | return size; | ||
307 | size -= result; | ||
308 | offset &= 31UL; | ||
309 | if (offset) { | ||
310 | tmp = *p++; | ||
311 | tmp &= ~0UL << offset; | ||
312 | if (size < 32) | ||
313 | goto found_first; | ||
314 | if (tmp) | ||
315 | goto found_middle; | ||
316 | size -= 32; | ||
317 | result += 32; | ||
318 | } | ||
319 | while (size >= 32) { | ||
320 | if ((tmp = *p++) != 0) | ||
321 | goto found_middle; | ||
322 | result += 32; | ||
323 | size -= 32; | ||
324 | } | ||
325 | if (!size) | ||
326 | return result; | ||
327 | tmp = *p; | ||
328 | |||
329 | found_first: | ||
330 | tmp &= ~0UL >> (32 - size); | ||
331 | if (tmp == 0UL) /* Are any bits set? */ | ||
332 | return result + size; /* Nope. */ | ||
333 | found_middle: | ||
334 | return result + __ffs(tmp); | ||
335 | } | ||
336 | |||
337 | /** | ||
338 | * find_first_bit - find the first set bit in a memory region | ||
339 | * @addr: The address to start the search at | ||
340 | * @size: The maximum size to search | ||
341 | * | ||
342 | * Returns the bit-number of the first set bit, not the number of the byte | ||
343 | * containing a bit. | ||
344 | */ | ||
345 | #define find_first_bit(addr, size) \ | ||
346 | find_next_bit((addr), (size), 0) | ||
347 | |||
348 | /* | ||
349 | * This implementation of find_{first,next}_zero_bit was stolen from | ||
350 | * Linus' asm-alpha/bitops.h. | ||
351 | */ | ||
352 | #define find_first_zero_bit(addr, size) \ | ||
353 | find_next_zero_bit((addr), (size), 0) | ||
354 | |||
355 | static __inline__ unsigned long find_next_zero_bit(const unsigned long *addr, | ||
356 | unsigned long size, unsigned long offset) | ||
357 | { | ||
358 | unsigned int * p = ((unsigned int *) addr) + (offset >> 5); | ||
359 | unsigned int result = offset & ~31UL; | ||
360 | unsigned int tmp; | ||
361 | |||
362 | if (offset >= size) | ||
363 | return size; | ||
364 | size -= result; | ||
365 | offset &= 31UL; | ||
366 | if (offset) { | ||
367 | tmp = *p++; | ||
368 | tmp |= ~0UL >> (32-offset); | ||
369 | if (size < 32) | ||
370 | goto found_first; | ||
371 | if (tmp != ~0U) | ||
372 | goto found_middle; | ||
373 | size -= 32; | ||
374 | result += 32; | ||
375 | } | ||
376 | while (size >= 32) { | ||
377 | if ((tmp = *p++) != ~0U) | ||
378 | goto found_middle; | ||
379 | result += 32; | ||
380 | size -= 32; | ||
381 | } | ||
382 | if (!size) | ||
383 | return result; | ||
384 | tmp = *p; | ||
385 | found_first: | ||
386 | tmp |= ~0UL << size; | ||
387 | if (tmp == ~0UL) /* Are any bits zero? */ | ||
388 | return result + size; /* Nope. */ | ||
389 | found_middle: | ||
390 | return result + ffz(tmp); | ||
391 | } | ||
392 | |||
393 | |||
394 | #define ext2_set_bit(nr, addr) __test_and_set_bit((nr) ^ 0x18, (unsigned long *)(addr)) | ||
395 | #define ext2_set_bit_atomic(lock, nr, addr) test_and_set_bit((nr) ^ 0x18, (unsigned long *)(addr)) | ||
396 | #define ext2_clear_bit(nr, addr) __test_and_clear_bit((nr) ^ 0x18, (unsigned long *)(addr)) | ||
397 | #define ext2_clear_bit_atomic(lock, nr, addr) test_and_clear_bit((nr) ^ 0x18, (unsigned long *)(addr)) | ||
398 | |||
399 | static __inline__ int ext2_test_bit(int nr, __const__ void * addr) | ||
400 | { | ||
401 | __const__ unsigned char *ADDR = (__const__ unsigned char *) addr; | ||
402 | |||
403 | return (ADDR[nr >> 3] >> (nr & 7)) & 1; | ||
404 | } | ||
405 | |||
406 | /* | ||
407 | * This implementation of ext2_find_{first,next}_zero_bit was stolen from | ||
408 | * Linus' asm-alpha/bitops.h and modified for a big-endian machine. | ||
409 | */ | ||
410 | |||
411 | #define ext2_find_first_zero_bit(addr, size) \ | ||
412 | ext2_find_next_zero_bit((addr), (size), 0) | ||
413 | |||
414 | static __inline__ unsigned long ext2_find_next_zero_bit(const void *addr, | ||
415 | unsigned long size, unsigned long offset) | ||
416 | { | ||
417 | unsigned int *p = ((unsigned int *) addr) + (offset >> 5); | ||
418 | unsigned int result = offset & ~31UL; | ||
419 | unsigned int tmp; | ||
420 | |||
421 | if (offset >= size) | ||
422 | return size; | ||
423 | size -= result; | ||
424 | offset &= 31UL; | ||
425 | if (offset) { | ||
426 | tmp = cpu_to_le32p(p++); | ||
427 | tmp |= ~0UL >> (32-offset); | ||
428 | if (size < 32) | ||
429 | goto found_first; | ||
430 | if (tmp != ~0U) | ||
431 | goto found_middle; | ||
432 | size -= 32; | ||
433 | result += 32; | ||
434 | } | ||
435 | while (size >= 32) { | ||
436 | if ((tmp = cpu_to_le32p(p++)) != ~0U) | ||
437 | goto found_middle; | ||
438 | result += 32; | ||
439 | size -= 32; | ||
440 | } | ||
441 | if (!size) | ||
442 | return result; | ||
443 | tmp = cpu_to_le32p(p); | ||
444 | found_first: | ||
445 | tmp |= ~0U << size; | ||
446 | if (tmp == ~0UL) /* Are any bits zero? */ | ||
447 | return result + size; /* Nope. */ | ||
448 | found_middle: | ||
449 | return result + ffz(tmp); | ||
450 | } | ||
451 | |||
452 | /* Bitmap functions for the minix filesystem. */ | ||
453 | #define minix_test_and_set_bit(nr,addr) ext2_set_bit(nr,addr) | ||
454 | #define minix_set_bit(nr,addr) ((void)ext2_set_bit(nr,addr)) | ||
455 | #define minix_test_and_clear_bit(nr,addr) ext2_clear_bit(nr,addr) | ||
456 | #define minix_test_bit(nr,addr) ext2_test_bit(nr,addr) | ||
457 | #define minix_find_first_zero_bit(addr,size) ext2_find_first_zero_bit(addr,size) | ||
458 | |||
459 | #endif /* _PPC_BITOPS_H */ | ||
460 | #endif /* __KERNEL__ */ | ||
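
Both deleted headers (and the merged one) derive ffz and __ffs from a single identity: x & -x isolates the lowest set bit, and a count-leading-zeros instruction (cntlzw/cntlzd) converts that one-hot value into a position. A hedged portable check of the identity, using gcc's __builtin_clzl in place of the inline assembly:

#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

static int ilog2_ul(unsigned long x)	/* position of highest set bit */
{
	return (int)(BITS_PER_LONG - 1) - __builtin_clzl(x);
}

static unsigned long ffz_ul(unsigned long x)	/* lowest clear bit */
{
	x = ~x;
	if (x == 0)
		return BITS_PER_LONG;	/* no zero bit anywhere */
	return ilog2_ul(x & -x);	/* x & -x isolates the lowest 1 */
}

int main(void)
{
	printf("%lu\n", ffz_ul(0x0fUL));	/* lowest clear bit: 4 */
	printf("%lu\n", ffz_ul(~0UL));		/* all ones: BITS_PER_LONG */
	return 0;
}
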
diff --git a/include/asm-ppc64/bitops.h b/include/asm-ppc64/bitops.h
deleted file mode 100644
index dbfa42ef4a99..000000000000
--- a/include/asm-ppc64/bitops.h
+++ /dev/null
@@ -1,360 +0,0 @@
1 | /* | ||
2 | * PowerPC64 atomic bit operations. | ||
3 | * Dave Engebretsen, Todd Inglett, Don Reed, Pat McCarthy, Peter Bergner, | ||
4 | * Anton Blanchard | ||
5 | * | ||
6 | * Originally taken from the 32b PPC code. Modified to use 64b values for | ||
7 | * the various counters & memory references. | ||
8 | * | ||
9 | * Bitops are odd when viewed on big-endian systems. They were designed | ||
10 | * on little endian so the size of the bitset doesn't matter (low order bytes | ||
11 | * come first) as long as the bit in question is valid. | ||
12 | * | ||
13 | * Bits are "tested" often using the C expression (val & (1<<nr)) so we do | ||
14 | * our best to stay compatible with that. The assumption is that val will | ||
15 | * be unsigned long for such tests. As such, we assume the bits are stored | ||
16 | * as an array of unsigned long (the usual case is a single unsigned long, | ||
17 | * of course). Here's an example bitset with bit numbering: | ||
18 | * | ||
19 | * |63..........0|127........64|191.......128|255.......192| | ||
20 | * | ||
21 | * This leads to a problem. If an int, short or char is passed as a bitset | ||
22 | * it will be a bad memory reference since we want to store in chunks | ||
23 | * of unsigned long (64 bits here) size. | ||
24 | * | ||
25 | * There are a few little-endian macros used mostly for filesystem bitmaps, | ||
26 | * these work on similar bit array layouts, but byte-oriented: | ||
27 | * | ||
28 | * |7...0|15...8|23...16|31...24|39...32|47...40|55...48|63...56| | ||
29 | * | ||
30 | * The main difference is that bit 3-5 in the bit number field needs to be | ||
31 | * reversed compared to the big-endian bit fields. This can be achieved | ||
32 | * by XOR with 0b111000 (0x38). | ||
33 | * | ||
34 | * This program is free software; you can redistribute it and/or | ||
35 | * modify it under the terms of the GNU General Public License | ||
36 | * as published by the Free Software Foundation; either version | ||
37 | * 2 of the License, or (at your option) any later version. | ||
38 | */ | ||
39 | |||
40 | #ifndef _PPC64_BITOPS_H | ||
41 | #define _PPC64_BITOPS_H | ||
42 | |||
43 | #ifdef __KERNEL__ | ||
44 | |||
45 | #include <asm/synch.h> | ||
46 | |||
47 | /* | ||
48 | * clear_bit doesn't imply a memory barrier | ||
49 | */ | ||
50 | #define smp_mb__before_clear_bit() smp_mb() | ||
51 | #define smp_mb__after_clear_bit() smp_mb() | ||
52 | |||
53 | static __inline__ int test_bit(unsigned long nr, __const__ volatile unsigned long *addr) | ||
54 | { | ||
55 | return (1UL & (addr[nr >> 6] >> (nr & 63))); | ||
56 | } | ||
57 | |||
58 | static __inline__ void set_bit(unsigned long nr, volatile unsigned long *addr) | ||
59 | { | ||
60 | unsigned long old; | ||
61 | unsigned long mask = 1UL << (nr & 0x3f); | ||
62 | unsigned long *p = ((unsigned long *)addr) + (nr >> 6); | ||
63 | |||
64 | __asm__ __volatile__( | ||
65 | "1: ldarx %0,0,%3 # set_bit\n\ | ||
66 | or %0,%0,%2\n\ | ||
67 | stdcx. %0,0,%3\n\ | ||
68 | bne- 1b" | ||
69 | : "=&r" (old), "=m" (*p) | ||
70 | : "r" (mask), "r" (p), "m" (*p) | ||
71 | : "cc"); | ||
72 | } | ||
73 | |||
74 | static __inline__ void clear_bit(unsigned long nr, volatile unsigned long *addr) | ||
75 | { | ||
76 | unsigned long old; | ||
77 | unsigned long mask = 1UL << (nr & 0x3f); | ||
78 | unsigned long *p = ((unsigned long *)addr) + (nr >> 6); | ||
79 | |||
80 | __asm__ __volatile__( | ||
81 | "1: ldarx %0,0,%3 # clear_bit\n\ | ||
82 | andc %0,%0,%2\n\ | ||
83 | stdcx. %0,0,%3\n\ | ||
84 | bne- 1b" | ||
85 | : "=&r" (old), "=m" (*p) | ||
86 | : "r" (mask), "r" (p), "m" (*p) | ||
87 | : "cc"); | ||
88 | } | ||
89 | |||
90 | static __inline__ void change_bit(unsigned long nr, volatile unsigned long *addr) | ||
91 | { | ||
92 | unsigned long old; | ||
93 | unsigned long mask = 1UL << (nr & 0x3f); | ||
94 | unsigned long *p = ((unsigned long *)addr) + (nr >> 6); | ||
95 | |||
96 | __asm__ __volatile__( | ||
97 | "1: ldarx %0,0,%3 # change_bit\n\ | ||
98 | xor %0,%0,%2\n\ | ||
99 | stdcx. %0,0,%3\n\ | ||
100 | bne- 1b" | ||
101 | : "=&r" (old), "=m" (*p) | ||
102 | : "r" (mask), "r" (p), "m" (*p) | ||
103 | : "cc"); | ||
104 | } | ||
105 | |||
106 | static __inline__ int test_and_set_bit(unsigned long nr, volatile unsigned long *addr) | ||
107 | { | ||
108 | unsigned long old, t; | ||
109 | unsigned long mask = 1UL << (nr & 0x3f); | ||
110 | unsigned long *p = ((unsigned long *)addr) + (nr >> 6); | ||
111 | |||
112 | __asm__ __volatile__( | ||
113 | EIEIO_ON_SMP | ||
114 | "1: ldarx %0,0,%3 # test_and_set_bit\n\ | ||
115 | or %1,%0,%2 \n\ | ||
116 | stdcx. %1,0,%3 \n\ | ||
117 | bne- 1b" | ||
118 | ISYNC_ON_SMP | ||
119 | : "=&r" (old), "=&r" (t) | ||
120 | : "r" (mask), "r" (p) | ||
121 | : "cc", "memory"); | ||
122 | |||
123 | return (old & mask) != 0; | ||
124 | } | ||
125 | |||
126 | static __inline__ int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) | ||
127 | { | ||
128 | unsigned long old, t; | ||
129 | unsigned long mask = 1UL << (nr & 0x3f); | ||
130 | unsigned long *p = ((unsigned long *)addr) + (nr >> 6); | ||
131 | |||
132 | __asm__ __volatile__( | ||
133 | EIEIO_ON_SMP | ||
134 | "1: ldarx %0,0,%3 # test_and_clear_bit\n\ | ||
135 | andc %1,%0,%2\n\ | ||
136 | stdcx. %1,0,%3\n\ | ||
137 | bne- 1b" | ||
138 | ISYNC_ON_SMP | ||
139 | : "=&r" (old), "=&r" (t) | ||
140 | : "r" (mask), "r" (p) | ||
141 | : "cc", "memory"); | ||
142 | |||
143 | return (old & mask) != 0; | ||
144 | } | ||
145 | |||
146 | static __inline__ int test_and_change_bit(unsigned long nr, volatile unsigned long *addr) | ||
147 | { | ||
148 | unsigned long old, t; | ||
149 | unsigned long mask = 1UL << (nr & 0x3f); | ||
150 | unsigned long *p = ((unsigned long *)addr) + (nr >> 6); | ||
151 | |||
152 | __asm__ __volatile__( | ||
153 | EIEIO_ON_SMP | ||
154 | "1: ldarx %0,0,%3 # test_and_change_bit\n\ | ||
155 | xor %1,%0,%2\n\ | ||
156 | stdcx. %1,0,%3\n\ | ||
157 | bne- 1b" | ||
158 | ISYNC_ON_SMP | ||
159 | : "=&r" (old), "=&r" (t) | ||
160 | : "r" (mask), "r" (p) | ||
161 | : "cc", "memory"); | ||
162 | |||
163 | return (old & mask) != 0; | ||
164 | } | ||
165 | |||
166 | static __inline__ void set_bits(unsigned long mask, unsigned long *addr) | ||
167 | { | ||
168 | unsigned long old; | ||
169 | |||
170 | __asm__ __volatile__( | ||
171 | "1: ldarx %0,0,%3 # set_bit\n\ | ||
172 | or %0,%0,%2\n\ | ||
173 | stdcx. %0,0,%3\n\ | ||
174 | bne- 1b" | ||
175 | : "=&r" (old), "=m" (*addr) | ||
176 | : "r" (mask), "r" (addr), "m" (*addr) | ||
177 | : "cc"); | ||
178 | } | ||
179 | |||
180 | /* | ||
181 | * non-atomic versions | ||
182 | */ | ||
183 | static __inline__ void __set_bit(unsigned long nr, volatile unsigned long *addr) | ||
184 | { | ||
185 | unsigned long mask = 1UL << (nr & 0x3f); | ||
186 | unsigned long *p = ((unsigned long *)addr) + (nr >> 6); | ||
187 | |||
188 | *p |= mask; | ||
189 | } | ||
190 | |||
191 | static __inline__ void __clear_bit(unsigned long nr, volatile unsigned long *addr) | ||
192 | { | ||
193 | unsigned long mask = 1UL << (nr & 0x3f); | ||
194 | unsigned long *p = ((unsigned long *)addr) + (nr >> 6); | ||
195 | |||
196 | *p &= ~mask; | ||
197 | } | ||
198 | |||
199 | static __inline__ void __change_bit(unsigned long nr, volatile unsigned long *addr) | ||
200 | { | ||
201 | unsigned long mask = 1UL << (nr & 0x3f); | ||
202 | unsigned long *p = ((unsigned long *)addr) + (nr >> 6); | ||
203 | |||
204 | *p ^= mask; | ||
205 | } | ||
206 | |||
207 | static __inline__ int __test_and_set_bit(unsigned long nr, volatile unsigned long *addr) | ||
208 | { | ||
209 | unsigned long mask = 1UL << (nr & 0x3f); | ||
210 | unsigned long *p = ((unsigned long *)addr) + (nr >> 6); | ||
211 | unsigned long old = *p; | ||
212 | |||
213 | *p = old | mask; | ||
214 | return (old & mask) != 0; | ||
215 | } | ||
216 | |||
217 | static __inline__ int __test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) | ||
218 | { | ||
219 | unsigned long mask = 1UL << (nr & 0x3f); | ||
220 | unsigned long *p = ((unsigned long *)addr) + (nr >> 6); | ||
221 | unsigned long old = *p; | ||
222 | |||
223 | *p = old & ~mask; | ||
224 | return (old & mask) != 0; | ||
225 | } | ||
226 | |||
227 | static __inline__ int __test_and_change_bit(unsigned long nr, volatile unsigned long *addr) | ||
228 | { | ||
229 | unsigned long mask = 1UL << (nr & 0x3f); | ||
230 | unsigned long *p = ((unsigned long *)addr) + (nr >> 6); | ||
231 | unsigned long old = *p; | ||
232 | |||
233 | *p = old ^ mask; | ||
234 | return (old & mask) != 0; | ||
235 | } | ||
236 | |||
237 | /* | ||
238 | * Return the zero-based bit position (from RIGHT TO LEFT, 63 -> 0) of the | ||
239 | * most significant (left-most) 1-bit in a double word. | ||
240 | */ | ||
241 | static __inline__ int __ilog2(unsigned long x) | ||
242 | { | ||
243 | int lz; | ||
244 | |||
245 | asm ("cntlzd %0,%1" : "=r" (lz) : "r" (x)); | ||
246 | return 63 - lz; | ||
247 | } | ||
248 | |||
249 | /* | ||
250 | * Determines the bit position of the least significant (rightmost) 0 bit | ||
251 | * in the specified double word. The returned bit position will be zero-based, | ||
252 | * starting from the right side (63 - 0). | ||
253 | */ | ||
254 | static __inline__ unsigned long ffz(unsigned long x) | ||
255 | { | ||
256 | /* no zero exists anywhere in the 8 byte area. */ | ||
257 | if ((x = ~x) == 0) | ||
258 | return 64; | ||
259 | |||
260 | /* | ||
261 | * Calculate the bit position of the least significant '1' bit in x | ||
262 | * (since x has been changed this will actually be the least significant | ||
263 | * '0' bit in the original x). Note: (x & -x) gives us a mask that | ||
264 | * is the least significant (RIGHT-most) 1-bit of the value in x. | ||
265 | */ | ||
266 | return __ilog2(x & -x); | ||
267 | } | ||
268 | |||
269 | static __inline__ int __ffs(unsigned long x) | ||
270 | { | ||
271 | return __ilog2(x & -x); | ||
272 | } | ||
273 | |||
274 | /* | ||
275 | * ffs: find first bit set. This is defined the same way as | ||
276 | * the libc and compiler builtin ffs routines, therefore | ||
277 | * differs in spirit from the above ffz (man ffs). | ||
278 | */ | ||
279 | static __inline__ int ffs(int x) | ||
280 | { | ||
281 | unsigned long i = (unsigned long)x; | ||
282 | return __ilog2(i & -i) + 1; | ||
283 | } | ||
284 | |||
285 | /* | ||
286 | * fls: find last (most-significant) bit set. | ||
287 | * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. | ||
288 | */ | ||
289 | #define fls(x) generic_fls(x) | ||
290 | |||
291 | /* | ||
292 | * hweightN: returns the hamming weight (i.e. the number | ||
293 | * of bits set) of a N-bit word | ||
294 | */ | ||
295 | #define hweight64(x) generic_hweight64(x) | ||
296 | #define hweight32(x) generic_hweight32(x) | ||
297 | #define hweight16(x) generic_hweight16(x) | ||
298 | #define hweight8(x) generic_hweight8(x) | ||
299 | |||
300 | extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, unsigned long offset); | ||
301 | #define find_first_zero_bit(addr, size) \ | ||
302 | find_next_zero_bit((addr), (size), 0) | ||
303 | |||
304 | extern unsigned long find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset); | ||
305 | #define find_first_bit(addr, size) \ | ||
306 | find_next_bit((addr), (size), 0) | ||
307 | |||
308 | extern unsigned long find_next_zero_le_bit(const unsigned long *addr, unsigned long size, unsigned long offset); | ||
309 | #define find_first_zero_le_bit(addr, size) \ | ||
310 | find_next_zero_le_bit((addr), (size), 0) | ||
311 | |||
312 | static __inline__ int test_le_bit(unsigned long nr, __const__ unsigned long * addr) | ||
313 | { | ||
314 | __const__ unsigned char *ADDR = (__const__ unsigned char *) addr; | ||
315 | return (ADDR[nr >> 3] >> (nr & 7)) & 1; | ||
316 | } | ||
317 | |||
318 | #define test_and_clear_le_bit(nr, addr) \ | ||
319 | test_and_clear_bit((nr) ^ 0x38, (addr)) | ||
320 | #define test_and_set_le_bit(nr, addr) \ | ||
321 | test_and_set_bit((nr) ^ 0x38, (addr)) | ||
322 | |||
323 | /* | ||
324 | * non-atomic versions | ||
325 | */ | ||
326 | |||
327 | #define __set_le_bit(nr, addr) \ | ||
328 | __set_bit((nr) ^ 0x38, (addr)) | ||
329 | #define __clear_le_bit(nr, addr) \ | ||
330 | __clear_bit((nr) ^ 0x38, (addr)) | ||
331 | #define __test_and_clear_le_bit(nr, addr) \ | ||
332 | __test_and_clear_bit((nr) ^ 0x38, (addr)) | ||
333 | #define __test_and_set_le_bit(nr, addr) \ | ||
334 | __test_and_set_bit((nr) ^ 0x38, (addr)) | ||
335 | |||
336 | #define ext2_set_bit(nr,addr) \ | ||
337 | __test_and_set_le_bit((nr), (unsigned long*)addr) | ||
338 | #define ext2_clear_bit(nr, addr) \ | ||
339 | __test_and_clear_le_bit((nr), (unsigned long*)addr) | ||
340 | |||
341 | #define ext2_set_bit_atomic(lock, nr, addr) \ | ||
342 | test_and_set_le_bit((nr), (unsigned long*)addr) | ||
343 | #define ext2_clear_bit_atomic(lock, nr, addr) \ | ||
344 | test_and_clear_le_bit((nr), (unsigned long*)addr) | ||
345 | |||
346 | |||
347 | #define ext2_test_bit(nr, addr) test_le_bit((nr),(unsigned long*)addr) | ||
348 | #define ext2_find_first_zero_bit(addr, size) \ | ||
349 | find_first_zero_le_bit((unsigned long*)addr, size) | ||
350 | #define ext2_find_next_zero_bit(addr, size, off) \ | ||
351 | find_next_zero_le_bit((unsigned long*)addr, size, off) | ||
352 | |||
353 | #define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr) | ||
354 | #define minix_set_bit(nr,addr) set_bit(nr,addr) | ||
355 | #define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr) | ||
356 | #define minix_test_bit(nr,addr) test_bit(nr,addr) | ||
357 | #define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size) | ||
358 | |||
359 | #endif /* __KERNEL__ */ | ||
360 | #endif /* _PPC64_BITOPS_H */ | ||
diff --git a/include/asm-ppc64/mmu_context.h b/include/asm-ppc64/mmu_context.h
index 77a743402db4..820dd729b895 100644
--- a/include/asm-ppc64/mmu_context.h
+++ b/include/asm-ppc64/mmu_context.h
@@ -16,21 +16,6 @@
16 | * 2 of the License, or (at your option) any later version. | 16 | * 2 of the License, or (at your option) any later version. |
17 | */ | 17 | */ |
18 | 18 | ||
19 | /* | ||
20 | * Every architecture must define this function. It's the fastest | ||
21 | * way of searching a 140-bit bitmap where the first 100 bits are | ||
22 | * unlikely to be set. It's guaranteed that at least one of the 140 | ||
23 | * bits is cleared. | ||
24 | */ | ||
25 | static inline int sched_find_first_bit(unsigned long *b) | ||
26 | { | ||
27 | if (unlikely(b[0])) | ||
28 | return __ffs(b[0]); | ||
29 | if (unlikely(b[1])) | ||
30 | return __ffs(b[1]) + 64; | ||
31 | return __ffs(b[2]) + 128; | ||
32 | } | ||
33 | |||
34 | static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) | 19 | static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) |
35 | { | 20 | { |
36 | } | 21 | } |