author     Linus Torvalds <torvalds@ppc970.osdl.org>   2005-04-16 18:20:36 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>   2005-04-16 18:20:36 -0400
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /include/asm-frv/bitops.h
tags       Linux-2.6.12-rc2, v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'include/asm-frv/bitops.h')
-rw-r--r--   include/asm-frv/bitops.h   341
1 file changed, 341 insertions(+), 0 deletions(-)
diff --git a/include/asm-frv/bitops.h b/include/asm-frv/bitops.h
new file mode 100644
index 000000000000..b664bd5b6663
--- /dev/null
+++ b/include/asm-frv/bitops.h
@@ -0,0 +1,341 @@
/* bitops.h: bit operations for the Fujitsu FR-V CPUs
 *
 * For an explanation of how atomic ops work in this arch, see:
 * Documentation/fujitsu/frv/atomic-ops.txt
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#include <linux/config.h>
#include <linux/compiler.h>
#include <asm/byteorder.h>
#include <asm/system.h>
#include <asm/atomic.h>

#ifdef __KERNEL__
/*
 * ffz = Find First Zero in word.  Undefined if no zero exists,
 * so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
        unsigned long result = 0;

        while (word & 1) {
                result++;
                word >>= 1;
        }
        return result;
}
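
/* A minimal usage sketch (editorial illustration, not part of the original
 * header; the function name is hypothetical): since ffz() is undefined on an
 * all-ones word, callers are expected to test against ~0UL first.
 */
static inline long example_first_zero(unsigned long word)
{
        if (word == ~0UL)
                return -1;      /* no zero bit present */
        return ffz(word);
}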

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()      barrier()
#define smp_mb__after_clear_bit()       barrier()
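
/* Usage sketch (editorial illustration): because clear_bit() itself implies
 * no barrier, release-style code pairs it with the macro above, e.g.:
 *
 *      smp_mb__before_clear_bit();
 *      clear_bit(MY_LOCK_BIT, &my_flags);
 *
 * MY_LOCK_BIT and my_flags are hypothetical names used only for the example.
 */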

static inline int test_and_clear_bit(int nr, volatile void *addr)
{
        volatile unsigned long *ptr = addr;
        unsigned long mask = 1UL << (nr & 31);
        ptr += nr >> 5;
        return (atomic_test_and_ANDNOT_mask(mask, ptr) & mask) != 0;
}

static inline int test_and_set_bit(int nr, volatile void *addr)
{
        volatile unsigned long *ptr = addr;
        unsigned long mask = 1UL << (nr & 31);
        ptr += nr >> 5;
        return (atomic_test_and_OR_mask(mask, ptr) & mask) != 0;
}

static inline int test_and_change_bit(int nr, volatile void *addr)
{
        volatile unsigned long *ptr = addr;
        unsigned long mask = 1UL << (nr & 31);
        ptr += nr >> 5;
        return (atomic_test_and_XOR_mask(mask, ptr) & mask) != 0;
}
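
/* Editorial sketch (hypothetical helper, not part of the original header):
 * test_and_set_bit() returns the bit's previous value, so it can serve as a
 * primitive try-lock on one bit of a flag word.
 */
static inline int example_trylock(int nr, volatile unsigned long *word)
{
        /* previous value 0 means we just claimed the bit */
        return test_and_set_bit(nr, word) == 0;
}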

static inline void clear_bit(int nr, volatile void *addr)
{
        test_and_clear_bit(nr, addr);
}

static inline void set_bit(int nr, volatile void *addr)
{
        test_and_set_bit(nr, addr);
}

static inline void change_bit(int nr, volatile void *addr)
{
        test_and_change_bit(nr, addr);
}

static inline void __clear_bit(int nr, volatile void *addr)
{
        volatile unsigned long *a = addr;
        int mask;

        a += nr >> 5;
        mask = 1 << (nr & 31);
        *a &= ~mask;
}

static inline void __set_bit(int nr, volatile void *addr)
{
        volatile unsigned long *a = addr;
        int mask;

        a += nr >> 5;
        mask = 1 << (nr & 31);
        *a |= mask;
}

static inline void __change_bit(int nr, volatile void *addr)
{
        volatile unsigned long *a = addr;
        int mask;

        a += nr >> 5;
        mask = 1 << (nr & 31);
        *a ^= mask;
}

static inline int __test_and_clear_bit(int nr, volatile void *addr)
{
        volatile unsigned long *a = addr;
        int mask, retval;

        a += nr >> 5;
        mask = 1 << (nr & 31);
        retval = (mask & *a) != 0;
        *a &= ~mask;
        return retval;
}

static inline int __test_and_set_bit(int nr, volatile void *addr)
{
        volatile unsigned long *a = addr;
        int mask, retval;

        a += nr >> 5;
        mask = 1 << (nr & 31);
        retval = (mask & *a) != 0;
        *a |= mask;
        return retval;
}

static inline int __test_and_change_bit(int nr, volatile void *addr)
{
        volatile unsigned long *a = addr;
        int mask, retval;

        a += nr >> 5;
        mask = 1 << (nr & 31);
        retval = (mask & *a) != 0;
        *a ^= mask;
        return retval;
}
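
/* Editorial note: the double-underscore variants above are non-atomic; they
 * are only safe when the caller already serializes access to the bitmap,
 * for instance under a spinlock (bitmap_lock and map are hypothetical):
 *
 *      spin_lock(&bitmap_lock);
 *      __set_bit(nr, map);
 *      spin_unlock(&bitmap_lock);
 */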

/*
 * This routine doesn't need to be atomic.
 */
static inline int __constant_test_bit(int nr, const volatile void *addr)
{
        return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static inline int __test_bit(int nr, const volatile void *addr)
{
        const int *a = (const int *) addr;
        int mask;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        return ((mask & *a) != 0);
}

#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 __constant_test_bit((nr),(addr)) : \
 __test_bit((nr),(addr)))
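
/* Editorial note: __builtin_constant_p(nr) is true when the bit number is a
 * compile-time constant, so e.g. test_bit(3, map) resolves to
 * __constant_test_bit(3, map) and can fold to a constant-mask load, while
 * test_bit(i, map) with a runtime i goes through __test_bit() (map is a
 * hypothetical bitmap pointer).
 */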

extern int find_next_bit(const unsigned long *addr, int size, int offset);

#define find_first_bit(addr, size) find_next_bit(addr, size, 0)

#define find_first_zero_bit(addr, size) \
        find_next_zero_bit((addr), (size), 0)

static inline int find_next_zero_bit(const void *addr, int size, int offset)
{
        const unsigned long *p = ((const unsigned long *) addr) + (offset >> 5);
        unsigned long result = offset & ~31UL;
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 31UL;
        if (offset) {
                tmp = *(p++);
                tmp |= ~0UL >> (32 - offset);
                if (size < 32)
                        goto found_first;
                if (~tmp)
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        while (size & ~31UL) {
                if (~(tmp = *(p++)))
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if (!size)
                return result;
        tmp = *p;

found_first:
        tmp |= ~0UL << size;    /* force bits at or above 'size' to one */
found_middle:
        return result + ffz(tmp);
}
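
/* Usage sketch (editorial illustration; map and nbits are hypothetical):
 * walking every clear bit in a bitmap:
 *
 *      int bit = find_first_zero_bit(map, nbits);
 *      while (bit < nbits) {
 *              ... use bit ...
 *              bit = find_next_zero_bit(map, nbits, bit + 1);
 *      }
 */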

#define ffs(x) generic_ffs(x)
#define __ffs(x) (ffs(x) - 1)
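
/* Editorial note: generic_ffs() returns 0 for a zero argument, so __ffs(0)
 * would yield -1; like ffz(), __ffs() is only meaningful when at least one
 * bit is known to be set.
 */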

/*
 * fls: find last bit set.
 */
#define fls(x)                                          \
({                                                      \
        int bit;                                        \
                                                        \
        asm("scan %1,gr0,%0" : "=r"(bit) : "r"(x));     \
                                                        \
        bit ? 33 - bit : bit;                           \
})
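
/* Editorial note: this follows the usual fls() contract, so fls(0) == 0,
 * fls(1) == 1 and fls(0x80000000) == 32; the FR-V "scan" instruction
 * performs the bit search in hardware.
 */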

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is set.
 */
static inline int sched_find_first_bit(const unsigned long *b)
{
        if (unlikely(b[0]))
                return __ffs(b[0]);
        if (unlikely(b[1]))
                return __ffs(b[1]) + 32;
        if (unlikely(b[2]))
                return __ffs(b[2]) + 64;
        if (b[3])
                return __ffs(b[3]) + 96;
        return __ffs(b[4]) + 128;
}
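
/* Editorial example: b points at a five-word (160-bit) array carrying the
 * scheduler's 140-bit priority bitmap.  If only bit 100 is set, b[0..2] are
 * zero, b[3] has bit 4 set, and the function returns 96 + 4 == 100.
 */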

/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of an N-bit word
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)

#define ext2_set_bit(nr, addr)          test_and_set_bit  ((nr) ^ 0x18, (addr))
#define ext2_clear_bit(nr, addr)        test_and_clear_bit((nr) ^ 0x18, (addr))

#define ext2_set_bit_atomic(lock,nr,addr)       ext2_set_bit((nr), addr)
#define ext2_clear_bit_atomic(lock,nr,addr)     ext2_clear_bit((nr), addr)
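
/* Editorial note: ext2 on-disk bitmaps use little-endian bit numbering while
 * FR-V is big-endian.  XORing the bit number with 0x18 flips bits 3 and 4,
 * which reverses the byte index within each 32-bit word (0<->3, 1<->2); for
 * example, little-endian bit 0 maps to bit 24 of the native word, which
 * lives in the first byte in memory on a big-endian CPU.
 */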

static inline int ext2_test_bit(int nr, const volatile void *addr)
{
        const volatile unsigned char *ADDR = (const unsigned char *) addr;
        int mask;

        ADDR += nr >> 3;
        mask = 1 << (nr & 0x07);
        return ((mask & *ADDR) != 0);
}

#define ext2_find_first_zero_bit(addr, size) \
        ext2_find_next_zero_bit((addr), (size), 0)

static inline unsigned long ext2_find_next_zero_bit(const void *addr,
                                                    unsigned long size,
                                                    unsigned long offset)
{
        const unsigned long *p = ((const unsigned long *) addr) + (offset >> 5);
        unsigned long result = offset & ~31UL;
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 31UL;
        if (offset) {
                /* We hold the little endian value in tmp, but then the
                 * shift is illegal. So we could keep a big endian value
                 * in tmp, like this:
                 *
                 * tmp = __swab32(*(p++));
                 * tmp |= ~0UL >> (32-offset);
                 *
                 * but this would decrease performance, so we change the
                 * shift:
                 */
                tmp = *(p++);
                tmp |= __swab32(~0UL >> (32 - offset));
                if (size < 32)
                        goto found_first;
                if (~tmp)
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        while (size & ~31UL) {
                if (~(tmp = *(p++)))
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if (!size)
                return result;
        tmp = *p;

found_first:
        /* tmp is little endian, so we would have to swab the shift,
         * see above. But then we have to swab tmp below for ffz, so
         * we might as well do this here.
         */
        return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
        return result + ffz(__swab32(tmp));
}

/* Bitmap functions for the minix filesystem. */
#define minix_test_and_set_bit(nr,addr)         ext2_set_bit(nr,addr)
#define minix_set_bit(nr,addr)                  ext2_set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr)       ext2_clear_bit(nr,addr)
#define minix_test_bit(nr,addr)                 ext2_test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size)    ext2_find_first_zero_bit(addr,size)

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */