| author | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400 |
| commit | 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch) | |
| tree | 0bba044c4ce775e45a88a51686b5d9f90697ea9d /include/asm-ppc/bitops.h | |
| tag | v2.6.12-rc2 (Linux-2.6.12-rc2) | |
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'include/asm-ppc/bitops.h')
 -rw-r--r--  include/asm-ppc/bitops.h  460
 1 files changed, 460 insertions(+), 0 deletions(-)
diff --git a/include/asm-ppc/bitops.h b/include/asm-ppc/bitops.h
new file mode 100644
index 000000000000..e30f536fd830
--- /dev/null
+++ b/include/asm-ppc/bitops.h
@@ -0,0 +1,460 @@
/*
 * bitops.h: Bit string operations on the ppc
 */

#ifdef __KERNEL__
#ifndef _PPC_BITOPS_H
#define _PPC_BITOPS_H

#include <linux/config.h>
#include <linux/compiler.h>
#include <asm/byteorder.h>
#include <asm/atomic.h>

/*
 * The test_and_*_bit operations are taken to imply a memory barrier
 * on SMP systems.
 */
#ifdef CONFIG_SMP
#define SMP_WMB		"eieio\n"
#define SMP_MB		"\nsync"
#else
#define SMP_WMB
#define SMP_MB
#endif /* CONFIG_SMP */
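
/*
 * Editor's note (not part of the original header): on SMP builds the
 * test_and_*_bit() routines below paste SMP_WMB in front of the
 * lwarx/stwcx. loop and SMP_MB after it, so the generated sequence is
 * roughly:
 *
 *	eieio			write barrier before the atomic update
 *	1: lwarx  ...		load-reserve / modify / store-conditional
 *	   stwcx. ...
 *	   bne    1b		retry if the reservation was lost
 *	sync			full barrier after the update
 *
 * On UP builds both macros expand to nothing.
 */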

static __inline__ void set_bit(int nr, volatile unsigned long * addr)
{
	unsigned long old;
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);

	__asm__ __volatile__("\n\
1:	lwarx	%0,0,%3 \n\
	or	%0,%0,%2 \n"
	PPC405_ERR77(0,%3)
"	stwcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (old), "=m" (*p)
	: "r" (mask), "r" (p), "m" (*p)
	: "cc" );
}
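
/*
 * Editor's illustration (not in the original file): basic usage.  Bit
 * numbers beyond 31 index into later words, e.g. with a hypothetical
 * bitmap `flags':
 *
 *	static unsigned long flags[2];
 *
 *	set_bit(3, flags);	atomic: bit 3 of flags[0]
 *	set_bit(40, flags);	atomic: bit 8 of flags[1] (40 >> 5 = 1, 40 & 0x1f = 8)
 *	__set_bit(5, flags);	non-atomic variant; only safe when no other
 *				CPU or interrupt can touch the bitmap
 */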

/*
 * non-atomic version
 */
static __inline__ void __set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);

	*p |= mask;
}

/*
 * clear_bit doesn't imply a memory barrier
 */
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()

static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long old;
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);

	__asm__ __volatile__("\n\
1:	lwarx	%0,0,%3 \n\
	andc	%0,%0,%2 \n"
	PPC405_ERR77(0,%3)
"	stwcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (old), "=m" (*p)
	: "r" (mask), "r" (p), "m" (*p)
	: "cc");
}
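
/*
 * Editor's illustration (not in the original file): clear_bit() gives no
 * ordering by itself, so code that must make earlier stores visible
 * before the bit is observed clear adds the barrier explicitly:
 *
 *	result->status = DONE;			hypothetical field/value
 *	smp_mb__before_clear_bit();
 *	clear_bit(WORK_PENDING, &work_flags);	hypothetical names
 */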

/*
 * non-atomic version
 */
static __inline__ void __clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);

	*p &= ~mask;
}

static __inline__ void change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long old;
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);

	__asm__ __volatile__("\n\
1:	lwarx	%0,0,%3 \n\
	xor	%0,%0,%2 \n"
	PPC405_ERR77(0,%3)
"	stwcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (old), "=m" (*p)
	: "r" (mask), "r" (p), "m" (*p)
	: "cc");
}

/*
 * non-atomic version
 */
static __inline__ void __change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);

	*p ^= mask;
}

/*
 * The test_and_*_bit operations do imply a memory barrier on SMP:
 * SMP_WMB is issued before and SMP_MB after the atomic sequence.
 */
static __inline__ int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	unsigned int old, t;
	unsigned int mask = 1 << (nr & 0x1f);
	volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);

	__asm__ __volatile__(SMP_WMB "\n\
1:	lwarx	%0,0,%4 \n\
	or	%1,%0,%3 \n"
	PPC405_ERR77(0,%4)
"	stwcx.	%1,0,%4 \n\
	bne	1b"
	SMP_MB
	: "=&r" (old), "=&r" (t), "=m" (*p)
	: "r" (mask), "r" (p), "m" (*p)
	: "cc", "memory");

	return (old & mask) != 0;
}
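
/*
 * Editor's illustration (not in the original file): the return value of
 * test_and_set_bit() is the *old* state of the bit, which makes it usable
 * as a one-shot "claim" operation.  BUSY_BIT and dev_flags are
 * hypothetical names.
 *
 *	if (test_and_set_bit(BUSY_BIT, &dev_flags))
 *		return -EBUSY;		bit was already set; someone owns it
 *	...do the work, then release with clear_bit() as shown above...
 */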

/*
 * non-atomic version
 */
static __inline__ int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
	unsigned long old = *p;

	*p = old | mask;
	return (old & mask) != 0;
}

static __inline__ int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned int old, t;
	unsigned int mask = 1 << (nr & 0x1f);
	volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);

	__asm__ __volatile__(SMP_WMB "\n\
1:	lwarx	%0,0,%4 \n\
	andc	%1,%0,%3 \n"
	PPC405_ERR77(0,%4)
"	stwcx.	%1,0,%4 \n\
	bne	1b"
	SMP_MB
	: "=&r" (old), "=&r" (t), "=m" (*p)
	: "r" (mask), "r" (p), "m" (*p)
	: "cc", "memory");

	return (old & mask) != 0;
}

/*
 * non-atomic version
 */
static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
	unsigned long old = *p;

	*p = old & ~mask;
	return (old & mask) != 0;
}

static __inline__ int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	unsigned int old, t;
	unsigned int mask = 1 << (nr & 0x1f);
	volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);

	__asm__ __volatile__(SMP_WMB "\n\
1:	lwarx	%0,0,%4 \n\
	xor	%1,%0,%3 \n"
	PPC405_ERR77(0,%4)
"	stwcx.	%1,0,%4 \n\
	bne	1b"
	SMP_MB
	: "=&r" (old), "=&r" (t), "=m" (*p)
	: "r" (mask), "r" (p), "m" (*p)
	: "cc", "memory");

	return (old & mask) != 0;
}

/*
 * non-atomic version
 */
static __inline__ int __test_and_change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
	unsigned long old = *p;

	*p = old ^ mask;
	return (old & mask) != 0;
}

static __inline__ int test_bit(int nr, __const__ volatile unsigned long *addr)
{
	return ((addr[nr >> 5] >> (nr & 0x1f)) & 1) != 0;
}

/* Return the bit position of the most significant 1 bit in a word */
static __inline__ int __ilog2(unsigned long x)
{
	int lz;

	asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x));
	return 31 - lz;
}
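
/*
 * Editor's note (not in the original file): cntlzw counts leading zeros
 * in a 32-bit word, so e.g. __ilog2(0x90) -> cntlzw = 24 -> 31 - 24 = 7,
 * the index of the highest set bit.  __ilog2(0) works out to -1, since
 * cntlzw(0) = 32.
 */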

static __inline__ int ffz(unsigned long x)
{
	if ((x = ~x) == 0)
		return 32;
	return __ilog2(x & -x);
}
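
/*
 * Editor's note (not in the original file): ffz() finds the first zero
 * bit by inverting x and isolating the lowest set bit of the result with
 * x & -x.  Example: ffz(0x0000000f) -> ~x = 0xfffffff0, x & -x = 0x10,
 * __ilog2(0x10) = 4, i.e. bit 4 is the first zero bit.
 */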

static inline int __ffs(unsigned long x)
{
	return __ilog2(x & -x);
}

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static __inline__ int ffs(int x)
{
	return __ilog2(x & -x) + 1;
}
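
/*
 * Editor's note (not in the original file): __ffs() is 0-based while
 * ffs() is 1-based, matching the libc convention.  For x = 0x10,
 * __ffs(x) = 4 and ffs(x) = 5.  ffs(0) evaluates to 0 here, because
 * cntlzw(0) = 32 makes __ilog2(0) = -1.
 */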

/*
 * fls: find last (most-significant) bit set.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static __inline__ int fls(unsigned int x)
{
	int lz;

	asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x));
	return 32 - lz;
}

/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
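
/*
 * Editor's note (not in the original file): the hweight helpers fall back
 * to the generic popcount routines from the common bitops code, e.g.
 * hweight8(0xb3) = 5 since 0xb3 = 10110011 has five bits set.
 */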

/*
 * Find the first bit set in a 140-bit bitmap.
 * The first 100 bits are unlikely to be set.
 */
static inline int sched_find_first_bit(const unsigned long *b)
{
	if (unlikely(b[0]))
		return __ffs(b[0]);
	if (unlikely(b[1]))
		return __ffs(b[1]) + 32;
	if (unlikely(b[2]))
		return __ffs(b[2]) + 64;
	if (b[3])
		return __ffs(b[3]) + 96;
	return __ffs(b[4]) + 128;
}
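
/*
 * Editor's note (not in the original file): the 140-bit width matches the
 * O(1) scheduler's per-runqueue priority bitmap (MAX_PRIO = 140), which is
 * why the search is unrolled over exactly five 32-bit words.  Example:
 * with b = { 0, 0, 0, 0x00010000, 0 } the result is __ffs(b[3]) + 96 = 112.
 */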

/**
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @size: The maximum size to search
 * @offset: The bitnumber to start searching at
 */
static __inline__ unsigned long find_next_bit(const unsigned long *addr,
	unsigned long size, unsigned long offset)
{
	unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
	unsigned int result = offset & ~31UL;
	unsigned int tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *p++;
		tmp &= ~0UL << offset;
		if (size < 32)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size >= 32) {
		if ((tmp = *p++) != 0)
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp &= ~0UL >> (32 - size);
	if (tmp == 0UL)		/* Are any bits set? */
		return result + size;	/* Nope. */
found_middle:
	return result + __ffs(tmp);
}
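
/*
 * Editor's illustration (not in the original file): visiting every set bit
 * of a bitmap with find_next_bit().  `map', NBITS and handle_bit() are
 * hypothetical names.
 *
 *	unsigned long bit;
 *
 *	for (bit = find_next_bit(map, NBITS, 0);
 *	     bit < NBITS;
 *	     bit = find_next_bit(map, NBITS, bit + 1))
 *		handle_bit(bit);
 */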

/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first set bit, not the number of the byte
 * containing a bit.
 */
#define find_first_bit(addr, size) \
	find_next_bit((addr), (size), 0)

/*
 * This implementation of find_{first,next}_zero_bit was stolen from
 * Linus' asm-alpha/bitops.h.
 */
#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)

static __inline__ unsigned long find_next_zero_bit(const unsigned long *addr,
	unsigned long size, unsigned long offset)
{
	unsigned int * p = ((unsigned int *) addr) + (offset >> 5);
	unsigned int result = offset & ~31UL;
	unsigned int tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *p++;
		tmp |= ~0UL >> (32-offset);
		if (size < 32)
			goto found_first;
		if (tmp != ~0U)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size >= 32) {
		if ((tmp = *p++) != ~0U)
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;
found_first:
	tmp |= ~0UL << size;
	if (tmp == ~0UL)	/* Are any bits zero? */
		return result + size;	/* Nope. */
found_middle:
	return result + ffz(tmp);
}


#define ext2_set_bit(nr, addr)	__test_and_set_bit((nr) ^ 0x18, (unsigned long *)(addr))
#define ext2_set_bit_atomic(lock, nr, addr) test_and_set_bit((nr) ^ 0x18, (unsigned long *)(addr))
#define ext2_clear_bit(nr, addr) __test_and_clear_bit((nr) ^ 0x18, (unsigned long *)(addr))
#define ext2_clear_bit_atomic(lock, nr, addr) test_and_clear_bit((nr) ^ 0x18, (unsigned long *)(addr))

static __inline__ int ext2_test_bit(int nr, __const__ void * addr)
{
	__const__ unsigned char *ADDR = (__const__ unsigned char *) addr;

	return (ADDR[nr >> 3] >> (nr & 7)) & 1;
}
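
/*
 * Editor's note (not in the original file): ext2 bitmaps are stored
 * little-endian on disk, while this is a big-endian CPU doing 32-bit word
 * accesses.  XOR-ing the bit number with 0x18 (24) mirrors the byte index
 * within each 32-bit word, so e.g. ext2 bit 0 (bit 0 of byte 0) becomes
 * word bit 24, which is exactly where that byte's LSB sits when the word
 * is loaded big-endian.  ext2_test_bit() avoids the remapping entirely by
 * addressing individual bytes.
 */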

/*
 * This implementation of ext2_find_{first,next}_zero_bit was stolen from
 * Linus' asm-alpha/bitops.h and modified for a big-endian machine.
 */

#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)

static __inline__ unsigned long ext2_find_next_zero_bit(const void *addr,
	unsigned long size, unsigned long offset)
{
	unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
	unsigned int result = offset & ~31UL;
	unsigned int tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = cpu_to_le32p(p++);
		tmp |= ~0UL >> (32-offset);
		if (size < 32)
			goto found_first;
		if (tmp != ~0U)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size >= 32) {
		if ((tmp = cpu_to_le32p(p++)) != ~0U)
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = cpu_to_le32p(p);
found_first:
	tmp |= ~0U << size;
	if (tmp == ~0UL)	/* Are any bits zero? */
		return result + size;	/* Nope. */
found_middle:
	return result + ffz(tmp);
}

/* Bitmap functions for the minix filesystem. */
#define minix_test_and_set_bit(nr,addr) ext2_set_bit(nr,addr)
#define minix_set_bit(nr,addr) ((void)ext2_set_bit(nr,addr))
#define minix_test_and_clear_bit(nr,addr) ext2_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) ext2_test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) ext2_find_first_zero_bit(addr,size)

#endif /* _PPC_BITOPS_H */
#endif /* __KERNEL__ */