author		Akinobu Mita <mita@miraclelinux.com>	2006-03-26 04:39:31 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-03-26 11:57:14 -0500
commit		59e18a2e1c8f6642c307032939daaf474c16344e
tree		a0c58aa0e68703cf3844108f46636a6adf941ea6	/include/asm-parisc/bitops.h
parent		3c9ee7ef87414cba80dbdf433d3547bb20055ef7
[PATCH] bitops: parisc: use generic bitops
- remove __{,test_and_}{set,clear,change}_bit() and test_bit()
- remove ffz()
- remove generic_fls64()
- remove generic_hweight{32,16,8}()
- remove generic_hweight64()
- remove sched_find_first_bit()
- remove find_{next,first}{,_zero}_bit()
- remove ext2_{set,clear,test,find_first_zero,find_next_zero}_bit()

Signed-off-by: Akinobu Mita <mita@miraclelinux.com>
Cc: Kyle McMartin <kyle@mcmartin.ca>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include/asm-parisc/bitops.h')
-rw-r--r--	include/asm-parisc/bitops.h	286
1 file changed, 9 insertions(+), 277 deletions(-)
diff --git a/include/asm-parisc/bitops.h b/include/asm-parisc/bitops.h
index ca6119af20af..900561922c4c 100644
--- a/include/asm-parisc/bitops.h
+++ b/include/asm-parisc/bitops.h
@@ -35,13 +35,6 @@ static __inline__ void set_bit(int nr, volatile unsigned long * addr)
 	_atomic_spin_unlock_irqrestore(addr, flags);
 }
 
-static __inline__ void __set_bit(unsigned long nr, volatile unsigned long * addr)
-{
-	unsigned long *m = (unsigned long *) addr + (nr >> SHIFT_PER_LONG);
-
-	*m |= 1UL << CHOP_SHIFTCOUNT(nr);
-}
-
 static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
 {
 	unsigned long mask = ~(1UL << CHOP_SHIFTCOUNT(nr));
@@ -53,13 +46,6 @@ static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
 	_atomic_spin_unlock_irqrestore(addr, flags);
 }
 
-static __inline__ void __clear_bit(unsigned long nr, volatile unsigned long * addr)
-{
-	unsigned long *m = (unsigned long *) addr + (nr >> SHIFT_PER_LONG);
-
-	*m &= ~(1UL << CHOP_SHIFTCOUNT(nr));
-}
-
 static __inline__ void change_bit(int nr, volatile unsigned long * addr)
 {
 	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
@@ -71,13 +57,6 @@ static __inline__ void change_bit(int nr, volatile unsigned long * addr)
 	_atomic_spin_unlock_irqrestore(addr, flags);
 }
 
-static __inline__ void __change_bit(unsigned long nr, volatile unsigned long * addr)
-{
-	unsigned long *m = (unsigned long *) addr + (nr >> SHIFT_PER_LONG);
-
-	*m ^= 1UL << CHOP_SHIFTCOUNT(nr);
-}
-
 static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
 {
 	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
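All three deleted helpers use the same addressing scheme: nr >> SHIFT_PER_LONG picks the word, CHOP_SHIFTCOUNT(nr) the bit within it. A minimal standalone sketch of that arithmetic follows; the macro values here are assumptions for a 64-bit build (SHIFT_PER_LONG would be 5 on 32-bit parisc), not copied from the header:

#include <stdio.h>

#define BITS_PER_LONG      (8 * sizeof(unsigned long))
#define SHIFT_PER_LONG     6	/* assumed: log2(64); 5 on a 32-bit build */
#define CHOP_SHIFTCOUNT(x) ((x) & (BITS_PER_LONG - 1))

/* Same arithmetic as the deleted __set_bit(), outside the kernel. */
static void sketch_set_bit(unsigned long nr, unsigned long *addr)
{
	unsigned long *m = addr + (nr >> SHIFT_PER_LONG);	/* word holding nr */

	*m |= 1UL << CHOP_SHIFTCOUNT(nr);	/* bit inside that word */
}

int main(void)
{
	unsigned long bitmap[2] = { 0, 0 };

	sketch_set_bit(70, bitmap);	/* word 1, bit 6 on a 64-bit build */
	printf("%lx %lx\n", bitmap[0], bitmap[1]);	/* prints: 0 40 */
	return 0;
}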
@@ -93,18 +72,6 @@ static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
 	return (oldbit & mask) ? 1 : 0;
 }
 
-static __inline__ int __test_and_set_bit(int nr, volatile unsigned long * address)
-{
-	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
-	unsigned long oldbit;
-	unsigned long *addr = (unsigned long *)address + (nr >> SHIFT_PER_LONG);
-
-	oldbit = *addr;
-	*addr = oldbit | mask;
-
-	return (oldbit & mask) ? 1 : 0;
-}
-
 static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
 {
 	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
@@ -120,18 +87,6 @@ static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
 	return (oldbit & mask) ? 1 : 0;
 }
 
-static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long * address)
-{
-	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
-	unsigned long *addr = (unsigned long *)address + (nr >> SHIFT_PER_LONG);
-	unsigned long oldbit;
-
-	oldbit = *addr;
-	*addr = oldbit & ~mask;
-
-	return (oldbit & mask) ? 1 : 0;
-}
-
 static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
 {
 	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
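Note that only the non-atomic variants go away; the atomic test_and_*_bit() helpers shown in context survive, because PA-RISC has no general atomic read-modify-write instruction and must bracket the update with _atomic_spin_lock_irqsave()/_atomic_spin_unlock_irqrestore() on a lock derived from the address. A rough userspace sketch of that pattern, with a single pthread mutex standing in for the kernel's lock (an assumption for illustration, not the kernel code):

#include <pthread.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* Stand-in for the kernel's per-address lock. */
static pthread_mutex_t bitlock = PTHREAD_MUTEX_INITIALIZER;

static int sketch_test_and_set_bit(int nr, unsigned long *addr)
{
	unsigned long mask = 1UL << (nr & (BITS_PER_LONG - 1));
	unsigned long old;

	addr += nr / BITS_PER_LONG;
	pthread_mutex_lock(&bitlock);	/* _atomic_spin_lock_irqsave() role */
	old = *addr;
	*addr = old | mask;
	pthread_mutex_unlock(&bitlock);	/* _atomic_spin_unlock_irqrestore() role */

	return (old & mask) != 0;
}

int main(void)
{
	unsigned long map[2] = { 0, 0 };
	int was_set = sketch_test_and_set_bit(100, map);	/* 0: was clear */
	int now_set = sketch_test_and_set_bit(100, map);	/* 1: already set */

	return (was_set == 0 && now_set == 1) ? 0 : 1;
}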
@@ -147,25 +102,7 @@ static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
 	return (oldbit & mask) ? 1 : 0;
 }
 
-static __inline__ int __test_and_change_bit(int nr, volatile unsigned long * address)
-{
-	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
-	unsigned long *addr = (unsigned long *)address + (nr >> SHIFT_PER_LONG);
-	unsigned long oldbit;
-
-	oldbit = *addr;
-	*addr = oldbit ^ mask;
-
-	return (oldbit & mask) ? 1 : 0;
-}
-
-static __inline__ int test_bit(int nr, const volatile unsigned long *address)
-{
-	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
-	const unsigned long *addr = (const unsigned long *)address + (nr >> SHIFT_PER_LONG);
-
-	return !!(*addr & mask);
-}
+#include <asm-generic/bitops/non-atomic.h>
 
 #ifdef __KERNEL__
 
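The replacement <asm-generic/bitops/non-atomic.h> provides the same operations with the same semantics, so callers are untouched; only the definition site moves. Roughly what two of the generic versions look like (reconstructed from memory of that era's header as an illustration; the tree is authoritative):

#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* Approximation of the generic non-atomic helpers of this era. */
static inline void __set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = 1UL << (nr % BITS_PER_LONG);
	unsigned long *p = ((unsigned long *)addr) + (nr / BITS_PER_LONG);

	*p |= mask;	/* plain read-modify-write: caller must serialize */
}

static inline int test_bit(int nr, const volatile unsigned long *addr)
{
	return 1 & (addr[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG));
}

Callers keep the exact signatures the parisc versions had, so nothing outside this header notices the switch.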
@@ -219,8 +156,7 @@ static __inline__ unsigned long __ffs(unsigned long x)
 	return ret;
 }
 
-/* Undefined if no bit is zero. */
-#define ffz(x)	__ffs(~(x))
+#include <asm-generic/bitops/ffz.h>
 
 /*
  * ffs: find first bit set. returns 1 to BITS_PER_LONG or 0 (if none set)
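The generic ffz.h keeps exactly the trick the two deleted lines used: the first zero bit of x is the first set bit of ~x. A tiny self-check, with GCC's count-trailing-zeros builtin standing in for the kernel's __ffs() (both are undefined when no qualifying bit exists):

#include <assert.h>

#define ffz(x) __builtin_ctzl(~(x))	/* __ffs() == count trailing zeros */

int main(void)
{
	assert(ffz(0x7UL) == 3);	/* bits 0-2 set, bit 3 is first zero */
	assert(ffz(0x0UL) == 0);	/* nothing set, bit 0 is first zero */
	return 0;
}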
@@ -263,155 +199,22 @@ static __inline__ int fls(int x)
 
 	return ret;
 }
-#define fls64(x)   generic_fls64(x)
 
-/*
- * hweightN: returns the hamming weight (i.e. the number
- * of bits set) of a N-bit word
- */
-#define hweight64(x) generic_hweight64(x)
-#define hweight32(x) generic_hweight32(x)
-#define hweight16(x) generic_hweight16(x)
-#define hweight8(x) generic_hweight8(x)
-
-/*
- * Every architecture must define this function. It's the fastest
- * way of searching a 140-bit bitmap where the first 100 bits are
- * unlikely to be set. It's guaranteed that at least one of the 140
- * bits is cleared.
- */
-static inline int sched_find_first_bit(const unsigned long *b)
-{
-#ifdef __LP64__
-	if (unlikely(b[0]))
-		return __ffs(b[0]);
-	if (unlikely(b[1]))
-		return __ffs(b[1]) + 64;
-	return __ffs(b[2]) + 128;
-#else
-	if (unlikely(b[0]))
-		return __ffs(b[0]);
-	if (unlikely(b[1]))
-		return __ffs(b[1]) + 32;
-	if (unlikely(b[2]))
-		return __ffs(b[2]) + 64;
-	if (b[3])
-		return __ffs(b[3]) + 96;
-	return __ffs(b[4]) + 128;
-#endif
-}
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/sched.h>
 
 #endif /* __KERNEL__ */
 
-/*
- * This implementation of find_{first,next}_zero_bit was stolen from
- * Linus' asm-alpha/bitops.h.
- */
-#define find_first_zero_bit(addr, size) \
-	find_next_zero_bit((addr), (size), 0)
-
-static __inline__ unsigned long find_next_zero_bit(const void * addr, unsigned long size, unsigned long offset)
-{
-	const unsigned long * p = ((unsigned long *) addr) + (offset >> SHIFT_PER_LONG);
-	unsigned long result = offset & ~(BITS_PER_LONG-1);
-	unsigned long tmp;
-
-	if (offset >= size)
-		return size;
-	size -= result;
-	offset &= (BITS_PER_LONG-1);
-	if (offset) {
-		tmp = *(p++);
-		tmp |= ~0UL >> (BITS_PER_LONG-offset);
-		if (size < BITS_PER_LONG)
-			goto found_first;
-		if (~tmp)
-			goto found_middle;
-		size -= BITS_PER_LONG;
-		result += BITS_PER_LONG;
-	}
-	while (size & ~(BITS_PER_LONG -1)) {
-		if (~(tmp = *(p++)))
-			goto found_middle;
-		result += BITS_PER_LONG;
-		size -= BITS_PER_LONG;
-	}
-	if (!size)
-		return result;
-	tmp = *p;
-found_first:
-	tmp |= ~0UL << size;
-found_middle:
-	return result + ffz(tmp);
-}
-
-static __inline__ unsigned long find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset)
-{
-	const unsigned long *p = addr + (offset >> SHIFT_PER_LONG);
-	unsigned long result = offset & ~(BITS_PER_LONG-1);
-	unsigned long tmp;
-
-	if (offset >= size)
-		return size;
-	size -= result;
-	offset &= (BITS_PER_LONG-1);
-	if (offset) {
-		tmp = *(p++);
-		tmp &= (~0UL << offset);
-		if (size < BITS_PER_LONG)
-			goto found_first;
-		if (tmp)
-			goto found_middle;
-		size -= BITS_PER_LONG;
-		result += BITS_PER_LONG;
-	}
-	while (size & ~(BITS_PER_LONG-1)) {
-		if ((tmp = *(p++)))
-			goto found_middle;
-		result += BITS_PER_LONG;
-		size -= BITS_PER_LONG;
-	}
-	if (!size)
-		return result;
-	tmp = *p;
-
-found_first:
-	tmp &= (~0UL >> (BITS_PER_LONG - size));
-	if (tmp == 0UL)	/* Are any bits set? */
-		return result + size;	/* Nope. */
-found_middle:
-	return result + __ffs(tmp);
-}
-
-/**
- * find_first_bit - find the first set bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum size to search
- *
- * Returns the bit-number of the first set bit, not the number of the byte
- * containing a bit.
- */
-#define find_first_bit(addr, size) \
-	find_next_bit((addr), (size), 0)
-
-#define _EXT2_HAVE_ASM_BITOPS_
+#include <asm-generic/bitops/find.h>
 
 #ifdef __KERNEL__
-/*
- * test_and_{set,clear}_bit guarantee atomicity without
- * disabling interrupts.
- */
+
+#include <asm-generic/bitops/ext2-non-atomic.h>
 
 /* '3' is bits per byte */
 #define LE_BYTE_ADDR ((sizeof(unsigned long) - 1) << 3)
 
-#define ext2_test_bit(nr, addr) \
-			test_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
-#define ext2_set_bit(nr, addr) \
-		__test_and_set_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
-#define ext2_clear_bit(nr, addr) \
-		__test_and_clear_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
-
 #define ext2_set_bit_atomic(l,nr,addr) \
 		test_and_set_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
 #define ext2_clear_bit_atomic(l,nr,addr) \
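The surviving ext2_*_atomic macros still XOR the bit number with LE_BYTE_ADDR: ext2 bitmaps use little-endian bit numbering, parisc is big-endian, and XOR-ing nr with (sizeof(unsigned long) - 1) << 3 flips the byte index within the word while preserving the bit offset inside the byte. A worked self-check of that mapping, assuming an LP64 build:

#include <assert.h>

#define LE_BYTE_ADDR ((sizeof(unsigned long) - 1) << 3)	/* 56 on LP64 */

int main(void)
{
	/* LE bit 0 lives in byte 0; on big-endian LP64 that byte holds
	 * native bits 56..63, so LE bit 0 maps to native bit 56. */
	assert((0 ^ LE_BYTE_ADDR) == 56);
	/* LE bit 8 is byte 1, bit 0 -> native bit 48. */
	assert((8 ^ LE_BYTE_ADDR) == 48);
	/* The offset within the byte survives: LE bit 10 -> native bit 50. */
	assert((10 ^ LE_BYTE_ADDR) == 50);
	return 0;
}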
@@ -419,77 +222,6 @@ found_middle:
 
 #endif /* __KERNEL__ */
 
-
-#define ext2_find_first_zero_bit(addr, size) \
-	ext2_find_next_zero_bit((addr), (size), 0)
-
-/* include/linux/byteorder does not support "unsigned long" type */
-static inline unsigned long ext2_swabp(unsigned long * x)
-{
-#ifdef __LP64__
-	return (unsigned long) __swab64p((u64 *) x);
-#else
-	return (unsigned long) __swab32p((u32 *) x);
-#endif
-}
-
-/* include/linux/byteorder doesn't support "unsigned long" type */
-static inline unsigned long ext2_swab(unsigned long y)
-{
-#ifdef __LP64__
-	return (unsigned long) __swab64((u64) y);
-#else
-	return (unsigned long) __swab32((u32) y);
-#endif
-}
-
-static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
-{
-	unsigned long *p = (unsigned long *) addr + (offset >> SHIFT_PER_LONG);
-	unsigned long result = offset & ~(BITS_PER_LONG - 1);
-	unsigned long tmp;
-
-	if (offset >= size)
-		return size;
-	size -= result;
-	offset &= (BITS_PER_LONG - 1UL);
-	if (offset) {
-		tmp = ext2_swabp(p++);
-		tmp |= (~0UL >> (BITS_PER_LONG - offset));
-		if (size < BITS_PER_LONG)
-			goto found_first;
-		if (~tmp)
-			goto found_middle;
-		size -= BITS_PER_LONG;
-		result += BITS_PER_LONG;
-	}
-
-	while (size & ~(BITS_PER_LONG - 1)) {
-		if (~(tmp = *(p++)))
-			goto found_middle_swap;
-		result += BITS_PER_LONG;
-		size -= BITS_PER_LONG;
-	}
-	if (!size)
-		return result;
-	tmp = ext2_swabp(p);
-found_first:
-	tmp |= ~0UL << size;
-	if (tmp == ~0UL)	/* Are any bits zero? */
-		return result + size;	/* Nope. Skip ffz */
-found_middle:
-	return result + ffz(tmp);
-
-found_middle_swap:
-	return result + ffz(ext2_swab(tmp));
-}
-
-
-/* Bitmap functions for the minix filesystem. */
-#define minix_test_and_set_bit(nr,addr) ext2_set_bit(nr,addr)
-#define minix_set_bit(nr,addr) ((void)ext2_set_bit(nr,addr))
-#define minix_test_and_clear_bit(nr,addr) ext2_clear_bit(nr,addr)
-#define minix_test_bit(nr,addr) ext2_test_bit(nr,addr)
-#define minix_find_first_zero_bit(addr,size) ext2_find_first_zero_bit(addr,size)
+#include <asm-generic/bitops/minix-le.h>
 
 #endif /* _PARISC_BITOPS_H */
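For reference, the deleted ext2_find_next_zero_bit() byte-swapped each native big-endian word into little-endian bit order before scanning, which is exactly what the generic little-endian helpers now do centrally. A condensed sketch of that idea (partial-word offset handling omitted; LP64 and GCC builtins assumed):

#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* Stand-in for ext2_swabp(): LP64 and a GCC builtin assumed. */
static unsigned long le_word(unsigned long w)
{
	return __builtin_bswap64(w);
}

/* First zero bit in little-endian order, or size if none. */
static unsigned long sketch_find_next_zero_le(const unsigned long *p,
					      unsigned long size)
{
	unsigned long i, tmp;

	for (i = 0; i < size; i += BITS_PER_LONG) {
		tmp = le_word(p[i / BITS_PER_LONG]);	/* swab, then scan */
		if (~tmp)				/* some bit is zero */
			return i + __builtin_ctzl(~tmp);	/* == ffz(tmp) */
	}
	return size;
}

int main(void)
{
	/* All ones except LE bit 3 of word 0. */
	unsigned long map[1] = { ~__builtin_bswap64(1UL << 3) };

	return sketch_find_next_zero_le(map, 64) == 3 ? 0 : 1;
}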