author    Grant Grundler <grundler@parisc-linux.org>  2005-10-21 22:45:22 -0400
committer Kyle McMartin <kyle@parisc-linux.org>       2005-10-21 22:45:22 -0400
commit    a366064c3ff46c985a3c7243468be197d29874dc (patch)
tree      379418f6bbbfcfb8ebd6a53786e8d5e0a19d0327 /include/asm-parisc
parent    2464212f68136527f6364d63c23a529e1fd7d168 (diff)
[PARISC] Update bitops from parisc tree
Optimize ext2_find_next_zero_bit. Gives about 25% perf improvement with a
rsync test with ext3.

Signed-off-by: Randolph Chung <tausq@parisc-linux.org>

fix ext3 performance - ext2_find_next_zero() was culprit. Kudos to jejb for
pointing out the possibility that ext2_test_bit and ext2_find_next_zero()
may in fact not be enumerating bits in the bitmap because of endianness.
Took the sparc64 implementation and adapted it to our tree. I suspect the
real problem is ffz() wants an unsigned long and was getting garbage in the
top half of the unsigned int. Not confirmed, but that's what I suspect.

Signed-off-by: Grant Grundler <grundler@parisc-linux.org>

Fix find_next_bit for 32-bit.
Make masking consistent for bitops.

From: Joel Soete <soete.joel@tiscali.be>
Signed-off-by: Randolph Chung <tausq@parisc-linux.org>

Add back incorrectly removed ext2_find_first_zero_bit definition.

Signed-off-by: James Bottomley <jejb@parisc-linux.org>

Fixup bitops.h to use volatile for *_bit() ops. Based on this email thread:
http://marc.theaimsgroup.com/?t=108826637900003
In a nutshell:
	*_bit() want use of volatile.
	__*_bit() are "relaxed" and don't use spinlock or volatile.

Other minor changes:
o replace hweight64() macro with an alias to generic_hweight64() (Joel Soete)
o clean up ext2* macros so (a) it's obvious what the XOR magic is about
  and (b) one version works for both 32- and 64-bit.
o replace 2 uses of CONFIG_64BIT with __LP64__; bitops.h used both.
  Header files that might go to user space should use something
  userspace will know about (__LP64__).

Signed-off-by: Grant Grundler <grundler@parisc-linux.org>

Move SHIFT_PER_LONG to the standard location for BITS_PER_LONG
(asm/types.h) and ditch the second definition of BITS_PER_LONG in bitops.h.

Signed-off-by: Grant Grundler <grundler@parisc-linux.org>

Signed-off-by: Kyle McMartin <kyle@parisc-linux.org>
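Editor's note on the "XOR magic" mentioned above: it is what the new
LE_BYTE_ADDR constant in the diff below encodes. ext2 stores its bitmaps as
a little-endian byte stream, while test_bit()/set_bit() on this big-endian
machine index bits within native unsigned longs. XOR-ing the bit number with
(sizeof(unsigned long) - 1) << 3 (0x18 for 32-bit longs, 0x38 for 64-bit)
redirects the access to the mirrored byte of the word, so both views
enumerate the same bits. A minimal standalone sketch of that equivalence
(not part of the patch; helper names and test values are illustrative):

    #include <stdio.h>

    /* Same XOR constant the patch derives: (sizeof(unsigned long) - 1) << 3,
     * i.e. 0x18 when longs are 32 bits wide and 0x38 when they are 64. */
    #define LE_BYTE_ADDR ((sizeof(unsigned long) - 1) << 3)

    /* Bit 'nr' of a little-endian bitmap, read byte by byte: the
     * endian-independent on-disk meaning used by ext2. */
    static int le_test_bit(unsigned long nr, const void *addr)
    {
            const unsigned char *p = addr;
            return (p[nr >> 3] >> (nr & 7)) & 1;
    }

    /* Bit 'nr' indexed within native unsigned longs, as test_bit() does. */
    static int native_test_bit(unsigned long nr, const unsigned long *addr)
    {
            return (addr[nr / (8 * sizeof(unsigned long))] >>
                    (nr % (8 * sizeof(unsigned long)))) & 1;
    }

    int main(void)
    {
            unsigned long map[2] = { 0x12345678UL, 0x9abcdef0UL };
            const unsigned long one = 1;
            /* Little-endian host: both views already agree (mask 0).
             * Big-endian host: the patch's byte-address XOR is needed. */
            unsigned long xor_mask =
                    *(const unsigned char *)&one ? 0 : LE_BYTE_ADDR;
            unsigned long nr, nbits = 8 * sizeof(map);

            for (nr = 0; nr < nbits; nr++)
                    if (le_test_bit(nr, map) !=
                        native_test_bit(nr ^ xor_mask, map))
                            printf("mismatch at bit %lu\n", nr);
            return 0;
    }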
Diffstat (limited to 'include/asm-parisc')
-rw-r--r--  include/asm-parisc/bitops.h  290
-rw-r--r--  include/asm-parisc/types.h     2
2 files changed, 134 insertions, 158 deletions
diff --git a/include/asm-parisc/bitops.h b/include/asm-parisc/bitops.h
index af7db694b22d..55b98c67fd82 100644
--- a/include/asm-parisc/bitops.h
+++ b/include/asm-parisc/bitops.h
@@ -2,7 +2,7 @@
 #define _PARISC_BITOPS_H
 
 #include <linux/compiler.h>
-#include <asm/spinlock.h>
+#include <asm/types.h>		/* for BITS_PER_LONG/SHIFT_PER_LONG */
 #include <asm/byteorder.h>
 #include <asm/atomic.h>
 
@@ -12,193 +12,157 @@
  * to include/asm-i386/bitops.h or kerneldoc
  */
 
-#ifdef __LP64__
-#  define SHIFT_PER_LONG 6
-#ifndef BITS_PER_LONG
-#  define BITS_PER_LONG 64
-#endif
-#else
-#  define SHIFT_PER_LONG 5
-#ifndef BITS_PER_LONG
-#  define BITS_PER_LONG 32
-#endif
-#endif
-
-#define CHOP_SHIFTCOUNT(x) ((x) & (BITS_PER_LONG - 1))
+#define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1))
 
 
 #define smp_mb__before_clear_bit()	smp_mb()
 #define smp_mb__after_clear_bit()	smp_mb()
 
-static __inline__ void set_bit(int nr, volatile unsigned long * address)
+/* See http://marc.theaimsgroup.com/?t=108826637900003 for discussion
+ * on use of volatile and __*_bit() (set/clear/change):
+ *	*_bit() want use of volatile.
+ *	__*_bit() are "relaxed" and don't use spinlock or volatile.
+ */
+
+static __inline__ void set_bit(int nr, volatile unsigned long * addr)
 {
-	unsigned long mask;
-	unsigned long *addr = (unsigned long *) address;
+	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
 	unsigned long flags;
 
 	addr += (nr >> SHIFT_PER_LONG);
-	mask = 1L << CHOP_SHIFTCOUNT(nr);
 	_atomic_spin_lock_irqsave(addr, flags);
 	*addr |= mask;
 	_atomic_spin_unlock_irqrestore(addr, flags);
 }
 
-static __inline__ void __set_bit(int nr, volatile unsigned long * address)
+static __inline__ void __set_bit(unsigned long nr, volatile unsigned long * addr)
 {
-	unsigned long mask;
-	unsigned long *addr = (unsigned long *) address;
+	unsigned long *m = (unsigned long *) addr + (nr >> SHIFT_PER_LONG);
 
-	addr += (nr >> SHIFT_PER_LONG);
-	mask = 1L << CHOP_SHIFTCOUNT(nr);
-	*addr |= mask;
+	*m |= 1UL << CHOP_SHIFTCOUNT(nr);
 }
 
-static __inline__ void clear_bit(int nr, volatile unsigned long * address)
+static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
 {
-	unsigned long mask;
-	unsigned long *addr = (unsigned long *) address;
+	unsigned long mask = ~(1UL << CHOP_SHIFTCOUNT(nr));
 	unsigned long flags;
 
 	addr += (nr >> SHIFT_PER_LONG);
-	mask = 1L << CHOP_SHIFTCOUNT(nr);
 	_atomic_spin_lock_irqsave(addr, flags);
-	*addr &= ~mask;
+	*addr &= mask;
 	_atomic_spin_unlock_irqrestore(addr, flags);
 }
 
-static __inline__ void __clear_bit(unsigned long nr, volatile unsigned long * address)
+static __inline__ void __clear_bit(unsigned long nr, volatile unsigned long * addr)
 {
-	unsigned long mask;
-	unsigned long *addr = (unsigned long *) address;
+	unsigned long *m = (unsigned long *) addr + (nr >> SHIFT_PER_LONG);
 
-	addr += (nr >> SHIFT_PER_LONG);
-	mask = 1L << CHOP_SHIFTCOUNT(nr);
-	*addr &= ~mask;
+	*m &= ~(1UL << CHOP_SHIFTCOUNT(nr));
 }
 
-static __inline__ void change_bit(int nr, volatile unsigned long * address)
+static __inline__ void change_bit(int nr, volatile unsigned long * addr)
 {
-	unsigned long mask;
-	unsigned long *addr = (unsigned long *) address;
+	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
 	unsigned long flags;
 
 	addr += (nr >> SHIFT_PER_LONG);
-	mask = 1L << CHOP_SHIFTCOUNT(nr);
 	_atomic_spin_lock_irqsave(addr, flags);
 	*addr ^= mask;
 	_atomic_spin_unlock_irqrestore(addr, flags);
 }
 
-static __inline__ void __change_bit(int nr, volatile unsigned long * address)
+static __inline__ void __change_bit(unsigned long nr, volatile unsigned long * addr)
 {
-	unsigned long mask;
-	unsigned long *addr = (unsigned long *) address;
+	unsigned long *m = (unsigned long *) addr + (nr >> SHIFT_PER_LONG);
 
-	addr += (nr >> SHIFT_PER_LONG);
-	mask = 1L << CHOP_SHIFTCOUNT(nr);
-	*addr ^= mask;
+	*m ^= 1UL << CHOP_SHIFTCOUNT(nr);
 }
 
-static __inline__ int test_and_set_bit(int nr, volatile unsigned long * address)
+static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
 {
-	unsigned long mask;
-	unsigned long *addr = (unsigned long *) address;
-	int oldbit;
+	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+	unsigned long oldbit;
 	unsigned long flags;
 
 	addr += (nr >> SHIFT_PER_LONG);
-	mask = 1L << CHOP_SHIFTCOUNT(nr);
 	_atomic_spin_lock_irqsave(addr, flags);
-	oldbit = (*addr & mask) ? 1 : 0;
-	*addr |= mask;
+	oldbit = *addr;
+	*addr = oldbit | mask;
 	_atomic_spin_unlock_irqrestore(addr, flags);
 
-	return oldbit;
+	return (oldbit & mask) ? 1 : 0;
 }
 
 static __inline__ int __test_and_set_bit(int nr, volatile unsigned long * address)
 {
-	unsigned long mask;
-	unsigned long *addr = (unsigned long *) address;
-	int oldbit;
+	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+	unsigned long oldbit;
+	unsigned long *addr = (unsigned long *)address + (nr >> SHIFT_PER_LONG);
 
-	addr += (nr >> SHIFT_PER_LONG);
-	mask = 1L << CHOP_SHIFTCOUNT(nr);
-	oldbit = (*addr & mask) ? 1 : 0;
-	*addr |= mask;
+	oldbit = *addr;
+	*addr = oldbit | mask;
 
-	return oldbit;
+	return (oldbit & mask) ? 1 : 0;
 }
 
-static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * address)
+static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
 {
-	unsigned long mask;
-	unsigned long *addr = (unsigned long *) address;
-	int oldbit;
+	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+	unsigned long oldbit;
 	unsigned long flags;
 
 	addr += (nr >> SHIFT_PER_LONG);
-	mask = 1L << CHOP_SHIFTCOUNT(nr);
 	_atomic_spin_lock_irqsave(addr, flags);
-	oldbit = (*addr & mask) ? 1 : 0;
-	*addr &= ~mask;
+	oldbit = *addr;
+	*addr = oldbit & ~mask;
 	_atomic_spin_unlock_irqrestore(addr, flags);
 
-	return oldbit;
+	return (oldbit & mask) ? 1 : 0;
 }
 
 static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long * address)
 {
-	unsigned long mask;
-	unsigned long *addr = (unsigned long *) address;
-	int oldbit;
+	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+	unsigned long *addr = (unsigned long *)address + (nr >> SHIFT_PER_LONG);
+	unsigned long oldbit;
 
-	addr += (nr >> SHIFT_PER_LONG);
-	mask = 1L << CHOP_SHIFTCOUNT(nr);
-	oldbit = (*addr & mask) ? 1 : 0;
-	*addr &= ~mask;
+	oldbit = *addr;
+	*addr = oldbit & ~mask;
 
-	return oldbit;
+	return (oldbit & mask) ? 1 : 0;
 }
 
-static __inline__ int test_and_change_bit(int nr, volatile unsigned long * address)
+static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
 {
-	unsigned long mask;
-	unsigned long *addr = (unsigned long *) address;
-	int oldbit;
+	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+	unsigned long oldbit;
 	unsigned long flags;
 
 	addr += (nr >> SHIFT_PER_LONG);
-	mask = 1L << CHOP_SHIFTCOUNT(nr);
 	_atomic_spin_lock_irqsave(addr, flags);
-	oldbit = (*addr & mask) ? 1 : 0;
-	*addr ^= mask;
+	oldbit = *addr;
+	*addr = oldbit ^ mask;
 	_atomic_spin_unlock_irqrestore(addr, flags);
 
-	return oldbit;
+	return (oldbit & mask) ? 1 : 0;
 }
 
 static __inline__ int __test_and_change_bit(int nr, volatile unsigned long * address)
 {
-	unsigned long mask;
-	unsigned long *addr = (unsigned long *) address;
-	int oldbit;
+	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+	unsigned long *addr = (unsigned long *)address + (nr >> SHIFT_PER_LONG);
+	unsigned long oldbit;
 
-	addr += (nr >> SHIFT_PER_LONG);
-	mask = 1L << CHOP_SHIFTCOUNT(nr);
-	oldbit = (*addr & mask) ? 1 : 0;
-	*addr ^= mask;
+	oldbit = *addr;
+	*addr = oldbit ^ mask;
 
-	return oldbit;
+	return (oldbit & mask) ? 1 : 0;
 }
 
 static __inline__ int test_bit(int nr, const volatile unsigned long *address)
 {
-	unsigned long mask;
-	const unsigned long *addr = (const unsigned long *)address;
-
-	addr += (nr >> SHIFT_PER_LONG);
-	mask = 1L << CHOP_SHIFTCOUNT(nr);
+	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+	const unsigned long *addr = (const unsigned long *)address + (nr >> SHIFT_PER_LONG);
 
 	return !!(*addr & mask);
 }
@@ -229,7 +193,7 @@ static __inline__ unsigned long __ffs(unsigned long x)
 	unsigned long ret;
 
 	__asm__(
-#if BITS_PER_LONG > 32
+#ifdef __LP64__
 		"	ldi		63,%1\n"
 		"	extrd,u,*<>	%0,63,32,%%r0\n"
 		"	extrd,u,*TR	%0,31,32,%0\n"	/* move top 32-bits down */
@@ -304,14 +268,7 @@ static __inline__ int fls(int x)
  * hweightN: returns the hamming weight (i.e. the number
  * of bits set) of a N-bit word
  */
-#define hweight64(x)						\
-({								\
-	unsigned long __x = (x);				\
-	unsigned int __w;					\
-	__w = generic_hweight32((unsigned int) __x);		\
-	__w += generic_hweight32((unsigned int) (__x>>32));	\
-	__w;							\
-})
+#define hweight64(x) generic_hweight64(x)
 #define hweight32(x) generic_hweight32(x)
 #define hweight16(x) generic_hweight16(x)
 #define hweight8(x) generic_hweight8(x)
@@ -324,7 +281,13 @@ static __inline__ int fls(int x)
  */
 static inline int sched_find_first_bit(const unsigned long *b)
 {
-#ifndef __LP64__
+#ifdef __LP64__
+	if (unlikely(b[0]))
+		return __ffs(b[0]);
+	if (unlikely(b[1]))
+		return __ffs(b[1]) + 64;
+	return __ffs(b[2]) + 128;
+#else
 	if (unlikely(b[0]))
 		return __ffs(b[0]);
 	if (unlikely(b[1]))
@@ -334,14 +297,6 @@ static inline int sched_find_first_bit(const unsigned long *b)
 	if (b[3])
 		return __ffs(b[3]) + 96;
 	return __ffs(b[4]) + 128;
-#else
-	if (unlikely(b[0]))
-		return __ffs(b[0]);
-	if (unlikely(((unsigned int)b[1])))
-		return __ffs(b[1]) + 64;
-	if (b[1] >> 32)
-		return __ffs(b[1] >> 32) + 96;
-	return __ffs(b[2]) + 128;
 #endif
 }
 
@@ -391,7 +346,7 @@ found_middle:
 
 static __inline__ unsigned long find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset)
 {
-	const unsigned long *p = addr + (offset >> 6);
+	const unsigned long *p = addr + (offset >> SHIFT_PER_LONG);
 	unsigned long result = offset & ~(BITS_PER_LONG-1);
 	unsigned long tmp;
 
@@ -445,71 +400,90 @@ found_middle:
  * test_and_{set,clear}_bit guarantee atomicity without
  * disabling interrupts.
  */
-#ifdef __LP64__
-#define ext2_set_bit(nr, addr)		__test_and_set_bit((nr) ^ 0x38, (unsigned long *)addr)
-#define ext2_set_bit_atomic(l,nr,addr)	test_and_set_bit((nr) ^ 0x38, (unsigned long *)addr)
-#define ext2_clear_bit(nr, addr)	__test_and_clear_bit((nr) ^ 0x38, (unsigned long *)addr)
-#define ext2_clear_bit_atomic(l,nr,addr) test_and_clear_bit((nr) ^ 0x38, (unsigned long *)addr)
-#else
-#define ext2_set_bit(nr, addr)		__test_and_set_bit((nr) ^ 0x18, (unsigned long *)addr)
-#define ext2_set_bit_atomic(l,nr,addr)	test_and_set_bit((nr) ^ 0x18, (unsigned long *)addr)
-#define ext2_clear_bit(nr, addr)	__test_and_clear_bit((nr) ^ 0x18, (unsigned long *)addr)
-#define ext2_clear_bit_atomic(l,nr,addr) test_and_clear_bit((nr) ^ 0x18, (unsigned long *)addr)
-#endif
 
-#endif /* __KERNEL__ */
+/* '3' is bits per byte */
+#define LE_BYTE_ADDR ((sizeof(unsigned long) - 1) << 3)
 
-static __inline__ int ext2_test_bit(int nr, __const__ void * addr)
-{
-	__const__ unsigned char	*ADDR = (__const__ unsigned char *) addr;
+#define ext2_test_bit(nr, addr) \
+			test_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
+#define ext2_set_bit(nr, addr) \
+		__test_and_set_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
+#define ext2_clear_bit(nr, addr) \
+		__test_and_clear_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
 
-	return (ADDR[nr >> 3] >> (nr & 7)) & 1;
-}
+#define ext2_set_bit_atomic(l,nr,addr) \
+		test_and_set_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
+#define ext2_clear_bit_atomic(l,nr,addr) \
+		test_and_clear_bit( (nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
+
+#endif	/* __KERNEL__ */
 
-/*
- * This implementation of ext2_find_{first,next}_zero_bit was stolen from
- * Linus' asm-alpha/bitops.h and modified for a big-endian machine.
- */
 
 #define ext2_find_first_zero_bit(addr, size) \
 	ext2_find_next_zero_bit((addr), (size), 0)
 
-extern __inline__ unsigned long ext2_find_next_zero_bit(void *addr,
-	unsigned long size, unsigned long offset)
+/* include/linux/byteorder does not support "unsigned long" type */
+static inline unsigned long ext2_swabp(unsigned long * x)
 {
-	unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
-	unsigned int result = offset & ~31UL;
-	unsigned int tmp;
+#ifdef __LP64__
+	return (unsigned long) __swab64p((u64 *) x);
+#else
+	return (unsigned long) __swab32p((u32 *) x);
+#endif
+}
+
+/* include/linux/byteorder doesn't support "unsigned long" type */
+static inline unsigned long ext2_swab(unsigned long y)
+{
+#ifdef __LP64__
+	return (unsigned long) __swab64((u64) y);
+#else
+	return (unsigned long) __swab32((u32) y);
+#endif
+}
+
+static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
+{
+	unsigned long *p = (unsigned long *) addr + (offset >> SHIFT_PER_LONG);
+	unsigned long result = offset & ~(BITS_PER_LONG - 1);
+	unsigned long tmp;
 
 	if (offset >= size)
 		return size;
 	size -= result;
-	offset &= 31UL;
+	offset &= (BITS_PER_LONG - 1UL);
 	if (offset) {
-		tmp = cpu_to_le32p(p++);
-		tmp |= ~0UL >> (32-offset);
-		if (size < 32)
+		tmp = ext2_swabp(p++);
+		tmp |= (~0UL >> (BITS_PER_LONG - offset));
+		if (size < BITS_PER_LONG)
 			goto found_first;
-		if (tmp != ~0U)
+		if (~tmp)
 			goto found_middle;
-		size -= 32;
-		result += 32;
+		size -= BITS_PER_LONG;
+		result += BITS_PER_LONG;
 	}
-	while (size >= 32) {
-		if ((tmp = cpu_to_le32p(p++)) != ~0U)
-			goto found_middle;
-		result += 32;
-		size -= 32;
+
+	while (size & ~(BITS_PER_LONG - 1)) {
+		if (~(tmp = *(p++)))
+			goto found_middle_swap;
+		result += BITS_PER_LONG;
+		size -= BITS_PER_LONG;
 	}
 	if (!size)
 		return result;
-	tmp = cpu_to_le32p(p);
+	tmp = ext2_swabp(p);
 found_first:
-	tmp |= ~0U << size;
+	tmp |= ~0UL << size;
+	if (tmp == ~0UL)	/* Are any bits zero? */
+		return result + size; /* Nope. Skip ffz */
 found_middle:
 	return result + ffz(tmp);
+
+found_middle_swap:
+	return result + ffz(ext2_swab(tmp));
 }
 
+
 /* Bitmap functions for the minix filesystem.  */
 #define minix_test_and_set_bit(nr,addr) ext2_set_bit(nr,addr)
 #define minix_set_bit(nr,addr) ((void)ext2_set_bit(nr,addr))
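Editor's note on the rewritten ext2_find_next_zero_bit above: the main loop
tests raw, unswapped words and only byte-swaps the word that ends the scan
(the found_middle_swap path). That is safe because a word with every bit set
reads identically in either byte order, so "does this word contain a zero
bit?" can be answered before swapping. A tiny host-side check of that
invariant (a sketch assuming GCC/Clang's __builtin_bswap64; not kernel code):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            /* All-ones (and all-zeroes) words read the same in either
             * byte order, so full words can be rejected unswapped. */
            uint64_t full = ~(uint64_t)0;
            assert(__builtin_bswap64(full) == full);
            assert(__builtin_bswap64((uint64_t)0) == 0);

            /* A word that does contain a zero bit generally changes
             * under swapping, which is why the found_middle_swap path
             * must reorder it before ffz() names the bit. */
            uint64_t gap = full ^ ((uint64_t)1 << 8);   /* bit 8 clear */
            assert(__builtin_bswap64(gap) != gap);
            return 0;
    }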
diff --git a/include/asm-parisc/types.h b/include/asm-parisc/types.h
index d21b9d0d63ea..34fdce361a5a 100644
--- a/include/asm-parisc/types.h
+++ b/include/asm-parisc/types.h
@@ -33,8 +33,10 @@ typedef unsigned long long __u64;
 
 #ifdef __LP64__
 #define BITS_PER_LONG 64
+#define SHIFT_PER_LONG 6
 #else
 #define BITS_PER_LONG 32
+#define SHIFT_PER_LONG 5
 #endif
 
 #ifndef __ASSEMBLY__
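Editor's note: the two constants added here must move in lock-step.
SHIFT_PER_LONG is log2(BITS_PER_LONG), and bitops.h splits a bit number
into a word index (nr >> SHIFT_PER_LONG) and a bit within the word
(CHOP_SHIFTCOUNT(nr)). A sketch of a compile-time guard one could add;
the typedef name is hypothetical, and the patch itself simply keeps the
two #defines adjacent:

    /* Mirrors the new definitions from asm/types.h (illustrative copy): */
    #ifdef __LP64__
    #define BITS_PER_LONG 64
    #define SHIFT_PER_LONG 6
    #else
    #define BITS_PER_LONG 32
    #define SHIFT_PER_LONG 5
    #endif

    /* C89-style static assertion: the array size goes negative, and the
     * build fails, if BITS_PER_LONG != (1 << SHIFT_PER_LONG). */
    typedef char shift_matches_bits[
            (BITS_PER_LONG == (1 << SHIFT_PER_LONG)) ? 1 : -1];

    /* How bitops.h consumes the pair for a bit number nr:
     *   word index  = nr >> SHIFT_PER_LONG         (nr / BITS_PER_LONG)
     *   bit in word = nr & (BITS_PER_LONG - 1)     (CHOP_SHIFTCOUNT(nr)) */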