Diffstat (limited to 'include/asm-powerpc/bitops.h')
-rw-r--r--	include/asm-powerpc/bitops.h	105
1 file changed, 4 insertions(+), 101 deletions(-)
diff --git a/include/asm-powerpc/bitops.h b/include/asm-powerpc/bitops.h
index bf6941a810b8..d1c2a4405660 100644
--- a/include/asm-powerpc/bitops.h
+++ b/include/asm-powerpc/bitops.h
@@ -184,72 +184,7 @@ static __inline__ void set_bits(unsigned long mask, unsigned long *addr)
 	: "cc");
 }
 
-/* Non-atomic versions */
-static __inline__ int test_bit(unsigned long nr,
-			       __const__ volatile unsigned long *addr)
-{
-	return 1UL & (addr[BITOP_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
-}
-
-static __inline__ void __set_bit(unsigned long nr,
-				 volatile unsigned long *addr)
-{
-	unsigned long mask = BITOP_MASK(nr);
-	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
-
-	*p |= mask;
-}
-
-static __inline__ void __clear_bit(unsigned long nr,
-				   volatile unsigned long *addr)
-{
-	unsigned long mask = BITOP_MASK(nr);
-	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
-
-	*p &= ~mask;
-}
-
-static __inline__ void __change_bit(unsigned long nr,
-				    volatile unsigned long *addr)
-{
-	unsigned long mask = BITOP_MASK(nr);
-	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
-
-	*p ^= mask;
-}
-
-static __inline__ int __test_and_set_bit(unsigned long nr,
-					 volatile unsigned long *addr)
-{
-	unsigned long mask = BITOP_MASK(nr);
-	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
-	unsigned long old = *p;
-
-	*p = old | mask;
-	return (old & mask) != 0;
-}
-
-static __inline__ int __test_and_clear_bit(unsigned long nr,
-					   volatile unsigned long *addr)
-{
-	unsigned long mask = BITOP_MASK(nr);
-	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
-	unsigned long old = *p;
-
-	*p = old & ~mask;
-	return (old & mask) != 0;
-}
-
-static __inline__ int __test_and_change_bit(unsigned long nr,
-					    volatile unsigned long *addr)
-{
-	unsigned long mask = BITOP_MASK(nr);
-	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
-	unsigned long old = *p;
-
-	*p = old ^ mask;
-	return (old & mask) != 0;
-}
+#include <asm-generic/bitops/non-atomic.h>
 
 /*
  * Return the zero-based bit position (LE, not IBM bit numbering) of
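
Note on the hunk above: each removed helper follows the same pattern. It computes a single-bit mask with BITOP_MASK(), picks the containing word with BITOP_WORD(), and then performs a plain read-modify-write, so the caller must provide its own serialization (that is what distinguishes these from the atomic set_bit() and friends earlier in the header). The generic asm-generic/bitops/non-atomic.h supplies the same operations, which is why the hunk is a pure deletion plus one #include. A minimal userspace sketch of the pattern, assuming a 64-bit unsigned long (the demo_* names are hypothetical, not the kernel API):

#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define BITOP_MASK(nr)	(1UL << ((nr) % BITS_PER_LONG))
#define BITOP_WORD(nr)	((nr) / BITS_PER_LONG)

/* Plain read-modify-write, like the removed __set_bit(): no atomicity. */
static void demo_set_bit(unsigned long nr, unsigned long *addr)
{
	addr[BITOP_WORD(nr)] |= BITOP_MASK(nr);
}

/* Like the removed test_bit(): shift the word down, keep the low bit. */
static int demo_test_bit(unsigned long nr, const unsigned long *addr)
{
	return 1UL & (addr[BITOP_WORD(nr)] >> (nr % BITS_PER_LONG));
}

int main(void)
{
	unsigned long map[2] = { 0, 0 };	/* 128-bit bitmap */

	demo_set_bit(70, map);			/* word 1, bit 6 */
	printf("bit 70 = %d, map[1] = %#lx\n", demo_test_bit(70, map), map[1]);
	return 0;
}

On a 64-bit build this prints "bit 70 = 1, map[1] = 0x40", since bit 70 lands in bit 6 of the second word.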
@@ -310,16 +245,9 @@ static __inline__ int fls(unsigned int x)
 	asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x));
 	return 32 - lz;
 }
-#define fls64(x) generic_fls64(x)
+#include <asm-generic/bitops/fls64.h>
 
-/*
- * hweightN: returns the hamming weight (i.e. the number
- * of bits set) of a N-bit word
- */
-#define hweight64(x) generic_hweight64(x)
-#define hweight32(x) generic_hweight32(x)
-#define hweight16(x) generic_hweight16(x)
-#define hweight8(x) generic_hweight8(x)
+#include <asm-generic/bitops/hweight.h>
 
 #define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
 unsigned long find_next_zero_bit(const unsigned long *addr,
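
Here fls() itself stays PowerPC-specific, since cntlzw yields the answer directly (fls(x) == 32 - cntlzw(x), as the context lines show); only fls64() and the hweight*() family are handed off to the generic headers. Generic hweight is the usual branch-free parallel bit count. A self-contained sketch of that technique, using the classic mask constants (hedged: written from the standard algorithm, not copied from asm-generic/bitops/hweight.h):

#include <stdio.h>

/* Count set bits by summing in parallel: 2-, 4-, 8-, then 16-bit groups. */
static unsigned int demo_hweight32(unsigned int w)
{
	unsigned int res = w - ((w >> 1) & 0x55555555);		/* 2-bit sums */
	res = (res & 0x33333333) + ((res >> 2) & 0x33333333);	/* 4-bit sums */
	res = (res + (res >> 4)) & 0x0f0f0f0f;			/* 8-bit sums */
	res = res + (res >> 8);					/* 16-bit sums */
	return (res + (res >> 16)) & 0xff;			/* total */
}

int main(void)
{
	printf("hweight32(0xdeadbeef) = %u\n", demo_hweight32(0xdeadbeef));
	return 0;
}

0xdeadbeef has 24 set bits, so the program prints 24.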
@@ -397,32 +325,7 @@ unsigned long find_next_zero_le_bit(const unsigned long *addr,
 #define minix_find_first_zero_bit(addr,size) \
 	find_first_zero_le_bit((unsigned long *)addr, size)
 
-/*
- * Every architecture must define this function. It's the fastest
- * way of searching a 140-bit bitmap where the first 100 bits are
- * unlikely to be set. It's guaranteed that at least one of the 140
- * bits is cleared.
- */
-static inline int sched_find_first_bit(const unsigned long *b)
-{
-#ifdef CONFIG_PPC64
-	if (unlikely(b[0]))
-		return __ffs(b[0]);
-	if (unlikely(b[1]))
-		return __ffs(b[1]) + 64;
-	return __ffs(b[2]) + 128;
-#else
-	if (unlikely(b[0]))
-		return __ffs(b[0]);
-	if (unlikely(b[1]))
-		return __ffs(b[1]) + 32;
-	if (unlikely(b[2]))
-		return __ffs(b[2]) + 64;
-	if (b[3])
-		return __ffs(b[3]) + 96;
-	return __ffs(b[4]) + 128;
-#endif
-}
+#include <asm-generic/bitops/sched.h>
 
 #endif /* __KERNEL__ */
 
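
The removed sched_find_first_bit() exists to scan the scheduler's 140-bit priority bitmap word by word, taking the first set bit it finds; the generic asm-generic/bitops/sched.h provides the same kind of unrolled search keyed on BITS_PER_LONG instead of CONFIG_PPC64, so the per-architecture copy is redundant. A runnable userspace sketch of the 64-bit branch (the demo_* names are hypothetical, and __builtin_ctzl stands in for the kernel's __ffs()):

#include <stdio.h>

/* Index of the least-significant set bit; x must be non-zero. */
static int demo_ffs(unsigned long x)
{
	return __builtin_ctzl(x);
}

/* 140-bit bitmap in three 64-bit words; at least one bit is set. */
static int demo_sched_find_first_bit(const unsigned long *b)
{
	if (b[0])
		return demo_ffs(b[0]);
	if (b[1])
		return demo_ffs(b[1]) + 64;
	return demo_ffs(b[2]) + 128;
}

int main(void)
{
	unsigned long prio[3] = { 0, 0, 1UL << 10 };	/* only bit 138 set */

	printf("first set bit: %d\n", demo_sched_find_first_bit(prio));
	return 0;
}

With only bit 10 of the third word set, the search falls through both early tests and prints 138 (128 + 10).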