author     Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d  /include/asm-parisc/bitops.h
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'include/asm-parisc/bitops.h')
-rw-r--r--  include/asm-parisc/bitops.h  520
1 file changed, 520 insertions, 0 deletions
diff --git a/include/asm-parisc/bitops.h b/include/asm-parisc/bitops.h
new file mode 100644
index 000000000000..928e5ef850bd
--- /dev/null
+++ b/include/asm-parisc/bitops.h
@@ -0,0 +1,520 @@
#ifndef _PARISC_BITOPS_H
#define _PARISC_BITOPS_H

#include <linux/compiler.h>
#include <asm/system.h>
#include <asm/byteorder.h>
#include <asm/atomic.h>

/*
 * HP-PARISC specific bit operations
 * for a detailed description of the functions please refer
 * to include/asm-i386/bitops.h or kerneldoc
 */

#ifdef __LP64__
# define SHIFT_PER_LONG 6
#ifndef BITS_PER_LONG
# define BITS_PER_LONG 64
#endif
#else
# define SHIFT_PER_LONG 5
#ifndef BITS_PER_LONG
# define BITS_PER_LONG 32
#endif
#endif

#define CHOP_SHIFTCOUNT(x) ((x) & (BITS_PER_LONG - 1))


#define smp_mb__before_clear_bit() smp_mb()
#define smp_mb__after_clear_bit() smp_mb()

static __inline__ void set_bit(int nr, volatile unsigned long * address)
{
        unsigned long mask;
        unsigned long *addr = (unsigned long *) address;
        unsigned long flags;

        addr += (nr >> SHIFT_PER_LONG);
        mask = 1L << CHOP_SHIFTCOUNT(nr);
        _atomic_spin_lock_irqsave(addr, flags);
        *addr |= mask;
        _atomic_spin_unlock_irqrestore(addr, flags);
}

static __inline__ void __set_bit(int nr, volatile unsigned long * address)
{
        unsigned long mask;
        unsigned long *addr = (unsigned long *) address;

        addr += (nr >> SHIFT_PER_LONG);
        mask = 1L << CHOP_SHIFTCOUNT(nr);
        *addr |= mask;
}

static __inline__ void clear_bit(int nr, volatile unsigned long * address)
{
        unsigned long mask;
        unsigned long *addr = (unsigned long *) address;
        unsigned long flags;

        addr += (nr >> SHIFT_PER_LONG);
        mask = 1L << CHOP_SHIFTCOUNT(nr);
        _atomic_spin_lock_irqsave(addr, flags);
        *addr &= ~mask;
        _atomic_spin_unlock_irqrestore(addr, flags);
}

static __inline__ void __clear_bit(unsigned long nr, volatile unsigned long * address)
{
        unsigned long mask;
        unsigned long *addr = (unsigned long *) address;

        addr += (nr >> SHIFT_PER_LONG);
        mask = 1L << CHOP_SHIFTCOUNT(nr);
        *addr &= ~mask;
}

static __inline__ void change_bit(int nr, volatile unsigned long * address)
{
        unsigned long mask;
        unsigned long *addr = (unsigned long *) address;
        unsigned long flags;

        addr += (nr >> SHIFT_PER_LONG);
        mask = 1L << CHOP_SHIFTCOUNT(nr);
        _atomic_spin_lock_irqsave(addr, flags);
        *addr ^= mask;
        _atomic_spin_unlock_irqrestore(addr, flags);
}

static __inline__ void __change_bit(int nr, volatile unsigned long * address)
{
        unsigned long mask;
        unsigned long *addr = (unsigned long *) address;

        addr += (nr >> SHIFT_PER_LONG);
        mask = 1L << CHOP_SHIFTCOUNT(nr);
        *addr ^= mask;
}

static __inline__ int test_and_set_bit(int nr, volatile unsigned long * address)
{
        unsigned long mask;
        unsigned long *addr = (unsigned long *) address;
        int oldbit;
        unsigned long flags;

        addr += (nr >> SHIFT_PER_LONG);
        mask = 1L << CHOP_SHIFTCOUNT(nr);
        _atomic_spin_lock_irqsave(addr, flags);
        oldbit = (*addr & mask) ? 1 : 0;
        *addr |= mask;
        _atomic_spin_unlock_irqrestore(addr, flags);

        return oldbit;
}

static __inline__ int __test_and_set_bit(int nr, volatile unsigned long * address)
{
        unsigned long mask;
        unsigned long *addr = (unsigned long *) address;
        int oldbit;

        addr += (nr >> SHIFT_PER_LONG);
        mask = 1L << CHOP_SHIFTCOUNT(nr);
        oldbit = (*addr & mask) ? 1 : 0;
        *addr |= mask;

        return oldbit;
}

static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * address)
{
        unsigned long mask;
        unsigned long *addr = (unsigned long *) address;
        int oldbit;
        unsigned long flags;

        addr += (nr >> SHIFT_PER_LONG);
        mask = 1L << CHOP_SHIFTCOUNT(nr);
        _atomic_spin_lock_irqsave(addr, flags);
        oldbit = (*addr & mask) ? 1 : 0;
        *addr &= ~mask;
        _atomic_spin_unlock_irqrestore(addr, flags);

        return oldbit;
}

static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long * address)
{
        unsigned long mask;
        unsigned long *addr = (unsigned long *) address;
        int oldbit;

        addr += (nr >> SHIFT_PER_LONG);
        mask = 1L << CHOP_SHIFTCOUNT(nr);
        oldbit = (*addr & mask) ? 1 : 0;
        *addr &= ~mask;

        return oldbit;
}

static __inline__ int test_and_change_bit(int nr, volatile unsigned long * address)
{
        unsigned long mask;
        unsigned long *addr = (unsigned long *) address;
        int oldbit;
        unsigned long flags;

        addr += (nr >> SHIFT_PER_LONG);
        mask = 1L << CHOP_SHIFTCOUNT(nr);
        _atomic_spin_lock_irqsave(addr, flags);
        oldbit = (*addr & mask) ? 1 : 0;
        *addr ^= mask;
        _atomic_spin_unlock_irqrestore(addr, flags);

        return oldbit;
}

static __inline__ int __test_and_change_bit(int nr, volatile unsigned long * address)
{
        unsigned long mask;
        unsigned long *addr = (unsigned long *) address;
        int oldbit;

        addr += (nr >> SHIFT_PER_LONG);
        mask = 1L << CHOP_SHIFTCOUNT(nr);
        oldbit = (*addr & mask) ? 1 : 0;
        *addr ^= mask;

        return oldbit;
}

static __inline__ int test_bit(int nr, const volatile unsigned long *address)
{
        unsigned long mask;
        const unsigned long *addr = (const unsigned long *)address;

        addr += (nr >> SHIFT_PER_LONG);
        mask = 1L << CHOP_SHIFTCOUNT(nr);

        return !!(*addr & mask);
}

#ifdef __KERNEL__

/**
 * __ffs - find first bit in word. returns 0 to "BITS_PER_LONG-1".
 * @word: The word to search
 *
 * __ffs() return is undefined if no bit is set.
 *
 * 32-bit fast __ffs by LaMont Jones "lamont At hp com".
 * 64-bit enhancement by Grant Grundler "grundler At parisc-linux org".
 * (with help from willy/jejb to get the semantics right)
 *
 * This algorithm avoids branches by making use of nullification.
 * One side effect of "extr" instructions is it sets PSW[N] bit.
 * How PSW[N] (nullify next insn) gets set is determined by the
 * "condition" field (eg "<>" or "TR" below) in the extr* insn.
 * Only the 1st and one of either the 2nd or 3rd insn will get executed.
 * Each set of 3 insn will get executed in 2 cycles on PA8x00 vs 16 or so
 * cycles for each mispredicted branch.
 */

static __inline__ unsigned long __ffs(unsigned long x)
{
        unsigned long ret;

        __asm__(
#if BITS_PER_LONG > 32
        " ldi 63,%1\n"
        " extrd,u,*<> %0,63,32,%%r0\n"
        " extrd,u,*TR %0,31,32,%0\n"    /* move top 32-bits down */
        " addi -32,%1,%1\n"
#else
        " ldi 31,%1\n"
#endif
        " extru,<> %0,31,16,%%r0\n"
        " extru,TR %0,15,16,%0\n"       /* xxxx0000 -> 0000xxxx */
        " addi -16,%1,%1\n"
        " extru,<> %0,31,8,%%r0\n"
        " extru,TR %0,23,8,%0\n"        /* 0000xx00 -> 000000xx */
        " addi -8,%1,%1\n"
        " extru,<> %0,31,4,%%r0\n"
        " extru,TR %0,27,4,%0\n"        /* 000000x0 -> 0000000x */
        " addi -4,%1,%1\n"
        " extru,<> %0,31,2,%%r0\n"
        " extru,TR %0,29,2,%0\n"        /* 0000000y, 1100b -> 0011b */
        " addi -2,%1,%1\n"
        " extru,= %0,31,1,%%r0\n"       /* check last bit */
        " addi -1,%1,%1\n"
        : "+r" (x), "=r" (ret) );
        return ret;
}

/* Undefined if no bit is zero. */
#define ffz(x) __ffs(~x)

/*
 * ffs: find first bit set. returns 1 to BITS_PER_LONG or 0 (if none set)
 * This is defined the same way as the libc and compiler builtin
 * ffs routines, therefore differs in spirit from the above ffz (man ffs).
 */
static __inline__ int ffs(int x)
{
        return x ? (__ffs((unsigned long)x) + 1) : 0;
}

/*
 * fls: find last (most significant) bit set.
 * fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */

static __inline__ int fls(int x)
{
        int ret;
        if (!x)
                return 0;

        __asm__(
        " ldi 1,%1\n"
        " extru,<> %0,15,16,%%r0\n"
        " zdep,TR %0,15,16,%0\n"        /* xxxx0000 */
        " addi 16,%1,%1\n"
        " extru,<> %0,7,8,%%r0\n"
        " zdep,TR %0,23,24,%0\n"        /* xx000000 */
        " addi 8,%1,%1\n"
        " extru,<> %0,3,4,%%r0\n"
        " zdep,TR %0,27,28,%0\n"        /* x0000000 */
        " addi 4,%1,%1\n"
        " extru,<> %0,1,2,%%r0\n"
        " zdep,TR %0,29,30,%0\n"        /* y0000000 (y&3 = 0) */
        " addi 2,%1,%1\n"
        " extru,= %0,0,1,%%r0\n"
        " addi 1,%1,%1\n"               /* if y & 8, add 1 */
        : "+r" (x), "=r" (ret) );

        return ret;
}

/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word
 */
#define hweight64(x) \
({ \
        unsigned long __x = (x); \
        unsigned int __w; \
        __w = generic_hweight32((unsigned int) __x); \
        __w += generic_hweight32((unsigned int) (__x>>32)); \
        __w; \
})
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is cleared.
 */
static inline int sched_find_first_bit(const unsigned long *b)
{
#ifndef __LP64__
        if (unlikely(b[0]))
                return __ffs(b[0]);
        if (unlikely(b[1]))
                return __ffs(b[1]) + 32;
        if (unlikely(b[2]))
                return __ffs(b[2]) + 64;
        if (b[3])
                return __ffs(b[3]) + 96;
        return __ffs(b[4]) + 128;
#else
        if (unlikely(b[0]))
                return __ffs(b[0]);
        if (unlikely(((unsigned int)b[1])))
                return __ffs(b[1]) + 64;
        if (b[1] >> 32)
                return __ffs(b[1] >> 32) + 96;
        return __ffs(b[2]) + 128;
#endif
}

#endif /* __KERNEL__ */

/*
 * This implementation of find_{first,next}_zero_bit was stolen from
 * Linus' asm-alpha/bitops.h.
 */
#define find_first_zero_bit(addr, size) \
        find_next_zero_bit((addr), (size), 0)

static __inline__ unsigned long find_next_zero_bit(const void * addr, unsigned long size, unsigned long offset)
{
        const unsigned long * p = ((unsigned long *) addr) + (offset >> SHIFT_PER_LONG);
        unsigned long result = offset & ~(BITS_PER_LONG-1);
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= (BITS_PER_LONG-1);
        if (offset) {
                tmp = *(p++);
                tmp |= ~0UL >> (BITS_PER_LONG-offset);
                if (size < BITS_PER_LONG)
                        goto found_first;
                if (~tmp)
                        goto found_middle;
                size -= BITS_PER_LONG;
                result += BITS_PER_LONG;
        }
        while (size & ~(BITS_PER_LONG -1)) {
                if (~(tmp = *(p++)))
                        goto found_middle;
                result += BITS_PER_LONG;
                size -= BITS_PER_LONG;
        }
        if (!size)
                return result;
        tmp = *p;
found_first:
        tmp |= ~0UL << size;
found_middle:
        return result + ffz(tmp);
}

static __inline__ unsigned long find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset)
{
        const unsigned long *p = addr + (offset >> 6);
        unsigned long result = offset & ~(BITS_PER_LONG-1);
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= (BITS_PER_LONG-1);
        if (offset) {
                tmp = *(p++);
                tmp &= (~0UL << offset);
                if (size < BITS_PER_LONG)
                        goto found_first;
                if (tmp)
                        goto found_middle;
                size -= BITS_PER_LONG;
                result += BITS_PER_LONG;
        }
        while (size & ~(BITS_PER_LONG-1)) {
                if ((tmp = *(p++)))
                        goto found_middle;
                result += BITS_PER_LONG;
                size -= BITS_PER_LONG;
        }
        if (!size)
                return result;
        tmp = *p;

found_first:
        tmp &= (~0UL >> (BITS_PER_LONG - size));
        if (tmp == 0UL)         /* Are any bits set? */
                return result + size;   /* Nope. */
found_middle:
        return result + __ffs(tmp);
}

/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first set bit, not the number of the byte
 * containing a bit.
 */
#define find_first_bit(addr, size) \
        find_next_bit((addr), (size), 0)

#define _EXT2_HAVE_ASM_BITOPS_

#ifdef __KERNEL__
/*
 * test_and_{set,clear}_bit guarantee atomicity without
 * disabling interrupts.
 */
#ifdef __LP64__
#define ext2_set_bit(nr, addr) __test_and_set_bit((nr) ^ 0x38, (unsigned long *)addr)
#define ext2_set_bit_atomic(l,nr,addr) test_and_set_bit((nr) ^ 0x38, (unsigned long *)addr)
#define ext2_clear_bit(nr, addr) __test_and_clear_bit((nr) ^ 0x38, (unsigned long *)addr)
#define ext2_clear_bit_atomic(l,nr,addr) test_and_clear_bit((nr) ^ 0x38, (unsigned long *)addr)
#else
#define ext2_set_bit(nr, addr) __test_and_set_bit((nr) ^ 0x18, (unsigned long *)addr)
#define ext2_set_bit_atomic(l,nr,addr) test_and_set_bit((nr) ^ 0x18, (unsigned long *)addr)
#define ext2_clear_bit(nr, addr) __test_and_clear_bit((nr) ^ 0x18, (unsigned long *)addr)
#define ext2_clear_bit_atomic(l,nr,addr) test_and_clear_bit((nr) ^ 0x18, (unsigned long *)addr)
#endif

#endif /* __KERNEL__ */

static __inline__ int ext2_test_bit(int nr, __const__ void * addr)
{
        __const__ unsigned char *ADDR = (__const__ unsigned char *) addr;

        return (ADDR[nr >> 3] >> (nr & 7)) & 1;
}

/*
 * This implementation of ext2_find_{first,next}_zero_bit was stolen from
 * Linus' asm-alpha/bitops.h and modified for a big-endian machine.
 */

#define ext2_find_first_zero_bit(addr, size) \
        ext2_find_next_zero_bit((addr), (size), 0)

extern __inline__ unsigned long ext2_find_next_zero_bit(void *addr,
        unsigned long size, unsigned long offset)
{
        unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
        unsigned int result = offset & ~31UL;
        unsigned int tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 31UL;
        if (offset) {
                tmp = cpu_to_le32p(p++);
                tmp |= ~0UL >> (32-offset);
                if (size < 32)
                        goto found_first;
                if (tmp != ~0U)
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        while (size >= 32) {
                if ((tmp = cpu_to_le32p(p++)) != ~0U)
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if (!size)
                return result;
        tmp = cpu_to_le32p(p);
found_first:
        tmp |= ~0U << size;
found_middle:
        return result + ffz(tmp);
}

/* Bitmap functions for the minix filesystem. */
#define minix_test_and_set_bit(nr,addr) ext2_set_bit(nr,addr)
#define minix_set_bit(nr,addr) ((void)ext2_set_bit(nr,addr))
#define minix_test_and_clear_bit(nr,addr) ext2_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) ext2_test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) ext2_find_first_zero_bit(addr,size)

#endif /* _PARISC_BITOPS_H */
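
For context, a minimal usage sketch of the interface above (not part of the commit; the bitmap and function names below are made up for illustration, and a kernel context that includes this header is assumed). A bit number nr selects word nr >> SHIFT_PER_LONG of an unsigned long array and bit CHOP_SHIFTCOUNT(nr) within that word, so callers pass only a bit index and the base address of the bitmap.

/* Illustrative sketch only; example_map and example_usage are hypothetical. */
static unsigned long example_map[2];            /* 2 * BITS_PER_LONG bits, zero-initialised */

static void example_usage(void)
{
        int was_set;
        unsigned long first;

        set_bit(3, example_map);                        /* atomic: word 0, bit 3 */
        __set_bit(BITS_PER_LONG + 1, example_map);      /* non-atomic: word 1, bit 1 */

        was_set = test_and_clear_bit(3, example_map);   /* was_set == 1, bit 3 now clear */

        /* Only word 1, bit 1 is still set: find_first_bit() walks the words and
         * applies __ffs() to the first non-zero one, giving BITS_PER_LONG + 1 here. */
        first = find_first_bit(example_map, 2 * BITS_PER_LONG);

        /* ffs()/fls() follow the libc convention: ffs(0x90) == 5, fls(0x90) == 8,
         * while __ffs(0x90) == 4 and __ffs(0) is undefined. */
        (void)was_set;
        (void)first;
}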