path: root/include/asm-powerpc/bitops.h
Diffstat (limited to 'include/asm-powerpc/bitops.h')
-rw-r--r--  include/asm-powerpc/bitops.h  410
1 file changed, 0 insertions(+), 410 deletions(-)
diff --git a/include/asm-powerpc/bitops.h b/include/asm-powerpc/bitops.h
deleted file mode 100644
index 897eade3afbe..000000000000
--- a/include/asm-powerpc/bitops.h
+++ /dev/null
@@ -1,410 +0,0 @@
/*
 * PowerPC atomic bit operations.
 *
 * Merged version by David Gibson <david@gibson.dropbear.id.au>.
 * Based on ppc64 versions by: Dave Engebretsen, Todd Inglett, Don
 * Reed, Pat McCarthy, Peter Bergner, Anton Blanchard.  They
 * originally took it from the ppc32 code.
 *
 * Within a word, bits are numbered LSB first.  Lots of places make
 * this assumption by directly testing bits with (val & (1<<nr)).
 * This can cause confusion for large (> 1 word) bitmaps on a
 * big-endian system because, unlike little-endian, the number of each
 * bit depends on the word size.
 *
 * The bitop functions are defined to work on unsigned longs, so for a
 * ppc64 system the bits end up numbered:
 * |63..............0|127............64|191...........128|255...........192|
 * and on ppc32:
 * |31.....0|63....32|95....64|127...96|159..128|191..160|223..192|255..224|
 *
 * There are a few little-endian macros used mostly for filesystem
 * bitmaps; these work on a similar bit array layout, but
 * byte-oriented:
 * |7...0|15...8|23...16|31...24|39...32|47...40|55...48|63...56|
 *
 * The main difference is that bits 3-5 (64b) or 3-4 (32b) in the bit
 * number field need to be reversed compared to the big-endian bit
 * fields.  This can be achieved by XOR with 0x38 (64b) or 0x18 (32b).
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
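
/*
 * Worked example of the byte-oriented swizzle: on a 64-bit big-endian
 * system, BITOP_LE_SWIZZLE (defined below) is (64-1) & ~0x7 = 0x38.
 * Little-endian bit 0 lives in byte 0 of the array; on big-endian,
 * byte 0 is the most significant byte of the first unsigned long,
 * holding bits 63..56 of the numbering above, so LE bit 0 maps to
 * 0 ^ 0x38 = 56.  Likewise LE bit 9 (bit 1 of byte 1) maps to
 * 9 ^ 0x38 = 49.
 */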

#ifndef _ASM_POWERPC_BITOPS_H
#define _ASM_POWERPC_BITOPS_H

#ifdef __KERNEL__

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/asm-compat.h>
#include <asm/synch.h>

/*
 * clear_bit doesn't imply a memory barrier
 */
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()
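
/*
 * Typical use: order prior stores before a releasing clear_bit().
 * Sketch, with hypothetical 'flags' word and 'IN_PROGRESS' bit:
 *
 *	result = compute();
 *	smp_mb__before_clear_bit();
 *	clear_bit(IN_PROGRESS, &flags);
 *
 * The barrier ensures the store to 'result' is visible to other CPUs
 * before they can observe the flag bit clear.
 */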

#define BITOP_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
#define BITOP_WORD(nr)		((nr) / BITS_PER_LONG)
#define BITOP_LE_SWIZZLE	((BITS_PER_LONG-1) & ~0x7)
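
/*
 * Example: with 64-bit longs, bit number 68 selects word
 * BITOP_WORD(68) = 1 and mask BITOP_MASK(68) = 1UL << 4, so
 * set_bit(68, addr) sets bit 4 of addr[1].
 */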

static __inline__ void set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long old;
	unsigned long mask = BITOP_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);

	__asm__ __volatile__(
"1:"	PPC_LLARX "%0,0,%3 # set_bit\n"
	"or %0,%0,%2\n"
	PPC405_ERR77(0,%3)
	PPC_STLCX "%0,0,%3\n"
	"bne- 1b"
	: "=&r" (old), "+m" (*p)
	: "r" (mask), "r" (p)
	: "cc" );
}
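
/*
 * The sequence above is an atomic read-modify-write: the
 * load-and-reserve (lwarx/ldarx via PPC_LLARX) places a reservation
 * on the word, and the store-conditional (stwcx./stdcx. via
 * PPC_STLCX) fails, taking the bne- back to 1:, if any other CPU
 * wrote the word in between, so the OR is retried until it commits
 * atomically.
 */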

static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long old;
	unsigned long mask = BITOP_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);

	__asm__ __volatile__(
"1:"	PPC_LLARX "%0,0,%3 # clear_bit\n"
	"andc %0,%0,%2\n"
	PPC405_ERR77(0,%3)
	PPC_STLCX "%0,0,%3\n"
	"bne- 1b"
	: "=&r" (old), "+m" (*p)
	: "r" (mask), "r" (p)
	: "cc" );
}

static __inline__ void clear_bit_unlock(int nr, volatile unsigned long *addr)
{
	unsigned long old;
	unsigned long mask = BITOP_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:"	PPC_LLARX "%0,0,%3 # clear_bit_unlock\n"
	"andc %0,%0,%2\n"
	PPC405_ERR77(0,%3)
	PPC_STLCX "%0,0,%3\n"
	"bne- 1b"
	: "=&r" (old), "+m" (*p)
	: "r" (mask), "r" (p)
	: "cc", "memory");
}
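
/*
 * The LWSYNC_ON_SMP above gives clear_bit_unlock() release semantics:
 * all stores executed before it (e.g. to data protected by the lock
 * bit) are guaranteed visible before the bit can be observed clear.
 */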

static __inline__ void change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long old;
	unsigned long mask = BITOP_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);

	__asm__ __volatile__(
"1:"	PPC_LLARX "%0,0,%3 # change_bit\n"
	"xor %0,%0,%2\n"
	PPC405_ERR77(0,%3)
	PPC_STLCX "%0,0,%3\n"
	"bne- 1b"
	: "=&r" (old), "+m" (*p)
	: "r" (mask), "r" (p)
	: "cc" );
}

static __inline__ int test_and_set_bit(unsigned long nr,
					volatile unsigned long *addr)
{
	unsigned long old, t;
	unsigned long mask = BITOP_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:"	PPC_LLARX "%0,0,%3 # test_and_set_bit\n"
	"or %1,%0,%2 \n"
	PPC405_ERR77(0,%3)
	PPC_STLCX "%1,0,%3 \n"
	"bne- 1b"
	ISYNC_ON_SMP
	: "=&r" (old), "=&r" (t)
	: "r" (mask), "r" (p)
	: "cc", "memory");

	return (old & mask) != 0;
}
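
/*
 * The lwsync before and isync after the ll/sc sequence are what make
 * test_and_set_bit() act as a memory barrier to its caller, as the
 * kernel requires of the atomic test-and-* operations.
 */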

static __inline__ int test_and_set_bit_lock(unsigned long nr,
					volatile unsigned long *addr)
{
	unsigned long old, t;
	unsigned long mask = BITOP_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);

	__asm__ __volatile__(
"1:"	PPC_LLARX "%0,0,%3 # test_and_set_bit_lock\n"
	"or %1,%0,%2 \n"
	PPC405_ERR77(0,%3)
	PPC_STLCX "%1,0,%3 \n"
	"bne- 1b"
	ISYNC_ON_SMP
	: "=&r" (old), "=&r" (t)
	: "r" (mask), "r" (p)
	: "cc", "memory");

	return (old & mask) != 0;
}
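
/*
 * Sketch of a bit spinlock built on these primitives, with a
 * hypothetical LOCK_BIT and word:
 *
 *	while (test_and_set_bit_lock(LOCK_BIT, &word))
 *		cpu_relax();
 *	... critical section ...
 *	clear_bit_unlock(LOCK_BIT, &word);
 *
 * test_and_set_bit_lock() needs only acquire semantics (hence no
 * leading lwsync), pairing with the release in clear_bit_unlock().
 */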

static __inline__ int test_and_clear_bit(unsigned long nr,
					volatile unsigned long *addr)
{
	unsigned long old, t;
	unsigned long mask = BITOP_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:"	PPC_LLARX "%0,0,%3 # test_and_clear_bit\n"
	"andc %1,%0,%2 \n"
	PPC405_ERR77(0,%3)
	PPC_STLCX "%1,0,%3 \n"
	"bne- 1b"
	ISYNC_ON_SMP
	: "=&r" (old), "=&r" (t)
	: "r" (mask), "r" (p)
	: "cc", "memory");

	return (old & mask) != 0;
}

static __inline__ int test_and_change_bit(unsigned long nr,
					volatile unsigned long *addr)
{
	unsigned long old, t;
	unsigned long mask = BITOP_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:"	PPC_LLARX "%0,0,%3 # test_and_change_bit\n"
	"xor %1,%0,%2 \n"
	PPC405_ERR77(0,%3)
	PPC_STLCX "%1,0,%3 \n"
	"bne- 1b"
	ISYNC_ON_SMP
	: "=&r" (old), "=&r" (t)
	: "r" (mask), "r" (p)
	: "cc", "memory");

	return (old & mask) != 0;
}

static __inline__ void set_bits(unsigned long mask, unsigned long *addr)
{
	unsigned long old;

	__asm__ __volatile__(
"1:"	PPC_LLARX "%0,0,%3 # set_bits\n"
	"or %0,%0,%2\n"
	PPC_STLCX "%0,0,%3\n"
	"bne- 1b"
	: "=&r" (old), "+m" (*addr)
	: "r" (mask), "r" (addr)
	: "cc");
}
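
/*
 * set_bits() ORs in an arbitrary mask under a single reservation,
 * e.g. set_bits(0xf0UL, &word) sets bits 4-7 in one atomic
 * read-modify-write instead of four separate set_bit() calls.
 * Like set_bit(), it implies no memory barrier.
 */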

#include <asm-generic/bitops/non-atomic.h>

static __inline__ void __clear_bit_unlock(int nr, volatile unsigned long *addr)
{
	__asm__ __volatile__(LWSYNC_ON_SMP "" ::: "memory");
	__clear_bit(nr, addr);
}
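
/*
 * Non-atomic unlock: the lwsync supplies the release ordering, while
 * the clear itself is a plain read-modify-write.  This is safe only
 * when no other CPU can concurrently modify other bits in the same
 * word, e.g. when the lock holder is the word's sole writer.
 */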

/*
 * Return the zero-based bit position (LE, not IBM bit numbering) of
 * the most significant 1-bit in an unsigned long.
 */
static __inline__ __attribute__((const))
int __ilog2(unsigned long x)
{
	int lz;

	asm (PPC_CNTLZL "%0,%1" : "=r" (lz) : "r" (x));
	return BITS_PER_LONG - 1 - lz;
}
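
/*
 * Example: for x = 0x40, cntlz counts 57 leading zeroes on a 64-bit
 * long, so __ilog2() returns 63 - 57 = 6.  Note that __ilog2(0) is
 * -1 here, since cntlz of 0 yields BITS_PER_LONG.
 */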

static inline __attribute__((const))
int __ilog2_u32(u32 n)
{
	int bit;
	asm ("cntlzw %0,%1" : "=r" (bit) : "r" (n));
	return 31 - bit;
}

#ifdef __powerpc64__
static inline __attribute__((const))
int __ilog2_u64(u64 n)
{
	int bit;
	asm ("cntlzd %0,%1" : "=r" (bit) : "r" (n));
	return 63 - bit;
}
#endif

/*
 * Determines the bit position of the least significant 0 bit in the
 * specified unsigned long.  The returned bit position is zero-based,
 * counting from the right (63/31 - 0).
 */
static __inline__ unsigned long ffz(unsigned long x)
{
	/* no zero bit exists anywhere in x */
	if ((x = ~x) == 0)
		return BITS_PER_LONG;

	/*
	 * Calculate the bit position of the least significant '1' bit in
	 * x (since x has been complemented, this is actually the least
	 * significant '0' bit in the original x).  Note: (x & -x) gives
	 * us a mask that is the least significant (right-most) 1-bit of
	 * the value in x.
	 */
	return __ilog2(x & -x);
}
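
/*
 * Example: ffz(0x7) complements to ...11111000, isolates the lowest
 * set bit with (x & -x) = 0x8, and returns __ilog2(0x8) = 3: bit 3
 * is indeed the least significant zero bit of the original value.
 */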

static __inline__ int __ffs(unsigned long x)
{
	return __ilog2(x & -x);
}

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static __inline__ int ffs(int x)
{
	unsigned long i = (unsigned long)x;
	return __ilog2(i & -i) + 1;
}
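
/*
 * Example: ffs(0x18) isolates the lowest set bit (0x8) and returns
 * __ilog2(0x8) + 1 = 4.  ffs(0) returns 0, matching the libc
 * convention, because __ilog2(0) is -1 here.
 */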

/*
 * fls: find last (most-significant) bit set.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static __inline__ int fls(unsigned int x)
{
	int lz;

	asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x));
	return 32 - lz;
}
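
/*
 * Example: fls(1) = 32 - cntlzw(1) = 32 - 31 = 1, and
 * fls(0x80000000) = 32 - 0 = 32; cntlzw(0) is 32, giving fls(0) = 0.
 */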

static __inline__ unsigned long __fls(unsigned long x)
{
	return __ilog2(x);
}

/*
 * 64-bit can do this using one cntlzd (count leading zeroes doubleword)
 * instruction; for 32-bit we use the generic version, which does two
 * 32-bit fls calls.
 */
#ifdef __powerpc64__
static __inline__ int fls64(__u64 x)
{
	int lz;

	asm ("cntlzd %0,%1" : "=r" (lz) : "r" (x));
	return 64 - lz;
}
#else
#include <asm-generic/bitops/fls64.h>
#endif /* __powerpc64__ */

#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/find.h>

/* Little-endian versions */

static __inline__ int test_le_bit(unsigned long nr,
				__const__ unsigned long *addr)
{
	__const__ unsigned char *tmp = (__const__ unsigned char *) addr;
	return (tmp[nr >> 3] >> (nr & 7)) & 1;
}
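
/*
 * Example: test_le_bit(9, addr) reads bit 1 of byte 1, i.e.
 * (((const unsigned char *)addr)[1] >> 1) & 1, independent of the
 * host word size or endianness.
 */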

#define __set_le_bit(nr, addr) \
	__set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
#define __clear_le_bit(nr, addr) \
	__clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))

#define test_and_set_le_bit(nr, addr) \
	test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
#define test_and_clear_le_bit(nr, addr) \
	test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))

#define __test_and_set_le_bit(nr, addr) \
	__test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
#define __test_and_clear_le_bit(nr, addr) \
	__test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))

#define find_first_zero_le_bit(addr, size) \
	generic_find_next_zero_le_bit((addr), (size), 0)
unsigned long generic_find_next_zero_le_bit(const unsigned long *addr,
		unsigned long size, unsigned long offset);

unsigned long generic_find_next_le_bit(const unsigned long *addr,
		unsigned long size, unsigned long offset);

/* Bitmap functions for the ext2 filesystem */

#define ext2_set_bit(nr, addr) \
	__test_and_set_le_bit((nr), (unsigned long *)addr)
#define ext2_clear_bit(nr, addr) \
	__test_and_clear_le_bit((nr), (unsigned long *)addr)

#define ext2_set_bit_atomic(lock, nr, addr) \
	test_and_set_le_bit((nr), (unsigned long *)addr)
#define ext2_clear_bit_atomic(lock, nr, addr) \
	test_and_clear_le_bit((nr), (unsigned long *)addr)

#define ext2_test_bit(nr, addr)	test_le_bit((nr), (unsigned long *)addr)

#define ext2_find_first_zero_bit(addr, size) \
	find_first_zero_le_bit((unsigned long *)addr, size)
#define ext2_find_next_zero_bit(addr, size, off) \
	generic_find_next_zero_le_bit((unsigned long *)addr, size, off)

#define ext2_find_next_bit(addr, size, off) \
	generic_find_next_le_bit((unsigned long *)addr, size, off)

/* Bitmap functions for the minix filesystem. */

#define minix_test_and_set_bit(nr, addr) \
	__test_and_set_le_bit(nr, (unsigned long *)addr)
#define minix_set_bit(nr, addr) \
	__set_le_bit(nr, (unsigned long *)addr)
#define minix_test_and_clear_bit(nr, addr) \
	__test_and_clear_le_bit(nr, (unsigned long *)addr)
#define minix_test_bit(nr, addr) \
	test_le_bit(nr, (unsigned long *)addr)

#define minix_find_first_zero_bit(addr, size) \
	find_first_zero_le_bit((unsigned long *)addr, size)

#include <asm-generic/bitops/sched.h>

#endif /* __KERNEL__ */

#endif /* _ASM_POWERPC_BITOPS_H */