author    Jeff Garzik <jgarzik@pobox.com>    2005-08-29 16:12:36 -0400
committer Jeff Garzik <jgarzik@pobox.com>    2005-08-29 16:12:36 -0400
commit    2fca877b68b2b4fc5b94277858a1bedd46017cde (patch)
tree      fd02725406299ba2f26354463b3c261721e9eb6b /include/asm-xtensa/bitops.h
parent    ff40c6d3d1437ecdf295b8e39adcb06c3d6021ef (diff)
parent    02b3e4e2d71b6058ec11cc01c72ac651eb3ded2b (diff)
/spare/repo/libata-dev branch 'v2.6.13'
Diffstat (limited to 'include/asm-xtensa/bitops.h')
-rw-r--r--    include/asm-xtensa/bitops.h    446
1 file changed, 446 insertions, 0 deletions
diff --git a/include/asm-xtensa/bitops.h b/include/asm-xtensa/bitops.h
new file mode 100644
index 000000000000..d395ef226c32
--- /dev/null
+++ b/include/asm-xtensa/bitops.h
@@ -0,0 +1,446 @@
/*
 * include/asm-xtensa/bitops.h
 *
 * Atomic operations that C can't guarantee us.  Useful for resource
 * counting etc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_BITOPS_H
#define _XTENSA_BITOPS_H

#ifdef __KERNEL__

#include <asm/processor.h>
#include <asm/byteorder.h>
#include <asm/system.h>

#ifdef CONFIG_SMP
# error SMP not supported on this architecture
#endif

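/*
 * This port is uniprocessor-only (see the CONFIG_SMP check above), so
 * the "atomic" operations below obtain their atomicity by disabling
 * local interrupts around a plain read-modify-write:
 *
 *	local_irq_save(flags);
 *	*a |= mask;
 *	local_irq_restore(flags);
 *
 * The double-underscore variants (__set_bit(), ...) skip the
 * save/restore and are safe only when the caller already excludes all
 * concurrent updaters.
 */
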
static __inline__ void set_bit(int nr, volatile void * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
	unsigned long flags;

	local_irq_save(flags);
	*a |= mask;
	local_irq_restore(flags);
}

static __inline__ void __set_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *a = ((unsigned long *)addr) + (nr >> 5);

	*a |= mask;
}

static __inline__ void clear_bit(int nr, volatile void * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
	unsigned long flags;

	local_irq_save(flags);
	*a &= ~mask;
	local_irq_restore(flags);
}

static __inline__ void __clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *a = ((unsigned long *)addr) + (nr >> 5);

	*a &= ~mask;
}

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */

#define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier()

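/*
 * Usage sketch (illustrative; MY_LOCK_BIT and my_flags are hypothetical
 * names, not part of this header): releasing a lock-like bit must order
 * prior stores before the clear, hence the explicit barrier:
 *
 *	smp_mb__before_clear_bit();
 *	clear_bit(MY_LOCK_BIT, &my_flags);
 */
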
static __inline__ void change_bit(int nr, volatile void * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
	unsigned long flags;

	local_irq_save(flags);
	*a ^= mask;
	local_irq_restore(flags);
}

static __inline__ void __change_bit(int nr, volatile void * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *a = ((unsigned long *)addr) + (nr >> 5);

	*a ^= mask;
}

static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
	unsigned long retval;
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
	unsigned long flags;

	local_irq_save(flags);
	retval = (mask & *a) != 0;
	*a |= mask;
	local_irq_restore(flags);

	return retval;
}

static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
	unsigned long retval;
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *a = ((unsigned long *)addr) + (nr >> 5);

	retval = (mask & *a) != 0;
	*a |= mask;

	return retval;
}

static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
	unsigned long retval;
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
	unsigned long flags;

	local_irq_save(flags);
	retval = (mask & *a) != 0;
	*a &= ~mask;
	local_irq_restore(flags);

	return retval;
}

static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
	unsigned long old = *a;

	*a = old & ~mask;
	return (old & mask) != 0;
}

static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
	unsigned long retval;
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
	unsigned long flags;

	local_irq_save(flags);

	retval = (mask & *a) != 0;
	*a ^= mask;
	local_irq_restore(flags);

	return retval;
}

/*
 * non-atomic version; can be reordered
 */

static __inline__ int __test_and_change_bit(int nr, volatile void *addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
	unsigned long old = *a;

	*a = old ^ mask;
	return (old & mask) != 0;
}

static __inline__ int test_bit(int nr, const volatile void *addr)
{
	return 1UL & (((const volatile unsigned int *)addr)[nr>>5] >> (nr&31));
}

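/*
 * Illustrative: bit numbers index successive 32-bit words, so e.g.
 * test_bit(35, addr) tests bit 3 (= 35 & 0x1f) of word 1 (= 35 >> 5).
 */
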
#if XCHAL_HAVE_NSAU

/*
 * __cntlz: despite the name, returns the bit index of the
 * most-significant set bit (31 - clz(x)), or -1 if x is zero.
 */

static __inline__ int __cntlz (unsigned long x)
{
	int lz;
	asm ("nsau %0, %1" : "=r" (lz) : "r" (x));
	return 31 - lz;		/* nsau yields 32 for x == 0 */
}

#else

static __inline__ int __cntlz (unsigned long x)
{
	unsigned long sum, x1, x2, x4, x8, x16;

	if (x == 0)
		return -1;	/* match the NSAU version for x == 0 */

	/* The mask tests below are only valid for power-of-two values,
	 * so first reduce x to its most-significant set bit. */
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
	x ^= x >> 1;

	x1 = x & 0xAAAAAAAA;
	x2 = x & 0xCCCCCCCC;
	x4 = x & 0xF0F0F0F0;
	x8 = x & 0xFF00FF00;
	x16 = x & 0xFFFF0000;
	sum = x2 ? 2 : 0;
	sum += (x16 != 0) * 16;
	sum += (x8 != 0) * 8;
	sum += (x4 != 0) * 4;
	sum += (x1 != 0);

	return sum;
}

#endif
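
/*
 * Worked example (illustrative): for x = 0x00000400 (only bit 10 set),
 * the mask decomposition reconstructs the index from its binary digits:
 *
 *	x & 0xFFFF0000 == 0  ->  + 0	(10 < 16)
 *	x & 0xFF00FF00 != 0  ->  + 8	(10 % 16 >= 8)
 *	x & 0xF0F0F0F0 == 0  ->  + 0	(10 % 8  <  4)
 *	x & 0xCCCCCCCC != 0  ->  + 2	(10 % 4  >= 2)
 *	x & 0xAAAAAAAA == 0  ->  + 0	(10 is even)
 *
 * Sum: 10, the index of the set bit.
 */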

/*
 * ffz: Find first zero in word. Undefined if no zero exists.
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */

static __inline__ int ffz(unsigned long x)
{
	if ((x = ~x) == 0)
		return 32;
	return __cntlz(x & -x);
}

/*
 * __ffs: Find first bit set in word. Return 0 for bit 0
 */

static __inline__ int __ffs(unsigned long x)
{
	return __cntlz(x & -x);
}

/*
 * ffs: Find first bit set in word. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */

static __inline__ int ffs(unsigned long x)
{
	return __cntlz(x & -x) + 1;
}

/*
 * fls: Find last (most-significant) bit set in word.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */

static __inline__ int fls (unsigned int x)
{
	return __cntlz(x) + 1;	/* +1 so that fls(1) == 1, as documented */
}
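
/*
 * Worked examples (illustrative) for x = 0x50, i.e. bits 4 and 6 set:
 *
 *	__ffs(0x50) == 4	index of the lowest set bit
 *	ffs(0x50)   == 5	one-based (libc convention)
 *	fls(0x50)   == 7	one-based index of bit 6
 *	ffz(0x50)   == 0	bit 0 is the lowest clear bit
 */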

static __inline__ int
find_next_bit(const unsigned long *addr, int size, int offset)
{
	const unsigned long *p = addr + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *p++;
		tmp &= ~0UL << offset;
		if (size < 32)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size >= 32) {
		if ((tmp = *p++) != 0)
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp &= ~0UL >> (32 - size);
	if (tmp == 0UL)			/* Are any bits set? */
		return result + size;	/* Nope. */
found_middle:
	return result + __ffs(tmp);
}

/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first set bit, not the number of the byte
 * containing a bit.
 */

#define find_first_bit(addr, size) \
	find_next_bit((addr), (size), 0)
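
/*
 * Usage sketch (illustrative; map and handle_bit() are hypothetical):
 * visit every set bit in a 96-bit bitmap:
 *
 *	unsigned long map[3];
 *	int bit;
 *
 *	for (bit = find_first_bit(map, 96); bit < 96;
 *	     bit = find_next_bit(map, 96, bit + 1))
 *		handle_bit(bit);
 */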

static __inline__ int
find_next_zero_bit(const unsigned long *addr, int size, int offset)
{
	const unsigned long *p = addr + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *p++;
		tmp |= ~0UL >> (32-offset);
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *p++))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL << size;
found_middle:
	return result + ffz(tmp);
}

#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)
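
/*
 * Usage sketch (illustrative; map is a hypothetical 64-entry allocation
 * bitmap): claim the first free slot; test_and_set_bit() makes the
 * claim safe against interrupt handlers on the same CPU:
 *
 *	int slot = find_first_zero_bit(map, 64);
 *
 *	while (slot < 64 && test_and_set_bit(slot, map))
 *		slot = find_next_zero_bit(map, 64, slot + 1);
 */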

#ifdef __XTENSA_EL__
# define ext2_set_bit(nr,addr) __test_and_set_bit((nr), (addr))
# define ext2_set_bit_atomic(lock,nr,addr) test_and_set_bit((nr),(addr))
# define ext2_clear_bit(nr,addr) __test_and_clear_bit((nr), (addr))
# define ext2_clear_bit_atomic(lock,nr,addr) test_and_clear_bit((nr),(addr))
# define ext2_test_bit(nr,addr) test_bit((nr), (addr))
# define ext2_find_first_zero_bit(addr, size) find_first_zero_bit((addr),(size))
# define ext2_find_next_zero_bit(addr, size, offset) \
		find_next_zero_bit((addr), (size), (offset))
#elif defined(__XTENSA_EB__)
/*
 * ext2 bitmaps are little-endian.  On a big-endian core, XOR-ing the
 * bit number with 0x18 (= 24) reverses the byte index within each
 * 32-bit word (0<->3, 1<->2), mapping little-endian bit numbering onto
 * native word-wide operations.
 */
# define ext2_set_bit(nr,addr) __test_and_set_bit((nr) ^ 0x18, (addr))
# define ext2_set_bit_atomic(lock,nr,addr) test_and_set_bit((nr) ^ 0x18, (addr))
# define ext2_clear_bit(nr,addr) __test_and_clear_bit((nr) ^ 0x18, (addr))
# define ext2_clear_bit_atomic(lock,nr,addr) test_and_clear_bit((nr)^0x18,(addr))
# define ext2_test_bit(nr,addr) test_bit((nr) ^ 0x18, (addr))
# define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)

static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		/* We hold the little endian value in tmp, but then the
		 * shift is illegal. So we could keep a big endian value
		 * in tmp, like this:
		 *
		 * tmp = __swab32(*(p++));
		 * tmp |= ~0UL >> (32-offset);
		 *
		 * but this would decrease performance, so we change the
		 * shift:
		 */
		tmp = *(p++);
		tmp |= __swab32(~0UL >> (32-offset));
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	/* tmp is little endian, so we would have to swab the shift,
	 * see above. But then we have to swab tmp below for ffz, so
	 * we might as well do this here.
	 */
	return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
	return result + ffz(__swab32(tmp));
}

#else
# error processor byte order undefined!
#endif
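
/*
 * Worked example (illustrative): on a big-endian core, ext2 bit 0 must
 * live in the lowest-addressed byte, which holds bits 24..31 of the
 * native 32-bit word, hence nr -> nr ^ 0x18:
 *
 *	 0 ^ 0x18 == 24
 *	 8 ^ 0x18 == 16
 *	16 ^ 0x18 ==  8
 *	24 ^ 0x18 ==  0
 */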


#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)

/*
 * Find the first bit set in a 140-bit bitmap.
 * The first 100 bits are unlikely to be set.
 */

static inline int sched_find_first_bit(const unsigned long *b)
{
	if (unlikely(b[0]))
		return __ffs(b[0]);
	if (unlikely(b[1]))
		return __ffs(b[1]) + 32;
	if (unlikely(b[2]))
		return __ffs(b[2]) + 64;
	if (b[3])
		return __ffs(b[3]) + 96;
	return __ffs(b[4]) + 128;
}
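
/*
 * Worked example (illustrative): with only bit 130 set, b[0..3] are all
 * zero, so the result is __ffs(b[4]) + 128 == 2 + 128 == 130.
 */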


/* Bitmap functions for the minix filesystem. */

#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)

#endif /* __KERNEL__ */

#endif /* _XTENSA_BITOPS_H */