 arch/xtensa/Kconfig         |   8
 include/asm-xtensa/bitops.h | 340
 2 files changed, 16 insertions(+), 332 deletions(-)
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index e90ef5db8913..dbeb3504c3c8 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -22,6 +22,14 @@ config RWSEM_XCHGADD_ALGORITHM
         bool
         default y
 
+config GENERIC_FIND_NEXT_BIT
+        bool
+        default y
+
+config GENERIC_HWEIGHT
+        bool
+        default y
+
 config GENERIC_HARDIRQS
         bool
         default y
diff --git a/include/asm-xtensa/bitops.h b/include/asm-xtensa/bitops.h
index 50b83726a497..d815649617aa 100644
--- a/include/asm-xtensa/bitops.h
+++ b/include/asm-xtensa/bitops.h
@@ -23,156 +23,11 @@
 # error SMP not supported on this architecture
 #endif
 
-static __inline__ void set_bit(int nr, volatile void * addr)
-{
-        unsigned long mask = 1 << (nr & 0x1f);
-        unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
-        unsigned long flags;
-
-        local_irq_save(flags);
-        *a |= mask;
-        local_irq_restore(flags);
-}
-
-static __inline__ void __set_bit(int nr, volatile unsigned long * addr)
-{
-        unsigned long mask = 1 << (nr & 0x1f);
-        unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
-
-        *a |= mask;
-}
-
-static __inline__ void clear_bit(int nr, volatile void * addr)
-{
-        unsigned long mask = 1 << (nr & 0x1f);
-        unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
-        unsigned long flags;
-
-        local_irq_save(flags);
-        *a &= ~mask;
-        local_irq_restore(flags);
-}
-
-static __inline__ void __clear_bit(int nr, volatile unsigned long *addr)
-{
-        unsigned long mask = 1 << (nr & 0x1f);
-        unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
-
-        *a &= ~mask;
-}
-
-/*
- * clear_bit() doesn't provide any barrier for the compiler.
- */
-
 #define smp_mb__before_clear_bit() barrier()
 #define smp_mb__after_clear_bit() barrier()
 
-static __inline__ void change_bit(int nr, volatile void * addr)
-{
-        unsigned long mask = 1 << (nr & 0x1f);
-        unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
-        unsigned long flags;
-
-        local_irq_save(flags);
-        *a ^= mask;
-        local_irq_restore(flags);
-}
-
-static __inline__ void __change_bit(int nr, volatile void * addr)
-{
-        unsigned long mask = 1 << (nr & 0x1f);
-        unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
-
-        *a ^= mask;
-}
-
-static __inline__ int test_and_set_bit(int nr, volatile void * addr)
-{
-        unsigned long retval;
-        unsigned long mask = 1 << (nr & 0x1f);
-        unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
-        unsigned long flags;
-
-        local_irq_save(flags);
-        retval = (mask & *a) != 0;
-        *a |= mask;
-        local_irq_restore(flags);
-
-        return retval;
-}
-
-static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
-{
-        unsigned long retval;
-        unsigned long mask = 1 << (nr & 0x1f);
-        unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
-
-        retval = (mask & *a) != 0;
-        *a |= mask;
-
-        return retval;
-}
-
-static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
-{
-        unsigned long retval;
-        unsigned long mask = 1 << (nr & 0x1f);
-        unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
-        unsigned long flags;
-
-        local_irq_save(flags);
-        retval = (mask & *a) != 0;
-        *a &= ~mask;
-        local_irq_restore(flags);
-
-        return retval;
-}
-
-static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
-{
-        unsigned long mask = 1 << (nr & 0x1f);
-        unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
-        unsigned long old = *a;
-
-        *a = old & ~mask;
-        return (old & mask) != 0;
-}
-
-static __inline__ int test_and_change_bit(int nr, volatile void * addr)
-{
-        unsigned long retval;
-        unsigned long mask = 1 << (nr & 0x1f);
-        unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
-        unsigned long flags;
-
-        local_irq_save(flags);
-
-        retval = (mask & *a) != 0;
-        *a ^= mask;
-        local_irq_restore(flags);
-
-        return retval;
-}
-
-/*
- * non-atomic version; can be reordered
- */
-
-static __inline__ int __test_and_change_bit(int nr, volatile void *addr)
-{
-        unsigned long mask = 1 << (nr & 0x1f);
-        unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
-        unsigned long old = *a;
-
-        *a = old ^ mask;
-        return (old & mask) != 0;
-}
-
-static __inline__ int test_bit(int nr, const volatile void *addr)
-{
-        return 1UL & (((const volatile unsigned int *)addr)[nr>>5] >> (nr&31));
-}
+#include <asm-generic/bitops/atomic.h>
+#include <asm-generic/bitops/non-atomic.h>
 
 #if XCHAL_HAVE_NSA
 
@@ -245,202 +100,23 @@ static __inline__ int fls (unsigned int x)
 {
         return __cntlz(x);
 }
-#define fls64(x)   generic_fls64(x)
-
-static __inline__ int
-find_next_bit(const unsigned long *addr, int size, int offset)
-{
-        const unsigned long *p = addr + (offset >> 5);
-        unsigned long result = offset & ~31UL;
-        unsigned long tmp;
-
-        if (offset >= size)
-                return size;
-        size -= result;
-        offset &= 31UL;
-        if (offset) {
-                tmp = *p++;
-                tmp &= ~0UL << offset;
-                if (size < 32)
-                        goto found_first;
-                if (tmp)
-                        goto found_middle;
-                size -= 32;
-                result += 32;
-        }
-        while (size >= 32) {
-                if ((tmp = *p++) != 0)
-                        goto found_middle;
-                result += 32;
-                size -= 32;
-        }
-        if (!size)
-                return result;
-        tmp = *p;
-
-found_first:
-        tmp &= ~0UL >> (32 - size);
-        if (tmp == 0UL)        /* Are any bits set? */
-                return result + size;        /* Nope. */
-found_middle:
-        return result + __ffs(tmp);
-}
-
-/**
- * find_first_bit - find the first set bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum size to search
- *
- * Returns the bit-number of the first set bit, not the number of the byte
- * containing a bit.
- */
-
-#define find_first_bit(addr, size) \
-        find_next_bit((addr), (size), 0)
-
-static __inline__ int
-find_next_zero_bit(const unsigned long *addr, int size, int offset)
-{
-        const unsigned long *p = addr + (offset >> 5);
-        unsigned long result = offset & ~31UL;
-        unsigned long tmp;
-
-        if (offset >= size)
-                return size;
-        size -= result;
-        offset &= 31UL;
-        if (offset) {
-                tmp = *p++;
-                tmp |= ~0UL >> (32-offset);
-                if (size < 32)
-                        goto found_first;
-                if (~tmp)
-                        goto found_middle;
-                size -= 32;
-                result += 32;
-        }
-        while (size & ~31UL) {
-                if (~(tmp = *p++))
-                        goto found_middle;
-                result += 32;
-                size -= 32;
-        }
-        if (!size)
-                return result;
-        tmp = *p;
-
-found_first:
-        tmp |= ~0UL << size;
-found_middle:
-        return result + ffz(tmp);
-}
-
-#define find_first_zero_bit(addr, size) \
-        find_next_zero_bit((addr), (size), 0)
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/find.h>
+#include <asm-generic/bitops/ext2-non-atomic.h>
 
 #ifdef __XTENSA_EL__
-# define ext2_set_bit(nr,addr) __test_and_set_bit((nr), (addr))
 # define ext2_set_bit_atomic(lock,nr,addr) test_and_set_bit((nr),(addr))
-# define ext2_clear_bit(nr,addr) __test_and_clear_bit((nr), (addr))
 # define ext2_clear_bit_atomic(lock,nr,addr) test_and_clear_bit((nr),(addr))
-# define ext2_test_bit(nr,addr) test_bit((nr), (addr))
-# define ext2_find_first_zero_bit(addr, size) find_first_zero_bit((addr),(size))
-# define ext2_find_next_zero_bit(addr, size, offset) \
-                find_next_zero_bit((addr), (size), (offset))
 #elif defined(__XTENSA_EB__)
-# define ext2_set_bit(nr,addr) __test_and_set_bit((nr) ^ 0x18, (addr))
 # define ext2_set_bit_atomic(lock,nr,addr) test_and_set_bit((nr) ^ 0x18, (addr))
-# define ext2_clear_bit(nr,addr) __test_and_clear_bit((nr) ^ 18, (addr))
 # define ext2_clear_bit_atomic(lock,nr,addr) test_and_clear_bit((nr)^0x18,(addr))
-# define ext2_test_bit(nr,addr) test_bit((nr) ^ 0x18, (addr))
-# define ext2_find_first_zero_bit(addr, size) \
-        ext2_find_next_zero_bit((addr), (size), 0)
-
-static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
-{
-        unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
-        unsigned long result = offset & ~31UL;
-        unsigned long tmp;
-
-        if (offset >= size)
-                return size;
-        size -= result;
-        offset &= 31UL;
-        if(offset) {
-                /* We hold the little endian value in tmp, but then the
-                 * shift is illegal. So we could keep a big endian value
-                 * in tmp, like this:
-                 *
-                 * tmp = __swab32(*(p++));
-                 * tmp |= ~0UL >> (32-offset);
-                 *
-                 * but this would decrease preformance, so we change the
-                 * shift:
-                 */
-                tmp = *(p++);
-                tmp |= __swab32(~0UL >> (32-offset));
-                if(size < 32)
-                        goto found_first;
-                if(~tmp)
-                        goto found_middle;
-                size -= 32;
-                result += 32;
-        }
-        while(size & ~31UL) {
-                if(~(tmp = *(p++)))
-                        goto found_middle;
-                result += 32;
-                size -= 32;
-        }
-        if(!size)
-                return result;
-        tmp = *p;
-
-found_first:
-        /* tmp is little endian, so we would have to swab the shift,
-         * see above. But then we have to swab tmp below for ffz, so
-         * we might as well do this here.
-         */
-        return result + ffz(__swab32(tmp) | (~0UL << size));
-found_middle:
-        return result + ffz(__swab32(tmp));
-}
-
 #else
 # error processor byte order undefined!
 #endif
 
-
-#define hweight32(x) generic_hweight32(x)
-#define hweight16(x) generic_hweight16(x)
-#define hweight8(x) generic_hweight8(x)
-
-/*
- * Find the first bit set in a 140-bit bitmap.
- * The first 100 bits are unlikely to be set.
- */
-
-static inline int sched_find_first_bit(const unsigned long *b)
-{
-        if (unlikely(b[0]))
-                return __ffs(b[0]);
-        if (unlikely(b[1]))
-                return __ffs(b[1]) + 32;
-        if (unlikely(b[2]))
-                return __ffs(b[2]) + 64;
-        if (b[3])
-                return __ffs(b[3]) + 96;
-        return __ffs(b[4]) + 128;
-}
-
-
-/* Bitmap functions for the minix filesystem. */
-
-#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,addr)
-#define minix_set_bit(nr,addr) __set_bit(nr,addr)
-#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,addr)
-#define minix_test_bit(nr,addr) test_bit(nr,addr)
-#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/minix.h>
 
 #endif /* __KERNEL__ */
 
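The atomic bit operations removed above follow the classic uniprocessor pattern that the generic asm-generic/bitops/atomic.h header also falls back to when SMP is not configured: mask interrupts around the read-modify-write of the word holding the bit. Below is a minimal sketch of that pattern, modeled on the removed xtensa routines rather than copied verbatim from the generic header; the sketch_* names are placeholders, not kernel symbols, and local_irq_save()/local_irq_restore() are the usual kernel interrupt-masking primitives.

/* Sketch: UP-only atomic set_bit() -- disable interrupts so the
 * read-modify-write of the 32-bit word cannot be interrupted. */
static inline void sketch_set_bit(int nr, volatile unsigned long *addr)
{
        unsigned long mask = 1UL << (nr & 0x1f);                 /* bit within the word */
        unsigned long *p = ((unsigned long *)addr) + (nr >> 5);  /* word containing the bit */
        unsigned long flags;

        local_irq_save(flags);
        *p |= mask;
        local_irq_restore(flags);
}

/* Sketch: UP-only test_and_set_bit() -- same idea, but report the
 * previous value of the bit back to the caller. */
static inline int sketch_test_and_set_bit(int nr, volatile unsigned long *addr)
{
        unsigned long mask = 1UL << (nr & 0x1f);
        unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
        unsigned long flags;
        int old;

        local_irq_save(flags);
        old = (*p & mask) != 0;
        *p |= mask;
        local_irq_restore(flags);

        return old;
}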