author	Ralf Baechle <ralf@linux-mips.org>	2008-09-16 13:48:51 -0400
committer	Ralf Baechle <ralf@linux-mips.org>	2008-10-11 11:18:52 -0400
commit	384740dc49ea651ba350704d13ff6be9976e37fe (patch)
tree	a6e80cad287ccae7a86d81bfa692fc96889c88ed /arch/mips/include/asm/bitops.h
parent	e8c7c482347574ecdd45c43e32c332d5fc2ece61 (diff)
MIPS: Move header files to new location below arch/mips/include
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'arch/mips/include/asm/bitops.h')
-rw-r--r--	arch/mips/include/asm/bitops.h	672
1 file changed, 672 insertions(+), 0 deletions(-)
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
new file mode 100644
index 000000000000..49df8c4c9d25
--- /dev/null
+++ b/arch/mips/include/asm/bitops.h
@@ -0,0 +1,672 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 99, 2000, 06, 07 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (c) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/bug.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/cpu-features.h>
#include <asm/sgidefs.h>
#include <asm/war.h>
#if _MIPS_SZLONG == 32
#define SZLONG_LOG 5
#define SZLONG_MASK 31UL
#define __LL		"ll	"
#define __SC		"sc	"
#define __INS		"ins	"
#define __EXT		"ext	"
#elif _MIPS_SZLONG == 64
#define SZLONG_LOG 6
#define SZLONG_MASK 63UL
#define __LL		"lld	"
#define __SC		"scd	"
#define __INS		"dins	"
#define __EXT		"dext	"
#endif
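
/*
 * Illustrative note (added commentary, not original code): these macros
 * pick the word-width load-linked/store-conditional mnemonics, so one
 * asm template below serves both ABIs.  On a 32-bit kernel
 *
 *	"1:	" __LL "%0, %1"    assembles as    "1:	ll %0, %1"
 *
 * while the same template on a 64-bit kernel assembles as
 * "1:	lld %0, %1"; likewise sc/scd, ins/dins and ext/dext.
 */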

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	smp_llsc_mb()
#define smp_mb__after_clear_bit()	smp_llsc_mb()

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# set_bit	\n"
		"	or	%0, %2					\n"
		"	" __SC "%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << bit), "m" (*m));
#ifdef CONFIG_CPU_MIPSR2
	} else if (__builtin_constant_p(bit)) {
		__asm__ __volatile__(
		"1:	" __LL "%0, %1			# set_bit	\n"
		"	" __INS "%0, %4, %2, 1				\n"
		"	" __SC "%0, %1					\n"
		"	beqz	%0, 2f					\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	.previous					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (bit), "m" (*m), "r" (~0));
#endif /* CONFIG_CPU_MIPSR2 */
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# set_bit	\n"
		"	or	%0, %2					\n"
		"	" __SC "%0, %1					\n"
		"	beqz	%0, 2f					\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	.previous					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << bit), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		*a |= mask;
		raw_local_irq_restore(flags);
	}
}
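
/*
 * Illustrative usage sketch (added commentary, not original code): the
 * atomic bit operations treat @addr as an array of unsigned long, so
 * @nr may index past the first word:
 *
 *	static unsigned long pending[2];	(a hypothetical bitmap)
 *
 *	set_bit(0, pending);			bit 0 of pending[0]
 *	set_bit(BITS_PER_LONG + 3, pending);	bit 3 of pending[1]
 *	clear_bit(0, pending);
 *	change_bit(BITS_PER_LONG + 3, pending);	toggles it off again
 */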

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# clear_bit	\n"
		"	and	%0, %2					\n"
		"	" __SC "%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << bit)), "m" (*m));
#ifdef CONFIG_CPU_MIPSR2
	} else if (__builtin_constant_p(bit)) {
		__asm__ __volatile__(
		"1:	" __LL "%0, %1			# clear_bit	\n"
		"	" __INS "%0, $0, %2, 1				\n"
		"	" __SC "%0, %1					\n"
		"	beqz	%0, 2f					\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	.previous					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (bit), "m" (*m));
#endif /* CONFIG_CPU_MIPSR2 */
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# clear_bit	\n"
		"	and	%0, %2					\n"
		"	" __SC "%0, %1					\n"
		"	beqz	%0, 2f					\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	.previous					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << bit)), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		*a &= ~mask;
		raw_local_irq_restore(flags);
	}
}

/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation.  It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_clear_bit();
	clear_bit(nr, addr);
}
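
/*
 * Illustrative sketch (added commentary, not original code): open-coding
 * the same release ordering with plain clear_bit() looks like
 *
 *	smp_mb__before_clear_bit();
 *	clear_bit(LOCK_BIT, &word);	(equivalent to clear_bit_unlock())
 *
 * where LOCK_BIT and word are hypothetical names for this example.
 */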

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned short bit = nr & SZLONG_MASK;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# change_bit	\n"
		"	xor	%0, %2					\n"
		"	" __SC "%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << bit), "m" (*m));
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# change_bit	\n"
		"	xor	%0, %2					\n"
		"	" __SC "%0, %1					\n"
		"	beqz	%0, 2f					\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	.previous					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << bit), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		*a ^= mask;
		raw_local_irq_restore(flags);
	}
}
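
/*
 * Note (added commentary, an assumption made explicit): the
 * raw_local_irq_save() fallback taken when the CPU lacks ll/sc only
 * masks interrupts on the local CPU, so it is atomic only with respect
 * to that CPU; SMP-capable MIPS systems are expected to have ll/sc and
 * therefore take one of the earlier branches.
 */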

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_llsc_mb();

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC "%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << bit), "m" (*m)
		: "memory");
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noreorder				\n"
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC "%2, %1					\n"
		"	beqz	%2, 2f					\n"
		"	 and	%2, %0, %3				\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	 nop						\n"
		"	.previous					\n"
		"	.set	pop					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << bit), "m" (*m)
		: "memory");
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		res = (mask & *a);
		*a |= mask;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return res != 0;
}
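
/*
 * Illustrative usage sketch (added commentary, not original code): the
 * return value tells the caller whether it was the one to flip the bit
 * from 0 to 1:
 *
 *	if (!test_and_set_bit(0, &busy))	(busy is hypothetical)
 *		... we now own the resource ...
 *	else
 *		... it was already claimed ...
 */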

/*
 * test_and_set_bit_lock - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and implies acquire ordering semantics
 * after the memory operation.
 */
static inline int test_and_set_bit_lock(unsigned long nr,
	volatile unsigned long *addr)
{
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long res;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_set_bit_lock	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC "%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << bit), "m" (*m)
		: "memory");
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noreorder				\n"
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_set_bit_lock	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC "%2, %1					\n"
		"	beqz	%2, 2f					\n"
		"	 and	%2, %0, %3				\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	 nop						\n"
		"	.previous					\n"
		"	.set	pop					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << bit), "m" (*m)
		: "memory");
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		res = (mask & *a);
		*a |= mask;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return res != 0;
}
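
/*
 * Illustrative sketch (added commentary, not original code) of a simple
 * bit lock built from these primitives; the kernel's real bit spinlocks
 * live in <linux/bit_spinlock.h>:
 *
 *	while (test_and_set_bit_lock(MY_LOCK_BIT, &word))	(hypothetical names)
 *		cpu_relax();
 *	... critical section ...
 *	clear_bit_unlock(MY_LOCK_BIT, &word);
 */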

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_llsc_mb();

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3				\n"
		"	xor	%2, %3					\n"
		"	" __SC "%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << bit), "m" (*m)
		: "memory");
#ifdef CONFIG_CPU_MIPSR2
	} else if (__builtin_constant_p(nr)) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"1:	" __LL "%0, %1		# test_and_clear_bit	\n"
		"	" __EXT "%2, %0, %3, 1				\n"
		"	" __INS "%0, $0, %3, 1				\n"
		"	" __SC "%0, %1					\n"
		"	beqz	%0, 2f					\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	.previous					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "ir" (bit), "m" (*m)
		: "memory");
#endif
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noreorder				\n"
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3				\n"
		"	xor	%2, %3					\n"
		"	" __SC "%2, %1					\n"
		"	beqz	%2, 2f					\n"
		"	 and	%2, %0, %3				\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	 nop						\n"
		"	.previous					\n"
		"	.set	pop					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << bit), "m" (*m)
		: "memory");
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		res = (mask & *a);
		*a &= ~mask;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return res != 0;
}
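
/*
 * Illustrative usage sketch (added commentary, not original code), the
 * classic consume-a-pending-event pattern:
 *
 *	if (test_and_clear_bit(EV_BIT, &pending))	(hypothetical names)
 *		handle_event();
 *
 * Only one of several racing callers sees the bit as set, so the event
 * is handled at most once per posting.
 */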

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_llsc_mb();

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3				\n"
		"	" __SC "%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << bit), "m" (*m)
		: "memory");
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noreorder				\n"
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3				\n"
		"	" __SC "%2, %1					\n"
		"	beqz	%2, 2f					\n"
		"	 and	%2, %0, %3				\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	 nop						\n"
		"	.previous					\n"
		"	.set	pop					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << bit), "m" (*m)
		: "memory");
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		res = (mask & *a);
		*a ^= mask;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return res != 0;
}
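
/*
 * Illustrative usage sketch (added commentary, not original code):
 *
 *	old = test_and_change_bit(7, &word);	(hypothetical variables)
 *
 * Bit 7 is now inverted; old == 0 means it flipped 0 -> 1, and
 * old != 0 means it flipped 1 -> 0.
 */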

#include <asm-generic/bitops/non-atomic.h>

/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit() is non-atomic and implies release semantics before the
 * memory operation. It can be used for an unlock if no other CPUs can
 * concurrently modify other bits in the word.
 */
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb();
	__clear_bit(nr, addr);
}
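
/*
 * Note (added commentary, an assumption made explicit): __clear_bit()
 * is a plain read-modify-write, so the pattern above is safe only when
 * the lock holder is the sole possible writer to the word; a concurrent
 * update to a neighbouring bit could otherwise be lost.
 */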

#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)

/*
 * Return the bit position (0..BITS_PER_LONG - 1) of the most significant
 * 1 bit in a word.
 * Returns -1 if no 1 bit exists.
 */
static inline unsigned long __fls(unsigned long x)
{
	int lz;

	if (sizeof(x) == 4) {
		__asm__(
		"	.set	push					\n"
		"	.set	mips32					\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (lz)
		: "r" (x));

		return 31 - lz;
	}

	BUG_ON(sizeof(x) != 8);

	__asm__(
	"	.set	push					\n"
	"	.set	mips64					\n"
	"	dclz	%0, %1					\n"
	"	.set	pop					\n"
	: "=r" (lz)
	: "r" (x));

	return 63 - lz;
}
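
/*
 * Worked examples (added commentary, not original code), using the
 * 32-bit branch where clz of a zero operand is defined to yield 32:
 *
 *	__fls(1)	== 0	(clz(1) == 31, so 31 - 31)
 *	__fls(0x80000000UL) == 31
 *	__fls(0)	== -1	(clz(0) == 32, so 31 - 32)
 */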

/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..SZLONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	return __fls(word & -word);
}
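
/*
 * Note (added commentary, not original code): in two's complement,
 * word & -word isolates the lowest set bit, so the most significant
 * bit of that value is exactly the least significant set bit of @word:
 *
 *	word == 0b10100  ->  word & -word == 0b00100  ->  __ffs(word) == 2
 */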

/*
 * fls - find last bit set.
 * @word: The word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(int word)
{
	__asm__("clz %0, %1" : "=r" (word) : "r" (word));

	return 32 - word;
}

#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPS64)
static inline int fls64(__u64 word)
{
	__asm__("dclz %0, %1" : "=r" (word) : "r" (word));

	return 64 - word;
}
#else
#include <asm-generic/bitops/fls64.h>
#endif

/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, therefore differs in spirit from ffz() (man ffs).
 */
static inline int ffs(int word)
{
	if (!word)
		return 0;

	return fls(word & -word);
}
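
/*
 * Worked examples (added commentary, not original code):
 *
 *	ffs(0) == 0	(no bit set)
 *	ffs(1) == 1	(1-based, unlike __ffs())
 *	ffs(8) == 4
 */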

#else

#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/fls64.h>

#endif /* defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) */

#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/ext2-non-atomic.h>
#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */