author     Peter Zijlstra <peterz@infradead.org>  2015-04-23 15:44:42 -0400
committer  Thomas Gleixner <tglx@linutronix.de>   2015-07-27 08:06:22 -0400
commit     d835b6c4cc02507b3bf3f8ee6c86857cf0ee67ab (patch)
tree       4bee7d7516eb8f382d137cde96b46fe2b6eea442
parent     f8a570e270bf62363cd498ac2ac8ea07a76ad4d6 (diff)
blackfin: Provide atomic_{or,xor,and}
Implement atomic logic ops -- atomic_{or,xor,and}.

These will replace the atomic_{set,clear}_mask functions that are
available on some archs.

TODO: use inline asm or at least asm macros to collapse the lot.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--  arch/blackfin/include/asm/atomic.h  28
-rw-r--r--  arch/blackfin/kernel/bfin_ksyms.c    7
-rw-r--r--  arch/blackfin/mach-bf561/atomic.S   30
3 files changed, 40 insertions(+), 25 deletions(-)
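
For readers tracking the API change: call sites of atomic_set_mask()/atomic_clear_mask() migrate to the new logic ops. A minimal user-space sketch of the migration (plain C, illustrative only -- no real atomicity here; the kernel macros expand to the SMP-safe __raw_atomic_*_asm routines changed below):

#include <stdio.h>

/* Plain-C stand-ins for the new ops; the real macros expand to
 * (void)__raw_atomic_{or,and,xor}_asm(&(v)->counter, i). */
typedef struct { volatile int counter; } atomic_t;

static void atomic_or(int i, atomic_t *v)  { v->counter |= i; }
static void atomic_and(int i, atomic_t *v) { v->counter &= i; }
static void atomic_xor(int i, atomic_t *v) { v->counter ^= i; }

int main(void)
{
	atomic_t flags = { 0x0f };

	atomic_or(0x10, &flags);    /* was: atomic_set_mask(0x10, &flags)   -> 0x1f */
	atomic_and(~0x03, &flags);  /* was: atomic_clear_mask(0x03, &flags) -> 0x1c */
	atomic_xor(0x01, &flags);   /* toggle bit 0                         -> 0x1d */

	printf("%#x\n", flags.counter);  /* prints 0x1d */
	return 0;
}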
diff --git a/arch/blackfin/include/asm/atomic.h b/arch/blackfin/include/asm/atomic.h
index a107a98e9978..eafa55b81a7b 100644
--- a/arch/blackfin/include/asm/atomic.h
+++ b/arch/blackfin/include/asm/atomic.h
@@ -16,19 +16,33 @@
 #include <linux/types.h>
 
 asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr);
-asmlinkage int __raw_atomic_update_asm(volatile int *ptr, int value);
-asmlinkage int __raw_atomic_clear_asm(volatile int *ptr, int value);
-asmlinkage int __raw_atomic_set_asm(volatile int *ptr, int value);
+asmlinkage int __raw_atomic_add_asm(volatile int *ptr, int value);
+
+asmlinkage int __raw_atomic_and_asm(volatile int *ptr, int value);
+asmlinkage int __raw_atomic_or_asm(volatile int *ptr, int value);
 asmlinkage int __raw_atomic_xor_asm(volatile int *ptr, int value);
 asmlinkage int __raw_atomic_test_asm(const volatile int *ptr, int value);
 
 #define atomic_read(v) __raw_uncached_fetch_asm(&(v)->counter)
 
-#define atomic_add_return(i, v) __raw_atomic_update_asm(&(v)->counter, i)
-#define atomic_sub_return(i, v) __raw_atomic_update_asm(&(v)->counter, -(i))
+#define atomic_add_return(i, v) __raw_atomic_add_asm(&(v)->counter, i)
+#define atomic_sub_return(i, v) __raw_atomic_add_asm(&(v)->counter, -(i))
+
+#define CONFIG_ARCH_HAS_ATOMIC_OR
+
+#define atomic_or(i, v)  (void)__raw_atomic_or_asm(&(v)->counter, i)
+#define atomic_and(i, v) (void)__raw_atomic_and_asm(&(v)->counter, i)
+#define atomic_xor(i, v) (void)__raw_atomic_xor_asm(&(v)->counter, i)
+
+static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+	atomic_and(~mask, v);
+}
 
-#define atomic_clear_mask(m, v) __raw_atomic_clear_asm(&(v)->counter, m)
-#define atomic_set_mask(m, v) __raw_atomic_set_asm(&(v)->counter, m)
+static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+	atomic_or(mask, v);
+}
 
 #endif
 
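An aside on the deprecated wrappers added in the hunk above: they keep existing callers building while routing through the new primitives. A quick equivalence check in plain C (simplified model, not the kernel types or atomicity guarantees):

#include <assert.h>

typedef struct { volatile int counter; } atomic_t;

static void atomic_and(int i, atomic_t *v) { v->counter &= i; }
static void atomic_or(int i, atomic_t *v)  { v->counter |= i; }

/* Models of the deprecated wrappers from the hunk above. */
static void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	atomic_and(~mask, v);   /* clear = AND with the inverted mask */
}

static void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	atomic_or(mask, v);     /* set = OR the mask in */
}

int main(void)
{
	atomic_t v = { 0 };

	atomic_set_mask(0x0c, &v);    /* sets bits 2 and 3 -> 0x0c */
	atomic_clear_mask(0x04, &v);  /* clears bit 2      -> 0x08 */
	assert(v.counter == 0x08);
	return 0;
}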
diff --git a/arch/blackfin/kernel/bfin_ksyms.c b/arch/blackfin/kernel/bfin_ksyms.c
index c446591b961d..a401c27b69b4 100644
--- a/arch/blackfin/kernel/bfin_ksyms.c
+++ b/arch/blackfin/kernel/bfin_ksyms.c
@@ -83,11 +83,12 @@ EXPORT_SYMBOL(insl);
 EXPORT_SYMBOL(insl_16);
 
 #ifdef CONFIG_SMP
-EXPORT_SYMBOL(__raw_atomic_update_asm);
-EXPORT_SYMBOL(__raw_atomic_clear_asm);
-EXPORT_SYMBOL(__raw_atomic_set_asm);
+EXPORT_SYMBOL(__raw_atomic_add_asm);
+EXPORT_SYMBOL(__raw_atomic_and_asm);
+EXPORT_SYMBOL(__raw_atomic_or_asm);
 EXPORT_SYMBOL(__raw_atomic_xor_asm);
 EXPORT_SYMBOL(__raw_atomic_test_asm);
+
 EXPORT_SYMBOL(__raw_xchg_1_asm);
 EXPORT_SYMBOL(__raw_xchg_2_asm);
 EXPORT_SYMBOL(__raw_xchg_4_asm);
diff --git a/arch/blackfin/mach-bf561/atomic.S b/arch/blackfin/mach-bf561/atomic.S
index 2a08df8e8c4c..26fccb5568b9 100644
--- a/arch/blackfin/mach-bf561/atomic.S
+++ b/arch/blackfin/mach-bf561/atomic.S
@@ -587,10 +587,10 @@ ENDPROC(___raw_write_unlock_asm)
  * r0 = ptr
  * r1 = value
  *
- * Add a signed value to a 32bit word and return the new value atomically.
+ * ADD a signed value to a 32bit word and return the new value atomically.
  * Clobbers: r3:0, p1:0
  */
-ENTRY(___raw_atomic_update_asm)
+ENTRY(___raw_atomic_add_asm)
 	p1 = r0;
 	r3 = r1;
 	[--sp] = rets;
@@ -603,19 +603,19 @@ ENTRY(___raw_atomic_update_asm)
 	r0 = r3;
 	rets = [sp++];
 	rts;
-ENDPROC(___raw_atomic_update_asm)
+ENDPROC(___raw_atomic_add_asm)
 
 /*
  * r0 = ptr
  * r1 = mask
  *
- * Clear the mask bits from a 32bit word and return the old 32bit value
+ * AND the mask bits from a 32bit word and return the old 32bit value
  * atomically.
  * Clobbers: r3:0, p1:0
  */
-ENTRY(___raw_atomic_clear_asm)
+ENTRY(___raw_atomic_and_asm)
 	p1 = r0;
-	r3 = ~r1;
+	r3 = r1;
 	[--sp] = rets;
 	call _get_core_lock;
 	r2 = [p1];
@@ -627,17 +627,17 @@ ENTRY(___raw_atomic_clear_asm)
 	r0 = r3;
 	rets = [sp++];
 	rts;
-ENDPROC(___raw_atomic_clear_asm)
+ENDPROC(___raw_atomic_and_asm)
 
 /*
  * r0 = ptr
  * r1 = mask
  *
- * Set the mask bits into a 32bit word and return the old 32bit value
+ * OR the mask bits into a 32bit word and return the old 32bit value
  * atomically.
  * Clobbers: r3:0, p1:0
  */
-ENTRY(___raw_atomic_set_asm)
+ENTRY(___raw_atomic_or_asm)
 	p1 = r0;
 	r3 = r1;
 	[--sp] = rets;
@@ -651,7 +651,7 @@ ENTRY(___raw_atomic_set_asm)
 	r0 = r3;
 	rets = [sp++];
 	rts;
-ENDPROC(___raw_atomic_set_asm)
+ENDPROC(___raw_atomic_or_asm)
 
 /*
  * r0 = ptr
@@ -787,7 +787,7 @@ ENTRY(___raw_bit_set_asm)
 	r2 = r1;
 	r1 = 1;
 	r1 <<= r2;
-	jump ___raw_atomic_set_asm
+	jump ___raw_atomic_or_asm
 ENDPROC(___raw_bit_set_asm)
 
 /*
@@ -798,10 +798,10 @@ ENDPROC(___raw_bit_set_asm)
  * Clobbers: r3:0, p1:0
  */
 ENTRY(___raw_bit_clear_asm)
-	r2 = r1;
-	r1 = 1;
-	r1 <<= r2;
-	jump ___raw_atomic_clear_asm
+	r2 = 1;
+	r2 <<= r1;
+	r1 = ~r2;
+	jump ___raw_atomic_and_asm
 ENDPROC(___raw_bit_clear_asm)
 
 /*
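
One subtlety in the last hunk: the old ___raw_atomic_clear_asm inverted the mask itself (r3 = ~r1), while the new ___raw_atomic_and_asm applies its operand as given, so ___raw_bit_clear_asm must now build the inverted single-bit mask before jumping. A plain-C model of the old and new call paths (illustrative names, not kernel API; the return-the-old-value semantics follow the atomic.S comments):

#include <assert.h>

/* Old routine: the callee inverts, so callers pass the raw mask. */
static int raw_atomic_clear(volatile int *ptr, int mask)
{
	int old = *ptr;
	*ptr = old & ~mask;
	return old;
}

/* New routine: ANDs the operand as given; callers must invert. */
static int raw_atomic_and(volatile int *ptr, int mask)
{
	int old = *ptr;
	*ptr = old & mask;
	return old;
}

static void bit_clear_old(volatile int *ptr, int nr)
{
	raw_atomic_clear(ptr, 1 << nr);    /* old path: r1 = 1 << nr    */
}

static void bit_clear_new(volatile int *ptr, int nr)
{
	raw_atomic_and(ptr, ~(1 << nr));   /* new path: r1 = ~(1 << nr) */
}

int main(void)
{
	volatile int a = 0xff, b = 0xff;

	bit_clear_old(&a, 3);
	bit_clear_new(&b, 3);
	assert(a == 0xf7 && b == 0xf7);  /* both clear bit 3 */
	return 0;
}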