author		Linus Torvalds <torvalds@linux-foundation.org>	2015-09-03 18:46:07 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-09-03 18:46:07 -0400
commit		ca520cab25e0e8da717c596ccaa2c2b3650cfa09
tree		883eb497642d98635817f9cf954ac98e043fb573 /arch/blackfin
parent		4c12ab7e5e2e892fa94df500f96001837918a281
parent		d420acd816c07c7be31bd19d09cbcb16e5572fa6
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking and atomic updates from Ingo Molnar:
 "Main changes in this cycle are:

  - Extend atomic primitives with coherent logic op primitives
    (atomic_{or,and,xor}()) and deprecate the old partial APIs
    (atomic_{set,clear}_mask())

    The old ops were incoherent with incompatible signatures across
    architectures and with incomplete support. Now every architecture
    supports the primitives consistently (by Peter Zijlstra)

  - Generic support for 'relaxed atomics':

      - _acquire/release/relaxed() flavours of xchg(), cmpxchg() and
        {add,sub}_return()
      - atomic_read_acquire()
      - atomic_set_release()

    This came out of porting qrwlock code to arm64 (by Will Deacon)

  - Clean up the fragile static_key APIs that were causing repeat bugs,
    by introducing a new one:

      DEFINE_STATIC_KEY_TRUE(name);
      DEFINE_STATIC_KEY_FALSE(name);

    which define a key of different types with an initial true/false
    value.

    Then allow:

      static_branch_likely()
      static_branch_unlikely()

    to take a key of either type and emit the right instruction for the
    case. To be able to know the 'type' of the static key we encode it
    in the jump entry (by Peter Zijlstra)

  - Static key self-tests (by Jason Baron)

  - qrwlock optimizations (by Waiman Long)

  - small futex enhancements (by Davidlohr Bueso)

  - ... and misc other changes"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (63 commits)
  jump_label/x86: Work around asm build bug on older/backported GCCs
  locking, ARM, atomics: Define our SMP atomics in terms of _relaxed() operations
  locking, include/llist: Use linux/atomic.h instead of asm/cmpxchg.h
  locking/qrwlock: Make use of _{acquire|release|relaxed}() atomics
  locking/qrwlock: Implement queue_write_unlock() using smp_store_release()
  locking/lockref: Remove homebrew cmpxchg64_relaxed() macro definition
  locking, asm-generic: Add _{relaxed|acquire|release}() variants for 'atomic_long_t'
  locking, asm-generic: Rework atomic-long.h to avoid bulk code duplication
  locking/atomics: Add _{acquire|release|relaxed}() variants of some atomic operations
  locking, compiler.h: Cast away attributes in the WRITE_ONCE() magic
  locking/static_keys: Make verify_keys() static
  jump label, locking/static_keys: Update docs
  locking/static_keys: Provide a selftest
  jump_label: Provide a self-test
  s390/uaccess, locking/static_keys: employ static_branch_likely()
  x86, tsc, locking/static_keys: Employ static_branch_likely()
  locking/static_keys: Add selftest
  locking/static_keys: Add a new static_key interface
  locking/static_keys: Rework update logic
  locking/static_keys: Add static_key_{en,dis}able() helpers
  ...
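For orientation, here is a minimal sketch of the interfaces this cycle introduces, shown next to the calls they replace. It is illustrative kernel-style code, not part of this merge; the key name 'example_key' and the flag word are invented for the example.

	#include <linux/atomic.h>
	#include <linux/jump_label.h>
	#include <linux/printk.h>

	static atomic_t flags;                          /* hypothetical flag word */
	static DEFINE_STATIC_KEY_FALSE(example_key);    /* key defined 'false' */

	static void sketch(void)
	{
		int v;

		/* Coherent logic ops (void return); these replace the old
		 * atomic_set_mask()/atomic_clear_mask() pair. */
		atomic_or(0x01, &flags);        /* was atomic_set_mask(0x01, ...) */
		atomic_and(~0x02, &flags);      /* was atomic_clear_mask(0x02, ...) */
		atomic_xor(0x04, &flags);

		/* Relaxed atomics: _relaxed() has no ordering beyond atomicity;
		 * _acquire()/_release() give one-way barriers. */
		v = atomic_read_acquire(&flags);
		atomic_set_release(&flags, v | 0x08);

		/* New static-key API: the key's type (true/false) is fixed at
		 * definition time and encoded in the jump entry, so the branch
		 * helper can emit the right instruction for either type. */
		if (static_branch_unlikely(&example_key))
			pr_debug("feature enabled\n");
	}

Flipping a key at runtime goes through static_branch_enable()/static_branch_disable(), which patch the branch sites in place rather than testing a variable on every pass.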
Diffstat (limited to 'arch/blackfin')
-rw-r--r--	arch/blackfin/include/asm/atomic.h	16
-rw-r--r--	arch/blackfin/kernel/bfin_ksyms.c	7
-rw-r--r--	arch/blackfin/mach-bf561/atomic.S	30
-rw-r--r--	arch/blackfin/mach-common/smp.c	2
4 files changed, 29 insertions(+), 26 deletions(-)
diff --git a/arch/blackfin/include/asm/atomic.h b/arch/blackfin/include/asm/atomic.h
index a107a98e9978..1c1c42330c99 100644
--- a/arch/blackfin/include/asm/atomic.h
+++ b/arch/blackfin/include/asm/atomic.h
@@ -16,19 +16,21 @@
 #include <linux/types.h>
 
 asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr);
-asmlinkage int __raw_atomic_update_asm(volatile int *ptr, int value);
-asmlinkage int __raw_atomic_clear_asm(volatile int *ptr, int value);
-asmlinkage int __raw_atomic_set_asm(volatile int *ptr, int value);
+asmlinkage int __raw_atomic_add_asm(volatile int *ptr, int value);
+
+asmlinkage int __raw_atomic_and_asm(volatile int *ptr, int value);
+asmlinkage int __raw_atomic_or_asm(volatile int *ptr, int value);
 asmlinkage int __raw_atomic_xor_asm(volatile int *ptr, int value);
 asmlinkage int __raw_atomic_test_asm(const volatile int *ptr, int value);
 
 #define atomic_read(v)	__raw_uncached_fetch_asm(&(v)->counter)
 
-#define atomic_add_return(i, v) __raw_atomic_update_asm(&(v)->counter, i)
-#define atomic_sub_return(i, v) __raw_atomic_update_asm(&(v)->counter, -(i))
+#define atomic_add_return(i, v) __raw_atomic_add_asm(&(v)->counter, i)
+#define atomic_sub_return(i, v) __raw_atomic_add_asm(&(v)->counter, -(i))
 
-#define atomic_clear_mask(m, v) __raw_atomic_clear_asm(&(v)->counter, m)
-#define atomic_set_mask(m, v)   __raw_atomic_set_asm(&(v)->counter, m)
+#define atomic_or(i, v)  (void)__raw_atomic_or_asm(&(v)->counter, i)
+#define atomic_and(i, v) (void)__raw_atomic_and_asm(&(v)->counter, i)
+#define atomic_xor(i, v) (void)__raw_atomic_xor_asm(&(v)->counter, i)
 
 #endif
 
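The conversion recipe for callers of the deprecated mask API is mechanical, as the smp.c hunk below shows for the set case. A hedged sketch of both directions (the _compat helper names are invented for illustration):

	#include <linux/atomic.h>

	static inline void set_mask_compat(int mask, atomic_t *v)
	{
		atomic_or(mask, v);             /* was: atomic_set_mask(mask, v) */
	}

	static inline void clear_mask_compat(int mask, atomic_t *v)
	{
		atomic_and(~mask, v);           /* was: atomic_clear_mask(mask, v) */
	}

Note that clearing now requires the caller to invert the mask: the old clear primitive took the bits to clear, while atomic_and() takes the bits to keep.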
diff --git a/arch/blackfin/kernel/bfin_ksyms.c b/arch/blackfin/kernel/bfin_ksyms.c
index c446591b961d..a401c27b69b4 100644
--- a/arch/blackfin/kernel/bfin_ksyms.c
+++ b/arch/blackfin/kernel/bfin_ksyms.c
@@ -83,11 +83,12 @@ EXPORT_SYMBOL(insl);
 EXPORT_SYMBOL(insl_16);
 
 #ifdef CONFIG_SMP
-EXPORT_SYMBOL(__raw_atomic_update_asm);
-EXPORT_SYMBOL(__raw_atomic_clear_asm);
-EXPORT_SYMBOL(__raw_atomic_set_asm);
+EXPORT_SYMBOL(__raw_atomic_add_asm);
+EXPORT_SYMBOL(__raw_atomic_and_asm);
+EXPORT_SYMBOL(__raw_atomic_or_asm);
 EXPORT_SYMBOL(__raw_atomic_xor_asm);
 EXPORT_SYMBOL(__raw_atomic_test_asm);
+
 EXPORT_SYMBOL(__raw_xchg_1_asm);
 EXPORT_SYMBOL(__raw_xchg_2_asm);
 EXPORT_SYMBOL(__raw_xchg_4_asm);
diff --git a/arch/blackfin/mach-bf561/atomic.S b/arch/blackfin/mach-bf561/atomic.S
index 2a08df8e8c4c..26fccb5568b9 100644
--- a/arch/blackfin/mach-bf561/atomic.S
+++ b/arch/blackfin/mach-bf561/atomic.S
@@ -587,10 +587,10 @@ ENDPROC(___raw_write_unlock_asm)
  * r0 = ptr
  * r1 = value
  *
- * Add a signed value to a 32bit word and return the new value atomically.
+ * ADD a signed value to a 32bit word and return the new value atomically.
  * Clobbers: r3:0, p1:0
  */
-ENTRY(___raw_atomic_update_asm)
+ENTRY(___raw_atomic_add_asm)
 	p1 = r0;
 	r3 = r1;
 	[--sp] = rets;
@@ -603,19 +603,19 @@ ENTRY(___raw_atomic_update_asm)
 	r0 = r3;
 	rets = [sp++];
 	rts;
-ENDPROC(___raw_atomic_update_asm)
+ENDPROC(___raw_atomic_add_asm)
 
 /*
  * r0 = ptr
  * r1 = mask
  *
- * Clear the mask bits from a 32bit word and return the old 32bit value
+ * AND the mask bits from a 32bit word and return the old 32bit value
  * atomically.
  * Clobbers: r3:0, p1:0
  */
-ENTRY(___raw_atomic_clear_asm)
+ENTRY(___raw_atomic_and_asm)
 	p1 = r0;
-	r3 = ~r1;
+	r3 = r1;
 	[--sp] = rets;
 	call _get_core_lock;
 	r2 = [p1];
@@ -627,17 +627,17 @@ ENTRY(___raw_atomic_clear_asm)
 	r0 = r3;
 	rets = [sp++];
 	rts;
-ENDPROC(___raw_atomic_clear_asm)
+ENDPROC(___raw_atomic_and_asm)
 
 /*
  * r0 = ptr
  * r1 = mask
  *
- * Set the mask bits into a 32bit word and return the old 32bit value
+ * OR the mask bits into a 32bit word and return the old 32bit value
  * atomically.
  * Clobbers: r3:0, p1:0
  */
-ENTRY(___raw_atomic_set_asm)
+ENTRY(___raw_atomic_or_asm)
 	p1 = r0;
 	r3 = r1;
 	[--sp] = rets;
@@ -651,7 +651,7 @@ ENTRY(___raw_atomic_set_asm)
 	r0 = r3;
 	rets = [sp++];
 	rts;
-ENDPROC(___raw_atomic_set_asm)
+ENDPROC(___raw_atomic_or_asm)
 
 /*
  * r0 = ptr
@@ -787,7 +787,7 @@ ENTRY(___raw_bit_set_asm)
 	r2 = r1;
 	r1 = 1;
 	r1 <<= r2;
-	jump ___raw_atomic_set_asm
+	jump ___raw_atomic_or_asm
ENDPROC(___raw_bit_set_asm)
 
 /*
@@ -798,10 +798,10 @@ ENDPROC(___raw_bit_set_asm)
  * Clobbers: r3:0, p1:0
  */
 ENTRY(___raw_bit_clear_asm)
-	r2 = r1;
-	r1 = 1;
-	r1 <<= r2;
-	jump ___raw_atomic_clear_asm
+	r2 = 1;
+	r2 <<= r1;
+	r1 = ~r2;
+	jump ___raw_atomic_and_asm
 ENDPROC(___raw_bit_clear_asm)
 
 /*
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 1c7259597395..0030e21cfceb 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -195,7 +195,7 @@ void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg)
 	local_irq_save(flags);
 	for_each_cpu(cpu, cpumask) {
 		bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
-		atomic_set_mask((1 << msg), &bfin_ipi_data->bits);
+		atomic_or((1 << msg), &bfin_ipi_data->bits);
 		atomic_inc(&bfin_ipi_data->count);
 	}
 	local_irq_restore(flags);
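This send side pairs with a receive side that typically drains all pending message bits in one atomic exchange. A hedged sketch of that half of the pattern (struct and function names invented to mirror the fields used above; not code from this diff):

	#include <linux/atomic.h>
	#include <linux/bitops.h>

	struct ipi_data_sketch {
		atomic_t bits;                  /* one bit per ipi_message_type */
		atomic_t count;
	};

	static void handle_ipi_sketch(struct ipi_data_sketch *data)
	{
		unsigned long pending = atomic_xchg(&data->bits, 0);

		while (pending) {
			int msg = __ffs(pending);       /* lowest pending message */

			pending &= ~(1UL << msg);
			/* ... dispatch msg ... */
		}
	}

Because atomic_or() on the send side and atomic_xchg() on the receive side each operate on the whole word atomically, no lock is needed around the bitmask itself.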