author    Linus Torvalds <torvalds@linux-foundation.org>  2016-07-25 15:41:29 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2016-07-25 15:41:29 -0400
commit    c86ad14d305d2429c3da19462440bac50c183def (patch)
tree      bd794cd72476661faf82c440063c217bb978ce44 /arch/metag
parent    a2303849a6b4b7ba59667091e00d6bb194071d9a (diff)
parent    f06628638cf6e75f179742b6c1b35076965b9fdd (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
"The locking tree was busier in this cycle than the usual pattern - a
couple of major projects happened to coincide.
The main changes are:
- implement the atomic_fetch_{add,sub,and,or,xor}() API natively
across all SMP architectures (Peter Zijlstra); the fetch vs. return
semantics are sketched just after this list
- add atomic_fetch_{inc/dec}() as well, using the generic primitives
(Davidlohr Bueso)
- optimize various aspects of rwsems (Jason Low, Davidlohr Bueso,
Waiman Long)
- optimize smp_cond_load_acquire() on arm64 and implement LSE based
atomic{,64}_fetch_{add,sub,and,andnot,or,xor}{,_relaxed,_acquire,_release}()
on arm64 (Will Deacon)
- introduce smp_acquire__after_ctrl_dep() and fix various barrier
mis-uses and bugs (Peter Zijlstra)
- after discovering ancient spin_unlock_wait() barrier bugs in its
implementation and usage, strengthen its semantics and update/fix
usage sites (Peter Zijlstra)
- optimize mutex_trylock() fastpath (Peter Zijlstra)
- ... misc fixes and cleanups"
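The first two items hinge on a return-value contract worth spelling out:
atomic_op_return() hands back the value *after* the operation, while the
new atomic_fetch_op() hands back the value the variable held *before* it.
A minimal user-space sketch of that distinction, using C11 <stdatomic.h>
rather than the kernel's atomic_t API; main() and the variable names here
are illustrative only:

    #include <stdatomic.h>
    #include <stdio.h>

    int main(void)
    {
            atomic_int v = 10;

            /* fetch-style: returns the OLD value; v becomes 13 */
            int old = atomic_fetch_add(&v, 3);          /* old == 10 */

            /* return-style: the kernel's atomic_add_return() yields the
             * NEW value; in C11 that is the fetch result plus the operand */
            int new = atomic_fetch_add(&v, 3) + 3;      /* new == 16 */

            printf("old=%d new=%d final=%d\n", old, new, atomic_load(&v));
            return 0;
    }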
* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (67 commits)
locking/atomic: Introduce inc/dec variants for the atomic_fetch_$op() API
locking/barriers, arch/arm64: Implement LDXR+WFE based smp_cond_load_acquire()
locking/static_keys: Fix non static symbol Sparse warning
locking/qspinlock: Use __this_cpu_dec() instead of full-blown this_cpu_dec()
locking/atomic, arch/tile: Fix tilepro build
locking/atomic, arch/m68k: Remove comment
locking/atomic, arch/arc: Fix build
locking/Documentation: Clarify limited control-dependency scope
locking/atomic, arch/rwsem: Employ atomic_long_fetch_add()
locking/atomic, arch/qrwlock: Employ atomic_fetch_add_acquire()
locking/atomic, arch/mips: Convert to _relaxed atomics
locking/atomic, arch/alpha: Convert to _relaxed atomics
locking/atomic: Remove the deprecated atomic_{set,clear}_mask() functions
locking/atomic: Remove linux/atomic.h:atomic_fetch_or()
locking/atomic: Implement atomic{,64,_long}_fetch_{add,sub,and,andnot,or,xor}{,_relaxed,_acquire,_release}()
locking/atomic: Fix atomic64_relaxed() bits
locking/atomic, arch/xtensa: Implement atomic_fetch_{add,sub,and,or,xor}()
locking/atomic, arch/x86: Implement atomic{,64}_fetch_{add,sub,and,or,xor}()
locking/atomic, arch/tile: Implement atomic{,64}_fetch_{add,sub,and,or,xor}()
locking/atomic, arch/sparc: Implement atomic{,64}_fetch_{add,sub,and,or,xor}()
...
Diffstat (limited to 'arch/metag')
-rw-r--r--  arch/metag/include/asm/atomic_lnkget.h  36
-rw-r--r--  arch/metag/include/asm/atomic_lock1.h   33
-rw-r--r--  arch/metag/include/asm/spinlock.h       14
3 files changed, 73 insertions, 10 deletions
diff --git a/arch/metag/include/asm/atomic_lnkget.h b/arch/metag/include/asm/atomic_lnkget.h
index 88fa25fae8bd..def2c642f053 100644
--- a/arch/metag/include/asm/atomic_lnkget.h
+++ b/arch/metag/include/asm/atomic_lnkget.h
@@ -69,16 +69,44 @@ static inline int atomic_##op##_return(int i, atomic_t *v)	\
 	return result;						\
 }
 
-#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
+#define ATOMIC_FETCH_OP(op)					\
+static inline int atomic_fetch_##op(int i, atomic_t *v)	\
+{								\
+	int result, temp;					\
+								\
+	smp_mb();						\
+								\
+	asm volatile (						\
+		"1:	LNKGETD %1, [%2]\n"			\
+		"	" #op "	%0, %1, %3\n"			\
+		"	LNKSETD [%2], %0\n"			\
+		"	DEFR	%0, TXSTAT\n"			\
+		"	ANDT	%0, %0, #HI(0x3f000000)\n"	\
+		"	CMPT	%0, #HI(0x02000000)\n"		\
+		"	BNZ	1b\n"				\
+		: "=&d" (temp), "=&d" (result)			\
+		: "da" (&v->counter), "bd" (i)			\
+		: "cc");					\
+								\
+	smp_mb();						\
+								\
+	return result;						\
+}
+
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
 
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
-ATOMIC_OP(and)
-ATOMIC_OP(or)
-ATOMIC_OP(xor)
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
+
+ATOMIC_OPS(and)
+ATOMIC_OPS(or)
+ATOMIC_OPS(xor)
 
 #undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
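The LNKGETD/LNKSETD pair in the hunk above is Meta's linked load/store:
LNKGETD reads the current counter, #op computes the new value, LNKSETD
attempts the conditional store, and the DEFR/ANDT/CMPT sequence tests
TXSTAT so that BNZ retries when the reservation was lost; the smp_mb()
on either side makes the whole operation fully ordered. A rough
user-space analogue of that retry loop, substituting a C11
compare-and-swap for the linked load/store (fetch_or_casloop is an
illustrative name, not kernel code):

    #include <stdatomic.h>

    /* Retry loop in the spirit of the LNKGETD/LNKSETD sequence: load
     * the old value, compute old | mask, attempt a conditional store,
     * and loop on failure (the BNZ 1b). Returns the pre-update value,
     * as atomic_fetch_or() must. */
    static int fetch_or_casloop(atomic_int *v, int mask)
    {
            int old = atomic_load_explicit(v, memory_order_relaxed);

            while (!atomic_compare_exchange_weak_explicit(
                            v, &old, old | mask,
                            memory_order_seq_cst,  /* stands in for the smp_mb() pair */
                            memory_order_relaxed))
                    ;       /* 'old' was reloaded on failure; recompute and retry */
            return old;
    }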
diff --git a/arch/metag/include/asm/atomic_lock1.h b/arch/metag/include/asm/atomic_lock1.h
index 0295d9b8d5bf..6c1380a8a0d4 100644
--- a/arch/metag/include/asm/atomic_lock1.h
+++ b/arch/metag/include/asm/atomic_lock1.h
@@ -64,15 +64,40 @@ static inline int atomic_##op##_return(int i, atomic_t *v)	\
 	return result;						\
 }
 
-#define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)
+#define ATOMIC_FETCH_OP(op, c_op)				\
+static inline int atomic_fetch_##op(int i, atomic_t *v)	\
+{								\
+	unsigned long result;					\
+	unsigned long flags;					\
+								\
+	__global_lock1(flags);					\
+	result = v->counter;					\
+	fence();						\
+	v->counter c_op i;					\
+	__global_unlock1(flags);				\
+								\
+	return result;						\
+}
+
+#define ATOMIC_OPS(op, c_op)					\
+	ATOMIC_OP(op, c_op)					\
+	ATOMIC_OP_RETURN(op, c_op)				\
+	ATOMIC_FETCH_OP(op, c_op)
 
 ATOMIC_OPS(add, +=)
 ATOMIC_OPS(sub, -=)
-ATOMIC_OP(and, &=)
-ATOMIC_OP(or, |=)
-ATOMIC_OP(xor, ^=)
 
 #undef ATOMIC_OPS
+#define ATOMIC_OPS(op, c_op)					\
+	ATOMIC_OP(op, c_op)					\
+	ATOMIC_FETCH_OP(op, c_op)
+
+ATOMIC_OPS(and, &=)
+ATOMIC_OPS(or, |=)
+ATOMIC_OPS(xor, ^=)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
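atomic_lock1.h is the fallback for Meta cores without lnkget/lnkset
atomicity: every atomic serializes through one global hardware lock, so
atomic_fetch_##op() simply snapshots v->counter before applying c_op
inside the __global_lock1()/__global_unlock1() critical section (fence()
is a metag-specific write fence). A pthread-based user-space analogue of
the same pattern; fetch_add_locked and global_lock are illustrative
names:

    #include <pthread.h>

    static pthread_mutex_t global_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Lock-based fetch-op in the style of the lock1 variant: take the
     * one big lock, record the old value, apply the operation, unlock.
     * Correct, but every atomic in the program contends on the same
     * lock, which is exactly the lock1 trade-off. */
    static long fetch_add_locked(long *counter, long i)
    {
            long old;

            pthread_mutex_lock(&global_lock);
            old = *counter;         /* snapshot taken before the update */
            *counter += i;
            pthread_mutex_unlock(&global_lock);
            return old;
    }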
diff --git a/arch/metag/include/asm/spinlock.h b/arch/metag/include/asm/spinlock.h
index 86a7cf3d1386..c0c7a22be1ae 100644
--- a/arch/metag/include/asm/spinlock.h
+++ b/arch/metag/include/asm/spinlock.h
@@ -1,14 +1,24 @@
 #ifndef __ASM_SPINLOCK_H
 #define __ASM_SPINLOCK_H
 
+#include <asm/barrier.h>
+#include <asm/processor.h>
+
 #ifdef CONFIG_METAG_ATOMICITY_LOCK1
 #include <asm/spinlock_lock1.h>
 #else
 #include <asm/spinlock_lnkget.h>
 #endif
 
-#define arch_spin_unlock_wait(lock) \
-	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
+/*
+ * both lock1 and lnkget are test-and-set spinlocks with 0 unlocked and 1
+ * locked.
+ */
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+	smp_cond_load_acquire(&lock->lock, !VAL);
+}
 
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 
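The spinlock.h hunk replaces an open-coded busy-wait, which gave no
memory-ordering guarantee, with smp_cond_load_acquire(&lock->lock, !VAL):
spin until the lock word reads zero, then provide ACQUIRE semantics so
accesses issued after the wait cannot be reordered before the observed
unlock. A user-space sketch of that behavior; it is slightly stronger
than the kernel primitive, which loops on relaxed loads and applies one
acquire barrier at the end, and spin_unlock_wait_sketch is an
illustrative name:

    #include <stdatomic.h>

    /* Spin until *lock reads 0, with acquire ordering on the load so
     * nothing after the wait can be hoisted above the moment the lock
     * was seen unlocked. The kernel loop also relaxes the CPU between
     * polls (cpu_relax()). */
    static void spin_unlock_wait_sketch(atomic_int *lock)
    {
            while (atomic_load_explicit(lock, memory_order_acquire))
                    ;       /* test-and-set lock: nonzero means held */
    }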