author		Linus Torvalds <torvalds@linux-foundation.org>	2016-07-25 15:41:29 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-07-25 15:41:29 -0400
commit		c86ad14d305d2429c3da19462440bac50c183def (patch)
tree		bd794cd72476661faf82c440063c217bb978ce44 /arch/sh/include/asm
parent		a2303849a6b4b7ba59667091e00d6bb194071d9a (diff)
parent		f06628638cf6e75f179742b6c1b35076965b9fdd (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
 "The locking tree was busier in this cycle than the usual pattern - a
  couple of major projects happened to coincide.

  The main changes are:

   - implement the atomic_fetch_{add,sub,and,or,xor}() API natively
     across all SMP architectures (Peter Zijlstra)

   - add atomic_fetch_{inc/dec}() as well, using the generic primitives
     (Davidlohr Bueso)

   - optimize various aspects of rwsems (Jason Low, Davidlohr Bueso,
     Waiman Long)

   - optimize smp_cond_load_acquire() on arm64 and implement LSE based
     atomic{,64}_fetch_{add,sub,and,andnot,or,xor}{,_relaxed,_acquire,_release}()
     on arm64 (Will Deacon)

   - introduce smp_acquire__after_ctrl_dep() and fix various barrier
     mis-uses and bugs (Peter Zijlstra)

   - after discovering ancient spin_unlock_wait() barrier bugs in its
     implementation and usage, strengthen its semantics and update/fix
     usage sites (Peter Zijlstra)

   - optimize mutex_trylock() fastpath (Peter Zijlstra)

   - ... misc fixes and cleanups"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (67 commits)
  locking/atomic: Introduce inc/dec variants for the atomic_fetch_$op() API
  locking/barriers, arch/arm64: Implement LDXR+WFE based smp_cond_load_acquire()
  locking/static_keys: Fix non static symbol Sparse warning
  locking/qspinlock: Use __this_cpu_dec() instead of full-blown this_cpu_dec()
  locking/atomic, arch/tile: Fix tilepro build
  locking/atomic, arch/m68k: Remove comment
  locking/atomic, arch/arc: Fix build
  locking/Documentation: Clarify limited control-dependency scope
  locking/atomic, arch/rwsem: Employ atomic_long_fetch_add()
  locking/atomic, arch/qrwlock: Employ atomic_fetch_add_acquire()
  locking/atomic, arch/mips: Convert to _relaxed atomics
  locking/atomic, arch/alpha: Convert to _relaxed atomics
  locking/atomic: Remove the deprecated atomic_{set,clear}_mask() functions
  locking/atomic: Remove linux/atomic.h:atomic_fetch_or()
  locking/atomic: Implement atomic{,64,_long}_fetch_{add,sub,and,andnot,or,xor}{,_relaxed,_acquire,_release}()
  locking/atomic: Fix atomic64_relaxed() bits
  locking/atomic, arch/xtensa: Implement atomic_fetch_{add,sub,and,or,xor}()
  locking/atomic, arch/x86: Implement atomic{,64}_fetch_{add,sub,and,or,xor}()
  locking/atomic, arch/tile: Implement atomic{,64}_fetch_{add,sub,and,or,xor}()
  locking/atomic, arch/sparc: Implement atomic{,64}_fetch_{add,sub,and,or,xor}()
  ...
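For orientation, here is a minimal sketch (not part of this merge, and atomic_fetch_example is an illustrative name) of the calling convention the new atomic_fetch_*() API provides: the fetch variants return the value the counter held before the operation, whereas the existing atomic_*_return() helpers return the value after it.

#include <linux/atomic.h>

/* Illustration only: contrast the return values of the fetch and
 * return variants on an atomic_t counter.
 */
static void atomic_fetch_example(atomic_t *v)
{
	int old, new;

	atomic_set(v, 5);
	old = atomic_fetch_add(3, v);	/* old == 5, counter is now 8 */
	new = atomic_add_return(3, v);	/* new == 11, counter is now 11 */
	(void)old;
	(void)new;
}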
Diffstat (limited to 'arch/sh/include/asm')
-rw-r--r--	arch/sh/include/asm/atomic-grb.h	34
-rw-r--r--	arch/sh/include/asm/atomic-irq.h	31
-rw-r--r--	arch/sh/include/asm/atomic-llsc.h	32
-rw-r--r--	arch/sh/include/asm/spinlock.h		10
4 files changed, 93 insertions, 14 deletions
diff --git a/arch/sh/include/asm/atomic-grb.h b/arch/sh/include/asm/atomic-grb.h
index b94df40e5f2d..d755e96c3064 100644
--- a/arch/sh/include/asm/atomic-grb.h
+++ b/arch/sh/include/asm/atomic-grb.h
@@ -43,16 +43,42 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
 	return tmp;							\
 }
 
-#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
+#define ATOMIC_FETCH_OP(op)						\
+static inline int atomic_fetch_##op(int i, atomic_t *v)		\
+{									\
+	int res, tmp;							\
+									\
+	__asm__ __volatile__ (						\
+		"   .align 2              \n\t"				\
+		"   mova    1f,   r0      \n\t" /* r0 = end point */	\
+		"   mov    r15,   r1      \n\t" /* r1 = saved sp */	\
+		"   mov    #-6,   r15     \n\t" /* LOGIN: r15 = size */	\
+		"   mov.l  @%2,   %0      \n\t" /* load old value */	\
+		"   mov     %0,   %1      \n\t" /* save old value */	\
+		"   " #op " %3,   %0      \n\t" /* $op */		\
+		"   mov.l   %0,   @%2     \n\t" /* store new value */	\
+		"1: mov     r1,   r15     \n\t" /* LOGOUT */		\
+		: "=&r" (tmp), "=&r" (res), "+r"  (v)			\
+		: "r"   (i)						\
+		: "memory" , "r0", "r1");				\
+									\
+	return res;							\
+}
+
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
 
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
-ATOMIC_OP(and)
-ATOMIC_OP(or)
-ATOMIC_OP(xor)
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
+
+ATOMIC_OPS(and)
+ATOMIC_OPS(or)
+ATOMIC_OPS(xor)
 
 #undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
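For readability, a hand expansion of ATOMIC_FETCH_OP(add) from the hunk above, derived mechanically from the macro rather than copied from the tree. Roughly, the LOGIN/LOGOUT sequence (r0 holding the end point, r15 a negative region size) marks a span the kernel can roll back and re-execute if it is interrupted, which is what makes the plain load/add/store atomic on these CPUs.

/* Hand expansion of ATOMIC_FETCH_OP(add); illustration only. */
static inline int atomic_fetch_add(int i, atomic_t *v)
{
	int res, tmp;

	__asm__ __volatile__ (
		"   .align 2              \n\t"
		"   mova    1f,   r0      \n\t"	/* r0 = end point */
		"   mov    r15,   r1      \n\t"	/* r1 = saved sp */
		"   mov    #-6,   r15     \n\t"	/* LOGIN: r15 = size */
		"   mov.l  @%2,   %0      \n\t"	/* load old value */
		"   mov     %0,   %1      \n\t"	/* save old value */
		"   add     %3,   %0      \n\t"	/* apply the operation */
		"   mov.l   %0,   @%2     \n\t"	/* store new value */
		"1: mov     r1,   r15     \n\t"	/* LOGOUT */
		: "=&r" (tmp), "=&r" (res), "+r" (v)
		: "r" (i)
		: "memory", "r0", "r1");

	return res;	/* the value observed before the add */
}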
diff --git a/arch/sh/include/asm/atomic-irq.h b/arch/sh/include/asm/atomic-irq.h
index 23fcdad5773e..8e2da5fa0178 100644
--- a/arch/sh/include/asm/atomic-irq.h
+++ b/arch/sh/include/asm/atomic-irq.h
@@ -33,15 +33,38 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
 	return temp;							\
 }
 
-#define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)
+#define ATOMIC_FETCH_OP(op, c_op)					\
+static inline int atomic_fetch_##op(int i, atomic_t *v)		\
+{									\
+	unsigned long temp, flags;					\
+									\
+	raw_local_irq_save(flags);					\
+	temp = v->counter;						\
+	v->counter c_op i;						\
+	raw_local_irq_restore(flags);					\
+									\
+	return temp;							\
+}
+
+#define ATOMIC_OPS(op, c_op)						\
+	ATOMIC_OP(op, c_op)						\
+	ATOMIC_OP_RETURN(op, c_op)					\
+	ATOMIC_FETCH_OP(op, c_op)
 
 ATOMIC_OPS(add, +=)
 ATOMIC_OPS(sub, -=)
-ATOMIC_OP(and, &=)
-ATOMIC_OP(or, |=)
-ATOMIC_OP(xor, ^=)
 
 #undef ATOMIC_OPS
+#define ATOMIC_OPS(op, c_op)						\
+	ATOMIC_OP(op, c_op)						\
+	ATOMIC_FETCH_OP(op, c_op)
+
+ATOMIC_OPS(and, &=)
+ATOMIC_OPS(or, |=)
+ATOMIC_OPS(xor, ^=)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
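Expanded by hand for op = add, c_op = +=, the new ATOMIC_FETCH_OP() above boils down to the usual interrupt-masking fallback: disable local interrupts, read the counter, apply the operation, and return the pre-operation value.

/* Hand expansion of ATOMIC_FETCH_OP(add, +=); illustration only. */
static inline int atomic_fetch_add(int i, atomic_t *v)
{
	unsigned long temp, flags;

	raw_local_irq_save(flags);	/* no IRQs, so no concurrent update on UP */
	temp = v->counter;		/* remember the old value */
	v->counter += i;		/* apply the operation */
	raw_local_irq_restore(flags);

	return temp;			/* fetch_*() returns the old value */
}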
diff --git a/arch/sh/include/asm/atomic-llsc.h b/arch/sh/include/asm/atomic-llsc.h
index 33d34b16d4d6..caea2c45f6c2 100644
--- a/arch/sh/include/asm/atomic-llsc.h
+++ b/arch/sh/include/asm/atomic-llsc.h
@@ -48,15 +48,39 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
 	return temp;							\
 }
 
-#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
+#define ATOMIC_FETCH_OP(op)						\
+static inline int atomic_fetch_##op(int i, atomic_t *v)		\
+{									\
+	unsigned long res, temp;					\
+									\
+	__asm__ __volatile__ (						\
+"1:	movli.l @%3, %0		! atomic_fetch_" #op "	\n"		\
+"	mov %0, %1					\n"		\
+"	" #op "	%2, %0					\n"		\
+"	movco.l	%0, @%3					\n"		\
+"	bf	1b					\n"		\
+"	synco						\n"		\
+	: "=&z" (temp), "=&z" (res)					\
+	: "r" (i), "r" (&v->counter)					\
+	: "t");								\
+									\
+	return res;							\
+}
+
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
 
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
-ATOMIC_OP(and)
-ATOMIC_OP(or)
-ATOMIC_OP(xor)
 
 #undef ATOMIC_OPS
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
+
+ATOMIC_OPS(and)
+ATOMIC_OPS(or)
+ATOMIC_OPS(xor)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
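At the C level, the movli.l/movco.l loop above implements the standard fetch-and-add contract: load-linked, compute, retry via bf until the conditional store succeeds, fence with synco, and return the old value. For comparison only (this is not how the kernel builds it, and fetch_add_contract is an illustrative name), the same contract expressed with GCC's __atomic builtins:

/* Comparison only: the fetch-and-add contract of the LL/SC loop above,
 * expressed with the GCC __atomic builtins.  Returns the value held
 * before the addition.
 */
static inline int fetch_add_contract(int *counter, int i)
{
	return __atomic_fetch_add(counter, i, __ATOMIC_SEQ_CST);
}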
diff --git a/arch/sh/include/asm/spinlock.h b/arch/sh/include/asm/spinlock.h
index bdc0f3b6c56a..416834b60ad0 100644
--- a/arch/sh/include/asm/spinlock.h
+++ b/arch/sh/include/asm/spinlock.h
@@ -19,14 +19,20 @@
 #error "Need movli.l/movco.l for spinlocks"
 #endif
 
+#include <asm/barrier.h>
+#include <asm/processor.h>
+
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
  */
 
 #define arch_spin_is_locked(x)		((x)->lock <= 0)
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-#define arch_spin_unlock_wait(x) \
-	do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+	smp_cond_load_acquire(&lock->lock, VAL > 0);
+}
 
 /*
  * Simple spin lock operations. There are two variants, one clears IRQ's
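The new arch_spin_unlock_wait() leans on smp_cond_load_acquire(), introduced elsewhere in this series. A hedged sketch of the behaviour it buys (spin_unlock_wait_sketch is an illustrative name, and the generic macro may differ in detail): spin until lock->lock becomes positive, i.e. the lock is free, then make that observation an acquire so later accesses cannot be reordered before it.

/* Hedged sketch of what arch_spin_unlock_wait() now does via
 * smp_cond_load_acquire(); illustration, not the generic macro itself.
 */
static inline void spin_unlock_wait_sketch(arch_spinlock_t *lock)
{
	int val;

	for (;;) {
		val = READ_ONCE(lock->lock);	/* re-read on every pass */
		if (val > 0)			/* VAL > 0: lock released */
			break;
		cpu_relax();
	}
	smp_acquire__after_ctrl_dep();		/* upgrade the exit to acquire */
}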