path: root/arch/m32r
author	Linus Torvalds <torvalds@linux-foundation.org>	2016-07-25 15:41:29 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-07-25 15:41:29 -0400
commit	c86ad14d305d2429c3da19462440bac50c183def (patch)
tree	bd794cd72476661faf82c440063c217bb978ce44 /arch/m32r
parent	a2303849a6b4b7ba59667091e00d6bb194071d9a (diff)
parent	f06628638cf6e75f179742b6c1b35076965b9fdd (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
 "The locking tree was busier in this cycle than the usual pattern - a
  couple of major projects happened to coincide.

  The main changes are:

   - implement the atomic_fetch_{add,sub,and,or,xor}() API natively
     across all SMP architectures (Peter Zijlstra)

   - add atomic_fetch_{inc/dec}() as well, using the generic primitives
     (Davidlohr Bueso)

   - optimize various aspects of rwsems (Jason Low, Davidlohr Bueso,
     Waiman Long)

   - optimize smp_cond_load_acquire() on arm64 and implement LSE based
     atomic{,64}_fetch_{add,sub,and,andnot,or,xor}{,_relaxed,_acquire,_release}()
     on arm64 (Will Deacon)

   - introduce smp_acquire__after_ctrl_dep() and fix various barrier
     mis-uses and bugs (Peter Zijlstra)

   - after discovering ancient spin_unlock_wait() barrier bugs in its
     implementation and usage, strengthen its semantics and update/fix
     usage sites (Peter Zijlstra)

   - optimize mutex_trylock() fastpath (Peter Zijlstra)

   - ... misc fixes and cleanups"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (67 commits)
  locking/atomic: Introduce inc/dec variants for the atomic_fetch_$op() API
  locking/barriers, arch/arm64: Implement LDXR+WFE based smp_cond_load_acquire()
  locking/static_keys: Fix non static symbol Sparse warning
  locking/qspinlock: Use __this_cpu_dec() instead of full-blown this_cpu_dec()
  locking/atomic, arch/tile: Fix tilepro build
  locking/atomic, arch/m68k: Remove comment
  locking/atomic, arch/arc: Fix build
  locking/Documentation: Clarify limited control-dependency scope
  locking/atomic, arch/rwsem: Employ atomic_long_fetch_add()
  locking/atomic, arch/qrwlock: Employ atomic_fetch_add_acquire()
  locking/atomic, arch/mips: Convert to _relaxed atomics
  locking/atomic, arch/alpha: Convert to _relaxed atomics
  locking/atomic: Remove the deprecated atomic_{set,clear}_mask() functions
  locking/atomic: Remove linux/atomic.h:atomic_fetch_or()
  locking/atomic: Implement atomic{,64,_long}_fetch_{add,sub,and,andnot,or,xor}{,_relaxed,_acquire,_release}()
  locking/atomic: Fix atomic64_relaxed() bits
  locking/atomic, arch/xtensa: Implement atomic_fetch_{add,sub,and,or,xor}()
  locking/atomic, arch/x86: Implement atomic{,64}_fetch_{add,sub,and,or,xor}()
  locking/atomic, arch/tile: Implement atomic{,64}_fetch_{add,sub,and,or,xor}()
  locking/atomic, arch/sparc: Implement atomic{,64}_fetch_{add,sub,and,or,xor}()
  ...
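For readers new to the API named above, here is a minimal illustrative sketch (not part of this merge; the function and counter names are made up) of how the new atomic_fetch_*() primitives differ from the existing *_return() variants: fetch_* returns the value the counter held before the operation, while *_return returns the value after it.

	#include <linux/atomic.h>

	static atomic_t example_counter = ATOMIC_INIT(5);	/* hypothetical counter */

	static void fetch_vs_return_sketch(void)
	{
		/* atomic_fetch_add() returns the old value: oldval == 5, counter becomes 8 */
		int oldval = atomic_fetch_add(3, &example_counter);

		/* atomic_add_return() returns the new value: newval == 11, counter becomes 11 */
		int newval = atomic_add_return(3, &example_counter);

		(void)oldval;
		(void)newval;
	}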
Diffstat (limited to 'arch/m32r')
-rw-r--r--	arch/m32r/include/asm/atomic.h	36
-rw-r--r--	arch/m32r/include/asm/spinlock.h	9
2 files changed, 39 insertions(+), 6 deletions(-)
diff --git a/arch/m32r/include/asm/atomic.h b/arch/m32r/include/asm/atomic.h
index ea35160d632b..640cc1c7099f 100644
--- a/arch/m32r/include/asm/atomic.h
+++ b/arch/m32r/include/asm/atomic.h
@@ -89,16 +89,44 @@ static __inline__ int atomic_##op##_return(int i, atomic_t *v) \
 	return result;							\
 }
 
-#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
+#define ATOMIC_FETCH_OP(op)						\
+static __inline__ int atomic_fetch_##op(int i, atomic_t *v)		\
+{									\
+	unsigned long flags;						\
+	int result, val;						\
+									\
+	local_irq_save(flags);						\
+	__asm__ __volatile__ (						\
+		"# atomic_fetch_" #op "		\n\t"			\
+		DCACHE_CLEAR("%0", "r4", "%2")				\
+		M32R_LOCK" %1, @%2;		\n\t"			\
+		"mv %0, %1			\n\t"			\
+		#op " %1, %3;			\n\t"			\
+		M32R_UNLOCK" %1, @%2;		\n\t"			\
+		: "=&r" (result), "=&r" (val)				\
+		: "r" (&v->counter), "r" (i)				\
+		: "memory"						\
+		__ATOMIC_CLOBBER					\
+	);								\
+	local_irq_restore(flags);					\
+									\
+	return result;							\
+}
+
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
 
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
-ATOMIC_OP(and)
-ATOMIC_OP(or)
-ATOMIC_OP(xor)
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
+
+ATOMIC_OPS(and)
+ATOMIC_OPS(or)
+ATOMIC_OPS(xor)
 
 #undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
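To make the macro above easier to follow, this is roughly what ATOMIC_FETCH_OP(add) expands to (a hand-expanded sketch for illustration, not text from the diff): the old counter value is loaded under M32R_LOCK, copied into result, the operation is applied to the temporary, and the updated value is stored back with M32R_UNLOCK, so the caller gets the pre-operation value.

	static __inline__ int atomic_fetch_add(int i, atomic_t *v)
	{
		unsigned long flags;
		int result, val;

		local_irq_save(flags);
		__asm__ __volatile__ (
			"# atomic_fetch_add		\n\t"
			DCACHE_CLEAR("%0", "r4", "%2")
			M32R_LOCK" %1, @%2;		\n\t"	/* val = old counter value */
			"mv %0, %1			\n\t"	/* result = old value */
			"add %1, %3;			\n\t"	/* val += i */
			M32R_UNLOCK" %1, @%2;		\n\t"	/* store new value back */
			: "=&r" (result), "=&r" (val)
			: "r" (&v->counter), "r" (i)
			: "memory"
			__ATOMIC_CLOBBER
		);
		local_irq_restore(flags);

		return result;
	}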
diff --git a/arch/m32r/include/asm/spinlock.h b/arch/m32r/include/asm/spinlock.h
index fa13694eaae3..323c7fc953cd 100644
--- a/arch/m32r/include/asm/spinlock.h
+++ b/arch/m32r/include/asm/spinlock.h
@@ -13,6 +13,8 @@
 #include <linux/atomic.h>
 #include <asm/dcache_clear.h>
 #include <asm/page.h>
+#include <asm/barrier.h>
+#include <asm/processor.h>
 
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
@@ -27,8 +29,11 @@
 
 #define arch_spin_is_locked(x)		(*(volatile int *)(&(x)->slock) <= 0)
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-#define arch_spin_unlock_wait(x) \
-		do { cpu_relax(); } while (arch_spin_is_locked(x))
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+	smp_cond_load_acquire(&lock->slock, VAL > 0);
+}
 
 /**
  * arch_spin_trylock - Try spin lock and return a result
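For context on the arch_spin_unlock_wait() change above: with the generic asm-generic/barrier.h fallback, smp_cond_load_acquire(&lock->slock, VAL > 0) spins until the lock word goes positive (unlocked on m32r) and then issues an acquire barrier, giving the ordering the old cpu_relax() busy-wait lacked. A rough behavioural sketch (hypothetical helper name, not m32r-specific code):

	static inline void arch_spin_unlock_wait_sketch(arch_spinlock_t *lock)
	{
		int val;

		for (;;) {
			val = READ_ONCE(lock->slock);	/* reload the lock word each pass */
			if (val > 0)			/* > 0 means unlocked on m32r */
				break;
			cpu_relax();
		}
		smp_acquire__after_ctrl_dep();		/* order later accesses after the load */
	}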