author		Linus Torvalds <torvalds@linux-foundation.org>	2015-09-03 18:46:07 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-09-03 18:46:07 -0400
commit		ca520cab25e0e8da717c596ccaa2c2b3650cfa09 (patch)
tree		883eb497642d98635817f9cf954ac98e043fb573 /arch/tile/include/asm
parent		4c12ab7e5e2e892fa94df500f96001837918a281 (diff)
parent		d420acd816c07c7be31bd19d09cbcb16e5572fa6 (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking and atomic updates from Ingo Molnar:
 "Main changes in this cycle are:

  - Extend atomic primitives with coherent logic op primitives
    (atomic_{or,and,xor}()) and deprecate the old partial APIs
    (atomic_{set,clear}_mask())

    The old ops were incoherent with incompatible signatures across
    architectures and with incomplete support.  Now every architecture
    supports the primitives consistently (by Peter Zijlstra)

  - Generic support for 'relaxed atomics':

      - _acquire/release/relaxed() flavours of xchg(), cmpxchg() and
        {add,sub}_return()
      - atomic_read_acquire()
      - atomic_set_release()

    This came out of porting qwrlock code to arm64 (by Will Deacon)

  - Clean up the fragile static_key APIs that were causing repeat bugs,
    by introducing a new one:

	DEFINE_STATIC_KEY_TRUE(name);
	DEFINE_STATIC_KEY_FALSE(name);

    which define a key of different types with an initial true/false
    value.

    Then allow:

	static_branch_likely()
	static_branch_unlikely()

    to take a key of either type and emit the right instruction for the
    case.  To be able to know the 'type' of the static key we encode it
    in the jump entry (by Peter Zijlstra)

  - Static key self-tests (by Jason Baron)

  - qrwlock optimizations (by Waiman Long)

  - small futex enhancements (by Davidlohr Bueso)

  - ... and misc other changes"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (63 commits)
  jump_label/x86: Work around asm build bug on older/backported GCCs
  locking, ARM, atomics: Define our SMP atomics in terms of _relaxed() operations
  locking, include/llist: Use linux/atomic.h instead of asm/cmpxchg.h
  locking/qrwlock: Make use of _{acquire|release|relaxed}() atomics
  locking/qrwlock: Implement queue_write_unlock() using smp_store_release()
  locking/lockref: Remove homebrew cmpxchg64_relaxed() macro definition
  locking, asm-generic: Add _{relaxed|acquire|release}() variants for 'atomic_long_t'
  locking, asm-generic: Rework atomic-long.h to avoid bulk code duplication
  locking/atomics: Add _{acquire|release|relaxed}() variants of some atomic operations
  locking, compiler.h: Cast away attributes in the WRITE_ONCE() magic
  locking/static_keys: Make verify_keys() static
  jump label, locking/static_keys: Update docs
  locking/static_keys: Provide a selftest
  jump_label: Provide a self-test
  s390/uaccess, locking/static_keys: employ static_branch_likely()
  x86, tsc, locking/static_keys: Employ static_branch_likely()
  locking/static_keys: Add selftest
  locking/static_keys: Add a new static_key interface
  locking/static_keys: Rework update logic
  locking/static_keys: Add static_key_{en,dis}able() helpers
  ...
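For readers unfamiliar with the new static_key interface named above, a
minimal sketch of a call site follows. The key name, the do_slow_feature()
function and the enable path are hypothetical illustrations, not code from
this merge:

	#include <linux/jump_label.h>

	/* Key starts false: the branch below is a NOP until enabled. */
	static DEFINE_STATIC_KEY_FALSE(my_feature_key);

	void hot_path(void)
	{
		/* Compiles to a patchable jump, not a load-and-test. */
		if (static_branch_unlikely(&my_feature_key))
			do_slow_feature();	/* hypothetical slow path */
	}

	void enable_feature(void)
	{
		/* Runtime code patching flips the branch at every call site. */
		static_branch_enable(&my_feature_key);
	}

Because the key's initial value is part of its type, static_branch_likely()
and static_branch_unlikely() can emit the correct instruction for either
key type, which is what the old static_key API could not do reliably.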
Diffstat (limited to 'arch/tile/include/asm')
-rw-r--r--	arch/tile/include/asm/atomic_32.h	28
-rw-r--r--	arch/tile/include/asm/atomic_64.h	40
2 files changed, 68 insertions(+), 0 deletions(-)
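The tile diffs below are this architecture's side of the atomic_{or,and,xor}()
work. For context, the new 'relaxed atomics' spellings from the same pull can
be used like this (a hedged sketch; the refcount usage is illustrative, not
taken from this diff):

	#include <linux/atomic.h>

	static atomic_t refs = ATOMIC_INIT(1);

	static void ordering_examples(void)
	{
		/* No implied barriers: cheapest form on weakly ordered CPUs. */
		int n = atomic_add_return_relaxed(1, &refs);

		/* Acquire: later loads cannot be reordered before success. */
		int old = atomic_cmpxchg_acquire(&refs, n, n - 1);

		/* Release: earlier stores are visible before this store. */
		atomic_set_release(&refs, old);
	}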
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index 1b109fad9fff..d320ce253d86 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -34,6 +34,19 @@ static inline void atomic_add(int i, atomic_t *v)
 	_atomic_xchg_add(&v->counter, i);
 }
 
+#define ATOMIC_OP(op)							\
+unsigned long _atomic_##op(volatile unsigned long *p, unsigned long mask); \
+static inline void atomic_##op(int i, atomic_t *v)			\
+{									\
+	_atomic_##op((unsigned long *)&v->counter, i);			\
+}
+
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
+#undef ATOMIC_OP
+
 /**
  * atomic_add_return - add integer and return
  * @v: pointer of type atomic_t
@@ -113,6 +126,17 @@ static inline void atomic64_add(long long i, atomic64_t *v)
 	_atomic64_xchg_add(&v->counter, i);
 }
 
+#define ATOMIC64_OP(op)						\
+long long _atomic64_##op(long long *v, long long n);		\
+static inline void atomic64_##op(long long i, atomic64_t *v)	\
+{								\
+	_atomic64_##op(&v->counter, i);				\
+}
+
+ATOMIC64_OP(and)
+ATOMIC64_OP(or)
+ATOMIC64_OP(xor)
+
 /**
  * atomic64_add_return - add integer and return
  * @v: pointer of type atomic64_t
@@ -225,6 +249,7 @@ extern struct __get_user __atomic_xchg_add(volatile int *p, int *lock, int n);
 extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
 						  int *lock, int o, int n);
 extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
+extern struct __get_user __atomic_and(volatile int *p, int *lock, int n);
 extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
 extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
 extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
@@ -234,6 +259,9 @@ extern long long __atomic64_xchg_add(volatile long long *p, int *lock,
 				    long long n);
 extern long long __atomic64_xchg_add_unless(volatile long long *p,
 					    int *lock, long long o, long long n);
+extern long long __atomic64_and(volatile long long *p, int *lock, long long n);
+extern long long __atomic64_or(volatile long long *p, int *lock, long long n);
+extern long long __atomic64_xor(volatile long long *p, int *lock, long long n);
 
 /* Return failure from the atomic wrappers. */
 struct __get_user __atomic_bad_address(int __user *addr);
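For reference, the ATOMIC_OP(or) instance added above expands mechanically to
the declaration and wrapper below; the out-of-line _atomic_or() helper itself
is implemented elsewhere in the tile tree, outside this diff:

	/* Exact expansion of ATOMIC_OP(or) from the hunk above. */
	unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask);
	static inline void atomic_or(int i, atomic_t *v)
	{
		_atomic_or((unsigned long *)&v->counter, i);
	}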
diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
index 0496970cef82..096a56d6ead4 100644
--- a/arch/tile/include/asm/atomic_64.h
+++ b/arch/tile/include/asm/atomic_64.h
@@ -58,6 +58,26 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 	return oldval;
 }
 
+static inline void atomic_and(int i, atomic_t *v)
+{
+	__insn_fetchand4((void *)&v->counter, i);
+}
+
+static inline void atomic_or(int i, atomic_t *v)
+{
+	__insn_fetchor4((void *)&v->counter, i);
+}
+
+static inline void atomic_xor(int i, atomic_t *v)
+{
+	int guess, oldval = v->counter;
+	do {
+		guess = oldval;
+		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
+		oldval = __insn_cmpexch4(&v->counter, guess ^ i);
+	} while (guess != oldval);
+}
+
 /* Now the true 64-bit operations. */
 
 #define ATOMIC64_INIT(i)	{ (i) }
@@ -91,6 +111,26 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
 	return oldval != u;
 }
 
+static inline void atomic64_and(long i, atomic64_t *v)
+{
+	__insn_fetchand((void *)&v->counter, i);
+}
+
+static inline void atomic64_or(long i, atomic64_t *v)
+{
+	__insn_fetchor((void *)&v->counter, i);
+}
+
+static inline void atomic64_xor(long i, atomic64_t *v)
+{
+	long guess, oldval = v->counter;
+	do {
+		guess = oldval;
+		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
+		oldval = __insn_cmpexch(&v->counter, guess ^ i);
+	} while (guess != oldval);
+}
+
 #define atomic64_sub_return(i, v)	atomic64_add_return(-(i), (v))
 #define atomic64_sub(i, v)		atomic64_add(-(i), (v))
 #define atomic64_inc_return(v)		atomic64_add_return(1, (v))
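The atomic_xor()/atomic64_xor() additions above use a compare-exchange retry
loop: as the diff shows, tilegx has fetch-and/fetch-or instructions
(__insn_fetchand4()/__insn_fetchor4()) but no fetch-xor, so xor must be
emulated. A user-space analog of the same pattern, using the GCC __atomic
builtins in place of the tile-specific __insn_mtspr()/__insn_cmpexch4()
intrinsics (illustrative sketch, not part of the patch):

	static inline void atomic_xor_demo(int i, volatile int *counter)
	{
		int oldval = *counter;

		/*
		 * On failure the builtin reloads oldval with the current
		 * value of *counter, so each iteration retries the xor
		 * against a fresh snapshot, exactly like the guess/cmpexch
		 * loop in the kernel code above.
		 */
		while (!__atomic_compare_exchange_n(counter, &oldval,
						    oldval ^ i, 0,
						    __ATOMIC_SEQ_CST,
						    __ATOMIC_SEQ_CST))
			;
	}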