author		Peter Zijlstra <peterz@infradead.org>	2016-04-17 19:16:03 -0400
committer	Ingo Molnar <mingo@kernel.org>		2016-06-16 04:48:30 -0400
commit		1af5de9af138941fb8638cf126293b16f3387de4 (patch)
tree		55b1f10f549b59c7e6442505905496a9e822ce36 /arch/tile/include
parent		3a1adb23a52c920304239efff377d3bc967febc2 (diff)
locking/atomic, arch/tile: Implement atomic{,64}_fetch_{add,sub,and,or,xor}()
Implement FETCH-OP atomic primitives. These are very similar to the existing OP-RETURN primitives we already have, except they return the value of the atomic variable _before_ modification. This is especially useful for irreversible operations -- such as bitops -- because it becomes impossible to reconstruct the state prior to modification.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Chris Metcalf <cmetcalf@mellanox.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arch@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
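To make the semantic difference concrete, here is a minimal illustrative sketch, not part of this patch: it only assumes the generic atomic_fetch_or() API that this series provides, and the helper name claim_bits() is made up for the example. Because OR is irreversible, an OP-RETURN primitive hands back (old | mask), which looks the same whether or not this caller set the bits; only the FETCH-OP value, observed before the OR, can tell the two cases apart.

```c
#include <linux/atomic.h>
#include <linux/types.h>

/*
 * Illustrative only (not part of this patch): try to claim the bits in
 * @mask.  The FETCH-OP result is the value of *flags before the OR, so
 * we can tell whether we were the caller that set those bits.
 */
static inline bool claim_bits(atomic_t *flags, int mask)
{
	int old = atomic_fetch_or(mask, flags);	/* value before the OR */

	return (old & mask) == 0;	/* true iff this caller set them */
}
```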
Diffstat (limited to 'arch/tile/include')
-rw-r--r--  arch/tile/include/asm/atomic.h       4
-rw-r--r--  arch/tile/include/asm/atomic_32.h   60
-rw-r--r--  arch/tile/include/asm/atomic_64.h  115
-rw-r--r--  arch/tile/include/asm/bitops_32.h   18
4 files changed, 131 insertions(+), 66 deletions(-)
diff --git a/arch/tile/include/asm/atomic.h b/arch/tile/include/asm/atomic.h
index 9fc0107a9c5e..9807030557c4 100644
--- a/arch/tile/include/asm/atomic.h
+++ b/arch/tile/include/asm/atomic.h
@@ -46,6 +46,10 @@ static inline int atomic_read(const atomic_t *v)
  */
 #define atomic_sub_return(i, v) atomic_add_return((int)(-(i)), (v))
 
+#define atomic_fetch_sub(i, v) atomic_fetch_add(-(int)(i), (v))
+
+#define atomic_fetch_or atomic_fetch_or
+
 /**
  * atomic_sub - subtract integer from atomic variable
  * @i: integer value to subtract
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index d320ce253d86..da8eb4ed3752 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -34,18 +34,29 @@ static inline void atomic_add(int i, atomic_t *v)
 	_atomic_xchg_add(&v->counter, i);
 }
 
-#define ATOMIC_OP(op) \
-unsigned long _atomic_##op(volatile unsigned long *p, unsigned long mask); \
+#define ATOMIC_OPS(op) \
+unsigned long _atomic_fetch_##op(volatile unsigned long *p, unsigned long mask); \
 static inline void atomic_##op(int i, atomic_t *v) \
 { \
-	_atomic_##op((unsigned long *)&v->counter, i); \
+	_atomic_fetch_##op((unsigned long *)&v->counter, i); \
+} \
+static inline int atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+	smp_mb(); \
+	return _atomic_fetch_##op((unsigned long *)&v->counter, i); \
 }
 
-ATOMIC_OP(and)
-ATOMIC_OP(or)
-ATOMIC_OP(xor)
+ATOMIC_OPS(and)
+ATOMIC_OPS(or)
+ATOMIC_OPS(xor)
+
+#undef ATOMIC_OPS
 
-#undef ATOMIC_OP
+static inline int atomic_fetch_add(int i, atomic_t *v)
+{
+	smp_mb();
+	return _atomic_xchg_add(&v->counter, i);
+}
 
 /**
  * atomic_add_return - add integer and return
@@ -126,17 +137,30 @@ static inline void atomic64_add(long long i, atomic64_t *v)
 	_atomic64_xchg_add(&v->counter, i);
 }
 
-#define ATOMIC64_OP(op) \
-long long _atomic64_##op(long long *v, long long n); \
+#define ATOMIC64_OPS(op) \
+long long _atomic64_fetch_##op(long long *v, long long n); \
+static inline void atomic64_##op(long long i, atomic64_t *v) \
+{ \
+	_atomic64_fetch_##op(&v->counter, i); \
+} \
 static inline void atomic64_##op(long long i, atomic64_t *v) \
 { \
-	_atomic64_##op(&v->counter, i); \
+	smp_mb(); \
+	return _atomic64_fetch_##op(&v->counter, i); \
 }
 
 ATOMIC64_OP(and)
 ATOMIC64_OP(or)
 ATOMIC64_OP(xor)
 
+#undef ATOMIC64_OPS
+
+static inline long long atomic64_fetch_add(long long i, atomic64_t *v)
+{
+	smp_mb();
+	return _atomic64_xchg_add(&v->counter, i);
+}
+
 /**
  * atomic64_add_return - add integer and return
  * @v: pointer of type atomic64_t
@@ -186,6 +210,7 @@ static inline void atomic64_set(atomic64_t *v, long long n)
 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
 #define atomic64_sub_return(i, v) atomic64_add_return(-(i), (v))
+#define atomic64_fetch_sub(i, v) atomic64_fetch_add(-(i), (v))
 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
 #define atomic64_sub(i, v) atomic64_add(-(i), (v))
 #define atomic64_dec(v) atomic64_sub(1LL, (v))
@@ -193,7 +218,6 @@ static inline void atomic64_set(atomic64_t *v, long long n)
 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
 
-
 #endif /* !__ASSEMBLY__ */
 
 /*
@@ -248,10 +272,10 @@ extern struct __get_user __atomic_xchg(volatile int *p, int *lock, int n);
 extern struct __get_user __atomic_xchg_add(volatile int *p, int *lock, int n);
 extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
 					int *lock, int o, int n);
-extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
-extern struct __get_user __atomic_and(volatile int *p, int *lock, int n);
-extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
-extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
+extern struct __get_user __atomic_fetch_or(volatile int *p, int *lock, int n);
+extern struct __get_user __atomic_fetch_and(volatile int *p, int *lock, int n);
+extern struct __get_user __atomic_fetch_andn(volatile int *p, int *lock, int n);
+extern struct __get_user __atomic_fetch_xor(volatile int *p, int *lock, int n);
 extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
 					long long o, long long n);
 extern long long __atomic64_xchg(volatile long long *p, int *lock, long long n);
@@ -259,9 +283,9 @@ extern long long __atomic64_xchg_add(volatile long long *p, int *lock,
 					long long n);
 extern long long __atomic64_xchg_add_unless(volatile long long *p,
 					int *lock, long long o, long long n);
-extern long long __atomic64_and(volatile long long *p, int *lock, long long n);
-extern long long __atomic64_or(volatile long long *p, int *lock, long long n);
-extern long long __atomic64_xor(volatile long long *p, int *lock, long long n);
+extern long long __atomic64_fetch_and(volatile long long *p, int *lock, long long n);
+extern long long __atomic64_fetch_or(volatile long long *p, int *lock, long long n);
+extern long long __atomic64_fetch_xor(volatile long long *p, int *lock, long long n);
 
 /* Return failure from the atomic wrappers. */
 struct __get_user __atomic_bad_address(int __user *addr);
diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
index b0531a623653..4cefa0c9fd81 100644
--- a/arch/tile/include/asm/atomic_64.h
+++ b/arch/tile/include/asm/atomic_64.h
@@ -32,11 +32,6 @@
  * on any routine which updates memory and returns a value.
  */
 
-static inline void atomic_add(int i, atomic_t *v)
-{
-	__insn_fetchadd4((void *)&v->counter, i);
-}
-
 /*
  * Note a subtlety of the locking here. We are required to provide a
  * full memory barrier before and after the operation. However, we
@@ -59,28 +54,39 @@ static inline int atomic_add_return(int i, atomic_t *v)
 	return val;
 }
 
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+#define ATOMIC_OPS(op) \
+static inline int atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+	int val; \
+	smp_mb(); \
+	val = __insn_fetch##op##4((void *)&v->counter, i); \
+	smp_mb(); \
+	return val; \
+} \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+	__insn_fetch##op##4((void *)&v->counter, i); \
+}
+
+ATOMIC_OPS(add)
+ATOMIC_OPS(and)
+ATOMIC_OPS(or)
+
+#undef ATOMIC_OPS
+
+static inline int atomic_fetch_xor(int i, atomic_t *v)
 {
 	int guess, oldval = v->counter;
+	smp_mb();
 	do {
-		if (oldval == u)
-			break;
 		guess = oldval;
-		oldval = cmpxchg(&v->counter, guess, guess + a);
+		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
+		oldval = __insn_cmpexch4(&v->counter, guess ^ i);
 	} while (guess != oldval);
+	smp_mb();
 	return oldval;
 }
 
-static inline void atomic_and(int i, atomic_t *v)
-{
-	__insn_fetchand4((void *)&v->counter, i);
-}
-
-static inline void atomic_or(int i, atomic_t *v)
-{
-	__insn_fetchor4((void *)&v->counter, i);
-}
-
 static inline void atomic_xor(int i, atomic_t *v)
 {
 	int guess, oldval = v->counter;
@@ -91,6 +97,18 @@ static inline void atomic_xor(int i, atomic_t *v)
 	} while (guess != oldval);
 }
 
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int guess, oldval = v->counter;
+	do {
+		if (oldval == u)
+			break;
+		guess = oldval;
+		oldval = cmpxchg(&v->counter, guess, guess + a);
+	} while (guess != oldval);
+	return oldval;
+}
+
 /* Now the true 64-bit operations. */
 
 #define ATOMIC64_INIT(i) { (i) }
@@ -98,11 +116,6 @@ static inline void atomic_xor(int i, atomic_t *v)
 #define atomic64_read(v) READ_ONCE((v)->counter)
 #define atomic64_set(v, i) WRITE_ONCE((v)->counter, (i))
 
-static inline void atomic64_add(long i, atomic64_t *v)
-{
-	__insn_fetchadd((void *)&v->counter, i);
-}
-
 static inline long atomic64_add_return(long i, atomic64_t *v)
 {
 	int val;
@@ -112,26 +125,37 @@ static inline long atomic64_add_return(long i, atomic64_t *v)
 	return val;
 }
 
-static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
+#define ATOMIC64_OPS(op) \
+static inline long atomic64_fetch_##op(long i, atomic64_t *v) \
+{ \
+	long val; \
+	smp_mb(); \
+	val = __insn_fetch##op((void *)&v->counter, i); \
+	smp_mb(); \
+	return val; \
+} \
+static inline void atomic64_##op(long i, atomic64_t *v) \
+{ \
+	__insn_fetch##op((void *)&v->counter, i); \
+}
+
+ATOMIC64_OPS(add)
+ATOMIC64_OPS(and)
+ATOMIC64_OPS(or)
+
+#undef ATOMIC64_OPS
+
+static inline long atomic64_fetch_xor(long i, atomic64_t *v)
 {
 	long guess, oldval = v->counter;
+	smp_mb();
 	do {
-		if (oldval == u)
-			break;
 		guess = oldval;
-		oldval = cmpxchg(&v->counter, guess, guess + a);
+		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
+		oldval = __insn_cmpexch(&v->counter, guess ^ i);
 	} while (guess != oldval);
-	return oldval != u;
-}
-
-static inline void atomic64_and(long i, atomic64_t *v)
-{
-	__insn_fetchand((void *)&v->counter, i);
-}
-
-static inline void atomic64_or(long i, atomic64_t *v)
-{
-	__insn_fetchor((void *)&v->counter, i);
+	smp_mb();
+	return oldval;
 }
 
 static inline void atomic64_xor(long i, atomic64_t *v)
@@ -144,7 +168,20 @@ static inline void atomic64_xor(long i, atomic64_t *v)
 	} while (guess != oldval);
 }
 
+static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+	long guess, oldval = v->counter;
+	do {
+		if (oldval == u)
+			break;
+		guess = oldval;
+		oldval = cmpxchg(&v->counter, guess, guess + a);
+	} while (guess != oldval);
+	return oldval != u;
+}
+
 #define atomic64_sub_return(i, v) atomic64_add_return(-(i), (v))
+#define atomic64_fetch_sub(i, v) atomic64_fetch_add(-(i), (v))
 #define atomic64_sub(i, v) atomic64_add(-(i), (v))
 #define atomic64_inc_return(v) atomic64_add_return(1, (v))
 #define atomic64_dec_return(v) atomic64_sub_return(1, (v))
diff --git a/arch/tile/include/asm/bitops_32.h b/arch/tile/include/asm/bitops_32.h
index bbf7b666f21d..d1406a95f6b7 100644
--- a/arch/tile/include/asm/bitops_32.h
+++ b/arch/tile/include/asm/bitops_32.h
@@ -19,9 +19,9 @@
 #include <asm/barrier.h>
 
 /* Tile-specific routines to support <asm/bitops.h>. */
-unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask);
-unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask);
-unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask);
+unsigned long _atomic_fetch_or(volatile unsigned long *p, unsigned long mask);
+unsigned long _atomic_fetch_andn(volatile unsigned long *p, unsigned long mask);
+unsigned long _atomic_fetch_xor(volatile unsigned long *p, unsigned long mask);
 
 /**
  * set_bit - Atomically set a bit in memory
@@ -35,7 +35,7 @@ unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask);
  */
 static inline void set_bit(unsigned nr, volatile unsigned long *addr)
 {
-	_atomic_or(addr + BIT_WORD(nr), BIT_MASK(nr));
+	_atomic_fetch_or(addr + BIT_WORD(nr), BIT_MASK(nr));
 }
 
 /**
@@ -54,7 +54,7 @@ static inline void set_bit(unsigned nr, volatile unsigned long *addr)
  */
 static inline void clear_bit(unsigned nr, volatile unsigned long *addr)
 {
-	_atomic_andn(addr + BIT_WORD(nr), BIT_MASK(nr));
+	_atomic_fetch_andn(addr + BIT_WORD(nr), BIT_MASK(nr));
 }
 
 /**
@@ -69,7 +69,7 @@ static inline void clear_bit(unsigned nr, volatile unsigned long *addr)
  */
 static inline void change_bit(unsigned nr, volatile unsigned long *addr)
 {
-	_atomic_xor(addr + BIT_WORD(nr), BIT_MASK(nr));
+	_atomic_fetch_xor(addr + BIT_WORD(nr), BIT_MASK(nr));
 }
 
 /**
@@ -85,7 +85,7 @@ static inline int test_and_set_bit(unsigned nr, volatile unsigned long *addr)
 	unsigned long mask = BIT_MASK(nr);
 	addr += BIT_WORD(nr);
 	smp_mb(); /* barrier for proper semantics */
-	return (_atomic_or(addr, mask) & mask) != 0;
+	return (_atomic_fetch_or(addr, mask) & mask) != 0;
 }
 
 /**
@@ -101,7 +101,7 @@ static inline int test_and_clear_bit(unsigned nr, volatile unsigned long *addr)
 	unsigned long mask = BIT_MASK(nr);
 	addr += BIT_WORD(nr);
 	smp_mb(); /* barrier for proper semantics */
-	return (_atomic_andn(addr, mask) & mask) != 0;
+	return (_atomic_fetch_andn(addr, mask) & mask) != 0;
 }
 
 /**
@@ -118,7 +118,7 @@ static inline int test_and_change_bit(unsigned nr,
 	unsigned long mask = BIT_MASK(nr);
 	addr += BIT_WORD(nr);
 	smp_mb(); /* barrier for proper semantics */
-	return (_atomic_xor(addr, mask) & mask) != 0;
+	return (_atomic_fetch_xor(addr, mask) & mask) != 0;
 }
 
 #include <asm-generic/bitops/ext2-atomic.h>
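For readers unfamiliar with the TILE-Gx compare-exchange idiom used for the xor variants above (__insn_mtspr(SPR_CMPEXCH_VALUE, ...) followed by __insn_cmpexch), the sketch below expresses the same retry loop with portable C11 atomics. It is illustrative only and not part of the patch; the function name fetch_xor and the use of <stdatomic.h> are assumptions for the example, standing in for the kernel's primitives and barriers.

```c
#include <stdatomic.h>

/*
 * Portable illustration of the fetch-xor retry loop: read the old value,
 * try to install old ^ i, and retry until the compare-exchange succeeds.
 * The value observed before the successful update is returned, which is
 * exactly the FETCH-OP contract.
 */
static int fetch_xor(_Atomic int *counter, int i)
{
	int oldval = atomic_load_explicit(counter, memory_order_relaxed);

	/* On failure, oldval is reloaded with the current value. */
	while (!atomic_compare_exchange_weak_explicit(counter, &oldval,
						      oldval ^ i,
						      memory_order_seq_cst,
						      memory_order_relaxed))
		;
	return oldval;
}
```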