path: root/arch/ia64
author		Linus Torvalds <torvalds@linux-foundation.org>	2014-10-13 09:48:00 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-13 09:48:00 -0400
commit		dbb885fecc1b1b35e93416bedd24d21bd20f60ed (patch)
tree		9aa92bcc4e3d3594eba0ba85d72b878d85f35a59 /arch/ia64
parent		d6dd50e07c5bec00db2005969b1a01f8ca3d25ef (diff)
parent		2291059c852706c6f5ffb400366042b7625066cd (diff)
Merge branch 'locking-arch-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull arch atomic cleanups from Ingo Molnar:
 "This is a series kept separate from the main locking tree, which
  cleans up and improves various details in the atomics type handling:

   - Remove the unused atomic_or_long() method

   - Consolidate and compress atomic ops implementations between
     architectures, to reduce linecount and to make it easier to add new
     ops.

   - Rewrite generic atomic support to only require cmpxchg() from an
     architecture - generate all other methods from that"

* 'locking-arch-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (23 commits)
  locking,arch: Use ACCESS_ONCE() instead of cast to volatile in atomic_read()
  locking, mips: Fix atomics
  locking, sparc64: Fix atomics
  locking,arch: Rewrite generic atomic support
  locking,arch,xtensa: Fold atomic_ops
  locking,arch,sparc: Fold atomic_ops
  locking,arch,sh: Fold atomic_ops
  locking,arch,powerpc: Fold atomic_ops
  locking,arch,parisc: Fold atomic_ops
  locking,arch,mn10300: Fold atomic_ops
  locking,arch,mips: Fold atomic_ops
  locking,arch,metag: Fold atomic_ops
  locking,arch,m68k: Fold atomic_ops
  locking,arch,m32r: Fold atomic_ops
  locking,arch,ia64: Fold atomic_ops
  locking,arch,hexagon: Fold atomic_ops
  locking,arch,cris: Fold atomic_ops
  locking,arch,avr32: Fold atomic_ops
  locking,arch,arm64: Fold atomic_ops
  locking,arch,arm: Fold atomic_ops
  ...
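As background for the "only require cmpxchg()" point above, the pattern these patches converge on is a compare-and-swap retry loop wrapped in an op-generating macro. The sketch below is illustrative only (MY_ATOMIC_OP and my_atomic_t are made-up names, not the kernel's), assuming a cmpxchg() primitive is already available:

	/* Sketch: derive atomic add/sub from a bare cmpxchg() primitive.
	 * Illustrative names only; not the actual asm-generic implementation. */
	typedef struct { int counter; } my_atomic_t;

	#define MY_ATOMIC_OP(op, c_op)						\
	static inline int my_atomic_##op##_return(int i, my_atomic_t *v)	\
	{									\
		int old, new;							\
										\
		do {								\
			old = v->counter;	/* snapshot current value */	\
			new = old c_op i;	/* apply the operation */	\
		} while (cmpxchg(&v->counter, old, new) != old);		\
		return new;							\
	}

	MY_ATOMIC_OP(add, +)	/* emits my_atomic_add_return() */
	MY_ATOMIC_OP(sub, -)	/* emits my_atomic_sub_return() */

	#undef MY_ATOMIC_OP

The loop only retries when another CPU changed the counter between the read and the cmpxchg(), which is exactly the structure the ia64 diff below generates with its ATOMIC_OP/ATOMIC64_OP macros.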
Diffstat (limited to 'arch/ia64')
-rw-r--r--	arch/ia64/include/asm/atomic.h	| 192
1 file changed, 88 insertions(+), 104 deletions(-)
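One detail worth keeping in mind while reading the hunks below: the __builtin_constant_p() checks in the *_return macros exist because ia64's fetchadd instruction only accepts the immediates ±1, ±4, ±8 and ±16, so any other increment has to fall back to the ia64_cmpxchg() loop. A usage sketch (hypothetical call sites, not part of this patch):

	static atomic_t refs = ATOMIC_INIT(0);

	atomic_add_return(8, &refs);	/* constant in the allowed set: should use a single fetchadd */
	atomic_add_return(3, &refs);	/* 3 is not a fetchadd immediate: falls back to ia64_atomic_add() */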
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index 0f8bf48dadf3..0bf03501fe5c 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -21,68 +21,100 @@
 #define ATOMIC_INIT(i)		{ (i) }
 #define ATOMIC64_INIT(i)	{ (i) }
 
-#define atomic_read(v)		(*(volatile int *)&(v)->counter)
-#define atomic64_read(v)	(*(volatile long *)&(v)->counter)
+#define atomic_read(v)		ACCESS_ONCE((v)->counter)
+#define atomic64_read(v)	ACCESS_ONCE((v)->counter)
 
 #define atomic_set(v,i)		(((v)->counter) = (i))
 #define atomic64_set(v,i)	(((v)->counter) = (i))
 
-static __inline__ int
-ia64_atomic_add (int i, atomic_t *v)
-{
-	__s32 old, new;
-	CMPXCHG_BUGCHECK_DECL
-
-	do {
-		CMPXCHG_BUGCHECK(v);
-		old = atomic_read(v);
-		new = old + i;
-	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
-	return new;
+#define ATOMIC_OP(op, c_op) \
+static __inline__ int \
+ia64_atomic_##op (int i, atomic_t *v) \
+{ \
+	__s32 old, new; \
+	CMPXCHG_BUGCHECK_DECL \
+	\
+	do { \
+		CMPXCHG_BUGCHECK(v); \
+		old = atomic_read(v); \
+		new = old c_op i; \
+	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
+	return new; \
 }
 
-static __inline__ long
-ia64_atomic64_add (__s64 i, atomic64_t *v)
-{
-	__s64 old, new;
-	CMPXCHG_BUGCHECK_DECL
-
-	do {
-		CMPXCHG_BUGCHECK(v);
-		old = atomic64_read(v);
-		new = old + i;
-	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
-	return new;
-}
+ATOMIC_OP(add, +)
+ATOMIC_OP(sub, -)
 
-static __inline__ int
-ia64_atomic_sub (int i, atomic_t *v)
-{
-	__s32 old, new;
-	CMPXCHG_BUGCHECK_DECL
-
-	do {
-		CMPXCHG_BUGCHECK(v);
-		old = atomic_read(v);
-		new = old - i;
-	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
-	return new;
-}
+#undef ATOMIC_OP
 
-static __inline__ long
-ia64_atomic64_sub (__s64 i, atomic64_t *v)
-{
-	__s64 old, new;
-	CMPXCHG_BUGCHECK_DECL
-
-	do {
-		CMPXCHG_BUGCHECK(v);
-		old = atomic64_read(v);
-		new = old - i;
-	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
-	return new;
+#define atomic_add_return(i,v) \
+({ \
+	int __ia64_aar_i = (i); \
+	(__builtin_constant_p(i) \
+	 && ( (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \
+	     || (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \
+	     || (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \
+	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
+		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
+		: ia64_atomic_add(__ia64_aar_i, v); \
+})
+
+#define atomic_sub_return(i,v) \
+({ \
+	int __ia64_asr_i = (i); \
+	(__builtin_constant_p(i) \
+	 && ( (__ia64_asr_i == 1) || (__ia64_asr_i == 4) \
+	     || (__ia64_asr_i == 8) || (__ia64_asr_i == 16) \
+	     || (__ia64_asr_i == -1) || (__ia64_asr_i == -4) \
+	     || (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \
+		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
+		: ia64_atomic_sub(__ia64_asr_i, v); \
+})
+
+#define ATOMIC64_OP(op, c_op) \
+static __inline__ long \
+ia64_atomic64_##op (__s64 i, atomic64_t *v) \
+{ \
+	__s64 old, new; \
+	CMPXCHG_BUGCHECK_DECL \
+	\
+	do { \
+		CMPXCHG_BUGCHECK(v); \
+		old = atomic64_read(v); \
+		new = old c_op i; \
+	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
+	return new; \
 }
 
+ATOMIC64_OP(add, +)
+ATOMIC64_OP(sub, -)
+
+#undef ATOMIC64_OP
+
+#define atomic64_add_return(i,v) \
+({ \
+	long __ia64_aar_i = (i); \
+	(__builtin_constant_p(i) \
+	 && ( (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \
+	     || (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \
+	     || (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \
+	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
+		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
+		: ia64_atomic64_add(__ia64_aar_i, v); \
+})
+
+#define atomic64_sub_return(i,v) \
+({ \
+	long __ia64_asr_i = (i); \
+	(__builtin_constant_p(i) \
+	 && ( (__ia64_asr_i == 1) || (__ia64_asr_i == 4) \
+	     || (__ia64_asr_i == 8) || (__ia64_asr_i == 16) \
+	     || (__ia64_asr_i == -1) || (__ia64_asr_i == -4) \
+	     || (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \
+		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
+		: ia64_atomic64_sub(__ia64_asr_i, v); \
+})
+
 #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
@@ -123,30 +155,6 @@ static __inline__ long atomic64_add_unless(atomic64_t *v, long a, long u)
 
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
-#define atomic_add_return(i,v) \
-({ \
-	int __ia64_aar_i = (i); \
-	(__builtin_constant_p(i) \
-	 && ( (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \
-	     || (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \
-	     || (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \
-	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
-		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
-		: ia64_atomic_add(__ia64_aar_i, v); \
-})
-
-#define atomic64_add_return(i,v) \
-({ \
-	long __ia64_aar_i = (i); \
-	(__builtin_constant_p(i) \
-	 && ( (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \
-	     || (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \
-	     || (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \
-	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
-		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
-		: ia64_atomic64_add(__ia64_aar_i, v); \
-})
-
 /*
  * Atomically add I to V and return TRUE if the resulting value is
  * negative.
@@ -163,30 +171,6 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
 	return atomic64_add_return(i, v) < 0;
 }
 
-#define atomic_sub_return(i,v) \
-({ \
-	int __ia64_asr_i = (i); \
-	(__builtin_constant_p(i) \
-	 && ( (__ia64_asr_i == 1) || (__ia64_asr_i == 4) \
-	     || (__ia64_asr_i == 8) || (__ia64_asr_i == 16) \
-	     || (__ia64_asr_i == -1) || (__ia64_asr_i == -4) \
-	     || (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \
-		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
-		: ia64_atomic_sub(__ia64_asr_i, v); \
-})
-
-#define atomic64_sub_return(i,v) \
-({ \
-	long __ia64_asr_i = (i); \
-	(__builtin_constant_p(i) \
-	 && ( (__ia64_asr_i == 1) || (__ia64_asr_i == 4) \
-	     || (__ia64_asr_i == 8) || (__ia64_asr_i == 16) \
-	     || (__ia64_asr_i == -1) || (__ia64_asr_i == -4) \
-	     || (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \
-		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
-		: ia64_atomic64_sub(__ia64_asr_i, v); \
-})
-
 #define atomic_dec_return(v)		atomic_sub_return(1, (v))
 #define atomic_inc_return(v)		atomic_add_return(1, (v))
 #define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
@@ -199,13 +183,13 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
 #define atomic64_dec_and_test(v)	(atomic64_sub_return(1, (v)) == 0)
 #define atomic64_inc_and_test(v)	(atomic64_add_return(1, (v)) == 0)
 
-#define atomic_add(i,v)			atomic_add_return((i), (v))
-#define atomic_sub(i,v)			atomic_sub_return((i), (v))
+#define atomic_add(i,v)			(void)atomic_add_return((i), (v))
+#define atomic_sub(i,v)			(void)atomic_sub_return((i), (v))
 #define atomic_inc(v)			atomic_add(1, (v))
 #define atomic_dec(v)			atomic_sub(1, (v))
 
-#define atomic64_add(i,v)		atomic64_add_return((i), (v))
-#define atomic64_sub(i,v)		atomic64_sub_return((i), (v))
+#define atomic64_add(i,v)		(void)atomic64_add_return((i), (v))
+#define atomic64_sub(i,v)		(void)atomic64_sub_return((i), (v))
 #define atomic64_inc(v)			atomic64_add(1, (v))
 #define atomic64_dec(v)			atomic64_sub(1, (v))
 
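For reference, ATOMIC_OP(add, +) in the new header expands (modulo whitespace) back into the open-coded routine the old header spelled out by hand, which is why ia64_atomic_add(), ia64_atomic_sub() and their 64-bit counterparts could be folded away without changing any caller:

	static __inline__ int
	ia64_atomic_add (int i, atomic_t *v)
	{
		__s32 old, new;
		CMPXCHG_BUGCHECK_DECL

		do {
			CMPXCHG_BUGCHECK(v);
			old = atomic_read(v);
			new = old + i;	/* c_op instantiated as '+' */
		} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
		return new;
	}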