diff options
author | Heiko Carstens <heiko.carstens@de.ibm.com> | 2013-09-11 10:34:04 -0400 |
---|---|---|
committer | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2013-10-24 11:16:46 -0400 |
commit | 75287430b4af7c22080d02b8cfc8344c4ecafc21 (patch) | |
tree | 6a1346c2619105866383ba482f709c77db43c5b4 | |
parent | 86d51bc31fabd3782a99375b6848c5c667e72605 (diff) |
s390/atomic: make use of interlocked-access facility 1 instructions
Same as for bitops: make use of the interlocked-access facility 1
instructions which allow to atomically update storage locations
without a compare-and-swap loop.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
-rw-r--r-- | arch/s390/include/asm/atomic.h | 77 |
1 file changed, 65 insertions(+), 12 deletions(-)
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h index fea2c8887da5..823ec99cf426 100644 --- a/arch/s390/include/asm/atomic.h +++ b/arch/s390/include/asm/atomic.h | |||
@@ -19,7 +19,31 @@ | |||
19 | 19 | ||
20 | #define ATOMIC_INIT(i) { (i) } | 20 | #define ATOMIC_INIT(i) { (i) } |
21 | 21 | ||
22 | #define __CS_LOOP(ptr, op_val, op_string) ({ \ | 22 | #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES |
23 | |||
24 | #define __ATOMIC_OR "lao" | ||
25 | #define __ATOMIC_AND "lan" | ||
26 | #define __ATOMIC_ADD "laa" | ||
27 | |||
28 | #define __ATOMIC_LOOP(ptr, op_val, op_string) \ | ||
29 | ({ \ | ||
30 | int old_val; \ | ||
31 | asm volatile( \ | ||
32 | op_string " %0,%2,%1\n" \ | ||
33 | : "=d" (old_val), "+Q" (((atomic_t *)(ptr))->counter) \ | ||
34 | : "d" (op_val) \ | ||
35 | : "cc", "memory"); \ | ||
36 | old_val; \ | ||
37 | }) | ||
38 | |||
39 | #else /* CONFIG_HAVE_MARCH_Z196_FEATURES */ | ||
40 | |||
41 | #define __ATOMIC_OR "or" | ||
42 | #define __ATOMIC_AND "nr" | ||
43 | #define __ATOMIC_ADD "ar" | ||
44 | |||
45 | #define __ATOMIC_LOOP(ptr, op_val, op_string) \ | ||
46 | ({ \ | ||
23 | int old_val, new_val; \ | 47 | int old_val, new_val; \ |
24 | asm volatile( \ | 48 | asm volatile( \ |
25 | " l %0,%2\n" \ | 49 | " l %0,%2\n" \ |
@@ -31,9 +55,11 @@ | |||
31 | "=Q" (((atomic_t *)(ptr))->counter) \ | 55 | "=Q" (((atomic_t *)(ptr))->counter) \ |
32 | : "d" (op_val), "Q" (((atomic_t *)(ptr))->counter) \ | 56 | : "d" (op_val), "Q" (((atomic_t *)(ptr))->counter) \ |
33 | : "cc", "memory"); \ | 57 | : "cc", "memory"); \ |
34 | new_val; \ | 58 | old_val; \ |
35 | }) | 59 | }) |
36 | 60 | ||
61 | #endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */ | ||
62 | |||
37 | static inline int atomic_read(const atomic_t *v) | 63 | static inline int atomic_read(const atomic_t *v) |
38 | { | 64 | { |
39 | int c; | 65 | int c; |
@@ -53,8 +79,9 @@ static inline void atomic_set(atomic_t *v, int i) | |||
53 | 79 | ||
54 | static inline int atomic_add_return(int i, atomic_t *v) | 80 | static inline int atomic_add_return(int i, atomic_t *v) |
55 | { | 81 | { |
56 | return __CS_LOOP(v, i, "ar"); | 82 | return __ATOMIC_LOOP(v, i, __ATOMIC_ADD) + i; |
57 | } | 83 | } |
84 | |||
58 | #define atomic_add(_i, _v) atomic_add_return(_i, _v) | 85 | #define atomic_add(_i, _v) atomic_add_return(_i, _v) |
59 | #define atomic_add_negative(_i, _v) (atomic_add_return(_i, _v) < 0) | 86 | #define atomic_add_negative(_i, _v) (atomic_add_return(_i, _v) < 0) |
60 | #define atomic_inc(_v) atomic_add_return(1, _v) | 87 | #define atomic_inc(_v) atomic_add_return(1, _v) |
@@ -69,12 +96,12 @@ static inline int atomic_add_return(int i, atomic_t *v) | |||
69 | 96 | ||
70 | static inline void atomic_clear_mask(unsigned long mask, atomic_t *v) | 97 | static inline void atomic_clear_mask(unsigned long mask, atomic_t *v) |
71 | { | 98 | { |
72 | __CS_LOOP(v, ~mask, "nr"); | 99 | __ATOMIC_LOOP(v, ~mask, __ATOMIC_AND); |
73 | } | 100 | } |
74 | 101 | ||
75 | static inline void atomic_set_mask(unsigned long mask, atomic_t *v) | 102 | static inline void atomic_set_mask(unsigned long mask, atomic_t *v) |
76 | { | 103 | { |
77 | __CS_LOOP(v, mask, "or"); | 104 | __ATOMIC_LOOP(v, mask, __ATOMIC_OR); |
78 | } | 105 | } |
79 | 106 | ||
80 | #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) | 107 | #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) |
@@ -105,13 +132,37 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) | |||
105 | } | 132 | } |
106 | 133 | ||
107 | 134 | ||
108 | #undef __CS_LOOP | 135 | #undef __ATOMIC_LOOP |
109 | 136 | ||
110 | #define ATOMIC64_INIT(i) { (i) } | 137 | #define ATOMIC64_INIT(i) { (i) } |
111 | 138 | ||
112 | #ifdef CONFIG_64BIT | 139 | #ifdef CONFIG_64BIT |
113 | 140 | ||
114 | #define __CSG_LOOP(ptr, op_val, op_string) ({ \ | 141 | #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES |
142 | |||
143 | #define __ATOMIC64_OR "laog" | ||
144 | #define __ATOMIC64_AND "lang" | ||
145 | #define __ATOMIC64_ADD "laag" | ||
146 | |||
147 | #define __ATOMIC64_LOOP(ptr, op_val, op_string) \ | ||
148 | ({ \ | ||
149 | long long old_val; \ | ||
150 | asm volatile( \ | ||
151 | op_string " %0,%2,%1\n" \ | ||
152 | : "=d" (old_val), "+Q" (((atomic_t *)(ptr))->counter) \ | ||
153 | : "d" (op_val) \ | ||
154 | : "cc", "memory"); \ | ||
155 | old_val; \ | ||
156 | }) | ||
157 | |||
158 | #else /* CONFIG_HAVE_MARCH_Z196_FEATURES */ | ||
159 | |||
160 | #define __ATOMIC64_OR "ogr" | ||
161 | #define __ATOMIC64_AND "ngr" | ||
162 | #define __ATOMIC64_ADD "agr" | ||
163 | |||
164 | #define __ATOMIC64_LOOP(ptr, op_val, op_string) \ | ||
165 | ({ \ | ||
115 | long long old_val, new_val; \ | 166 | long long old_val, new_val; \ |
116 | asm volatile( \ | 167 | asm volatile( \ |
117 | " lg %0,%2\n" \ | 168 | " lg %0,%2\n" \ |
@@ -123,9 +174,11 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) | |||
123 | "=Q" (((atomic_t *)(ptr))->counter) \ | 174 | "=Q" (((atomic_t *)(ptr))->counter) \ |
124 | : "d" (op_val), "Q" (((atomic_t *)(ptr))->counter) \ | 175 | : "d" (op_val), "Q" (((atomic_t *)(ptr))->counter) \ |
125 | : "cc", "memory"); \ | 176 | : "cc", "memory"); \ |
126 | new_val; \ | 177 | old_val; \ |
127 | }) | 178 | }) |
128 | 179 | ||
180 | #endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */ | ||
181 | |||
129 | static inline long long atomic64_read(const atomic64_t *v) | 182 | static inline long long atomic64_read(const atomic64_t *v) |
130 | { | 183 | { |
131 | long long c; | 184 | long long c; |
@@ -145,17 +198,17 @@ static inline void atomic64_set(atomic64_t *v, long long i) | |||
145 | 198 | ||
146 | static inline long long atomic64_add_return(long long i, atomic64_t *v) | 199 | static inline long long atomic64_add_return(long long i, atomic64_t *v) |
147 | { | 200 | { |
148 | return __CSG_LOOP(v, i, "agr"); | 201 | return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD) + i; |
149 | } | 202 | } |
150 | 203 | ||
151 | static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v) | 204 | static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v) |
152 | { | 205 | { |
153 | __CSG_LOOP(v, ~mask, "ngr"); | 206 | __ATOMIC64_LOOP(v, ~mask, __ATOMIC64_AND); |
154 | } | 207 | } |
155 | 208 | ||
156 | static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v) | 209 | static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v) |
157 | { | 210 | { |
158 | __CSG_LOOP(v, mask, "ogr"); | 211 | __ATOMIC64_LOOP(v, mask, __ATOMIC64_OR); |
159 | } | 212 | } |
160 | 213 | ||
161 | #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) | 214 | #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) |
@@ -171,7 +224,7 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, | |||
171 | return old; | 224 | return old; |
172 | } | 225 | } |
173 | 226 | ||
174 | #undef __CSG_LOOP | 227 | #undef __ATOMIC64_LOOP |
175 | 228 | ||
176 | #else /* CONFIG_64BIT */ | 229 | #else /* CONFIG_64BIT */ |
177 | 230 | ||