author | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2010-02-26 16:37:31 -0500
committer | Martin Schwidefsky <sky@mschwide.boeblingen.de.ibm.com> | 2010-02-26 16:37:30 -0500
commit | 987bcdacb18a3adc2a48d85c9b005069c2f4dd7b (patch)
tree | 65da88589e2c95a4677090b570f49402e870e573 /arch/s390
parent | d1bf85902c28dd990c08f1703ea94109223549a7 (diff)
[S390] use inline assembly constraints available with gcc 3.3.3
Drop support for compiling the kernel with gcc versions older than 3.3.3.
This allows us to use the "Q" inline assembly constraint on some more
inline assemblies without duplicating a lot of complex code (e.g. __xchg
and __cmpxchg). The distinction for older gcc versions can be removed,
which saves a few lines and simplifies the code.
Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390')
-rw-r--r-- | arch/s390/include/asm/atomic.h | 86
-rw-r--r-- | arch/s390/include/asm/bitops.h | 83
-rw-r--r-- | arch/s390/include/asm/etr.h | 12
-rw-r--r-- | arch/s390/include/asm/irqflags.h | 36
-rw-r--r-- | arch/s390/include/asm/processor.h | 18
-rw-r--r-- | arch/s390/include/asm/rwsem.h | 147
-rw-r--r-- | arch/s390/include/asm/spinlock.h | 18
-rw-r--r-- | arch/s390/include/asm/swab.h | 16
-rw-r--r-- | arch/s390/include/asm/system.h | 168
-rw-r--r-- | arch/s390/include/asm/timex.h | 22
-rw-r--r-- | arch/s390/kernel/asm-offsets.c | 8
11 files changed, 226 insertions, 388 deletions
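Before the per-file hunks, it helps to see the pattern the whole patch repeats. With gcc 3.3.3 and later, the "Q" constraint describes a memory operand with a base register and short displacement, so the compiler builds the address itself; the pre-3.3.3 style had to pass the address through an "a" (address register) operand and spell the displacement out by hand, with a dummy "m" operand describing the memory access. A minimal before/after sketch, lifted from the get_clock() change in the timex.h hunk below (the _old/_new function names are illustrative):

/* Old style: address in a register ("a") plus a dummy "=m" output so
 * the compiler knows the memory is written; the displacement 0(%1)
 * is written out by hand.
 */
static inline unsigned long long get_clock_old(void)
{
	unsigned long long clk;

	asm volatile("stck 0(%1)" : "=m" (clk) : "a" (&clk) : "cc");
	return clk;
}

/* New style: the "Q" constraint (memory reference with short
 * displacement and no index register) generates the address for us.
 */
static inline unsigned long long get_clock_new(void)
{
	unsigned long long clk;

	asm volatile("stck %0" : "=Q" (clk) : : "cc");
	return clk;
}

Everything below is this same substitution applied to each inline assembly, plus removal of the now-dead #if __GNUC__ fallbacks.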
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index 2a113d6a7dfd..451bfbb9db3d 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -18,8 +18,6 @@
18 | 18 | ||
19 | #define ATOMIC_INIT(i) { (i) } | 19 | #define ATOMIC_INIT(i) { (i) } |
20 | 20 | ||
21 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) | ||
22 | |||
23 | #define __CS_LOOP(ptr, op_val, op_string) ({ \ | 21 | #define __CS_LOOP(ptr, op_val, op_string) ({ \ |
24 | int old_val, new_val; \ | 22 | int old_val, new_val; \ |
25 | asm volatile( \ | 23 | asm volatile( \ |
@@ -35,26 +33,6 @@
35 | new_val; \ | 33 | new_val; \ |
36 | }) | 34 | }) |
37 | 35 | ||
38 | #else /* __GNUC__ */ | ||
39 | |||
40 | #define __CS_LOOP(ptr, op_val, op_string) ({ \ | ||
41 | int old_val, new_val; \ | ||
42 | asm volatile( \ | ||
43 | " l %0,0(%3)\n" \ | ||
44 | "0: lr %1,%0\n" \ | ||
45 | op_string " %1,%4\n" \ | ||
46 | " cs %0,%1,0(%3)\n" \ | ||
47 | " jl 0b" \ | ||
48 | : "=&d" (old_val), "=&d" (new_val), \ | ||
49 | "=m" (((atomic_t *)(ptr))->counter) \ | ||
50 | : "a" (ptr), "d" (op_val), \ | ||
51 | "m" (((atomic_t *)(ptr))->counter) \ | ||
52 | : "cc", "memory"); \ | ||
53 | new_val; \ | ||
54 | }) | ||
55 | |||
56 | #endif /* __GNUC__ */ | ||
57 | |||
58 | static inline int atomic_read(const atomic_t *v) | 36 | static inline int atomic_read(const atomic_t *v) |
59 | { | 37 | { |
60 | barrier(); | 38 | barrier(); |
@@ -101,19 +79,11 @@ static inline void atomic_set_mask(unsigned long mask, atomic_t *v)
101 | 79 | ||
102 | static inline int atomic_cmpxchg(atomic_t *v, int old, int new) | 80 | static inline int atomic_cmpxchg(atomic_t *v, int old, int new) |
103 | { | 81 | { |
104 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) | ||
105 | asm volatile( | 82 | asm volatile( |
106 | " cs %0,%2,%1" | 83 | " cs %0,%2,%1" |
107 | : "+d" (old), "=Q" (v->counter) | 84 | : "+d" (old), "=Q" (v->counter) |
108 | : "d" (new), "Q" (v->counter) | 85 | : "d" (new), "Q" (v->counter) |
109 | : "cc", "memory"); | 86 | : "cc", "memory"); |
110 | #else /* __GNUC__ */ | ||
111 | asm volatile( | ||
112 | " cs %0,%3,0(%2)" | ||
113 | : "+d" (old), "=m" (v->counter) | ||
114 | : "a" (v), "d" (new), "m" (v->counter) | ||
115 | : "cc", "memory"); | ||
116 | #endif /* __GNUC__ */ | ||
117 | return old; | 87 | return old; |
118 | } | 88 | } |
119 | 89 | ||
@@ -140,8 +110,6 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
140 | 110 | ||
141 | #ifdef CONFIG_64BIT | 111 | #ifdef CONFIG_64BIT |
142 | 112 | ||
143 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) | ||
144 | |||
145 | #define __CSG_LOOP(ptr, op_val, op_string) ({ \ | 113 | #define __CSG_LOOP(ptr, op_val, op_string) ({ \ |
146 | long long old_val, new_val; \ | 114 | long long old_val, new_val; \ |
147 | asm volatile( \ | 115 | asm volatile( \ |
@@ -157,26 +125,6 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
157 | new_val; \ | 125 | new_val; \ |
158 | }) | 126 | }) |
159 | 127 | ||
160 | #else /* __GNUC__ */ | ||
161 | |||
162 | #define __CSG_LOOP(ptr, op_val, op_string) ({ \ | ||
163 | long long old_val, new_val; \ | ||
164 | asm volatile( \ | ||
165 | " lg %0,0(%3)\n" \ | ||
166 | "0: lgr %1,%0\n" \ | ||
167 | op_string " %1,%4\n" \ | ||
168 | " csg %0,%1,0(%3)\n" \ | ||
169 | " jl 0b" \ | ||
170 | : "=&d" (old_val), "=&d" (new_val), \ | ||
171 | "=m" (((atomic_t *)(ptr))->counter) \ | ||
172 | : "a" (ptr), "d" (op_val), \ | ||
173 | "m" (((atomic_t *)(ptr))->counter) \ | ||
174 | : "cc", "memory"); \ | ||
175 | new_val; \ | ||
176 | }) | ||
177 | |||
178 | #endif /* __GNUC__ */ | ||
179 | |||
180 | static inline long long atomic64_read(const atomic64_t *v) | 128 | static inline long long atomic64_read(const atomic64_t *v) |
181 | { | 129 | { |
182 | barrier(); | 130 | barrier(); |
@@ -214,19 +162,11 @@ static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
214 | static inline long long atomic64_cmpxchg(atomic64_t *v, | 162 | static inline long long atomic64_cmpxchg(atomic64_t *v, |
215 | long long old, long long new) | 163 | long long old, long long new) |
216 | { | 164 | { |
217 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) | ||
218 | asm volatile( | 165 | asm volatile( |
219 | " csg %0,%2,%1" | 166 | " csg %0,%2,%1" |
220 | : "+d" (old), "=Q" (v->counter) | 167 | : "+d" (old), "=Q" (v->counter) |
221 | : "d" (new), "Q" (v->counter) | 168 | : "d" (new), "Q" (v->counter) |
222 | : "cc", "memory"); | 169 | : "cc", "memory"); |
223 | #else /* __GNUC__ */ | ||
224 | asm volatile( | ||
225 | " csg %0,%3,0(%2)" | ||
226 | : "+d" (old), "=m" (v->counter) | ||
227 | : "a" (v), "d" (new), "m" (v->counter) | ||
228 | : "cc", "memory"); | ||
229 | #endif /* __GNUC__ */ | ||
230 | return old; | 170 | return old; |
231 | } | 171 | } |
232 | 172 | ||
@@ -243,10 +183,8 @@ static inline long long atomic64_read(const atomic64_t *v)
243 | register_pair rp; | 183 | register_pair rp; |
244 | 184 | ||
245 | asm volatile( | 185 | asm volatile( |
246 | " lm %0,%N0,0(%1)" | 186 | " lm %0,%N0,%1" |
247 | : "=&d" (rp) | 187 | : "=&d" (rp) : "Q" (v->counter) ); |
248 | : "a" (&v->counter), "m" (v->counter) | ||
249 | ); | ||
250 | return rp.pair; | 188 | return rp.pair; |
251 | } | 189 | } |
252 | 190 | ||
@@ -255,10 +193,8 @@ static inline void atomic64_set(atomic64_t *v, long long i)
255 | register_pair rp = {.pair = i}; | 193 | register_pair rp = {.pair = i}; |
256 | 194 | ||
257 | asm volatile( | 195 | asm volatile( |
258 | " stm %1,%N1,0(%2)" | 196 | " stm %1,%N1,%0" |
259 | : "=m" (v->counter) | 197 | : "=Q" (v->counter) : "d" (rp) ); |
260 | : "d" (rp), "a" (&v->counter) | ||
261 | ); | ||
262 | } | 198 | } |
263 | 199 | ||
264 | static inline long long atomic64_xchg(atomic64_t *v, long long new) | 200 | static inline long long atomic64_xchg(atomic64_t *v, long long new) |
@@ -267,11 +203,11 @@ static inline long long atomic64_xchg(atomic64_t *v, long long new)
267 | register_pair rp_old; | 203 | register_pair rp_old; |
268 | 204 | ||
269 | asm volatile( | 205 | asm volatile( |
270 | " lm %0,%N0,0(%2)\n" | 206 | " lm %0,%N0,%1\n" |
271 | "0: cds %0,%3,0(%2)\n" | 207 | "0: cds %0,%2,%1\n" |
272 | " jl 0b\n" | 208 | " jl 0b\n" |
273 | : "=&d" (rp_old), "+m" (v->counter) | 209 | : "=&d" (rp_old), "=Q" (v->counter) |
274 | : "a" (&v->counter), "d" (rp_new) | 210 | : "d" (rp_new), "Q" (v->counter) |
275 | : "cc"); | 211 | : "cc"); |
276 | return rp_old.pair; | 212 | return rp_old.pair; |
277 | } | 213 | } |
@@ -283,9 +219,9 @@ static inline long long atomic64_cmpxchg(atomic64_t *v,
283 | register_pair rp_new = {.pair = new}; | 219 | register_pair rp_new = {.pair = new}; |
284 | 220 | ||
285 | asm volatile( | 221 | asm volatile( |
286 | " cds %0,%3,0(%2)" | 222 | " cds %0,%2,%1" |
287 | : "+&d" (rp_old), "+m" (v->counter) | 223 | : "+&d" (rp_old), "=Q" (v->counter) |
288 | : "a" (&v->counter), "d" (rp_new) | 224 | : "d" (rp_new), "Q" (v->counter) |
289 | : "cc"); | 225 | : "cc"); |
290 | return rp_old.pair; | 226 | return rp_old.pair; |
291 | } | 227 | } |
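A note on the __CS_LOOP macro this file keeps: it is a compare-and-swap retry loop, and each arithmetic atomic operation just instantiates it with the right mnemonic. A sketch of typical usage, modeled on the 2010-era arch/s390 atomic.h (illustration, not part of this patch):

/* The loop loads the counter, applies op_string ("ar" = 32-bit
 * add-register) to a copy, and retries the cs (compare-and-swap)
 * until no other CPU has touched the counter in between; the macro
 * evaluates to the new value.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	return __CS_LOOP(v, i, "ar");
}

static inline void atomic_inc(atomic_t *v)
{
	__CS_LOOP(v, 1, "ar");
}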
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index b30606f6d523..2e05972c5085 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -71,8 +71,6 @@ extern const char _sb_findmap[];
71 | #define __BITOPS_AND "nr" | 71 | #define __BITOPS_AND "nr" |
72 | #define __BITOPS_XOR "xr" | 72 | #define __BITOPS_XOR "xr" |
73 | 73 | ||
74 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) | ||
75 | |||
76 | #define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ | 74 | #define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ |
77 | asm volatile( \ | 75 | asm volatile( \ |
78 | " l %0,%2\n" \ | 76 | " l %0,%2\n" \ |
@@ -85,22 +83,6 @@ extern const char _sb_findmap[];
85 | : "d" (__val), "Q" (*(unsigned long *) __addr) \ | 83 | : "d" (__val), "Q" (*(unsigned long *) __addr) \ |
86 | : "cc"); | 84 | : "cc"); |
87 | 85 | ||
88 | #else /* __GNUC__ */ | ||
89 | |||
90 | #define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ | ||
91 | asm volatile( \ | ||
92 | " l %0,0(%4)\n" \ | ||
93 | "0: lr %1,%0\n" \ | ||
94 | __op_string " %1,%3\n" \ | ||
95 | " cs %0,%1,0(%4)\n" \ | ||
96 | " jl 0b" \ | ||
97 | : "=&d" (__old), "=&d" (__new), \ | ||
98 | "=m" (*(unsigned long *) __addr) \ | ||
99 | : "d" (__val), "a" (__addr), \ | ||
100 | "m" (*(unsigned long *) __addr) : "cc"); | ||
101 | |||
102 | #endif /* __GNUC__ */ | ||
103 | |||
104 | #else /* __s390x__ */ | 86 | #else /* __s390x__ */ |
105 | 87 | ||
106 | #define __BITOPS_ALIGN 7 | 88 | #define __BITOPS_ALIGN 7 |
@@ -109,8 +91,6 @@ extern const char _sb_findmap[];
109 | #define __BITOPS_AND "ngr" | 91 | #define __BITOPS_AND "ngr" |
110 | #define __BITOPS_XOR "xgr" | 92 | #define __BITOPS_XOR "xgr" |
111 | 93 | ||
112 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) | ||
113 | |||
114 | #define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ | 94 | #define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ |
115 | asm volatile( \ | 95 | asm volatile( \ |
116 | " lg %0,%2\n" \ | 96 | " lg %0,%2\n" \ |
@@ -123,23 +103,6 @@ extern const char _sb_findmap[];
123 | : "d" (__val), "Q" (*(unsigned long *) __addr) \ | 103 | : "d" (__val), "Q" (*(unsigned long *) __addr) \ |
124 | : "cc"); | 104 | : "cc"); |
125 | 105 | ||
126 | #else /* __GNUC__ */ | ||
127 | |||
128 | #define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ | ||
129 | asm volatile( \ | ||
130 | " lg %0,0(%4)\n" \ | ||
131 | "0: lgr %1,%0\n" \ | ||
132 | __op_string " %1,%3\n" \ | ||
133 | " csg %0,%1,0(%4)\n" \ | ||
134 | " jl 0b" \ | ||
135 | : "=&d" (__old), "=&d" (__new), \ | ||
136 | "=m" (*(unsigned long *) __addr) \ | ||
137 | : "d" (__val), "a" (__addr), \ | ||
138 | "m" (*(unsigned long *) __addr) : "cc"); | ||
139 | |||
140 | |||
141 | #endif /* __GNUC__ */ | ||
142 | |||
143 | #endif /* __s390x__ */ | 106 | #endif /* __s390x__ */ |
144 | 107 | ||
145 | #define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE) | 108 | #define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE) |
@@ -261,9 +224,8 @@ static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
261 | 224 | ||
262 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); | 225 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); |
263 | asm volatile( | 226 | asm volatile( |
264 | " oc 0(1,%1),0(%2)" | 227 | " oc %O0(1,%R0),%1" |
265 | : "=m" (*(char *) addr) : "a" (addr), | 228 | : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" ); |
266 | "a" (_oi_bitmap + (nr & 7)), "m" (*(char *) addr) : "cc" ); | ||
267 | } | 229 | } |
268 | 230 | ||
269 | static inline void | 231 | static inline void |
@@ -290,9 +252,8 @@ __clear_bit(unsigned long nr, volatile unsigned long *ptr)
290 | 252 | ||
291 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); | 253 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); |
292 | asm volatile( | 254 | asm volatile( |
293 | " nc 0(1,%1),0(%2)" | 255 | " nc %O0(1,%R0),%1" |
294 | : "=m" (*(char *) addr) : "a" (addr), | 256 | : "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc" ); |
295 | "a" (_ni_bitmap + (nr & 7)), "m" (*(char *) addr) : "cc"); | ||
296 | } | 257 | } |
297 | 258 | ||
298 | static inline void | 259 | static inline void |
@@ -318,9 +279,8 @@ static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
318 | 279 | ||
319 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); | 280 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); |
320 | asm volatile( | 281 | asm volatile( |
321 | " xc 0(1,%1),0(%2)" | 282 | " xc %O0(1,%R0),%1" |
322 | : "=m" (*(char *) addr) : "a" (addr), | 283 | : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" ); |
323 | "a" (_oi_bitmap + (nr & 7)), "m" (*(char *) addr) : "cc" ); | ||
324 | } | 284 | } |
325 | 285 | ||
326 | static inline void | 286 | static inline void |
@@ -349,10 +309,9 @@ test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr)
349 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); | 309 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); |
350 | ch = *(unsigned char *) addr; | 310 | ch = *(unsigned char *) addr; |
351 | asm volatile( | 311 | asm volatile( |
352 | " oc 0(1,%1),0(%2)" | 312 | " oc %O0(1,%R0),%1" |
353 | : "=m" (*(char *) addr) | 313 | : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) |
354 | : "a" (addr), "a" (_oi_bitmap + (nr & 7)), | 314 | : "cc", "memory"); |
355 | "m" (*(char *) addr) : "cc", "memory"); | ||
356 | return (ch >> (nr & 7)) & 1; | 315 | return (ch >> (nr & 7)) & 1; |
357 | } | 316 | } |
358 | #define __test_and_set_bit(X,Y) test_and_set_bit_simple(X,Y) | 317 | #define __test_and_set_bit(X,Y) test_and_set_bit_simple(X,Y) |
@@ -369,10 +328,9 @@ test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr)
369 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); | 328 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); |
370 | ch = *(unsigned char *) addr; | 329 | ch = *(unsigned char *) addr; |
371 | asm volatile( | 330 | asm volatile( |
372 | " nc 0(1,%1),0(%2)" | 331 | " nc %O0(1,%R0),%1" |
373 | : "=m" (*(char *) addr) | 332 | : "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) |
374 | : "a" (addr), "a" (_ni_bitmap + (nr & 7)), | 333 | : "cc", "memory"); |
375 | "m" (*(char *) addr) : "cc", "memory"); | ||
376 | return (ch >> (nr & 7)) & 1; | 334 | return (ch >> (nr & 7)) & 1; |
377 | } | 335 | } |
378 | #define __test_and_clear_bit(X,Y) test_and_clear_bit_simple(X,Y) | 336 | #define __test_and_clear_bit(X,Y) test_and_clear_bit_simple(X,Y) |
@@ -389,10 +347,9 @@ test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr)
389 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); | 347 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); |
390 | ch = *(unsigned char *) addr; | 348 | ch = *(unsigned char *) addr; |
391 | asm volatile( | 349 | asm volatile( |
392 | " xc 0(1,%1),0(%2)" | 350 | " xc %O0(1,%R0),%1" |
393 | : "=m" (*(char *) addr) | 351 | : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) |
394 | : "a" (addr), "a" (_oi_bitmap + (nr & 7)), | 352 | : "cc", "memory"); |
395 | "m" (*(char *) addr) : "cc", "memory"); | ||
396 | return (ch >> (nr & 7)) & 1; | 353 | return (ch >> (nr & 7)) & 1; |
397 | } | 354 | } |
398 | #define __test_and_change_bit(X,Y) test_and_change_bit_simple(X,Y) | 355 | #define __test_and_change_bit(X,Y) test_and_change_bit_simple(X,Y) |
@@ -591,11 +548,11 @@ static inline unsigned long __load_ulong_le(const unsigned long *p,
591 | p = (unsigned long *)((unsigned long) p + offset); | 548 | p = (unsigned long *)((unsigned long) p + offset); |
592 | #ifndef __s390x__ | 549 | #ifndef __s390x__ |
593 | asm volatile( | 550 | asm volatile( |
594 | " ic %0,0(%1)\n" | 551 | " ic %0,%O1(%R1)\n" |
595 | " icm %0,2,1(%1)\n" | 552 | " icm %0,2,%O1+1(%R1)\n" |
596 | " icm %0,4,2(%1)\n" | 553 | " icm %0,4,%O1+2(%R1)\n" |
597 | " icm %0,8,3(%1)" | 554 | " icm %0,8,%O1+3(%R1)" |
598 | : "=&d" (word) : "a" (p), "m" (*p) : "cc"); | 555 | : "=&d" (word) : "Q" (*p) : "cc"); |
599 | #else | 556 | #else |
600 | asm volatile( | 557 | asm volatile( |
601 | " lrvg %0,%1" | 558 | " lrvg %0,%1" |
diff --git a/arch/s390/include/asm/etr.h b/arch/s390/include/asm/etr.h
index 80ef58c61970..538e1b36a726 100644
--- a/arch/s390/include/asm/etr.h
+++ b/arch/s390/include/asm/etr.h
@@ -145,11 +145,11 @@ static inline int etr_setr(struct etr_eacr *ctrl)
145 | int rc = -ENOSYS; | 145 | int rc = -ENOSYS; |
146 | 146 | ||
147 | asm volatile( | 147 | asm volatile( |
148 | " .insn s,0xb2160000,0(%2)\n" | 148 | " .insn s,0xb2160000,%1\n" |
149 | "0: la %0,0\n" | 149 | "0: la %0,0\n" |
150 | "1:\n" | 150 | "1:\n" |
151 | EX_TABLE(0b,1b) | 151 | EX_TABLE(0b,1b) |
152 | : "+d" (rc) : "m" (*ctrl), "a" (ctrl)); | 152 | : "+d" (rc) : "Q" (*ctrl)); |
153 | return rc; | 153 | return rc; |
154 | } | 154 | } |
155 | 155 | ||
@@ -159,11 +159,11 @@ static inline int etr_stetr(struct etr_aib *aib)
159 | int rc = -ENOSYS; | 159 | int rc = -ENOSYS; |
160 | 160 | ||
161 | asm volatile( | 161 | asm volatile( |
162 | " .insn s,0xb2170000,0(%2)\n" | 162 | " .insn s,0xb2170000,%1\n" |
163 | "0: la %0,0\n" | 163 | "0: la %0,0\n" |
164 | "1:\n" | 164 | "1:\n" |
165 | EX_TABLE(0b,1b) | 165 | EX_TABLE(0b,1b) |
166 | : "+d" (rc) : "m" (*aib), "a" (aib)); | 166 | : "+d" (rc) : "Q" (*aib)); |
167 | return rc; | 167 | return rc; |
168 | } | 168 | } |
169 | 169 | ||
@@ -174,11 +174,11 @@ static inline int etr_steai(struct etr_aib *aib, unsigned int func)
174 | int rc = -ENOSYS; | 174 | int rc = -ENOSYS; |
175 | 175 | ||
176 | asm volatile( | 176 | asm volatile( |
177 | " .insn s,0xb2b30000,0(%2)\n" | 177 | " .insn s,0xb2b30000,%1\n" |
178 | "0: la %0,0\n" | 178 | "0: la %0,0\n" |
179 | "1:\n" | 179 | "1:\n" |
180 | EX_TABLE(0b,1b) | 180 | EX_TABLE(0b,1b) |
181 | : "+d" (rc) : "m" (*aib), "a" (aib), "d" (reg0)); | 181 | : "+d" (rc) : "Q" (*aib), "d" (reg0)); |
182 | return rc; | 182 | return rc; |
183 | } | 183 | } |
184 | 184 | ||
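All three ETR helpers share one idiom worth spelling out: the instruction is emitted via .insn (old assemblers may not know the ETR opcodes), and an exception-table entry turns a program check on machines without the facility into a soft failure. An annotated sketch of the pattern (same logic as etr_setr() above, comments added for illustration; the function name is hypothetical):

static inline int etr_setr_annotated(struct etr_eacr *ctrl)
{
	int rc = -ENOSYS;	/* assume the facility is absent */

	asm volatile(
		/* SETR by raw opcode; the "Q" operand supplies the
		 * base+displacement address of the control block. */
		"	.insn	s,0xb2160000,%1\n"
		"0:	la	%0,0\n"		/* success path: rc = 0 */
		"1:\n"
		/* On an operation exception the instruction is suppressed
		 * and the old PSW points at label 0; the fixup resumes at
		 * label 1, skipping the "la" and leaving rc at -ENOSYS. */
		EX_TABLE(0b,1b)
		: "+d" (rc) : "Q" (*ctrl));
	return rc;
}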
diff --git a/arch/s390/include/asm/irqflags.h b/arch/s390/include/asm/irqflags.h
index c2fb432f576a..15b3ac253898 100644
--- a/arch/s390/include/asm/irqflags.h
+++ b/arch/s390/include/asm/irqflags.h
@@ -8,8 +8,6 @@
8 | 8 | ||
9 | #include <linux/types.h> | 9 | #include <linux/types.h> |
10 | 10 | ||
11 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) | ||
12 | |||
13 | /* store then or system mask. */ | 11 | /* store then or system mask. */ |
14 | #define __raw_local_irq_stosm(__or) \ | 12 | #define __raw_local_irq_stosm(__or) \ |
15 | ({ \ | 13 | ({ \ |
@@ -36,40 +34,6 @@
36 | asm volatile("ssm %0" : : "Q" (__mask) : "memory"); \ | 34 | asm volatile("ssm %0" : : "Q" (__mask) : "memory"); \ |
37 | }) | 35 | }) |
38 | 36 | ||
39 | #else /* __GNUC__ */ | ||
40 | |||
41 | /* store then or system mask. */ | ||
42 | #define __raw_local_irq_stosm(__or) \ | ||
43 | ({ \ | ||
44 | unsigned long __mask; \ | ||
45 | asm volatile( \ | ||
46 | " stosm 0(%1),%2" \ | ||
47 | : "=m" (__mask) \ | ||
48 | : "a" (&__mask), "i" (__or) : "memory"); \ | ||
49 | __mask; \ | ||
50 | }) | ||
51 | |||
52 | /* store then and system mask. */ | ||
53 | #define __raw_local_irq_stnsm(__and) \ | ||
54 | ({ \ | ||
55 | unsigned long __mask; \ | ||
56 | asm volatile( \ | ||
57 | " stnsm 0(%1),%2" \ | ||
58 | : "=m" (__mask) \ | ||
59 | : "a" (&__mask), "i" (__and) : "memory"); \ | ||
60 | __mask; \ | ||
61 | }) | ||
62 | |||
63 | /* set system mask. */ | ||
64 | #define __raw_local_irq_ssm(__mask) \ | ||
65 | ({ \ | ||
66 | asm volatile( \ | ||
67 | " ssm 0(%0)" \ | ||
68 | : : "a" (&__mask), "m" (__mask) : "memory"); \ | ||
69 | }) | ||
70 | |||
71 | #endif /* __GNUC__ */ | ||
72 | |||
73 | /* interrupt control.. */ | 37 | /* interrupt control.. */ |
74 | static inline unsigned long raw_local_irq_enable(void) | 38 | static inline unsigned long raw_local_irq_enable(void) |
75 | { | 39 | { |
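For context, the three retained macros are the building blocks of the arch interrupt-control API; a usage sketch modeled on the 2010-era irqflags.h (the _sketch names are mine). stosm/stnsm store the current system mask and then OR/AND it with an immediate; PSW bits 6 and 7 are the I/O and external interrupt masks, so 3 enables them and 0xfc disables them:

static inline unsigned long raw_local_irq_disable_sketch(void)
{
	return __raw_local_irq_stnsm(0xfc);	/* old mask, then AND 0xfc */
}

static inline unsigned long raw_local_irq_enable_sketch(void)
{
	return __raw_local_irq_stosm(3);	/* old mask, then OR 3 */
}

static inline void raw_local_irq_restore_sketch(unsigned long flags)
{
	__raw_local_irq_ssm(flags);		/* reload the saved mask */
}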
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index b42715458312..73e259834e10 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -28,7 +28,7 @@
28 | 28 | ||
29 | static inline void get_cpu_id(struct cpuid *ptr) | 29 | static inline void get_cpu_id(struct cpuid *ptr) |
30 | { | 30 | { |
31 | asm volatile("stidp 0(%1)" : "=m" (*ptr) : "a" (ptr)); | 31 | asm volatile("stidp %0" : "=Q" (*ptr)); |
32 | } | 32 | } |
33 | 33 | ||
34 | extern void s390_adjust_jiffies(void); | 34 | extern void s390_adjust_jiffies(void); |
@@ -184,9 +184,9 @@ static inline void psw_set_key(unsigned int key)
184 | static inline void __load_psw(psw_t psw) | 184 | static inline void __load_psw(psw_t psw) |
185 | { | 185 | { |
186 | #ifndef __s390x__ | 186 | #ifndef __s390x__ |
187 | asm volatile("lpsw 0(%0)" : : "a" (&psw), "m" (psw) : "cc"); | 187 | asm volatile("lpsw %0" : : "Q" (psw) : "cc"); |
188 | #else | 188 | #else |
189 | asm volatile("lpswe 0(%0)" : : "a" (&psw), "m" (psw) : "cc"); | 189 | asm volatile("lpswe %0" : : "Q" (psw) : "cc"); |
190 | #endif | 190 | #endif |
191 | } | 191 | } |
192 | 192 | ||
@@ -206,17 +206,17 @@ static inline void __load_psw_mask (unsigned long mask)
206 | asm volatile( | 206 | asm volatile( |
207 | " basr %0,0\n" | 207 | " basr %0,0\n" |
208 | "0: ahi %0,1f-0b\n" | 208 | "0: ahi %0,1f-0b\n" |
209 | " st %0,4(%1)\n" | 209 | " st %0,%O1+4(%R1)\n" |
210 | " lpsw 0(%1)\n" | 210 | " lpsw %1\n" |
211 | "1:" | 211 | "1:" |
212 | : "=&d" (addr) : "a" (&psw), "m" (psw) : "memory", "cc"); | 212 | : "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc"); |
213 | #else /* __s390x__ */ | 213 | #else /* __s390x__ */ |
214 | asm volatile( | 214 | asm volatile( |
215 | " larl %0,1f\n" | 215 | " larl %0,1f\n" |
216 | " stg %0,8(%1)\n" | 216 | " stg %0,%O1+8(%R1)\n" |
217 | " lpswe 0(%1)\n" | 217 | " lpswe %1\n" |
218 | "1:" | 218 | "1:" |
219 | : "=&d" (addr) : "a" (&psw), "m" (psw) : "memory", "cc"); | 219 | : "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc"); |
220 | #endif /* __s390x__ */ | 220 | #endif /* __s390x__ */ |
221 | } | 221 | } |
222 | 222 | ||
diff --git a/arch/s390/include/asm/rwsem.h b/arch/s390/include/asm/rwsem.h
index 9d2a17971805..423fdda2322d 100644
--- a/arch/s390/include/asm/rwsem.h
+++ b/arch/s390/include/asm/rwsem.h
@@ -124,21 +124,21 @@ static inline void __down_read(struct rw_semaphore *sem)
124 | 124 | ||
125 | asm volatile( | 125 | asm volatile( |
126 | #ifndef __s390x__ | 126 | #ifndef __s390x__ |
127 | " l %0,0(%3)\n" | 127 | " l %0,%2\n" |
128 | "0: lr %1,%0\n" | 128 | "0: lr %1,%0\n" |
129 | " ahi %1,%5\n" | 129 | " ahi %1,%4\n" |
130 | " cs %0,%1,0(%3)\n" | 130 | " cs %0,%1,%2\n" |
131 | " jl 0b" | 131 | " jl 0b" |
132 | #else /* __s390x__ */ | 132 | #else /* __s390x__ */ |
133 | " lg %0,0(%3)\n" | 133 | " lg %0,%2\n" |
134 | "0: lgr %1,%0\n" | 134 | "0: lgr %1,%0\n" |
135 | " aghi %1,%5\n" | 135 | " aghi %1,%4\n" |
136 | " csg %0,%1,0(%3)\n" | 136 | " csg %0,%1,%2\n" |
137 | " jl 0b" | 137 | " jl 0b" |
138 | #endif /* __s390x__ */ | 138 | #endif /* __s390x__ */ |
139 | : "=&d" (old), "=&d" (new), "=m" (sem->count) | 139 | : "=&d" (old), "=&d" (new), "=Q" (sem->count) |
140 | : "a" (&sem->count), "m" (sem->count), | 140 | : "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS) |
141 | "i" (RWSEM_ACTIVE_READ_BIAS) : "cc", "memory"); | 141 | : "cc", "memory"); |
142 | if (old < 0) | 142 | if (old < 0) |
143 | rwsem_down_read_failed(sem); | 143 | rwsem_down_read_failed(sem); |
144 | } | 144 | } |
@@ -152,25 +152,25 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
152 | 152 | ||
153 | asm volatile( | 153 | asm volatile( |
154 | #ifndef __s390x__ | 154 | #ifndef __s390x__ |
155 | " l %0,0(%3)\n" | 155 | " l %0,%2\n" |
156 | "0: ltr %1,%0\n" | 156 | "0: ltr %1,%0\n" |
157 | " jm 1f\n" | 157 | " jm 1f\n" |
158 | " ahi %1,%5\n" | 158 | " ahi %1,%4\n" |
159 | " cs %0,%1,0(%3)\n" | 159 | " cs %0,%1,%2\n" |
160 | " jl 0b\n" | 160 | " jl 0b\n" |
161 | "1:" | 161 | "1:" |
162 | #else /* __s390x__ */ | 162 | #else /* __s390x__ */ |
163 | " lg %0,0(%3)\n" | 163 | " lg %0,%2\n" |
164 | "0: ltgr %1,%0\n" | 164 | "0: ltgr %1,%0\n" |
165 | " jm 1f\n" | 165 | " jm 1f\n" |
166 | " aghi %1,%5\n" | 166 | " aghi %1,%4\n" |
167 | " csg %0,%1,0(%3)\n" | 167 | " csg %0,%1,%2\n" |
168 | " jl 0b\n" | 168 | " jl 0b\n" |
169 | "1:" | 169 | "1:" |
170 | #endif /* __s390x__ */ | 170 | #endif /* __s390x__ */ |
171 | : "=&d" (old), "=&d" (new), "=m" (sem->count) | 171 | : "=&d" (old), "=&d" (new), "=Q" (sem->count) |
172 | : "a" (&sem->count), "m" (sem->count), | 172 | : "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS) |
173 | "i" (RWSEM_ACTIVE_READ_BIAS) : "cc", "memory"); | 173 | : "cc", "memory"); |
174 | return old >= 0 ? 1 : 0; | 174 | return old >= 0 ? 1 : 0; |
175 | } | 175 | } |
176 | 176 | ||
@@ -184,20 +184,20 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
184 | tmp = RWSEM_ACTIVE_WRITE_BIAS; | 184 | tmp = RWSEM_ACTIVE_WRITE_BIAS; |
185 | asm volatile( | 185 | asm volatile( |
186 | #ifndef __s390x__ | 186 | #ifndef __s390x__ |
187 | " l %0,0(%3)\n" | 187 | " l %0,%2\n" |
188 | "0: lr %1,%0\n" | 188 | "0: lr %1,%0\n" |
189 | " a %1,%5\n" | 189 | " a %1,%4\n" |
190 | " cs %0,%1,0(%3)\n" | 190 | " cs %0,%1,%2\n" |
191 | " jl 0b" | 191 | " jl 0b" |
192 | #else /* __s390x__ */ | 192 | #else /* __s390x__ */ |
193 | " lg %0,0(%3)\n" | 193 | " lg %0,%2\n" |
194 | "0: lgr %1,%0\n" | 194 | "0: lgr %1,%0\n" |
195 | " ag %1,%5\n" | 195 | " ag %1,%4\n" |
196 | " csg %0,%1,0(%3)\n" | 196 | " csg %0,%1,%2\n" |
197 | " jl 0b" | 197 | " jl 0b" |
198 | #endif /* __s390x__ */ | 198 | #endif /* __s390x__ */ |
199 | : "=&d" (old), "=&d" (new), "=m" (sem->count) | 199 | : "=&d" (old), "=&d" (new), "=Q" (sem->count) |
200 | : "a" (&sem->count), "m" (sem->count), "m" (tmp) | 200 | : "Q" (sem->count), "m" (tmp) |
201 | : "cc", "memory"); | 201 | : "cc", "memory"); |
202 | if (old != 0) | 202 | if (old != 0) |
203 | rwsem_down_write_failed(sem); | 203 | rwsem_down_write_failed(sem); |
@@ -217,22 +217,22 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
217 | 217 | ||
218 | asm volatile( | 218 | asm volatile( |
219 | #ifndef __s390x__ | 219 | #ifndef __s390x__ |
220 | " l %0,0(%2)\n" | 220 | " l %0,%1\n" |
221 | "0: ltr %0,%0\n" | 221 | "0: ltr %0,%0\n" |
222 | " jnz 1f\n" | 222 | " jnz 1f\n" |
223 | " cs %0,%4,0(%2)\n" | 223 | " cs %0,%3,%1\n" |
224 | " jl 0b\n" | 224 | " jl 0b\n" |
225 | #else /* __s390x__ */ | 225 | #else /* __s390x__ */ |
226 | " lg %0,0(%2)\n" | 226 | " lg %0,%1\n" |
227 | "0: ltgr %0,%0\n" | 227 | "0: ltgr %0,%0\n" |
228 | " jnz 1f\n" | 228 | " jnz 1f\n" |
229 | " csg %0,%4,0(%2)\n" | 229 | " csg %0,%3,%1\n" |
230 | " jl 0b\n" | 230 | " jl 0b\n" |
231 | #endif /* __s390x__ */ | 231 | #endif /* __s390x__ */ |
232 | "1:" | 232 | "1:" |
233 | : "=&d" (old), "=m" (sem->count) | 233 | : "=&d" (old), "=Q" (sem->count) |
234 | : "a" (&sem->count), "m" (sem->count), | 234 | : "Q" (sem->count), "d" (RWSEM_ACTIVE_WRITE_BIAS) |
235 | "d" (RWSEM_ACTIVE_WRITE_BIAS) : "cc", "memory"); | 235 | : "cc", "memory"); |
236 | return (old == RWSEM_UNLOCKED_VALUE) ? 1 : 0; | 236 | return (old == RWSEM_UNLOCKED_VALUE) ? 1 : 0; |
237 | } | 237 | } |
238 | 238 | ||
@@ -245,21 +245,20 @@ static inline void __up_read(struct rw_semaphore *sem)
245 | 245 | ||
246 | asm volatile( | 246 | asm volatile( |
247 | #ifndef __s390x__ | 247 | #ifndef __s390x__ |
248 | " l %0,0(%3)\n" | 248 | " l %0,%2\n" |
249 | "0: lr %1,%0\n" | 249 | "0: lr %1,%0\n" |
250 | " ahi %1,%5\n" | 250 | " ahi %1,%4\n" |
251 | " cs %0,%1,0(%3)\n" | 251 | " cs %0,%1,%2\n" |
252 | " jl 0b" | 252 | " jl 0b" |
253 | #else /* __s390x__ */ | 253 | #else /* __s390x__ */ |
254 | " lg %0,0(%3)\n" | 254 | " lg %0,%2\n" |
255 | "0: lgr %1,%0\n" | 255 | "0: lgr %1,%0\n" |
256 | " aghi %1,%5\n" | 256 | " aghi %1,%4\n" |
257 | " csg %0,%1,0(%3)\n" | 257 | " csg %0,%1,%2\n" |
258 | " jl 0b" | 258 | " jl 0b" |
259 | #endif /* __s390x__ */ | 259 | #endif /* __s390x__ */ |
260 | : "=&d" (old), "=&d" (new), "=m" (sem->count) | 260 | : "=&d" (old), "=&d" (new), "=Q" (sem->count) |
261 | : "a" (&sem->count), "m" (sem->count), | 261 | : "Q" (sem->count), "i" (-RWSEM_ACTIVE_READ_BIAS) |
262 | "i" (-RWSEM_ACTIVE_READ_BIAS) | ||
263 | : "cc", "memory"); | 262 | : "cc", "memory"); |
264 | if (new < 0) | 263 | if (new < 0) |
265 | if ((new & RWSEM_ACTIVE_MASK) == 0) | 264 | if ((new & RWSEM_ACTIVE_MASK) == 0) |
@@ -276,20 +275,20 @@ static inline void __up_write(struct rw_semaphore *sem)
276 | tmp = -RWSEM_ACTIVE_WRITE_BIAS; | 275 | tmp = -RWSEM_ACTIVE_WRITE_BIAS; |
277 | asm volatile( | 276 | asm volatile( |
278 | #ifndef __s390x__ | 277 | #ifndef __s390x__ |
279 | " l %0,0(%3)\n" | 278 | " l %0,%2\n" |
280 | "0: lr %1,%0\n" | 279 | "0: lr %1,%0\n" |
281 | " a %1,%5\n" | 280 | " a %1,%4\n" |
282 | " cs %0,%1,0(%3)\n" | 281 | " cs %0,%1,%2\n" |
283 | " jl 0b" | 282 | " jl 0b" |
284 | #else /* __s390x__ */ | 283 | #else /* __s390x__ */ |
285 | " lg %0,0(%3)\n" | 284 | " lg %0,%2\n" |
286 | "0: lgr %1,%0\n" | 285 | "0: lgr %1,%0\n" |
287 | " ag %1,%5\n" | 286 | " ag %1,%4\n" |
288 | " csg %0,%1,0(%3)\n" | 287 | " csg %0,%1,%2\n" |
289 | " jl 0b" | 288 | " jl 0b" |
290 | #endif /* __s390x__ */ | 289 | #endif /* __s390x__ */ |
291 | : "=&d" (old), "=&d" (new), "=m" (sem->count) | 290 | : "=&d" (old), "=&d" (new), "=Q" (sem->count) |
292 | : "a" (&sem->count), "m" (sem->count), "m" (tmp) | 291 | : "Q" (sem->count), "m" (tmp) |
293 | : "cc", "memory"); | 292 | : "cc", "memory"); |
294 | if (new < 0) | 293 | if (new < 0) |
295 | if ((new & RWSEM_ACTIVE_MASK) == 0) | 294 | if ((new & RWSEM_ACTIVE_MASK) == 0) |
@@ -306,20 +305,20 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
306 | tmp = -RWSEM_WAITING_BIAS; | 305 | tmp = -RWSEM_WAITING_BIAS; |
307 | asm volatile( | 306 | asm volatile( |
308 | #ifndef __s390x__ | 307 | #ifndef __s390x__ |
309 | " l %0,0(%3)\n" | 308 | " l %0,%2\n" |
310 | "0: lr %1,%0\n" | 309 | "0: lr %1,%0\n" |
311 | " a %1,%5\n" | 310 | " a %1,%4\n" |
312 | " cs %0,%1,0(%3)\n" | 311 | " cs %0,%1,%2\n" |
313 | " jl 0b" | 312 | " jl 0b" |
314 | #else /* __s390x__ */ | 313 | #else /* __s390x__ */ |
315 | " lg %0,0(%3)\n" | 314 | " lg %0,%2\n" |
316 | "0: lgr %1,%0\n" | 315 | "0: lgr %1,%0\n" |
317 | " ag %1,%5\n" | 316 | " ag %1,%4\n" |
318 | " csg %0,%1,0(%3)\n" | 317 | " csg %0,%1,%2\n" |
319 | " jl 0b" | 318 | " jl 0b" |
320 | #endif /* __s390x__ */ | 319 | #endif /* __s390x__ */ |
321 | : "=&d" (old), "=&d" (new), "=m" (sem->count) | 320 | : "=&d" (old), "=&d" (new), "=Q" (sem->count) |
322 | : "a" (&sem->count), "m" (sem->count), "m" (tmp) | 321 | : "Q" (sem->count), "m" (tmp) |
323 | : "cc", "memory"); | 322 | : "cc", "memory"); |
324 | if (new > 1) | 323 | if (new > 1) |
325 | rwsem_downgrade_wake(sem); | 324 | rwsem_downgrade_wake(sem); |
@@ -334,20 +333,20 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
334 | 333 | ||
335 | asm volatile( | 334 | asm volatile( |
336 | #ifndef __s390x__ | 335 | #ifndef __s390x__ |
337 | " l %0,0(%3)\n" | 336 | " l %0,%2\n" |
338 | "0: lr %1,%0\n" | 337 | "0: lr %1,%0\n" |
339 | " ar %1,%5\n" | 338 | " ar %1,%4\n" |
340 | " cs %0,%1,0(%3)\n" | 339 | " cs %0,%1,%2\n" |
341 | " jl 0b" | 340 | " jl 0b" |
342 | #else /* __s390x__ */ | 341 | #else /* __s390x__ */ |
343 | " lg %0,0(%3)\n" | 342 | " lg %0,%2\n" |
344 | "0: lgr %1,%0\n" | 343 | "0: lgr %1,%0\n" |
345 | " agr %1,%5\n" | 344 | " agr %1,%4\n" |
346 | " csg %0,%1,0(%3)\n" | 345 | " csg %0,%1,%2\n" |
347 | " jl 0b" | 346 | " jl 0b" |
348 | #endif /* __s390x__ */ | 347 | #endif /* __s390x__ */ |
349 | : "=&d" (old), "=&d" (new), "=m" (sem->count) | 348 | : "=&d" (old), "=&d" (new), "=Q" (sem->count) |
350 | : "a" (&sem->count), "m" (sem->count), "d" (delta) | 349 | : "Q" (sem->count), "d" (delta) |
351 | : "cc", "memory"); | 350 | : "cc", "memory"); |
352 | } | 351 | } |
353 | 352 | ||
@@ -360,20 +359,20 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
360 | 359 | ||
361 | asm volatile( | 360 | asm volatile( |
362 | #ifndef __s390x__ | 361 | #ifndef __s390x__ |
363 | " l %0,0(%3)\n" | 362 | " l %0,%2\n" |
364 | "0: lr %1,%0\n" | 363 | "0: lr %1,%0\n" |
365 | " ar %1,%5\n" | 364 | " ar %1,%4\n" |
366 | " cs %0,%1,0(%3)\n" | 365 | " cs %0,%1,%2\n" |
367 | " jl 0b" | 366 | " jl 0b" |
368 | #else /* __s390x__ */ | 367 | #else /* __s390x__ */ |
369 | " lg %0,0(%3)\n" | 368 | " lg %0,%2\n" |
370 | "0: lgr %1,%0\n" | 369 | "0: lgr %1,%0\n" |
371 | " agr %1,%5\n" | 370 | " agr %1,%4\n" |
372 | " csg %0,%1,0(%3)\n" | 371 | " csg %0,%1,%2\n" |
373 | " jl 0b" | 372 | " jl 0b" |
374 | #endif /* __s390x__ */ | 373 | #endif /* __s390x__ */ |
375 | : "=&d" (old), "=&d" (new), "=m" (sem->count) | 374 | : "=&d" (old), "=&d" (new), "=Q" (sem->count) |
376 | : "a" (&sem->count), "m" (sem->count), "d" (delta) | 375 | : "Q" (sem->count), "d" (delta) |
377 | : "cc", "memory"); | 376 | : "cc", "memory"); |
378 | return new; | 377 | return new; |
379 | } | 378 | } |
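Every rwsem helper above has the same shape: load the count, apply some arithmetic, and retry the compare-and-swap until it sticks. A portable C rendering of that loop using a gcc builtin (illustration only; the kernel keeps it in assembly so the exact cs/csg sequence stays under its control):

static inline long rwsem_atomic_update_sketch(long delta, long *count)
{
	long old, new;

	do {				/* mirrors: l/lg, a(g)r, cs/csg, jl */
		old = *count;
		new = old + delta;
	} while (!__sync_bool_compare_and_swap(count, old, new));
	return new;
}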
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index a587907d77f3..56612fc8186e 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -13,8 +13,6 @@
13 | 13 | ||
14 | #include <linux/smp.h> | 14 | #include <linux/smp.h> |
15 | 15 | ||
16 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) | ||
17 | |||
18 | static inline int | 16 | static inline int |
19 | _raw_compare_and_swap(volatile unsigned int *lock, | 17 | _raw_compare_and_swap(volatile unsigned int *lock, |
20 | unsigned int old, unsigned int new) | 18 | unsigned int old, unsigned int new) |
@@ -27,22 +25,6 @@ _raw_compare_and_swap(volatile unsigned int *lock,
27 | return old; | 25 | return old; |
28 | } | 26 | } |
29 | 27 | ||
30 | #else /* __GNUC__ */ | ||
31 | |||
32 | static inline int | ||
33 | _raw_compare_and_swap(volatile unsigned int *lock, | ||
34 | unsigned int old, unsigned int new) | ||
35 | { | ||
36 | asm volatile( | ||
37 | " cs %0,%3,0(%4)" | ||
38 | : "=d" (old), "=m" (*lock) | ||
39 | : "0" (old), "d" (new), "a" (lock), "m" (*lock) | ||
40 | : "cc", "memory" ); | ||
41 | return old; | ||
42 | } | ||
43 | |||
44 | #endif /* __GNUC__ */ | ||
45 | |||
46 | /* | 28 | /* |
47 | * Simple spin lock operations. There are two variants, one clears IRQ's | 29 | * Simple spin lock operations. There are two variants, one clears IRQ's |
48 | * on the local processor, one does not. | 30 | * on the local processor, one does not. |
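_raw_compare_and_swap() returns the value the lock word held before the cs, so a caller knows the swap succeeded when the return equals the expected old value. A sketch of the lock fast path built on it (simplified; the real kernel stores the owning CPU's id and backs off more politely -- the ~0U locked value here is a placeholder assumption):

static inline void spin_lock_sketch(volatile unsigned int *lock)
{
	while (_raw_compare_and_swap(lock, 0, ~0U) != 0)
		while (*lock != 0)
			;	/* busy-wait until the lock looks free */
}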
diff --git a/arch/s390/include/asm/swab.h b/arch/s390/include/asm/swab.h
index eb18dc1f327b..6bdee21c077e 100644
--- a/arch/s390/include/asm/swab.h
+++ b/arch/s390/include/asm/swab.h
@@ -47,11 +47,11 @@ static inline __u32 __arch_swab32p(const __u32 *x)
47 | 47 | ||
48 | asm volatile( | 48 | asm volatile( |
49 | #ifndef __s390x__ | 49 | #ifndef __s390x__ |
50 | " icm %0,8,3(%1)\n" | 50 | " icm %0,8,%O1+3(%R1)\n" |
51 | " icm %0,4,2(%1)\n" | 51 | " icm %0,4,%O1+2(%R1)\n" |
52 | " icm %0,2,1(%1)\n" | 52 | " icm %0,2,%O1+1(%R1)\n" |
53 | " ic %0,0(%1)" | 53 | " ic %0,%1" |
54 | : "=&d" (result) : "a" (x), "m" (*x) : "cc"); | 54 | : "=&d" (result) : "Q" (*x) : "cc"); |
55 | #else /* __s390x__ */ | 55 | #else /* __s390x__ */ |
56 | " lrv %0,%1" | 56 | " lrv %0,%1" |
57 | : "=d" (result) : "m" (*x)); | 57 | : "=d" (result) : "m" (*x)); |
@@ -77,9 +77,9 @@ static inline __u16 __arch_swab16p(const __u16 *x)
77 | 77 | ||
78 | asm volatile( | 78 | asm volatile( |
79 | #ifndef __s390x__ | 79 | #ifndef __s390x__ |
80 | " icm %0,2,1(%1)\n" | 80 | " icm %0,2,%O+1(%R1)\n" |
81 | " ic %0,0(%1)\n" | 81 | " ic %0,%1\n" |
82 | : "=&d" (result) : "a" (x), "m" (*x) : "cc"); | 82 | : "=&d" (result) : "Q" (*x) : "cc"); |
83 | #else /* __s390x__ */ | 83 | #else /* __s390x__ */ |
84 | " lrvh %0,%1" | 84 | " lrvh %0,%1" |
85 | : "=d" (result) : "m" (*x)); | 85 | : "=d" (result) : "m" (*x)); |
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
index 379661d2f81a..67ee6c3c6bb3 100644
--- a/arch/s390/include/asm/system.h
+++ b/arch/s390/include/asm/system.h
@@ -24,65 +24,65 @@ extern struct task_struct *__switch_to(void *, void *);
24 | static inline void save_fp_regs(s390_fp_regs *fpregs) | 24 | static inline void save_fp_regs(s390_fp_regs *fpregs) |
25 | { | 25 | { |
26 | asm volatile( | 26 | asm volatile( |
27 | " std 0,8(%1)\n" | 27 | " std 0,%O0+8(%R0)\n" |
28 | " std 2,24(%1)\n" | 28 | " std 2,%O0+24(%R0)\n" |
29 | " std 4,40(%1)\n" | 29 | " std 4,%O0+40(%R0)\n" |
30 | " std 6,56(%1)" | 30 | " std 6,%O0+56(%R0)" |
31 | : "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory"); | 31 | : "=Q" (*fpregs) : "Q" (*fpregs)); |
32 | if (!MACHINE_HAS_IEEE) | 32 | if (!MACHINE_HAS_IEEE) |
33 | return; | 33 | return; |
34 | asm volatile( | 34 | asm volatile( |
35 | " stfpc 0(%1)\n" | 35 | " stfpc %0\n" |
36 | " std 1,16(%1)\n" | 36 | " std 1,%O0+16(%R0)\n" |
37 | " std 3,32(%1)\n" | 37 | " std 3,%O0+32(%R0)\n" |
38 | " std 5,48(%1)\n" | 38 | " std 5,%O0+48(%R0)\n" |
39 | " std 7,64(%1)\n" | 39 | " std 7,%O0+64(%R0)\n" |
40 | " std 8,72(%1)\n" | 40 | " std 8,%O0+72(%R0)\n" |
41 | " std 9,80(%1)\n" | 41 | " std 9,%O0+80(%R0)\n" |
42 | " std 10,88(%1)\n" | 42 | " std 10,%O0+88(%R0)\n" |
43 | " std 11,96(%1)\n" | 43 | " std 11,%O0+96(%R0)\n" |
44 | " std 12,104(%1)\n" | 44 | " std 12,%O0+104(%R0)\n" |
45 | " std 13,112(%1)\n" | 45 | " std 13,%O0+112(%R0)\n" |
46 | " std 14,120(%1)\n" | 46 | " std 14,%O0+120(%R0)\n" |
47 | " std 15,128(%1)\n" | 47 | " std 15,%O0+128(%R0)\n" |
48 | : "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory"); | 48 | : "=Q" (*fpregs) : "Q" (*fpregs)); |
49 | } | 49 | } |
50 | 50 | ||
51 | static inline void restore_fp_regs(s390_fp_regs *fpregs) | 51 | static inline void restore_fp_regs(s390_fp_regs *fpregs) |
52 | { | 52 | { |
53 | asm volatile( | 53 | asm volatile( |
54 | " ld 0,8(%0)\n" | 54 | " ld 0,%O0+8(%R0)\n" |
55 | " ld 2,24(%0)\n" | 55 | " ld 2,%O0+24(%R0)\n" |
56 | " ld 4,40(%0)\n" | 56 | " ld 4,%O0+40(%R0)\n" |
57 | " ld 6,56(%0)" | 57 | " ld 6,%O0+56(%R0)" |
58 | : : "a" (fpregs), "m" (*fpregs)); | 58 | : : "Q" (*fpregs)); |
59 | if (!MACHINE_HAS_IEEE) | 59 | if (!MACHINE_HAS_IEEE) |
60 | return; | 60 | return; |
61 | asm volatile( | 61 | asm volatile( |
62 | " lfpc 0(%0)\n" | 62 | " lfpc %0\n" |
63 | " ld 1,16(%0)\n" | 63 | " ld 1,%O0+16(%R0)\n" |
64 | " ld 3,32(%0)\n" | 64 | " ld 3,%O0+32(%R0)\n" |
65 | " ld 5,48(%0)\n" | 65 | " ld 5,%O0+48(%R0)\n" |
66 | " ld 7,64(%0)\n" | 66 | " ld 7,%O0+64(%R0)\n" |
67 | " ld 8,72(%0)\n" | 67 | " ld 8,%O0+72(%R0)\n" |
68 | " ld 9,80(%0)\n" | 68 | " ld 9,%O0+80(%R0)\n" |
69 | " ld 10,88(%0)\n" | 69 | " ld 10,%O0+88(%R0)\n" |
70 | " ld 11,96(%0)\n" | 70 | " ld 11,%O0+96(%R0)\n" |
71 | " ld 12,104(%0)\n" | 71 | " ld 12,%O0+104(%R0)\n" |
72 | " ld 13,112(%0)\n" | 72 | " ld 13,%O0+112(%R0)\n" |
73 | " ld 14,120(%0)\n" | 73 | " ld 14,%O0+120(%R0)\n" |
74 | " ld 15,128(%0)\n" | 74 | " ld 15,%O0+128(%R0)\n" |
75 | : : "a" (fpregs), "m" (*fpregs)); | 75 | : : "Q" (*fpregs)); |
76 | } | 76 | } |
77 | 77 | ||
78 | static inline void save_access_regs(unsigned int *acrs) | 78 | static inline void save_access_regs(unsigned int *acrs) |
79 | { | 79 | { |
80 | asm volatile("stam 0,15,0(%0)" : : "a" (acrs) : "memory"); | 80 | asm volatile("stam 0,15,%0" : "=Q" (*acrs)); |
81 | } | 81 | } |
82 | 82 | ||
83 | static inline void restore_access_regs(unsigned int *acrs) | 83 | static inline void restore_access_regs(unsigned int *acrs) |
84 | { | 84 | { |
85 | asm volatile("lam 0,15,0(%0)" : : "a" (acrs)); | 85 | asm volatile("lam 0,15,%0" : : "Q" (*acrs)); |
86 | } | 86 | } |
87 | 87 | ||
88 | #define switch_to(prev,next,last) do { \ | 88 | #define switch_to(prev,next,last) do { \ |
@@ -139,48 +139,48 @@ static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
139 | shift = (3 ^ (addr & 3)) << 3; | 139 | shift = (3 ^ (addr & 3)) << 3; |
140 | addr ^= addr & 3; | 140 | addr ^= addr & 3; |
141 | asm volatile( | 141 | asm volatile( |
142 | " l %0,0(%4)\n" | 142 | " l %0,%4\n" |
143 | "0: lr 0,%0\n" | 143 | "0: lr 0,%0\n" |
144 | " nr 0,%3\n" | 144 | " nr 0,%3\n" |
145 | " or 0,%2\n" | 145 | " or 0,%2\n" |
146 | " cs %0,0,0(%4)\n" | 146 | " cs %0,0,%4\n" |
147 | " jl 0b\n" | 147 | " jl 0b\n" |
148 | : "=&d" (old), "=m" (*(int *) addr) | 148 | : "=&d" (old), "=Q" (*(int *) addr) |
149 | : "d" (x << shift), "d" (~(255 << shift)), "a" (addr), | 149 | : "d" (x << shift), "d" (~(255 << shift)), |
150 | "m" (*(int *) addr) : "memory", "cc", "0"); | 150 | "Q" (*(int *) addr) : "memory", "cc", "0"); |
151 | return old >> shift; | 151 | return old >> shift; |
152 | case 2: | 152 | case 2: |
153 | addr = (unsigned long) ptr; | 153 | addr = (unsigned long) ptr; |
154 | shift = (2 ^ (addr & 2)) << 3; | 154 | shift = (2 ^ (addr & 2)) << 3; |
155 | addr ^= addr & 2; | 155 | addr ^= addr & 2; |
156 | asm volatile( | 156 | asm volatile( |
157 | " l %0,0(%4)\n" | 157 | " l %0,%4\n" |
158 | "0: lr 0,%0\n" | 158 | "0: lr 0,%0\n" |
159 | " nr 0,%3\n" | 159 | " nr 0,%3\n" |
160 | " or 0,%2\n" | 160 | " or 0,%2\n" |
161 | " cs %0,0,0(%4)\n" | 161 | " cs %0,0,%4\n" |
162 | " jl 0b\n" | 162 | " jl 0b\n" |
163 | : "=&d" (old), "=m" (*(int *) addr) | 163 | : "=&d" (old), "=Q" (*(int *) addr) |
164 | : "d" (x << shift), "d" (~(65535 << shift)), "a" (addr), | 164 | : "d" (x << shift), "d" (~(65535 << shift)), |
165 | "m" (*(int *) addr) : "memory", "cc", "0"); | 165 | "Q" (*(int *) addr) : "memory", "cc", "0"); |
166 | return old >> shift; | 166 | return old >> shift; |
167 | case 4: | 167 | case 4: |
168 | asm volatile( | 168 | asm volatile( |
169 | " l %0,0(%3)\n" | 169 | " l %0,%3\n" |
170 | "0: cs %0,%2,0(%3)\n" | 170 | "0: cs %0,%2,%3\n" |
171 | " jl 0b\n" | 171 | " jl 0b\n" |
172 | : "=&d" (old), "=m" (*(int *) ptr) | 172 | : "=&d" (old), "=Q" (*(int *) ptr) |
173 | : "d" (x), "a" (ptr), "m" (*(int *) ptr) | 173 | : "d" (x), "Q" (*(int *) ptr) |
174 | : "memory", "cc"); | 174 | : "memory", "cc"); |
175 | return old; | 175 | return old; |
176 | #ifdef __s390x__ | 176 | #ifdef __s390x__ |
177 | case 8: | 177 | case 8: |
178 | asm volatile( | 178 | asm volatile( |
179 | " lg %0,0(%3)\n" | 179 | " lg %0,%3\n" |
180 | "0: csg %0,%2,0(%3)\n" | 180 | "0: csg %0,%2,%3\n" |
181 | " jl 0b\n" | 181 | " jl 0b\n" |
182 | : "=&d" (old), "=m" (*(long *) ptr) | 182 | : "=&d" (old), "=m" (*(long *) ptr) |
183 | : "d" (x), "a" (ptr), "m" (*(long *) ptr) | 183 | : "d" (x), "Q" (*(long *) ptr) |
184 | : "memory", "cc"); | 184 | : "memory", "cc"); |
185 | return old; | 185 | return old; |
186 | #endif /* __s390x__ */ | 186 | #endif /* __s390x__ */ |
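__xchg (and __cmpxchg below) handle 1- and 2-byte operands by running cs on the containing aligned word and shifting the value into the right byte lane; the XOR in shift = (3 ^ (addr & 3)) << 3 accounts for big-endian byte order. A standalone demonstration of that arithmetic (plain C, illustration only):

#include <stdio.h>

/* The byte at offset 0 sits in the most significant bits of its word
 * (shift 24); the byte at offset 3 in the least significant (shift 0).
 */
int main(void)
{
	unsigned long a;

	for (a = 0; a < 4; a++)
		printf("byte offset %lu -> shift %lu\n", a, (3 ^ a) << 3);
	return 0;
}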
@@ -215,20 +215,20 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
215 | shift = (3 ^ (addr & 3)) << 3; | 215 | shift = (3 ^ (addr & 3)) << 3; |
216 | addr ^= addr & 3; | 216 | addr ^= addr & 3; |
217 | asm volatile( | 217 | asm volatile( |
218 | " l %0,0(%4)\n" | 218 | " l %0,%2\n" |
219 | "0: nr %0,%5\n" | 219 | "0: nr %0,%5\n" |
220 | " lr %1,%0\n" | 220 | " lr %1,%0\n" |
221 | " or %0,%2\n" | 221 | " or %0,%2\n" |
222 | " or %1,%3\n" | 222 | " or %1,%3\n" |
223 | " cs %0,%1,0(%4)\n" | 223 | " cs %0,%1,%2\n" |
224 | " jnl 1f\n" | 224 | " jnl 1f\n" |
225 | " xr %1,%0\n" | 225 | " xr %1,%0\n" |
226 | " nr %1,%5\n" | 226 | " nr %1,%5\n" |
227 | " jnz 0b\n" | 227 | " jnz 0b\n" |
228 | "1:" | 228 | "1:" |
229 | : "=&d" (prev), "=&d" (tmp) | 229 | : "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) ptr) |
230 | : "d" (old << shift), "d" (new << shift), "a" (ptr), | 230 | : "d" (old << shift), "d" (new << shift), |
231 | "d" (~(255 << shift)) | 231 | "d" (~(255 << shift)), "Q" (*(int *) ptr) |
232 | : "memory", "cc"); | 232 | : "memory", "cc"); |
233 | return prev >> shift; | 233 | return prev >> shift; |
234 | case 2: | 234 | case 2: |
@@ -236,33 +236,35 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
236 | shift = (2 ^ (addr & 2)) << 3; | 236 | shift = (2 ^ (addr & 2)) << 3; |
237 | addr ^= addr & 2; | 237 | addr ^= addr & 2; |
238 | asm volatile( | 238 | asm volatile( |
239 | " l %0,0(%4)\n" | 239 | " l %0,%2\n" |
240 | "0: nr %0,%5\n" | 240 | "0: nr %0,%5\n" |
241 | " lr %1,%0\n" | 241 | " lr %1,%0\n" |
242 | " or %0,%2\n" | 242 | " or %0,%2\n" |
243 | " or %1,%3\n" | 243 | " or %1,%3\n" |
244 | " cs %0,%1,0(%4)\n" | 244 | " cs %0,%1,%2\n" |
245 | " jnl 1f\n" | 245 | " jnl 1f\n" |
246 | " xr %1,%0\n" | 246 | " xr %1,%0\n" |
247 | " nr %1,%5\n" | 247 | " nr %1,%5\n" |
248 | " jnz 0b\n" | 248 | " jnz 0b\n" |
249 | "1:" | 249 | "1:" |
250 | : "=&d" (prev), "=&d" (tmp) | 250 | : "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) ptr) |
251 | : "d" (old << shift), "d" (new << shift), "a" (ptr), | 251 | : "d" (old << shift), "d" (new << shift), |
252 | "d" (~(65535 << shift)) | 252 | "d" (~(65535 << shift)), "Q" (*(int *) ptr) |
253 | : "memory", "cc"); | 253 | : "memory", "cc"); |
254 | return prev >> shift; | 254 | return prev >> shift; |
255 | case 4: | 255 | case 4: |
256 | asm volatile( | 256 | asm volatile( |
257 | " cs %0,%2,0(%3)\n" | 257 | " cs %0,%3,%1\n" |
258 | : "=&d" (prev) : "0" (old), "d" (new), "a" (ptr) | 258 | : "=&d" (prev), "=Q" (*(int *) ptr) |
259 | : "0" (old), "d" (new), "Q" (*(int *) ptr) | ||
259 | : "memory", "cc"); | 260 | : "memory", "cc"); |
260 | return prev; | 261 | return prev; |
261 | #ifdef __s390x__ | 262 | #ifdef __s390x__ |
262 | case 8: | 263 | case 8: |
263 | asm volatile( | 264 | asm volatile( |
264 | " csg %0,%2,0(%3)\n" | 265 | " csg %0,%3,%1\n" |
265 | : "=&d" (prev) : "0" (old), "d" (new), "a" (ptr) | 266 | : "=&d" (prev), "=Q" (*(long *) ptr) |
267 | : "0" (old), "d" (new), "Q" (*(long *) ptr) | ||
266 | : "memory", "cc"); | 268 | : "memory", "cc"); |
267 | return prev; | 269 | return prev; |
268 | #endif /* __s390x__ */ | 270 | #endif /* __s390x__ */ |
@@ -302,17 +304,17 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
302 | #define __ctl_load(array, low, high) ({ \ | 304 | #define __ctl_load(array, low, high) ({ \ |
303 | typedef struct { char _[sizeof(array)]; } addrtype; \ | 305 | typedef struct { char _[sizeof(array)]; } addrtype; \ |
304 | asm volatile( \ | 306 | asm volatile( \ |
305 | " lctlg %1,%2,0(%0)\n" \ | 307 | " lctlg %1,%2,%0\n" \ |
306 | : : "a" (&array), "i" (low), "i" (high), \ | 308 | : : "Q" (*(addrtype *)(&array)), \ |
307 | "m" (*(addrtype *)(&array))); \ | 309 | "i" (low), "i" (high)); \ |
308 | }) | 310 | }) |
309 | 311 | ||
310 | #define __ctl_store(array, low, high) ({ \ | 312 | #define __ctl_store(array, low, high) ({ \ |
311 | typedef struct { char _[sizeof(array)]; } addrtype; \ | 313 | typedef struct { char _[sizeof(array)]; } addrtype; \ |
312 | asm volatile( \ | 314 | asm volatile( \ |
313 | " stctg %2,%3,0(%1)\n" \ | 315 | " stctg %1,%2,%0\n" \ |
314 | : "=m" (*(addrtype *)(&array)) \ | 316 | : "=Q" (*(addrtype *)(&array)) \ |
315 | : "a" (&array), "i" (low), "i" (high)); \ | 317 | : "i" (low), "i" (high)); \ |
316 | }) | 318 | }) |
317 | 319 | ||
318 | #else /* __s390x__ */ | 320 | #else /* __s390x__ */ |
@@ -320,17 +322,17 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
320 | #define __ctl_load(array, low, high) ({ \ | 322 | #define __ctl_load(array, low, high) ({ \ |
321 | typedef struct { char _[sizeof(array)]; } addrtype; \ | 323 | typedef struct { char _[sizeof(array)]; } addrtype; \ |
322 | asm volatile( \ | 324 | asm volatile( \ |
323 | " lctl %1,%2,0(%0)\n" \ | 325 | " lctl %1,%2,%0\n" \ |
324 | : : "a" (&array), "i" (low), "i" (high), \ | 326 | : : "Q" (*(addrtype *)(&array)), \ |
325 | "m" (*(addrtype *)(&array))); \ | 327 | "i" (low), "i" (high)); \ |
326 | }) | 328 | }) |
327 | 329 | ||
328 | #define __ctl_store(array, low, high) ({ \ | 330 | #define __ctl_store(array, low, high) ({ \ |
329 | typedef struct { char _[sizeof(array)]; } addrtype; \ | 331 | typedef struct { char _[sizeof(array)]; } addrtype; \ |
330 | asm volatile( \ | 332 | asm volatile( \ |
331 | " stctl %2,%3,0(%1)\n" \ | 333 | " stctl %1,%2,%0\n" \ |
332 | : "=m" (*(addrtype *)(&array)) \ | 334 | : "=Q" (*(addrtype *)(&array)) \ |
333 | : "a" (&array), "i" (low), "i" (high)); \ | 335 | : "i" (low), "i" (high)); \ |
334 | }) | 336 | }) |
335 | 337 | ||
336 | #endif /* __s390x__ */ | 338 | #endif /* __s390x__ */ |
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 68d9fea34b4b..f174bdaa6b59 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -20,10 +20,10 @@ static inline int set_clock(__u64 time)
20 | int cc; | 20 | int cc; |
21 | 21 | ||
22 | asm volatile( | 22 | asm volatile( |
23 | " sck 0(%2)\n" | 23 | " sck %1\n" |
24 | " ipm %0\n" | 24 | " ipm %0\n" |
25 | " srl %0,28\n" | 25 | " srl %0,28\n" |
26 | : "=d" (cc) : "m" (time), "a" (&time) : "cc"); | 26 | : "=d" (cc) : "Q" (time) : "cc"); |
27 | return cc; | 27 | return cc; |
28 | } | 28 | } |
29 | 29 | ||
@@ -32,21 +32,21 @@ static inline int store_clock(__u64 *time)
32 | int cc; | 32 | int cc; |
33 | 33 | ||
34 | asm volatile( | 34 | asm volatile( |
35 | " stck 0(%2)\n" | 35 | " stck %1\n" |
36 | " ipm %0\n" | 36 | " ipm %0\n" |
37 | " srl %0,28\n" | 37 | " srl %0,28\n" |
38 | : "=d" (cc), "=m" (*time) : "a" (time) : "cc"); | 38 | : "=d" (cc), "=Q" (*time) : : "cc"); |
39 | return cc; | 39 | return cc; |
40 | } | 40 | } |
41 | 41 | ||
42 | static inline void set_clock_comparator(__u64 time) | 42 | static inline void set_clock_comparator(__u64 time) |
43 | { | 43 | { |
44 | asm volatile("sckc 0(%1)" : : "m" (time), "a" (&time)); | 44 | asm volatile("sckc %0" : : "Q" (time)); |
45 | } | 45 | } |
46 | 46 | ||
47 | static inline void store_clock_comparator(__u64 *time) | 47 | static inline void store_clock_comparator(__u64 *time) |
48 | { | 48 | { |
49 | asm volatile("stckc 0(%1)" : "=m" (*time) : "a" (time)); | 49 | asm volatile("stckc %0" : "=Q" (*time)); |
50 | } | 50 | } |
51 | 51 | ||
52 | #define CLOCK_TICK_RATE 1193180 /* Underlying HZ */ | 52 | #define CLOCK_TICK_RATE 1193180 /* Underlying HZ */ |
@@ -57,11 +57,7 @@ static inline unsigned long long get_clock (void)
57 | { | 57 | { |
58 | unsigned long long clk; | 58 | unsigned long long clk; |
59 | 59 | ||
60 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) | ||
61 | asm volatile("stck %0" : "=Q" (clk) : : "cc"); | 60 | asm volatile("stck %0" : "=Q" (clk) : : "cc"); |
62 | #else /* __GNUC__ */ | ||
63 | asm volatile("stck 0(%1)" : "=m" (clk) : "a" (&clk) : "cc"); | ||
64 | #endif /* __GNUC__ */ | ||
65 | return clk; | 61 | return clk; |
66 | } | 62 | } |
67 | 63 | ||
@@ -69,13 +65,7 @@ static inline unsigned long long get_clock_xt(void)
69 | { | 65 | { |
70 | unsigned char clk[16]; | 66 | unsigned char clk[16]; |
71 | 67 | ||
72 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) | ||
73 | asm volatile("stcke %0" : "=Q" (clk) : : "cc"); | 68 | asm volatile("stcke %0" : "=Q" (clk) : : "cc"); |
74 | #else /* __GNUC__ */ | ||
75 | asm volatile("stcke 0(%1)" : "=m" (clk) | ||
76 | : "a" (clk) : "cc"); | ||
77 | #endif /* __GNUC__ */ | ||
78 | |||
79 | return *((unsigned long long *)&clk[1]); | 69 | return *((unsigned long long *)&clk[1]); |
80 | } | 70 | } |
81 | 71 | ||
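set_clock() and store_clock() return the condition code of sck/stck, extracted with the standard ipm/srl idiom: ipm inserts the 2-bit condition code into bits 2-3 of the register's leftmost byte (bits 0-1 zeroed), and srl by 28 leaves it as an integer 0..3. An annotated sketch of the idiom on its own (illustrative name; same constraints as store_clock() above):

static inline int stck_cc_sketch(unsigned long long *time)
{
	int cc;

	asm volatile(
		"	stck	%1\n"	/* condition code reports clock state */
		"	ipm	%0\n"	/* cc -> bits 2-3 of the top byte */
		"	srl	%0,28\n"	/* cc -> value 0..3 */
		: "=d" (cc), "=Q" (*time) : : "cc");
	return cc;
}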
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 63e46433e81d..a5850a0cfe80 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -9,6 +9,14 @@
9 | #include <asm/vdso.h> | 9 | #include <asm/vdso.h> |
10 | #include <asm/sigp.h> | 10 | #include <asm/sigp.h> |
11 | 11 | ||
12 | /* | ||
13 | * Make sure that the compiler is new enough. We want a compiler that | ||
14 | * is known to work with the "Q" assembler constraint. | ||
15 | */ | ||
16 | #if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 3) | ||
17 | #error Your compiler is too old; please use version 3.3.3 or newer | ||
18 | #endif | ||
19 | |||
12 | int main(void) | 20 | int main(void) |
13 | { | 21 | { |
14 | DEFINE(__THREAD_info, offsetof(struct task_struct, stack)); | 22 | DEFINE(__THREAD_info, offsetof(struct task_struct, stack)); |
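Note that the #error guard only consults __GNUC__ and __GNUC_MINOR__, so it actually enforces gcc >= 3.3 rather than 3.3.3; the full version could be pinned with the patchlevel macro, as in this sketch (illustration; the kernel later added a similar GCC_VERSION helper in <linux/compiler-gcc.h>):

#define GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 \
		     + __GNUC_PATCHLEVEL__)

#if GCC_VERSION < 30303
#error Your compiler is too old; please use version 3.3.3 or newer
#endif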