author    Martin Schwidefsky <schwidefsky@de.ibm.com>    2006-09-28 10:56:43 -0400
committer Martin Schwidefsky <schwidefsky@de.ibm.com>    2006-09-28 10:56:43 -0400
commit    94c12cc7d196bab34aaa98d38521549fa1e5ef76 (patch)
tree      8e0cec0ed44445d74a2cb5160303d6b4dfb1bc31
parent    25d83cbfaa44e1b9170c0941c3ef52ca39f54ccc (diff)
[S390] Inline assembly cleanup.
Major cleanup of all s390 inline assemblies. They now have a common coding style. Quite a few have been shortened, mainly by using register asm variables. Use of the EX_TABLE macro helps as well. The atomic ops, bit ops and locking inlines now use the Q-constraint if a newer gcc is used. That results in slightly better code. Thanks to Christian Borntraeger for proof reading the changes.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
-rw-r--r-- arch/s390/crypto/crypt_s390.h | 204
-rw-r--r-- arch/s390/hypfs/hypfs_diag.c | 23
-rw-r--r-- arch/s390/kernel/compat_linux.c | 5
-rw-r--r-- arch/s390/kernel/cpcmd.c | 83
-rw-r--r-- arch/s390/kernel/ipl.c | 21
-rw-r--r-- arch/s390/kernel/process.c | 5
-rw-r--r-- arch/s390/kernel/semaphore.c | 22
-rw-r--r-- arch/s390/kernel/setup.c | 2
-rw-r--r-- arch/s390/kernel/smp.c | 73
-rw-r--r-- arch/s390/kernel/time.c | 10
-rw-r--r-- arch/s390/kernel/traps.c | 3
-rw-r--r-- arch/s390/lib/delay.c | 11
-rw-r--r-- arch/s390/math-emu/math.c | 126
-rw-r--r-- arch/s390/math-emu/sfp-util.h | 73
-rw-r--r-- arch/s390/mm/extmem.c | 16
-rw-r--r-- arch/s390/mm/fault.c | 34
-rw-r--r-- arch/s390/mm/init.c | 41
-rw-r--r-- drivers/s390/block/dasd_diag.c | 34
-rw-r--r-- drivers/s390/block/xpram.c | 54
-rw-r--r-- drivers/s390/char/sclp.c | 31
-rw-r--r-- drivers/s390/char/vmwatchdog.c | 52
-rw-r--r-- drivers/s390/cio/device_id.c | 38
-rw-r--r-- drivers/s390/cio/ioasm.h | 220
-rw-r--r-- drivers/s390/cio/qdio.h | 192
-rw-r--r-- drivers/s390/net/iucv.c | 39
-rw-r--r-- drivers/s390/s390mach.c | 93
-rw-r--r-- include/asm-s390/appldata.h | 2
-rw-r--r-- include/asm-s390/atomic.h | 120
-rw-r--r-- include/asm-s390/bitops.h | 626
-rw-r--r-- include/asm-s390/byteorder.h | 50
-rw-r--r-- include/asm-s390/checksum.h | 176
-rw-r--r-- include/asm-s390/ebcdic.h | 20
-rw-r--r-- include/asm-s390/io.h | 14
-rw-r--r-- include/asm-s390/irqflags.h | 110
-rw-r--r-- include/asm-s390/lowcore.h | 2
-rw-r--r-- include/asm-s390/page.h | 111
-rw-r--r-- include/asm-s390/pgtable.h | 28
-rw-r--r-- include/asm-s390/processor.h | 130
-rw-r--r-- include/asm-s390/ptrace.h | 2
-rw-r--r-- include/asm-s390/rwsem.h | 238
-rw-r--r-- include/asm-s390/semaphore.h | 16
-rw-r--r-- include/asm-s390/sfp-machine.h | 64
-rw-r--r-- include/asm-s390/sigp.h | 65
-rw-r--r-- include/asm-s390/smp.h | 2
-rw-r--r-- include/asm-s390/spinlock.h | 27
-rw-r--r-- include/asm-s390/string.h | 56
-rw-r--r-- include/asm-s390/system.h | 342
-rw-r--r-- include/asm-s390/timex.h | 19
-rw-r--r-- include/asm-s390/tlbflush.h | 32
-rw-r--r-- include/asm-s390/uaccess.h | 13
-rw-r--r-- include/asm-s390/unistd.h | 258
51 files changed, 1759 insertions(+), 2269 deletions(-)
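
Editorial note: the single most repeated transformation in this patch is the replacement of hand-written, CONFIG_64BIT-dependent __ex_table sections by the EX_TABLE macro. As a standalone illustration, here is the diag308 wrapper from the ipl.c hunk below, before and after (condensed from the diff itself):

	/* Before: each call site carried its own fixup section,
	 * duplicated for 31-bit and 64-bit builds: */
	asm volatile(
		"	diag	%0,%2,0x308\n"
		"0:\n"
		".section __ex_table,\"a\"\n"
	#ifdef CONFIG_64BIT
		"	.align 8\n"
		"	.quad	0b,0b\n"
	#else
		"	.align 4\n"
		"	.long	0b,0b\n"
	#endif
		".previous\n"
		: "+d" (_addr), "+d" (_rc)
		: "d" (subcode) : "cc", "memory");

	/* After: EX_TABLE emits a correctly sized and aligned entry
	 * for either mode, so the #ifdef disappears: */
	asm volatile(
		"	diag	%0,%2,0x308\n"
		"0:\n"
		EX_TABLE(0b,0b)
		: "+d" (_addr), "+d" (_rc)
		: "d" (subcode) : "cc", "memory");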
diff --git a/arch/s390/crypto/crypt_s390.h b/arch/s390/crypto/crypt_s390.h
index efd836c2e4a6..2b137089f625 100644
--- a/arch/s390/crypto/crypt_s390.h
+++ b/arch/s390/crypto/crypt_s390.h
@@ -105,63 +105,6 @@ struct crypt_s390_query_status {
105}; 105};
106 106
107/* 107/*
108 * Standard fixup and ex_table sections for crypt_s390 inline functions.
109 * label 0: the s390 crypto operation
110 * label 1: just after 1 to catch illegal operation exception
111 * (unsupported model)
112 * label 6: the return point after fixup
113 * label 7: set error value if exception _in_ crypto operation
114 * label 8: set error value if illegal operation exception
115 * [ret] is the variable to receive the error code
116 * [ERR] is the error code value
117 */
118#ifndef CONFIG_64BIT
119#define __crypt_s390_fixup \
120 ".section .fixup,\"ax\" \n" \
121 "7: lhi %0,%h[e1] \n" \
122 " bras 1,9f \n" \
123 " .long 6b \n" \
124 "8: lhi %0,%h[e2] \n" \
125 " bras 1,9f \n" \
126 " .long 6b \n" \
127 "9: l 1,0(1) \n" \
128 " br 1 \n" \
129 ".previous \n" \
130 ".section __ex_table,\"a\" \n" \
131 " .align 4 \n" \
132 " .long 0b,7b \n" \
133 " .long 1b,8b \n" \
134 ".previous"
135#else /* CONFIG_64BIT */
136#define __crypt_s390_fixup \
137 ".section .fixup,\"ax\" \n" \
138 "7: lhi %0,%h[e1] \n" \
139 " jg 6b \n" \
140 "8: lhi %0,%h[e2] \n" \
141 " jg 6b \n" \
142 ".previous\n" \
143 ".section __ex_table,\"a\" \n" \
144 " .align 8 \n" \
145 " .quad 0b,7b \n" \
146 " .quad 1b,8b \n" \
147 ".previous"
148#endif /* CONFIG_64BIT */
149
150/*
151 * Standard code for setting the result of s390 crypto instructions.
152 * %0: the register which will receive the result
153 * [result]: the register containing the result (e.g. second operand length
154 * to compute number of processed bytes].
155 */
156#ifndef CONFIG_64BIT
157#define __crypt_s390_set_result \
158 " lr %0,%[result] \n"
159#else /* CONFIG_64BIT */
160#define __crypt_s390_set_result \
161 " lgr %0,%[result] \n"
162#endif
163
164/*
165 * Executes the KM (CIPHER MESSAGE) operation of the CPU. 108 * Executes the KM (CIPHER MESSAGE) operation of the CPU.
166 * @param func: the function code passed to KM; see crypt_s390_km_func 109 * @param func: the function code passed to KM; see crypt_s390_km_func
167 * @param param: address of parameter block; see POP for details on each func 110 * @param param: address of parameter block; see POP for details on each func
@@ -176,28 +119,24 @@ crypt_s390_km(long func, void* param, u8* dest, const u8* src, long src_len)
176{ 119{
177 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK; 120 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
178 register void* __param asm("1") = param; 121 register void* __param asm("1") = param;
179 register u8* __dest asm("4") = dest;
180 register const u8* __src asm("2") = src; 122 register const u8* __src asm("2") = src;
181 register long __src_len asm("3") = src_len; 123 register long __src_len asm("3") = src_len;
124 register u8* __dest asm("4") = dest;
182 int ret; 125 int ret;
183 126
184 ret = 0; 127 asm volatile(
185 __asm__ __volatile__ ( 128 "0: .insn rre,0xb92e0000,%3,%1 \n" /* KM opcode */
186 "0: .insn rre,0xB92E0000,%1,%2 \n" /* KM opcode */
187 "1: brc 1,0b \n" /* handle partial completion */ 129 "1: brc 1,0b \n" /* handle partial completion */
188 __crypt_s390_set_result 130 " ahi %0,%h7\n"
189 "6: \n" 131 "2: ahi %0,%h8\n"
190 __crypt_s390_fixup 132 "3:\n"
191 : "+d" (ret), "+a" (__dest), "+a" (__src), 133 EX_TABLE(0b,3b) EX_TABLE(1b,2b)
192 [result] "+d" (__src_len) 134 : "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest)
193 : [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func), 135 : "d" (__func), "a" (__param), "0" (-EFAULT),
194 "a" (__param) 136 "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory");
195 : "cc", "memory" 137 if (ret < 0)
196 ); 138 return ret;
197 if (ret >= 0 && func & CRYPT_S390_FUNC_MASK){ 139 return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
198 ret = src_len - ret;
199 }
200 return ret;
201} 140}
202 141
203/* 142/*
@@ -215,28 +154,24 @@ crypt_s390_kmc(long func, void* param, u8* dest, const u8* src, long src_len)
215{ 154{
216 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK; 155 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
217 register void* __param asm("1") = param; 156 register void* __param asm("1") = param;
218 register u8* __dest asm("4") = dest;
219 register const u8* __src asm("2") = src; 157 register const u8* __src asm("2") = src;
220 register long __src_len asm("3") = src_len; 158 register long __src_len asm("3") = src_len;
159 register u8* __dest asm("4") = dest;
221 int ret; 160 int ret;
222 161
223 ret = 0; 162 asm volatile(
224 __asm__ __volatile__ ( 163 "0: .insn rre,0xb92f0000,%3,%1 \n" /* KMC opcode */
225 "0: .insn rre,0xB92F0000,%1,%2 \n" /* KMC opcode */
226 "1: brc 1,0b \n" /* handle partial completion */ 164 "1: brc 1,0b \n" /* handle partial completion */
227 __crypt_s390_set_result 165 " ahi %0,%h7\n"
228 "6: \n" 166 "2: ahi %0,%h8\n"
229 __crypt_s390_fixup 167 "3:\n"
230 : "+d" (ret), "+a" (__dest), "+a" (__src), 168 EX_TABLE(0b,3b) EX_TABLE(1b,2b)
231 [result] "+d" (__src_len) 169 : "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest)
232 : [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func), 170 : "d" (__func), "a" (__param), "0" (-EFAULT),
233 "a" (__param) 171 "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory");
234 : "cc", "memory" 172 if (ret < 0)
235 ); 173 return ret;
236 if (ret >= 0 && func & CRYPT_S390_FUNC_MASK){ 174 return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
237 ret = src_len - ret;
238 }
239 return ret;
240} 175}
241 176
242/* 177/*
@@ -258,22 +193,19 @@ crypt_s390_kimd(long func, void* param, const u8* src, long src_len)
258 register long __src_len asm("3") = src_len; 193 register long __src_len asm("3") = src_len;
259 int ret; 194 int ret;
260 195
261 ret = 0; 196 asm volatile(
262 __asm__ __volatile__ ( 197 "0: .insn rre,0xb93e0000,%1,%1 \n" /* KIMD opcode */
263 "0: .insn rre,0xB93E0000,%1,%1 \n" /* KIMD opcode */ 198 "1: brc 1,0b \n" /* handle partial completion */
264 "1: brc 1,0b \n" /* handle partical completion */ 199 " ahi %0,%h6\n"
265 __crypt_s390_set_result 200 "2: ahi %0,%h7\n"
266 "6: \n" 201 "3:\n"
267 __crypt_s390_fixup 202 EX_TABLE(0b,3b) EX_TABLE(1b,2b)
268 : "+d" (ret), "+a" (__src), [result] "+d" (__src_len) 203 : "=d" (ret), "+a" (__src), "+d" (__src_len)
269 : [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func), 204 : "d" (__func), "a" (__param), "0" (-EFAULT),
270 "a" (__param) 205 "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory");
271 : "cc", "memory" 206 if (ret < 0)
272 ); 207 return ret;
273 if (ret >= 0 && (func & CRYPT_S390_FUNC_MASK)){ 208 return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
274 ret = src_len - ret;
275 }
276 return ret;
277} 209}
278 210
279/* 211/*
@@ -294,22 +226,19 @@ crypt_s390_klmd(long func, void* param, const u8* src, long src_len)
294 register long __src_len asm("3") = src_len; 226 register long __src_len asm("3") = src_len;
295 int ret; 227 int ret;
296 228
297 ret = 0; 229 asm volatile(
298 __asm__ __volatile__ ( 230 "0: .insn rre,0xb93f0000,%1,%1 \n" /* KLMD opcode */
299 "0: .insn rre,0xB93F0000,%1,%1 \n" /* KLMD opcode */ 231 "1: brc 1,0b \n" /* handle partial completion */
300 "1: brc 1,0b \n" /* handle partical completion */ 232 " ahi %0,%h6\n"
301 __crypt_s390_set_result 233 "2: ahi %0,%h7\n"
302 "6: \n" 234 "3:\n"
303 __crypt_s390_fixup 235 EX_TABLE(0b,3b) EX_TABLE(1b,2b)
304 : "+d" (ret), "+a" (__src), [result] "+d" (__src_len) 236 : "=d" (ret), "+a" (__src), "+d" (__src_len)
305 : [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func), 237 : "d" (__func), "a" (__param), "0" (-EFAULT),
306 "a" (__param) 238 "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory");
307 : "cc", "memory" 239 if (ret < 0)
308 ); 240 return ret;
309 if (ret >= 0 && func & CRYPT_S390_FUNC_MASK){ 241 return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
310 ret = src_len - ret;
311 }
312 return ret;
313} 242}
314 243
315/* 244/*
@@ -331,22 +260,19 @@ crypt_s390_kmac(long func, void* param, const u8* src, long src_len)
331 register long __src_len asm("3") = src_len; 260 register long __src_len asm("3") = src_len;
332 int ret; 261 int ret;
333 262
334 ret = 0; 263 asm volatile(
335 __asm__ __volatile__ ( 264 "0: .insn rre,0xb91e0000,%1,%1 \n" /* KLAC opcode */
336 "0: .insn rre,0xB91E0000,%5,%5 \n" /* KMAC opcode */ 265 "1: brc 1,0b \n" /* handle partial completion */
337 "1: brc 1,0b \n" /* handle partical completion */ 266 " ahi %0,%h6\n"
338 __crypt_s390_set_result 267 "2: ahi %0,%h7\n"
339 "6: \n" 268 "3:\n"
340 __crypt_s390_fixup 269 EX_TABLE(0b,3b) EX_TABLE(1b,2b)
341 : "+d" (ret), "+a" (__src), [result] "+d" (__src_len) 270 : "=d" (ret), "+a" (__src), "+d" (__src_len)
342 : [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func), 271 : "d" (__func), "a" (__param), "0" (-EFAULT),
343 "a" (__param) 272 "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory");
344 : "cc", "memory" 273 if (ret < 0)
345 ); 274 return ret;
346 if (ret >= 0 && func & CRYPT_S390_FUNC_MASK){ 275 return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
347 ret = src_len - ret;
348 }
349 return ret;
350} 276}
351 277
352/** 278/**
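
Editorial note: the rewritten crypt_s390 helpers select their error value without branches. ret is preloaded with -EFAULT through the "0" (-EFAULT) input constraint, and the two ahi instructions together with the two EX_TABLE entries pick the final value. A sketch of the arithmetic (constants from errno.h; operand numbers as in crypt_s390_km above):

	/*
	 * success:      falls through both adds
	 *               -EFAULT + ENOSYS + (-ENOSYS + EFAULT) ==  0
	 * illegal op:   EX_TABLE(1b,2b) resumes at the second add only
	 *               -EFAULT + (-ENOSYS + EFAULT)          == -ENOSYS
	 * access fault: EX_TABLE(0b,3b) skips both adds       == -EFAULT
	 */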
diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
index 684384f2b364..443fa377d9ff 100644
--- a/arch/s390/hypfs/hypfs_diag.c
+++ b/arch/s390/hypfs/hypfs_diag.c
@@ -333,22 +333,14 @@ static int diag204(unsigned long subcode, unsigned long size, void *addr)
333 register unsigned long _subcode asm("0") = subcode; 333 register unsigned long _subcode asm("0") = subcode;
334 register unsigned long _size asm("1") = size; 334 register unsigned long _size asm("1") = size;
335 335
336 asm volatile (" diag %2,%0,0x204\n" 336 asm volatile(
337 "0: \n" ".section __ex_table,\"a\"\n" 337 " diag %2,%0,0x204\n"
338#ifndef __s390x__ 338 "0:\n"
339 " .align 4\n" 339 EX_TABLE(0b,0b)
340 " .long 0b,0b\n" 340 : "+d" (_subcode), "+d" (_size) : "d" (addr) : "memory");
341#else
342 " .align 8\n"
343 " .quad 0b,0b\n"
344#endif
345 ".previous":"+d" (_subcode), "+d"(_size)
346 :"d"(addr)
347 :"memory");
348 if (_subcode) 341 if (_subcode)
349 return -1; 342 return -1;
350 else 343 return _size;
351 return _size;
352} 344}
353 345
354/* 346/*
@@ -491,8 +483,7 @@ out:
491 483
492static void diag224(void *ptr) 484static void diag224(void *ptr)
493{ 485{
494 asm volatile(" diag %0,%1,0x224\n" 486 asm volatile("diag %0,%1,0x224" : :"d" (0), "d"(ptr) : "memory");
495 : :"d" (0), "d"(ptr) : "memory");
496} 487}
497 488
498static int diag224_get_name_table(void) 489static int diag224_get_name_table(void)
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 91b2884fa5c4..c46e3d48e410 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -544,10 +544,7 @@ sys32_execve(struct pt_regs regs)
544 current->ptrace &= ~PT_DTRACE; 544 current->ptrace &= ~PT_DTRACE;
545 task_unlock(current); 545 task_unlock(current);
546 current->thread.fp_regs.fpc=0; 546 current->thread.fp_regs.fpc=0;
547 __asm__ __volatile__ 547 asm volatile("sfpc %0,0" : : "d" (0));
548 ("sr 0,0\n\t"
549 "sfpc 0,0\n\t"
550 : : :"0");
551 } 548 }
552 putname(filename); 549 putname(filename);
553out: 550out:
diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c
index 4ef44e536b2c..1eae74e72f95 100644
--- a/arch/s390/kernel/cpcmd.c
+++ b/arch/s390/kernel/cpcmd.c
@@ -25,11 +25,8 @@ static char cpcmd_buf[241];
25 */ 25 */
26int __cpcmd(const char *cmd, char *response, int rlen, int *response_code) 26int __cpcmd(const char *cmd, char *response, int rlen, int *response_code)
27{ 27{
28 const int mask = 0x40000000L; 28 unsigned long flags, cmdlen;
29 unsigned long flags; 29 int return_code, return_len;
30 int return_code;
31 int return_len;
32 int cmdlen;
33 30
34 spin_lock_irqsave(&cpcmd_lock, flags); 31 spin_lock_irqsave(&cpcmd_lock, flags);
35 cmdlen = strlen(cmd); 32 cmdlen = strlen(cmd);
@@ -38,64 +35,44 @@ int __cpcmd(const char *cmd, char *response, int rlen, int *response_code)
38 ASCEBC(cpcmd_buf, cmdlen); 35 ASCEBC(cpcmd_buf, cmdlen);
39 36
40 if (response != NULL && rlen > 0) { 37 if (response != NULL && rlen > 0) {
38 register unsigned long reg2 asm ("2") = (addr_t) cpcmd_buf;
39 register unsigned long reg3 asm ("3") = (addr_t) response;
40 register unsigned long reg4 asm ("4") = cmdlen | 0x40000000L;
41 register unsigned long reg5 asm ("5") = rlen;
42
41 memset(response, 0, rlen); 43 memset(response, 0, rlen);
44 asm volatile(
42#ifndef CONFIG_64BIT 45#ifndef CONFIG_64BIT
43 asm volatile ( "lra 2,0(%2)\n" 46 " diag %2,%0,0x8\n"
44 "lr 4,%3\n" 47 " brc 8,1f\n"
45 "o 4,%6\n" 48 " ar %1,%4\n"
46 "lra 3,0(%4)\n"
47 "lr 5,%5\n"
48 "diag 2,4,0x8\n"
49 "brc 8, 1f\n"
50 "ar 5, %5\n"
51 "1: \n"
52 "lr %0,4\n"
53 "lr %1,5\n"
54 : "=d" (return_code), "=d" (return_len)
55 : "a" (cpcmd_buf), "d" (cmdlen),
56 "a" (response), "d" (rlen), "m" (mask)
57 : "cc", "2", "3", "4", "5" );
58#else /* CONFIG_64BIT */ 49#else /* CONFIG_64BIT */
59 asm volatile ( "lrag 2,0(%2)\n" 50 " sam31\n"
60 "lgr 4,%3\n" 51 " diag %2,%0,0x8\n"
61 "o 4,%6\n" 52 " sam64\n"
62 "lrag 3,0(%4)\n" 53 " brc 8,1f\n"
63 "lgr 5,%5\n" 54 " agr %1,%4\n"
64 "sam31\n"
65 "diag 2,4,0x8\n"
66 "sam64\n"
67 "brc 8, 1f\n"
68 "agr 5, %5\n"
69 "1: \n"
70 "lgr %0,4\n"
71 "lgr %1,5\n"
72 : "=d" (return_code), "=d" (return_len)
73 : "a" (cpcmd_buf), "d" (cmdlen),
74 "a" (response), "d" (rlen), "m" (mask)
75 : "cc", "2", "3", "4", "5" );
76#endif /* CONFIG_64BIT */ 55#endif /* CONFIG_64BIT */
56 "1:\n"
57 : "+d" (reg4), "+d" (reg5)
58 : "d" (reg2), "d" (reg3), "d" (rlen) : "cc");
59 return_code = (int) reg4;
60 return_len = (int) reg5;
77 EBCASC(response, rlen); 61 EBCASC(response, rlen);
78 } else { 62 } else {
63 register unsigned long reg2 asm ("2") = (addr_t) cpcmd_buf;
64 register unsigned long reg3 asm ("3") = cmdlen;
79 return_len = 0; 65 return_len = 0;
66 asm volatile(
80#ifndef CONFIG_64BIT 67#ifndef CONFIG_64BIT
81 asm volatile ( "lra 2,0(%1)\n" 68 " diag %1,%0,0x8\n"
82 "lr 3,%2\n"
83 "diag 2,3,0x8\n"
84 "lr %0,3\n"
85 : "=d" (return_code)
86 : "a" (cpcmd_buf), "d" (cmdlen)
87 : "2", "3" );
88#else /* CONFIG_64BIT */ 69#else /* CONFIG_64BIT */
89 asm volatile ( "lrag 2,0(%1)\n" 70 " sam31\n"
90 "lgr 3,%2\n" 71 " diag %1,%0,0x8\n"
91 "sam31\n" 72 " sam64\n"
92 "diag 2,3,0x8\n"
93 "sam64\n"
94 "lgr %0,3\n"
95 : "=d" (return_code)
96 : "a" (cpcmd_buf), "d" (cmdlen)
97 : "2", "3" );
98#endif /* CONFIG_64BIT */ 73#endif /* CONFIG_64BIT */
74 : "+d" (reg3) : "d" (reg2) : "cc");
75 return_code = (int) reg3;
99 } 76 }
100 spin_unlock_irqrestore(&cpcmd_lock, flags); 77 spin_unlock_irqrestore(&cpcmd_lock, flags);
101 if (response_code != NULL) 78 if (response_code != NULL)
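
Editorial note: the __cpcmd rewrite is the clearest example of the register asm variable technique the commit message mentions. Instead of copying operands into r2-r5 with lra/lr instructions inside the assembly, the C declarations pin them to the registers diagnose 8 expects. A minimal sketch of the 31-bit response path (locking and 64-bit mode switching elided):

	register unsigned long reg2 asm("2") = (addr_t) cpcmd_buf;   /* command */
	register unsigned long reg3 asm("3") = (addr_t) response;    /* buffer */
	register unsigned long reg4 asm("4") = cmdlen | 0x40000000L; /* len + response flag */
	register unsigned long reg5 asm("5") = rlen;

	asm volatile(
		"	diag	%2,%0,0x8\n"	/* issue CP command */
		"	brc	8,1f\n"		/* cc 0: response fit into buffer */
		"	ar	%1,%4\n"	/* else add rlen to residual count */
		"1:\n"
		: "+d" (reg4), "+d" (reg5)
		: "d" (reg2), "d" (reg3), "d" (rlen) : "cc");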
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 6555cc48e28f..1f5e782b3d05 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -120,24 +120,15 @@ static enum shutdown_action on_panic_action = SHUTDOWN_STOP;
120 120
121static int diag308(unsigned long subcode, void *addr) 121static int diag308(unsigned long subcode, void *addr)
122{ 122{
123 register unsigned long _addr asm("0") = (unsigned long)addr; 123 register unsigned long _addr asm("0") = (unsigned long) addr;
124 register unsigned long _rc asm("1") = 0; 124 register unsigned long _rc asm("1") = 0;
125 125
126 asm volatile ( 126 asm volatile(
127 " diag %0,%2,0x308\n" 127 " diag %0,%2,0x308\n"
128 "0: \n" 128 "0:\n"
129 ".section __ex_table,\"a\"\n" 129 EX_TABLE(0b,0b)
130#ifdef CONFIG_64BIT
131 " .align 8\n"
132 " .quad 0b, 0b\n"
133#else
134 " .align 4\n"
135 " .long 0b, 0b\n"
136#endif
137 ".previous\n"
138 : "+d" (_addr), "+d" (_rc) 130 : "+d" (_addr), "+d" (_rc)
139 : "d" (subcode) : "cc", "memory" ); 131 : "d" (subcode) : "cc", "memory");
140
141 return _rc; 132 return _rc;
142} 133}
143 134
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index d3cbfa3005ec..6603fbb41d07 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -45,7 +45,7 @@
45#include <asm/irq.h> 45#include <asm/irq.h>
46#include <asm/timer.h> 46#include <asm/timer.h>
47 47
48asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); 48asmlinkage void ret_from_fork(void) asm ("ret_from_fork");
49 49
50/* 50/*
51 * Return saved PC of a blocked thread. used in kernel/sched. 51 * Return saved PC of a blocked thread. used in kernel/sched.
@@ -177,7 +177,8 @@ void show_regs(struct pt_regs *regs)
177 177
178extern void kernel_thread_starter(void); 178extern void kernel_thread_starter(void);
179 179
180__asm__(".align 4\n" 180asm(
181 ".align 4\n"
181 "kernel_thread_starter:\n" 182 "kernel_thread_starter:\n"
182 " la 2,0(10)\n" 183 " la 2,0(10)\n"
183 " basr 14,9\n" 184 " basr 14,9\n"
diff --git a/arch/s390/kernel/semaphore.c b/arch/s390/kernel/semaphore.c
index 8dfb690c159f..191303f6c1d8 100644
--- a/arch/s390/kernel/semaphore.c
+++ b/arch/s390/kernel/semaphore.c
@@ -26,17 +26,17 @@ static inline int __sem_update_count(struct semaphore *sem, int incr)
26{ 26{
27 int old_val, new_val; 27 int old_val, new_val;
28 28
29 __asm__ __volatile__(" l %0,0(%3)\n" 29 asm volatile(
30 "0: ltr %1,%0\n" 30 " l %0,0(%3)\n"
31 " jhe 1f\n" 31 "0: ltr %1,%0\n"
32 " lhi %1,0\n" 32 " jhe 1f\n"
33 "1: ar %1,%4\n" 33 " lhi %1,0\n"
34 " cs %0,%1,0(%3)\n" 34 "1: ar %1,%4\n"
35 " jl 0b\n" 35 " cs %0,%1,0(%3)\n"
36 : "=&d" (old_val), "=&d" (new_val), 36 " jl 0b\n"
37 "=m" (sem->count) 37 : "=&d" (old_val), "=&d" (new_val), "=m" (sem->count)
38 : "a" (&sem->count), "d" (incr), "m" (sem->count) 38 : "a" (&sem->count), "d" (incr), "m" (sem->count)
39 : "cc" ); 39 : "cc");
40 return old_val; 40 return old_val;
41} 41}
42 42
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index e3d9325f6022..a21cfbb9d97e 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -101,7 +101,7 @@ void __devinit cpu_init (void)
101 /* 101 /*
102 * Store processor id in lowcore (used e.g. in timer_interrupt) 102 * Store processor id in lowcore (used e.g. in timer_interrupt)
103 */ 103 */
104 asm volatile ("stidp %0": "=m" (S390_lowcore.cpu_data.cpu_id)); 104 asm volatile("stidp %0": "=m" (S390_lowcore.cpu_data.cpu_id));
105 S390_lowcore.cpu_data.cpu_addr = addr; 105 S390_lowcore.cpu_data.cpu_addr = addr;
106 106
107 /* 107 /*
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index b2e6f4c8d382..a8e6199755d4 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -63,7 +63,7 @@ static void smp_ext_bitcall(int, ec_bit_sig);
63static void smp_ext_bitcall_others(ec_bit_sig); 63static void smp_ext_bitcall_others(ec_bit_sig);
64 64
65/* 65/*
66 * Structure and data for smp_call_function(). This is designed to minimise 66 * Structure and data for smp_call_function(). This is designed to minimise
67 * static memory requirements. It also looks cleaner. 67 * static memory requirements. It also looks cleaner.
68 */ 68 */
69static DEFINE_SPINLOCK(call_lock); 69static DEFINE_SPINLOCK(call_lock);
@@ -418,59 +418,49 @@ void smp_send_reschedule(int cpu)
418/* 418/*
419 * parameter area for the set/clear control bit callbacks 419 * parameter area for the set/clear control bit callbacks
420 */ 420 */
421typedef struct 421struct ec_creg_mask_parms {
422{
423 __u16 start_ctl;
424 __u16 end_ctl;
425 unsigned long orvals[16]; 422 unsigned long orvals[16];
426 unsigned long andvals[16]; 423 unsigned long andvals[16];
427} ec_creg_mask_parms; 424};
428 425
429/* 426/*
430 * callback for setting/clearing control bits 427 * callback for setting/clearing control bits
431 */ 428 */
432void smp_ctl_bit_callback(void *info) { 429void smp_ctl_bit_callback(void *info) {
433 ec_creg_mask_parms *pp; 430 struct ec_creg_mask_parms *pp = info;
434 unsigned long cregs[16]; 431 unsigned long cregs[16];
435 int i; 432 int i;
436 433
437 pp = (ec_creg_mask_parms *) info; 434 __ctl_store(cregs, 0, 15);
438 __ctl_store(cregs[pp->start_ctl], pp->start_ctl, pp->end_ctl); 435 for (i = 0; i <= 15; i++)
439 for (i = pp->start_ctl; i <= pp->end_ctl; i++)
440 cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i]; 436 cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
441 __ctl_load(cregs[pp->start_ctl], pp->start_ctl, pp->end_ctl); 437 __ctl_load(cregs, 0, 15);
442} 438}
443 439
444/* 440/*
445 * Set a bit in a control register of all cpus 441 * Set a bit in a control register of all cpus
446 */ 442 */
447void smp_ctl_set_bit(int cr, int bit) { 443void smp_ctl_set_bit(int cr, int bit)
448 ec_creg_mask_parms parms; 444{
445 struct ec_creg_mask_parms parms;
449 446
450 parms.start_ctl = cr; 447 memset(&parms.orvals, 0, sizeof(parms.orvals));
451 parms.end_ctl = cr; 448 memset(&parms.andvals, 0xff, sizeof(parms.andvals));
452 parms.orvals[cr] = 1 << bit; 449 parms.orvals[cr] = 1 << bit;
453 parms.andvals[cr] = -1L; 450 on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
454 preempt_disable();
455 smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
456 __ctl_set_bit(cr, bit);
457 preempt_enable();
458} 451}
459 452
460/* 453/*
461 * Clear a bit in a control register of all cpus 454 * Clear a bit in a control register of all cpus
462 */ 455 */
463void smp_ctl_clear_bit(int cr, int bit) { 456void smp_ctl_clear_bit(int cr, int bit)
464 ec_creg_mask_parms parms; 457{
458 struct ec_creg_mask_parms parms;
465 459
466 parms.start_ctl = cr; 460 memset(&parms.orvals, 0, sizeof(parms.orvals));
467 parms.end_ctl = cr; 461 memset(&parms.andvals, 0xff, sizeof(parms.andvals));
468 parms.orvals[cr] = 0;
469 parms.andvals[cr] = ~(1L << bit); 462 parms.andvals[cr] = ~(1L << bit);
470 preempt_disable(); 463 on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
471 smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
472 __ctl_clear_bit(cr, bit);
473 preempt_enable();
474} 464}
475 465
476/* 466/*
@@ -650,9 +640,9 @@ __cpu_up(unsigned int cpu)
650 sf->gprs[9] = (unsigned long) sf; 640 sf->gprs[9] = (unsigned long) sf;
651 cpu_lowcore->save_area[15] = (unsigned long) sf; 641 cpu_lowcore->save_area[15] = (unsigned long) sf;
652 __ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15); 642 __ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15);
653 __asm__ __volatile__("stam 0,15,0(%0)" 643 asm volatile(
654 : : "a" (&cpu_lowcore->access_regs_save_area) 644 " stam 0,15,0(%0)"
655 : "memory"); 645 : : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
656 cpu_lowcore->percpu_offset = __per_cpu_offset[cpu]; 646 cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
657 cpu_lowcore->current_task = (unsigned long) idle; 647 cpu_lowcore->current_task = (unsigned long) idle;
658 cpu_lowcore->cpu_data.cpu_nr = cpu; 648 cpu_lowcore->cpu_data.cpu_nr = cpu;
@@ -708,7 +698,7 @@ int
708__cpu_disable(void) 698__cpu_disable(void)
709{ 699{
710 unsigned long flags; 700 unsigned long flags;
711 ec_creg_mask_parms cr_parms; 701 struct ec_creg_mask_parms cr_parms;
712 int cpu = smp_processor_id(); 702 int cpu = smp_processor_id();
713 703
714 spin_lock_irqsave(&smp_reserve_lock, flags); 704 spin_lock_irqsave(&smp_reserve_lock, flags);
@@ -724,30 +714,21 @@ __cpu_disable(void)
724 pfault_fini(); 714 pfault_fini();
725#endif 715#endif
726 716
727 /* disable all external interrupts */ 717 memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals));
718 memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals));
728 719
729 cr_parms.start_ctl = 0; 720 /* disable all external interrupts */
730 cr_parms.end_ctl = 0;
731 cr_parms.orvals[0] = 0; 721 cr_parms.orvals[0] = 0;
732 cr_parms.andvals[0] = ~(1<<15 | 1<<14 | 1<<13 | 1<<12 | 722 cr_parms.andvals[0] = ~(1<<15 | 1<<14 | 1<<13 | 1<<12 |
733 1<<11 | 1<<10 | 1<< 6 | 1<< 4); 723 1<<11 | 1<<10 | 1<< 6 | 1<< 4);
734 smp_ctl_bit_callback(&cr_parms);
735
736 /* disable all I/O interrupts */ 724 /* disable all I/O interrupts */
737
738 cr_parms.start_ctl = 6;
739 cr_parms.end_ctl = 6;
740 cr_parms.orvals[6] = 0; 725 cr_parms.orvals[6] = 0;
741 cr_parms.andvals[6] = ~(1<<31 | 1<<30 | 1<<29 | 1<<28 | 726 cr_parms.andvals[6] = ~(1<<31 | 1<<30 | 1<<29 | 1<<28 |
742 1<<27 | 1<<26 | 1<<25 | 1<<24); 727 1<<27 | 1<<26 | 1<<25 | 1<<24);
743 smp_ctl_bit_callback(&cr_parms);
744
745 /* disable most machine checks */ 728 /* disable most machine checks */
746
747 cr_parms.start_ctl = 14;
748 cr_parms.end_ctl = 14;
749 cr_parms.orvals[14] = 0; 729 cr_parms.orvals[14] = 0;
750 cr_parms.andvals[14] = ~(1<<28 | 1<<27 | 1<<26 | 1<<25 | 1<<24); 730 cr_parms.andvals[14] = ~(1<<28 | 1<<27 | 1<<26 | 1<<25 | 1<<24);
731
751 smp_ctl_bit_callback(&cr_parms); 732 smp_ctl_bit_callback(&cr_parms);
752 733
753 spin_unlock_irqrestore(&smp_reserve_lock, flags); 734 spin_unlock_irqrestore(&smp_reserve_lock, flags);
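
Editorial note: with start_ctl/end_ctl removed from ec_creg_mask_parms, callers now prepare identity masks for all 16 control registers and override only the one they care about. Setting a single bit, as in the smp_ctl_set_bit hunk above, reduces to:

	struct ec_creg_mask_parms parms;

	memset(&parms.orvals, 0, sizeof(parms.orvals));      /* OR in nothing ...     */
	memset(&parms.andvals, 0xff, sizeof(parms.andvals)); /* ... and keep all bits */
	parms.orvals[cr] = 1 << bit;                         /* except this one       */
	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);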
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 74e6178fbaf2..1981c6199fa2 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -351,10 +351,12 @@ void __init time_init(void)
351 int cc; 351 int cc;
352 352
353 /* kick the TOD clock */ 353 /* kick the TOD clock */
354 asm volatile ("STCK 0(%1)\n\t" 354 asm volatile(
355 "IPM %0\n\t" 355 " stck 0(%2)\n"
356 "SRL %0,28" : "=r" (cc) : "a" (&init_timer_cc) 356 " ipm %0\n"
357 : "memory", "cc"); 357 " srl %0,28"
358 : "=d" (cc), "=m" (init_timer_cc)
359 : "a" (&init_timer_cc) : "cc");
358 switch (cc) { 360 switch (cc) {
359 case 0: /* clock in set state: all is fine */ 361 case 0: /* clock in set state: all is fine */
360 break; 362 break;
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index c4982c963424..3eb4fab048b8 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -597,8 +597,7 @@ asmlinkage void data_exception(struct pt_regs * regs, long interruption_code)
597 local_irq_enable(); 597 local_irq_enable();
598 598
599 if (MACHINE_HAS_IEEE) 599 if (MACHINE_HAS_IEEE)
600 __asm__ volatile ("stfpc %0\n\t" 600 asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
601 : "=m" (current->thread.fp_regs.fpc));
602 601
603#ifdef CONFIG_MATHEMU 602#ifdef CONFIG_MATHEMU
604 else if (regs->psw.mask & PSW_MASK_PSTATE) { 603 else if (regs->psw.mask & PSW_MASK_PSTATE) {
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index 468f4ea33f99..027c4742a001 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -27,9 +27,7 @@ void __delay(unsigned long loops)
27 * yield the megahertz number of the cpu. The important function 27 * yield the megahertz number of the cpu. The important function
28 * is udelay and that is done using the tod clock. -- martin. 28 * is udelay and that is done using the tod clock. -- martin.
29 */ 29 */
30 __asm__ __volatile__( 30 asm volatile("0: brct %0,0b" : : "d" ((loops/2) + 1));
31 "0: brct %0,0b"
32 : /* no outputs */ : "r" ((loops/2) + 1));
33} 31}
34 32
35/* 33/*
@@ -38,13 +36,12 @@ void __delay(unsigned long loops)
38 */ 36 */
39void __udelay(unsigned long usecs) 37void __udelay(unsigned long usecs)
40{ 38{
41 uint64_t start_cc, end_cc; 39 uint64_t start_cc;
42 40
43 if (usecs == 0) 41 if (usecs == 0)
44 return; 42 return;
45 asm volatile ("STCK %0" : "=m" (start_cc)); 43 start_cc = get_clock();
46 do { 44 do {
47 cpu_relax(); 45 cpu_relax();
48 asm volatile ("STCK %0" : "=m" (end_cc)); 46 } while (((get_clock() - start_cc)/4096) < usecs);
49 } while (((end_cc - start_cc)/4096) < usecs);
50} 47}
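
Editorial note: the __udelay rewrite drops the open-coded STCK in favor of get_clock(). The s390 TOD clock ticks 4096 times per microsecond, so the busy-wait reduces to:

	uint64_t start_cc = get_clock();

	do {
		cpu_relax();
	} while (((get_clock() - start_cc) / 4096) < usecs);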
diff --git a/arch/s390/math-emu/math.c b/arch/s390/math-emu/math.c
index b4957c84e4d6..6b9aec5a2c18 100644
--- a/arch/s390/math-emu/math.c
+++ b/arch/s390/math-emu/math.c
@@ -1564,52 +1564,52 @@ static int emu_tceb (struct pt_regs *regs, int rx, long val) {
1564} 1564}
1565 1565
1566static inline void emu_load_regd(int reg) { 1566static inline void emu_load_regd(int reg) {
1567 if ((reg&9) != 0) /* test if reg in {0,2,4,6} */ 1567 if ((reg&9) != 0) /* test if reg in {0,2,4,6} */
1568 return; 1568 return;
1569 asm volatile ( /* load reg from fp_regs.fprs[reg] */ 1569 asm volatile( /* load reg from fp_regs.fprs[reg] */
1570 " bras 1,0f\n" 1570 " bras 1,0f\n"
1571 " ld 0,0(%1)\n" 1571 " ld 0,0(%1)\n"
1572 "0: ex %0,0(1)" 1572 "0: ex %0,0(1)"
1573 : /* no output */ 1573 : /* no output */
1574 : "a" (reg<<4),"a" (&current->thread.fp_regs.fprs[reg].d) 1574 : "a" (reg<<4),"a" (&current->thread.fp_regs.fprs[reg].d)
1575 : "1" ); 1575 : "1");
1576} 1576}
1577 1577
1578static inline void emu_load_rege(int reg) { 1578static inline void emu_load_rege(int reg) {
1579 if ((reg&9) != 0) /* test if reg in {0,2,4,6} */ 1579 if ((reg&9) != 0) /* test if reg in {0,2,4,6} */
1580 return; 1580 return;
1581 asm volatile ( /* load reg from fp_regs.fprs[reg] */ 1581 asm volatile( /* load reg from fp_regs.fprs[reg] */
1582 " bras 1,0f\n" 1582 " bras 1,0f\n"
1583 " le 0,0(%1)\n" 1583 " le 0,0(%1)\n"
1584 "0: ex %0,0(1)" 1584 "0: ex %0,0(1)"
1585 : /* no output */ 1585 : /* no output */
1586 : "a" (reg<<4), "a" (&current->thread.fp_regs.fprs[reg].f) 1586 : "a" (reg<<4), "a" (&current->thread.fp_regs.fprs[reg].f)
1587 : "1" ); 1587 : "1");
1588} 1588}
1589 1589
1590static inline void emu_store_regd(int reg) { 1590static inline void emu_store_regd(int reg) {
1591 if ((reg&9) != 0) /* test if reg in {0,2,4,6} */ 1591 if ((reg&9) != 0) /* test if reg in {0,2,4,6} */
1592 return; 1592 return;
1593 asm volatile ( /* store reg to fp_regs.fprs[reg] */ 1593 asm volatile( /* store reg to fp_regs.fprs[reg] */
1594 " bras 1,0f\n" 1594 " bras 1,0f\n"
1595 " std 0,0(%1)\n" 1595 " std 0,0(%1)\n"
1596 "0: ex %0,0(1)" 1596 "0: ex %0,0(1)"
1597 : /* no output */ 1597 : /* no output */
1598 : "a" (reg<<4), "a" (&current->thread.fp_regs.fprs[reg].d) 1598 : "a" (reg<<4), "a" (&current->thread.fp_regs.fprs[reg].d)
1599 : "1" ); 1599 : "1");
1600} 1600}
1601 1601
1602 1602
1603static inline void emu_store_rege(int reg) { 1603static inline void emu_store_rege(int reg) {
1604 if ((reg&9) != 0) /* test if reg in {0,2,4,6} */ 1604 if ((reg&9) != 0) /* test if reg in {0,2,4,6} */
1605 return; 1605 return;
1606 asm volatile ( /* store reg to fp_regs.fprs[reg] */ 1606 asm volatile( /* store reg to fp_regs.fprs[reg] */
1607 " bras 1,0f\n" 1607 " bras 1,0f\n"
1608 " ste 0,0(%1)\n" 1608 " ste 0,0(%1)\n"
1609 "0: ex %0,0(1)" 1609 "0: ex %0,0(1)"
1610 : /* no output */ 1610 : /* no output */
1611 : "a" (reg<<4), "a" (&current->thread.fp_regs.fprs[reg].f) 1611 : "a" (reg<<4), "a" (&current->thread.fp_regs.fprs[reg].f)
1612 : "1" ); 1612 : "1");
1613} 1613}
1614 1614
1615int math_emu_b3(__u8 *opcode, struct pt_regs * regs) { 1615int math_emu_b3(__u8 *opcode, struct pt_regs * regs) {
@@ -2089,23 +2089,22 @@ int math_emu_ldr(__u8 *opcode) {
2089 2089
2090 if ((opc & 0x90) == 0) { /* test if rx in {0,2,4,6} */ 2090 if ((opc & 0x90) == 0) { /* test if rx in {0,2,4,6} */
2091 /* we got an exception therfore ry can't be in {0,2,4,6} */ 2091 /* we got an exception therfore ry can't be in {0,2,4,6} */
2092 __asm__ __volatile ( /* load rx from fp_regs.fprs[ry] */ 2092 asm volatile( /* load rx from fp_regs.fprs[ry] */
2093 " bras 1,0f\n" 2093 " bras 1,0f\n"
2094 " ld 0,0(%1)\n" 2094 " ld 0,0(%1)\n"
2095 "0: ex %0,0(1)" 2095 "0: ex %0,0(1)"
2096 : /* no output */ 2096 : /* no output */
2097 : "a" (opc & 0xf0), 2097 : "a" (opc & 0xf0), "a" (&fp_regs->fprs[opc & 0xf].d)
2098 "a" (&fp_regs->fprs[opc & 0xf].d) 2098 : "1");
2099 : "1" );
2100 } else if ((opc & 0x9) == 0) { /* test if ry in {0,2,4,6} */ 2099 } else if ((opc & 0x9) == 0) { /* test if ry in {0,2,4,6} */
2101 __asm__ __volatile ( /* store ry to fp_regs.fprs[rx] */ 2100 asm volatile ( /* store ry to fp_regs.fprs[rx] */
2102 " bras 1,0f\n" 2101 " bras 1,0f\n"
2103 " std 0,0(%1)\n" 2102 " std 0,0(%1)\n"
2104 "0: ex %0,0(1)" 2103 "0: ex %0,0(1)"
2105 : /* no output */ 2104 : /* no output */
2106 : "a" ((opc & 0xf) << 4), 2105 : "a" ((opc & 0xf) << 4),
2107 "a" (&fp_regs->fprs[(opc & 0xf0)>>4].d) 2106 "a" (&fp_regs->fprs[(opc & 0xf0)>>4].d)
2108 : "1" ); 2107 : "1");
2109 } else /* move fp_regs.fprs[ry] to fp_regs.fprs[rx] */ 2108 } else /* move fp_regs.fprs[ry] to fp_regs.fprs[rx] */
2110 fp_regs->fprs[(opc & 0xf0) >> 4] = fp_regs->fprs[opc & 0xf]; 2109 fp_regs->fprs[(opc & 0xf0) >> 4] = fp_regs->fprs[opc & 0xf];
2111 return 0; 2110 return 0;
@@ -2120,23 +2119,22 @@ int math_emu_ler(__u8 *opcode) {
2120 2119
2121 if ((opc & 0x90) == 0) { /* test if rx in {0,2,4,6} */ 2120 if ((opc & 0x90) == 0) { /* test if rx in {0,2,4,6} */
2122 /* we got an exception therfore ry can't be in {0,2,4,6} */ 2121 /* we got an exception therfore ry can't be in {0,2,4,6} */
2123 __asm__ __volatile ( /* load rx from fp_regs.fprs[ry] */ 2122 asm volatile( /* load rx from fp_regs.fprs[ry] */
2124 " bras 1,0f\n" 2123 " bras 1,0f\n"
2125 " le 0,0(%1)\n" 2124 " le 0,0(%1)\n"
2126 "0: ex %0,0(1)" 2125 "0: ex %0,0(1)"
2127 : /* no output */ 2126 : /* no output */
2128 : "a" (opc & 0xf0), 2127 : "a" (opc & 0xf0), "a" (&fp_regs->fprs[opc & 0xf].f)
2129 "a" (&fp_regs->fprs[opc & 0xf].f) 2128 : "1");
2130 : "1" );
2131 } else if ((opc & 0x9) == 0) { /* test if ry in {0,2,4,6} */ 2129 } else if ((opc & 0x9) == 0) { /* test if ry in {0,2,4,6} */
2132 __asm__ __volatile ( /* store ry to fp_regs.fprs[rx] */ 2130 asm volatile( /* store ry to fp_regs.fprs[rx] */
2133 " bras 1,0f\n" 2131 " bras 1,0f\n"
2134 " ste 0,0(%1)\n" 2132 " ste 0,0(%1)\n"
2135 "0: ex %0,0(1)" 2133 "0: ex %0,0(1)"
2136 : /* no output */ 2134 : /* no output */
2137 : "a" ((opc & 0xf) << 4), 2135 : "a" ((opc & 0xf) << 4),
2138 "a" (&fp_regs->fprs[(opc & 0xf0) >> 4].f) 2136 "a" (&fp_regs->fprs[(opc & 0xf0) >> 4].f)
2139 : "1" ); 2137 : "1");
2140 } else /* move fp_regs.fprs[ry] to fp_regs.fprs[rx] */ 2138 } else /* move fp_regs.fprs[ry] to fp_regs.fprs[rx] */
2141 fp_regs->fprs[(opc & 0xf0) >> 4] = fp_regs->fprs[opc & 0xf]; 2139 fp_regs->fprs[(opc & 0xf0) >> 4] = fp_regs->fprs[opc & 0xf];
2142 return 0; 2140 return 0;
diff --git a/arch/s390/math-emu/sfp-util.h b/arch/s390/math-emu/sfp-util.h
index ab556b600f73..5b6ca4570ea4 100644
--- a/arch/s390/math-emu/sfp-util.h
+++ b/arch/s390/math-emu/sfp-util.h
@@ -4,48 +4,51 @@
4#include <asm/byteorder.h> 4#include <asm/byteorder.h>
5 5
6#define add_ssaaaa(sh, sl, ah, al, bh, bl) ({ \ 6#define add_ssaaaa(sh, sl, ah, al, bh, bl) ({ \
7 unsigned int __sh = (ah); \ 7 unsigned int __sh = (ah); \
8 unsigned int __sl = (al); \ 8 unsigned int __sl = (al); \
9 __asm__ (" alr %1,%3\n" \ 9 asm volatile( \
10 " brc 12,0f\n" \ 10 " alr %1,%3\n" \
11 " ahi %0,1\n" \ 11 " brc 12,0f\n" \
12 "0: alr %0,%2" \ 12 " ahi %0,1\n" \
13 : "+&d" (__sh), "+d" (__sl) \ 13 "0: alr %0,%2" \
14 : "d" (bh), "d" (bl) : "cc" ); \ 14 : "+&d" (__sh), "+d" (__sl) \
15 (sh) = __sh; \ 15 : "d" (bh), "d" (bl) : "cc"); \
16 (sl) = __sl; \ 16 (sh) = __sh; \
17 (sl) = __sl; \
17}) 18})
18 19
19#define sub_ddmmss(sh, sl, ah, al, bh, bl) ({ \ 20#define sub_ddmmss(sh, sl, ah, al, bh, bl) ({ \
20 unsigned int __sh = (ah); \ 21 unsigned int __sh = (ah); \
21 unsigned int __sl = (al); \ 22 unsigned int __sl = (al); \
22 __asm__ (" slr %1,%3\n" \ 23 asm volatile( \
23 " brc 3,0f\n" \ 24 " slr %1,%3\n" \
24 " ahi %0,-1\n" \ 25 " brc 3,0f\n" \
25 "0: slr %0,%2" \ 26 " ahi %0,-1\n" \
26 : "+&d" (__sh), "+d" (__sl) \ 27 "0: slr %0,%2" \
27 : "d" (bh), "d" (bl) : "cc" ); \ 28 : "+&d" (__sh), "+d" (__sl) \
28 (sh) = __sh; \ 29 : "d" (bh), "d" (bl) : "cc"); \
29 (sl) = __sl; \ 30 (sh) = __sh; \
31 (sl) = __sl; \
30}) 32})
31 33
32/* a umul b = a mul b + (a>=2<<31) ? b<<32:0 + (b>=2<<31) ? a<<32:0 */ 34/* a umul b = a mul b + (a>=2<<31) ? b<<32:0 + (b>=2<<31) ? a<<32:0 */
33#define umul_ppmm(wh, wl, u, v) ({ \ 35#define umul_ppmm(wh, wl, u, v) ({ \
34 unsigned int __wh = u; \ 36 unsigned int __wh = u; \
35 unsigned int __wl = v; \ 37 unsigned int __wl = v; \
36 __asm__ (" ltr 1,%0\n" \ 38 asm volatile( \
37 " mr 0,%1\n" \ 39 " ltr 1,%0\n" \
38 " jnm 0f\n" \ 40 " mr 0,%1\n" \
39 " alr 0,%1\n" \ 41 " jnm 0f\n" \
40 "0: ltr %1,%1\n" \ 42 " alr 0,%1\n" \
41 " jnm 1f\n" \ 43 "0: ltr %1,%1\n" \
42 " alr 0,%0\n" \ 44 " jnm 1f\n" \
43 "1: lr %0,0\n" \ 45 " alr 0,%0\n" \
44 " lr %1,1\n" \ 46 "1: lr %0,0\n" \
45 : "+d" (__wh), "+d" (__wl) \ 47 " lr %1,1\n" \
46 : : "0", "1", "cc" ); \ 48 : "+d" (__wh), "+d" (__wl) \
47 wh = __wh; \ 49 : : "0", "1", "cc"); \
48 wl = __wl; \ 50 wh = __wh; \
51 wl = __wl; \
49}) 52})
50 53
51#define udiv_qrnnd(q, r, n1, n0, d) \ 54#define udiv_qrnnd(q, r, n1, n0, d) \
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 9b11e3e20903..226275d5c4f6 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -142,17 +142,17 @@ dcss_diag (__u8 func, void *parameter,
142 142
143 rx = (unsigned long) parameter; 143 rx = (unsigned long) parameter;
144 ry = (unsigned long) func; 144 ry = (unsigned long) func;
145 __asm__ __volatile__( 145 asm volatile(
146#ifdef CONFIG_64BIT 146#ifdef CONFIG_64BIT
147 " sam31\n" // switch to 31 bit 147 " sam31\n"
148 " diag %0,%1,0x64\n" 148 " diag %0,%1,0x64\n"
149 " sam64\n" // switch back to 64 bit 149 " sam64\n"
150#else 150#else
151 " diag %0,%1,0x64\n" 151 " diag %0,%1,0x64\n"
152#endif 152#endif
153 " ipm %2\n" 153 " ipm %2\n"
154 " srl %2,28\n" 154 " srl %2,28\n"
155 : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc" ); 155 : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
156 *ret1 = rx; 156 *ret1 = rx;
157 *ret2 = ry; 157 *ret2 = ry;
158 return rc; 158 return rc;
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index a393c308bb29..f2b9a84dc2bf 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -424,20 +424,13 @@ int pfault_init(void)
424 424
425 if (pfault_disable) 425 if (pfault_disable)
426 return -1; 426 return -1;
427 __asm__ __volatile__( 427 asm volatile(
428 " diag %1,%0,0x258\n" 428 " diag %1,%0,0x258\n"
429 "0: j 2f\n" 429 "0: j 2f\n"
430 "1: la %0,8\n" 430 "1: la %0,8\n"
431 "2:\n" 431 "2:\n"
432 ".section __ex_table,\"a\"\n" 432 EX_TABLE(0b,1b)
433 " .align 4\n" 433 : "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc");
434#ifndef CONFIG_64BIT
435 " .long 0b,1b\n"
436#else /* CONFIG_64BIT */
437 " .quad 0b,1b\n"
438#endif /* CONFIG_64BIT */
439 ".previous"
440 : "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc" );
441 __ctl_set_bit(0, 9); 434 __ctl_set_bit(0, 9);
442 return rc; 435 return rc;
443} 436}
@@ -450,18 +443,11 @@ void pfault_fini(void)
450 if (pfault_disable) 443 if (pfault_disable)
451 return; 444 return;
452 __ctl_clear_bit(0,9); 445 __ctl_clear_bit(0,9);
453 __asm__ __volatile__( 446 asm volatile(
454 " diag %0,0,0x258\n" 447 " diag %0,0,0x258\n"
455 "0:\n" 448 "0:\n"
456 ".section __ex_table,\"a\"\n" 449 EX_TABLE(0b,0b)
457 " .align 4\n" 450 : : "a" (&refbk), "m" (refbk) : "cc");
458#ifndef CONFIG_64BIT
459 " .long 0b,0b\n"
460#else /* CONFIG_64BIT */
461 " .quad 0b,0b\n"
462#endif /* CONFIG_64BIT */
463 ".previous"
464 : : "a" (&refbk), "m" (refbk) : "cc" );
465} 451}
466 452
467asmlinkage void 453asmlinkage void
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index cfd9b8f7a523..127044e1707c 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -45,26 +45,17 @@ void diag10(unsigned long addr)
45{ 45{
46 if (addr >= 0x7ff00000) 46 if (addr >= 0x7ff00000)
47 return; 47 return;
48 asm volatile(
48#ifdef CONFIG_64BIT 49#ifdef CONFIG_64BIT
49 asm volatile ( 50 " sam31\n"
50 " sam31\n" 51 " diag %0,%0,0x10\n"
51 " diag %0,%0,0x10\n" 52 "0: sam64\n"
52 "0: sam64\n"
53 ".section __ex_table,\"a\"\n"
54 " .align 8\n"
55 " .quad 0b, 0b\n"
56 ".previous\n"
57 : : "a" (addr));
58#else 53#else
59 asm volatile ( 54 " diag %0,%0,0x10\n"
60 " diag %0,%0,0x10\n"
61 "0:\n" 55 "0:\n"
62 ".section __ex_table,\"a\"\n"
63 " .align 4\n"
64 " .long 0b, 0b\n"
65 ".previous\n"
66 : : "a" (addr));
67#endif 56#endif
57 EX_TABLE(0b,0b)
58 : : "a" (addr));
68} 59}
69 60
70void show_mem(void) 61void show_mem(void)
@@ -156,11 +147,10 @@ void __init paging_init(void)
156 S390_lowcore.kernel_asce = pgdir_k; 147 S390_lowcore.kernel_asce = pgdir_k;
157 148
158 /* enable virtual mapping in kernel mode */ 149 /* enable virtual mapping in kernel mode */
159 __asm__ __volatile__(" LCTL 1,1,%0\n" 150 __ctl_load(pgdir_k, 1, 1);
160 " LCTL 7,7,%0\n" 151 __ctl_load(pgdir_k, 7, 7);
161 " LCTL 13,13,%0\n" 152 __ctl_load(pgdir_k, 13, 13);
162 " SSM %1" 153 __raw_local_irq_ssm(ssm_mask);
163 : : "m" (pgdir_k), "m" (ssm_mask));
164 154
165 local_flush_tlb(); 155 local_flush_tlb();
166 return; 156 return;
@@ -241,11 +231,10 @@ void __init paging_init(void)
241 S390_lowcore.kernel_asce = pgdir_k; 231 S390_lowcore.kernel_asce = pgdir_k;
242 232
243 /* enable virtual mapping in kernel mode */ 233 /* enable virtual mapping in kernel mode */
244 __asm__ __volatile__("lctlg 1,1,%0\n\t" 234 __ctl_load(pgdir_k, 1, 1);
245 "lctlg 7,7,%0\n\t" 235 __ctl_load(pgdir_k, 7, 7);
246 "lctlg 13,13,%0\n\t" 236 __ctl_load(pgdir_k, 13, 13);
247 "ssm %1" 237 __raw_local_irq_ssm(ssm_mask);
248 : :"m" (pgdir_k), "m" (ssm_mask));
249 238
250 local_flush_tlb(); 239 local_flush_tlb();
251 240
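
Editorial note: paging_init no longer open-codes LCTL/lctlg and SSM sequences; the __ctl_load and __raw_local_irq_ssm helpers make the affected control registers explicit. Register roles per the s390 architecture (the comments here are editorial, not from the patch):

	__ctl_load(pgdir_k, 1, 1);	/* cr1: primary address-space control */
	__ctl_load(pgdir_k, 7, 7);	/* cr7: secondary address-space control */
	__ctl_load(pgdir_k, 13, 13);	/* cr13: home address-space control */
	__raw_local_irq_ssm(ssm_mask);	/* set system mask, enabling DAT */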
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 23fa0b289173..9d051e5687ea 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -63,44 +63,26 @@ static const u8 DASD_DIAG_CMS1[] = { 0xc3, 0xd4, 0xe2, 0xf1 };/* EBCDIC CMS1 */
63 * and function code cmd. 63 * and function code cmd.
64 * In case of an exception return 3. Otherwise return result of bitwise OR of 64 * In case of an exception return 3. Otherwise return result of bitwise OR of
65 * resulting condition code and DIAG return code. */ 65 * resulting condition code and DIAG return code. */
66static __inline__ int 66static inline int dia250(void *iob, int cmd)
67dia250(void *iob, int cmd)
68{ 67{
68 register unsigned long reg0 asm ("0") = (unsigned long) iob;
69 typedef union { 69 typedef union {
70 struct dasd_diag_init_io init_io; 70 struct dasd_diag_init_io init_io;
71 struct dasd_diag_rw_io rw_io; 71 struct dasd_diag_rw_io rw_io;
72 } addr_type; 72 } addr_type;
73 int rc; 73 int rc;
74 74
75 __asm__ __volatile__( 75 rc = 3;
76#ifdef CONFIG_64BIT 76 asm volatile(
77 " lghi %0,3\n"
78 " lgr 0,%3\n"
79 " diag 0,%2,0x250\n"
80 "0: ipm %0\n"
81 " srl %0,28\n"
82 " or %0,1\n"
83 "1:\n"
84 ".section __ex_table,\"a\"\n"
85 " .align 8\n"
86 " .quad 0b,1b\n"
87 ".previous\n"
88#else
89 " lhi %0,3\n"
90 " lr 0,%3\n"
91 " diag 0,%2,0x250\n" 77 " diag 0,%2,0x250\n"
92 "0: ipm %0\n" 78 "0: ipm %0\n"
93 " srl %0,28\n" 79 " srl %0,28\n"
94 " or %0,1\n" 80 " or %0,1\n"
95 "1:\n" 81 "1:\n"
96 ".section __ex_table,\"a\"\n" 82 EX_TABLE(0b,1b)
97 " .align 4\n" 83 : "+d" (rc), "=m" (*(addr_type *) iob)
98 " .long 0b,1b\n" 84 : "d" (cmd), "d" (reg0), "m" (*(addr_type *) iob)
99 ".previous\n" 85 : "1", "cc");
100#endif
101 : "=&d" (rc), "=m" (*(addr_type *) iob)
102 : "d" (cmd), "d" (iob), "m" (*(addr_type *) iob)
103 : "0", "1", "cc");
104 return rc; 86 return rc;
105} 87}
106 88
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index cab2c736683a..a04d9120cef0 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -89,28 +89,15 @@ MODULE_LICENSE("GPL");
89 */ 89 */
90static int xpram_page_in (unsigned long page_addr, unsigned int xpage_index) 90static int xpram_page_in (unsigned long page_addr, unsigned int xpage_index)
91{ 91{
92 int cc; 92 int cc = 2; /* return unused cc 2 if pgin traps */
93 93
94 __asm__ __volatile__ ( 94 asm volatile(
95 " lhi %0,2\n" /* return unused cc 2 if pgin traps */ 95 " .insn rre,0xb22e0000,%1,%2\n" /* pgin %1,%2 */
96 " .insn rre,0xb22e0000,%1,%2\n" /* pgin %1,%2 */ 96 "0: ipm %0\n"
97 "0: ipm %0\n" 97 " srl %0,28\n"
98 " srl %0,28\n"
99 "1:\n" 98 "1:\n"
100#ifndef CONFIG_64BIT 99 EX_TABLE(0b,1b)
101 ".section __ex_table,\"a\"\n" 100 : "+d" (cc) : "a" (__pa(page_addr)), "d" (xpage_index) : "cc");
102 " .align 4\n"
103 " .long 0b,1b\n"
104 ".previous"
105#else
106 ".section __ex_table,\"a\"\n"
107 " .align 8\n"
108 " .quad 0b,1b\n"
109 ".previous"
110#endif
111 : "=&d" (cc)
112 : "a" (__pa(page_addr)), "a" (xpage_index)
113 : "cc" );
114 if (cc == 3) 101 if (cc == 3)
115 return -ENXIO; 102 return -ENXIO;
116 if (cc == 2) { 103 if (cc == 2) {
@@ -137,28 +124,15 @@ static int xpram_page_in (unsigned long page_addr, unsigned int xpage_index)
137 */ 124 */
138static long xpram_page_out (unsigned long page_addr, unsigned int xpage_index) 125static long xpram_page_out (unsigned long page_addr, unsigned int xpage_index)
139{ 126{
140 int cc; 127 int cc = 2; /* return unused cc 2 if pgin traps */
141 128
142 __asm__ __volatile__ ( 129 asm volatile(
143 " lhi %0,2\n" /* return unused cc 2 if pgout traps */ 130 " .insn rre,0xb22f0000,%1,%2\n" /* pgout %1,%2 */
144 " .insn rre,0xb22f0000,%1,%2\n" /* pgout %1,%2 */ 131 "0: ipm %0\n"
145 "0: ipm %0\n" 132 " srl %0,28\n"
146 " srl %0,28\n"
147 "1:\n" 133 "1:\n"
148#ifndef CONFIG_64BIT 134 EX_TABLE(0b,1b)
149 ".section __ex_table,\"a\"\n" 135 : "+d" (cc) : "a" (__pa(page_addr)), "d" (xpage_index) : "cc");
150 " .align 4\n"
151 " .long 0b,1b\n"
152 ".previous"
153#else
154 ".section __ex_table,\"a\"\n"
155 " .align 8\n"
156 " .quad 0b,1b\n"
157 ".previous"
158#endif
159 : "=&d" (cc)
160 : "a" (__pa(page_addr)), "a" (xpage_index)
161 : "cc" );
162 if (cc == 3) 136 if (cc == 3)
163 return -ENXIO; 137 return -ENXIO;
164 if (cc == 2) { 138 if (cc == 2) {
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 985d1613baaa..31e335751d6d 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -100,13 +100,12 @@ service_call(sclp_cmdw_t command, void *sccb)
100{ 100{
101 int cc; 101 int cc;
102 102
103 __asm__ __volatile__( 103 asm volatile(
104 " .insn rre,0xb2200000,%1,%2\n" /* servc %1,%2 */ 104 " .insn rre,0xb2200000,%1,%2\n" /* servc %1,%2 */
105 " ipm %0\n" 105 " ipm %0\n"
106 " srl %0,28" 106 " srl %0,28"
107 : "=&d" (cc) 107 : "=&d" (cc) : "d" (command), "a" (__pa(sccb))
108 : "d" (command), "a" (__pa(sccb)) 108 : "cc", "memory");
109 : "cc", "memory" );
110 if (cc == 3) 109 if (cc == 3)
111 return -EIO; 110 return -EIO;
112 if (cc == 2) 111 if (cc == 2)
@@ -360,16 +359,6 @@ sclp_interrupt_handler(struct pt_regs *regs, __u16 code)
360 sclp_process_queue(); 359 sclp_process_queue();
361} 360}
362 361
363/* Return current Time-Of-Day clock. */
364static inline u64
365sclp_get_clock(void)
366{
367 u64 result;
368
369 asm volatile ("STCK 0(%1)" : "=m" (result) : "a" (&(result)) : "cc");
370 return result;
371}
372
373/* Convert interval in jiffies to TOD ticks. */ 362/* Convert interval in jiffies to TOD ticks. */
374static inline u64 363static inline u64
375sclp_tod_from_jiffies(unsigned long jiffies) 364sclp_tod_from_jiffies(unsigned long jiffies)
@@ -382,7 +371,6 @@ sclp_tod_from_jiffies(unsigned long jiffies)
382void 371void
383sclp_sync_wait(void) 372sclp_sync_wait(void)
384{ 373{
385 unsigned long psw_mask;
386 unsigned long flags; 374 unsigned long flags;
387 unsigned long cr0, cr0_sync; 375 unsigned long cr0, cr0_sync;
388 u64 timeout; 376 u64 timeout;
@@ -392,7 +380,7 @@ sclp_sync_wait(void)
392 timeout = 0; 380 timeout = 0;
393 if (timer_pending(&sclp_request_timer)) { 381 if (timer_pending(&sclp_request_timer)) {
394 /* Get timeout TOD value */ 382 /* Get timeout TOD value */
395 timeout = sclp_get_clock() + 383 timeout = get_clock() +
396 sclp_tod_from_jiffies(sclp_request_timer.expires - 384 sclp_tod_from_jiffies(sclp_request_timer.expires -
397 jiffies); 385 jiffies);
398 } 386 }
@@ -406,13 +394,12 @@ sclp_sync_wait(void)
406 cr0_sync |= 0x00000200; 394 cr0_sync |= 0x00000200;
407 cr0_sync &= 0xFFFFF3AC; 395 cr0_sync &= 0xFFFFF3AC;
408 __ctl_load(cr0_sync, 0, 0); 396 __ctl_load(cr0_sync, 0, 0);
409 asm volatile ("STOSM 0(%1),0x01" 397 __raw_local_irq_stosm(0x01);
410 : "=m" (psw_mask) : "a" (&psw_mask) : "memory");
411 /* Loop until driver state indicates finished request */ 398 /* Loop until driver state indicates finished request */
412 while (sclp_running_state != sclp_running_state_idle) { 399 while (sclp_running_state != sclp_running_state_idle) {
413 /* Check for expired request timer */ 400 /* Check for expired request timer */
414 if (timer_pending(&sclp_request_timer) && 401 if (timer_pending(&sclp_request_timer) &&
415 sclp_get_clock() > timeout && 402 get_clock() > timeout &&
416 del_timer(&sclp_request_timer)) 403 del_timer(&sclp_request_timer))
417 sclp_request_timer.function(sclp_request_timer.data); 404 sclp_request_timer.function(sclp_request_timer.data);
418 barrier(); 405 barrier();
diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c
index 807320a41fa4..4b868f72fe89 100644
--- a/drivers/s390/char/vmwatchdog.c
+++ b/drivers/s390/char/vmwatchdog.c
@@ -54,48 +54,20 @@ enum vmwdt_func {
54static int __diag288(enum vmwdt_func func, unsigned int timeout, 54static int __diag288(enum vmwdt_func func, unsigned int timeout,
55 char *cmd, size_t len) 55 char *cmd, size_t len)
56{ 56{
57 register unsigned long __func asm("2"); 57 register unsigned long __func asm("2") = func;
58 register unsigned long __timeout asm("3"); 58 register unsigned long __timeout asm("3") = timeout;
59 register unsigned long __cmdp asm("4"); 59 register unsigned long __cmdp asm("4") = virt_to_phys(cmd);
60 register unsigned long __cmdl asm("5"); 60 register unsigned long __cmdl asm("5") = len;
61 int err; 61 int err;
62 62
63 __func = func; 63 err = -EINVAL;
64 __timeout = timeout; 64 asm volatile(
65 __cmdp = virt_to_phys(cmd); 65 " diag %1,%3,0x288\n"
66 __cmdl = len; 66 "0: la %0,0\n"
67 err = 0; 67 "1:\n"
68 asm volatile ( 68 EX_TABLE(0b,1b)
69#ifdef CONFIG_64BIT 69 : "=d" (err) : "d"(__func), "d"(__timeout),
70 "diag %2,%4,0x288\n" 70 "d"(__cmdp), "d"(__cmdl), "0" (-EINVAL) : "1", "cc");
71 "1: \n"
72 ".section .fixup,\"ax\"\n"
73 "2: lghi %0,%1\n"
74 " jg 1b\n"
75 ".previous\n"
76 ".section __ex_table,\"a\"\n"
77 " .align 8\n"
78 " .quad 1b,2b\n"
79 ".previous\n"
80#else
81 "diag %2,%4,0x288\n"
82 "1: \n"
83 ".section .fixup,\"ax\"\n"
84 "2: lhi %0,%1\n"
85 " bras 1,3f\n"
86 " .long 1b\n"
87 "3: l 1,0(1)\n"
88 " br 1\n"
89 ".previous\n"
90 ".section __ex_table,\"a\"\n"
91 " .align 4\n"
92 " .long 1b,2b\n"
93 ".previous\n"
94#endif
95 : "+&d"(err)
96 : "i"(-EINVAL), "d"(__func), "d"(__timeout),
97 "d"(__cmdp), "d"(__cmdl)
98 : "1", "cc");
99 return err; 71 return err;
100} 72}
101 73
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index 438db483035d..1398367b5f68 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -42,18 +42,15 @@ diag210(struct diag210 * addr)
42 spin_lock_irqsave(&diag210_lock, flags); 42 spin_lock_irqsave(&diag210_lock, flags);
43 diag210_tmp = *addr; 43 diag210_tmp = *addr;
44 44
45 asm volatile ( 45 asm volatile(
46 " lhi %0,-1\n" 46 " lhi %0,-1\n"
47 " sam31\n" 47 " sam31\n"
48 " diag %1,0,0x210\n" 48 " diag %1,0,0x210\n"
49 "0: ipm %0\n" 49 "0: ipm %0\n"
50 " srl %0,28\n" 50 " srl %0,28\n"
51 "1: sam64\n" 51 "1: sam64\n"
52 ".section __ex_table,\"a\"\n" 52 EX_TABLE(0b,1b)
53 " .align 8\n" 53 : "=&d" (ccode) : "a" (__pa(&diag210_tmp)) : "cc", "memory");
54 " .quad 0b,1b\n"
55 ".previous"
56 : "=&d" (ccode) : "a" (__pa(&diag210_tmp)) : "cc", "memory" );
57 54
58 *addr = diag210_tmp; 55 *addr = diag210_tmp;
59 spin_unlock_irqrestore(&diag210_lock, flags); 56 spin_unlock_irqrestore(&diag210_lock, flags);
@@ -66,17 +63,14 @@ diag210(struct diag210 * addr)
66{ 63{
67 int ccode; 64 int ccode;
68 65
69 asm volatile ( 66 asm volatile(
70 " lhi %0,-1\n" 67 " lhi %0,-1\n"
71 " diag %1,0,0x210\n" 68 " diag %1,0,0x210\n"
72 "0: ipm %0\n" 69 "0: ipm %0\n"
73 " srl %0,28\n" 70 " srl %0,28\n"
74 "1:\n" 71 "1:\n"
75 ".section __ex_table,\"a\"\n" 72 EX_TABLE(0b,1b)
76 " .align 4\n" 73 : "=&d" (ccode) : "a" (__pa(addr)) : "cc", "memory");
77 " .long 0b,1b\n"
78 ".previous"
79 : "=&d" (ccode) : "a" (__pa(addr)) : "cc", "memory" );
80 74
81 return ccode; 75 return ccode;
82} 76}
diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h
index 95a9462f9a91..ad6d82940069 100644
--- a/drivers/s390/cio/ioasm.h
+++ b/drivers/s390/cio/ioasm.h
@@ -25,106 +25,74 @@ struct tpi_info {
25static inline int stsch(struct subchannel_id schid, 25static inline int stsch(struct subchannel_id schid,
26 volatile struct schib *addr) 26 volatile struct schib *addr)
27{ 27{
28 register struct subchannel_id reg1 asm ("1") = schid;
28 int ccode; 29 int ccode;
29 30
30 __asm__ __volatile__( 31 asm volatile(
31 " lr 1,%1\n" 32 " stsch 0(%2)\n"
32 " stsch 0(%2)\n" 33 " ipm %0\n"
33 " ipm %0\n" 34 " srl %0,28"
34 " srl %0,28" 35 : "=d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc");
35 : "=d" (ccode)
36 : "d" (schid), "a" (addr), "m" (*addr)
37 : "cc", "1" );
38 return ccode; 36 return ccode;
39} 37}
40 38
41static inline int stsch_err(struct subchannel_id schid, 39static inline int stsch_err(struct subchannel_id schid,
42 volatile struct schib *addr) 40 volatile struct schib *addr)
43{ 41{
44 int ccode; 42 register struct subchannel_id reg1 asm ("1") = schid;
43 int ccode = -EIO;
45 44
46 __asm__ __volatile__( 45 asm volatile(
47 " lhi %0,%3\n" 46 " stsch 0(%2)\n"
48 " lr 1,%1\n" 47 "0: ipm %0\n"
49 " stsch 0(%2)\n" 48 " srl %0,28\n"
50 "0: ipm %0\n"
51 " srl %0,28\n"
52 "1:\n" 49 "1:\n"
53#ifdef CONFIG_64BIT 50 EX_TABLE(0b,1b)
54 ".section __ex_table,\"a\"\n" 51 : "+d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc");
55 " .align 8\n"
56 " .quad 0b,1b\n"
57 ".previous"
58#else
59 ".section __ex_table,\"a\"\n"
60 " .align 4\n"
61 " .long 0b,1b\n"
62 ".previous"
63#endif
64 : "=&d" (ccode)
65 : "d" (schid), "a" (addr), "K" (-EIO), "m" (*addr)
66 : "cc", "1" );
67 return ccode; 52 return ccode;
68} 53}
69 54
70static inline int msch(struct subchannel_id schid, 55static inline int msch(struct subchannel_id schid,
71 volatile struct schib *addr) 56 volatile struct schib *addr)
72{ 57{
58 register struct subchannel_id reg1 asm ("1") = schid;
73 int ccode; 59 int ccode;
74 60
75 __asm__ __volatile__( 61 asm volatile(
76 " lr 1,%1\n" 62 " msch 0(%2)\n"
77 " msch 0(%2)\n" 63 " ipm %0\n"
78 " ipm %0\n" 64 " srl %0,28"
79 " srl %0,28" 65 : "=d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc");
80 : "=d" (ccode)
81 : "d" (schid), "a" (addr), "m" (*addr)
82 : "cc", "1" );
83 return ccode; 66 return ccode;
84} 67}
85 68
86static inline int msch_err(struct subchannel_id schid, 69static inline int msch_err(struct subchannel_id schid,
87 volatile struct schib *addr) 70 volatile struct schib *addr)
88{ 71{
89 int ccode; 72 register struct subchannel_id reg1 asm ("1") = schid;
73 int ccode = -EIO;
90 74
91 __asm__ __volatile__( 75 asm volatile(
92 " lhi %0,%3\n" 76 " msch 0(%2)\n"
93 " lr 1,%1\n" 77 "0: ipm %0\n"
94 " msch 0(%2)\n" 78 " srl %0,28\n"
95 "0: ipm %0\n"
96 " srl %0,28\n"
97 "1:\n" 79 "1:\n"
98#ifdef CONFIG_64BIT 80 EX_TABLE(0b,1b)
99 ".section __ex_table,\"a\"\n" 81 : "+d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc");
100 " .align 8\n"
101 " .quad 0b,1b\n"
102 ".previous"
103#else
104 ".section __ex_table,\"a\"\n"
105 " .align 4\n"
106 " .long 0b,1b\n"
107 ".previous"
108#endif
109 : "=&d" (ccode)
110 : "d" (schid), "a" (addr), "K" (-EIO), "m" (*addr)
111 : "cc", "1" );
112 return ccode; 82 return ccode;
113} 83}
114 84
115static inline int tsch(struct subchannel_id schid, 85static inline int tsch(struct subchannel_id schid,
116 volatile struct irb *addr) 86 volatile struct irb *addr)
117{ 87{
88 register struct subchannel_id reg1 asm ("1") = schid;
118 int ccode; 89 int ccode;
119 90
120 __asm__ __volatile__( 91 asm volatile(
121 " lr 1,%1\n" 92 " tsch 0(%2)\n"
122 " tsch 0(%2)\n" 93 " ipm %0\n"
123 " ipm %0\n" 94 " srl %0,28"
124 " srl %0,28" 95 : "=d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc");
125 : "=d" (ccode)
126 : "d" (schid), "a" (addr), "m" (*addr)
127 : "cc", "1" );
128 return ccode; 96 return ccode;
129} 97}
130 98
@@ -132,89 +100,77 @@ static inline int tpi( volatile struct tpi_info *addr)
132{ 100{
133 int ccode; 101 int ccode;
134 102
135 __asm__ __volatile__( 103 asm volatile(
136 " tpi 0(%1)\n" 104 " tpi 0(%1)\n"
137 " ipm %0\n" 105 " ipm %0\n"
138 " srl %0,28" 106 " srl %0,28"
139 : "=d" (ccode) 107 : "=d" (ccode) : "a" (addr), "m" (*addr) : "cc");
140 : "a" (addr), "m" (*addr)
141 : "cc", "1" );
142 return ccode; 108 return ccode;
143} 109}
144 110
145static inline int ssch(struct subchannel_id schid, 111static inline int ssch(struct subchannel_id schid,
146 volatile struct orb *addr) 112 volatile struct orb *addr)
147{ 113{
114 register struct subchannel_id reg1 asm ("1") = schid;
148 int ccode; 115 int ccode;
149 116
150 __asm__ __volatile__( 117 asm volatile(
151 " lr 1,%1\n" 118 " ssch 0(%2)\n"
152 " ssch 0(%2)\n" 119 " ipm %0\n"
153 " ipm %0\n" 120 " srl %0,28"
154 " srl %0,28" 121 : "=d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc");
155 : "=d" (ccode)
156 : "d" (schid), "a" (addr), "m" (*addr)
157 : "cc", "1" );
158 return ccode; 122 return ccode;
159} 123}
160 124
161static inline int rsch(struct subchannel_id schid) 125static inline int rsch(struct subchannel_id schid)
162{ 126{
127 register struct subchannel_id reg1 asm ("1") = schid;
163 int ccode; 128 int ccode;
164 129
165 __asm__ __volatile__( 130 asm volatile(
166 " lr 1,%1\n" 131 " rsch\n"
167 " rsch\n" 132 " ipm %0\n"
168 " ipm %0\n" 133 " srl %0,28"
169 " srl %0,28" 134 : "=d" (ccode) : "d" (reg1) : "cc");
170 : "=d" (ccode)
171 : "d" (schid)
172 : "cc", "1" );
173 return ccode; 135 return ccode;
174} 136}
175 137
176static inline int csch(struct subchannel_id schid) 138static inline int csch(struct subchannel_id schid)
177{ 139{
140 register struct subchannel_id reg1 asm ("1") = schid;
178 int ccode; 141 int ccode;
179 142
180 __asm__ __volatile__( 143 asm volatile(
181 " lr 1,%1\n" 144 " csch\n"
182 " csch\n" 145 " ipm %0\n"
183 " ipm %0\n" 146 " srl %0,28"
184 " srl %0,28" 147 : "=d" (ccode) : "d" (reg1) : "cc");
185 : "=d" (ccode)
186 : "d" (schid)
187 : "cc", "1" );
188 return ccode; 148 return ccode;
189} 149}
190 150
191static inline int hsch(struct subchannel_id schid) 151static inline int hsch(struct subchannel_id schid)
192{ 152{
153 register struct subchannel_id reg1 asm ("1") = schid;
193 int ccode; 154 int ccode;
194 155
195 __asm__ __volatile__( 156 asm volatile(
196 " lr 1,%1\n" 157 " hsch\n"
197 " hsch\n" 158 " ipm %0\n"
198 " ipm %0\n" 159 " srl %0,28"
199 " srl %0,28" 160 : "=d" (ccode) : "d" (reg1) : "cc");
200 : "=d" (ccode)
201 : "d" (schid)
202 : "cc", "1" );
203 return ccode; 161 return ccode;
204} 162}
205 163
206static inline int xsch(struct subchannel_id schid) 164static inline int xsch(struct subchannel_id schid)
207{ 165{
166 register struct subchannel_id reg1 asm ("1") = schid;
208 int ccode; 167 int ccode;
209 168
210 __asm__ __volatile__( 169 asm volatile(
211 " lr 1,%1\n" 170 " .insn rre,0xb2760000,%1,0\n"
212 " .insn rre,0xb2760000,%1,0\n" 171 " ipm %0\n"
213 " ipm %0\n" 172 " srl %0,28"
214 " srl %0,28" 173 : "=d" (ccode) : "d" (reg1) : "cc");
215 : "=d" (ccode)
216 : "d" (schid)
217 : "cc", "1" );
218 return ccode; 174 return ccode;
219} 175}
220 176
@@ -223,41 +179,27 @@ static inline int chsc(void *chsc_area)
223 typedef struct { char _[4096]; } addr_type; 179 typedef struct { char _[4096]; } addr_type;
224 int cc; 180 int cc;
225 181
226 __asm__ __volatile__ ( 182 asm volatile(
227 ".insn rre,0xb25f0000,%2,0 \n\t" 183 " .insn rre,0xb25f0000,%2,0\n"
228 "ipm %0 \n\t" 184 " ipm %0\n"
229 "srl %0,28 \n\t" 185 " srl %0,28\n"
230 : "=d" (cc), "=m" (*(addr_type *) chsc_area) 186 : "=d" (cc), "=m" (*(addr_type *) chsc_area)
231 : "d" (chsc_area), "m" (*(addr_type *) chsc_area) 187 : "d" (chsc_area), "m" (*(addr_type *) chsc_area)
232 : "cc" ); 188 : "cc");
233
234 return cc; 189 return cc;
235} 190}
236 191
237static inline int iac( void)
238{
239 int ccode;
240
241 __asm__ __volatile__(
242 " iac 1\n"
243 " ipm %0\n"
244 " srl %0,28"
245 : "=d" (ccode) : : "cc", "1" );
246 return ccode;
247}
248
249static inline int rchp(int chpid) 192static inline int rchp(int chpid)
250{ 193{
194 register unsigned int reg1 asm ("1") = chpid;
251 int ccode; 195 int ccode;
252 196
253 __asm__ __volatile__( 197 asm volatile(
254 " lr 1,%1\n" 198 " lr 1,%1\n"
255 " rchp\n" 199 " rchp\n"
256 " ipm %0\n" 200 " ipm %0\n"
257 " srl %0,28" 201 " srl %0,28"
258 : "=d" (ccode) 202 : "=d" (ccode) : "d" (reg1) : "cc");
259 : "d" (chpid)
260 : "cc", "1" );
261 return ccode; 203 return ccode;
262} 204}
263 205
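
Every helper in this header now uses the same idiom: the subchannel id is bound to hardware register 1 with GCC's register-asm extension, which removes both the hand-written "lr 1,%1" copy and the "1" register clobber. A hedged, self-contained sketch of the idiom; the function and values are illustrative, not from the tree:

/* Force 'val' into general register 2 before the asm executes; the
 * "d" constraint then merely tells the compiler the register is read,
 * so no explicit copy instruction and no clobber entry are needed. */
static inline unsigned long pass_in_r2(unsigned long val)
{
	register unsigned long reg2 asm("2") = val;
	unsigned long out;

	asm volatile(
		"	lgr	%0,%1"	/* read the value back out of r2 */
		: "=d" (out) : "d" (reg2));
	return out;
}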
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 124569362f02..49bb9e371c32 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -274,12 +274,11 @@ do_sqbs(unsigned long sch, unsigned char state, int queue,
274 register unsigned long _sch asm ("1") = sch; 274 register unsigned long _sch asm ("1") = sch;
275 unsigned long _queuestart = ((unsigned long)queue << 32) | *start; 275 unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
276 276
277 asm volatile ( 277 asm volatile(
278 " .insn rsy,0xeb000000008A,%1,0,0(%2)\n\t" 278 " .insn rsy,0xeb000000008A,%1,0,0(%2)"
279 : "+d" (_ccq), "+d" (_queuestart) 279 : "+d" (_ccq), "+d" (_queuestart)
280 : "d" ((unsigned long)state), "d" (_sch) 280 : "d" ((unsigned long)state), "d" (_sch)
281 : "memory", "cc" 281 : "memory", "cc");
282 );
283 *count = _ccq & 0xff; 282 *count = _ccq & 0xff;
284 *start = _queuestart & 0xff; 283 *start = _queuestart & 0xff;
285 284
@@ -299,12 +298,11 @@ do_eqbs(unsigned long sch, unsigned char *state, int queue,
299 unsigned long _queuestart = ((unsigned long)queue << 32) | *start; 298 unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
300 unsigned long _state = 0; 299 unsigned long _state = 0;
301 300
302 asm volatile ( 301 asm volatile(
303 " .insn rrf,0xB99c0000,%1,%2,0,0 \n\t" 302 " .insn rrf,0xB99c0000,%1,%2,0,0"
304 : "+d" (_ccq), "+d" (_queuestart), "+d" (_state) 303 : "+d" (_ccq), "+d" (_queuestart), "+d" (_state)
305 : "d" (_sch) 304 : "d" (_sch)
306 : "memory", "cc" 305 : "memory", "cc" );
307 );
308 *count = _ccq & 0xff; 306 *count = _ccq & 0xff;
309 *start = _queuestart & 0xff; 307 *start = _queuestart & 0xff;
310 *state = _state & 0xff; 308 *state = _state & 0xff;
@@ -319,69 +317,35 @@ do_eqbs(unsigned long sch, unsigned char *state, int queue,
319static inline int 317static inline int
320do_siga_sync(struct subchannel_id schid, unsigned int mask1, unsigned int mask2) 318do_siga_sync(struct subchannel_id schid, unsigned int mask1, unsigned int mask2)
321{ 319{
320 register unsigned long reg0 asm ("0") = 2;
321 register struct subchannel_id reg1 asm ("1") = schid;
322 register unsigned long reg2 asm ("2") = mask1;
323 register unsigned long reg3 asm ("3") = mask2;
322 int cc; 324 int cc;
323 325
324#ifndef CONFIG_64BIT 326 asm volatile(
325 asm volatile ( 327 " siga 0\n"
326 "lhi 0,2 \n\t" 328 " ipm %0\n"
327 "lr 1,%1 \n\t" 329 " srl %0,28\n"
328 "lr 2,%2 \n\t"
329 "lr 3,%3 \n\t"
330 "siga 0 \n\t"
331 "ipm %0 \n\t"
332 "srl %0,28 \n\t"
333 : "=d" (cc) 330 : "=d" (cc)
334 : "d" (schid), "d" (mask1), "d" (mask2) 331 : "d" (reg0), "d" (reg1), "d" (reg2), "d" (reg3) : "cc");
335 : "cc", "0", "1", "2", "3"
336 );
337#else /* CONFIG_64BIT */
338 asm volatile (
339 "lghi 0,2 \n\t"
340 "llgfr 1,%1 \n\t"
341 "llgfr 2,%2 \n\t"
342 "llgfr 3,%3 \n\t"
343 "siga 0 \n\t"
344 "ipm %0 \n\t"
345 "srl %0,28 \n\t"
346 : "=d" (cc)
347 : "d" (schid), "d" (mask1), "d" (mask2)
348 : "cc", "0", "1", "2", "3"
349 );
350#endif /* CONFIG_64BIT */
351 return cc; 332 return cc;
352} 333}
353 334
354static inline int 335static inline int
355do_siga_input(struct subchannel_id schid, unsigned int mask) 336do_siga_input(struct subchannel_id schid, unsigned int mask)
356{ 337{
338 register unsigned long reg0 asm ("0") = 1;
339 register struct subchannel_id reg1 asm ("1") = schid;
340 register unsigned long reg2 asm ("2") = mask;
357 int cc; 341 int cc;
358 342
359#ifndef CONFIG_64BIT 343 asm volatile(
360 asm volatile ( 344 " siga 0\n"
361 "lhi 0,1 \n\t" 345 " ipm %0\n"
362 "lr 1,%1 \n\t" 346 " srl %0,28\n"
363 "lr 2,%2 \n\t"
364 "siga 0 \n\t"
365 "ipm %0 \n\t"
366 "srl %0,28 \n\t"
367 : "=d" (cc)
368 : "d" (schid), "d" (mask)
369 : "cc", "0", "1", "2", "memory"
370 );
371#else /* CONFIG_64BIT */
372 asm volatile (
373 "lghi 0,1 \n\t"
374 "llgfr 1,%1 \n\t"
375 "llgfr 2,%2 \n\t"
376 "siga 0 \n\t"
377 "ipm %0 \n\t"
378 "srl %0,28 \n\t"
379 : "=d" (cc) 347 : "=d" (cc)
380 : "d" (schid), "d" (mask) 348 : "d" (reg0), "d" (reg1), "d" (reg2) : "cc", "memory");
381 : "cc", "0", "1", "2", "memory"
382 );
383#endif /* CONFIG_64BIT */
384
385 return cc; 349 return cc;
386} 350}
387 351
@@ -389,93 +353,35 @@ static inline int
389do_siga_output(unsigned long schid, unsigned long mask, __u32 *bb, 353do_siga_output(unsigned long schid, unsigned long mask, __u32 *bb,
390 unsigned int fc) 354 unsigned int fc)
391{ 355{
356 register unsigned long __fc asm("0") = fc;
357 register unsigned long __schid asm("1") = schid;
358 register unsigned long __mask asm("2") = mask;
392 int cc; 359 int cc;
393 __u32 busy_bit; 360
394 361 asm volatile(
395#ifndef CONFIG_64BIT 362 " siga 0\n"
396 asm volatile ( 363 "0: ipm %0\n"
397 "lhi 0,0 \n\t" 364 " srl %0,28\n"
398 "lr 1,%2 \n\t" 365 "1:\n"
399 "lr 2,%3 \n\t" 366 EX_TABLE(0b,1b)
400 "siga 0 \n\t" 367 : "=d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask)
401 "0:" 368 : "0" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION)
402 "ipm %0 \n\t" 369 : "cc", "memory");
403 "srl %0,28 \n\t" 370 (*bb) = ((unsigned int) __fc) >> 31;
404 "srl 0,31 \n\t"
405 "lr %1,0 \n\t"
406 "1: \n\t"
407 ".section .fixup,\"ax\"\n\t"
408 "2: \n\t"
409 "lhi %0,%4 \n\t"
410 "bras 1,3f \n\t"
411 ".long 1b \n\t"
412 "3: \n\t"
413 "l 1,0(1) \n\t"
414 "br 1 \n\t"
415 ".previous \n\t"
416 ".section __ex_table,\"a\"\n\t"
417 ".align 4 \n\t"
418 ".long 0b,2b \n\t"
419 ".previous \n\t"
420 : "=d" (cc), "=d" (busy_bit)
421 : "d" (schid), "d" (mask),
422 "i" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION)
423 : "cc", "0", "1", "2", "memory"
424 );
425#else /* CONFIG_64BIT */
426 asm volatile (
427 "llgfr 0,%5 \n\t"
428 "lgr 1,%2 \n\t"
429 "llgfr 2,%3 \n\t"
430 "siga 0 \n\t"
431 "0:"
432 "ipm %0 \n\t"
433 "srl %0,28 \n\t"
434 "srl 0,31 \n\t"
435 "llgfr %1,0 \n\t"
436 "1: \n\t"
437 ".section .fixup,\"ax\"\n\t"
438 "lghi %0,%4 \n\t"
439 "jg 1b \n\t"
440 ".previous\n\t"
441 ".section __ex_table,\"a\"\n\t"
442 ".align 8 \n\t"
443 ".quad 0b,1b \n\t"
444 ".previous \n\t"
445 : "=d" (cc), "=d" (busy_bit)
446 : "d" (schid), "d" (mask),
447 "i" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION), "d" (fc)
448 : "cc", "0", "1", "2", "memory"
449 );
450#endif /* CONFIG_64BIT */
451
452 (*bb) = busy_bit;
453 return cc; 371 return cc;
454} 372}
455 373
456static inline unsigned long 374static inline unsigned long
457do_clear_global_summary(void) 375do_clear_global_summary(void)
458{ 376{
459 377 register unsigned long __fn asm("1") = 3;
460 unsigned long time; 378 register unsigned long __tmp asm("2");
461 379 register unsigned long __time asm("3");
462#ifndef CONFIG_64BIT 380
463 asm volatile ( 381 asm volatile(
464 "lhi 1,3 \n\t" 382 " .insn rre,0xb2650000,2,0"
465 ".insn rre,0xb2650000,2,0 \n\t" 383 : "+d" (__fn), "=d" (__tmp), "=d" (__time));
466 "lr %0,3 \n\t" 384 return __time;
467 : "=d" (time) : : "cc", "1", "2", "3"
468 );
469#else /* CONFIG_64BIT */
470 asm volatile (
471 "lghi 1,3 \n\t"
472 ".insn rre,0xb2650000,2,0 \n\t"
473 "lgr %0,3 \n\t"
474 : "=d" (time) : : "cc", "1", "2", "3"
475 );
476#endif /* CONFIG_64BIT */
477
478 return time;
479} 385}
480 386
481/* 387/*
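
Two details of the reworked do_siga_output() are worth noting. First, the .fixup trampoline is gone: the condition-code variable is preloaded with the error value through the matching "0" input constraint, so if siga faults, the EX_TABLE entry resumes execution at label 1 past the ipm/srl and the preloaded value is returned unchanged. Second, the busy bit now travels back in the leftmost bit of the lower 32 bits of register 0 and is extracted in C. A hedged sketch of the combined pattern, reusing an EX_TABLE-style macro as sketched earlier; the error value 2 is illustrative only:

static inline int siga_output_sketch(unsigned long schid, unsigned long mask,
				     unsigned int *busy_bit, unsigned int fc)
{
	register unsigned long __fc asm("0") = fc;
	register unsigned long __schid asm("1") = schid;
	register unsigned long __mask asm("2") = mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"0:	ipm	%0\n"
		"	srl	%0,28\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "=d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask)
		: "0" (2)	/* illustrative error code, see above */
		: "cc", "memory");
	*busy_bit = ((unsigned int) __fc) >> 31;
	return cc;
}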
diff --git a/drivers/s390/net/iucv.c b/drivers/s390/net/iucv.c
index 821dde86e240..809dd8d7f47a 100644
--- a/drivers/s390/net/iucv.c
+++ b/drivers/s390/net/iucv.c
@@ -534,19 +534,15 @@ iucv_add_handler (handler *new)
534 * 534 *
535 * Returns: return code from CP's IUCV call 535 * Returns: return code from CP's IUCV call
536 */ 536 */
537static __inline__ ulong 537static inline ulong b2f0(__u32 code, void *parm)
538b2f0(__u32 code, void *parm)
539{ 538{
539 register unsigned long reg0 asm ("0");
540 register unsigned long reg1 asm ("1");
540 iucv_dumpit("iparml before b2f0 call:", parm, sizeof(iucv_param)); 541 iucv_dumpit("iparml before b2f0 call:", parm, sizeof(iucv_param));
541 542
542 asm volatile ( 543 reg0 = code;
543 "LRA 1,0(%1)\n\t" 544 reg1 = virt_to_phys(parm);
544 "LR 0,%0\n\t" 545 asm volatile(".long 0xb2f01000" : : "d" (reg0), "a" (reg1));
545 ".long 0xb2f01000"
546 :
547 : "d" (code), "a" (parm)
548 : "0", "1"
549 );
550 546
551 iucv_dumpit("iparml after b2f0 call:", parm, sizeof(iucv_param)); 547 iucv_dumpit("iparml after b2f0 call:", parm, sizeof(iucv_param));
552 548
@@ -1248,6 +1244,8 @@ iucv_purge (__u16 pathid, __u32 msgid, __u32 srccls, __u32 *audit)
1248static int 1244static int
1249iucv_query_generic(int want_maxconn) 1245iucv_query_generic(int want_maxconn)
1250{ 1246{
1247 register unsigned long reg0 asm ("0");
1248 register unsigned long reg1 asm ("1");
1251 iparml_purge *parm = (iparml_purge *)grab_param(); 1249 iparml_purge *parm = (iparml_purge *)grab_param();
1252 int bufsize, maxconn; 1250 int bufsize, maxconn;
1253 int ccode; 1251 int ccode;
@@ -1256,18 +1254,15 @@ iucv_query_generic(int want_maxconn)
1256 * Call b2f0 and store R0 (max buffer size), 1254 * Call b2f0 and store R0 (max buffer size),
1257 * R1 (max connections) and CC. 1255 * R1 (max connections) and CC.
1258 */ 1256 */
1259 asm volatile ( 1257 reg0 = QUERY;
1260 "LRA 1,0(%4)\n\t" 1258 reg1 = virt_to_phys(parm);
1261 "LR 0,%3\n\t" 1259 asm volatile(
1262 ".long 0xb2f01000\n\t" 1260 " .long 0xb2f01000\n"
1263 "IPM %0\n\t" 1261 " ipm %0\n"
1264 "SRL %0,28\n\t" 1262 " srl %0,28\n"
1265 "ST 0,%1\n\t" 1263 : "=d" (ccode), "+d" (reg0), "+d" (reg1) : : "cc");
1266 "ST 1,%2\n\t" 1264 bufsize = reg0;
1267 : "=d" (ccode), "=m" (bufsize), "=m" (maxconn) 1265 maxconn = reg1;
1268 : "d" (QUERY), "a" (parm)
1269 : "0", "1", "cc"
1270 );
1271 release_param(parm); 1266 release_param(parm);
1272 1267
1273 if (ccode) 1268 if (ccode)
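
The same transformation is applied to both IUCV call sites above: the LRA/LR setup and the ST store-backs disappear from the asm string, replaced by plain C assignments to register-asm variables before and after the instruction. Because the variables are declared "+d", the compiler knows registers 0 and 1 are both consumed and modified by the 0xb2f01000 opcode. A condensed, hedged restatement of the pattern:

register unsigned long reg0 asm("0");	/* function code in, R0 out */
register unsigned long reg1 asm("1");	/* parameter address in, R1 out */
int ccode;

reg0 = QUERY;
reg1 = virt_to_phys(parm);
asm volatile(
	"	.long	0xb2f01000\n"	/* the IUCV instruction */
	"	ipm	%0\n"
	"	srl	%0,28\n"
	: "=d" (ccode), "+d" (reg0), "+d" (reg1) : : "cc");
bufsize = reg0;		/* R0: maximum buffer size */
maxconn = reg1;		/* R1: maximum connections */

Note also that virt_to_phys() now does in C what the removed "LRA 1,0(%4)" did in assembly: translate the parameter-list address to a real address.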
diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c
index a914129a4da9..479364d0332a 100644
--- a/drivers/s390/s390mach.c
+++ b/drivers/s390/s390mach.c
@@ -253,11 +253,12 @@ s390_revalidate_registers(struct mci *mci)
253 kill_task = 1; 253 kill_task = 1;
254 254
255#ifndef CONFIG_64BIT 255#ifndef CONFIG_64BIT
256 asm volatile("ld 0,0(%0)\n" 256 asm volatile(
257 "ld 2,8(%0)\n" 257 " ld 0,0(%0)\n"
258 "ld 4,16(%0)\n" 258 " ld 2,8(%0)\n"
259 "ld 6,24(%0)" 259 " ld 4,16(%0)\n"
260 : : "a" (&S390_lowcore.floating_pt_save_area)); 260 " ld 6,24(%0)"
261 : : "a" (&S390_lowcore.floating_pt_save_area));
261#endif 262#endif
262 263
263 if (MACHINE_HAS_IEEE) { 264 if (MACHINE_HAS_IEEE) {
@@ -274,37 +275,36 @@ s390_revalidate_registers(struct mci *mci)
274 * Floating point control register can't be restored. 275 * Floating point control register can't be restored.
275 * Task will be terminated. 276 * Task will be terminated.
276 */ 277 */
277 asm volatile ("lfpc 0(%0)" : : "a" (&zero), "m" (zero)); 278 asm volatile("lfpc 0(%0)" : : "a" (&zero), "m" (zero));
278 kill_task = 1; 279 kill_task = 1;
279 280
280 } 281 } else
281 else 282 asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area));
282 asm volatile ( 283
283 "lfpc 0(%0)" 284 asm volatile(
284 : : "a" (fpt_creg_save_area)); 285 " ld 0,0(%0)\n"
285 286 " ld 1,8(%0)\n"
286 asm volatile("ld 0,0(%0)\n" 287 " ld 2,16(%0)\n"
287 "ld 1,8(%0)\n" 288 " ld 3,24(%0)\n"
288 "ld 2,16(%0)\n" 289 " ld 4,32(%0)\n"
289 "ld 3,24(%0)\n" 290 " ld 5,40(%0)\n"
290 "ld 4,32(%0)\n" 291 " ld 6,48(%0)\n"
291 "ld 5,40(%0)\n" 292 " ld 7,56(%0)\n"
292 "ld 6,48(%0)\n" 293 " ld 8,64(%0)\n"
293 "ld 7,56(%0)\n" 294 " ld 9,72(%0)\n"
294 "ld 8,64(%0)\n" 295 " ld 10,80(%0)\n"
295 "ld 9,72(%0)\n" 296 " ld 11,88(%0)\n"
296 "ld 10,80(%0)\n" 297 " ld 12,96(%0)\n"
297 "ld 11,88(%0)\n" 298 " ld 13,104(%0)\n"
298 "ld 12,96(%0)\n" 299 " ld 14,112(%0)\n"
299 "ld 13,104(%0)\n" 300 " ld 15,120(%0)\n"
300 "ld 14,112(%0)\n" 301 : : "a" (fpt_save_area));
301 "ld 15,120(%0)\n"
302 : : "a" (fpt_save_area));
303 } 302 }
304 303
305 /* Revalidate access registers */ 304 /* Revalidate access registers */
306 asm volatile("lam 0,15,0(%0)" 305 asm volatile(
307 : : "a" (&S390_lowcore.access_regs_save_area)); 306 " lam 0,15,0(%0)"
307 : : "a" (&S390_lowcore.access_regs_save_area));
308 if (!mci->ar) 308 if (!mci->ar)
309 /* 309 /*
310 * Access registers have unknown contents. 310 * Access registers have unknown contents.
@@ -321,11 +321,13 @@ s390_revalidate_registers(struct mci *mci)
321 s390_handle_damage("invalid control registers."); 321 s390_handle_damage("invalid control registers.");
322 else 322 else
323#ifdef CONFIG_64BIT 323#ifdef CONFIG_64BIT
324 asm volatile("lctlg 0,15,0(%0)" 324 asm volatile(
325 : : "a" (&S390_lowcore.cregs_save_area)); 325 " lctlg 0,15,0(%0)"
326 : : "a" (&S390_lowcore.cregs_save_area));
326#else 327#else
327 asm volatile("lctl 0,15,0(%0)" 328 asm volatile(
328 : : "a" (&S390_lowcore.cregs_save_area)); 329 " lctl 0,15,0(%0)"
330 : : "a" (&S390_lowcore.cregs_save_area));
329#endif 331#endif
330 332
331 /* 333 /*
@@ -339,20 +341,23 @@ s390_revalidate_registers(struct mci *mci)
339 * old contents (should be zero) otherwise set it to zero. 341 * old contents (should be zero) otherwise set it to zero.
340 */ 342 */
341 if (!mci->pr) 343 if (!mci->pr)
342 asm volatile("sr 0,0\n" 344 asm volatile(
343 "sckpf" 345 " sr 0,0\n"
344 : : : "0", "cc"); 346 " sckpf"
347 : : : "0", "cc");
345 else 348 else
346 asm volatile( 349 asm volatile(
347 "l 0,0(%0)\n" 350 " l 0,0(%0)\n"
348 "sckpf" 351 " sckpf"
349 : : "a" (&S390_lowcore.tod_progreg_save_area) : "0", "cc"); 352 : : "a" (&S390_lowcore.tod_progreg_save_area)
353 : "0", "cc");
350#endif 354#endif
351 355
352 /* Revalidate clock comparator register */ 356 /* Revalidate clock comparator register */
353 asm volatile ("stck 0(%1)\n" 357 asm volatile(
354 "sckc 0(%1)" 358 " stck 0(%1)\n"
355 : "=m" (tmpclock) : "a" (&(tmpclock)) : "cc", "memory"); 359 " sckc 0(%1)"
360 : "=m" (tmpclock) : "a" (&(tmpclock)) : "cc", "memory");
356 361
357 /* Check if old PSW is valid */ 362 /* Check if old PSW is valid */
358 if (!mci->wp) 363 if (!mci->wp)
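
The clock-comparator revalidation above stores the current TOD clock and immediately loads the stored value into the clock comparator. A hedged standalone sketch of the two-instruction sequence:

/* stck writes the 64-bit TOD clock to tmp; sckc then sets the clock
 * comparator from the same location, so the comparator is known to
 * hold a recent, valid value after a machine check. */
static inline void revalidate_clock_comparator(void)
{
	unsigned long long tmp;

	asm volatile(
		"	stck	0(%1)\n"
		"	sckc	0(%1)"
		: "=m" (tmp) : "a" (&tmp) : "cc");
}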
diff --git a/include/asm-s390/appldata.h b/include/asm-s390/appldata.h
index b1770703b706..79283dac8281 100644
--- a/include/asm-s390/appldata.h
+++ b/include/asm-s390/appldata.h
@@ -80,7 +80,7 @@ static inline int appldata_asm(struct appldata_product_id *id,
80 parm_list.product_id_addr = (unsigned long) id; 80 parm_list.product_id_addr = (unsigned long) id;
81 parm_list.buffer_addr = virt_to_phys(buffer); 81 parm_list.buffer_addr = virt_to_phys(buffer);
82 asm volatile( 82 asm volatile(
83 "diag %1,%0,0xdc" 83 " diag %1,%0,0xdc"
84 : "=d" (ry) 84 : "=d" (ry)
85 : "d" (&parm_list), "m" (parm_list), "m" (*id) 85 : "d" (&parm_list), "m" (parm_list), "m" (*id)
86 : "cc"); 86 : "cc");
diff --git a/include/asm-s390/atomic.h b/include/asm-s390/atomic.h
index 399bf02894dd..af20c7462485 100644
--- a/include/asm-s390/atomic.h
+++ b/include/asm-s390/atomic.h
@@ -30,20 +30,43 @@ typedef struct {
30 30
31#ifdef __KERNEL__ 31#ifdef __KERNEL__
32 32
33#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
34
33#define __CS_LOOP(ptr, op_val, op_string) ({ \ 35#define __CS_LOOP(ptr, op_val, op_string) ({ \
34 typeof(ptr->counter) old_val, new_val; \ 36 typeof(ptr->counter) old_val, new_val; \
35 __asm__ __volatile__(" l %0,0(%3)\n" \ 37 asm volatile( \
36 "0: lr %1,%0\n" \ 38 " l %0,%2\n" \
37 op_string " %1,%4\n" \ 39 "0: lr %1,%0\n" \
38 " cs %0,%1,0(%3)\n" \ 40 op_string " %1,%3\n" \
39 " jl 0b" \ 41 " cs %0,%1,%2\n" \
40 : "=&d" (old_val), "=&d" (new_val), \ 42 " jl 0b" \
41 "=m" (((atomic_t *)(ptr))->counter) \ 43 : "=&d" (old_val), "=&d" (new_val), \
42 : "a" (ptr), "d" (op_val), \ 44 "=Q" (((atomic_t *)(ptr))->counter) \
43 "m" (((atomic_t *)(ptr))->counter) \ 45 : "d" (op_val), "Q" (((atomic_t *)(ptr))->counter) \
44 : "cc", "memory" ); \ 46 : "cc", "memory"); \
45 new_val; \ 47 new_val; \
46}) 48})
49
50#else /* __GNUC__ */
51
52#define __CS_LOOP(ptr, op_val, op_string) ({ \
53 typeof(ptr->counter) old_val, new_val; \
54 asm volatile( \
55 " l %0,0(%3)\n" \
56 "0: lr %1,%0\n" \
57 op_string " %1,%4\n" \
58 " cs %0,%1,0(%3)\n" \
59 " jl 0b" \
60 : "=&d" (old_val), "=&d" (new_val), \
61 "=m" (((atomic_t *)(ptr))->counter) \
62 : "a" (ptr), "d" (op_val), \
63 "m" (((atomic_t *)(ptr))->counter) \
64 : "cc", "memory"); \
65 new_val; \
66})
67
68#endif /* __GNUC__ */
69
47#define atomic_read(v) ((v)->counter) 70#define atomic_read(v) ((v)->counter)
48#define atomic_set(v,i) (((v)->counter) = (i)) 71#define atomic_set(v,i) (((v)->counter) = (i))
49 72
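
__CS_LOOP is the classic load / modify / compare-and-swap retry loop: cs stores the new value only if the counter still holds the value loaded into %0, otherwise it reloads %0 and jl branches back. The gcc > 3.2 gate selects the "Q" constraint variant, which lets l and cs address the counter directly instead of going through an address register. The arithmetic helpers are thin wrappers that splice an opcode into the loop; a hedged sketch:

/* Hedged sketch of how the add/sub helpers are built on __CS_LOOP. */
static inline int atomic_add_return_sketch(int i, atomic_t *v)
{
	return __CS_LOOP(v, i, "ar");	/* "ar": 32-bit add register */
}

static inline int atomic_sub_return_sketch(int i, atomic_t *v)
{
	return __CS_LOOP(v, i, "sr");	/* "sr": 32-bit subtract register */
}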
@@ -81,10 +104,19 @@ static __inline__ void atomic_set_mask(unsigned long mask, atomic_t * v)
81 104
82static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new) 105static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new)
83{ 106{
84 __asm__ __volatile__(" cs %0,%3,0(%2)\n" 107#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
85 : "+d" (old), "=m" (v->counter) 108 asm volatile(
86 : "a" (v), "d" (new), "m" (v->counter) 109 " cs %0,%2,%1"
87 : "cc", "memory" ); 110 : "+d" (old), "=Q" (v->counter)
111 : "d" (new), "Q" (v->counter)
112 : "cc", "memory");
113#else /* __GNUC__ */
114 asm volatile(
115 " cs %0,%3,0(%2)"
116 : "+d" (old), "=m" (v->counter)
117 : "a" (v), "d" (new), "m" (v->counter)
118 : "cc", "memory");
119#endif /* __GNUC__ */
88 return old; 120 return old;
89} 121}
90 122
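
atomic_cmpxchg() compiles to a single cs: it returns the value actually found in the counter, which equals old exactly when the swap took place. A short, hedged usage sketch:

/* Claim a one-shot flag exactly once. */
static atomic_t flag = ATOMIC_INIT(0);

static int claim_once(void)
{
	return atomic_cmpxchg(&flag, 0, 1) == 0; /* true for one caller */
}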
@@ -113,20 +145,43 @@ typedef struct {
113} __attribute__ ((aligned (8))) atomic64_t; 145} __attribute__ ((aligned (8))) atomic64_t;
114#define ATOMIC64_INIT(i) { (i) } 146#define ATOMIC64_INIT(i) { (i) }
115 147
148#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
149
116#define __CSG_LOOP(ptr, op_val, op_string) ({ \ 150#define __CSG_LOOP(ptr, op_val, op_string) ({ \
117 typeof(ptr->counter) old_val, new_val; \ 151 typeof(ptr->counter) old_val, new_val; \
118 __asm__ __volatile__(" lg %0,0(%3)\n" \ 152 asm volatile( \
119 "0: lgr %1,%0\n" \ 153 " lg %0,%2\n" \
120 op_string " %1,%4\n" \ 154 "0: lgr %1,%0\n" \
121 " csg %0,%1,0(%3)\n" \ 155 op_string " %1,%3\n" \
122 " jl 0b" \ 156 " csg %0,%1,%2\n" \
123 : "=&d" (old_val), "=&d" (new_val), \ 157 " jl 0b" \
124 "=m" (((atomic_t *)(ptr))->counter) \ 158 : "=&d" (old_val), "=&d" (new_val), \
125 : "a" (ptr), "d" (op_val), \ 159 "=Q" (((atomic_t *)(ptr))->counter) \
126 "m" (((atomic_t *)(ptr))->counter) \ 160 : "d" (op_val), "Q" (((atomic_t *)(ptr))->counter) \
127 : "cc", "memory" ); \ 161 : "cc", "memory" ); \
128 new_val; \ 162 new_val; \
129}) 163})
164
165#else /* __GNUC__ */
166
167#define __CSG_LOOP(ptr, op_val, op_string) ({ \
168 typeof(ptr->counter) old_val, new_val; \
169 asm volatile( \
170 " lg %0,0(%3)\n" \
171 "0: lgr %1,%0\n" \
172 op_string " %1,%4\n" \
173 " csg %0,%1,0(%3)\n" \
174 " jl 0b" \
175 : "=&d" (old_val), "=&d" (new_val), \
176 "=m" (((atomic_t *)(ptr))->counter) \
177 : "a" (ptr), "d" (op_val), \
178 "m" (((atomic_t *)(ptr))->counter) \
179 : "cc", "memory" ); \
180 new_val; \
181})
182
183#endif /* __GNUC__ */
184
130#define atomic64_read(v) ((v)->counter) 185#define atomic64_read(v) ((v)->counter)
131#define atomic64_set(v,i) (((v)->counter) = (i)) 186#define atomic64_set(v,i) (((v)->counter) = (i))
132 187
@@ -163,10 +218,19 @@ static __inline__ void atomic64_set_mask(unsigned long mask, atomic64_t * v)
163static __inline__ long long atomic64_cmpxchg(atomic64_t *v, 218static __inline__ long long atomic64_cmpxchg(atomic64_t *v,
164 long long old, long long new) 219 long long old, long long new)
165{ 220{
166 __asm__ __volatile__(" csg %0,%3,0(%2)\n" 221#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
167 : "+d" (old), "=m" (v->counter) 222 asm volatile(
168 : "a" (v), "d" (new), "m" (v->counter) 223 " csg %0,%2,%1"
169 : "cc", "memory" ); 224 : "+d" (old), "=Q" (v->counter)
225 : "d" (new), "Q" (v->counter)
226 : "cc", "memory");
227#else /* __GNUC__ */
228 asm volatile(
229 " csg %0,%3,0(%2)"
230 : "+d" (old), "=m" (v->counter)
231 : "a" (v), "d" (new), "m" (v->counter)
232 : "cc", "memory");
233#endif /* __GNUC__ */
170 return old; 234 return old;
171} 235}
172 236
diff --git a/include/asm-s390/bitops.h b/include/asm-s390/bitops.h
index 0ddcdba79e4a..f79c9b792af1 100644
--- a/include/asm-s390/bitops.h
+++ b/include/asm-s390/bitops.h
@@ -67,16 +67,35 @@ extern const char _sb_findmap[];
67#define __BITOPS_AND "nr" 67#define __BITOPS_AND "nr"
68#define __BITOPS_XOR "xr" 68#define __BITOPS_XOR "xr"
69 69
70#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ 70#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
71 __asm__ __volatile__(" l %0,0(%4)\n" \ 71
72 "0: lr %1,%0\n" \ 72#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \
73 __op_string " %1,%3\n" \ 73 asm volatile( \
74 " cs %0,%1,0(%4)\n" \ 74 " l %0,%2\n" \
75 " jl 0b" \ 75 "0: lr %1,%0\n" \
76 : "=&d" (__old), "=&d" (__new), \ 76 __op_string " %1,%3\n" \
77 "=m" (*(unsigned long *) __addr) \ 77 " cs %0,%1,%2\n" \
78 : "d" (__val), "a" (__addr), \ 78 " jl 0b" \
79 "m" (*(unsigned long *) __addr) : "cc" ); 79 : "=&d" (__old), "=&d" (__new), \
80 "=Q" (*(unsigned long *) __addr) \
81 : "d" (__val), "Q" (*(unsigned long *) __addr) \
82 : "cc");
83
84#else /* __GNUC__ */
85
86#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \
87 asm volatile( \
88 " l %0,0(%4)\n" \
89 "0: lr %1,%0\n" \
90 __op_string " %1,%3\n" \
91 " cs %0,%1,0(%4)\n" \
92 " jl 0b" \
93 : "=&d" (__old), "=&d" (__new), \
94 "=m" (*(unsigned long *) __addr) \
95 : "d" (__val), "a" (__addr), \
96 "m" (*(unsigned long *) __addr) : "cc");
97
98#endif /* __GNUC__ */
80 99
81#else /* __s390x__ */ 100#else /* __s390x__ */
82 101
@@ -86,21 +105,41 @@ extern const char _sb_findmap[];
86#define __BITOPS_AND "ngr" 105#define __BITOPS_AND "ngr"
87#define __BITOPS_XOR "xgr" 106#define __BITOPS_XOR "xgr"
88 107
89#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ 108#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
90 __asm__ __volatile__(" lg %0,0(%4)\n" \ 109
91 "0: lgr %1,%0\n" \ 110#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \
92 __op_string " %1,%3\n" \ 111 asm volatile( \
93 " csg %0,%1,0(%4)\n" \ 112 " lg %0,%2\n" \
94 " jl 0b" \ 113 "0: lgr %1,%0\n" \
95 : "=&d" (__old), "=&d" (__new), \ 114 __op_string " %1,%3\n" \
96 "=m" (*(unsigned long *) __addr) \ 115 " csg %0,%1,%2\n" \
97 : "d" (__val), "a" (__addr), \ 116 " jl 0b" \
98 "m" (*(unsigned long *) __addr) : "cc" ); 117 : "=&d" (__old), "=&d" (__new), \
118 "=Q" (*(unsigned long *) __addr) \
119 : "d" (__val), "Q" (*(unsigned long *) __addr) \
120 : "cc");
121
122#else /* __GNUC__ */
123
124#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \
125 asm volatile( \
126 " lg %0,0(%4)\n" \
127 "0: lgr %1,%0\n" \
128 __op_string " %1,%3\n" \
129 " csg %0,%1,0(%4)\n" \
130 " jl 0b" \
131 : "=&d" (__old), "=&d" (__new), \
132 "=m" (*(unsigned long *) __addr) \
133 : "d" (__val), "a" (__addr), \
134 "m" (*(unsigned long *) __addr) : "cc");
135
136
137#endif /* __GNUC__ */
99 138
100#endif /* __s390x__ */ 139#endif /* __s390x__ */
101 140
102#define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE) 141#define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE)
103#define __BITOPS_BARRIER() __asm__ __volatile__ ( "" : : : "memory" ) 142#define __BITOPS_BARRIER() asm volatile("" : : : "memory")
104 143
105#ifdef CONFIG_SMP 144#ifdef CONFIG_SMP
106/* 145/*
@@ -217,10 +256,10 @@ static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
217 unsigned long addr; 256 unsigned long addr;
218 257
219 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 258 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
220 asm volatile("oc 0(1,%1),0(%2)" 259 asm volatile(
221 : "=m" (*(char *) addr) 260 " oc 0(1,%1),0(%2)"
222 : "a" (addr), "a" (_oi_bitmap + (nr & 7)), 261 : "=m" (*(char *) addr) : "a" (addr),
223 "m" (*(char *) addr) : "cc" ); 262 "a" (_oi_bitmap + (nr & 7)), "m" (*(char *) addr) : "cc" );
224} 263}
225 264
226static inline void 265static inline void
@@ -229,40 +268,7 @@ __constant_set_bit(const unsigned long nr, volatile unsigned long *ptr)
229 unsigned long addr; 268 unsigned long addr;
230 269
231 addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 270 addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
232 switch (nr&7) { 271 *(unsigned char *) addr |= 1 << (nr & 7);
233 case 0:
234 asm volatile ("oi 0(%1),0x01" : "=m" (*(char *) addr)
235 : "a" (addr), "m" (*(char *) addr) : "cc" );
236 break;
237 case 1:
238 asm volatile ("oi 0(%1),0x02" : "=m" (*(char *) addr)
239 : "a" (addr), "m" (*(char *) addr) : "cc" );
240 break;
241 case 2:
242 asm volatile ("oi 0(%1),0x04" : "=m" (*(char *) addr)
243 : "a" (addr), "m" (*(char *) addr) : "cc" );
244 break;
245 case 3:
246 asm volatile ("oi 0(%1),0x08" : "=m" (*(char *) addr)
247 : "a" (addr), "m" (*(char *) addr) : "cc" );
248 break;
249 case 4:
250 asm volatile ("oi 0(%1),0x10" : "=m" (*(char *) addr)
251 : "a" (addr), "m" (*(char *) addr) : "cc" );
252 break;
253 case 5:
254 asm volatile ("oi 0(%1),0x20" : "=m" (*(char *) addr)
255 : "a" (addr), "m" (*(char *) addr) : "cc" );
256 break;
257 case 6:
258 asm volatile ("oi 0(%1),0x40" : "=m" (*(char *) addr)
259 : "a" (addr), "m" (*(char *) addr) : "cc" );
260 break;
261 case 7:
262 asm volatile ("oi 0(%1),0x80" : "=m" (*(char *) addr)
263 : "a" (addr), "m" (*(char *) addr) : "cc" );
264 break;
265 }
266} 272}
267 273
268#define set_bit_simple(nr,addr) \ 274#define set_bit_simple(nr,addr) \
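
The eight-way switch in __constant_set_bit() could be deleted because, with nr a compile-time constant, gcc folds the plain C expression to the same single or-immediate instruction the individual cases spelled out. A hedged illustration:

/* For a constant bit number this compiles to one "oi" instruction,
 * e.g. oi 0(%rX),0x08 for bit 3, exactly what case 3 hand-coded. */
static inline void set_bit3_in_byte(unsigned char *byte)
{
	*byte |= 1 << 3;
}

The same reasoning applies to the ni (and-immediate) and xi (xor-immediate) switches removed from __constant_clear_bit() and __constant_change_bit() further down.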
@@ -279,10 +285,10 @@ __clear_bit(unsigned long nr, volatile unsigned long *ptr)
279 unsigned long addr; 285 unsigned long addr;
280 286
281 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 287 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
282 asm volatile("nc 0(1,%1),0(%2)" 288 asm volatile(
283 : "=m" (*(char *) addr) 289 " nc 0(1,%1),0(%2)"
284 : "a" (addr), "a" (_ni_bitmap + (nr & 7)), 290 : "=m" (*(char *) addr) : "a" (addr),
285 "m" (*(char *) addr) : "cc" ); 291 "a" (_ni_bitmap + (nr & 7)), "m" (*(char *) addr) : "cc");
286} 292}
287 293
288static inline void 294static inline void
@@ -291,40 +297,7 @@ __constant_clear_bit(const unsigned long nr, volatile unsigned long *ptr)
291 unsigned long addr; 297 unsigned long addr;
292 298
293 addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 299 addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
294 switch (nr&7) { 300 *(unsigned char *) addr &= ~(1 << (nr & 7));
295 case 0:
296 asm volatile ("ni 0(%1),0xFE" : "=m" (*(char *) addr)
297 : "a" (addr), "m" (*(char *) addr) : "cc" );
298 break;
299 case 1:
300 asm volatile ("ni 0(%1),0xFD": "=m" (*(char *) addr)
301 : "a" (addr), "m" (*(char *) addr) : "cc" );
302 break;
303 case 2:
304 asm volatile ("ni 0(%1),0xFB" : "=m" (*(char *) addr)
305 : "a" (addr), "m" (*(char *) addr) : "cc" );
306 break;
307 case 3:
308 asm volatile ("ni 0(%1),0xF7" : "=m" (*(char *) addr)
309 : "a" (addr), "m" (*(char *) addr) : "cc" );
310 break;
311 case 4:
312 asm volatile ("ni 0(%1),0xEF" : "=m" (*(char *) addr)
313 : "a" (addr), "m" (*(char *) addr) : "cc" );
314 break;
315 case 5:
316 asm volatile ("ni 0(%1),0xDF" : "=m" (*(char *) addr)
317 : "a" (addr), "m" (*(char *) addr) : "cc" );
318 break;
319 case 6:
320 asm volatile ("ni 0(%1),0xBF" : "=m" (*(char *) addr)
321 : "a" (addr), "m" (*(char *) addr) : "cc" );
322 break;
323 case 7:
324 asm volatile ("ni 0(%1),0x7F" : "=m" (*(char *) addr)
325 : "a" (addr), "m" (*(char *) addr) : "cc" );
326 break;
327 }
328} 301}
329 302
330#define clear_bit_simple(nr,addr) \ 303#define clear_bit_simple(nr,addr) \
@@ -340,10 +313,10 @@ static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
340 unsigned long addr; 313 unsigned long addr;
341 314
342 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 315 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
343 asm volatile("xc 0(1,%1),0(%2)" 316 asm volatile(
344 : "=m" (*(char *) addr) 317 " xc 0(1,%1),0(%2)"
345 : "a" (addr), "a" (_oi_bitmap + (nr & 7)), 318 : "=m" (*(char *) addr) : "a" (addr),
346 "m" (*(char *) addr) : "cc" ); 319 "a" (_oi_bitmap + (nr & 7)), "m" (*(char *) addr) : "cc" );
347} 320}
348 321
349static inline void 322static inline void
@@ -352,40 +325,7 @@ __constant_change_bit(const unsigned long nr, volatile unsigned long *ptr)
352 unsigned long addr; 325 unsigned long addr;
353 326
354 addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 327 addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
355 switch (nr&7) { 328 *(unsigned char *) addr ^= 1 << (nr & 7);
356 case 0:
357 asm volatile ("xi 0(%1),0x01" : "=m" (*(char *) addr)
358 : "a" (addr), "m" (*(char *) addr) : "cc" );
359 break;
360 case 1:
361 asm volatile ("xi 0(%1),0x02" : "=m" (*(char *) addr)
362 : "a" (addr), "m" (*(char *) addr) : "cc" );
363 break;
364 case 2:
365 asm volatile ("xi 0(%1),0x04" : "=m" (*(char *) addr)
366 : "a" (addr), "m" (*(char *) addr) : "cc" );
367 break;
368 case 3:
369 asm volatile ("xi 0(%1),0x08" : "=m" (*(char *) addr)
370 : "a" (addr), "m" (*(char *) addr) : "cc" );
371 break;
372 case 4:
373 asm volatile ("xi 0(%1),0x10" : "=m" (*(char *) addr)
374 : "a" (addr), "m" (*(char *) addr) : "cc" );
375 break;
376 case 5:
377 asm volatile ("xi 0(%1),0x20" : "=m" (*(char *) addr)
378 : "a" (addr), "m" (*(char *) addr) : "cc" );
379 break;
380 case 6:
381 asm volatile ("xi 0(%1),0x40" : "=m" (*(char *) addr)
382 : "a" (addr), "m" (*(char *) addr) : "cc" );
383 break;
384 case 7:
385 asm volatile ("xi 0(%1),0x80" : "=m" (*(char *) addr)
386 : "a" (addr), "m" (*(char *) addr) : "cc" );
387 break;
388 }
389} 329}
390 330
391#define change_bit_simple(nr,addr) \ 331#define change_bit_simple(nr,addr) \
@@ -404,10 +344,11 @@ test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr)
404 344
405 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 345 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
406 ch = *(unsigned char *) addr; 346 ch = *(unsigned char *) addr;
407 asm volatile("oc 0(1,%1),0(%2)" 347 asm volatile(
408 : "=m" (*(char *) addr) 348 " oc 0(1,%1),0(%2)"
409 : "a" (addr), "a" (_oi_bitmap + (nr & 7)), 349 : "=m" (*(char *) addr)
410 "m" (*(char *) addr) : "cc", "memory" ); 350 : "a" (addr), "a" (_oi_bitmap + (nr & 7)),
351 "m" (*(char *) addr) : "cc", "memory");
411 return (ch >> (nr & 7)) & 1; 352 return (ch >> (nr & 7)) & 1;
412} 353}
413#define __test_and_set_bit(X,Y) test_and_set_bit_simple(X,Y) 354#define __test_and_set_bit(X,Y) test_and_set_bit_simple(X,Y)
@@ -423,10 +364,11 @@ test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr)
423 364
424 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 365 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
425 ch = *(unsigned char *) addr; 366 ch = *(unsigned char *) addr;
426 asm volatile("nc 0(1,%1),0(%2)" 367 asm volatile(
427 : "=m" (*(char *) addr) 368 " nc 0(1,%1),0(%2)"
428 : "a" (addr), "a" (_ni_bitmap + (nr & 7)), 369 : "=m" (*(char *) addr)
429 "m" (*(char *) addr) : "cc", "memory" ); 370 : "a" (addr), "a" (_ni_bitmap + (nr & 7)),
371 "m" (*(char *) addr) : "cc", "memory");
430 return (ch >> (nr & 7)) & 1; 372 return (ch >> (nr & 7)) & 1;
431} 373}
432#define __test_and_clear_bit(X,Y) test_and_clear_bit_simple(X,Y) 374#define __test_and_clear_bit(X,Y) test_and_clear_bit_simple(X,Y)
@@ -442,10 +384,11 @@ test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr)
442 384
443 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 385 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
444 ch = *(unsigned char *) addr; 386 ch = *(unsigned char *) addr;
445 asm volatile("xc 0(1,%1),0(%2)" 387 asm volatile(
446 : "=m" (*(char *) addr) 388 " xc 0(1,%1),0(%2)"
447 : "a" (addr), "a" (_oi_bitmap + (nr & 7)), 389 : "=m" (*(char *) addr)
448 "m" (*(char *) addr) : "cc", "memory" ); 390 : "a" (addr), "a" (_oi_bitmap + (nr & 7)),
391 "m" (*(char *) addr) : "cc", "memory");
449 return (ch >> (nr & 7)) & 1; 392 return (ch >> (nr & 7)) & 1;
450} 393}
451#define __test_and_change_bit(X,Y) test_and_change_bit_simple(X,Y) 394#define __test_and_change_bit(X,Y) test_and_change_bit_simple(X,Y)
@@ -557,35 +500,36 @@ find_first_zero_bit(const unsigned long * addr, unsigned long size)
557 500
558 if (!size) 501 if (!size)
559 return 0; 502 return 0;
560 __asm__(" lhi %1,-1\n" 503 asm volatile(
561 " lr %2,%3\n" 504 " lhi %1,-1\n"
562 " slr %0,%0\n" 505 " lr %2,%3\n"
563 " ahi %2,31\n" 506 " slr %0,%0\n"
564 " srl %2,5\n" 507 " ahi %2,31\n"
565 "0: c %1,0(%0,%4)\n" 508 " srl %2,5\n"
566 " jne 1f\n" 509 "0: c %1,0(%0,%4)\n"
567 " la %0,4(%0)\n" 510 " jne 1f\n"
568 " brct %2,0b\n" 511 " la %0,4(%0)\n"
569 " lr %0,%3\n" 512 " brct %2,0b\n"
570 " j 4f\n" 513 " lr %0,%3\n"
571 "1: l %2,0(%0,%4)\n" 514 " j 4f\n"
572 " sll %0,3\n" 515 "1: l %2,0(%0,%4)\n"
573 " lhi %1,0xff\n" 516 " sll %0,3\n"
574 " tml %2,0xffff\n" 517 " lhi %1,0xff\n"
575 " jno 2f\n" 518 " tml %2,0xffff\n"
576 " ahi %0,16\n" 519 " jno 2f\n"
577 " srl %2,16\n" 520 " ahi %0,16\n"
578 "2: tml %2,0x00ff\n" 521 " srl %2,16\n"
579 " jno 3f\n" 522 "2: tml %2,0x00ff\n"
580 " ahi %0,8\n" 523 " jno 3f\n"
581 " srl %2,8\n" 524 " ahi %0,8\n"
582 "3: nr %2,%1\n" 525 " srl %2,8\n"
583 " ic %2,0(%2,%5)\n" 526 "3: nr %2,%1\n"
584 " alr %0,%2\n" 527 " ic %2,0(%2,%5)\n"
585 "4:" 528 " alr %0,%2\n"
586 : "=&a" (res), "=&d" (cmp), "=&a" (count) 529 "4:"
587 : "a" (size), "a" (addr), "a" (&_zb_findmap), 530 : "=&a" (res), "=&d" (cmp), "=&a" (count)
588 "m" (*(addrtype *) addr) : "cc" ); 531 : "a" (size), "a" (addr), "a" (&_zb_findmap),
532 "m" (*(addrtype *) addr) : "cc");
589 return (res < size) ? res : size; 533 return (res < size) ? res : size;
590} 534}
591 535
@@ -598,35 +542,36 @@ find_first_bit(const unsigned long * addr, unsigned long size)
598 542
599 if (!size) 543 if (!size)
600 return 0; 544 return 0;
601 __asm__(" slr %1,%1\n" 545 asm volatile(
602 " lr %2,%3\n" 546 " slr %1,%1\n"
603 " slr %0,%0\n" 547 " lr %2,%3\n"
604 " ahi %2,31\n" 548 " slr %0,%0\n"
605 " srl %2,5\n" 549 " ahi %2,31\n"
606 "0: c %1,0(%0,%4)\n" 550 " srl %2,5\n"
607 " jne 1f\n" 551 "0: c %1,0(%0,%4)\n"
608 " la %0,4(%0)\n" 552 " jne 1f\n"
609 " brct %2,0b\n" 553 " la %0,4(%0)\n"
610 " lr %0,%3\n" 554 " brct %2,0b\n"
611 " j 4f\n" 555 " lr %0,%3\n"
612 "1: l %2,0(%0,%4)\n" 556 " j 4f\n"
613 " sll %0,3\n" 557 "1: l %2,0(%0,%4)\n"
614 " lhi %1,0xff\n" 558 " sll %0,3\n"
615 " tml %2,0xffff\n" 559 " lhi %1,0xff\n"
616 " jnz 2f\n" 560 " tml %2,0xffff\n"
617 " ahi %0,16\n" 561 " jnz 2f\n"
618 " srl %2,16\n" 562 " ahi %0,16\n"
619 "2: tml %2,0x00ff\n" 563 " srl %2,16\n"
620 " jnz 3f\n" 564 "2: tml %2,0x00ff\n"
621 " ahi %0,8\n" 565 " jnz 3f\n"
622 " srl %2,8\n" 566 " ahi %0,8\n"
623 "3: nr %2,%1\n" 567 " srl %2,8\n"
624 " ic %2,0(%2,%5)\n" 568 "3: nr %2,%1\n"
625 " alr %0,%2\n" 569 " ic %2,0(%2,%5)\n"
626 "4:" 570 " alr %0,%2\n"
627 : "=&a" (res), "=&d" (cmp), "=&a" (count) 571 "4:"
628 : "a" (size), "a" (addr), "a" (&_sb_findmap), 572 : "=&a" (res), "=&d" (cmp), "=&a" (count)
629 "m" (*(addrtype *) addr) : "cc" ); 573 : "a" (size), "a" (addr), "a" (&_sb_findmap),
574 "m" (*(addrtype *) addr) : "cc");
630 return (res < size) ? res : size; 575 return (res < size) ? res : size;
631} 576}
632 577
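
Both find_first_zero_bit()/find_first_bit() flavours keep the same algorithm: compare word by word against -1 (or 0) until a candidate word is found, then narrow down inside it by halfword and byte using the _zb_findmap/_sb_findmap lookup tables. A hedged C rendering of the 32-bit zero-bit scan:

static inline unsigned long
find_first_zero_bit_c(const unsigned long *addr, unsigned long size)
{
	unsigned long words = (size + 31) / 32;
	unsigned long i, word, res;

	for (i = 0; i < words; i++)	/* the "c %1,0(%0,%4)" loop */
		if (addr[i] != ~0UL)
			break;
	if (i == words)
		return size;		/* no zero bit found */
	word = addr[i];
	for (res = 0; word & 1; res++)	/* ffz inside the word */
		word >>= 1;
	res += i * 32;
	return res < size ? res : size;
}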
@@ -640,39 +585,40 @@ find_first_zero_bit(const unsigned long * addr, unsigned long size)
640 585
641 if (!size) 586 if (!size)
642 return 0; 587 return 0;
643 __asm__(" lghi %1,-1\n" 588 asm volatile(
644 " lgr %2,%3\n" 589 " lghi %1,-1\n"
645 " slgr %0,%0\n" 590 " lgr %2,%3\n"
646 " aghi %2,63\n" 591 " slgr %0,%0\n"
647 " srlg %2,%2,6\n" 592 " aghi %2,63\n"
648 "0: cg %1,0(%0,%4)\n" 593 " srlg %2,%2,6\n"
649 " jne 1f\n" 594 "0: cg %1,0(%0,%4)\n"
650 " la %0,8(%0)\n" 595 " jne 1f\n"
651 " brct %2,0b\n" 596 " la %0,8(%0)\n"
652 " lgr %0,%3\n" 597 " brct %2,0b\n"
653 " j 5f\n" 598 " lgr %0,%3\n"
654 "1: lg %2,0(%0,%4)\n" 599 " j 5f\n"
655 " sllg %0,%0,3\n" 600 "1: lg %2,0(%0,%4)\n"
656 " clr %2,%1\n" 601 " sllg %0,%0,3\n"
657 " jne 2f\n" 602 " clr %2,%1\n"
658 " aghi %0,32\n" 603 " jne 2f\n"
659 " srlg %2,%2,32\n" 604 " aghi %0,32\n"
660 "2: lghi %1,0xff\n" 605 " srlg %2,%2,32\n"
661 " tmll %2,0xffff\n" 606 "2: lghi %1,0xff\n"
662 " jno 3f\n" 607 " tmll %2,0xffff\n"
663 " aghi %0,16\n" 608 " jno 3f\n"
664 " srl %2,16\n" 609 " aghi %0,16\n"
665 "3: tmll %2,0x00ff\n" 610 " srl %2,16\n"
666 " jno 4f\n" 611 "3: tmll %2,0x00ff\n"
667 " aghi %0,8\n" 612 " jno 4f\n"
668 " srl %2,8\n" 613 " aghi %0,8\n"
669 "4: ngr %2,%1\n" 614 " srl %2,8\n"
670 " ic %2,0(%2,%5)\n" 615 "4: ngr %2,%1\n"
671 " algr %0,%2\n" 616 " ic %2,0(%2,%5)\n"
672 "5:" 617 " algr %0,%2\n"
673 : "=&a" (res), "=&d" (cmp), "=&a" (count) 618 "5:"
619 : "=&a" (res), "=&d" (cmp), "=&a" (count)
674 : "a" (size), "a" (addr), "a" (&_zb_findmap), 620 : "a" (size), "a" (addr), "a" (&_zb_findmap),
675 "m" (*(addrtype *) addr) : "cc" ); 621 "m" (*(addrtype *) addr) : "cc");
676 return (res < size) ? res : size; 622 return (res < size) ? res : size;
677} 623}
678 624
@@ -684,39 +630,40 @@ find_first_bit(const unsigned long * addr, unsigned long size)
684 630
685 if (!size) 631 if (!size)
686 return 0; 632 return 0;
687 __asm__(" slgr %1,%1\n" 633 asm volatile(
688 " lgr %2,%3\n" 634 " slgr %1,%1\n"
689 " slgr %0,%0\n" 635 " lgr %2,%3\n"
690 " aghi %2,63\n" 636 " slgr %0,%0\n"
691 " srlg %2,%2,6\n" 637 " aghi %2,63\n"
692 "0: cg %1,0(%0,%4)\n" 638 " srlg %2,%2,6\n"
693 " jne 1f\n" 639 "0: cg %1,0(%0,%4)\n"
694 " aghi %0,8\n" 640 " jne 1f\n"
695 " brct %2,0b\n" 641 " aghi %0,8\n"
696 " lgr %0,%3\n" 642 " brct %2,0b\n"
697 " j 5f\n" 643 " lgr %0,%3\n"
698 "1: lg %2,0(%0,%4)\n" 644 " j 5f\n"
699 " sllg %0,%0,3\n" 645 "1: lg %2,0(%0,%4)\n"
700 " clr %2,%1\n" 646 " sllg %0,%0,3\n"
701 " jne 2f\n" 647 " clr %2,%1\n"
702 " aghi %0,32\n" 648 " jne 2f\n"
703 " srlg %2,%2,32\n" 649 " aghi %0,32\n"
704 "2: lghi %1,0xff\n" 650 " srlg %2,%2,32\n"
705 " tmll %2,0xffff\n" 651 "2: lghi %1,0xff\n"
706 " jnz 3f\n" 652 " tmll %2,0xffff\n"
707 " aghi %0,16\n" 653 " jnz 3f\n"
708 " srl %2,16\n" 654 " aghi %0,16\n"
709 "3: tmll %2,0x00ff\n" 655 " srl %2,16\n"
710 " jnz 4f\n" 656 "3: tmll %2,0x00ff\n"
711 " aghi %0,8\n" 657 " jnz 4f\n"
712 " srl %2,8\n" 658 " aghi %0,8\n"
713 "4: ngr %2,%1\n" 659 " srl %2,8\n"
714 " ic %2,0(%2,%5)\n" 660 "4: ngr %2,%1\n"
715 " algr %0,%2\n" 661 " ic %2,0(%2,%5)\n"
716 "5:" 662 " algr %0,%2\n"
717 : "=&a" (res), "=&d" (cmp), "=&a" (count) 663 "5:"
664 : "=&a" (res), "=&d" (cmp), "=&a" (count)
718 : "a" (size), "a" (addr), "a" (&_sb_findmap), 665 : "a" (size), "a" (addr), "a" (&_sb_findmap),
719 "m" (*(addrtype *) addr) : "cc" ); 666 "m" (*(addrtype *) addr) : "cc");
720 return (res < size) ? res : size; 667 return (res < size) ? res : size;
721} 668}
722 669
@@ -832,36 +779,37 @@ ext2_find_first_zero_bit(void *vaddr, unsigned int size)
832 779
833 if (!size) 780 if (!size)
834 return 0; 781 return 0;
835 __asm__(" lhi %1,-1\n" 782 asm volatile(
836 " lr %2,%3\n" 783 " lhi %1,-1\n"
837 " ahi %2,31\n" 784 " lr %2,%3\n"
838 " srl %2,5\n" 785 " ahi %2,31\n"
839 " slr %0,%0\n" 786 " srl %2,5\n"
840 "0: cl %1,0(%0,%4)\n" 787 " slr %0,%0\n"
841 " jne 1f\n" 788 "0: cl %1,0(%0,%4)\n"
842 " ahi %0,4\n" 789 " jne 1f\n"
843 " brct %2,0b\n" 790 " ahi %0,4\n"
844 " lr %0,%3\n" 791 " brct %2,0b\n"
845 " j 4f\n" 792 " lr %0,%3\n"
846 "1: l %2,0(%0,%4)\n" 793 " j 4f\n"
847 " sll %0,3\n" 794 "1: l %2,0(%0,%4)\n"
848 " ahi %0,24\n" 795 " sll %0,3\n"
849 " lhi %1,0xff\n" 796 " ahi %0,24\n"
850 " tmh %2,0xffff\n" 797 " lhi %1,0xff\n"
851 " jo 2f\n" 798 " tmh %2,0xffff\n"
852 " ahi %0,-16\n" 799 " jo 2f\n"
853 " srl %2,16\n" 800 " ahi %0,-16\n"
854 "2: tml %2,0xff00\n" 801 " srl %2,16\n"
855 " jo 3f\n" 802 "2: tml %2,0xff00\n"
856 " ahi %0,-8\n" 803 " jo 3f\n"
857 " srl %2,8\n" 804 " ahi %0,-8\n"
858 "3: nr %2,%1\n" 805 " srl %2,8\n"
859 " ic %2,0(%2,%5)\n" 806 "3: nr %2,%1\n"
860 " alr %0,%2\n" 807 " ic %2,0(%2,%5)\n"
861 "4:" 808 " alr %0,%2\n"
862 : "=&a" (res), "=&d" (cmp), "=&a" (count) 809 "4:"
863 : "a" (size), "a" (vaddr), "a" (&_zb_findmap), 810 : "=&a" (res), "=&d" (cmp), "=&a" (count)
864 "m" (*(addrtype *) vaddr) : "cc" ); 811 : "a" (size), "a" (vaddr), "a" (&_zb_findmap),
812 "m" (*(addrtype *) vaddr) : "cc");
865 return (res < size) ? res : size; 813 return (res < size) ? res : size;
866} 814}
867 815
@@ -875,39 +823,40 @@ ext2_find_first_zero_bit(void *vaddr, unsigned long size)
875 823
876 if (!size) 824 if (!size)
877 return 0; 825 return 0;
878 __asm__(" lghi %1,-1\n" 826 asm volatile(
879 " lgr %2,%3\n" 827 " lghi %1,-1\n"
880 " aghi %2,63\n" 828 " lgr %2,%3\n"
881 " srlg %2,%2,6\n" 829 " aghi %2,63\n"
882 " slgr %0,%0\n" 830 " srlg %2,%2,6\n"
883 "0: clg %1,0(%0,%4)\n" 831 " slgr %0,%0\n"
884 " jne 1f\n" 832 "0: clg %1,0(%0,%4)\n"
885 " aghi %0,8\n" 833 " jne 1f\n"
886 " brct %2,0b\n" 834 " aghi %0,8\n"
887 " lgr %0,%3\n" 835 " brct %2,0b\n"
888 " j 5f\n" 836 " lgr %0,%3\n"
889 "1: cl %1,0(%0,%4)\n" 837 " j 5f\n"
890 " jne 2f\n" 838 "1: cl %1,0(%0,%4)\n"
891 " aghi %0,4\n" 839 " jne 2f\n"
892 "2: l %2,0(%0,%4)\n" 840 " aghi %0,4\n"
893 " sllg %0,%0,3\n" 841 "2: l %2,0(%0,%4)\n"
894 " aghi %0,24\n" 842 " sllg %0,%0,3\n"
895 " lghi %1,0xff\n" 843 " aghi %0,24\n"
896 " tmlh %2,0xffff\n" 844 " lghi %1,0xff\n"
897 " jo 3f\n" 845 " tmlh %2,0xffff\n"
898 " aghi %0,-16\n" 846 " jo 3f\n"
899 " srl %2,16\n" 847 " aghi %0,-16\n"
900 "3: tmll %2,0xff00\n" 848 " srl %2,16\n"
901 " jo 4f\n" 849 "3: tmll %2,0xff00\n"
902 " aghi %0,-8\n" 850 " jo 4f\n"
903 " srl %2,8\n" 851 " aghi %0,-8\n"
904 "4: ngr %2,%1\n" 852 " srl %2,8\n"
905 " ic %2,0(%2,%5)\n" 853 "4: ngr %2,%1\n"
906 " algr %0,%2\n" 854 " ic %2,0(%2,%5)\n"
907 "5:" 855 " algr %0,%2\n"
908 : "=&a" (res), "=&d" (cmp), "=&a" (count) 856 "5:"
857 : "=&a" (res), "=&d" (cmp), "=&a" (count)
909 : "a" (size), "a" (vaddr), "a" (&_zb_findmap), 858 : "a" (size), "a" (vaddr), "a" (&_zb_findmap),
910 "m" (*(addrtype *) vaddr) : "cc" ); 859 "m" (*(addrtype *) vaddr) : "cc");
911 return (res < size) ? res : size; 860 return (res < size) ? res : size;
912} 861}
913 862
@@ -927,13 +876,16 @@ ext2_find_next_zero_bit(void *vaddr, unsigned long size, unsigned long offset)
927 p = addr + offset / __BITOPS_WORDSIZE; 876 p = addr + offset / __BITOPS_WORDSIZE;
928 if (bit) { 877 if (bit) {
929#ifndef __s390x__ 878#ifndef __s390x__
930 asm(" ic %0,0(%1)\n" 879 asm volatile(
931 " icm %0,2,1(%1)\n" 880 " ic %0,0(%1)\n"
932 " icm %0,4,2(%1)\n" 881 " icm %0,2,1(%1)\n"
933 " icm %0,8,3(%1)" 882 " icm %0,4,2(%1)\n"
934 : "=&a" (word) : "a" (p), "m" (*p) : "cc" ); 883 " icm %0,8,3(%1)"
884 : "=&a" (word) : "a" (p), "m" (*p) : "cc");
935#else 885#else
936 asm(" lrvg %0,%1" : "=a" (word) : "m" (*p) ); 886 asm volatile(
887 " lrvg %0,%1"
888 : "=a" (word) : "m" (*p) );
937#endif 889#endif
938 /* 890 /*
939 * s390 version of ffz returns __BITOPS_WORDSIZE 891 * s390 version of ffz returns __BITOPS_WORDSIZE
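
ext2 stores its bitmaps little-endian, so the partial word has to be byte-reversed before the big-endian ffz can run on it: four ic/icm inserts on 31-bit, a single lrvg (load reversed) on 64-bit. A hedged sketch of the 64-bit idiom:

/* lrvg loads a doubleword with its bytes reversed, turning a
 * little-endian on-disk bitmap word into host byte order. */
static inline unsigned long load_le64(const unsigned long *p)
{
	unsigned long word;

	asm volatile("lrvg %0,%1" : "=d" (word) : "m" (*p));
	return word;
}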
diff --git a/include/asm-s390/byteorder.h b/include/asm-s390/byteorder.h
index 2cc35a0e188e..1fe2492baa8d 100644
--- a/include/asm-s390/byteorder.h
+++ b/include/asm-s390/byteorder.h
@@ -14,60 +14,54 @@
14#ifdef __GNUC__ 14#ifdef __GNUC__
15 15
16#ifdef __s390x__ 16#ifdef __s390x__
17static __inline__ __u64 ___arch__swab64p(const __u64 *x) 17static inline __u64 ___arch__swab64p(const __u64 *x)
18{ 18{
19 __u64 result; 19 __u64 result;
20 20
21 __asm__ __volatile__ ( 21 asm volatile("lrvg %0,%1" : "=d" (result) : "m" (*x));
22 " lrvg %0,%1"
23 : "=d" (result) : "m" (*x) );
24 return result; 22 return result;
25} 23}
26 24
27static __inline__ __u64 ___arch__swab64(__u64 x) 25static inline __u64 ___arch__swab64(__u64 x)
28{ 26{
29 __u64 result; 27 __u64 result;
30 28
31 __asm__ __volatile__ ( 29 asm volatile("lrvgr %0,%1" : "=d" (result) : "d" (x));
32 " lrvgr %0,%1"
33 : "=d" (result) : "d" (x) );
34 return result; 30 return result;
35} 31}
36 32
37static __inline__ void ___arch__swab64s(__u64 *x) 33static inline void ___arch__swab64s(__u64 *x)
38{ 34{
39 *x = ___arch__swab64p(x); 35 *x = ___arch__swab64p(x);
40} 36}
41#endif /* __s390x__ */ 37#endif /* __s390x__ */
42 38
43static __inline__ __u32 ___arch__swab32p(const __u32 *x) 39static inline __u32 ___arch__swab32p(const __u32 *x)
44{ 40{
45 __u32 result; 41 __u32 result;
46 42
47 __asm__ __volatile__ ( 43 asm volatile(
48#ifndef __s390x__ 44#ifndef __s390x__
49 " icm %0,8,3(%1)\n" 45 " icm %0,8,3(%1)\n"
50 " icm %0,4,2(%1)\n" 46 " icm %0,4,2(%1)\n"
51 " icm %0,2,1(%1)\n" 47 " icm %0,2,1(%1)\n"
52 " ic %0,0(%1)" 48 " ic %0,0(%1)"
53 : "=&d" (result) : "a" (x), "m" (*x) : "cc" ); 49 : "=&d" (result) : "a" (x), "m" (*x) : "cc");
54#else /* __s390x__ */ 50#else /* __s390x__ */
55 " lrv %0,%1" 51 " lrv %0,%1"
56 : "=d" (result) : "m" (*x) ); 52 : "=d" (result) : "m" (*x));
57#endif /* __s390x__ */ 53#endif /* __s390x__ */
58 return result; 54 return result;
59} 55}
60 56
61static __inline__ __u32 ___arch__swab32(__u32 x) 57static inline __u32 ___arch__swab32(__u32 x)
62{ 58{
63#ifndef __s390x__ 59#ifndef __s390x__
64 return ___arch__swab32p(&x); 60 return ___arch__swab32p(&x);
65#else /* __s390x__ */ 61#else /* __s390x__ */
66 __u32 result; 62 __u32 result;
67 63
68 __asm__ __volatile__ ( 64 asm volatile("lrvr %0,%1" : "=d" (result) : "d" (x));
69 " lrvr %0,%1"
70 : "=d" (result) : "d" (x) );
71 return result; 65 return result;
72#endif /* __s390x__ */ 66#endif /* __s390x__ */
73} 67}
@@ -81,14 +75,14 @@ static __inline__ __u16 ___arch__swab16p(const __u16 *x)
81{ 75{
82 __u16 result; 76 __u16 result;
83 77
84 __asm__ __volatile__ ( 78 asm volatile(
85#ifndef __s390x__ 79#ifndef __s390x__
86 " icm %0,2,1(%1)\n" 80 " icm %0,2,1(%1)\n"
87 " ic %0,0(%1)\n" 81 " ic %0,0(%1)\n"
88 : "=&d" (result) : "a" (x), "m" (*x) : "cc" ); 82 : "=&d" (result) : "a" (x), "m" (*x) : "cc");
89#else /* __s390x__ */ 83#else /* __s390x__ */
90 " lrvh %0,%1" 84 " lrvh %0,%1"
91 : "=d" (result) : "m" (*x) ); 85 : "=d" (result) : "m" (*x));
92#endif /* __s390x__ */ 86#endif /* __s390x__ */
93 return result; 87 return result;
94} 88}
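
With the cleanup, every swab helper is a single load-reversed instruction: lrv/lrvr for 32 bits, lrvh for 16, lrvg/lrvgr for 64 (memory and register forms respectively). A hedged usage sketch of the 64-bit pair:

__u64 x = 0x0102030405060708ULL;
__u64 y = ___arch__swab64(x);	/* y == 0x0807060504030201ULL */
__u64 z = ___arch__swab64p(&x);	/* same result, loaded via lrvg */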
diff --git a/include/asm-s390/checksum.h b/include/asm-s390/checksum.h
index 471f2af2b16a..37c362d89fad 100644
--- a/include/asm-s390/checksum.h
+++ b/include/asm-s390/checksum.h
@@ -30,57 +30,13 @@
30static inline unsigned int 30static inline unsigned int
31csum_partial(const unsigned char * buff, int len, unsigned int sum) 31csum_partial(const unsigned char * buff, int len, unsigned int sum)
32{ 32{
33 /* 33 register unsigned long reg2 asm("2") = (unsigned long) buff;
34 * Experiments with ethernet and slip connections show that buf 34 register unsigned long reg3 asm("3") = (unsigned long) len;
35 * is aligned on either a 2-byte or 4-byte boundary.
36 */
37#ifndef __s390x__
38 register_pair rp;
39
40 rp.subreg.even = (unsigned long) buff;
41 rp.subreg.odd = (unsigned long) len;
42 __asm__ __volatile__ (
43 "0: cksm %0,%1\n" /* do checksum on longs */
44 " jo 0b\n"
45 : "+&d" (sum), "+&a" (rp) : : "cc", "memory" );
46#else /* __s390x__ */
47 __asm__ __volatile__ (
48 " lgr 2,%1\n" /* address in gpr 2 */
49 " lgfr 3,%2\n" /* length in gpr 3 */
50 "0: cksm %0,2\n" /* do checksum on longs */
51 " jo 0b\n"
52 : "+&d" (sum)
53 : "d" (buff), "d" (len)
54 : "cc", "memory", "2", "3" );
55#endif /* __s390x__ */
56 return sum;
57}
58
59/*
60 * csum_partial as an inline function
61 */
62static inline unsigned int
63csum_partial_inline(const unsigned char * buff, int len, unsigned int sum)
64{
65#ifndef __s390x__
66 register_pair rp;
67 35
68 rp.subreg.even = (unsigned long) buff; 36 asm volatile(
69 rp.subreg.odd = (unsigned long) len; 37 "0: cksm %0,%1\n" /* do checksum on longs */
70 __asm__ __volatile__ ( 38 " jo 0b\n"
71 "0: cksm %0,%1\n" /* do checksum on longs */ 39 : "+d" (sum), "+d" (reg2), "+d" (reg3) : : "cc", "memory");
72 " jo 0b\n"
73 : "+&d" (sum), "+&a" (rp) : : "cc", "memory" );
74#else /* __s390x__ */
75 __asm__ __volatile__ (
76 " lgr 2,%1\n" /* address in gpr 2 */
77 " lgfr 3,%2\n" /* length in gpr 3 */
78 "0: cksm %0,2\n" /* do checksum on longs */
79 " jo 0b\n"
80 : "+&d" (sum)
81 : "d" (buff), "d" (len)
82 : "cc", "memory", "2", "3" );
83#endif /* __s390x__ */
84 return sum; 40 return sum;
85} 41}
86 42
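
The cksm instruction explains the two register-asm variables: it takes the running sum in one operand and an even/odd register pair holding address and length in the other, which is why buff and len are pinned to registers 2 and 3. The instruction updates the pair as it advances and sets condition code 3 on partial completion, hence the "jo 0b" retry. With the pair set up in C, the 31-bit register_pair and 64-bit lgr/lgfr variants collapse into one implementation, and csum_partial_inline() becomes redundant (see the hunk below). A hedged usage sketch:

unsigned char buf[64];	/* assume this holds packet data */
unsigned short folded = csum_fold(csum_partial(buf, sizeof(buf), 0));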
@@ -114,7 +70,7 @@ static inline unsigned int
114csum_partial_copy_nocheck (const char *src, char *dst, int len, unsigned int sum) 70csum_partial_copy_nocheck (const char *src, char *dst, int len, unsigned int sum)
115{ 71{
116 memcpy(dst,src,len); 72 memcpy(dst,src,len);
117 return csum_partial_inline(dst, len, sum); 73 return csum_partial(dst, len, sum);
118} 74}
119 75
120/* 76/*
@@ -126,22 +82,22 @@ csum_fold(unsigned int sum)
126#ifndef __s390x__ 82#ifndef __s390x__
127 register_pair rp; 83 register_pair rp;
128 84
129 __asm__ __volatile__ ( 85 asm volatile(
130 " slr %N1,%N1\n" /* %0 = H L */ 86 " slr %N1,%N1\n" /* %0 = H L */
131 " lr %1,%0\n" /* %0 = H L, %1 = H L 0 0 */ 87 " lr %1,%0\n" /* %0 = H L, %1 = H L 0 0 */
132 " srdl %1,16\n" /* %0 = H L, %1 = 0 H L 0 */ 88 " srdl %1,16\n" /* %0 = H L, %1 = 0 H L 0 */
133 " alr %1,%N1\n" /* %0 = H L, %1 = L H L 0 */ 89 " alr %1,%N1\n" /* %0 = H L, %1 = L H L 0 */
134 " alr %0,%1\n" /* %0 = H+L+C L+H */ 90 " alr %0,%1\n" /* %0 = H+L+C L+H */
135 " srl %0,16\n" /* %0 = H+L+C */ 91 " srl %0,16\n" /* %0 = H+L+C */
136 : "+&d" (sum), "=d" (rp) : : "cc" ); 92 : "+&d" (sum), "=d" (rp) : : "cc");
137#else /* __s390x__ */ 93#else /* __s390x__ */
138 __asm__ __volatile__ ( 94 asm volatile(
139 " sr 3,3\n" /* %0 = H*65536 + L */ 95 " sr 3,3\n" /* %0 = H*65536 + L */
140 " lr 2,%0\n" /* %0 = H L, R2/R3 = H L / 0 0 */ 96 " lr 2,%0\n" /* %0 = H L, 2/3 = H L / 0 0 */
141 " srdl 2,16\n" /* %0 = H L, R2/R3 = 0 H / L 0 */ 97 " srdl 2,16\n" /* %0 = H L, 2/3 = 0 H / L 0 */
142 " alr 2,3\n" /* %0 = H L, R2/R3 = L H / L 0 */ 98 " alr 2,3\n" /* %0 = H L, 2/3 = L H / L 0 */
143 " alr %0,2\n" /* %0 = H+L+C L+H */ 99 " alr %0,2\n" /* %0 = H+L+C L+H */
144 " srl %0,16\n" /* %0 = H+L+C */ 100 " srl %0,16\n" /* %0 = H+L+C */
145 : "+&d" (sum) : : "cc", "2", "3"); 101 : "+&d" (sum) : : "cc", "2", "3");
146#endif /* __s390x__ */ 102#endif /* __s390x__ */
147 return ((unsigned short) ~sum); 103 return ((unsigned short) ~sum);
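
Both branches of csum_fold() compress the 32-bit ones-complement sum to 16 bits: add the high halfword into the low one, propagate the carry, and complement. A hedged C equivalent:

static inline unsigned short csum_fold_c(unsigned int sum)
{
	sum = (sum >> 16) + (sum & 0xffff);	/* high half into low */
	sum += sum >> 16;			/* end-around carry */
	return (unsigned short) ~sum;
}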
@@ -155,29 +111,7 @@ csum_fold(unsigned int sum)
155static inline unsigned short 111static inline unsigned short
156ip_fast_csum(unsigned char *iph, unsigned int ihl) 112ip_fast_csum(unsigned char *iph, unsigned int ihl)
157{ 113{
158 unsigned long sum; 114 return csum_fold(csum_partial(iph, ihl*4, 0));
159#ifndef __s390x__
160 register_pair rp;
161
162 rp.subreg.even = (unsigned long) iph;
163 rp.subreg.odd = (unsigned long) ihl*4;
164 __asm__ __volatile__ (
165 " sr %0,%0\n" /* set sum to zero */
166 "0: cksm %0,%1\n" /* do checksum on longs */
167 " jo 0b\n"
168 : "=&d" (sum), "+&a" (rp) : : "cc", "memory" );
169#else /* __s390x__ */
170 __asm__ __volatile__ (
171 " slgr %0,%0\n" /* set sum to zero */
172 " lgr 2,%1\n" /* address in gpr 2 */
173 " lgfr 3,%2\n" /* length in gpr 3 */
174 "0: cksm %0,2\n" /* do checksum on ints */
175 " jo 0b\n"
176 : "=&d" (sum)
177 : "d" (iph), "d" (ihl*4)
178 : "cc", "memory", "2", "3" );
179#endif /* __s390x__ */
180 return csum_fold(sum);
181} 115}
182 116
183/* 117/*
@@ -190,47 +124,47 @@ csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr,
190 unsigned int sum) 124 unsigned int sum)
191{ 125{
192#ifndef __s390x__ 126#ifndef __s390x__
193 __asm__ __volatile__ ( 127 asm volatile(
194 " alr %0,%1\n" /* sum += saddr */ 128 " alr %0,%1\n" /* sum += saddr */
195 " brc 12,0f\n" 129 " brc 12,0f\n"
196 " ahi %0,1\n" /* add carry */ 130 " ahi %0,1\n" /* add carry */
197 "0:" 131 "0:"
198 : "+&d" (sum) : "d" (saddr) : "cc" ); 132 : "+&d" (sum) : "d" (saddr) : "cc");
199 __asm__ __volatile__ ( 133 asm volatile(
200 " alr %0,%1\n" /* sum += daddr */ 134 " alr %0,%1\n" /* sum += daddr */
201 " brc 12,1f\n" 135 " brc 12,1f\n"
202 " ahi %0,1\n" /* add carry */ 136 " ahi %0,1\n" /* add carry */
203 "1:" 137 "1:"
204 : "+&d" (sum) : "d" (daddr) : "cc" ); 138 : "+&d" (sum) : "d" (daddr) : "cc");
205 __asm__ __volatile__ ( 139 asm volatile(
206 " alr %0,%1\n" /* sum += (len<<16) + (proto<<8) */ 140 " alr %0,%1\n" /* sum += (len<<16) + (proto<<8) */
207 " brc 12,2f\n" 141 " brc 12,2f\n"
208 " ahi %0,1\n" /* add carry */ 142 " ahi %0,1\n" /* add carry */
209 "2:" 143 "2:"
210 : "+&d" (sum) 144 : "+&d" (sum)
211 : "d" (((unsigned int) len<<16) + (unsigned int) proto) 145 : "d" (((unsigned int) len<<16) + (unsigned int) proto)
212 : "cc" ); 146 : "cc");
213#else /* __s390x__ */ 147#else /* __s390x__ */
214 __asm__ __volatile__ ( 148 asm volatile(
215 " lgfr %0,%0\n" 149 " lgfr %0,%0\n"
216 " algr %0,%1\n" /* sum += saddr */ 150 " algr %0,%1\n" /* sum += saddr */
217 " brc 12,0f\n" 151 " brc 12,0f\n"
218 " aghi %0,1\n" /* add carry */ 152 " aghi %0,1\n" /* add carry */
219 "0: algr %0,%2\n" /* sum += daddr */ 153 "0: algr %0,%2\n" /* sum += daddr */
220 " brc 12,1f\n" 154 " brc 12,1f\n"
221 " aghi %0,1\n" /* add carry */ 155 " aghi %0,1\n" /* add carry */
222 "1: algfr %0,%3\n" /* sum += (len<<16) + proto */ 156 "1: algfr %0,%3\n" /* sum += (len<<16) + proto */
223 " brc 12,2f\n" 157 " brc 12,2f\n"
224 " aghi %0,1\n" /* add carry */ 158 " aghi %0,1\n" /* add carry */
225 "2: srlg 0,%0,32\n" 159 "2: srlg 0,%0,32\n"
226 " alr %0,0\n" /* fold to 32 bits */ 160 " alr %0,0\n" /* fold to 32 bits */
227 " brc 12,3f\n" 161 " brc 12,3f\n"
228 " ahi %0,1\n" /* add carry */ 162 " ahi %0,1\n" /* add carry */
229 "3: llgfr %0,%0" 163 "3: llgfr %0,%0"
230 : "+&d" (sum) 164 : "+&d" (sum)
231 : "d" (saddr), "d" (daddr), 165 : "d" (saddr), "d" (daddr),
232 "d" (((unsigned int) len<<16) + (unsigned int) proto) 166 "d" (((unsigned int) len<<16) + (unsigned int) proto)
233 : "cc", "0" ); 167 : "cc", "0");
234#endif /* __s390x__ */ 168#endif /* __s390x__ */
235 return sum; 169 return sum;
236} 170}
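The checksum rework above keeps one pattern throughout: operands that were
loaded into fixed GPRs by hand (or through a register_pair) are now bound
with register asm variables, so the asm body shrinks to the CKSM loop
itself. A minimal sketch of that pattern, following the new csum_partial in
the right-hand column (the function name here is illustrative, not the
kernel's):

static inline unsigned int
cksm_sketch(const unsigned char *buff, int len, unsigned int sum)
{
	register unsigned long reg2 asm("2") = (unsigned long) buff;
	register unsigned long reg3 asm("3") = (unsigned long) len;

	asm volatile(
		"0:	cksm	%0,%1\n"	/* checksum; R2/R3 is the pair */
		"	jo	0b\n"		/* cc 3: interrupted, resume */
		: "+d" (sum), "+d" (reg2), "+d" (reg3) : : "cc", "memory");
	return sum;
}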
diff --git a/include/asm-s390/ebcdic.h b/include/asm-s390/ebcdic.h
index 15fd2eda6c90..7f6f641d32f4 100644
--- a/include/asm-s390/ebcdic.h
+++ b/include/asm-s390/ebcdic.h
@@ -26,16 +26,16 @@ codepage_convert(const __u8 *codepage, volatile __u8 * addr, unsigned long nr)
26{ 26{
27 if (nr-- <= 0) 27 if (nr-- <= 0)
28 return; 28 return;
29 __asm__ __volatile__( 29 asm volatile(
30 " bras 1,1f\n" 30 " bras 1,1f\n"
31 " tr 0(1,%0),0(%2)\n" 31 " tr 0(1,%0),0(%2)\n"
32 "0: tr 0(256,%0),0(%2)\n" 32 "0: tr 0(256,%0),0(%2)\n"
33 " la %0,256(%0)\n" 33 " la %0,256(%0)\n"
34 "1: ahi %1,-256\n" 34 "1: ahi %1,-256\n"
35 " jnm 0b\n" 35 " jnm 0b\n"
36 " ex %1,0(1)" 36 " ex %1,0(1)"
37 : "+&a" (addr), "+&a" (nr) 37 : "+&a" (addr), "+&a" (nr)
38 : "a" (codepage) : "cc", "memory", "1" ); 38 : "a" (codepage) : "cc", "memory", "1");
39} 39}
40 40
41#define ASCEBC(addr,nr) codepage_convert(_ascebc, addr, nr) 41#define ASCEBC(addr,nr) codepage_convert(_ascebc, addr, nr)
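For callers the tr/ex loop stays hidden behind the conversion macros. A
hedged usage sketch (EBCASC is assumed to be the companion macro defined
next to ASCEBC in this header; the buffer contents are illustrative):

	char greeting[8] = "HELLO";

	ASCEBC(greeting, sizeof(greeting));	/* ASCII -> EBCDIC in place */
	EBCASC(greeting, sizeof(greeting));	/* and back */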
diff --git a/include/asm-s390/io.h b/include/asm-s390/io.h
index a6cc27e77007..63c78b9399c4 100644
--- a/include/asm-s390/io.h
+++ b/include/asm-s390/io.h
@@ -27,18 +27,16 @@
27static inline unsigned long virt_to_phys(volatile void * address) 27static inline unsigned long virt_to_phys(volatile void * address)
28{ 28{
29 unsigned long real_address; 29 unsigned long real_address;
30 __asm__ ( 30 asm volatile(
31#ifndef __s390x__ 31#ifndef __s390x__
32 " lra %0,0(%1)\n" 32 " lra %0,0(%1)\n"
33 " jz 0f\n"
34 " sr %0,%0\n"
35#else /* __s390x__ */ 33#else /* __s390x__ */
36 " lrag %0,0(%1)\n" 34 " lrag %0,0(%1)\n"
37 " jz 0f\n"
38 " slgr %0,%0\n"
39#endif /* __s390x__ */ 35#endif /* __s390x__ */
36 " jz 0f\n"
37 " la %0,0\n"
40 "0:" 38 "0:"
41 : "=a" (real_address) : "a" (address) : "cc" ); 39 : "=a" (real_address) : "a" (address) : "cc");
42 return real_address; 40 return real_address;
43} 41}
44 42
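LRA sets condition code 0 only when the address translates; the rewrite can
move the failure path out of the #ifdef because "la %0,0" clears the result
register in both addressing modes. A hedged caller sketch (function name
illustrative):

static int translate_or_fail(volatile void *addr)
{
	unsigned long phys = virt_to_phys(addr);

	if (!phys)
		return -1;	/* lra left cc != 0: no translation */
	return 0;
}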
diff --git a/include/asm-s390/irqflags.h b/include/asm-s390/irqflags.h
index 3b566a5b3cc7..3f26131120b7 100644
--- a/include/asm-s390/irqflags.h
+++ b/include/asm-s390/irqflags.h
@@ -10,43 +10,93 @@
10 10
11#ifdef __KERNEL__ 11#ifdef __KERNEL__
12 12
13#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
14
15/* store then or system mask. */
16#define __raw_local_irq_stosm(__or) \
17({ \
18 unsigned long __mask; \
19 asm volatile( \
20 " stosm %0,%1" \
21 : "=Q" (__mask) : "i" (__or) : "memory"); \
22 __mask; \
23})
24
25/* store then and system mask. */
26#define __raw_local_irq_stnsm(__and) \
27({ \
28 unsigned long __mask; \
29 asm volatile( \
30 " stnsm %0,%1" \
31 : "=Q" (__mask) : "i" (__and) : "memory"); \
32 __mask; \
33})
34
35/* set system mask. */
36#define __raw_local_irq_ssm(__mask) \
37({ \
38 asm volatile("ssm %0" : : "Q" (__mask) : "memory"); \
39})
40
41#else /* __GNUC__ */
42
43/* store then or system mask. */
44#define __raw_local_irq_stosm(__or) \
45({ \
46 unsigned long __mask; \
47 asm volatile( \
48 " stosm 0(%1),%2" \
49 : "=m" (__mask) \
50 : "a" (&__mask), "i" (__or) : "memory"); \
51 __mask; \
52})
53
54/* store then and system mask. */
55#define __raw_local_irq_stnsm(__and) \
56({ \
57 unsigned long __mask; \
58 asm volatile( \
59 " stnsm 0(%1),%2" \
60 : "=m" (__mask) \
61 : "a" (&__mask), "i" (__and) : "memory"); \
62 __mask; \
63})
64
65/* set system mask. */
66#define __raw_local_irq_ssm(__mask) \
67({ \
68 asm volatile( \
69 " ssm 0(%0)" \
70 : : "a" (&__mask), "m" (__mask) : "memory"); \
71})
72
73#endif /* __GNUC__ */
74
13/* interrupt control.. */ 75/* interrupt control.. */
14#define raw_local_irq_enable() ({ \ 76static inline unsigned long raw_local_irq_enable(void)
15 unsigned long __dummy; \ 77{
16 __asm__ __volatile__ ( \ 78 return __raw_local_irq_stosm(0x03);
17 "stosm 0(%1),0x03" \ 79}
18 : "=m" (__dummy) : "a" (&__dummy) : "memory" ); \
19 })
20
21#define raw_local_irq_disable() ({ \
22 unsigned long __flags; \
23 __asm__ __volatile__ ( \
24 "stnsm 0(%1),0xfc" : "=m" (__flags) : "a" (&__flags) ); \
25 __flags; \
26 })
27
28#define raw_local_save_flags(x) \
29do { \
30 typecheck(unsigned long, x); \
31 __asm__ __volatile__("stosm 0(%1),0" : "=m" (x) : "a" (&x), "m" (x) ); \
32} while (0)
33 80
34#define raw_local_irq_restore(x) \ 81static inline unsigned long raw_local_irq_disable(void)
35do { \ 82{
36 typecheck(unsigned long, x); \ 83 return __raw_local_irq_stnsm(0xfc);
37 __asm__ __volatile__("ssm 0(%0)" : : "a" (&x), "m" (x) : "memory"); \ 84}
85
86#define raw_local_save_flags(x) \
87do { \
88 typecheck(unsigned long, x); \
89 (x) = __raw_local_irq_stosm(0x00); \
38} while (0) 90} while (0)
39 91
40#define raw_irqs_disabled() \ 92static inline void raw_local_irq_restore(unsigned long flags)
41({ \ 93{
42 unsigned long flags; \ 94 __raw_local_irq_ssm(flags);
43 raw_local_save_flags(flags); \ 95}
44 !((flags >> __FLAG_SHIFT) & 3); \
45})
46 96
47static inline int raw_irqs_disabled_flags(unsigned long flags) 97static inline int raw_irqs_disabled_flags(unsigned long flags)
48{ 98{
49 return !((flags >> __FLAG_SHIFT) & 3); 99 return !(flags & (3UL << (BITS_PER_LONG - 8)));
50} 100}
51 101
52/* For spinlocks etc */ 102/* For spinlocks etc */
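The rework splits the raw stosm/stnsm/ssm primitives from the
interrupt-control API built on them; with gcc newer than 3.2 the "Q"
constraint lets the mask live in any base+displacement slot, so the
explicit address register disappears. raw_irqs_disabled_flags() now tests
the I/O and external mask bits in the top byte of the PSW mask directly,
which matches the old __FLAG_SHIFT test on both 31- and 64-bit. A hedged
usage sketch of the resulting API:

static void irq_example(void)
{
	unsigned long flags;

	flags = raw_local_irq_disable();	/* stnsm ...,0xfc */
	/* critical section, I/O and external interrupts off */
	raw_local_irq_restore(flags);		/* ssm restores old mask */
}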
diff --git a/include/asm-s390/lowcore.h b/include/asm-s390/lowcore.h
index 18695d10dedf..06583ed0bde7 100644
--- a/include/asm-s390/lowcore.h
+++ b/include/asm-s390/lowcore.h
@@ -359,7 +359,7 @@ extern struct _lowcore *lowcore_ptr[];
359 359
360static inline void set_prefix(__u32 address) 360static inline void set_prefix(__u32 address)
361{ 361{
362 __asm__ __volatile__ ("spx %0" : : "m" (address) : "memory" ); 362 asm volatile("spx %0" : : "m" (address) : "memory");
363} 363}
364 364
365#define __PANIC_MAGIC 0xDEADC0DE 365#define __PANIC_MAGIC 0xDEADC0DE
diff --git a/include/asm-s390/page.h b/include/asm-s390/page.h
index b2628dc5c490..796c400f2b79 100644
--- a/include/asm-s390/page.h
+++ b/include/asm-s390/page.h
@@ -22,89 +22,45 @@
22#include <asm/setup.h> 22#include <asm/setup.h>
23#ifndef __ASSEMBLY__ 23#ifndef __ASSEMBLY__
24 24
25#ifndef __s390x__
26
27static inline void clear_page(void *page)
28{
29 register_pair rp;
30
31 rp.subreg.even = (unsigned long) page;
32 rp.subreg.odd = (unsigned long) 4096;
33 asm volatile (" slr 1,1\n"
34 " mvcl %0,0"
35 : "+&a" (rp) : : "memory", "cc", "1" );
36}
37
38static inline void copy_page(void *to, void *from)
39{
40 if (MACHINE_HAS_MVPG)
41 asm volatile (" sr 0,0\n"
42 " mvpg %0,%1"
43 : : "a" ((void *)(to)), "a" ((void *)(from))
44 : "memory", "cc", "0" );
45 else
46 asm volatile (" mvc 0(256,%0),0(%1)\n"
47 " mvc 256(256,%0),256(%1)\n"
48 " mvc 512(256,%0),512(%1)\n"
49 " mvc 768(256,%0),768(%1)\n"
50 " mvc 1024(256,%0),1024(%1)\n"
51 " mvc 1280(256,%0),1280(%1)\n"
52 " mvc 1536(256,%0),1536(%1)\n"
53 " mvc 1792(256,%0),1792(%1)\n"
54 " mvc 2048(256,%0),2048(%1)\n"
55 " mvc 2304(256,%0),2304(%1)\n"
56 " mvc 2560(256,%0),2560(%1)\n"
57 " mvc 2816(256,%0),2816(%1)\n"
58 " mvc 3072(256,%0),3072(%1)\n"
59 " mvc 3328(256,%0),3328(%1)\n"
60 " mvc 3584(256,%0),3584(%1)\n"
61 " mvc 3840(256,%0),3840(%1)\n"
62 : : "a"((void *)(to)),"a"((void *)(from))
63 : "memory" );
64}
65
66#else /* __s390x__ */
67
68static inline void clear_page(void *page) 25static inline void clear_page(void *page)
69{ 26{
70 asm volatile (" lgr 2,%0\n" 27 register unsigned long reg1 asm ("1") = 0;
71 " lghi 3,4096\n" 28 register void *reg2 asm ("2") = page;
72 " slgr 1,1\n" 29 register unsigned long reg3 asm ("3") = 4096;
73 " mvcl 2,0" 30 asm volatile(
74 : : "a" ((void *) (page)) 31 " mvcl 2,0"
75 : "memory", "cc", "1", "2", "3" ); 32 : "+d" (reg2), "+d" (reg3) : "d" (reg1) : "memory", "cc");
76} 33}
77 34
78static inline void copy_page(void *to, void *from) 35static inline void copy_page(void *to, void *from)
79{ 36{
80 if (MACHINE_HAS_MVPG) 37 if (MACHINE_HAS_MVPG) {
81 asm volatile (" sgr 0,0\n" 38 register unsigned long reg0 asm ("0") = 0;
82 " mvpg %0,%1" 39 asm volatile(
83 : : "a" ((void *)(to)), "a" ((void *)(from)) 40 " mvpg %0,%1"
84 : "memory", "cc", "0" ); 41 : : "a" (to), "a" (from), "d" (reg0)
85 else 42 : "memory", "cc");
86 asm volatile (" mvc 0(256,%0),0(%1)\n" 43 } else
87 " mvc 256(256,%0),256(%1)\n" 44 asm volatile(
88 " mvc 512(256,%0),512(%1)\n" 45 " mvc 0(256,%0),0(%1)\n"
89 " mvc 768(256,%0),768(%1)\n" 46 " mvc 256(256,%0),256(%1)\n"
90 " mvc 1024(256,%0),1024(%1)\n" 47 " mvc 512(256,%0),512(%1)\n"
91 " mvc 1280(256,%0),1280(%1)\n" 48 " mvc 768(256,%0),768(%1)\n"
92 " mvc 1536(256,%0),1536(%1)\n" 49 " mvc 1024(256,%0),1024(%1)\n"
93 " mvc 1792(256,%0),1792(%1)\n" 50 " mvc 1280(256,%0),1280(%1)\n"
94 " mvc 2048(256,%0),2048(%1)\n" 51 " mvc 1536(256,%0),1536(%1)\n"
95 " mvc 2304(256,%0),2304(%1)\n" 52 " mvc 1792(256,%0),1792(%1)\n"
96 " mvc 2560(256,%0),2560(%1)\n" 53 " mvc 2048(256,%0),2048(%1)\n"
97 " mvc 2816(256,%0),2816(%1)\n" 54 " mvc 2304(256,%0),2304(%1)\n"
98 " mvc 3072(256,%0),3072(%1)\n" 55 " mvc 2560(256,%0),2560(%1)\n"
99 " mvc 3328(256,%0),3328(%1)\n" 56 " mvc 2816(256,%0),2816(%1)\n"
100 " mvc 3584(256,%0),3584(%1)\n" 57 " mvc 3072(256,%0),3072(%1)\n"
101 " mvc 3840(256,%0),3840(%1)\n" 58 " mvc 3328(256,%0),3328(%1)\n"
102 : : "a"((void *)(to)),"a"((void *)(from)) 59 " mvc 3584(256,%0),3584(%1)\n"
103 : "memory" ); 60 " mvc 3840(256,%0),3840(%1)\n"
61 : : "a" (to), "a" (from) : "memory");
104} 62}
105 63
106#endif /* __s390x__ */
107
108#define clear_user_page(page, vaddr, pg) clear_page(page) 64#define clear_user_page(page, vaddr, pg) clear_page(page)
109#define copy_user_page(to, from, vaddr, pg) copy_page(to, from) 65#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
110 66
@@ -159,7 +115,7 @@ extern unsigned int default_storage_key;
159static inline void 115static inline void
160page_set_storage_key(unsigned long addr, unsigned int skey) 116page_set_storage_key(unsigned long addr, unsigned int skey)
161{ 117{
162 asm volatile ( "sske %0,%1" : : "d" (skey), "a" (addr) ); 118 asm volatile("sske %0,%1" : : "d" (skey), "a" (addr));
163} 119}
164 120
165static inline unsigned int 121static inline unsigned int
@@ -167,8 +123,7 @@ page_get_storage_key(unsigned long addr)
167{ 123{
168 unsigned int skey; 124 unsigned int skey;
169 125
170 asm volatile ( "iske %0,%1" : "=d" (skey) : "a" (addr), "0" (0) ); 126 asm volatile("iske %0,%1" : "=d" (skey) : "a" (addr), "0" (0));
171
172 return skey; 127 return skey;
173} 128}
174 129
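clear_page() now drives MVCL with the destination pair in GPRs 2/3 and a
zero-length source in 0/1, so the page is filled from the pad byte (zero)
rather than by a clearing loop. A hedged sketch generalizing the idiom to
an arbitrary length (mvcl_zero is not a kernel name):

static inline void mvcl_zero(void *p, unsigned long len)
{
	register unsigned long reg0 asm("0") = 0;	/* src address, never read */
	register unsigned long reg1 asm("1") = 0;	/* src len 0, pad byte 0x00 */
	register unsigned long reg2 asm("2") = (unsigned long) p;
	register unsigned long reg3 asm("3") = len;

	asm volatile(
		"	mvcl	2,0"
		: "+d" (reg2), "+d" (reg3)
		: "d" (reg0), "d" (reg1)
		: "memory", "cc");
}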
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index e965309fedac..83425cdefc91 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -554,9 +554,10 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
554 /* ipte in zarch mode can do the math */ 554 /* ipte in zarch mode can do the math */
555 pte_t *pto = ptep; 555 pte_t *pto = ptep;
556#endif 556#endif
557 asm volatile ("ipte %2,%3" 557 asm volatile(
558 : "=m" (*ptep) : "m" (*ptep), 558 " ipte %2,%3"
559 "a" (pto), "a" (address) ); 559 : "=m" (*ptep) : "m" (*ptep),
560 "a" (pto), "a" (address));
560 } 561 }
561 pte_val(*ptep) = _PAGE_TYPE_EMPTY; 562 pte_val(*ptep) = _PAGE_TYPE_EMPTY;
562} 563}
@@ -609,16 +610,17 @@ ptep_establish(struct vm_area_struct *vma,
609/* 610/*
610 * Test and clear referenced bit in storage key. 611 * Test and clear referenced bit in storage key.
611 */ 612 */
612#define page_test_and_clear_young(page) \ 613#define page_test_and_clear_young(page) \
613({ \ 614({ \
614 struct page *__page = (page); \ 615 struct page *__page = (page); \
615 unsigned long __physpage = __pa((__page-mem_map) << PAGE_SHIFT); \ 616 unsigned long __physpage = __pa((__page-mem_map) << PAGE_SHIFT);\
616 int __ccode; \ 617 int __ccode; \
617 asm volatile ("rrbe 0,%1\n\t" \ 618 asm volatile( \
618 "ipm %0\n\t" \ 619 " rrbe 0,%1\n" \
619 "srl %0,28\n\t" \ 620 " ipm %0\n" \
620 : "=d" (__ccode) : "a" (__physpage) : "cc" ); \ 621 " srl %0,28\n" \
621 (__ccode & 2); \ 622 : "=d" (__ccode) : "a" (__physpage) : "cc"); \
623 (__ccode & 2); \
622}) 624})
623 625
624/* 626/*
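The rrbe/ipm/srl sequence in page_test_and_clear_young is the standard way
to get a condition code back into C: ipm inserts the cc into bits 2-3 of
the result register and srl 28 shifts it down to an integer 0..3. A hedged
sketch of the idiom in isolation (function name illustrative):

static inline int rrbe_cc(unsigned long physpage)
{
	int cc;

	asm volatile(
		"	rrbe	0,%1\n"		/* reset reference bit, set cc */
		"	ipm	%0\n"		/* cc -> bits 2-3 of %0 */
		"	srl	%0,28"		/* cc -> 0..3 */
		: "=d" (cc) : "a" (physpage) : "cc");
	return cc & 2;			/* nonzero iff page was referenced */
}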
diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h
index 578c2209fa76..cbbedc63ba25 100644
--- a/include/asm-s390/processor.h
+++ b/include/asm-s390/processor.h
@@ -13,7 +13,6 @@
13#ifndef __ASM_S390_PROCESSOR_H 13#ifndef __ASM_S390_PROCESSOR_H
14#define __ASM_S390_PROCESSOR_H 14#define __ASM_S390_PROCESSOR_H
15 15
16#include <asm/page.h>
17#include <asm/ptrace.h> 16#include <asm/ptrace.h>
18 17
19#ifdef __KERNEL__ 18#ifdef __KERNEL__
@@ -21,7 +20,7 @@
21 * Default implementation of macro that returns current 20 * Default implementation of macro that returns current
22 * instruction pointer ("program counter"). 21 * instruction pointer ("program counter").
23 */ 22 */
24#define current_text_addr() ({ void *pc; __asm__("basr %0,0":"=a"(pc)); pc; }) 23#define current_text_addr() ({ void *pc; asm("basr %0,0" : "=a" (pc)); pc; })
25 24
26/* 25/*
27 * CPU type and hardware bug flags. Kept separately for each CPU. 26 * CPU type and hardware bug flags. Kept separately for each CPU.
@@ -202,7 +201,7 @@ unsigned long get_wchan(struct task_struct *p);
202static inline void cpu_relax(void) 201static inline void cpu_relax(void)
203{ 202{
204 if (MACHINE_HAS_DIAG44) 203 if (MACHINE_HAS_DIAG44)
205 asm volatile ("diag 0,0,68" : : : "memory"); 204 asm volatile("diag 0,0,68" : : : "memory");
206 else 205 else
207 barrier(); 206 barrier();
208} 207}
@@ -213,9 +212,9 @@ static inline void cpu_relax(void)
213static inline void __load_psw(psw_t psw) 212static inline void __load_psw(psw_t psw)
214{ 213{
215#ifndef __s390x__ 214#ifndef __s390x__
216 asm volatile ("lpsw 0(%0)" : : "a" (&psw), "m" (psw) : "cc" ); 215 asm volatile("lpsw 0(%0)" : : "a" (&psw), "m" (psw) : "cc");
217#else 216#else
218 asm volatile ("lpswe 0(%0)" : : "a" (&psw), "m" (psw) : "cc" ); 217 asm volatile("lpswe 0(%0)" : : "a" (&psw), "m" (psw) : "cc");
219#endif 218#endif
220} 219}
221 220
@@ -232,20 +231,20 @@ static inline void __load_psw_mask (unsigned long mask)
232 psw.mask = mask; 231 psw.mask = mask;
233 232
234#ifndef __s390x__ 233#ifndef __s390x__
235 asm volatile ( 234 asm volatile(
236 " basr %0,0\n" 235 " basr %0,0\n"
237 "0: ahi %0,1f-0b\n" 236 "0: ahi %0,1f-0b\n"
238 " st %0,4(%1)\n" 237 " st %0,4(%1)\n"
239 " lpsw 0(%1)\n" 238 " lpsw 0(%1)\n"
240 "1:" 239 "1:"
241 : "=&d" (addr) : "a" (&psw), "m" (psw) : "memory", "cc" ); 240 : "=&d" (addr) : "a" (&psw), "m" (psw) : "memory", "cc");
242#else /* __s390x__ */ 241#else /* __s390x__ */
243 asm volatile ( 242 asm volatile(
244 " larl %0,1f\n" 243 " larl %0,1f\n"
245 " stg %0,8(%1)\n" 244 " stg %0,8(%1)\n"
246 " lpswe 0(%1)\n" 245 " lpswe 0(%1)\n"
247 "1:" 246 "1:"
248 : "=&d" (addr) : "a" (&psw), "m" (psw) : "memory", "cc" ); 247 : "=&d" (addr) : "a" (&psw), "m" (psw) : "memory", "cc");
249#endif /* __s390x__ */ 248#endif /* __s390x__ */
250} 249}
251 250
@@ -274,56 +273,57 @@ static inline void disabled_wait(unsigned long code)
274 * the processor is dead afterwards 273 * the processor is dead afterwards
275 */ 274 */
276#ifndef __s390x__ 275#ifndef __s390x__
277 asm volatile (" stctl 0,0,0(%2)\n" 276 asm volatile(
278 " ni 0(%2),0xef\n" /* switch off protection */ 277 " stctl 0,0,0(%2)\n"
279 " lctl 0,0,0(%2)\n" 278 " ni 0(%2),0xef\n" /* switch off protection */
280 " stpt 0xd8\n" /* store timer */ 279 " lctl 0,0,0(%2)\n"
281 " stckc 0xe0\n" /* store clock comparator */ 280 " stpt 0xd8\n" /* store timer */
282 " stpx 0x108\n" /* store prefix register */ 281 " stckc 0xe0\n" /* store clock comparator */
283 " stam 0,15,0x120\n" /* store access registers */ 282 " stpx 0x108\n" /* store prefix register */
284 " std 0,0x160\n" /* store f0 */ 283 " stam 0,15,0x120\n" /* store access registers */
285 " std 2,0x168\n" /* store f2 */ 284 " std 0,0x160\n" /* store f0 */
286 " std 4,0x170\n" /* store f4 */ 285 " std 2,0x168\n" /* store f2 */
287 " std 6,0x178\n" /* store f6 */ 286 " std 4,0x170\n" /* store f4 */
288 " stm 0,15,0x180\n" /* store general registers */ 287 " std 6,0x178\n" /* store f6 */
289 " stctl 0,15,0x1c0\n" /* store control registers */ 288 " stm 0,15,0x180\n" /* store general registers */
290 " oi 0x1c0,0x10\n" /* fake protection bit */ 289 " stctl 0,15,0x1c0\n" /* store control registers */
291 " lpsw 0(%1)" 290 " oi 0x1c0,0x10\n" /* fake protection bit */
292 : "=m" (ctl_buf) 291 " lpsw 0(%1)"
293 : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc" ); 292 : "=m" (ctl_buf)
293 : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc");
294#else /* __s390x__ */ 294#else /* __s390x__ */
295 asm volatile (" stctg 0,0,0(%2)\n" 295 asm volatile(
296 " ni 4(%2),0xef\n" /* switch off protection */ 296 " stctg 0,0,0(%2)\n"
297 " lctlg 0,0,0(%2)\n" 297 " ni 4(%2),0xef\n" /* switch off protection */
298 " lghi 1,0x1000\n" 298 " lctlg 0,0,0(%2)\n"
299 " stpt 0x328(1)\n" /* store timer */ 299 " lghi 1,0x1000\n"
300 " stckc 0x330(1)\n" /* store clock comparator */ 300 " stpt 0x328(1)\n" /* store timer */
301 " stpx 0x318(1)\n" /* store prefix register */ 301 " stckc 0x330(1)\n" /* store clock comparator */
302 " stam 0,15,0x340(1)\n" /* store access registers */ 302 " stpx 0x318(1)\n" /* store prefix register */
303 " stfpc 0x31c(1)\n" /* store fpu control */ 303 " stam 0,15,0x340(1)\n"/* store access registers */
304 " std 0,0x200(1)\n" /* store f0 */ 304 " stfpc 0x31c(1)\n" /* store fpu control */
305 " std 1,0x208(1)\n" /* store f1 */ 305 " std 0,0x200(1)\n" /* store f0 */
306 " std 2,0x210(1)\n" /* store f2 */ 306 " std 1,0x208(1)\n" /* store f1 */
307 " std 3,0x218(1)\n" /* store f3 */ 307 " std 2,0x210(1)\n" /* store f2 */
308 " std 4,0x220(1)\n" /* store f4 */ 308 " std 3,0x218(1)\n" /* store f3 */
309 " std 5,0x228(1)\n" /* store f5 */ 309 " std 4,0x220(1)\n" /* store f4 */
310 " std 6,0x230(1)\n" /* store f6 */ 310 " std 5,0x228(1)\n" /* store f5 */
311 " std 7,0x238(1)\n" /* store f7 */ 311 " std 6,0x230(1)\n" /* store f6 */
312 " std 8,0x240(1)\n" /* store f8 */ 312 " std 7,0x238(1)\n" /* store f7 */
313 " std 9,0x248(1)\n" /* store f9 */ 313 " std 8,0x240(1)\n" /* store f8 */
314 " std 10,0x250(1)\n" /* store f10 */ 314 " std 9,0x248(1)\n" /* store f9 */
315 " std 11,0x258(1)\n" /* store f11 */ 315 " std 10,0x250(1)\n" /* store f10 */
316 " std 12,0x260(1)\n" /* store f12 */ 316 " std 11,0x258(1)\n" /* store f11 */
317 " std 13,0x268(1)\n" /* store f13 */ 317 " std 12,0x260(1)\n" /* store f12 */
318 " std 14,0x270(1)\n" /* store f14 */ 318 " std 13,0x268(1)\n" /* store f13 */
319 " std 15,0x278(1)\n" /* store f15 */ 319 " std 14,0x270(1)\n" /* store f14 */
320 " stmg 0,15,0x280(1)\n" /* store general registers */ 320 " std 15,0x278(1)\n" /* store f15 */
321 " stctg 0,15,0x380(1)\n" /* store control registers */ 321 " stmg 0,15,0x280(1)\n"/* store general registers */
322 " oi 0x384(1),0x10\n" /* fake protection bit */ 322 " stctg 0,15,0x380(1)\n"/* store control registers */
323 " lpswe 0(%1)" 323 " oi 0x384(1),0x10\n"/* fake protection bit */
324 : "=m" (ctl_buf) 324 " lpswe 0(%1)"
325 : "a" (&dw_psw), "a" (&ctl_buf), 325 : "=m" (ctl_buf)
326 "m" (dw_psw) : "cc", "0", "1"); 326 : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc", "0");
327#endif /* __s390x__ */ 327#endif /* __s390x__ */
328} 328}
329 329
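disabled_wait() above is the kind of caller __load_psw() serves: build a
PSW with the wait bit set and the interrupt mask bits clear, then load it
so the CPU stops with the wait-state code visible to the operator. A
hedged sketch (the PSW_* constants are the ones this header family
already defines; their exact combination here is an assumption):

static void halt_with_code(unsigned long code)
{
	psw_t psw;

	psw.mask = PSW_BASE_BITS | PSW_MASK_WAIT;	/* wait, irqs off */
	psw.addr = code;				/* wait-state code */
	__load_psw(psw);
}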
diff --git a/include/asm-s390/ptrace.h b/include/asm-s390/ptrace.h
index 4d75d77b0f99..8d2bf65b0b64 100644
--- a/include/asm-s390/ptrace.h
+++ b/include/asm-s390/ptrace.h
@@ -479,7 +479,7 @@ extern void show_regs(struct pt_regs * regs);
479static inline void 479static inline void
480psw_set_key(unsigned int key) 480psw_set_key(unsigned int key)
481{ 481{
482 asm volatile ( "spka 0(%0)" : : "d" (key) ); 482 asm volatile("spka 0(%0)" : : "d" (key));
483} 483}
484 484
485#endif /* __ASSEMBLY__ */ 485#endif /* __ASSEMBLY__ */
diff --git a/include/asm-s390/rwsem.h b/include/asm-s390/rwsem.h
index 13ec16965150..90f4eccaa290 100644
--- a/include/asm-s390/rwsem.h
+++ b/include/asm-s390/rwsem.h
@@ -122,23 +122,23 @@ static inline void __down_read(struct rw_semaphore *sem)
122{ 122{
123 signed long old, new; 123 signed long old, new;
124 124
125 __asm__ __volatile__( 125 asm volatile(
126#ifndef __s390x__ 126#ifndef __s390x__
127 " l %0,0(%3)\n" 127 " l %0,0(%3)\n"
128 "0: lr %1,%0\n" 128 "0: lr %1,%0\n"
129 " ahi %1,%5\n" 129 " ahi %1,%5\n"
130 " cs %0,%1,0(%3)\n" 130 " cs %0,%1,0(%3)\n"
131 " jl 0b" 131 " jl 0b"
132#else /* __s390x__ */ 132#else /* __s390x__ */
133 " lg %0,0(%3)\n" 133 " lg %0,0(%3)\n"
134 "0: lgr %1,%0\n" 134 "0: lgr %1,%0\n"
135 " aghi %1,%5\n" 135 " aghi %1,%5\n"
136 " csg %0,%1,0(%3)\n" 136 " csg %0,%1,0(%3)\n"
137 " jl 0b" 137 " jl 0b"
138#endif /* __s390x__ */ 138#endif /* __s390x__ */
139 : "=&d" (old), "=&d" (new), "=m" (sem->count) 139 : "=&d" (old), "=&d" (new), "=m" (sem->count)
140 : "a" (&sem->count), "m" (sem->count), 140 : "a" (&sem->count), "m" (sem->count),
141 "i" (RWSEM_ACTIVE_READ_BIAS) : "cc", "memory" ); 141 "i" (RWSEM_ACTIVE_READ_BIAS) : "cc", "memory");
142 if (old < 0) 142 if (old < 0)
143 rwsem_down_read_failed(sem); 143 rwsem_down_read_failed(sem);
144} 144}
@@ -150,27 +150,27 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
150{ 150{
151 signed long old, new; 151 signed long old, new;
152 152
153 __asm__ __volatile__( 153 asm volatile(
154#ifndef __s390x__ 154#ifndef __s390x__
155 " l %0,0(%3)\n" 155 " l %0,0(%3)\n"
156 "0: ltr %1,%0\n" 156 "0: ltr %1,%0\n"
157 " jm 1f\n" 157 " jm 1f\n"
158 " ahi %1,%5\n" 158 " ahi %1,%5\n"
159 " cs %0,%1,0(%3)\n" 159 " cs %0,%1,0(%3)\n"
160 " jl 0b\n" 160 " jl 0b\n"
161 "1:" 161 "1:"
162#else /* __s390x__ */ 162#else /* __s390x__ */
163 " lg %0,0(%3)\n" 163 " lg %0,0(%3)\n"
164 "0: ltgr %1,%0\n" 164 "0: ltgr %1,%0\n"
165 " jm 1f\n" 165 " jm 1f\n"
166 " aghi %1,%5\n" 166 " aghi %1,%5\n"
167 " csg %0,%1,0(%3)\n" 167 " csg %0,%1,0(%3)\n"
168 " jl 0b\n" 168 " jl 0b\n"
169 "1:" 169 "1:"
170#endif /* __s390x__ */ 170#endif /* __s390x__ */
171 : "=&d" (old), "=&d" (new), "=m" (sem->count) 171 : "=&d" (old), "=&d" (new), "=m" (sem->count)
172 : "a" (&sem->count), "m" (sem->count), 172 : "a" (&sem->count), "m" (sem->count),
173 "i" (RWSEM_ACTIVE_READ_BIAS) : "cc", "memory" ); 173 "i" (RWSEM_ACTIVE_READ_BIAS) : "cc", "memory");
174 return old >= 0 ? 1 : 0; 174 return old >= 0 ? 1 : 0;
175} 175}
176 176
@@ -182,23 +182,23 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
182 signed long old, new, tmp; 182 signed long old, new, tmp;
183 183
184 tmp = RWSEM_ACTIVE_WRITE_BIAS; 184 tmp = RWSEM_ACTIVE_WRITE_BIAS;
185 __asm__ __volatile__( 185 asm volatile(
186#ifndef __s390x__ 186#ifndef __s390x__
187 " l %0,0(%3)\n" 187 " l %0,0(%3)\n"
188 "0: lr %1,%0\n" 188 "0: lr %1,%0\n"
189 " a %1,%5\n" 189 " a %1,%5\n"
190 " cs %0,%1,0(%3)\n" 190 " cs %0,%1,0(%3)\n"
191 " jl 0b" 191 " jl 0b"
192#else /* __s390x__ */ 192#else /* __s390x__ */
193 " lg %0,0(%3)\n" 193 " lg %0,0(%3)\n"
194 "0: lgr %1,%0\n" 194 "0: lgr %1,%0\n"
195 " ag %1,%5\n" 195 " ag %1,%5\n"
196 " csg %0,%1,0(%3)\n" 196 " csg %0,%1,0(%3)\n"
197 " jl 0b" 197 " jl 0b"
198#endif /* __s390x__ */ 198#endif /* __s390x__ */
199 : "=&d" (old), "=&d" (new), "=m" (sem->count) 199 : "=&d" (old), "=&d" (new), "=m" (sem->count)
200 : "a" (&sem->count), "m" (sem->count), "m" (tmp) 200 : "a" (&sem->count), "m" (sem->count), "m" (tmp)
201 : "cc", "memory" ); 201 : "cc", "memory");
202 if (old != 0) 202 if (old != 0)
203 rwsem_down_write_failed(sem); 203 rwsem_down_write_failed(sem);
204} 204}
@@ -215,24 +215,24 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
215{ 215{
216 signed long old; 216 signed long old;
217 217
218 __asm__ __volatile__( 218 asm volatile(
219#ifndef __s390x__ 219#ifndef __s390x__
220 " l %0,0(%2)\n" 220 " l %0,0(%2)\n"
221 "0: ltr %0,%0\n" 221 "0: ltr %0,%0\n"
222 " jnz 1f\n" 222 " jnz 1f\n"
223 " cs %0,%4,0(%2)\n" 223 " cs %0,%4,0(%2)\n"
224 " jl 0b\n" 224 " jl 0b\n"
225#else /* __s390x__ */ 225#else /* __s390x__ */
226 " lg %0,0(%2)\n" 226 " lg %0,0(%2)\n"
227 "0: ltgr %0,%0\n" 227 "0: ltgr %0,%0\n"
228 " jnz 1f\n" 228 " jnz 1f\n"
229 " csg %0,%4,0(%2)\n" 229 " csg %0,%4,0(%2)\n"
230 " jl 0b\n" 230 " jl 0b\n"
231#endif /* __s390x__ */ 231#endif /* __s390x__ */
232 "1:" 232 "1:"
233 : "=&d" (old), "=m" (sem->count) 233 : "=&d" (old), "=m" (sem->count)
234 : "a" (&sem->count), "m" (sem->count), 234 : "a" (&sem->count), "m" (sem->count),
235 "d" (RWSEM_ACTIVE_WRITE_BIAS) : "cc", "memory" ); 235 "d" (RWSEM_ACTIVE_WRITE_BIAS) : "cc", "memory");
236 return (old == RWSEM_UNLOCKED_VALUE) ? 1 : 0; 236 return (old == RWSEM_UNLOCKED_VALUE) ? 1 : 0;
237} 237}
238 238
@@ -243,24 +243,24 @@ static inline void __up_read(struct rw_semaphore *sem)
243{ 243{
244 signed long old, new; 244 signed long old, new;
245 245
246 __asm__ __volatile__( 246 asm volatile(
247#ifndef __s390x__ 247#ifndef __s390x__
248 " l %0,0(%3)\n" 248 " l %0,0(%3)\n"
249 "0: lr %1,%0\n" 249 "0: lr %1,%0\n"
250 " ahi %1,%5\n" 250 " ahi %1,%5\n"
251 " cs %0,%1,0(%3)\n" 251 " cs %0,%1,0(%3)\n"
252 " jl 0b" 252 " jl 0b"
253#else /* __s390x__ */ 253#else /* __s390x__ */
254 " lg %0,0(%3)\n" 254 " lg %0,0(%3)\n"
255 "0: lgr %1,%0\n" 255 "0: lgr %1,%0\n"
256 " aghi %1,%5\n" 256 " aghi %1,%5\n"
257 " csg %0,%1,0(%3)\n" 257 " csg %0,%1,0(%3)\n"
258 " jl 0b" 258 " jl 0b"
259#endif /* __s390x__ */ 259#endif /* __s390x__ */
260 : "=&d" (old), "=&d" (new), "=m" (sem->count) 260 : "=&d" (old), "=&d" (new), "=m" (sem->count)
261 : "a" (&sem->count), "m" (sem->count), 261 : "a" (&sem->count), "m" (sem->count),
262 "i" (-RWSEM_ACTIVE_READ_BIAS) 262 "i" (-RWSEM_ACTIVE_READ_BIAS)
263 : "cc", "memory" ); 263 : "cc", "memory");
264 if (new < 0) 264 if (new < 0)
265 if ((new & RWSEM_ACTIVE_MASK) == 0) 265 if ((new & RWSEM_ACTIVE_MASK) == 0)
266 rwsem_wake(sem); 266 rwsem_wake(sem);
@@ -274,23 +274,23 @@ static inline void __up_write(struct rw_semaphore *sem)
274 signed long old, new, tmp; 274 signed long old, new, tmp;
275 275
276 tmp = -RWSEM_ACTIVE_WRITE_BIAS; 276 tmp = -RWSEM_ACTIVE_WRITE_BIAS;
277 __asm__ __volatile__( 277 asm volatile(
278#ifndef __s390x__ 278#ifndef __s390x__
279 " l %0,0(%3)\n" 279 " l %0,0(%3)\n"
280 "0: lr %1,%0\n" 280 "0: lr %1,%0\n"
281 " a %1,%5\n" 281 " a %1,%5\n"
282 " cs %0,%1,0(%3)\n" 282 " cs %0,%1,0(%3)\n"
283 " jl 0b" 283 " jl 0b"
284#else /* __s390x__ */ 284#else /* __s390x__ */
285 " lg %0,0(%3)\n" 285 " lg %0,0(%3)\n"
286 "0: lgr %1,%0\n" 286 "0: lgr %1,%0\n"
287 " ag %1,%5\n" 287 " ag %1,%5\n"
288 " csg %0,%1,0(%3)\n" 288 " csg %0,%1,0(%3)\n"
289 " jl 0b" 289 " jl 0b"
290#endif /* __s390x__ */ 290#endif /* __s390x__ */
291 : "=&d" (old), "=&d" (new), "=m" (sem->count) 291 : "=&d" (old), "=&d" (new), "=m" (sem->count)
292 : "a" (&sem->count), "m" (sem->count), "m" (tmp) 292 : "a" (&sem->count), "m" (sem->count), "m" (tmp)
293 : "cc", "memory" ); 293 : "cc", "memory");
294 if (new < 0) 294 if (new < 0)
295 if ((new & RWSEM_ACTIVE_MASK) == 0) 295 if ((new & RWSEM_ACTIVE_MASK) == 0)
296 rwsem_wake(sem); 296 rwsem_wake(sem);
@@ -304,23 +304,23 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
304 signed long old, new, tmp; 304 signed long old, new, tmp;
305 305
306 tmp = -RWSEM_WAITING_BIAS; 306 tmp = -RWSEM_WAITING_BIAS;
307 __asm__ __volatile__( 307 asm volatile(
308#ifndef __s390x__ 308#ifndef __s390x__
309 " l %0,0(%3)\n" 309 " l %0,0(%3)\n"
310 "0: lr %1,%0\n" 310 "0: lr %1,%0\n"
311 " a %1,%5\n" 311 " a %1,%5\n"
312 " cs %0,%1,0(%3)\n" 312 " cs %0,%1,0(%3)\n"
313 " jl 0b" 313 " jl 0b"
314#else /* __s390x__ */ 314#else /* __s390x__ */
315 " lg %0,0(%3)\n" 315 " lg %0,0(%3)\n"
316 "0: lgr %1,%0\n" 316 "0: lgr %1,%0\n"
317 " ag %1,%5\n" 317 " ag %1,%5\n"
318 " csg %0,%1,0(%3)\n" 318 " csg %0,%1,0(%3)\n"
319 " jl 0b" 319 " jl 0b"
320#endif /* __s390x__ */ 320#endif /* __s390x__ */
321 : "=&d" (old), "=&d" (new), "=m" (sem->count) 321 : "=&d" (old), "=&d" (new), "=m" (sem->count)
322 : "a" (&sem->count), "m" (sem->count), "m" (tmp) 322 : "a" (&sem->count), "m" (sem->count), "m" (tmp)
323 : "cc", "memory" ); 323 : "cc", "memory");
324 if (new > 1) 324 if (new > 1)
325 rwsem_downgrade_wake(sem); 325 rwsem_downgrade_wake(sem);
326} 326}
@@ -332,23 +332,23 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
332{ 332{
333 signed long old, new; 333 signed long old, new;
334 334
335 __asm__ __volatile__( 335 asm volatile(
336#ifndef __s390x__ 336#ifndef __s390x__
337 " l %0,0(%3)\n" 337 " l %0,0(%3)\n"
338 "0: lr %1,%0\n" 338 "0: lr %1,%0\n"
339 " ar %1,%5\n" 339 " ar %1,%5\n"
340 " cs %0,%1,0(%3)\n" 340 " cs %0,%1,0(%3)\n"
341 " jl 0b" 341 " jl 0b"
342#else /* __s390x__ */ 342#else /* __s390x__ */
343 " lg %0,0(%3)\n" 343 " lg %0,0(%3)\n"
344 "0: lgr %1,%0\n" 344 "0: lgr %1,%0\n"
345 " agr %1,%5\n" 345 " agr %1,%5\n"
346 " csg %0,%1,0(%3)\n" 346 " csg %0,%1,0(%3)\n"
347 " jl 0b" 347 " jl 0b"
348#endif /* __s390x__ */ 348#endif /* __s390x__ */
349 : "=&d" (old), "=&d" (new), "=m" (sem->count) 349 : "=&d" (old), "=&d" (new), "=m" (sem->count)
350 : "a" (&sem->count), "m" (sem->count), "d" (delta) 350 : "a" (&sem->count), "m" (sem->count), "d" (delta)
351 : "cc", "memory" ); 351 : "cc", "memory");
352} 352}
353 353
354/* 354/*
@@ -358,23 +358,23 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
358{ 358{
359 signed long old, new; 359 signed long old, new;
360 360
361 __asm__ __volatile__( 361 asm volatile(
362#ifndef __s390x__ 362#ifndef __s390x__
363 " l %0,0(%3)\n" 363 " l %0,0(%3)\n"
364 "0: lr %1,%0\n" 364 "0: lr %1,%0\n"
365 " ar %1,%5\n" 365 " ar %1,%5\n"
366 " cs %0,%1,0(%3)\n" 366 " cs %0,%1,0(%3)\n"
367 " jl 0b" 367 " jl 0b"
368#else /* __s390x__ */ 368#else /* __s390x__ */
369 " lg %0,0(%3)\n" 369 " lg %0,0(%3)\n"
370 "0: lgr %1,%0\n" 370 "0: lgr %1,%0\n"
371 " agr %1,%5\n" 371 " agr %1,%5\n"
372 " csg %0,%1,0(%3)\n" 372 " csg %0,%1,0(%3)\n"
373 " jl 0b" 373 " jl 0b"
374#endif /* __s390x__ */ 374#endif /* __s390x__ */
375 : "=&d" (old), "=&d" (new), "=m" (sem->count) 375 : "=&d" (old), "=&d" (new), "=m" (sem->count)
376 : "a" (&sem->count), "m" (sem->count), "d" (delta) 376 : "a" (&sem->count), "m" (sem->count), "d" (delta)
377 : "cc", "memory" ); 377 : "cc", "memory");
378 return new; 378 return new;
379} 379}
380 380
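Every rwsem primitive above is the same compare-and-swap loop with a
different arithmetic step in the middle: reload on cc 1 until the cs/csg
succeeds. A hedged 31-bit sketch of the shared skeleton (function name
illustrative):

static inline int cs_add(int *counter, int delta)
{
	int old, new;

	asm volatile(
		"	l	%0,0(%3)\n"	/* old = *counter */
		"0:	lr	%1,%0\n"
		"	ar	%1,%4\n"	/* new = old + delta */
		"	cs	%0,%1,0(%3)\n"	/* swap if still == old */
		"	jl	0b"		/* cc 1: raced, retry */
		: "=&d" (old), "=&d" (new), "=m" (*counter)
		: "a" (counter), "d" (delta), "m" (*counter)
		: "cc", "memory");
	return new;
}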
diff --git a/include/asm-s390/semaphore.h b/include/asm-s390/semaphore.h
index 32cdc69f39f4..dbce058aefa9 100644
--- a/include/asm-s390/semaphore.h
+++ b/include/asm-s390/semaphore.h
@@ -85,17 +85,17 @@ static inline int down_trylock(struct semaphore * sem)
85 * sem->count.counter = --new_val; 85 * sem->count.counter = --new_val;
86 * In the ppc code this is called atomic_dec_if_positive. 86 * In the ppc code this is called atomic_dec_if_positive.
87 */ 87 */
88 __asm__ __volatile__ ( 88 asm volatile(
89 " l %0,0(%3)\n" 89 " l %0,0(%3)\n"
90 "0: ltr %1,%0\n" 90 "0: ltr %1,%0\n"
91 " jle 1f\n" 91 " jle 1f\n"
92 " ahi %1,-1\n" 92 " ahi %1,-1\n"
93 " cs %0,%1,0(%3)\n" 93 " cs %0,%1,0(%3)\n"
94 " jl 0b\n" 94 " jl 0b\n"
95 "1:" 95 "1:"
96 : "=&d" (old_val), "=&d" (new_val), "=m" (sem->count.counter) 96 : "=&d" (old_val), "=&d" (new_val), "=m" (sem->count.counter)
97 : "a" (&sem->count.counter), "m" (sem->count.counter) 97 : "a" (&sem->count.counter), "m" (sem->count.counter)
98 : "cc", "memory" ); 98 : "cc", "memory");
99 return old_val <= 0; 99 return old_val <= 0;
100} 100}
101 101
diff --git a/include/asm-s390/sfp-machine.h b/include/asm-s390/sfp-machine.h
index de69dfa46fbb..8ca8c77b2d04 100644
--- a/include/asm-s390/sfp-machine.h
+++ b/include/asm-s390/sfp-machine.h
@@ -76,21 +76,23 @@
76 unsigned int __r2 = (x2) + (y2); \ 76 unsigned int __r2 = (x2) + (y2); \
77 unsigned int __r1 = (x1); \ 77 unsigned int __r1 = (x1); \
78 unsigned int __r0 = (x0); \ 78 unsigned int __r0 = (x0); \
79 __asm__ (" alr %2,%3\n" \ 79 asm volatile( \
80 " brc 12,0f\n" \ 80 " alr %2,%3\n" \
81 " lhi 0,1\n" \ 81 " brc 12,0f\n" \
82 " alr %1,0\n" \ 82 " lhi 0,1\n" \
83 " brc 12,0f\n" \ 83 " alr %1,0\n" \
84 " alr %0,0\n" \ 84 " brc 12,0f\n" \
85 "0:" \ 85 " alr %0,0\n" \
86 : "+&d" (__r2), "+&d" (__r1), "+&d" (__r0) \ 86 "0:" \
87 : "d" (y0), "i" (1) : "cc", "0" ); \ 87 : "+&d" (__r2), "+&d" (__r1), "+&d" (__r0) \
88 __asm__ (" alr %1,%2\n" \ 88 : "d" (y0), "i" (1) : "cc", "0" ); \
89 " brc 12,0f\n" \ 89 asm volatile( \
90 " ahi %0,1\n" \ 90 " alr %1,%2\n" \
91 "0:" \ 91 " brc 12,0f\n" \
92 : "+&d" (__r2), "+&d" (__r1) \ 92 " ahi %0,1\n" \
93 : "d" (y1) : "cc" ); \ 93 "0:" \
94 : "+&d" (__r2), "+&d" (__r1) \
95 : "d" (y1) : "cc"); \
94 (r2) = __r2; \ 96 (r2) = __r2; \
95 (r1) = __r1; \ 97 (r1) = __r1; \
96 (r0) = __r0; \ 98 (r0) = __r0; \
@@ -100,21 +102,23 @@
100 unsigned int __r2 = (x2) - (y2); \ 102 unsigned int __r2 = (x2) - (y2); \
101 unsigned int __r1 = (x1); \ 103 unsigned int __r1 = (x1); \
102 unsigned int __r0 = (x0); \ 104 unsigned int __r0 = (x0); \
103 __asm__ (" slr %2,%3\n" \ 105 asm volatile( \
104 " brc 3,0f\n" \ 106 " slr %2,%3\n" \
105 " lhi 0,1\n" \ 107 " brc 3,0f\n" \
106 " slr %1,0\n" \ 108 " lhi 0,1\n" \
107 " brc 3,0f\n" \ 109 " slr %1,0\n" \
108 " slr %0,0\n" \ 110 " brc 3,0f\n" \
109 "0:" \ 111 " slr %0,0\n" \
110 : "+&d" (__r2), "+&d" (__r1), "+&d" (__r0) \ 112 "0:" \
111 : "d" (y0) : "cc", "0" ); \ 113 : "+&d" (__r2), "+&d" (__r1), "+&d" (__r0) \
112 __asm__ (" slr %1,%2\n" \ 114 : "d" (y0) : "cc", "0"); \
113 " brc 3,0f\n" \ 115 asm volatile( \
114 " ahi %0,-1\n" \ 116 " slr %1,%2\n" \
115 "0:" \ 117 " brc 3,0f\n" \
116 : "+&d" (__r2), "+&d" (__r1) \ 118 " ahi %0,-1\n" \
117 : "d" (y1) : "cc" ); \ 119 "0:" \
120 : "+&d" (__r2), "+&d" (__r1) \
121 : "d" (y1) : "cc"); \
118 (r2) = __r2; \ 122 (r2) = __r2; \
119 (r1) = __r1; \ 123 (r1) = __r1; \
120 (r0) = __r0; \ 124 (r0) = __r0; \
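The multiword add/sub macros lean on one idiom: alr sets cc 2/3 when a
carry comes out (for slr, cc 2/3 means no borrow), and the brc mask picks
the cases where no propagation is needed. A hedged sketch of the same
idiom for a plain two-word add (macro name illustrative):

#define add_64(rh, rl, xh, xl, yh, yl) do {		\
	unsigned int __h = (xh), __l = (xl);		\
	asm volatile(					\
		"	alr	%1,%3\n"		\
		"	brc	12,0f\n"	/* cc 0|1: no carry */	\
		"	ahi	%0,1\n"		/* propagate carry */	\
		"0:	alr	%0,%2"			\
		: "+&d" (__h), "+&d" (__l)		\
		: "d" (yh), "d" (yl) : "cc");		\
	(rh) = __h; (rl) = __l;				\
} while (0)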
diff --git a/include/asm-s390/sigp.h b/include/asm-s390/sigp.h
index fc56458aff66..e16d56f8dfe1 100644
--- a/include/asm-s390/sigp.h
+++ b/include/asm-s390/sigp.h
@@ -70,16 +70,16 @@ typedef enum
70static inline sigp_ccode 70static inline sigp_ccode
71signal_processor(__u16 cpu_addr, sigp_order_code order_code) 71signal_processor(__u16 cpu_addr, sigp_order_code order_code)
72{ 72{
73 register unsigned long reg1 asm ("1") = 0;
73 sigp_ccode ccode; 74 sigp_ccode ccode;
74 75
75 __asm__ __volatile__( 76 asm volatile(
76 " sr 1,1\n" /* parameter=0 in gpr 1 */ 77 " sigp %1,%2,0(%3)\n"
77 " sigp 1,%1,0(%2)\n" 78 " ipm %0\n"
78 " ipm %0\n" 79 " srl %0,28\n"
79 " srl %0,28\n" 80 : "=d" (ccode)
80 : "=d" (ccode) 81 : "d" (reg1), "d" (__cpu_logical_map[cpu_addr]),
81 : "d" (__cpu_logical_map[cpu_addr]), "a" (order_code) 82 "a" (order_code) : "cc" , "memory");
82 : "cc" , "memory", "1" );
83 return ccode; 83 return ccode;
84} 84}
85 85
@@ -87,20 +87,18 @@ signal_processor(__u16 cpu_addr, sigp_order_code order_code)
87 * Signal processor with parameter 87 * Signal processor with parameter
88 */ 88 */
89static inline sigp_ccode 89static inline sigp_ccode
90signal_processor_p(__u32 parameter, __u16 cpu_addr, 90signal_processor_p(__u32 parameter, __u16 cpu_addr, sigp_order_code order_code)
91 sigp_order_code order_code)
92{ 91{
92 register unsigned int reg1 asm ("1") = parameter;
93 sigp_ccode ccode; 93 sigp_ccode ccode;
94 94
95 __asm__ __volatile__( 95 asm volatile(
96 " lr 1,%1\n" /* parameter in gpr 1 */ 96 " sigp %1,%2,0(%3)\n"
97 " sigp 1,%2,0(%3)\n" 97 " ipm %0\n"
98 " ipm %0\n" 98 " srl %0,28\n"
99 " srl %0,28\n"
100 : "=d" (ccode) 99 : "=d" (ccode)
101 : "d" (parameter), "d" (__cpu_logical_map[cpu_addr]), 100 : "d" (reg1), "d" (__cpu_logical_map[cpu_addr]),
102 "a" (order_code) 101 "a" (order_code) : "cc" , "memory");
103 : "cc" , "memory", "1" );
104 return ccode; 102 return ccode;
105} 103}
106 104
@@ -108,24 +106,21 @@ signal_processor_p(__u32 parameter, __u16 cpu_addr,
108 * Signal processor with parameter and return status 106 * Signal processor with parameter and return status
109 */ 107 */
110static inline sigp_ccode 108static inline sigp_ccode
111signal_processor_ps(__u32 *statusptr, __u32 parameter, 109signal_processor_ps(__u32 *statusptr, __u32 parameter, __u16 cpu_addr,
112 __u16 cpu_addr, sigp_order_code order_code) 110 sigp_order_code order_code)
113{ 111{
112 register unsigned int reg1 asm ("1") = parameter;
114 sigp_ccode ccode; 113 sigp_ccode ccode;
115 114
116 __asm__ __volatile__( 115 asm volatile(
117 " sr 2,2\n" /* clear status */ 116 " sigp %1,%2,0(%3)\n"
118 " lr 3,%2\n" /* parameter in gpr 3 */ 117 " ipm %0\n"
119 " sigp 2,%3,0(%4)\n" 118 " srl %0,28\n"
120 " st 2,%1\n" 119 : "=d" (ccode), "+d" (reg1)
121 " ipm %0\n" 120 : "d" (__cpu_logical_map[cpu_addr]), "a" (order_code)
122 " srl %0,28\n" 121 : "cc" , "memory");
123 : "=d" (ccode), "=m" (*statusptr) 122 *statusptr = reg1;
124 : "d" (parameter), "d" (__cpu_logical_map[cpu_addr]), 123 return ccode;
125 "a" (order_code)
126 : "cc" , "memory", "2" , "3"
127 );
128 return ccode;
129} 124}
130 125
131#endif /* __SIGP__ */ 126#endif /* __SIGP__ */
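With the parameter pinned to GPR 1 as a register asm variable, all three
sigp helpers collapse to the same sigp/ipm/srl shape, and the status from
signal_processor_ps comes back through reg1 instead of an extra store. A
hedged usage sketch (the order code chosen here is illustrative):

static int cpu_is_busy(__u16 cpu)
{
	sigp_ccode ccode = signal_processor(cpu, sigp_restart);

	return ccode == sigp_busy;	/* order rejected, CPU busy */
}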
diff --git a/include/asm-s390/smp.h b/include/asm-s390/smp.h
index 9fb02e9779c9..c3cf030ada4d 100644
--- a/include/asm-s390/smp.h
+++ b/include/asm-s390/smp.h
@@ -56,7 +56,7 @@ static inline __u16 hard_smp_processor_id(void)
56{ 56{
57 __u16 cpu_address; 57 __u16 cpu_address;
58 58
59 __asm__ ("stap %0\n" : "=m" (cpu_address)); 59 asm volatile("stap %0" : "=m" (cpu_address));
60 return cpu_address; 60 return cpu_address;
61} 61}
62 62
diff --git a/include/asm-s390/spinlock.h b/include/asm-s390/spinlock.h
index 273dbecf8ace..ce3edf6d63b3 100644
--- a/include/asm-s390/spinlock.h
+++ b/include/asm-s390/spinlock.h
@@ -11,17 +11,36 @@
11#ifndef __ASM_SPINLOCK_H 11#ifndef __ASM_SPINLOCK_H
12#define __ASM_SPINLOCK_H 12#define __ASM_SPINLOCK_H
13 13
14#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
15
16static inline int
17_raw_compare_and_swap(volatile unsigned int *lock,
18 unsigned int old, unsigned int new)
19{
20 asm volatile(
21 " cs %0,%3,%1"
22 : "=d" (old), "=Q" (*lock)
23 : "0" (old), "d" (new), "Q" (*lock)
24 : "cc", "memory" );
25 return old;
26}
27
28#else /* __GNUC__ */
29
14static inline int 30static inline int
15_raw_compare_and_swap(volatile unsigned int *lock, 31_raw_compare_and_swap(volatile unsigned int *lock,
16 unsigned int old, unsigned int new) 32 unsigned int old, unsigned int new)
17{ 33{
18 asm volatile ("cs %0,%3,0(%4)" 34 asm volatile(
19 : "=d" (old), "=m" (*lock) 35 " cs %0,%3,0(%4)"
20 : "0" (old), "d" (new), "a" (lock), "m" (*lock) 36 : "=d" (old), "=m" (*lock)
21 : "cc", "memory" ); 37 : "0" (old), "d" (new), "a" (lock), "m" (*lock)
38 : "cc", "memory" );
22 return old; 39 return old;
23} 40}
24 41
42#endif /* __GNUC__ */
43
25/* 44/*
26 * Simple spin lock operations. There are two variants, one clears IRQ's 45 * Simple spin lock operations. There are two variants, one clears IRQ's
27 * on the local processor, one does not. 46 * on the local processor, one does not.
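A hedged sketch of how the lock paths consume this primitive: the return
value is the old lock word, so the acquisition succeeded only if it was 0
(function name illustrative; the real lock code lives further down this
header):

static inline void spin_acquire(volatile unsigned int *lock,
				unsigned int id)
{
	while (_raw_compare_and_swap(lock, 0, id) != 0)
		cpu_relax();	/* contended, back off and retry */
}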
diff --git a/include/asm-s390/string.h b/include/asm-s390/string.h
index 23a4c390489f..d074673a6d9b 100644
--- a/include/asm-s390/string.h
+++ b/include/asm-s390/string.h
@@ -60,12 +60,13 @@ static inline void *memchr(const void * s, int c, size_t n)
60 register int r0 asm("0") = (char) c; 60 register int r0 asm("0") = (char) c;
61 const void *ret = s + n; 61 const void *ret = s + n;
62 62
63 asm volatile ("0: srst %0,%1\n" 63 asm volatile(
64 " jo 0b\n" 64 "0: srst %0,%1\n"
65 " jl 1f\n" 65 " jo 0b\n"
66 " la %0,0\n" 66 " jl 1f\n"
67 "1:" 67 " la %0,0\n"
68 : "+a" (ret), "+&a" (s) : "d" (r0) : "cc" ); 68 "1:"
69 : "+a" (ret), "+&a" (s) : "d" (r0) : "cc");
69 return (void *) ret; 70 return (void *) ret;
70} 71}
71 72
@@ -74,9 +75,10 @@ static inline void *memscan(void *s, int c, size_t n)
74 register int r0 asm("0") = (char) c; 75 register int r0 asm("0") = (char) c;
75 const void *ret = s + n; 76 const void *ret = s + n;
76 77
77 asm volatile ("0: srst %0,%1\n" 78 asm volatile(
78 " jo 0b\n" 79 "0: srst %0,%1\n"
79 : "+a" (ret), "+&a" (s) : "d" (r0) : "cc" ); 80 " jo 0b\n"
81 : "+a" (ret), "+&a" (s) : "d" (r0) : "cc");
80 return (void *) ret; 82 return (void *) ret;
81} 83}
82 84
@@ -86,12 +88,13 @@ static inline char *strcat(char *dst, const char *src)
86 unsigned long dummy; 88 unsigned long dummy;
87 char *ret = dst; 89 char *ret = dst;
88 90
89 asm volatile ("0: srst %0,%1\n" 91 asm volatile(
90 " jo 0b\n" 92 "0: srst %0,%1\n"
91 "1: mvst %0,%2\n" 93 " jo 0b\n"
92 " jo 1b" 94 "1: mvst %0,%2\n"
93 : "=&a" (dummy), "+a" (dst), "+a" (src) 95 " jo 1b"
94 : "d" (r0), "0" (0) : "cc", "memory" ); 96 : "=&a" (dummy), "+a" (dst), "+a" (src)
97 : "d" (r0), "0" (0) : "cc", "memory" );
95 return ret; 98 return ret;
96} 99}
97 100
@@ -100,10 +103,11 @@ static inline char *strcpy(char *dst, const char *src)
100 register int r0 asm("0") = 0; 103 register int r0 asm("0") = 0;
101 char *ret = dst; 104 char *ret = dst;
102 105
103 asm volatile ("0: mvst %0,%1\n" 106 asm volatile(
104 " jo 0b" 107 "0: mvst %0,%1\n"
105 : "+&a" (dst), "+&a" (src) : "d" (r0) 108 " jo 0b"
106 : "cc", "memory" ); 109 : "+&a" (dst), "+&a" (src) : "d" (r0)
110 : "cc", "memory");
107 return ret; 111 return ret;
108} 112}
109 113
@@ -112,9 +116,10 @@ static inline size_t strlen(const char *s)
112 register unsigned long r0 asm("0") = 0; 116 register unsigned long r0 asm("0") = 0;
113 const char *tmp = s; 117 const char *tmp = s;
114 118
115 asm volatile ("0: srst %0,%1\n" 119 asm volatile(
116 " jo 0b" 120 "0: srst %0,%1\n"
117 : "+d" (r0), "+a" (tmp) : : "cc" ); 121 " jo 0b"
122 : "+d" (r0), "+a" (tmp) : : "cc");
118 return r0 - (unsigned long) s; 123 return r0 - (unsigned long) s;
119} 124}
120 125
@@ -124,9 +129,10 @@ static inline size_t strnlen(const char * s, size_t n)
124 const char *tmp = s; 129 const char *tmp = s;
125 const char *end = s + n; 130 const char *end = s + n;
126 131
127 asm volatile ("0: srst %0,%1\n" 132 asm volatile(
128 " jo 0b" 133 "0: srst %0,%1\n"
129 : "+a" (end), "+a" (tmp) : "d" (r0) : "cc" ); 134 " jo 0b"
135 : "+a" (end), "+a" (tmp) : "d" (r0) : "cc");
130 return end - s; 136 return end - s;
131} 137}
132 138
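A hedged usage sketch of the difference between the two srst-based
scanners above: memchr reports absence with NULL (the "la %0,0" path),
while memscan returns the end of the scanned area:

static int scan_example(void)
{
	char buf[8] = "abcdefg";
	void *p = memchr(buf, 'x', sizeof(buf));	/* NULL */
	void *q = memscan(buf, 'x', sizeof(buf));	/* buf + 8 */

	return p == NULL && q == (void *) (buf + sizeof(buf));
}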
diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h
index 16040048cd1b..ccbafe4bf2cb 100644
--- a/include/asm-s390/system.h
+++ b/include/asm-s390/system.h
@@ -23,74 +23,68 @@ struct task_struct;
23 23
24extern struct task_struct *__switch_to(void *, void *); 24extern struct task_struct *__switch_to(void *, void *);
25 25
26#ifdef __s390x__
27#define __FLAG_SHIFT 56
28#else /* ! __s390x__ */
29#define __FLAG_SHIFT 24
30#endif /* ! __s390x__ */
31
32static inline void save_fp_regs(s390_fp_regs *fpregs) 26static inline void save_fp_regs(s390_fp_regs *fpregs)
33{ 27{
34 asm volatile ( 28 asm volatile(
35 " std 0,8(%1)\n" 29 " std 0,8(%1)\n"
36 " std 2,24(%1)\n" 30 " std 2,24(%1)\n"
37 " std 4,40(%1)\n" 31 " std 4,40(%1)\n"
38 " std 6,56(%1)" 32 " std 6,56(%1)"
39 : "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory" ); 33 : "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory");
40 if (!MACHINE_HAS_IEEE) 34 if (!MACHINE_HAS_IEEE)
41 return; 35 return;
42 asm volatile( 36 asm volatile(
43 " stfpc 0(%1)\n" 37 " stfpc 0(%1)\n"
44 " std 1,16(%1)\n" 38 " std 1,16(%1)\n"
45 " std 3,32(%1)\n" 39 " std 3,32(%1)\n"
46 " std 5,48(%1)\n" 40 " std 5,48(%1)\n"
47 " std 7,64(%1)\n" 41 " std 7,64(%1)\n"
48 " std 8,72(%1)\n" 42 " std 8,72(%1)\n"
49 " std 9,80(%1)\n" 43 " std 9,80(%1)\n"
50 " std 10,88(%1)\n" 44 " std 10,88(%1)\n"
51 " std 11,96(%1)\n" 45 " std 11,96(%1)\n"
52 " std 12,104(%1)\n" 46 " std 12,104(%1)\n"
53 " std 13,112(%1)\n" 47 " std 13,112(%1)\n"
54 " std 14,120(%1)\n" 48 " std 14,120(%1)\n"
55 " std 15,128(%1)\n" 49 " std 15,128(%1)\n"
56 : "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory" ); 50 : "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory");
57} 51}
58 52
59static inline void restore_fp_regs(s390_fp_regs *fpregs) 53static inline void restore_fp_regs(s390_fp_regs *fpregs)
60{ 54{
61 asm volatile ( 55 asm volatile(
62 " ld 0,8(%0)\n" 56 " ld 0,8(%0)\n"
63 " ld 2,24(%0)\n" 57 " ld 2,24(%0)\n"
64 " ld 4,40(%0)\n" 58 " ld 4,40(%0)\n"
65 " ld 6,56(%0)" 59 " ld 6,56(%0)"
66 : : "a" (fpregs), "m" (*fpregs) ); 60 : : "a" (fpregs), "m" (*fpregs));
67 if (!MACHINE_HAS_IEEE) 61 if (!MACHINE_HAS_IEEE)
68 return; 62 return;
69 asm volatile( 63 asm volatile(
70 " lfpc 0(%0)\n" 64 " lfpc 0(%0)\n"
71 " ld 1,16(%0)\n" 65 " ld 1,16(%0)\n"
72 " ld 3,32(%0)\n" 66 " ld 3,32(%0)\n"
73 " ld 5,48(%0)\n" 67 " ld 5,48(%0)\n"
74 " ld 7,64(%0)\n" 68 " ld 7,64(%0)\n"
75 " ld 8,72(%0)\n" 69 " ld 8,72(%0)\n"
76 " ld 9,80(%0)\n" 70 " ld 9,80(%0)\n"
77 " ld 10,88(%0)\n" 71 " ld 10,88(%0)\n"
78 " ld 11,96(%0)\n" 72 " ld 11,96(%0)\n"
79 " ld 12,104(%0)\n" 73 " ld 12,104(%0)\n"
80 " ld 13,112(%0)\n" 74 " ld 13,112(%0)\n"
81 " ld 14,120(%0)\n" 75 " ld 14,120(%0)\n"
82 " ld 15,128(%0)\n" 76 " ld 15,128(%0)\n"
83 : : "a" (fpregs), "m" (*fpregs) ); 77 : : "a" (fpregs), "m" (*fpregs));
84} 78}
85 79
86static inline void save_access_regs(unsigned int *acrs) 80static inline void save_access_regs(unsigned int *acrs)
87{ 81{
88 asm volatile ("stam 0,15,0(%0)" : : "a" (acrs) : "memory" ); 82 asm volatile("stam 0,15,0(%0)" : : "a" (acrs) : "memory");
89} 83}
90 84
91static inline void restore_access_regs(unsigned int *acrs) 85static inline void restore_access_regs(unsigned int *acrs)
92{ 86{
93 asm volatile ("lam 0,15,0(%0)" : : "a" (acrs) ); 87 asm volatile("lam 0,15,0(%0)" : : "a" (acrs));
94} 88}
95 89
96#define switch_to(prev,next,last) do { \ 90#define switch_to(prev,next,last) do { \
@@ -126,7 +120,7 @@ extern void account_system_vtime(struct task_struct *);
126 account_vtime(prev); \ 120 account_vtime(prev); \
127} while (0) 121} while (0)
128 122
129#define nop() __asm__ __volatile__ ("nop") 123#define nop() asm volatile("nop")
130 124
131#define xchg(ptr,x) \ 125#define xchg(ptr,x) \
132({ \ 126({ \
@@ -147,15 +141,15 @@ static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
147 shift = (3 ^ (addr & 3)) << 3; 141 shift = (3 ^ (addr & 3)) << 3;
148 addr ^= addr & 3; 142 addr ^= addr & 3;
149 asm volatile( 143 asm volatile(
150 " l %0,0(%4)\n" 144 " l %0,0(%4)\n"
151 "0: lr 0,%0\n" 145 "0: lr 0,%0\n"
152 " nr 0,%3\n" 146 " nr 0,%3\n"
153 " or 0,%2\n" 147 " or 0,%2\n"
154 " cs %0,0,0(%4)\n" 148 " cs %0,0,0(%4)\n"
155 " jl 0b\n" 149 " jl 0b\n"
156 : "=&d" (old), "=m" (*(int *) addr) 150 : "=&d" (old), "=m" (*(int *) addr)
157 : "d" (x << shift), "d" (~(255 << shift)), "a" (addr), 151 : "d" (x << shift), "d" (~(255 << shift)), "a" (addr),
158 "m" (*(int *) addr) : "memory", "cc", "0" ); 152 "m" (*(int *) addr) : "memory", "cc", "0");
159 x = old >> shift; 153 x = old >> shift;
160 break; 154 break;
161 case 2: 155 case 2:
@@ -163,36 +157,36 @@ static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
163 shift = (2 ^ (addr & 2)) << 3; 157 shift = (2 ^ (addr & 2)) << 3;
164 addr ^= addr & 2; 158 addr ^= addr & 2;
165 asm volatile( 159 asm volatile(
166 " l %0,0(%4)\n" 160 " l %0,0(%4)\n"
167 "0: lr 0,%0\n" 161 "0: lr 0,%0\n"
168 " nr 0,%3\n" 162 " nr 0,%3\n"
169 " or 0,%2\n" 163 " or 0,%2\n"
170 " cs %0,0,0(%4)\n" 164 " cs %0,0,0(%4)\n"
171 " jl 0b\n" 165 " jl 0b\n"
172 : "=&d" (old), "=m" (*(int *) addr) 166 : "=&d" (old), "=m" (*(int *) addr)
173 : "d" (x << shift), "d" (~(65535 << shift)), "a" (addr), 167 : "d" (x << shift), "d" (~(65535 << shift)), "a" (addr),
174 "m" (*(int *) addr) : "memory", "cc", "0" ); 168 "m" (*(int *) addr) : "memory", "cc", "0");
175 x = old >> shift; 169 x = old >> shift;
176 break; 170 break;
177 case 4: 171 case 4:
178 asm volatile ( 172 asm volatile(
179 " l %0,0(%3)\n" 173 " l %0,0(%3)\n"
180 "0: cs %0,%2,0(%3)\n" 174 "0: cs %0,%2,0(%3)\n"
181 " jl 0b\n" 175 " jl 0b\n"
182 : "=&d" (old), "=m" (*(int *) ptr) 176 : "=&d" (old), "=m" (*(int *) ptr)
183 : "d" (x), "a" (ptr), "m" (*(int *) ptr) 177 : "d" (x), "a" (ptr), "m" (*(int *) ptr)
184 : "memory", "cc" ); 178 : "memory", "cc");
185 x = old; 179 x = old;
186 break; 180 break;
187#ifdef __s390x__ 181#ifdef __s390x__
188 case 8: 182 case 8:
189 asm volatile ( 183 asm volatile(
190 " lg %0,0(%3)\n" 184 " lg %0,0(%3)\n"
191 "0: csg %0,%2,0(%3)\n" 185 "0: csg %0,%2,0(%3)\n"
192 " jl 0b\n" 186 " jl 0b\n"
193 : "=&d" (old), "=m" (*(long *) ptr) 187 : "=&d" (old), "=m" (*(long *) ptr)
194 : "d" (x), "a" (ptr), "m" (*(long *) ptr) 188 : "d" (x), "a" (ptr), "m" (*(long *) ptr)
195 : "memory", "cc" ); 189 : "memory", "cc");
196 x = old; 190 x = old;
197 break; 191 break;
198#endif /* __s390x__ */ 192#endif /* __s390x__ */
@@ -224,55 +218,55 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
224 shift = (3 ^ (addr & 3)) << 3; 218 shift = (3 ^ (addr & 3)) << 3;
225 addr ^= addr & 3; 219 addr ^= addr & 3;
226 asm volatile( 220 asm volatile(
227 " l %0,0(%4)\n" 221 " l %0,0(%4)\n"
228 "0: nr %0,%5\n" 222 "0: nr %0,%5\n"
229 " lr %1,%0\n" 223 " lr %1,%0\n"
230 " or %0,%2\n" 224 " or %0,%2\n"
231 " or %1,%3\n" 225 " or %1,%3\n"
232 " cs %0,%1,0(%4)\n" 226 " cs %0,%1,0(%4)\n"
233 " jnl 1f\n" 227 " jnl 1f\n"
234 " xr %1,%0\n" 228 " xr %1,%0\n"
235 " nr %1,%5\n" 229 " nr %1,%5\n"
236 " jnz 0b\n" 230 " jnz 0b\n"
237 "1:" 231 "1:"
238 : "=&d" (prev), "=&d" (tmp) 232 : "=&d" (prev), "=&d" (tmp)
239 : "d" (old << shift), "d" (new << shift), "a" (ptr), 233 : "d" (old << shift), "d" (new << shift), "a" (ptr),
240 "d" (~(255 << shift)) 234 "d" (~(255 << shift))
241 : "memory", "cc" ); 235 : "memory", "cc");
242 return prev >> shift; 236 return prev >> shift;
243 case 2: 237 case 2:
244 addr = (unsigned long) ptr; 238 addr = (unsigned long) ptr;
245 shift = (2 ^ (addr & 2)) << 3; 239 shift = (2 ^ (addr & 2)) << 3;
246 addr ^= addr & 2; 240 addr ^= addr & 2;
247 asm volatile( 241 asm volatile(
248 " l %0,0(%4)\n" 242 " l %0,0(%4)\n"
249 "0: nr %0,%5\n" 243 "0: nr %0,%5\n"
250 " lr %1,%0\n" 244 " lr %1,%0\n"
251 " or %0,%2\n" 245 " or %0,%2\n"
252 " or %1,%3\n" 246 " or %1,%3\n"
253 " cs %0,%1,0(%4)\n" 247 " cs %0,%1,0(%4)\n"
254 " jnl 1f\n" 248 " jnl 1f\n"
255 " xr %1,%0\n" 249 " xr %1,%0\n"
256 " nr %1,%5\n" 250 " nr %1,%5\n"
257 " jnz 0b\n" 251 " jnz 0b\n"
258 "1:" 252 "1:"
259 : "=&d" (prev), "=&d" (tmp) 253 : "=&d" (prev), "=&d" (tmp)
260 : "d" (old << shift), "d" (new << shift), "a" (ptr), 254 : "d" (old << shift), "d" (new << shift), "a" (ptr),
261 "d" (~(65535 << shift)) 255 "d" (~(65535 << shift))
262 : "memory", "cc" ); 256 : "memory", "cc");
263 return prev >> shift; 257 return prev >> shift;
264 case 4: 258 case 4:
265 asm volatile ( 259 asm volatile(
266 " cs %0,%2,0(%3)\n" 260 " cs %0,%2,0(%3)\n"
267 : "=&d" (prev) : "0" (old), "d" (new), "a" (ptr) 261 : "=&d" (prev) : "0" (old), "d" (new), "a" (ptr)
268 : "memory", "cc" ); 262 : "memory", "cc");
269 return prev; 263 return prev;
270#ifdef __s390x__ 264#ifdef __s390x__
271 case 8: 265 case 8:
272 asm volatile ( 266 asm volatile(
273 " csg %0,%2,0(%3)\n" 267 " csg %0,%2,0(%3)\n"
274 : "=&d" (prev) : "0" (old), "d" (new), "a" (ptr) 268 : "=&d" (prev) : "0" (old), "d" (new), "a" (ptr)
275 : "memory", "cc" ); 269 : "memory", "cc");
276 return prev; 270 return prev;
277#endif /* __s390x__ */ 271#endif /* __s390x__ */
278 } 272 }
@@ -289,8 +283,8 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
289 * all memory ops have completed wrt other CPU's ( see 7-15 POP DJB ). 283 * all memory ops have completed wrt other CPU's ( see 7-15 POP DJB ).
290 */ 284 */
291 285
292#define eieio() __asm__ __volatile__ ( "bcr 15,0" : : : "memory" ) 286#define eieio() asm volatile("bcr 15,0" : : : "memory")
293# define SYNC_OTHER_CORES(x) eieio() 287#define SYNC_OTHER_CORES(x) eieio()
294#define mb() eieio() 288#define mb() eieio()
295#define rmb() eieio() 289#define rmb() eieio()
296#define wmb() eieio() 290#define wmb() eieio()
@@ -307,117 +301,56 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
307 301
308#ifdef __s390x__ 302#ifdef __s390x__
309 303
310#define __ctl_load(array, low, high) ({ \ 304#define __ctl_load(array, low, high) ({ \
311 typedef struct { char _[sizeof(array)]; } addrtype; \ 305 typedef struct { char _[sizeof(array)]; } addrtype; \
312 __asm__ __volatile__ ( \ 306 asm volatile( \
313 " bras 1,0f\n" \ 307 " lctlg %1,%2,0(%0)\n" \
314 " lctlg 0,0,0(%0)\n" \ 308 : : "a" (&array), "i" (low), "i" (high), \
315 "0: ex %1,0(1)" \ 309 "m" (*(addrtype *)(array))); \
316 : : "a" (&array), "a" (((low)<<4)+(high)), \
317 "m" (*(addrtype *)(array)) : "1" ); \
318 }) 310 })
319 311
320#define __ctl_store(array, low, high) ({ \ 312#define __ctl_store(array, low, high) ({ \
321 typedef struct { char _[sizeof(array)]; } addrtype; \ 313 typedef struct { char _[sizeof(array)]; } addrtype; \
322 __asm__ __volatile__ ( \ 314 asm volatile( \
323 " bras 1,0f\n" \ 315 " stctg %2,%3,0(%1)\n" \
324 " stctg 0,0,0(%1)\n" \ 316 : "=m" (*(addrtype *)(array)) \
325 "0: ex %2,0(1)" \ 317 : "a" (&array), "i" (low), "i" (high)); \
326 : "=m" (*(addrtype *)(array)) \
327 : "a" (&array), "a" (((low)<<4)+(high)) : "1" ); \
328 }) 318 })
329 319
330#define __ctl_set_bit(cr, bit) ({ \
331 __u8 __dummy[24]; \
332 __asm__ __volatile__ ( \
333 " bras 1,0f\n" /* skip indirect insns */ \
334 " stctg 0,0,0(%1)\n" \
335 " lctlg 0,0,0(%1)\n" \
336 "0: ex %2,0(1)\n" /* execute stctl */ \
337 " lg 0,0(%1)\n" \
338 " ogr 0,%3\n" /* set the bit */ \
339 " stg 0,0(%1)\n" \
340 "1: ex %2,6(1)" /* execute lctl */ \
341 : "=m" (__dummy) \
342 : "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \
343 "a" (cr*17), "a" (1L<<(bit)) \
344 : "cc", "0", "1" ); \
345 })
346
347#define __ctl_clear_bit(cr, bit) ({ \
348 __u8 __dummy[16]; \
349 __asm__ __volatile__ ( \
350 " bras 1,0f\n" /* skip indirect insns */ \
351 " stctg 0,0,0(%1)\n" \
352 " lctlg 0,0,0(%1)\n" \
353 "0: ex %2,0(1)\n" /* execute stctl */ \
354 " lg 0,0(%1)\n" \
355 " ngr 0,%3\n" /* set the bit */ \
356 " stg 0,0(%1)\n" \
357 "1: ex %2,6(1)" /* execute lctl */ \
358 : "=m" (__dummy) \
359 : "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \
360 "a" (cr*17), "a" (~(1L<<(bit))) \
361 : "cc", "0", "1" ); \
362 })
363
364#else /* __s390x__ */ 320#else /* __s390x__ */
365 321
366#define __ctl_load(array, low, high) ({ \ 322#define __ctl_load(array, low, high) ({ \
367 typedef struct { char _[sizeof(array)]; } addrtype; \ 323 typedef struct { char _[sizeof(array)]; } addrtype; \
368 __asm__ __volatile__ ( \ 324 asm volatile( \
369 " bras 1,0f\n" \ 325 " lctl %1,%2,0(%0)\n" \
370 " lctl 0,0,0(%0)\n" \ 326 : : "a" (&array), "i" (low), "i" (high), \
371 "0: ex %1,0(1)" \ 327 "m" (*(addrtype *)(array))); \
372 : : "a" (&array), "a" (((low)<<4)+(high)), \ 328})
373 "m" (*(addrtype *)(array)) : "1" ); \
374 })
375 329
376#define __ctl_store(array, low, high) ({ \ 330#define __ctl_store(array, low, high) ({ \
377 typedef struct { char _[sizeof(array)]; } addrtype; \ 331 typedef struct { char _[sizeof(array)]; } addrtype; \
378 __asm__ __volatile__ ( \ 332 asm volatile( \
379 " bras 1,0f\n" \ 333 " stctl %2,%3,0(%1)\n" \
380 " stctl 0,0,0(%1)\n" \ 334 : "=m" (*(addrtype *)(array)) \
381 "0: ex %2,0(1)" \ 335 : "a" (&array), "i" (low), "i" (high)); \
382 : "=m" (*(addrtype *)(array)) \
383 : "a" (&array), "a" (((low)<<4)+(high)): "1" ); \
384 }) 336 })
385 337
386#define __ctl_set_bit(cr, bit) ({ \
387 __u8 __dummy[16]; \
388 __asm__ __volatile__ ( \
389 " bras 1,0f\n" /* skip indirect insns */ \
390 " stctl 0,0,0(%1)\n" \
391 " lctl 0,0,0(%1)\n" \
392 "0: ex %2,0(1)\n" /* execute stctl */ \
393 " l 0,0(%1)\n" \
394 " or 0,%3\n" /* set the bit */ \
395 " st 0,0(%1)\n" \
396 "1: ex %2,4(1)" /* execute lctl */ \
397 : "=m" (__dummy) \
398 : "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \
399 "a" (cr*17), "a" (1<<(bit)) \
400 : "cc", "0", "1" ); \
401 })
402
403#define __ctl_clear_bit(cr, bit) ({ \
404 __u8 __dummy[16]; \
405 __asm__ __volatile__ ( \
406 " bras 1,0f\n" /* skip indirect insns */ \
407 " stctl 0,0,0(%1)\n" \
408 " lctl 0,0,0(%1)\n" \
409 "0: ex %2,0(1)\n" /* execute stctl */ \
410 " l 0,0(%1)\n" \
411 " nr 0,%3\n" /* set the bit */ \
412 " st 0,0(%1)\n" \
413 "1: ex %2,4(1)" /* execute lctl */ \
414 : "=m" (__dummy) \
415 : "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \
416 "a" (cr*17), "a" (~(1<<(bit))) \
417 : "cc", "0", "1" ); \
418 })
419#endif /* __s390x__ */ 338#endif /* __s390x__ */
420 339
340#define __ctl_set_bit(cr, bit) ({ \
341 unsigned long __dummy; \
342 __ctl_store(__dummy, cr, cr); \
343 __dummy |= 1UL << (bit); \
344 __ctl_load(__dummy, cr, cr); \
345})
346
347#define __ctl_clear_bit(cr, bit) ({ \
348 unsigned long __dummy; \
349 __ctl_store(__dummy, cr, cr); \
350 __dummy &= ~(1UL << (bit)); \
351 __ctl_load(__dummy, cr, cr); \
352})
353
421#include <linux/irqflags.h> 354#include <linux/irqflags.h>
422 355
423/* 356/*
@@ -427,8 +360,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
427static inline void 360static inline void
428__set_psw_mask(unsigned long mask) 361__set_psw_mask(unsigned long mask)
429{ 362{
430 local_save_flags(mask); 363 __load_psw_mask(mask | (__raw_local_irq_stosm(0x00) & ~(-1UL >> 8)));
431 __load_psw_mask(mask);
432} 364}
433 365
434#define local_mcck_enable() __set_psw_mask(PSW_KERNEL_BITS) 366#define local_mcck_enable() __set_psw_mask(PSW_KERNEL_BITS)
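The bit helpers no longer need the ex trick at all: on both 31- and 64-bit builds they are now a plain store/modify/load of one control register. Roughly, a call such as __ctl_set_bit(0, 17) behaves like this sketch (control register and bit number are illustrative):

/* Sketch of what __ctl_set_bit(0, 17) does after this patch. */
unsigned long cr0;

__ctl_store(cr0, 0, 0);		/* stctg/stctl: read CR0 into cr0 */
cr0 |= 1UL << 17;		/* illustrative bit */
__ctl_load(cr0, 0, 0);		/* lctlg/lctl: write cr0 back to CR0 */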
diff --git a/include/asm-s390/timex.h b/include/asm-s390/timex.h
index 5d0332a4c2bd..4df4a41029a3 100644
--- a/include/asm-s390/timex.h
+++ b/include/asm-s390/timex.h
@@ -15,20 +15,21 @@
15 15
16typedef unsigned long long cycles_t; 16typedef unsigned long long cycles_t;
17 17
18static inline cycles_t get_cycles(void)
19{
20 cycles_t cycles;
21
22 __asm__ __volatile__ ("stck 0(%1)" : "=m" (cycles) : "a" (&cycles) : "cc");
23 return cycles >> 2;
24}
25
26static inline unsigned long long get_clock (void) 18static inline unsigned long long get_clock (void)
27{ 19{
28 unsigned long long clk; 20 unsigned long long clk;
29 21
30 __asm__ __volatile__ ("stck 0(%1)" : "=m" (clk) : "a" (&clk) : "cc"); 22#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
23 asm volatile("stck %0" : "=Q" (clk) : : "cc");
24#else /* __GNUC__ */
25 asm volatile("stck 0(%1)" : "=m" (clk) : "a" (&clk) : "cc");
26#endif /* __GNUC__ */
31 return clk; 27 return clk;
32} 28}
33 29
30static inline cycles_t get_cycles(void)
31{
32 return (cycles_t) get_clock() >> 2;
33}
34
34#endif 35#endif
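A sketch of how the reordered timex.h helpers are used to time a code section; the workload is illustrative:

/* Sketch: elapsed time via get_clock(). The value is the raw TOD clock;
 * get_cycles() is now just get_clock() with the low two bits dropped. */
unsigned long long start, delta;

start = get_clock();
do_work();			/* illustrative workload */
delta = get_clock() - start;	/* elapsed time in TOD clock units */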
diff --git a/include/asm-s390/tlbflush.h b/include/asm-s390/tlbflush.h
index 73cd85bebfb2..fa4dc916a9bf 100644
--- a/include/asm-s390/tlbflush.h
+++ b/include/asm-s390/tlbflush.h
@@ -25,7 +25,7 @@
25 */ 25 */
26 26
27#define local_flush_tlb() \ 27#define local_flush_tlb() \
28do { __asm__ __volatile__("ptlb": : :"memory"); } while (0) 28do { asm volatile("ptlb": : :"memory"); } while (0)
29 29
30#ifndef CONFIG_SMP 30#ifndef CONFIG_SMP
31 31
@@ -68,24 +68,24 @@ extern void smp_ptlb_all(void);
68 68
69static inline void global_flush_tlb(void) 69static inline void global_flush_tlb(void)
70{ 70{
71 register unsigned long reg2 asm("2");
72 register unsigned long reg3 asm("3");
73 register unsigned long reg4 asm("4");
74 long dummy;
75
71#ifndef __s390x__ 76#ifndef __s390x__
72 if (!MACHINE_HAS_CSP) { 77 if (!MACHINE_HAS_CSP) {
73 smp_ptlb_all(); 78 smp_ptlb_all();
74 return; 79 return;
75 } 80 }
76#endif /* __s390x__ */ 81#endif /* __s390x__ */
77 { 82
78 register unsigned long addr asm("4"); 83 dummy = 0;
79 long dummy; 84 reg2 = reg3 = 0;
80 85 reg4 = ((unsigned long) &dummy) + 1;
81 dummy = 0; 86 asm volatile(
82 addr = ((unsigned long) &dummy) + 1; 87 " csp %0,%2"
83 __asm__ __volatile__ ( 88 : : "d" (reg2), "d" (reg3), "d" (reg4), "m" (dummy) : "cc" );
84 " slr 2,2\n"
85 " slr 3,3\n"
86 " csp 2,%0"
87 : : "a" (addr), "m" (dummy) : "cc", "2", "3" );
88 }
89} 89}
90 90
91/* 91/*
@@ -102,9 +102,9 @@ static inline void __flush_tlb_mm(struct mm_struct * mm)
102 if (unlikely(cpus_empty(mm->cpu_vm_mask))) 102 if (unlikely(cpus_empty(mm->cpu_vm_mask)))
103 return; 103 return;
104 if (MACHINE_HAS_IDTE) { 104 if (MACHINE_HAS_IDTE) {
105 asm volatile (".insn rrf,0xb98e0000,0,%0,%1,0" 105 asm volatile(
106 : : "a" (2048), 106 " .insn rrf,0xb98e0000,0,%0,%1,0"
107 "a" (__pa(mm->pgd)&PAGE_MASK) : "cc" ); 107 : : "a" (2048), "a" (__pa(mm->pgd)&PAGE_MASK) : "cc");
108 return; 108 return;
109 } 109 }
110 preempt_disable(); 110 preempt_disable();
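The global_flush_tlb() conversion shows the register-asm idiom the whole cleanup leans on: values are bound to fixed registers by the compiler instead of being loaded and clobbered inside the asm body. A minimal sketch of the idiom; the function and instruction are illustrative:

/* Sketch: bind C variables to fixed registers so the asm body can
 * reference them as %0/%1 instead of clobbering "2"/"3" by hand. */
static inline unsigned long pinned_add(unsigned long a, unsigned long b)
{
	register unsigned long reg2 asm("2") = a;	/* forced into r2 */
	register unsigned long reg3 asm("3") = b;	/* forced into r3 */

	asm volatile(
		"	agr	%0,%1"		/* illustrative 64-bit add */
		: "+d" (reg2) : "d" (reg3) : "cc");
	return reg2;
}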
diff --git a/include/asm-s390/uaccess.h b/include/asm-s390/uaccess.h
index e2047b0c9092..72ae4efddb49 100644
--- a/include/asm-s390/uaccess.h
+++ b/include/asm-s390/uaccess.h
@@ -38,25 +38,14 @@
38#define get_ds() (KERNEL_DS) 38#define get_ds() (KERNEL_DS)
39#define get_fs() (current->thread.mm_segment) 39#define get_fs() (current->thread.mm_segment)
40 40
41#ifdef __s390x__
42#define set_fs(x) \ 41#define set_fs(x) \
43({ \ 42({ \
44 unsigned long __pto; \ 43 unsigned long __pto; \
45 current->thread.mm_segment = (x); \ 44 current->thread.mm_segment = (x); \
46 __pto = current->thread.mm_segment.ar4 ? \ 45 __pto = current->thread.mm_segment.ar4 ? \
47 S390_lowcore.user_asce : S390_lowcore.kernel_asce; \ 46 S390_lowcore.user_asce : S390_lowcore.kernel_asce; \
48 asm volatile ("lctlg 7,7,%0" : : "m" (__pto) ); \ 47 __ctl_load(__pto, 7, 7); \
49}) 48})
50#else /* __s390x__ */
51#define set_fs(x) \
52({ \
53 unsigned long __pto; \
54 current->thread.mm_segment = (x); \
55 __pto = current->thread.mm_segment.ar4 ? \
56 S390_lowcore.user_asce : S390_lowcore.kernel_asce; \
57 asm volatile ("lctl 7,7,%0" : : "m" (__pto) ); \
58})
59#endif /* __s390x__ */
60 49
61#define segment_eq(a,b) ((a).ar4 == (b).ar4) 50#define segment_eq(a,b) ((a).ar4 == (b).ar4)
62 51
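With __ctl_load() hiding the lctl/lctlg distinction, a single set_fs() definition now serves both builds. A sketch of the usual bracket built on these accessors; the commented access is illustrative:

/* Sketch: temporarily addressing kernel space through CR7. */
mm_segment_t old_fs = get_fs();

set_fs(KERNEL_DS);	/* __ctl_load(..., 7, 7) reloads CR7 */
/* ... use user-access primitives on kernel buffers ... */
set_fs(old_fs);		/* restore the saved segment */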
diff --git a/include/asm-s390/unistd.h b/include/asm-s390/unistd.h
index d49c54cb5505..0361ac5dcde3 100644
--- a/include/asm-s390/unistd.h
+++ b/include/asm-s390/unistd.h
@@ -355,145 +355,145 @@ do { \
355 355
356#define _svc_clobber "1", "cc", "memory" 356#define _svc_clobber "1", "cc", "memory"
357 357
358#define _syscall0(type,name) \ 358#define _syscall0(type,name) \
359type name(void) { \ 359type name(void) { \
360 register long __svcres asm("2"); \ 360 register long __svcres asm("2"); \
361 long __res; \ 361 long __res; \
362 __asm__ __volatile__ ( \ 362 asm volatile( \
363 " .if %1 < 256\n" \ 363 " .if %1 < 256\n" \
364 " svc %b1\n" \ 364 " svc %b1\n" \
365 " .else\n" \ 365 " .else\n" \
366 " la %%r1,%1\n" \ 366 " la %%r1,%1\n" \
367 " svc 0\n" \ 367 " svc 0\n" \
368 " .endif" \ 368 " .endif" \
369 : "=d" (__svcres) \ 369 : "=d" (__svcres) \
370 : "i" (__NR_##name) \ 370 : "i" (__NR_##name) \
371 : _svc_clobber ); \ 371 : _svc_clobber); \
372 __res = __svcres; \ 372 __res = __svcres; \
373 __syscall_return(type,__res); \ 373 __syscall_return(type,__res); \
374} 374}
375 375
376#define _syscall1(type,name,type1,arg1) \ 376#define _syscall1(type,name,type1,arg1) \
377type name(type1 arg1) { \ 377type name(type1 arg1) { \
378 register type1 __arg1 asm("2") = arg1; \ 378 register type1 __arg1 asm("2") = arg1; \
379 register long __svcres asm("2"); \ 379 register long __svcres asm("2"); \
380 long __res; \ 380 long __res; \
381 __asm__ __volatile__ ( \ 381 asm volatile( \
382 " .if %1 < 256\n" \ 382 " .if %1 < 256\n" \
383 " svc %b1\n" \ 383 " svc %b1\n" \
384 " .else\n" \ 384 " .else\n" \
385 " la %%r1,%1\n" \ 385 " la %%r1,%1\n" \
386 " svc 0\n" \ 386 " svc 0\n" \
387 " .endif" \ 387 " .endif" \
388 : "=d" (__svcres) \ 388 : "=d" (__svcres) \
389 : "i" (__NR_##name), \ 389 : "i" (__NR_##name), \
390 "0" (__arg1) \ 390 "0" (__arg1) \
391 : _svc_clobber ); \ 391 : _svc_clobber); \
392 __res = __svcres; \ 392 __res = __svcres; \
393 __syscall_return(type,__res); \ 393 __syscall_return(type,__res); \
394} 394}
395 395
396#define _syscall2(type,name,type1,arg1,type2,arg2) \ 396#define _syscall2(type,name,type1,arg1,type2,arg2) \
397type name(type1 arg1, type2 arg2) { \ 397type name(type1 arg1, type2 arg2) { \
398 register type1 __arg1 asm("2") = arg1; \ 398 register type1 __arg1 asm("2") = arg1; \
399 register type2 __arg2 asm("3") = arg2; \ 399 register type2 __arg2 asm("3") = arg2; \
400 register long __svcres asm("2"); \ 400 register long __svcres asm("2"); \
401 long __res; \ 401 long __res; \
402 __asm__ __volatile__ ( \ 402 asm volatile( \
403 " .if %1 < 256\n" \ 403 " .if %1 < 256\n" \
404 " svc %b1\n" \ 404 " svc %b1\n" \
405 " .else\n" \ 405 " .else\n" \
406 " la %%r1,%1\n" \ 406 " la %%r1,%1\n" \
407 " svc 0\n" \ 407 " svc 0\n" \
408 " .endif" \ 408 " .endif" \
409 : "=d" (__svcres) \ 409 : "=d" (__svcres) \
410 : "i" (__NR_##name), \ 410 : "i" (__NR_##name), \
411 "0" (__arg1), \ 411 "0" (__arg1), \
412 "d" (__arg2) \ 412 "d" (__arg2) \
413 : _svc_clobber ); \ 413 : _svc_clobber ); \
414 __res = __svcres; \ 414 __res = __svcres; \
415 __syscall_return(type,__res); \ 415 __syscall_return(type,__res); \
416} 416}
417 417
418#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)\ 418#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
419type name(type1 arg1, type2 arg2, type3 arg3) { \ 419type name(type1 arg1, type2 arg2, type3 arg3) { \
420 register type1 __arg1 asm("2") = arg1; \ 420 register type1 __arg1 asm("2") = arg1; \
421 register type2 __arg2 asm("3") = arg2; \ 421 register type2 __arg2 asm("3") = arg2; \
422 register type3 __arg3 asm("4") = arg3; \ 422 register type3 __arg3 asm("4") = arg3; \
423 register long __svcres asm("2"); \ 423 register long __svcres asm("2"); \
424 long __res; \ 424 long __res; \
425 __asm__ __volatile__ ( \ 425 asm volatile( \
426 " .if %1 < 256\n" \ 426 " .if %1 < 256\n" \
427 " svc %b1\n" \ 427 " svc %b1\n" \
428 " .else\n" \ 428 " .else\n" \
429 " la %%r1,%1\n" \ 429 " la %%r1,%1\n" \
430 " svc 0\n" \ 430 " svc 0\n" \
431 " .endif" \ 431 " .endif" \
432 : "=d" (__svcres) \ 432 : "=d" (__svcres) \
433 : "i" (__NR_##name), \ 433 : "i" (__NR_##name), \
434 "0" (__arg1), \ 434 "0" (__arg1), \
435 "d" (__arg2), \ 435 "d" (__arg2), \
436 "d" (__arg3) \ 436 "d" (__arg3) \
437 : _svc_clobber ); \ 437 : _svc_clobber); \
438 __res = __svcres; \ 438 __res = __svcres; \
439 __syscall_return(type,__res); \ 439 __syscall_return(type,__res); \
440} 440}
441 441
442#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,\ 442#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3, \
443 type4,name4) \ 443 type4,name4) \
444type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \ 444type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \
445 register type1 __arg1 asm("2") = arg1; \ 445 register type1 __arg1 asm("2") = arg1; \
446 register type2 __arg2 asm("3") = arg2; \ 446 register type2 __arg2 asm("3") = arg2; \
447 register type3 __arg3 asm("4") = arg3; \ 447 register type3 __arg3 asm("4") = arg3; \
448 register type4 __arg4 asm("5") = arg4; \ 448 register type4 __arg4 asm("5") = arg4; \
449 register long __svcres asm("2"); \ 449 register long __svcres asm("2"); \
450 long __res; \ 450 long __res; \
451 __asm__ __volatile__ ( \ 451 asm volatile( \
452 " .if %1 < 256\n" \ 452 " .if %1 < 256\n" \
453 " svc %b1\n" \ 453 " svc %b1\n" \
454 " .else\n" \ 454 " .else\n" \
455 " la %%r1,%1\n" \ 455 " la %%r1,%1\n" \
456 " svc 0\n" \ 456 " svc 0\n" \
457 " .endif" \ 457 " .endif" \
458 : "=d" (__svcres) \ 458 : "=d" (__svcres) \
459 : "i" (__NR_##name), \ 459 : "i" (__NR_##name), \
460 "0" (__arg1), \ 460 "0" (__arg1), \
461 "d" (__arg2), \ 461 "d" (__arg2), \
462 "d" (__arg3), \ 462 "d" (__arg3), \
463 "d" (__arg4) \ 463 "d" (__arg4) \
464 : _svc_clobber ); \ 464 : _svc_clobber); \
465 __res = __svcres; \ 465 __res = __svcres; \
466 __syscall_return(type,__res); \ 466 __syscall_return(type,__res); \
467} 467}
468 468
469#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,\ 469#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3, \
470 type4,name4,type5,name5) \ 470 type4,name4,type5,name5) \
471type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ 471type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
472 type5 arg5) { \ 472 type5 arg5) { \
473 register type1 __arg1 asm("2") = arg1; \ 473 register type1 __arg1 asm("2") = arg1; \
474 register type2 __arg2 asm("3") = arg2; \ 474 register type2 __arg2 asm("3") = arg2; \
475 register type3 __arg3 asm("4") = arg3; \ 475 register type3 __arg3 asm("4") = arg3; \
476 register type4 __arg4 asm("5") = arg4; \ 476 register type4 __arg4 asm("5") = arg4; \
477 register type5 __arg5 asm("6") = arg5; \ 477 register type5 __arg5 asm("6") = arg5; \
478 register long __svcres asm("2"); \ 478 register long __svcres asm("2"); \
479 long __res; \ 479 long __res; \
480 __asm__ __volatile__ ( \ 480 asm volatile( \
481 " .if %1 < 256\n" \ 481 " .if %1 < 256\n" \
482 " svc %b1\n" \ 482 " svc %b1\n" \
483 " .else\n" \ 483 " .else\n" \
484 " la %%r1,%1\n" \ 484 " la %%r1,%1\n" \
485 " svc 0\n" \ 485 " svc 0\n" \
486 " .endif" \ 486 " .endif" \
487 : "=d" (__svcres) \ 487 : "=d" (__svcres) \
488 : "i" (__NR_##name), \ 488 : "i" (__NR_##name), \
489 "0" (__arg1), \ 489 "0" (__arg1), \
490 "d" (__arg2), \ 490 "d" (__arg2), \
491 "d" (__arg3), \ 491 "d" (__arg3), \
492 "d" (__arg4), \ 492 "d" (__arg4), \
493 "d" (__arg5) \ 493 "d" (__arg5) \
494 : _svc_clobber ); \ 494 : _svc_clobber); \
495 __res = __svcres; \ 495 __res = __svcres; \
496 __syscall_return(type,__res); \ 496 __syscall_return(type,__res); \
497} 497}
498 498
499#define __ARCH_WANT_IPC_PARSE_VERSION 499#define __ARCH_WANT_IPC_PARSE_VERSION
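For orientation, a sketch of instantiating one of the wrappers above; the chosen syscall is illustrative:

/* Sketch: _syscall1 expands to 'static inline long close(int fd)',
 * binds fd to r2, issues 'svc __NR_close' (the number is below 256,
 * so the short form is used), and returns r2 via __syscall_return(). */
static inline _syscall1(long, close, int, fd)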