author    Martin Schwidefsky <schwidefsky@de.ibm.com>  2006-09-28 10:56:43 -0400
committer Martin Schwidefsky <schwidefsky@de.ibm.com>  2006-09-28 10:56:43 -0400
commit    94c12cc7d196bab34aaa98d38521549fa1e5ef76 (patch)
tree      8e0cec0ed44445d74a2cb5160303d6b4dfb1bc31 /arch/s390
parent    25d83cbfaa44e1b9170c0941c3ef52ca39f54ccc (diff)
[S390] Inline assembly cleanup.
Major cleanup of all s390 inline assemblies. They now have a common
coding style. Quite a few have been shortened, mainly by using register
asm variables. Use of the EX_TABLE macro helps as well. The atomic ops,
bit ops and locking inlines now use the Q-constraint if a newer gcc
is used. That results in slightly better code.

Thanks to Christian Borntraeger for proof reading the changes.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
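The Q-constraint remark refers to the atomic, bitops and locking headers, which live under include/asm-s390 and are therefore outside the arch/s390 diffstat shown below. As a rough illustration of that part of the cleanup, here is a hedged sketch of a compare-and-swap loop in both flavors; the function name and exact constraint layout are illustrative, not copied from the patch.

/* Illustrative sketch only: the real Q-constraint changes are in the
 * include/asm-s390 headers, not in this diff. "Q" describes a memory
 * operand addressable as base register + short displacement, which is
 * what "cs" needs, so a newer gcc can pick the base register itself
 * instead of being handed one through an "a" constraint.
 */
static inline int atomic_add_return_sketch(int i, int *v)
{
	int old, new;

#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
	asm volatile(
		"	l	%0,%2\n"
		"0:	lr	%1,%0\n"
		"	ar	%1,%3\n"
		"	cs	%0,%1,%2\n"
		"	jl	0b"
		: "=&d" (old), "=&d" (new), "=Q" (*v)
		: "d" (i), "Q" (*v)
		: "cc", "memory");
#else /* older gcc: pass the address in an address register */
	asm volatile(
		"	l	%0,0(%3)\n"
		"0:	lr	%1,%0\n"
		"	ar	%1,%4\n"
		"	cs	%0,%1,0(%3)\n"
		"	jl	0b"
		: "=&d" (old), "=&d" (new), "=m" (*v)
		: "a" (v), "d" (i), "m" (*v)
		: "cc", "memory");
#endif
	return new;
}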
Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/crypto/crypt_s390.h    | 204
-rw-r--r--  arch/s390/hypfs/hypfs_diag.c     |  23
-rw-r--r--  arch/s390/kernel/compat_linux.c  |   5
-rw-r--r--  arch/s390/kernel/cpcmd.c         |  83
-rw-r--r--  arch/s390/kernel/ipl.c           |  21
-rw-r--r--  arch/s390/kernel/process.c       |   5
-rw-r--r--  arch/s390/kernel/semaphore.c     |  22
-rw-r--r--  arch/s390/kernel/setup.c         |   2
-rw-r--r--  arch/s390/kernel/smp.c           |  73
-rw-r--r--  arch/s390/kernel/time.c          |  10
-rw-r--r--  arch/s390/kernel/traps.c         |   3
-rw-r--r--  arch/s390/lib/delay.c            |  11
-rw-r--r--  arch/s390/math-emu/math.c        | 126
-rw-r--r--  arch/s390/math-emu/sfp-util.h    |  73
-rw-r--r--  arch/s390/mm/extmem.c            |  16
-rw-r--r--  arch/s390/mm/fault.c             |  34
-rw-r--r--  arch/s390/mm/init.c              |  41
17 files changed, 295 insertions(+), 457 deletions(-)
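Two idioms recur in every hunk that follows. First, open-coded ".section __ex_table" fragments, which needed a CONFIG_64BIT switch between .long and .quad entries, collapse into one EX_TABLE(fault, target) macro. Second, hard-coded register numbers inside the asm templates become register asm("n") variables passed as ordinary operands. The macro itself is defined in a header outside this diff; the sketch below reconstructs the 31-bit variant from the boilerplate being deleted, and diag_sketch() is a hypothetical reduction of the diag204() hunk.

/* Reconstruction of the 31-bit EX_TABLE idiom, inferred from the
 * ".section __ex_table" blocks deleted below; the real definition is in
 * a header outside this diff (the 64-bit flavor uses .align 8/.quad).
 * Each entry says: if the instruction at 'fault' traps, continue at
 * 'target'.
 */
#define EX_TABLE(_fault, _target)			\
	".section __ex_table,\"a\"\n"			\
	"	.align	4\n"				\
	"	.long	" #_fault "," #_target "\n"	\
	".previous\n"

/* Hypothetical reduction of the diag204() change: the diag instruction
 * wants its arguments in fixed registers, so the variables are bound to
 * them with register asm instead of hand-written loads inside the
 * template. EX_TABLE(0b,0b) resumes right after the diag on a fault.
 */
static inline int diag_sketch(unsigned long subcode, unsigned long size,
			      void *addr)
{
	register unsigned long _subcode asm("0") = subcode;
	register unsigned long _size asm("1") = size;

	asm volatile(
		"	diag	%2,%0,0x204\n"
		"0:\n"
		EX_TABLE(0b,0b)
		: "+d" (_subcode), "+d" (_size) : "d" (addr) : "memory");
	return _subcode ? -1 : _size;
}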
diff --git a/arch/s390/crypto/crypt_s390.h b/arch/s390/crypto/crypt_s390.h
index efd836c2e4a6..2b137089f625 100644
--- a/arch/s390/crypto/crypt_s390.h
+++ b/arch/s390/crypto/crypt_s390.h
@@ -105,63 +105,6 @@ struct crypt_s390_query_status {
 };
 
 /*
- * Standard fixup and ex_table sections for crypt_s390 inline functions.
- * label 0: the s390 crypto operation
- * label 1: just after 1 to catch illegal operation exception
- *          (unsupported model)
- * label 6: the return point after fixup
- * label 7: set error value if exception _in_ crypto operation
- * label 8: set error value if illegal operation exception
- * [ret] is the variable to receive the error code
- * [ERR] is the error code value
- */
-#ifndef CONFIG_64BIT
-#define __crypt_s390_fixup \
-	".section .fixup,\"ax\" \n" \
-	"7:	lhi %0,%h[e1] \n" \
-	"	bras 1,9f \n" \
-	"	.long 6b \n" \
-	"8:	lhi %0,%h[e2] \n" \
-	"	bras 1,9f \n" \
-	"	.long 6b \n" \
-	"9:	l 1,0(1) \n" \
-	"	br 1 \n" \
-	".previous \n" \
-	".section __ex_table,\"a\" \n" \
-	"	.align 4 \n" \
-	"	.long 0b,7b \n" \
-	"	.long 1b,8b \n" \
-	".previous"
-#else /* CONFIG_64BIT */
-#define __crypt_s390_fixup \
-	".section .fixup,\"ax\" \n" \
-	"7:	lhi %0,%h[e1] \n" \
-	"	jg 6b \n" \
-	"8:	lhi %0,%h[e2] \n" \
-	"	jg 6b \n" \
-	".previous\n" \
-	".section __ex_table,\"a\" \n" \
-	"	.align 8 \n" \
-	"	.quad 0b,7b \n" \
-	"	.quad 1b,8b \n" \
-	".previous"
-#endif /* CONFIG_64BIT */
-
-/*
- * Standard code for setting the result of s390 crypto instructions.
- * %0: the register which will receive the result
- * [result]: the register containing the result (e.g. second operand length
- * to compute number of processed bytes].
- */
-#ifndef CONFIG_64BIT
-#define __crypt_s390_set_result \
-	"	lr %0,%[result] \n"
-#else /* CONFIG_64BIT */
-#define __crypt_s390_set_result \
-	"	lgr %0,%[result] \n"
-#endif
-
-/*
  * Executes the KM (CIPHER MESSAGE) operation of the CPU.
  * @param func: the function code passed to KM; see crypt_s390_km_func
  * @param param: address of parameter block; see POP for details on each func
@@ -176,28 +119,24 @@ crypt_s390_km(long func, void* param, u8* dest, const u8* src, long src_len)
 {
 	register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
 	register void* __param asm("1") = param;
-	register u8* __dest asm("4") = dest;
 	register const u8* __src asm("2") = src;
 	register long __src_len asm("3") = src_len;
+	register u8* __dest asm("4") = dest;
 	int ret;
 
-	ret = 0;
-	__asm__ __volatile__ (
-		"0: .insn rre,0xB92E0000,%1,%2 \n" /* KM opcode */
-		"1: brc 1,0b \n" /* handle partial completion */
-		__crypt_s390_set_result
-		"6:	\n"
-		__crypt_s390_fixup
-		: "+d" (ret), "+a" (__dest), "+a" (__src),
-		  [result] "+d" (__src_len)
-		: [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func),
-		  "a" (__param)
-		: "cc", "memory"
-	);
-	if (ret >= 0 && func & CRYPT_S390_FUNC_MASK){
-		ret = src_len - ret;
-	}
-	return ret;
+	asm volatile(
+		"0:	.insn	rre,0xb92e0000,%3,%1 \n" /* KM opcode */
+		"1:	brc	1,0b \n" /* handle partial completion */
+		"	ahi	%0,%h7\n"
+		"2:	ahi	%0,%h8\n"
+		"3:\n"
+		EX_TABLE(0b,3b) EX_TABLE(1b,2b)
+		: "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest)
+		: "d" (__func), "a" (__param), "0" (-EFAULT),
+		  "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory");
+	if (ret < 0)
+		return ret;
+	return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
 }
 
 /*
@@ -215,28 +154,24 @@ crypt_s390_kmc(long func, void* param, u8* dest, const u8* src, long src_len)
 {
 	register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
 	register void* __param asm("1") = param;
-	register u8* __dest asm("4") = dest;
 	register const u8* __src asm("2") = src;
 	register long __src_len asm("3") = src_len;
+	register u8* __dest asm("4") = dest;
 	int ret;
 
-	ret = 0;
-	__asm__ __volatile__ (
-		"0: .insn rre,0xB92F0000,%1,%2 \n" /* KMC opcode */
-		"1: brc 1,0b \n" /* handle partial completion */
-		__crypt_s390_set_result
-		"6:	\n"
-		__crypt_s390_fixup
-		: "+d" (ret), "+a" (__dest), "+a" (__src),
-		  [result] "+d" (__src_len)
-		: [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func),
-		  "a" (__param)
-		: "cc", "memory"
-	);
-	if (ret >= 0 && func & CRYPT_S390_FUNC_MASK){
-		ret = src_len - ret;
-	}
-	return ret;
+	asm volatile(
+		"0:	.insn	rre,0xb92f0000,%3,%1 \n" /* KMC opcode */
+		"1:	brc	1,0b \n" /* handle partial completion */
+		"	ahi	%0,%h7\n"
+		"2:	ahi	%0,%h8\n"
+		"3:\n"
+		EX_TABLE(0b,3b) EX_TABLE(1b,2b)
+		: "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest)
+		: "d" (__func), "a" (__param), "0" (-EFAULT),
+		  "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory");
+	if (ret < 0)
+		return ret;
+	return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
 }
 
 /*
@@ -258,22 +193,19 @@ crypt_s390_kimd(long func, void* param, const u8* src, long src_len)
 	register long __src_len asm("3") = src_len;
 	int ret;
 
-	ret = 0;
-	__asm__ __volatile__ (
-		"0: .insn rre,0xB93E0000,%1,%1 \n" /* KIMD opcode */
-		"1: brc 1,0b \n" /* handle partical completion */
-		__crypt_s390_set_result
-		"6:	\n"
-		__crypt_s390_fixup
-		: "+d" (ret), "+a" (__src), [result] "+d" (__src_len)
-		: [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func),
-		  "a" (__param)
-		: "cc", "memory"
-	);
-	if (ret >= 0 && (func & CRYPT_S390_FUNC_MASK)){
-		ret = src_len - ret;
-	}
-	return ret;
+	asm volatile(
+		"0:	.insn	rre,0xb93e0000,%1,%1 \n" /* KIMD opcode */
+		"1:	brc	1,0b \n" /* handle partial completion */
+		"	ahi	%0,%h6\n"
+		"2:	ahi	%0,%h7\n"
+		"3:\n"
+		EX_TABLE(0b,3b) EX_TABLE(1b,2b)
+		: "=d" (ret), "+a" (__src), "+d" (__src_len)
+		: "d" (__func), "a" (__param), "0" (-EFAULT),
+		  "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory");
+	if (ret < 0)
+		return ret;
+	return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
 }
 
 /*
@@ -294,22 +226,19 @@ crypt_s390_klmd(long func, void* param, const u8* src, long src_len)
 	register long __src_len asm("3") = src_len;
 	int ret;
 
-	ret = 0;
-	__asm__ __volatile__ (
-		"0: .insn rre,0xB93F0000,%1,%1 \n" /* KLMD opcode */
-		"1: brc 1,0b \n" /* handle partical completion */
-		__crypt_s390_set_result
-		"6:	\n"
-		__crypt_s390_fixup
-		: "+d" (ret), "+a" (__src), [result] "+d" (__src_len)
-		: [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func),
-		  "a" (__param)
-		: "cc", "memory"
-	);
-	if (ret >= 0 && func & CRYPT_S390_FUNC_MASK){
-		ret = src_len - ret;
-	}
-	return ret;
+	asm volatile(
+		"0:	.insn	rre,0xb93f0000,%1,%1 \n" /* KLMD opcode */
+		"1:	brc	1,0b \n" /* handle partial completion */
+		"	ahi	%0,%h6\n"
+		"2:	ahi	%0,%h7\n"
+		"3:\n"
+		EX_TABLE(0b,3b) EX_TABLE(1b,2b)
+		: "=d" (ret), "+a" (__src), "+d" (__src_len)
+		: "d" (__func), "a" (__param), "0" (-EFAULT),
+		  "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory");
+	if (ret < 0)
+		return ret;
+	return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
 }
 
 /*
@@ -331,22 +260,19 @@ crypt_s390_kmac(long func, void* param, const u8* src, long src_len)
 	register long __src_len asm("3") = src_len;
 	int ret;
 
-	ret = 0;
-	__asm__ __volatile__ (
-		"0: .insn rre,0xB91E0000,%5,%5 \n" /* KMAC opcode */
-		"1: brc 1,0b \n" /* handle partical completion */
-		__crypt_s390_set_result
-		"6:	\n"
-		__crypt_s390_fixup
-		: "+d" (ret), "+a" (__src), [result] "+d" (__src_len)
-		: [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func),
-		  "a" (__param)
-		: "cc", "memory"
-	);
-	if (ret >= 0 && func & CRYPT_S390_FUNC_MASK){
-		ret = src_len - ret;
-	}
-	return ret;
+	asm volatile(
+		"0:	.insn	rre,0xb91e0000,%1,%1 \n" /* KLAC opcode */
+		"1:	brc	1,0b \n" /* handle partial completion */
+		"	ahi	%0,%h6\n"
+		"2:	ahi	%0,%h7\n"
+		"3:\n"
+		EX_TABLE(0b,3b) EX_TABLE(1b,2b)
+		: "=d" (ret), "+a" (__src), "+d" (__src_len)
+		: "d" (__func), "a" (__param), "0" (-EFAULT),
+		  "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory");
+	if (ret < 0)
+		return ret;
+	return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
 }
 
 /**
diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
index 684384f2b364..443fa377d9ff 100644
--- a/arch/s390/hypfs/hypfs_diag.c
+++ b/arch/s390/hypfs/hypfs_diag.c
@@ -333,22 +333,14 @@ static int diag204(unsigned long subcode, unsigned long size, void *addr)
 	register unsigned long _subcode asm("0") = subcode;
 	register unsigned long _size asm("1") = size;
 
-	asm volatile ("	diag	%2,%0,0x204\n"
-		      "0: \n" ".section __ex_table,\"a\"\n"
-#ifndef __s390x__
-		      "	 .align 4\n"
-		      "	 .long 0b,0b\n"
-#else
-		      "	 .align 8\n"
-		      "	 .quad 0b,0b\n"
-#endif
-		      ".previous":"+d" (_subcode), "+d"(_size)
-		      :"d"(addr)
-		      :"memory");
+	asm volatile(
+		"	diag	%2,%0,0x204\n"
+		"0:\n"
+		EX_TABLE(0b,0b)
+		: "+d" (_subcode), "+d" (_size) : "d" (addr) : "memory");
 	if (_subcode)
 		return -1;
-	else
-		return _size;
+	return _size;
 }
 
 /*
@@ -491,8 +483,7 @@ out:
 
 static void diag224(void *ptr)
 {
-	asm volatile("	diag	%0,%1,0x224\n"
-		     : :"d" (0), "d"(ptr) : "memory");
+	asm volatile("diag %0,%1,0x224" : :"d" (0), "d"(ptr) : "memory");
 }
 
 static int diag224_get_name_table(void)
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 91b2884fa5c4..c46e3d48e410 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -544,10 +544,7 @@ sys32_execve(struct pt_regs regs)
 		current->ptrace &= ~PT_DTRACE;
 		task_unlock(current);
 		current->thread.fp_regs.fpc=0;
-		__asm__ __volatile__
-			("sr	0,0\n\t"
-			 "sfpc	0,0\n\t"
-			 : : :"0");
+		asm volatile("sfpc %0,0" : : "d" (0));
 	}
 	putname(filename);
 out:
diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c
index 4ef44e536b2c..1eae74e72f95 100644
--- a/arch/s390/kernel/cpcmd.c
+++ b/arch/s390/kernel/cpcmd.c
@@ -25,11 +25,8 @@ static char cpcmd_buf[241];
  */
 int __cpcmd(const char *cmd, char *response, int rlen, int *response_code)
 {
-	const int mask = 0x40000000L;
-	unsigned long flags;
-	int return_code;
-	int return_len;
-	int cmdlen;
+	unsigned long flags, cmdlen;
+	int return_code, return_len;
 
 	spin_lock_irqsave(&cpcmd_lock, flags);
 	cmdlen = strlen(cmd);
@@ -38,64 +35,44 @@ int __cpcmd(const char *cmd, char *response, int rlen, int *response_code)
 	ASCEBC(cpcmd_buf, cmdlen);
 
 	if (response != NULL && rlen > 0) {
+		register unsigned long reg2 asm ("2") = (addr_t) cpcmd_buf;
+		register unsigned long reg3 asm ("3") = (addr_t) response;
+		register unsigned long reg4 asm ("4") = cmdlen | 0x40000000L;
+		register unsigned long reg5 asm ("5") = rlen;
+
 		memset(response, 0, rlen);
+		asm volatile(
 #ifndef CONFIG_64BIT
-		asm volatile (	"lra	2,0(%2)\n"
-				"lr	4,%3\n"
-				"o	4,%6\n"
-				"lra	3,0(%4)\n"
-				"lr	5,%5\n"
-				"diag	2,4,0x8\n"
-				"brc	8, 1f\n"
-				"ar	5, %5\n"
-				"1: \n"
-				"lr	%0,4\n"
-				"lr	%1,5\n"
-				: "=d" (return_code), "=d" (return_len)
-				: "a" (cpcmd_buf), "d" (cmdlen),
-				  "a" (response), "d" (rlen), "m" (mask)
-				: "cc", "2", "3", "4", "5" );
+		"	diag	%2,%0,0x8\n"
+		"	brc	8,1f\n"
+		"	ar	%1,%4\n"
#else /* CONFIG_64BIT */
-		asm volatile (	"lrag	2,0(%2)\n"
-				"lgr	4,%3\n"
-				"o	4,%6\n"
-				"lrag	3,0(%4)\n"
-				"lgr	5,%5\n"
-				"sam31\n"
-				"diag	2,4,0x8\n"
-				"sam64\n"
-				"brc	8, 1f\n"
-				"agr	5, %5\n"
-				"1: \n"
-				"lgr	%0,4\n"
-				"lgr	%1,5\n"
-				: "=d" (return_code), "=d" (return_len)
-				: "a" (cpcmd_buf), "d" (cmdlen),
-				  "a" (response), "d" (rlen), "m" (mask)
-				: "cc", "2", "3", "4", "5" );
+		"	sam31\n"
+		"	diag	%2,%0,0x8\n"
+		"	sam64\n"
+		"	brc	8,1f\n"
+		"	agr	%1,%4\n"
#endif /* CONFIG_64BIT */
+		"1:\n"
+		: "+d" (reg4), "+d" (reg5)
+		: "d" (reg2), "d" (reg3), "d" (rlen) : "cc");
+		return_code = (int) reg4;
+		return_len = (int) reg5;
 		EBCASC(response, rlen);
 	} else {
+		register unsigned long reg2 asm ("2") = (addr_t) cpcmd_buf;
+		register unsigned long reg3 asm ("3") = cmdlen;
 		return_len = 0;
+		asm volatile(
 #ifndef CONFIG_64BIT
-		asm volatile (	"lra	2,0(%1)\n"
-				"lr	3,%2\n"
-				"diag	2,3,0x8\n"
-				"lr	%0,3\n"
-				: "=d" (return_code)
-				: "a" (cpcmd_buf), "d" (cmdlen)
-				: "2", "3" );
+		"	diag	%1,%0,0x8\n"
#else /* CONFIG_64BIT */
-		asm volatile (	"lrag	2,0(%1)\n"
-				"lgr	3,%2\n"
-				"sam31\n"
-				"diag	2,3,0x8\n"
-				"sam64\n"
-				"lgr	%0,3\n"
-				: "=d" (return_code)
-				: "a" (cpcmd_buf), "d" (cmdlen)
-				: "2", "3" );
+		"	sam31\n"
+		"	diag	%1,%0,0x8\n"
+		"	sam64\n"
#endif /* CONFIG_64BIT */
+		: "+d" (reg3) : "d" (reg2) : "cc");
+		return_code = (int) reg3;
 	}
 	spin_unlock_irqrestore(&cpcmd_lock, flags);
 	if (response_code != NULL)
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 6555cc48e28f..1f5e782b3d05 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -120,24 +120,15 @@ static enum shutdown_action on_panic_action = SHUTDOWN_STOP;
 
 static int diag308(unsigned long subcode, void *addr)
 {
-	register unsigned long _addr asm("0") = (unsigned long)addr;
+	register unsigned long _addr asm("0") = (unsigned long) addr;
 	register unsigned long _rc asm("1") = 0;
 
-	asm volatile (
+	asm volatile(
 		"	diag	%0,%2,0x308\n"
-		"0: \n"
-		".section __ex_table,\"a\"\n"
-#ifdef CONFIG_64BIT
-		"	.align 8\n"
-		"	.quad 0b, 0b\n"
-#else
-		"	.align 4\n"
-		"	.long 0b, 0b\n"
-#endif
-		".previous\n"
+		"0:\n"
+		EX_TABLE(0b,0b)
 		: "+d" (_addr), "+d" (_rc)
-		: "d" (subcode) : "cc", "memory" );
-
+		: "d" (subcode) : "cc", "memory");
 	return _rc;
 }
 
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index d3cbfa3005ec..6603fbb41d07 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -45,7 +45,7 @@
 #include <asm/irq.h>
 #include <asm/timer.h>
 
-asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
+asmlinkage void ret_from_fork(void) asm ("ret_from_fork");
 
 /*
  * Return saved PC of a blocked thread. used in kernel/sched.
@@ -177,7 +177,8 @@ void show_regs(struct pt_regs *regs)
 
 extern void kernel_thread_starter(void);
 
-__asm__(".align 4\n"
+asm(
+	".align 4\n"
 	"kernel_thread_starter:\n"
 	"	la	2,0(10)\n"
 	"	basr	14,9\n"
diff --git a/arch/s390/kernel/semaphore.c b/arch/s390/kernel/semaphore.c
index 8dfb690c159f..191303f6c1d8 100644
--- a/arch/s390/kernel/semaphore.c
+++ b/arch/s390/kernel/semaphore.c
@@ -26,17 +26,17 @@ static inline int __sem_update_count(struct semaphore *sem, int incr)
 {
 	int old_val, new_val;
 
-	__asm__ __volatile__("   l     %0,0(%3)\n"
-			     "0: ltr   %1,%0\n"
-			     "   jhe   1f\n"
-			     "   lhi   %1,0\n"
-			     "1: ar    %1,%4\n"
-			     "   cs    %0,%1,0(%3)\n"
-			     "   jl    0b\n"
-			     : "=&d" (old_val), "=&d" (new_val),
-			       "=m" (sem->count)
+	asm volatile(
+		"	l	%0,0(%3)\n"
+		"0:	ltr	%1,%0\n"
+		"	jhe	1f\n"
+		"	lhi	%1,0\n"
+		"1:	ar	%1,%4\n"
+		"	cs	%0,%1,0(%3)\n"
+		"	jl	0b\n"
+		: "=&d" (old_val), "=&d" (new_val), "=m" (sem->count)
 		: "a" (&sem->count), "d" (incr), "m" (sem->count)
-		: "cc" );
+		: "cc");
 	return old_val;
 }
 
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index e3d9325f6022..a21cfbb9d97e 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -101,7 +101,7 @@ void __devinit cpu_init (void)
 	/*
 	 * Store processor id in lowcore (used e.g. in timer_interrupt)
 	 */
-	asm volatile ("stidp %0": "=m" (S390_lowcore.cpu_data.cpu_id));
+	asm volatile("stidp %0": "=m" (S390_lowcore.cpu_data.cpu_id));
 	S390_lowcore.cpu_data.cpu_addr = addr;
 
 	/*
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index b2e6f4c8d382..a8e6199755d4 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -63,7 +63,7 @@ static void smp_ext_bitcall(int, ec_bit_sig);
 static void smp_ext_bitcall_others(ec_bit_sig);
 
 /*
- * Structure and data for smp_call_function(). This is designed to minimise
+ * Structure and data for smp_call_function(). This is designed to minimise
  * static memory requirements. It also looks cleaner.
  */
 static DEFINE_SPINLOCK(call_lock);
@@ -418,59 +418,49 @@ void smp_send_reschedule(int cpu)
 /*
  * parameter area for the set/clear control bit callbacks
  */
-typedef struct
-{
-	__u16 start_ctl;
-	__u16 end_ctl;
+struct ec_creg_mask_parms {
 	unsigned long orvals[16];
 	unsigned long andvals[16];
-} ec_creg_mask_parms;
+};
 
 /*
  * callback for setting/clearing control bits
  */
 void smp_ctl_bit_callback(void *info) {
-	ec_creg_mask_parms *pp;
+	struct ec_creg_mask_parms *pp = info;
 	unsigned long cregs[16];
 	int i;
 
-	pp = (ec_creg_mask_parms *) info;
-	__ctl_store(cregs[pp->start_ctl], pp->start_ctl, pp->end_ctl);
-	for (i = pp->start_ctl; i <= pp->end_ctl; i++)
+	__ctl_store(cregs, 0, 15);
+	for (i = 0; i <= 15; i++)
 		cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
-	__ctl_load(cregs[pp->start_ctl], pp->start_ctl, pp->end_ctl);
+	__ctl_load(cregs, 0, 15);
 }
 
 /*
  * Set a bit in a control register of all cpus
  */
-void smp_ctl_set_bit(int cr, int bit) {
-	ec_creg_mask_parms parms;
+void smp_ctl_set_bit(int cr, int bit)
+{
+	struct ec_creg_mask_parms parms;
 
-	parms.start_ctl = cr;
-	parms.end_ctl = cr;
+	memset(&parms.orvals, 0, sizeof(parms.orvals));
+	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
 	parms.orvals[cr] = 1 << bit;
-	parms.andvals[cr] = -1L;
-	preempt_disable();
-	smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
-	__ctl_set_bit(cr, bit);
-	preempt_enable();
+	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
 }
 
 /*
  * Clear a bit in a control register of all cpus
  */
-void smp_ctl_clear_bit(int cr, int bit) {
-	ec_creg_mask_parms parms;
+void smp_ctl_clear_bit(int cr, int bit)
+{
+	struct ec_creg_mask_parms parms;
 
-	parms.start_ctl = cr;
-	parms.end_ctl = cr;
-	parms.orvals[cr] = 0;
+	memset(&parms.orvals, 0, sizeof(parms.orvals));
+	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
 	parms.andvals[cr] = ~(1L << bit);
-	preempt_disable();
-	smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
-	__ctl_clear_bit(cr, bit);
-	preempt_enable();
+	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
 }
 
 /*
@@ -650,9 +640,9 @@ __cpu_up(unsigned int cpu)
 	sf->gprs[9] = (unsigned long) sf;
 	cpu_lowcore->save_area[15] = (unsigned long) sf;
 	__ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15);
-	__asm__ __volatile__("stam  0,15,0(%0)"
-		: : "a" (&cpu_lowcore->access_regs_save_area)
-		: "memory");
+	asm volatile(
+		"	stam	0,15,0(%0)"
+		: : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
 	cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
 	cpu_lowcore->current_task = (unsigned long) idle;
 	cpu_lowcore->cpu_data.cpu_nr = cpu;
@@ -708,7 +698,7 @@ int
 __cpu_disable(void)
 {
 	unsigned long flags;
-	ec_creg_mask_parms cr_parms;
+	struct ec_creg_mask_parms cr_parms;
 	int cpu = smp_processor_id();
 
 	spin_lock_irqsave(&smp_reserve_lock, flags);
@@ -724,30 +714,21 @@ __cpu_disable(void)
 	pfault_fini();
 #endif
 
-	/* disable all external interrupts */
+	memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals));
+	memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals));
 
-	cr_parms.start_ctl = 0;
-	cr_parms.end_ctl = 0;
+	/* disable all external interrupts */
 	cr_parms.orvals[0] = 0;
 	cr_parms.andvals[0] = ~(1<<15 | 1<<14 | 1<<13 | 1<<12 |
 			1<<11 | 1<<10 | 1<< 6 | 1<< 4);
-	smp_ctl_bit_callback(&cr_parms);
-
 	/* disable all I/O interrupts */
-
-	cr_parms.start_ctl = 6;
-	cr_parms.end_ctl = 6;
 	cr_parms.orvals[6] = 0;
 	cr_parms.andvals[6] = ~(1<<31 | 1<<30 | 1<<29 | 1<<28 |
 			1<<27 | 1<<26 | 1<<25 | 1<<24);
-	smp_ctl_bit_callback(&cr_parms);
-
 	/* disable most machine checks */
-
-	cr_parms.start_ctl = 14;
-	cr_parms.end_ctl = 14;
 	cr_parms.orvals[14] = 0;
 	cr_parms.andvals[14] = ~(1<<28 | 1<<27 | 1<<26 | 1<<25 | 1<<24);
+
 	smp_ctl_bit_callback(&cr_parms);
 
 	spin_unlock_irqrestore(&smp_reserve_lock, flags);
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 74e6178fbaf2..1981c6199fa2 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -351,10 +351,12 @@ void __init time_init(void)
 	int cc;
 
 	/* kick the TOD clock */
-	asm volatile ("STCK 0(%1)\n\t"
-		      "IPM %0\n\t"
-		      "SRL %0,28" : "=r" (cc) : "a" (&init_timer_cc)
-		      : "memory", "cc");
+	asm volatile(
+		"	stck	0(%2)\n"
+		"	ipm	%0\n"
+		"	srl	%0,28"
+		: "=d" (cc), "=m" (init_timer_cc)
+		: "a" (&init_timer_cc) : "cc");
 	switch (cc) {
 	case 0: /* clock in set state: all is fine */
 		break;
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index c4982c963424..3eb4fab048b8 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -597,8 +597,7 @@ asmlinkage void data_exception(struct pt_regs * regs, long interruption_code)
 	local_irq_enable();
 
 	if (MACHINE_HAS_IEEE)
-		__asm__ volatile ("stfpc %0\n\t"
-				  : "=m" (current->thread.fp_regs.fpc));
+		asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
 
 #ifdef CONFIG_MATHEMU
 	else if (regs->psw.mask & PSW_MASK_PSTATE) {
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index 468f4ea33f99..027c4742a001 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -27,9 +27,7 @@ void __delay(unsigned long loops)
  * yield the megahertz number of the cpu. The important function
  * is udelay and that is done using the tod clock. -- martin.
  */
-	__asm__ __volatile__(
-		"0: brct %0,0b"
-		: /* no outputs */ : "r" ((loops/2) + 1));
+	asm volatile("0: brct %0,0b" : : "d" ((loops/2) + 1));
 }
 
 /*
@@ -38,13 +36,12 @@ void __delay(unsigned long loops)
  */
 void __udelay(unsigned long usecs)
 {
-	uint64_t start_cc, end_cc;
+	uint64_t start_cc;
 
 	if (usecs == 0)
 		return;
-	asm volatile ("STCK %0" : "=m" (start_cc));
+	start_cc = get_clock();
 	do {
 		cpu_relax();
-		asm volatile ("STCK %0" : "=m" (end_cc));
-	} while (((end_cc - start_cc)/4096) < usecs);
+	} while (((get_clock() - start_cc)/4096) < usecs);
 }
diff --git a/arch/s390/math-emu/math.c b/arch/s390/math-emu/math.c
index b4957c84e4d6..6b9aec5a2c18 100644
--- a/arch/s390/math-emu/math.c
+++ b/arch/s390/math-emu/math.c
@@ -1564,52 +1564,52 @@ static int emu_tceb (struct pt_regs *regs, int rx, long val) {
 }
 
 static inline void emu_load_regd(int reg) {
 	if ((reg&9) != 0)	/* test if reg in {0,2,4,6} */
 		return;
-	asm volatile (		/* load reg from fp_regs.fprs[reg] */
+	asm volatile(		/* load reg from fp_regs.fprs[reg] */
 		"	bras	1,0f\n"
 		"	ld	0,0(%1)\n"
 		"0:	ex	%0,0(1)"
 		: /* no output */
 		: "a" (reg<<4),"a" (&current->thread.fp_regs.fprs[reg].d)
-		: "1" );
+		: "1");
 }
 
 static inline void emu_load_rege(int reg) {
 	if ((reg&9) != 0)	/* test if reg in {0,2,4,6} */
 		return;
-	asm volatile (		/* load reg from fp_regs.fprs[reg] */
+	asm volatile(		/* load reg from fp_regs.fprs[reg] */
 		"	bras	1,0f\n"
 		"	le	0,0(%1)\n"
 		"0:	ex	%0,0(1)"
 		: /* no output */
 		: "a" (reg<<4), "a" (&current->thread.fp_regs.fprs[reg].f)
-		: "1" );
+		: "1");
 }
 
 static inline void emu_store_regd(int reg) {
 	if ((reg&9) != 0)	/* test if reg in {0,2,4,6} */
 		return;
-	asm volatile (		/* store reg to fp_regs.fprs[reg] */
+	asm volatile(		/* store reg to fp_regs.fprs[reg] */
 		"	bras	1,0f\n"
 		"	std	0,0(%1)\n"
 		"0:	ex	%0,0(1)"
 		: /* no output */
 		: "a" (reg<<4), "a" (&current->thread.fp_regs.fprs[reg].d)
-		: "1" );
+		: "1");
 }
 
 
 static inline void emu_store_rege(int reg) {
 	if ((reg&9) != 0)	/* test if reg in {0,2,4,6} */
 		return;
-	asm volatile (		/* store reg to fp_regs.fprs[reg] */
+	asm volatile(		/* store reg to fp_regs.fprs[reg] */
 		"	bras	1,0f\n"
 		"	ste	0,0(%1)\n"
 		"0:	ex	%0,0(1)"
 		: /* no output */
 		: "a" (reg<<4), "a" (&current->thread.fp_regs.fprs[reg].f)
-		: "1" );
+		: "1");
 }
 
 int math_emu_b3(__u8 *opcode, struct pt_regs * regs) {
@@ -2089,23 +2089,22 @@ int math_emu_ldr(__u8 *opcode) {
 
 	if ((opc & 0x90) == 0) {	/* test if rx in {0,2,4,6} */
 		/* we got an exception therfore ry can't be in {0,2,4,6} */
-		__asm__ __volatile (	/* load rx from fp_regs.fprs[ry] */
+		asm volatile(		/* load rx from fp_regs.fprs[ry] */
 			"	bras	1,0f\n"
 			"	ld	0,0(%1)\n"
 			"0:	ex	%0,0(1)"
 			: /* no output */
-			: "a" (opc & 0xf0),
-			  "a" (&fp_regs->fprs[opc & 0xf].d)
-			: "1" );
+			: "a" (opc & 0xf0), "a" (&fp_regs->fprs[opc & 0xf].d)
+			: "1");
 	} else if ((opc & 0x9) == 0) {	/* test if ry in {0,2,4,6} */
-		__asm__ __volatile (	/* store ry to fp_regs.fprs[rx] */
+		asm volatile (		/* store ry to fp_regs.fprs[rx] */
 			"	bras	1,0f\n"
 			"	std	0,0(%1)\n"
 			"0:	ex	%0,0(1)"
 			: /* no output */
 			: "a" ((opc & 0xf) << 4),
 			  "a" (&fp_regs->fprs[(opc & 0xf0)>>4].d)
-			: "1" );
+			: "1");
 	} else	/* move fp_regs.fprs[ry] to fp_regs.fprs[rx] */
 		fp_regs->fprs[(opc & 0xf0) >> 4] = fp_regs->fprs[opc & 0xf];
 	return 0;
@@ -2120,23 +2119,22 @@ int math_emu_ler(__u8 *opcode) {
 
 	if ((opc & 0x90) == 0) {	/* test if rx in {0,2,4,6} */
 		/* we got an exception therfore ry can't be in {0,2,4,6} */
-		__asm__ __volatile (	/* load rx from fp_regs.fprs[ry] */
+		asm volatile(		/* load rx from fp_regs.fprs[ry] */
 			"	bras	1,0f\n"
 			"	le	0,0(%1)\n"
 			"0:	ex	%0,0(1)"
 			: /* no output */
-			: "a" (opc & 0xf0),
-			  "a" (&fp_regs->fprs[opc & 0xf].f)
-			: "1" );
+			: "a" (opc & 0xf0), "a" (&fp_regs->fprs[opc & 0xf].f)
+			: "1");
 	} else if ((opc & 0x9) == 0) {	/* test if ry in {0,2,4,6} */
-		__asm__ __volatile (	/* store ry to fp_regs.fprs[rx] */
+		asm volatile(		/* store ry to fp_regs.fprs[rx] */
			"	bras	1,0f\n"
 			"	ste	0,0(%1)\n"
 			"0:	ex	%0,0(1)"
 			: /* no output */
 			: "a" ((opc & 0xf) << 4),
 			  "a" (&fp_regs->fprs[(opc & 0xf0) >> 4].f)
-			: "1" );
+			: "1");
 	} else	/* move fp_regs.fprs[ry] to fp_regs.fprs[rx] */
 		fp_regs->fprs[(opc & 0xf0) >> 4] = fp_regs->fprs[opc & 0xf];
 	return 0;
diff --git a/arch/s390/math-emu/sfp-util.h b/arch/s390/math-emu/sfp-util.h
index ab556b600f73..5b6ca4570ea4 100644
--- a/arch/s390/math-emu/sfp-util.h
+++ b/arch/s390/math-emu/sfp-util.h
@@ -4,48 +4,51 @@
 #include <asm/byteorder.h>
 
 #define add_ssaaaa(sh, sl, ah, al, bh, bl) ({		\
 	unsigned int __sh = (ah);			\
 	unsigned int __sl = (al);			\
-	__asm__ ("	alr	%1,%3\n"		\
-		 "	brc	12,0f\n"		\
-		 "	ahi	%0,1\n"			\
-		 "0:	alr	%0,%2"			\
-		 : "+&d" (__sh), "+d" (__sl)		\
-		 : "d" (bh), "d" (bl) : "cc" );		\
-	(sh) = __sh;					\
-	(sl) = __sl;					\
+	asm volatile(					\
+		"	alr	%1,%3\n"		\
+		"	brc	12,0f\n"		\
+		"	ahi	%0,1\n"			\
+		"0:	alr	%0,%2"			\
+		: "+&d" (__sh), "+d" (__sl)		\
+		: "d" (bh), "d" (bl) : "cc");		\
+	(sh) = __sh;					\
+	(sl) = __sl;					\
 })
 
 #define sub_ddmmss(sh, sl, ah, al, bh, bl) ({		\
 	unsigned int __sh = (ah);			\
 	unsigned int __sl = (al);			\
-	__asm__ ("	slr	%1,%3\n"		\
-		 "	brc	3,0f\n"			\
-		 "	ahi	%0,-1\n"		\
-		 "0:	slr	%0,%2"			\
-		 : "+&d" (__sh), "+d" (__sl)		\
-		 : "d" (bh), "d" (bl) : "cc" );		\
-	(sh) = __sh;					\
-	(sl) = __sl;					\
+	asm volatile(					\
+		"	slr	%1,%3\n"		\
+		"	brc	3,0f\n"			\
+		"	ahi	%0,-1\n"		\
+		"0:	slr	%0,%2"			\
+		: "+&d" (__sh), "+d" (__sl)		\
+		: "d" (bh), "d" (bl) : "cc");		\
+	(sh) = __sh;					\
+	(sl) = __sl;					\
 })
 
 /* a umul b = a mul b + (a>=2<<31) ? b<<32:0 + (b>=2<<31) ? a<<32:0 */
 #define umul_ppmm(wh, wl, u, v) ({			\
 	unsigned int __wh = u;				\
 	unsigned int __wl = v;				\
-	__asm__ ("	ltr	1,%0\n"			\
-		 "	mr	0,%1\n"			\
-		 "	jnm	0f\n"			\
-		 "	alr	0,%1\n"			\
-		 "0:	ltr	%1,%1\n"		\
-		 "	jnm	1f\n"			\
-		 "	alr	0,%0\n"			\
-		 "1:	lr	%0,0\n"			\
-		 "	lr	%1,1\n"			\
-		 : "+d" (__wh), "+d" (__wl)		\
-		 : : "0", "1", "cc" );			\
-	wh = __wh;					\
-	wl = __wl;					\
+	asm volatile(					\
+		"	ltr	1,%0\n"			\
+		"	mr	0,%1\n"			\
+		"	jnm	0f\n"			\
+		"	alr	0,%1\n"			\
+		"0:	ltr	%1,%1\n"		\
+		"	jnm	1f\n"			\
+		"	alr	0,%0\n"			\
+		"1:	lr	%0,0\n"			\
+		"	lr	%1,1\n"			\
+		: "+d" (__wh), "+d" (__wl)		\
+		: : "0", "1", "cc");			\
+	wh = __wh;					\
+	wl = __wl;					\
 })
 
 #define udiv_qrnnd(q, r, n1, n0, d) \
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 9b11e3e20903..226275d5c4f6 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -142,17 +142,17 @@ dcss_diag (__u8 func, void *parameter,
 
 	rx = (unsigned long) parameter;
 	ry = (unsigned long) func;
-	__asm__ __volatile__(
+	asm volatile(
 #ifdef CONFIG_64BIT
-		"	sam31\n"	// switch to 31 bit
+		"	sam31\n"
 		"	diag	%0,%1,0x64\n"
-		"	sam64\n"	// switch back to 64 bit
+		"	sam64\n"
 #else
 		"	diag	%0,%1,0x64\n"
 #endif
 		"	ipm	%2\n"
 		"	srl	%2,28\n"
-		: "+d" (rx), "+d" (ry), "=d" (rc) : : "cc" );
+		: "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
 	*ret1 = rx;
 	*ret2 = ry;
 	return rc;
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index a393c308bb29..f2b9a84dc2bf 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -424,20 +424,13 @@ int pfault_init(void)
 
 	if (pfault_disable)
 		return -1;
-	__asm__ __volatile__(
+	asm volatile(
 		"	diag	%1,%0,0x258\n"
 		"0:	j	2f\n"
 		"1:	la	%0,8\n"
 		"2:\n"
-		".section __ex_table,\"a\"\n"
-		"	.align 4\n"
-#ifndef CONFIG_64BIT
-		"	.long 0b,1b\n"
-#else /* CONFIG_64BIT */
-		"	.quad 0b,1b\n"
-#endif /* CONFIG_64BIT */
-		".previous"
-		: "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc" );
+		EX_TABLE(0b,1b)
+		: "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc");
 	__ctl_set_bit(0, 9);
 	return rc;
 }
@@ -450,18 +443,11 @@ void pfault_fini(void)
 	if (pfault_disable)
 		return;
 	__ctl_clear_bit(0,9);
-	__asm__ __volatile__(
+	asm volatile(
 		"	diag	%0,0,0x258\n"
 		"0:\n"
-		".section __ex_table,\"a\"\n"
-		"	.align 4\n"
-#ifndef CONFIG_64BIT
-		"	.long 0b,0b\n"
-#else /* CONFIG_64BIT */
-		"	.quad 0b,0b\n"
-#endif /* CONFIG_64BIT */
-		".previous"
-		: : "a" (&refbk), "m" (refbk) : "cc" );
+		EX_TABLE(0b,0b)
+		: : "a" (&refbk), "m" (refbk) : "cc");
 }
 
 asmlinkage void
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index cfd9b8f7a523..127044e1707c 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -45,26 +45,17 @@ void diag10(unsigned long addr)
 {
 	if (addr >= 0x7ff00000)
 		return;
+	asm volatile(
 #ifdef CONFIG_64BIT
-	asm volatile (
-		"	sam31\n"
-		"	diag	%0,%0,0x10\n"
-		"0:	sam64\n"
-		".section __ex_table,\"a\"\n"
-		"	.align 8\n"
-		"	.quad 0b, 0b\n"
-		".previous\n"
-		: : "a" (addr));
+		"	sam31\n"
+		"	diag	%0,%0,0x10\n"
+		"0:	sam64\n"
 #else
-	asm volatile (
-		"	diag	%0,%0,0x10\n"
+		"	diag	%0,%0,0x10\n"
 		"0:\n"
-		".section __ex_table,\"a\"\n"
-		"	.align 4\n"
-		"	.long 0b, 0b\n"
-		".previous\n"
-		: : "a" (addr));
 #endif
+		EX_TABLE(0b,0b)
+		: : "a" (addr));
 }
 
 void show_mem(void)
@@ -156,11 +147,10 @@ void __init paging_init(void)
 	S390_lowcore.kernel_asce = pgdir_k;
 
 	/* enable virtual mapping in kernel mode */
-	__asm__ __volatile__("	LCTL  1,1,%0\n"
-			     "	LCTL  7,7,%0\n"
-			     "	LCTL  13,13,%0\n"
-			     "	SSM   %1"
-			     : : "m" (pgdir_k), "m" (ssm_mask));
+	__ctl_load(pgdir_k, 1, 1);
+	__ctl_load(pgdir_k, 7, 7);
+	__ctl_load(pgdir_k, 13, 13);
+	__raw_local_irq_ssm(ssm_mask);
 
 	local_flush_tlb();
 	return;
@@ -241,11 +231,10 @@ void __init paging_init(void)
 	S390_lowcore.kernel_asce = pgdir_k;
 
 	/* enable virtual mapping in kernel mode */
-	__asm__ __volatile__("lctlg 1,1,%0\n\t"
-			     "lctlg 7,7,%0\n\t"
-			     "lctlg 13,13,%0\n\t"
-			     "ssm   %1"
-			     : :"m" (pgdir_k), "m" (ssm_mask));
+	__ctl_load(pgdir_k, 1, 1);
+	__ctl_load(pgdir_k, 7, 7);
+	__ctl_load(pgdir_k, 13, 13);
+	__raw_local_irq_ssm(ssm_mask);
 
 	local_flush_tlb();
 