author		Martin Schwidefsky <schwidefsky@de.ibm.com>	2006-09-28 10:56:43 -0400
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2006-09-28 10:56:43 -0400
commit		94c12cc7d196bab34aaa98d38521549fa1e5ef76 (patch)
tree		8e0cec0ed44445d74a2cb5160303d6b4dfb1bc31 /arch/s390/mm
parent		25d83cbfaa44e1b9170c0941c3ef52ca39f54ccc (diff)
[S390] Inline assembly cleanup.
Major cleanup of all s390 inline assemblies. They now have a common
coding style. Quite a few have been shortened, mainly by using register
asm variables. Use of the EX_TABLE macro helps as well. The atomic ops,
bit ops and locking inlines now use the Q-constraint if a newer gcc
is used. That results in slightly better code.
Thanks to Christian Borntraeger for proofreading the changes.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
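
(Two illustrative sketches of the techniques named above; the helpers below are hypothetical, not code from this patch. The Q-constraint describes a memory operand with a short displacement and no index register, which newer gcc can substitute directly into the instruction; a register asm variable pins a C value to a fixed general-purpose register so the asm body needs no explicit setup code.)

/* Minimal sketch, assuming a gcc new enough for the Q-constraint. */
static inline unsigned long long get_clock_sketch(void)
{
	unsigned long long clk;

	/* "=Q": memory operand, short displacement, no index register */
	asm volatile("	stck	%0" : "=Q" (clk) : : "cc");
	return clk;
}

/* Minimal sketch of a register asm variable. */
static inline unsigned long load_real_address_sketch(unsigned long addr)
{
	/* addr is pinned to gpr 2; no explicit load needed in the body */
	register unsigned long reg2 asm("2") = addr;

	asm volatile("	lra	%0,0(%0)" : "+a" (reg2) : : "cc");
	return reg2;
}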
Diffstat (limited to 'arch/s390/mm')
 arch/s390/mm/extmem.c | 16
 arch/s390/mm/fault.c  | 34
 arch/s390/mm/init.c   | 41
 3 files changed, 33 insertions(+), 58 deletions(-)
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 9b11e3e20903..226275d5c4f6 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -142,17 +142,17 @@ dcss_diag (__u8 func, void *parameter,
 
 	rx = (unsigned long) parameter;
 	ry = (unsigned long) func;
-	__asm__ __volatile__(
+	asm volatile(
 #ifdef CONFIG_64BIT
-		"   sam31\n"		// switch to 31 bit
-		"   diag    %0,%1,0x64\n"
-		"   sam64\n"		// switch back to 64 bit
+		"	sam31\n"
+		"	diag	%0,%1,0x64\n"
+		"	sam64\n"
 #else
-		"   diag    %0,%1,0x64\n"
+		"	diag	%0,%1,0x64\n"
 #endif
-		"   ipm     %2\n"
-		"   srl     %2,28\n"
-		: "+d" (rx), "+d" (ry), "=d" (rc) : : "cc" );
+		"	ipm	%2\n"
+		"	srl	%2,28\n"
+		: "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
 	*ret1 = rx;
 	*ret2 = ry;
 	return rc;
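
(The ipm/srl pair above is the standard s390 idiom for turning a condition code into a C return value: IPM inserts the cc into bits 2-3 of the high byte of the result register, and the right shift by 28 leaves rc holding the cc, 0-3. Restated as a stand-alone sketch with a hypothetical function name:)

static inline int dcss_diag_sketch(unsigned long rx, unsigned long ry)
{
	int rc;

	asm volatile(
		"	diag	%0,%1,0x64\n"	/* DCSS diagnose call */
		"	ipm	%2\n"		/* cc -> bits 2-3 of %2 */
		"	srl	%2,28\n"	/* rc = condition code */
		: "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
	return rc;
}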
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index a393c308bb29..f2b9a84dc2bf 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -424,20 +424,13 @@ int pfault_init(void)
 
 	if (pfault_disable)
 		return -1;
-	__asm__ __volatile__(
-		"    diag  %1,%0,0x258\n"
-		"0:  j     2f\n"
-		"1:  la    %0,8\n"
+	asm volatile(
+		"	diag	%1,%0,0x258\n"
+		"0:	j	2f\n"
+		"1:	la	%0,8\n"
 		"2:\n"
-		".section __ex_table,\"a\"\n"
-		"   .align 4\n"
-#ifndef CONFIG_64BIT
-		"   .long  0b,1b\n"
-#else /* CONFIG_64BIT */
-		"   .quad  0b,1b\n"
-#endif /* CONFIG_64BIT */
-		".previous"
-		: "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc" );
+		EX_TABLE(0b,1b)
+		: "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc");
 	__ctl_set_bit(0, 9);
 	return rc;
 }
@@ -450,18 +443,11 @@ void pfault_fini(void)
 	if (pfault_disable)
 		return;
 	__ctl_clear_bit(0,9);
-	__asm__ __volatile__(
-		"    diag  %0,0,0x258\n"
+	asm volatile(
+		"	diag	%0,0,0x258\n"
 		"0:\n"
-		".section __ex_table,\"a\"\n"
-		"   .align 4\n"
-#ifndef CONFIG_64BIT
-		"   .long  0b,0b\n"
-#else /* CONFIG_64BIT */
-		"   .quad  0b,0b\n"
-#endif /* CONFIG_64BIT */
-		".previous"
-		: : "a" (&refbk), "m" (refbk) : "cc" );
+		EX_TABLE(0b,0b)
+		: : "a" (&refbk), "m" (refbk) : "cc");
 }
 
 asmlinkage void
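
(EX_TABLE(0b,1b) stands in for the nine open-coded lines removed above. Reconstructed from those removed sequences, the macro plausibly expands to something like the sketch below; the real definition is introduced elsewhere in this patch. Note that the removed fault.c code used .align 4 even for the 64-bit .quad entry, one of the inconsistencies the shared macro eliminates.)

/* Plausible expansion, reconstructed from the code it replaces. */
#ifndef CONFIG_64BIT
#define EX_TABLE(_fault, _target)			\
	".section __ex_table,\"a\"\n"			\
	"	.align	4\n"				\
	"	.long	" #_fault "," #_target "\n"	\
	".previous\n"
#else
#define EX_TABLE(_fault, _target)			\
	".section __ex_table,\"a\"\n"			\
	"	.align	8\n"				\
	"	.quad	" #_fault "," #_target "\n"	\
	".previous\n"
#endif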
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index cfd9b8f7a523..127044e1707c 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -45,26 +45,17 @@ void diag10(unsigned long addr)
 {
 	if (addr >= 0x7ff00000)
 		return;
+	asm volatile(
 #ifdef CONFIG_64BIT
-	asm volatile (
-		"   sam31\n"
-		"   diag    %0,%0,0x10\n"
-		"0: sam64\n"
-		".section __ex_table,\"a\"\n"
-		"   .align 8\n"
-		"   .quad 0b, 0b\n"
-		".previous\n"
-		: : "a" (addr));
+		"	sam31\n"
+		"	diag	%0,%0,0x10\n"
+		"0:	sam64\n"
 #else
-	asm volatile (
-		"   diag    %0,%0,0x10\n"
+		"	diag	%0,%0,0x10\n"
 		"0:\n"
-		".section __ex_table,\"a\"\n"
-		"   .align 4\n"
-		"   .long 0b, 0b\n"
-		".previous\n"
-		: : "a" (addr));
 #endif
+		EX_TABLE(0b,0b)
+		: : "a" (addr));
 }
 
 void show_mem(void)
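
(Why EX_TABLE(0b,0b) with both addresses equal works here: on a program check the PSW already points past the faulting DIAG, i.e. at label 0, so the fault handler finds the entry and resumes at that same address, and diag10 simply falls through. A hedged sketch of the consuming side, modeled on the generic kernel pattern of this era; the exact field and constant names may differ:)

	/* in the program-check handler */
	const struct exception_table_entry *fixup;

	fixup = search_exception_tables(regs->psw.addr & __FIXUP_MASK);
	if (fixup) {
		/* resume at the fixup address instead of oopsing */
		regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
		return;
	}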
@@ -156,11 +147,10 @@ void __init paging_init(void)
 	S390_lowcore.kernel_asce = pgdir_k;
 
 	/* enable virtual mapping in kernel mode */
-	__asm__ __volatile__("    LCTL  1,1,%0\n"
-			     "    LCTL  7,7,%0\n"
-			     "    LCTL  13,13,%0\n"
-			     "    SSM   %1"
-			     : : "m" (pgdir_k), "m" (ssm_mask));
+	__ctl_load(pgdir_k, 1, 1);
+	__ctl_load(pgdir_k, 7, 7);
+	__ctl_load(pgdir_k, 13, 13);
+	__raw_local_irq_ssm(ssm_mask);
 
 	local_flush_tlb();
 	return;
@@ -241,11 +231,10 @@
 	S390_lowcore.kernel_asce = pgdir_k;
 
 	/* enable virtual mapping in kernel mode */
-	__asm__ __volatile__("lctlg 1,1,%0\n\t"
-			     "lctlg 7,7,%0\n\t"
-			     "lctlg 13,13,%0\n\t"
-			     "ssm %1"
-			     : : "m" (pgdir_k), "m" (ssm_mask));
+	__ctl_load(pgdir_k, 1, 1);
+	__ctl_load(pgdir_k, 7, 7);
+	__ctl_load(pgdir_k, 13, 13);
+	__raw_local_irq_ssm(ssm_mask);
 
 	local_flush_tlb();
 
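
(The helpers that replace each five-line asm in the two paging_init variants are thin wrappers around the very instructions they displace: each __ctl_load(pgdir_k, n, n) loads control register n via lctl on 31 bit or lctlg on 64 bit, exactly as the removed LCTL/lctlg lines did, and __raw_local_irq_ssm issues ssm. Their plausible shape, reconstructed from this diff alone; the real definitions live in the s390 headers touched elsewhere in the series and may differ in detail:)

/* Plausible shape only, reconstructed from the replaced instructions. */
#ifndef CONFIG_64BIT
#define __ctl_load(array, low, high)			\
	asm volatile(					\
		"	lctl	%1,%2,%0\n"		\
		: : "m" (array), "i" (low), "i" (high))
#else
#define __ctl_load(array, low, high)			\
	asm volatile(					\
		"	lctlg	%1,%2,%0\n"		\
		: : "m" (array), "i" (low), "i" (high))
#endif

/* Sketch: set system mask, restoring the saved interrupt mask bits. */
static inline void __raw_local_irq_ssm(unsigned long mask)
{
	asm volatile("	ssm	%0" : : "m" (mask));
}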