author	Martin Schwidefsky <schwidefsky@de.ibm.com>	2006-09-28 10:56:43 -0400
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2006-09-28 10:56:43 -0400
commit	94c12cc7d196bab34aaa98d38521549fa1e5ef76 (patch)
tree	8e0cec0ed44445d74a2cb5160303d6b4dfb1bc31 /arch/s390/mm/init.c
parent	25d83cbfaa44e1b9170c0941c3ef52ca39f54ccc (diff)
[S390] Inline assembly cleanup.
Major cleanup of all s390 inline assemblies. They now have a common
coding style. Quite a few have been shortened, mainly by using register
asm variables. Use of the EX_TABLE macro helps as well. The atomic ops,
bit ops and locking inlines now use the Q-constraint if a newer gcc
is used. That results in slightly better code.

Thanks to Christian Borntraeger for proof reading the changes.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
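[Editor's note: a minimal sketch of the common coding style and the
Q-constraint the message refers to. This is illustrative only, not code
taken from the patch; the function name is made up.]

	static inline int sketch_atomic_add(int i, int *v)
	{
		int old, new;

		asm volatile(
			"	l	%0,%2\n"	/* fetch current value */
			"0:	lr	%1,%0\n"	/* copy into work register */
			"	ar	%1,%3\n"	/* add the increment */
			"	cs	%0,%1,%2\n"	/* store back if still unchanged */
			"	jl	0b"		/* cs reloaded %0 on failure; retry */
			: "=&d" (old), "=&d" (new), "+Q" (*v)
			: "d" (i)
			: "cc", "memory");
		return new;
	}

The "Q" constraint lets a newer gcc address *v directly as a
base-plus-displacement operand instead of first forcing its address
into a register, which is where the slightly better code comes from.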
Diffstat (limited to 'arch/s390/mm/init.c')
-rw-r--r--	arch/s390/mm/init.c	41
1 file changed, 15 insertions(+), 26 deletions(-)
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index cfd9b8f7a523..127044e1707c 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -45,26 +45,17 @@ void diag10(unsigned long addr)
 {
 	if (addr >= 0x7ff00000)
 		return;
+	asm volatile(
 #ifdef CONFIG_64BIT
-	asm volatile (
-		"	sam31\n"
-		"	diag	%0,%0,0x10\n"
-		"0:	sam64\n"
-		".section __ex_table,\"a\"\n"
-		"	.align 8\n"
-		"	.quad 0b, 0b\n"
-		".previous\n"
-		: : "a" (addr));
+		"	sam31\n"
+		"	diag	%0,%0,0x10\n"
+		"0:	sam64\n"
 #else
-	asm volatile (
-		"	diag	%0,%0,0x10\n"
-		"0:\n"
-		".section __ex_table,\"a\"\n"
-		"	.align 4\n"
-		"	.long 0b, 0b\n"
-		".previous\n"
-		: : "a" (addr));
+		"	diag	%0,%0,0x10\n"
+		"0:\n"
 #endif
+		EX_TABLE(0b,0b)
+		: : "a" (addr));
 }
 
 void show_mem(void)
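[Editor's note: the EX_TABLE macro used in the new body is defined
elsewhere in this series, not in this hunk. Judging from the open-coded
lines it replaces above, its expansion is roughly the following sketch:]

	#ifdef CONFIG_64BIT
	#define EX_TABLE(_fault, _target)			\
		".section __ex_table,\"a\"\n"			\
		"	.align	8\n"				\
		"	.quad	" #_fault "," #_target "\n"	\
		".previous\n"
	#else
	#define EX_TABLE(_fault, _target)			\
		".section __ex_table,\"a\"\n"			\
		"	.align	4\n"				\
		"	.long	" #_fault "," #_target "\n"	\
		".previous\n"
	#endif

With the fixup-table entry factored out, the #ifdef only has to cover
the instructions that actually differ between 31-bit and 64-bit mode.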
@@ -156,11 +147,10 @@ void __init paging_init(void)
 	S390_lowcore.kernel_asce = pgdir_k;
 
 	/* enable virtual mapping in kernel mode */
-	__asm__ __volatile__("	LCTL	1,1,%0\n"
-			     "	LCTL	7,7,%0\n"
-			     "	LCTL	13,13,%0\n"
-			     "	SSM	%1"
-			     : : "m" (pgdir_k), "m" (ssm_mask));
+	__ctl_load(pgdir_k, 1, 1);
+	__ctl_load(pgdir_k, 7, 7);
+	__ctl_load(pgdir_k, 13, 13);
+	__raw_local_irq_ssm(ssm_mask);
 
 	local_flush_tlb();
 	return;
@@ -241,11 +231,10 @@ void __init paging_init(void)
 	S390_lowcore.kernel_asce = pgdir_k;
 
 	/* enable virtual mapping in kernel mode */
-	__asm__ __volatile__("lctlg 1,1,%0\n\t"
-			     "lctlg 7,7,%0\n\t"
-			     "lctlg 13,13,%0\n\t"
-			     "ssm   %1"
-			     : : "m" (pgdir_k), "m" (ssm_mask));
+	__ctl_load(pgdir_k, 1, 1);
+	__ctl_load(pgdir_k, 7, 7);
+	__ctl_load(pgdir_k, 13, 13);
+	__raw_local_irq_ssm(ssm_mask);
 
 	local_flush_tlb();
 
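[Editor's note: __ctl_load() and __raw_local_irq_ssm() wrap the
control-register load and the ssm that both paging_init() variants
previously spelled out by hand (LCTL on 31-bit, lctlg on 64-bit). A
hedged sketch of the 64-bit flavour, assuming the usual s390 header
idiom; the exact definitions live in the s390 headers, not in this
file:]

	#define __ctl_load(array, low, high) ({				\
		typedef struct { char _[sizeof(array)]; } addrtype;	\
		asm volatile(						\
			/* load control registers low..high from memory */ \
			"	lctlg	%1,%2,0(%0)\n"			\
			: : "a" (&array), "i" (low), "i" (high),	\
			    "m" (*(addrtype *)(&array)));		\
	})

The addrtype cast tells the compiler exactly which memory the asm
reads, so no blanket "memory" clobber is needed, and both paging_init()
variants collapse to the same four helper calls.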