aboutsummaryrefslogtreecommitdiffstats
path: root/arch/s390/kernel
diff options
context:
space:
mode:
authorHeiko Carstens <heiko.carstens@de.ibm.com>2015-02-12 07:08:27 -0500
committerMartin Schwidefsky <schwidefsky@de.ibm.com>2015-03-25 06:49:33 -0400
commit5a79859ae0f35d25c67a03e82bf0c80592f16a39 (patch)
tree37264d49f069812f19ced94e6ae171814fb7e498 /arch/s390/kernel
parent1833c9f647e9bda1cd24653ff8f9c207b5f5b911 (diff)
s390: remove 31 bit support
Remove the 31 bit support in order to reduce maintenance cost and effectively remove dead code. Since a couple of years there is no distribution left that comes with a 31 bit kernel. The 31 bit kernel also has been broken since more than a year before anybody noticed. In addition I added a removal warning to the kernel shown at ipl for 5 minutes: a960062e5826 ("s390: add 31 bit warning message") which let everybody know about the plan to remove 31 bit code. We didn't get any response. Given that the last 31 bit only machine was introduced in 1999 let's remove the code. Anybody with 31 bit user space code can still use the compat mode. Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com> Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390/kernel')
-rw-r--r--arch/s390/kernel/Makefile22
-rw-r--r--arch/s390/kernel/asm-offsets.c4
-rw-r--r--arch/s390/kernel/base.S76
-rw-r--r--arch/s390/kernel/cpcmd.c10
-rw-r--r--arch/s390/kernel/diag.c15
-rw-r--r--arch/s390/kernel/dis.c48
-rw-r--r--arch/s390/kernel/dumpstack.c26
-rw-r--r--arch/s390/kernel/early.c69
-rw-r--r--arch/s390/kernel/entry.S966
-rw-r--r--arch/s390/kernel/head.S49
-rw-r--r--arch/s390/kernel/head31.S106
-rw-r--r--arch/s390/kernel/head_kdump.S8
-rw-r--r--arch/s390/kernel/ipl.c2
-rw-r--r--arch/s390/kernel/module.c12
-rw-r--r--arch/s390/kernel/nmi.c92
-rw-r--r--arch/s390/kernel/pgm_check.S22
-rw-r--r--arch/s390/kernel/process.c29
-rw-r--r--arch/s390/kernel/ptrace.c46
-rw-r--r--arch/s390/kernel/reipl.S92
-rw-r--r--arch/s390/kernel/relocate_kernel.S118
-rw-r--r--arch/s390/kernel/sclp.S10
-rw-r--r--arch/s390/kernel/setup.c72
-rw-r--r--arch/s390/kernel/signal.c10
-rw-r--r--arch/s390/kernel/smp.c33
-rw-r--r--arch/s390/kernel/sys_s390.c49
-rw-r--r--arch/s390/kernel/traps.c153
-rw-r--r--arch/s390/kernel/vdso.c16
-rw-r--r--arch/s390/kernel/vmlinux.lds.S7
28 files changed, 60 insertions, 2102 deletions
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 31fab2676fe9..d94cbba95c50 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -26,20 +26,16 @@ CFLAGS_dumpstack.o += -fno-optimize-sibling-calls
26# 26#
27CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"' 27CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
28 28
29CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w 29CFLAGS_sysinfo.o += -w
30 30
31obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o 31obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o
32obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o 32obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
33obj-y += debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o 33obj-y += debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o
34obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o 34obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
35obj-y += dumpstack.o 35obj-y += runtime_instr.o cache.o dumpstack.o
36obj-y += entry64.o reipl64.o relocate_kernel64.o
36 37
37obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) 38extra-y += head.o head64.o vmlinux.lds
38obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
39obj-y += $(if $(CONFIG_64BIT),relocate_kernel64.o,relocate_kernel.o)
40
41extra-y += head.o vmlinux.lds
42extra-y += $(if $(CONFIG_64BIT),head64.o,head31.o)
43 39
44obj-$(CONFIG_MODULES) += s390_ksyms.o module.o 40obj-$(CONFIG_MODULES) += s390_ksyms.o module.o
45obj-$(CONFIG_SMP) += smp.o 41obj-$(CONFIG_SMP) += smp.o
@@ -56,13 +52,9 @@ obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o
56obj-$(CONFIG_CRASH_DUMP) += crash_dump.o 52obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
57obj-$(CONFIG_UPROBES) += uprobes.o 53obj-$(CONFIG_UPROBES) += uprobes.o
58 54
59ifdef CONFIG_64BIT 55obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o
60obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o \ 56obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_events.o
61 perf_cpum_cf_events.o
62obj-y += runtime_instr.o cache.o
63endif
64 57
65# vdso 58# vdso
66obj-$(CONFIG_64BIT) += vdso64/ 59obj-y += vdso64/
67obj-$(CONFIG_32BIT) += vdso32/
68obj-$(CONFIG_COMPAT) += vdso32/ 60obj-$(CONFIG_COMPAT) += vdso32/
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index e07e91605353..6e94edd90318 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -166,9 +166,6 @@ int main(void)
166 DEFINE(__LC_FPREGS_SAVE_AREA, offsetof(struct _lowcore, floating_pt_save_area)); 166 DEFINE(__LC_FPREGS_SAVE_AREA, offsetof(struct _lowcore, floating_pt_save_area));
167 DEFINE(__LC_GPREGS_SAVE_AREA, offsetof(struct _lowcore, gpregs_save_area)); 167 DEFINE(__LC_GPREGS_SAVE_AREA, offsetof(struct _lowcore, gpregs_save_area));
168 DEFINE(__LC_CREGS_SAVE_AREA, offsetof(struct _lowcore, cregs_save_area)); 168 DEFINE(__LC_CREGS_SAVE_AREA, offsetof(struct _lowcore, cregs_save_area));
169#ifdef CONFIG_32BIT
170 DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, extended_save_area_addr));
171#else /* CONFIG_32BIT */
172 DEFINE(__LC_DATA_EXC_CODE, offsetof(struct _lowcore, data_exc_code)); 169 DEFINE(__LC_DATA_EXC_CODE, offsetof(struct _lowcore, data_exc_code));
173 DEFINE(__LC_MCCK_FAIL_STOR_ADDR, offsetof(struct _lowcore, failing_storage_address)); 170 DEFINE(__LC_MCCK_FAIL_STOR_ADDR, offsetof(struct _lowcore, failing_storage_address));
174 DEFINE(__LC_EXT_PARAMS2, offsetof(struct _lowcore, ext_params2)); 171 DEFINE(__LC_EXT_PARAMS2, offsetof(struct _lowcore, ext_params2));
@@ -183,6 +180,5 @@ int main(void)
183 DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce)); 180 DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce));
184 DEFINE(__SIE_PROG0C, offsetof(struct kvm_s390_sie_block, prog0c)); 181 DEFINE(__SIE_PROG0C, offsetof(struct kvm_s390_sie_block, prog0c));
185 DEFINE(__SIE_PROG20, offsetof(struct kvm_s390_sie_block, prog20)); 182 DEFINE(__SIE_PROG20, offsetof(struct kvm_s390_sie_block, prog20));
186#endif /* CONFIG_32BIT */
187 return 0; 183 return 0;
188} 184}
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
index f74a53d339b0..daed3fde42ec 100644
--- a/arch/s390/kernel/base.S
+++ b/arch/s390/kernel/base.S
@@ -11,8 +11,6 @@
11#include <asm/ptrace.h> 11#include <asm/ptrace.h>
12#include <asm/sigp.h> 12#include <asm/sigp.h>
13 13
14#ifdef CONFIG_64BIT
15
16ENTRY(s390_base_mcck_handler) 14ENTRY(s390_base_mcck_handler)
17 basr %r13,0 15 basr %r13,0
180: lg %r15,__LC_PANIC_STACK # load panic stack 160: lg %r15,__LC_PANIC_STACK # load panic stack
@@ -131,77 +129,3 @@ ENTRY(diag308_reset)
131.Lfpctl: 129.Lfpctl:
132 .long 0 130 .long 0
133 .previous 131 .previous
134
135#else /* CONFIG_64BIT */
136
137ENTRY(s390_base_mcck_handler)
138 basr %r13,0
1390: l %r15,__LC_PANIC_STACK # load panic stack
140 ahi %r15,-STACK_FRAME_OVERHEAD
141 l %r1,2f-0b(%r13)
142 l %r1,0(%r1)
143 ltr %r1,%r1
144 jz 1f
145 basr %r14,%r1
1461: lm %r0,%r15,__LC_GPREGS_SAVE_AREA
147 lpsw __LC_MCK_OLD_PSW
148
1492: .long s390_base_mcck_handler_fn
150
151 .section .bss
152 .align 4
153 .globl s390_base_mcck_handler_fn
154s390_base_mcck_handler_fn:
155 .long 0
156 .previous
157
158ENTRY(s390_base_ext_handler)
159 stm %r0,%r15,__LC_SAVE_AREA_ASYNC
160 basr %r13,0
1610: ahi %r15,-STACK_FRAME_OVERHEAD
162 l %r1,2f-0b(%r13)
163 l %r1,0(%r1)
164 ltr %r1,%r1
165 jz 1f
166 basr %r14,%r1
1671: lm %r0,%r15,__LC_SAVE_AREA_ASYNC
168 ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit
169 lpsw __LC_EXT_OLD_PSW
170
1712: .long s390_base_ext_handler_fn
172
173 .section .bss
174 .align 4
175 .globl s390_base_ext_handler_fn
176s390_base_ext_handler_fn:
177 .long 0
178 .previous
179
180ENTRY(s390_base_pgm_handler)
181 stm %r0,%r15,__LC_SAVE_AREA_SYNC
182 basr %r13,0
1830: ahi %r15,-STACK_FRAME_OVERHEAD
184 l %r1,2f-0b(%r13)
185 l %r1,0(%r1)
186 ltr %r1,%r1
187 jz 1f
188 basr %r14,%r1
189 lm %r0,%r15,__LC_SAVE_AREA_SYNC
190 lpsw __LC_PGM_OLD_PSW
191
1921: lpsw disabled_wait_psw-0b(%r13)
193
1942: .long s390_base_pgm_handler_fn
195
196disabled_wait_psw:
197 .align 8
198 .long 0x000a0000,0x00000000 + s390_base_pgm_handler
199
200 .section .bss
201 .align 4
202 .globl s390_base_pgm_handler_fn
203s390_base_pgm_handler_fn:
204 .long 0
205 .previous
206
207#endif /* CONFIG_64BIT */
diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c
index d7b0c4d27880..199ec92ef4fe 100644
--- a/arch/s390/kernel/cpcmd.c
+++ b/arch/s390/kernel/cpcmd.c
@@ -27,13 +27,9 @@ static int diag8_noresponse(int cmdlen)
27 register unsigned long reg3 asm ("3") = cmdlen; 27 register unsigned long reg3 asm ("3") = cmdlen;
28 28
29 asm volatile( 29 asm volatile(
30#ifndef CONFIG_64BIT
31 " diag %1,%0,0x8\n"
32#else /* CONFIG_64BIT */
33 " sam31\n" 30 " sam31\n"
34 " diag %1,%0,0x8\n" 31 " diag %1,%0,0x8\n"
35 " sam64\n" 32 " sam64\n"
36#endif /* CONFIG_64BIT */
37 : "+d" (reg3) : "d" (reg2) : "cc"); 33 : "+d" (reg3) : "d" (reg2) : "cc");
38 return reg3; 34 return reg3;
39} 35}
@@ -46,17 +42,11 @@ static int diag8_response(int cmdlen, char *response, int *rlen)
46 register unsigned long reg5 asm ("5") = *rlen; 42 register unsigned long reg5 asm ("5") = *rlen;
47 43
48 asm volatile( 44 asm volatile(
49#ifndef CONFIG_64BIT
50 " diag %2,%0,0x8\n"
51 " brc 8,1f\n"
52 " ar %1,%4\n"
53#else /* CONFIG_64BIT */
54 " sam31\n" 45 " sam31\n"
55 " diag %2,%0,0x8\n" 46 " diag %2,%0,0x8\n"
56 " sam64\n" 47 " sam64\n"
57 " brc 8,1f\n" 48 " brc 8,1f\n"
58 " agr %1,%4\n" 49 " agr %1,%4\n"
59#endif /* CONFIG_64BIT */
60 "1:\n" 50 "1:\n"
61 : "+d" (reg4), "+d" (reg5) 51 : "+d" (reg4), "+d" (reg5)
62 : "d" (reg2), "d" (reg3), "d" (*rlen) : "cc"); 52 : "d" (reg2), "d" (reg3), "d" (*rlen) : "cc");
diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c
index 8237fc07ac79..2f69243bf700 100644
--- a/arch/s390/kernel/diag.c
+++ b/arch/s390/kernel/diag.c
@@ -18,13 +18,9 @@ int diag14(unsigned long rx, unsigned long ry1, unsigned long subcode)
18 int rc = 0; 18 int rc = 0;
19 19
20 asm volatile( 20 asm volatile(
21#ifdef CONFIG_64BIT
22 " sam31\n" 21 " sam31\n"
23 " diag %2,2,0x14\n" 22 " diag %2,2,0x14\n"
24 " sam64\n" 23 " sam64\n"
25#else
26 " diag %2,2,0x14\n"
27#endif
28 " ipm %0\n" 24 " ipm %0\n"
29 " srl %0,28\n" 25 " srl %0,28\n"
30 : "=d" (rc), "+d" (_ry2) 26 : "=d" (rc), "+d" (_ry2)
@@ -52,7 +48,6 @@ int diag210(struct diag210 *addr)
52 spin_lock_irqsave(&diag210_lock, flags); 48 spin_lock_irqsave(&diag210_lock, flags);
53 diag210_tmp = *addr; 49 diag210_tmp = *addr;
54 50
55#ifdef CONFIG_64BIT
56 asm volatile( 51 asm volatile(
57 " lhi %0,-1\n" 52 " lhi %0,-1\n"
58 " sam31\n" 53 " sam31\n"
@@ -62,16 +57,6 @@ int diag210(struct diag210 *addr)
62 "1: sam64\n" 57 "1: sam64\n"
63 EX_TABLE(0b, 1b) 58 EX_TABLE(0b, 1b)
64 : "=&d" (ccode) : "a" (&diag210_tmp) : "cc", "memory"); 59 : "=&d" (ccode) : "a" (&diag210_tmp) : "cc", "memory");
65#else
66 asm volatile(
67 " lhi %0,-1\n"
68 " diag %1,0,0x210\n"
69 "0: ipm %0\n"
70 " srl %0,28\n"
71 "1:\n"
72 EX_TABLE(0b, 1b)
73 : "=&d" (ccode) : "a" (&diag210_tmp) : "cc", "memory");
74#endif
75 60
76 *addr = diag210_tmp; 61 *addr = diag210_tmp;
77 spin_unlock_irqrestore(&diag210_lock, flags); 62 spin_unlock_irqrestore(&diag210_lock, flags);
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index 533430307da8..8140d10c6785 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -32,12 +32,6 @@
32#include <asm/debug.h> 32#include <asm/debug.h>
33#include <asm/irq.h> 33#include <asm/irq.h>
34 34
35#ifndef CONFIG_64BIT
36#define ONELONG "%08lx: "
37#else /* CONFIG_64BIT */
38#define ONELONG "%016lx: "
39#endif /* CONFIG_64BIT */
40
41enum { 35enum {
42 UNUSED, /* Indicates the end of the operand list */ 36 UNUSED, /* Indicates the end of the operand list */
43 R_8, /* GPR starting at position 8 */ 37 R_8, /* GPR starting at position 8 */
@@ -536,12 +530,10 @@ static char *long_insn_name[] = {
536}; 530};
537 531
538static struct s390_insn opcode[] = { 532static struct s390_insn opcode[] = {
539#ifdef CONFIG_64BIT
540 { "bprp", 0xc5, INSTR_MII_UPI }, 533 { "bprp", 0xc5, INSTR_MII_UPI },
541 { "bpp", 0xc7, INSTR_SMI_U0RDP }, 534 { "bpp", 0xc7, INSTR_SMI_U0RDP },
542 { "trtr", 0xd0, INSTR_SS_L0RDRD }, 535 { "trtr", 0xd0, INSTR_SS_L0RDRD },
543 { "lmd", 0xef, INSTR_SS_RRRDRD3 }, 536 { "lmd", 0xef, INSTR_SS_RRRDRD3 },
544#endif
545 { "spm", 0x04, INSTR_RR_R0 }, 537 { "spm", 0x04, INSTR_RR_R0 },
546 { "balr", 0x05, INSTR_RR_RR }, 538 { "balr", 0x05, INSTR_RR_RR },
547 { "bctr", 0x06, INSTR_RR_RR }, 539 { "bctr", 0x06, INSTR_RR_RR },
@@ -725,11 +717,9 @@ static struct s390_insn opcode[] = {
725}; 717};
726 718
727static struct s390_insn opcode_01[] = { 719static struct s390_insn opcode_01[] = {
728#ifdef CONFIG_64BIT
729 { "ptff", 0x04, INSTR_E }, 720 { "ptff", 0x04, INSTR_E },
730 { "pfpo", 0x0a, INSTR_E }, 721 { "pfpo", 0x0a, INSTR_E },
731 { "sam64", 0x0e, INSTR_E }, 722 { "sam64", 0x0e, INSTR_E },
732#endif
733 { "pr", 0x01, INSTR_E }, 723 { "pr", 0x01, INSTR_E },
734 { "upt", 0x02, INSTR_E }, 724 { "upt", 0x02, INSTR_E },
735 { "sckpf", 0x07, INSTR_E }, 725 { "sckpf", 0x07, INSTR_E },
@@ -741,7 +731,6 @@ static struct s390_insn opcode_01[] = {
741}; 731};
742 732
743static struct s390_insn opcode_a5[] = { 733static struct s390_insn opcode_a5[] = {
744#ifdef CONFIG_64BIT
745 { "iihh", 0x00, INSTR_RI_RU }, 734 { "iihh", 0x00, INSTR_RI_RU },
746 { "iihl", 0x01, INSTR_RI_RU }, 735 { "iihl", 0x01, INSTR_RI_RU },
747 { "iilh", 0x02, INSTR_RI_RU }, 736 { "iilh", 0x02, INSTR_RI_RU },
@@ -758,12 +747,10 @@ static struct s390_insn opcode_a5[] = {
758 { "llihl", 0x0d, INSTR_RI_RU }, 747 { "llihl", 0x0d, INSTR_RI_RU },
759 { "llilh", 0x0e, INSTR_RI_RU }, 748 { "llilh", 0x0e, INSTR_RI_RU },
760 { "llill", 0x0f, INSTR_RI_RU }, 749 { "llill", 0x0f, INSTR_RI_RU },
761#endif
762 { "", 0, INSTR_INVALID } 750 { "", 0, INSTR_INVALID }
763}; 751};
764 752
765static struct s390_insn opcode_a7[] = { 753static struct s390_insn opcode_a7[] = {
766#ifdef CONFIG_64BIT
767 { "tmhh", 0x02, INSTR_RI_RU }, 754 { "tmhh", 0x02, INSTR_RI_RU },
768 { "tmhl", 0x03, INSTR_RI_RU }, 755 { "tmhl", 0x03, INSTR_RI_RU },
769 { "brctg", 0x07, INSTR_RI_RP }, 756 { "brctg", 0x07, INSTR_RI_RP },
@@ -771,7 +758,6 @@ static struct s390_insn opcode_a7[] = {
771 { "aghi", 0x0b, INSTR_RI_RI }, 758 { "aghi", 0x0b, INSTR_RI_RI },
772 { "mghi", 0x0d, INSTR_RI_RI }, 759 { "mghi", 0x0d, INSTR_RI_RI },
773 { "cghi", 0x0f, INSTR_RI_RI }, 760 { "cghi", 0x0f, INSTR_RI_RI },
774#endif
775 { "tmlh", 0x00, INSTR_RI_RU }, 761 { "tmlh", 0x00, INSTR_RI_RU },
776 { "tmll", 0x01, INSTR_RI_RU }, 762 { "tmll", 0x01, INSTR_RI_RU },
777 { "brc", 0x04, INSTR_RI_UP }, 763 { "brc", 0x04, INSTR_RI_UP },
@@ -785,18 +771,15 @@ static struct s390_insn opcode_a7[] = {
785}; 771};
786 772
787static struct s390_insn opcode_aa[] = { 773static struct s390_insn opcode_aa[] = {
788#ifdef CONFIG_64BIT
789 { { 0, LONG_INSN_RINEXT }, 0x00, INSTR_RI_RI }, 774 { { 0, LONG_INSN_RINEXT }, 0x00, INSTR_RI_RI },
790 { "rion", 0x01, INSTR_RI_RI }, 775 { "rion", 0x01, INSTR_RI_RI },
791 { "tric", 0x02, INSTR_RI_RI }, 776 { "tric", 0x02, INSTR_RI_RI },
792 { "rioff", 0x03, INSTR_RI_RI }, 777 { "rioff", 0x03, INSTR_RI_RI },
793 { { 0, LONG_INSN_RIEMIT }, 0x04, INSTR_RI_RI }, 778 { { 0, LONG_INSN_RIEMIT }, 0x04, INSTR_RI_RI },
794#endif
795 { "", 0, INSTR_INVALID } 779 { "", 0, INSTR_INVALID }
796}; 780};
797 781
798static struct s390_insn opcode_b2[] = { 782static struct s390_insn opcode_b2[] = {
799#ifdef CONFIG_64BIT
800 { "stckf", 0x7c, INSTR_S_RD }, 783 { "stckf", 0x7c, INSTR_S_RD },
801 { "lpp", 0x80, INSTR_S_RD }, 784 { "lpp", 0x80, INSTR_S_RD },
802 { "lcctl", 0x84, INSTR_S_RD }, 785 { "lcctl", 0x84, INSTR_S_RD },
@@ -819,7 +802,6 @@ static struct s390_insn opcode_b2[] = {
819 { "tend", 0xf8, INSTR_S_00 }, 802 { "tend", 0xf8, INSTR_S_00 },
820 { "niai", 0xfa, INSTR_IE_UU }, 803 { "niai", 0xfa, INSTR_IE_UU },
821 { { 0, LONG_INSN_TABORT }, 0xfc, INSTR_S_RD }, 804 { { 0, LONG_INSN_TABORT }, 0xfc, INSTR_S_RD },
822#endif
823 { "stidp", 0x02, INSTR_S_RD }, 805 { "stidp", 0x02, INSTR_S_RD },
824 { "sck", 0x04, INSTR_S_RD }, 806 { "sck", 0x04, INSTR_S_RD },
825 { "stck", 0x05, INSTR_S_RD }, 807 { "stck", 0x05, INSTR_S_RD },
@@ -908,7 +890,6 @@ static struct s390_insn opcode_b2[] = {
908}; 890};
909 891
910static struct s390_insn opcode_b3[] = { 892static struct s390_insn opcode_b3[] = {
911#ifdef CONFIG_64BIT
912 { "maylr", 0x38, INSTR_RRF_F0FF }, 893 { "maylr", 0x38, INSTR_RRF_F0FF },
913 { "mylr", 0x39, INSTR_RRF_F0FF }, 894 { "mylr", 0x39, INSTR_RRF_F0FF },
914 { "mayr", 0x3a, INSTR_RRF_F0FF }, 895 { "mayr", 0x3a, INSTR_RRF_F0FF },
@@ -996,7 +977,6 @@ static struct s390_insn opcode_b3[] = {
996 { "qaxtr", 0xfd, INSTR_RRF_FUFF }, 977 { "qaxtr", 0xfd, INSTR_RRF_FUFF },
997 { "iextr", 0xfe, INSTR_RRF_F0FR }, 978 { "iextr", 0xfe, INSTR_RRF_F0FR },
998 { "rrxtr", 0xff, INSTR_RRF_FFRU }, 979 { "rrxtr", 0xff, INSTR_RRF_FFRU },
999#endif
1000 { "lpebr", 0x00, INSTR_RRE_FF }, 980 { "lpebr", 0x00, INSTR_RRE_FF },
1001 { "lnebr", 0x01, INSTR_RRE_FF }, 981 { "lnebr", 0x01, INSTR_RRE_FF },
1002 { "ltebr", 0x02, INSTR_RRE_FF }, 982 { "ltebr", 0x02, INSTR_RRE_FF },
@@ -1091,7 +1071,6 @@ static struct s390_insn opcode_b3[] = {
1091}; 1071};
1092 1072
1093static struct s390_insn opcode_b9[] = { 1073static struct s390_insn opcode_b9[] = {
1094#ifdef CONFIG_64BIT
1095 { "lpgr", 0x00, INSTR_RRE_RR }, 1074 { "lpgr", 0x00, INSTR_RRE_RR },
1096 { "lngr", 0x01, INSTR_RRE_RR }, 1075 { "lngr", 0x01, INSTR_RRE_RR },
1097 { "ltgr", 0x02, INSTR_RRE_RR }, 1076 { "ltgr", 0x02, INSTR_RRE_RR },
@@ -1204,7 +1183,6 @@ static struct s390_insn opcode_b9[] = {
1204 { "srk", 0xf9, INSTR_RRF_R0RR2 }, 1183 { "srk", 0xf9, INSTR_RRF_R0RR2 },
1205 { "alrk", 0xfa, INSTR_RRF_R0RR2 }, 1184 { "alrk", 0xfa, INSTR_RRF_R0RR2 },
1206 { "slrk", 0xfb, INSTR_RRF_R0RR2 }, 1185 { "slrk", 0xfb, INSTR_RRF_R0RR2 },
1207#endif
1208 { "kmac", 0x1e, INSTR_RRE_RR }, 1186 { "kmac", 0x1e, INSTR_RRE_RR },
1209 { "lrvr", 0x1f, INSTR_RRE_RR }, 1187 { "lrvr", 0x1f, INSTR_RRE_RR },
1210 { "km", 0x2e, INSTR_RRE_RR }, 1188 { "km", 0x2e, INSTR_RRE_RR },
@@ -1224,7 +1202,6 @@ static struct s390_insn opcode_b9[] = {
1224}; 1202};
1225 1203
1226static struct s390_insn opcode_c0[] = { 1204static struct s390_insn opcode_c0[] = {
1227#ifdef CONFIG_64BIT
1228 { "lgfi", 0x01, INSTR_RIL_RI }, 1205 { "lgfi", 0x01, INSTR_RIL_RI },
1229 { "xihf", 0x06, INSTR_RIL_RU }, 1206 { "xihf", 0x06, INSTR_RIL_RU },
1230 { "xilf", 0x07, INSTR_RIL_RU }, 1207 { "xilf", 0x07, INSTR_RIL_RU },
@@ -1236,7 +1213,6 @@ static struct s390_insn opcode_c0[] = {
1236 { "oilf", 0x0d, INSTR_RIL_RU }, 1213 { "oilf", 0x0d, INSTR_RIL_RU },
1237 { "llihf", 0x0e, INSTR_RIL_RU }, 1214 { "llihf", 0x0e, INSTR_RIL_RU },
1238 { "llilf", 0x0f, INSTR_RIL_RU }, 1215 { "llilf", 0x0f, INSTR_RIL_RU },
1239#endif
1240 { "larl", 0x00, INSTR_RIL_RP }, 1216 { "larl", 0x00, INSTR_RIL_RP },
1241 { "brcl", 0x04, INSTR_RIL_UP }, 1217 { "brcl", 0x04, INSTR_RIL_UP },
1242 { "brasl", 0x05, INSTR_RIL_RP }, 1218 { "brasl", 0x05, INSTR_RIL_RP },
@@ -1244,7 +1220,6 @@ static struct s390_insn opcode_c0[] = {
1244}; 1220};
1245 1221
1246static struct s390_insn opcode_c2[] = { 1222static struct s390_insn opcode_c2[] = {
1247#ifdef CONFIG_64BIT
1248 { "msgfi", 0x00, INSTR_RIL_RI }, 1223 { "msgfi", 0x00, INSTR_RIL_RI },
1249 { "msfi", 0x01, INSTR_RIL_RI }, 1224 { "msfi", 0x01, INSTR_RIL_RI },
1250 { "slgfi", 0x04, INSTR_RIL_RU }, 1225 { "slgfi", 0x04, INSTR_RIL_RU },
@@ -1257,12 +1232,10 @@ static struct s390_insn opcode_c2[] = {
1257 { "cfi", 0x0d, INSTR_RIL_RI }, 1232 { "cfi", 0x0d, INSTR_RIL_RI },
1258 { "clgfi", 0x0e, INSTR_RIL_RU }, 1233 { "clgfi", 0x0e, INSTR_RIL_RU },
1259 { "clfi", 0x0f, INSTR_RIL_RU }, 1234 { "clfi", 0x0f, INSTR_RIL_RU },
1260#endif
1261 { "", 0, INSTR_INVALID } 1235 { "", 0, INSTR_INVALID }
1262}; 1236};
1263 1237
1264static struct s390_insn opcode_c4[] = { 1238static struct s390_insn opcode_c4[] = {
1265#ifdef CONFIG_64BIT
1266 { "llhrl", 0x02, INSTR_RIL_RP }, 1239 { "llhrl", 0x02, INSTR_RIL_RP },
1267 { "lghrl", 0x04, INSTR_RIL_RP }, 1240 { "lghrl", 0x04, INSTR_RIL_RP },
1268 { "lhrl", 0x05, INSTR_RIL_RP }, 1241 { "lhrl", 0x05, INSTR_RIL_RP },
@@ -1274,12 +1247,10 @@ static struct s390_insn opcode_c4[] = {
1274 { "lrl", 0x0d, INSTR_RIL_RP }, 1247 { "lrl", 0x0d, INSTR_RIL_RP },
1275 { { 0, LONG_INSN_LLGFRL }, 0x0e, INSTR_RIL_RP }, 1248 { { 0, LONG_INSN_LLGFRL }, 0x0e, INSTR_RIL_RP },
1276 { "strl", 0x0f, INSTR_RIL_RP }, 1249 { "strl", 0x0f, INSTR_RIL_RP },
1277#endif
1278 { "", 0, INSTR_INVALID } 1250 { "", 0, INSTR_INVALID }
1279}; 1251};
1280 1252
1281static struct s390_insn opcode_c6[] = { 1253static struct s390_insn opcode_c6[] = {
1282#ifdef CONFIG_64BIT
1283 { "exrl", 0x00, INSTR_RIL_RP }, 1254 { "exrl", 0x00, INSTR_RIL_RP },
1284 { "pfdrl", 0x02, INSTR_RIL_UP }, 1255 { "pfdrl", 0x02, INSTR_RIL_UP },
1285 { "cghrl", 0x04, INSTR_RIL_RP }, 1256 { "cghrl", 0x04, INSTR_RIL_RP },
@@ -1292,35 +1263,29 @@ static struct s390_insn opcode_c6[] = {
1292 { "crl", 0x0d, INSTR_RIL_RP }, 1263 { "crl", 0x0d, INSTR_RIL_RP },
1293 { { 0, LONG_INSN_CLGFRL }, 0x0e, INSTR_RIL_RP }, 1264 { { 0, LONG_INSN_CLGFRL }, 0x0e, INSTR_RIL_RP },
1294 { "clrl", 0x0f, INSTR_RIL_RP }, 1265 { "clrl", 0x0f, INSTR_RIL_RP },
1295#endif
1296 { "", 0, INSTR_INVALID } 1266 { "", 0, INSTR_INVALID }
1297}; 1267};
1298 1268
1299static struct s390_insn opcode_c8[] = { 1269static struct s390_insn opcode_c8[] = {
1300#ifdef CONFIG_64BIT
1301 { "mvcos", 0x00, INSTR_SSF_RRDRD }, 1270 { "mvcos", 0x00, INSTR_SSF_RRDRD },
1302 { "ectg", 0x01, INSTR_SSF_RRDRD }, 1271 { "ectg", 0x01, INSTR_SSF_RRDRD },
1303 { "csst", 0x02, INSTR_SSF_RRDRD }, 1272 { "csst", 0x02, INSTR_SSF_RRDRD },
1304 { "lpd", 0x04, INSTR_SSF_RRDRD2 }, 1273 { "lpd", 0x04, INSTR_SSF_RRDRD2 },
1305 { "lpdg", 0x05, INSTR_SSF_RRDRD2 }, 1274 { "lpdg", 0x05, INSTR_SSF_RRDRD2 },
1306#endif
1307 { "", 0, INSTR_INVALID } 1275 { "", 0, INSTR_INVALID }
1308}; 1276};
1309 1277
1310static struct s390_insn opcode_cc[] = { 1278static struct s390_insn opcode_cc[] = {
1311#ifdef CONFIG_64BIT
1312 { "brcth", 0x06, INSTR_RIL_RP }, 1279 { "brcth", 0x06, INSTR_RIL_RP },
1313 { "aih", 0x08, INSTR_RIL_RI }, 1280 { "aih", 0x08, INSTR_RIL_RI },
1314 { "alsih", 0x0a, INSTR_RIL_RI }, 1281 { "alsih", 0x0a, INSTR_RIL_RI },
1315 { { 0, LONG_INSN_ALSIHN }, 0x0b, INSTR_RIL_RI }, 1282 { { 0, LONG_INSN_ALSIHN }, 0x0b, INSTR_RIL_RI },
1316 { "cih", 0x0d, INSTR_RIL_RI }, 1283 { "cih", 0x0d, INSTR_RIL_RI },
1317 { "clih", 0x0f, INSTR_RIL_RI }, 1284 { "clih", 0x0f, INSTR_RIL_RI },
1318#endif
1319 { "", 0, INSTR_INVALID } 1285 { "", 0, INSTR_INVALID }
1320}; 1286};
1321 1287
1322static struct s390_insn opcode_e3[] = { 1288static struct s390_insn opcode_e3[] = {
1323#ifdef CONFIG_64BIT
1324 { "ltg", 0x02, INSTR_RXY_RRRD }, 1289 { "ltg", 0x02, INSTR_RXY_RRRD },
1325 { "lrag", 0x03, INSTR_RXY_RRRD }, 1290 { "lrag", 0x03, INSTR_RXY_RRRD },
1326 { "lg", 0x04, INSTR_RXY_RRRD }, 1291 { "lg", 0x04, INSTR_RXY_RRRD },
@@ -1414,7 +1379,6 @@ static struct s390_insn opcode_e3[] = {
1414 { "clhf", 0xcf, INSTR_RXY_RRRD }, 1379 { "clhf", 0xcf, INSTR_RXY_RRRD },
1415 { { 0, LONG_INSN_MPCIFC }, 0xd0, INSTR_RXY_RRRD }, 1380 { { 0, LONG_INSN_MPCIFC }, 0xd0, INSTR_RXY_RRRD },
1416 { { 0, LONG_INSN_STPCIFC }, 0xd4, INSTR_RXY_RRRD }, 1381 { { 0, LONG_INSN_STPCIFC }, 0xd4, INSTR_RXY_RRRD },
1417#endif
1418 { "lrv", 0x1e, INSTR_RXY_RRRD }, 1382 { "lrv", 0x1e, INSTR_RXY_RRRD },
1419 { "lrvh", 0x1f, INSTR_RXY_RRRD }, 1383 { "lrvh", 0x1f, INSTR_RXY_RRRD },
1420 { "strv", 0x3e, INSTR_RXY_RRRD }, 1384 { "strv", 0x3e, INSTR_RXY_RRRD },
@@ -1426,7 +1390,6 @@ static struct s390_insn opcode_e3[] = {
1426}; 1390};
1427 1391
1428static struct s390_insn opcode_e5[] = { 1392static struct s390_insn opcode_e5[] = {
1429#ifdef CONFIG_64BIT
1430 { "strag", 0x02, INSTR_SSE_RDRD }, 1393 { "strag", 0x02, INSTR_SSE_RDRD },
1431 { "mvhhi", 0x44, INSTR_SIL_RDI }, 1394 { "mvhhi", 0x44, INSTR_SIL_RDI },
1432 { "mvghi", 0x48, INSTR_SIL_RDI }, 1395 { "mvghi", 0x48, INSTR_SIL_RDI },
@@ -1439,7 +1402,6 @@ static struct s390_insn opcode_e5[] = {
1439 { { 0, LONG_INSN_CLFHSI }, 0x5d, INSTR_SIL_RDU }, 1402 { { 0, LONG_INSN_CLFHSI }, 0x5d, INSTR_SIL_RDU },
1440 { { 0, LONG_INSN_TBEGIN }, 0x60, INSTR_SIL_RDU }, 1403 { { 0, LONG_INSN_TBEGIN }, 0x60, INSTR_SIL_RDU },
1441 { { 0, LONG_INSN_TBEGINC }, 0x61, INSTR_SIL_RDU }, 1404 { { 0, LONG_INSN_TBEGINC }, 0x61, INSTR_SIL_RDU },
1442#endif
1443 { "lasp", 0x00, INSTR_SSE_RDRD }, 1405 { "lasp", 0x00, INSTR_SSE_RDRD },
1444 { "tprot", 0x01, INSTR_SSE_RDRD }, 1406 { "tprot", 0x01, INSTR_SSE_RDRD },
1445 { "mvcsk", 0x0e, INSTR_SSE_RDRD }, 1407 { "mvcsk", 0x0e, INSTR_SSE_RDRD },
@@ -1448,7 +1410,6 @@ static struct s390_insn opcode_e5[] = {
1448}; 1410};
1449 1411
1450static struct s390_insn opcode_e7[] = { 1412static struct s390_insn opcode_e7[] = {
1451#ifdef CONFIG_64BIT
1452 { "lcbb", 0x27, INSTR_RXE_RRRDM }, 1413 { "lcbb", 0x27, INSTR_RXE_RRRDM },
1453 { "vgef", 0x13, INSTR_VRV_VVRDM }, 1414 { "vgef", 0x13, INSTR_VRV_VVRDM },
1454 { "vgeg", 0x12, INSTR_VRV_VVRDM }, 1415 { "vgeg", 0x12, INSTR_VRV_VVRDM },
@@ -1588,11 +1549,9 @@ static struct s390_insn opcode_e7[] = {
1588 { "vfsq", 0xce, INSTR_VRR_VV000MM }, 1549 { "vfsq", 0xce, INSTR_VRR_VV000MM },
1589 { "vfs", 0xe2, INSTR_VRR_VVV00MM }, 1550 { "vfs", 0xe2, INSTR_VRR_VVV00MM },
1590 { "vftci", 0x4a, INSTR_VRI_VVIMM }, 1551 { "vftci", 0x4a, INSTR_VRI_VVIMM },
1591#endif
1592}; 1552};
1593 1553
1594static struct s390_insn opcode_eb[] = { 1554static struct s390_insn opcode_eb[] = {
1595#ifdef CONFIG_64BIT
1596 { "lmg", 0x04, INSTR_RSY_RRRD }, 1555 { "lmg", 0x04, INSTR_RSY_RRRD },
1597 { "srag", 0x0a, INSTR_RSY_RRRD }, 1556 { "srag", 0x0a, INSTR_RSY_RRRD },
1598 { "slag", 0x0b, INSTR_RSY_RRRD }, 1557 { "slag", 0x0b, INSTR_RSY_RRRD },
@@ -1659,7 +1618,6 @@ static struct s390_insn opcode_eb[] = {
1659 { "stric", 0x61, INSTR_RSY_RDRM }, 1618 { "stric", 0x61, INSTR_RSY_RDRM },
1660 { "mric", 0x62, INSTR_RSY_RDRM }, 1619 { "mric", 0x62, INSTR_RSY_RDRM },
1661 { { 0, LONG_INSN_STCCTM }, 0x17, INSTR_RSY_RMRD }, 1620 { { 0, LONG_INSN_STCCTM }, 0x17, INSTR_RSY_RMRD },
1662#endif
1663 { "rll", 0x1d, INSTR_RSY_RRRD }, 1621 { "rll", 0x1d, INSTR_RSY_RRRD },
1664 { "mvclu", 0x8e, INSTR_RSY_RRRD }, 1622 { "mvclu", 0x8e, INSTR_RSY_RRRD },
1665 { "tp", 0xc0, INSTR_RSL_R0RD }, 1623 { "tp", 0xc0, INSTR_RSL_R0RD },
@@ -1667,7 +1625,6 @@ static struct s390_insn opcode_eb[] = {
1667}; 1625};
1668 1626
1669static struct s390_insn opcode_ec[] = { 1627static struct s390_insn opcode_ec[] = {
1670#ifdef CONFIG_64BIT
1671 { "brxhg", 0x44, INSTR_RIE_RRP }, 1628 { "brxhg", 0x44, INSTR_RIE_RRP },
1672 { "brxlg", 0x45, INSTR_RIE_RRP }, 1629 { "brxlg", 0x45, INSTR_RIE_RRP },
1673 { { 0, LONG_INSN_RISBLG }, 0x51, INSTR_RIE_RRUUU }, 1630 { { 0, LONG_INSN_RISBLG }, 0x51, INSTR_RIE_RRUUU },
@@ -1701,12 +1658,10 @@ static struct s390_insn opcode_ec[] = {
1701 { "clgib", 0xfd, INSTR_RIS_RURDU }, 1658 { "clgib", 0xfd, INSTR_RIS_RURDU },
1702 { "cib", 0xfe, INSTR_RIS_RURDI }, 1659 { "cib", 0xfe, INSTR_RIS_RURDI },
1703 { "clib", 0xff, INSTR_RIS_RURDU }, 1660 { "clib", 0xff, INSTR_RIS_RURDU },
1704#endif
1705 { "", 0, INSTR_INVALID } 1661 { "", 0, INSTR_INVALID }
1706}; 1662};
1707 1663
1708static struct s390_insn opcode_ed[] = { 1664static struct s390_insn opcode_ed[] = {
1709#ifdef CONFIG_64BIT
1710 { "mayl", 0x38, INSTR_RXF_FRRDF }, 1665 { "mayl", 0x38, INSTR_RXF_FRRDF },
1711 { "myl", 0x39, INSTR_RXF_FRRDF }, 1666 { "myl", 0x39, INSTR_RXF_FRRDF },
1712 { "may", 0x3a, INSTR_RXF_FRRDF }, 1667 { "may", 0x3a, INSTR_RXF_FRRDF },
@@ -1731,7 +1686,6 @@ static struct s390_insn opcode_ed[] = {
1731 { "czxt", 0xa9, INSTR_RSL_LRDFU }, 1686 { "czxt", 0xa9, INSTR_RSL_LRDFU },
1732 { "cdzt", 0xaa, INSTR_RSL_LRDFU }, 1687 { "cdzt", 0xaa, INSTR_RSL_LRDFU },
1733 { "cxzt", 0xab, INSTR_RSL_LRDFU }, 1688 { "cxzt", 0xab, INSTR_RSL_LRDFU },
1734#endif
1735 { "ldeb", 0x04, INSTR_RXE_FRRD }, 1689 { "ldeb", 0x04, INSTR_RXE_FRRD },
1736 { "lxdb", 0x05, INSTR_RXE_FRRD }, 1690 { "lxdb", 0x05, INSTR_RXE_FRRD },
1737 { "lxeb", 0x06, INSTR_RXE_FRRD }, 1691 { "lxeb", 0x06, INSTR_RXE_FRRD },
@@ -2051,7 +2005,7 @@ void show_code(struct pt_regs *regs)
2051 else 2005 else
2052 *ptr++ = ' '; 2006 *ptr++ = ' ';
2053 addr = regs->psw.addr + start - 32; 2007 addr = regs->psw.addr + start - 32;
2054 ptr += sprintf(ptr, ONELONG, addr); 2008 ptr += sprintf(ptr, "%016lx: ", addr);
2055 if (start + opsize >= end) 2009 if (start + opsize >= end)
2056 break; 2010 break;
2057 for (i = 0; i < opsize; i++) 2011 for (i = 0; i < opsize; i++)
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index a99852e96a77..dc8e20473484 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -18,16 +18,6 @@
18#include <asm/dis.h> 18#include <asm/dis.h>
19#include <asm/ipl.h> 19#include <asm/ipl.h>
20 20
21#ifndef CONFIG_64BIT
22#define LONG "%08lx "
23#define FOURLONG "%08lx %08lx %08lx %08lx\n"
24static int kstack_depth_to_print = 12;
25#else /* CONFIG_64BIT */
26#define LONG "%016lx "
27#define FOURLONG "%016lx %016lx %016lx %016lx\n"
28static int kstack_depth_to_print = 20;
29#endif /* CONFIG_64BIT */
30
31/* 21/*
32 * For show_trace we have tree different stack to consider: 22 * For show_trace we have tree different stack to consider:
33 * - the panic stack which is used if the kernel stack has overflown 23 * - the panic stack which is used if the kernel stack has overflown
@@ -115,12 +105,12 @@ void show_stack(struct task_struct *task, unsigned long *sp)
115 else 105 else
116 stack = sp; 106 stack = sp;
117 107
118 for (i = 0; i < kstack_depth_to_print; i++) { 108 for (i = 0; i < 20; i++) {
119 if (((addr_t) stack & (THREAD_SIZE-1)) == 0) 109 if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
120 break; 110 break;
121 if ((i * sizeof(long) % 32) == 0) 111 if ((i * sizeof(long) % 32) == 0)
122 printk("%s ", i == 0 ? "" : "\n"); 112 printk("%s ", i == 0 ? "" : "\n");
123 printk(LONG, *stack++); 113 printk("%016lx ", *stack++);
124 } 114 }
125 printk("\n"); 115 printk("\n");
126 show_trace(task, sp); 116 show_trace(task, sp);
@@ -128,10 +118,8 @@ void show_stack(struct task_struct *task, unsigned long *sp)
128 118
129static void show_last_breaking_event(struct pt_regs *regs) 119static void show_last_breaking_event(struct pt_regs *regs)
130{ 120{
131#ifdef CONFIG_64BIT
132 printk("Last Breaking-Event-Address:\n"); 121 printk("Last Breaking-Event-Address:\n");
133 printk(" [<%016lx>] %pSR\n", regs->args[0], (void *)regs->args[0]); 122 printk(" [<%016lx>] %pSR\n", regs->args[0], (void *)regs->args[0]);
134#endif
135} 123}
136 124
137static inline int mask_bits(struct pt_regs *regs, unsigned long bits) 125static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
@@ -155,16 +143,14 @@ void show_registers(struct pt_regs *regs)
155 mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT), 143 mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
156 mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC), 144 mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
157 mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM)); 145 mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
158#ifdef CONFIG_64BIT
159 printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA)); 146 printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA));
160#endif 147 printk("\n%s GPRS: %016lx %016lx %016lx %016lx\n", mode,
161 printk("\n%s GPRS: " FOURLONG, mode,
162 regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]); 148 regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
163 printk(" " FOURLONG, 149 printk(" %016lx %016lx %016lx %016lx\n",
164 regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]); 150 regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
165 printk(" " FOURLONG, 151 printk(" %016lx %016lx %016lx %016lx\n",
166 regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]); 152 regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
167 printk(" " FOURLONG, 153 printk(" %016lx %016lx %016lx %016lx\n",
168 regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]); 154 regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
169 show_code(regs); 155 show_code(regs);
170} 156}
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 4427ab7ac23a..549a73a4b543 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -64,7 +64,6 @@ asm(
64 " .align 4\n" 64 " .align 4\n"
65 " .type savesys_ipl_nss, @function\n" 65 " .type savesys_ipl_nss, @function\n"
66 "savesys_ipl_nss:\n" 66 "savesys_ipl_nss:\n"
67#ifdef CONFIG_64BIT
68 " stmg 6,15,48(15)\n" 67 " stmg 6,15,48(15)\n"
69 " lgr 14,3\n" 68 " lgr 14,3\n"
70 " sam31\n" 69 " sam31\n"
@@ -72,13 +71,6 @@ asm(
72 " sam64\n" 71 " sam64\n"
73 " lgr 2,14\n" 72 " lgr 2,14\n"
74 " lmg 6,15,48(15)\n" 73 " lmg 6,15,48(15)\n"
75#else
76 " stm 6,15,24(15)\n"
77 " lr 14,3\n"
78 " diag 2,14,0x8\n"
79 " lr 2,14\n"
80 " lm 6,15,24(15)\n"
81#endif
82 " br 14\n" 74 " br 14\n"
83 " .size savesys_ipl_nss, .-savesys_ipl_nss\n" 75 " .size savesys_ipl_nss, .-savesys_ipl_nss\n"
84 " .previous\n"); 76 " .previous\n");
@@ -240,7 +232,6 @@ static noinline __init void detect_machine_type(void)
240 232
241static __init void setup_topology(void) 233static __init void setup_topology(void)
242{ 234{
243#ifdef CONFIG_64BIT
244 int max_mnest; 235 int max_mnest;
245 236
246 if (!test_facility(11)) 237 if (!test_facility(11))
@@ -251,7 +242,6 @@ static __init void setup_topology(void)
251 break; 242 break;
252 } 243 }
253 topology_max_mnest = max_mnest; 244 topology_max_mnest = max_mnest;
254#endif
255} 245}
256 246
257static void early_pgm_check_handler(void) 247static void early_pgm_check_handler(void)
@@ -290,58 +280,6 @@ static noinline __init void setup_facility_list(void)
290 ARRAY_SIZE(S390_lowcore.stfle_fac_list)); 280 ARRAY_SIZE(S390_lowcore.stfle_fac_list));
291} 281}
292 282
293static __init void detect_mvpg(void)
294{
295#ifndef CONFIG_64BIT
296 int rc;
297
298 asm volatile(
299 " la 0,0\n"
300 " mvpg %2,%2\n"
301 "0: la %0,0\n"
302 "1:\n"
303 EX_TABLE(0b,1b)
304 : "=d" (rc) : "0" (-EOPNOTSUPP), "a" (0) : "memory", "cc", "0");
305 if (!rc)
306 S390_lowcore.machine_flags |= MACHINE_FLAG_MVPG;
307#endif
308}
309
310static __init void detect_ieee(void)
311{
312#ifndef CONFIG_64BIT
313 int rc, tmp;
314
315 asm volatile(
316 " efpc %1,0\n"
317 "0: la %0,0\n"
318 "1:\n"
319 EX_TABLE(0b,1b)
320 : "=d" (rc), "=d" (tmp): "0" (-EOPNOTSUPP) : "cc");
321 if (!rc)
322 S390_lowcore.machine_flags |= MACHINE_FLAG_IEEE;
323#endif
324}
325
326static __init void detect_csp(void)
327{
328#ifndef CONFIG_64BIT
329 int rc;
330
331 asm volatile(
332 " la 0,0\n"
333 " la 1,0\n"
334 " la 2,4\n"
335 " csp 0,2\n"
336 "0: la %0,0\n"
337 "1:\n"
338 EX_TABLE(0b,1b)
339 : "=d" (rc) : "0" (-EOPNOTSUPP) : "cc", "0", "1", "2");
340 if (!rc)
341 S390_lowcore.machine_flags |= MACHINE_FLAG_CSP;
342#endif
343}
344
345static __init void detect_diag9c(void) 283static __init void detect_diag9c(void)
346{ 284{
347 unsigned int cpu_address; 285 unsigned int cpu_address;
@@ -360,7 +298,6 @@ static __init void detect_diag9c(void)
360 298
361static __init void detect_diag44(void) 299static __init void detect_diag44(void)
362{ 300{
363#ifdef CONFIG_64BIT
364 int rc; 301 int rc;
365 302
366 asm volatile( 303 asm volatile(
@@ -371,12 +308,10 @@ static __init void detect_diag44(void)
371 : "=d" (rc) : "0" (-EOPNOTSUPP) : "cc"); 308 : "=d" (rc) : "0" (-EOPNOTSUPP) : "cc");
372 if (!rc) 309 if (!rc)
373 S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG44; 310 S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG44;
374#endif
375} 311}
376 312
377static __init void detect_machine_facilities(void) 313static __init void detect_machine_facilities(void)
378{ 314{
379#ifdef CONFIG_64BIT
380 if (test_facility(8)) { 315 if (test_facility(8)) {
381 S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT1; 316 S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT1;
382 __ctl_set_bit(0, 23); 317 __ctl_set_bit(0, 23);
@@ -393,7 +328,6 @@ static __init void detect_machine_facilities(void)
393 S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC; 328 S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
394 if (test_facility(129)) 329 if (test_facility(129))
395 S390_lowcore.machine_flags |= MACHINE_FLAG_VX; 330 S390_lowcore.machine_flags |= MACHINE_FLAG_VX;
396#endif
397} 331}
398 332
399static int __init cad_setup(char *str) 333static int __init cad_setup(char *str)
@@ -501,9 +435,6 @@ void __init startup_init(void)
501 ipl_update_parameters(); 435 ipl_update_parameters();
502 setup_boot_command_line(); 436 setup_boot_command_line();
503 create_kernel_nss(); 437 create_kernel_nss();
504 detect_mvpg();
505 detect_ieee();
506 detect_csp();
507 detect_diag9c(); 438 detect_diag9c();
508 detect_diag44(); 439 detect_diag44();
509 detect_machine_facilities(); 440 detect_machine_facilities();
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
deleted file mode 100644
index 398329b2b518..000000000000
--- a/arch/s390/kernel/entry.S
+++ /dev/null
@@ -1,966 +0,0 @@
1/*
2 * S390 low-level entry points.
3 *
4 * Copyright IBM Corp. 1999, 2012
5 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
6 * Hartmut Penner (hp@de.ibm.com),
7 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
8 * Heiko Carstens <heiko.carstens@de.ibm.com>
9 */
10
11#include <linux/init.h>
12#include <linux/linkage.h>
13#include <asm/processor.h>
14#include <asm/cache.h>
15#include <asm/errno.h>
16#include <asm/ptrace.h>
17#include <asm/thread_info.h>
18#include <asm/asm-offsets.h>
19#include <asm/unistd.h>
20#include <asm/page.h>
21#include <asm/sigp.h>
22#include <asm/irq.h>
23
24__PT_R0 = __PT_GPRS
25__PT_R1 = __PT_GPRS + 4
26__PT_R2 = __PT_GPRS + 8
27__PT_R3 = __PT_GPRS + 12
28__PT_R4 = __PT_GPRS + 16
29__PT_R5 = __PT_GPRS + 20
30__PT_R6 = __PT_GPRS + 24
31__PT_R7 = __PT_GPRS + 28
32__PT_R8 = __PT_GPRS + 32
33__PT_R9 = __PT_GPRS + 36
34__PT_R10 = __PT_GPRS + 40
35__PT_R11 = __PT_GPRS + 44
36__PT_R12 = __PT_GPRS + 48
37__PT_R13 = __PT_GPRS + 52
38__PT_R14 = __PT_GPRS + 56
39__PT_R15 = __PT_GPRS + 60
40
41STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
42STACK_SIZE = 1 << STACK_SHIFT
43STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
44
45_TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED)
46_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
47 _TIF_SYSCALL_TRACEPOINT)
48_CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE)
49_PIF_WORK = (_PIF_PER_TRAP)
50
51#define BASED(name) name-system_call(%r13)
52
53 .macro TRACE_IRQS_ON
54#ifdef CONFIG_TRACE_IRQFLAGS
55 basr %r2,%r0
56 l %r1,BASED(.Lc_hardirqs_on)
57 basr %r14,%r1 # call trace_hardirqs_on_caller
58#endif
59 .endm
60
61 .macro TRACE_IRQS_OFF
62#ifdef CONFIG_TRACE_IRQFLAGS
63 basr %r2,%r0
64 l %r1,BASED(.Lc_hardirqs_off)
65 basr %r14,%r1 # call trace_hardirqs_off_caller
66#endif
67 .endm
68
69 .macro LOCKDEP_SYS_EXIT
70#ifdef CONFIG_LOCKDEP
71 tm __PT_PSW+1(%r11),0x01 # returning to user ?
72 jz .+10
73 l %r1,BASED(.Lc_lockdep_sys_exit)
74 basr %r14,%r1 # call lockdep_sys_exit
75#endif
76 .endm
77
78 .macro CHECK_STACK stacksize,savearea
79#ifdef CONFIG_CHECK_STACK
80 tml %r15,\stacksize - CONFIG_STACK_GUARD
81 la %r14,\savearea
82 jz stack_overflow
83#endif
84 .endm
85
86 .macro SWITCH_ASYNC savearea,stack,shift
87 tmh %r8,0x0001 # interrupting from user ?
88 jnz 1f
89 lr %r14,%r9
90 sl %r14,BASED(.Lc_critical_start)
91 cl %r14,BASED(.Lc_critical_length)
92 jhe 0f
93 la %r11,\savearea # inside critical section, do cleanup
94 bras %r14,cleanup_critical
95 tmh %r8,0x0001 # retest problem state after cleanup
96 jnz 1f
970: l %r14,\stack # are we already on the target stack?
98 slr %r14,%r15
99 sra %r14,\shift
100 jnz 1f
101 CHECK_STACK 1<<\shift,\savearea
102 ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
103 j 2f
1041: l %r15,\stack # load target stack
1052: la %r11,STACK_FRAME_OVERHEAD(%r15)
106 .endm
107
108 .macro ADD64 high,low,timer
109 al \high,\timer
110 al \low,4+\timer
111 brc 12,.+8
112 ahi \high,1
113 .endm
114
115 .macro SUB64 high,low,timer
116 sl \high,\timer
117 sl \low,4+\timer
118 brc 3,.+8
119 ahi \high,-1
120 .endm
121
122 .macro UPDATE_VTIME high,low,enter_timer
123 lm \high,\low,__LC_EXIT_TIMER
124 SUB64 \high,\low,\enter_timer
125 ADD64 \high,\low,__LC_USER_TIMER
126 stm \high,\low,__LC_USER_TIMER
127 lm \high,\low,__LC_LAST_UPDATE_TIMER
128 SUB64 \high,\low,__LC_EXIT_TIMER
129 ADD64 \high,\low,__LC_SYSTEM_TIMER
130 stm \high,\low,__LC_SYSTEM_TIMER
131 mvc __LC_LAST_UPDATE_TIMER(8),\enter_timer
132 .endm
133
134 .macro REENABLE_IRQS
135 st %r8,__LC_RETURN_PSW
136 ni __LC_RETURN_PSW,0xbf
137 ssm __LC_RETURN_PSW
138 .endm
139
140 .section .kprobes.text, "ax"
141
142/*
143 * Scheduler resume function, called by switch_to
144 * gpr2 = (task_struct *) prev
145 * gpr3 = (task_struct *) next
146 * Returns:
147 * gpr2 = prev
148 */
149ENTRY(__switch_to)
150 stm %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
151 st %r15,__THREAD_ksp(%r2) # store kernel stack of prev
152 l %r4,__THREAD_info(%r2) # get thread_info of prev
153 l %r5,__THREAD_info(%r3) # get thread_info of next
154 lr %r15,%r5
155 ahi %r15,STACK_INIT # end of kernel stack of next
156 st %r3,__LC_CURRENT # store task struct of next
157 st %r5,__LC_THREAD_INFO # store thread info of next
158 st %r15,__LC_KERNEL_STACK # store end of kernel stack
159 lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
160 mvc __LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next
161 l %r15,__THREAD_ksp(%r3) # load kernel stack of next
162 lm %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
163 br %r14
164
165.L__critical_start:
166/*
167 * SVC interrupt handler routine. System calls are synchronous events and
168 * are executed with interrupts enabled.
169 */
170
171ENTRY(system_call)
172 stpt __LC_SYNC_ENTER_TIMER
173.Lsysc_stm:
174 stm %r8,%r15,__LC_SAVE_AREA_SYNC
175 l %r12,__LC_THREAD_INFO
176 l %r13,__LC_SVC_NEW_PSW+4
177 lhi %r14,_PIF_SYSCALL
178.Lsysc_per:
179 l %r15,__LC_KERNEL_STACK
180 la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
181.Lsysc_vtime:
182 UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
183 stm %r0,%r7,__PT_R0(%r11)
184 mvc __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC
185 mvc __PT_PSW(8,%r11),__LC_SVC_OLD_PSW
186 mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
187 st %r14,__PT_FLAGS(%r11)
188.Lsysc_do_svc:
189 l %r10,__TI_sysc_table(%r12) # 31 bit system call table
190 lh %r8,__PT_INT_CODE+2(%r11)
191 sla %r8,2 # shift and test for svc0
192 jnz .Lsysc_nr_ok
193 # svc 0: system call number in %r1
194 cl %r1,BASED(.Lnr_syscalls)
195 jnl .Lsysc_nr_ok
196 sth %r1,__PT_INT_CODE+2(%r11)
197 lr %r8,%r1
198 sla %r8,2
199.Lsysc_nr_ok:
200 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
201 st %r2,__PT_ORIG_GPR2(%r11)
202 st %r7,STACK_FRAME_OVERHEAD(%r15)
203 l %r9,0(%r8,%r10) # get system call addr.
204 tm __TI_flags+3(%r12),_TIF_TRACE
205 jnz .Lsysc_tracesys
206 basr %r14,%r9 # call sys_xxxx
207 st %r2,__PT_R2(%r11) # store return value
208
209.Lsysc_return:
210 LOCKDEP_SYS_EXIT
211.Lsysc_tif:
212 tm __PT_PSW+1(%r11),0x01 # returning to user ?
213 jno .Lsysc_restore
214 tm __PT_FLAGS+3(%r11),_PIF_WORK
215 jnz .Lsysc_work
216 tm __TI_flags+3(%r12),_TIF_WORK
217 jnz .Lsysc_work # check for thread work
218 tm __LC_CPU_FLAGS+3,_CIF_WORK
219 jnz .Lsysc_work
220.Lsysc_restore:
221 mvc __LC_RETURN_PSW(8),__PT_PSW(%r11)
222 stpt __LC_EXIT_TIMER
223 lm %r0,%r15,__PT_R0(%r11)
224 lpsw __LC_RETURN_PSW
225.Lsysc_done:
226
227#
228# One of the work bits is on. Find out which one.
229#
230.Lsysc_work:
231 tm __LC_CPU_FLAGS+3,_CIF_MCCK_PENDING
232 jo .Lsysc_mcck_pending
233 tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
234 jo .Lsysc_reschedule
235 tm __PT_FLAGS+3(%r11),_PIF_PER_TRAP
236 jo .Lsysc_singlestep
237 tm __TI_flags+3(%r12),_TIF_SIGPENDING
238 jo .Lsysc_sigpending
239 tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME
240 jo .Lsysc_notify_resume
241 tm __LC_CPU_FLAGS+3,_CIF_ASCE
242 jo .Lsysc_uaccess
243 j .Lsysc_return # beware of critical section cleanup
244
245#
246# _TIF_NEED_RESCHED is set, call schedule
247#
248.Lsysc_reschedule:
249 l %r1,BASED(.Lc_schedule)
250 la %r14,BASED(.Lsysc_return)
251 br %r1 # call schedule
252
253#
254# _CIF_MCCK_PENDING is set, call handler
255#
256.Lsysc_mcck_pending:
257 l %r1,BASED(.Lc_handle_mcck)
258 la %r14,BASED(.Lsysc_return)
259 br %r1 # TIF bit will be cleared by handler
260
261#
262# _CIF_ASCE is set, load user space asce
263#
264.Lsysc_uaccess:
265 ni __LC_CPU_FLAGS+3,255-_CIF_ASCE
266 lctl %c1,%c1,__LC_USER_ASCE # load primary asce
267 j .Lsysc_return
268
269#
270# _TIF_SIGPENDING is set, call do_signal
271#
272.Lsysc_sigpending:
273 lr %r2,%r11 # pass pointer to pt_regs
274 l %r1,BASED(.Lc_do_signal)
275 basr %r14,%r1 # call do_signal
276 tm __PT_FLAGS+3(%r11),_PIF_SYSCALL
277 jno .Lsysc_return
278 lm %r2,%r7,__PT_R2(%r11) # load svc arguments
279 l %r10,__TI_sysc_table(%r12) # 31 bit system call table
280 xr %r8,%r8 # svc 0 returns -ENOSYS
281 clc __PT_INT_CODE+2(2,%r11),BASED(.Lnr_syscalls+2)
282 jnl .Lsysc_nr_ok # invalid svc number -> do svc 0
283 lh %r8,__PT_INT_CODE+2(%r11) # load new svc number
284 sla %r8,2
285 j .Lsysc_nr_ok # restart svc
286
287#
288# _TIF_NOTIFY_RESUME is set, call do_notify_resume
289#
290.Lsysc_notify_resume:
291 lr %r2,%r11 # pass pointer to pt_regs
292 l %r1,BASED(.Lc_do_notify_resume)
293 la %r14,BASED(.Lsysc_return)
294 br %r1 # call do_notify_resume
295
296#
297# _PIF_PER_TRAP is set, call do_per_trap
298#
299.Lsysc_singlestep:
300 ni __PT_FLAGS+3(%r11),255-_PIF_PER_TRAP
301 lr %r2,%r11 # pass pointer to pt_regs
302 l %r1,BASED(.Lc_do_per_trap)
303 la %r14,BASED(.Lsysc_return)
304 br %r1 # call do_per_trap
305
306#
307# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
308# and after the system call
309#
310.Lsysc_tracesys:
311 l %r1,BASED(.Lc_trace_enter)
312 lr %r2,%r11 # pass pointer to pt_regs
313 la %r3,0
314 xr %r0,%r0
315 icm %r0,3,__PT_INT_CODE+2(%r11)
316 st %r0,__PT_R2(%r11)
317 basr %r14,%r1 # call do_syscall_trace_enter
318 cl %r2,BASED(.Lnr_syscalls)
319 jnl .Lsysc_tracenogo
320 lr %r8,%r2
321 sll %r8,2
322 l %r9,0(%r8,%r10)
323.Lsysc_tracego:
324 lm %r3,%r7,__PT_R3(%r11)
325 st %r7,STACK_FRAME_OVERHEAD(%r15)
326 l %r2,__PT_ORIG_GPR2(%r11)
327 basr %r14,%r9 # call sys_xxx
328 st %r2,__PT_R2(%r11) # store return value
329.Lsysc_tracenogo:
330 tm __TI_flags+3(%r12),_TIF_TRACE
331 jz .Lsysc_return
332 l %r1,BASED(.Lc_trace_exit)
333 lr %r2,%r11 # pass pointer to pt_regs
334 la %r14,BASED(.Lsysc_return)
335 br %r1 # call do_syscall_trace_exit
336
337#
338# a new process exits the kernel with ret_from_fork
339#
340ENTRY(ret_from_fork)
341 la %r11,STACK_FRAME_OVERHEAD(%r15)
342 l %r12,__LC_THREAD_INFO
343 l %r13,__LC_SVC_NEW_PSW+4
344 l %r1,BASED(.Lc_schedule_tail)
345 basr %r14,%r1 # call schedule_tail
346 TRACE_IRQS_ON
347 ssm __LC_SVC_NEW_PSW # reenable interrupts
348 tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ?
349 jne .Lsysc_tracenogo
350 # it's a kernel thread
351 lm %r9,%r10,__PT_R9(%r11) # load gprs
352ENTRY(kernel_thread_starter)
353 la %r2,0(%r10)
354 basr %r14,%r9
355 j .Lsysc_tracenogo
356
357/*
358 * Program check handler routine
359 */
360
361ENTRY(pgm_check_handler)
362 stpt __LC_SYNC_ENTER_TIMER
363 stm %r8,%r15,__LC_SAVE_AREA_SYNC
364 l %r12,__LC_THREAD_INFO
365 l %r13,__LC_SVC_NEW_PSW+4
366 lm %r8,%r9,__LC_PGM_OLD_PSW
367 tmh %r8,0x0001 # test problem state bit
368 jnz 1f # -> fault in user space
369 tmh %r8,0x4000 # PER bit set in old PSW ?
370 jnz 0f # -> enabled, can't be a double fault
371 tm __LC_PGM_ILC+3,0x80 # check for per exception
372 jnz .Lpgm_svcper # -> single stepped svc
3730: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
374 ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
375 j 2f
3761: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
377 l %r15,__LC_KERNEL_STACK
3782: la %r11,STACK_FRAME_OVERHEAD(%r15)
379 stm %r0,%r7,__PT_R0(%r11)
380 mvc __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC
381 stm %r8,%r9,__PT_PSW(%r11)
382 mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC
383 mvc __PT_INT_PARM_LONG(4,%r11),__LC_TRANS_EXC_CODE
384 xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11)
385 tm __LC_PGM_ILC+3,0x80 # check for per exception
386 jz 0f
387 l %r1,__TI_task(%r12)
388 tmh %r8,0x0001 # kernel per event ?
389 jz .Lpgm_kprobe
390 oi __PT_FLAGS+3(%r11),_PIF_PER_TRAP
391 mvc __THREAD_per_address(4,%r1),__LC_PER_ADDRESS
392 mvc __THREAD_per_cause(2,%r1),__LC_PER_CODE
393 mvc __THREAD_per_paid(1,%r1),__LC_PER_ACCESS_ID
3940: REENABLE_IRQS
395 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
396 l %r1,BASED(.Lc_jump_table)
397 la %r10,0x7f
398 n %r10,__PT_INT_CODE(%r11)
399 je .Lsysc_return
400 sll %r10,2
401 l %r1,0(%r10,%r1) # load address of handler routine
402 lr %r2,%r11 # pass pointer to pt_regs
403 basr %r14,%r1 # branch to interrupt-handler
404 j .Lsysc_return
405
406#
407# PER event in supervisor state, must be kprobes
408#
409.Lpgm_kprobe:
410 REENABLE_IRQS
411 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
412 l %r1,BASED(.Lc_do_per_trap)
413 lr %r2,%r11 # pass pointer to pt_regs
414 basr %r14,%r1 # call do_per_trap
415 j .Lsysc_return
416
417#
418# single stepped system call
419#
420.Lpgm_svcper:
421 mvc __LC_RETURN_PSW(4),__LC_SVC_NEW_PSW
422 mvc __LC_RETURN_PSW+4(4),BASED(.Lc_sysc_per)
423 lhi %r14,_PIF_SYSCALL | _PIF_PER_TRAP
424 lpsw __LC_RETURN_PSW # branch to .Lsysc_per and enable irqs
425
426/*
427 * IO interrupt handler routine
428 */
429
430ENTRY(io_int_handler)
431 stck __LC_INT_CLOCK
432 stpt __LC_ASYNC_ENTER_TIMER
433 stm %r8,%r15,__LC_SAVE_AREA_ASYNC
434 l %r12,__LC_THREAD_INFO
435 l %r13,__LC_SVC_NEW_PSW+4
436 lm %r8,%r9,__LC_IO_OLD_PSW
437 tmh %r8,0x0001 # interrupting from user ?
438 jz .Lio_skip
439 UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER
440.Lio_skip:
441 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
442 stm %r0,%r7,__PT_R0(%r11)
443 mvc __PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC
444 stm %r8,%r9,__PT_PSW(%r11)
445 mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
446 xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11)
447 TRACE_IRQS_OFF
448 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
449.Lio_loop:
450 l %r1,BASED(.Lc_do_IRQ)
451 lr %r2,%r11 # pass pointer to pt_regs
452 lhi %r3,IO_INTERRUPT
453 tm __PT_INT_CODE+8(%r11),0x80 # adapter interrupt ?
454 jz .Lio_call
455 lhi %r3,THIN_INTERRUPT
456.Lio_call:
457 basr %r14,%r1 # call do_IRQ
458 tm __LC_MACHINE_FLAGS+2,0x10 # MACHINE_FLAG_LPAR
459 jz .Lio_return
460 tpi 0
461 jz .Lio_return
462 mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
463 j .Lio_loop
464.Lio_return:
465 LOCKDEP_SYS_EXIT
466 TRACE_IRQS_ON
467.Lio_tif:
468 tm __TI_flags+3(%r12),_TIF_WORK
469 jnz .Lio_work # there is work to do (signals etc.)
470 tm __LC_CPU_FLAGS+3,_CIF_WORK
471 jnz .Lio_work
472.Lio_restore:
473 mvc __LC_RETURN_PSW(8),__PT_PSW(%r11)
474 stpt __LC_EXIT_TIMER
475 lm %r0,%r15,__PT_R0(%r11)
476 lpsw __LC_RETURN_PSW
477.Lio_done:
478
479#
480# There is work todo, find out in which context we have been interrupted:
481# 1) if we return to user space we can do all _TIF_WORK work
482# 2) if we return to kernel code and preemptive scheduling is enabled check
483# the preemption counter and if it is zero call preempt_schedule_irq
484# Before any work can be done, a switch to the kernel stack is required.
485#
486.Lio_work:
487 tm __PT_PSW+1(%r11),0x01 # returning to user ?
488 jo .Lio_work_user # yes -> do resched & signal
489#ifdef CONFIG_PREEMPT
490 # check for preemptive scheduling
491 icm %r0,15,__TI_precount(%r12)
492 jnz .Lio_restore # preemption disabled
493 tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
494 jno .Lio_restore
495 # switch to kernel stack
496 l %r1,__PT_R15(%r11)
497 ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
498 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
499 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
500 la %r11,STACK_FRAME_OVERHEAD(%r1)
501 lr %r15,%r1
502 # TRACE_IRQS_ON already done at .Lio_return, call
503 # TRACE_IRQS_OFF to keep things symmetrical
504 TRACE_IRQS_OFF
505 l %r1,BASED(.Lc_preempt_irq)
506 basr %r14,%r1 # call preempt_schedule_irq
507 j .Lio_return
508#else
509 j .Lio_restore
510#endif
511
512#
513# Need to do work before returning to userspace, switch to kernel stack
514#
515.Lio_work_user:
516 l %r1,__LC_KERNEL_STACK
517 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
518 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
519 la %r11,STACK_FRAME_OVERHEAD(%r1)
520 lr %r15,%r1
521
522#
523# One of the work bits is on. Find out which one.
524#
525.Lio_work_tif:
526 tm __LC_CPU_FLAGS+3(%r12),_CIF_MCCK_PENDING
527 jo .Lio_mcck_pending
528 tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
529 jo .Lio_reschedule
530 tm __TI_flags+3(%r12),_TIF_SIGPENDING
531 jo .Lio_sigpending
532 tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME
533 jo .Lio_notify_resume
534 tm __LC_CPU_FLAGS+3,_CIF_ASCE
535 jo .Lio_uaccess
536 j .Lio_return # beware of critical section cleanup
537
538#
539# _CIF_MCCK_PENDING is set, call handler
540#
541.Lio_mcck_pending:
542 # TRACE_IRQS_ON already done at .Lio_return
543 l %r1,BASED(.Lc_handle_mcck)
544 basr %r14,%r1 # TIF bit will be cleared by handler
545 TRACE_IRQS_OFF
546 j .Lio_return
547
548#
549# _CIF_ASCE is set, load user space asce
550#
551.Lio_uaccess:
552 ni __LC_CPU_FLAGS+3,255-_CIF_ASCE
553 lctl %c1,%c1,__LC_USER_ASCE # load primary asce
554 j .Lio_return
555
556#
557# _TIF_NEED_RESCHED is set, call schedule
558#
559.Lio_reschedule:
560 # TRACE_IRQS_ON already done at .Lio_return
561 l %r1,BASED(.Lc_schedule)
562 ssm __LC_SVC_NEW_PSW # reenable interrupts
563 basr %r14,%r1 # call scheduler
564 ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
565 TRACE_IRQS_OFF
566 j .Lio_return
567
568#
569# _TIF_SIGPENDING is set, call do_signal
570#
571.Lio_sigpending:
572 # TRACE_IRQS_ON already done at .Lio_return
573 l %r1,BASED(.Lc_do_signal)
574 ssm __LC_SVC_NEW_PSW # reenable interrupts
575 lr %r2,%r11 # pass pointer to pt_regs
576 basr %r14,%r1 # call do_signal
577 ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
578 TRACE_IRQS_OFF
579 j .Lio_return
580
581#
582# _TIF_NOTIFY_RESUME is set, call do_notify_resume
583#
584.Lio_notify_resume:
585 # TRACE_IRQS_ON already done at .Lio_return
586 l %r1,BASED(.Lc_do_notify_resume)
587 ssm __LC_SVC_NEW_PSW # reenable interrupts
588 lr %r2,%r11 # pass pointer to pt_regs
589 basr %r14,%r1 # call do_notify_resume
590 ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
591 TRACE_IRQS_OFF
592 j .Lio_return
593
594/*
595 * External interrupt handler routine
596 */
597
598ENTRY(ext_int_handler)
599 stck __LC_INT_CLOCK
600 stpt __LC_ASYNC_ENTER_TIMER
601 stm %r8,%r15,__LC_SAVE_AREA_ASYNC
602 l %r12,__LC_THREAD_INFO
603 l %r13,__LC_SVC_NEW_PSW+4
604 lm %r8,%r9,__LC_EXT_OLD_PSW
605 tmh %r8,0x0001 # interrupting from user ?
606 jz .Lext_skip
607 UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER
608.Lext_skip:
609 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
610 stm %r0,%r7,__PT_R0(%r11)
611 mvc __PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC
612 stm %r8,%r9,__PT_PSW(%r11)
613 mvc __PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR
614 mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
615 xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11)
616 TRACE_IRQS_OFF
617 l %r1,BASED(.Lc_do_IRQ)
618 lr %r2,%r11 # pass pointer to pt_regs
619 lhi %r3,EXT_INTERRUPT
620 basr %r14,%r1 # call do_IRQ
621 j .Lio_return
622
623/*
624 * Load idle PSW. The second "half" of this function is in .Lcleanup_idle.
625 */
626ENTRY(psw_idle)
627 st %r3,__SF_EMPTY(%r15)
628 basr %r1,0
629 la %r1,.Lpsw_idle_lpsw+4-.(%r1)
630 st %r1,__SF_EMPTY+4(%r15)
631 oi __SF_EMPTY+4(%r15),0x80
632 stck __CLOCK_IDLE_ENTER(%r2)
633 stpt __TIMER_IDLE_ENTER(%r2)
634.Lpsw_idle_lpsw:
635 lpsw __SF_EMPTY(%r15)
636 br %r14
637.Lpsw_idle_end:
638
639.L__critical_end:
640
641/*
642 * Machine check handler routines
643 */
644
645ENTRY(mcck_int_handler)
646 stck __LC_MCCK_CLOCK
647 spt __LC_CPU_TIMER_SAVE_AREA # revalidate cpu timer
648 lm %r0,%r15,__LC_GPREGS_SAVE_AREA # revalidate gprs
649 l %r12,__LC_THREAD_INFO
650 l %r13,__LC_SVC_NEW_PSW+4
651 lm %r8,%r9,__LC_MCK_OLD_PSW
652 tm __LC_MCCK_CODE,0x80 # system damage?
653 jo .Lmcck_panic # yes -> rest of mcck code invalid
654 la %r14,__LC_CPU_TIMER_SAVE_AREA
655 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
656 tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid?
657 jo 3f
658 la %r14,__LC_SYNC_ENTER_TIMER
659 clc 0(8,%r14),__LC_ASYNC_ENTER_TIMER
660 jl 0f
661 la %r14,__LC_ASYNC_ENTER_TIMER
6620: clc 0(8,%r14),__LC_EXIT_TIMER
663 jl 1f
664 la %r14,__LC_EXIT_TIMER
6651: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER
666 jl 2f
667 la %r14,__LC_LAST_UPDATE_TIMER
6682: spt 0(%r14)
669 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
6703: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
671 jno .Lmcck_panic # no -> skip cleanup critical
672 tm %r8,0x0001 # interrupting from user ?
673 jz .Lmcck_skip
674 UPDATE_VTIME %r14,%r15,__LC_MCCK_ENTER_TIMER
675.Lmcck_skip:
676 SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+32,__LC_PANIC_STACK,PAGE_SHIFT
677 stm %r0,%r7,__PT_R0(%r11)
678 mvc __PT_R8(32,%r11),__LC_GPREGS_SAVE_AREA+32
679 stm %r8,%r9,__PT_PSW(%r11)
680 xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11)
681 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
682 l %r1,BASED(.Lc_do_machine_check)
683 lr %r2,%r11 # pass pointer to pt_regs
684 basr %r14,%r1 # call s390_do_machine_check
685 tm __PT_PSW+1(%r11),0x01 # returning to user ?
686 jno .Lmcck_return
687 l %r1,__LC_KERNEL_STACK # switch to kernel stack
688 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
689 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
690 la %r11,STACK_FRAME_OVERHEAD(%r15)
691 lr %r15,%r1
692 ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off
693 tm __LC_CPU_FLAGS+3,_CIF_MCCK_PENDING
694 jno .Lmcck_return
695 TRACE_IRQS_OFF
696 l %r1,BASED(.Lc_handle_mcck)
697 basr %r14,%r1 # call s390_handle_mcck
698 TRACE_IRQS_ON
699.Lmcck_return:
700 mvc __LC_RETURN_MCCK_PSW(8),__PT_PSW(%r11) # move return PSW
701 tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
702 jno 0f
703 lm %r0,%r15,__PT_R0(%r11)
704 stpt __LC_EXIT_TIMER
705 lpsw __LC_RETURN_MCCK_PSW
7060: lm %r0,%r15,__PT_R0(%r11)
707 lpsw __LC_RETURN_MCCK_PSW
708
709.Lmcck_panic:
710 l %r14,__LC_PANIC_STACK
711 slr %r14,%r15
712 sra %r14,PAGE_SHIFT
713 jz 0f
714 l %r15,__LC_PANIC_STACK
715 j .Lmcck_skip
7160: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
717 j .Lmcck_skip
718
719#
720# PSW restart interrupt handler
721#
722ENTRY(restart_int_handler)
723 st %r15,__LC_SAVE_AREA_RESTART
724 l %r15,__LC_RESTART_STACK
725 ahi %r15,-__PT_SIZE # create pt_regs on stack
726 xc 0(__PT_SIZE,%r15),0(%r15)
727 stm %r0,%r14,__PT_R0(%r15)
728 mvc __PT_R15(4,%r15),__LC_SAVE_AREA_RESTART
729 mvc __PT_PSW(8,%r15),__LC_RST_OLD_PSW # store restart old psw
730 ahi %r15,-STACK_FRAME_OVERHEAD # create stack frame on stack
731 xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
732 l %r1,__LC_RESTART_FN # load fn, parm & source cpu
733 l %r2,__LC_RESTART_DATA
734 l %r3,__LC_RESTART_SOURCE
735 ltr %r3,%r3 # test source cpu address
736 jm 1f # negative -> skip source stop
7370: sigp %r4,%r3,SIGP_SENSE # sigp sense to source cpu
738 brc 10,0b # wait for status stored
7391: basr %r14,%r1 # call function
740 stap __SF_EMPTY(%r15) # store cpu address
741 lh %r3,__SF_EMPTY(%r15)
7422: sigp %r4,%r3,SIGP_STOP # sigp stop to current cpu
743 brc 2,2b
7443: j 3b
745
746 .section .kprobes.text, "ax"
747
748#ifdef CONFIG_CHECK_STACK
749/*
750 * The synchronous or the asynchronous stack overflowed. We are dead.
751 * No need to properly save the registers, we are going to panic anyway.
752 * Setup a pt_regs so that show_trace can provide a good call trace.
753 */
754stack_overflow:
755 l %r15,__LC_PANIC_STACK # change to panic stack
756 la %r11,STACK_FRAME_OVERHEAD(%r15)
757 stm %r0,%r7,__PT_R0(%r11)
758 stm %r8,%r9,__PT_PSW(%r11)
759 mvc __PT_R8(32,%r11),0(%r14)
760 l %r1,BASED(1f)
761 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
762 lr %r2,%r11 # pass pointer to pt_regs
763 br %r1 # branch to kernel_stack_overflow
7641: .long kernel_stack_overflow
765#endif
766
767.Lcleanup_table:
768 .long system_call + 0x80000000
769 .long .Lsysc_do_svc + 0x80000000
770 .long .Lsysc_tif + 0x80000000
771 .long .Lsysc_restore + 0x80000000
772 .long .Lsysc_done + 0x80000000
773 .long .Lio_tif + 0x80000000
774 .long .Lio_restore + 0x80000000
775 .long .Lio_done + 0x80000000
776 .long psw_idle + 0x80000000
777 .long .Lpsw_idle_end + 0x80000000
778
779cleanup_critical:
780 cl %r9,BASED(.Lcleanup_table) # system_call
781 jl 0f
782 cl %r9,BASED(.Lcleanup_table+4) # .Lsysc_do_svc
783 jl .Lcleanup_system_call
784 cl %r9,BASED(.Lcleanup_table+8) # .Lsysc_tif
785 jl 0f
786 cl %r9,BASED(.Lcleanup_table+12) # .Lsysc_restore
787 jl .Lcleanup_sysc_tif
788 cl %r9,BASED(.Lcleanup_table+16) # .Lsysc_done
789 jl .Lcleanup_sysc_restore
790 cl %r9,BASED(.Lcleanup_table+20) # .Lio_tif
791 jl 0f
792 cl %r9,BASED(.Lcleanup_table+24) # .Lio_restore
793 jl .Lcleanup_io_tif
794 cl %r9,BASED(.Lcleanup_table+28) # .Lio_done
795 jl .Lcleanup_io_restore
796 cl %r9,BASED(.Lcleanup_table+32) # psw_idle
797 jl 0f
798 cl %r9,BASED(.Lcleanup_table+36) # .Lpsw_idle_end
799 jl .Lcleanup_idle
8000: br %r14
801
802.Lcleanup_system_call:
803 # check if stpt has been executed
804 cl %r9,BASED(.Lcleanup_system_call_insn)
805 jh 0f
806 mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
807 chi %r11,__LC_SAVE_AREA_ASYNC
808 je 0f
809 mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
8100: # check if stm has been executed
811 cl %r9,BASED(.Lcleanup_system_call_insn+4)
812 jh 0f
813 mvc __LC_SAVE_AREA_SYNC(32),0(%r11)
8140: # set up saved registers r12, and r13
815 st %r12,16(%r11) # r12 thread-info pointer
816 st %r13,20(%r11) # r13 literal-pool pointer
817 # check if the user time calculation has been done
818 cl %r9,BASED(.Lcleanup_system_call_insn+8)
819 jh 0f
820 l %r10,__LC_EXIT_TIMER
821 l %r15,__LC_EXIT_TIMER+4
822 SUB64 %r10,%r15,__LC_SYNC_ENTER_TIMER
823 ADD64 %r10,%r15,__LC_USER_TIMER
824 st %r10,__LC_USER_TIMER
825 st %r15,__LC_USER_TIMER+4
8260: # check if the system time calculation has been done
827 cl %r9,BASED(.Lcleanup_system_call_insn+12)
828 jh 0f
829 l %r10,__LC_LAST_UPDATE_TIMER
830 l %r15,__LC_LAST_UPDATE_TIMER+4
831 SUB64 %r10,%r15,__LC_EXIT_TIMER
832 ADD64 %r10,%r15,__LC_SYSTEM_TIMER
833 st %r10,__LC_SYSTEM_TIMER
834 st %r15,__LC_SYSTEM_TIMER+4
8350: # update accounting time stamp
836 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
837 # set up saved register 11
838 l %r15,__LC_KERNEL_STACK
839 la %r9,STACK_FRAME_OVERHEAD(%r15)
840 st %r9,12(%r11) # r11 pt_regs pointer
841 # fill pt_regs
842 mvc __PT_R8(32,%r9),__LC_SAVE_AREA_SYNC
843 stm %r0,%r7,__PT_R0(%r9)
844 mvc __PT_PSW(8,%r9),__LC_SVC_OLD_PSW
845 mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC
846 xc __PT_FLAGS(4,%r9),__PT_FLAGS(%r9)
847 mvi __PT_FLAGS+3(%r9),_PIF_SYSCALL
848 # setup saved register 15
849 st %r15,28(%r11) # r15 stack pointer
850 # set new psw address and exit
851 l %r9,BASED(.Lcleanup_table+4) # .Lsysc_do_svc + 0x80000000
852 br %r14
853.Lcleanup_system_call_insn:
854 .long system_call + 0x80000000
855 .long .Lsysc_stm + 0x80000000
856 .long .Lsysc_vtime + 0x80000000 + 36
857 .long .Lsysc_vtime + 0x80000000 + 76
858
859.Lcleanup_sysc_tif:
860 l %r9,BASED(.Lcleanup_table+8) # .Lsysc_tif + 0x80000000
861 br %r14
862
863.Lcleanup_sysc_restore:
864 cl %r9,BASED(.Lcleanup_sysc_restore_insn)
865 jhe 0f
866 l %r9,12(%r11) # get saved pointer to pt_regs
867 mvc __LC_RETURN_PSW(8),__PT_PSW(%r9)
868 mvc 0(32,%r11),__PT_R8(%r9)
869 lm %r0,%r7,__PT_R0(%r9)
8700: lm %r8,%r9,__LC_RETURN_PSW
871 br %r14
872.Lcleanup_sysc_restore_insn:
873 .long .Lsysc_done - 4 + 0x80000000
874
875.Lcleanup_io_tif:
876 l %r9,BASED(.Lcleanup_table+20) # .Lio_tif + 0x80000000
877 br %r14
878
879.Lcleanup_io_restore:
880 cl %r9,BASED(.Lcleanup_io_restore_insn)
881 jhe 0f
882 l %r9,12(%r11) # get saved r11 pointer to pt_regs
883 mvc __LC_RETURN_PSW(8),__PT_PSW(%r9)
884 mvc 0(32,%r11),__PT_R8(%r9)
885 lm %r0,%r7,__PT_R0(%r9)
8860: lm %r8,%r9,__LC_RETURN_PSW
887 br %r14
888.Lcleanup_io_restore_insn:
889 .long .Lio_done - 4 + 0x80000000
890
891.Lcleanup_idle:
892 # copy interrupt clock & cpu timer
893 mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
894 mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
895 chi %r11,__LC_SAVE_AREA_ASYNC
896 je 0f
897 mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
898 mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
8990: # check if stck has been executed
900 cl %r9,BASED(.Lcleanup_idle_insn)
901 jhe 1f
902 mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
903 mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r3)
9041: # account system time going idle
905 lm %r9,%r10,__LC_STEAL_TIMER
906 ADD64 %r9,%r10,__CLOCK_IDLE_ENTER(%r2)
907 SUB64 %r9,%r10,__LC_LAST_UPDATE_CLOCK
908 stm %r9,%r10,__LC_STEAL_TIMER
909 mvc __LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
910 lm %r9,%r10,__LC_SYSTEM_TIMER
911 ADD64 %r9,%r10,__LC_LAST_UPDATE_TIMER
912 SUB64 %r9,%r10,__TIMER_IDLE_ENTER(%r2)
913 stm %r9,%r10,__LC_SYSTEM_TIMER
914 mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
915 # prepare return psw
916 n %r8,BASED(.Lcleanup_idle_wait) # clear irq & wait state bits
917 l %r9,24(%r11) # return from psw_idle
918 br %r14
919.Lcleanup_idle_insn:
920 .long .Lpsw_idle_lpsw + 0x80000000
921.Lcleanup_idle_wait:
922 .long 0xfcfdffff
923
924/*
925 * Integer constants
926 */
927 .align 4
928.Lnr_syscalls:
929 .long NR_syscalls
930.Lvtimer_max:
931 .quad 0x7fffffffffffffff
932
933/*
934 * Symbol constants
935 */
936.Lc_do_machine_check: .long s390_do_machine_check
937.Lc_handle_mcck: .long s390_handle_mcck
938.Lc_do_IRQ: .long do_IRQ
939.Lc_do_signal: .long do_signal
940.Lc_do_notify_resume: .long do_notify_resume
941.Lc_do_per_trap: .long do_per_trap
942.Lc_jump_table: .long pgm_check_table
943.Lc_schedule: .long schedule
944#ifdef CONFIG_PREEMPT
945.Lc_preempt_irq: .long preempt_schedule_irq
946#endif
947.Lc_trace_enter: .long do_syscall_trace_enter
948.Lc_trace_exit: .long do_syscall_trace_exit
949.Lc_schedule_tail: .long schedule_tail
950.Lc_sysc_per: .long .Lsysc_per + 0x80000000
951#ifdef CONFIG_TRACE_IRQFLAGS
952.Lc_hardirqs_on: .long trace_hardirqs_on_caller
953.Lc_hardirqs_off: .long trace_hardirqs_off_caller
954#endif
955#ifdef CONFIG_LOCKDEP
956.Lc_lockdep_sys_exit: .long lockdep_sys_exit
957#endif
958.Lc_critical_start: .long .L__critical_start + 0x80000000
959.Lc_critical_length: .long .L__critical_end - .L__critical_start
960
961 .section .rodata, "a"
962#define SYSCALL(esa,esame,emu) .long esa
963 .globl sys_call_table
964sys_call_table:
965#include "syscalls.S"
966#undef SYSCALL
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 132f4c9ade60..59b7c6470567 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -27,11 +27,7 @@
27#include <asm/thread_info.h> 27#include <asm/thread_info.h>
28#include <asm/page.h> 28#include <asm/page.h>
29 29
30#ifdef CONFIG_64BIT
31#define ARCH_OFFSET 4 30#define ARCH_OFFSET 4
32#else
33#define ARCH_OFFSET 0
34#endif
35 31
36__HEAD 32__HEAD
37 33
@@ -67,7 +63,6 @@ __HEAD
67# subroutine to set architecture mode 63# subroutine to set architecture mode
68# 64#
69.Lsetmode: 65.Lsetmode:
70#ifdef CONFIG_64BIT
71 mvi __LC_AR_MODE_ID,1 # set esame flag 66 mvi __LC_AR_MODE_ID,1 # set esame flag
72 slr %r0,%r0 # set cpuid to zero 67 slr %r0,%r0 # set cpuid to zero
73 lhi %r1,2 # mode 2 = esame (dump) 68 lhi %r1,2 # mode 2 = esame (dump)
@@ -76,16 +71,12 @@ __HEAD
76 .fill 16,4,0x0 71 .fill 16,4,0x0
770: lmh %r0,%r15,0(%r13) # clear high-order half of gprs 720: lmh %r0,%r15,0(%r13) # clear high-order half of gprs
78 sam31 # switch to 31 bit addressing mode 73 sam31 # switch to 31 bit addressing mode
79#else
80 mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0)
81#endif
82 br %r14 74 br %r14
83 75
84# 76#
85# subroutine to wait for end I/O 77# subroutine to wait for end I/O
86# 78#
87.Lirqwait: 79.Lirqwait:
88#ifdef CONFIG_64BIT
89 mvc 0x1f0(16),.Lnewpsw # set up IO interrupt psw 80 mvc 0x1f0(16),.Lnewpsw # set up IO interrupt psw
90 lpsw .Lwaitpsw 81 lpsw .Lwaitpsw
91.Lioint: 82.Lioint:
@@ -93,15 +84,6 @@ __HEAD
93 .align 8 84 .align 8
94.Lnewpsw: 85.Lnewpsw:
95 .quad 0x0000000080000000,.Lioint 86 .quad 0x0000000080000000,.Lioint
96#else
97 mvc 0x78(8),.Lnewpsw # set up IO interrupt psw
98 lpsw .Lwaitpsw
99.Lioint:
100 br %r14
101 .align 8
102.Lnewpsw:
103 .long 0x00080000,0x80000000+.Lioint
104#endif
105.Lwaitpsw: 87.Lwaitpsw:
106 .long 0x020a0000,0x80000000+.Lioint 88 .long 0x020a0000,0x80000000+.Lioint
107 89
@@ -375,7 +357,6 @@ ENTRY(startup)
375ENTRY(startup_kdump) 357ENTRY(startup_kdump)
376 j .Lep_startup_kdump 358 j .Lep_startup_kdump
377.Lep_startup_normal: 359.Lep_startup_normal:
378#ifdef CONFIG_64BIT
379 mvi __LC_AR_MODE_ID,1 # set esame flag 360 mvi __LC_AR_MODE_ID,1 # set esame flag
380 slr %r0,%r0 # set cpuid to zero 361 slr %r0,%r0 # set cpuid to zero
381 lhi %r1,2 # mode 2 = esame (dump) 362 lhi %r1,2 # mode 2 = esame (dump)
@@ -384,9 +365,6 @@ ENTRY(startup_kdump)
384 .fill 16,4,0x0 365 .fill 16,4,0x0
3850: lmh %r0,%r15,0(%r13) # clear high-order half of gprs 3660: lmh %r0,%r15,0(%r13) # clear high-order half of gprs
386 sam31 # switch to 31 bit addressing mode 367 sam31 # switch to 31 bit addressing mode
387#else
388 mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0)
389#endif
390 basr %r13,0 # get base 368 basr %r13,0 # get base
391.LPG0: 369.LPG0:
392 xc 0x200(256),0x200 # partially clear lowcore 370 xc 0x200(256),0x200 # partially clear lowcore
@@ -396,7 +374,6 @@ ENTRY(startup_kdump)
396 spt 6f-.LPG0(%r13) 374 spt 6f-.LPG0(%r13)
397 mvc __LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13) 375 mvc __LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13)
398 xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST 376 xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST
399#ifndef CONFIG_MARCH_G5
400 # check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10} 377 # check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10}
401 .insn s,0xb2b10000,0 # store facilities @ __LC_STFL_FAC_LIST 378 .insn s,0xb2b10000,0 # store facilities @ __LC_STFL_FAC_LIST
402 tm __LC_STFL_FAC_LIST,0x01 # stfle available ? 379 tm __LC_STFL_FAC_LIST,0x01 # stfle available ?
@@ -435,7 +412,6 @@ ENTRY(startup_kdump)
435# the kernel will crash. Format is number of facility words with bits set, 412# the kernel will crash. Format is number of facility words with bits set,
436# followed by the facility words. 413# followed by the facility words.
437 414
438#if defined(CONFIG_64BIT)
439#if defined(CONFIG_MARCH_Z13) 415#if defined(CONFIG_MARCH_Z13)
440 .long 3, 0xc100eff2, 0xf46ce800, 0x00400000 416 .long 3, 0xc100eff2, 0xf46ce800, 0x00400000
441#elif defined(CONFIG_MARCH_ZEC12) 417#elif defined(CONFIG_MARCH_ZEC12)
@@ -451,35 +427,10 @@ ENTRY(startup_kdump)
451#elif defined(CONFIG_MARCH_Z900) 427#elif defined(CONFIG_MARCH_Z900)
452 .long 1, 0xc0000000 428 .long 1, 0xc0000000
453#endif 429#endif
454#else
455#if defined(CONFIG_MARCH_ZEC12)
456 .long 1, 0x8100c880
457#elif defined(CONFIG_MARCH_Z196)
458 .long 1, 0x8100c880
459#elif defined(CONFIG_MARCH_Z10)
460 .long 1, 0x8100c880
461#elif defined(CONFIG_MARCH_Z9_109)
462 .long 1, 0x8100c880
463#elif defined(CONFIG_MARCH_Z990)
464 .long 1, 0x80002000
465#elif defined(CONFIG_MARCH_Z900)
466 .long 1, 0x80000000
467#endif
468#endif
4694: 4304:
470#endif
471
472#ifdef CONFIG_64BIT
473 /* Continue with 64bit startup code in head64.S */ 431 /* Continue with 64bit startup code in head64.S */
474 sam64 # switch to 64 bit mode 432 sam64 # switch to 64 bit mode
475 jg startup_continue 433 jg startup_continue
476#else
477 /* Continue with 31bit startup code in head31.S */
478 l %r13,5f-.LPG0(%r13)
479 b 0(%r13)
480 .align 8
4815: .long startup_continue
482#endif
483 434
484 .align 8 435 .align 8
4856: .long 0x7fffffff,0xffffffff 4366: .long 0x7fffffff,0xffffffff
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S
deleted file mode 100644
index 6dbe80983a24..000000000000
--- a/arch/s390/kernel/head31.S
+++ /dev/null
@@ -1,106 +0,0 @@
1/*
2 * Copyright IBM Corp. 2005, 2010
3 *
4 * Author(s): Hartmut Penner <hp@de.ibm.com>
5 * Martin Schwidefsky <schwidefsky@de.ibm.com>
6 * Rob van der Heij <rvdhei@iae.nl>
7 * Heiko Carstens <heiko.carstens@de.ibm.com>
8 *
9 */
10
11#include <linux/init.h>
12#include <linux/linkage.h>
13#include <asm/asm-offsets.h>
14#include <asm/thread_info.h>
15#include <asm/page.h>
16
17__HEAD
18ENTRY(startup_continue)
19 basr %r13,0 # get base
20.LPG1:
21
22 l %r1,.Lbase_cc-.LPG1(%r13)
23 mvc 0(8,%r1),__LC_LAST_UPDATE_CLOCK
24 lctl %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
25 l %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area
26 # move IPL device to lowcore
27#
28# Setup stack
29#
30 l %r15,.Linittu-.LPG1(%r13)
31 st %r15,__LC_THREAD_INFO # cache thread info in lowcore
32 mvc __LC_CURRENT(4),__TI_task(%r15)
33 ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union+THREAD_SIZE
34 st %r15,__LC_KERNEL_STACK # set end of kernel stack
35 ahi %r15,-96
36#
37# Save ipl parameters, clear bss memory, initialize storage key for kernel pages,
38# and create a kernel NSS if the SAVESYS= parm is defined
39#
40 l %r14,.Lstartup_init-.LPG1(%r13)
41 basr %r14,%r14
42 lpsw .Lentry-.LPG1(13) # jump to _stext in primary-space,
43 # virtual and never return ...
44 .align 8
45.Lentry:.long 0x00080000,0x80000000 + _stext
46.Lctl: .long 0x04b50000 # cr0: various things
47 .long 0 # cr1: primary space segment table
48 .long .Lduct # cr2: dispatchable unit control table
49 .long 0 # cr3: instruction authorization
50 .long 0 # cr4: instruction authorization
51 .long .Lduct # cr5: primary-aste origin
52 .long 0 # cr6: I/O interrupts
53 .long 0 # cr7: secondary space segment table
54 .long 0 # cr8: access registers translation
55 .long 0 # cr9: tracing off
56 .long 0 # cr10: tracing off
57 .long 0 # cr11: tracing off
58 .long 0 # cr12: tracing off
59 .long 0 # cr13: home space segment table
60 .long 0xc0000000 # cr14: machine check handling off
61 .long 0 # cr15: linkage stack operations
62.Lbss_bgn: .long __bss_start
63.Lbss_end: .long _end
64.Lparmaddr: .long PARMAREA
65.Linittu: .long init_thread_union
66.Lstartup_init:
67 .long startup_init
68 .align 64
69.Lduct: .long 0,0,0,0,.Lduald,0,0,0
70 .long 0,0,0,0,0,0,0,0
71 .align 128
72.Lduald:.rept 8
73 .long 0x80000000,0,0,0 # invalid access-list entries
74 .endr
75.Lbase_cc:
76 .long sched_clock_base_cc
77
78ENTRY(_ehead)
79
80 .org 0x100000 - 0x11000 # head.o ends at 0x11000
81#
82# startup-code, running in absolute addressing mode
83#
84ENTRY(_stext)
85 basr %r13,0 # get base
86.LPG3:
87# check control registers
88 stctl %c0,%c15,0(%r15)
89 oi 2(%r15),0x60 # enable sigp emergency & external call
90 oi 0(%r15),0x10 # switch on low address protection
91 lctl %c0,%c15,0(%r15)
92
93#
94 lam 0,15,.Laregs-.LPG3(%r13) # load access regs needed by uaccess
95 l %r14,.Lstart-.LPG3(%r13)
96 basr %r14,%r14 # call start_kernel
97#
98# We returned from start_kernel ?!? PANIK
99#
100 basr %r13,0
101 lpsw .Ldw-.(%r13) # load disabled wait psw
102#
103 .align 8
104.Ldw: .long 0x000a0000,0x00000000
105.Lstart:.long start_kernel
106.Laregs:.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
diff --git a/arch/s390/kernel/head_kdump.S b/arch/s390/kernel/head_kdump.S
index 085a95eb315f..d05950f02c34 100644
--- a/arch/s390/kernel/head_kdump.S
+++ b/arch/s390/kernel/head_kdump.S
@@ -92,17 +92,9 @@ startup_kdump_relocated:
92#else 92#else
93.align 2 93.align 2
94.Lep_startup_kdump: 94.Lep_startup_kdump:
95#ifdef CONFIG_64BIT
96 larl %r13,startup_kdump_crash 95 larl %r13,startup_kdump_crash
97 lpswe 0(%r13) 96 lpswe 0(%r13)
98.align 8 97.align 8
99startup_kdump_crash: 98startup_kdump_crash:
100 .quad 0x0002000080000000,0x0000000000000000 + startup_kdump_crash 99 .quad 0x0002000080000000,0x0000000000000000 + startup_kdump_crash
101#else
102 basr %r13,0
1030: lpsw startup_kdump_crash-0b(%r13)
104.align 8
105startup_kdump_crash:
106 .long 0x000a0000,0x00000000 + startup_kdump_crash
107#endif /* CONFIG_64BIT */
108#endif /* CONFIG_CRASH_DUMP */ 100#endif /* CONFIG_CRASH_DUMP */
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 5c8651f36509..c57951f008c4 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -2062,12 +2062,10 @@ static void do_reset_calls(void)
2062{ 2062{
2063 struct reset_call *reset; 2063 struct reset_call *reset;
2064 2064
2065#ifdef CONFIG_64BIT
2066 if (diag308_set_works) { 2065 if (diag308_set_works) {
2067 diag308_reset(); 2066 diag308_reset();
2068 return; 2067 return;
2069 } 2068 }
2070#endif
2071 list_for_each_entry(reset, &rcall, list) 2069 list_for_each_entry(reset, &rcall, list)
2072 reset->fn(); 2070 reset->fn();
2073} 2071}
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 2ca95862e336..0c1a679314dd 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -38,13 +38,8 @@
38#define DEBUGP(fmt , ...) 38#define DEBUGP(fmt , ...)
39#endif 39#endif
40 40
41#ifndef CONFIG_64BIT
42#define PLT_ENTRY_SIZE 12
43#else /* CONFIG_64BIT */
44#define PLT_ENTRY_SIZE 20 41#define PLT_ENTRY_SIZE 20
45#endif /* CONFIG_64BIT */
46 42
47#ifdef CONFIG_64BIT
48void *module_alloc(unsigned long size) 43void *module_alloc(unsigned long size)
49{ 44{
50 if (PAGE_ALIGN(size) > MODULES_LEN) 45 if (PAGE_ALIGN(size) > MODULES_LEN)
@@ -53,7 +48,6 @@ void *module_alloc(unsigned long size)
53 GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE, 48 GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE,
54 __builtin_return_address(0)); 49 __builtin_return_address(0));
55} 50}
56#endif
57 51
58void module_arch_freeing_init(struct module *mod) 52void module_arch_freeing_init(struct module *mod)
59{ 53{
@@ -323,17 +317,11 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
323 unsigned int *ip; 317 unsigned int *ip;
324 ip = me->module_core + me->arch.plt_offset + 318 ip = me->module_core + me->arch.plt_offset +
325 info->plt_offset; 319 info->plt_offset;
326#ifndef CONFIG_64BIT
327 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
328 ip[1] = 0x100607f1;
329 ip[2] = val;
330#else /* CONFIG_64BIT */
331 ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */ 320 ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
332 ip[1] = 0x100a0004; 321 ip[1] = 0x100a0004;
333 ip[2] = 0x07f10000; 322 ip[2] = 0x07f10000;
334 ip[3] = (unsigned int) (val >> 32); 323 ip[3] = (unsigned int) (val >> 32);
335 ip[4] = (unsigned int) val; 324 ip[4] = (unsigned int) val;
336#endif /* CONFIG_64BIT */
337 info->plt_initialized = 1; 325 info->plt_initialized = 1;
338 } 326 }
339 if (r_type == R_390_PLTOFF16 || 327 if (r_type == R_390_PLTOFF16 ||
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 3f51cf4e8f02..505c17c0ae1a 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -117,55 +117,36 @@ static int notrace s390_revalidate_registers(struct mci *mci)
117 */ 117 */
118 kill_task = 1; 118 kill_task = 1;
119 } 119 }
120#ifndef CONFIG_64BIT 120 fpt_save_area = &S390_lowcore.floating_pt_save_area;
121 fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area;
122 if (!mci->fc) {
123 /*
124 * Floating point control register can't be restored.
125 * Task will be terminated.
126 */
127 asm volatile("lfpc 0(%0)" : : "a" (&zero), "m" (zero));
128 kill_task = 1;
129 } else
130 asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area));
131
121 asm volatile( 132 asm volatile(
122 " ld 0,0(%0)\n" 133 " ld 0,0(%0)\n"
123 " ld 2,8(%0)\n" 134 " ld 1,8(%0)\n"
124 " ld 4,16(%0)\n" 135 " ld 2,16(%0)\n"
125 " ld 6,24(%0)" 136 " ld 3,24(%0)\n"
126 : : "a" (&S390_lowcore.floating_pt_save_area)); 137 " ld 4,32(%0)\n"
127#endif 138 " ld 5,40(%0)\n"
128 139 " ld 6,48(%0)\n"
129 if (MACHINE_HAS_IEEE) { 140 " ld 7,56(%0)\n"
130#ifdef CONFIG_64BIT 141 " ld 8,64(%0)\n"
131 fpt_save_area = &S390_lowcore.floating_pt_save_area; 142 " ld 9,72(%0)\n"
132 fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area; 143 " ld 10,80(%0)\n"
133#else 144 " ld 11,88(%0)\n"
134 fpt_save_area = (void *) S390_lowcore.extended_save_area_addr; 145 " ld 12,96(%0)\n"
135 fpt_creg_save_area = fpt_save_area + 128; 146 " ld 13,104(%0)\n"
136#endif 147 " ld 14,112(%0)\n"
137 if (!mci->fc) { 148 " ld 15,120(%0)\n"
138 /* 149 : : "a" (fpt_save_area));
139 * Floating point control register can't be restored.
140 * Task will be terminated.
141 */
142 asm volatile("lfpc 0(%0)" : : "a" (&zero), "m" (zero));
143 kill_task = 1;
144
145 } else
146 asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area));
147
148 asm volatile(
149 " ld 0,0(%0)\n"
150 " ld 1,8(%0)\n"
151 " ld 2,16(%0)\n"
152 " ld 3,24(%0)\n"
153 " ld 4,32(%0)\n"
154 " ld 5,40(%0)\n"
155 " ld 6,48(%0)\n"
156 " ld 7,56(%0)\n"
157 " ld 8,64(%0)\n"
158 " ld 9,72(%0)\n"
159 " ld 10,80(%0)\n"
160 " ld 11,88(%0)\n"
161 " ld 12,96(%0)\n"
162 " ld 13,104(%0)\n"
163 " ld 14,112(%0)\n"
164 " ld 15,120(%0)\n"
165 : : "a" (fpt_save_area));
166 }
167
168#ifdef CONFIG_64BIT
169 /* Revalidate vector registers */ 150 /* Revalidate vector registers */
170 if (MACHINE_HAS_VX && current->thread.vxrs) { 151 if (MACHINE_HAS_VX && current->thread.vxrs) {
171 if (!mci->vr) { 152 if (!mci->vr) {
@@ -178,7 +159,6 @@ static int notrace s390_revalidate_registers(struct mci *mci)
178 restore_vx_regs((__vector128 *) 159 restore_vx_regs((__vector128 *)
179 S390_lowcore.vector_save_area_addr); 160 S390_lowcore.vector_save_area_addr);
180 } 161 }
181#endif
182 /* Revalidate access registers */ 162 /* Revalidate access registers */
183 asm volatile( 163 asm volatile(
184 " lam 0,15,0(%0)" 164 " lam 0,15,0(%0)"
@@ -198,21 +178,14 @@ static int notrace s390_revalidate_registers(struct mci *mci)
198 */ 178 */
199 s390_handle_damage("invalid control registers."); 179 s390_handle_damage("invalid control registers.");
200 } else { 180 } else {
201#ifdef CONFIG_64BIT
202 asm volatile( 181 asm volatile(
203 " lctlg 0,15,0(%0)" 182 " lctlg 0,15,0(%0)"
204 : : "a" (&S390_lowcore.cregs_save_area)); 183 : : "a" (&S390_lowcore.cregs_save_area));
205#else
206 asm volatile(
207 " lctl 0,15,0(%0)"
208 : : "a" (&S390_lowcore.cregs_save_area));
209#endif
210 } 184 }
211 /* 185 /*
212 * We don't even try to revalidate the TOD register, since we simply 186 * We don't even try to revalidate the TOD register, since we simply
213 * can't write something sensible into that register. 187 * can't write something sensible into that register.
214 */ 188 */
215#ifdef CONFIG_64BIT
216 /* 189 /*
217 * See if we can revalidate the TOD programmable register with its 190 * See if we can revalidate the TOD programmable register with its
218 * old contents (should be zero) otherwise set it to zero. 191 * old contents (should be zero) otherwise set it to zero.
@@ -228,7 +201,6 @@ static int notrace s390_revalidate_registers(struct mci *mci)
228 " sckpf" 201 " sckpf"
229 : : "a" (&S390_lowcore.tod_progreg_save_area) 202 : : "a" (&S390_lowcore.tod_progreg_save_area)
230 : "0", "cc"); 203 : "0", "cc");
231#endif
232 /* Revalidate clock comparator register */ 204 /* Revalidate clock comparator register */
233 set_clock_comparator(S390_lowcore.clock_comparator); 205 set_clock_comparator(S390_lowcore.clock_comparator);
234 /* Check if old PSW is valid */ 206 /* Check if old PSW is valid */
@@ -280,19 +252,11 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
280 if (mci->b) { 252 if (mci->b) {
281 /* Processing backup -> verify if we can survive this */ 253 /* Processing backup -> verify if we can survive this */
282 u64 z_mcic, o_mcic, t_mcic; 254 u64 z_mcic, o_mcic, t_mcic;
283#ifdef CONFIG_64BIT
284 z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<29); 255 z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<29);
285 o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 | 256 o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 |
286 1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 | 257 1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 |
287 1ULL<<30 | 1ULL<<21 | 1ULL<<20 | 1ULL<<17 | 258 1ULL<<30 | 1ULL<<21 | 1ULL<<20 | 1ULL<<17 |
288 1ULL<<16); 259 1ULL<<16);
289#else
290 z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<57 | 1ULL<<50 |
291 1ULL<<29);
292 o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 |
293 1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 |
294 1ULL<<30 | 1ULL<<20 | 1ULL<<17 | 1ULL<<16);
295#endif
296 t_mcic = *(u64 *)mci; 260 t_mcic = *(u64 *)mci;
297 261
298 if (((t_mcic & z_mcic) != 0) || 262 if (((t_mcic & z_mcic) != 0) ||
diff --git a/arch/s390/kernel/pgm_check.S b/arch/s390/kernel/pgm_check.S
index f6f8886399f6..036aa01d06a9 100644
--- a/arch/s390/kernel/pgm_check.S
+++ b/arch/s390/kernel/pgm_check.S
@@ -6,19 +6,13 @@
6 6
7#include <linux/linkage.h> 7#include <linux/linkage.h>
8 8
9#ifdef CONFIG_32BIT
10#define PGM_CHECK_64BIT(handler) .long default_trap_handler
11#else
12#define PGM_CHECK_64BIT(handler) .long handler
13#endif
14
15#define PGM_CHECK(handler) .long handler 9#define PGM_CHECK(handler) .long handler
16#define PGM_CHECK_DEFAULT PGM_CHECK(default_trap_handler) 10#define PGM_CHECK_DEFAULT PGM_CHECK(default_trap_handler)
17 11
18/* 12/*
19 * The program check table contains exactly 128 (0x00-0x7f) entries. Each 13 * The program check table contains exactly 128 (0x00-0x7f) entries. Each
20 * line defines the 31 and/or 64 bit function to be called corresponding 14 * line defines the function to be called corresponding to the program check
21 * to the program check interruption code. 15 * interruption code.
22 */ 16 */
23.section .rodata, "a" 17.section .rodata, "a"
24ENTRY(pgm_check_table) 18ENTRY(pgm_check_table)
@@ -46,10 +40,10 @@ PGM_CHECK_DEFAULT /* 14 */
46PGM_CHECK(operand_exception) /* 15 */ 40PGM_CHECK(operand_exception) /* 15 */
47PGM_CHECK_DEFAULT /* 16 */ 41PGM_CHECK_DEFAULT /* 16 */
48PGM_CHECK_DEFAULT /* 17 */ 42PGM_CHECK_DEFAULT /* 17 */
49PGM_CHECK_64BIT(transaction_exception) /* 18 */ 43PGM_CHECK(transaction_exception) /* 18 */
50PGM_CHECK_DEFAULT /* 19 */ 44PGM_CHECK_DEFAULT /* 19 */
51PGM_CHECK_DEFAULT /* 1a */ 45PGM_CHECK_DEFAULT /* 1a */
52PGM_CHECK_64BIT(vector_exception) /* 1b */ 46PGM_CHECK(vector_exception) /* 1b */
53PGM_CHECK(space_switch_exception) /* 1c */ 47PGM_CHECK(space_switch_exception) /* 1c */
54PGM_CHECK(hfp_sqrt_exception) /* 1d */ 48PGM_CHECK(hfp_sqrt_exception) /* 1d */
55PGM_CHECK_DEFAULT /* 1e */ 49PGM_CHECK_DEFAULT /* 1e */
@@ -78,10 +72,10 @@ PGM_CHECK_DEFAULT /* 34 */
78PGM_CHECK_DEFAULT /* 35 */ 72PGM_CHECK_DEFAULT /* 35 */
79PGM_CHECK_DEFAULT /* 36 */ 73PGM_CHECK_DEFAULT /* 36 */
80PGM_CHECK_DEFAULT /* 37 */ 74PGM_CHECK_DEFAULT /* 37 */
81PGM_CHECK_64BIT(do_dat_exception) /* 38 */ 75PGM_CHECK(do_dat_exception) /* 38 */
82PGM_CHECK_64BIT(do_dat_exception) /* 39 */ 76PGM_CHECK(do_dat_exception) /* 39 */
83PGM_CHECK_64BIT(do_dat_exception) /* 3a */ 77PGM_CHECK(do_dat_exception) /* 3a */
84PGM_CHECK_64BIT(do_dat_exception) /* 3b */ 78PGM_CHECK(do_dat_exception) /* 3b */
85PGM_CHECK_DEFAULT /* 3c */ 79PGM_CHECK_DEFAULT /* 3c */
86PGM_CHECK_DEFAULT /* 3d */ 80PGM_CHECK_DEFAULT /* 3d */
87PGM_CHECK_DEFAULT /* 3e */ 81PGM_CHECK_DEFAULT /* 3e */
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 13fc0978ca7e..dc5edc29b73a 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -79,13 +79,11 @@ void release_thread(struct task_struct *dead_task)
79{ 79{
80} 80}
81 81
82#ifdef CONFIG_64BIT
83void arch_release_task_struct(struct task_struct *tsk) 82void arch_release_task_struct(struct task_struct *tsk)
84{ 83{
85 if (tsk->thread.vxrs) 84 if (tsk->thread.vxrs)
86 kfree(tsk->thread.vxrs); 85 kfree(tsk->thread.vxrs);
87} 86}
88#endif
89 87
90int copy_thread(unsigned long clone_flags, unsigned long new_stackp, 88int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
91 unsigned long arg, struct task_struct *p) 89 unsigned long arg, struct task_struct *p)
@@ -144,19 +142,6 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
144 p->thread.ri_signum = 0; 142 p->thread.ri_signum = 0;
145 frame->childregs.psw.mask &= ~PSW_MASK_RI; 143 frame->childregs.psw.mask &= ~PSW_MASK_RI;
146 144
147#ifndef CONFIG_64BIT
148 /*
149 * save fprs to current->thread.fp_regs to merge them with
150 * the emulated registers and then copy the result to the child.
151 */
152 save_fp_ctl(&current->thread.fp_regs.fpc);
153 save_fp_regs(current->thread.fp_regs.fprs);
154 memcpy(&p->thread.fp_regs, &current->thread.fp_regs,
155 sizeof(s390_fp_regs));
156 /* Set a new TLS ? */
157 if (clone_flags & CLONE_SETTLS)
158 p->thread.acrs[0] = frame->childregs.gprs[6];
159#else /* CONFIG_64BIT */
160 /* Save the fpu registers to new thread structure. */ 145 /* Save the fpu registers to new thread structure. */
161 save_fp_ctl(&p->thread.fp_regs.fpc); 146 save_fp_ctl(&p->thread.fp_regs.fpc);
162 save_fp_regs(p->thread.fp_regs.fprs); 147 save_fp_regs(p->thread.fp_regs.fprs);
@@ -172,15 +157,13 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
172 p->thread.acrs[1] = (unsigned int)tls; 157 p->thread.acrs[1] = (unsigned int)tls;
173 } 158 }
174 } 159 }
175#endif /* CONFIG_64BIT */
176 return 0; 160 return 0;
177} 161}
178 162
179asmlinkage void execve_tail(void) 163asmlinkage void execve_tail(void)
180{ 164{
181 current->thread.fp_regs.fpc = 0; 165 current->thread.fp_regs.fpc = 0;
182 if (MACHINE_HAS_IEEE) 166 asm volatile("sfpc %0,%0" : : "d" (0));
183 asm volatile("sfpc %0,%0" : : "d" (0));
184} 167}
185 168
186/* 169/*
@@ -188,18 +171,8 @@ asmlinkage void execve_tail(void)
188 */ 171 */
189int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs) 172int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs)
190{ 173{
191#ifndef CONFIG_64BIT
192 /*
193 * save fprs to current->thread.fp_regs to merge them with
194 * the emulated registers and then copy the result to the dump.
195 */
196 save_fp_ctl(&current->thread.fp_regs.fpc);
197 save_fp_regs(current->thread.fp_regs.fprs);
198 memcpy(fpregs, &current->thread.fp_regs, sizeof(s390_fp_regs));
199#else /* CONFIG_64BIT */
200 save_fp_ctl(&fpregs->fpc); 174 save_fp_ctl(&fpregs->fpc);
201 save_fp_regs(fpregs->fprs); 175 save_fp_regs(fpregs->fprs);
202#endif /* CONFIG_64BIT */
203 return 1; 176 return 1;
204} 177}
205EXPORT_SYMBOL(dump_fpu); 178EXPORT_SYMBOL(dump_fpu);
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index eabfb4594517..d363c9c322a1 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -44,7 +44,6 @@ void update_cr_regs(struct task_struct *task)
44 struct thread_struct *thread = &task->thread; 44 struct thread_struct *thread = &task->thread;
45 struct per_regs old, new; 45 struct per_regs old, new;
46 46
47#ifdef CONFIG_64BIT
48 /* Take care of the enable/disable of transactional execution. */ 47 /* Take care of the enable/disable of transactional execution. */
49 if (MACHINE_HAS_TE || MACHINE_HAS_VX) { 48 if (MACHINE_HAS_TE || MACHINE_HAS_VX) {
50 unsigned long cr, cr_new; 49 unsigned long cr, cr_new;
@@ -80,7 +79,6 @@ void update_cr_regs(struct task_struct *task)
80 __ctl_load(cr_new, 2, 2); 79 __ctl_load(cr_new, 2, 2);
81 } 80 }
82 } 81 }
83#endif
84 /* Copy user specified PER registers */ 82 /* Copy user specified PER registers */
85 new.control = thread->per_user.control; 83 new.control = thread->per_user.control;
86 new.start = thread->per_user.start; 84 new.start = thread->per_user.start;
@@ -93,10 +91,8 @@ void update_cr_regs(struct task_struct *task)
93 new.control |= PER_EVENT_BRANCH; 91 new.control |= PER_EVENT_BRANCH;
94 else 92 else
95 new.control |= PER_EVENT_IFETCH; 93 new.control |= PER_EVENT_IFETCH;
96#ifdef CONFIG_64BIT
97 new.control |= PER_CONTROL_SUSPENSION; 94 new.control |= PER_CONTROL_SUSPENSION;
98 new.control |= PER_EVENT_TRANSACTION_END; 95 new.control |= PER_EVENT_TRANSACTION_END;
99#endif
100 if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP)) 96 if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP))
101 new.control |= PER_EVENT_IFETCH; 97 new.control |= PER_EVENT_IFETCH;
102 new.start = 0; 98 new.start = 0;
@@ -146,11 +142,7 @@ void ptrace_disable(struct task_struct *task)
146 task->thread.per_flags = 0; 142 task->thread.per_flags = 0;
147} 143}
148 144
149#ifndef CONFIG_64BIT 145#define __ADDR_MASK 7
150# define __ADDR_MASK 3
151#else
152# define __ADDR_MASK 7
153#endif
154 146
155static inline unsigned long __peek_user_per(struct task_struct *child, 147static inline unsigned long __peek_user_per(struct task_struct *child,
156 addr_t addr) 148 addr_t addr)
@@ -223,7 +215,6 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)
223 * access registers are stored in the thread structure 215 * access registers are stored in the thread structure
224 */ 216 */
225 offset = addr - (addr_t) &dummy->regs.acrs; 217 offset = addr - (addr_t) &dummy->regs.acrs;
226#ifdef CONFIG_64BIT
227 /* 218 /*
228 * Very special case: old & broken 64 bit gdb reading 219 * Very special case: old & broken 64 bit gdb reading
229 * from acrs[15]. Result is a 64 bit value. Read the 220 * from acrs[15]. Result is a 64 bit value. Read the
@@ -232,8 +223,7 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)
232 if (addr == (addr_t) &dummy->regs.acrs[15]) 223 if (addr == (addr_t) &dummy->regs.acrs[15])
233 tmp = ((unsigned long) child->thread.acrs[15]) << 32; 224 tmp = ((unsigned long) child->thread.acrs[15]) << 32;
234 else 225 else
235#endif 226 tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);
236 tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);
237 227
238 } else if (addr == (addr_t) &dummy->regs.orig_gpr2) { 228 } else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
239 /* 229 /*
@@ -261,12 +251,10 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)
261 * or the child->thread.vxrs array 251 * or the child->thread.vxrs array
262 */ 252 */
263 offset = addr - (addr_t) &dummy->regs.fp_regs.fprs; 253 offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
264#ifdef CONFIG_64BIT
265 if (child->thread.vxrs) 254 if (child->thread.vxrs)
266 tmp = *(addr_t *) 255 tmp = *(addr_t *)
267 ((addr_t) child->thread.vxrs + 2*offset); 256 ((addr_t) child->thread.vxrs + 2*offset);
268 else 257 else
269#endif
270 tmp = *(addr_t *) 258 tmp = *(addr_t *)
271 ((addr_t) &child->thread.fp_regs.fprs + offset); 259 ((addr_t) &child->thread.fp_regs.fprs + offset);
272 260
@@ -293,11 +281,9 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data)
293 * an alignment of 4. Programmers from hell... 281 * an alignment of 4. Programmers from hell...
294 */ 282 */
295 mask = __ADDR_MASK; 283 mask = __ADDR_MASK;
296#ifdef CONFIG_64BIT
297 if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs && 284 if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
298 addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2) 285 addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
299 mask = 3; 286 mask = 3;
300#endif
301 if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK) 287 if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
302 return -EIO; 288 return -EIO;
303 289
@@ -370,7 +356,6 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
370 * access registers are stored in the thread structure 356 * access registers are stored in the thread structure
371 */ 357 */
372 offset = addr - (addr_t) &dummy->regs.acrs; 358 offset = addr - (addr_t) &dummy->regs.acrs;
373#ifdef CONFIG_64BIT
374 /* 359 /*
375 * Very special case: old & broken 64 bit gdb writing 360 * Very special case: old & broken 64 bit gdb writing
376 * to acrs[15] with a 64 bit value. Ignore the lower 361 * to acrs[15] with a 64 bit value. Ignore the lower
@@ -380,8 +365,7 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
380 if (addr == (addr_t) &dummy->regs.acrs[15]) 365 if (addr == (addr_t) &dummy->regs.acrs[15])
381 child->thread.acrs[15] = (unsigned int) (data >> 32); 366 child->thread.acrs[15] = (unsigned int) (data >> 32);
382 else 367 else
383#endif 368 *(addr_t *)((addr_t) &child->thread.acrs + offset) = data;
384 *(addr_t *)((addr_t) &child->thread.acrs + offset) = data;
385 369
386 } else if (addr == (addr_t) &dummy->regs.orig_gpr2) { 370 } else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
387 /* 371 /*
@@ -411,12 +395,10 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
411 * or the child->thread.vxrs array 395 * or the child->thread.vxrs array
412 */ 396 */
413 offset = addr - (addr_t) &dummy->regs.fp_regs.fprs; 397 offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
414#ifdef CONFIG_64BIT
415 if (child->thread.vxrs) 398 if (child->thread.vxrs)
416 *(addr_t *)((addr_t) 399 *(addr_t *)((addr_t)
417 child->thread.vxrs + 2*offset) = data; 400 child->thread.vxrs + 2*offset) = data;
418 else 401 else
419#endif
420 *(addr_t *)((addr_t) 402 *(addr_t *)((addr_t)
421 &child->thread.fp_regs.fprs + offset) = data; 403 &child->thread.fp_regs.fprs + offset) = data;
422 404
@@ -441,11 +423,9 @@ static int poke_user(struct task_struct *child, addr_t addr, addr_t data)
441 * an alignment of 4. Programmers from hell indeed... 423 * an alignment of 4. Programmers from hell indeed...
442 */ 424 */
443 mask = __ADDR_MASK; 425 mask = __ADDR_MASK;
444#ifdef CONFIG_64BIT
445 if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs && 426 if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
446 addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2) 427 addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
447 mask = 3; 428 mask = 3;
448#endif
449 if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK) 429 if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
450 return -EIO; 430 return -EIO;
451 431
@@ -649,12 +629,10 @@ static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
649 * or the child->thread.vxrs array 629 * or the child->thread.vxrs array
650 */ 630 */
651 offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs; 631 offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
652#ifdef CONFIG_64BIT
653 if (child->thread.vxrs) 632 if (child->thread.vxrs)
654 tmp = *(__u32 *) 633 tmp = *(__u32 *)
655 ((addr_t) child->thread.vxrs + 2*offset); 634 ((addr_t) child->thread.vxrs + 2*offset);
656 else 635 else
657#endif
658 tmp = *(__u32 *) 636 tmp = *(__u32 *)
659 ((addr_t) &child->thread.fp_regs.fprs + offset); 637 ((addr_t) &child->thread.fp_regs.fprs + offset);
660 638
@@ -776,12 +754,10 @@ static int __poke_user_compat(struct task_struct *child,
776 * or the child->thread.vxrs array 754 * or the child->thread.vxrs array
777 */ 755 */
778 offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs; 756 offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
779#ifdef CONFIG_64BIT
780 if (child->thread.vxrs) 757 if (child->thread.vxrs)
781 *(__u32 *)((addr_t) 758 *(__u32 *)((addr_t)
782 child->thread.vxrs + 2*offset) = tmp; 759 child->thread.vxrs + 2*offset) = tmp;
783 else 760 else
784#endif
785 *(__u32 *)((addr_t) 761 *(__u32 *)((addr_t)
786 &child->thread.fp_regs.fprs + offset) = tmp; 762 &child->thread.fp_regs.fprs + offset) = tmp;
787 763
@@ -979,16 +955,13 @@ static int s390_fpregs_get(struct task_struct *target,
979 if (target == current) { 955 if (target == current) {
980 save_fp_ctl(&target->thread.fp_regs.fpc); 956 save_fp_ctl(&target->thread.fp_regs.fpc);
981 save_fp_regs(target->thread.fp_regs.fprs); 957 save_fp_regs(target->thread.fp_regs.fprs);
982 } 958 } else if (target->thread.vxrs) {
983#ifdef CONFIG_64BIT
984 else if (target->thread.vxrs) {
985 int i; 959 int i;
986 960
987 for (i = 0; i < __NUM_VXRS_LOW; i++) 961 for (i = 0; i < __NUM_VXRS_LOW; i++)
988 target->thread.fp_regs.fprs[i] = 962 target->thread.fp_regs.fprs[i] =
989 *(freg_t *)(target->thread.vxrs + i); 963 *(freg_t *)(target->thread.vxrs + i);
990 } 964 }
991#endif
992 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, 965 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
993 &target->thread.fp_regs, 0, -1); 966 &target->thread.fp_regs, 0, -1);
994} 967}
@@ -1026,23 +999,18 @@ static int s390_fpregs_set(struct task_struct *target,
1026 if (target == current) { 999 if (target == current) {
1027 restore_fp_ctl(&target->thread.fp_regs.fpc); 1000 restore_fp_ctl(&target->thread.fp_regs.fpc);
1028 restore_fp_regs(target->thread.fp_regs.fprs); 1001 restore_fp_regs(target->thread.fp_regs.fprs);
1029 } 1002 } else if (target->thread.vxrs) {
1030#ifdef CONFIG_64BIT
1031 else if (target->thread.vxrs) {
1032 int i; 1003 int i;
1033 1004
1034 for (i = 0; i < __NUM_VXRS_LOW; i++) 1005 for (i = 0; i < __NUM_VXRS_LOW; i++)
1035 *(freg_t *)(target->thread.vxrs + i) = 1006 *(freg_t *)(target->thread.vxrs + i) =
1036 target->thread.fp_regs.fprs[i]; 1007 target->thread.fp_regs.fprs[i];
1037 } 1008 }
1038#endif
1039 } 1009 }
1040 1010
1041 return rc; 1011 return rc;
1042} 1012}
1043 1013
1044#ifdef CONFIG_64BIT
1045
1046static int s390_last_break_get(struct task_struct *target, 1014static int s390_last_break_get(struct task_struct *target,
1047 const struct user_regset *regset, 1015 const struct user_regset *regset,
1048 unsigned int pos, unsigned int count, 1016 unsigned int pos, unsigned int count,
@@ -1182,8 +1150,6 @@ static int s390_vxrs_high_set(struct task_struct *target,
1182 return rc; 1150 return rc;
1183} 1151}
1184 1152
1185#endif
1186
1187static int s390_system_call_get(struct task_struct *target, 1153static int s390_system_call_get(struct task_struct *target,
1188 const struct user_regset *regset, 1154 const struct user_regset *regset,
1189 unsigned int pos, unsigned int count, 1155 unsigned int pos, unsigned int count,
@@ -1229,7 +1195,6 @@ static const struct user_regset s390_regsets[] = {
1229 .get = s390_system_call_get, 1195 .get = s390_system_call_get,
1230 .set = s390_system_call_set, 1196 .set = s390_system_call_set,
1231 }, 1197 },
1232#ifdef CONFIG_64BIT
1233 { 1198 {
1234 .core_note_type = NT_S390_LAST_BREAK, 1199 .core_note_type = NT_S390_LAST_BREAK,
1235 .n = 1, 1200 .n = 1,
@@ -1262,7 +1227,6 @@ static const struct user_regset s390_regsets[] = {
1262 .get = s390_vxrs_high_get, 1227 .get = s390_vxrs_high_get,
1263 .set = s390_vxrs_high_set, 1228 .set = s390_vxrs_high_set,
1264 }, 1229 },
1265#endif
1266}; 1230};
1267 1231
1268static const struct user_regset_view user_s390_view = { 1232static const struct user_regset_view user_s390_view = {
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S
deleted file mode 100644
index dd8016b0477e..000000000000
--- a/arch/s390/kernel/reipl.S
+++ /dev/null
@@ -1,92 +0,0 @@
1/*
2 * S390 version
3 * Copyright IBM Corp. 2000
4 * Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com)
5 */
6
7#include <linux/linkage.h>
8#include <asm/asm-offsets.h>
9#include <asm/sigp.h>
10
11#
12# store_status: Empty implementation until kdump is supported on 31 bit
13#
14ENTRY(store_status)
15 br %r14
16
17#
18# do_reipl_asm
19# Parameter: r2 = schid of reipl device
20#
21ENTRY(do_reipl_asm)
22 basr %r13,0
23.Lpg0: lpsw .Lnewpsw-.Lpg0(%r13)
24.Lpg1: # do store status of all registers
25
26 stm %r0,%r15,__LC_GPREGS_SAVE_AREA
27 stctl %c0,%c15,__LC_CREGS_SAVE_AREA
28 stam %a0,%a15,__LC_AREGS_SAVE_AREA
29 l %r10,.Ldump_pfx-.Lpg0(%r13)
30 mvc __LC_PREFIX_SAVE_AREA(4),0(%r10)
31 stckc .Lclkcmp-.Lpg0(%r13)
32 mvc __LC_CLOCK_COMP_SAVE_AREA(8),.Lclkcmp-.Lpg0(%r13)
33 stpt __LC_CPU_TIMER_SAVE_AREA
34 st %r13, __LC_PSW_SAVE_AREA+4
35 lctl %c6,%c6,.Lall-.Lpg0(%r13)
36 lr %r1,%r2
37 mvc __LC_PGM_NEW_PSW(8),.Lpcnew-.Lpg0(%r13)
38 stsch .Lschib-.Lpg0(%r13)
39 oi .Lschib+5-.Lpg0(%r13),0x84
40.Lecs: xi .Lschib+27-.Lpg0(%r13),0x01
41 msch .Lschib-.Lpg0(%r13)
42 lhi %r0,5
43.Lssch: ssch .Liplorb-.Lpg0(%r13)
44 jz .L001
45 brct %r0,.Lssch
46 bas %r14,.Ldisab-.Lpg0(%r13)
47.L001: mvc __LC_IO_NEW_PSW(8),.Lionew-.Lpg0(%r13)
48.Ltpi: lpsw .Lwaitpsw-.Lpg0(%r13)
49.Lcont: c %r1,__LC_SUBCHANNEL_ID
50 jnz .Ltpi
51 clc __LC_IO_INT_PARM(4),.Liplorb-.Lpg0(%r13)
52 jnz .Ltpi
53 tsch .Liplirb-.Lpg0(%r13)
54 tm .Liplirb+9-.Lpg0(%r13),0xbf
55 jz .L002
56 bas %r14,.Ldisab-.Lpg0(%r13)
57.L002: tm .Liplirb+8-.Lpg0(%r13),0xf3
58 jz .L003
59 bas %r14,.Ldisab-.Lpg0(%r13)
60.L003: st %r1,__LC_SUBCHANNEL_ID
61 lpsw 0
62 sigp 0,0,SIGP_RESTART
63.Ldisab: st %r14,.Ldispsw+4-.Lpg0(%r13)
64 lpsw .Ldispsw-.Lpg0(%r13)
65 .align 8
66.Lclkcmp: .quad 0x0000000000000000
67.Lall: .long 0xff000000
68.Ldump_pfx: .long dump_prefix_page
69 .align 8
70.Lnewpsw: .long 0x00080000,0x80000000+.Lpg1
71.Lpcnew: .long 0x00080000,0x80000000+.Lecs
72.Lionew: .long 0x00080000,0x80000000+.Lcont
73.Lwaitpsw: .long 0x020a0000,0x00000000+.Ltpi
74.Ldispsw: .long 0x000a0000,0x00000000
75.Liplccws: .long 0x02000000,0x60000018
76 .long 0x08000008,0x20000001
77.Liplorb: .long 0x0049504c,0x0040ff80
78 .long 0x00000000+.Liplccws
79.Lschib: .long 0x00000000,0x00000000
80 .long 0x00000000,0x00000000
81 .long 0x00000000,0x00000000
82 .long 0x00000000,0x00000000
83 .long 0x00000000,0x00000000
84 .long 0x00000000,0x00000000
85.Liplirb: .long 0x00000000,0x00000000
86 .long 0x00000000,0x00000000
87 .long 0x00000000,0x00000000
88 .long 0x00000000,0x00000000
89 .long 0x00000000,0x00000000
90 .long 0x00000000,0x00000000
91 .long 0x00000000,0x00000000
92 .long 0x00000000,0x00000000
diff --git a/arch/s390/kernel/relocate_kernel.S b/arch/s390/kernel/relocate_kernel.S
deleted file mode 100644
index f4e6f20e117a..000000000000
--- a/arch/s390/kernel/relocate_kernel.S
+++ /dev/null
@@ -1,118 +0,0 @@
1/*
2 * Copyright IBM Corp. 2005
3 *
4 * Author(s): Rolf Adelsberger,
5 * Heiko Carstens <heiko.carstens@de.ibm.com>
6 *
7 */
8
9#include <linux/linkage.h>
10#include <asm/sigp.h>
11
12/*
13 * moves the new kernel to its destination...
14 * %r2 = pointer to first kimage_entry_t
15 * %r3 = start address - where to jump to after the job is done...
16 *
17 * %r5 will be used as temp. storage
18 * %r6 holds the destination address
19 * %r7 = PAGE_SIZE
20 * %r8 holds the source address
21 * %r9 = PAGE_SIZE
22 * %r10 is a page mask
23 */
24
25 .text
26ENTRY(relocate_kernel)
27 basr %r13,0 # base address
28 .base:
29 stnsm sys_msk-.base(%r13),0xfb # disable DAT
30 stctl %c0,%c15,ctlregs-.base(%r13)
31 stm %r0,%r15,gprregs-.base(%r13)
32 la %r1,load_psw-.base(%r13)
33 mvc 0(8,%r0),0(%r1)
34 la %r0,.back-.base(%r13)
35 st %r0,4(%r0)
36 oi 4(%r0),0x80
37 mvc 0x68(8,%r0),0(%r1)
38 la %r0,.back_pgm-.base(%r13)
39 st %r0,0x6c(%r0)
40 oi 0x6c(%r0),0x80
41 lhi %r0,0
42 diag %r0,%r0,0x308
43 .back:
44 basr %r13,0
45 .back_base:
46 oi have_diag308-.back_base(%r13),0x01
47 lctl %c0,%c15,ctlregs-.back_base(%r13)
48 lm %r0,%r15,gprregs-.back_base(%r13)
49 j .start_reloc
50 .back_pgm:
51 lm %r0,%r15,gprregs-.base(%r13)
52 .start_reloc:
53 lhi %r10,-1 # preparing the mask
54 sll %r10,12 # shift it such that it becomes 0xf000
55 .top:
56 lhi %r7,4096 # load PAGE_SIZE in r7
57 lhi %r9,4096 # load PAGE_SIZE in r9
58 l %r5,0(%r2) # read another word for indirection page
59 ahi %r2,4 # increment pointer
60 tml %r5,0x1 # is it a destination page?
61 je .indir_check # NO, goto "indir_check"
62 lr %r6,%r5 # r6 = r5
63 nr %r6,%r10 # mask it out and...
64 j .top # ...next iteration
65 .indir_check:
66 tml %r5,0x2 # is it a indirection page?
67 je .done_test # NO, goto "done_test"
68 nr %r5,%r10 # YES, mask out,
69 lr %r2,%r5 # move it into the right register,
70 j .top # and read next...
71 .done_test:
72 tml %r5,0x4 # is it the done indicator?
73 je .source_test # NO! Well, then it should be the source indicator...
74 j .done # ok, lets finish it here...
75 .source_test:
76 tml %r5,0x8 # it should be a source indicator...
77 je .top # NO, ignore it...
78 lr %r8,%r5 # r8 = r5
79 nr %r8,%r10 # masking
80 0: mvcle %r6,%r8,0x0 # copy PAGE_SIZE bytes from r8 to r6 - pad with 0
81 jo 0b
82 j .top
83 .done:
84 sr %r0,%r0 # clear register r0
85 la %r4,load_psw-.base(%r13) # load psw-address into the register
86 o %r3,4(%r4) # or load address into psw
87 st %r3,4(%r4)
88 mvc 0(8,%r0),0(%r4) # copy psw to absolute address 0
89 tm have_diag308-.base(%r13),0x01
90 jno .no_diag308
91 diag %r0,%r0,0x308
92 .no_diag308:
93 sr %r1,%r1 # clear %r1
94 sr %r2,%r2 # clear %r2
95 sigp %r1,%r2,SIGP_SET_ARCHITECTURE # set cpuid to zero
96 lpsw 0 # hopefully start new kernel...
97
98 .align 8
99 load_psw:
100 .long 0x00080000,0x80000000
101 sys_msk:
102 .quad 0
103 ctlregs:
104 .rept 16
105 .long 0
106 .endr
107 gprregs:
108 .rept 16
109 .long 0
110 .endr
111 have_diag308:
112 .byte 0
113 .align 8
114 relocate_kernel_end:
115 .align 8
116 .globl relocate_kernel_len
117 relocate_kernel_len:
118 .quad relocate_kernel_end - relocate_kernel
diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S
index 7e77e03378f3..43c3169ea49c 100644
--- a/arch/s390/kernel/sclp.S
+++ b/arch/s390/kernel/sclp.S
@@ -36,21 +36,17 @@ _sclp_wait_int:
36 ahi %r15,-96 # create stack frame 36 ahi %r15,-96 # create stack frame
37 la %r8,LC_EXT_NEW_PSW # register int handler 37 la %r8,LC_EXT_NEW_PSW # register int handler
38 la %r9,.LextpswS1-.LbaseS1(%r13) 38 la %r9,.LextpswS1-.LbaseS1(%r13)
39#ifdef CONFIG_64BIT
40 tm LC_AR_MODE_ID,1 39 tm LC_AR_MODE_ID,1
41 jno .Lesa1 40 jno .Lesa1
42 la %r8,LC_EXT_NEW_PSW_64 # register int handler 64 bit 41 la %r8,LC_EXT_NEW_PSW_64 # register int handler 64 bit
43 la %r9,.LextpswS1_64-.LbaseS1(%r13) 42 la %r9,.LextpswS1_64-.LbaseS1(%r13)
44.Lesa1: 43.Lesa1:
45#endif
46 mvc .LoldpswS1-.LbaseS1(16,%r13),0(%r8) 44 mvc .LoldpswS1-.LbaseS1(16,%r13),0(%r8)
47 mvc 0(16,%r8),0(%r9) 45 mvc 0(16,%r8),0(%r9)
48#ifdef CONFIG_64BIT
49 epsw %r6,%r7 # set current addressing mode 46 epsw %r6,%r7 # set current addressing mode
50 nill %r6,0x1 # in new psw (31 or 64 bit mode) 47 nill %r6,0x1 # in new psw (31 or 64 bit mode)
51 nilh %r7,0x8000 48 nilh %r7,0x8000
52 stm %r6,%r7,0(%r8) 49 stm %r6,%r7,0(%r8)
53#endif
54 lhi %r6,0x0200 # cr mask for ext int (cr0.54) 50 lhi %r6,0x0200 # cr mask for ext int (cr0.54)
55 ltr %r2,%r2 51 ltr %r2,%r2
56 jz .LsetctS1 52 jz .LsetctS1
@@ -92,10 +88,8 @@ _sclp_wait_int:
92 .long 0, 0, 0, 0 # old ext int PSW 88 .long 0, 0, 0, 0 # old ext int PSW
93.LextpswS1: 89.LextpswS1:
94 .long 0x00080000, 0x80000000+.LwaitS1 # PSW to handle ext int 90 .long 0x00080000, 0x80000000+.LwaitS1 # PSW to handle ext int
95#ifdef CONFIG_64BIT
96.LextpswS1_64: 91.LextpswS1_64:
97 .quad 0, .LwaitS1 # PSW to handle ext int, 64 bit 92 .quad 0, .LwaitS1 # PSW to handle ext int, 64 bit
98#endif
99.LwaitpswS1: 93.LwaitpswS1:
100 .long 0x010a0000, 0x00000000+.LloopS1 # PSW to wait for ext int 94 .long 0x010a0000, 0x00000000+.LloopS1 # PSW to wait for ext int
101.LtimeS1: 95.LtimeS1:
@@ -272,13 +266,11 @@ _sclp_print:
272ENTRY(_sclp_print_early) 266ENTRY(_sclp_print_early)
273 stm %r6,%r15,24(%r15) # save registers 267 stm %r6,%r15,24(%r15) # save registers
274 ahi %r15,-96 # create stack frame 268 ahi %r15,-96 # create stack frame
275#ifdef CONFIG_64BIT
276 tm LC_AR_MODE_ID,1 269 tm LC_AR_MODE_ID,1
277 jno .Lesa2 270 jno .Lesa2
278 ahi %r15,-80 271 ahi %r15,-80
279 stmh %r6,%r15,96(%r15) # store upper register halves 272 stmh %r6,%r15,96(%r15) # store upper register halves
280.Lesa2: 273.Lesa2:
281#endif
282 lr %r10,%r2 # save string pointer 274 lr %r10,%r2 # save string pointer
283 lhi %r2,0 275 lhi %r2,0
284 bras %r14,_sclp_setup # enable console 276 bras %r14,_sclp_setup # enable console
@@ -291,14 +283,12 @@ ENTRY(_sclp_print_early)
291 lhi %r2,1 283 lhi %r2,1
292 bras %r14,_sclp_setup # disable console 284 bras %r14,_sclp_setup # disable console
293.LendS5: 285.LendS5:
294#ifdef CONFIG_64BIT
295 tm LC_AR_MODE_ID,1 286 tm LC_AR_MODE_ID,1
296 jno .Lesa3 287 jno .Lesa3
297 lgfr %r2,%r2 # sign extend return value 288 lgfr %r2,%r2 # sign extend return value
298 lmh %r6,%r15,96(%r15) # restore upper register halves 289 lmh %r6,%r15,96(%r15) # restore upper register halves
299 ahi %r15,80 290 ahi %r15,80
300.Lesa3: 291.Lesa3:
301#endif
302 lm %r6,%r15,120(%r15) # restore registers 292 lm %r6,%r15,120(%r15) # restore registers
303 br %r14 293 br %r14
304 294
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index a5ea8bc17cb3..7262fe438c99 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -92,10 +92,8 @@ EXPORT_SYMBOL(VMALLOC_END);
92struct page *vmemmap; 92struct page *vmemmap;
93EXPORT_SYMBOL(vmemmap); 93EXPORT_SYMBOL(vmemmap);
94 94
95#ifdef CONFIG_64BIT
96unsigned long MODULES_VADDR; 95unsigned long MODULES_VADDR;
97unsigned long MODULES_END; 96unsigned long MODULES_END;
98#endif
99 97
100/* An array with a pointer to the lowcore of every CPU. */ 98/* An array with a pointer to the lowcore of every CPU. */
101struct _lowcore *lowcore_ptr[NR_CPUS]; 99struct _lowcore *lowcore_ptr[NR_CPUS];
@@ -334,19 +332,10 @@ static void __init setup_lowcore(void)
334 lc->stfl_fac_list = S390_lowcore.stfl_fac_list; 332 lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
335 memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list, 333 memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
336 MAX_FACILITY_BIT/8); 334 MAX_FACILITY_BIT/8);
337#ifndef CONFIG_64BIT
338 if (MACHINE_HAS_IEEE) {
339 lc->extended_save_area_addr = (__u32)
340 __alloc_bootmem_low(PAGE_SIZE, PAGE_SIZE, 0);
341 /* enable extended save area */
342 __ctl_set_bit(14, 29);
343 }
344#else
345 if (MACHINE_HAS_VX) 335 if (MACHINE_HAS_VX)
346 lc->vector_save_area_addr = 336 lc->vector_save_area_addr =
347 (unsigned long) &lc->vector_save_area; 337 (unsigned long) &lc->vector_save_area;
348 lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0]; 338 lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0];
349#endif
350 lc->sync_enter_timer = S390_lowcore.sync_enter_timer; 339 lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
351 lc->async_enter_timer = S390_lowcore.async_enter_timer; 340 lc->async_enter_timer = S390_lowcore.async_enter_timer;
352 lc->exit_timer = S390_lowcore.exit_timer; 341 lc->exit_timer = S390_lowcore.exit_timer;
@@ -450,7 +439,6 @@ static void __init setup_memory_end(void)
450 unsigned long vmax, vmalloc_size, tmp; 439 unsigned long vmax, vmalloc_size, tmp;
451 440
452 /* Choose kernel address space layout: 2, 3, or 4 levels. */ 441 /* Choose kernel address space layout: 2, 3, or 4 levels. */
453#ifdef CONFIG_64BIT
454 vmalloc_size = VMALLOC_END ?: (128UL << 30) - MODULES_LEN; 442 vmalloc_size = VMALLOC_END ?: (128UL << 30) - MODULES_LEN;
455 tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE; 443 tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE;
456 tmp = tmp * (sizeof(struct page) + PAGE_SIZE); 444 tmp = tmp * (sizeof(struct page) + PAGE_SIZE);
@@ -462,12 +450,6 @@ static void __init setup_memory_end(void)
462 MODULES_END = vmax; 450 MODULES_END = vmax;
463 MODULES_VADDR = MODULES_END - MODULES_LEN; 451 MODULES_VADDR = MODULES_END - MODULES_LEN;
464 VMALLOC_END = MODULES_VADDR; 452 VMALLOC_END = MODULES_VADDR;
465#else
466 vmalloc_size = VMALLOC_END ?: 96UL << 20;
467 vmax = 1UL << 31; /* 2-level kernel page table */
468 /* vmalloc area is at the end of the kernel address space. */
469 VMALLOC_END = vmax;
470#endif
471 VMALLOC_START = vmax - vmalloc_size; 453 VMALLOC_START = vmax - vmalloc_size;
472 454
473 /* Split remaining virtual space between 1:1 mapping & vmemmap array */ 455 /* Split remaining virtual space between 1:1 mapping & vmemmap array */
@@ -754,7 +736,6 @@ static void __init setup_hwcaps(void)
754 if (MACHINE_HAS_HPAGE) 736 if (MACHINE_HAS_HPAGE)
755 elf_hwcap |= HWCAP_S390_HPAGE; 737 elf_hwcap |= HWCAP_S390_HPAGE;
756 738
757#if defined(CONFIG_64BIT)
758 /* 739 /*
759 * 64-bit register support for 31-bit processes 740 * 64-bit register support for 31-bit processes
760 * HWCAP_S390_HIGH_GPRS is bit 9. 741 * HWCAP_S390_HIGH_GPRS is bit 9.
@@ -772,22 +753,15 @@ static void __init setup_hwcaps(void)
772 */ 753 */
773 if (test_facility(129)) 754 if (test_facility(129))
774 elf_hwcap |= HWCAP_S390_VXRS; 755 elf_hwcap |= HWCAP_S390_VXRS;
775#endif
776
777 get_cpu_id(&cpu_id); 756 get_cpu_id(&cpu_id);
778 add_device_randomness(&cpu_id, sizeof(cpu_id)); 757 add_device_randomness(&cpu_id, sizeof(cpu_id));
779 switch (cpu_id.machine) { 758 switch (cpu_id.machine) {
780 case 0x9672: 759 case 0x9672:
781#if !defined(CONFIG_64BIT)
782 default: /* Use "g5" as default for 31 bit kernels. */
783#endif
784 strcpy(elf_platform, "g5"); 760 strcpy(elf_platform, "g5");
785 break; 761 break;
786 case 0x2064: 762 case 0x2064:
787 case 0x2066: 763 case 0x2066:
788#if defined(CONFIG_64BIT)
789 default: /* Use "z900" as default for 64 bit kernels. */ 764 default: /* Use "z900" as default for 64 bit kernels. */
790#endif
791 strcpy(elf_platform, "z900"); 765 strcpy(elf_platform, "z900");
792 break; 766 break;
793 case 0x2084: 767 case 0x2084:
@@ -839,19 +813,6 @@ void __init setup_arch(char **cmdline_p)
839 /* 813 /*
840 * print what head.S has found out about the machine 814 * print what head.S has found out about the machine
841 */ 815 */
842#ifndef CONFIG_64BIT
843 if (MACHINE_IS_VM)
844 pr_info("Linux is running as a z/VM "
845 "guest operating system in 31-bit mode\n");
846 else if (MACHINE_IS_LPAR)
847 pr_info("Linux is running natively in 31-bit mode\n");
848 if (MACHINE_HAS_IEEE)
849 pr_info("The hardware system has IEEE compatible "
850 "floating point units\n");
851 else
852 pr_info("The hardware system has no IEEE compatible "
853 "floating point units\n");
854#else /* CONFIG_64BIT */
855 if (MACHINE_IS_VM) 816 if (MACHINE_IS_VM)
856 pr_info("Linux is running as a z/VM " 817 pr_info("Linux is running as a z/VM "
857 "guest operating system in 64-bit mode\n"); 818 "guest operating system in 64-bit mode\n");
@@ -859,7 +820,6 @@ void __init setup_arch(char **cmdline_p)
859 pr_info("Linux is running under KVM in 64-bit mode\n"); 820 pr_info("Linux is running under KVM in 64-bit mode\n");
860 else if (MACHINE_IS_LPAR) 821 else if (MACHINE_IS_LPAR)
861 pr_info("Linux is running natively in 64-bit mode\n"); 822 pr_info("Linux is running natively in 64-bit mode\n");
862#endif /* CONFIG_64BIT */
863 823
864 /* Have one command line that is parsed and saved in /proc/cmdline */ 824 /* Have one command line that is parsed and saved in /proc/cmdline */
865 /* boot_command_line has been already set up in early.c */ 825 /* boot_command_line has been already set up in early.c */
@@ -930,35 +890,3 @@ void __init setup_arch(char **cmdline_p)
930 /* Add system specific data to the random pool */ 890 /* Add system specific data to the random pool */
931 setup_randomness(); 891 setup_randomness();
932} 892}
933
934#ifdef CONFIG_32BIT
935static int no_removal_warning __initdata;
936
937static int __init parse_no_removal_warning(char *str)
938{
939 no_removal_warning = 1;
940 return 0;
941}
942__setup("no_removal_warning", parse_no_removal_warning);
943
944static int __init removal_warning(void)
945{
946 if (no_removal_warning)
947 return 0;
948 printk(KERN_ALERT "\n\n");
949 printk(KERN_CONT "Warning - you are using a 31 bit kernel!\n\n");
950 printk(KERN_CONT "We plan to remove 31 bit kernel support from the kernel sources in March 2015.\n");
951 printk(KERN_CONT "Currently we assume that nobody is using the 31 bit kernel on old 31 bit\n");
952 printk(KERN_CONT "hardware anymore. If you think that the code should not be removed and also\n");
953 printk(KERN_CONT "future versions of the Linux kernel should be able to run in 31 bit mode\n");
954 printk(KERN_CONT "please let us know. Please write to:\n");
955 printk(KERN_CONT "linux390@de.ibm.com (mail address) and/or\n");
956 printk(KERN_CONT "linux-s390@vger.kernel.org (mailing list).\n\n");
957 printk(KERN_CONT "Thank you!\n\n");
958 printk(KERN_CONT "If this kernel runs on a 64 bit machine you may consider using a 64 bit kernel.\n");
959 printk(KERN_CONT "This message can be disabled with the \"no_removal_warning\" kernel parameter.\n");
960 schedule_timeout_uninterruptible(300 * HZ);
961 return 0;
962}
963early_initcall(removal_warning);
964#endif
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index b3ae6f70c6d6..7fec60cb0b75 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -106,7 +106,6 @@ static void store_sigregs(void)
106{ 106{
107 save_access_regs(current->thread.acrs); 107 save_access_regs(current->thread.acrs);
108 save_fp_ctl(&current->thread.fp_regs.fpc); 108 save_fp_ctl(&current->thread.fp_regs.fpc);
109#ifdef CONFIG_64BIT
110 if (current->thread.vxrs) { 109 if (current->thread.vxrs) {
111 int i; 110 int i;
112 111
@@ -115,7 +114,6 @@ static void store_sigregs(void)
115 current->thread.fp_regs.fprs[i] = 114 current->thread.fp_regs.fprs[i] =
116 *(freg_t *)(current->thread.vxrs + i); 115 *(freg_t *)(current->thread.vxrs + i);
117 } else 116 } else
118#endif
119 save_fp_regs(current->thread.fp_regs.fprs); 117 save_fp_regs(current->thread.fp_regs.fprs);
120} 118}
121 119
@@ -124,7 +122,6 @@ static void load_sigregs(void)
124{ 122{
125 restore_access_regs(current->thread.acrs); 123 restore_access_regs(current->thread.acrs);
126 /* restore_fp_ctl is done in restore_sigregs */ 124 /* restore_fp_ctl is done in restore_sigregs */
127#ifdef CONFIG_64BIT
128 if (current->thread.vxrs) { 125 if (current->thread.vxrs) {
129 int i; 126 int i;
130 127
@@ -133,7 +130,6 @@ static void load_sigregs(void)
133 current->thread.fp_regs.fprs[i]; 130 current->thread.fp_regs.fprs[i];
134 restore_vx_regs(current->thread.vxrs); 131 restore_vx_regs(current->thread.vxrs);
135 } else 132 } else
136#endif
137 restore_fp_regs(current->thread.fp_regs.fprs); 133 restore_fp_regs(current->thread.fp_regs.fprs);
138} 134}
139 135
@@ -200,7 +196,6 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
200static int save_sigregs_ext(struct pt_regs *regs, 196static int save_sigregs_ext(struct pt_regs *regs,
201 _sigregs_ext __user *sregs_ext) 197 _sigregs_ext __user *sregs_ext)
202{ 198{
203#ifdef CONFIG_64BIT
204 __u64 vxrs[__NUM_VXRS_LOW]; 199 __u64 vxrs[__NUM_VXRS_LOW];
205 int i; 200 int i;
206 201
@@ -215,14 +210,12 @@ static int save_sigregs_ext(struct pt_regs *regs,
215 sizeof(sregs_ext->vxrs_high))) 210 sizeof(sregs_ext->vxrs_high)))
216 return -EFAULT; 211 return -EFAULT;
217 } 212 }
218#endif
219 return 0; 213 return 0;
220} 214}
221 215
222static int restore_sigregs_ext(struct pt_regs *regs, 216static int restore_sigregs_ext(struct pt_regs *regs,
223 _sigregs_ext __user *sregs_ext) 217 _sigregs_ext __user *sregs_ext)
224{ 218{
225#ifdef CONFIG_64BIT
226 __u64 vxrs[__NUM_VXRS_LOW]; 219 __u64 vxrs[__NUM_VXRS_LOW];
227 int i; 220 int i;
228 221
@@ -237,7 +230,6 @@ static int restore_sigregs_ext(struct pt_regs *regs,
237 for (i = 0; i < __NUM_VXRS_LOW; i++) 230 for (i = 0; i < __NUM_VXRS_LOW; i++)
238 *((__u64 *)(current->thread.vxrs + i) + 1) = vxrs[i]; 231 *((__u64 *)(current->thread.vxrs + i) + 1) = vxrs[i];
239 } 232 }
240#endif
241 return 0; 233 return 0;
242} 234}
243 235
@@ -416,13 +408,11 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
416 * included in the signal frame on a 31-bit system. 408 * included in the signal frame on a 31-bit system.
417 */ 409 */
418 uc_flags = 0; 410 uc_flags = 0;
419#ifdef CONFIG_64BIT
420 if (MACHINE_HAS_VX) { 411 if (MACHINE_HAS_VX) {
421 frame_size += sizeof(_sigregs_ext); 412 frame_size += sizeof(_sigregs_ext);
422 if (current->thread.vxrs) 413 if (current->thread.vxrs)
423 uc_flags |= UC_VXRS; 414 uc_flags |= UC_VXRS;
424 } 415 }
425#endif
426 frame = get_sigframe(&ksig->ka, regs, frame_size); 416 frame = get_sigframe(&ksig->ka, regs, frame_size);
427 if (frame == (void __user *) -1UL) 417 if (frame == (void __user *) -1UL)
428 return -EFAULT; 418 return -EFAULT;
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index db8f1115a3bf..d140160c9aec 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -198,19 +198,11 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
198 lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET; 198 lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET;
199 lc->cpu_nr = cpu; 199 lc->cpu_nr = cpu;
200 lc->spinlock_lockval = arch_spin_lockval(cpu); 200 lc->spinlock_lockval = arch_spin_lockval(cpu);
201#ifndef CONFIG_64BIT
202 if (MACHINE_HAS_IEEE) {
203 lc->extended_save_area_addr = get_zeroed_page(GFP_KERNEL);
204 if (!lc->extended_save_area_addr)
205 goto out;
206 }
207#else
208 if (MACHINE_HAS_VX) 201 if (MACHINE_HAS_VX)
209 lc->vector_save_area_addr = 202 lc->vector_save_area_addr =
210 (unsigned long) &lc->vector_save_area; 203 (unsigned long) &lc->vector_save_area;
211 if (vdso_alloc_per_cpu(lc)) 204 if (vdso_alloc_per_cpu(lc))
212 goto out; 205 goto out;
213#endif
214 lowcore_ptr[cpu] = lc; 206 lowcore_ptr[cpu] = lc;
215 pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc); 207 pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
216 return 0; 208 return 0;
@@ -229,16 +221,7 @@ static void pcpu_free_lowcore(struct pcpu *pcpu)
229{ 221{
230 pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0); 222 pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
231 lowcore_ptr[pcpu - pcpu_devices] = NULL; 223 lowcore_ptr[pcpu - pcpu_devices] = NULL;
232#ifndef CONFIG_64BIT
233 if (MACHINE_HAS_IEEE) {
234 struct _lowcore *lc = pcpu->lowcore;
235
236 free_page((unsigned long) lc->extended_save_area_addr);
237 lc->extended_save_area_addr = 0;
238 }
239#else
240 vdso_free_per_cpu(pcpu->lowcore); 224 vdso_free_per_cpu(pcpu->lowcore);
241#endif
242 if (pcpu == &pcpu_devices[0]) 225 if (pcpu == &pcpu_devices[0])
243 return; 226 return;
244 free_page(pcpu->lowcore->panic_stack-PANIC_FRAME_OFFSET); 227 free_page(pcpu->lowcore->panic_stack-PANIC_FRAME_OFFSET);
@@ -492,22 +475,6 @@ void arch_send_call_function_single_ipi(int cpu)
492 pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single); 475 pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
493} 476}
494 477
495#ifndef CONFIG_64BIT
496/*
497 * this function sends a 'purge tlb' signal to another CPU.
498 */
499static void smp_ptlb_callback(void *info)
500{
501 __tlb_flush_local();
502}
503
504void smp_ptlb_all(void)
505{
506 on_each_cpu(smp_ptlb_callback, NULL, 1);
507}
508EXPORT_SYMBOL(smp_ptlb_all);
509#endif /* ! CONFIG_64BIT */
510
511/* 478/*
512 * this function sends a 'reschedule' IPI to another CPU. 479 * this function sends a 'reschedule' IPI to another CPU.
513 * it goes straight through and wastes no time serializing 480 * it goes straight through and wastes no time serializing
diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c
index 23eb222c1658..f145490cce54 100644
--- a/arch/s390/kernel/sys_s390.c
+++ b/arch/s390/kernel/sys_s390.c
@@ -76,7 +76,6 @@ SYSCALL_DEFINE5(s390_ipc, uint, call, int, first, unsigned long, second,
76 return sys_ipc(call, first, second, third, ptr, third); 76 return sys_ipc(call, first, second, third, ptr, third);
77} 77}
78 78
79#ifdef CONFIG_64BIT
80SYSCALL_DEFINE1(s390_personality, unsigned int, personality) 79SYSCALL_DEFINE1(s390_personality, unsigned int, personality)
81{ 80{
82 unsigned int ret; 81 unsigned int ret;
@@ -90,51 +89,3 @@ SYSCALL_DEFINE1(s390_personality, unsigned int, personality)
90 89
91 return ret; 90 return ret;
92} 91}
93#endif /* CONFIG_64BIT */
94
95/*
96 * Wrapper function for sys_fadvise64/fadvise64_64
97 */
98#ifndef CONFIG_64BIT
99
100SYSCALL_DEFINE5(s390_fadvise64, int, fd, u32, offset_high, u32, offset_low,
101 size_t, len, int, advice)
102{
103 return sys_fadvise64(fd, (u64) offset_high << 32 | offset_low,
104 len, advice);
105}
106
107struct fadvise64_64_args {
108 int fd;
109 long long offset;
110 long long len;
111 int advice;
112};
113
114SYSCALL_DEFINE1(s390_fadvise64_64, struct fadvise64_64_args __user *, args)
115{
116 struct fadvise64_64_args a;
117
118 if ( copy_from_user(&a, args, sizeof(a)) )
119 return -EFAULT;
120 return sys_fadvise64_64(a.fd, a.offset, a.len, a.advice);
121}
122
123/*
124 * This is a wrapper to call sys_fallocate(). For 31 bit s390 the last
125 * 64 bit argument "len" is split into the upper and lower 32 bits. The
126 * system call wrapper in the user space loads the value to %r6/%r7.
127 * The code in entry.S keeps the values in %r2 - %r6 where they are and
128 * stores %r7 to 96(%r15). But the standard C linkage requires that
129 * the whole 64 bit value for len is stored on the stack and doesn't
130 * use %r6 at all. So s390_fallocate has to convert the arguments from
131 * %r2: fd, %r3: mode, %r4/%r5: offset, %r6/96(%r15)-99(%r15): len
132 * to
133 * %r2: fd, %r3: mode, %r4/%r5: offset, 96(%r15)-103(%r15): len
134 */
135SYSCALL_DEFINE5(s390_fallocate, int, fd, int, mode, loff_t, offset,
136 u32, len_high, u32, len_low)
137{
138 return sys_fallocate(fd, mode, offset, ((u64)len_high << 32) | len_low);
139}
140#endif
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index f081cf1157c3..8be11c22ed17 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -26,7 +26,6 @@ int show_unhandled_signals = 1;
26 26
27static inline void __user *get_trap_ip(struct pt_regs *regs) 27static inline void __user *get_trap_ip(struct pt_regs *regs)
28{ 28{
29#ifdef CONFIG_64BIT
30 unsigned long address; 29 unsigned long address;
31 30
32 if (regs->int_code & 0x200) 31 if (regs->int_code & 0x200)
@@ -35,10 +34,6 @@ static inline void __user *get_trap_ip(struct pt_regs *regs)
35 address = regs->psw.addr; 34 address = regs->psw.addr;
36 return (void __user *) 35 return (void __user *)
37 ((address - (regs->int_code >> 16)) & PSW_ADDR_INSN); 36 ((address - (regs->int_code >> 16)) & PSW_ADDR_INSN);
38#else
39 return (void __user *)
40 ((regs->psw.addr - (regs->int_code >> 16)) & PSW_ADDR_INSN);
41#endif
42} 37}
43 38
44static inline void report_user_fault(struct pt_regs *regs, int signr) 39static inline void report_user_fault(struct pt_regs *regs, int signr)
@@ -153,11 +148,8 @@ DO_ERROR_INFO(privileged_op, SIGILL, ILL_PRVOPC,
153 "privileged operation") 148 "privileged operation")
154DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN, 149DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN,
155 "special operation exception") 150 "special operation exception")
156
157#ifdef CONFIG_64BIT
158DO_ERROR_INFO(transaction_exception, SIGILL, ILL_ILLOPN, 151DO_ERROR_INFO(transaction_exception, SIGILL, ILL_ILLOPN,
159 "transaction constraint exception") 152 "transaction constraint exception")
160#endif
161 153
162static inline void do_fp_trap(struct pt_regs *regs, int fpc) 154static inline void do_fp_trap(struct pt_regs *regs, int fpc)
163{ 155{
@@ -211,29 +203,6 @@ void illegal_op(struct pt_regs *regs)
211 } else if (*((__u16 *) opcode) == UPROBE_SWBP_INSN) { 203 } else if (*((__u16 *) opcode) == UPROBE_SWBP_INSN) {
212 is_uprobe_insn = 1; 204 is_uprobe_insn = 1;
213#endif 205#endif
214#ifdef CONFIG_MATHEMU
215 } else if (opcode[0] == 0xb3) {
216 if (get_user(*((__u16 *) (opcode+2)), location+1))
217 return;
218 signal = math_emu_b3(opcode, regs);
219 } else if (opcode[0] == 0xed) {
220 if (get_user(*((__u32 *) (opcode+2)),
221 (__u32 __user *)(location+1)))
222 return;
223 signal = math_emu_ed(opcode, regs);
224 } else if (*((__u16 *) opcode) == 0xb299) {
225 if (get_user(*((__u16 *) (opcode+2)), location+1))
226 return;
227 signal = math_emu_srnm(opcode, regs);
228 } else if (*((__u16 *) opcode) == 0xb29c) {
229 if (get_user(*((__u16 *) (opcode+2)), location+1))
230 return;
231 signal = math_emu_stfpc(opcode, regs);
232 } else if (*((__u16 *) opcode) == 0xb29d) {
233 if (get_user(*((__u16 *) (opcode+2)), location+1))
234 return;
235 signal = math_emu_lfpc(opcode, regs);
236#endif
237 } else 206 } else
238 signal = SIGILL; 207 signal = SIGILL;
239 } 208 }
@@ -247,71 +216,14 @@ void illegal_op(struct pt_regs *regs)
247 3, SIGTRAP) != NOTIFY_STOP) 216 3, SIGTRAP) != NOTIFY_STOP)
248 signal = SIGILL; 217 signal = SIGILL;
249 } 218 }
250
251#ifdef CONFIG_MATHEMU
252 if (signal == SIGFPE)
253 do_fp_trap(regs, current->thread.fp_regs.fpc);
254 else if (signal == SIGSEGV)
255 do_trap(regs, signal, SEGV_MAPERR, "user address fault");
256 else
257#endif
258 if (signal) 219 if (signal)
259 do_trap(regs, signal, ILL_ILLOPC, "illegal operation"); 220 do_trap(regs, signal, ILL_ILLOPC, "illegal operation");
260} 221}
261NOKPROBE_SYMBOL(illegal_op); 222NOKPROBE_SYMBOL(illegal_op);
262 223
263#ifdef CONFIG_MATHEMU
264void specification_exception(struct pt_regs *regs)
265{
266 __u8 opcode[6];
267 __u16 __user *location = NULL;
268 int signal = 0;
269
270 location = (__u16 __user *) get_trap_ip(regs);
271
272 if (user_mode(regs)) {
273 get_user(*((__u16 *) opcode), location);
274 switch (opcode[0]) {
275 case 0x28: /* LDR Rx,Ry */
276 signal = math_emu_ldr(opcode);
277 break;
278 case 0x38: /* LER Rx,Ry */
279 signal = math_emu_ler(opcode);
280 break;
281 case 0x60: /* STD R,D(X,B) */
282 get_user(*((__u16 *) (opcode+2)), location+1);
283 signal = math_emu_std(opcode, regs);
284 break;
285 case 0x68: /* LD R,D(X,B) */
286 get_user(*((__u16 *) (opcode+2)), location+1);
287 signal = math_emu_ld(opcode, regs);
288 break;
289 case 0x70: /* STE R,D(X,B) */
290 get_user(*((__u16 *) (opcode+2)), location+1);
291 signal = math_emu_ste(opcode, regs);
292 break;
293 case 0x78: /* LE R,D(X,B) */
294 get_user(*((__u16 *) (opcode+2)), location+1);
295 signal = math_emu_le(opcode, regs);
296 break;
297 default:
298 signal = SIGILL;
299 break;
300 }
301 } else
302 signal = SIGILL;
303
304 if (signal == SIGFPE)
305 do_fp_trap(regs, current->thread.fp_regs.fpc);
306 else if (signal)
307 do_trap(regs, signal, ILL_ILLOPN, "specification exception");
308}
309#else
310DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN, 224DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN,
311 "specification exception"); 225 "specification exception");
312#endif
313 226
314#ifdef CONFIG_64BIT
315int alloc_vector_registers(struct task_struct *tsk) 227int alloc_vector_registers(struct task_struct *tsk)
316{ 228{
317 __vector128 *vxrs; 229 __vector128 *vxrs;
@@ -377,7 +289,6 @@ static int __init disable_vector_extension(char *str)
377 return 1; 289 return 1;
378} 290}
379__setup("novx", disable_vector_extension); 291__setup("novx", disable_vector_extension);
380#endif
381 292
382void data_exception(struct pt_regs *regs) 293void data_exception(struct pt_regs *regs)
383{ 294{
@@ -386,65 +297,7 @@ void data_exception(struct pt_regs *regs)
386 297
387 location = get_trap_ip(regs); 298 location = get_trap_ip(regs);
388 299
389 if (MACHINE_HAS_IEEE) 300 asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
390 asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
391
392#ifdef CONFIG_MATHEMU
393 else if (user_mode(regs)) {
394 __u8 opcode[6];
395 get_user(*((__u16 *) opcode), location);
396 switch (opcode[0]) {
397 case 0x28: /* LDR Rx,Ry */
398 signal = math_emu_ldr(opcode);
399 break;
400 case 0x38: /* LER Rx,Ry */
401 signal = math_emu_ler(opcode);
402 break;
403 case 0x60: /* STD R,D(X,B) */
404 get_user(*((__u16 *) (opcode+2)), location+1);
405 signal = math_emu_std(opcode, regs);
406 break;
407 case 0x68: /* LD R,D(X,B) */
408 get_user(*((__u16 *) (opcode+2)), location+1);
409 signal = math_emu_ld(opcode, regs);
410 break;
411 case 0x70: /* STE R,D(X,B) */
412 get_user(*((__u16 *) (opcode+2)), location+1);
413 signal = math_emu_ste(opcode, regs);
414 break;
415 case 0x78: /* LE R,D(X,B) */
416 get_user(*((__u16 *) (opcode+2)), location+1);
417 signal = math_emu_le(opcode, regs);
418 break;
419 case 0xb3:
420 get_user(*((__u16 *) (opcode+2)), location+1);
421 signal = math_emu_b3(opcode, regs);
422 break;
423 case 0xed:
424 get_user(*((__u32 *) (opcode+2)),
425 (__u32 __user *)(location+1));
426 signal = math_emu_ed(opcode, regs);
427 break;
428 case 0xb2:
429 if (opcode[1] == 0x99) {
430 get_user(*((__u16 *) (opcode+2)), location+1);
431 signal = math_emu_srnm(opcode, regs);
432 } else if (opcode[1] == 0x9c) {
433 get_user(*((__u16 *) (opcode+2)), location+1);
434 signal = math_emu_stfpc(opcode, regs);
435 } else if (opcode[1] == 0x9d) {
436 get_user(*((__u16 *) (opcode+2)), location+1);
437 signal = math_emu_lfpc(opcode, regs);
438 } else
439 signal = SIGILL;
440 break;
441 default:
442 signal = SIGILL;
443 break;
444 }
445 }
446#endif
447#ifdef CONFIG_64BIT
448 /* Check for vector register enablement */ 301 /* Check for vector register enablement */
449 if (MACHINE_HAS_VX && !current->thread.vxrs && 302 if (MACHINE_HAS_VX && !current->thread.vxrs &&
450 (current->thread.fp_regs.fpc & FPC_DXC_MASK) == 0xfe00) { 303 (current->thread.fp_regs.fpc & FPC_DXC_MASK) == 0xfe00) {
@@ -454,13 +307,11 @@ void data_exception(struct pt_regs *regs)
454 clear_pt_regs_flag(regs, PIF_PER_TRAP); 307 clear_pt_regs_flag(regs, PIF_PER_TRAP);
455 return; 308 return;
456 } 309 }
457#endif
458
459 if (current->thread.fp_regs.fpc & FPC_DXC_MASK) 310 if (current->thread.fp_regs.fpc & FPC_DXC_MASK)
460 signal = SIGFPE; 311 signal = SIGFPE;
461 else 312 else
462 signal = SIGILL; 313 signal = SIGILL;
463 if (signal == SIGFPE) 314 if (signal == SIGFPE)
464 do_fp_trap(regs, current->thread.fp_regs.fpc); 315 do_fp_trap(regs, current->thread.fp_regs.fpc);
465 else if (signal) 316 else if (signal)
466 do_trap(regs, signal, ILL_ILLOPN, "data exception"); 317 do_trap(regs, signal, ILL_ILLOPN, "data exception");
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 0bbb7e027c5a..0d58269ff425 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -32,19 +32,17 @@
32#include <asm/vdso.h> 32#include <asm/vdso.h>
33#include <asm/facility.h> 33#include <asm/facility.h>
34 34
35#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT) 35#ifdef CONFIG_COMPAT
36extern char vdso32_start, vdso32_end; 36extern char vdso32_start, vdso32_end;
37static void *vdso32_kbase = &vdso32_start; 37static void *vdso32_kbase = &vdso32_start;
38static unsigned int vdso32_pages; 38static unsigned int vdso32_pages;
39static struct page **vdso32_pagelist; 39static struct page **vdso32_pagelist;
40#endif 40#endif
41 41
42#ifdef CONFIG_64BIT
43extern char vdso64_start, vdso64_end; 42extern char vdso64_start, vdso64_end;
44static void *vdso64_kbase = &vdso64_start; 43static void *vdso64_kbase = &vdso64_start;
45static unsigned int vdso64_pages; 44static unsigned int vdso64_pages;
46static struct page **vdso64_pagelist; 45static struct page **vdso64_pagelist;
47#endif /* CONFIG_64BIT */
48 46
49/* 47/*
50 * Should the kernel map a VDSO page into processes and pass its 48 * Should the kernel map a VDSO page into processes and pass its
@@ -87,7 +85,6 @@ static void vdso_init_data(struct vdso_data *vd)
87 vd->ectg_available = test_facility(31); 85 vd->ectg_available = test_facility(31);
88} 86}
89 87
90#ifdef CONFIG_64BIT
91/* 88/*
92 * Allocate/free per cpu vdso data. 89 * Allocate/free per cpu vdso data.
93 */ 90 */
@@ -169,7 +166,6 @@ static void vdso_init_cr5(void)
169 cr5 = offsetof(struct _lowcore, paste); 166 cr5 = offsetof(struct _lowcore, paste);
170 __ctl_load(cr5, 5, 5); 167 __ctl_load(cr5, 5, 5);
171} 168}
172#endif /* CONFIG_64BIT */
173 169
174/* 170/*
175 * This is called from binfmt_elf, we create the special vma for the 171 * This is called from binfmt_elf, we create the special vma for the
@@ -191,7 +187,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
191 if (!uses_interp) 187 if (!uses_interp)
192 return 0; 188 return 0;
193 189
194#ifdef CONFIG_64BIT
195 vdso_pagelist = vdso64_pagelist; 190 vdso_pagelist = vdso64_pagelist;
196 vdso_pages = vdso64_pages; 191 vdso_pages = vdso64_pages;
197#ifdef CONFIG_COMPAT 192#ifdef CONFIG_COMPAT
@@ -200,11 +195,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
200 vdso_pages = vdso32_pages; 195 vdso_pages = vdso32_pages;
201 } 196 }
202#endif 197#endif
203#else
204 vdso_pagelist = vdso32_pagelist;
205 vdso_pages = vdso32_pages;
206#endif
207
208 /* 198 /*
209 * vDSO has a problem and was disabled, just don't "enable" it for 199 * vDSO has a problem and was disabled, just don't "enable" it for
210 * the process 200 * the process
@@ -268,7 +258,7 @@ static int __init vdso_init(void)
268 if (!vdso_enabled) 258 if (!vdso_enabled)
269 return 0; 259 return 0;
270 vdso_init_data(vdso_data); 260 vdso_init_data(vdso_data);
271#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT) 261#ifdef CONFIG_COMPAT
272 /* Calculate the size of the 32 bit vDSO */ 262 /* Calculate the size of the 32 bit vDSO */
273 vdso32_pages = ((&vdso32_end - &vdso32_start 263 vdso32_pages = ((&vdso32_end - &vdso32_start
274 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1; 264 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;
@@ -287,7 +277,6 @@ static int __init vdso_init(void)
287 vdso32_pagelist[vdso32_pages] = NULL; 277 vdso32_pagelist[vdso32_pages] = NULL;
288#endif 278#endif
289 279
290#ifdef CONFIG_64BIT
291 /* Calculate the size of the 64 bit vDSO */ 280 /* Calculate the size of the 64 bit vDSO */
292 vdso64_pages = ((&vdso64_end - &vdso64_start 281 vdso64_pages = ((&vdso64_end - &vdso64_start
293 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1; 282 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;
@@ -307,7 +296,6 @@ static int __init vdso_init(void)
307 if (vdso_alloc_per_cpu(&S390_lowcore)) 296 if (vdso_alloc_per_cpu(&S390_lowcore))
308 BUG(); 297 BUG();
309 vdso_init_cr5(); 298 vdso_init_cr5();
310#endif /* CONFIG_64BIT */
311 299
312 get_page(virt_to_page(vdso_data)); 300 get_page(virt_to_page(vdso_data));
313 301
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 35b13ed0af5f..445657fe658c 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -6,17 +6,10 @@
6#include <asm/page.h> 6#include <asm/page.h>
7#include <asm-generic/vmlinux.lds.h> 7#include <asm-generic/vmlinux.lds.h>
8 8
9#ifndef CONFIG_64BIT
10OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390")
11OUTPUT_ARCH(s390:31-bit)
12ENTRY(startup)
13jiffies = jiffies_64 + 4;
14#else
15OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390") 9OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
16OUTPUT_ARCH(s390:64-bit) 10OUTPUT_ARCH(s390:64-bit)
17ENTRY(startup) 11ENTRY(startup)
18jiffies = jiffies_64; 12jiffies = jiffies_64;
19#endif
20 13
21PHDRS { 14PHDRS {
22 text PT_LOAD FLAGS(5); /* R_E */ 15 text PT_LOAD FLAGS(5); /* R_E */