author      Linus Torvalds <torvalds@linux-foundation.org>   2015-04-14 23:51:44 -0400
committer   Linus Torvalds <torvalds@linux-foundation.org>   2015-04-14 23:51:44 -0400
commit      bdfa54dfd9eea001274dbcd622657a904fe43b81 (patch)
tree        ab251ab359e519656d7061bbe8db4c7ab355404b /arch/s390/kernel
parent      2481bc75283ea10e75d5fb1a8b42af363fc4b45c (diff)
parent      a1307bba1adcc9b338511180fa94a54b4c3f534b (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Martin Schwidefsky:
"The major change in this merge is the removal of the support for
31-bit kernels. Naturally 31-bit user space will continue to work via
the compat layer.
And then some cleanup, some improvements and bug fixes"
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (23 commits)
s390/smp: wait until secondaries are active & online
s390/hibernate: fix save and restore of kernel text section
s390/cacheinfo: add missing facility check
s390/syscalls: simplify syscall_get_arch()
s390/irq: enforce correct irqclass_sub_desc array size
s390: remove "64" suffix from mem64.S and swsusp_asm64.S
s390/ipl: cleanup macro usage
s390/ipl: cleanup shutdown_action attributes
s390/ipl: cleanup bin attr usage
s390/uprobes: fix address space annotation
s390: add missing arch_release_task_struct() declaration
s390: make couple of functions and variables static
s390/maccess: improve s390_kernel_write()
s390/maccess: remove potentially broken probe_kernel_write()
s390/watchdog: support for KVM hypervisors and delete pr_info messages
s390/watchdog: enable KEEPALIVE for /dev/watchdog
s390/dasd: remove setting of scheduler from driver
s390/traps: panic() instead of die() on translation exception
s390: remove test_facility(2) (== z/Architecture mode active) checks
s390/cmpxchg: simplify cmpxchg_double
...
Diffstat (limited to 'arch/s390/kernel')
41 files changed, 1173 insertions, 3260 deletions
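
Almost every hunk below follows the same mechanical pattern: code that used to be compiled in two variants selected by CONFIG_64BIT keeps only its 64-bit branch, since a 31-bit kernel can no longer be configured (31-bit user space keeps running through the compat layer). A minimal sketch of that before/after shape, not code from this patch; show_addr() is a made-up stand-in for the dual-width print sites cleaned up in dis.c and dumpstack.c below:

/* Illustrative sketch only: the general shape of the CONFIG_64BIT cleanup. */
#include <stdio.h>

/* Before: two formats, chosen at build time by CONFIG_64BIT. */
static void show_addr_before(unsigned long addr)
{
#ifdef CONFIG_64BIT
        printf("%016lx: \n", addr);     /* 64-bit kernel: 16 hex digits */
#else
        printf("%08lx: \n", addr);      /* 31-bit kernel: 8 hex digits */
#endif
}

/* After: CONFIG_64BIT is the only case left, so the #ifdef disappears. */
static void show_addr_after(unsigned long addr)
{
        printf("%016lx: \n", addr);
}

int main(void)
{
        show_addr_before(0x10000UL);
        show_addr_after(0x10000UL);
        return 0;
}

The same collapse shows up in the Makefile (the entry64.o/entry.o selection becomes a single entry.o), in the disassembler's opcode tables, and in the inline assembly further down.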
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 31fab2676fe9..ffb87617a36c 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -26,25 +26,21 @@ CFLAGS_dumpstack.o += -fno-optimize-sibling-calls | |||
26 | # | 26 | # |
27 | CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"' | 27 | CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"' |
28 | 28 | ||
29 | CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w | 29 | CFLAGS_sysinfo.o += -w |
30 | 30 | ||
31 | obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o | 31 | obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o |
32 | obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o | 32 | obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o |
33 | obj-y += debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o | 33 | obj-y += debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o |
34 | obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o | 34 | obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o |
35 | obj-y += dumpstack.o | 35 | obj-y += runtime_instr.o cache.o dumpstack.o |
36 | obj-y += entry.o reipl.o relocate_kernel.o | ||
36 | 37 | ||
37 | obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) | 38 | extra-y += head.o head64.o vmlinux.lds |
38 | obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) | ||
39 | obj-y += $(if $(CONFIG_64BIT),relocate_kernel64.o,relocate_kernel.o) | ||
40 | |||
41 | extra-y += head.o vmlinux.lds | ||
42 | extra-y += $(if $(CONFIG_64BIT),head64.o,head31.o) | ||
43 | 39 | ||
44 | obj-$(CONFIG_MODULES) += s390_ksyms.o module.o | 40 | obj-$(CONFIG_MODULES) += s390_ksyms.o module.o |
45 | obj-$(CONFIG_SMP) += smp.o | 41 | obj-$(CONFIG_SMP) += smp.o |
46 | obj-$(CONFIG_SCHED_BOOK) += topology.o | 42 | obj-$(CONFIG_SCHED_BOOK) += topology.o |
47 | obj-$(CONFIG_HIBERNATION) += suspend.o swsusp_asm64.o | 43 | obj-$(CONFIG_HIBERNATION) += suspend.o swsusp.o |
48 | obj-$(CONFIG_AUDIT) += audit.o | 44 | obj-$(CONFIG_AUDIT) += audit.o |
49 | compat-obj-$(CONFIG_AUDIT) += compat_audit.o | 45 | compat-obj-$(CONFIG_AUDIT) += compat_audit.o |
50 | obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o | 46 | obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o |
@@ -56,13 +52,9 @@ obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o | |||
56 | obj-$(CONFIG_CRASH_DUMP) += crash_dump.o | 52 | obj-$(CONFIG_CRASH_DUMP) += crash_dump.o |
57 | obj-$(CONFIG_UPROBES) += uprobes.o | 53 | obj-$(CONFIG_UPROBES) += uprobes.o |
58 | 54 | ||
59 | ifdef CONFIG_64BIT | 55 | obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o |
60 | obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o \ | 56 | obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_events.o |
61 | perf_cpum_cf_events.o | ||
62 | obj-y += runtime_instr.o cache.o | ||
63 | endif | ||
64 | 57 | ||
65 | # vdso | 58 | # vdso |
66 | obj-$(CONFIG_64BIT) += vdso64/ | 59 | obj-y += vdso64/ |
67 | obj-$(CONFIG_32BIT) += vdso32/ | ||
68 | obj-$(CONFIG_COMPAT) += vdso32/ | 60 | obj-$(CONFIG_COMPAT) += vdso32/ |
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 8dc4db10d160..f35058da8eaf 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -166,9 +166,6 @@ int main(void) | |||
166 | DEFINE(__LC_FPREGS_SAVE_AREA, offsetof(struct _lowcore, floating_pt_save_area)); | 166 | DEFINE(__LC_FPREGS_SAVE_AREA, offsetof(struct _lowcore, floating_pt_save_area)); |
167 | DEFINE(__LC_GPREGS_SAVE_AREA, offsetof(struct _lowcore, gpregs_save_area)); | 167 | DEFINE(__LC_GPREGS_SAVE_AREA, offsetof(struct _lowcore, gpregs_save_area)); |
168 | DEFINE(__LC_CREGS_SAVE_AREA, offsetof(struct _lowcore, cregs_save_area)); | 168 | DEFINE(__LC_CREGS_SAVE_AREA, offsetof(struct _lowcore, cregs_save_area)); |
169 | #ifdef CONFIG_32BIT | ||
170 | DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, extended_save_area_addr)); | ||
171 | #else /* CONFIG_32BIT */ | ||
172 | DEFINE(__LC_DATA_EXC_CODE, offsetof(struct _lowcore, data_exc_code)); | 169 | DEFINE(__LC_DATA_EXC_CODE, offsetof(struct _lowcore, data_exc_code)); |
173 | DEFINE(__LC_MCCK_FAIL_STOR_ADDR, offsetof(struct _lowcore, failing_storage_address)); | 170 | DEFINE(__LC_MCCK_FAIL_STOR_ADDR, offsetof(struct _lowcore, failing_storage_address)); |
174 | DEFINE(__LC_VX_SAVE_AREA_ADDR, offsetof(struct _lowcore, vector_save_area_addr)); | 171 | DEFINE(__LC_VX_SAVE_AREA_ADDR, offsetof(struct _lowcore, vector_save_area_addr)); |
@@ -184,6 +181,5 @@ int main(void) | |||
184 | DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce)); | 181 | DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce)); |
185 | DEFINE(__SIE_PROG0C, offsetof(struct kvm_s390_sie_block, prog0c)); | 182 | DEFINE(__SIE_PROG0C, offsetof(struct kvm_s390_sie_block, prog0c)); |
186 | DEFINE(__SIE_PROG20, offsetof(struct kvm_s390_sie_block, prog20)); | 183 | DEFINE(__SIE_PROG20, offsetof(struct kvm_s390_sie_block, prog20)); |
187 | #endif /* CONFIG_32BIT */ | ||
188 | return 0; | 184 | return 0; |
189 | } | 185 | } |
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
index f74a53d339b0..daed3fde42ec 100644
--- a/arch/s390/kernel/base.S
+++ b/arch/s390/kernel/base.S
@@ -11,8 +11,6 @@ | |||
11 | #include <asm/ptrace.h> | 11 | #include <asm/ptrace.h> |
12 | #include <asm/sigp.h> | 12 | #include <asm/sigp.h> |
13 | 13 | ||
14 | #ifdef CONFIG_64BIT | ||
15 | |||
16 | ENTRY(s390_base_mcck_handler) | 14 | ENTRY(s390_base_mcck_handler) |
17 | basr %r13,0 | 15 | basr %r13,0 |
18 | 0: lg %r15,__LC_PANIC_STACK # load panic stack | 16 | 0: lg %r15,__LC_PANIC_STACK # load panic stack |
@@ -131,77 +129,3 @@ ENTRY(diag308_reset) | |||
131 | .Lfpctl: | 129 | .Lfpctl: |
132 | .long 0 | 130 | .long 0 |
133 | .previous | 131 | .previous |
134 | |||
135 | #else /* CONFIG_64BIT */ | ||
136 | |||
137 | ENTRY(s390_base_mcck_handler) | ||
138 | basr %r13,0 | ||
139 | 0: l %r15,__LC_PANIC_STACK # load panic stack | ||
140 | ahi %r15,-STACK_FRAME_OVERHEAD | ||
141 | l %r1,2f-0b(%r13) | ||
142 | l %r1,0(%r1) | ||
143 | ltr %r1,%r1 | ||
144 | jz 1f | ||
145 | basr %r14,%r1 | ||
146 | 1: lm %r0,%r15,__LC_GPREGS_SAVE_AREA | ||
147 | lpsw __LC_MCK_OLD_PSW | ||
148 | |||
149 | 2: .long s390_base_mcck_handler_fn | ||
150 | |||
151 | .section .bss | ||
152 | .align 4 | ||
153 | .globl s390_base_mcck_handler_fn | ||
154 | s390_base_mcck_handler_fn: | ||
155 | .long 0 | ||
156 | .previous | ||
157 | |||
158 | ENTRY(s390_base_ext_handler) | ||
159 | stm %r0,%r15,__LC_SAVE_AREA_ASYNC | ||
160 | basr %r13,0 | ||
161 | 0: ahi %r15,-STACK_FRAME_OVERHEAD | ||
162 | l %r1,2f-0b(%r13) | ||
163 | l %r1,0(%r1) | ||
164 | ltr %r1,%r1 | ||
165 | jz 1f | ||
166 | basr %r14,%r1 | ||
167 | 1: lm %r0,%r15,__LC_SAVE_AREA_ASYNC | ||
168 | ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit | ||
169 | lpsw __LC_EXT_OLD_PSW | ||
170 | |||
171 | 2: .long s390_base_ext_handler_fn | ||
172 | |||
173 | .section .bss | ||
174 | .align 4 | ||
175 | .globl s390_base_ext_handler_fn | ||
176 | s390_base_ext_handler_fn: | ||
177 | .long 0 | ||
178 | .previous | ||
179 | |||
180 | ENTRY(s390_base_pgm_handler) | ||
181 | stm %r0,%r15,__LC_SAVE_AREA_SYNC | ||
182 | basr %r13,0 | ||
183 | 0: ahi %r15,-STACK_FRAME_OVERHEAD | ||
184 | l %r1,2f-0b(%r13) | ||
185 | l %r1,0(%r1) | ||
186 | ltr %r1,%r1 | ||
187 | jz 1f | ||
188 | basr %r14,%r1 | ||
189 | lm %r0,%r15,__LC_SAVE_AREA_SYNC | ||
190 | lpsw __LC_PGM_OLD_PSW | ||
191 | |||
192 | 1: lpsw disabled_wait_psw-0b(%r13) | ||
193 | |||
194 | 2: .long s390_base_pgm_handler_fn | ||
195 | |||
196 | disabled_wait_psw: | ||
197 | .align 8 | ||
198 | .long 0x000a0000,0x00000000 + s390_base_pgm_handler | ||
199 | |||
200 | .section .bss | ||
201 | .align 4 | ||
202 | .globl s390_base_pgm_handler_fn | ||
203 | s390_base_pgm_handler_fn: | ||
204 | .long 0 | ||
205 | .previous | ||
206 | |||
207 | #endif /* CONFIG_64BIT */ | ||
diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c
index 0969d113b3d6..bff5e3b6d822 100644
--- a/arch/s390/kernel/cache.c
+++ b/arch/s390/kernel/cache.c
@@ -70,6 +70,8 @@ void show_cacheinfo(struct seq_file *m) | |||
70 | struct cacheinfo *cache; | 70 | struct cacheinfo *cache; |
71 | int idx; | 71 | int idx; |
72 | 72 | ||
73 | if (!test_facility(34)) | ||
74 | return; | ||
73 | get_online_cpus(); | 75 | get_online_cpus(); |
74 | this_cpu_ci = get_cpu_cacheinfo(cpumask_any(cpu_online_mask)); | 76 | this_cpu_ci = get_cpu_cacheinfo(cpumask_any(cpu_online_mask)); |
75 | for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) { | 77 | for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) { |
@@ -159,6 +161,8 @@ int populate_cache_leaves(unsigned int cpu) | |||
159 | union cache_topology ct; | 161 | union cache_topology ct; |
160 | enum cache_type ctype; | 162 | enum cache_type ctype; |
161 | 163 | ||
164 | if (!test_facility(34)) | ||
165 | return -EOPNOTSUPP; | ||
162 | ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0); | 166 | ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0); |
163 | for (idx = 0, level = 0; level < this_cpu_ci->num_levels && | 167 | for (idx = 0, level = 0; level < this_cpu_ci->num_levels && |
164 | idx < this_cpu_ci->num_leaves; idx++, level++) { | 168 | idx < this_cpu_ci->num_leaves; idx++, level++) { |
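
The two hunks above implement the "s390/cacheinfo: add missing facility check" item from the commit list: the ECAG instruction used to extract the cache topology is only available when facility bit 34 is installed, so both entry points now bail out early instead of executing an unavailable instruction. A sketch of the resulting guard, with the rest of the function elided and comments added here (test_facility() and ecag() are the kernel's existing helpers):

int populate_cache_leaves(unsigned int cpu)
{
        if (!test_facility(34))         /* no ECAG on this machine/hypervisor */
                return -EOPNOTSUPP;     /* report "unsupported" instead of faulting */
        /* ... ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0); then walk the cache levels ... */
        return 0;
}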
diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c
index d7b0c4d27880..199ec92ef4fe 100644
--- a/arch/s390/kernel/cpcmd.c
+++ b/arch/s390/kernel/cpcmd.c
@@ -27,13 +27,9 @@ static int diag8_noresponse(int cmdlen) | |||
27 | register unsigned long reg3 asm ("3") = cmdlen; | 27 | register unsigned long reg3 asm ("3") = cmdlen; |
28 | 28 | ||
29 | asm volatile( | 29 | asm volatile( |
30 | #ifndef CONFIG_64BIT | ||
31 | " diag %1,%0,0x8\n" | ||
32 | #else /* CONFIG_64BIT */ | ||
33 | " sam31\n" | 30 | " sam31\n" |
34 | " diag %1,%0,0x8\n" | 31 | " diag %1,%0,0x8\n" |
35 | " sam64\n" | 32 | " sam64\n" |
36 | #endif /* CONFIG_64BIT */ | ||
37 | : "+d" (reg3) : "d" (reg2) : "cc"); | 33 | : "+d" (reg3) : "d" (reg2) : "cc"); |
38 | return reg3; | 34 | return reg3; |
39 | } | 35 | } |
@@ -46,17 +42,11 @@ static int diag8_response(int cmdlen, char *response, int *rlen) | |||
46 | register unsigned long reg5 asm ("5") = *rlen; | 42 | register unsigned long reg5 asm ("5") = *rlen; |
47 | 43 | ||
48 | asm volatile( | 44 | asm volatile( |
49 | #ifndef CONFIG_64BIT | ||
50 | " diag %2,%0,0x8\n" | ||
51 | " brc 8,1f\n" | ||
52 | " ar %1,%4\n" | ||
53 | #else /* CONFIG_64BIT */ | ||
54 | " sam31\n" | 45 | " sam31\n" |
55 | " diag %2,%0,0x8\n" | 46 | " diag %2,%0,0x8\n" |
56 | " sam64\n" | 47 | " sam64\n" |
57 | " brc 8,1f\n" | 48 | " brc 8,1f\n" |
58 | " agr %1,%4\n" | 49 | " agr %1,%4\n" |
59 | #endif /* CONFIG_64BIT */ | ||
60 | "1:\n" | 50 | "1:\n" |
61 | : "+d" (reg4), "+d" (reg5) | 51 | : "+d" (reg4), "+d" (reg5) |
62 | : "d" (reg2), "d" (reg3), "d" (*rlen) : "cc"); | 52 | : "d" (reg2), "d" (reg3), "d" (*rlen) : "cc"); |
diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c
index 8237fc07ac79..2f69243bf700 100644
--- a/arch/s390/kernel/diag.c
+++ b/arch/s390/kernel/diag.c
@@ -18,13 +18,9 @@ int diag14(unsigned long rx, unsigned long ry1, unsigned long subcode) | |||
18 | int rc = 0; | 18 | int rc = 0; |
19 | 19 | ||
20 | asm volatile( | 20 | asm volatile( |
21 | #ifdef CONFIG_64BIT | ||
22 | " sam31\n" | 21 | " sam31\n" |
23 | " diag %2,2,0x14\n" | 22 | " diag %2,2,0x14\n" |
24 | " sam64\n" | 23 | " sam64\n" |
25 | #else | ||
26 | " diag %2,2,0x14\n" | ||
27 | #endif | ||
28 | " ipm %0\n" | 24 | " ipm %0\n" |
29 | " srl %0,28\n" | 25 | " srl %0,28\n" |
30 | : "=d" (rc), "+d" (_ry2) | 26 | : "=d" (rc), "+d" (_ry2) |
@@ -52,7 +48,6 @@ int diag210(struct diag210 *addr) | |||
52 | spin_lock_irqsave(&diag210_lock, flags); | 48 | spin_lock_irqsave(&diag210_lock, flags); |
53 | diag210_tmp = *addr; | 49 | diag210_tmp = *addr; |
54 | 50 | ||
55 | #ifdef CONFIG_64BIT | ||
56 | asm volatile( | 51 | asm volatile( |
57 | " lhi %0,-1\n" | 52 | " lhi %0,-1\n" |
58 | " sam31\n" | 53 | " sam31\n" |
@@ -62,16 +57,6 @@ int diag210(struct diag210 *addr) | |||
62 | "1: sam64\n" | 57 | "1: sam64\n" |
63 | EX_TABLE(0b, 1b) | 58 | EX_TABLE(0b, 1b) |
64 | : "=&d" (ccode) : "a" (&diag210_tmp) : "cc", "memory"); | 59 | : "=&d" (ccode) : "a" (&diag210_tmp) : "cc", "memory"); |
65 | #else | ||
66 | asm volatile( | ||
67 | " lhi %0,-1\n" | ||
68 | " diag %1,0,0x210\n" | ||
69 | "0: ipm %0\n" | ||
70 | " srl %0,28\n" | ||
71 | "1:\n" | ||
72 | EX_TABLE(0b, 1b) | ||
73 | : "=&d" (ccode) : "a" (&diag210_tmp) : "cc", "memory"); | ||
74 | #endif | ||
75 | 60 | ||
76 | *addr = diag210_tmp; | 61 | *addr = diag210_tmp; |
77 | spin_unlock_irqrestore(&diag210_lock, flags); | 62 | spin_unlock_irqrestore(&diag210_lock, flags); |
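
diag210() likewise keeps only its 64-bit variant. The surviving fixup pattern is worth spelling out: the return code is preloaded with -1, and the EX_TABLE entry maps a program check raised by the diagnose (the interrupted PSW then points at label 0) to label 1, so a hypervisor that rejects DIAG 0x210 simply yields -1 instead of a kernel program check. The same statement as above, with explanatory comments added here:

asm volatile(
        "       lhi     %0,-1\n"        /* preload "failed" return code         */
        "       sam31\n"                /* the diagnose must run in 31-bit mode */
        "       diag    %1,0,0x210\n"   /* retrieve device information          */
        "0:     ipm     %0\n"           /* insert condition code into ccode     */
        "       srl     %0,28\n"        /* shift it down to 0..3                */
        "1:     sam64\n"                /* back to 64-bit mode; fixup target    */
        EX_TABLE(0b, 1b)
        : "=&d" (ccode) : "a" (&diag210_tmp) : "cc", "memory");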
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index 533430307da8..8140d10c6785 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -32,12 +32,6 @@ | |||
32 | #include <asm/debug.h> | 32 | #include <asm/debug.h> |
33 | #include <asm/irq.h> | 33 | #include <asm/irq.h> |
34 | 34 | ||
35 | #ifndef CONFIG_64BIT | ||
36 | #define ONELONG "%08lx: " | ||
37 | #else /* CONFIG_64BIT */ | ||
38 | #define ONELONG "%016lx: " | ||
39 | #endif /* CONFIG_64BIT */ | ||
40 | |||
41 | enum { | 35 | enum { |
42 | UNUSED, /* Indicates the end of the operand list */ | 36 | UNUSED, /* Indicates the end of the operand list */ |
43 | R_8, /* GPR starting at position 8 */ | 37 | R_8, /* GPR starting at position 8 */ |
@@ -536,12 +530,10 @@ static char *long_insn_name[] = { | |||
536 | }; | 530 | }; |
537 | 531 | ||
538 | static struct s390_insn opcode[] = { | 532 | static struct s390_insn opcode[] = { |
539 | #ifdef CONFIG_64BIT | ||
540 | { "bprp", 0xc5, INSTR_MII_UPI }, | 533 | { "bprp", 0xc5, INSTR_MII_UPI }, |
541 | { "bpp", 0xc7, INSTR_SMI_U0RDP }, | 534 | { "bpp", 0xc7, INSTR_SMI_U0RDP }, |
542 | { "trtr", 0xd0, INSTR_SS_L0RDRD }, | 535 | { "trtr", 0xd0, INSTR_SS_L0RDRD }, |
543 | { "lmd", 0xef, INSTR_SS_RRRDRD3 }, | 536 | { "lmd", 0xef, INSTR_SS_RRRDRD3 }, |
544 | #endif | ||
545 | { "spm", 0x04, INSTR_RR_R0 }, | 537 | { "spm", 0x04, INSTR_RR_R0 }, |
546 | { "balr", 0x05, INSTR_RR_RR }, | 538 | { "balr", 0x05, INSTR_RR_RR }, |
547 | { "bctr", 0x06, INSTR_RR_RR }, | 539 | { "bctr", 0x06, INSTR_RR_RR }, |
@@ -725,11 +717,9 @@ static struct s390_insn opcode[] = { | |||
725 | }; | 717 | }; |
726 | 718 | ||
727 | static struct s390_insn opcode_01[] = { | 719 | static struct s390_insn opcode_01[] = { |
728 | #ifdef CONFIG_64BIT | ||
729 | { "ptff", 0x04, INSTR_E }, | 720 | { "ptff", 0x04, INSTR_E }, |
730 | { "pfpo", 0x0a, INSTR_E }, | 721 | { "pfpo", 0x0a, INSTR_E }, |
731 | { "sam64", 0x0e, INSTR_E }, | 722 | { "sam64", 0x0e, INSTR_E }, |
732 | #endif | ||
733 | { "pr", 0x01, INSTR_E }, | 723 | { "pr", 0x01, INSTR_E }, |
734 | { "upt", 0x02, INSTR_E }, | 724 | { "upt", 0x02, INSTR_E }, |
735 | { "sckpf", 0x07, INSTR_E }, | 725 | { "sckpf", 0x07, INSTR_E }, |
@@ -741,7 +731,6 @@ static struct s390_insn opcode_01[] = { | |||
741 | }; | 731 | }; |
742 | 732 | ||
743 | static struct s390_insn opcode_a5[] = { | 733 | static struct s390_insn opcode_a5[] = { |
744 | #ifdef CONFIG_64BIT | ||
745 | { "iihh", 0x00, INSTR_RI_RU }, | 734 | { "iihh", 0x00, INSTR_RI_RU }, |
746 | { "iihl", 0x01, INSTR_RI_RU }, | 735 | { "iihl", 0x01, INSTR_RI_RU }, |
747 | { "iilh", 0x02, INSTR_RI_RU }, | 736 | { "iilh", 0x02, INSTR_RI_RU }, |
@@ -758,12 +747,10 @@ static struct s390_insn opcode_a5[] = { | |||
758 | { "llihl", 0x0d, INSTR_RI_RU }, | 747 | { "llihl", 0x0d, INSTR_RI_RU }, |
759 | { "llilh", 0x0e, INSTR_RI_RU }, | 748 | { "llilh", 0x0e, INSTR_RI_RU }, |
760 | { "llill", 0x0f, INSTR_RI_RU }, | 749 | { "llill", 0x0f, INSTR_RI_RU }, |
761 | #endif | ||
762 | { "", 0, INSTR_INVALID } | 750 | { "", 0, INSTR_INVALID } |
763 | }; | 751 | }; |
764 | 752 | ||
765 | static struct s390_insn opcode_a7[] = { | 753 | static struct s390_insn opcode_a7[] = { |
766 | #ifdef CONFIG_64BIT | ||
767 | { "tmhh", 0x02, INSTR_RI_RU }, | 754 | { "tmhh", 0x02, INSTR_RI_RU }, |
768 | { "tmhl", 0x03, INSTR_RI_RU }, | 755 | { "tmhl", 0x03, INSTR_RI_RU }, |
769 | { "brctg", 0x07, INSTR_RI_RP }, | 756 | { "brctg", 0x07, INSTR_RI_RP }, |
@@ -771,7 +758,6 @@ static struct s390_insn opcode_a7[] = { | |||
771 | { "aghi", 0x0b, INSTR_RI_RI }, | 758 | { "aghi", 0x0b, INSTR_RI_RI }, |
772 | { "mghi", 0x0d, INSTR_RI_RI }, | 759 | { "mghi", 0x0d, INSTR_RI_RI }, |
773 | { "cghi", 0x0f, INSTR_RI_RI }, | 760 | { "cghi", 0x0f, INSTR_RI_RI }, |
774 | #endif | ||
775 | { "tmlh", 0x00, INSTR_RI_RU }, | 761 | { "tmlh", 0x00, INSTR_RI_RU }, |
776 | { "tmll", 0x01, INSTR_RI_RU }, | 762 | { "tmll", 0x01, INSTR_RI_RU }, |
777 | { "brc", 0x04, INSTR_RI_UP }, | 763 | { "brc", 0x04, INSTR_RI_UP }, |
@@ -785,18 +771,15 @@ static struct s390_insn opcode_a7[] = { | |||
785 | }; | 771 | }; |
786 | 772 | ||
787 | static struct s390_insn opcode_aa[] = { | 773 | static struct s390_insn opcode_aa[] = { |
788 | #ifdef CONFIG_64BIT | ||
789 | { { 0, LONG_INSN_RINEXT }, 0x00, INSTR_RI_RI }, | 774 | { { 0, LONG_INSN_RINEXT }, 0x00, INSTR_RI_RI }, |
790 | { "rion", 0x01, INSTR_RI_RI }, | 775 | { "rion", 0x01, INSTR_RI_RI }, |
791 | { "tric", 0x02, INSTR_RI_RI }, | 776 | { "tric", 0x02, INSTR_RI_RI }, |
792 | { "rioff", 0x03, INSTR_RI_RI }, | 777 | { "rioff", 0x03, INSTR_RI_RI }, |
793 | { { 0, LONG_INSN_RIEMIT }, 0x04, INSTR_RI_RI }, | 778 | { { 0, LONG_INSN_RIEMIT }, 0x04, INSTR_RI_RI }, |
794 | #endif | ||
795 | { "", 0, INSTR_INVALID } | 779 | { "", 0, INSTR_INVALID } |
796 | }; | 780 | }; |
797 | 781 | ||
798 | static struct s390_insn opcode_b2[] = { | 782 | static struct s390_insn opcode_b2[] = { |
799 | #ifdef CONFIG_64BIT | ||
800 | { "stckf", 0x7c, INSTR_S_RD }, | 783 | { "stckf", 0x7c, INSTR_S_RD }, |
801 | { "lpp", 0x80, INSTR_S_RD }, | 784 | { "lpp", 0x80, INSTR_S_RD }, |
802 | { "lcctl", 0x84, INSTR_S_RD }, | 785 | { "lcctl", 0x84, INSTR_S_RD }, |
@@ -819,7 +802,6 @@ static struct s390_insn opcode_b2[] = { | |||
819 | { "tend", 0xf8, INSTR_S_00 }, | 802 | { "tend", 0xf8, INSTR_S_00 }, |
820 | { "niai", 0xfa, INSTR_IE_UU }, | 803 | { "niai", 0xfa, INSTR_IE_UU }, |
821 | { { 0, LONG_INSN_TABORT }, 0xfc, INSTR_S_RD }, | 804 | { { 0, LONG_INSN_TABORT }, 0xfc, INSTR_S_RD }, |
822 | #endif | ||
823 | { "stidp", 0x02, INSTR_S_RD }, | 805 | { "stidp", 0x02, INSTR_S_RD }, |
824 | { "sck", 0x04, INSTR_S_RD }, | 806 | { "sck", 0x04, INSTR_S_RD }, |
825 | { "stck", 0x05, INSTR_S_RD }, | 807 | { "stck", 0x05, INSTR_S_RD }, |
@@ -908,7 +890,6 @@ static struct s390_insn opcode_b2[] = { | |||
908 | }; | 890 | }; |
909 | 891 | ||
910 | static struct s390_insn opcode_b3[] = { | 892 | static struct s390_insn opcode_b3[] = { |
911 | #ifdef CONFIG_64BIT | ||
912 | { "maylr", 0x38, INSTR_RRF_F0FF }, | 893 | { "maylr", 0x38, INSTR_RRF_F0FF }, |
913 | { "mylr", 0x39, INSTR_RRF_F0FF }, | 894 | { "mylr", 0x39, INSTR_RRF_F0FF }, |
914 | { "mayr", 0x3a, INSTR_RRF_F0FF }, | 895 | { "mayr", 0x3a, INSTR_RRF_F0FF }, |
@@ -996,7 +977,6 @@ static struct s390_insn opcode_b3[] = { | |||
996 | { "qaxtr", 0xfd, INSTR_RRF_FUFF }, | 977 | { "qaxtr", 0xfd, INSTR_RRF_FUFF }, |
997 | { "iextr", 0xfe, INSTR_RRF_F0FR }, | 978 | { "iextr", 0xfe, INSTR_RRF_F0FR }, |
998 | { "rrxtr", 0xff, INSTR_RRF_FFRU }, | 979 | { "rrxtr", 0xff, INSTR_RRF_FFRU }, |
999 | #endif | ||
1000 | { "lpebr", 0x00, INSTR_RRE_FF }, | 980 | { "lpebr", 0x00, INSTR_RRE_FF }, |
1001 | { "lnebr", 0x01, INSTR_RRE_FF }, | 981 | { "lnebr", 0x01, INSTR_RRE_FF }, |
1002 | { "ltebr", 0x02, INSTR_RRE_FF }, | 982 | { "ltebr", 0x02, INSTR_RRE_FF }, |
@@ -1091,7 +1071,6 @@ static struct s390_insn opcode_b3[] = { | |||
1091 | }; | 1071 | }; |
1092 | 1072 | ||
1093 | static struct s390_insn opcode_b9[] = { | 1073 | static struct s390_insn opcode_b9[] = { |
1094 | #ifdef CONFIG_64BIT | ||
1095 | { "lpgr", 0x00, INSTR_RRE_RR }, | 1074 | { "lpgr", 0x00, INSTR_RRE_RR }, |
1096 | { "lngr", 0x01, INSTR_RRE_RR }, | 1075 | { "lngr", 0x01, INSTR_RRE_RR }, |
1097 | { "ltgr", 0x02, INSTR_RRE_RR }, | 1076 | { "ltgr", 0x02, INSTR_RRE_RR }, |
@@ -1204,7 +1183,6 @@ static struct s390_insn opcode_b9[] = { | |||
1204 | { "srk", 0xf9, INSTR_RRF_R0RR2 }, | 1183 | { "srk", 0xf9, INSTR_RRF_R0RR2 }, |
1205 | { "alrk", 0xfa, INSTR_RRF_R0RR2 }, | 1184 | { "alrk", 0xfa, INSTR_RRF_R0RR2 }, |
1206 | { "slrk", 0xfb, INSTR_RRF_R0RR2 }, | 1185 | { "slrk", 0xfb, INSTR_RRF_R0RR2 }, |
1207 | #endif | ||
1208 | { "kmac", 0x1e, INSTR_RRE_RR }, | 1186 | { "kmac", 0x1e, INSTR_RRE_RR }, |
1209 | { "lrvr", 0x1f, INSTR_RRE_RR }, | 1187 | { "lrvr", 0x1f, INSTR_RRE_RR }, |
1210 | { "km", 0x2e, INSTR_RRE_RR }, | 1188 | { "km", 0x2e, INSTR_RRE_RR }, |
@@ -1224,7 +1202,6 @@ static struct s390_insn opcode_b9[] = { | |||
1224 | }; | 1202 | }; |
1225 | 1203 | ||
1226 | static struct s390_insn opcode_c0[] = { | 1204 | static struct s390_insn opcode_c0[] = { |
1227 | #ifdef CONFIG_64BIT | ||
1228 | { "lgfi", 0x01, INSTR_RIL_RI }, | 1205 | { "lgfi", 0x01, INSTR_RIL_RI }, |
1229 | { "xihf", 0x06, INSTR_RIL_RU }, | 1206 | { "xihf", 0x06, INSTR_RIL_RU }, |
1230 | { "xilf", 0x07, INSTR_RIL_RU }, | 1207 | { "xilf", 0x07, INSTR_RIL_RU }, |
@@ -1236,7 +1213,6 @@ static struct s390_insn opcode_c0[] = { | |||
1236 | { "oilf", 0x0d, INSTR_RIL_RU }, | 1213 | { "oilf", 0x0d, INSTR_RIL_RU }, |
1237 | { "llihf", 0x0e, INSTR_RIL_RU }, | 1214 | { "llihf", 0x0e, INSTR_RIL_RU }, |
1238 | { "llilf", 0x0f, INSTR_RIL_RU }, | 1215 | { "llilf", 0x0f, INSTR_RIL_RU }, |
1239 | #endif | ||
1240 | { "larl", 0x00, INSTR_RIL_RP }, | 1216 | { "larl", 0x00, INSTR_RIL_RP }, |
1241 | { "brcl", 0x04, INSTR_RIL_UP }, | 1217 | { "brcl", 0x04, INSTR_RIL_UP }, |
1242 | { "brasl", 0x05, INSTR_RIL_RP }, | 1218 | { "brasl", 0x05, INSTR_RIL_RP }, |
@@ -1244,7 +1220,6 @@ static struct s390_insn opcode_c0[] = { | |||
1244 | }; | 1220 | }; |
1245 | 1221 | ||
1246 | static struct s390_insn opcode_c2[] = { | 1222 | static struct s390_insn opcode_c2[] = { |
1247 | #ifdef CONFIG_64BIT | ||
1248 | { "msgfi", 0x00, INSTR_RIL_RI }, | 1223 | { "msgfi", 0x00, INSTR_RIL_RI }, |
1249 | { "msfi", 0x01, INSTR_RIL_RI }, | 1224 | { "msfi", 0x01, INSTR_RIL_RI }, |
1250 | { "slgfi", 0x04, INSTR_RIL_RU }, | 1225 | { "slgfi", 0x04, INSTR_RIL_RU }, |
@@ -1257,12 +1232,10 @@ static struct s390_insn opcode_c2[] = { | |||
1257 | { "cfi", 0x0d, INSTR_RIL_RI }, | 1232 | { "cfi", 0x0d, INSTR_RIL_RI }, |
1258 | { "clgfi", 0x0e, INSTR_RIL_RU }, | 1233 | { "clgfi", 0x0e, INSTR_RIL_RU }, |
1259 | { "clfi", 0x0f, INSTR_RIL_RU }, | 1234 | { "clfi", 0x0f, INSTR_RIL_RU }, |
1260 | #endif | ||
1261 | { "", 0, INSTR_INVALID } | 1235 | { "", 0, INSTR_INVALID } |
1262 | }; | 1236 | }; |
1263 | 1237 | ||
1264 | static struct s390_insn opcode_c4[] = { | 1238 | static struct s390_insn opcode_c4[] = { |
1265 | #ifdef CONFIG_64BIT | ||
1266 | { "llhrl", 0x02, INSTR_RIL_RP }, | 1239 | { "llhrl", 0x02, INSTR_RIL_RP }, |
1267 | { "lghrl", 0x04, INSTR_RIL_RP }, | 1240 | { "lghrl", 0x04, INSTR_RIL_RP }, |
1268 | { "lhrl", 0x05, INSTR_RIL_RP }, | 1241 | { "lhrl", 0x05, INSTR_RIL_RP }, |
@@ -1274,12 +1247,10 @@ static struct s390_insn opcode_c4[] = { | |||
1274 | { "lrl", 0x0d, INSTR_RIL_RP }, | 1247 | { "lrl", 0x0d, INSTR_RIL_RP }, |
1275 | { { 0, LONG_INSN_LLGFRL }, 0x0e, INSTR_RIL_RP }, | 1248 | { { 0, LONG_INSN_LLGFRL }, 0x0e, INSTR_RIL_RP }, |
1276 | { "strl", 0x0f, INSTR_RIL_RP }, | 1249 | { "strl", 0x0f, INSTR_RIL_RP }, |
1277 | #endif | ||
1278 | { "", 0, INSTR_INVALID } | 1250 | { "", 0, INSTR_INVALID } |
1279 | }; | 1251 | }; |
1280 | 1252 | ||
1281 | static struct s390_insn opcode_c6[] = { | 1253 | static struct s390_insn opcode_c6[] = { |
1282 | #ifdef CONFIG_64BIT | ||
1283 | { "exrl", 0x00, INSTR_RIL_RP }, | 1254 | { "exrl", 0x00, INSTR_RIL_RP }, |
1284 | { "pfdrl", 0x02, INSTR_RIL_UP }, | 1255 | { "pfdrl", 0x02, INSTR_RIL_UP }, |
1285 | { "cghrl", 0x04, INSTR_RIL_RP }, | 1256 | { "cghrl", 0x04, INSTR_RIL_RP }, |
@@ -1292,35 +1263,29 @@ static struct s390_insn opcode_c6[] = { | |||
1292 | { "crl", 0x0d, INSTR_RIL_RP }, | 1263 | { "crl", 0x0d, INSTR_RIL_RP }, |
1293 | { { 0, LONG_INSN_CLGFRL }, 0x0e, INSTR_RIL_RP }, | 1264 | { { 0, LONG_INSN_CLGFRL }, 0x0e, INSTR_RIL_RP }, |
1294 | { "clrl", 0x0f, INSTR_RIL_RP }, | 1265 | { "clrl", 0x0f, INSTR_RIL_RP }, |
1295 | #endif | ||
1296 | { "", 0, INSTR_INVALID } | 1266 | { "", 0, INSTR_INVALID } |
1297 | }; | 1267 | }; |
1298 | 1268 | ||
1299 | static struct s390_insn opcode_c8[] = { | 1269 | static struct s390_insn opcode_c8[] = { |
1300 | #ifdef CONFIG_64BIT | ||
1301 | { "mvcos", 0x00, INSTR_SSF_RRDRD }, | 1270 | { "mvcos", 0x00, INSTR_SSF_RRDRD }, |
1302 | { "ectg", 0x01, INSTR_SSF_RRDRD }, | 1271 | { "ectg", 0x01, INSTR_SSF_RRDRD }, |
1303 | { "csst", 0x02, INSTR_SSF_RRDRD }, | 1272 | { "csst", 0x02, INSTR_SSF_RRDRD }, |
1304 | { "lpd", 0x04, INSTR_SSF_RRDRD2 }, | 1273 | { "lpd", 0x04, INSTR_SSF_RRDRD2 }, |
1305 | { "lpdg", 0x05, INSTR_SSF_RRDRD2 }, | 1274 | { "lpdg", 0x05, INSTR_SSF_RRDRD2 }, |
1306 | #endif | ||
1307 | { "", 0, INSTR_INVALID } | 1275 | { "", 0, INSTR_INVALID } |
1308 | }; | 1276 | }; |
1309 | 1277 | ||
1310 | static struct s390_insn opcode_cc[] = { | 1278 | static struct s390_insn opcode_cc[] = { |
1311 | #ifdef CONFIG_64BIT | ||
1312 | { "brcth", 0x06, INSTR_RIL_RP }, | 1279 | { "brcth", 0x06, INSTR_RIL_RP }, |
1313 | { "aih", 0x08, INSTR_RIL_RI }, | 1280 | { "aih", 0x08, INSTR_RIL_RI }, |
1314 | { "alsih", 0x0a, INSTR_RIL_RI }, | 1281 | { "alsih", 0x0a, INSTR_RIL_RI }, |
1315 | { { 0, LONG_INSN_ALSIHN }, 0x0b, INSTR_RIL_RI }, | 1282 | { { 0, LONG_INSN_ALSIHN }, 0x0b, INSTR_RIL_RI }, |
1316 | { "cih", 0x0d, INSTR_RIL_RI }, | 1283 | { "cih", 0x0d, INSTR_RIL_RI }, |
1317 | { "clih", 0x0f, INSTR_RIL_RI }, | 1284 | { "clih", 0x0f, INSTR_RIL_RI }, |
1318 | #endif | ||
1319 | { "", 0, INSTR_INVALID } | 1285 | { "", 0, INSTR_INVALID } |
1320 | }; | 1286 | }; |
1321 | 1287 | ||
1322 | static struct s390_insn opcode_e3[] = { | 1288 | static struct s390_insn opcode_e3[] = { |
1323 | #ifdef CONFIG_64BIT | ||
1324 | { "ltg", 0x02, INSTR_RXY_RRRD }, | 1289 | { "ltg", 0x02, INSTR_RXY_RRRD }, |
1325 | { "lrag", 0x03, INSTR_RXY_RRRD }, | 1290 | { "lrag", 0x03, INSTR_RXY_RRRD }, |
1326 | { "lg", 0x04, INSTR_RXY_RRRD }, | 1291 | { "lg", 0x04, INSTR_RXY_RRRD }, |
@@ -1414,7 +1379,6 @@ static struct s390_insn opcode_e3[] = { | |||
1414 | { "clhf", 0xcf, INSTR_RXY_RRRD }, | 1379 | { "clhf", 0xcf, INSTR_RXY_RRRD }, |
1415 | { { 0, LONG_INSN_MPCIFC }, 0xd0, INSTR_RXY_RRRD }, | 1380 | { { 0, LONG_INSN_MPCIFC }, 0xd0, INSTR_RXY_RRRD }, |
1416 | { { 0, LONG_INSN_STPCIFC }, 0xd4, INSTR_RXY_RRRD }, | 1381 | { { 0, LONG_INSN_STPCIFC }, 0xd4, INSTR_RXY_RRRD }, |
1417 | #endif | ||
1418 | { "lrv", 0x1e, INSTR_RXY_RRRD }, | 1382 | { "lrv", 0x1e, INSTR_RXY_RRRD }, |
1419 | { "lrvh", 0x1f, INSTR_RXY_RRRD }, | 1383 | { "lrvh", 0x1f, INSTR_RXY_RRRD }, |
1420 | { "strv", 0x3e, INSTR_RXY_RRRD }, | 1384 | { "strv", 0x3e, INSTR_RXY_RRRD }, |
@@ -1426,7 +1390,6 @@ static struct s390_insn opcode_e3[] = { | |||
1426 | }; | 1390 | }; |
1427 | 1391 | ||
1428 | static struct s390_insn opcode_e5[] = { | 1392 | static struct s390_insn opcode_e5[] = { |
1429 | #ifdef CONFIG_64BIT | ||
1430 | { "strag", 0x02, INSTR_SSE_RDRD }, | 1393 | { "strag", 0x02, INSTR_SSE_RDRD }, |
1431 | { "mvhhi", 0x44, INSTR_SIL_RDI }, | 1394 | { "mvhhi", 0x44, INSTR_SIL_RDI }, |
1432 | { "mvghi", 0x48, INSTR_SIL_RDI }, | 1395 | { "mvghi", 0x48, INSTR_SIL_RDI }, |
@@ -1439,7 +1402,6 @@ static struct s390_insn opcode_e5[] = { | |||
1439 | { { 0, LONG_INSN_CLFHSI }, 0x5d, INSTR_SIL_RDU }, | 1402 | { { 0, LONG_INSN_CLFHSI }, 0x5d, INSTR_SIL_RDU }, |
1440 | { { 0, LONG_INSN_TBEGIN }, 0x60, INSTR_SIL_RDU }, | 1403 | { { 0, LONG_INSN_TBEGIN }, 0x60, INSTR_SIL_RDU }, |
1441 | { { 0, LONG_INSN_TBEGINC }, 0x61, INSTR_SIL_RDU }, | 1404 | { { 0, LONG_INSN_TBEGINC }, 0x61, INSTR_SIL_RDU }, |
1442 | #endif | ||
1443 | { "lasp", 0x00, INSTR_SSE_RDRD }, | 1405 | { "lasp", 0x00, INSTR_SSE_RDRD }, |
1444 | { "tprot", 0x01, INSTR_SSE_RDRD }, | 1406 | { "tprot", 0x01, INSTR_SSE_RDRD }, |
1445 | { "mvcsk", 0x0e, INSTR_SSE_RDRD }, | 1407 | { "mvcsk", 0x0e, INSTR_SSE_RDRD }, |
@@ -1448,7 +1410,6 @@ static struct s390_insn opcode_e5[] = { | |||
1448 | }; | 1410 | }; |
1449 | 1411 | ||
1450 | static struct s390_insn opcode_e7[] = { | 1412 | static struct s390_insn opcode_e7[] = { |
1451 | #ifdef CONFIG_64BIT | ||
1452 | { "lcbb", 0x27, INSTR_RXE_RRRDM }, | 1413 | { "lcbb", 0x27, INSTR_RXE_RRRDM }, |
1453 | { "vgef", 0x13, INSTR_VRV_VVRDM }, | 1414 | { "vgef", 0x13, INSTR_VRV_VVRDM }, |
1454 | { "vgeg", 0x12, INSTR_VRV_VVRDM }, | 1415 | { "vgeg", 0x12, INSTR_VRV_VVRDM }, |
@@ -1588,11 +1549,9 @@ static struct s390_insn opcode_e7[] = { | |||
1588 | { "vfsq", 0xce, INSTR_VRR_VV000MM }, | 1549 | { "vfsq", 0xce, INSTR_VRR_VV000MM }, |
1589 | { "vfs", 0xe2, INSTR_VRR_VVV00MM }, | 1550 | { "vfs", 0xe2, INSTR_VRR_VVV00MM }, |
1590 | { "vftci", 0x4a, INSTR_VRI_VVIMM }, | 1551 | { "vftci", 0x4a, INSTR_VRI_VVIMM }, |
1591 | #endif | ||
1592 | }; | 1552 | }; |
1593 | 1553 | ||
1594 | static struct s390_insn opcode_eb[] = { | 1554 | static struct s390_insn opcode_eb[] = { |
1595 | #ifdef CONFIG_64BIT | ||
1596 | { "lmg", 0x04, INSTR_RSY_RRRD }, | 1555 | { "lmg", 0x04, INSTR_RSY_RRRD }, |
1597 | { "srag", 0x0a, INSTR_RSY_RRRD }, | 1556 | { "srag", 0x0a, INSTR_RSY_RRRD }, |
1598 | { "slag", 0x0b, INSTR_RSY_RRRD }, | 1557 | { "slag", 0x0b, INSTR_RSY_RRRD }, |
@@ -1659,7 +1618,6 @@ static struct s390_insn opcode_eb[] = { | |||
1659 | { "stric", 0x61, INSTR_RSY_RDRM }, | 1618 | { "stric", 0x61, INSTR_RSY_RDRM }, |
1660 | { "mric", 0x62, INSTR_RSY_RDRM }, | 1619 | { "mric", 0x62, INSTR_RSY_RDRM }, |
1661 | { { 0, LONG_INSN_STCCTM }, 0x17, INSTR_RSY_RMRD }, | 1620 | { { 0, LONG_INSN_STCCTM }, 0x17, INSTR_RSY_RMRD }, |
1662 | #endif | ||
1663 | { "rll", 0x1d, INSTR_RSY_RRRD }, | 1621 | { "rll", 0x1d, INSTR_RSY_RRRD }, |
1664 | { "mvclu", 0x8e, INSTR_RSY_RRRD }, | 1622 | { "mvclu", 0x8e, INSTR_RSY_RRRD }, |
1665 | { "tp", 0xc0, INSTR_RSL_R0RD }, | 1623 | { "tp", 0xc0, INSTR_RSL_R0RD }, |
@@ -1667,7 +1625,6 @@ static struct s390_insn opcode_eb[] = { | |||
1667 | }; | 1625 | }; |
1668 | 1626 | ||
1669 | static struct s390_insn opcode_ec[] = { | 1627 | static struct s390_insn opcode_ec[] = { |
1670 | #ifdef CONFIG_64BIT | ||
1671 | { "brxhg", 0x44, INSTR_RIE_RRP }, | 1628 | { "brxhg", 0x44, INSTR_RIE_RRP }, |
1672 | { "brxlg", 0x45, INSTR_RIE_RRP }, | 1629 | { "brxlg", 0x45, INSTR_RIE_RRP }, |
1673 | { { 0, LONG_INSN_RISBLG }, 0x51, INSTR_RIE_RRUUU }, | 1630 | { { 0, LONG_INSN_RISBLG }, 0x51, INSTR_RIE_RRUUU }, |
@@ -1701,12 +1658,10 @@ static struct s390_insn opcode_ec[] = { | |||
1701 | { "clgib", 0xfd, INSTR_RIS_RURDU }, | 1658 | { "clgib", 0xfd, INSTR_RIS_RURDU }, |
1702 | { "cib", 0xfe, INSTR_RIS_RURDI }, | 1659 | { "cib", 0xfe, INSTR_RIS_RURDI }, |
1703 | { "clib", 0xff, INSTR_RIS_RURDU }, | 1660 | { "clib", 0xff, INSTR_RIS_RURDU }, |
1704 | #endif | ||
1705 | { "", 0, INSTR_INVALID } | 1661 | { "", 0, INSTR_INVALID } |
1706 | }; | 1662 | }; |
1707 | 1663 | ||
1708 | static struct s390_insn opcode_ed[] = { | 1664 | static struct s390_insn opcode_ed[] = { |
1709 | #ifdef CONFIG_64BIT | ||
1710 | { "mayl", 0x38, INSTR_RXF_FRRDF }, | 1665 | { "mayl", 0x38, INSTR_RXF_FRRDF }, |
1711 | { "myl", 0x39, INSTR_RXF_FRRDF }, | 1666 | { "myl", 0x39, INSTR_RXF_FRRDF }, |
1712 | { "may", 0x3a, INSTR_RXF_FRRDF }, | 1667 | { "may", 0x3a, INSTR_RXF_FRRDF }, |
@@ -1731,7 +1686,6 @@ static struct s390_insn opcode_ed[] = { | |||
1731 | { "czxt", 0xa9, INSTR_RSL_LRDFU }, | 1686 | { "czxt", 0xa9, INSTR_RSL_LRDFU }, |
1732 | { "cdzt", 0xaa, INSTR_RSL_LRDFU }, | 1687 | { "cdzt", 0xaa, INSTR_RSL_LRDFU }, |
1733 | { "cxzt", 0xab, INSTR_RSL_LRDFU }, | 1688 | { "cxzt", 0xab, INSTR_RSL_LRDFU }, |
1734 | #endif | ||
1735 | { "ldeb", 0x04, INSTR_RXE_FRRD }, | 1689 | { "ldeb", 0x04, INSTR_RXE_FRRD }, |
1736 | { "lxdb", 0x05, INSTR_RXE_FRRD }, | 1690 | { "lxdb", 0x05, INSTR_RXE_FRRD }, |
1737 | { "lxeb", 0x06, INSTR_RXE_FRRD }, | 1691 | { "lxeb", 0x06, INSTR_RXE_FRRD }, |
@@ -2051,7 +2005,7 @@ void show_code(struct pt_regs *regs) | |||
2051 | else | 2005 | else |
2052 | *ptr++ = ' '; | 2006 | *ptr++ = ' '; |
2053 | addr = regs->psw.addr + start - 32; | 2007 | addr = regs->psw.addr + start - 32; |
2054 | ptr += sprintf(ptr, ONELONG, addr); | 2008 | ptr += sprintf(ptr, "%016lx: ", addr); |
2055 | if (start + opsize >= end) | 2009 | if (start + opsize >= end) |
2056 | break; | 2010 | break; |
2057 | for (i = 0; i < opsize; i++) | 2011 | for (i = 0; i < opsize; i++) |
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index a99852e96a77..dc8e20473484 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -18,16 +18,6 @@ | |||
18 | #include <asm/dis.h> | 18 | #include <asm/dis.h> |
19 | #include <asm/ipl.h> | 19 | #include <asm/ipl.h> |
20 | 20 | ||
21 | #ifndef CONFIG_64BIT | ||
22 | #define LONG "%08lx " | ||
23 | #define FOURLONG "%08lx %08lx %08lx %08lx\n" | ||
24 | static int kstack_depth_to_print = 12; | ||
25 | #else /* CONFIG_64BIT */ | ||
26 | #define LONG "%016lx " | ||
27 | #define FOURLONG "%016lx %016lx %016lx %016lx\n" | ||
28 | static int kstack_depth_to_print = 20; | ||
29 | #endif /* CONFIG_64BIT */ | ||
30 | |||
31 | /* | 21 | /* |
32 | * For show_trace we have tree different stack to consider: | 22 | * For show_trace we have tree different stack to consider: |
33 | * - the panic stack which is used if the kernel stack has overflown | 23 | * - the panic stack which is used if the kernel stack has overflown |
@@ -115,12 +105,12 @@ void show_stack(struct task_struct *task, unsigned long *sp) | |||
115 | else | 105 | else |
116 | stack = sp; | 106 | stack = sp; |
117 | 107 | ||
118 | for (i = 0; i < kstack_depth_to_print; i++) { | 108 | for (i = 0; i < 20; i++) { |
119 | if (((addr_t) stack & (THREAD_SIZE-1)) == 0) | 109 | if (((addr_t) stack & (THREAD_SIZE-1)) == 0) |
120 | break; | 110 | break; |
121 | if ((i * sizeof(long) % 32) == 0) | 111 | if ((i * sizeof(long) % 32) == 0) |
122 | printk("%s ", i == 0 ? "" : "\n"); | 112 | printk("%s ", i == 0 ? "" : "\n"); |
123 | printk(LONG, *stack++); | 113 | printk("%016lx ", *stack++); |
124 | } | 114 | } |
125 | printk("\n"); | 115 | printk("\n"); |
126 | show_trace(task, sp); | 116 | show_trace(task, sp); |
@@ -128,10 +118,8 @@ void show_stack(struct task_struct *task, unsigned long *sp) | |||
128 | 118 | ||
129 | static void show_last_breaking_event(struct pt_regs *regs) | 119 | static void show_last_breaking_event(struct pt_regs *regs) |
130 | { | 120 | { |
131 | #ifdef CONFIG_64BIT | ||
132 | printk("Last Breaking-Event-Address:\n"); | 121 | printk("Last Breaking-Event-Address:\n"); |
133 | printk(" [<%016lx>] %pSR\n", regs->args[0], (void *)regs->args[0]); | 122 | printk(" [<%016lx>] %pSR\n", regs->args[0], (void *)regs->args[0]); |
134 | #endif | ||
135 | } | 123 | } |
136 | 124 | ||
137 | static inline int mask_bits(struct pt_regs *regs, unsigned long bits) | 125 | static inline int mask_bits(struct pt_regs *regs, unsigned long bits) |
@@ -155,16 +143,14 @@ void show_registers(struct pt_regs *regs) | |||
155 | mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT), | 143 | mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT), |
156 | mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC), | 144 | mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC), |
157 | mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM)); | 145 | mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM)); |
158 | #ifdef CONFIG_64BIT | ||
159 | printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA)); | 146 | printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA)); |
160 | #endif | 147 | printk("\n%s GPRS: %016lx %016lx %016lx %016lx\n", mode, |
161 | printk("\n%s GPRS: " FOURLONG, mode, | ||
162 | regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]); | 148 | regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]); |
163 | printk(" " FOURLONG, | 149 | printk(" %016lx %016lx %016lx %016lx\n", |
164 | regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]); | 150 | regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]); |
165 | printk(" " FOURLONG, | 151 | printk(" %016lx %016lx %016lx %016lx\n", |
166 | regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]); | 152 | regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]); |
167 | printk(" " FOURLONG, | 153 | printk(" %016lx %016lx %016lx %016lx\n", |
168 | regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]); | 154 | regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]); |
169 | show_code(regs); | 155 | show_code(regs); |
170 | } | 156 | } |
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 4427ab7ac23a..549a73a4b543 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -64,7 +64,6 @@ asm( | |||
64 | " .align 4\n" | 64 | " .align 4\n" |
65 | " .type savesys_ipl_nss, @function\n" | 65 | " .type savesys_ipl_nss, @function\n" |
66 | "savesys_ipl_nss:\n" | 66 | "savesys_ipl_nss:\n" |
67 | #ifdef CONFIG_64BIT | ||
68 | " stmg 6,15,48(15)\n" | 67 | " stmg 6,15,48(15)\n" |
69 | " lgr 14,3\n" | 68 | " lgr 14,3\n" |
70 | " sam31\n" | 69 | " sam31\n" |
@@ -72,13 +71,6 @@ asm( | |||
72 | " sam64\n" | 71 | " sam64\n" |
73 | " lgr 2,14\n" | 72 | " lgr 2,14\n" |
74 | " lmg 6,15,48(15)\n" | 73 | " lmg 6,15,48(15)\n" |
75 | #else | ||
76 | " stm 6,15,24(15)\n" | ||
77 | " lr 14,3\n" | ||
78 | " diag 2,14,0x8\n" | ||
79 | " lr 2,14\n" | ||
80 | " lm 6,15,24(15)\n" | ||
81 | #endif | ||
82 | " br 14\n" | 74 | " br 14\n" |
83 | " .size savesys_ipl_nss, .-savesys_ipl_nss\n" | 75 | " .size savesys_ipl_nss, .-savesys_ipl_nss\n" |
84 | " .previous\n"); | 76 | " .previous\n"); |
@@ -240,7 +232,6 @@ static noinline __init void detect_machine_type(void) | |||
240 | 232 | ||
241 | static __init void setup_topology(void) | 233 | static __init void setup_topology(void) |
242 | { | 234 | { |
243 | #ifdef CONFIG_64BIT | ||
244 | int max_mnest; | 235 | int max_mnest; |
245 | 236 | ||
246 | if (!test_facility(11)) | 237 | if (!test_facility(11)) |
@@ -251,7 +242,6 @@ static __init void setup_topology(void) | |||
251 | break; | 242 | break; |
252 | } | 243 | } |
253 | topology_max_mnest = max_mnest; | 244 | topology_max_mnest = max_mnest; |
254 | #endif | ||
255 | } | 245 | } |
256 | 246 | ||
257 | static void early_pgm_check_handler(void) | 247 | static void early_pgm_check_handler(void) |
@@ -290,58 +280,6 @@ static noinline __init void setup_facility_list(void) | |||
290 | ARRAY_SIZE(S390_lowcore.stfle_fac_list)); | 280 | ARRAY_SIZE(S390_lowcore.stfle_fac_list)); |
291 | } | 281 | } |
292 | 282 | ||
293 | static __init void detect_mvpg(void) | ||
294 | { | ||
295 | #ifndef CONFIG_64BIT | ||
296 | int rc; | ||
297 | |||
298 | asm volatile( | ||
299 | " la 0,0\n" | ||
300 | " mvpg %2,%2\n" | ||
301 | "0: la %0,0\n" | ||
302 | "1:\n" | ||
303 | EX_TABLE(0b,1b) | ||
304 | : "=d" (rc) : "0" (-EOPNOTSUPP), "a" (0) : "memory", "cc", "0"); | ||
305 | if (!rc) | ||
306 | S390_lowcore.machine_flags |= MACHINE_FLAG_MVPG; | ||
307 | #endif | ||
308 | } | ||
309 | |||
310 | static __init void detect_ieee(void) | ||
311 | { | ||
312 | #ifndef CONFIG_64BIT | ||
313 | int rc, tmp; | ||
314 | |||
315 | asm volatile( | ||
316 | " efpc %1,0\n" | ||
317 | "0: la %0,0\n" | ||
318 | "1:\n" | ||
319 | EX_TABLE(0b,1b) | ||
320 | : "=d" (rc), "=d" (tmp): "0" (-EOPNOTSUPP) : "cc"); | ||
321 | if (!rc) | ||
322 | S390_lowcore.machine_flags |= MACHINE_FLAG_IEEE; | ||
323 | #endif | ||
324 | } | ||
325 | |||
326 | static __init void detect_csp(void) | ||
327 | { | ||
328 | #ifndef CONFIG_64BIT | ||
329 | int rc; | ||
330 | |||
331 | asm volatile( | ||
332 | " la 0,0\n" | ||
333 | " la 1,0\n" | ||
334 | " la 2,4\n" | ||
335 | " csp 0,2\n" | ||
336 | "0: la %0,0\n" | ||
337 | "1:\n" | ||
338 | EX_TABLE(0b,1b) | ||
339 | : "=d" (rc) : "0" (-EOPNOTSUPP) : "cc", "0", "1", "2"); | ||
340 | if (!rc) | ||
341 | S390_lowcore.machine_flags |= MACHINE_FLAG_CSP; | ||
342 | #endif | ||
343 | } | ||
344 | |||
345 | static __init void detect_diag9c(void) | 283 | static __init void detect_diag9c(void) |
346 | { | 284 | { |
347 | unsigned int cpu_address; | 285 | unsigned int cpu_address; |
@@ -360,7 +298,6 @@ static __init void detect_diag9c(void) | |||
360 | 298 | ||
361 | static __init void detect_diag44(void) | 299 | static __init void detect_diag44(void) |
362 | { | 300 | { |
363 | #ifdef CONFIG_64BIT | ||
364 | int rc; | 301 | int rc; |
365 | 302 | ||
366 | asm volatile( | 303 | asm volatile( |
@@ -371,12 +308,10 @@ static __init void detect_diag44(void) | |||
371 | : "=d" (rc) : "0" (-EOPNOTSUPP) : "cc"); | 308 | : "=d" (rc) : "0" (-EOPNOTSUPP) : "cc"); |
372 | if (!rc) | 309 | if (!rc) |
373 | S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG44; | 310 | S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG44; |
374 | #endif | ||
375 | } | 311 | } |
376 | 312 | ||
377 | static __init void detect_machine_facilities(void) | 313 | static __init void detect_machine_facilities(void) |
378 | { | 314 | { |
379 | #ifdef CONFIG_64BIT | ||
380 | if (test_facility(8)) { | 315 | if (test_facility(8)) { |
381 | S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT1; | 316 | S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT1; |
382 | __ctl_set_bit(0, 23); | 317 | __ctl_set_bit(0, 23); |
@@ -393,7 +328,6 @@ static __init void detect_machine_facilities(void) | |||
393 | S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC; | 328 | S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC; |
394 | if (test_facility(129)) | 329 | if (test_facility(129)) |
395 | S390_lowcore.machine_flags |= MACHINE_FLAG_VX; | 330 | S390_lowcore.machine_flags |= MACHINE_FLAG_VX; |
396 | #endif | ||
397 | } | 331 | } |
398 | 332 | ||
399 | static int __init cad_setup(char *str) | 333 | static int __init cad_setup(char *str) |
@@ -501,9 +435,6 @@ void __init startup_init(void) | |||
501 | ipl_update_parameters(); | 435 | ipl_update_parameters(); |
502 | setup_boot_command_line(); | 436 | setup_boot_command_line(); |
503 | create_kernel_nss(); | 437 | create_kernel_nss(); |
504 | detect_mvpg(); | ||
505 | detect_ieee(); | ||
506 | detect_csp(); | ||
507 | detect_diag9c(); | 438 | detect_diag9c(); |
508 | detect_diag44(); | 439 | detect_diag44(); |
509 | detect_machine_facilities(); | 440 | detect_machine_facilities(); |
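
early.c drops the three 31-bit-only probes (detect_mvpg, detect_ieee, detect_csp) entirely, and the remaining detection code loses its #ifdefs. What is left is the usual facility-to-machine-flag mapping; an abridged sketch with comments added here (the middle of the function is elided):

static __init void detect_machine_facilities(void)
{
        if (test_facility(8)) {                 /* enhanced DAT 1 */
                S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT1;
                __ctl_set_bit(0, 23);           /* enable it in control register 0 */
        }
        /* ... further facility bits are mirrored into machine_flags the same way ... */
        if (test_facility(129))                 /* vector facility */
                S390_lowcore.machine_flags |= MACHINE_FLAG_VX;
}

Later code can then test a cheap machine flag instead of re-reading the facility list.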
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 398329b2b518..99b44acbfcc7 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -22,27 +22,28 @@ | |||
22 | #include <asm/irq.h> | 22 | #include <asm/irq.h> |
23 | 23 | ||
24 | __PT_R0 = __PT_GPRS | 24 | __PT_R0 = __PT_GPRS |
25 | __PT_R1 = __PT_GPRS + 4 | 25 | __PT_R1 = __PT_GPRS + 8 |
26 | __PT_R2 = __PT_GPRS + 8 | 26 | __PT_R2 = __PT_GPRS + 16 |
27 | __PT_R3 = __PT_GPRS + 12 | 27 | __PT_R3 = __PT_GPRS + 24 |
28 | __PT_R4 = __PT_GPRS + 16 | 28 | __PT_R4 = __PT_GPRS + 32 |
29 | __PT_R5 = __PT_GPRS + 20 | 29 | __PT_R5 = __PT_GPRS + 40 |
30 | __PT_R6 = __PT_GPRS + 24 | 30 | __PT_R6 = __PT_GPRS + 48 |
31 | __PT_R7 = __PT_GPRS + 28 | 31 | __PT_R7 = __PT_GPRS + 56 |
32 | __PT_R8 = __PT_GPRS + 32 | 32 | __PT_R8 = __PT_GPRS + 64 |
33 | __PT_R9 = __PT_GPRS + 36 | 33 | __PT_R9 = __PT_GPRS + 72 |
34 | __PT_R10 = __PT_GPRS + 40 | 34 | __PT_R10 = __PT_GPRS + 80 |
35 | __PT_R11 = __PT_GPRS + 44 | 35 | __PT_R11 = __PT_GPRS + 88 |
36 | __PT_R12 = __PT_GPRS + 48 | 36 | __PT_R12 = __PT_GPRS + 96 |
37 | __PT_R13 = __PT_GPRS + 52 | 37 | __PT_R13 = __PT_GPRS + 104 |
38 | __PT_R14 = __PT_GPRS + 56 | 38 | __PT_R14 = __PT_GPRS + 112 |
39 | __PT_R15 = __PT_GPRS + 60 | 39 | __PT_R15 = __PT_GPRS + 120 |
40 | 40 | ||
41 | STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER | 41 | STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER |
42 | STACK_SIZE = 1 << STACK_SHIFT | 42 | STACK_SIZE = 1 << STACK_SHIFT |
43 | STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE | 43 | STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE |
44 | 44 | ||
45 | _TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED) | 45 | _TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ |
46 | _TIF_UPROBE) | ||
46 | _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ | 47 | _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ |
47 | _TIF_SYSCALL_TRACEPOINT) | 48 | _TIF_SYSCALL_TRACEPOINT) |
48 | _CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE) | 49 | _CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE) |
@@ -53,16 +54,14 @@ _PIF_WORK = (_PIF_PER_TRAP) | |||
53 | .macro TRACE_IRQS_ON | 54 | .macro TRACE_IRQS_ON |
54 | #ifdef CONFIG_TRACE_IRQFLAGS | 55 | #ifdef CONFIG_TRACE_IRQFLAGS |
55 | basr %r2,%r0 | 56 | basr %r2,%r0 |
56 | l %r1,BASED(.Lc_hardirqs_on) | 57 | brasl %r14,trace_hardirqs_on_caller |
57 | basr %r14,%r1 # call trace_hardirqs_on_caller | ||
58 | #endif | 58 | #endif |
59 | .endm | 59 | .endm |
60 | 60 | ||
61 | .macro TRACE_IRQS_OFF | 61 | .macro TRACE_IRQS_OFF |
62 | #ifdef CONFIG_TRACE_IRQFLAGS | 62 | #ifdef CONFIG_TRACE_IRQFLAGS |
63 | basr %r2,%r0 | 63 | basr %r2,%r0 |
64 | l %r1,BASED(.Lc_hardirqs_off) | 64 | brasl %r14,trace_hardirqs_off_caller |
65 | basr %r14,%r1 # call trace_hardirqs_off_caller | ||
66 | #endif | 65 | #endif |
67 | .endm | 66 | .endm |
68 | 67 | ||
@@ -70,73 +69,104 @@ _PIF_WORK = (_PIF_PER_TRAP) | |||
70 | #ifdef CONFIG_LOCKDEP | 69 | #ifdef CONFIG_LOCKDEP |
71 | tm __PT_PSW+1(%r11),0x01 # returning to user ? | 70 | tm __PT_PSW+1(%r11),0x01 # returning to user ? |
72 | jz .+10 | 71 | jz .+10 |
73 | l %r1,BASED(.Lc_lockdep_sys_exit) | 72 | brasl %r14,lockdep_sys_exit |
74 | basr %r14,%r1 # call lockdep_sys_exit | 73 | #endif |
74 | .endm | ||
75 | |||
76 | .macro LPP newpp | ||
77 | #if IS_ENABLED(CONFIG_KVM) | ||
78 | tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_LPP | ||
79 | jz .+8 | ||
80 | .insn s,0xb2800000,\newpp | ||
81 | #endif | ||
82 | .endm | ||
83 | |||
84 | .macro HANDLE_SIE_INTERCEPT scratch,reason | ||
85 | #if IS_ENABLED(CONFIG_KVM) | ||
86 | tmhh %r8,0x0001 # interrupting from user ? | ||
87 | jnz .+62 | ||
88 | lgr \scratch,%r9 | ||
89 | slg \scratch,BASED(.Lsie_critical) | ||
90 | clg \scratch,BASED(.Lsie_critical_length) | ||
91 | .if \reason==1 | ||
92 | # Some program interrupts are suppressing (e.g. protection). | ||
93 | # We must also check the instruction after SIE in that case. | ||
94 | # do_protection_exception will rewind to .Lrewind_pad | ||
95 | jh .+42 | ||
96 | .else | ||
97 | jhe .+42 | ||
98 | .endif | ||
99 | lg %r14,__SF_EMPTY(%r15) # get control block pointer | ||
100 | LPP __SF_EMPTY+16(%r15) # set host id | ||
101 | ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE | ||
102 | lctlg %c1,%c1,__LC_USER_ASCE # load primary asce | ||
103 | larl %r9,sie_exit # skip forward to sie_exit | ||
104 | mvi __SF_EMPTY+31(%r15),\reason # set exit reason | ||
75 | #endif | 105 | #endif |
76 | .endm | 106 | .endm |
77 | 107 | ||
78 | .macro CHECK_STACK stacksize,savearea | 108 | .macro CHECK_STACK stacksize,savearea |
79 | #ifdef CONFIG_CHECK_STACK | 109 | #ifdef CONFIG_CHECK_STACK |
80 | tml %r15,\stacksize - CONFIG_STACK_GUARD | 110 | tml %r15,\stacksize - CONFIG_STACK_GUARD |
81 | la %r14,\savearea | 111 | lghi %r14,\savearea |
82 | jz stack_overflow | 112 | jz stack_overflow |
83 | #endif | 113 | #endif |
84 | .endm | 114 | .endm |
85 | 115 | ||
86 | .macro SWITCH_ASYNC savearea,stack,shift | 116 | .macro SWITCH_ASYNC savearea,stack,shift |
87 | tmh %r8,0x0001 # interrupting from user ? | 117 | tmhh %r8,0x0001 # interrupting from user ? |
88 | jnz 1f | 118 | jnz 1f |
89 | lr %r14,%r9 | 119 | lgr %r14,%r9 |
90 | sl %r14,BASED(.Lc_critical_start) | 120 | slg %r14,BASED(.Lcritical_start) |
91 | cl %r14,BASED(.Lc_critical_length) | 121 | clg %r14,BASED(.Lcritical_length) |
92 | jhe 0f | 122 | jhe 0f |
93 | la %r11,\savearea # inside critical section, do cleanup | 123 | lghi %r11,\savearea # inside critical section, do cleanup |
94 | bras %r14,cleanup_critical | 124 | brasl %r14,cleanup_critical |
95 | tmh %r8,0x0001 # retest problem state after cleanup | 125 | tmhh %r8,0x0001 # retest problem state after cleanup |
96 | jnz 1f | 126 | jnz 1f |
97 | 0: l %r14,\stack # are we already on the target stack? | 127 | 0: lg %r14,\stack # are we already on the target stack? |
98 | slr %r14,%r15 | 128 | slgr %r14,%r15 |
99 | sra %r14,\shift | 129 | srag %r14,%r14,\shift |
100 | jnz 1f | 130 | jnz 1f |
101 | CHECK_STACK 1<<\shift,\savearea | 131 | CHECK_STACK 1<<\shift,\savearea |
102 | ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) | 132 | aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) |
103 | j 2f | 133 | j 2f |
104 | 1: l %r15,\stack # load target stack | 134 | 1: lg %r15,\stack # load target stack |
105 | 2: la %r11,STACK_FRAME_OVERHEAD(%r15) | 135 | 2: la %r11,STACK_FRAME_OVERHEAD(%r15) |
106 | .endm | 136 | .endm |
107 | 137 | ||
108 | .macro ADD64 high,low,timer | 138 | .macro UPDATE_VTIME scratch,enter_timer |
109 | al \high,\timer | 139 | lg \scratch,__LC_EXIT_TIMER |
110 | al \low,4+\timer | 140 | slg \scratch,\enter_timer |
111 | brc 12,.+8 | 141 | alg \scratch,__LC_USER_TIMER |
112 | ahi \high,1 | 142 | stg \scratch,__LC_USER_TIMER |
113 | .endm | 143 | lg \scratch,__LC_LAST_UPDATE_TIMER |
114 | 144 | slg \scratch,__LC_EXIT_TIMER | |
115 | .macro SUB64 high,low,timer | 145 | alg \scratch,__LC_SYSTEM_TIMER |
116 | sl \high,\timer | 146 | stg \scratch,__LC_SYSTEM_TIMER |
117 | sl \low,4+\timer | 147 | mvc __LC_LAST_UPDATE_TIMER(8),\enter_timer |
118 | brc 3,.+8 | ||
119 | ahi \high,-1 | ||
120 | .endm | 148 | .endm |
121 | 149 | ||
122 | .macro UPDATE_VTIME high,low,enter_timer | 150 | .macro LAST_BREAK scratch |
123 | lm \high,\low,__LC_EXIT_TIMER | 151 | srag \scratch,%r10,23 |
124 | SUB64 \high,\low,\enter_timer | 152 | jz .+10 |
125 | ADD64 \high,\low,__LC_USER_TIMER | 153 | stg %r10,__TI_last_break(%r12) |
126 | stm \high,\low,__LC_USER_TIMER | ||
127 | lm \high,\low,__LC_LAST_UPDATE_TIMER | ||
128 | SUB64 \high,\low,__LC_EXIT_TIMER | ||
129 | ADD64 \high,\low,__LC_SYSTEM_TIMER | ||
130 | stm \high,\low,__LC_SYSTEM_TIMER | ||
131 | mvc __LC_LAST_UPDATE_TIMER(8),\enter_timer | ||
132 | .endm | 154 | .endm |
133 | 155 | ||
134 | .macro REENABLE_IRQS | 156 | .macro REENABLE_IRQS |
135 | st %r8,__LC_RETURN_PSW | 157 | stg %r8,__LC_RETURN_PSW |
136 | ni __LC_RETURN_PSW,0xbf | 158 | ni __LC_RETURN_PSW,0xbf |
137 | ssm __LC_RETURN_PSW | 159 | ssm __LC_RETURN_PSW |
138 | .endm | 160 | .endm |
139 | 161 | ||
162 | .macro STCK savearea | ||
163 | #ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES | ||
164 | .insn s,0xb27c0000,\savearea # store clock fast | ||
165 | #else | ||
166 | .insn s,0xb2050000,\savearea # store clock | ||
167 | #endif | ||
168 | .endm | ||
169 | |||
140 | .section .kprobes.text, "ax" | 170 | .section .kprobes.text, "ax" |
141 | 171 | ||
142 | /* | 172 | /* |
@@ -147,19 +177,19 @@ _PIF_WORK = (_PIF_PER_TRAP) | |||
147 | * gpr2 = prev | 177 | * gpr2 = prev |
148 | */ | 178 | */ |
149 | ENTRY(__switch_to) | 179 | ENTRY(__switch_to) |
150 | stm %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task | 180 | stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task |
151 | st %r15,__THREAD_ksp(%r2) # store kernel stack of prev | 181 | stg %r15,__THREAD_ksp(%r2) # store kernel stack of prev |
152 | l %r4,__THREAD_info(%r2) # get thread_info of prev | 182 | lg %r4,__THREAD_info(%r2) # get thread_info of prev |
153 | l %r5,__THREAD_info(%r3) # get thread_info of next | 183 | lg %r5,__THREAD_info(%r3) # get thread_info of next |
154 | lr %r15,%r5 | 184 | lgr %r15,%r5 |
155 | ahi %r15,STACK_INIT # end of kernel stack of next | 185 | aghi %r15,STACK_INIT # end of kernel stack of next |
156 | st %r3,__LC_CURRENT # store task struct of next | 186 | stg %r3,__LC_CURRENT # store task struct of next |
157 | st %r5,__LC_THREAD_INFO # store thread info of next | 187 | stg %r5,__LC_THREAD_INFO # store thread info of next |
158 | st %r15,__LC_KERNEL_STACK # store end of kernel stack | 188 | stg %r15,__LC_KERNEL_STACK # store end of kernel stack |
159 | lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4 | 189 | lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4 |
160 | mvc __LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next | 190 | mvc __LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next |
161 | l %r15,__THREAD_ksp(%r3) # load kernel stack of next | 191 | lg %r15,__THREAD_ksp(%r3) # load kernel stack of next |
162 | lm %r6,%r15,__SF_GPRS(%r15) # load gprs of next task | 192 | lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task |
163 | br %r14 | 193 | br %r14 |
164 | 194 | ||
165 | .L__critical_start: | 195 | .L__critical_start: |
@@ -170,75 +200,83 @@ ENTRY(__switch_to) | |||
170 | 200 | ||
171 | ENTRY(system_call) | 201 | ENTRY(system_call) |
172 | stpt __LC_SYNC_ENTER_TIMER | 202 | stpt __LC_SYNC_ENTER_TIMER |
173 | .Lsysc_stm: | 203 | .Lsysc_stmg: |
174 | stm %r8,%r15,__LC_SAVE_AREA_SYNC | 204 | stmg %r8,%r15,__LC_SAVE_AREA_SYNC |
175 | l %r12,__LC_THREAD_INFO | 205 | lg %r10,__LC_LAST_BREAK |
176 | l %r13,__LC_SVC_NEW_PSW+4 | 206 | lg %r12,__LC_THREAD_INFO |
177 | lhi %r14,_PIF_SYSCALL | 207 | lghi %r14,_PIF_SYSCALL |
178 | .Lsysc_per: | 208 | .Lsysc_per: |
179 | l %r15,__LC_KERNEL_STACK | 209 | lg %r15,__LC_KERNEL_STACK |
180 | la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs | 210 | la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs |
181 | .Lsysc_vtime: | 211 | .Lsysc_vtime: |
182 | UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER | 212 | UPDATE_VTIME %r13,__LC_SYNC_ENTER_TIMER |
183 | stm %r0,%r7,__PT_R0(%r11) | 213 | LAST_BREAK %r13 |
184 | mvc __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC | 214 | stmg %r0,%r7,__PT_R0(%r11) |
185 | mvc __PT_PSW(8,%r11),__LC_SVC_OLD_PSW | 215 | mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC |
216 | mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW | ||
186 | mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC | 217 | mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC |
187 | st %r14,__PT_FLAGS(%r11) | 218 | stg %r14,__PT_FLAGS(%r11) |
188 | .Lsysc_do_svc: | 219 | .Lsysc_do_svc: |
189 | l %r10,__TI_sysc_table(%r12) # 31 bit system call table | 220 | lg %r10,__TI_sysc_table(%r12) # address of system call table |
190 | lh %r8,__PT_INT_CODE+2(%r11) | 221 | llgh %r8,__PT_INT_CODE+2(%r11) |
191 | sla %r8,2 # shift and test for svc0 | 222 | slag %r8,%r8,2 # shift and test for svc 0 |
192 | jnz .Lsysc_nr_ok | 223 | jnz .Lsysc_nr_ok |
193 | # svc 0: system call number in %r1 | 224 | # svc 0: system call number in %r1 |
194 | cl %r1,BASED(.Lnr_syscalls) | 225 | llgfr %r1,%r1 # clear high word in r1 |
226 | cghi %r1,NR_syscalls | ||
195 | jnl .Lsysc_nr_ok | 227 | jnl .Lsysc_nr_ok |
196 | sth %r1,__PT_INT_CODE+2(%r11) | 228 | sth %r1,__PT_INT_CODE+2(%r11) |
197 | lr %r8,%r1 | 229 | slag %r8,%r1,2 |
198 | sla %r8,2 | ||
199 | .Lsysc_nr_ok: | 230 | .Lsysc_nr_ok: |
200 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) | 231 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) |
201 | st %r2,__PT_ORIG_GPR2(%r11) | 232 | stg %r2,__PT_ORIG_GPR2(%r11) |
202 | st %r7,STACK_FRAME_OVERHEAD(%r15) | 233 | stg %r7,STACK_FRAME_OVERHEAD(%r15) |
203 | l %r9,0(%r8,%r10) # get system call addr. | 234 | lgf %r9,0(%r8,%r10) # get system call addr. |
204 | tm __TI_flags+3(%r12),_TIF_TRACE | 235 | tm __TI_flags+7(%r12),_TIF_TRACE |
205 | jnz .Lsysc_tracesys | 236 | jnz .Lsysc_tracesys |
206 | basr %r14,%r9 # call sys_xxxx | 237 | basr %r14,%r9 # call sys_xxxx |
207 | st %r2,__PT_R2(%r11) # store return value | 238 | stg %r2,__PT_R2(%r11) # store return value |
208 | 239 | ||
209 | .Lsysc_return: | 240 | .Lsysc_return: |
210 | LOCKDEP_SYS_EXIT | 241 | LOCKDEP_SYS_EXIT |
211 | .Lsysc_tif: | 242 | .Lsysc_tif: |
212 | tm __PT_PSW+1(%r11),0x01 # returning to user ? | 243 | tm __PT_PSW+1(%r11),0x01 # returning to user ? |
213 | jno .Lsysc_restore | 244 | jno .Lsysc_restore |
214 | tm __PT_FLAGS+3(%r11),_PIF_WORK | 245 | tm __PT_FLAGS+7(%r11),_PIF_WORK |
215 | jnz .Lsysc_work | 246 | jnz .Lsysc_work |
216 | tm __TI_flags+3(%r12),_TIF_WORK | 247 | tm __TI_flags+7(%r12),_TIF_WORK |
217 | jnz .Lsysc_work # check for thread work | 248 | jnz .Lsysc_work # check for work |
218 | tm __LC_CPU_FLAGS+3,_CIF_WORK | 249 | tm __LC_CPU_FLAGS+7,_CIF_WORK |
219 | jnz .Lsysc_work | 250 | jnz .Lsysc_work |
220 | .Lsysc_restore: | 251 | .Lsysc_restore: |
221 | mvc __LC_RETURN_PSW(8),__PT_PSW(%r11) | 252 | lg %r14,__LC_VDSO_PER_CPU |
253 | lmg %r0,%r10,__PT_R0(%r11) | ||
254 | mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) | ||
222 | stpt __LC_EXIT_TIMER | 255 | stpt __LC_EXIT_TIMER |
223 | lm %r0,%r15,__PT_R0(%r11) | 256 | mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER |
224 | lpsw __LC_RETURN_PSW | 257 | lmg %r11,%r15,__PT_R11(%r11) |
258 | lpswe __LC_RETURN_PSW | ||
225 | .Lsysc_done: | 259 | .Lsysc_done: |
226 | 260 | ||
227 | # | 261 | # |
228 | # One of the work bits is on. Find out which one. | 262 | # One of the work bits is on. Find out which one. |
229 | # | 263 | # |
230 | .Lsysc_work: | 264 | .Lsysc_work: |
231 | tm __LC_CPU_FLAGS+3,_CIF_MCCK_PENDING | 265 | tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING |
232 | jo .Lsysc_mcck_pending | 266 | jo .Lsysc_mcck_pending |
233 | tm __TI_flags+3(%r12),_TIF_NEED_RESCHED | 267 | tm __TI_flags+7(%r12),_TIF_NEED_RESCHED |
234 | jo .Lsysc_reschedule | 268 | jo .Lsysc_reschedule |
235 | tm __PT_FLAGS+3(%r11),_PIF_PER_TRAP | 269 | #ifdef CONFIG_UPROBES |
270 | tm __TI_flags+7(%r12),_TIF_UPROBE | ||
271 | jo .Lsysc_uprobe_notify | ||
272 | #endif | ||
273 | tm __PT_FLAGS+7(%r11),_PIF_PER_TRAP | ||
236 | jo .Lsysc_singlestep | 274 | jo .Lsysc_singlestep |
237 | tm __TI_flags+3(%r12),_TIF_SIGPENDING | 275 | tm __TI_flags+7(%r12),_TIF_SIGPENDING |
238 | jo .Lsysc_sigpending | 276 | jo .Lsysc_sigpending |
239 | tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME | 277 | tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME |
240 | jo .Lsysc_notify_resume | 278 | jo .Lsysc_notify_resume |
241 | tm __LC_CPU_FLAGS+3,_CIF_ASCE | 279 | tm __LC_CPU_FLAGS+7,_CIF_ASCE |
242 | jo .Lsysc_uaccess | 280 | jo .Lsysc_uaccess |
243 | j .Lsysc_return # beware of critical section cleanup | 281 | j .Lsysc_return # beware of critical section cleanup |
244 | 282 | ||
@@ -246,109 +284,109 @@ ENTRY(system_call) | |||
246 | # _TIF_NEED_RESCHED is set, call schedule | 284 | # _TIF_NEED_RESCHED is set, call schedule |
247 | # | 285 | # |
248 | .Lsysc_reschedule: | 286 | .Lsysc_reschedule: |
249 | l %r1,BASED(.Lc_schedule) | 287 | larl %r14,.Lsysc_return |
250 | la %r14,BASED(.Lsysc_return) | 288 | jg schedule |
251 | br %r1 # call schedule | ||
252 | 289 | ||
253 | # | 290 | # |
254 | # _CIF_MCCK_PENDING is set, call handler | 291 | # _CIF_MCCK_PENDING is set, call handler |
255 | # | 292 | # |
256 | .Lsysc_mcck_pending: | 293 | .Lsysc_mcck_pending: |
257 | l %r1,BASED(.Lc_handle_mcck) | 294 | larl %r14,.Lsysc_return |
258 | la %r14,BASED(.Lsysc_return) | 295 | jg s390_handle_mcck # TIF bit will be cleared by handler |
259 | br %r1 # TIF bit will be cleared by handler | ||
260 | 296 | ||
261 | # | 297 | # |
262 | # _CIF_ASCE is set, load user space asce | 298 | # _CIF_ASCE is set, load user space asce |
263 | # | 299 | # |
264 | .Lsysc_uaccess: | 300 | .Lsysc_uaccess: |
265 | ni __LC_CPU_FLAGS+3,255-_CIF_ASCE | 301 | ni __LC_CPU_FLAGS+7,255-_CIF_ASCE |
266 | lctl %c1,%c1,__LC_USER_ASCE # load primary asce | 302 | lctlg %c1,%c1,__LC_USER_ASCE # load primary asce |
267 | j .Lsysc_return | 303 | j .Lsysc_return |
268 | 304 | ||
269 | # | 305 | # |
270 | # _TIF_SIGPENDING is set, call do_signal | 306 | # _TIF_SIGPENDING is set, call do_signal |
271 | # | 307 | # |
272 | .Lsysc_sigpending: | 308 | .Lsysc_sigpending: |
273 | lr %r2,%r11 # pass pointer to pt_regs | 309 | lgr %r2,%r11 # pass pointer to pt_regs |
274 | l %r1,BASED(.Lc_do_signal) | 310 | brasl %r14,do_signal |
275 | basr %r14,%r1 # call do_signal | 311 | tm __PT_FLAGS+7(%r11),_PIF_SYSCALL |
276 | tm __PT_FLAGS+3(%r11),_PIF_SYSCALL | ||
277 | jno .Lsysc_return | 312 | jno .Lsysc_return |
278 | lm %r2,%r7,__PT_R2(%r11) # load svc arguments | 313 | lmg %r2,%r7,__PT_R2(%r11) # load svc arguments |
279 | l %r10,__TI_sysc_table(%r12) # 31 bit system call table | 314 | lg %r10,__TI_sysc_table(%r12) # address of system call table |
280 | xr %r8,%r8 # svc 0 returns -ENOSYS | 315 | lghi %r8,0 # svc 0 returns -ENOSYS |
281 | clc __PT_INT_CODE+2(2,%r11),BASED(.Lnr_syscalls+2) | 316 | llgh %r1,__PT_INT_CODE+2(%r11) # load new svc number |
317 | cghi %r1,NR_syscalls | ||
282 | jnl .Lsysc_nr_ok # invalid svc number -> do svc 0 | 318 | jnl .Lsysc_nr_ok # invalid svc number -> do svc 0 |
283 | lh %r8,__PT_INT_CODE+2(%r11) # load new svc number | 319 | slag %r8,%r1,2 |
284 | sla %r8,2 | ||
285 | j .Lsysc_nr_ok # restart svc | 320 | j .Lsysc_nr_ok # restart svc |
286 | 321 | ||
287 | # | 322 | # |
288 | # _TIF_NOTIFY_RESUME is set, call do_notify_resume | 323 | # _TIF_NOTIFY_RESUME is set, call do_notify_resume |
289 | # | 324 | # |
290 | .Lsysc_notify_resume: | 325 | .Lsysc_notify_resume: |
291 | lr %r2,%r11 # pass pointer to pt_regs | 326 | lgr %r2,%r11 # pass pointer to pt_regs |
292 | l %r1,BASED(.Lc_do_notify_resume) | 327 | larl %r14,.Lsysc_return |
293 | la %r14,BASED(.Lsysc_return) | 328 | jg do_notify_resume |
294 | br %r1 # call do_notify_resume | 329 | |
330 | # | ||
331 | # _TIF_UPROBE is set, call uprobe_notify_resume | ||
332 | # | ||
333 | #ifdef CONFIG_UPROBES | ||
334 | .Lsysc_uprobe_notify: | ||
335 | lgr %r2,%r11 # pass pointer to pt_regs | ||
336 | larl %r14,.Lsysc_return | ||
337 | jg uprobe_notify_resume | ||
338 | #endif | ||
295 | 339 | ||
296 | # | 340 | # |
297 | # _PIF_PER_TRAP is set, call do_per_trap | 341 | # _PIF_PER_TRAP is set, call do_per_trap |
298 | # | 342 | # |
299 | .Lsysc_singlestep: | 343 | .Lsysc_singlestep: |
300 | ni __PT_FLAGS+3(%r11),255-_PIF_PER_TRAP | 344 | ni __PT_FLAGS+7(%r11),255-_PIF_PER_TRAP |
301 | lr %r2,%r11 # pass pointer to pt_regs | 345 | lgr %r2,%r11 # pass pointer to pt_regs |
302 | l %r1,BASED(.Lc_do_per_trap) | 346 | larl %r14,.Lsysc_return |
303 | la %r14,BASED(.Lsysc_return) | 347 | jg do_per_trap |
304 | br %r1 # call do_per_trap | ||
305 | 348 | ||
306 | # | 349 | # |
307 | # call tracehook_report_syscall_entry/tracehook_report_syscall_exit before | 350 | # call tracehook_report_syscall_entry/tracehook_report_syscall_exit before |
308 | # and after the system call | 351 | # and after the system call |
309 | # | 352 | # |
310 | .Lsysc_tracesys: | 353 | .Lsysc_tracesys: |
311 | l %r1,BASED(.Lc_trace_enter) | 354 | lgr %r2,%r11 # pass pointer to pt_regs |
312 | lr %r2,%r11 # pass pointer to pt_regs | ||
313 | la %r3,0 | 355 | la %r3,0 |
314 | xr %r0,%r0 | 356 | llgh %r0,__PT_INT_CODE+2(%r11) |
315 | icm %r0,3,__PT_INT_CODE+2(%r11) | 357 | stg %r0,__PT_R2(%r11) |
316 | st %r0,__PT_R2(%r11) | 358 | brasl %r14,do_syscall_trace_enter |
317 | basr %r14,%r1 # call do_syscall_trace_enter | 359 | lghi %r0,NR_syscalls |
318 | cl %r2,BASED(.Lnr_syscalls) | 360 | clgr %r0,%r2 |
319 | jnl .Lsysc_tracenogo | 361 | jnh .Lsysc_tracenogo |
320 | lr %r8,%r2 | 362 | sllg %r8,%r2,2 |
321 | sll %r8,2 | 363 | lgf %r9,0(%r8,%r10) |
322 | l %r9,0(%r8,%r10) | ||
323 | .Lsysc_tracego: | 364 | .Lsysc_tracego: |
324 | lm %r3,%r7,__PT_R3(%r11) | 365 | lmg %r3,%r7,__PT_R3(%r11) |
325 | st %r7,STACK_FRAME_OVERHEAD(%r15) | 366 | stg %r7,STACK_FRAME_OVERHEAD(%r15) |
326 | l %r2,__PT_ORIG_GPR2(%r11) | 367 | lg %r2,__PT_ORIG_GPR2(%r11) |
327 | basr %r14,%r9 # call sys_xxx | 368 | basr %r14,%r9 # call sys_xxx |
328 | st %r2,__PT_R2(%r11) # store return value | 369 | stg %r2,__PT_R2(%r11) # store return value |
329 | .Lsysc_tracenogo: | 370 | .Lsysc_tracenogo: |
330 | tm __TI_flags+3(%r12),_TIF_TRACE | 371 | tm __TI_flags+7(%r12),_TIF_TRACE |
331 | jz .Lsysc_return | 372 | jz .Lsysc_return |
332 | l %r1,BASED(.Lc_trace_exit) | 373 | lgr %r2,%r11 # pass pointer to pt_regs |
333 | lr %r2,%r11 # pass pointer to pt_regs | 374 | larl %r14,.Lsysc_return |
334 | la %r14,BASED(.Lsysc_return) | 375 | jg do_syscall_trace_exit |
335 | br %r1 # call do_syscall_trace_exit | ||
336 | 376 | ||
337 | # | 377 | # |
338 | # a new process exits the kernel with ret_from_fork | 378 | # a new process exits the kernel with ret_from_fork |
339 | # | 379 | # |
340 | ENTRY(ret_from_fork) | 380 | ENTRY(ret_from_fork) |
341 | la %r11,STACK_FRAME_OVERHEAD(%r15) | 381 | la %r11,STACK_FRAME_OVERHEAD(%r15) |
342 | l %r12,__LC_THREAD_INFO | 382 | lg %r12,__LC_THREAD_INFO |
343 | l %r13,__LC_SVC_NEW_PSW+4 | 383 | brasl %r14,schedule_tail |
344 | l %r1,BASED(.Lc_schedule_tail) | ||
345 | basr %r14,%r1 # call schedule_tail | ||
346 | TRACE_IRQS_ON | 384 | TRACE_IRQS_ON |
347 | ssm __LC_SVC_NEW_PSW # reenable interrupts | 385 | ssm __LC_SVC_NEW_PSW # reenable interrupts |
348 | tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ? | 386 | tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ? |
349 | jne .Lsysc_tracenogo | 387 | jne .Lsysc_tracenogo |
350 | # it's a kernel thread | 388 | # it's a kernel thread |
351 | lm %r9,%r10,__PT_R9(%r11) # load gprs | 389 | lmg %r9,%r10,__PT_R9(%r11) # load gprs |
352 | ENTRY(kernel_thread_starter) | 390 | ENTRY(kernel_thread_starter) |
353 | la %r2,0(%r10) | 391 | la %r2,0(%r10) |
354 | basr %r14,%r9 | 392 | basr %r14,%r9 |
@@ -360,46 +398,54 @@ ENTRY(kernel_thread_starter) | |||
360 | 398 | ||
361 | ENTRY(pgm_check_handler) | 399 | ENTRY(pgm_check_handler) |
362 | stpt __LC_SYNC_ENTER_TIMER | 400 | stpt __LC_SYNC_ENTER_TIMER |
363 | stm %r8,%r15,__LC_SAVE_AREA_SYNC | 401 | stmg %r8,%r15,__LC_SAVE_AREA_SYNC |
364 | l %r12,__LC_THREAD_INFO | 402 | lg %r10,__LC_LAST_BREAK |
365 | l %r13,__LC_SVC_NEW_PSW+4 | 403 | lg %r12,__LC_THREAD_INFO |
366 | lm %r8,%r9,__LC_PGM_OLD_PSW | 404 | larl %r13,system_call |
367 | tmh %r8,0x0001 # test problem state bit | 405 | lmg %r8,%r9,__LC_PGM_OLD_PSW |
406 | HANDLE_SIE_INTERCEPT %r14,1 | ||
407 | tmhh %r8,0x0001 # test problem state bit | ||
368 | jnz 1f # -> fault in user space | 408 | jnz 1f # -> fault in user space |
369 | tmh %r8,0x4000 # PER bit set in old PSW ? | 409 | tmhh %r8,0x4000 # PER bit set in old PSW ? |
370 | jnz 0f # -> enabled, can't be a double fault | 410 | jnz 0f # -> enabled, can't be a double fault |
371 | tm __LC_PGM_ILC+3,0x80 # check for per exception | 411 | tm __LC_PGM_ILC+3,0x80 # check for per exception |
372 | jnz .Lpgm_svcper # -> single stepped svc | 412 | jnz .Lpgm_svcper # -> single stepped svc |
373 | 0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC | 413 | 0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC |
374 | ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) | 414 | aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) |
375 | j 2f | 415 | j 2f |
376 | 1: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER | 416 | 1: UPDATE_VTIME %r14,__LC_SYNC_ENTER_TIMER |
377 | l %r15,__LC_KERNEL_STACK | 417 | LAST_BREAK %r14 |
418 | lg %r15,__LC_KERNEL_STACK | ||
419 | lg %r14,__TI_task(%r12) | ||
420 | lghi %r13,__LC_PGM_TDB | ||
421 | tm __LC_PGM_ILC+2,0x02 # check for transaction abort | ||
422 | jz 2f | ||
423 | mvc __THREAD_trap_tdb(256,%r14),0(%r13) | ||
378 | 2: la %r11,STACK_FRAME_OVERHEAD(%r15) | 424 | 2: la %r11,STACK_FRAME_OVERHEAD(%r15) |
379 | stm %r0,%r7,__PT_R0(%r11) | 425 | stmg %r0,%r7,__PT_R0(%r11) |
380 | mvc __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC | 426 | mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC |
381 | stm %r8,%r9,__PT_PSW(%r11) | 427 | stmg %r8,%r9,__PT_PSW(%r11) |
382 | mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC | 428 | mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC |
383 | mvc __PT_INT_PARM_LONG(4,%r11),__LC_TRANS_EXC_CODE | 429 | mvc __PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE |
384 | xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11) | 430 | xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) |
431 | stg %r10,__PT_ARGS(%r11) | ||
385 | tm __LC_PGM_ILC+3,0x80 # check for per exception | 432 | tm __LC_PGM_ILC+3,0x80 # check for per exception |
386 | jz 0f | 433 | jz 0f |
387 | l %r1,__TI_task(%r12) | 434 | tmhh %r8,0x0001 # kernel per event ? |
388 | tmh %r8,0x0001 # kernel per event ? | ||
389 | jz .Lpgm_kprobe | 435 | jz .Lpgm_kprobe |
390 | oi __PT_FLAGS+3(%r11),_PIF_PER_TRAP | 436 | oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP |
391 | mvc __THREAD_per_address(4,%r1),__LC_PER_ADDRESS | 437 | mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS |
392 | mvc __THREAD_per_cause(2,%r1),__LC_PER_CODE | 438 | mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE |
393 | mvc __THREAD_per_paid(1,%r1),__LC_PER_ACCESS_ID | 439 | mvc __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID |
394 | 0: REENABLE_IRQS | 440 | 0: REENABLE_IRQS |
395 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) | 441 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) |
396 | l %r1,BASED(.Lc_jump_table) | 442 | larl %r1,pgm_check_table |
397 | la %r10,0x7f | 443 | llgh %r10,__PT_INT_CODE+2(%r11) |
398 | n %r10,__PT_INT_CODE(%r11) | 444 | nill %r10,0x007f |
399 | je .Lsysc_return | ||
400 | sll %r10,2 | 445 | sll %r10,2 |
401 | l %r1,0(%r10,%r1) # load address of handler routine | 446 | je .Lsysc_return |
402 | lr %r2,%r11 # pass pointer to pt_regs | 447 | lgf %r1,0(%r10,%r1) # load address of handler routine |
448 | lgr %r2,%r11 # pass pointer to pt_regs | ||
403 | basr %r14,%r1 # branch to interrupt-handler | 449 | basr %r14,%r1 # branch to interrupt-handler |
404 | j .Lsysc_return | 450 | j .Lsysc_return |
405 | 451 | ||
@@ -408,54 +454,55 @@ ENTRY(pgm_check_handler) | |||
408 | # | 454 | # |
409 | .Lpgm_kprobe: | 455 | .Lpgm_kprobe: |
410 | REENABLE_IRQS | 456 | REENABLE_IRQS |
411 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) | 457 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) |
412 | l %r1,BASED(.Lc_do_per_trap) | 458 | lgr %r2,%r11 # pass pointer to pt_regs |
413 | lr %r2,%r11 # pass pointer to pt_regs | 459 | brasl %r14,do_per_trap |
414 | basr %r14,%r1 # call do_per_trap | ||
415 | j .Lsysc_return | 460 | j .Lsysc_return |
416 | 461 | ||
417 | # | 462 | # |
418 | # single stepped system call | 463 | # single stepped system call |
419 | # | 464 | # |
420 | .Lpgm_svcper: | 465 | .Lpgm_svcper: |
421 | mvc __LC_RETURN_PSW(4),__LC_SVC_NEW_PSW | 466 | mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW |
422 | mvc __LC_RETURN_PSW+4(4),BASED(.Lc_sysc_per) | 467 | larl %r14,.Lsysc_per |
423 | lhi %r14,_PIF_SYSCALL | _PIF_PER_TRAP | 468 | stg %r14,__LC_RETURN_PSW+8 |
424 | lpsw __LC_RETURN_PSW # branch to .Lsysc_per and enable irqs | 469 | lghi %r14,_PIF_SYSCALL | _PIF_PER_TRAP |
470 | lpswe __LC_RETURN_PSW # branch to .Lsysc_per and enable irqs | ||
425 | 471 | ||
426 | /* | 472 | /* |
427 | * IO interrupt handler routine | 473 | * IO interrupt handler routine |
428 | */ | 474 | */ |
429 | |||
430 | ENTRY(io_int_handler) | 475 | ENTRY(io_int_handler) |
431 | stck __LC_INT_CLOCK | 476 | STCK __LC_INT_CLOCK |
432 | stpt __LC_ASYNC_ENTER_TIMER | 477 | stpt __LC_ASYNC_ENTER_TIMER |
433 | stm %r8,%r15,__LC_SAVE_AREA_ASYNC | 478 | stmg %r8,%r15,__LC_SAVE_AREA_ASYNC |
434 | l %r12,__LC_THREAD_INFO | 479 | lg %r10,__LC_LAST_BREAK |
435 | l %r13,__LC_SVC_NEW_PSW+4 | 480 | lg %r12,__LC_THREAD_INFO |
436 | lm %r8,%r9,__LC_IO_OLD_PSW | 481 | larl %r13,system_call |
437 | tmh %r8,0x0001 # interrupting from user ? | 482 | lmg %r8,%r9,__LC_IO_OLD_PSW |
483 | HANDLE_SIE_INTERCEPT %r14,2 | ||
484 | SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT | ||
485 | tmhh %r8,0x0001 # interrupting from user? | ||
438 | jz .Lio_skip | 486 | jz .Lio_skip |
439 | UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER | 487 | UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER |
488 | LAST_BREAK %r14 | ||
440 | .Lio_skip: | 489 | .Lio_skip: |
441 | SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT | 490 | stmg %r0,%r7,__PT_R0(%r11) |
442 | stm %r0,%r7,__PT_R0(%r11) | 491 | mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC |
443 | mvc __PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC | 492 | stmg %r8,%r9,__PT_PSW(%r11) |
444 | stm %r8,%r9,__PT_PSW(%r11) | ||
445 | mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID | 493 | mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID |
446 | xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11) | 494 | xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) |
447 | TRACE_IRQS_OFF | 495 | TRACE_IRQS_OFF |
448 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) | 496 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) |
449 | .Lio_loop: | 497 | .Lio_loop: |
450 | l %r1,BASED(.Lc_do_IRQ) | 498 | lgr %r2,%r11 # pass pointer to pt_regs |
451 | lr %r2,%r11 # pass pointer to pt_regs | 499 | lghi %r3,IO_INTERRUPT |
452 | lhi %r3,IO_INTERRUPT | ||
453 | tm __PT_INT_CODE+8(%r11),0x80 # adapter interrupt ? | 500 | tm __PT_INT_CODE+8(%r11),0x80 # adapter interrupt ? |
454 | jz .Lio_call | 501 | jz .Lio_call |
455 | lhi %r3,THIN_INTERRUPT | 502 | lghi %r3,THIN_INTERRUPT |
456 | .Lio_call: | 503 | .Lio_call: |
457 | basr %r14,%r1 # call do_IRQ | 504 | brasl %r14,do_IRQ |
458 | tm __LC_MACHINE_FLAGS+2,0x10 # MACHINE_FLAG_LPAR | 505 | tm __LC_MACHINE_FLAGS+6,0x10 # MACHINE_FLAG_LPAR |
459 | jz .Lio_return | 506 | jz .Lio_return |
460 | tpi 0 | 507 | tpi 0 |
461 | jz .Lio_return | 508 | jz .Lio_return |
@@ -465,21 +512,26 @@ ENTRY(io_int_handler) | |||
465 | LOCKDEP_SYS_EXIT | 512 | LOCKDEP_SYS_EXIT |
466 | TRACE_IRQS_ON | 513 | TRACE_IRQS_ON |
467 | .Lio_tif: | 514 | .Lio_tif: |
468 | tm __TI_flags+3(%r12),_TIF_WORK | 515 | tm __TI_flags+7(%r12),_TIF_WORK |
469 | jnz .Lio_work # there is work to do (signals etc.) | 516 | jnz .Lio_work # there is work to do (signals etc.) |
470 | tm __LC_CPU_FLAGS+3,_CIF_WORK | 517 | tm __LC_CPU_FLAGS+7,_CIF_WORK |
471 | jnz .Lio_work | 518 | jnz .Lio_work |
472 | .Lio_restore: | 519 | .Lio_restore: |
473 | mvc __LC_RETURN_PSW(8),__PT_PSW(%r11) | 520 | lg %r14,__LC_VDSO_PER_CPU |
521 | lmg %r0,%r10,__PT_R0(%r11) | ||
522 | mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) | ||
474 | stpt __LC_EXIT_TIMER | 523 | stpt __LC_EXIT_TIMER |
475 | lm %r0,%r15,__PT_R0(%r11) | 524 | mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER |
476 | lpsw __LC_RETURN_PSW | 525 | lmg %r11,%r15,__PT_R11(%r11) |
526 | lpswe __LC_RETURN_PSW | ||
477 | .Lio_done: | 527 | .Lio_done: |
478 | 528 | ||
479 | # | 529 | # |
480 | # There is work todo, find out in which context we have been interrupted: | 530 | # There is work todo, find out in which context we have been interrupted: |
481 | # 1) if we return to user space we can do all _TIF_WORK work | 531 | # 1) if we return to user space we can do all _TIF_WORK work |
482 | # 2) if we return to kernel code and preemptive scheduling is enabled check | 532 | # 2) if we return to kernel code and kvm is enabled check if we need to |
533 | # modify the psw to leave SIE | ||
534 | # 3) if we return to kernel code and preemptive scheduling is enabled check | ||
483 | # the preemption counter and if it is zero call preempt_schedule_irq | 535 | # the preemption counter and if it is zero call preempt_schedule_irq |
484 | # Before any work can be done, a switch to the kernel stack is required. | 536 | # Before any work can be done, a switch to the kernel stack is required. |
485 | # | 537 | # |
@@ -489,21 +541,20 @@ ENTRY(io_int_handler) | |||
489 | #ifdef CONFIG_PREEMPT | 541 | #ifdef CONFIG_PREEMPT |
490 | # check for preemptive scheduling | 542 | # check for preemptive scheduling |
491 | icm %r0,15,__TI_precount(%r12) | 543 | icm %r0,15,__TI_precount(%r12) |
492 | jnz .Lio_restore # preemption disabled | 544 | jnz .Lio_restore # preemption is disabled |
493 | tm __TI_flags+3(%r12),_TIF_NEED_RESCHED | 545 | tm __TI_flags+7(%r12),_TIF_NEED_RESCHED |
494 | jno .Lio_restore | 546 | jno .Lio_restore |
495 | # switch to kernel stack | 547 | # switch to kernel stack |
496 | l %r1,__PT_R15(%r11) | 548 | lg %r1,__PT_R15(%r11) |
497 | ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) | 549 | aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) |
498 | mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) | 550 | mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) |
499 | xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) | 551 | xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) |
500 | la %r11,STACK_FRAME_OVERHEAD(%r1) | 552 | la %r11,STACK_FRAME_OVERHEAD(%r1) |
501 | lr %r15,%r1 | 553 | lgr %r15,%r1 |
502 | # TRACE_IRQS_ON already done at .Lio_return, call | 554 | # TRACE_IRQS_ON already done at .Lio_return, call |
503 | # TRACE_IRQS_OFF to keep things symmetrical | 555 | # TRACE_IRQS_OFF to keep things symmetrical |
504 | TRACE_IRQS_OFF | 556 | TRACE_IRQS_OFF |
505 | l %r1,BASED(.Lc_preempt_irq) | 557 | brasl %r14,preempt_schedule_irq |
506 | basr %r14,%r1 # call preempt_schedule_irq | ||
507 | j .Lio_return | 558 | j .Lio_return |
508 | #else | 559 | #else |
509 | j .Lio_restore | 560 | j .Lio_restore |
@@ -513,25 +564,25 @@ ENTRY(io_int_handler) | |||
513 | # Need to do work before returning to userspace, switch to kernel stack | 564 | # Need to do work before returning to userspace, switch to kernel stack |
514 | # | 565 | # |
515 | .Lio_work_user: | 566 | .Lio_work_user: |
516 | l %r1,__LC_KERNEL_STACK | 567 | lg %r1,__LC_KERNEL_STACK |
517 | mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) | 568 | mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) |
518 | xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) | 569 | xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) |
519 | la %r11,STACK_FRAME_OVERHEAD(%r1) | 570 | la %r11,STACK_FRAME_OVERHEAD(%r1) |
520 | lr %r15,%r1 | 571 | lgr %r15,%r1 |
521 | 572 | ||
522 | # | 573 | # |
523 | # One of the work bits is on. Find out which one. | 574 | # One of the work bits is on. Find out which one. |
524 | # | 575 | # |
525 | .Lio_work_tif: | 576 | .Lio_work_tif: |
526 | tm __LC_CPU_FLAGS+3(%r12),_CIF_MCCK_PENDING | 577 | tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING |
527 | jo .Lio_mcck_pending | 578 | jo .Lio_mcck_pending |
528 | tm __TI_flags+3(%r12),_TIF_NEED_RESCHED | 579 | tm __TI_flags+7(%r12),_TIF_NEED_RESCHED |
529 | jo .Lio_reschedule | 580 | jo .Lio_reschedule |
530 | tm __TI_flags+3(%r12),_TIF_SIGPENDING | 581 | tm __TI_flags+7(%r12),_TIF_SIGPENDING |
531 | jo .Lio_sigpending | 582 | jo .Lio_sigpending |
532 | tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME | 583 | tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME |
533 | jo .Lio_notify_resume | 584 | jo .Lio_notify_resume |
534 | tm __LC_CPU_FLAGS+3,_CIF_ASCE | 585 | tm __LC_CPU_FLAGS+7,_CIF_ASCE |
535 | jo .Lio_uaccess | 586 | jo .Lio_uaccess |
536 | j .Lio_return # beware of critical section cleanup | 587 | j .Lio_return # beware of critical section cleanup |
537 | 588 | ||
@@ -540,8 +591,7 @@ ENTRY(io_int_handler) | |||
540 | # | 591 | # |
541 | .Lio_mcck_pending: | 592 | .Lio_mcck_pending: |
542 | # TRACE_IRQS_ON already done at .Lio_return | 593 | # TRACE_IRQS_ON already done at .Lio_return |
543 | l %r1,BASED(.Lc_handle_mcck) | 594 | brasl %r14,s390_handle_mcck # TIF bit will be cleared by handler |
544 | basr %r14,%r1 # TIF bit will be cleared by handler | ||
545 | TRACE_IRQS_OFF | 595 | TRACE_IRQS_OFF |
546 | j .Lio_return | 596 | j .Lio_return |
547 | 597 | ||
@@ -549,8 +599,8 @@ ENTRY(io_int_handler) | |||
549 | # _CIF_ASCE is set, load user space asce | 599 | # _CIF_ASCE is set, load user space asce |
550 | # | 600 | # |
551 | .Lio_uaccess: | 601 | .Lio_uaccess: |
552 | ni __LC_CPU_FLAGS+3,255-_CIF_ASCE | 602 | ni __LC_CPU_FLAGS+7,255-_CIF_ASCE |
553 | lctl %c1,%c1,__LC_USER_ASCE # load primary asce | 603 | lctlg %c1,%c1,__LC_USER_ASCE # load primary asce |
554 | j .Lio_return | 604 | j .Lio_return |
555 | 605 | ||
556 | # | 606 | # |
@@ -558,35 +608,32 @@ ENTRY(io_int_handler) | |||
558 | # | 608 | # |
559 | .Lio_reschedule: | 609 | .Lio_reschedule: |
560 | # TRACE_IRQS_ON already done at .Lio_return | 610 | # TRACE_IRQS_ON already done at .Lio_return |
561 | l %r1,BASED(.Lc_schedule) | ||
562 | ssm __LC_SVC_NEW_PSW # reenable interrupts | 611 | ssm __LC_SVC_NEW_PSW # reenable interrupts |
563 | basr %r14,%r1 # call scheduler | 612 | brasl %r14,schedule # call scheduler |
564 | ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts | 613 | ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts |
565 | TRACE_IRQS_OFF | 614 | TRACE_IRQS_OFF |
566 | j .Lio_return | 615 | j .Lio_return |
567 | 616 | ||
568 | # | 617 | # |
569 | # _TIF_SIGPENDING is set, call do_signal | 618 | # _TIF_SIGPENDING is set, call do_signal |
570 | # | 619 | # |
571 | .Lio_sigpending: | 620 | .Lio_sigpending: |
572 | # TRACE_IRQS_ON already done at .Lio_return | 621 | # TRACE_IRQS_ON already done at .Lio_return |
573 | l %r1,BASED(.Lc_do_signal) | ||
574 | ssm __LC_SVC_NEW_PSW # reenable interrupts | 622 | ssm __LC_SVC_NEW_PSW # reenable interrupts |
575 | lr %r2,%r11 # pass pointer to pt_regs | 623 | lgr %r2,%r11 # pass pointer to pt_regs |
576 | basr %r14,%r1 # call do_signal | 624 | brasl %r14,do_signal |
577 | ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts | 625 | ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts |
578 | TRACE_IRQS_OFF | 626 | TRACE_IRQS_OFF |
579 | j .Lio_return | 627 | j .Lio_return |
580 | 628 | ||
581 | # | 629 | # |
582 | # _TIF_SIGPENDING is set, call do_signal | 630 | # _TIF_NOTIFY_RESUME is set, call do_notify_resume |
583 | # | 631 | # |
584 | .Lio_notify_resume: | 632 | .Lio_notify_resume: |
585 | # TRACE_IRQS_ON already done at .Lio_return | 633 | # TRACE_IRQS_ON already done at .Lio_return |
586 | l %r1,BASED(.Lc_do_notify_resume) | ||
587 | ssm __LC_SVC_NEW_PSW # reenable interrupts | 634 | ssm __LC_SVC_NEW_PSW # reenable interrupts |
588 | lr %r2,%r11 # pass pointer to pt_regs | 635 | lgr %r2,%r11 # pass pointer to pt_regs |
589 | basr %r14,%r1 # call do_notify_resume | 636 | brasl %r14,do_notify_resume |
590 | ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts | 637 | ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts |
591 | TRACE_IRQS_OFF | 638 | TRACE_IRQS_OFF |
592 | j .Lio_return | 639 | j .Lio_return |
@@ -594,45 +641,47 @@ ENTRY(io_int_handler) | |||
594 | /* | 641 | /* |
595 | * External interrupt handler routine | 642 | * External interrupt handler routine |
596 | */ | 643 | */ |
597 | |||
598 | ENTRY(ext_int_handler) | 644 | ENTRY(ext_int_handler) |
599 | stck __LC_INT_CLOCK | 645 | STCK __LC_INT_CLOCK |
600 | stpt __LC_ASYNC_ENTER_TIMER | 646 | stpt __LC_ASYNC_ENTER_TIMER |
601 | stm %r8,%r15,__LC_SAVE_AREA_ASYNC | 647 | stmg %r8,%r15,__LC_SAVE_AREA_ASYNC |
602 | l %r12,__LC_THREAD_INFO | 648 | lg %r10,__LC_LAST_BREAK |
603 | l %r13,__LC_SVC_NEW_PSW+4 | 649 | lg %r12,__LC_THREAD_INFO |
604 | lm %r8,%r9,__LC_EXT_OLD_PSW | 650 | larl %r13,system_call |
605 | tmh %r8,0x0001 # interrupting from user ? | 651 | lmg %r8,%r9,__LC_EXT_OLD_PSW |
652 | HANDLE_SIE_INTERCEPT %r14,3 | ||
653 | SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT | ||
654 | tmhh %r8,0x0001 # interrupting from user ? | ||
606 | jz .Lext_skip | 655 | jz .Lext_skip |
607 | UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER | 656 | UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER |
657 | LAST_BREAK %r14 | ||
608 | .Lext_skip: | 658 | .Lext_skip: |
609 | SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT | 659 | stmg %r0,%r7,__PT_R0(%r11) |
610 | stm %r0,%r7,__PT_R0(%r11) | 660 | mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC |
611 | mvc __PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC | 661 | stmg %r8,%r9,__PT_PSW(%r11) |
612 | stm %r8,%r9,__PT_PSW(%r11) | 662 | lghi %r1,__LC_EXT_PARAMS2 |
613 | mvc __PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR | 663 | mvc __PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR |
614 | mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS | 664 | mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS |
615 | xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11) | 665 | mvc __PT_INT_PARM_LONG(8,%r11),0(%r1) |
666 | xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) | ||
616 | TRACE_IRQS_OFF | 667 | TRACE_IRQS_OFF |
617 | l %r1,BASED(.Lc_do_IRQ) | 668 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) |
618 | lr %r2,%r11 # pass pointer to pt_regs | 669 | lgr %r2,%r11 # pass pointer to pt_regs |
619 | lhi %r3,EXT_INTERRUPT | 670 | lghi %r3,EXT_INTERRUPT |
620 | basr %r14,%r1 # call do_IRQ | 671 | brasl %r14,do_IRQ |
621 | j .Lio_return | 672 | j .Lio_return |
622 | 673 | ||
623 | /* | 674 | /* |
624 | * Load idle PSW. The second "half" of this function is in .Lcleanup_idle. | 675 | * Load idle PSW. The second "half" of this function is in .Lcleanup_idle. |
625 | */ | 676 | */ |
626 | ENTRY(psw_idle) | 677 | ENTRY(psw_idle) |
627 | st %r3,__SF_EMPTY(%r15) | 678 | stg %r3,__SF_EMPTY(%r15) |
628 | basr %r1,0 | 679 | larl %r1,.Lpsw_idle_lpsw+4 |
629 | la %r1,.Lpsw_idle_lpsw+4-.(%r1) | 680 | stg %r1,__SF_EMPTY+8(%r15) |
630 | st %r1,__SF_EMPTY+4(%r15) | 681 | STCK __CLOCK_IDLE_ENTER(%r2) |
631 | oi __SF_EMPTY+4(%r15),0x80 | ||
632 | stck __CLOCK_IDLE_ENTER(%r2) | ||
633 | stpt __TIMER_IDLE_ENTER(%r2) | 682 | stpt __TIMER_IDLE_ENTER(%r2) |
634 | .Lpsw_idle_lpsw: | 683 | .Lpsw_idle_lpsw: |
635 | lpsw __SF_EMPTY(%r15) | 684 | lpswe __SF_EMPTY(%r15) |
636 | br %r14 | 685 | br %r14 |
637 | .Lpsw_idle_end: | 686 | .Lpsw_idle_end: |
638 | 687 | ||
@@ -641,17 +690,19 @@ ENTRY(psw_idle) | |||
641 | /* | 690 | /* |
642 | * Machine check handler routines | 691 | * Machine check handler routines |
643 | */ | 692 | */ |
644 | |||
645 | ENTRY(mcck_int_handler) | 693 | ENTRY(mcck_int_handler) |
646 | stck __LC_MCCK_CLOCK | 694 | STCK __LC_MCCK_CLOCK |
647 | spt __LC_CPU_TIMER_SAVE_AREA # revalidate cpu timer | 695 | la %r1,4095 # revalidate r1 |
648 | lm %r0,%r15,__LC_GPREGS_SAVE_AREA # revalidate gprs | 696 | spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer |
649 | l %r12,__LC_THREAD_INFO | 697 | lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs |
650 | l %r13,__LC_SVC_NEW_PSW+4 | 698 | lg %r10,__LC_LAST_BREAK |
651 | lm %r8,%r9,__LC_MCK_OLD_PSW | 699 | lg %r12,__LC_THREAD_INFO |
700 | larl %r13,system_call | ||
701 | lmg %r8,%r9,__LC_MCK_OLD_PSW | ||
702 | HANDLE_SIE_INTERCEPT %r14,4 | ||
652 | tm __LC_MCCK_CODE,0x80 # system damage? | 703 | tm __LC_MCCK_CODE,0x80 # system damage? |
653 | jo .Lmcck_panic # yes -> rest of mcck code invalid | 704 | jo .Lmcck_panic # yes -> rest of mcck code invalid |
654 | la %r14,__LC_CPU_TIMER_SAVE_AREA | 705 | lghi %r14,__LC_CPU_TIMER_SAVE_AREA |
655 | mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) | 706 | mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) |
656 | tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid? | 707 | tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid? |
657 | jo 3f | 708 | jo 3f |
@@ -669,76 +720,76 @@ ENTRY(mcck_int_handler) | |||
669 | mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) | 720 | mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) |
670 | 3: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? | 721 | 3: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? |
671 | jno .Lmcck_panic # no -> skip cleanup critical | 722 | jno .Lmcck_panic # no -> skip cleanup critical |
723 | SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_PANIC_STACK,PAGE_SHIFT | ||
672 | tm %r8,0x0001 # interrupting from user ? | 724 | tm %r8,0x0001 # interrupting from user ? |
673 | jz .Lmcck_skip | 725 | jz .Lmcck_skip |
674 | UPDATE_VTIME %r14,%r15,__LC_MCCK_ENTER_TIMER | 726 | UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER |
727 | LAST_BREAK %r14 | ||
675 | .Lmcck_skip: | 728 | .Lmcck_skip: |
676 | SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+32,__LC_PANIC_STACK,PAGE_SHIFT | 729 | lghi %r14,__LC_GPREGS_SAVE_AREA+64 |
677 | stm %r0,%r7,__PT_R0(%r11) | 730 | stmg %r0,%r7,__PT_R0(%r11) |
678 | mvc __PT_R8(32,%r11),__LC_GPREGS_SAVE_AREA+32 | 731 | mvc __PT_R8(64,%r11),0(%r14) |
679 | stm %r8,%r9,__PT_PSW(%r11) | 732 | stmg %r8,%r9,__PT_PSW(%r11) |
680 | xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11) | 733 | xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) |
681 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) | 734 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) |
682 | l %r1,BASED(.Lc_do_machine_check) | 735 | lgr %r2,%r11 # pass pointer to pt_regs |
683 | lr %r2,%r11 # pass pointer to pt_regs | 736 | brasl %r14,s390_do_machine_check |
684 | basr %r14,%r1 # call s390_do_machine_check | ||
685 | tm __PT_PSW+1(%r11),0x01 # returning to user ? | 737 | tm __PT_PSW+1(%r11),0x01 # returning to user ? |
686 | jno .Lmcck_return | 738 | jno .Lmcck_return |
687 | l %r1,__LC_KERNEL_STACK # switch to kernel stack | 739 | lg %r1,__LC_KERNEL_STACK # switch to kernel stack |
688 | mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) | 740 | mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) |
689 | xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) | 741 | xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) |
690 | la %r11,STACK_FRAME_OVERHEAD(%r15) | 742 | la %r11,STACK_FRAME_OVERHEAD(%r1) |
691 | lr %r15,%r1 | 743 | lgr %r15,%r1 |
692 | ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off | 744 | ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off |
693 | tm __LC_CPU_FLAGS+3,_CIF_MCCK_PENDING | 745 | tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING |
694 | jno .Lmcck_return | 746 | jno .Lmcck_return |
695 | TRACE_IRQS_OFF | 747 | TRACE_IRQS_OFF |
696 | l %r1,BASED(.Lc_handle_mcck) | 748 | brasl %r14,s390_handle_mcck |
697 | basr %r14,%r1 # call s390_handle_mcck | ||
698 | TRACE_IRQS_ON | 749 | TRACE_IRQS_ON |
699 | .Lmcck_return: | 750 | .Lmcck_return: |
700 | mvc __LC_RETURN_MCCK_PSW(8),__PT_PSW(%r11) # move return PSW | 751 | lg %r14,__LC_VDSO_PER_CPU |
752 | lmg %r0,%r10,__PT_R0(%r11) | ||
753 | mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW | ||
701 | tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? | 754 | tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? |
702 | jno 0f | 755 | jno 0f |
703 | lm %r0,%r15,__PT_R0(%r11) | ||
704 | stpt __LC_EXIT_TIMER | 756 | stpt __LC_EXIT_TIMER |
705 | lpsw __LC_RETURN_MCCK_PSW | 757 | mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER |
706 | 0: lm %r0,%r15,__PT_R0(%r11) | 758 | 0: lmg %r11,%r15,__PT_R11(%r11) |
707 | lpsw __LC_RETURN_MCCK_PSW | 759 | lpswe __LC_RETURN_MCCK_PSW |
708 | 760 | ||
709 | .Lmcck_panic: | 761 | .Lmcck_panic: |
710 | l %r14,__LC_PANIC_STACK | 762 | lg %r14,__LC_PANIC_STACK |
711 | slr %r14,%r15 | 763 | slgr %r14,%r15 |
712 | sra %r14,PAGE_SHIFT | 764 | srag %r14,%r14,PAGE_SHIFT |
713 | jz 0f | 765 | jz 0f |
714 | l %r15,__LC_PANIC_STACK | 766 | lg %r15,__LC_PANIC_STACK |
715 | j .Lmcck_skip | 767 | 0: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) |
716 | 0: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) | ||
717 | j .Lmcck_skip | 768 | j .Lmcck_skip |
718 | 769 | ||
719 | # | 770 | # |
720 | # PSW restart interrupt handler | 771 | # PSW restart interrupt handler |
721 | # | 772 | # |
722 | ENTRY(restart_int_handler) | 773 | ENTRY(restart_int_handler) |
723 | st %r15,__LC_SAVE_AREA_RESTART | 774 | stg %r15,__LC_SAVE_AREA_RESTART |
724 | l %r15,__LC_RESTART_STACK | 775 | lg %r15,__LC_RESTART_STACK |
725 | ahi %r15,-__PT_SIZE # create pt_regs on stack | 776 | aghi %r15,-__PT_SIZE # create pt_regs on stack |
726 | xc 0(__PT_SIZE,%r15),0(%r15) | 777 | xc 0(__PT_SIZE,%r15),0(%r15) |
727 | stm %r0,%r14,__PT_R0(%r15) | 778 | stmg %r0,%r14,__PT_R0(%r15) |
728 | mvc __PT_R15(4,%r15),__LC_SAVE_AREA_RESTART | 779 | mvc __PT_R15(8,%r15),__LC_SAVE_AREA_RESTART |
729 | mvc __PT_PSW(8,%r15),__LC_RST_OLD_PSW # store restart old psw | 780 | mvc __PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw |
730 | ahi %r15,-STACK_FRAME_OVERHEAD # create stack frame on stack | 781 | aghi %r15,-STACK_FRAME_OVERHEAD # create stack frame on stack |
731 | xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15) | 782 | xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15) |
732 | l %r1,__LC_RESTART_FN # load fn, parm & source cpu | 783 | lg %r1,__LC_RESTART_FN # load fn, parm & source cpu |
733 | l %r2,__LC_RESTART_DATA | 784 | lg %r2,__LC_RESTART_DATA |
734 | l %r3,__LC_RESTART_SOURCE | 785 | lg %r3,__LC_RESTART_SOURCE |
735 | ltr %r3,%r3 # test source cpu address | 786 | ltgr %r3,%r3 # test source cpu address |
736 | jm 1f # negative -> skip source stop | 787 | jm 1f # negative -> skip source stop |
737 | 0: sigp %r4,%r3,SIGP_SENSE # sigp sense to source cpu | 788 | 0: sigp %r4,%r3,SIGP_SENSE # sigp sense to source cpu |
738 | brc 10,0b # wait for status stored | 789 | brc 10,0b # wait for status stored |
739 | 1: basr %r14,%r1 # call function | 790 | 1: basr %r14,%r1 # call function |
740 | stap __SF_EMPTY(%r15) # store cpu address | 791 | stap __SF_EMPTY(%r15) # store cpu address |
741 | lh %r3,__SF_EMPTY(%r15) | 792 | llgh %r3,__SF_EMPTY(%r15) |
742 | 2: sigp %r4,%r3,SIGP_STOP # sigp stop to current cpu | 793 | 2: sigp %r4,%r3,SIGP_STOP # sigp stop to current cpu |
743 | brc 2,2b | 794 | brc 2,2b |
744 | 3: j 3b | 795 | 3: j 3b |
@@ -752,215 +803,257 @@ ENTRY(restart_int_handler) | |||
752 | * Setup a pt_regs so that show_trace can provide a good call trace. | 803 | * Setup a pt_regs so that show_trace can provide a good call trace. |
753 | */ | 804 | */ |
754 | stack_overflow: | 805 | stack_overflow: |
755 | l %r15,__LC_PANIC_STACK # change to panic stack | 806 | lg %r15,__LC_PANIC_STACK # change to panic stack |
756 | la %r11,STACK_FRAME_OVERHEAD(%r15) | 807 | la %r11,STACK_FRAME_OVERHEAD(%r15) |
757 | stm %r0,%r7,__PT_R0(%r11) | 808 | stmg %r0,%r7,__PT_R0(%r11) |
758 | stm %r8,%r9,__PT_PSW(%r11) | 809 | stmg %r8,%r9,__PT_PSW(%r11) |
759 | mvc __PT_R8(32,%r11),0(%r14) | 810 | mvc __PT_R8(64,%r11),0(%r14) |
760 | l %r1,BASED(1f) | 811 | stg %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2 |
761 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) | 812 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) |
762 | lr %r2,%r11 # pass pointer to pt_regs | 813 | lgr %r2,%r11 # pass pointer to pt_regs |
763 | br %r1 # branch to kernel_stack_overflow | 814 | jg kernel_stack_overflow |
764 | 1: .long kernel_stack_overflow | ||
765 | #endif | 815 | #endif |
766 | 816 | ||
817 | .align 8 | ||
767 | .Lcleanup_table: | 818 | .Lcleanup_table: |
768 | .long system_call + 0x80000000 | 819 | .quad system_call |
769 | .long .Lsysc_do_svc + 0x80000000 | 820 | .quad .Lsysc_do_svc |
770 | .long .Lsysc_tif + 0x80000000 | 821 | .quad .Lsysc_tif |
771 | .long .Lsysc_restore + 0x80000000 | 822 | .quad .Lsysc_restore |
772 | .long .Lsysc_done + 0x80000000 | 823 | .quad .Lsysc_done |
773 | .long .Lio_tif + 0x80000000 | 824 | .quad .Lio_tif |
774 | .long .Lio_restore + 0x80000000 | 825 | .quad .Lio_restore |
775 | .long .Lio_done + 0x80000000 | 826 | .quad .Lio_done |
776 | .long psw_idle + 0x80000000 | 827 | .quad psw_idle |
777 | .long .Lpsw_idle_end + 0x80000000 | 828 | .quad .Lpsw_idle_end |
778 | 829 | ||
779 | cleanup_critical: | 830 | cleanup_critical: |
780 | cl %r9,BASED(.Lcleanup_table) # system_call | 831 | clg %r9,BASED(.Lcleanup_table) # system_call |
781 | jl 0f | 832 | jl 0f |
782 | cl %r9,BASED(.Lcleanup_table+4) # .Lsysc_do_svc | 833 | clg %r9,BASED(.Lcleanup_table+8) # .Lsysc_do_svc |
783 | jl .Lcleanup_system_call | 834 | jl .Lcleanup_system_call |
784 | cl %r9,BASED(.Lcleanup_table+8) # .Lsysc_tif | 835 | clg %r9,BASED(.Lcleanup_table+16) # .Lsysc_tif |
785 | jl 0f | 836 | jl 0f |
786 | cl %r9,BASED(.Lcleanup_table+12) # .Lsysc_restore | 837 | clg %r9,BASED(.Lcleanup_table+24) # .Lsysc_restore |
787 | jl .Lcleanup_sysc_tif | 838 | jl .Lcleanup_sysc_tif |
788 | cl %r9,BASED(.Lcleanup_table+16) # .Lsysc_done | 839 | clg %r9,BASED(.Lcleanup_table+32) # .Lsysc_done |
789 | jl .Lcleanup_sysc_restore | 840 | jl .Lcleanup_sysc_restore |
790 | cl %r9,BASED(.Lcleanup_table+20) # .Lio_tif | 841 | clg %r9,BASED(.Lcleanup_table+40) # .Lio_tif |
791 | jl 0f | 842 | jl 0f |
792 | cl %r9,BASED(.Lcleanup_table+24) # .Lio_restore | 843 | clg %r9,BASED(.Lcleanup_table+48) # .Lio_restore |
793 | jl .Lcleanup_io_tif | 844 | jl .Lcleanup_io_tif |
794 | cl %r9,BASED(.Lcleanup_table+28) # .Lio_done | 845 | clg %r9,BASED(.Lcleanup_table+56) # .Lio_done |
795 | jl .Lcleanup_io_restore | 846 | jl .Lcleanup_io_restore |
796 | cl %r9,BASED(.Lcleanup_table+32) # psw_idle | 847 | clg %r9,BASED(.Lcleanup_table+64) # psw_idle |
797 | jl 0f | 848 | jl 0f |
798 | cl %r9,BASED(.Lcleanup_table+36) # .Lpsw_idle_end | 849 | clg %r9,BASED(.Lcleanup_table+72) # .Lpsw_idle_end |
799 | jl .Lcleanup_idle | 850 | jl .Lcleanup_idle |
800 | 0: br %r14 | 851 | 0: br %r14 |
801 | 852 | ||
853 | |||
802 | .Lcleanup_system_call: | 854 | .Lcleanup_system_call: |
803 | # check if stpt has been executed | 855 | # check if stpt has been executed |
804 | cl %r9,BASED(.Lcleanup_system_call_insn) | 856 | clg %r9,BASED(.Lcleanup_system_call_insn) |
805 | jh 0f | 857 | jh 0f |
806 | mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER | 858 | mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER |
807 | chi %r11,__LC_SAVE_AREA_ASYNC | 859 | cghi %r11,__LC_SAVE_AREA_ASYNC |
808 | je 0f | 860 | je 0f |
809 | mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER | 861 | mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER |
810 | 0: # check if stm has been executed | 862 | 0: # check if stmg has been executed |
811 | cl %r9,BASED(.Lcleanup_system_call_insn+4) | 863 | clg %r9,BASED(.Lcleanup_system_call_insn+8) |
812 | jh 0f | 864 | jh 0f |
813 | mvc __LC_SAVE_AREA_SYNC(32),0(%r11) | 865 | mvc __LC_SAVE_AREA_SYNC(64),0(%r11) |
814 | 0: # set up saved registers r12, and r13 | 866 | 0: # check if base register setup + TIF bit load has been done |
815 | st %r12,16(%r11) # r12 thread-info pointer | 867 | clg %r9,BASED(.Lcleanup_system_call_insn+16) |
816 | st %r13,20(%r11) # r13 literal-pool pointer | 868 | jhe 0f |
817 | # check if the user time calculation has been done | 869 | # set up saved registers r10 and r12 |
818 | cl %r9,BASED(.Lcleanup_system_call_insn+8) | 870 | stg %r10,16(%r11) # r10 last break |
871 | stg %r12,32(%r11) # r12 thread-info pointer | ||
872 | 0: # check if the user time update has been done | ||
873 | clg %r9,BASED(.Lcleanup_system_call_insn+24) | ||
819 | jh 0f | 874 | jh 0f |
820 | l %r10,__LC_EXIT_TIMER | 875 | lg %r15,__LC_EXIT_TIMER |
821 | l %r15,__LC_EXIT_TIMER+4 | 876 | slg %r15,__LC_SYNC_ENTER_TIMER |
822 | SUB64 %r10,%r15,__LC_SYNC_ENTER_TIMER | 877 | alg %r15,__LC_USER_TIMER |
823 | ADD64 %r10,%r15,__LC_USER_TIMER | 878 | stg %r15,__LC_USER_TIMER |
824 | st %r10,__LC_USER_TIMER | 879 | 0: # check if the system time update has been done |
825 | st %r15,__LC_USER_TIMER+4 | 880 | clg %r9,BASED(.Lcleanup_system_call_insn+32) |
826 | 0: # check if the system time calculation has been done | ||
827 | cl %r9,BASED(.Lcleanup_system_call_insn+12) | ||
828 | jh 0f | 881 | jh 0f |
829 | l %r10,__LC_LAST_UPDATE_TIMER | 882 | lg %r15,__LC_LAST_UPDATE_TIMER |
830 | l %r15,__LC_LAST_UPDATE_TIMER+4 | 883 | slg %r15,__LC_EXIT_TIMER |
831 | SUB64 %r10,%r15,__LC_EXIT_TIMER | 884 | alg %r15,__LC_SYSTEM_TIMER |
832 | ADD64 %r10,%r15,__LC_SYSTEM_TIMER | 885 | stg %r15,__LC_SYSTEM_TIMER |
833 | st %r10,__LC_SYSTEM_TIMER | ||
834 | st %r15,__LC_SYSTEM_TIMER+4 | ||
835 | 0: # update accounting time stamp | 886 | 0: # update accounting time stamp |
836 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 887 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER |
837 | # set up saved register 11 | 888 | # do LAST_BREAK |
838 | l %r15,__LC_KERNEL_STACK | 889 | lg %r9,16(%r11) |
890 | srag %r9,%r9,23 | ||
891 | jz 0f | ||
892 | mvc __TI_last_break(8,%r12),16(%r11) | ||
893 | 0: # set up saved register r11 | ||
894 | lg %r15,__LC_KERNEL_STACK | ||
839 | la %r9,STACK_FRAME_OVERHEAD(%r15) | 895 | la %r9,STACK_FRAME_OVERHEAD(%r15) |
840 | st %r9,12(%r11) # r11 pt_regs pointer | 896 | stg %r9,24(%r11) # r11 pt_regs pointer |
841 | # fill pt_regs | 897 | # fill pt_regs |
842 | mvc __PT_R8(32,%r9),__LC_SAVE_AREA_SYNC | 898 | mvc __PT_R8(64,%r9),__LC_SAVE_AREA_SYNC |
843 | stm %r0,%r7,__PT_R0(%r9) | 899 | stmg %r0,%r7,__PT_R0(%r9) |
844 | mvc __PT_PSW(8,%r9),__LC_SVC_OLD_PSW | 900 | mvc __PT_PSW(16,%r9),__LC_SVC_OLD_PSW |
845 | mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC | 901 | mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC |
846 | xc __PT_FLAGS(4,%r9),__PT_FLAGS(%r9) | 902 | xc __PT_FLAGS(8,%r9),__PT_FLAGS(%r9) |
847 | mvi __PT_FLAGS+3(%r9),_PIF_SYSCALL | 903 | mvi __PT_FLAGS+7(%r9),_PIF_SYSCALL |
848 | # setup saved register 15 | 904 | # setup saved register r15 |
849 | st %r15,28(%r11) # r15 stack pointer | 905 | stg %r15,56(%r11) # r15 stack pointer |
850 | # set new psw address and exit | 906 | # set new psw address and exit |
851 | l %r9,BASED(.Lcleanup_table+4) # .Lsysc_do_svc + 0x80000000 | 907 | larl %r9,.Lsysc_do_svc |
852 | br %r14 | 908 | br %r14 |
853 | .Lcleanup_system_call_insn: | 909 | .Lcleanup_system_call_insn: |
854 | .long system_call + 0x80000000 | 910 | .quad system_call |
855 | .long .Lsysc_stm + 0x80000000 | 911 | .quad .Lsysc_stmg |
856 | .long .Lsysc_vtime + 0x80000000 + 36 | 912 | .quad .Lsysc_per |
857 | .long .Lsysc_vtime + 0x80000000 + 76 | 913 | .quad .Lsysc_vtime+18 |
914 | .quad .Lsysc_vtime+42 | ||
858 | 915 | ||
859 | .Lcleanup_sysc_tif: | 916 | .Lcleanup_sysc_tif: |
860 | l %r9,BASED(.Lcleanup_table+8) # .Lsysc_tif + 0x80000000 | 917 | larl %r9,.Lsysc_tif |
861 | br %r14 | 918 | br %r14 |
862 | 919 | ||
863 | .Lcleanup_sysc_restore: | 920 | .Lcleanup_sysc_restore: |
864 | cl %r9,BASED(.Lcleanup_sysc_restore_insn) | 921 | clg %r9,BASED(.Lcleanup_sysc_restore_insn) |
865 | jhe 0f | 922 | je 0f |
866 | l %r9,12(%r11) # get saved pointer to pt_regs | 923 | lg %r9,24(%r11) # get saved pointer to pt_regs |
867 | mvc __LC_RETURN_PSW(8),__PT_PSW(%r9) | 924 | mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) |
868 | mvc 0(32,%r11),__PT_R8(%r9) | 925 | mvc 0(64,%r11),__PT_R8(%r9) |
869 | lm %r0,%r7,__PT_R0(%r9) | 926 | lmg %r0,%r7,__PT_R0(%r9) |
870 | 0: lm %r8,%r9,__LC_RETURN_PSW | 927 | 0: lmg %r8,%r9,__LC_RETURN_PSW |
871 | br %r14 | 928 | br %r14 |
872 | .Lcleanup_sysc_restore_insn: | 929 | .Lcleanup_sysc_restore_insn: |
873 | .long .Lsysc_done - 4 + 0x80000000 | 930 | .quad .Lsysc_done - 4 |
874 | 931 | ||
875 | .Lcleanup_io_tif: | 932 | .Lcleanup_io_tif: |
876 | l %r9,BASED(.Lcleanup_table+20) # .Lio_tif + 0x80000000 | 933 | larl %r9,.Lio_tif |
877 | br %r14 | 934 | br %r14 |
878 | 935 | ||
879 | .Lcleanup_io_restore: | 936 | .Lcleanup_io_restore: |
880 | cl %r9,BASED(.Lcleanup_io_restore_insn) | 937 | clg %r9,BASED(.Lcleanup_io_restore_insn) |
881 | jhe 0f | 938 | je 0f |
882 | l %r9,12(%r11) # get saved r11 pointer to pt_regs | 939 | lg %r9,24(%r11) # get saved r11 pointer to pt_regs |
883 | mvc __LC_RETURN_PSW(8),__PT_PSW(%r9) | 940 | mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) |
884 | mvc 0(32,%r11),__PT_R8(%r9) | 941 | mvc 0(64,%r11),__PT_R8(%r9) |
885 | lm %r0,%r7,__PT_R0(%r9) | 942 | lmg %r0,%r7,__PT_R0(%r9) |
886 | 0: lm %r8,%r9,__LC_RETURN_PSW | 943 | 0: lmg %r8,%r9,__LC_RETURN_PSW |
887 | br %r14 | 944 | br %r14 |
888 | .Lcleanup_io_restore_insn: | 945 | .Lcleanup_io_restore_insn: |
889 | .long .Lio_done - 4 + 0x80000000 | 946 | .quad .Lio_done - 4 |
890 | 947 | ||
891 | .Lcleanup_idle: | 948 | .Lcleanup_idle: |
892 | # copy interrupt clock & cpu timer | 949 | # copy interrupt clock & cpu timer |
893 | mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK | 950 | mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK |
894 | mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER | 951 | mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER |
895 | chi %r11,__LC_SAVE_AREA_ASYNC | 952 | cghi %r11,__LC_SAVE_AREA_ASYNC |
896 | je 0f | 953 | je 0f |
897 | mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK | 954 | mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK |
898 | mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER | 955 | mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER |
899 | 0: # check if stck has been executed | 956 | 0: # check if stck & stpt have been executed |
900 | cl %r9,BASED(.Lcleanup_idle_insn) | 957 | clg %r9,BASED(.Lcleanup_idle_insn) |
901 | jhe 1f | 958 | jhe 1f |
902 | mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2) | 959 | mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2) |
903 | mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r3) | 960 | mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2) |
904 | 1: # account system time going idle | 961 | 1: # account system time going idle |
905 | lm %r9,%r10,__LC_STEAL_TIMER | 962 | lg %r9,__LC_STEAL_TIMER |
906 | ADD64 %r9,%r10,__CLOCK_IDLE_ENTER(%r2) | 963 | alg %r9,__CLOCK_IDLE_ENTER(%r2) |
907 | SUB64 %r9,%r10,__LC_LAST_UPDATE_CLOCK | 964 | slg %r9,__LC_LAST_UPDATE_CLOCK |
908 | stm %r9,%r10,__LC_STEAL_TIMER | 965 | stg %r9,__LC_STEAL_TIMER |
909 | mvc __LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2) | 966 | mvc __LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2) |
910 | lm %r9,%r10,__LC_SYSTEM_TIMER | 967 | lg %r9,__LC_SYSTEM_TIMER |
911 | ADD64 %r9,%r10,__LC_LAST_UPDATE_TIMER | 968 | alg %r9,__LC_LAST_UPDATE_TIMER |
912 | SUB64 %r9,%r10,__TIMER_IDLE_ENTER(%r2) | 969 | slg %r9,__TIMER_IDLE_ENTER(%r2) |
913 | stm %r9,%r10,__LC_SYSTEM_TIMER | 970 | stg %r9,__LC_SYSTEM_TIMER |
914 | mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2) | 971 | mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2) |
915 | # prepare return psw | 972 | # prepare return psw |
916 | n %r8,BASED(.Lcleanup_idle_wait) # clear irq & wait state bits | 973 | nihh %r8,0xfcfd # clear irq & wait state bits |
917 | l %r9,24(%r11) # return from psw_idle | 974 | lg %r9,48(%r11) # return from psw_idle |
918 | br %r14 | 975 | br %r14 |
919 | .Lcleanup_idle_insn: | 976 | .Lcleanup_idle_insn: |
920 | .long .Lpsw_idle_lpsw + 0x80000000 | 977 | .quad .Lpsw_idle_lpsw |
921 | .Lcleanup_idle_wait: | ||
922 | .long 0xfcfdffff | ||
923 | 978 | ||
924 | /* | 979 | /* |
925 | * Integer constants | 980 | * Integer constants |
926 | */ | 981 | */ |
927 | .align 4 | 982 | .align 8 |
928 | .Lnr_syscalls: | 983 | .Lcritical_start: |
929 | .long NR_syscalls | 984 | .quad .L__critical_start |
930 | .Lvtimer_max: | 985 | .Lcritical_length: |
931 | .quad 0x7fffffffffffffff | 986 | .quad .L__critical_end - .L__critical_start |
987 | |||
932 | 988 | ||
989 | #if IS_ENABLED(CONFIG_KVM) | ||
933 | /* | 990 | /* |
934 | * Symbol constants | 991 | * sie64a calling convention: |
992 | * %r2 pointer to sie control block | ||
993 | * %r3 guest register save area | ||
935 | */ | 994 | */ |
936 | .Lc_do_machine_check: .long s390_do_machine_check | 995 | ENTRY(sie64a) |
937 | .Lc_handle_mcck: .long s390_handle_mcck | 996 | stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers |
938 | .Lc_do_IRQ: .long do_IRQ | 997 | stg %r2,__SF_EMPTY(%r15) # save control block pointer |
939 | .Lc_do_signal: .long do_signal | 998 | stg %r3,__SF_EMPTY+8(%r15) # save guest register save area |
940 | .Lc_do_notify_resume: .long do_notify_resume | 999 | xc __SF_EMPTY+16(16,%r15),__SF_EMPTY+16(%r15) # host id & reason |
941 | .Lc_do_per_trap: .long do_per_trap | 1000 | lmg %r0,%r13,0(%r3) # load guest gprs 0-13 |
942 | .Lc_jump_table: .long pgm_check_table | 1001 | lg %r14,__LC_GMAP # get gmap pointer |
943 | .Lc_schedule: .long schedule | 1002 | ltgr %r14,%r14 |
944 | #ifdef CONFIG_PREEMPT | 1003 | jz .Lsie_gmap |
945 | .Lc_preempt_irq: .long preempt_schedule_irq | 1004 | lctlg %c1,%c1,__GMAP_ASCE(%r14) # load primary asce |
946 | #endif | 1005 | .Lsie_gmap: |
947 | .Lc_trace_enter: .long do_syscall_trace_enter | 1006 | lg %r14,__SF_EMPTY(%r15) # get control block pointer |
948 | .Lc_trace_exit: .long do_syscall_trace_exit | 1007 | oi __SIE_PROG0C+3(%r14),1 # we are going into SIE now |
949 | .Lc_schedule_tail: .long schedule_tail | 1008 | tm __SIE_PROG20+3(%r14),1 # last exit... |
950 | .Lc_sysc_per: .long .Lsysc_per + 0x80000000 | 1009 | jnz .Lsie_done |
951 | #ifdef CONFIG_TRACE_IRQFLAGS | 1010 | LPP __SF_EMPTY(%r15) # set guest id |
952 | .Lc_hardirqs_on: .long trace_hardirqs_on_caller | 1011 | sie 0(%r14) |
953 | .Lc_hardirqs_off: .long trace_hardirqs_off_caller | 1012 | .Lsie_done: |
954 | #endif | 1013 | LPP __SF_EMPTY+16(%r15) # set host id |
955 | #ifdef CONFIG_LOCKDEP | 1014 | ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE |
956 | .Lc_lockdep_sys_exit: .long lockdep_sys_exit | 1015 | lctlg %c1,%c1,__LC_USER_ASCE # load primary asce |
1016 | # some program checks are suppressing. C code (e.g. do_protection_exception) | ||
1017 | # will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other | ||
1018 | # instructions between sie64a and .Lsie_done should not cause program | ||
1019 | # interrupts. So let's use a nop (47 00 00 00) as a landing pad. | ||
1020 | # See also HANDLE_SIE_INTERCEPT | ||
1021 | .Lrewind_pad: | ||
1022 | nop 0 | ||
1023 | .globl sie_exit | ||
1024 | sie_exit: | ||
1025 | lg %r14,__SF_EMPTY+8(%r15) # load guest register save area | ||
1026 | stmg %r0,%r13,0(%r14) # save guest gprs 0-13 | ||
1027 | lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers | ||
1028 | lg %r2,__SF_EMPTY+24(%r15) # return exit reason code | ||
1029 | br %r14 | ||
1030 | .Lsie_fault: | ||
1031 | lghi %r14,-EFAULT | ||
1032 | stg %r14,__SF_EMPTY+24(%r15) # set exit reason code | ||
1033 | j sie_exit | ||
1034 | |||
1035 | .align 8 | ||
1036 | .Lsie_critical: | ||
1037 | .quad .Lsie_gmap | ||
1038 | .Lsie_critical_length: | ||
1039 | .quad .Lsie_done - .Lsie_gmap | ||
1040 | |||
1041 | EX_TABLE(.Lrewind_pad,.Lsie_fault) | ||
1042 | EX_TABLE(sie_exit,.Lsie_fault) | ||
957 | #endif | 1043 | #endif |
958 | .Lc_critical_start: .long .L__critical_start + 0x80000000 | ||
959 | .Lc_critical_length: .long .L__critical_end - .L__critical_start | ||
960 | 1044 | ||
961 | .section .rodata, "a" | 1045 | .section .rodata, "a" |
962 | #define SYSCALL(esa,esame,emu) .long esa | 1046 | #define SYSCALL(esame,emu) .long esame |
963 | .globl sys_call_table | 1047 | .globl sys_call_table |
964 | sys_call_table: | 1048 | sys_call_table: |
965 | #include "syscalls.S" | 1049 | #include "syscalls.S" |
966 | #undef SYSCALL | 1050 | #undef SYSCALL |
1051 | |||
1052 | #ifdef CONFIG_COMPAT | ||
1053 | |||
1054 | #define SYSCALL(esame,emu) .long emu | ||
1055 | .globl sys_call_table_emu | ||
1056 | sys_call_table_emu: | ||
1057 | #include "syscalls.S" | ||
1058 | #undef SYSCALL | ||
1059 | #endif | ||
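The tail of the merged entry.S above builds both system call tables from one list: syscalls.S is included twice, with the SYSCALL() macro redefined before each include to pick either the native entry point or, under CONFIG_COMPAT, the compat wrapper. Below is a minimal, self-contained C sketch of that X-macro technique; the entry names are hypothetical placeholders rather than the real syscalls.S contents, and the real tables emit .long function addresses instead of strings.

	/* X-macro sketch: one list, two tables, SYSCALL() redefined per expansion. */
	#include <stdio.h>

	/* Stand-in for syscalls.S: each entry names a native and a compat handler. */
	#define SYSCALL_LIST \
		SYSCALL(sys_example_read,  compat_sys_example_read)  \
		SYSCALL(sys_example_write, compat_sys_example_write)

	/* First expansion: keep the native (esame) column. */
	#define SYSCALL(esame, emu) #esame,
	static const char *sys_call_table[] = { SYSCALL_LIST };
	#undef SYSCALL

	/* Second expansion: keep the compat (emu) column. */
	#define SYSCALL(esame, emu) #emu,
	static const char *sys_call_table_emu[] = { SYSCALL_LIST };
	#undef SYSCALL

	int main(void)
	{
		unsigned int i;

		/* Print both tables side by side, index by index. */
		for (i = 0; i < sizeof(sys_call_table) / sizeof(sys_call_table[0]); i++)
			printf("%u: %s / %s\n", i,
			       sys_call_table[i], sys_call_table_emu[i]);
		return 0;
	}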
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S deleted file mode 100644 index c329446a951d..000000000000 --- a/arch/s390/kernel/entry64.S +++ /dev/null | |||
@@ -1,1059 +0,0 @@ | |||
1 | /* | ||
2 | * S390 low-level entry points. | ||
3 | * | ||
4 | * Copyright IBM Corp. 1999, 2012 | ||
5 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), | ||
6 | * Hartmut Penner (hp@de.ibm.com), | ||
7 | * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), | ||
8 | * Heiko Carstens <heiko.carstens@de.ibm.com> | ||
9 | */ | ||
10 | |||
11 | #include <linux/init.h> | ||
12 | #include <linux/linkage.h> | ||
13 | #include <asm/processor.h> | ||
14 | #include <asm/cache.h> | ||
15 | #include <asm/errno.h> | ||
16 | #include <asm/ptrace.h> | ||
17 | #include <asm/thread_info.h> | ||
18 | #include <asm/asm-offsets.h> | ||
19 | #include <asm/unistd.h> | ||
20 | #include <asm/page.h> | ||
21 | #include <asm/sigp.h> | ||
22 | #include <asm/irq.h> | ||
23 | |||
24 | __PT_R0 = __PT_GPRS | ||
25 | __PT_R1 = __PT_GPRS + 8 | ||
26 | __PT_R2 = __PT_GPRS + 16 | ||
27 | __PT_R3 = __PT_GPRS + 24 | ||
28 | __PT_R4 = __PT_GPRS + 32 | ||
29 | __PT_R5 = __PT_GPRS + 40 | ||
30 | __PT_R6 = __PT_GPRS + 48 | ||
31 | __PT_R7 = __PT_GPRS + 56 | ||
32 | __PT_R8 = __PT_GPRS + 64 | ||
33 | __PT_R9 = __PT_GPRS + 72 | ||
34 | __PT_R10 = __PT_GPRS + 80 | ||
35 | __PT_R11 = __PT_GPRS + 88 | ||
36 | __PT_R12 = __PT_GPRS + 96 | ||
37 | __PT_R13 = __PT_GPRS + 104 | ||
38 | __PT_R14 = __PT_GPRS + 112 | ||
39 | __PT_R15 = __PT_GPRS + 120 | ||
40 | |||
41 | STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER | ||
42 | STACK_SIZE = 1 << STACK_SHIFT | ||
43 | STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE | ||
44 | |||
45 | _TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ | ||
46 | _TIF_UPROBE) | ||
47 | _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ | ||
48 | _TIF_SYSCALL_TRACEPOINT) | ||
49 | _CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE) | ||
50 | _PIF_WORK = (_PIF_PER_TRAP) | ||
51 | |||
52 | #define BASED(name) name-system_call(%r13) | ||
53 | |||
54 | .macro TRACE_IRQS_ON | ||
55 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
56 | basr %r2,%r0 | ||
57 | brasl %r14,trace_hardirqs_on_caller | ||
58 | #endif | ||
59 | .endm | ||
60 | |||
61 | .macro TRACE_IRQS_OFF | ||
62 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
63 | basr %r2,%r0 | ||
64 | brasl %r14,trace_hardirqs_off_caller | ||
65 | #endif | ||
66 | .endm | ||
67 | |||
68 | .macro LOCKDEP_SYS_EXIT | ||
69 | #ifdef CONFIG_LOCKDEP | ||
70 | tm __PT_PSW+1(%r11),0x01 # returning to user ? | ||
71 | jz .+10 | ||
72 | brasl %r14,lockdep_sys_exit | ||
73 | #endif | ||
74 | .endm | ||
75 | |||
76 | .macro LPP newpp | ||
77 | #if IS_ENABLED(CONFIG_KVM) | ||
78 | tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_LPP | ||
79 | jz .+8 | ||
80 | .insn s,0xb2800000,\newpp | ||
81 | #endif | ||
82 | .endm | ||
83 | |||
84 | .macro HANDLE_SIE_INTERCEPT scratch,reason | ||
85 | #if IS_ENABLED(CONFIG_KVM) | ||
86 | tmhh %r8,0x0001 # interrupting from user ? | ||
87 | jnz .+62 | ||
88 | lgr \scratch,%r9 | ||
89 | slg \scratch,BASED(.Lsie_critical) | ||
90 | clg \scratch,BASED(.Lsie_critical_length) | ||
91 | .if \reason==1 | ||
92 | # Some program interrupts are suppressing (e.g. protection). | ||
93 | # We must also check the instruction after SIE in that case. | ||
94 | # do_protection_exception will rewind to .Lrewind_pad | ||
95 | jh .+42 | ||
96 | .else | ||
97 | jhe .+42 | ||
98 | .endif | ||
99 | lg %r14,__SF_EMPTY(%r15) # get control block pointer | ||
100 | LPP __SF_EMPTY+16(%r15) # set host id | ||
101 | ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE | ||
102 | lctlg %c1,%c1,__LC_USER_ASCE # load primary asce | ||
103 | larl %r9,sie_exit # skip forward to sie_exit | ||
104 | mvi __SF_EMPTY+31(%r15),\reason # set exit reason | ||
105 | #endif | ||
106 | .endm | ||
107 | |||
108 | .macro CHECK_STACK stacksize,savearea | ||
109 | #ifdef CONFIG_CHECK_STACK | ||
110 | tml %r15,\stacksize - CONFIG_STACK_GUARD | ||
111 | lghi %r14,\savearea | ||
112 | jz stack_overflow | ||
113 | #endif | ||
114 | .endm | ||
115 | |||
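CHECK_STACK guards against a stack pointer that has already run into the guard area at the low end of the kernel stack. Roughly, in C (a sketch; the two constants stand in for the kernel's STACK_SIZE and CONFIG_STACK_GUARD, and kernel_stack_overflow() is the handler the macro ultimately branches to):

#define STACK_SIZE_SKETCH	0x4000UL	/* placeholder for STACK_SIZE */
#define STACK_GUARD_SKETCH	0x100UL		/* placeholder for CONFIG_STACK_GUARD */

extern void kernel_stack_overflow(void);	/* panics with a backtrace */

static inline void check_stack_sketch(unsigned long sp)
{
	/* "tml %r15,STACK_SIZE - CONFIG_STACK_GUARD": if none of the tested
	 * bits of the stack pointer is set, %r15 points into the guard pages
	 * at the bottom of the stack, i.e. the stack has overflowed */
	if ((sp & (STACK_SIZE_SKETCH - STACK_GUARD_SKETCH)) == 0)
		kernel_stack_overflow();
}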
116 | .macro SWITCH_ASYNC savearea,stack,shift | ||
117 | tmhh %r8,0x0001 # interrupting from user ? | ||
118 | jnz 1f | ||
119 | lgr %r14,%r9 | ||
120 | slg %r14,BASED(.Lcritical_start) | ||
121 | clg %r14,BASED(.Lcritical_length) | ||
122 | jhe 0f | ||
123 | lghi %r11,\savearea # inside critical section, do cleanup | ||
124 | brasl %r14,cleanup_critical | ||
125 | tmhh %r8,0x0001 # retest problem state after cleanup | ||
126 | jnz 1f | ||
127 | 0: lg %r14,\stack # are we already on the target stack? | ||
128 | slgr %r14,%r15 | ||
129 | srag %r14,%r14,\shift | ||
130 | jnz 1f | ||
131 | CHECK_STACK 1<<\shift,\savearea | ||
132 | aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) | ||
133 | j 2f | ||
134 | 1: lg %r15,\stack # load target stack | ||
135 | 2: la %r11,STACK_FRAME_OVERHEAD(%r15) | ||
136 | .endm | ||
137 | |||
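SWITCH_ASYNC decides whether the asynchronous interrupt already arrived on its target stack: if the difference between the stack's top value and %r15, shifted right by the stack-size shift, is zero, the CPU is already there and only a new pt_regs area is carved out; otherwise %r15 is switched to the target stack, which has that area pre-reserved. A rough C rendering of just that decision (the cleanup_critical handling at the start of the macro is omitted, and the constants are illustrative):

#define FRAME_OVERHEAD_SKETCH	160UL	/* stands for STACK_FRAME_OVERHEAD */
#define PT_SIZE_SKETCH		336UL	/* stands for __PT_SIZE */

static unsigned long switch_async_sketch(unsigned long sp,
					 unsigned long stack, unsigned int shift)
{
	if (((stack - sp) >> shift) == 0)
		/* already on the target stack: just make room for pt_regs */
		sp -= FRAME_OVERHEAD_SKETCH + PT_SIZE_SKETCH;
	else
		/* somewhere else: switch to the target stack */
		sp = stack;
	/* the caller continues with sp as %r15 and sp + FRAME_OVERHEAD as %r11 */
	return sp;
}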
138 | .macro UPDATE_VTIME scratch,enter_timer | ||
139 | lg \scratch,__LC_EXIT_TIMER | ||
140 | slg \scratch,\enter_timer | ||
141 | alg \scratch,__LC_USER_TIMER | ||
142 | stg \scratch,__LC_USER_TIMER | ||
143 | lg \scratch,__LC_LAST_UPDATE_TIMER | ||
144 | slg \scratch,__LC_EXIT_TIMER | ||
145 | alg \scratch,__LC_SYSTEM_TIMER | ||
146 | stg \scratch,__LC_SYSTEM_TIMER | ||
147 | mvc __LC_LAST_UPDATE_TIMER(8),\enter_timer | ||
148 | .endm | ||
149 | |||
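UPDATE_VTIME charges the interval since the last exit to user space to the user timer and the kernel interval before that to the system timer, then resets the accounting timestamp. The s390 CPU timer counts down, so the earlier snapshot holds the larger value. As a C sketch (field names are descriptive, not the lowcore offsets):

struct vtime_sketch {
	unsigned long long exit_timer;		/* CPU timer at the last exit to user space */
	unsigned long long last_update_timer;	/* CPU timer at the last accounting update */
	unsigned long long user_timer;
	unsigned long long system_timer;
};

static void update_vtime_sketch(struct vtime_sketch *t, unsigned long long enter_timer)
{
	t->user_timer   += t->exit_timer - enter_timer;		  /* time spent in user space */
	t->system_timer += t->last_update_timer - t->exit_timer; /* preceding kernel time */
	t->last_update_timer = enter_timer;
}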
150 | .macro LAST_BREAK scratch | ||
151 | srag \scratch,%r10,23 | ||
152 | jz .+10 | ||
153 | stg %r10,__TI_last_break(%r12) | ||
154 | .endm | ||
155 | |||
156 | .macro REENABLE_IRQS | ||
157 | stg %r8,__LC_RETURN_PSW | ||
158 | ni __LC_RETURN_PSW,0xbf | ||
159 | ssm __LC_RETURN_PSW | ||
160 | .endm | ||
161 | |||
162 | .macro STCK savearea | ||
163 | #ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES | ||
164 | .insn s,0xb27c0000,\savearea # store clock fast | ||
165 | #else | ||
166 | .insn s,0xb2050000,\savearea # store clock | ||
167 | #endif | ||
168 | .endm | ||
169 | |||
170 | .section .kprobes.text, "ax" | ||
171 | |||
172 | /* | ||
173 | * Scheduler resume function, called by switch_to | ||
174 | * gpr2 = (task_struct *) prev | ||
175 | * gpr3 = (task_struct *) next | ||
176 | * Returns: | ||
177 | * gpr2 = prev | ||
178 | */ | ||
179 | ENTRY(__switch_to) | ||
180 | stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task | ||
181 | stg %r15,__THREAD_ksp(%r2) # store kernel stack of prev | ||
182 | lg %r4,__THREAD_info(%r2) # get thread_info of prev | ||
183 | lg %r5,__THREAD_info(%r3) # get thread_info of next | ||
184 | lgr %r15,%r5 | ||
185 | aghi %r15,STACK_INIT # end of kernel stack of next | ||
186 | stg %r3,__LC_CURRENT # store task struct of next | ||
187 | stg %r5,__LC_THREAD_INFO # store thread info of next | ||
188 | stg %r15,__LC_KERNEL_STACK # store end of kernel stack | ||
189 | lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4 | ||
190 | mvc __LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next | ||
191 | lg %r15,__THREAD_ksp(%r3) # load kernel stack of next | ||
192 | lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task | ||
193 | br %r14 | ||
194 | |||
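Seen from C this is the switch_to() backend; going by the register comments above, its contract is (prototype assumed from those comments, not quoted from a header):

struct task_struct;

/* %r2 = prev, %r3 = next; returns prev (still in %r2) so the caller can
 * finish bookkeeping for the task that was switched away from */
struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *next);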
195 | .L__critical_start: | ||
196 | /* | ||
197 | * SVC interrupt handler routine. System calls are synchronous events and | ||
198 | * are executed with interrupts enabled. | ||
199 | */ | ||
200 | |||
201 | ENTRY(system_call) | ||
202 | stpt __LC_SYNC_ENTER_TIMER | ||
203 | .Lsysc_stmg: | ||
204 | stmg %r8,%r15,__LC_SAVE_AREA_SYNC | ||
205 | lg %r10,__LC_LAST_BREAK | ||
206 | lg %r12,__LC_THREAD_INFO | ||
207 | lghi %r14,_PIF_SYSCALL | ||
208 | .Lsysc_per: | ||
209 | lg %r15,__LC_KERNEL_STACK | ||
210 | la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs | ||
211 | .Lsysc_vtime: | ||
212 | UPDATE_VTIME %r13,__LC_SYNC_ENTER_TIMER | ||
213 | LAST_BREAK %r13 | ||
214 | stmg %r0,%r7,__PT_R0(%r11) | ||
215 | mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC | ||
216 | mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW | ||
217 | mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC | ||
218 | stg %r14,__PT_FLAGS(%r11) | ||
219 | .Lsysc_do_svc: | ||
220 | lg %r10,__TI_sysc_table(%r12) # address of system call table | ||
221 | llgh %r8,__PT_INT_CODE+2(%r11) | ||
222 | slag %r8,%r8,2 # shift and test for svc 0 | ||
223 | jnz .Lsysc_nr_ok | ||
224 | # svc 0: system call number in %r1 | ||
225 | llgfr %r1,%r1 # clear high word in r1 | ||
226 | cghi %r1,NR_syscalls | ||
227 | jnl .Lsysc_nr_ok | ||
228 | sth %r1,__PT_INT_CODE+2(%r11) | ||
229 | slag %r8,%r1,2 | ||
230 | .Lsysc_nr_ok: | ||
231 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) | ||
232 | stg %r2,__PT_ORIG_GPR2(%r11) | ||
233 | stg %r7,STACK_FRAME_OVERHEAD(%r15) | ||
234 | lgf %r9,0(%r8,%r10) # get system call address | ||
235 | tm __TI_flags+7(%r12),_TIF_TRACE | ||
236 | jnz .Lsysc_tracesys | ||
237 | basr %r14,%r9 # call sys_xxxx | ||
238 | stg %r2,__PT_R2(%r11) # store return value | ||
239 | |||
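Condensed into C, the dispatch above does roughly the following; the struct is a stand-in for pt_regs, the tracing path is omitted, and the table entry is a 32-bit value exactly as the lgf load suggests:

struct svc_regs_sketch {
	unsigned int int_code;		/* low halfword: svc number */
	unsigned long gprs[16];
};

typedef long (*svc_fn_sketch)(long, long, long, long, long, long);

static void do_svc_sketch(struct svc_regs_sketch *regs, const int *table,
			  unsigned int nr_syscalls)
{
	unsigned int nr = regs->int_code & 0xffff;
	svc_fn_sketch fn;

	if (nr == 0) {				/* svc 0: number is passed in %r1 */
		nr = (unsigned int)regs->gprs[1];
		if (nr >= nr_syscalls)
			nr = 0;			/* entry 0 returns -ENOSYS */
	}
	fn = (svc_fn_sketch)(long)table[nr];	/* lgf: sign-extended 32-bit entry */
	regs->gprs[2] = fn(regs->gprs[2], regs->gprs[3], regs->gprs[4],
			   regs->gprs[5], regs->gprs[6], regs->gprs[7]);
}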
240 | .Lsysc_return: | ||
241 | LOCKDEP_SYS_EXIT | ||
242 | .Lsysc_tif: | ||
243 | tm __PT_PSW+1(%r11),0x01 # returning to user ? | ||
244 | jno .Lsysc_restore | ||
245 | tm __PT_FLAGS+7(%r11),_PIF_WORK | ||
246 | jnz .Lsysc_work | ||
247 | tm __TI_flags+7(%r12),_TIF_WORK | ||
248 | jnz .Lsysc_work # check for work | ||
249 | tm __LC_CPU_FLAGS+7,_CIF_WORK | ||
250 | jnz .Lsysc_work | ||
251 | .Lsysc_restore: | ||
252 | lg %r14,__LC_VDSO_PER_CPU | ||
253 | lmg %r0,%r10,__PT_R0(%r11) | ||
254 | mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) | ||
255 | stpt __LC_EXIT_TIMER | ||
256 | mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER | ||
257 | lmg %r11,%r15,__PT_R11(%r11) | ||
258 | lpswe __LC_RETURN_PSW | ||
259 | .Lsysc_done: | ||
260 | |||
261 | # | ||
262 | # One of the work bits is on. Find out which one. | ||
263 | # | ||
264 | .Lsysc_work: | ||
265 | tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING | ||
266 | jo .Lsysc_mcck_pending | ||
267 | tm __TI_flags+7(%r12),_TIF_NEED_RESCHED | ||
268 | jo .Lsysc_reschedule | ||
269 | #ifdef CONFIG_UPROBES | ||
270 | tm __TI_flags+7(%r12),_TIF_UPROBE | ||
271 | jo .Lsysc_uprobe_notify | ||
272 | #endif | ||
273 | tm __PT_FLAGS+7(%r11),_PIF_PER_TRAP | ||
274 | jo .Lsysc_singlestep | ||
275 | tm __TI_flags+7(%r12),_TIF_SIGPENDING | ||
276 | jo .Lsysc_sigpending | ||
277 | tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME | ||
278 | jo .Lsysc_notify_resume | ||
279 | tm __LC_CPU_FLAGS+7,_CIF_ASCE | ||
280 | jo .Lsysc_uaccess | ||
281 | j .Lsysc_return # beware of critical section cleanup | ||
282 | |||
283 | # | ||
284 | # _TIF_NEED_RESCHED is set, call schedule | ||
285 | # | ||
286 | .Lsysc_reschedule: | ||
287 | larl %r14,.Lsysc_return | ||
288 | jg schedule | ||
289 | |||
290 | # | ||
291 | # _CIF_MCCK_PENDING is set, call handler | ||
292 | # | ||
293 | .Lsysc_mcck_pending: | ||
294 | larl %r14,.Lsysc_return | ||
295 | jg s390_handle_mcck # TIF bit will be cleared by handler | ||
296 | |||
297 | # | ||
298 | # _CIF_ASCE is set, load user space asce | ||
299 | # | ||
300 | .Lsysc_uaccess: | ||
301 | ni __LC_CPU_FLAGS+7,255-_CIF_ASCE | ||
302 | lctlg %c1,%c1,__LC_USER_ASCE # load primary asce | ||
303 | j .Lsysc_return | ||
304 | |||
305 | # | ||
306 | # _TIF_SIGPENDING is set, call do_signal | ||
307 | # | ||
308 | .Lsysc_sigpending: | ||
309 | lgr %r2,%r11 # pass pointer to pt_regs | ||
310 | brasl %r14,do_signal | ||
311 | tm __PT_FLAGS+7(%r11),_PIF_SYSCALL | ||
312 | jno .Lsysc_return | ||
313 | lmg %r2,%r7,__PT_R2(%r11) # load svc arguments | ||
314 | lg %r10,__TI_sysc_table(%r12) # address of system call table | ||
315 | lghi %r8,0 # svc 0 returns -ENOSYS | ||
316 | llgh %r1,__PT_INT_CODE+2(%r11) # load new svc number | ||
317 | cghi %r1,NR_syscalls | ||
318 | jnl .Lsysc_nr_ok # invalid svc number -> do svc 0 | ||
319 | slag %r8,%r1,2 | ||
320 | j .Lsysc_nr_ok # restart svc | ||
321 | |||
322 | # | ||
323 | # _TIF_NOTIFY_RESUME is set, call do_notify_resume | ||
324 | # | ||
325 | .Lsysc_notify_resume: | ||
326 | lgr %r2,%r11 # pass pointer to pt_regs | ||
327 | larl %r14,.Lsysc_return | ||
328 | jg do_notify_resume | ||
329 | |||
330 | # | ||
331 | # _TIF_UPROBE is set, call uprobe_notify_resume | ||
332 | # | ||
333 | #ifdef CONFIG_UPROBES | ||
334 | .Lsysc_uprobe_notify: | ||
335 | lgr %r2,%r11 # pass pointer to pt_regs | ||
336 | larl %r14,.Lsysc_return | ||
337 | jg uprobe_notify_resume | ||
338 | #endif | ||
339 | |||
340 | # | ||
341 | # _PIF_PER_TRAP is set, call do_per_trap | ||
342 | # | ||
343 | .Lsysc_singlestep: | ||
344 | ni __PT_FLAGS+7(%r11),255-_PIF_PER_TRAP | ||
345 | lgr %r2,%r11 # pass pointer to pt_regs | ||
346 | larl %r14,.Lsysc_return | ||
347 | jg do_per_trap | ||
348 | |||
349 | # | ||
350 | # call tracehook_report_syscall_entry/tracehook_report_syscall_exit before | ||
351 | # and after the system call | ||
352 | # | ||
353 | .Lsysc_tracesys: | ||
354 | lgr %r2,%r11 # pass pointer to pt_regs | ||
355 | la %r3,0 | ||
356 | llgh %r0,__PT_INT_CODE+2(%r11) | ||
357 | stg %r0,__PT_R2(%r11) | ||
358 | brasl %r14,do_syscall_trace_enter | ||
359 | lghi %r0,NR_syscalls | ||
360 | clgr %r0,%r2 | ||
361 | jnh .Lsysc_tracenogo | ||
362 | sllg %r8,%r2,2 | ||
363 | lgf %r9,0(%r8,%r10) | ||
364 | .Lsysc_tracego: | ||
365 | lmg %r3,%r7,__PT_R3(%r11) | ||
366 | stg %r7,STACK_FRAME_OVERHEAD(%r15) | ||
367 | lg %r2,__PT_ORIG_GPR2(%r11) | ||
368 | basr %r14,%r9 # call sys_xxx | ||
369 | stg %r2,__PT_R2(%r11) # store return value | ||
370 | .Lsysc_tracenogo: | ||
371 | tm __TI_flags+7(%r12),_TIF_TRACE | ||
372 | jz .Lsysc_return | ||
373 | lgr %r2,%r11 # pass pointer to pt_regs | ||
374 | larl %r14,.Lsysc_return | ||
375 | jg do_syscall_trace_exit | ||
376 | |||
377 | # | ||
378 | # a new process exits the kernel with ret_from_fork | ||
379 | # | ||
380 | ENTRY(ret_from_fork) | ||
381 | la %r11,STACK_FRAME_OVERHEAD(%r15) | ||
382 | lg %r12,__LC_THREAD_INFO | ||
383 | brasl %r14,schedule_tail | ||
384 | TRACE_IRQS_ON | ||
385 | ssm __LC_SVC_NEW_PSW # reenable interrupts | ||
386 | tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ? | ||
387 | jne .Lsysc_tracenogo | ||
388 | # it's a kernel thread | ||
389 | lmg %r9,%r10,__PT_R9(%r11) # load gprs | ||
390 | ENTRY(kernel_thread_starter) | ||
391 | la %r2,0(%r10) | ||
392 | basr %r14,%r9 | ||
393 | j .Lsysc_tracenogo | ||
394 | |||
395 | /* | ||
396 | * Program check handler routine | ||
397 | */ | ||
398 | |||
399 | ENTRY(pgm_check_handler) | ||
400 | stpt __LC_SYNC_ENTER_TIMER | ||
401 | stmg %r8,%r15,__LC_SAVE_AREA_SYNC | ||
402 | lg %r10,__LC_LAST_BREAK | ||
403 | lg %r12,__LC_THREAD_INFO | ||
404 | larl %r13,system_call | ||
405 | lmg %r8,%r9,__LC_PGM_OLD_PSW | ||
406 | HANDLE_SIE_INTERCEPT %r14,1 | ||
407 | tmhh %r8,0x0001 # test problem state bit | ||
408 | jnz 1f # -> fault in user space | ||
409 | tmhh %r8,0x4000 # PER bit set in old PSW ? | ||
410 | jnz 0f # -> enabled, can't be a double fault | ||
411 | tm __LC_PGM_ILC+3,0x80 # check for per exception | ||
412 | jnz .Lpgm_svcper # -> single stepped svc | ||
413 | 0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC | ||
414 | aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) | ||
415 | j 2f | ||
416 | 1: UPDATE_VTIME %r14,__LC_SYNC_ENTER_TIMER | ||
417 | LAST_BREAK %r14 | ||
418 | lg %r15,__LC_KERNEL_STACK | ||
419 | lg %r14,__TI_task(%r12) | ||
420 | lghi %r13,__LC_PGM_TDB | ||
421 | tm __LC_PGM_ILC+2,0x02 # check for transaction abort | ||
422 | jz 2f | ||
423 | mvc __THREAD_trap_tdb(256,%r14),0(%r13) | ||
424 | 2: la %r11,STACK_FRAME_OVERHEAD(%r15) | ||
425 | stmg %r0,%r7,__PT_R0(%r11) | ||
426 | mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC | ||
427 | stmg %r8,%r9,__PT_PSW(%r11) | ||
428 | mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC | ||
429 | mvc __PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE | ||
430 | xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) | ||
431 | stg %r10,__PT_ARGS(%r11) | ||
432 | tm __LC_PGM_ILC+3,0x80 # check for per exception | ||
433 | jz 0f | ||
434 | tmhh %r8,0x0001 # kernel per event ? | ||
435 | jz .Lpgm_kprobe | ||
436 | oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP | ||
437 | mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS | ||
438 | mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE | ||
439 | mvc __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID | ||
440 | 0: REENABLE_IRQS | ||
441 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) | ||
442 | larl %r1,pgm_check_table | ||
443 | llgh %r10,__PT_INT_CODE+2(%r11) | ||
444 | nill %r10,0x007f | ||
445 | sll %r10,2 | ||
446 | je .Lsysc_return | ||
447 | lgf %r1,0(%r10,%r1) # load address of handler routine | ||
448 | lgr %r2,%r11 # pass pointer to pt_regs | ||
449 | basr %r14,%r1 # branch to interrupt-handler | ||
450 | j .Lsysc_return | ||
451 | |||
452 | # | ||
453 | # PER event in supervisor state, must be kprobes | ||
454 | # | ||
455 | .Lpgm_kprobe: | ||
456 | REENABLE_IRQS | ||
457 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) | ||
458 | lgr %r2,%r11 # pass pointer to pt_regs | ||
459 | brasl %r14,do_per_trap | ||
460 | j .Lsysc_return | ||
461 | |||
462 | # | ||
463 | # single stepped system call | ||
464 | # | ||
465 | .Lpgm_svcper: | ||
466 | mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW | ||
467 | larl %r14,.Lsysc_per | ||
468 | stg %r14,__LC_RETURN_PSW+8 | ||
469 | lghi %r14,_PIF_SYSCALL | _PIF_PER_TRAP | ||
470 | lpswe __LC_RETURN_PSW # branch to .Lsysc_per and enable irqs | ||
471 | |||
472 | /* | ||
473 | * IO interrupt handler routine | ||
474 | */ | ||
475 | ENTRY(io_int_handler) | ||
476 | STCK __LC_INT_CLOCK | ||
477 | stpt __LC_ASYNC_ENTER_TIMER | ||
478 | stmg %r8,%r15,__LC_SAVE_AREA_ASYNC | ||
479 | lg %r10,__LC_LAST_BREAK | ||
480 | lg %r12,__LC_THREAD_INFO | ||
481 | larl %r13,system_call | ||
482 | lmg %r8,%r9,__LC_IO_OLD_PSW | ||
483 | HANDLE_SIE_INTERCEPT %r14,2 | ||
484 | SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT | ||
485 | tmhh %r8,0x0001 # interrupting from user? | ||
486 | jz .Lio_skip | ||
487 | UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER | ||
488 | LAST_BREAK %r14 | ||
489 | .Lio_skip: | ||
490 | stmg %r0,%r7,__PT_R0(%r11) | ||
491 | mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC | ||
492 | stmg %r8,%r9,__PT_PSW(%r11) | ||
493 | mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID | ||
494 | xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) | ||
495 | TRACE_IRQS_OFF | ||
496 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) | ||
497 | .Lio_loop: | ||
498 | lgr %r2,%r11 # pass pointer to pt_regs | ||
499 | lghi %r3,IO_INTERRUPT | ||
500 | tm __PT_INT_CODE+8(%r11),0x80 # adapter interrupt ? | ||
501 | jz .Lio_call | ||
502 | lghi %r3,THIN_INTERRUPT | ||
503 | .Lio_call: | ||
504 | brasl %r14,do_IRQ | ||
505 | tm __LC_MACHINE_FLAGS+6,0x10 # MACHINE_FLAG_LPAR | ||
506 | jz .Lio_return | ||
507 | tpi 0 | ||
508 | jz .Lio_return | ||
509 | mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID | ||
510 | j .Lio_loop | ||
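On LPAR the handler loops: after do_IRQ() it issues tpi to pick up any further pending I/O interruption and processes it directly, instead of returning and taking another full interrupt for it. In rough C (the helpers suffixed _sketch are stand-ins for the inline tests, not kernel functions):

static void io_int_loop_sketch(struct pt_regs *regs)
{
	do {
		/* bit 0x80 in the stored interruption code marks an adapter interrupt */
		do_IRQ(regs, adapter_irq_sketch(regs) ? THIN_INTERRUPT : IO_INTERRUPT);
	} while (MACHINE_IS_LPAR && tpi_sketch(regs));	/* tpi: more I/O pending? */
}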
511 | .Lio_return: | ||
512 | LOCKDEP_SYS_EXIT | ||
513 | TRACE_IRQS_ON | ||
514 | .Lio_tif: | ||
515 | tm __TI_flags+7(%r12),_TIF_WORK | ||
516 | jnz .Lio_work # there is work to do (signals etc.) | ||
517 | tm __LC_CPU_FLAGS+7,_CIF_WORK | ||
518 | jnz .Lio_work | ||
519 | .Lio_restore: | ||
520 | lg %r14,__LC_VDSO_PER_CPU | ||
521 | lmg %r0,%r10,__PT_R0(%r11) | ||
522 | mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) | ||
523 | stpt __LC_EXIT_TIMER | ||
524 | mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER | ||
525 | lmg %r11,%r15,__PT_R11(%r11) | ||
526 | lpswe __LC_RETURN_PSW | ||
527 | .Lio_done: | ||
528 | |||
529 | # | ||
530 | # There is work to do, find out in which context we have been interrupted: | ||
531 | # 1) if we return to user space we can do all _TIF_WORK work | ||
532 | # 2) if we return to kernel code and kvm is enabled check if we need to | ||
533 | # modify the psw to leave SIE | ||
534 | # 3) if we return to kernel code and preemptive scheduling is enabled check | ||
535 | # the preemption counter and if it is zero call preempt_schedule_irq | ||
536 | # Before any work can be done, a switch to the kernel stack is required. | ||
537 | # | ||
538 | .Lio_work: | ||
539 | tm __PT_PSW+1(%r11),0x01 # returning to user ? | ||
540 | jo .Lio_work_user # yes -> do resched & signal | ||
541 | #ifdef CONFIG_PREEMPT | ||
542 | # check for preemptive scheduling | ||
543 | icm %r0,15,__TI_precount(%r12) | ||
544 | jnz .Lio_restore # preemption is disabled | ||
545 | tm __TI_flags+7(%r12),_TIF_NEED_RESCHED | ||
546 | jno .Lio_restore | ||
547 | # switch to kernel stack | ||
548 | lg %r1,__PT_R15(%r11) | ||
549 | aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) | ||
550 | mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) | ||
551 | xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) | ||
552 | la %r11,STACK_FRAME_OVERHEAD(%r1) | ||
553 | lgr %r15,%r1 | ||
554 | # TRACE_IRQS_ON already done at .Lio_return, call | ||
555 | # TRACE_IRQS_OFF to keep things symmetrical | ||
556 | TRACE_IRQS_OFF | ||
557 | brasl %r14,preempt_schedule_irq | ||
558 | j .Lio_return | ||
559 | #else | ||
560 | j .Lio_restore | ||
561 | #endif | ||
562 | |||
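The comment block above boils down to this decision, sketched in C (the _sketch helpers stand for the PSW and flag tests done inline in the assembly):

static void io_work_sketch(struct pt_regs *regs)
{
	if (psw_user_mode_sketch(regs)) {
		/* back to user space: switch to the kernel stack and run the
		 * full _TIF_WORK / _CIF_WORK handling */
		handle_work_user_sketch(regs);
		return;
	}
#ifdef CONFIG_PREEMPT
	/* back to kernel code: preempt only if allowed and requested */
	if (!preempt_count_sketch() && need_resched_sketch())
		preempt_schedule_irq();		/* on the kernel stack, irqs traced off */
#endif
	/* otherwise restore registers and return to the interrupted context */
}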
563 | # | ||
564 | # Need to do work before returning to userspace, switch to kernel stack | ||
565 | # | ||
566 | .Lio_work_user: | ||
567 | lg %r1,__LC_KERNEL_STACK | ||
568 | mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) | ||
569 | xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) | ||
570 | la %r11,STACK_FRAME_OVERHEAD(%r1) | ||
571 | lgr %r15,%r1 | ||
572 | |||
573 | # | ||
574 | # One of the work bits is on. Find out which one. | ||
575 | # | ||
576 | .Lio_work_tif: | ||
577 | tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING | ||
578 | jo .Lio_mcck_pending | ||
579 | tm __TI_flags+7(%r12),_TIF_NEED_RESCHED | ||
580 | jo .Lio_reschedule | ||
581 | tm __TI_flags+7(%r12),_TIF_SIGPENDING | ||
582 | jo .Lio_sigpending | ||
583 | tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME | ||
584 | jo .Lio_notify_resume | ||
585 | tm __LC_CPU_FLAGS+7,_CIF_ASCE | ||
586 | jo .Lio_uaccess | ||
587 | j .Lio_return # beware of critical section cleanup | ||
588 | |||
589 | # | ||
590 | # _CIF_MCCK_PENDING is set, call handler | ||
591 | # | ||
592 | .Lio_mcck_pending: | ||
593 | # TRACE_IRQS_ON already done at .Lio_return | ||
594 | brasl %r14,s390_handle_mcck # TIF bit will be cleared by handler | ||
595 | TRACE_IRQS_OFF | ||
596 | j .Lio_return | ||
597 | |||
598 | # | ||
599 | # _CIF_ASCE is set, load user space asce | ||
600 | # | ||
601 | .Lio_uaccess: | ||
602 | ni __LC_CPU_FLAGS+7,255-_CIF_ASCE | ||
603 | lctlg %c1,%c1,__LC_USER_ASCE # load primary asce | ||
604 | j .Lio_return | ||
605 | |||
606 | # | ||
607 | # _TIF_NEED_RESCHED is set, call schedule | ||
608 | # | ||
609 | .Lio_reschedule: | ||
610 | # TRACE_IRQS_ON already done at .Lio_return | ||
611 | ssm __LC_SVC_NEW_PSW # reenable interrupts | ||
612 | brasl %r14,schedule # call scheduler | ||
613 | ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts | ||
614 | TRACE_IRQS_OFF | ||
615 | j .Lio_return | ||
616 | |||
617 | # | ||
618 | # _TIF_SIGPENDING is set, call do_signal | ||
619 | # | ||
620 | .Lio_sigpending: | ||
621 | # TRACE_IRQS_ON already done at .Lio_return | ||
622 | ssm __LC_SVC_NEW_PSW # reenable interrupts | ||
623 | lgr %r2,%r11 # pass pointer to pt_regs | ||
624 | brasl %r14,do_signal | ||
625 | ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts | ||
626 | TRACE_IRQS_OFF | ||
627 | j .Lio_return | ||
628 | |||
629 | # | ||
630 | # _TIF_NOTIFY_RESUME is set, call do_notify_resume | ||
631 | # | ||
632 | .Lio_notify_resume: | ||
633 | # TRACE_IRQS_ON already done at .Lio_return | ||
634 | ssm __LC_SVC_NEW_PSW # reenable interrupts | ||
635 | lgr %r2,%r11 # pass pointer to pt_regs | ||
636 | brasl %r14,do_notify_resume | ||
637 | ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts | ||
638 | TRACE_IRQS_OFF | ||
639 | j .Lio_return | ||
640 | |||
641 | /* | ||
642 | * External interrupt handler routine | ||
643 | */ | ||
644 | ENTRY(ext_int_handler) | ||
645 | STCK __LC_INT_CLOCK | ||
646 | stpt __LC_ASYNC_ENTER_TIMER | ||
647 | stmg %r8,%r15,__LC_SAVE_AREA_ASYNC | ||
648 | lg %r10,__LC_LAST_BREAK | ||
649 | lg %r12,__LC_THREAD_INFO | ||
650 | larl %r13,system_call | ||
651 | lmg %r8,%r9,__LC_EXT_OLD_PSW | ||
652 | HANDLE_SIE_INTERCEPT %r14,3 | ||
653 | SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT | ||
654 | tmhh %r8,0x0001 # interrupting from user ? | ||
655 | jz .Lext_skip | ||
656 | UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER | ||
657 | LAST_BREAK %r14 | ||
658 | .Lext_skip: | ||
659 | stmg %r0,%r7,__PT_R0(%r11) | ||
660 | mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC | ||
661 | stmg %r8,%r9,__PT_PSW(%r11) | ||
662 | lghi %r1,__LC_EXT_PARAMS2 | ||
663 | mvc __PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR | ||
664 | mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS | ||
665 | mvc __PT_INT_PARM_LONG(8,%r11),0(%r1) | ||
666 | xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) | ||
667 | TRACE_IRQS_OFF | ||
668 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) | ||
669 | lgr %r2,%r11 # pass pointer to pt_regs | ||
670 | lghi %r3,EXT_INTERRUPT | ||
671 | brasl %r14,do_IRQ | ||
672 | j .Lio_return | ||
673 | |||
674 | /* | ||
675 | * Load idle PSW. The second "half" of this function is in .Lcleanup_idle. | ||
676 | */ | ||
677 | ENTRY(psw_idle) | ||
678 | stg %r3,__SF_EMPTY(%r15) | ||
679 | larl %r1,.Lpsw_idle_lpsw+4 | ||
680 | stg %r1,__SF_EMPTY+8(%r15) | ||
681 | STCK __CLOCK_IDLE_ENTER(%r2) | ||
682 | stpt __TIMER_IDLE_ENTER(%r2) | ||
683 | .Lpsw_idle_lpsw: | ||
684 | lpswe __SF_EMPTY(%r15) | ||
685 | br %r14 | ||
686 | .Lpsw_idle_end: | ||
687 | |||
688 | .L__critical_end: | ||
689 | |||
690 | /* | ||
691 | * Machine check handler routines | ||
692 | */ | ||
693 | ENTRY(mcck_int_handler) | ||
694 | STCK __LC_MCCK_CLOCK | ||
695 | la %r1,4095 # revalidate r1 | ||
696 | spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer | ||
697 | lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs | ||
698 | lg %r10,__LC_LAST_BREAK | ||
699 | lg %r12,__LC_THREAD_INFO | ||
700 | larl %r13,system_call | ||
701 | lmg %r8,%r9,__LC_MCK_OLD_PSW | ||
702 | HANDLE_SIE_INTERCEPT %r14,4 | ||
703 | tm __LC_MCCK_CODE,0x80 # system damage? | ||
704 | jo .Lmcck_panic # yes -> rest of mcck code invalid | ||
705 | lghi %r14,__LC_CPU_TIMER_SAVE_AREA | ||
706 | mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) | ||
707 | tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid? | ||
708 | jo 3f | ||
709 | la %r14,__LC_SYNC_ENTER_TIMER | ||
710 | clc 0(8,%r14),__LC_ASYNC_ENTER_TIMER | ||
711 | jl 0f | ||
712 | la %r14,__LC_ASYNC_ENTER_TIMER | ||
713 | 0: clc 0(8,%r14),__LC_EXIT_TIMER | ||
714 | jl 1f | ||
715 | la %r14,__LC_EXIT_TIMER | ||
716 | 1: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER | ||
717 | jl 2f | ||
718 | la %r14,__LC_LAST_UPDATE_TIMER | ||
719 | 2: spt 0(%r14) | ||
720 | mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) | ||
721 | 3: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? | ||
722 | jno .Lmcck_panic # no -> skip cleanup critical | ||
723 | SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_PANIC_STACK,PAGE_SHIFT | ||
724 | tm %r8,0x0001 # interrupting from user ? | ||
725 | jz .Lmcck_skip | ||
726 | UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER | ||
727 | LAST_BREAK %r14 | ||
728 | .Lmcck_skip: | ||
729 | lghi %r14,__LC_GPREGS_SAVE_AREA+64 | ||
730 | stmg %r0,%r7,__PT_R0(%r11) | ||
731 | mvc __PT_R8(64,%r11),0(%r14) | ||
732 | stmg %r8,%r9,__PT_PSW(%r11) | ||
733 | xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) | ||
734 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) | ||
735 | lgr %r2,%r11 # pass pointer to pt_regs | ||
736 | brasl %r14,s390_do_machine_check | ||
737 | tm __PT_PSW+1(%r11),0x01 # returning to user ? | ||
738 | jno .Lmcck_return | ||
739 | lg %r1,__LC_KERNEL_STACK # switch to kernel stack | ||
740 | mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) | ||
741 | xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) | ||
742 | la %r11,STACK_FRAME_OVERHEAD(%r1) | ||
743 | lgr %r15,%r1 | ||
744 | ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off | ||
745 | tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING | ||
746 | jno .Lmcck_return | ||
747 | TRACE_IRQS_OFF | ||
748 | brasl %r14,s390_handle_mcck | ||
749 | TRACE_IRQS_ON | ||
750 | .Lmcck_return: | ||
751 | lg %r14,__LC_VDSO_PER_CPU | ||
752 | lmg %r0,%r10,__PT_R0(%r11) | ||
753 | mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW | ||
754 | tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? | ||
755 | jno 0f | ||
756 | stpt __LC_EXIT_TIMER | ||
757 | mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER | ||
758 | 0: lmg %r11,%r15,__PT_R11(%r11) | ||
759 | lpswe __LC_RETURN_MCCK_PSW | ||
760 | |||
761 | .Lmcck_panic: | ||
762 | lg %r14,__LC_PANIC_STACK | ||
763 | slgr %r14,%r15 | ||
764 | srag %r14,%r14,PAGE_SHIFT | ||
765 | jz 0f | ||
766 | lg %r15,__LC_PANIC_STACK | ||
767 | 0: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) | ||
768 | j .Lmcck_skip | ||
769 | |||
770 | # | ||
771 | # PSW restart interrupt handler | ||
772 | # | ||
773 | ENTRY(restart_int_handler) | ||
774 | stg %r15,__LC_SAVE_AREA_RESTART | ||
775 | lg %r15,__LC_RESTART_STACK | ||
776 | aghi %r15,-__PT_SIZE # create pt_regs on stack | ||
777 | xc 0(__PT_SIZE,%r15),0(%r15) | ||
778 | stmg %r0,%r14,__PT_R0(%r15) | ||
779 | mvc __PT_R15(8,%r15),__LC_SAVE_AREA_RESTART | ||
780 | mvc __PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw | ||
781 | aghi %r15,-STACK_FRAME_OVERHEAD # create stack frame on stack | ||
782 | xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15) | ||
783 | lg %r1,__LC_RESTART_FN # load fn, parm & source cpu | ||
784 | lg %r2,__LC_RESTART_DATA | ||
785 | lg %r3,__LC_RESTART_SOURCE | ||
786 | ltgr %r3,%r3 # test source cpu address | ||
787 | jm 1f # negative -> skip source stop | ||
788 | 0: sigp %r4,%r3,SIGP_SENSE # sigp sense to source cpu | ||
789 | brc 10,0b # wait for status stored | ||
790 | 1: basr %r14,%r1 # call function | ||
791 | stap __SF_EMPTY(%r15) # store cpu address | ||
792 | llgh %r3,__SF_EMPTY(%r15) | ||
793 | 2: sigp %r4,%r3,SIGP_STOP # sigp stop to current cpu | ||
794 | brc 2,2b | ||
795 | 3: j 3b | ||
796 | |||
797 | .section .kprobes.text, "ax" | ||
798 | |||
799 | #ifdef CONFIG_CHECK_STACK | ||
800 | /* | ||
801 | * The synchronous or the asynchronous stack overflowed. We are dead. | ||
802 | * No need to properly save the registers, we are going to panic anyway. | ||
803 | * Set up a pt_regs so that show_trace can provide a good call trace. | ||
804 | */ | ||
805 | stack_overflow: | ||
806 | lg %r15,__LC_PANIC_STACK # change to panic stack | ||
807 | la %r11,STACK_FRAME_OVERHEAD(%r15) | ||
808 | stmg %r0,%r7,__PT_R0(%r11) | ||
809 | stmg %r8,%r9,__PT_PSW(%r11) | ||
810 | mvc __PT_R8(64,%r11),0(%r14) | ||
811 | stg %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2 | ||
812 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) | ||
813 | lgr %r2,%r11 # pass pointer to pt_regs | ||
814 | jg kernel_stack_overflow | ||
815 | #endif | ||
816 | |||
817 | .align 8 | ||
818 | .Lcleanup_table: | ||
819 | .quad system_call | ||
820 | .quad .Lsysc_do_svc | ||
821 | .quad .Lsysc_tif | ||
822 | .quad .Lsysc_restore | ||
823 | .quad .Lsysc_done | ||
824 | .quad .Lio_tif | ||
825 | .quad .Lio_restore | ||
826 | .quad .Lio_done | ||
827 | .quad psw_idle | ||
828 | .quad .Lpsw_idle_end | ||
829 | |||
830 | cleanup_critical: | ||
831 | clg %r9,BASED(.Lcleanup_table) # system_call | ||
832 | jl 0f | ||
833 | clg %r9,BASED(.Lcleanup_table+8) # .Lsysc_do_svc | ||
834 | jl .Lcleanup_system_call | ||
835 | clg %r9,BASED(.Lcleanup_table+16) # .Lsysc_tif | ||
836 | jl 0f | ||
837 | clg %r9,BASED(.Lcleanup_table+24) # .Lsysc_restore | ||
838 | jl .Lcleanup_sysc_tif | ||
839 | clg %r9,BASED(.Lcleanup_table+32) # .Lsysc_done | ||
840 | jl .Lcleanup_sysc_restore | ||
841 | clg %r9,BASED(.Lcleanup_table+40) # .Lio_tif | ||
842 | jl 0f | ||
843 | clg %r9,BASED(.Lcleanup_table+48) # .Lio_restore | ||
844 | jl .Lcleanup_io_tif | ||
845 | clg %r9,BASED(.Lcleanup_table+56) # .Lio_done | ||
846 | jl .Lcleanup_io_restore | ||
847 | clg %r9,BASED(.Lcleanup_table+64) # psw_idle | ||
848 | jl 0f | ||
849 | clg %r9,BASED(.Lcleanup_table+72) # .Lpsw_idle_end | ||
850 | jl .Lcleanup_idle | ||
851 | 0: br %r14 | ||
852 | |||
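cleanup_critical maps the interrupted address in %r9 onto the half-open ranges defined by consecutive .Lcleanup_table entries; ranges that need no repair simply return. The same comparison ladder in C (a sketch; the cleanup_*_sketch() calls stand for branching to the cleanup labels of the same name):

static void cleanup_critical_sketch(unsigned long addr, const unsigned long t[10])
{
	/* t[] = { system_call, sysc_do_svc, sysc_tif, sysc_restore, sysc_done,
	 *	   io_tif, io_restore, io_done, psw_idle, psw_idle_end } */
	if (addr < t[0]) {
		/* before system_call: nothing to do */
	} else if (addr < t[1]) {
		cleanup_system_call_sketch();
	} else if (addr < t[2]) {
		/* sysc_do_svc .. sysc_tif: nothing to do */
	} else if (addr < t[3]) {
		cleanup_sysc_tif_sketch();
	} else if (addr < t[4]) {
		cleanup_sysc_restore_sketch();
	} else if (addr < t[5]) {
		/* sysc_done .. io_tif: nothing to do */
	} else if (addr < t[6]) {
		cleanup_io_tif_sketch();
	} else if (addr < t[7]) {
		cleanup_io_restore_sketch();
	} else if (addr < t[8]) {
		/* io_done .. psw_idle: nothing to do */
	} else if (addr < t[9]) {
		cleanup_idle_sketch();
	}
}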
853 | |||
854 | .Lcleanup_system_call: | ||
855 | # check if stpt has been executed | ||
856 | clg %r9,BASED(.Lcleanup_system_call_insn) | ||
857 | jh 0f | ||
858 | mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER | ||
859 | cghi %r11,__LC_SAVE_AREA_ASYNC | ||
860 | je 0f | ||
861 | mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER | ||
862 | 0: # check if stmg has been executed | ||
863 | clg %r9,BASED(.Lcleanup_system_call_insn+8) | ||
864 | jh 0f | ||
865 | mvc __LC_SAVE_AREA_SYNC(64),0(%r11) | ||
866 | 0: # check if base register setup + TIF bit load has been done | ||
867 | clg %r9,BASED(.Lcleanup_system_call_insn+16) | ||
868 | jhe 0f | ||
869 | # set up saved registers r10 and r12 | ||
870 | stg %r10,16(%r11) # r10 last break | ||
871 | stg %r12,32(%r11) # r12 thread-info pointer | ||
872 | 0: # check if the user time update has been done | ||
873 | clg %r9,BASED(.Lcleanup_system_call_insn+24) | ||
874 | jh 0f | ||
875 | lg %r15,__LC_EXIT_TIMER | ||
876 | slg %r15,__LC_SYNC_ENTER_TIMER | ||
877 | alg %r15,__LC_USER_TIMER | ||
878 | stg %r15,__LC_USER_TIMER | ||
879 | 0: # check if the system time update has been done | ||
880 | clg %r9,BASED(.Lcleanup_system_call_insn+32) | ||
881 | jh 0f | ||
882 | lg %r15,__LC_LAST_UPDATE_TIMER | ||
883 | slg %r15,__LC_EXIT_TIMER | ||
884 | alg %r15,__LC_SYSTEM_TIMER | ||
885 | stg %r15,__LC_SYSTEM_TIMER | ||
886 | 0: # update accounting time stamp | ||
887 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | ||
888 | # do LAST_BREAK | ||
889 | lg %r9,16(%r11) | ||
890 | srag %r9,%r9,23 | ||
891 | jz 0f | ||
892 | mvc __TI_last_break(8,%r12),16(%r11) | ||
893 | 0: # set up saved register r11 | ||
894 | lg %r15,__LC_KERNEL_STACK | ||
895 | la %r9,STACK_FRAME_OVERHEAD(%r15) | ||
896 | stg %r9,24(%r11) # r11 pt_regs pointer | ||
897 | # fill pt_regs | ||
898 | mvc __PT_R8(64,%r9),__LC_SAVE_AREA_SYNC | ||
899 | stmg %r0,%r7,__PT_R0(%r9) | ||
900 | mvc __PT_PSW(16,%r9),__LC_SVC_OLD_PSW | ||
901 | mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC | ||
902 | xc __PT_FLAGS(8,%r9),__PT_FLAGS(%r9) | ||
903 | mvi __PT_FLAGS+7(%r9),_PIF_SYSCALL | ||
904 | # setup saved register r15 | ||
905 | stg %r15,56(%r11) # r15 stack pointer | ||
906 | # set new psw address and exit | ||
907 | larl %r9,.Lsysc_do_svc | ||
908 | br %r14 | ||
909 | .Lcleanup_system_call_insn: | ||
910 | .quad system_call | ||
911 | .quad .Lsysc_stmg | ||
912 | .quad .Lsysc_per | ||
913 | .quad .Lsysc_vtime+18 | ||
914 | .quad .Lsysc_vtime+42 | ||
915 | |||
916 | .Lcleanup_sysc_tif: | ||
917 | larl %r9,.Lsysc_tif | ||
918 | br %r14 | ||
919 | |||
920 | .Lcleanup_sysc_restore: | ||
921 | clg %r9,BASED(.Lcleanup_sysc_restore_insn) | ||
922 | je 0f | ||
923 | lg %r9,24(%r11) # get saved pointer to pt_regs | ||
924 | mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) | ||
925 | mvc 0(64,%r11),__PT_R8(%r9) | ||
926 | lmg %r0,%r7,__PT_R0(%r9) | ||
927 | 0: lmg %r8,%r9,__LC_RETURN_PSW | ||
928 | br %r14 | ||
929 | .Lcleanup_sysc_restore_insn: | ||
930 | .quad .Lsysc_done - 4 | ||
931 | |||
932 | .Lcleanup_io_tif: | ||
933 | larl %r9,.Lio_tif | ||
934 | br %r14 | ||
935 | |||
936 | .Lcleanup_io_restore: | ||
937 | clg %r9,BASED(.Lcleanup_io_restore_insn) | ||
938 | je 0f | ||
939 | lg %r9,24(%r11) # get saved r11 pointer to pt_regs | ||
940 | mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) | ||
941 | mvc 0(64,%r11),__PT_R8(%r9) | ||
942 | lmg %r0,%r7,__PT_R0(%r9) | ||
943 | 0: lmg %r8,%r9,__LC_RETURN_PSW | ||
944 | br %r14 | ||
945 | .Lcleanup_io_restore_insn: | ||
946 | .quad .Lio_done - 4 | ||
947 | |||
948 | .Lcleanup_idle: | ||
949 | # copy interrupt clock & cpu timer | ||
950 | mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK | ||
951 | mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER | ||
952 | cghi %r11,__LC_SAVE_AREA_ASYNC | ||
953 | je 0f | ||
954 | mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK | ||
955 | mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER | ||
956 | 0: # check if stck & stpt have been executed | ||
957 | clg %r9,BASED(.Lcleanup_idle_insn) | ||
958 | jhe 1f | ||
959 | mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2) | ||
960 | mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2) | ||
961 | 1: # account system time going idle | ||
962 | lg %r9,__LC_STEAL_TIMER | ||
963 | alg %r9,__CLOCK_IDLE_ENTER(%r2) | ||
964 | slg %r9,__LC_LAST_UPDATE_CLOCK | ||
965 | stg %r9,__LC_STEAL_TIMER | ||
966 | mvc __LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2) | ||
967 | lg %r9,__LC_SYSTEM_TIMER | ||
968 | alg %r9,__LC_LAST_UPDATE_TIMER | ||
969 | slg %r9,__TIMER_IDLE_ENTER(%r2) | ||
970 | stg %r9,__LC_SYSTEM_TIMER | ||
971 | mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2) | ||
972 | # prepare return psw | ||
973 | nihh %r8,0xfcfd # clear irq & wait state bits | ||
974 | lg %r9,48(%r11) # return from psw_idle | ||
975 | br %r14 | ||
976 | .Lcleanup_idle_insn: | ||
977 | .quad .Lpsw_idle_lpsw | ||
978 | |||
979 | /* | ||
980 | * Integer constants | ||
981 | */ | ||
982 | .align 8 | ||
983 | .Lcritical_start: | ||
984 | .quad .L__critical_start | ||
985 | .Lcritical_length: | ||
986 | .quad .L__critical_end - .L__critical_start | ||
987 | |||
988 | |||
989 | #if IS_ENABLED(CONFIG_KVM) | ||
990 | /* | ||
991 | * sie64a calling convention: | ||
992 | * %r2 pointer to sie control block | ||
993 | * %r3 guest register save area | ||
994 | */ | ||
995 | ENTRY(sie64a) | ||
996 | stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers | ||
997 | stg %r2,__SF_EMPTY(%r15) # save control block pointer | ||
998 | stg %r3,__SF_EMPTY+8(%r15) # save guest register save area | ||
999 | xc __SF_EMPTY+16(16,%r15),__SF_EMPTY+16(%r15) # host id & reason | ||
1000 | lmg %r0,%r13,0(%r3) # load guest gprs 0-13 | ||
1001 | lg %r14,__LC_GMAP # get gmap pointer | ||
1002 | ltgr %r14,%r14 | ||
1003 | jz .Lsie_gmap | ||
1004 | lctlg %c1,%c1,__GMAP_ASCE(%r14) # load primary asce | ||
1005 | .Lsie_gmap: | ||
1006 | lg %r14,__SF_EMPTY(%r15) # get control block pointer | ||
1007 | oi __SIE_PROG0C+3(%r14),1 # we are going into SIE now | ||
1008 | tm __SIE_PROG20+3(%r14),1 # last exit... | ||
1009 | jnz .Lsie_done | ||
1010 | LPP __SF_EMPTY(%r15) # set guest id | ||
1011 | sie 0(%r14) | ||
1012 | .Lsie_done: | ||
1013 | LPP __SF_EMPTY+16(%r15) # set host id | ||
1014 | ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE | ||
1015 | lctlg %c1,%c1,__LC_USER_ASCE # load primary asce | ||
1016 | # some program checks are suppressing. C code (e.g. do_protection_exception) | ||
1017 | # will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other | ||
1018 | # instructions between sie64a and .Lsie_done should not cause program | ||
1019 | # interrupts. So let's use a nop (47 00 00 00) as a landing pad. | ||
1020 | # See also HANDLE_SIE_INTERCEPT | ||
1021 | .Lrewind_pad: | ||
1022 | nop 0 | ||
1023 | .globl sie_exit | ||
1024 | sie_exit: | ||
1025 | lg %r14,__SF_EMPTY+8(%r15) # load guest register save area | ||
1026 | stmg %r0,%r13,0(%r14) # save guest gprs 0-13 | ||
1027 | lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers | ||
1028 | lg %r2,__SF_EMPTY+24(%r15) # return exit reason code | ||
1029 | br %r14 | ||
1030 | .Lsie_fault: | ||
1031 | lghi %r14,-EFAULT | ||
1032 | stg %r14,__SF_EMPTY+24(%r15) # set exit reason code | ||
1033 | j sie_exit | ||
1034 | |||
1035 | .align 8 | ||
1036 | .Lsie_critical: | ||
1037 | .quad .Lsie_gmap | ||
1038 | .Lsie_critical_length: | ||
1039 | .quad .Lsie_done - .Lsie_gmap | ||
1040 | |||
1041 | EX_TABLE(.Lrewind_pad,.Lsie_fault) | ||
1042 | EX_TABLE(sie_exit,.Lsie_fault) | ||
1043 | #endif | ||
1044 | |||
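Seen from C, sie64a behaves as follows (the prototype is assumed from the register comments at the top of the routine, not quoted from a header):

struct kvm_s390_sie_block;

/* %r2 = SIE control block, %r3 = guest general register save area.
 * The return value is the exit-reason word kept at __SF_EMPTY+24 in the
 * stack frame: zero (or the small reason code set by HANDLE_SIE_INTERCEPT)
 * after a regular exit through sie_exit, or -EFAULT when a fault inside the
 * critical section was redirected to .Lsie_fault via the EX_TABLE entries. */
int sie64a(struct kvm_s390_sie_block *sie_block, unsigned long *guest_gprs);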
1045 | .section .rodata, "a" | ||
1046 | #define SYSCALL(esa,esame,emu) .long esame | ||
1047 | .globl sys_call_table | ||
1048 | sys_call_table: | ||
1049 | #include "syscalls.S" | ||
1050 | #undef SYSCALL | ||
1051 | |||
1052 | #ifdef CONFIG_COMPAT | ||
1053 | |||
1054 | #define SYSCALL(esa,esame,emu) .long emu | ||
1055 | .globl sys_call_table_emu | ||
1056 | sys_call_table_emu: | ||
1057 | #include "syscalls.S" | ||
1058 | #undef SYSCALL | ||
1059 | #endif | ||
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c index 6c79f1b44fe7..e0eaf11134b4 100644 --- a/arch/s390/kernel/ftrace.c +++ b/arch/s390/kernel/ftrace.c | |||
@@ -130,8 +130,7 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, | |||
130 | /* Verify that the to be replaced code matches what we expect. */ | 130 | /* Verify that the to be replaced code matches what we expect. */ |
131 | if (memcmp(&orig, &old, sizeof(old))) | 131 | if (memcmp(&orig, &old, sizeof(old))) |
132 | return -EINVAL; | 132 | return -EINVAL; |
133 | if (probe_kernel_write((void *) rec->ip, &new, sizeof(new))) | 133 | s390_kernel_write((void *) rec->ip, &new, sizeof(new)); |
134 | return -EPERM; | ||
135 | return 0; | 134 | return 0; |
136 | } | 135 | } |
137 | 136 | ||
@@ -159,8 +158,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) | |||
159 | /* Verify that the to be replaced code matches what we expect. */ | 158 | /* Verify that the to be replaced code matches what we expect. */ |
160 | if (memcmp(&orig, &old, sizeof(old))) | 159 | if (memcmp(&orig, &old, sizeof(old))) |
161 | return -EINVAL; | 160 | return -EINVAL; |
162 | if (probe_kernel_write((void *) rec->ip, &new, sizeof(new))) | 161 | s390_kernel_write((void *) rec->ip, &new, sizeof(new)); |
163 | return -EPERM; | ||
164 | return 0; | 162 | return 0; |
165 | } | 163 | } |
166 | 164 | ||
@@ -231,14 +229,16 @@ int ftrace_enable_ftrace_graph_caller(void) | |||
231 | { | 229 | { |
232 | u8 op = 0x04; /* set mask field to zero */ | 230 | u8 op = 0x04; /* set mask field to zero */ |
233 | 231 | ||
234 | return probe_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op)); | 232 | s390_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op)); |
233 | return 0; | ||
235 | } | 234 | } |
236 | 235 | ||
237 | int ftrace_disable_ftrace_graph_caller(void) | 236 | int ftrace_disable_ftrace_graph_caller(void) |
238 | { | 237 | { |
239 | u8 op = 0xf4; /* set mask field to all ones */ | 238 | u8 op = 0xf4; /* set mask field to all ones */ |
240 | 239 | ||
241 | return probe_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op)); | 240 | s390_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op)); |
241 | return 0; | ||
242 | } | 242 | } |
243 | 243 | ||
244 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 244 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
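Both the make_nop and make_call hunks now follow the same shape: verify the code that is about to be replaced, then patch it with s390_kernel_write(), which, unlike probe_kernel_write(), has no failure path, so the -EPERM return disappears. A condensed sketch of that shape (not the file's literal code):

static int ftrace_patch_sketch(struct dyn_ftrace *rec,
			       struct ftrace_insn *old, struct ftrace_insn *new)
{
	struct ftrace_insn orig;

	if (probe_kernel_read(&orig, (void *) rec->ip, sizeof(orig)))
		return -EFAULT;
	/* verify that the to-be-replaced code matches what we expect */
	if (memcmp(&orig, old, sizeof(orig)))
		return -EINVAL;
	s390_kernel_write((void *) rec->ip, new, sizeof(*new));
	return 0;			/* the write itself cannot fail */
}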
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S index 132f4c9ade60..59b7c6470567 100644 --- a/arch/s390/kernel/head.S +++ b/arch/s390/kernel/head.S | |||
@@ -27,11 +27,7 @@ | |||
27 | #include <asm/thread_info.h> | 27 | #include <asm/thread_info.h> |
28 | #include <asm/page.h> | 28 | #include <asm/page.h> |
29 | 29 | ||
30 | #ifdef CONFIG_64BIT | ||
31 | #define ARCH_OFFSET 4 | 30 | #define ARCH_OFFSET 4 |
32 | #else | ||
33 | #define ARCH_OFFSET 0 | ||
34 | #endif | ||
35 | 31 | ||
36 | __HEAD | 32 | __HEAD |
37 | 33 | ||
@@ -67,7 +63,6 @@ __HEAD | |||
67 | # subroutine to set architecture mode | 63 | # subroutine to set architecture mode |
68 | # | 64 | # |
69 | .Lsetmode: | 65 | .Lsetmode: |
70 | #ifdef CONFIG_64BIT | ||
71 | mvi __LC_AR_MODE_ID,1 # set esame flag | 66 | mvi __LC_AR_MODE_ID,1 # set esame flag |
72 | slr %r0,%r0 # set cpuid to zero | 67 | slr %r0,%r0 # set cpuid to zero |
73 | lhi %r1,2 # mode 2 = esame (dump) | 68 | lhi %r1,2 # mode 2 = esame (dump) |
@@ -76,16 +71,12 @@ __HEAD | |||
76 | .fill 16,4,0x0 | 71 | .fill 16,4,0x0 |
77 | 0: lmh %r0,%r15,0(%r13) # clear high-order half of gprs | 72 | 0: lmh %r0,%r15,0(%r13) # clear high-order half of gprs |
78 | sam31 # switch to 31 bit addressing mode | 73 | sam31 # switch to 31 bit addressing mode |
79 | #else | ||
80 | mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0) | ||
81 | #endif | ||
82 | br %r14 | 74 | br %r14 |
83 | 75 | ||
84 | # | 76 | # |
85 | # subroutine to wait for end I/O | 77 | # subroutine to wait for end I/O |
86 | # | 78 | # |
87 | .Lirqwait: | 79 | .Lirqwait: |
88 | #ifdef CONFIG_64BIT | ||
89 | mvc 0x1f0(16),.Lnewpsw # set up IO interrupt psw | 80 | mvc 0x1f0(16),.Lnewpsw # set up IO interrupt psw |
90 | lpsw .Lwaitpsw | 81 | lpsw .Lwaitpsw |
91 | .Lioint: | 82 | .Lioint: |
@@ -93,15 +84,6 @@ __HEAD | |||
93 | .align 8 | 84 | .align 8 |
94 | .Lnewpsw: | 85 | .Lnewpsw: |
95 | .quad 0x0000000080000000,.Lioint | 86 | .quad 0x0000000080000000,.Lioint |
96 | #else | ||
97 | mvc 0x78(8),.Lnewpsw # set up IO interrupt psw | ||
98 | lpsw .Lwaitpsw | ||
99 | .Lioint: | ||
100 | br %r14 | ||
101 | .align 8 | ||
102 | .Lnewpsw: | ||
103 | .long 0x00080000,0x80000000+.Lioint | ||
104 | #endif | ||
105 | .Lwaitpsw: | 87 | .Lwaitpsw: |
106 | .long 0x020a0000,0x80000000+.Lioint | 88 | .long 0x020a0000,0x80000000+.Lioint |
107 | 89 | ||
@@ -375,7 +357,6 @@ ENTRY(startup) | |||
375 | ENTRY(startup_kdump) | 357 | ENTRY(startup_kdump) |
376 | j .Lep_startup_kdump | 358 | j .Lep_startup_kdump |
377 | .Lep_startup_normal: | 359 | .Lep_startup_normal: |
378 | #ifdef CONFIG_64BIT | ||
379 | mvi __LC_AR_MODE_ID,1 # set esame flag | 360 | mvi __LC_AR_MODE_ID,1 # set esame flag |
380 | slr %r0,%r0 # set cpuid to zero | 361 | slr %r0,%r0 # set cpuid to zero |
381 | lhi %r1,2 # mode 2 = esame (dump) | 362 | lhi %r1,2 # mode 2 = esame (dump) |
@@ -384,9 +365,6 @@ ENTRY(startup_kdump) | |||
384 | .fill 16,4,0x0 | 365 | .fill 16,4,0x0 |
385 | 0: lmh %r0,%r15,0(%r13) # clear high-order half of gprs | 366 | 0: lmh %r0,%r15,0(%r13) # clear high-order half of gprs |
386 | sam31 # switch to 31 bit addressing mode | 367 | sam31 # switch to 31 bit addressing mode |
387 | #else | ||
388 | mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0) | ||
389 | #endif | ||
390 | basr %r13,0 # get base | 368 | basr %r13,0 # get base |
391 | .LPG0: | 369 | .LPG0: |
392 | xc 0x200(256),0x200 # partially clear lowcore | 370 | xc 0x200(256),0x200 # partially clear lowcore |
@@ -396,7 +374,6 @@ ENTRY(startup_kdump) | |||
396 | spt 6f-.LPG0(%r13) | 374 | spt 6f-.LPG0(%r13) |
397 | mvc __LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13) | 375 | mvc __LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13) |
398 | xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST | 376 | xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST |
399 | #ifndef CONFIG_MARCH_G5 | ||
400 | # check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10} | 377 | # check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10} |
401 | .insn s,0xb2b10000,0 # store facilities @ __LC_STFL_FAC_LIST | 378 | .insn s,0xb2b10000,0 # store facilities @ __LC_STFL_FAC_LIST |
402 | tm __LC_STFL_FAC_LIST,0x01 # stfle available ? | 379 | tm __LC_STFL_FAC_LIST,0x01 # stfle available ? |
@@ -435,7 +412,6 @@ ENTRY(startup_kdump) | |||
435 | # the kernel will crash. Format is number of facility words with bits set, | 412 | # the kernel will crash. Format is number of facility words with bits set, |
436 | # followed by the facility words. | 413 | # followed by the facility words. |
437 | 414 | ||
438 | #if defined(CONFIG_64BIT) | ||
439 | #if defined(CONFIG_MARCH_Z13) | 415 | #if defined(CONFIG_MARCH_Z13) |
440 | .long 3, 0xc100eff2, 0xf46ce800, 0x00400000 | 416 | .long 3, 0xc100eff2, 0xf46ce800, 0x00400000 |
441 | #elif defined(CONFIG_MARCH_ZEC12) | 417 | #elif defined(CONFIG_MARCH_ZEC12) |
@@ -451,35 +427,10 @@ ENTRY(startup_kdump) | |||
451 | #elif defined(CONFIG_MARCH_Z900) | 427 | #elif defined(CONFIG_MARCH_Z900) |
452 | .long 1, 0xc0000000 | 428 | .long 1, 0xc0000000 |
453 | #endif | 429 | #endif |
454 | #else | ||
455 | #if defined(CONFIG_MARCH_ZEC12) | ||
456 | .long 1, 0x8100c880 | ||
457 | #elif defined(CONFIG_MARCH_Z196) | ||
458 | .long 1, 0x8100c880 | ||
459 | #elif defined(CONFIG_MARCH_Z10) | ||
460 | .long 1, 0x8100c880 | ||
461 | #elif defined(CONFIG_MARCH_Z9_109) | ||
462 | .long 1, 0x8100c880 | ||
463 | #elif defined(CONFIG_MARCH_Z990) | ||
464 | .long 1, 0x80002000 | ||
465 | #elif defined(CONFIG_MARCH_Z900) | ||
466 | .long 1, 0x80000000 | ||
467 | #endif | ||
468 | #endif | ||
469 | 4: | 430 | 4: |
470 | #endif | ||
471 | |||
472 | #ifdef CONFIG_64BIT | ||
473 | /* Continue with 64bit startup code in head64.S */ | 431 | /* Continue with 64bit startup code in head64.S */ |
474 | sam64 # switch to 64 bit mode | 432 | sam64 # switch to 64 bit mode |
475 | jg startup_continue | 433 | jg startup_continue |
476 | #else | ||
477 | /* Continue with 31bit startup code in head31.S */ | ||
478 | l %r13,5f-.LPG0(%r13) | ||
479 | b 0(%r13) | ||
480 | .align 8 | ||
481 | 5: .long startup_continue | ||
482 | #endif | ||
483 | 434 | ||
484 | .align 8 | 435 | .align 8 |
485 | 6: .long 0x7fffffff,0xffffffff | 436 | 6: .long 0x7fffffff,0xffffffff |
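The .long lists above encode, per build target, how many facility words must be checked followed by the required bit patterns; startup compares them against the facility list stored at __LC_STFL_FAC_LIST and enters a disabled wait if a required facility is missing. The equivalent check in C looks roughly like this (simplified sketch; the real test is done in assembly):

/* required[] = { word_count, word0, word1, ... } as in the tables above */
static int facilities_ok_sketch(const unsigned int *required,
				const unsigned int *machine_facilities)
{
	unsigned int i;

	for (i = 0; i < required[0]; i++)
		if (required[1 + i] & ~machine_facilities[i])
			return 0;	/* a required facility bit is not set */
	return 1;
}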
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S deleted file mode 100644 index 6dbe80983a24..000000000000 --- a/arch/s390/kernel/head31.S +++ /dev/null | |||
@@ -1,106 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright IBM Corp. 2005, 2010 | ||
3 | * | ||
4 | * Author(s): Hartmut Penner <hp@de.ibm.com> | ||
5 | * Martin Schwidefsky <schwidefsky@de.ibm.com> | ||
6 | * Rob van der Heij <rvdhei@iae.nl> | ||
7 | * Heiko Carstens <heiko.carstens@de.ibm.com> | ||
8 | * | ||
9 | */ | ||
10 | |||
11 | #include <linux/init.h> | ||
12 | #include <linux/linkage.h> | ||
13 | #include <asm/asm-offsets.h> | ||
14 | #include <asm/thread_info.h> | ||
15 | #include <asm/page.h> | ||
16 | |||
17 | __HEAD | ||
18 | ENTRY(startup_continue) | ||
19 | basr %r13,0 # get base | ||
20 | .LPG1: | ||
21 | |||
22 | l %r1,.Lbase_cc-.LPG1(%r13) | ||
23 | mvc 0(8,%r1),__LC_LAST_UPDATE_CLOCK | ||
24 | lctl %c0,%c15,.Lctl-.LPG1(%r13) # load control registers | ||
25 | l %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area | ||
26 | # move IPL device to lowcore | ||
27 | # | ||
28 | # Setup stack | ||
29 | # | ||
30 | l %r15,.Linittu-.LPG1(%r13) | ||
31 | st %r15,__LC_THREAD_INFO # cache thread info in lowcore | ||
32 | mvc __LC_CURRENT(4),__TI_task(%r15) | ||
33 | ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union+THREAD_SIZE | ||
34 | st %r15,__LC_KERNEL_STACK # set end of kernel stack | ||
35 | ahi %r15,-96 | ||
36 | # | ||
37 | # Save ipl parameters, clear bss memory, initialize storage key for kernel pages, | ||
38 | # and create a kernel NSS if the SAVESYS= parm is defined | ||
39 | # | ||
40 | l %r14,.Lstartup_init-.LPG1(%r13) | ||
41 | basr %r14,%r14 | ||
42 | lpsw .Lentry-.LPG1(13) # jump to _stext in primary-space, | ||
43 | # virtual and never return ... | ||
44 | .align 8 | ||
45 | .Lentry:.long 0x00080000,0x80000000 + _stext | ||
46 | .Lctl: .long 0x04b50000 # cr0: various things | ||
47 | .long 0 # cr1: primary space segment table | ||
48 | .long .Lduct # cr2: dispatchable unit control table | ||
49 | .long 0 # cr3: instruction authorization | ||
50 | .long 0 # cr4: instruction authorization | ||
51 | .long .Lduct # cr5: primary-aste origin | ||
52 | .long 0 # cr6: I/O interrupts | ||
53 | .long 0 # cr7: secondary space segment table | ||
54 | .long 0 # cr8: access registers translation | ||
55 | .long 0 # cr9: tracing off | ||
56 | .long 0 # cr10: tracing off | ||
57 | .long 0 # cr11: tracing off | ||
58 | .long 0 # cr12: tracing off | ||
59 | .long 0 # cr13: home space segment table | ||
60 | .long 0xc0000000 # cr14: machine check handling off | ||
61 | .long 0 # cr15: linkage stack operations | ||
62 | .Lbss_bgn: .long __bss_start | ||
63 | .Lbss_end: .long _end | ||
64 | .Lparmaddr: .long PARMAREA | ||
65 | .Linittu: .long init_thread_union | ||
66 | .Lstartup_init: | ||
67 | .long startup_init | ||
68 | .align 64 | ||
69 | .Lduct: .long 0,0,0,0,.Lduald,0,0,0 | ||
70 | .long 0,0,0,0,0,0,0,0 | ||
71 | .align 128 | ||
72 | .Lduald:.rept 8 | ||
73 | .long 0x80000000,0,0,0 # invalid access-list entries | ||
74 | .endr | ||
75 | .Lbase_cc: | ||
76 | .long sched_clock_base_cc | ||
77 | |||
78 | ENTRY(_ehead) | ||
79 | |||
80 | .org 0x100000 - 0x11000 # head.o ends at 0x11000 | ||
81 | # | ||
82 | # startup-code, running in absolute addressing mode | ||
83 | # | ||
84 | ENTRY(_stext) | ||
85 | basr %r13,0 # get base | ||
86 | .LPG3: | ||
87 | # check control registers | ||
88 | stctl %c0,%c15,0(%r15) | ||
89 | oi 2(%r15),0x60 # enable sigp emergency & external call | ||
90 | oi 0(%r15),0x10 # switch on low address protection | ||
91 | lctl %c0,%c15,0(%r15) | ||
92 | |||
93 | # | ||
94 | lam 0,15,.Laregs-.LPG3(%r13) # load access regs needed by uaccess | ||
95 | l %r14,.Lstart-.LPG3(%r13) | ||
96 | basr %r14,%r14 # call start_kernel | ||
97 | # | ||
98 | # We returned from start_kernel ?!? PANIK | ||
99 | # | ||
100 | basr %r13,0 | ||
101 | lpsw .Ldw-.(%r13) # load disabled wait psw | ||
102 | # | ||
103 | .align 8 | ||
104 | .Ldw: .long 0x000a0000,0x00000000 | ||
105 | .Lstart:.long start_kernel | ||
106 | .Laregs:.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 | ||
diff --git a/arch/s390/kernel/head_kdump.S b/arch/s390/kernel/head_kdump.S index 085a95eb315f..d05950f02c34 100644 --- a/arch/s390/kernel/head_kdump.S +++ b/arch/s390/kernel/head_kdump.S | |||
@@ -92,17 +92,9 @@ startup_kdump_relocated: | |||
92 | #else | 92 | #else |
93 | .align 2 | 93 | .align 2 |
94 | .Lep_startup_kdump: | 94 | .Lep_startup_kdump: |
95 | #ifdef CONFIG_64BIT | ||
96 | larl %r13,startup_kdump_crash | 95 | larl %r13,startup_kdump_crash |
97 | lpswe 0(%r13) | 96 | lpswe 0(%r13) |
98 | .align 8 | 97 | .align 8 |
99 | startup_kdump_crash: | 98 | startup_kdump_crash: |
100 | .quad 0x0002000080000000,0x0000000000000000 + startup_kdump_crash | 99 | .quad 0x0002000080000000,0x0000000000000000 + startup_kdump_crash |
101 | #else | ||
102 | basr %r13,0 | ||
103 | 0: lpsw startup_kdump_crash-0b(%r13) | ||
104 | .align 8 | ||
105 | startup_kdump_crash: | ||
106 | .long 0x000a0000,0x00000000 + startup_kdump_crash | ||
107 | #endif /* CONFIG_64BIT */ | ||
108 | #endif /* CONFIG_CRASH_DUMP */ | 100 | #endif /* CONFIG_CRASH_DUMP */ |
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 5c8651f36509..52fbef91d1d9 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c | |||
@@ -182,24 +182,21 @@ EXPORT_SYMBOL_GPL(diag308); | |||
182 | 182 | ||
183 | /* SYSFS */ | 183 | /* SYSFS */ |
184 | 184 | ||
185 | #define DEFINE_IPL_ATTR_RO(_prefix, _name, _format, _value) \ | 185 | #define IPL_ATTR_SHOW_FN(_prefix, _name, _format, args...) \ |
186 | static ssize_t sys_##_prefix##_##_name##_show(struct kobject *kobj, \ | 186 | static ssize_t sys_##_prefix##_##_name##_show(struct kobject *kobj, \ |
187 | struct kobj_attribute *attr, \ | 187 | struct kobj_attribute *attr, \ |
188 | char *page) \ | 188 | char *page) \ |
189 | { \ | 189 | { \ |
190 | return sprintf(page, _format, _value); \ | 190 | return snprintf(page, PAGE_SIZE, _format, ##args); \ |
191 | } \ | 191 | } |
192 | |||
193 | #define DEFINE_IPL_ATTR_RO(_prefix, _name, _format, _value) \ | ||
194 | IPL_ATTR_SHOW_FN(_prefix, _name, _format, _value) \ | ||
192 | static struct kobj_attribute sys_##_prefix##_##_name##_attr = \ | 195 | static struct kobj_attribute sys_##_prefix##_##_name##_attr = \ |
193 | __ATTR(_name, S_IRUGO, sys_##_prefix##_##_name##_show, NULL); | 196 | __ATTR(_name, S_IRUGO, sys_##_prefix##_##_name##_show, NULL) |
194 | 197 | ||
195 | #define DEFINE_IPL_ATTR_RW(_prefix, _name, _fmt_out, _fmt_in, _value) \ | 198 | #define DEFINE_IPL_ATTR_RW(_prefix, _name, _fmt_out, _fmt_in, _value) \ |
196 | static ssize_t sys_##_prefix##_##_name##_show(struct kobject *kobj, \ | 199 | IPL_ATTR_SHOW_FN(_prefix, _name, _fmt_out, (unsigned long long) _value) \ |
197 | struct kobj_attribute *attr, \ | ||
198 | char *page) \ | ||
199 | { \ | ||
200 | return sprintf(page, _fmt_out, \ | ||
201 | (unsigned long long) _value); \ | ||
202 | } \ | ||
203 | static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \ | 200 | static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \ |
204 | struct kobj_attribute *attr, \ | 201 | struct kobj_attribute *attr, \ |
205 | const char *buf, size_t len) \ | 202 | const char *buf, size_t len) \ |
@@ -213,15 +210,10 @@ static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \ | |||
213 | static struct kobj_attribute sys_##_prefix##_##_name##_attr = \ | 210 | static struct kobj_attribute sys_##_prefix##_##_name##_attr = \ |
214 | __ATTR(_name,(S_IRUGO | S_IWUSR), \ | 211 | __ATTR(_name,(S_IRUGO | S_IWUSR), \ |
215 | sys_##_prefix##_##_name##_show, \ | 212 | sys_##_prefix##_##_name##_show, \ |
216 | sys_##_prefix##_##_name##_store); | 213 | sys_##_prefix##_##_name##_store) |
217 | 214 | ||
218 | #define DEFINE_IPL_ATTR_STR_RW(_prefix, _name, _fmt_out, _fmt_in, _value)\ | 215 | #define DEFINE_IPL_ATTR_STR_RW(_prefix, _name, _fmt_out, _fmt_in, _value)\ |
219 | static ssize_t sys_##_prefix##_##_name##_show(struct kobject *kobj, \ | 216 | IPL_ATTR_SHOW_FN(_prefix, _name, _fmt_out, _value) \ |
220 | struct kobj_attribute *attr, \ | ||
221 | char *page) \ | ||
222 | { \ | ||
223 | return sprintf(page, _fmt_out, _value); \ | ||
224 | } \ | ||
225 | static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \ | 217 | static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \ |
226 | struct kobj_attribute *attr, \ | 218 | struct kobj_attribute *attr, \ |
227 | const char *buf, size_t len) \ | 219 | const char *buf, size_t len) \ |
@@ -233,7 +225,7 @@ static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \ | |||
233 | static struct kobj_attribute sys_##_prefix##_##_name##_attr = \ | 225 | static struct kobj_attribute sys_##_prefix##_##_name##_attr = \ |
234 | __ATTR(_name,(S_IRUGO | S_IWUSR), \ | 226 | __ATTR(_name,(S_IRUGO | S_IWUSR), \ |
235 | sys_##_prefix##_##_name##_show, \ | 227 | sys_##_prefix##_##_name##_show, \ |
236 | sys_##_prefix##_##_name##_store); | 228 | sys_##_prefix##_##_name##_store) |
237 | 229 | ||
238 | static void make_attrs_ro(struct attribute **attrs) | 230 | static void make_attrs_ro(struct attribute **attrs) |
239 | { | 231 | { |
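With the show-function body factored out into IPL_ATTR_SHOW_FN, a read-only definition such as DEFINE_IPL_ATTR_RO(example, demo, "0x%lx\n", example_value) now expands to roughly the following ("example", "demo" and example_value are hypothetical names, chosen only to show the expansion):

static unsigned long example_value;

static ssize_t sys_example_demo_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *page)
{
	return snprintf(page, PAGE_SIZE, "0x%lx\n", example_value);
}
static struct kobj_attribute sys_example_demo_attr =
	__ATTR(demo, S_IRUGO, sys_example_demo_show, NULL);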
@@ -415,15 +407,9 @@ static ssize_t ipl_parameter_read(struct file *filp, struct kobject *kobj, | |||
415 | return memory_read_from_buffer(buf, count, &off, IPL_PARMBLOCK_START, | 407 | return memory_read_from_buffer(buf, count, &off, IPL_PARMBLOCK_START, |
416 | IPL_PARMBLOCK_SIZE); | 408 | IPL_PARMBLOCK_SIZE); |
417 | } | 409 | } |
418 | 410 | static struct bin_attribute ipl_parameter_attr = | |
419 | static struct bin_attribute ipl_parameter_attr = { | 411 | __BIN_ATTR(binary_parameter, S_IRUGO, ipl_parameter_read, NULL, |
420 | .attr = { | 412 | PAGE_SIZE); |
421 | .name = "binary_parameter", | ||
422 | .mode = S_IRUGO, | ||
423 | }, | ||
424 | .size = PAGE_SIZE, | ||
425 | .read = &ipl_parameter_read, | ||
426 | }; | ||
427 | 413 | ||
428 | static ssize_t ipl_scp_data_read(struct file *filp, struct kobject *kobj, | 414 | static ssize_t ipl_scp_data_read(struct file *filp, struct kobject *kobj, |
429 | struct bin_attribute *attr, char *buf, | 415 | struct bin_attribute *attr, char *buf, |
@@ -434,14 +420,13 @@ static ssize_t ipl_scp_data_read(struct file *filp, struct kobject *kobj, | |||
434 | 420 | ||
435 | return memory_read_from_buffer(buf, count, &off, scp_data, size); | 421 | return memory_read_from_buffer(buf, count, &off, scp_data, size); |
436 | } | 422 | } |
423 | static struct bin_attribute ipl_scp_data_attr = | ||
424 | __BIN_ATTR(scp_data, S_IRUGO, ipl_scp_data_read, NULL, PAGE_SIZE); | ||
437 | 425 | ||
438 | static struct bin_attribute ipl_scp_data_attr = { | 426 | static struct bin_attribute *ipl_fcp_bin_attrs[] = { |
439 | .attr = { | 427 | &ipl_parameter_attr, |
440 | .name = "scp_data", | 428 | &ipl_scp_data_attr, |
441 | .mode = S_IRUGO, | 429 | NULL, |
442 | }, | ||
443 | .size = PAGE_SIZE, | ||
444 | .read = ipl_scp_data_read, | ||
445 | }; | 430 | }; |
446 | 431 | ||
447 | /* FCP ipl device attributes */ | 432 | /* FCP ipl device attributes */ |
@@ -484,6 +469,7 @@ static struct attribute *ipl_fcp_attrs[] = { | |||
484 | 469 | ||
485 | static struct attribute_group ipl_fcp_attr_group = { | 470 | static struct attribute_group ipl_fcp_attr_group = { |
486 | .attrs = ipl_fcp_attrs, | 471 | .attrs = ipl_fcp_attrs, |
472 | .bin_attrs = ipl_fcp_bin_attrs, | ||
487 | }; | 473 | }; |
488 | 474 | ||
489 | /* CCW ipl device attributes */ | 475 | /* CCW ipl device attributes */ |
@@ -540,28 +526,6 @@ static struct attribute_group ipl_unknown_attr_group = { | |||
540 | 526 | ||
541 | static struct kset *ipl_kset; | 527 | static struct kset *ipl_kset; |
542 | 528 | ||
543 | static int __init ipl_register_fcp_files(void) | ||
544 | { | ||
545 | int rc; | ||
546 | |||
547 | rc = sysfs_create_group(&ipl_kset->kobj, &ipl_fcp_attr_group); | ||
548 | if (rc) | ||
549 | goto out; | ||
550 | rc = sysfs_create_bin_file(&ipl_kset->kobj, &ipl_parameter_attr); | ||
551 | if (rc) | ||
552 | goto out_ipl_parm; | ||
553 | rc = sysfs_create_bin_file(&ipl_kset->kobj, &ipl_scp_data_attr); | ||
554 | if (!rc) | ||
555 | goto out; | ||
556 | |||
557 | sysfs_remove_bin_file(&ipl_kset->kobj, &ipl_parameter_attr); | ||
558 | |||
559 | out_ipl_parm: | ||
560 | sysfs_remove_group(&ipl_kset->kobj, &ipl_fcp_attr_group); | ||
561 | out: | ||
562 | return rc; | ||
563 | } | ||
564 | |||
565 | static void __ipl_run(void *unused) | 529 | static void __ipl_run(void *unused) |
566 | { | 530 | { |
567 | diag308(DIAG308_IPL, NULL); | 531 | diag308(DIAG308_IPL, NULL); |
@@ -596,7 +560,7 @@ static int __init ipl_init(void) | |||
596 | break; | 560 | break; |
597 | case IPL_TYPE_FCP: | 561 | case IPL_TYPE_FCP: |
598 | case IPL_TYPE_FCP_DUMP: | 562 | case IPL_TYPE_FCP_DUMP: |
599 | rc = ipl_register_fcp_files(); | 563 | rc = sysfs_create_group(&ipl_kset->kobj, &ipl_fcp_attr_group); |
600 | break; | 564 | break; |
601 | case IPL_TYPE_NSS: | 565 | case IPL_TYPE_NSS: |
602 | rc = sysfs_create_group(&ipl_kset->kobj, &ipl_nss_attr_group); | 566 | rc = sysfs_create_group(&ipl_kset->kobj, &ipl_nss_attr_group); |
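
The two hunks above can drop ipl_register_fcp_files() and its unwind labels because struct attribute_group has a bin_attrs member: once the __BIN_ATTR objects are listed there, a single sysfs_create_group() call creates the plain and binary files together and cleans up after itself on failure. A hedged sketch of the pattern, with placeholder names (my_blob, my_blob_read, my_group, parent are illustrative, not from the patch):

#include <linux/fs.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>

static ssize_t my_blob_read(struct file *filp, struct kobject *kobj,
                            struct bin_attribute *attr, char *buf,
                            loff_t off, size_t count)
{
        return 0;       /* placeholder: copy the blob into buf here */
}

static struct bin_attribute my_blob_attr =
        __BIN_ATTR(my_blob, S_IRUGO, my_blob_read, NULL, PAGE_SIZE);

static struct bin_attribute *my_bin_attrs[] = {
        &my_blob_attr,
        NULL,
};

static struct attribute_group my_group = {
        .bin_attrs = my_bin_attrs,      /* plain attrs could go in .attrs */
};

static int my_files_register(struct kobject *parent)
{
        /* One call registers everything; sysfs_remove_group() undoes it. */
        return sysfs_create_group(parent, &my_group);
}
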
@@ -744,15 +708,13 @@ static ssize_t reipl_fcp_scpdata_write(struct file *filp, struct kobject *kobj, | |||
744 | 708 | ||
745 | return count; | 709 | return count; |
746 | } | 710 | } |
711 | static struct bin_attribute sys_reipl_fcp_scp_data_attr = | ||
712 | __BIN_ATTR(scp_data, (S_IRUGO | S_IWUSR), reipl_fcp_scpdata_read, | ||
713 | reipl_fcp_scpdata_write, PAGE_SIZE); | ||
747 | 714 | ||
748 | static struct bin_attribute sys_reipl_fcp_scp_data_attr = { | 715 | static struct bin_attribute *reipl_fcp_bin_attrs[] = { |
749 | .attr = { | 716 | &sys_reipl_fcp_scp_data_attr, |
750 | .name = "scp_data", | 717 | NULL, |
751 | .mode = S_IRUGO | S_IWUSR, | ||
752 | }, | ||
753 | .size = PAGE_SIZE, | ||
754 | .read = reipl_fcp_scpdata_read, | ||
755 | .write = reipl_fcp_scpdata_write, | ||
756 | }; | 718 | }; |
757 | 719 | ||
758 | DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%llx\n", | 720 | DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%llx\n", |
@@ -841,6 +803,7 @@ static struct attribute *reipl_fcp_attrs[] = { | |||
841 | 803 | ||
842 | static struct attribute_group reipl_fcp_attr_group = { | 804 | static struct attribute_group reipl_fcp_attr_group = { |
843 | .attrs = reipl_fcp_attrs, | 805 | .attrs = reipl_fcp_attrs, |
806 | .bin_attrs = reipl_fcp_bin_attrs, | ||
844 | }; | 807 | }; |
845 | 808 | ||
846 | /* CCW reipl device attributes */ | 809 | /* CCW reipl device attributes */ |
@@ -1261,15 +1224,6 @@ static int __init reipl_fcp_init(void) | |||
1261 | return rc; | 1224 | return rc; |
1262 | } | 1225 | } |
1263 | 1226 | ||
1264 | rc = sysfs_create_bin_file(&reipl_fcp_kset->kobj, | ||
1265 | &sys_reipl_fcp_scp_data_attr); | ||
1266 | if (rc) { | ||
1267 | sysfs_remove_group(&reipl_fcp_kset->kobj, &reipl_fcp_attr_group); | ||
1268 | kset_unregister(reipl_fcp_kset); | ||
1269 | free_page((unsigned long) reipl_block_fcp); | ||
1270 | return rc; | ||
1271 | } | ||
1272 | |||
1273 | if (ipl_info.type == IPL_TYPE_FCP) { | 1227 | if (ipl_info.type == IPL_TYPE_FCP) { |
1274 | memcpy(reipl_block_fcp, IPL_PARMBLOCK_START, PAGE_SIZE); | 1228 | memcpy(reipl_block_fcp, IPL_PARMBLOCK_START, PAGE_SIZE); |
1275 | /* | 1229 | /* |
@@ -1713,9 +1667,7 @@ static ssize_t on_reboot_store(struct kobject *kobj, | |||
1713 | { | 1667 | { |
1714 | return set_trigger(buf, &on_reboot_trigger, len); | 1668 | return set_trigger(buf, &on_reboot_trigger, len); |
1715 | } | 1669 | } |
1716 | 1670 | static struct kobj_attribute on_reboot_attr = __ATTR_RW(on_reboot); | |
1717 | static struct kobj_attribute on_reboot_attr = | ||
1718 | __ATTR(on_reboot, 0644, on_reboot_show, on_reboot_store); | ||
1719 | 1671 | ||
1720 | static void do_machine_restart(char *__unused) | 1672 | static void do_machine_restart(char *__unused) |
1721 | { | 1673 | { |
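
__ATTR_RW(on_reboot) used above is shorthand for __ATTR(on_reboot, 0644, on_reboot_show, on_reboot_store), so it only works because the show/store functions follow the <name>_show/<name>_store naming convention, as the later on_panic/on_restart/on_halt/on_poff hunks do as well. A small sketch, assuming a hypothetical attribute called mode:

#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>

static ssize_t mode_show(struct kobject *kobj, struct kobj_attribute *attr,
                         char *page)
{
        return sprintf(page, "%d\n", 0);        /* placeholder value */
}

static ssize_t mode_store(struct kobject *kobj, struct kobj_attribute *attr,
                          const char *buf, size_t len)
{
        return len;                             /* placeholder: parse buf */
}

/* Expands to __ATTR(mode, 0644, mode_show, mode_store). */
static struct kobj_attribute mode_attr = __ATTR_RW(mode);
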
@@ -1741,9 +1693,7 @@ static ssize_t on_panic_store(struct kobject *kobj, | |||
1741 | { | 1693 | { |
1742 | return set_trigger(buf, &on_panic_trigger, len); | 1694 | return set_trigger(buf, &on_panic_trigger, len); |
1743 | } | 1695 | } |
1744 | 1696 | static struct kobj_attribute on_panic_attr = __ATTR_RW(on_panic); | |
1745 | static struct kobj_attribute on_panic_attr = | ||
1746 | __ATTR(on_panic, 0644, on_panic_show, on_panic_store); | ||
1747 | 1697 | ||
1748 | static void do_panic(void) | 1698 | static void do_panic(void) |
1749 | { | 1699 | { |
@@ -1769,9 +1719,7 @@ static ssize_t on_restart_store(struct kobject *kobj, | |||
1769 | { | 1719 | { |
1770 | return set_trigger(buf, &on_restart_trigger, len); | 1720 | return set_trigger(buf, &on_restart_trigger, len); |
1771 | } | 1721 | } |
1772 | 1722 | static struct kobj_attribute on_restart_attr = __ATTR_RW(on_restart); | |
1773 | static struct kobj_attribute on_restart_attr = | ||
1774 | __ATTR(on_restart, 0644, on_restart_show, on_restart_store); | ||
1775 | 1723 | ||
1776 | static void __do_restart(void *ignore) | 1724 | static void __do_restart(void *ignore) |
1777 | { | 1725 | { |
@@ -1808,10 +1756,7 @@ static ssize_t on_halt_store(struct kobject *kobj, | |||
1808 | { | 1756 | { |
1809 | return set_trigger(buf, &on_halt_trigger, len); | 1757 | return set_trigger(buf, &on_halt_trigger, len); |
1810 | } | 1758 | } |
1811 | 1759 | static struct kobj_attribute on_halt_attr = __ATTR_RW(on_halt); | |
1812 | static struct kobj_attribute on_halt_attr = | ||
1813 | __ATTR(on_halt, 0644, on_halt_show, on_halt_store); | ||
1814 | |||
1815 | 1760 | ||
1816 | static void do_machine_halt(void) | 1761 | static void do_machine_halt(void) |
1817 | { | 1762 | { |
@@ -1837,10 +1782,7 @@ static ssize_t on_poff_store(struct kobject *kobj, | |||
1837 | { | 1782 | { |
1838 | return set_trigger(buf, &on_poff_trigger, len); | 1783 | return set_trigger(buf, &on_poff_trigger, len); |
1839 | } | 1784 | } |
1840 | 1785 | static struct kobj_attribute on_poff_attr = __ATTR_RW(on_poff); | |
1841 | static struct kobj_attribute on_poff_attr = | ||
1842 | __ATTR(on_poff, 0644, on_poff_show, on_poff_store); | ||
1843 | |||
1844 | 1786 | ||
1845 | static void do_machine_power_off(void) | 1787 | static void do_machine_power_off(void) |
1846 | { | 1788 | { |
@@ -1850,26 +1792,27 @@ static void do_machine_power_off(void) | |||
1850 | } | 1792 | } |
1851 | void (*_machine_power_off)(void) = do_machine_power_off; | 1793 | void (*_machine_power_off)(void) = do_machine_power_off; |
1852 | 1794 | ||
1795 | static struct attribute *shutdown_action_attrs[] = { | ||
1796 | &on_restart_attr.attr, | ||
1797 | &on_reboot_attr.attr, | ||
1798 | &on_panic_attr.attr, | ||
1799 | &on_halt_attr.attr, | ||
1800 | &on_poff_attr.attr, | ||
1801 | NULL, | ||
1802 | }; | ||
1803 | |||
1804 | static struct attribute_group shutdown_action_attr_group = { | ||
1805 | .attrs = shutdown_action_attrs, | ||
1806 | }; | ||
1807 | |||
1853 | static void __init shutdown_triggers_init(void) | 1808 | static void __init shutdown_triggers_init(void) |
1854 | { | 1809 | { |
1855 | shutdown_actions_kset = kset_create_and_add("shutdown_actions", NULL, | 1810 | shutdown_actions_kset = kset_create_and_add("shutdown_actions", NULL, |
1856 | firmware_kobj); | 1811 | firmware_kobj); |
1857 | if (!shutdown_actions_kset) | 1812 | if (!shutdown_actions_kset) |
1858 | goto fail; | 1813 | goto fail; |
1859 | if (sysfs_create_file(&shutdown_actions_kset->kobj, | 1814 | if (sysfs_create_group(&shutdown_actions_kset->kobj, |
1860 | &on_reboot_attr.attr)) | 1815 | &shutdown_action_attr_group)) |
1861 | goto fail; | ||
1862 | if (sysfs_create_file(&shutdown_actions_kset->kobj, | ||
1863 | &on_panic_attr.attr)) | ||
1864 | goto fail; | ||
1865 | if (sysfs_create_file(&shutdown_actions_kset->kobj, | ||
1866 | &on_halt_attr.attr)) | ||
1867 | goto fail; | ||
1868 | if (sysfs_create_file(&shutdown_actions_kset->kobj, | ||
1869 | &on_poff_attr.attr)) | ||
1870 | goto fail; | ||
1871 | if (sysfs_create_file(&shutdown_actions_kset->kobj, | ||
1872 | &on_restart_attr.attr)) | ||
1873 | goto fail; | 1816 | goto fail; |
1874 | return; | 1817 | return; |
1875 | fail: | 1818 | fail: |
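
Note that shutdown_action_attrs[] above stores the embedded struct attribute of each kobj_attribute (&on_restart_attr.attr, not the kobj_attribute itself) and ends with a NULL sentinel, which is how sysfs_create_group() knows where the list stops. A hedged sketch of the same shape with placeholder names:

#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>

static ssize_t first_show(struct kobject *kobj, struct kobj_attribute *attr,
                          char *page)
{
        return sprintf(page, "first\n");
}

static ssize_t second_show(struct kobject *kobj, struct kobj_attribute *attr,
                           char *page)
{
        return sprintf(page, "second\n");
}

static struct kobj_attribute first_attr  = __ATTR_RO(first);
static struct kobj_attribute second_attr = __ATTR_RO(second);

static struct attribute *example_attrs[] = {
        &first_attr.attr,       /* the embedded struct attribute ... */
        &second_attr.attr,
        NULL,                   /* ... and a NULL sentinel ends the list */
};

static struct attribute_group example_attr_group = {
        .attrs = example_attrs,
};
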
@@ -2062,12 +2005,10 @@ static void do_reset_calls(void) | |||
2062 | { | 2005 | { |
2063 | struct reset_call *reset; | 2006 | struct reset_call *reset; |
2064 | 2007 | ||
2065 | #ifdef CONFIG_64BIT | ||
2066 | if (diag308_set_works) { | 2008 | if (diag308_set_works) { |
2067 | diag308_reset(); | 2009 | diag308_reset(); |
2068 | return; | 2010 | return; |
2069 | } | 2011 | } |
2070 | #endif | ||
2071 | list_for_each_entry(reset, &rcall, list) | 2012 | list_for_each_entry(reset, &rcall, list) |
2072 | reset->fn(); | 2013 | reset->fn(); |
2073 | } | 2014 | } |
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c index f238720690f3..02ab9aa3812e 100644 --- a/arch/s390/kernel/irq.c +++ b/arch/s390/kernel/irq.c | |||
@@ -56,7 +56,7 @@ static const struct irq_class irqclass_main_desc[NR_IRQS_BASE] = { | |||
56 | * /proc/interrupts. | 56 | * /proc/interrupts. |
57 | * In addition this list contains non external / I/O events like NMIs. | 57 | * In addition this list contains non external / I/O events like NMIs. |
58 | */ | 58 | */ |
59 | static const struct irq_class irqclass_sub_desc[NR_ARCH_IRQS] = { | 59 | static const struct irq_class irqclass_sub_desc[] = { |
60 | {.irq = IRQEXT_CLK, .name = "CLK", .desc = "[EXT] Clock Comparator"}, | 60 | {.irq = IRQEXT_CLK, .name = "CLK", .desc = "[EXT] Clock Comparator"}, |
61 | {.irq = IRQEXT_EXC, .name = "EXC", .desc = "[EXT] External Call"}, | 61 | {.irq = IRQEXT_EXC, .name = "EXC", .desc = "[EXT] External Call"}, |
62 | {.irq = IRQEXT_EMS, .name = "EMS", .desc = "[EXT] Emergency Signal"}, | 62 | {.irq = IRQEXT_EMS, .name = "EMS", .desc = "[EXT] Emergency Signal"}, |
@@ -94,6 +94,7 @@ static const struct irq_class irqclass_sub_desc[NR_ARCH_IRQS] = { | |||
94 | 94 | ||
95 | void __init init_IRQ(void) | 95 | void __init init_IRQ(void) |
96 | { | 96 | { |
97 | BUILD_BUG_ON(ARRAY_SIZE(irqclass_sub_desc) != NR_ARCH_IRQS); | ||
97 | init_cio_interrupts(); | 98 | init_cio_interrupts(); |
98 | init_airq_interrupts(); | 99 | init_airq_interrupts(); |
99 | init_ext_interrupts(); | 100 | init_ext_interrupts(); |
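
The BUILD_BUG_ON added to init_IRQ() turns a mismatch between the irqclass_sub_desc initializer and NR_ARCH_IRQS into a build failure instead of a silently short, zero-filled table. The same compile-time check in a generic, hedged form (names below are illustrative, not the kernel's):

#include <linux/bug.h>
#include <linux/kernel.h>

enum { EVENT_A, EVENT_B, EVENT_C, NR_EVENTS };

static const char *const event_names[] = {
        [EVENT_A] = "a",
        [EVENT_B] = "b",
        [EVENT_C] = "c",
};

static void check_table(void)
{
        /* Fails the build if an enum value is added without a name entry. */
        BUILD_BUG_ON(ARRAY_SIZE(event_names) != NR_EVENTS);
}
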
diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c index 830066f936c8..a90299600483 100644 --- a/arch/s390/kernel/jump_label.c +++ b/arch/s390/kernel/jump_label.c | |||
@@ -78,7 +78,7 @@ static void __jump_label_transform(struct jump_entry *entry, | |||
78 | if (memcmp((void *)entry->code, &old, sizeof(old))) | 78 | if (memcmp((void *)entry->code, &old, sizeof(old))) |
79 | jump_label_bug(entry, &old, &new); | 79 | jump_label_bug(entry, &old, &new); |
80 | } | 80 | } |
81 | probe_kernel_write((void *)entry->code, &new, sizeof(new)); | 81 | s390_kernel_write((void *)entry->code, &new, sizeof(new)); |
82 | } | 82 | } |
83 | 83 | ||
84 | static int __sm_arch_jump_label_transform(void *data) | 84 | static int __sm_arch_jump_label_transform(void *data) |
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index f516edc1fbe3..389db56a2208 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c | |||
@@ -178,7 +178,7 @@ static int swap_instruction(void *data) | |||
178 | } | 178 | } |
179 | skip_ftrace: | 179 | skip_ftrace: |
180 | kcb->kprobe_status = KPROBE_SWAP_INST; | 180 | kcb->kprobe_status = KPROBE_SWAP_INST; |
181 | probe_kernel_write(p->addr, &new_insn, len); | 181 | s390_kernel_write(p->addr, &new_insn, len); |
182 | kcb->kprobe_status = status; | 182 | kcb->kprobe_status = status; |
183 | return 0; | 183 | return 0; |
184 | } | 184 | } |
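
Both call sites above move from probe_kernel_write() to s390_kernel_write(), the arch-private helper this series reworks for patching write-protected kernel text. A hedged usage sketch; the prototype below is inferred from the (dst, src, size) argument order visible in the call sites, and the no-op encoding is purely illustrative:

#include <linux/types.h>

/*
 * Inferred from the call sites above; the real declaration lives in the
 * s390 arch headers.
 */
void s390_kernel_write(void *dst, const void *src, size_t size);

static void patch_to_nop(void *code)
{
        /* Illustrative 4-byte "branch never" used as a no-op. */
        const unsigned char nop[4] = { 0x47, 0x00, 0x00, 0x00 };

        s390_kernel_write(code, nop, sizeof(nop));
}
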
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c index 2ca95862e336..0c1a679314dd 100644 --- a/arch/s390/kernel/module.c +++ b/arch/s390/kernel/module.c | |||
@@ -38,13 +38,8 @@ | |||
38 | #define DEBUGP(fmt , ...) | 38 | #define DEBUGP(fmt , ...) |
39 | #endif | 39 | #endif |
40 | 40 | ||
41 | #ifndef CONFIG_64BIT | ||
42 | #define PLT_ENTRY_SIZE 12 | ||
43 | #else /* CONFIG_64BIT */ | ||
44 | #define PLT_ENTRY_SIZE 20 | 41 | #define PLT_ENTRY_SIZE 20 |
45 | #endif /* CONFIG_64BIT */ | ||
46 | 42 | ||
47 | #ifdef CONFIG_64BIT | ||
48 | void *module_alloc(unsigned long size) | 43 | void *module_alloc(unsigned long size) |
49 | { | 44 | { |
50 | if (PAGE_ALIGN(size) > MODULES_LEN) | 45 | if (PAGE_ALIGN(size) > MODULES_LEN) |
@@ -53,7 +48,6 @@ void *module_alloc(unsigned long size) | |||
53 | GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE, | 48 | GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE, |
54 | __builtin_return_address(0)); | 49 | __builtin_return_address(0)); |
55 | } | 50 | } |
56 | #endif | ||
57 | 51 | ||
58 | void module_arch_freeing_init(struct module *mod) | 52 | void module_arch_freeing_init(struct module *mod) |
59 | { | 53 | { |
@@ -323,17 +317,11 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, | |||
323 | unsigned int *ip; | 317 | unsigned int *ip; |
324 | ip = me->module_core + me->arch.plt_offset + | 318 | ip = me->module_core + me->arch.plt_offset + |
325 | info->plt_offset; | 319 | info->plt_offset; |
326 | #ifndef CONFIG_64BIT | ||
327 | ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */ | ||
328 | ip[1] = 0x100607f1; | ||
329 | ip[2] = val; | ||
330 | #else /* CONFIG_64BIT */ | ||
331 | ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */ | 320 | ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */ |
332 | ip[1] = 0x100a0004; | 321 | ip[1] = 0x100a0004; |
333 | ip[2] = 0x07f10000; | 322 | ip[2] = 0x07f10000; |
334 | ip[3] = (unsigned int) (val >> 32); | 323 | ip[3] = (unsigned int) (val >> 32); |
335 | ip[4] = (unsigned int) val; | 324 | ip[4] = (unsigned int) val; |
336 | #endif /* CONFIG_64BIT */ | ||
337 | info->plt_initialized = 1; | 325 | info->plt_initialized = 1; |
338 | } | 326 | } |
339 | if (r_type == R_390_PLTOFF16 || | 327 | if (r_type == R_390_PLTOFF16 || |
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c index 3f51cf4e8f02..505c17c0ae1a 100644 --- a/arch/s390/kernel/nmi.c +++ b/arch/s390/kernel/nmi.c | |||
@@ -117,55 +117,36 @@ static int notrace s390_revalidate_registers(struct mci *mci) | |||
117 | */ | 117 | */ |
118 | kill_task = 1; | 118 | kill_task = 1; |
119 | } | 119 | } |
120 | #ifndef CONFIG_64BIT | 120 | fpt_save_area = &S390_lowcore.floating_pt_save_area; |
121 | fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area; | ||
122 | if (!mci->fc) { | ||
123 | /* | ||
124 | * Floating point control register can't be restored. | ||
125 | * Task will be terminated. | ||
126 | */ | ||
127 | asm volatile("lfpc 0(%0)" : : "a" (&zero), "m" (zero)); | ||
128 | kill_task = 1; | ||
129 | } else | ||
130 | asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area)); | ||
131 | |||
121 | asm volatile( | 132 | asm volatile( |
122 | " ld 0,0(%0)\n" | 133 | " ld 0,0(%0)\n" |
123 | " ld 2,8(%0)\n" | 134 | " ld 1,8(%0)\n" |
124 | " ld 4,16(%0)\n" | 135 | " ld 2,16(%0)\n" |
125 | " ld 6,24(%0)" | 136 | " ld 3,24(%0)\n" |
126 | : : "a" (&S390_lowcore.floating_pt_save_area)); | 137 | " ld 4,32(%0)\n" |
127 | #endif | 138 | " ld 5,40(%0)\n" |
128 | 139 | " ld 6,48(%0)\n" | |
129 | if (MACHINE_HAS_IEEE) { | 140 | " ld 7,56(%0)\n" |
130 | #ifdef CONFIG_64BIT | 141 | " ld 8,64(%0)\n" |
131 | fpt_save_area = &S390_lowcore.floating_pt_save_area; | 142 | " ld 9,72(%0)\n" |
132 | fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area; | 143 | " ld 10,80(%0)\n" |
133 | #else | 144 | " ld 11,88(%0)\n" |
134 | fpt_save_area = (void *) S390_lowcore.extended_save_area_addr; | 145 | " ld 12,96(%0)\n" |
135 | fpt_creg_save_area = fpt_save_area + 128; | 146 | " ld 13,104(%0)\n" |
136 | #endif | 147 | " ld 14,112(%0)\n" |
137 | if (!mci->fc) { | 148 | " ld 15,120(%0)\n" |
138 | /* | 149 | : : "a" (fpt_save_area)); |
139 | * Floating point control register can't be restored. | ||
140 | * Task will be terminated. | ||
141 | */ | ||
142 | asm volatile("lfpc 0(%0)" : : "a" (&zero), "m" (zero)); | ||
143 | kill_task = 1; | ||
144 | |||
145 | } else | ||
146 | asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area)); | ||
147 | |||
148 | asm volatile( | ||
149 | " ld 0,0(%0)\n" | ||
150 | " ld 1,8(%0)\n" | ||
151 | " ld 2,16(%0)\n" | ||
152 | " ld 3,24(%0)\n" | ||
153 | " ld 4,32(%0)\n" | ||
154 | " ld 5,40(%0)\n" | ||
155 | " ld 6,48(%0)\n" | ||
156 | " ld 7,56(%0)\n" | ||
157 | " ld 8,64(%0)\n" | ||
158 | " ld 9,72(%0)\n" | ||
159 | " ld 10,80(%0)\n" | ||
160 | " ld 11,88(%0)\n" | ||
161 | " ld 12,96(%0)\n" | ||
162 | " ld 13,104(%0)\n" | ||
163 | " ld 14,112(%0)\n" | ||
164 | " ld 15,120(%0)\n" | ||
165 | : : "a" (fpt_save_area)); | ||
166 | } | ||
167 | |||
168 | #ifdef CONFIG_64BIT | ||
169 | /* Revalidate vector registers */ | 150 | /* Revalidate vector registers */ |
170 | if (MACHINE_HAS_VX && current->thread.vxrs) { | 151 | if (MACHINE_HAS_VX && current->thread.vxrs) { |
171 | if (!mci->vr) { | 152 | if (!mci->vr) { |
@@ -178,7 +159,6 @@ static int notrace s390_revalidate_registers(struct mci *mci) | |||
178 | restore_vx_regs((__vector128 *) | 159 | restore_vx_regs((__vector128 *) |
179 | S390_lowcore.vector_save_area_addr); | 160 | S390_lowcore.vector_save_area_addr); |
180 | } | 161 | } |
181 | #endif | ||
182 | /* Revalidate access registers */ | 162 | /* Revalidate access registers */ |
183 | asm volatile( | 163 | asm volatile( |
184 | " lam 0,15,0(%0)" | 164 | " lam 0,15,0(%0)" |
@@ -198,21 +178,14 @@ static int notrace s390_revalidate_registers(struct mci *mci) | |||
198 | */ | 178 | */ |
199 | s390_handle_damage("invalid control registers."); | 179 | s390_handle_damage("invalid control registers."); |
200 | } else { | 180 | } else { |
201 | #ifdef CONFIG_64BIT | ||
202 | asm volatile( | 181 | asm volatile( |
203 | " lctlg 0,15,0(%0)" | 182 | " lctlg 0,15,0(%0)" |
204 | : : "a" (&S390_lowcore.cregs_save_area)); | 183 | : : "a" (&S390_lowcore.cregs_save_area)); |
205 | #else | ||
206 | asm volatile( | ||
207 | " lctl 0,15,0(%0)" | ||
208 | : : "a" (&S390_lowcore.cregs_save_area)); | ||
209 | #endif | ||
210 | } | 184 | } |
211 | /* | 185 | /* |
212 | * We don't even try to revalidate the TOD register, since we simply | 186 | * We don't even try to revalidate the TOD register, since we simply |
213 | * can't write something sensible into that register. | 187 | * can't write something sensible into that register. |
214 | */ | 188 | */ |
215 | #ifdef CONFIG_64BIT | ||
216 | /* | 189 | /* |
217 | * See if we can revalidate the TOD programmable register with its | 190 | * See if we can revalidate the TOD programmable register with its |
218 | * old contents (should be zero) otherwise set it to zero. | 191 | * old contents (should be zero) otherwise set it to zero. |
@@ -228,7 +201,6 @@ static int notrace s390_revalidate_registers(struct mci *mci) | |||
228 | " sckpf" | 201 | " sckpf" |
229 | : : "a" (&S390_lowcore.tod_progreg_save_area) | 202 | : : "a" (&S390_lowcore.tod_progreg_save_area) |
230 | : "0", "cc"); | 203 | : "0", "cc"); |
231 | #endif | ||
232 | /* Revalidate clock comparator register */ | 204 | /* Revalidate clock comparator register */ |
233 | set_clock_comparator(S390_lowcore.clock_comparator); | 205 | set_clock_comparator(S390_lowcore.clock_comparator); |
234 | /* Check if old PSW is valid */ | 206 | /* Check if old PSW is valid */ |
@@ -280,19 +252,11 @@ void notrace s390_do_machine_check(struct pt_regs *regs) | |||
280 | if (mci->b) { | 252 | if (mci->b) { |
281 | /* Processing backup -> verify if we can survive this */ | 253 | /* Processing backup -> verify if we can survive this */ |
282 | u64 z_mcic, o_mcic, t_mcic; | 254 | u64 z_mcic, o_mcic, t_mcic; |
283 | #ifdef CONFIG_64BIT | ||
284 | z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<29); | 255 | z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<29); |
285 | o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 | | 256 | o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 | |
286 | 1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 | | 257 | 1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 | |
287 | 1ULL<<30 | 1ULL<<21 | 1ULL<<20 | 1ULL<<17 | | 258 | 1ULL<<30 | 1ULL<<21 | 1ULL<<20 | 1ULL<<17 | |
288 | 1ULL<<16); | 259 | 1ULL<<16); |
289 | #else | ||
290 | z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<57 | 1ULL<<50 | | ||
291 | 1ULL<<29); | ||
292 | o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 | | ||
293 | 1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 | | ||
294 | 1ULL<<30 | 1ULL<<20 | 1ULL<<17 | 1ULL<<16); | ||
295 | #endif | ||
296 | t_mcic = *(u64 *)mci; | 260 | t_mcic = *(u64 *)mci; |
297 | 261 | ||
298 | if (((t_mcic & z_mcic) != 0) || | 262 | if (((t_mcic & z_mcic) != 0) || |
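
With the 31-bit masks gone, the machine check path keeps a single pair of masks: z_mcic lists MCIC bits that must be zero and o_mcic bits that must be one for the machine check to be treated as survivable, which is what the final comparison tests. The same zero-mask/one-mask check in a generic, hedged form:

#include <linux/types.h>

/*
 * Sketch of the check above: 'status' is acceptable only if none of the
 * must-be-zero bits are set and every must-be-one bit is set.
 */
static bool status_survivable(u64 status, u64 must_be_zero, u64 must_be_one)
{
        return (status & must_be_zero) == 0 &&
               (status & must_be_one) == must_be_one;
}
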
diff --git a/arch/s390/kernel/pgm_check.S b/arch/s390/kernel/pgm_check.S index f6f8886399f6..036aa01d06a9 100644 --- a/arch/s390/kernel/pgm_check.S +++ b/arch/s390/kernel/pgm_check.S | |||
@@ -6,19 +6,13 @@ | |||
6 | 6 | ||
7 | #include <linux/linkage.h> | 7 | #include <linux/linkage.h> |
8 | 8 | ||
9 | #ifdef CONFIG_32BIT | ||
10 | #define PGM_CHECK_64BIT(handler) .long default_trap_handler | ||
11 | #else | ||
12 | #define PGM_CHECK_64BIT(handler) .long handler | ||
13 | #endif | ||
14 | |||
15 | #define PGM_CHECK(handler) .long handler | 9 | #define PGM_CHECK(handler) .long handler |
16 | #define PGM_CHECK_DEFAULT PGM_CHECK(default_trap_handler) | 10 | #define PGM_CHECK_DEFAULT PGM_CHECK(default_trap_handler) |
17 | 11 | ||
18 | /* | 12 | /* |
19 | * The program check table contains exactly 128 (0x00-0x7f) entries. Each | 13 | * The program check table contains exactly 128 (0x00-0x7f) entries. Each |
20 | * line defines the 31 and/or 64 bit function to be called corresponding | 14 | * line defines the function to be called corresponding to the program check |
21 | * to the program check interruption code. | 15 | * interruption code. |
22 | */ | 16 | */ |
23 | .section .rodata, "a" | 17 | .section .rodata, "a" |
24 | ENTRY(pgm_check_table) | 18 | ENTRY(pgm_check_table) |
@@ -46,10 +40,10 @@ PGM_CHECK_DEFAULT /* 14 */ | |||
46 | PGM_CHECK(operand_exception) /* 15 */ | 40 | PGM_CHECK(operand_exception) /* 15 */ |
47 | PGM_CHECK_DEFAULT /* 16 */ | 41 | PGM_CHECK_DEFAULT /* 16 */ |
48 | PGM_CHECK_DEFAULT /* 17 */ | 42 | PGM_CHECK_DEFAULT /* 17 */ |
49 | PGM_CHECK_64BIT(transaction_exception) /* 18 */ | 43 | PGM_CHECK(transaction_exception) /* 18 */ |
50 | PGM_CHECK_DEFAULT /* 19 */ | 44 | PGM_CHECK_DEFAULT /* 19 */ |
51 | PGM_CHECK_DEFAULT /* 1a */ | 45 | PGM_CHECK_DEFAULT /* 1a */ |
52 | PGM_CHECK_64BIT(vector_exception) /* 1b */ | 46 | PGM_CHECK(vector_exception) /* 1b */ |
53 | PGM_CHECK(space_switch_exception) /* 1c */ | 47 | PGM_CHECK(space_switch_exception) /* 1c */ |
54 | PGM_CHECK(hfp_sqrt_exception) /* 1d */ | 48 | PGM_CHECK(hfp_sqrt_exception) /* 1d */ |
55 | PGM_CHECK_DEFAULT /* 1e */ | 49 | PGM_CHECK_DEFAULT /* 1e */ |
@@ -78,10 +72,10 @@ PGM_CHECK_DEFAULT /* 34 */ | |||
78 | PGM_CHECK_DEFAULT /* 35 */ | 72 | PGM_CHECK_DEFAULT /* 35 */ |
79 | PGM_CHECK_DEFAULT /* 36 */ | 73 | PGM_CHECK_DEFAULT /* 36 */ |
80 | PGM_CHECK_DEFAULT /* 37 */ | 74 | PGM_CHECK_DEFAULT /* 37 */ |
81 | PGM_CHECK_64BIT(do_dat_exception) /* 38 */ | 75 | PGM_CHECK(do_dat_exception) /* 38 */ |
82 | PGM_CHECK_64BIT(do_dat_exception) /* 39 */ | 76 | PGM_CHECK(do_dat_exception) /* 39 */ |
83 | PGM_CHECK_64BIT(do_dat_exception) /* 3a */ | 77 | PGM_CHECK(do_dat_exception) /* 3a */ |
84 | PGM_CHECK_64BIT(do_dat_exception) /* 3b */ | 78 | PGM_CHECK(do_dat_exception) /* 3b */ |
85 | PGM_CHECK_DEFAULT /* 3c */ | 79 | PGM_CHECK_DEFAULT /* 3c */ |
86 | PGM_CHECK_DEFAULT /* 3d */ | 80 | PGM_CHECK_DEFAULT /* 3d */ |
87 | PGM_CHECK_DEFAULT /* 3e */ | 81 | PGM_CHECK_DEFAULT /* 3e */ |
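
pgm_check_table above is a 128-slot table of handler addresses indexed by the program interruption code; with 31-bit support removed, entries such as transaction_exception and do_dat_exception are installed unconditionally. A rough C picture of the dispatch idea, using a hypothetical table name rather than the real assembly symbol:

struct pt_regs;
typedef void (*pgm_handler_t)(struct pt_regs *regs);

/* Hypothetical 128-entry table, one slot per interruption code 0x00-0x7f. */
static pgm_handler_t handler_table[128];

static void dispatch_program_check(struct pt_regs *regs, unsigned int code)
{
        /*
         * Mask to the low 7 bits so the index always stays in range;
         * unused slots would need a default handler, which is what
         * PGM_CHECK_DEFAULT provides in the table above.
         */
        handler_table[code & 0x7f](regs);
}
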
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 13fc0978ca7e..dc5edc29b73a 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c | |||
@@ -79,13 +79,11 @@ void release_thread(struct task_struct *dead_task) | |||
79 | { | 79 | { |
80 | } | 80 | } |
81 | 81 | ||
82 | #ifdef CONFIG_64BIT | ||
83 | void arch_release_task_struct(struct task_struct *tsk) | 82 | void arch_release_task_struct(struct task_struct *tsk) |
84 | { | 83 | { |
85 | if (tsk->thread.vxrs) | 84 | if (tsk->thread.vxrs) |
86 | kfree(tsk->thread.vxrs); | 85 | kfree(tsk->thread.vxrs); |
87 | } | 86 | } |
88 | #endif | ||
89 | 87 | ||
90 | int copy_thread(unsigned long clone_flags, unsigned long new_stackp, | 88 | int copy_thread(unsigned long clone_flags, unsigned long new_stackp, |
91 | unsigned long arg, struct task_struct *p) | 89 | unsigned long arg, struct task_struct *p) |
@@ -144,19 +142,6 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp, | |||
144 | p->thread.ri_signum = 0; | 142 | p->thread.ri_signum = 0; |
145 | frame->childregs.psw.mask &= ~PSW_MASK_RI; | 143 | frame->childregs.psw.mask &= ~PSW_MASK_RI; |
146 | 144 | ||
147 | #ifndef CONFIG_64BIT | ||
148 | /* | ||
149 | * save fprs to current->thread.fp_regs to merge them with | ||
150 | * the emulated registers and then copy the result to the child. | ||
151 | */ | ||
152 | save_fp_ctl(¤t->thread.fp_regs.fpc); | ||
153 | save_fp_regs(current->thread.fp_regs.fprs); | ||
154 | memcpy(&p->thread.fp_regs, ¤t->thread.fp_regs, | ||
155 | sizeof(s390_fp_regs)); | ||
156 | /* Set a new TLS ? */ | ||
157 | if (clone_flags & CLONE_SETTLS) | ||
158 | p->thread.acrs[0] = frame->childregs.gprs[6]; | ||
159 | #else /* CONFIG_64BIT */ | ||
160 | /* Save the fpu registers to new thread structure. */ | 145 | /* Save the fpu registers to new thread structure. */ |
161 | save_fp_ctl(&p->thread.fp_regs.fpc); | 146 | save_fp_ctl(&p->thread.fp_regs.fpc); |
162 | save_fp_regs(p->thread.fp_regs.fprs); | 147 | save_fp_regs(p->thread.fp_regs.fprs); |
@@ -172,15 +157,13 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp, | |||
172 | p->thread.acrs[1] = (unsigned int)tls; | 157 | p->thread.acrs[1] = (unsigned int)tls; |
173 | } | 158 | } |
174 | } | 159 | } |
175 | #endif /* CONFIG_64BIT */ | ||
176 | return 0; | 160 | return 0; |
177 | } | 161 | } |
178 | 162 | ||
179 | asmlinkage void execve_tail(void) | 163 | asmlinkage void execve_tail(void) |
180 | { | 164 | { |
181 | current->thread.fp_regs.fpc = 0; | 165 | current->thread.fp_regs.fpc = 0; |
182 | if (MACHINE_HAS_IEEE) | 166 | asm volatile("sfpc %0,%0" : : "d" (0)); |
183 | asm volatile("sfpc %0,%0" : : "d" (0)); | ||
184 | } | 167 | } |
185 | 168 | ||
186 | /* | 169 | /* |
@@ -188,18 +171,8 @@ asmlinkage void execve_tail(void) | |||
188 | */ | 171 | */ |
189 | int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs) | 172 | int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs) |
190 | { | 173 | { |
191 | #ifndef CONFIG_64BIT | ||
192 | /* | ||
193 | * save fprs to current->thread.fp_regs to merge them with | ||
194 | * the emulated registers and then copy the result to the dump. | ||
195 | */ | ||
196 | save_fp_ctl(¤t->thread.fp_regs.fpc); | ||
197 | save_fp_regs(current->thread.fp_regs.fprs); | ||
198 | memcpy(fpregs, ¤t->thread.fp_regs, sizeof(s390_fp_regs)); | ||
199 | #else /* CONFIG_64BIT */ | ||
200 | save_fp_ctl(&fpregs->fpc); | 174 | save_fp_ctl(&fpregs->fpc); |
201 | save_fp_regs(fpregs->fprs); | 175 | save_fp_regs(fpregs->fprs); |
202 | #endif /* CONFIG_64BIT */ | ||
203 | return 1; | 176 | return 1; |
204 | } | 177 | } |
205 | EXPORT_SYMBOL(dump_fpu); | 178 | EXPORT_SYMBOL(dump_fpu); |
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index eabfb4594517..d363c9c322a1 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c | |||
@@ -44,7 +44,6 @@ void update_cr_regs(struct task_struct *task) | |||
44 | struct thread_struct *thread = &task->thread; | 44 | struct thread_struct *thread = &task->thread; |
45 | struct per_regs old, new; | 45 | struct per_regs old, new; |
46 | 46 | ||
47 | #ifdef CONFIG_64BIT | ||
48 | /* Take care of the enable/disable of transactional execution. */ | 47 | /* Take care of the enable/disable of transactional execution. */ |
49 | if (MACHINE_HAS_TE || MACHINE_HAS_VX) { | 48 | if (MACHINE_HAS_TE || MACHINE_HAS_VX) { |
50 | unsigned long cr, cr_new; | 49 | unsigned long cr, cr_new; |
@@ -80,7 +79,6 @@ void update_cr_regs(struct task_struct *task) | |||
80 | __ctl_load(cr_new, 2, 2); | 79 | __ctl_load(cr_new, 2, 2); |
81 | } | 80 | } |
82 | } | 81 | } |
83 | #endif | ||
84 | /* Copy user specified PER registers */ | 82 | /* Copy user specified PER registers */ |
85 | new.control = thread->per_user.control; | 83 | new.control = thread->per_user.control; |
86 | new.start = thread->per_user.start; | 84 | new.start = thread->per_user.start; |
@@ -93,10 +91,8 @@ void update_cr_regs(struct task_struct *task) | |||
93 | new.control |= PER_EVENT_BRANCH; | 91 | new.control |= PER_EVENT_BRANCH; |
94 | else | 92 | else |
95 | new.control |= PER_EVENT_IFETCH; | 93 | new.control |= PER_EVENT_IFETCH; |
96 | #ifdef CONFIG_64BIT | ||
97 | new.control |= PER_CONTROL_SUSPENSION; | 94 | new.control |= PER_CONTROL_SUSPENSION; |
98 | new.control |= PER_EVENT_TRANSACTION_END; | 95 | new.control |= PER_EVENT_TRANSACTION_END; |
99 | #endif | ||
100 | if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP)) | 96 | if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP)) |
101 | new.control |= PER_EVENT_IFETCH; | 97 | new.control |= PER_EVENT_IFETCH; |
102 | new.start = 0; | 98 | new.start = 0; |
@@ -146,11 +142,7 @@ void ptrace_disable(struct task_struct *task) | |||
146 | task->thread.per_flags = 0; | 142 | task->thread.per_flags = 0; |
147 | } | 143 | } |
148 | 144 | ||
149 | #ifndef CONFIG_64BIT | 145 | #define __ADDR_MASK 7 |
150 | # define __ADDR_MASK 3 | ||
151 | #else | ||
152 | # define __ADDR_MASK 7 | ||
153 | #endif | ||
154 | 146 | ||
155 | static inline unsigned long __peek_user_per(struct task_struct *child, | 147 | static inline unsigned long __peek_user_per(struct task_struct *child, |
156 | addr_t addr) | 148 | addr_t addr) |
@@ -223,7 +215,6 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr) | |||
223 | * access registers are stored in the thread structure | 215 | * access registers are stored in the thread structure |
224 | */ | 216 | */ |
225 | offset = addr - (addr_t) &dummy->regs.acrs; | 217 | offset = addr - (addr_t) &dummy->regs.acrs; |
226 | #ifdef CONFIG_64BIT | ||
227 | /* | 218 | /* |
228 | * Very special case: old & broken 64 bit gdb reading | 219 | * Very special case: old & broken 64 bit gdb reading |
229 | * from acrs[15]. Result is a 64 bit value. Read the | 220 | * from acrs[15]. Result is a 64 bit value. Read the |
@@ -232,8 +223,7 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr) | |||
232 | if (addr == (addr_t) &dummy->regs.acrs[15]) | 223 | if (addr == (addr_t) &dummy->regs.acrs[15]) |
233 | tmp = ((unsigned long) child->thread.acrs[15]) << 32; | 224 | tmp = ((unsigned long) child->thread.acrs[15]) << 32; |
234 | else | 225 | else |
235 | #endif | 226 | tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset); |
236 | tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset); | ||
237 | 227 | ||
238 | } else if (addr == (addr_t) &dummy->regs.orig_gpr2) { | 228 | } else if (addr == (addr_t) &dummy->regs.orig_gpr2) { |
239 | /* | 229 | /* |
@@ -261,12 +251,10 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr) | |||
261 | * or the child->thread.vxrs array | 251 | * or the child->thread.vxrs array |
262 | */ | 252 | */ |
263 | offset = addr - (addr_t) &dummy->regs.fp_regs.fprs; | 253 | offset = addr - (addr_t) &dummy->regs.fp_regs.fprs; |
264 | #ifdef CONFIG_64BIT | ||
265 | if (child->thread.vxrs) | 254 | if (child->thread.vxrs) |
266 | tmp = *(addr_t *) | 255 | tmp = *(addr_t *) |
267 | ((addr_t) child->thread.vxrs + 2*offset); | 256 | ((addr_t) child->thread.vxrs + 2*offset); |
268 | else | 257 | else |
269 | #endif | ||
270 | tmp = *(addr_t *) | 258 | tmp = *(addr_t *) |
271 | ((addr_t) &child->thread.fp_regs.fprs + offset); | 259 | ((addr_t) &child->thread.fp_regs.fprs + offset); |
272 | 260 | ||
@@ -293,11 +281,9 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data) | |||
293 | * an alignment of 4. Programmers from hell... | 281 | * an alignment of 4. Programmers from hell... |
294 | */ | 282 | */ |
295 | mask = __ADDR_MASK; | 283 | mask = __ADDR_MASK; |
296 | #ifdef CONFIG_64BIT | ||
297 | if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs && | 284 | if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs && |
298 | addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2) | 285 | addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2) |
299 | mask = 3; | 286 | mask = 3; |
300 | #endif | ||
301 | if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK) | 287 | if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK) |
302 | return -EIO; | 288 | return -EIO; |
303 | 289 | ||
@@ -370,7 +356,6 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) | |||
370 | * access registers are stored in the thread structure | 356 | * access registers are stored in the thread structure |
371 | */ | 357 | */ |
372 | offset = addr - (addr_t) &dummy->regs.acrs; | 358 | offset = addr - (addr_t) &dummy->regs.acrs; |
373 | #ifdef CONFIG_64BIT | ||
374 | /* | 359 | /* |
375 | * Very special case: old & broken 64 bit gdb writing | 360 | * Very special case: old & broken 64 bit gdb writing |
376 | * to acrs[15] with a 64 bit value. Ignore the lower | 361 | * to acrs[15] with a 64 bit value. Ignore the lower |
@@ -380,8 +365,7 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) | |||
380 | if (addr == (addr_t) &dummy->regs.acrs[15]) | 365 | if (addr == (addr_t) &dummy->regs.acrs[15]) |
381 | child->thread.acrs[15] = (unsigned int) (data >> 32); | 366 | child->thread.acrs[15] = (unsigned int) (data >> 32); |
382 | else | 367 | else |
383 | #endif | 368 | *(addr_t *)((addr_t) &child->thread.acrs + offset) = data; |
384 | *(addr_t *)((addr_t) &child->thread.acrs + offset) = data; | ||
385 | 369 | ||
386 | } else if (addr == (addr_t) &dummy->regs.orig_gpr2) { | 370 | } else if (addr == (addr_t) &dummy->regs.orig_gpr2) { |
387 | /* | 371 | /* |
@@ -411,12 +395,10 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) | |||
411 | * or the child->thread.vxrs array | 395 | * or the child->thread.vxrs array |
412 | */ | 396 | */ |
413 | offset = addr - (addr_t) &dummy->regs.fp_regs.fprs; | 397 | offset = addr - (addr_t) &dummy->regs.fp_regs.fprs; |
414 | #ifdef CONFIG_64BIT | ||
415 | if (child->thread.vxrs) | 398 | if (child->thread.vxrs) |
416 | *(addr_t *)((addr_t) | 399 | *(addr_t *)((addr_t) |
417 | child->thread.vxrs + 2*offset) = data; | 400 | child->thread.vxrs + 2*offset) = data; |
418 | else | 401 | else |
419 | #endif | ||
420 | *(addr_t *)((addr_t) | 402 | *(addr_t *)((addr_t) |
421 | &child->thread.fp_regs.fprs + offset) = data; | 403 | &child->thread.fp_regs.fprs + offset) = data; |
422 | 404 | ||
@@ -441,11 +423,9 @@ static int poke_user(struct task_struct *child, addr_t addr, addr_t data) | |||
441 | * an alignment of 4. Programmers from hell indeed... | 423 | * an alignment of 4. Programmers from hell indeed... |
442 | */ | 424 | */ |
443 | mask = __ADDR_MASK; | 425 | mask = __ADDR_MASK; |
444 | #ifdef CONFIG_64BIT | ||
445 | if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs && | 426 | if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs && |
446 | addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2) | 427 | addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2) |
447 | mask = 3; | 428 | mask = 3; |
448 | #endif | ||
449 | if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK) | 429 | if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK) |
450 | return -EIO; | 430 | return -EIO; |
451 | 431 | ||
@@ -649,12 +629,10 @@ static u32 __peek_user_compat(struct task_struct *child, addr_t addr) | |||
649 | * or the child->thread.vxrs array | 629 | * or the child->thread.vxrs array |
650 | */ | 630 | */ |
651 | offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs; | 631 | offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs; |
652 | #ifdef CONFIG_64BIT | ||
653 | if (child->thread.vxrs) | 632 | if (child->thread.vxrs) |
654 | tmp = *(__u32 *) | 633 | tmp = *(__u32 *) |
655 | ((addr_t) child->thread.vxrs + 2*offset); | 634 | ((addr_t) child->thread.vxrs + 2*offset); |
656 | else | 635 | else |
657 | #endif | ||
658 | tmp = *(__u32 *) | 636 | tmp = *(__u32 *) |
659 | ((addr_t) &child->thread.fp_regs.fprs + offset); | 637 | ((addr_t) &child->thread.fp_regs.fprs + offset); |
660 | 638 | ||
@@ -776,12 +754,10 @@ static int __poke_user_compat(struct task_struct *child, | |||
776 | * or the child->thread.vxrs array | 754 | * or the child->thread.vxrs array |
777 | */ | 755 | */ |
778 | offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs; | 756 | offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs; |
779 | #ifdef CONFIG_64BIT | ||
780 | if (child->thread.vxrs) | 757 | if (child->thread.vxrs) |
781 | *(__u32 *)((addr_t) | 758 | *(__u32 *)((addr_t) |
782 | child->thread.vxrs + 2*offset) = tmp; | 759 | child->thread.vxrs + 2*offset) = tmp; |
783 | else | 760 | else |
784 | #endif | ||
785 | *(__u32 *)((addr_t) | 761 | *(__u32 *)((addr_t) |
786 | &child->thread.fp_regs.fprs + offset) = tmp; | 762 | &child->thread.fp_regs.fprs + offset) = tmp; |
787 | 763 | ||
@@ -979,16 +955,13 @@ static int s390_fpregs_get(struct task_struct *target, | |||
979 | if (target == current) { | 955 | if (target == current) { |
980 | save_fp_ctl(&target->thread.fp_regs.fpc); | 956 | save_fp_ctl(&target->thread.fp_regs.fpc); |
981 | save_fp_regs(target->thread.fp_regs.fprs); | 957 | save_fp_regs(target->thread.fp_regs.fprs); |
982 | } | 958 | } else if (target->thread.vxrs) { |
983 | #ifdef CONFIG_64BIT | ||
984 | else if (target->thread.vxrs) { | ||
985 | int i; | 959 | int i; |
986 | 960 | ||
987 | for (i = 0; i < __NUM_VXRS_LOW; i++) | 961 | for (i = 0; i < __NUM_VXRS_LOW; i++) |
988 | target->thread.fp_regs.fprs[i] = | 962 | target->thread.fp_regs.fprs[i] = |
989 | *(freg_t *)(target->thread.vxrs + i); | 963 | *(freg_t *)(target->thread.vxrs + i); |
990 | } | 964 | } |
991 | #endif | ||
992 | return user_regset_copyout(&pos, &count, &kbuf, &ubuf, | 965 | return user_regset_copyout(&pos, &count, &kbuf, &ubuf, |
993 | &target->thread.fp_regs, 0, -1); | 966 | &target->thread.fp_regs, 0, -1); |
994 | } | 967 | } |
@@ -1026,23 +999,18 @@ static int s390_fpregs_set(struct task_struct *target, | |||
1026 | if (target == current) { | 999 | if (target == current) { |
1027 | restore_fp_ctl(&target->thread.fp_regs.fpc); | 1000 | restore_fp_ctl(&target->thread.fp_regs.fpc); |
1028 | restore_fp_regs(target->thread.fp_regs.fprs); | 1001 | restore_fp_regs(target->thread.fp_regs.fprs); |
1029 | } | 1002 | } else if (target->thread.vxrs) { |
1030 | #ifdef CONFIG_64BIT | ||
1031 | else if (target->thread.vxrs) { | ||
1032 | int i; | 1003 | int i; |
1033 | 1004 | ||
1034 | for (i = 0; i < __NUM_VXRS_LOW; i++) | 1005 | for (i = 0; i < __NUM_VXRS_LOW; i++) |
1035 | *(freg_t *)(target->thread.vxrs + i) = | 1006 | *(freg_t *)(target->thread.vxrs + i) = |
1036 | target->thread.fp_regs.fprs[i]; | 1007 | target->thread.fp_regs.fprs[i]; |
1037 | } | 1008 | } |
1038 | #endif | ||
1039 | } | 1009 | } |
1040 | 1010 | ||
1041 | return rc; | 1011 | return rc; |
1042 | } | 1012 | } |
1043 | 1013 | ||
1044 | #ifdef CONFIG_64BIT | ||
1045 | |||
1046 | static int s390_last_break_get(struct task_struct *target, | 1014 | static int s390_last_break_get(struct task_struct *target, |
1047 | const struct user_regset *regset, | 1015 | const struct user_regset *regset, |
1048 | unsigned int pos, unsigned int count, | 1016 | unsigned int pos, unsigned int count, |
@@ -1182,8 +1150,6 @@ static int s390_vxrs_high_set(struct task_struct *target, | |||
1182 | return rc; | 1150 | return rc; |
1183 | } | 1151 | } |
1184 | 1152 | ||
1185 | #endif | ||
1186 | |||
1187 | static int s390_system_call_get(struct task_struct *target, | 1153 | static int s390_system_call_get(struct task_struct *target, |
1188 | const struct user_regset *regset, | 1154 | const struct user_regset *regset, |
1189 | unsigned int pos, unsigned int count, | 1155 | unsigned int pos, unsigned int count, |
@@ -1229,7 +1195,6 @@ static const struct user_regset s390_regsets[] = { | |||
1229 | .get = s390_system_call_get, | 1195 | .get = s390_system_call_get, |
1230 | .set = s390_system_call_set, | 1196 | .set = s390_system_call_set, |
1231 | }, | 1197 | }, |
1232 | #ifdef CONFIG_64BIT | ||
1233 | { | 1198 | { |
1234 | .core_note_type = NT_S390_LAST_BREAK, | 1199 | .core_note_type = NT_S390_LAST_BREAK, |
1235 | .n = 1, | 1200 | .n = 1, |
@@ -1262,7 +1227,6 @@ static const struct user_regset s390_regsets[] = { | |||
1262 | .get = s390_vxrs_high_get, | 1227 | .get = s390_vxrs_high_get, |
1263 | .set = s390_vxrs_high_set, | 1228 | .set = s390_vxrs_high_set, |
1264 | }, | 1229 | }, |
1265 | #endif | ||
1266 | }; | 1230 | }; |
1267 | 1231 | ||
1268 | static const struct user_regset_view user_s390_view = { | 1232 | static const struct user_regset_view user_s390_view = { |
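
With the #ifdef removed, NT_S390_LAST_BREAK and the vector regsets are always part of s390_regsets[]. For reference, a debugger reaches any of these through PTRACE_GETREGSET with an iovec; a hedged user-space sketch (error handling kept minimal):

#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/uio.h>
#include <elf.h>        /* NT_S390_LAST_BREAK */

/* Sketch: read the 8-byte last-breaking-event address of a traced task. */
static long read_last_break(pid_t pid)
{
        unsigned long last_break = 0;
        struct iovec iov = {
                .iov_base = &last_break,
                .iov_len  = sizeof(last_break),
        };

        if (ptrace(PTRACE_GETREGSET, pid, NT_S390_LAST_BREAK, &iov) == -1)
                return -1;
        return (long) last_break;
}
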
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S index dd8016b0477e..52aab0bd84f8 100644 --- a/arch/s390/kernel/reipl.S +++ b/arch/s390/kernel/reipl.S | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * S390 version | 2 | * Copyright IBM Corp 2000, 2011 |
3 | * Copyright IBM Corp. 2000 | 3 | * Author(s): Holger Smolinski <Holger.Smolinski@de.ibm.com>, |
4 | * Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com) | 4 | * Denis Joseph Barrow, |
5 | */ | 5 | */ |
6 | 6 | ||
7 | #include <linux/linkage.h> | 7 | #include <linux/linkage.h> |
@@ -9,43 +9,90 @@ | |||
9 | #include <asm/sigp.h> | 9 | #include <asm/sigp.h> |
10 | 10 | ||
11 | # | 11 | # |
12 | # store_status: Empty implementation until kdump is supported on 31 bit | 12 | # store_status |
13 | # | ||
14 | # Prerequisites to run this function: | ||
15 | # - Prefix register is set to zero | ||
16 | # - Original prefix register is stored in "dump_prefix_page" | ||
17 | # - Lowcore protection is off | ||
13 | # | 18 | # |
14 | ENTRY(store_status) | 19 | ENTRY(store_status) |
15 | br %r14 | 20 | /* Save register one and load save area base */ |
21 | stg %r1,__LC_SAVE_AREA_RESTART | ||
22 | lghi %r1,SAVE_AREA_BASE | ||
23 | /* General purpose registers */ | ||
24 | stmg %r0,%r15,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
25 | lg %r2,__LC_SAVE_AREA_RESTART | ||
26 | stg %r2,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE+8(%r1) | ||
27 | /* Control registers */ | ||
28 | stctg %c0,%c15,__LC_CREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
29 | /* Access registers */ | ||
30 | stam %a0,%a15,__LC_AREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
31 | /* Floating point registers */ | ||
32 | std %f0, 0x00 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
33 | std %f1, 0x08 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
34 | std %f2, 0x10 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
35 | std %f3, 0x18 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
36 | std %f4, 0x20 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
37 | std %f5, 0x28 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
38 | std %f6, 0x30 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
39 | std %f7, 0x38 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
40 | std %f8, 0x40 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
41 | std %f9, 0x48 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
42 | std %f10,0x50 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
43 | std %f11,0x58 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
44 | std %f12,0x60 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
45 | std %f13,0x68 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
46 | std %f14,0x70 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
47 | std %f15,0x78 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
48 | /* Floating point control register */ | ||
49 | stfpc __LC_FP_CREG_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
50 | /* CPU timer */ | ||
51 | stpt __LC_CPU_TIMER_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
52 | /* Saved prefix register */ | ||
53 | larl %r2,dump_prefix_page | ||
54 | mvc __LC_PREFIX_SAVE_AREA-SAVE_AREA_BASE(4,%r1),0(%r2) | ||
55 | /* Clock comparator - seven bytes */ | ||
56 | larl %r2,.Lclkcmp | ||
57 | stckc 0(%r2) | ||
58 | mvc __LC_CLOCK_COMP_SAVE_AREA-SAVE_AREA_BASE + 1(7,%r1),1(%r2) | ||
59 | /* Program status word */ | ||
60 | epsw %r2,%r3 | ||
61 | st %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 0(%r1) | ||
62 | st %r3,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 4(%r1) | ||
63 | larl %r2,store_status | ||
64 | stg %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 8(%r1) | ||
65 | br %r14 | ||
66 | |||
67 | .section .bss | ||
68 | .align 8 | ||
69 | .Lclkcmp: .quad 0x0000000000000000 | ||
70 | .previous | ||
16 | 71 | ||
17 | # | 72 | # |
18 | # do_reipl_asm | 73 | # do_reipl_asm |
19 | # Parameter: r2 = schid of reipl device | 74 | # Parameter: r2 = schid of reipl device |
20 | # | 75 | # |
76 | |||
21 | ENTRY(do_reipl_asm) | 77 | ENTRY(do_reipl_asm) |
22 | basr %r13,0 | 78 | basr %r13,0 |
23 | .Lpg0: lpsw .Lnewpsw-.Lpg0(%r13) | 79 | .Lpg0: lpswe .Lnewpsw-.Lpg0(%r13) |
24 | .Lpg1: # do store status of all registers | 80 | .Lpg1: brasl %r14,store_status |
25 | 81 | ||
26 | stm %r0,%r15,__LC_GPREGS_SAVE_AREA | 82 | lctlg %c6,%c6,.Lall-.Lpg0(%r13) |
27 | stctl %c0,%c15,__LC_CREGS_SAVE_AREA | 83 | lgr %r1,%r2 |
28 | stam %a0,%a15,__LC_AREGS_SAVE_AREA | 84 | mvc __LC_PGM_NEW_PSW(16),.Lpcnew-.Lpg0(%r13) |
29 | l %r10,.Ldump_pfx-.Lpg0(%r13) | ||
30 | mvc __LC_PREFIX_SAVE_AREA(4),0(%r10) | ||
31 | stckc .Lclkcmp-.Lpg0(%r13) | ||
32 | mvc __LC_CLOCK_COMP_SAVE_AREA(8),.Lclkcmp-.Lpg0(%r13) | ||
33 | stpt __LC_CPU_TIMER_SAVE_AREA | ||
34 | st %r13, __LC_PSW_SAVE_AREA+4 | ||
35 | lctl %c6,%c6,.Lall-.Lpg0(%r13) | ||
36 | lr %r1,%r2 | ||
37 | mvc __LC_PGM_NEW_PSW(8),.Lpcnew-.Lpg0(%r13) | ||
38 | stsch .Lschib-.Lpg0(%r13) | 85 | stsch .Lschib-.Lpg0(%r13) |
39 | oi .Lschib+5-.Lpg0(%r13),0x84 | 86 | oi .Lschib+5-.Lpg0(%r13),0x84 |
40 | .Lecs: xi .Lschib+27-.Lpg0(%r13),0x01 | 87 | .Lecs: xi .Lschib+27-.Lpg0(%r13),0x01 |
41 | msch .Lschib-.Lpg0(%r13) | 88 | msch .Lschib-.Lpg0(%r13) |
42 | lhi %r0,5 | 89 | lghi %r0,5 |
43 | .Lssch: ssch .Liplorb-.Lpg0(%r13) | 90 | .Lssch: ssch .Liplorb-.Lpg0(%r13) |
44 | jz .L001 | 91 | jz .L001 |
45 | brct %r0,.Lssch | 92 | brct %r0,.Lssch |
46 | bas %r14,.Ldisab-.Lpg0(%r13) | 93 | bas %r14,.Ldisab-.Lpg0(%r13) |
47 | .L001: mvc __LC_IO_NEW_PSW(8),.Lionew-.Lpg0(%r13) | 94 | .L001: mvc __LC_IO_NEW_PSW(16),.Lionew-.Lpg0(%r13) |
48 | .Ltpi: lpsw .Lwaitpsw-.Lpg0(%r13) | 95 | .Ltpi: lpswe .Lwaitpsw-.Lpg0(%r13) |
49 | .Lcont: c %r1,__LC_SUBCHANNEL_ID | 96 | .Lcont: c %r1,__LC_SUBCHANNEL_ID |
50 | jnz .Ltpi | 97 | jnz .Ltpi |
51 | clc __LC_IO_INT_PARM(4),.Liplorb-.Lpg0(%r13) | 98 | clc __LC_IO_INT_PARM(4),.Liplorb-.Lpg0(%r13) |
@@ -58,20 +105,36 @@ ENTRY(do_reipl_asm) | |||
58 | jz .L003 | 105 | jz .L003 |
59 | bas %r14,.Ldisab-.Lpg0(%r13) | 106 | bas %r14,.Ldisab-.Lpg0(%r13) |
60 | .L003: st %r1,__LC_SUBCHANNEL_ID | 107 | .L003: st %r1,__LC_SUBCHANNEL_ID |
108 | lhi %r1,0 # mode 0 = esa | ||
109 | slr %r0,%r0 # set cpuid to zero | ||
110 | sigp %r1,%r0,SIGP_SET_ARCHITECTURE # switch to esa mode | ||
61 | lpsw 0 | 111 | lpsw 0 |
62 | sigp 0,0,SIGP_RESTART | 112 | .Ldisab: sll %r14,1 |
63 | .Ldisab: st %r14,.Ldispsw+4-.Lpg0(%r13) | 113 | srl %r14,1 # need to kill hi bit to avoid specification exceptions. |
64 | lpsw .Ldispsw-.Lpg0(%r13) | 114 | st %r14,.Ldispsw+12-.Lpg0(%r13) |
115 | lpswe .Ldispsw-.Lpg0(%r13) | ||
65 | .align 8 | 116 | .align 8 |
66 | .Lclkcmp: .quad 0x0000000000000000 | 117 | .Lall: .quad 0x00000000ff000000 |
67 | .Lall: .long 0xff000000 | 118 | .align 16 |
68 | .Ldump_pfx: .long dump_prefix_page | 119 | /* |
69 | .align 8 | 120 | * These addresses have to be 31 bit otherwise |
70 | .Lnewpsw: .long 0x00080000,0x80000000+.Lpg1 | 121 | * the sigp will throw a specification exception |
71 | .Lpcnew: .long 0x00080000,0x80000000+.Lecs | 122 | * when switching to ESA mode as bit 31 is set |
72 | .Lionew: .long 0x00080000,0x80000000+.Lcont | 123 | * in the ESA psw. |
73 | .Lwaitpsw: .long 0x020a0000,0x00000000+.Ltpi | 124 | * Bit 31 of the addresses has to be 0 for the |
74 | .Ldispsw: .long 0x000a0000,0x00000000 | 125 | * 31 bit lpswe instruction, a fact they appear to have |
126 | * omitted from the pop. | ||
127 | */ | ||
128 | .Lnewpsw: .quad 0x0000000080000000 | ||
129 | .quad .Lpg1 | ||
130 | .Lpcnew: .quad 0x0000000080000000 | ||
131 | .quad .Lecs | ||
132 | .Lionew: .quad 0x0000000080000000 | ||
133 | .quad .Lcont | ||
134 | .Lwaitpsw: .quad 0x0202000080000000 | ||
135 | .quad .Ltpi | ||
136 | .Ldispsw: .quad 0x0002000080000000 | ||
137 | .quad 0x0000000000000000 | ||
75 | .Liplccws: .long 0x02000000,0x60000018 | 138 | .Liplccws: .long 0x02000000,0x60000018 |
76 | .long 0x08000008,0x20000001 | 139 | .long 0x08000008,0x20000001 |
77 | .Liplorb: .long 0x0049504c,0x0040ff80 | 140 | .Liplorb: .long 0x0049504c,0x0040ff80 |
diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S deleted file mode 100644 index dc3b1273c4dc..000000000000 --- a/arch/s390/kernel/reipl64.S +++ /dev/null | |||
@@ -1,155 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright IBM Corp 2000, 2011 | ||
3 | * Author(s): Holger Smolinski <Holger.Smolinski@de.ibm.com>, | ||
4 | * Denis Joseph Barrow, | ||
5 | */ | ||
6 | |||
7 | #include <linux/linkage.h> | ||
8 | #include <asm/asm-offsets.h> | ||
9 | #include <asm/sigp.h> | ||
10 | |||
11 | # | ||
12 | # store_status | ||
13 | # | ||
14 | # Prerequisites to run this function: | ||
15 | # - Prefix register is set to zero | ||
16 | # - Original prefix register is stored in "dump_prefix_page" | ||
17 | # - Lowcore protection is off | ||
18 | # | ||
19 | ENTRY(store_status) | ||
20 | /* Save register one and load save area base */ | ||
21 | stg %r1,__LC_SAVE_AREA_RESTART | ||
22 | lghi %r1,SAVE_AREA_BASE | ||
23 | /* General purpose registers */ | ||
24 | stmg %r0,%r15,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
25 | lg %r2,__LC_SAVE_AREA_RESTART | ||
26 | stg %r2,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE+8(%r1) | ||
27 | /* Control registers */ | ||
28 | stctg %c0,%c15,__LC_CREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
29 | /* Access registers */ | ||
30 | stam %a0,%a15,__LC_AREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
31 | /* Floating point registers */ | ||
32 | std %f0, 0x00 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
33 | std %f1, 0x08 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
34 | std %f2, 0x10 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
35 | std %f3, 0x18 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
36 | std %f4, 0x20 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
37 | std %f5, 0x28 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
38 | std %f6, 0x30 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
39 | std %f7, 0x38 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
40 | std %f8, 0x40 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
41 | std %f9, 0x48 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
42 | std %f10,0x50 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
43 | std %f11,0x58 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
44 | std %f12,0x60 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
45 | std %f13,0x68 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
46 | std %f14,0x70 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
47 | std %f15,0x78 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
48 | /* Floating point control register */ | ||
49 | stfpc __LC_FP_CREG_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
50 | /* CPU timer */ | ||
51 | stpt __LC_CPU_TIMER_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
52 | /* Saved prefix register */ | ||
53 | larl %r2,dump_prefix_page | ||
54 | mvc __LC_PREFIX_SAVE_AREA-SAVE_AREA_BASE(4,%r1),0(%r2) | ||
55 | /* Clock comparator - seven bytes */ | ||
56 | larl %r2,.Lclkcmp | ||
57 | stckc 0(%r2) | ||
58 | mvc __LC_CLOCK_COMP_SAVE_AREA-SAVE_AREA_BASE + 1(7,%r1),1(%r2) | ||
59 | /* Program status word */ | ||
60 | epsw %r2,%r3 | ||
61 | st %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 0(%r1) | ||
62 | st %r3,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 4(%r1) | ||
63 | larl %r2,store_status | ||
64 | stg %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 8(%r1) | ||
65 | br %r14 | ||
66 | |||
67 | .section .bss | ||
68 | .align 8 | ||
69 | .Lclkcmp: .quad 0x0000000000000000 | ||
70 | .previous | ||
71 | |||
72 | # | ||
73 | # do_reipl_asm | ||
74 | # Parameter: r2 = schid of reipl device | ||
75 | # | ||
76 | |||
77 | ENTRY(do_reipl_asm) | ||
78 | basr %r13,0 | ||
79 | .Lpg0: lpswe .Lnewpsw-.Lpg0(%r13) | ||
80 | .Lpg1: brasl %r14,store_status | ||
81 | |||
82 | lctlg %c6,%c6,.Lall-.Lpg0(%r13) | ||
83 | lgr %r1,%r2 | ||
84 | mvc __LC_PGM_NEW_PSW(16),.Lpcnew-.Lpg0(%r13) | ||
85 | stsch .Lschib-.Lpg0(%r13) | ||
86 | oi .Lschib+5-.Lpg0(%r13),0x84 | ||
87 | .Lecs: xi .Lschib+27-.Lpg0(%r13),0x01 | ||
88 | msch .Lschib-.Lpg0(%r13) | ||
89 | lghi %r0,5 | ||
90 | .Lssch: ssch .Liplorb-.Lpg0(%r13) | ||
91 | jz .L001 | ||
92 | brct %r0,.Lssch | ||
93 | bas %r14,.Ldisab-.Lpg0(%r13) | ||
94 | .L001: mvc __LC_IO_NEW_PSW(16),.Lionew-.Lpg0(%r13) | ||
95 | .Ltpi: lpswe .Lwaitpsw-.Lpg0(%r13) | ||
96 | .Lcont: c %r1,__LC_SUBCHANNEL_ID | ||
97 | jnz .Ltpi | ||
98 | clc __LC_IO_INT_PARM(4),.Liplorb-.Lpg0(%r13) | ||
99 | jnz .Ltpi | ||
100 | tsch .Liplirb-.Lpg0(%r13) | ||
101 | tm .Liplirb+9-.Lpg0(%r13),0xbf | ||
102 | jz .L002 | ||
103 | bas %r14,.Ldisab-.Lpg0(%r13) | ||
104 | .L002: tm .Liplirb+8-.Lpg0(%r13),0xf3 | ||
105 | jz .L003 | ||
106 | bas %r14,.Ldisab-.Lpg0(%r13) | ||
107 | .L003: st %r1,__LC_SUBCHANNEL_ID | ||
108 | lhi %r1,0 # mode 0 = esa | ||
109 | slr %r0,%r0 # set cpuid to zero | ||
110 | sigp %r1,%r0,SIGP_SET_ARCHITECTURE # switch to esa mode | ||
111 | lpsw 0 | ||
112 | .Ldisab: sll %r14,1 | ||
113 | srl %r14,1 # need to kill hi bit to avoid specification exceptions. | ||
114 | st %r14,.Ldispsw+12-.Lpg0(%r13) | ||
115 | lpswe .Ldispsw-.Lpg0(%r13) | ||
116 | .align 8 | ||
117 | .Lall: .quad 0x00000000ff000000 | ||
118 | .align 16 | ||
119 | /* | ||
120 | * These addresses have to be 31 bit otherwise | ||
121 | * the sigp will throw a specification exception |
122 | * when switching to ESA mode as bit 31 is set |
123 | * in the ESA psw. | ||
124 | * Bit 31 of the addresses has to be 0 for the | ||
125 | * 31 bit lpswe instruction, a fact they appear to have |
126 | * omitted from the pop. | ||
127 | */ | ||
128 | .Lnewpsw: .quad 0x0000000080000000 | ||
129 | .quad .Lpg1 | ||
130 | .Lpcnew: .quad 0x0000000080000000 | ||
131 | .quad .Lecs | ||
132 | .Lionew: .quad 0x0000000080000000 | ||
133 | .quad .Lcont | ||
134 | .Lwaitpsw: .quad 0x0202000080000000 | ||
135 | .quad .Ltpi | ||
136 | .Ldispsw: .quad 0x0002000080000000 | ||
137 | .quad 0x0000000000000000 | ||
138 | .Liplccws: .long 0x02000000,0x60000018 | ||
139 | .long 0x08000008,0x20000001 | ||
140 | .Liplorb: .long 0x0049504c,0x0040ff80 | ||
141 | .long 0x00000000+.Liplccws | ||
142 | .Lschib: .long 0x00000000,0x00000000 | ||
143 | .long 0x00000000,0x00000000 | ||
144 | .long 0x00000000,0x00000000 | ||
145 | .long 0x00000000,0x00000000 | ||
146 | .long 0x00000000,0x00000000 | ||
147 | .long 0x00000000,0x00000000 | ||
148 | .Liplirb: .long 0x00000000,0x00000000 | ||
149 | .long 0x00000000,0x00000000 | ||
150 | .long 0x00000000,0x00000000 | ||
151 | .long 0x00000000,0x00000000 | ||
152 | .long 0x00000000,0x00000000 | ||
153 | .long 0x00000000,0x00000000 | ||
154 | .long 0x00000000,0x00000000 | ||
155 | .long 0x00000000,0x00000000 | ||
diff --git a/arch/s390/kernel/relocate_kernel.S b/arch/s390/kernel/relocate_kernel.S index f4e6f20e117a..cfac28330b03 100644 --- a/arch/s390/kernel/relocate_kernel.S +++ b/arch/s390/kernel/relocate_kernel.S | |||
@@ -19,7 +19,8 @@ | |||
19 | * %r7 = PAGE_SIZE | 19 | * %r7 = PAGE_SIZE |
20 | * %r8 holds the source address | 20 | * %r8 holds the source address |
21 | * %r9 = PAGE_SIZE | 21 | * %r9 = PAGE_SIZE |
22 | * %r10 is a page mask | 22 | * |
23 | * 0xf000 is a page_mask | ||
23 | */ | 24 | */ |
24 | 25 | ||
25 | .text | 26 | .text |
@@ -27,46 +28,47 @@ ENTRY(relocate_kernel) | |||
27 | basr %r13,0 # base address | 28 | basr %r13,0 # base address |
28 | .base: | 29 | .base: |
29 | stnsm sys_msk-.base(%r13),0xfb # disable DAT | 30 | stnsm sys_msk-.base(%r13),0xfb # disable DAT |
30 | stctl %c0,%c15,ctlregs-.base(%r13) | 31 | stctg %c0,%c15,ctlregs-.base(%r13) |
31 | stm %r0,%r15,gprregs-.base(%r13) | 32 | stmg %r0,%r15,gprregs-.base(%r13) |
33 | lghi %r0,3 | ||
34 | sllg %r0,%r0,31 | ||
35 | stg %r0,0x1d0(%r0) | ||
36 | la %r0,.back_pgm-.base(%r13) | ||
37 | stg %r0,0x1d8(%r0) | ||
32 | la %r1,load_psw-.base(%r13) | 38 | la %r1,load_psw-.base(%r13) |
33 | mvc 0(8,%r0),0(%r1) | 39 | mvc 0(8,%r0),0(%r1) |
34 | la %r0,.back-.base(%r13) | 40 | la %r0,.back-.base(%r13) |
35 | st %r0,4(%r0) | 41 | st %r0,4(%r0) |
36 | oi 4(%r0),0x80 | 42 | oi 4(%r0),0x80 |
37 | mvc 0x68(8,%r0),0(%r1) | 43 | lghi %r0,0 |
38 | la %r0,.back_pgm-.base(%r13) | ||
39 | st %r0,0x6c(%r0) | ||
40 | oi 0x6c(%r0),0x80 | ||
41 | lhi %r0,0 | ||
42 | diag %r0,%r0,0x308 | 44 | diag %r0,%r0,0x308 |
43 | .back: | 45 | .back: |
46 | lhi %r1,1 # mode 1 = esame | ||
47 | sigp %r1,%r0,SIGP_SET_ARCHITECTURE # switch to esame mode | ||
48 | sam64 # switch to 64 bit addressing mode | ||
44 | basr %r13,0 | 49 | basr %r13,0 |
45 | .back_base: | 50 | .back_base: |
46 | oi have_diag308-.back_base(%r13),0x01 | 51 | oi have_diag308-.back_base(%r13),0x01 |
47 | lctl %c0,%c15,ctlregs-.back_base(%r13) | 52 | lctlg %c0,%c15,ctlregs-.back_base(%r13) |
48 | lm %r0,%r15,gprregs-.back_base(%r13) | 53 | lmg %r0,%r15,gprregs-.back_base(%r13) |
49 | j .start_reloc | 54 | j .top |
50 | .back_pgm: | 55 | .back_pgm: |
51 | lm %r0,%r15,gprregs-.base(%r13) | 56 | lmg %r0,%r15,gprregs-.base(%r13) |
52 | .start_reloc: | ||
53 | lhi %r10,-1 # preparing the mask | ||
54 | sll %r10,12 # shift it such that it becomes 0xf000 | ||
55 | .top: | 57 | .top: |
56 | lhi %r7,4096 # load PAGE_SIZE in r7 | 58 | lghi %r7,4096 # load PAGE_SIZE in r7 |
57 | lhi %r9,4096 # load PAGE_SIZE in r9 | 59 | lghi %r9,4096 # load PAGE_SIZE in r9 |
58 | l %r5,0(%r2) # read another word for indirection page | 60 | lg %r5,0(%r2) # read another word for indirection page |
59 | ahi %r2,4 # increment pointer | 61 | aghi %r2,8 # increment pointer |
60 | tml %r5,0x1 # is it a destination page? | 62 | tml %r5,0x1 # is it a destination page? |
61 | je .indir_check # NO, goto "indir_check" | 63 | je .indir_check # NO, goto "indir_check" |
62 | lr %r6,%r5 # r6 = r5 | 64 | lgr %r6,%r5 # r6 = r5 |
63 | nr %r6,%r10 # mask it out and... | 65 | nill %r6,0xf000 # mask it out and... |
64 | j .top # ...next iteration | 66 | j .top # ...next iteration |
65 | .indir_check: | 67 | .indir_check: |
66 | tml %r5,0x2 # is it an indirection page? | 68 | tml %r5,0x2 # is it an indirection page? |
67 | je .done_test # NO, goto "done_test" | 69 | je .done_test # NO, goto "done_test" |
68 | nr %r5,%r10 # YES, mask out, | 70 | nill %r5,0xf000 # YES, mask out, |
69 | lr %r2,%r5 # move it into the right register, | 71 | lgr %r2,%r5 # move it into the right register, |
70 | j .top # and read next... | 72 | j .top # and read next... |
71 | .done_test: | 73 | .done_test: |
72 | tml %r5,0x4 # is it the done indicator? | 74 | tml %r5,0x4 # is it the done indicator? |
@@ -75,13 +77,13 @@ ENTRY(relocate_kernel) | |||
75 | .source_test: | 77 | .source_test: |
76 | tml %r5,0x8 # it should be a source indicator... | 78 | tml %r5,0x8 # it should be a source indicator... |
77 | je .top # NO, ignore it... | 79 | je .top # NO, ignore it... |
78 | lr %r8,%r5 # r8 = r5 | 80 | lgr %r8,%r5 # r8 = r5 |
79 | nr %r8,%r10 # masking | 81 | nill %r8,0xf000 # masking |
80 | 0: mvcle %r6,%r8,0x0 # copy PAGE_SIZE bytes from r8 to r6 - pad with 0 | 82 | 0: mvcle %r6,%r8,0x0 # copy PAGE_SIZE bytes from r8 to r6 - pad with 0 |
81 | jo 0b | 83 | jo 0b |
82 | j .top | 84 | j .top |
83 | .done: | 85 | .done: |
84 | sr %r0,%r0 # clear register r0 | 86 | sgr %r0,%r0 # clear register r0 |
85 | la %r4,load_psw-.base(%r13) # load psw-address into the register | 87 | la %r4,load_psw-.base(%r13) # load psw-address into the register |
86 | o %r3,4(%r4) # or load address into psw | 88 | o %r3,4(%r4) # or load address into psw |
87 | st %r3,4(%r4) | 89 | st %r3,4(%r4) |
@@ -90,8 +92,9 @@ ENTRY(relocate_kernel) | |||
90 | jno .no_diag308 | 92 | jno .no_diag308 |
91 | diag %r0,%r0,0x308 | 93 | diag %r0,%r0,0x308 |
92 | .no_diag308: | 94 | .no_diag308: |
93 | sr %r1,%r1 # clear %r1 | 95 | sam31 # 31 bit mode |
94 | sr %r2,%r2 # clear %r2 | 96 | sr %r1,%r1 # erase register r1 |
97 | sr %r2,%r2 # erase register r2 | ||
95 | sigp %r1,%r2,SIGP_SET_ARCHITECTURE # set cpuid to zero | 98 | sigp %r1,%r2,SIGP_SET_ARCHITECTURE # set cpuid to zero |
96 | lpsw 0 # hopefully start new kernel... | 99 | lpsw 0 # hopefully start new kernel... |
97 | 100 | ||
@@ -102,11 +105,11 @@ ENTRY(relocate_kernel) | |||
102 | .quad 0 | 105 | .quad 0 |
103 | ctlregs: | 106 | ctlregs: |
104 | .rept 16 | 107 | .rept 16 |
105 | .long 0 | 108 | .quad 0 |
106 | .endr | 109 | .endr |
107 | gprregs: | 110 | gprregs: |
108 | .rept 16 | 111 | .rept 16 |
109 | .long 0 | 112 | .quad 0 |
110 | .endr | 113 | .endr |
111 | have_diag308: | 114 | have_diag308: |
112 | .byte 0 | 115 | .byte 0 |
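For readers unfamiliar with the kexec indirection-page format that relocate_kernel walks, the hunk above boils down to the loop sketched below in C. The IND_* flag values are the ones defined in include/linux/kexec.h; the function name, the memcpy() stand-in for mvcle, and the PAGE_* macros are illustrative only, not the kernel's implementation.

	#include <string.h>

	#define PAGE_SIZE 4096UL
	#define PAGE_MASK (~(PAGE_SIZE - 1))

	#define IND_DESTINATION 0x1UL	/* entry names the next destination page */
	#define IND_INDIRECTION 0x2UL	/* entry points at the next entry page   */
	#define IND_DONE        0x4UL	/* end of the kimage entry list          */
	#define IND_SOURCE      0x8UL	/* entry names a source page to copy     */

	static void relocate_pages(const unsigned long *entry)
	{
		char *dst = NULL;

		for (;;) {
			unsigned long val = *entry++;		/* lg %r5,0(%r2); aghi %r2,8 */

			if (val & IND_DESTINATION) {		/* tml %r5,0x1 */
				dst = (char *)(val & PAGE_MASK);
			} else if (val & IND_INDIRECTION) {	/* tml %r5,0x2 */
				entry = (const unsigned long *)(val & PAGE_MASK);
			} else if (val & IND_DONE) {		/* tml %r5,0x4 */
				break;
			} else if (val & IND_SOURCE) {		/* tml %r5,0x8 */
				memcpy(dst, (void *)(val & PAGE_MASK), PAGE_SIZE);
				dst += PAGE_SIZE;		/* mvcle leaves %r6 advanced */
			}
			/* any other entry is ignored, exactly like the .top loop */
		}
	}

The nill ...,0xf000 in the assembly clears only the low 12 page-offset bits of the low halfword; for page-aligned addresses this has the same effect as the PAGE_MASK used in the sketch.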
diff --git a/arch/s390/kernel/relocate_kernel64.S b/arch/s390/kernel/relocate_kernel64.S deleted file mode 100644 index cfac28330b03..000000000000 --- a/arch/s390/kernel/relocate_kernel64.S +++ /dev/null | |||
@@ -1,121 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright IBM Corp. 2005 | ||
3 | * | ||
4 | * Author(s): Rolf Adelsberger, | ||
5 | * Heiko Carstens <heiko.carstens@de.ibm.com> | ||
6 | * | ||
7 | */ | ||
8 | |||
9 | #include <linux/linkage.h> | ||
10 | #include <asm/sigp.h> | ||
11 | |||
12 | /* | ||
13 | * moves the new kernel to its destination... | ||
14 | * %r2 = pointer to first kimage_entry_t | ||
15 | * %r3 = start address - where to jump to after the job is done... | ||
16 | * | ||
17 | * %r5 will be used as temp. storage | ||
18 | * %r6 holds the destination address | ||
19 | * %r7 = PAGE_SIZE | ||
20 | * %r8 holds the source address | ||
21 | * %r9 = PAGE_SIZE | ||
22 | * | ||
23 | * 0xf000 is a page_mask | ||
24 | */ | ||
25 | |||
26 | .text | ||
27 | ENTRY(relocate_kernel) | ||
28 | basr %r13,0 # base address | ||
29 | .base: | ||
30 | stnsm sys_msk-.base(%r13),0xfb # disable DAT | ||
31 | stctg %c0,%c15,ctlregs-.base(%r13) | ||
32 | stmg %r0,%r15,gprregs-.base(%r13) | ||
33 | lghi %r0,3 | ||
34 | sllg %r0,%r0,31 | ||
35 | stg %r0,0x1d0(%r0) | ||
36 | la %r0,.back_pgm-.base(%r13) | ||
37 | stg %r0,0x1d8(%r0) | ||
38 | la %r1,load_psw-.base(%r13) | ||
39 | mvc 0(8,%r0),0(%r1) | ||
40 | la %r0,.back-.base(%r13) | ||
41 | st %r0,4(%r0) | ||
42 | oi 4(%r0),0x80 | ||
43 | lghi %r0,0 | ||
44 | diag %r0,%r0,0x308 | ||
45 | .back: | ||
46 | lhi %r1,1 # mode 1 = esame | ||
47 | sigp %r1,%r0,SIGP_SET_ARCHITECTURE # switch to esame mode | ||
48 | sam64 # switch to 64 bit addressing mode | ||
49 | basr %r13,0 | ||
50 | .back_base: | ||
51 | oi have_diag308-.back_base(%r13),0x01 | ||
52 | lctlg %c0,%c15,ctlregs-.back_base(%r13) | ||
53 | lmg %r0,%r15,gprregs-.back_base(%r13) | ||
54 | j .top | ||
55 | .back_pgm: | ||
56 | lmg %r0,%r15,gprregs-.base(%r13) | ||
57 | .top: | ||
58 | lghi %r7,4096 # load PAGE_SIZE in r7 | ||
59 | lghi %r9,4096 # load PAGE_SIZE in r9 | ||
60 | lg %r5,0(%r2) # read another word for indirection page | ||
61 | aghi %r2,8 # increment pointer | ||
62 | tml %r5,0x1 # is it a destination page? | ||
63 | je .indir_check # NO, goto "indir_check" | ||
64 | lgr %r6,%r5 # r6 = r5 | ||
65 | nill %r6,0xf000 # mask it out and... | ||
66 | j .top # ...next iteration | ||
67 | .indir_check: | ||
68 | tml %r5,0x2 # is it an indirection page? | ||
69 | je .done_test # NO, goto "done_test" | ||
70 | nill %r5,0xf000 # YES, mask out, | ||
71 | lgr %r2,%r5 # move it into the right register, | ||
72 | j .top # and read next... | ||
73 | .done_test: | ||
74 | tml %r5,0x4 # is it the done indicator? | ||
75 | je .source_test # NO! Well, then it should be the source indicator... | ||
76 | j .done # ok, lets finish it here... | ||
77 | .source_test: | ||
78 | tml %r5,0x8 # it should be a source indicator... | ||
79 | je .top # NO, ignore it... | ||
80 | lgr %r8,%r5 # r8 = r5 | ||
81 | nill %r8,0xf000 # masking | ||
82 | 0: mvcle %r6,%r8,0x0 # copy PAGE_SIZE bytes from r8 to r6 - pad with 0 | ||
83 | jo 0b | ||
84 | j .top | ||
85 | .done: | ||
86 | sgr %r0,%r0 # clear register r0 | ||
87 | la %r4,load_psw-.base(%r13) # load psw-address into the register | ||
88 | o %r3,4(%r4) # or load address into psw | ||
89 | st %r3,4(%r4) | ||
90 | mvc 0(8,%r0),0(%r4) # copy psw to absolute address 0 | ||
91 | tm have_diag308-.base(%r13),0x01 | ||
92 | jno .no_diag308 | ||
93 | diag %r0,%r0,0x308 | ||
94 | .no_diag308: | ||
95 | sam31 # 31 bit mode | ||
96 | sr %r1,%r1 # erase register r1 | ||
97 | sr %r2,%r2 # erase register r2 | ||
98 | sigp %r1,%r2,SIGP_SET_ARCHITECTURE # set cpuid to zero | ||
99 | lpsw 0 # hopefully start new kernel... | ||
100 | |||
101 | .align 8 | ||
102 | load_psw: | ||
103 | .long 0x00080000,0x80000000 | ||
104 | sys_msk: | ||
105 | .quad 0 | ||
106 | ctlregs: | ||
107 | .rept 16 | ||
108 | .quad 0 | ||
109 | .endr | ||
110 | gprregs: | ||
111 | .rept 16 | ||
112 | .quad 0 | ||
113 | .endr | ||
114 | have_diag308: | ||
115 | .byte 0 | ||
116 | .align 8 | ||
117 | relocate_kernel_end: | ||
118 | .align 8 | ||
119 | .globl relocate_kernel_len | ||
120 | relocate_kernel_len: | ||
121 | .quad relocate_kernel_end - relocate_kernel | ||
diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S index 7e77e03378f3..43c3169ea49c 100644 --- a/arch/s390/kernel/sclp.S +++ b/arch/s390/kernel/sclp.S | |||
@@ -36,21 +36,17 @@ _sclp_wait_int: | |||
36 | ahi %r15,-96 # create stack frame | 36 | ahi %r15,-96 # create stack frame |
37 | la %r8,LC_EXT_NEW_PSW # register int handler | 37 | la %r8,LC_EXT_NEW_PSW # register int handler |
38 | la %r9,.LextpswS1-.LbaseS1(%r13) | 38 | la %r9,.LextpswS1-.LbaseS1(%r13) |
39 | #ifdef CONFIG_64BIT | ||
40 | tm LC_AR_MODE_ID,1 | 39 | tm LC_AR_MODE_ID,1 |
41 | jno .Lesa1 | 40 | jno .Lesa1 |
42 | la %r8,LC_EXT_NEW_PSW_64 # register int handler 64 bit | 41 | la %r8,LC_EXT_NEW_PSW_64 # register int handler 64 bit |
43 | la %r9,.LextpswS1_64-.LbaseS1(%r13) | 42 | la %r9,.LextpswS1_64-.LbaseS1(%r13) |
44 | .Lesa1: | 43 | .Lesa1: |
45 | #endif | ||
46 | mvc .LoldpswS1-.LbaseS1(16,%r13),0(%r8) | 44 | mvc .LoldpswS1-.LbaseS1(16,%r13),0(%r8) |
47 | mvc 0(16,%r8),0(%r9) | 45 | mvc 0(16,%r8),0(%r9) |
48 | #ifdef CONFIG_64BIT | ||
49 | epsw %r6,%r7 # set current addressing mode | 46 | epsw %r6,%r7 # set current addressing mode |
50 | nill %r6,0x1 # in new psw (31 or 64 bit mode) | 47 | nill %r6,0x1 # in new psw (31 or 64 bit mode) |
51 | nilh %r7,0x8000 | 48 | nilh %r7,0x8000 |
52 | stm %r6,%r7,0(%r8) | 49 | stm %r6,%r7,0(%r8) |
53 | #endif | ||
54 | lhi %r6,0x0200 # cr mask for ext int (cr0.54) | 50 | lhi %r6,0x0200 # cr mask for ext int (cr0.54) |
55 | ltr %r2,%r2 | 51 | ltr %r2,%r2 |
56 | jz .LsetctS1 | 52 | jz .LsetctS1 |
@@ -92,10 +88,8 @@ _sclp_wait_int: | |||
92 | .long 0, 0, 0, 0 # old ext int PSW | 88 | .long 0, 0, 0, 0 # old ext int PSW |
93 | .LextpswS1: | 89 | .LextpswS1: |
94 | .long 0x00080000, 0x80000000+.LwaitS1 # PSW to handle ext int | 90 | .long 0x00080000, 0x80000000+.LwaitS1 # PSW to handle ext int |
95 | #ifdef CONFIG_64BIT | ||
96 | .LextpswS1_64: | 91 | .LextpswS1_64: |
97 | .quad 0, .LwaitS1 # PSW to handle ext int, 64 bit | 92 | .quad 0, .LwaitS1 # PSW to handle ext int, 64 bit |
98 | #endif | ||
99 | .LwaitpswS1: | 93 | .LwaitpswS1: |
100 | .long 0x010a0000, 0x00000000+.LloopS1 # PSW to wait for ext int | 94 | .long 0x010a0000, 0x00000000+.LloopS1 # PSW to wait for ext int |
101 | .LtimeS1: | 95 | .LtimeS1: |
@@ -272,13 +266,11 @@ _sclp_print: | |||
272 | ENTRY(_sclp_print_early) | 266 | ENTRY(_sclp_print_early) |
273 | stm %r6,%r15,24(%r15) # save registers | 267 | stm %r6,%r15,24(%r15) # save registers |
274 | ahi %r15,-96 # create stack frame | 268 | ahi %r15,-96 # create stack frame |
275 | #ifdef CONFIG_64BIT | ||
276 | tm LC_AR_MODE_ID,1 | 269 | tm LC_AR_MODE_ID,1 |
277 | jno .Lesa2 | 270 | jno .Lesa2 |
278 | ahi %r15,-80 | 271 | ahi %r15,-80 |
279 | stmh %r6,%r15,96(%r15) # store upper register halves | 272 | stmh %r6,%r15,96(%r15) # store upper register halves |
280 | .Lesa2: | 273 | .Lesa2: |
281 | #endif | ||
282 | lr %r10,%r2 # save string pointer | 274 | lr %r10,%r2 # save string pointer |
283 | lhi %r2,0 | 275 | lhi %r2,0 |
284 | bras %r14,_sclp_setup # enable console | 276 | bras %r14,_sclp_setup # enable console |
@@ -291,14 +283,12 @@ ENTRY(_sclp_print_early) | |||
291 | lhi %r2,1 | 283 | lhi %r2,1 |
292 | bras %r14,_sclp_setup # disable console | 284 | bras %r14,_sclp_setup # disable console |
293 | .LendS5: | 285 | .LendS5: |
294 | #ifdef CONFIG_64BIT | ||
295 | tm LC_AR_MODE_ID,1 | 286 | tm LC_AR_MODE_ID,1 |
296 | jno .Lesa3 | 287 | jno .Lesa3 |
297 | lgfr %r2,%r2 # sign extend return value | 288 | lgfr %r2,%r2 # sign extend return value |
298 | lmh %r6,%r15,96(%r15) # restore upper register halves | 289 | lmh %r6,%r15,96(%r15) # restore upper register halves |
299 | ahi %r15,80 | 290 | ahi %r15,80 |
300 | .Lesa3: | 291 | .Lesa3: |
301 | #endif | ||
302 | lm %r6,%r15,120(%r15) # restore registers | 292 | lm %r6,%r15,120(%r15) # restore registers |
303 | br %r14 | 293 | br %r14 |
304 | 294 | ||
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index a5ea8bc17cb3..7262fe438c99 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c | |||
@@ -92,10 +92,8 @@ EXPORT_SYMBOL(VMALLOC_END); | |||
92 | struct page *vmemmap; | 92 | struct page *vmemmap; |
93 | EXPORT_SYMBOL(vmemmap); | 93 | EXPORT_SYMBOL(vmemmap); |
94 | 94 | ||
95 | #ifdef CONFIG_64BIT | ||
96 | unsigned long MODULES_VADDR; | 95 | unsigned long MODULES_VADDR; |
97 | unsigned long MODULES_END; | 96 | unsigned long MODULES_END; |
98 | #endif | ||
99 | 97 | ||
100 | /* An array with a pointer to the lowcore of every CPU. */ | 98 | /* An array with a pointer to the lowcore of every CPU. */ |
101 | struct _lowcore *lowcore_ptr[NR_CPUS]; | 99 | struct _lowcore *lowcore_ptr[NR_CPUS]; |
@@ -334,19 +332,10 @@ static void __init setup_lowcore(void) | |||
334 | lc->stfl_fac_list = S390_lowcore.stfl_fac_list; | 332 | lc->stfl_fac_list = S390_lowcore.stfl_fac_list; |
335 | memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list, | 333 | memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list, |
336 | MAX_FACILITY_BIT/8); | 334 | MAX_FACILITY_BIT/8); |
337 | #ifndef CONFIG_64BIT | ||
338 | if (MACHINE_HAS_IEEE) { | ||
339 | lc->extended_save_area_addr = (__u32) | ||
340 | __alloc_bootmem_low(PAGE_SIZE, PAGE_SIZE, 0); | ||
341 | /* enable extended save area */ | ||
342 | __ctl_set_bit(14, 29); | ||
343 | } | ||
344 | #else | ||
345 | if (MACHINE_HAS_VX) | 335 | if (MACHINE_HAS_VX) |
346 | lc->vector_save_area_addr = | 336 | lc->vector_save_area_addr = |
347 | (unsigned long) &lc->vector_save_area; | 337 | (unsigned long) &lc->vector_save_area; |
348 | lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0]; | 338 | lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0]; |
349 | #endif | ||
350 | lc->sync_enter_timer = S390_lowcore.sync_enter_timer; | 339 | lc->sync_enter_timer = S390_lowcore.sync_enter_timer; |
351 | lc->async_enter_timer = S390_lowcore.async_enter_timer; | 340 | lc->async_enter_timer = S390_lowcore.async_enter_timer; |
352 | lc->exit_timer = S390_lowcore.exit_timer; | 341 | lc->exit_timer = S390_lowcore.exit_timer; |
@@ -450,7 +439,6 @@ static void __init setup_memory_end(void) | |||
450 | unsigned long vmax, vmalloc_size, tmp; | 439 | unsigned long vmax, vmalloc_size, tmp; |
451 | 440 | ||
452 | /* Choose kernel address space layout: 2, 3, or 4 levels. */ | 441 | /* Choose kernel address space layout: 2, 3, or 4 levels. */ |
453 | #ifdef CONFIG_64BIT | ||
454 | vmalloc_size = VMALLOC_END ?: (128UL << 30) - MODULES_LEN; | 442 | vmalloc_size = VMALLOC_END ?: (128UL << 30) - MODULES_LEN; |
455 | tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE; | 443 | tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE; |
456 | tmp = tmp * (sizeof(struct page) + PAGE_SIZE); | 444 | tmp = tmp * (sizeof(struct page) + PAGE_SIZE); |
@@ -462,12 +450,6 @@ static void __init setup_memory_end(void) | |||
462 | MODULES_END = vmax; | 450 | MODULES_END = vmax; |
463 | MODULES_VADDR = MODULES_END - MODULES_LEN; | 451 | MODULES_VADDR = MODULES_END - MODULES_LEN; |
464 | VMALLOC_END = MODULES_VADDR; | 452 | VMALLOC_END = MODULES_VADDR; |
465 | #else | ||
466 | vmalloc_size = VMALLOC_END ?: 96UL << 20; | ||
467 | vmax = 1UL << 31; /* 2-level kernel page table */ | ||
468 | /* vmalloc area is at the end of the kernel address space. */ | ||
469 | VMALLOC_END = vmax; | ||
470 | #endif | ||
471 | VMALLOC_START = vmax - vmalloc_size; | 453 | VMALLOC_START = vmax - vmalloc_size; |
472 | 454 | ||
473 | /* Split remaining virtual space between 1:1 mapping & vmemmap array */ | 455 | /* Split remaining virtual space between 1:1 mapping & vmemmap array */ |
@@ -754,7 +736,6 @@ static void __init setup_hwcaps(void) | |||
754 | if (MACHINE_HAS_HPAGE) | 736 | if (MACHINE_HAS_HPAGE) |
755 | elf_hwcap |= HWCAP_S390_HPAGE; | 737 | elf_hwcap |= HWCAP_S390_HPAGE; |
756 | 738 | ||
757 | #if defined(CONFIG_64BIT) | ||
758 | /* | 739 | /* |
759 | * 64-bit register support for 31-bit processes | 740 | * 64-bit register support for 31-bit processes |
760 | * HWCAP_S390_HIGH_GPRS is bit 9. | 741 | * HWCAP_S390_HIGH_GPRS is bit 9. |
@@ -772,22 +753,15 @@ static void __init setup_hwcaps(void) | |||
772 | */ | 753 | */ |
773 | if (test_facility(129)) | 754 | if (test_facility(129)) |
774 | elf_hwcap |= HWCAP_S390_VXRS; | 755 | elf_hwcap |= HWCAP_S390_VXRS; |
775 | #endif | ||
776 | |||
777 | get_cpu_id(&cpu_id); | 756 | get_cpu_id(&cpu_id); |
778 | add_device_randomness(&cpu_id, sizeof(cpu_id)); | 757 | add_device_randomness(&cpu_id, sizeof(cpu_id)); |
779 | switch (cpu_id.machine) { | 758 | switch (cpu_id.machine) { |
780 | case 0x9672: | 759 | case 0x9672: |
781 | #if !defined(CONFIG_64BIT) | ||
782 | default: /* Use "g5" as default for 31 bit kernels. */ | ||
783 | #endif | ||
784 | strcpy(elf_platform, "g5"); | 760 | strcpy(elf_platform, "g5"); |
785 | break; | 761 | break; |
786 | case 0x2064: | 762 | case 0x2064: |
787 | case 0x2066: | 763 | case 0x2066: |
788 | #if defined(CONFIG_64BIT) | ||
789 | default: /* Use "z900" as default for 64 bit kernels. */ | 764 | default: /* Use "z900" as default for 64 bit kernels. */ |
790 | #endif | ||
791 | strcpy(elf_platform, "z900"); | 765 | strcpy(elf_platform, "z900"); |
792 | break; | 766 | break; |
793 | case 0x2084: | 767 | case 0x2084: |
@@ -839,19 +813,6 @@ void __init setup_arch(char **cmdline_p) | |||
839 | /* | 813 | /* |
840 | * print what head.S has found out about the machine | 814 | * print what head.S has found out about the machine |
841 | */ | 815 | */ |
842 | #ifndef CONFIG_64BIT | ||
843 | if (MACHINE_IS_VM) | ||
844 | pr_info("Linux is running as a z/VM " | ||
845 | "guest operating system in 31-bit mode\n"); | ||
846 | else if (MACHINE_IS_LPAR) | ||
847 | pr_info("Linux is running natively in 31-bit mode\n"); | ||
848 | if (MACHINE_HAS_IEEE) | ||
849 | pr_info("The hardware system has IEEE compatible " | ||
850 | "floating point units\n"); | ||
851 | else | ||
852 | pr_info("The hardware system has no IEEE compatible " | ||
853 | "floating point units\n"); | ||
854 | #else /* CONFIG_64BIT */ | ||
855 | if (MACHINE_IS_VM) | 816 | if (MACHINE_IS_VM) |
856 | pr_info("Linux is running as a z/VM " | 817 | pr_info("Linux is running as a z/VM " |
857 | "guest operating system in 64-bit mode\n"); | 818 | "guest operating system in 64-bit mode\n"); |
@@ -859,7 +820,6 @@ void __init setup_arch(char **cmdline_p) | |||
859 | pr_info("Linux is running under KVM in 64-bit mode\n"); | 820 | pr_info("Linux is running under KVM in 64-bit mode\n"); |
860 | else if (MACHINE_IS_LPAR) | 821 | else if (MACHINE_IS_LPAR) |
861 | pr_info("Linux is running natively in 64-bit mode\n"); | 822 | pr_info("Linux is running natively in 64-bit mode\n"); |
862 | #endif /* CONFIG_64BIT */ | ||
863 | 823 | ||
864 | /* Have one command line that is parsed and saved in /proc/cmdline */ | 824 | /* Have one command line that is parsed and saved in /proc/cmdline */ |
865 | /* boot_command_line has been already set up in early.c */ | 825 | /* boot_command_line has been already set up in early.c */ |
@@ -930,35 +890,3 @@ void __init setup_arch(char **cmdline_p) | |||
930 | /* Add system specific data to the random pool */ | 890 | /* Add system specific data to the random pool */ |
931 | setup_randomness(); | 891 | setup_randomness(); |
932 | } | 892 | } |
933 | |||
934 | #ifdef CONFIG_32BIT | ||
935 | static int no_removal_warning __initdata; | ||
936 | |||
937 | static int __init parse_no_removal_warning(char *str) | ||
938 | { | ||
939 | no_removal_warning = 1; | ||
940 | return 0; | ||
941 | } | ||
942 | __setup("no_removal_warning", parse_no_removal_warning); | ||
943 | |||
944 | static int __init removal_warning(void) | ||
945 | { | ||
946 | if (no_removal_warning) | ||
947 | return 0; | ||
948 | printk(KERN_ALERT "\n\n"); | ||
949 | printk(KERN_CONT "Warning - you are using a 31 bit kernel!\n\n"); | ||
950 | printk(KERN_CONT "We plan to remove 31 bit kernel support from the kernel sources in March 2015.\n"); | ||
951 | printk(KERN_CONT "Currently we assume that nobody is using the 31 bit kernel on old 31 bit\n"); | ||
952 | printk(KERN_CONT "hardware anymore. If you think that the code should not be removed and also\n"); | ||
953 | printk(KERN_CONT "future versions of the Linux kernel should be able to run in 31 bit mode\n"); | ||
954 | printk(KERN_CONT "please let us know. Please write to:\n"); | ||
955 | printk(KERN_CONT "linux390@de.ibm.com (mail address) and/or\n"); | ||
956 | printk(KERN_CONT "linux-s390@vger.kernel.org (mailing list).\n\n"); | ||
957 | printk(KERN_CONT "Thank you!\n\n"); | ||
958 | printk(KERN_CONT "If this kernel runs on a 64 bit machine you may consider using a 64 bit kernel.\n"); | ||
959 | printk(KERN_CONT "This message can be disabled with the \"no_removal_warning\" kernel parameter.\n"); | ||
960 | schedule_timeout_uninterruptible(300 * HZ); | ||
961 | return 0; | ||
962 | } | ||
963 | early_initcall(removal_warning); | ||
964 | #endif | ||
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index b3ae6f70c6d6..7fec60cb0b75 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c | |||
@@ -106,7 +106,6 @@ static void store_sigregs(void) | |||
106 | { | 106 | { |
107 | save_access_regs(current->thread.acrs); | 107 | save_access_regs(current->thread.acrs); |
108 | save_fp_ctl(¤t->thread.fp_regs.fpc); | 108 | save_fp_ctl(¤t->thread.fp_regs.fpc); |
109 | #ifdef CONFIG_64BIT | ||
110 | if (current->thread.vxrs) { | 109 | if (current->thread.vxrs) { |
111 | int i; | 110 | int i; |
112 | 111 | ||
@@ -115,7 +114,6 @@ static void store_sigregs(void) | |||
115 | current->thread.fp_regs.fprs[i] = | 114 | current->thread.fp_regs.fprs[i] = |
116 | *(freg_t *)(current->thread.vxrs + i); | 115 | *(freg_t *)(current->thread.vxrs + i); |
117 | } else | 116 | } else |
118 | #endif | ||
119 | save_fp_regs(current->thread.fp_regs.fprs); | 117 | save_fp_regs(current->thread.fp_regs.fprs); |
120 | } | 118 | } |
121 | 119 | ||
@@ -124,7 +122,6 @@ static void load_sigregs(void) | |||
124 | { | 122 | { |
125 | restore_access_regs(current->thread.acrs); | 123 | restore_access_regs(current->thread.acrs); |
126 | /* restore_fp_ctl is done in restore_sigregs */ | 124 | /* restore_fp_ctl is done in restore_sigregs */ |
127 | #ifdef CONFIG_64BIT | ||
128 | if (current->thread.vxrs) { | 125 | if (current->thread.vxrs) { |
129 | int i; | 126 | int i; |
130 | 127 | ||
@@ -133,7 +130,6 @@ static void load_sigregs(void) | |||
133 | current->thread.fp_regs.fprs[i]; | 130 | current->thread.fp_regs.fprs[i]; |
134 | restore_vx_regs(current->thread.vxrs); | 131 | restore_vx_regs(current->thread.vxrs); |
135 | } else | 132 | } else |
136 | #endif | ||
137 | restore_fp_regs(current->thread.fp_regs.fprs); | 133 | restore_fp_regs(current->thread.fp_regs.fprs); |
138 | } | 134 | } |
139 | 135 | ||
@@ -200,7 +196,6 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs) | |||
200 | static int save_sigregs_ext(struct pt_regs *regs, | 196 | static int save_sigregs_ext(struct pt_regs *regs, |
201 | _sigregs_ext __user *sregs_ext) | 197 | _sigregs_ext __user *sregs_ext) |
202 | { | 198 | { |
203 | #ifdef CONFIG_64BIT | ||
204 | __u64 vxrs[__NUM_VXRS_LOW]; | 199 | __u64 vxrs[__NUM_VXRS_LOW]; |
205 | int i; | 200 | int i; |
206 | 201 | ||
@@ -215,14 +210,12 @@ static int save_sigregs_ext(struct pt_regs *regs, | |||
215 | sizeof(sregs_ext->vxrs_high))) | 210 | sizeof(sregs_ext->vxrs_high))) |
216 | return -EFAULT; | 211 | return -EFAULT; |
217 | } | 212 | } |
218 | #endif | ||
219 | return 0; | 213 | return 0; |
220 | } | 214 | } |
221 | 215 | ||
222 | static int restore_sigregs_ext(struct pt_regs *regs, | 216 | static int restore_sigregs_ext(struct pt_regs *regs, |
223 | _sigregs_ext __user *sregs_ext) | 217 | _sigregs_ext __user *sregs_ext) |
224 | { | 218 | { |
225 | #ifdef CONFIG_64BIT | ||
226 | __u64 vxrs[__NUM_VXRS_LOW]; | 219 | __u64 vxrs[__NUM_VXRS_LOW]; |
227 | int i; | 220 | int i; |
228 | 221 | ||
@@ -237,7 +230,6 @@ static int restore_sigregs_ext(struct pt_regs *regs, | |||
237 | for (i = 0; i < __NUM_VXRS_LOW; i++) | 230 | for (i = 0; i < __NUM_VXRS_LOW; i++) |
238 | *((__u64 *)(current->thread.vxrs + i) + 1) = vxrs[i]; | 231 | *((__u64 *)(current->thread.vxrs + i) + 1) = vxrs[i]; |
239 | } | 232 | } |
240 | #endif | ||
241 | return 0; | 233 | return 0; |
242 | } | 234 | } |
243 | 235 | ||
@@ -416,13 +408,11 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, | |||
416 | * included in the signal frame on a 31-bit system. | 408 | * included in the signal frame on a 31-bit system. |
417 | */ | 409 | */ |
418 | uc_flags = 0; | 410 | uc_flags = 0; |
419 | #ifdef CONFIG_64BIT | ||
420 | if (MACHINE_HAS_VX) { | 411 | if (MACHINE_HAS_VX) { |
421 | frame_size += sizeof(_sigregs_ext); | 412 | frame_size += sizeof(_sigregs_ext); |
422 | if (current->thread.vxrs) | 413 | if (current->thread.vxrs) |
423 | uc_flags |= UC_VXRS; | 414 | uc_flags |= UC_VXRS; |
424 | } | 415 | } |
425 | #endif | ||
426 | frame = get_sigframe(&ksig->ka, regs, frame_size); | 416 | frame = get_sigframe(&ksig->ka, regs, frame_size); |
427 | if (frame == (void __user *) -1UL) | 417 | if (frame == (void __user *) -1UL) |
428 | return -EFAULT; | 418 | return -EFAULT; |
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index db8f1115a3bf..efd2c1968000 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c | |||
@@ -198,19 +198,11 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu) | |||
198 | lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET; | 198 | lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET; |
199 | lc->cpu_nr = cpu; | 199 | lc->cpu_nr = cpu; |
200 | lc->spinlock_lockval = arch_spin_lockval(cpu); | 200 | lc->spinlock_lockval = arch_spin_lockval(cpu); |
201 | #ifndef CONFIG_64BIT | ||
202 | if (MACHINE_HAS_IEEE) { | ||
203 | lc->extended_save_area_addr = get_zeroed_page(GFP_KERNEL); | ||
204 | if (!lc->extended_save_area_addr) | ||
205 | goto out; | ||
206 | } | ||
207 | #else | ||
208 | if (MACHINE_HAS_VX) | 201 | if (MACHINE_HAS_VX) |
209 | lc->vector_save_area_addr = | 202 | lc->vector_save_area_addr = |
210 | (unsigned long) &lc->vector_save_area; | 203 | (unsigned long) &lc->vector_save_area; |
211 | if (vdso_alloc_per_cpu(lc)) | 204 | if (vdso_alloc_per_cpu(lc)) |
212 | goto out; | 205 | goto out; |
213 | #endif | ||
214 | lowcore_ptr[cpu] = lc; | 206 | lowcore_ptr[cpu] = lc; |
215 | pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc); | 207 | pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc); |
216 | return 0; | 208 | return 0; |
@@ -229,16 +221,7 @@ static void pcpu_free_lowcore(struct pcpu *pcpu) | |||
229 | { | 221 | { |
230 | pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0); | 222 | pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0); |
231 | lowcore_ptr[pcpu - pcpu_devices] = NULL; | 223 | lowcore_ptr[pcpu - pcpu_devices] = NULL; |
232 | #ifndef CONFIG_64BIT | ||
233 | if (MACHINE_HAS_IEEE) { | ||
234 | struct _lowcore *lc = pcpu->lowcore; | ||
235 | |||
236 | free_page((unsigned long) lc->extended_save_area_addr); | ||
237 | lc->extended_save_area_addr = 0; | ||
238 | } | ||
239 | #else | ||
240 | vdso_free_per_cpu(pcpu->lowcore); | 224 | vdso_free_per_cpu(pcpu->lowcore); |
241 | #endif | ||
242 | if (pcpu == &pcpu_devices[0]) | 225 | if (pcpu == &pcpu_devices[0]) |
243 | return; | 226 | return; |
244 | free_page(pcpu->lowcore->panic_stack-PANIC_FRAME_OFFSET); | 227 | free_page(pcpu->lowcore->panic_stack-PANIC_FRAME_OFFSET); |
@@ -492,22 +475,6 @@ void arch_send_call_function_single_ipi(int cpu) | |||
492 | pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single); | 475 | pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single); |
493 | } | 476 | } |
494 | 477 | ||
495 | #ifndef CONFIG_64BIT | ||
496 | /* | ||
497 | * this function sends a 'purge tlb' signal to another CPU. | ||
498 | */ | ||
499 | static void smp_ptlb_callback(void *info) | ||
500 | { | ||
501 | __tlb_flush_local(); | ||
502 | } | ||
503 | |||
504 | void smp_ptlb_all(void) | ||
505 | { | ||
506 | on_each_cpu(smp_ptlb_callback, NULL, 1); | ||
507 | } | ||
508 | EXPORT_SYMBOL(smp_ptlb_all); | ||
509 | #endif /* ! CONFIG_64BIT */ | ||
510 | |||
511 | /* | 478 | /* |
512 | * this function sends a 'reschedule' IPI to another CPU. | 479 | * this function sends a 'reschedule' IPI to another CPU. |
513 | * it goes straight through and wastes no time serializing | 480 | * it goes straight through and wastes no time serializing |
@@ -851,7 +818,8 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle) | |||
851 | pcpu_prepare_secondary(pcpu, cpu); | 818 | pcpu_prepare_secondary(pcpu, cpu); |
852 | pcpu_attach_task(pcpu, tidle); | 819 | pcpu_attach_task(pcpu, tidle); |
853 | pcpu_start_fn(pcpu, smp_start_secondary, NULL); | 820 | pcpu_start_fn(pcpu, smp_start_secondary, NULL); |
854 | while (!cpu_online(cpu)) | 821 | /* Wait until cpu puts itself in the online & active maps */ |
822 | while (!cpu_online(cpu) || !cpu_active(cpu)) | ||
855 | cpu_relax(); | 823 | cpu_relax(); |
856 | return 0; | 824 | return 0; |
857 | } | 825 | } |
diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c index 1c4c5accd220..d3236c9e226b 100644 --- a/arch/s390/kernel/suspend.c +++ b/arch/s390/kernel/suspend.c | |||
@@ -138,6 +138,8 @@ int pfn_is_nosave(unsigned long pfn) | |||
138 | { | 138 | { |
139 | unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin)); | 139 | unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin)); |
140 | unsigned long nosave_end_pfn = PFN_DOWN(__pa(&__nosave_end)); | 140 | unsigned long nosave_end_pfn = PFN_DOWN(__pa(&__nosave_end)); |
141 | unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1; | ||
142 | unsigned long stext_pfn = PFN_DOWN(__pa(&_stext)); | ||
141 | 143 | ||
142 | /* Always save lowcore pages (LC protection might be enabled). */ | 144 | /* Always save lowcore pages (LC protection might be enabled). */ |
143 | if (pfn <= LC_PAGES) | 145 | if (pfn <= LC_PAGES) |
@@ -145,6 +147,8 @@ int pfn_is_nosave(unsigned long pfn) | |||
145 | if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn) | 147 | if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn) |
146 | return 1; | 148 | return 1; |
147 | /* Skip memory holes and read-only pages (NSS, DCSS, ...). */ | 149 | /* Skip memory holes and read-only pages (NSS, DCSS, ...). */ |
150 | if (pfn >= stext_pfn && pfn <= eshared_pfn) | ||
151 | return ipl_info.type == IPL_TYPE_NSS ? 1 : 0; | ||
148 | if (tprot(PFN_PHYS(pfn))) | 152 | if (tprot(PFN_PHYS(pfn))) |
149 | return 1; | 153 | return 1; |
150 | return 0; | 154 | return 0; |
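Taken together, the two suspend.c hunks make pfn_is_nosave() skip the kernel text range (_stext up to _eshared) only when the system was IPLed from an NSS, where that range is shared and read-only; otherwise the text pages are saved like any other memory. Below is a self-contained restatement of the resulting decision order; all inputs are passed in explicitly here, whereas the kernel derives them from _stext, _eshared, __nosave_begin/__nosave_end, LC_PAGES, ipl_info and tprot().

	/* Illustrative restatement of pfn_is_nosave() after this change. */
	enum { SAVE = 0, SKIP = 1 };

	static int pfn_is_nosave_sketch(unsigned long pfn,
					unsigned long lc_pages,
					unsigned long nosave_begin_pfn,
					unsigned long nosave_end_pfn,
					unsigned long stext_pfn,
					unsigned long eshared_pfn,
					int ipl_is_nss,
					int page_is_read_only)
	{
		if (pfn <= lc_pages)
			return SAVE;	/* always save lowcore pages */
		if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn)
			return SKIP;	/* the .data..nosave section */
		if (pfn >= stext_pfn && pfn <= eshared_pfn)	/* kernel text */
			return ipl_is_nss ? SKIP : SAVE;
		if (page_is_read_only)
			return SKIP;	/* memory holes, DCSS, ... (tprot) */
		return SAVE;
	}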
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp.S index ca6294645dd3..ca6294645dd3 100644 --- a/arch/s390/kernel/swsusp_asm64.S +++ b/arch/s390/kernel/swsusp.S | |||
diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c index 23eb222c1658..f145490cce54 100644 --- a/arch/s390/kernel/sys_s390.c +++ b/arch/s390/kernel/sys_s390.c | |||
@@ -76,7 +76,6 @@ SYSCALL_DEFINE5(s390_ipc, uint, call, int, first, unsigned long, second, | |||
76 | return sys_ipc(call, first, second, third, ptr, third); | 76 | return sys_ipc(call, first, second, third, ptr, third); |
77 | } | 77 | } |
78 | 78 | ||
79 | #ifdef CONFIG_64BIT | ||
80 | SYSCALL_DEFINE1(s390_personality, unsigned int, personality) | 79 | SYSCALL_DEFINE1(s390_personality, unsigned int, personality) |
81 | { | 80 | { |
82 | unsigned int ret; | 81 | unsigned int ret; |
@@ -90,51 +89,3 @@ SYSCALL_DEFINE1(s390_personality, unsigned int, personality) | |||
90 | 89 | ||
91 | return ret; | 90 | return ret; |
92 | } | 91 | } |
93 | #endif /* CONFIG_64BIT */ | ||
94 | |||
95 | /* | ||
96 | * Wrapper function for sys_fadvise64/fadvise64_64 | ||
97 | */ | ||
98 | #ifndef CONFIG_64BIT | ||
99 | |||
100 | SYSCALL_DEFINE5(s390_fadvise64, int, fd, u32, offset_high, u32, offset_low, | ||
101 | size_t, len, int, advice) | ||
102 | { | ||
103 | return sys_fadvise64(fd, (u64) offset_high << 32 | offset_low, | ||
104 | len, advice); | ||
105 | } | ||
106 | |||
107 | struct fadvise64_64_args { | ||
108 | int fd; | ||
109 | long long offset; | ||
110 | long long len; | ||
111 | int advice; | ||
112 | }; | ||
113 | |||
114 | SYSCALL_DEFINE1(s390_fadvise64_64, struct fadvise64_64_args __user *, args) | ||
115 | { | ||
116 | struct fadvise64_64_args a; | ||
117 | |||
118 | if ( copy_from_user(&a, args, sizeof(a)) ) | ||
119 | return -EFAULT; | ||
120 | return sys_fadvise64_64(a.fd, a.offset, a.len, a.advice); | ||
121 | } | ||
122 | |||
123 | /* | ||
124 | * This is a wrapper to call sys_fallocate(). For 31 bit s390 the last | ||
125 | * 64 bit argument "len" is split into the upper and lower 32 bits. The | ||
126 | * system call wrapper in the user space loads the value to %r6/%r7. | ||
127 | * The code in entry.S keeps the values in %r2 - %r6 where they are and | ||
128 | * stores %r7 to 96(%r15). But the standard C linkage requires that | ||
129 | * the whole 64 bit value for len is stored on the stack and doesn't | ||
130 | * use %r6 at all. So s390_fallocate has to convert the arguments from | ||
131 | * %r2: fd, %r3: mode, %r4/%r5: offset, %r6/96(%r15)-99(%r15): len | ||
132 | * to | ||
133 | * %r2: fd, %r3: mode, %r4/%r5: offset, 96(%r15)-103(%r15): len | ||
134 | */ | ||
135 | SYSCALL_DEFINE5(s390_fallocate, int, fd, int, mode, loff_t, offset, | ||
136 | u32, len_high, u32, len_low) | ||
137 | { | ||
138 | return sys_fallocate(fd, mode, offset, ((u64)len_high << 32) | len_low); | ||
139 | } | ||
140 | #endif | ||
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S index 939ec474b1dd..1acad02681c4 100644 --- a/arch/s390/kernel/syscalls.S +++ b/arch/s390/kernel/syscalls.S | |||
@@ -1,365 +1,365 @@ | |||
1 | /* | 1 | /* |
2 | * definitions for sys_call_table, each line represents an | 2 | * definitions for sys_call_table, each line represents an |
3 | * entry in the table in the form | 3 | * entry in the table in the form |
4 | * SYSCALL(31 bit syscall, 64 bit syscall, 31 bit emulated syscall) | 4 | * SYSCALL(64 bit syscall, 31 bit emulated syscall) |
5 | * | 5 | * |
6 | * this file is meant to be included from entry.S and entry64.S | 6 | * this file is meant to be included from entry.S |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #define NI_SYSCALL SYSCALL(sys_ni_syscall,sys_ni_syscall,sys_ni_syscall) | 9 | #define NI_SYSCALL SYSCALL(sys_ni_syscall,sys_ni_syscall) |
10 | 10 | ||
11 | NI_SYSCALL /* 0 */ | 11 | NI_SYSCALL /* 0 */ |
12 | SYSCALL(sys_exit,sys_exit,compat_sys_exit) | 12 | SYSCALL(sys_exit,compat_sys_exit) |
13 | SYSCALL(sys_fork,sys_fork,sys_fork) | 13 | SYSCALL(sys_fork,sys_fork) |
14 | SYSCALL(sys_read,sys_read,compat_sys_s390_read) | 14 | SYSCALL(sys_read,compat_sys_s390_read) |
15 | SYSCALL(sys_write,sys_write,compat_sys_s390_write) | 15 | SYSCALL(sys_write,compat_sys_s390_write) |
16 | SYSCALL(sys_open,sys_open,compat_sys_open) /* 5 */ | 16 | SYSCALL(sys_open,compat_sys_open) /* 5 */ |
17 | SYSCALL(sys_close,sys_close,compat_sys_close) | 17 | SYSCALL(sys_close,compat_sys_close) |
18 | SYSCALL(sys_restart_syscall,sys_restart_syscall,sys_restart_syscall) | 18 | SYSCALL(sys_restart_syscall,sys_restart_syscall) |
19 | SYSCALL(sys_creat,sys_creat,compat_sys_creat) | 19 | SYSCALL(sys_creat,compat_sys_creat) |
20 | SYSCALL(sys_link,sys_link,compat_sys_link) | 20 | SYSCALL(sys_link,compat_sys_link) |
21 | SYSCALL(sys_unlink,sys_unlink,compat_sys_unlink) /* 10 */ | 21 | SYSCALL(sys_unlink,compat_sys_unlink) /* 10 */ |
22 | SYSCALL(sys_execve,sys_execve,compat_sys_execve) | 22 | SYSCALL(sys_execve,compat_sys_execve) |
23 | SYSCALL(sys_chdir,sys_chdir,compat_sys_chdir) | 23 | SYSCALL(sys_chdir,compat_sys_chdir) |
24 | SYSCALL(sys_time,sys_ni_syscall,compat_sys_time) /* old time syscall */ | 24 | SYSCALL(sys_ni_syscall,compat_sys_time) /* old time syscall */ |
25 | SYSCALL(sys_mknod,sys_mknod,compat_sys_mknod) | 25 | SYSCALL(sys_mknod,compat_sys_mknod) |
26 | SYSCALL(sys_chmod,sys_chmod,compat_sys_chmod) /* 15 */ | 26 | SYSCALL(sys_chmod,compat_sys_chmod) /* 15 */ |
27 | SYSCALL(sys_lchown16,sys_ni_syscall,compat_sys_s390_lchown16) /* old lchown16 syscall*/ | 27 | SYSCALL(sys_ni_syscall,compat_sys_s390_lchown16) /* old lchown16 syscall*/ |
28 | NI_SYSCALL /* old break syscall holder */ | 28 | NI_SYSCALL /* old break syscall holder */ |
29 | NI_SYSCALL /* old stat syscall holder */ | 29 | NI_SYSCALL /* old stat syscall holder */ |
30 | SYSCALL(sys_lseek,sys_lseek,compat_sys_lseek) | 30 | SYSCALL(sys_lseek,compat_sys_lseek) |
31 | SYSCALL(sys_getpid,sys_getpid,sys_getpid) /* 20 */ | 31 | SYSCALL(sys_getpid,sys_getpid) /* 20 */ |
32 | SYSCALL(sys_mount,sys_mount,compat_sys_mount) | 32 | SYSCALL(sys_mount,compat_sys_mount) |
33 | SYSCALL(sys_oldumount,sys_oldumount,compat_sys_oldumount) | 33 | SYSCALL(sys_oldumount,compat_sys_oldumount) |
34 | SYSCALL(sys_setuid16,sys_ni_syscall,compat_sys_s390_setuid16) /* old setuid16 syscall*/ | 34 | SYSCALL(sys_ni_syscall,compat_sys_s390_setuid16) /* old setuid16 syscall*/ |
35 | SYSCALL(sys_getuid16,sys_ni_syscall,compat_sys_s390_getuid16) /* old getuid16 syscall*/ | 35 | SYSCALL(sys_ni_syscall,compat_sys_s390_getuid16) /* old getuid16 syscall*/ |
36 | SYSCALL(sys_stime,sys_ni_syscall,compat_sys_stime) /* 25 old stime syscall */ | 36 | SYSCALL(sys_ni_syscall,compat_sys_stime) /* 25 old stime syscall */ |
37 | SYSCALL(sys_ptrace,sys_ptrace,compat_sys_ptrace) | 37 | SYSCALL(sys_ptrace,compat_sys_ptrace) |
38 | SYSCALL(sys_alarm,sys_alarm,compat_sys_alarm) | 38 | SYSCALL(sys_alarm,compat_sys_alarm) |
39 | NI_SYSCALL /* old fstat syscall */ | 39 | NI_SYSCALL /* old fstat syscall */ |
40 | SYSCALL(sys_pause,sys_pause,sys_pause) | 40 | SYSCALL(sys_pause,sys_pause) |
41 | SYSCALL(sys_utime,sys_utime,compat_sys_utime) /* 30 */ | 41 | SYSCALL(sys_utime,compat_sys_utime) /* 30 */ |
42 | NI_SYSCALL /* old stty syscall */ | 42 | NI_SYSCALL /* old stty syscall */ |
43 | NI_SYSCALL /* old gtty syscall */ | 43 | NI_SYSCALL /* old gtty syscall */ |
44 | SYSCALL(sys_access,sys_access,compat_sys_access) | 44 | SYSCALL(sys_access,compat_sys_access) |
45 | SYSCALL(sys_nice,sys_nice,compat_sys_nice) | 45 | SYSCALL(sys_nice,compat_sys_nice) |
46 | NI_SYSCALL /* 35 old ftime syscall */ | 46 | NI_SYSCALL /* 35 old ftime syscall */ |
47 | SYSCALL(sys_sync,sys_sync,sys_sync) | 47 | SYSCALL(sys_sync,sys_sync) |
48 | SYSCALL(sys_kill,sys_kill,compat_sys_kill) | 48 | SYSCALL(sys_kill,compat_sys_kill) |
49 | SYSCALL(sys_rename,sys_rename,compat_sys_rename) | 49 | SYSCALL(sys_rename,compat_sys_rename) |
50 | SYSCALL(sys_mkdir,sys_mkdir,compat_sys_mkdir) | 50 | SYSCALL(sys_mkdir,compat_sys_mkdir) |
51 | SYSCALL(sys_rmdir,sys_rmdir,compat_sys_rmdir) /* 40 */ | 51 | SYSCALL(sys_rmdir,compat_sys_rmdir) /* 40 */ |
52 | SYSCALL(sys_dup,sys_dup,compat_sys_dup) | 52 | SYSCALL(sys_dup,compat_sys_dup) |
53 | SYSCALL(sys_pipe,sys_pipe,compat_sys_pipe) | 53 | SYSCALL(sys_pipe,compat_sys_pipe) |
54 | SYSCALL(sys_times,sys_times,compat_sys_times) | 54 | SYSCALL(sys_times,compat_sys_times) |
55 | NI_SYSCALL /* old prof syscall */ | 55 | NI_SYSCALL /* old prof syscall */ |
56 | SYSCALL(sys_brk,sys_brk,compat_sys_brk) /* 45 */ | 56 | SYSCALL(sys_brk,compat_sys_brk) /* 45 */ |
57 | SYSCALL(sys_setgid16,sys_ni_syscall,compat_sys_s390_setgid16) /* old setgid16 syscall*/ | 57 | SYSCALL(sys_ni_syscall,compat_sys_s390_setgid16) /* old setgid16 syscall*/ |
58 | SYSCALL(sys_getgid16,sys_ni_syscall,compat_sys_s390_getgid16) /* old getgid16 syscall*/ | 58 | SYSCALL(sys_ni_syscall,compat_sys_s390_getgid16) /* old getgid16 syscall*/ |
59 | SYSCALL(sys_signal,sys_signal,compat_sys_signal) | 59 | SYSCALL(sys_signal,compat_sys_signal) |
60 | SYSCALL(sys_geteuid16,sys_ni_syscall,compat_sys_s390_geteuid16) /* old geteuid16 syscall */ | 60 | SYSCALL(sys_ni_syscall,compat_sys_s390_geteuid16) /* old geteuid16 syscall */ |
61 | SYSCALL(sys_getegid16,sys_ni_syscall,compat_sys_s390_getegid16) /* 50 old getegid16 syscall */ | 61 | SYSCALL(sys_ni_syscall,compat_sys_s390_getegid16) /* 50 old getegid16 syscall */ |
62 | SYSCALL(sys_acct,sys_acct,compat_sys_acct) | 62 | SYSCALL(sys_acct,compat_sys_acct) |
63 | SYSCALL(sys_umount,sys_umount,compat_sys_umount) | 63 | SYSCALL(sys_umount,compat_sys_umount) |
64 | NI_SYSCALL /* old lock syscall */ | 64 | NI_SYSCALL /* old lock syscall */ |
65 | SYSCALL(sys_ioctl,sys_ioctl,compat_sys_ioctl) | 65 | SYSCALL(sys_ioctl,compat_sys_ioctl) |
66 | SYSCALL(sys_fcntl,sys_fcntl,compat_sys_fcntl) /* 55 */ | 66 | SYSCALL(sys_fcntl,compat_sys_fcntl) /* 55 */ |
67 | NI_SYSCALL /* intel mpx syscall */ | 67 | NI_SYSCALL /* intel mpx syscall */ |
68 | SYSCALL(sys_setpgid,sys_setpgid,compat_sys_setpgid) | 68 | SYSCALL(sys_setpgid,compat_sys_setpgid) |
69 | NI_SYSCALL /* old ulimit syscall */ | 69 | NI_SYSCALL /* old ulimit syscall */ |
70 | NI_SYSCALL /* old uname syscall */ | 70 | NI_SYSCALL /* old uname syscall */ |
71 | SYSCALL(sys_umask,sys_umask,compat_sys_umask) /* 60 */ | 71 | SYSCALL(sys_umask,compat_sys_umask) /* 60 */ |
72 | SYSCALL(sys_chroot,sys_chroot,compat_sys_chroot) | 72 | SYSCALL(sys_chroot,compat_sys_chroot) |
73 | SYSCALL(sys_ustat,sys_ustat,compat_sys_ustat) | 73 | SYSCALL(sys_ustat,compat_sys_ustat) |
74 | SYSCALL(sys_dup2,sys_dup2,compat_sys_dup2) | 74 | SYSCALL(sys_dup2,compat_sys_dup2) |
75 | SYSCALL(sys_getppid,sys_getppid,sys_getppid) | 75 | SYSCALL(sys_getppid,sys_getppid) |
76 | SYSCALL(sys_getpgrp,sys_getpgrp,sys_getpgrp) /* 65 */ | 76 | SYSCALL(sys_getpgrp,sys_getpgrp) /* 65 */ |
77 | SYSCALL(sys_setsid,sys_setsid,sys_setsid) | 77 | SYSCALL(sys_setsid,sys_setsid) |
78 | SYSCALL(sys_sigaction,sys_sigaction,compat_sys_sigaction) | 78 | SYSCALL(sys_sigaction,compat_sys_sigaction) |
79 | NI_SYSCALL /* old sgetmask syscall*/ | 79 | NI_SYSCALL /* old sgetmask syscall*/ |
80 | NI_SYSCALL /* old ssetmask syscall*/ | 80 | NI_SYSCALL /* old ssetmask syscall*/ |
81 | SYSCALL(sys_setreuid16,sys_ni_syscall,compat_sys_s390_setreuid16) /* old setreuid16 syscall */ | 81 | SYSCALL(sys_ni_syscall,compat_sys_s390_setreuid16) /* old setreuid16 syscall */ |
82 | SYSCALL(sys_setregid16,sys_ni_syscall,compat_sys_s390_setregid16) /* old setregid16 syscall */ | 82 | SYSCALL(sys_ni_syscall,compat_sys_s390_setregid16) /* old setregid16 syscall */ |
83 | SYSCALL(sys_sigsuspend,sys_sigsuspend,compat_sys_sigsuspend) | 83 | SYSCALL(sys_sigsuspend,compat_sys_sigsuspend) |
84 | SYSCALL(sys_sigpending,sys_sigpending,compat_sys_sigpending) | 84 | SYSCALL(sys_sigpending,compat_sys_sigpending) |
85 | SYSCALL(sys_sethostname,sys_sethostname,compat_sys_sethostname) | 85 | SYSCALL(sys_sethostname,compat_sys_sethostname) |
86 | SYSCALL(sys_setrlimit,sys_setrlimit,compat_sys_setrlimit) /* 75 */ | 86 | SYSCALL(sys_setrlimit,compat_sys_setrlimit) /* 75 */ |
87 | SYSCALL(sys_old_getrlimit,sys_getrlimit,compat_sys_old_getrlimit) | 87 | SYSCALL(sys_getrlimit,compat_sys_old_getrlimit) |
88 | SYSCALL(sys_getrusage,sys_getrusage,compat_sys_getrusage) | 88 | SYSCALL(sys_getrusage,compat_sys_getrusage) |
89 | SYSCALL(sys_gettimeofday,sys_gettimeofday,compat_sys_gettimeofday) | 89 | SYSCALL(sys_gettimeofday,compat_sys_gettimeofday) |
90 | SYSCALL(sys_settimeofday,sys_settimeofday,compat_sys_settimeofday) | 90 | SYSCALL(sys_settimeofday,compat_sys_settimeofday) |
91 | SYSCALL(sys_getgroups16,sys_ni_syscall,compat_sys_s390_getgroups16) /* 80 old getgroups16 syscall */ | 91 | SYSCALL(sys_ni_syscall,compat_sys_s390_getgroups16) /* 80 old getgroups16 syscall */ |
92 | SYSCALL(sys_setgroups16,sys_ni_syscall,compat_sys_s390_setgroups16) /* old setgroups16 syscall */ | 92 | SYSCALL(sys_ni_syscall,compat_sys_s390_setgroups16) /* old setgroups16 syscall */ |
93 | NI_SYSCALL /* old select syscall */ | 93 | NI_SYSCALL /* old select syscall */ |
94 | SYSCALL(sys_symlink,sys_symlink,compat_sys_symlink) | 94 | SYSCALL(sys_symlink,compat_sys_symlink) |
95 | NI_SYSCALL /* old lstat syscall */ | 95 | NI_SYSCALL /* old lstat syscall */ |
96 | SYSCALL(sys_readlink,sys_readlink,compat_sys_readlink) /* 85 */ | 96 | SYSCALL(sys_readlink,compat_sys_readlink) /* 85 */ |
97 | SYSCALL(sys_uselib,sys_uselib,compat_sys_uselib) | 97 | SYSCALL(sys_uselib,compat_sys_uselib) |
98 | SYSCALL(sys_swapon,sys_swapon,compat_sys_swapon) | 98 | SYSCALL(sys_swapon,compat_sys_swapon) |
99 | SYSCALL(sys_reboot,sys_reboot,compat_sys_reboot) | 99 | SYSCALL(sys_reboot,compat_sys_reboot) |
100 | SYSCALL(sys_ni_syscall,sys_ni_syscall,compat_sys_old_readdir) /* old readdir syscall */ | 100 | SYSCALL(sys_ni_syscall,compat_sys_old_readdir) /* old readdir syscall */ |
101 | SYSCALL(sys_old_mmap,sys_old_mmap,compat_sys_s390_old_mmap) /* 90 */ | 101 | SYSCALL(sys_old_mmap,compat_sys_s390_old_mmap) /* 90 */ |
102 | SYSCALL(sys_munmap,sys_munmap,compat_sys_munmap) | 102 | SYSCALL(sys_munmap,compat_sys_munmap) |
103 | SYSCALL(sys_truncate,sys_truncate,compat_sys_truncate) | 103 | SYSCALL(sys_truncate,compat_sys_truncate) |
104 | SYSCALL(sys_ftruncate,sys_ftruncate,compat_sys_ftruncate) | 104 | SYSCALL(sys_ftruncate,compat_sys_ftruncate) |
105 | SYSCALL(sys_fchmod,sys_fchmod,compat_sys_fchmod) | 105 | SYSCALL(sys_fchmod,compat_sys_fchmod) |
106 | SYSCALL(sys_fchown16,sys_ni_syscall,compat_sys_s390_fchown16) /* 95 old fchown16 syscall*/ | 106 | SYSCALL(sys_ni_syscall,compat_sys_s390_fchown16) /* 95 old fchown16 syscall*/ |
107 | SYSCALL(sys_getpriority,sys_getpriority,compat_sys_getpriority) | 107 | SYSCALL(sys_getpriority,compat_sys_getpriority) |
108 | SYSCALL(sys_setpriority,sys_setpriority,compat_sys_setpriority) | 108 | SYSCALL(sys_setpriority,compat_sys_setpriority) |
109 | NI_SYSCALL /* old profil syscall */ | 109 | NI_SYSCALL /* old profil syscall */ |
110 | SYSCALL(sys_statfs,sys_statfs,compat_sys_statfs) | 110 | SYSCALL(sys_statfs,compat_sys_statfs) |
111 | SYSCALL(sys_fstatfs,sys_fstatfs,compat_sys_fstatfs) /* 100 */ | 111 | SYSCALL(sys_fstatfs,compat_sys_fstatfs) /* 100 */ |
112 | NI_SYSCALL /* ioperm for i386 */ | 112 | NI_SYSCALL /* ioperm for i386 */ |
113 | SYSCALL(sys_socketcall,sys_socketcall,compat_sys_socketcall) | 113 | SYSCALL(sys_socketcall,compat_sys_socketcall) |
114 | SYSCALL(sys_syslog,sys_syslog,compat_sys_syslog) | 114 | SYSCALL(sys_syslog,compat_sys_syslog) |
115 | SYSCALL(sys_setitimer,sys_setitimer,compat_sys_setitimer) | 115 | SYSCALL(sys_setitimer,compat_sys_setitimer) |
116 | SYSCALL(sys_getitimer,sys_getitimer,compat_sys_getitimer) /* 105 */ | 116 | SYSCALL(sys_getitimer,compat_sys_getitimer) /* 105 */ |
117 | SYSCALL(sys_newstat,sys_newstat,compat_sys_newstat) | 117 | SYSCALL(sys_newstat,compat_sys_newstat) |
118 | SYSCALL(sys_newlstat,sys_newlstat,compat_sys_newlstat) | 118 | SYSCALL(sys_newlstat,compat_sys_newlstat) |
119 | SYSCALL(sys_newfstat,sys_newfstat,compat_sys_newfstat) | 119 | SYSCALL(sys_newfstat,compat_sys_newfstat) |
120 | NI_SYSCALL /* old uname syscall */ | 120 | NI_SYSCALL /* old uname syscall */ |
121 | SYSCALL(sys_lookup_dcookie,sys_lookup_dcookie,compat_sys_lookup_dcookie) /* 110 */ | 121 | SYSCALL(sys_lookup_dcookie,compat_sys_lookup_dcookie) /* 110 */ |
122 | SYSCALL(sys_vhangup,sys_vhangup,sys_vhangup) | 122 | SYSCALL(sys_vhangup,sys_vhangup) |
123 | NI_SYSCALL /* old "idle" system call */ | 123 | NI_SYSCALL /* old "idle" system call */ |
124 | NI_SYSCALL /* vm86old for i386 */ | 124 | NI_SYSCALL /* vm86old for i386 */ |
125 | SYSCALL(sys_wait4,sys_wait4,compat_sys_wait4) | 125 | SYSCALL(sys_wait4,compat_sys_wait4) |
126 | SYSCALL(sys_swapoff,sys_swapoff,compat_sys_swapoff) /* 115 */ | 126 | SYSCALL(sys_swapoff,compat_sys_swapoff) /* 115 */ |
127 | SYSCALL(sys_sysinfo,sys_sysinfo,compat_sys_sysinfo) | 127 | SYSCALL(sys_sysinfo,compat_sys_sysinfo) |
128 | SYSCALL(sys_s390_ipc,sys_s390_ipc,compat_sys_s390_ipc) | 128 | SYSCALL(sys_s390_ipc,compat_sys_s390_ipc) |
129 | SYSCALL(sys_fsync,sys_fsync,compat_sys_fsync) | 129 | SYSCALL(sys_fsync,compat_sys_fsync) |
130 | SYSCALL(sys_sigreturn,sys_sigreturn,compat_sys_sigreturn) | 130 | SYSCALL(sys_sigreturn,compat_sys_sigreturn) |
131 | SYSCALL(sys_clone,sys_clone,compat_sys_clone) /* 120 */ | 131 | SYSCALL(sys_clone,compat_sys_clone) /* 120 */ |
132 | SYSCALL(sys_setdomainname,sys_setdomainname,compat_sys_setdomainname) | 132 | SYSCALL(sys_setdomainname,compat_sys_setdomainname) |
133 | SYSCALL(sys_newuname,sys_newuname,compat_sys_newuname) | 133 | SYSCALL(sys_newuname,compat_sys_newuname) |
134 | NI_SYSCALL /* modify_ldt for i386 */ | 134 | NI_SYSCALL /* modify_ldt for i386 */ |
135 | SYSCALL(sys_adjtimex,sys_adjtimex,compat_sys_adjtimex) | 135 | SYSCALL(sys_adjtimex,compat_sys_adjtimex) |
136 | SYSCALL(sys_mprotect,sys_mprotect,compat_sys_mprotect) /* 125 */ | 136 | SYSCALL(sys_mprotect,compat_sys_mprotect) /* 125 */ |
137 | SYSCALL(sys_sigprocmask,sys_sigprocmask,compat_sys_sigprocmask) | 137 | SYSCALL(sys_sigprocmask,compat_sys_sigprocmask) |
138 | NI_SYSCALL /* old "create module" */ | 138 | NI_SYSCALL /* old "create module" */ |
139 | SYSCALL(sys_init_module,sys_init_module,compat_sys_init_module) | 139 | SYSCALL(sys_init_module,compat_sys_init_module) |
140 | SYSCALL(sys_delete_module,sys_delete_module,compat_sys_delete_module) | 140 | SYSCALL(sys_delete_module,compat_sys_delete_module) |
141 | NI_SYSCALL /* 130: old get_kernel_syms */ | 141 | NI_SYSCALL /* 130: old get_kernel_syms */ |
142 | SYSCALL(sys_quotactl,sys_quotactl,compat_sys_quotactl) | 142 | SYSCALL(sys_quotactl,compat_sys_quotactl) |
143 | SYSCALL(sys_getpgid,sys_getpgid,compat_sys_getpgid) | 143 | SYSCALL(sys_getpgid,compat_sys_getpgid) |
144 | SYSCALL(sys_fchdir,sys_fchdir,compat_sys_fchdir) | 144 | SYSCALL(sys_fchdir,compat_sys_fchdir) |
145 | SYSCALL(sys_bdflush,sys_bdflush,compat_sys_bdflush) | 145 | SYSCALL(sys_bdflush,compat_sys_bdflush) |
146 | SYSCALL(sys_sysfs,sys_sysfs,compat_sys_sysfs) /* 135 */ | 146 | SYSCALL(sys_sysfs,compat_sys_sysfs) /* 135 */ |
147 | SYSCALL(sys_personality,sys_s390_personality,compat_sys_s390_personality) | 147 | SYSCALL(sys_s390_personality,compat_sys_s390_personality) |
148 | NI_SYSCALL /* for afs_syscall */ | 148 | NI_SYSCALL /* for afs_syscall */ |
149 | SYSCALL(sys_setfsuid16,sys_ni_syscall,compat_sys_s390_setfsuid16) /* old setfsuid16 syscall */ | 149 | SYSCALL(sys_ni_syscall,compat_sys_s390_setfsuid16) /* old setfsuid16 syscall */ |
150 | SYSCALL(sys_setfsgid16,sys_ni_syscall,compat_sys_s390_setfsgid16) /* old setfsgid16 syscall */ | 150 | SYSCALL(sys_ni_syscall,compat_sys_s390_setfsgid16) /* old setfsgid16 syscall */ |
151 | SYSCALL(sys_llseek,sys_llseek,compat_sys_llseek) /* 140 */ | 151 | SYSCALL(sys_llseek,compat_sys_llseek) /* 140 */ |
152 | SYSCALL(sys_getdents,sys_getdents,compat_sys_getdents) | 152 | SYSCALL(sys_getdents,compat_sys_getdents) |
153 | SYSCALL(sys_select,sys_select,compat_sys_select) | 153 | SYSCALL(sys_select,compat_sys_select) |
154 | SYSCALL(sys_flock,sys_flock,compat_sys_flock) | 154 | SYSCALL(sys_flock,compat_sys_flock) |
155 | SYSCALL(sys_msync,sys_msync,compat_sys_msync) | 155 | SYSCALL(sys_msync,compat_sys_msync) |
156 | SYSCALL(sys_readv,sys_readv,compat_sys_readv) /* 145 */ | 156 | SYSCALL(sys_readv,compat_sys_readv) /* 145 */ |
157 | SYSCALL(sys_writev,sys_writev,compat_sys_writev) | 157 | SYSCALL(sys_writev,compat_sys_writev) |
158 | SYSCALL(sys_getsid,sys_getsid,compat_sys_getsid) | 158 | SYSCALL(sys_getsid,compat_sys_getsid) |
159 | SYSCALL(sys_fdatasync,sys_fdatasync,compat_sys_fdatasync) | 159 | SYSCALL(sys_fdatasync,compat_sys_fdatasync) |
160 | SYSCALL(sys_sysctl,sys_sysctl,compat_sys_sysctl) | 160 | SYSCALL(sys_sysctl,compat_sys_sysctl) |
161 | SYSCALL(sys_mlock,sys_mlock,compat_sys_mlock) /* 150 */ | 161 | SYSCALL(sys_mlock,compat_sys_mlock) /* 150 */ |
162 | SYSCALL(sys_munlock,sys_munlock,compat_sys_munlock) | 162 | SYSCALL(sys_munlock,compat_sys_munlock) |
163 | SYSCALL(sys_mlockall,sys_mlockall,compat_sys_mlockall) | 163 | SYSCALL(sys_mlockall,compat_sys_mlockall) |
164 | SYSCALL(sys_munlockall,sys_munlockall,sys_munlockall) | 164 | SYSCALL(sys_munlockall,sys_munlockall) |
165 | SYSCALL(sys_sched_setparam,sys_sched_setparam,compat_sys_sched_setparam) | 165 | SYSCALL(sys_sched_setparam,compat_sys_sched_setparam) |
166 | SYSCALL(sys_sched_getparam,sys_sched_getparam,compat_sys_sched_getparam) /* 155 */ | 166 | SYSCALL(sys_sched_getparam,compat_sys_sched_getparam) /* 155 */ |
167 | SYSCALL(sys_sched_setscheduler,sys_sched_setscheduler,compat_sys_sched_setscheduler) | 167 | SYSCALL(sys_sched_setscheduler,compat_sys_sched_setscheduler) |
168 | SYSCALL(sys_sched_getscheduler,sys_sched_getscheduler,compat_sys_sched_getscheduler) | 168 | SYSCALL(sys_sched_getscheduler,compat_sys_sched_getscheduler) |
169 | SYSCALL(sys_sched_yield,sys_sched_yield,sys_sched_yield) | 169 | SYSCALL(sys_sched_yield,sys_sched_yield) |
170 | SYSCALL(sys_sched_get_priority_max,sys_sched_get_priority_max,compat_sys_sched_get_priority_max) | 170 | SYSCALL(sys_sched_get_priority_max,compat_sys_sched_get_priority_max) |
171 | SYSCALL(sys_sched_get_priority_min,sys_sched_get_priority_min,compat_sys_sched_get_priority_min) /* 160 */ | 171 | SYSCALL(sys_sched_get_priority_min,compat_sys_sched_get_priority_min) /* 160 */ |
172 | SYSCALL(sys_sched_rr_get_interval,sys_sched_rr_get_interval,compat_sys_sched_rr_get_interval) | 172 | SYSCALL(sys_sched_rr_get_interval,compat_sys_sched_rr_get_interval) |
173 | SYSCALL(sys_nanosleep,sys_nanosleep,compat_sys_nanosleep) | 173 | SYSCALL(sys_nanosleep,compat_sys_nanosleep) |
174 | SYSCALL(sys_mremap,sys_mremap,compat_sys_mremap) | 174 | SYSCALL(sys_mremap,compat_sys_mremap) |
175 | SYSCALL(sys_setresuid16,sys_ni_syscall,compat_sys_s390_setresuid16) /* old setresuid16 syscall */ | 175 | SYSCALL(sys_ni_syscall,compat_sys_s390_setresuid16) /* old setresuid16 syscall */ |
176 | SYSCALL(sys_getresuid16,sys_ni_syscall,compat_sys_s390_getresuid16) /* 165 old getresuid16 syscall */ | 176 | SYSCALL(sys_ni_syscall,compat_sys_s390_getresuid16) /* 165 old getresuid16 syscall */ |
177 | NI_SYSCALL /* for vm86 */ | 177 | NI_SYSCALL /* for vm86 */ |
178 | NI_SYSCALL /* old sys_query_module */ | 178 | NI_SYSCALL /* old sys_query_module */ |
179 | SYSCALL(sys_poll,sys_poll,compat_sys_poll) | 179 | SYSCALL(sys_poll,compat_sys_poll) |
180 | NI_SYSCALL /* old nfsservctl */ | 180 | NI_SYSCALL /* old nfsservctl */ |
181 | SYSCALL(sys_setresgid16,sys_ni_syscall,compat_sys_s390_setresgid16) /* 170 old setresgid16 syscall */ | 181 | SYSCALL(sys_ni_syscall,compat_sys_s390_setresgid16) /* 170 old setresgid16 syscall */ |
182 | SYSCALL(sys_getresgid16,sys_ni_syscall,compat_sys_s390_getresgid16) /* old getresgid16 syscall */ | 182 | SYSCALL(sys_ni_syscall,compat_sys_s390_getresgid16) /* old getresgid16 syscall */ |
183 | SYSCALL(sys_prctl,sys_prctl,compat_sys_prctl) | 183 | SYSCALL(sys_prctl,compat_sys_prctl) |
184 | SYSCALL(sys_rt_sigreturn,sys_rt_sigreturn,compat_sys_rt_sigreturn) | 184 | SYSCALL(sys_rt_sigreturn,compat_sys_rt_sigreturn) |
185 | SYSCALL(sys_rt_sigaction,sys_rt_sigaction,compat_sys_rt_sigaction) | 185 | SYSCALL(sys_rt_sigaction,compat_sys_rt_sigaction) |
186 | SYSCALL(sys_rt_sigprocmask,sys_rt_sigprocmask,compat_sys_rt_sigprocmask) /* 175 */ | 186 | SYSCALL(sys_rt_sigprocmask,compat_sys_rt_sigprocmask) /* 175 */ |
187 | SYSCALL(sys_rt_sigpending,sys_rt_sigpending,compat_sys_rt_sigpending) | 187 | SYSCALL(sys_rt_sigpending,compat_sys_rt_sigpending) |
188 | SYSCALL(sys_rt_sigtimedwait,sys_rt_sigtimedwait,compat_sys_rt_sigtimedwait) | 188 | SYSCALL(sys_rt_sigtimedwait,compat_sys_rt_sigtimedwait) |
189 | SYSCALL(sys_rt_sigqueueinfo,sys_rt_sigqueueinfo,compat_sys_rt_sigqueueinfo) | 189 | SYSCALL(sys_rt_sigqueueinfo,compat_sys_rt_sigqueueinfo) |
190 | SYSCALL(sys_rt_sigsuspend,sys_rt_sigsuspend,compat_sys_rt_sigsuspend) | 190 | SYSCALL(sys_rt_sigsuspend,compat_sys_rt_sigsuspend) |
191 | SYSCALL(sys_pread64,sys_pread64,compat_sys_s390_pread64) /* 180 */ | 191 | SYSCALL(sys_pread64,compat_sys_s390_pread64) /* 180 */ |
192 | SYSCALL(sys_pwrite64,sys_pwrite64,compat_sys_s390_pwrite64) | 192 | SYSCALL(sys_pwrite64,compat_sys_s390_pwrite64) |
193 | SYSCALL(sys_chown16,sys_ni_syscall,compat_sys_s390_chown16) /* old chown16 syscall */ | 193 | SYSCALL(sys_ni_syscall,compat_sys_s390_chown16) /* old chown16 syscall */ |
194 | SYSCALL(sys_getcwd,sys_getcwd,compat_sys_getcwd) | 194 | SYSCALL(sys_getcwd,compat_sys_getcwd) |
195 | SYSCALL(sys_capget,sys_capget,compat_sys_capget) | 195 | SYSCALL(sys_capget,compat_sys_capget) |
196 | SYSCALL(sys_capset,sys_capset,compat_sys_capset) /* 185 */ | 196 | SYSCALL(sys_capset,compat_sys_capset) /* 185 */ |
197 | SYSCALL(sys_sigaltstack,sys_sigaltstack,compat_sys_sigaltstack) | 197 | SYSCALL(sys_sigaltstack,compat_sys_sigaltstack) |
198 | SYSCALL(sys_sendfile,sys_sendfile64,compat_sys_sendfile) | 198 | SYSCALL(sys_sendfile64,compat_sys_sendfile) |
199 | NI_SYSCALL /* streams1 */ | 199 | NI_SYSCALL /* streams1 */ |
200 | NI_SYSCALL /* streams2 */ | 200 | NI_SYSCALL /* streams2 */ |
201 | SYSCALL(sys_vfork,sys_vfork,sys_vfork) /* 190 */ | 201 | SYSCALL(sys_vfork,sys_vfork) /* 190 */ |
202 | SYSCALL(sys_getrlimit,sys_getrlimit,compat_sys_getrlimit) | 202 | SYSCALL(sys_getrlimit,compat_sys_getrlimit) |
203 | SYSCALL(sys_mmap2,sys_mmap2,compat_sys_s390_mmap2) | 203 | SYSCALL(sys_mmap2,compat_sys_s390_mmap2) |
204 | SYSCALL(sys_truncate64,sys_ni_syscall,compat_sys_s390_truncate64) | 204 | SYSCALL(sys_ni_syscall,compat_sys_s390_truncate64) |
205 | SYSCALL(sys_ftruncate64,sys_ni_syscall,compat_sys_s390_ftruncate64) | 205 | SYSCALL(sys_ni_syscall,compat_sys_s390_ftruncate64) |
206 | SYSCALL(sys_stat64,sys_ni_syscall,compat_sys_s390_stat64) /* 195 */ | 206 | SYSCALL(sys_ni_syscall,compat_sys_s390_stat64) /* 195 */ |
207 | SYSCALL(sys_lstat64,sys_ni_syscall,compat_sys_s390_lstat64) | 207 | SYSCALL(sys_ni_syscall,compat_sys_s390_lstat64) |
208 | SYSCALL(sys_fstat64,sys_ni_syscall,compat_sys_s390_fstat64) | 208 | SYSCALL(sys_ni_syscall,compat_sys_s390_fstat64) |
209 | SYSCALL(sys_lchown,sys_lchown,compat_sys_lchown) | 209 | SYSCALL(sys_lchown,compat_sys_lchown) |
210 | SYSCALL(sys_getuid,sys_getuid,sys_getuid) | 210 | SYSCALL(sys_getuid,sys_getuid) |
211 | SYSCALL(sys_getgid,sys_getgid,sys_getgid) /* 200 */ | 211 | SYSCALL(sys_getgid,sys_getgid) /* 200 */ |
212 | SYSCALL(sys_geteuid,sys_geteuid,sys_geteuid) | 212 | SYSCALL(sys_geteuid,sys_geteuid) |
213 | SYSCALL(sys_getegid,sys_getegid,sys_getegid) | 213 | SYSCALL(sys_getegid,sys_getegid) |
214 | SYSCALL(sys_setreuid,sys_setreuid,compat_sys_setreuid) | 214 | SYSCALL(sys_setreuid,compat_sys_setreuid) |
215 | SYSCALL(sys_setregid,sys_setregid,compat_sys_setregid) | 215 | SYSCALL(sys_setregid,compat_sys_setregid) |
216 | SYSCALL(sys_getgroups,sys_getgroups,compat_sys_getgroups) /* 205 */ | 216 | SYSCALL(sys_getgroups,compat_sys_getgroups) /* 205 */ |
217 | SYSCALL(sys_setgroups,sys_setgroups,compat_sys_setgroups) | 217 | SYSCALL(sys_setgroups,compat_sys_setgroups) |
218 | SYSCALL(sys_fchown,sys_fchown,compat_sys_fchown) | 218 | SYSCALL(sys_fchown,compat_sys_fchown) |
219 | SYSCALL(sys_setresuid,sys_setresuid,compat_sys_setresuid) | 219 | SYSCALL(sys_setresuid,compat_sys_setresuid) |
220 | SYSCALL(sys_getresuid,sys_getresuid,compat_sys_getresuid) | 220 | SYSCALL(sys_getresuid,compat_sys_getresuid) |
221 | SYSCALL(sys_setresgid,sys_setresgid,compat_sys_setresgid) /* 210 */ | 221 | SYSCALL(sys_setresgid,compat_sys_setresgid) /* 210 */ |
222 | SYSCALL(sys_getresgid,sys_getresgid,compat_sys_getresgid) | 222 | SYSCALL(sys_getresgid,compat_sys_getresgid) |
223 | SYSCALL(sys_chown,sys_chown,compat_sys_chown) | 223 | SYSCALL(sys_chown,compat_sys_chown) |
224 | SYSCALL(sys_setuid,sys_setuid,compat_sys_setuid) | 224 | SYSCALL(sys_setuid,compat_sys_setuid) |
225 | SYSCALL(sys_setgid,sys_setgid,compat_sys_setgid) | 225 | SYSCALL(sys_setgid,compat_sys_setgid) |
226 | SYSCALL(sys_setfsuid,sys_setfsuid,compat_sys_setfsuid) /* 215 */ | 226 | SYSCALL(sys_setfsuid,compat_sys_setfsuid) /* 215 */ |
227 | SYSCALL(sys_setfsgid,sys_setfsgid,compat_sys_setfsgid) | 227 | SYSCALL(sys_setfsgid,compat_sys_setfsgid) |
228 | SYSCALL(sys_pivot_root,sys_pivot_root,compat_sys_pivot_root) | 228 | SYSCALL(sys_pivot_root,compat_sys_pivot_root) |
229 | SYSCALL(sys_mincore,sys_mincore,compat_sys_mincore) | 229 | SYSCALL(sys_mincore,compat_sys_mincore) |
230 | SYSCALL(sys_madvise,sys_madvise,compat_sys_madvise) | 230 | SYSCALL(sys_madvise,compat_sys_madvise) |
231 | SYSCALL(sys_getdents64,sys_getdents64,compat_sys_getdents64) /* 220 */ | 231 | SYSCALL(sys_getdents64,compat_sys_getdents64) /* 220 */ |
232 | SYSCALL(sys_fcntl64,sys_ni_syscall,compat_sys_fcntl64) | 232 | SYSCALL(sys_ni_syscall,compat_sys_fcntl64) |
233 | SYSCALL(sys_readahead,sys_readahead,compat_sys_s390_readahead) | 233 | SYSCALL(sys_readahead,compat_sys_s390_readahead) |
234 | SYSCALL(sys_sendfile64,sys_ni_syscall,compat_sys_sendfile64) | 234 | SYSCALL(sys_ni_syscall,compat_sys_sendfile64) |
235 | SYSCALL(sys_setxattr,sys_setxattr,compat_sys_setxattr) | 235 | SYSCALL(sys_setxattr,compat_sys_setxattr) |
236 | SYSCALL(sys_lsetxattr,sys_lsetxattr,compat_sys_lsetxattr) /* 225 */ | 236 | SYSCALL(sys_lsetxattr,compat_sys_lsetxattr) /* 225 */ |
237 | SYSCALL(sys_fsetxattr,sys_fsetxattr,compat_sys_fsetxattr) | 237 | SYSCALL(sys_fsetxattr,compat_sys_fsetxattr) |
238 | SYSCALL(sys_getxattr,sys_getxattr,compat_sys_getxattr) | 238 | SYSCALL(sys_getxattr,compat_sys_getxattr) |
239 | SYSCALL(sys_lgetxattr,sys_lgetxattr,compat_sys_lgetxattr) | 239 | SYSCALL(sys_lgetxattr,compat_sys_lgetxattr) |
240 | SYSCALL(sys_fgetxattr,sys_fgetxattr,compat_sys_fgetxattr) | 240 | SYSCALL(sys_fgetxattr,compat_sys_fgetxattr) |
241 | SYSCALL(sys_listxattr,sys_listxattr,compat_sys_listxattr) /* 230 */ | 241 | SYSCALL(sys_listxattr,compat_sys_listxattr) /* 230 */ |
242 | SYSCALL(sys_llistxattr,sys_llistxattr,compat_sys_llistxattr) | 242 | SYSCALL(sys_llistxattr,compat_sys_llistxattr) |
243 | SYSCALL(sys_flistxattr,sys_flistxattr,compat_sys_flistxattr) | 243 | SYSCALL(sys_flistxattr,compat_sys_flistxattr) |
244 | SYSCALL(sys_removexattr,sys_removexattr,compat_sys_removexattr) | 244 | SYSCALL(sys_removexattr,compat_sys_removexattr) |
245 | SYSCALL(sys_lremovexattr,sys_lremovexattr,compat_sys_lremovexattr) | 245 | SYSCALL(sys_lremovexattr,compat_sys_lremovexattr) |
246 | SYSCALL(sys_fremovexattr,sys_fremovexattr,compat_sys_fremovexattr) /* 235 */ | 246 | SYSCALL(sys_fremovexattr,compat_sys_fremovexattr) /* 235 */ |
247 | SYSCALL(sys_gettid,sys_gettid,sys_gettid) | 247 | SYSCALL(sys_gettid,sys_gettid) |
248 | SYSCALL(sys_tkill,sys_tkill,compat_sys_tkill) | 248 | SYSCALL(sys_tkill,compat_sys_tkill) |
249 | SYSCALL(sys_futex,sys_futex,compat_sys_futex) | 249 | SYSCALL(sys_futex,compat_sys_futex) |
250 | SYSCALL(sys_sched_setaffinity,sys_sched_setaffinity,compat_sys_sched_setaffinity) | 250 | SYSCALL(sys_sched_setaffinity,compat_sys_sched_setaffinity) |
251 | SYSCALL(sys_sched_getaffinity,sys_sched_getaffinity,compat_sys_sched_getaffinity) /* 240 */ | 251 | SYSCALL(sys_sched_getaffinity,compat_sys_sched_getaffinity) /* 240 */ |
252 | SYSCALL(sys_tgkill,sys_tgkill,compat_sys_tgkill) | 252 | SYSCALL(sys_tgkill,compat_sys_tgkill) |
253 | NI_SYSCALL /* reserved for TUX */ | 253 | NI_SYSCALL /* reserved for TUX */ |
254 | SYSCALL(sys_io_setup,sys_io_setup,compat_sys_io_setup) | 254 | SYSCALL(sys_io_setup,compat_sys_io_setup) |
255 | SYSCALL(sys_io_destroy,sys_io_destroy,compat_sys_io_destroy) | 255 | SYSCALL(sys_io_destroy,compat_sys_io_destroy) |
256 | SYSCALL(sys_io_getevents,sys_io_getevents,compat_sys_io_getevents) /* 245 */ | 256 | SYSCALL(sys_io_getevents,compat_sys_io_getevents) /* 245 */ |
257 | SYSCALL(sys_io_submit,sys_io_submit,compat_sys_io_submit) | 257 | SYSCALL(sys_io_submit,compat_sys_io_submit) |
258 | SYSCALL(sys_io_cancel,sys_io_cancel,compat_sys_io_cancel) | 258 | SYSCALL(sys_io_cancel,compat_sys_io_cancel) |
259 | SYSCALL(sys_exit_group,sys_exit_group,compat_sys_exit_group) | 259 | SYSCALL(sys_exit_group,compat_sys_exit_group) |
260 | SYSCALL(sys_epoll_create,sys_epoll_create,compat_sys_epoll_create) | 260 | SYSCALL(sys_epoll_create,compat_sys_epoll_create) |
261 | SYSCALL(sys_epoll_ctl,sys_epoll_ctl,compat_sys_epoll_ctl) /* 250 */ | 261 | SYSCALL(sys_epoll_ctl,compat_sys_epoll_ctl) /* 250 */ |
262 | SYSCALL(sys_epoll_wait,sys_epoll_wait,compat_sys_epoll_wait) | 262 | SYSCALL(sys_epoll_wait,compat_sys_epoll_wait) |
263 | SYSCALL(sys_set_tid_address,sys_set_tid_address,compat_sys_set_tid_address) | 263 | SYSCALL(sys_set_tid_address,compat_sys_set_tid_address) |
264 | SYSCALL(sys_s390_fadvise64,sys_fadvise64_64,compat_sys_s390_fadvise64) | 264 | SYSCALL(sys_fadvise64_64,compat_sys_s390_fadvise64) |
265 | SYSCALL(sys_timer_create,sys_timer_create,compat_sys_timer_create) | 265 | SYSCALL(sys_timer_create,compat_sys_timer_create) |
266 | SYSCALL(sys_timer_settime,sys_timer_settime,compat_sys_timer_settime) /* 255 */ | 266 | SYSCALL(sys_timer_settime,compat_sys_timer_settime) /* 255 */ |
267 | SYSCALL(sys_timer_gettime,sys_timer_gettime,compat_sys_timer_gettime) | 267 | SYSCALL(sys_timer_gettime,compat_sys_timer_gettime) |
268 | SYSCALL(sys_timer_getoverrun,sys_timer_getoverrun,compat_sys_timer_getoverrun) | 268 | SYSCALL(sys_timer_getoverrun,compat_sys_timer_getoverrun) |
269 | SYSCALL(sys_timer_delete,sys_timer_delete,compat_sys_timer_delete) | 269 | SYSCALL(sys_timer_delete,compat_sys_timer_delete) |
270 | SYSCALL(sys_clock_settime,sys_clock_settime,compat_sys_clock_settime) | 270 | SYSCALL(sys_clock_settime,compat_sys_clock_settime) |
271 | SYSCALL(sys_clock_gettime,sys_clock_gettime,compat_sys_clock_gettime) /* 260 */ | 271 | SYSCALL(sys_clock_gettime,compat_sys_clock_gettime) /* 260 */ |
272 | SYSCALL(sys_clock_getres,sys_clock_getres,compat_sys_clock_getres) | 272 | SYSCALL(sys_clock_getres,compat_sys_clock_getres) |
273 | SYSCALL(sys_clock_nanosleep,sys_clock_nanosleep,compat_sys_clock_nanosleep) | 273 | SYSCALL(sys_clock_nanosleep,compat_sys_clock_nanosleep) |
274 | NI_SYSCALL /* reserved for vserver */ | 274 | NI_SYSCALL /* reserved for vserver */ |
275 | SYSCALL(sys_s390_fadvise64_64,sys_ni_syscall,compat_sys_s390_fadvise64_64) | 275 | SYSCALL(sys_ni_syscall,compat_sys_s390_fadvise64_64) |
276 | SYSCALL(sys_statfs64,sys_statfs64,compat_sys_statfs64) | 276 | SYSCALL(sys_statfs64,compat_sys_statfs64) |
277 | SYSCALL(sys_fstatfs64,sys_fstatfs64,compat_sys_fstatfs64) | 277 | SYSCALL(sys_fstatfs64,compat_sys_fstatfs64) |
278 | SYSCALL(sys_remap_file_pages,sys_remap_file_pages,compat_sys_remap_file_pages) | 278 | SYSCALL(sys_remap_file_pages,compat_sys_remap_file_pages) |
279 | NI_SYSCALL /* 268 sys_mbind */ | 279 | NI_SYSCALL /* 268 sys_mbind */ |
280 | NI_SYSCALL /* 269 sys_get_mempolicy */ | 280 | NI_SYSCALL /* 269 sys_get_mempolicy */ |
281 | NI_SYSCALL /* 270 sys_set_mempolicy */ | 281 | NI_SYSCALL /* 270 sys_set_mempolicy */ |
282 | SYSCALL(sys_mq_open,sys_mq_open,compat_sys_mq_open) | 282 | SYSCALL(sys_mq_open,compat_sys_mq_open) |
283 | SYSCALL(sys_mq_unlink,sys_mq_unlink,compat_sys_mq_unlink) | 283 | SYSCALL(sys_mq_unlink,compat_sys_mq_unlink) |
284 | SYSCALL(sys_mq_timedsend,sys_mq_timedsend,compat_sys_mq_timedsend) | 284 | SYSCALL(sys_mq_timedsend,compat_sys_mq_timedsend) |
285 | SYSCALL(sys_mq_timedreceive,sys_mq_timedreceive,compat_sys_mq_timedreceive) | 285 | SYSCALL(sys_mq_timedreceive,compat_sys_mq_timedreceive) |
286 | SYSCALL(sys_mq_notify,sys_mq_notify,compat_sys_mq_notify) /* 275 */ | 286 | SYSCALL(sys_mq_notify,compat_sys_mq_notify) /* 275 */ |
287 | SYSCALL(sys_mq_getsetattr,sys_mq_getsetattr,compat_sys_mq_getsetattr) | 287 | SYSCALL(sys_mq_getsetattr,compat_sys_mq_getsetattr) |
288 | SYSCALL(sys_kexec_load,sys_kexec_load,compat_sys_kexec_load) | 288 | SYSCALL(sys_kexec_load,compat_sys_kexec_load) |
289 | SYSCALL(sys_add_key,sys_add_key,compat_sys_add_key) | 289 | SYSCALL(sys_add_key,compat_sys_add_key) |
290 | SYSCALL(sys_request_key,sys_request_key,compat_sys_request_key) | 290 | SYSCALL(sys_request_key,compat_sys_request_key) |
291 | SYSCALL(sys_keyctl,sys_keyctl,compat_sys_keyctl) /* 280 */ | 291 | SYSCALL(sys_keyctl,compat_sys_keyctl) /* 280 */ |
292 | SYSCALL(sys_waitid,sys_waitid,compat_sys_waitid) | 292 | SYSCALL(sys_waitid,compat_sys_waitid) |
293 | SYSCALL(sys_ioprio_set,sys_ioprio_set,compat_sys_ioprio_set) | 293 | SYSCALL(sys_ioprio_set,compat_sys_ioprio_set) |
294 | SYSCALL(sys_ioprio_get,sys_ioprio_get,compat_sys_ioprio_get) | 294 | SYSCALL(sys_ioprio_get,compat_sys_ioprio_get) |
295 | SYSCALL(sys_inotify_init,sys_inotify_init,sys_inotify_init) | 295 | SYSCALL(sys_inotify_init,sys_inotify_init) |
296 | SYSCALL(sys_inotify_add_watch,sys_inotify_add_watch,compat_sys_inotify_add_watch) /* 285 */ | 296 | SYSCALL(sys_inotify_add_watch,compat_sys_inotify_add_watch) /* 285 */ |
297 | SYSCALL(sys_inotify_rm_watch,sys_inotify_rm_watch,compat_sys_inotify_rm_watch) | 297 | SYSCALL(sys_inotify_rm_watch,compat_sys_inotify_rm_watch) |
298 | NI_SYSCALL /* 287 sys_migrate_pages */ | 298 | NI_SYSCALL /* 287 sys_migrate_pages */ |
299 | SYSCALL(sys_openat,sys_openat,compat_sys_openat) | 299 | SYSCALL(sys_openat,compat_sys_openat) |
300 | SYSCALL(sys_mkdirat,sys_mkdirat,compat_sys_mkdirat) | 300 | SYSCALL(sys_mkdirat,compat_sys_mkdirat) |
301 | SYSCALL(sys_mknodat,sys_mknodat,compat_sys_mknodat) /* 290 */ | 301 | SYSCALL(sys_mknodat,compat_sys_mknodat) /* 290 */ |
302 | SYSCALL(sys_fchownat,sys_fchownat,compat_sys_fchownat) | 302 | SYSCALL(sys_fchownat,compat_sys_fchownat) |
303 | SYSCALL(sys_futimesat,sys_futimesat,compat_sys_futimesat) | 303 | SYSCALL(sys_futimesat,compat_sys_futimesat) |
304 | SYSCALL(sys_fstatat64,sys_newfstatat,compat_sys_s390_fstatat64) | 304 | SYSCALL(sys_newfstatat,compat_sys_s390_fstatat64) |
305 | SYSCALL(sys_unlinkat,sys_unlinkat,compat_sys_unlinkat) | 305 | SYSCALL(sys_unlinkat,compat_sys_unlinkat) |
306 | SYSCALL(sys_renameat,sys_renameat,compat_sys_renameat) /* 295 */ | 306 | SYSCALL(sys_renameat,compat_sys_renameat) /* 295 */ |
307 | SYSCALL(sys_linkat,sys_linkat,compat_sys_linkat) | 307 | SYSCALL(sys_linkat,compat_sys_linkat) |
308 | SYSCALL(sys_symlinkat,sys_symlinkat,compat_sys_symlinkat) | 308 | SYSCALL(sys_symlinkat,compat_sys_symlinkat) |
309 | SYSCALL(sys_readlinkat,sys_readlinkat,compat_sys_readlinkat) | 309 | SYSCALL(sys_readlinkat,compat_sys_readlinkat) |
310 | SYSCALL(sys_fchmodat,sys_fchmodat,compat_sys_fchmodat) | 310 | SYSCALL(sys_fchmodat,compat_sys_fchmodat) |
311 | SYSCALL(sys_faccessat,sys_faccessat,compat_sys_faccessat) /* 300 */ | 311 | SYSCALL(sys_faccessat,compat_sys_faccessat) /* 300 */ |
312 | SYSCALL(sys_pselect6,sys_pselect6,compat_sys_pselect6) | 312 | SYSCALL(sys_pselect6,compat_sys_pselect6) |
313 | SYSCALL(sys_ppoll,sys_ppoll,compat_sys_ppoll) | 313 | SYSCALL(sys_ppoll,compat_sys_ppoll) |
314 | SYSCALL(sys_unshare,sys_unshare,compat_sys_unshare) | 314 | SYSCALL(sys_unshare,compat_sys_unshare) |
315 | SYSCALL(sys_set_robust_list,sys_set_robust_list,compat_sys_set_robust_list) | 315 | SYSCALL(sys_set_robust_list,compat_sys_set_robust_list) |
316 | SYSCALL(sys_get_robust_list,sys_get_robust_list,compat_sys_get_robust_list) | 316 | SYSCALL(sys_get_robust_list,compat_sys_get_robust_list) |
317 | SYSCALL(sys_splice,sys_splice,compat_sys_splice) | 317 | SYSCALL(sys_splice,compat_sys_splice) |
318 | SYSCALL(sys_sync_file_range,sys_sync_file_range,compat_sys_s390_sync_file_range) | 318 | SYSCALL(sys_sync_file_range,compat_sys_s390_sync_file_range) |
319 | SYSCALL(sys_tee,sys_tee,compat_sys_tee) | 319 | SYSCALL(sys_tee,compat_sys_tee) |
320 | SYSCALL(sys_vmsplice,sys_vmsplice,compat_sys_vmsplice) | 320 | SYSCALL(sys_vmsplice,compat_sys_vmsplice) |
321 | NI_SYSCALL /* 310 sys_move_pages */ | 321 | NI_SYSCALL /* 310 sys_move_pages */ |
322 | SYSCALL(sys_getcpu,sys_getcpu,compat_sys_getcpu) | 322 | SYSCALL(sys_getcpu,compat_sys_getcpu) |
323 | SYSCALL(sys_epoll_pwait,sys_epoll_pwait,compat_sys_epoll_pwait) | 323 | SYSCALL(sys_epoll_pwait,compat_sys_epoll_pwait) |
324 | SYSCALL(sys_utimes,sys_utimes,compat_sys_utimes) | 324 | SYSCALL(sys_utimes,compat_sys_utimes) |
325 | SYSCALL(sys_s390_fallocate,sys_fallocate,compat_sys_s390_fallocate) | 325 | SYSCALL(sys_fallocate,compat_sys_s390_fallocate) |
326 | SYSCALL(sys_utimensat,sys_utimensat,compat_sys_utimensat) /* 315 */ | 326 | SYSCALL(sys_utimensat,compat_sys_utimensat) /* 315 */ |
327 | SYSCALL(sys_signalfd,sys_signalfd,compat_sys_signalfd) | 327 | SYSCALL(sys_signalfd,compat_sys_signalfd) |
328 | NI_SYSCALL /* 317 old sys_timer_fd */ | 328 | NI_SYSCALL /* 317 old sys_timer_fd */ |
329 | SYSCALL(sys_eventfd,sys_eventfd,compat_sys_eventfd) | 329 | SYSCALL(sys_eventfd,compat_sys_eventfd) |
330 | SYSCALL(sys_timerfd_create,sys_timerfd_create,compat_sys_timerfd_create) | 330 | SYSCALL(sys_timerfd_create,compat_sys_timerfd_create) |
331 | SYSCALL(sys_timerfd_settime,sys_timerfd_settime,compat_sys_timerfd_settime) /* 320 */ | 331 | SYSCALL(sys_timerfd_settime,compat_sys_timerfd_settime) /* 320 */ |
332 | SYSCALL(sys_timerfd_gettime,sys_timerfd_gettime,compat_sys_timerfd_gettime) | 332 | SYSCALL(sys_timerfd_gettime,compat_sys_timerfd_gettime) |
333 | SYSCALL(sys_signalfd4,sys_signalfd4,compat_sys_signalfd4) | 333 | SYSCALL(sys_signalfd4,compat_sys_signalfd4) |
334 | SYSCALL(sys_eventfd2,sys_eventfd2,compat_sys_eventfd2) | 334 | SYSCALL(sys_eventfd2,compat_sys_eventfd2) |
335 | SYSCALL(sys_inotify_init1,sys_inotify_init1,compat_sys_inotify_init1) | 335 | SYSCALL(sys_inotify_init1,compat_sys_inotify_init1) |
336 | SYSCALL(sys_pipe2,sys_pipe2,compat_sys_pipe2) /* 325 */ | 336 | SYSCALL(sys_pipe2,compat_sys_pipe2) /* 325 */ |
337 | SYSCALL(sys_dup3,sys_dup3,compat_sys_dup3) | 337 | SYSCALL(sys_dup3,compat_sys_dup3) |
338 | SYSCALL(sys_epoll_create1,sys_epoll_create1,compat_sys_epoll_create1) | 338 | SYSCALL(sys_epoll_create1,compat_sys_epoll_create1) |
339 | SYSCALL(sys_preadv,sys_preadv,compat_sys_preadv) | 339 | SYSCALL(sys_preadv,compat_sys_preadv) |
340 | SYSCALL(sys_pwritev,sys_pwritev,compat_sys_pwritev) | 340 | SYSCALL(sys_pwritev,compat_sys_pwritev) |
341 | SYSCALL(sys_rt_tgsigqueueinfo,sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo) /* 330 */ | 341 | SYSCALL(sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo) /* 330 */ |
342 | SYSCALL(sys_perf_event_open,sys_perf_event_open,compat_sys_perf_event_open) | 342 | SYSCALL(sys_perf_event_open,compat_sys_perf_event_open) |
343 | SYSCALL(sys_fanotify_init,sys_fanotify_init,compat_sys_fanotify_init) | 343 | SYSCALL(sys_fanotify_init,compat_sys_fanotify_init) |
344 | SYSCALL(sys_fanotify_mark,sys_fanotify_mark,compat_sys_fanotify_mark) | 344 | SYSCALL(sys_fanotify_mark,compat_sys_fanotify_mark) |
345 | SYSCALL(sys_prlimit64,sys_prlimit64,compat_sys_prlimit64) | 345 | SYSCALL(sys_prlimit64,compat_sys_prlimit64) |
346 | SYSCALL(sys_name_to_handle_at,sys_name_to_handle_at,compat_sys_name_to_handle_at) /* 335 */ | 346 | SYSCALL(sys_name_to_handle_at,compat_sys_name_to_handle_at) /* 335 */ |
347 | SYSCALL(sys_open_by_handle_at,sys_open_by_handle_at,compat_sys_open_by_handle_at) | 347 | SYSCALL(sys_open_by_handle_at,compat_sys_open_by_handle_at) |
348 | SYSCALL(sys_clock_adjtime,sys_clock_adjtime,compat_sys_clock_adjtime) | 348 | SYSCALL(sys_clock_adjtime,compat_sys_clock_adjtime) |
349 | SYSCALL(sys_syncfs,sys_syncfs,compat_sys_syncfs) | 349 | SYSCALL(sys_syncfs,compat_sys_syncfs) |
350 | SYSCALL(sys_setns,sys_setns,compat_sys_setns) | 350 | SYSCALL(sys_setns,compat_sys_setns) |
351 | SYSCALL(sys_process_vm_readv,sys_process_vm_readv,compat_sys_process_vm_readv) /* 340 */ | 351 | SYSCALL(sys_process_vm_readv,compat_sys_process_vm_readv) /* 340 */ |
352 | SYSCALL(sys_process_vm_writev,sys_process_vm_writev,compat_sys_process_vm_writev) | 352 | SYSCALL(sys_process_vm_writev,compat_sys_process_vm_writev) |
353 | SYSCALL(sys_ni_syscall,sys_s390_runtime_instr,compat_sys_s390_runtime_instr) | 353 | SYSCALL(sys_s390_runtime_instr,compat_sys_s390_runtime_instr) |
354 | SYSCALL(sys_kcmp,sys_kcmp,compat_sys_kcmp) | 354 | SYSCALL(sys_kcmp,compat_sys_kcmp) |
355 | SYSCALL(sys_finit_module,sys_finit_module,compat_sys_finit_module) | 355 | SYSCALL(sys_finit_module,compat_sys_finit_module) |
356 | SYSCALL(sys_sched_setattr,sys_sched_setattr,compat_sys_sched_setattr) /* 345 */ | 356 | SYSCALL(sys_sched_setattr,compat_sys_sched_setattr) /* 345 */ |
357 | SYSCALL(sys_sched_getattr,sys_sched_getattr,compat_sys_sched_getattr) | 357 | SYSCALL(sys_sched_getattr,compat_sys_sched_getattr) |
358 | SYSCALL(sys_renameat2,sys_renameat2,compat_sys_renameat2) | 358 | SYSCALL(sys_renameat2,compat_sys_renameat2) |
359 | SYSCALL(sys_seccomp,sys_seccomp,compat_sys_seccomp) | 359 | SYSCALL(sys_seccomp,compat_sys_seccomp) |
360 | SYSCALL(sys_getrandom,sys_getrandom,compat_sys_getrandom) | 360 | SYSCALL(sys_getrandom,compat_sys_getrandom) |
361 | SYSCALL(sys_memfd_create,sys_memfd_create,compat_sys_memfd_create) /* 350 */ | 361 | SYSCALL(sys_memfd_create,compat_sys_memfd_create) /* 350 */ |
362 | SYSCALL(sys_bpf,sys_bpf,compat_sys_bpf) | 362 | SYSCALL(sys_bpf,compat_sys_bpf) |
363 | SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_write,compat_sys_s390_pci_mmio_write) | 363 | SYSCALL(sys_s390_pci_mmio_write,compat_sys_s390_pci_mmio_write) |
364 | SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_read,compat_sys_s390_pci_mmio_read) | 364 | SYSCALL(sys_s390_pci_mmio_read,compat_sys_s390_pci_mmio_read) |
365 | SYSCALL(sys_execveat,sys_execveat,compat_sys_execveat) | 365 | SYSCALL(sys_execveat,compat_sys_execveat) |
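The hunk above drops the first column from every SYSCALL() entry in arch/s390/kernel/syscalls.S: with the 31-bit kernel gone, each slot only needs the 64-bit entry point plus the compat wrapper that serves 31-bit user space. Below is a minimal, self-contained sketch of how such a two-column list can be expanded twice into two dispatch tables; the table names and the demo handlers are illustrative stand-ins, not the kernel's actual symbols or its assembler table layout.

#include <stdio.h>

typedef long (*syscall_fn)(void);

static long sys_demo(void)        { return 64; }   /* stand-in 64-bit handler */
static long compat_sys_demo(void) { return 31; }   /* stand-in compat handler */

/* The entry list is written once... */
#define SYSCALL_LIST \
	SYSCALL(sys_demo, compat_sys_demo)

/* ...and expanded twice: first into the native 64-bit table... */
#define SYSCALL(native, compat) native,
static const syscall_fn native_table[] = { SYSCALL_LIST };
#undef SYSCALL

/* ...then into the compat table used for 31-bit user space. */
#define SYSCALL(native, compat) compat,
static const syscall_fn compat_table[] = { SYSCALL_LIST };
#undef SYSCALL

int main(void)
{
	printf("native slot 0 -> %ld\n", native_table[0]());
	printf("compat slot 0 -> %ld\n", compat_table[0]());
	return 0;
}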
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index 14da43b801d9..5728c5bd44a8 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c | |||
@@ -421,7 +421,7 @@ int topology_cpu_init(struct cpu *cpu) | |||
421 | return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group); | 421 | return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group); |
422 | } | 422 | } |
423 | 423 | ||
424 | const struct cpumask *cpu_thread_mask(int cpu) | 424 | static const struct cpumask *cpu_thread_mask(int cpu) |
425 | { | 425 | { |
426 | return &per_cpu(cpu_topology, cpu).thread_mask; | 426 | return &per_cpu(cpu_topology, cpu).thread_mask; |
427 | } | 427 | } |
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index f081cf1157c3..4d96c9f53455 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c | |||
@@ -26,7 +26,6 @@ int show_unhandled_signals = 1; | |||
26 | 26 | ||
27 | static inline void __user *get_trap_ip(struct pt_regs *regs) | 27 | static inline void __user *get_trap_ip(struct pt_regs *regs) |
28 | { | 28 | { |
29 | #ifdef CONFIG_64BIT | ||
30 | unsigned long address; | 29 | unsigned long address; |
31 | 30 | ||
32 | if (regs->int_code & 0x200) | 31 | if (regs->int_code & 0x200) |
@@ -35,10 +34,6 @@ static inline void __user *get_trap_ip(struct pt_regs *regs) | |||
35 | address = regs->psw.addr; | 34 | address = regs->psw.addr; |
36 | return (void __user *) | 35 | return (void __user *) |
37 | ((address - (regs->int_code >> 16)) & PSW_ADDR_INSN); | 36 | ((address - (regs->int_code >> 16)) & PSW_ADDR_INSN); |
38 | #else | ||
39 | return (void __user *) | ||
40 | ((regs->psw.addr - (regs->int_code >> 16)) & PSW_ADDR_INSN); | ||
41 | #endif | ||
42 | } | 37 | } |
43 | 38 | ||
44 | static inline void report_user_fault(struct pt_regs *regs, int signr) | 39 | static inline void report_user_fault(struct pt_regs *regs, int signr) |
@@ -153,11 +148,8 @@ DO_ERROR_INFO(privileged_op, SIGILL, ILL_PRVOPC, | |||
153 | "privileged operation") | 148 | "privileged operation") |
154 | DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN, | 149 | DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN, |
155 | "special operation exception") | 150 | "special operation exception") |
156 | |||
157 | #ifdef CONFIG_64BIT | ||
158 | DO_ERROR_INFO(transaction_exception, SIGILL, ILL_ILLOPN, | 151 | DO_ERROR_INFO(transaction_exception, SIGILL, ILL_ILLOPN, |
159 | "transaction constraint exception") | 152 | "transaction constraint exception") |
160 | #endif | ||
161 | 153 | ||
162 | static inline void do_fp_trap(struct pt_regs *regs, int fpc) | 154 | static inline void do_fp_trap(struct pt_regs *regs, int fpc) |
163 | { | 155 | { |
@@ -182,7 +174,7 @@ static inline void do_fp_trap(struct pt_regs *regs, int fpc) | |||
182 | void translation_exception(struct pt_regs *regs) | 174 | void translation_exception(struct pt_regs *regs) |
183 | { | 175 | { |
184 | /* May never happen. */ | 176 | /* May never happen. */ |
185 | die(regs, "Translation exception"); | 177 | panic("Translation exception"); |
186 | } | 178 | } |
187 | 179 | ||
188 | void illegal_op(struct pt_regs *regs) | 180 | void illegal_op(struct pt_regs *regs) |
@@ -211,29 +203,6 @@ void illegal_op(struct pt_regs *regs) | |||
211 | } else if (*((__u16 *) opcode) == UPROBE_SWBP_INSN) { | 203 | } else if (*((__u16 *) opcode) == UPROBE_SWBP_INSN) { |
212 | is_uprobe_insn = 1; | 204 | is_uprobe_insn = 1; |
213 | #endif | 205 | #endif |
214 | #ifdef CONFIG_MATHEMU | ||
215 | } else if (opcode[0] == 0xb3) { | ||
216 | if (get_user(*((__u16 *) (opcode+2)), location+1)) | ||
217 | return; | ||
218 | signal = math_emu_b3(opcode, regs); | ||
219 | } else if (opcode[0] == 0xed) { | ||
220 | if (get_user(*((__u32 *) (opcode+2)), | ||
221 | (__u32 __user *)(location+1))) | ||
222 | return; | ||
223 | signal = math_emu_ed(opcode, regs); | ||
224 | } else if (*((__u16 *) opcode) == 0xb299) { | ||
225 | if (get_user(*((__u16 *) (opcode+2)), location+1)) | ||
226 | return; | ||
227 | signal = math_emu_srnm(opcode, regs); | ||
228 | } else if (*((__u16 *) opcode) == 0xb29c) { | ||
229 | if (get_user(*((__u16 *) (opcode+2)), location+1)) | ||
230 | return; | ||
231 | signal = math_emu_stfpc(opcode, regs); | ||
232 | } else if (*((__u16 *) opcode) == 0xb29d) { | ||
233 | if (get_user(*((__u16 *) (opcode+2)), location+1)) | ||
234 | return; | ||
235 | signal = math_emu_lfpc(opcode, regs); | ||
236 | #endif | ||
237 | } else | 206 | } else |
238 | signal = SIGILL; | 207 | signal = SIGILL; |
239 | } | 208 | } |
@@ -247,71 +216,14 @@ void illegal_op(struct pt_regs *regs) | |||
247 | 3, SIGTRAP) != NOTIFY_STOP) | 216 | 3, SIGTRAP) != NOTIFY_STOP) |
248 | signal = SIGILL; | 217 | signal = SIGILL; |
249 | } | 218 | } |
250 | |||
251 | #ifdef CONFIG_MATHEMU | ||
252 | if (signal == SIGFPE) | ||
253 | do_fp_trap(regs, current->thread.fp_regs.fpc); | ||
254 | else if (signal == SIGSEGV) | ||
255 | do_trap(regs, signal, SEGV_MAPERR, "user address fault"); | ||
256 | else | ||
257 | #endif | ||
258 | if (signal) | 219 | if (signal) |
259 | do_trap(regs, signal, ILL_ILLOPC, "illegal operation"); | 220 | do_trap(regs, signal, ILL_ILLOPC, "illegal operation"); |
260 | } | 221 | } |
261 | NOKPROBE_SYMBOL(illegal_op); | 222 | NOKPROBE_SYMBOL(illegal_op); |
262 | 223 | ||
263 | #ifdef CONFIG_MATHEMU | ||
264 | void specification_exception(struct pt_regs *regs) | ||
265 | { | ||
266 | __u8 opcode[6]; | ||
267 | __u16 __user *location = NULL; | ||
268 | int signal = 0; | ||
269 | |||
270 | location = (__u16 __user *) get_trap_ip(regs); | ||
271 | |||
272 | if (user_mode(regs)) { | ||
273 | get_user(*((__u16 *) opcode), location); | ||
274 | switch (opcode[0]) { | ||
275 | case 0x28: /* LDR Rx,Ry */ | ||
276 | signal = math_emu_ldr(opcode); | ||
277 | break; | ||
278 | case 0x38: /* LER Rx,Ry */ | ||
279 | signal = math_emu_ler(opcode); | ||
280 | break; | ||
281 | case 0x60: /* STD R,D(X,B) */ | ||
282 | get_user(*((__u16 *) (opcode+2)), location+1); | ||
283 | signal = math_emu_std(opcode, regs); | ||
284 | break; | ||
285 | case 0x68: /* LD R,D(X,B) */ | ||
286 | get_user(*((__u16 *) (opcode+2)), location+1); | ||
287 | signal = math_emu_ld(opcode, regs); | ||
288 | break; | ||
289 | case 0x70: /* STE R,D(X,B) */ | ||
290 | get_user(*((__u16 *) (opcode+2)), location+1); | ||
291 | signal = math_emu_ste(opcode, regs); | ||
292 | break; | ||
293 | case 0x78: /* LE R,D(X,B) */ | ||
294 | get_user(*((__u16 *) (opcode+2)), location+1); | ||
295 | signal = math_emu_le(opcode, regs); | ||
296 | break; | ||
297 | default: | ||
298 | signal = SIGILL; | ||
299 | break; | ||
300 | } | ||
301 | } else | ||
302 | signal = SIGILL; | ||
303 | |||
304 | if (signal == SIGFPE) | ||
305 | do_fp_trap(regs, current->thread.fp_regs.fpc); | ||
306 | else if (signal) | ||
307 | do_trap(regs, signal, ILL_ILLOPN, "specification exception"); | ||
308 | } | ||
309 | #else | ||
310 | DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN, | 224 | DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN, |
311 | "specification exception"); | 225 | "specification exception"); |
312 | #endif | ||
313 | 226 | ||
314 | #ifdef CONFIG_64BIT | ||
315 | int alloc_vector_registers(struct task_struct *tsk) | 227 | int alloc_vector_registers(struct task_struct *tsk) |
316 | { | 228 | { |
317 | __vector128 *vxrs; | 229 | __vector128 *vxrs; |
@@ -377,7 +289,6 @@ static int __init disable_vector_extension(char *str) | |||
377 | return 1; | 289 | return 1; |
378 | } | 290 | } |
379 | __setup("novx", disable_vector_extension); | 291 | __setup("novx", disable_vector_extension); |
380 | #endif | ||
381 | 292 | ||
382 | void data_exception(struct pt_regs *regs) | 293 | void data_exception(struct pt_regs *regs) |
383 | { | 294 | { |
@@ -386,65 +297,7 @@ void data_exception(struct pt_regs *regs) | |||
386 | 297 | ||
387 | location = get_trap_ip(regs); | 298 | location = get_trap_ip(regs); |
388 | 299 | ||
389 | if (MACHINE_HAS_IEEE) | 300 | asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc)); |
390 | asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc)); | ||
391 | |||
392 | #ifdef CONFIG_MATHEMU | ||
393 | else if (user_mode(regs)) { | ||
394 | __u8 opcode[6]; | ||
395 | get_user(*((__u16 *) opcode), location); | ||
396 | switch (opcode[0]) { | ||
397 | case 0x28: /* LDR Rx,Ry */ | ||
398 | signal = math_emu_ldr(opcode); | ||
399 | break; | ||
400 | case 0x38: /* LER Rx,Ry */ | ||
401 | signal = math_emu_ler(opcode); | ||
402 | break; | ||
403 | case 0x60: /* STD R,D(X,B) */ | ||
404 | get_user(*((__u16 *) (opcode+2)), location+1); | ||
405 | signal = math_emu_std(opcode, regs); | ||
406 | break; | ||
407 | case 0x68: /* LD R,D(X,B) */ | ||
408 | get_user(*((__u16 *) (opcode+2)), location+1); | ||
409 | signal = math_emu_ld(opcode, regs); | ||
410 | break; | ||
411 | case 0x70: /* STE R,D(X,B) */ | ||
412 | get_user(*((__u16 *) (opcode+2)), location+1); | ||
413 | signal = math_emu_ste(opcode, regs); | ||
414 | break; | ||
415 | case 0x78: /* LE R,D(X,B) */ | ||
416 | get_user(*((__u16 *) (opcode+2)), location+1); | ||
417 | signal = math_emu_le(opcode, regs); | ||
418 | break; | ||
419 | case 0xb3: | ||
420 | get_user(*((__u16 *) (opcode+2)), location+1); | ||
421 | signal = math_emu_b3(opcode, regs); | ||
422 | break; | ||
423 | case 0xed: | ||
424 | get_user(*((__u32 *) (opcode+2)), | ||
425 | (__u32 __user *)(location+1)); | ||
426 | signal = math_emu_ed(opcode, regs); | ||
427 | break; | ||
428 | case 0xb2: | ||
429 | if (opcode[1] == 0x99) { | ||
430 | get_user(*((__u16 *) (opcode+2)), location+1); | ||
431 | signal = math_emu_srnm(opcode, regs); | ||
432 | } else if (opcode[1] == 0x9c) { | ||
433 | get_user(*((__u16 *) (opcode+2)), location+1); | ||
434 | signal = math_emu_stfpc(opcode, regs); | ||
435 | } else if (opcode[1] == 0x9d) { | ||
436 | get_user(*((__u16 *) (opcode+2)), location+1); | ||
437 | signal = math_emu_lfpc(opcode, regs); | ||
438 | } else | ||
439 | signal = SIGILL; | ||
440 | break; | ||
441 | default: | ||
442 | signal = SIGILL; | ||
443 | break; | ||
444 | } | ||
445 | } | ||
446 | #endif | ||
447 | #ifdef CONFIG_64BIT | ||
448 | /* Check for vector register enablement */ | 301 | /* Check for vector register enablement */ |
449 | if (MACHINE_HAS_VX && !current->thread.vxrs && | 302 | if (MACHINE_HAS_VX && !current->thread.vxrs && |
450 | (current->thread.fp_regs.fpc & FPC_DXC_MASK) == 0xfe00) { | 303 | (current->thread.fp_regs.fpc & FPC_DXC_MASK) == 0xfe00) { |
@@ -454,13 +307,11 @@ void data_exception(struct pt_regs *regs) | |||
454 | clear_pt_regs_flag(regs, PIF_PER_TRAP); | 307 | clear_pt_regs_flag(regs, PIF_PER_TRAP); |
455 | return; | 308 | return; |
456 | } | 309 | } |
457 | #endif | ||
458 | |||
459 | if (current->thread.fp_regs.fpc & FPC_DXC_MASK) | 310 | if (current->thread.fp_regs.fpc & FPC_DXC_MASK) |
460 | signal = SIGFPE; | 311 | signal = SIGFPE; |
461 | else | 312 | else |
462 | signal = SIGILL; | 313 | signal = SIGILL; |
463 | if (signal == SIGFPE) | 314 | if (signal == SIGFPE) |
464 | do_fp_trap(regs, current->thread.fp_regs.fpc); | 315 | do_fp_trap(regs, current->thread.fp_regs.fpc); |
465 | else if (signal) | 316 | else if (signal) |
466 | do_trap(regs, signal, ILL_ILLOPN, "data exception"); | 317 | do_trap(regs, signal, ILL_ILLOPN, "data exception"); |
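With the CONFIG_MATHEMU paths removed, data_exception() reduces to: store the FPC, lazily enable vector registers when the data-exception code is 0xfe00 on a machine with the vector facility, and otherwise raise SIGFPE or SIGILL from the DXC bits. The following is a hedged, user-space sketch of that control flow only, using simplified stand-ins for the kernel helpers and flags rather than the kernel's own definitions.

#include <stdbool.h>
#include <stdio.h>

#define FPC_DXC_MASK 0x0000ff00u	/* data-exception code byte of the FPC */

static void deliver(const char *sig, const char *why)
{
	printf("deliver %s (%s)\n", sig, why);
}

static void data_exception_sketch(unsigned int fpc, bool machine_has_vx,
				  bool task_has_vxrs)
{
	/* DXC 0xfe00: a vector instruction hit while the vector registers are
	 * disabled; allocate them lazily and resume instead of signalling. */
	if (machine_has_vx && !task_has_vxrs && (fpc & FPC_DXC_MASK) == 0xfe00) {
		printf("allocate vector registers and resume\n");
		return;
	}
	if (fpc & FPC_DXC_MASK)
		deliver("SIGFPE", "IEEE data exception");
	else
		deliver("SIGILL", "data exception");
}

int main(void)
{
	data_exception_sketch(0xfe00, true, false);	/* vector enablement case */
	data_exception_sketch(0x0800, true, true);	/* some DXC bit -> SIGFPE */
	data_exception_sketch(0x0000, false, false);	/* no DXC -> SIGILL */
	return 0;
}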
diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c index cc7328080b60..66956c09d5bf 100644 --- a/arch/s390/kernel/uprobes.c +++ b/arch/s390/kernel/uprobes.c | |||
@@ -188,7 +188,9 @@ static void adjust_psw_addr(psw_t *psw, unsigned long len) | |||
188 | else if (put_user(*(input), __ptr)) \ | 188 | else if (put_user(*(input), __ptr)) \ |
189 | __rc = EMU_ADDRESSING; \ | 189 | __rc = EMU_ADDRESSING; \ |
190 | if (__rc == 0) \ | 190 | if (__rc == 0) \ |
191 | sim_stor_event(regs, __ptr, mask + 1); \ | 191 | sim_stor_event(regs, \ |
192 | (void __force *)__ptr, \ | ||
193 | mask + 1); \ | ||
192 | __rc; \ | 194 | __rc; \ |
193 | }) | 195 | }) |
194 | 196 | ||
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c index 0bbb7e027c5a..0d58269ff425 100644 --- a/arch/s390/kernel/vdso.c +++ b/arch/s390/kernel/vdso.c | |||
@@ -32,19 +32,17 @@ | |||
32 | #include <asm/vdso.h> | 32 | #include <asm/vdso.h> |
33 | #include <asm/facility.h> | 33 | #include <asm/facility.h> |
34 | 34 | ||
35 | #if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT) | 35 | #ifdef CONFIG_COMPAT |
36 | extern char vdso32_start, vdso32_end; | 36 | extern char vdso32_start, vdso32_end; |
37 | static void *vdso32_kbase = &vdso32_start; | 37 | static void *vdso32_kbase = &vdso32_start; |
38 | static unsigned int vdso32_pages; | 38 | static unsigned int vdso32_pages; |
39 | static struct page **vdso32_pagelist; | 39 | static struct page **vdso32_pagelist; |
40 | #endif | 40 | #endif |
41 | 41 | ||
42 | #ifdef CONFIG_64BIT | ||
43 | extern char vdso64_start, vdso64_end; | 42 | extern char vdso64_start, vdso64_end; |
44 | static void *vdso64_kbase = &vdso64_start; | 43 | static void *vdso64_kbase = &vdso64_start; |
45 | static unsigned int vdso64_pages; | 44 | static unsigned int vdso64_pages; |
46 | static struct page **vdso64_pagelist; | 45 | static struct page **vdso64_pagelist; |
47 | #endif /* CONFIG_64BIT */ | ||
48 | 46 | ||
49 | /* | 47 | /* |
50 | * Should the kernel map a VDSO page into processes and pass its | 48 | * Should the kernel map a VDSO page into processes and pass its |
@@ -87,7 +85,6 @@ static void vdso_init_data(struct vdso_data *vd) | |||
87 | vd->ectg_available = test_facility(31); | 85 | vd->ectg_available = test_facility(31); |
88 | } | 86 | } |
89 | 87 | ||
90 | #ifdef CONFIG_64BIT | ||
91 | /* | 88 | /* |
92 | * Allocate/free per cpu vdso data. | 89 | * Allocate/free per cpu vdso data. |
93 | */ | 90 | */ |
@@ -169,7 +166,6 @@ static void vdso_init_cr5(void) | |||
169 | cr5 = offsetof(struct _lowcore, paste); | 166 | cr5 = offsetof(struct _lowcore, paste); |
170 | __ctl_load(cr5, 5, 5); | 167 | __ctl_load(cr5, 5, 5); |
171 | } | 168 | } |
172 | #endif /* CONFIG_64BIT */ | ||
173 | 169 | ||
174 | /* | 170 | /* |
175 | * This is called from binfmt_elf, we create the special vma for the | 171 | * This is called from binfmt_elf, we create the special vma for the |
@@ -191,7 +187,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) | |||
191 | if (!uses_interp) | 187 | if (!uses_interp) |
192 | return 0; | 188 | return 0; |
193 | 189 | ||
194 | #ifdef CONFIG_64BIT | ||
195 | vdso_pagelist = vdso64_pagelist; | 190 | vdso_pagelist = vdso64_pagelist; |
196 | vdso_pages = vdso64_pages; | 191 | vdso_pages = vdso64_pages; |
197 | #ifdef CONFIG_COMPAT | 192 | #ifdef CONFIG_COMPAT |
@@ -200,11 +195,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) | |||
200 | vdso_pages = vdso32_pages; | 195 | vdso_pages = vdso32_pages; |
201 | } | 196 | } |
202 | #endif | 197 | #endif |
203 | #else | ||
204 | vdso_pagelist = vdso32_pagelist; | ||
205 | vdso_pages = vdso32_pages; | ||
206 | #endif | ||
207 | |||
208 | /* | 198 | /* |
209 | * vDSO has a problem and was disabled, just don't "enable" it for | 199 | * vDSO has a problem and was disabled, just don't "enable" it for |
210 | * the process | 200 | * the process |
@@ -268,7 +258,7 @@ static int __init vdso_init(void) | |||
268 | if (!vdso_enabled) | 258 | if (!vdso_enabled) |
269 | return 0; | 259 | return 0; |
270 | vdso_init_data(vdso_data); | 260 | vdso_init_data(vdso_data); |
271 | #if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT) | 261 | #ifdef CONFIG_COMPAT |
272 | /* Calculate the size of the 32 bit vDSO */ | 262 | /* Calculate the size of the 32 bit vDSO */ |
273 | vdso32_pages = ((&vdso32_end - &vdso32_start | 263 | vdso32_pages = ((&vdso32_end - &vdso32_start |
274 | + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1; | 264 | + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1; |
@@ -287,7 +277,6 @@ static int __init vdso_init(void) | |||
287 | vdso32_pagelist[vdso32_pages] = NULL; | 277 | vdso32_pagelist[vdso32_pages] = NULL; |
288 | #endif | 278 | #endif |
289 | 279 | ||
290 | #ifdef CONFIG_64BIT | ||
291 | /* Calculate the size of the 64 bit vDSO */ | 280 | /* Calculate the size of the 64 bit vDSO */ |
292 | vdso64_pages = ((&vdso64_end - &vdso64_start | 281 | vdso64_pages = ((&vdso64_end - &vdso64_start |
293 | + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1; | 282 | + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1; |
@@ -307,7 +296,6 @@ static int __init vdso_init(void) | |||
307 | if (vdso_alloc_per_cpu(&S390_lowcore)) | 296 | if (vdso_alloc_per_cpu(&S390_lowcore)) |
308 | BUG(); | 297 | BUG(); |
309 | vdso_init_cr5(); | 298 | vdso_init_cr5(); |
310 | #endif /* CONFIG_64BIT */ | ||
311 | 299 | ||
312 | get_page(virt_to_page(vdso_data)); | 300 | get_page(virt_to_page(vdso_data)); |
313 | 301 | ||
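After the guard cleanup in vdso.c, the 64-bit vDSO is the only native case and the 32-bit images survive solely behind CONFIG_COMPAT for 31-bit processes. Here is a small sketch of the resulting selection logic in arch_setup_additional_pages(), with simplified stand-in structures and a plain compat flag instead of the kernel's own task test.

#include <stdbool.h>
#include <stdio.h>

struct vdso_image {
	const char *name;
	unsigned int pages;
};

static const struct vdso_image vdso64_img = { "vdso64", 2 };
#ifdef CONFIG_COMPAT
static const struct vdso_image vdso32_img = { "vdso32", 2 };
#endif

static const struct vdso_image *select_vdso(bool compat_task)
{
	const struct vdso_image *img = &vdso64_img;	/* the only native case left */

#ifdef CONFIG_COMPAT
	if (compat_task)
		img = &vdso32_img;			/* 31-bit user space */
#else
	(void)compat_task;
#endif
	return img;
}

int main(void)
{
	printf("64-bit task -> %s\n", select_vdso(false)->name);
	printf("compat task -> %s\n", select_vdso(true)->name);
	return 0;
}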
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index 35b13ed0af5f..445657fe658c 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S | |||
@@ -6,17 +6,10 @@ | |||
6 | #include <asm/page.h> | 6 | #include <asm/page.h> |
7 | #include <asm-generic/vmlinux.lds.h> | 7 | #include <asm-generic/vmlinux.lds.h> |
8 | 8 | ||
9 | #ifndef CONFIG_64BIT | ||
10 | OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390") | ||
11 | OUTPUT_ARCH(s390:31-bit) | ||
12 | ENTRY(startup) | ||
13 | jiffies = jiffies_64 + 4; | ||
14 | #else | ||
15 | OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390") | 9 | OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390") |
16 | OUTPUT_ARCH(s390:64-bit) | 10 | OUTPUT_ARCH(s390:64-bit) |
17 | ENTRY(startup) | 11 | ENTRY(startup) |
18 | jiffies = jiffies_64; | 12 | jiffies = jiffies_64; |
19 | #endif | ||
20 | 13 | ||
21 | PHDRS { | 14 | PHDRS { |
22 | text PT_LOAD FLAGS(5); /* R_E */ | 15 | text PT_LOAD FLAGS(5); /* R_E */ |
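The linker-script hunk also removes the old "jiffies = jiffies_64 + 4" alias: on the big-endian 31-bit target the 32-bit jiffies symbol had to point at the low-order word of the 64-bit counter, 4 bytes in, whereas the 64-bit kernel can alias the counter directly. A tiny, host-independent demo of that offset, with an illustrative counter value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* jiffies_64 == 0x0000000100000002, laid out by hand in big-endian
	 * byte order, as it would sit in s390 memory. */
	const unsigned char jiffies_64_bytes[8] = { 0, 0, 0, 1, 0, 0, 0, 2 };

	/* The old "jiffies = jiffies_64 + 4" alias read the word that starts
	 * 4 bytes in, i.e. the low-order 32 bits of the counter. */
	uint32_t jiffies_31bit =
		((uint32_t)jiffies_64_bytes[4] << 24) |
		((uint32_t)jiffies_64_bytes[5] << 16) |
		((uint32_t)jiffies_64_bytes[6] << 8)  |
		 (uint32_t)jiffies_64_bytes[7];

	printf("31-bit jiffies alias would read 0x%08x\n", jiffies_31bit);
	return 0;
}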