aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
authorHeiko Carstens <heiko.carstens@de.ibm.com>2015-02-12 07:08:27 -0500
committerMartin Schwidefsky <schwidefsky@de.ibm.com>2015-03-25 06:49:33 -0400
commit5a79859ae0f35d25c67a03e82bf0c80592f16a39 (patch)
tree37264d49f069812f19ced94e6ae171814fb7e498 /arch
parent1833c9f647e9bda1cd24653ff8f9c207b5f5b911 (diff)
s390: remove 31 bit support
Remove the 31 bit support in order to reduce maintenance cost and effectively remove dead code. Since a couple of years there is no distribution left that comes with a 31 bit kernel. The 31 bit kernel also has been broken since more than a year before anybody noticed. In addition I added a removal warning to the kernel shown at ipl for 5 minutes: a960062e5826 ("s390: add 31 bit warning message") which let everybody know about the plan to remove 31 bit code. We didn't get any response. Given that the last 31 bit only machine was introduced in 1999 let's remove the code. Anybody with 31 bit user space code can still use the compat mode. Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com> Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch')
-rw-r--r--arch/s390/Kbuild1
-rw-r--r--arch/s390/Kconfig79
-rw-r--r--arch/s390/Makefile16
-rw-r--r--arch/s390/boot/compressed/Makefile12
-rw-r--r--arch/s390/boot/compressed/head31.S51
-rw-r--r--arch/s390/boot/compressed/vmlinux.lds.S5
-rw-r--r--arch/s390/hypfs/hypfs_diag0c.c4
-rw-r--r--arch/s390/include/asm/appldata.h24
-rw-r--r--arch/s390/include/asm/atomic.h95
-rw-r--r--arch/s390/include/asm/bitops.h28
-rw-r--r--arch/s390/include/asm/cputime.h26
-rw-r--r--arch/s390/include/asm/ctl_reg.h14
-rw-r--r--arch/s390/include/asm/elf.h4
-rw-r--r--arch/s390/include/asm/idals.h16
-rw-r--r--arch/s390/include/asm/jump_label.h12
-rw-r--r--arch/s390/include/asm/lowcore.h159
-rw-r--r--arch/s390/include/asm/mman.h2
-rw-r--r--arch/s390/include/asm/mmu_context.h4
-rw-r--r--arch/s390/include/asm/percpu.h4
-rw-r--r--arch/s390/include/asm/perf_event.h3
-rw-r--r--arch/s390/include/asm/pgalloc.h24
-rw-r--r--arch/s390/include/asm/pgtable.h125
-rw-r--r--arch/s390/include/asm/processor.h66
-rw-r--r--arch/s390/include/asm/ptrace.h4
-rw-r--r--arch/s390/include/asm/qdio.h10
-rw-r--r--arch/s390/include/asm/runtime_instr.h10
-rw-r--r--arch/s390/include/asm/rwsem.h81
-rw-r--r--arch/s390/include/asm/setup.h35
-rw-r--r--arch/s390/include/asm/sfp-util.h10
-rw-r--r--arch/s390/include/asm/sparsemem.h9
-rw-r--r--arch/s390/include/asm/switch_to.h21
-rw-r--r--arch/s390/include/asm/thread_info.h9
-rw-r--r--arch/s390/include/asm/tlb.h4
-rw-r--r--arch/s390/include/asm/tlbflush.h7
-rw-r--r--arch/s390/include/asm/types.h17
-rw-r--r--arch/s390/include/asm/unistd.h8
-rw-r--r--arch/s390/include/asm/vdso.h2
-rw-r--r--arch/s390/kernel/Makefile22
-rw-r--r--arch/s390/kernel/asm-offsets.c4
-rw-r--r--arch/s390/kernel/base.S76
-rw-r--r--arch/s390/kernel/cpcmd.c10
-rw-r--r--arch/s390/kernel/diag.c15
-rw-r--r--arch/s390/kernel/dis.c48
-rw-r--r--arch/s390/kernel/dumpstack.c26
-rw-r--r--arch/s390/kernel/early.c69
-rw-r--r--arch/s390/kernel/entry.S966
-rw-r--r--arch/s390/kernel/head.S49
-rw-r--r--arch/s390/kernel/head31.S106
-rw-r--r--arch/s390/kernel/head_kdump.S8
-rw-r--r--arch/s390/kernel/ipl.c2
-rw-r--r--arch/s390/kernel/module.c12
-rw-r--r--arch/s390/kernel/nmi.c92
-rw-r--r--arch/s390/kernel/pgm_check.S22
-rw-r--r--arch/s390/kernel/process.c29
-rw-r--r--arch/s390/kernel/ptrace.c46
-rw-r--r--arch/s390/kernel/reipl.S92
-rw-r--r--arch/s390/kernel/relocate_kernel.S118
-rw-r--r--arch/s390/kernel/sclp.S10
-rw-r--r--arch/s390/kernel/setup.c72
-rw-r--r--arch/s390/kernel/signal.c10
-rw-r--r--arch/s390/kernel/smp.c33
-rw-r--r--arch/s390/kernel/sys_s390.c49
-rw-r--r--arch/s390/kernel/traps.c153
-rw-r--r--arch/s390/kernel/vdso.c16
-rw-r--r--arch/s390/kernel/vmlinux.lds.S7
-rw-r--r--arch/s390/lib/Makefile3
-rw-r--r--arch/s390/lib/div64.c147
-rw-r--r--arch/s390/lib/mem32.S92
-rw-r--r--arch/s390/lib/qrnnd.S78
-rw-r--r--arch/s390/lib/uaccess.c136
-rw-r--r--arch/s390/lib/ucmpdi2.c26
-rw-r--r--arch/s390/math-emu/Makefile7
-rw-r--r--arch/s390/math-emu/math.c2255
-rw-r--r--arch/s390/mm/dump_pagetables.c24
-rw-r--r--arch/s390/mm/extmem.c14
-rw-r--r--arch/s390/mm/fault.c36
-rw-r--r--arch/s390/mm/gup.c4
-rw-r--r--arch/s390/mm/init.c5
-rw-r--r--arch/s390/mm/mem_detect.c4
-rw-r--r--arch/s390/mm/mmap.c25
-rw-r--r--arch/s390/mm/pageattr.c2
-rw-r--r--arch/s390/mm/pgtable.c8
-rw-r--r--arch/s390/mm/vmem.c10
-rw-r--r--arch/s390/oprofile/Makefile2
-rw-r--r--arch/s390/oprofile/init.c11
85 files changed, 169 insertions, 5883 deletions
diff --git a/arch/s390/Kbuild b/arch/s390/Kbuild
index 647c3eccc3d0..2938934c6518 100644
--- a/arch/s390/Kbuild
+++ b/arch/s390/Kbuild
@@ -4,6 +4,5 @@ obj-$(CONFIG_KVM) += kvm/
4obj-$(CONFIG_CRYPTO_HW) += crypto/ 4obj-$(CONFIG_CRYPTO_HW) += crypto/
5obj-$(CONFIG_S390_HYPFS_FS) += hypfs/ 5obj-$(CONFIG_S390_HYPFS_FS) += hypfs/
6obj-$(CONFIG_APPLDATA_BASE) += appldata/ 6obj-$(CONFIG_APPLDATA_BASE) += appldata/
7obj-$(CONFIG_MATHEMU) += math-emu/
8obj-y += net/ 7obj-y += net/
9obj-$(CONFIG_PCI) += pci/ 8obj-$(CONFIG_PCI) += pci/
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 373cd5badf1c..1de26e1c48ac 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -35,7 +35,7 @@ config GENERIC_BUG_RELATIVE_POINTERS
35 def_bool y 35 def_bool y
36 36
37config ARCH_DMA_ADDR_T_64BIT 37config ARCH_DMA_ADDR_T_64BIT
38 def_bool 64BIT 38 def_bool y
39 39
40config GENERIC_LOCKBREAK 40config GENERIC_LOCKBREAK
41 def_bool y if SMP && PREEMPT 41 def_bool y if SMP && PREEMPT
@@ -59,7 +59,7 @@ config PCI_QUIRKS
59 def_bool n 59 def_bool n
60 60
61config ARCH_SUPPORTS_UPROBES 61config ARCH_SUPPORTS_UPROBES
62 def_bool 64BIT 62 def_bool y
63 63
64config S390 64config S390
65 def_bool y 65 def_bool y
@@ -110,19 +110,19 @@ config S390
110 select GENERIC_TIME_VSYSCALL 110 select GENERIC_TIME_VSYSCALL
111 select HAVE_ALIGNED_STRUCT_PAGE if SLUB 111 select HAVE_ALIGNED_STRUCT_PAGE if SLUB
112 select HAVE_ARCH_AUDITSYSCALL 112 select HAVE_ARCH_AUDITSYSCALL
113 select HAVE_ARCH_JUMP_LABEL if !MARCH_G5 113 select HAVE_ARCH_JUMP_LABEL
114 select HAVE_ARCH_SECCOMP_FILTER 114 select HAVE_ARCH_SECCOMP_FILTER
115 select HAVE_ARCH_TRACEHOOK 115 select HAVE_ARCH_TRACEHOOK
116 select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT 116 select HAVE_ARCH_TRANSPARENT_HUGEPAGE
117 select HAVE_BPF_JIT if 64BIT && PACK_STACK 117 select HAVE_BPF_JIT if PACK_STACK
118 select HAVE_CMPXCHG_DOUBLE 118 select HAVE_CMPXCHG_DOUBLE
119 select HAVE_CMPXCHG_LOCAL 119 select HAVE_CMPXCHG_LOCAL
120 select HAVE_DEBUG_KMEMLEAK 120 select HAVE_DEBUG_KMEMLEAK
121 select HAVE_DYNAMIC_FTRACE if 64BIT 121 select HAVE_DYNAMIC_FTRACE
122 select HAVE_DYNAMIC_FTRACE_WITH_REGS if 64BIT 122 select HAVE_DYNAMIC_FTRACE_WITH_REGS
123 select HAVE_FTRACE_MCOUNT_RECORD 123 select HAVE_FTRACE_MCOUNT_RECORD
124 select HAVE_FUNCTION_GRAPH_TRACER if 64BIT 124 select HAVE_FUNCTION_GRAPH_TRACER
125 select HAVE_FUNCTION_TRACER if 64BIT 125 select HAVE_FUNCTION_TRACER
126 select HAVE_FUTEX_CMPXCHG if FUTEX 126 select HAVE_FUTEX_CMPXCHG if FUTEX
127 select HAVE_KERNEL_BZIP2 127 select HAVE_KERNEL_BZIP2
128 select HAVE_KERNEL_GZIP 128 select HAVE_KERNEL_GZIP
@@ -132,7 +132,7 @@ config S390
132 select HAVE_KERNEL_XZ 132 select HAVE_KERNEL_XZ
133 select HAVE_KPROBES 133 select HAVE_KPROBES
134 select HAVE_KRETPROBES 134 select HAVE_KRETPROBES
135 select HAVE_KVM if 64BIT 135 select HAVE_KVM
136 select HAVE_MEMBLOCK 136 select HAVE_MEMBLOCK
137 select HAVE_MEMBLOCK_NODE_MAP 137 select HAVE_MEMBLOCK_NODE_MAP
138 select HAVE_MEMBLOCK_PHYS_MAP 138 select HAVE_MEMBLOCK_PHYS_MAP
@@ -141,7 +141,6 @@ config S390
141 select HAVE_PERF_EVENTS 141 select HAVE_PERF_EVENTS
142 select HAVE_REGS_AND_STACK_ACCESS_API 142 select HAVE_REGS_AND_STACK_ACCESS_API
143 select HAVE_SYSCALL_TRACEPOINTS 143 select HAVE_SYSCALL_TRACEPOINTS
144 select HAVE_UID16 if 32BIT
145 select HAVE_VIRT_CPU_ACCOUNTING 144 select HAVE_VIRT_CPU_ACCOUNTING
146 select MODULES_USE_ELF_RELA 145 select MODULES_USE_ELF_RELA
147 select NO_BOOTMEM 146 select NO_BOOTMEM
@@ -190,18 +189,11 @@ config HAVE_MARCH_Z13_FEATURES
190 189
191choice 190choice
192 prompt "Processor type" 191 prompt "Processor type"
193 default MARCH_G5 192 default MARCH_Z900
194
195config MARCH_G5
196 bool "System/390 model G5 and G6"
197 depends on !64BIT
198 help
199 Select this to build a 31 bit kernel that works
200 on all ESA/390 and z/Architecture machines.
201 193
202config MARCH_Z900 194config MARCH_Z900
203 bool "IBM zSeries model z800 and z900" 195 bool "IBM zSeries model z800 and z900"
204 select HAVE_MARCH_Z900_FEATURES if 64BIT 196 select HAVE_MARCH_Z900_FEATURES
205 help 197 help
206 Select this to enable optimizations for model z800/z900 (2064 and 198 Select this to enable optimizations for model z800/z900 (2064 and
207 2066 series). This will enable some optimizations that are not 199 2066 series). This will enable some optimizations that are not
@@ -209,7 +201,7 @@ config MARCH_Z900
209 201
210config MARCH_Z990 202config MARCH_Z990
211 bool "IBM zSeries model z890 and z990" 203 bool "IBM zSeries model z890 and z990"
212 select HAVE_MARCH_Z990_FEATURES if 64BIT 204 select HAVE_MARCH_Z990_FEATURES
213 help 205 help
214 Select this to enable optimizations for model z890/z990 (2084 and 206 Select this to enable optimizations for model z890/z990 (2084 and
215 2086 series). The kernel will be slightly faster but will not work 207 2086 series). The kernel will be slightly faster but will not work
@@ -217,7 +209,7 @@ config MARCH_Z990
217 209
218config MARCH_Z9_109 210config MARCH_Z9_109
219 bool "IBM System z9" 211 bool "IBM System z9"
220 select HAVE_MARCH_Z9_109_FEATURES if 64BIT 212 select HAVE_MARCH_Z9_109_FEATURES
221 help 213 help
222 Select this to enable optimizations for IBM System z9 (2094 and 214 Select this to enable optimizations for IBM System z9 (2094 and
223 2096 series). The kernel will be slightly faster but will not work 215 2096 series). The kernel will be slightly faster but will not work
@@ -225,7 +217,7 @@ config MARCH_Z9_109
225 217
226config MARCH_Z10 218config MARCH_Z10
227 bool "IBM System z10" 219 bool "IBM System z10"
228 select HAVE_MARCH_Z10_FEATURES if 64BIT 220 select HAVE_MARCH_Z10_FEATURES
229 help 221 help
230 Select this to enable optimizations for IBM System z10 (2097 and 222 Select this to enable optimizations for IBM System z10 (2097 and
231 2098 series). The kernel will be slightly faster but will not work 223 2098 series). The kernel will be slightly faster but will not work
@@ -233,7 +225,7 @@ config MARCH_Z10
233 225
234config MARCH_Z196 226config MARCH_Z196
235 bool "IBM zEnterprise 114 and 196" 227 bool "IBM zEnterprise 114 and 196"
236 select HAVE_MARCH_Z196_FEATURES if 64BIT 228 select HAVE_MARCH_Z196_FEATURES
237 help 229 help
238 Select this to enable optimizations for IBM zEnterprise 114 and 196 230 Select this to enable optimizations for IBM zEnterprise 114 and 196
239 (2818 and 2817 series). The kernel will be slightly faster but will 231 (2818 and 2817 series). The kernel will be slightly faster but will
@@ -241,7 +233,7 @@ config MARCH_Z196
241 233
242config MARCH_ZEC12 234config MARCH_ZEC12
243 bool "IBM zBC12 and zEC12" 235 bool "IBM zBC12 and zEC12"
244 select HAVE_MARCH_ZEC12_FEATURES if 64BIT 236 select HAVE_MARCH_ZEC12_FEATURES
245 help 237 help
246 Select this to enable optimizations for IBM zBC12 and zEC12 (2828 and 238 Select this to enable optimizations for IBM zBC12 and zEC12 (2828 and
247 2827 series). The kernel will be slightly faster but will not work on 239 2827 series). The kernel will be slightly faster but will not work on
@@ -249,7 +241,7 @@ config MARCH_ZEC12
249 241
250config MARCH_Z13 242config MARCH_Z13
251 bool "IBM z13" 243 bool "IBM z13"
252 select HAVE_MARCH_Z13_FEATURES if 64BIT 244 select HAVE_MARCH_Z13_FEATURES
253 help 245 help
254 Select this to enable optimizations for IBM z13 (2964 series). 246 Select this to enable optimizations for IBM z13 (2964 series).
255 The kernel will be slightly faster but will not work on older 247 The kernel will be slightly faster but will not work on older
@@ -257,9 +249,6 @@ config MARCH_Z13
257 249
258endchoice 250endchoice
259 251
260config MARCH_G5_TUNE
261 def_bool TUNE_G5 || MARCH_G5 && TUNE_DEFAULT
262
263config MARCH_Z900_TUNE 252config MARCH_Z900_TUNE
264 def_bool TUNE_Z900 || MARCH_Z900 && TUNE_DEFAULT 253 def_bool TUNE_Z900 || MARCH_Z900 && TUNE_DEFAULT
265 254
@@ -298,9 +287,6 @@ config TUNE_DEFAULT
298 Tune the generated code for the target processor for which the kernel 287 Tune the generated code for the target processor for which the kernel
299 will be compiled. 288 will be compiled.
300 289
301config TUNE_G5
302 bool "System/390 model G5 and G6"
303
304config TUNE_Z900 290config TUNE_Z900
305 bool "IBM zSeries model z800 and z900" 291 bool "IBM zSeries model z800 and z900"
306 292
@@ -326,18 +312,10 @@ endchoice
326 312
327config 64BIT 313config 64BIT
328 def_bool y 314 def_bool y
329 prompt "64 bit kernel"
330 help
331 Select this option if you have an IBM z/Architecture machine
332 and want to use the 64 bit addressing mode.
333
334config 32BIT
335 def_bool y if !64BIT
336 315
337config COMPAT 316config COMPAT
338 def_bool y 317 def_bool y
339 prompt "Kernel support for 31 bit emulation" 318 prompt "Kernel support for 31 bit emulation"
340 depends on 64BIT
341 select COMPAT_BINFMT_ELF if BINFMT_ELF 319 select COMPAT_BINFMT_ELF if BINFMT_ELF
342 select ARCH_WANT_OLD_COMPAT_IPC 320 select ARCH_WANT_OLD_COMPAT_IPC
343 select COMPAT_OLD_SIGACTION 321 select COMPAT_OLD_SIGACTION
@@ -376,8 +354,7 @@ config NR_CPUS
376 int "Maximum number of CPUs (2-512)" 354 int "Maximum number of CPUs (2-512)"
377 range 2 512 355 range 2 512
378 depends on SMP 356 depends on SMP
379 default "32" if !64BIT 357 default "64"
380 default "64" if 64BIT
381 help 358 help
382 This allows you to specify the maximum number of CPUs which this 359 This allows you to specify the maximum number of CPUs which this
383 kernel will support. The maximum supported value is 512 and the 360 kernel will support. The maximum supported value is 512 and the
@@ -418,15 +395,6 @@ config SCHED_TOPOLOGY
418 395
419source kernel/Kconfig.preempt 396source kernel/Kconfig.preempt
420 397
421config MATHEMU
422 def_bool y
423 prompt "IEEE FPU emulation"
424 depends on MARCH_G5
425 help
426 This option is required for IEEE compliant floating point arithmetic
427 on older ESA/390 machines. Say Y unless you know your machine doesn't
428 need this.
429
430source kernel/Kconfig.hz 398source kernel/Kconfig.hz
431 399
432endmenu 400endmenu
@@ -437,7 +405,6 @@ config ARCH_SPARSEMEM_ENABLE
437 def_bool y 405 def_bool y
438 select SPARSEMEM_VMEMMAP_ENABLE 406 select SPARSEMEM_VMEMMAP_ENABLE
439 select SPARSEMEM_VMEMMAP 407 select SPARSEMEM_VMEMMAP
440 select SPARSEMEM_STATIC if !64BIT
441 408
442config ARCH_SPARSEMEM_DEFAULT 409config ARCH_SPARSEMEM_DEFAULT
443 def_bool y 410 def_bool y
@@ -453,7 +420,6 @@ config ARCH_ENABLE_MEMORY_HOTREMOVE
453 420
454config ARCH_ENABLE_SPLIT_PMD_PTLOCK 421config ARCH_ENABLE_SPLIT_PMD_PTLOCK
455 def_bool y 422 def_bool y
456 depends on 64BIT
457 423
458config FORCE_MAX_ZONEORDER 424config FORCE_MAX_ZONEORDER
459 int 425 int
@@ -528,7 +494,6 @@ config QDIO
528 494
529menuconfig PCI 495menuconfig PCI
530 bool "PCI support" 496 bool "PCI support"
531 depends on 64BIT
532 select HAVE_DMA_ATTRS 497 select HAVE_DMA_ATTRS
533 select PCI_MSI 498 select PCI_MSI
534 help 499 help
@@ -598,7 +563,6 @@ config CHSC_SCH
598 563
599config SCM_BUS 564config SCM_BUS
600 def_bool y 565 def_bool y
601 depends on 64BIT
602 prompt "SCM bus driver" 566 prompt "SCM bus driver"
603 help 567 help
604 Bus driver for Storage Class Memory. 568 Bus driver for Storage Class Memory.
@@ -620,7 +584,7 @@ menu "Dump support"
620 584
621config CRASH_DUMP 585config CRASH_DUMP
622 bool "kernel crash dumps" 586 bool "kernel crash dumps"
623 depends on 64BIT && SMP 587 depends on SMP
624 select KEXEC 588 select KEXEC
625 help 589 help
626 Generate crash dump after being started by kexec. 590 Generate crash dump after being started by kexec.
@@ -659,7 +623,7 @@ endmenu
659menu "Power Management" 623menu "Power Management"
660 624
661config ARCH_HIBERNATION_POSSIBLE 625config ARCH_HIBERNATION_POSSIBLE
662 def_bool y if 64BIT 626 def_bool y
663 627
664source "kernel/power/Kconfig" 628source "kernel/power/Kconfig"
665 629
@@ -810,7 +774,6 @@ source "arch/s390/kvm/Kconfig"
810config S390_GUEST 774config S390_GUEST
811 def_bool y 775 def_bool y
812 prompt "s390 support for virtio devices" 776 prompt "s390 support for virtio devices"
813 depends on 64BIT
814 select TTY 777 select TTY
815 select VIRTUALIZATION 778 select VIRTUALIZATION
816 select VIRTIO 779 select VIRTIO
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index acb6859c6a95..667b1bca5681 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -13,15 +13,6 @@
13# Copyright (C) 1994 by Linus Torvalds 13# Copyright (C) 1994 by Linus Torvalds
14# 14#
15 15
16ifndef CONFIG_64BIT
17LD_BFD := elf32-s390
18LDFLAGS := -m elf_s390
19KBUILD_CFLAGS += -m31
20KBUILD_AFLAGS += -m31
21UTS_MACHINE := s390
22STACK_SIZE := 8192
23CHECKFLAGS += -D__s390__ -msize-long
24else
25LD_BFD := elf64-s390 16LD_BFD := elf64-s390
26LDFLAGS := -m elf64_s390 17LDFLAGS := -m elf64_s390
27KBUILD_AFLAGS_MODULE += -fPIC 18KBUILD_AFLAGS_MODULE += -fPIC
@@ -31,11 +22,9 @@ KBUILD_AFLAGS += -m64
31UTS_MACHINE := s390x 22UTS_MACHINE := s390x
32STACK_SIZE := 16384 23STACK_SIZE := 16384
33CHECKFLAGS += -D__s390__ -D__s390x__ 24CHECKFLAGS += -D__s390__ -D__s390x__
34endif
35 25
36export LD_BFD 26export LD_BFD
37 27
38mflags-$(CONFIG_MARCH_G5) := -march=g5
39mflags-$(CONFIG_MARCH_Z900) := -march=z900 28mflags-$(CONFIG_MARCH_Z900) := -march=z900
40mflags-$(CONFIG_MARCH_Z990) := -march=z990 29mflags-$(CONFIG_MARCH_Z990) := -march=z990
41mflags-$(CONFIG_MARCH_Z9_109) := -march=z9-109 30mflags-$(CONFIG_MARCH_Z9_109) := -march=z9-109
@@ -47,7 +36,6 @@ mflags-$(CONFIG_MARCH_Z13) := -march=z13
47aflags-y += $(mflags-y) 36aflags-y += $(mflags-y)
48cflags-y += $(mflags-y) 37cflags-y += $(mflags-y)
49 38
50cflags-$(CONFIG_MARCH_G5_TUNE) += -mtune=g5
51cflags-$(CONFIG_MARCH_Z900_TUNE) += -mtune=z900 39cflags-$(CONFIG_MARCH_Z900_TUNE) += -mtune=z900
52cflags-$(CONFIG_MARCH_Z990_TUNE) += -mtune=z990 40cflags-$(CONFIG_MARCH_Z990_TUNE) += -mtune=z990
53cflags-$(CONFIG_MARCH_Z9_109_TUNE) += -mtune=z9-109 41cflags-$(CONFIG_MARCH_Z9_109_TUNE) += -mtune=z9-109
@@ -104,7 +92,7 @@ KBUILD_AFLAGS += $(aflags-y)
104OBJCOPYFLAGS := -O binary 92OBJCOPYFLAGS := -O binary
105 93
106head-y := arch/s390/kernel/head.o 94head-y := arch/s390/kernel/head.o
107head-y += arch/s390/kernel/$(if $(CONFIG_64BIT),head64.o,head31.o) 95head-y += arch/s390/kernel/head64.o
108 96
109# See arch/s390/Kbuild for content of core part of the kernel 97# See arch/s390/Kbuild for content of core part of the kernel
110core-y += arch/s390/ 98core-y += arch/s390/
@@ -129,9 +117,7 @@ zfcpdump:
129 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ 117 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
130 118
131vdso_install: 119vdso_install:
132ifeq ($(CONFIG_64BIT),y)
133 $(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso64 $@ 120 $(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso64 $@
134endif
135 $(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso32 $@ 121 $(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso32 $@
136 122
137archclean: 123archclean:
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index f90d1fc6d603..254fb05c5d6c 100644
--- a/arch/s390/boot/compressed/Makefile
+++ b/arch/s390/boot/compressed/Makefile
@@ -4,13 +4,11 @@
4# create a compressed vmlinux image from the original vmlinux 4# create a compressed vmlinux image from the original vmlinux
5# 5#
6 6
7BITS := $(if $(CONFIG_64BIT),64,31)
8
9targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 7targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2
10targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4 8targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
11targets += misc.o piggy.o sizes.h head$(BITS).o 9targets += misc.o piggy.o sizes.h head64.o
12 10
13KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 11KBUILD_CFLAGS := -m64 -D__KERNEL__ $(LINUX_INCLUDE) -O2
14KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING 12KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
15KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks 13KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks
16KBUILD_CFLAGS += $(call cc-option,-mpacked-stack) 14KBUILD_CFLAGS += $(call cc-option,-mpacked-stack)
@@ -19,7 +17,7 @@ KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
19GCOV_PROFILE := n 17GCOV_PROFILE := n
20 18
21OBJECTS := $(addprefix $(objtree)/arch/s390/kernel/, head.o sclp.o ebcdic.o) 19OBJECTS := $(addprefix $(objtree)/arch/s390/kernel/, head.o sclp.o ebcdic.o)
22OBJECTS += $(obj)/head$(BITS).o $(obj)/misc.o $(obj)/piggy.o 20OBJECTS += $(obj)/head64.o $(obj)/misc.o $(obj)/piggy.o
23 21
24LDFLAGS_vmlinux := --oformat $(LD_BFD) -e startup -T 22LDFLAGS_vmlinux := --oformat $(LD_BFD) -e startup -T
25$(obj)/vmlinux: $(obj)/vmlinux.lds $(OBJECTS) 23$(obj)/vmlinux: $(obj)/vmlinux.lds $(OBJECTS)
@@ -34,8 +32,8 @@ quiet_cmd_sizes = GEN $@
34$(obj)/sizes.h: vmlinux 32$(obj)/sizes.h: vmlinux
35 $(call if_changed,sizes) 33 $(call if_changed,sizes)
36 34
37AFLAGS_head$(BITS).o += -I$(obj) 35AFLAGS_head64.o += -I$(obj)
38$(obj)/head$(BITS).o: $(obj)/sizes.h 36$(obj)/head64.o: $(obj)/sizes.h
39 37
40CFLAGS_misc.o += -I$(obj) 38CFLAGS_misc.o += -I$(obj)
41$(obj)/misc.o: $(obj)/sizes.h 39$(obj)/misc.o: $(obj)/sizes.h
diff --git a/arch/s390/boot/compressed/head31.S b/arch/s390/boot/compressed/head31.S
deleted file mode 100644
index e8c9e18b8039..000000000000
--- a/arch/s390/boot/compressed/head31.S
+++ /dev/null
@@ -1,51 +0,0 @@
1/*
2 * Startup glue code to uncompress the kernel
3 *
4 * Copyright IBM Corp. 2010
5 *
6 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
7 */
8
9#include <linux/init.h>
10#include <linux/linkage.h>
11#include <asm/asm-offsets.h>
12#include <asm/thread_info.h>
13#include <asm/page.h>
14#include "sizes.h"
15
16__HEAD
17ENTRY(startup_continue)
18 basr %r13,0 # get base
19.LPG1:
20 # setup stack
21 l %r15,.Lstack-.LPG1(%r13)
22 ahi %r15,-96
23 l %r1,.Ldecompress-.LPG1(%r13)
24 basr %r14,%r1
25 # setup registers for memory mover & branch to target
26 lr %r4,%r2
27 l %r2,.Loffset-.LPG1(%r13)
28 la %r4,0(%r2,%r4)
29 l %r3,.Lmvsize-.LPG1(%r13)
30 lr %r5,%r3
31 # move the memory mover someplace safe
32 la %r1,0x200
33 mvc 0(mover_end-mover,%r1),mover-.LPG1(%r13)
34 # decompress image is started at 0x11000
35 lr %r6,%r2
36 br %r1
37mover:
38 mvcle %r2,%r4,0
39 jo mover
40 br %r6
41mover_end:
42
43 .align 8
44.Lstack:
45 .long 0x8000 + (1<<(PAGE_SHIFT+THREAD_ORDER))
46.Ldecompress:
47 .long decompress_kernel
48.Loffset:
49 .long 0x11000
50.Lmvsize:
51 .long SZ__bss_start
diff --git a/arch/s390/boot/compressed/vmlinux.lds.S b/arch/s390/boot/compressed/vmlinux.lds.S
index 8e1fb8239287..747735f83426 100644
--- a/arch/s390/boot/compressed/vmlinux.lds.S
+++ b/arch/s390/boot/compressed/vmlinux.lds.S
@@ -1,12 +1,7 @@
1#include <asm-generic/vmlinux.lds.h> 1#include <asm-generic/vmlinux.lds.h>
2 2
3#ifdef CONFIG_64BIT
4OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390") 3OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
5OUTPUT_ARCH(s390:64-bit) 4OUTPUT_ARCH(s390:64-bit)
6#else
7OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390")
8OUTPUT_ARCH(s390:31-bit)
9#endif
10 5
11ENTRY(startup) 6ENTRY(startup)
12 7
diff --git a/arch/s390/hypfs/hypfs_diag0c.c b/arch/s390/hypfs/hypfs_diag0c.c
index d4c0d3717543..24c747a0fcc3 100644
--- a/arch/s390/hypfs/hypfs_diag0c.c
+++ b/arch/s390/hypfs/hypfs_diag0c.c
@@ -19,13 +19,9 @@
19static void diag0c(struct hypfs_diag0c_entry *entry) 19static void diag0c(struct hypfs_diag0c_entry *entry)
20{ 20{
21 asm volatile ( 21 asm volatile (
22#ifdef CONFIG_64BIT
23 " sam31\n" 22 " sam31\n"
24 " diag %0,%0,0x0c\n" 23 " diag %0,%0,0x0c\n"
25 " sam64\n" 24 " sam64\n"
26#else
27 " diag %0,%0,0x0c\n"
28#endif
29 : /* no output register */ 25 : /* no output register */
30 : "a" (entry) 26 : "a" (entry)
31 : "memory"); 27 : "memory");
diff --git a/arch/s390/include/asm/appldata.h b/arch/s390/include/asm/appldata.h
index 32a705987156..16887c5fd989 100644
--- a/arch/s390/include/asm/appldata.h
+++ b/arch/s390/include/asm/appldata.h
@@ -9,28 +9,6 @@
9 9
10#include <asm/io.h> 10#include <asm/io.h>
11 11
12#ifndef CONFIG_64BIT
13
14#define APPLDATA_START_INTERVAL_REC 0x00 /* Function codes for */
15#define APPLDATA_STOP_REC 0x01 /* DIAG 0xDC */
16#define APPLDATA_GEN_EVENT_REC 0x02
17#define APPLDATA_START_CONFIG_REC 0x03
18
19/*
20 * Parameter list for DIAGNOSE X'DC'
21 */
22struct appldata_parameter_list {
23 u16 diag; /* The DIAGNOSE code X'00DC' */
24 u8 function; /* The function code for the DIAGNOSE */
25 u8 parlist_length; /* Length of the parameter list */
26 u32 product_id_addr; /* Address of the 16-byte product ID */
27 u16 reserved;
28 u16 buffer_length; /* Length of the application data buffer */
29 u32 buffer_addr; /* Address of the application data buffer */
30} __attribute__ ((packed));
31
32#else /* CONFIG_64BIT */
33
34#define APPLDATA_START_INTERVAL_REC 0x80 12#define APPLDATA_START_INTERVAL_REC 0x80
35#define APPLDATA_STOP_REC 0x81 13#define APPLDATA_STOP_REC 0x81
36#define APPLDATA_GEN_EVENT_REC 0x82 14#define APPLDATA_GEN_EVENT_REC 0x82
@@ -51,8 +29,6 @@ struct appldata_parameter_list {
51 u64 buffer_addr; 29 u64 buffer_addr;
52} __attribute__ ((packed)); 30} __attribute__ ((packed));
53 31
54#endif /* CONFIG_64BIT */
55
56struct appldata_product_id { 32struct appldata_product_id {
57 char prod_nr[7]; /* product number */ 33 char prod_nr[7]; /* product number */
58 u16 prod_fn; /* product function */ 34 u16 prod_fn; /* product function */
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index fa934fe080c1..adbe3802e377 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -160,8 +160,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
160 160
161#define ATOMIC64_INIT(i) { (i) } 161#define ATOMIC64_INIT(i) { (i) }
162 162
163#ifdef CONFIG_64BIT
164
165#define __ATOMIC64_NO_BARRIER "\n" 163#define __ATOMIC64_NO_BARRIER "\n"
166 164
167#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES 165#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
@@ -274,99 +272,6 @@ static inline long long atomic64_cmpxchg(atomic64_t *v,
274 272
275#undef __ATOMIC64_LOOP 273#undef __ATOMIC64_LOOP
276 274
277#else /* CONFIG_64BIT */
278
279typedef struct {
280 long long counter;
281} atomic64_t;
282
283static inline long long atomic64_read(const atomic64_t *v)
284{
285 register_pair rp;
286
287 asm volatile(
288 " lm %0,%N0,%1"
289 : "=&d" (rp) : "Q" (v->counter) );
290 return rp.pair;
291}
292
293static inline void atomic64_set(atomic64_t *v, long long i)
294{
295 register_pair rp = {.pair = i};
296
297 asm volatile(
298 " stm %1,%N1,%0"
299 : "=Q" (v->counter) : "d" (rp) );
300}
301
302static inline long long atomic64_xchg(atomic64_t *v, long long new)
303{
304 register_pair rp_new = {.pair = new};
305 register_pair rp_old;
306
307 asm volatile(
308 " lm %0,%N0,%1\n"
309 "0: cds %0,%2,%1\n"
310 " jl 0b\n"
311 : "=&d" (rp_old), "+Q" (v->counter)
312 : "d" (rp_new)
313 : "cc");
314 return rp_old.pair;
315}
316
317static inline long long atomic64_cmpxchg(atomic64_t *v,
318 long long old, long long new)
319{
320 register_pair rp_old = {.pair = old};
321 register_pair rp_new = {.pair = new};
322
323 asm volatile(
324 " cds %0,%2,%1"
325 : "+&d" (rp_old), "+Q" (v->counter)
326 : "d" (rp_new)
327 : "cc");
328 return rp_old.pair;
329}
330
331
332static inline long long atomic64_add_return(long long i, atomic64_t *v)
333{
334 long long old, new;
335
336 do {
337 old = atomic64_read(v);
338 new = old + i;
339 } while (atomic64_cmpxchg(v, old, new) != old);
340 return new;
341}
342
343static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v)
344{
345 long long old, new;
346
347 do {
348 old = atomic64_read(v);
349 new = old | mask;
350 } while (atomic64_cmpxchg(v, old, new) != old);
351}
352
353static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
354{
355 long long old, new;
356
357 do {
358 old = atomic64_read(v);
359 new = old & mask;
360 } while (atomic64_cmpxchg(v, old, new) != old);
361}
362
363static inline void atomic64_add(long long i, atomic64_t *v)
364{
365 atomic64_add_return(i, v);
366}
367
368#endif /* CONFIG_64BIT */
369
370static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u) 275static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
371{ 276{
372 long long c, old; 277 long long c, old;
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 520542477678..9b68e98a724f 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -51,32 +51,6 @@
51 51
52#define __BITOPS_NO_BARRIER "\n" 52#define __BITOPS_NO_BARRIER "\n"
53 53
54#ifndef CONFIG_64BIT
55
56#define __BITOPS_OR "or"
57#define __BITOPS_AND "nr"
58#define __BITOPS_XOR "xr"
59#define __BITOPS_BARRIER "\n"
60
61#define __BITOPS_LOOP(__addr, __val, __op_string, __barrier) \
62({ \
63 unsigned long __old, __new; \
64 \
65 typecheck(unsigned long *, (__addr)); \
66 asm volatile( \
67 " l %0,%2\n" \
68 "0: lr %1,%0\n" \
69 __op_string " %1,%3\n" \
70 " cs %0,%1,%2\n" \
71 " jl 0b" \
72 : "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\
73 : "d" (__val) \
74 : "cc", "memory"); \
75 __old; \
76})
77
78#else /* CONFIG_64BIT */
79
80#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES 54#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
81 55
82#define __BITOPS_OR "laog" 56#define __BITOPS_OR "laog"
@@ -125,8 +99,6 @@
125 99
126#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */ 100#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
127 101
128#endif /* CONFIG_64BIT */
129
130#define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG) 102#define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)
131 103
132static inline unsigned long * 104static inline unsigned long *
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index b91e960e4045..221b454c734a 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -22,15 +22,7 @@ typedef unsigned long long __nocast cputime64_t;
22 22
23static inline unsigned long __div(unsigned long long n, unsigned long base) 23static inline unsigned long __div(unsigned long long n, unsigned long base)
24{ 24{
25#ifndef CONFIG_64BIT
26 register_pair rp;
27
28 rp.pair = n >> 1;
29 asm ("dr %0,%1" : "+d" (rp) : "d" (base >> 1));
30 return rp.subreg.odd;
31#else /* CONFIG_64BIT */
32 return n / base; 25 return n / base;
33#endif /* CONFIG_64BIT */
34} 26}
35 27
36#define cputime_one_jiffy jiffies_to_cputime(1) 28#define cputime_one_jiffy jiffies_to_cputime(1)
@@ -101,17 +93,8 @@ static inline void cputime_to_timespec(const cputime_t cputime,
101 struct timespec *value) 93 struct timespec *value)
102{ 94{
103 unsigned long long __cputime = (__force unsigned long long) cputime; 95 unsigned long long __cputime = (__force unsigned long long) cputime;
104#ifndef CONFIG_64BIT
105 register_pair rp;
106
107 rp.pair = __cputime >> 1;
108 asm ("dr %0,%1" : "+d" (rp) : "d" (CPUTIME_PER_SEC / 2));
109 value->tv_nsec = rp.subreg.even * NSEC_PER_USEC / CPUTIME_PER_USEC;
110 value->tv_sec = rp.subreg.odd;
111#else
112 value->tv_nsec = (__cputime % CPUTIME_PER_SEC) * NSEC_PER_USEC / CPUTIME_PER_USEC; 96 value->tv_nsec = (__cputime % CPUTIME_PER_SEC) * NSEC_PER_USEC / CPUTIME_PER_USEC;
113 value->tv_sec = __cputime / CPUTIME_PER_SEC; 97 value->tv_sec = __cputime / CPUTIME_PER_SEC;
114#endif
115} 98}
116 99
117/* 100/*
@@ -129,17 +112,8 @@ static inline void cputime_to_timeval(const cputime_t cputime,
129 struct timeval *value) 112 struct timeval *value)
130{ 113{
131 unsigned long long __cputime = (__force unsigned long long) cputime; 114 unsigned long long __cputime = (__force unsigned long long) cputime;
132#ifndef CONFIG_64BIT
133 register_pair rp;
134
135 rp.pair = __cputime >> 1;
136 asm ("dr %0,%1" : "+d" (rp) : "d" (CPUTIME_PER_USEC / 2));
137 value->tv_usec = rp.subreg.even / CPUTIME_PER_USEC;
138 value->tv_sec = rp.subreg.odd;
139#else
140 value->tv_usec = (__cputime % CPUTIME_PER_SEC) / CPUTIME_PER_USEC; 115 value->tv_usec = (__cputime % CPUTIME_PER_SEC) / CPUTIME_PER_USEC;
141 value->tv_sec = __cputime / CPUTIME_PER_SEC; 116 value->tv_sec = __cputime / CPUTIME_PER_SEC;
142#endif
143} 117}
144 118
145/* 119/*
diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
index 31ab9f346d7e..cfad7fca01d6 100644
--- a/arch/s390/include/asm/ctl_reg.h
+++ b/arch/s390/include/asm/ctl_reg.h
@@ -9,20 +9,12 @@
9 9
10#include <linux/bug.h> 10#include <linux/bug.h>
11 11
12#ifdef CONFIG_64BIT
13# define __CTL_LOAD "lctlg"
14# define __CTL_STORE "stctg"
15#else
16# define __CTL_LOAD "lctl"
17# define __CTL_STORE "stctl"
18#endif
19
20#define __ctl_load(array, low, high) { \ 12#define __ctl_load(array, low, high) { \
21 typedef struct { char _[sizeof(array)]; } addrtype; \ 13 typedef struct { char _[sizeof(array)]; } addrtype; \
22 \ 14 \
23 BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\ 15 BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
24 asm volatile( \ 16 asm volatile( \
25 __CTL_LOAD " %1,%2,%0\n" \ 17 " lctlg %1,%2,%0\n" \
26 : : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high));\ 18 : : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high));\
27} 19}
28 20
@@ -31,7 +23,7 @@
31 \ 23 \
32 BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\ 24 BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
33 asm volatile( \ 25 asm volatile( \
34 __CTL_STORE " %1,%2,%0\n" \ 26 " stctg %1,%2,%0\n" \
35 : "=Q" (*(addrtype *)(&array)) \ 27 : "=Q" (*(addrtype *)(&array)) \
36 : "i" (low), "i" (high)); \ 28 : "i" (low), "i" (high)); \
37} 29}
@@ -60,9 +52,7 @@ void smp_ctl_clear_bit(int cr, int bit);
60union ctlreg0 { 52union ctlreg0 {
61 unsigned long val; 53 unsigned long val;
62 struct { 54 struct {
63#ifdef CONFIG_64BIT
64 unsigned long : 32; 55 unsigned long : 32;
65#endif
66 unsigned long : 3; 56 unsigned long : 3;
67 unsigned long lap : 1; /* Low-address-protection control */ 57 unsigned long lap : 1; /* Low-address-protection control */
68 unsigned long : 4; 58 unsigned long : 4;
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index c9c875d9ed31..5214d15680ab 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -107,11 +107,7 @@
107/* 107/*
108 * These are used to set parameters in the core dumps. 108 * These are used to set parameters in the core dumps.
109 */ 109 */
110#ifndef CONFIG_64BIT
111#define ELF_CLASS ELFCLASS32
112#else /* CONFIG_64BIT */
113#define ELF_CLASS ELFCLASS64 110#define ELF_CLASS ELFCLASS64
114#endif /* CONFIG_64BIT */
115#define ELF_DATA ELFDATA2MSB 111#define ELF_DATA ELFDATA2MSB
116#define ELF_ARCH EM_S390 112#define ELF_ARCH EM_S390
117 113
diff --git a/arch/s390/include/asm/idals.h b/arch/s390/include/asm/idals.h
index ea5a6e45fd93..a7b2d7504049 100644
--- a/arch/s390/include/asm/idals.h
+++ b/arch/s390/include/asm/idals.h
@@ -19,11 +19,7 @@
19#include <asm/cio.h> 19#include <asm/cio.h>
20#include <asm/uaccess.h> 20#include <asm/uaccess.h>
21 21
22#ifdef CONFIG_64BIT
23#define IDA_SIZE_LOG 12 /* 11 for 2k , 12 for 4k */ 22#define IDA_SIZE_LOG 12 /* 11 for 2k , 12 for 4k */
24#else
25#define IDA_SIZE_LOG 11 /* 11 for 2k , 12 for 4k */
26#endif
27#define IDA_BLOCK_SIZE (1L<<IDA_SIZE_LOG) 23#define IDA_BLOCK_SIZE (1L<<IDA_SIZE_LOG)
28 24
29/* 25/*
@@ -32,11 +28,7 @@
32static inline int 28static inline int
33idal_is_needed(void *vaddr, unsigned int length) 29idal_is_needed(void *vaddr, unsigned int length)
34{ 30{
35#ifdef CONFIG_64BIT
36 return ((__pa(vaddr) + length - 1) >> 31) != 0; 31 return ((__pa(vaddr) + length - 1) >> 31) != 0;
37#else
38 return 0;
39#endif
40} 32}
41 33
42 34
@@ -77,7 +69,6 @@ static inline unsigned long *idal_create_words(unsigned long *idaws,
77static inline int 69static inline int
78set_normalized_cda(struct ccw1 * ccw, void *vaddr) 70set_normalized_cda(struct ccw1 * ccw, void *vaddr)
79{ 71{
80#ifdef CONFIG_64BIT
81 unsigned int nridaws; 72 unsigned int nridaws;
82 unsigned long *idal; 73 unsigned long *idal;
83 74
@@ -93,7 +84,6 @@ set_normalized_cda(struct ccw1 * ccw, void *vaddr)
93 ccw->flags |= CCW_FLAG_IDA; 84 ccw->flags |= CCW_FLAG_IDA;
94 vaddr = idal; 85 vaddr = idal;
95 } 86 }
96#endif
97 ccw->cda = (__u32)(unsigned long) vaddr; 87 ccw->cda = (__u32)(unsigned long) vaddr;
98 return 0; 88 return 0;
99} 89}
@@ -104,12 +94,10 @@ set_normalized_cda(struct ccw1 * ccw, void *vaddr)
104static inline void 94static inline void
105clear_normalized_cda(struct ccw1 * ccw) 95clear_normalized_cda(struct ccw1 * ccw)
106{ 96{
107#ifdef CONFIG_64BIT
108 if (ccw->flags & CCW_FLAG_IDA) { 97 if (ccw->flags & CCW_FLAG_IDA) {
109 kfree((void *)(unsigned long) ccw->cda); 98 kfree((void *)(unsigned long) ccw->cda);
110 ccw->flags &= ~CCW_FLAG_IDA; 99 ccw->flags &= ~CCW_FLAG_IDA;
111 } 100 }
112#endif
113 ccw->cda = 0; 101 ccw->cda = 0;
114} 102}
115 103
@@ -181,12 +169,8 @@ idal_buffer_free(struct idal_buffer *ib)
181static inline int 169static inline int
182__idal_buffer_is_needed(struct idal_buffer *ib) 170__idal_buffer_is_needed(struct idal_buffer *ib)
183{ 171{
184#ifdef CONFIG_64BIT
185 return ib->size > (4096ul << ib->page_order) || 172 return ib->size > (4096ul << ib->page_order) ||
186 idal_is_needed(ib->data[0], ib->size); 173 idal_is_needed(ib->data[0], ib->size);
187#else
188 return ib->size > (4096ul << ib->page_order);
189#endif
190} 174}
191 175
192/* 176/*
diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h
index 58642fd29c87..510012bceb75 100644
--- a/arch/s390/include/asm/jump_label.h
+++ b/arch/s390/include/asm/jump_label.h
@@ -6,14 +6,6 @@
6#define JUMP_LABEL_NOP_SIZE 6 6#define JUMP_LABEL_NOP_SIZE 6
7#define JUMP_LABEL_NOP_OFFSET 2 7#define JUMP_LABEL_NOP_OFFSET 2
8 8
9#ifdef CONFIG_64BIT
10#define ASM_PTR ".quad"
11#define ASM_ALIGN ".balign 8"
12#else
13#define ASM_PTR ".long"
14#define ASM_ALIGN ".balign 4"
15#endif
16
17/* 9/*
18 * We use a brcl 0,2 instruction for jump labels at compile time so it 10 * We use a brcl 0,2 instruction for jump labels at compile time so it
19 * can be easily distinguished from a hotpatch generated instruction. 11 * can be easily distinguished from a hotpatch generated instruction.
@@ -22,8 +14,8 @@ static __always_inline bool arch_static_branch(struct static_key *key)
22{ 14{
23 asm_volatile_goto("0: brcl 0,"__stringify(JUMP_LABEL_NOP_OFFSET)"\n" 15 asm_volatile_goto("0: brcl 0,"__stringify(JUMP_LABEL_NOP_OFFSET)"\n"
24 ".pushsection __jump_table, \"aw\"\n" 16 ".pushsection __jump_table, \"aw\"\n"
25 ASM_ALIGN "\n" 17 ".balign 8\n"
26 ASM_PTR " 0b, %l[label], %0\n" 18 ".quad 0b, %l[label], %0\n"
27 ".popsection\n" 19 ".popsection\n"
28 : : "X" (key) : : label); 20 : : "X" (key) : : label);
29 return false; 21 return false;
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 34fbcac61133..663f23e37460 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -13,163 +13,6 @@
13#include <asm/cpu.h> 13#include <asm/cpu.h>
14#include <asm/types.h> 14#include <asm/types.h>
15 15
16#ifdef CONFIG_32BIT
17
18#define LC_ORDER 0
19#define LC_PAGES 1
20
21struct save_area {
22 u32 ext_save;
23 u64 timer;
24 u64 clk_cmp;
25 u8 pad1[24];
26 u8 psw[8];
27 u32 pref_reg;
28 u8 pad2[20];
29 u32 acc_regs[16];
30 u64 fp_regs[4];
31 u32 gp_regs[16];
32 u32 ctrl_regs[16];
33} __packed;
34
35struct save_area_ext {
36 struct save_area sa;
37 __vector128 vx_regs[32];
38};
39
40struct _lowcore {
41 psw_t restart_psw; /* 0x0000 */
42 psw_t restart_old_psw; /* 0x0008 */
43 __u8 pad_0x0010[0x0014-0x0010]; /* 0x0010 */
44 __u32 ipl_parmblock_ptr; /* 0x0014 */
45 psw_t external_old_psw; /* 0x0018 */
46 psw_t svc_old_psw; /* 0x0020 */
47 psw_t program_old_psw; /* 0x0028 */
48 psw_t mcck_old_psw; /* 0x0030 */
49 psw_t io_old_psw; /* 0x0038 */
50 __u8 pad_0x0040[0x0058-0x0040]; /* 0x0040 */
51 psw_t external_new_psw; /* 0x0058 */
52 psw_t svc_new_psw; /* 0x0060 */
53 psw_t program_new_psw; /* 0x0068 */
54 psw_t mcck_new_psw; /* 0x0070 */
55 psw_t io_new_psw; /* 0x0078 */
56 __u32 ext_params; /* 0x0080 */
57 __u16 ext_cpu_addr; /* 0x0084 */
58 __u16 ext_int_code; /* 0x0086 */
59 __u16 svc_ilc; /* 0x0088 */
60 __u16 svc_code; /* 0x008a */
61 __u16 pgm_ilc; /* 0x008c */
62 __u16 pgm_code; /* 0x008e */
63 __u32 trans_exc_code; /* 0x0090 */
64 __u16 mon_class_num; /* 0x0094 */
65 __u8 per_code; /* 0x0096 */
66 __u8 per_atmid; /* 0x0097 */
67 __u32 per_address; /* 0x0098 */
68 __u32 monitor_code; /* 0x009c */
69 __u8 exc_access_id; /* 0x00a0 */
70 __u8 per_access_id; /* 0x00a1 */
71 __u8 op_access_id; /* 0x00a2 */
72 __u8 ar_mode_id; /* 0x00a3 */
73 __u8 pad_0x00a4[0x00b8-0x00a4]; /* 0x00a4 */
74 __u16 subchannel_id; /* 0x00b8 */
75 __u16 subchannel_nr; /* 0x00ba */
76 __u32 io_int_parm; /* 0x00bc */
77 __u32 io_int_word; /* 0x00c0 */
78 __u8 pad_0x00c4[0x00c8-0x00c4]; /* 0x00c4 */
79 __u32 stfl_fac_list; /* 0x00c8 */
80 __u8 pad_0x00cc[0x00d4-0x00cc]; /* 0x00cc */
81 __u32 extended_save_area_addr; /* 0x00d4 */
82 __u32 cpu_timer_save_area[2]; /* 0x00d8 */
83 __u32 clock_comp_save_area[2]; /* 0x00e0 */
84 __u32 mcck_interruption_code[2]; /* 0x00e8 */
85 __u8 pad_0x00f0[0x00f4-0x00f0]; /* 0x00f0 */
86 __u32 external_damage_code; /* 0x00f4 */
87 __u32 failing_storage_address; /* 0x00f8 */
88 __u8 pad_0x00fc[0x0100-0x00fc]; /* 0x00fc */
89 psw_t psw_save_area; /* 0x0100 */
90 __u32 prefixreg_save_area; /* 0x0108 */
91 __u8 pad_0x010c[0x0120-0x010c]; /* 0x010c */
92
93 /* CPU register save area: defined by architecture */
94 __u32 access_regs_save_area[16]; /* 0x0120 */
95 __u32 floating_pt_save_area[8]; /* 0x0160 */
96 __u32 gpregs_save_area[16]; /* 0x0180 */
97 __u32 cregs_save_area[16]; /* 0x01c0 */
98
99 /* Save areas. */
100 __u32 save_area_sync[8]; /* 0x0200 */
101 __u32 save_area_async[8]; /* 0x0220 */
102 __u32 save_area_restart[1]; /* 0x0240 */
103
104 /* CPU flags. */
105 __u32 cpu_flags; /* 0x0244 */
106
107 /* Return psws. */
108 psw_t return_psw; /* 0x0248 */
109 psw_t return_mcck_psw; /* 0x0250 */
110
111 /* CPU time accounting values */
112 __u64 sync_enter_timer; /* 0x0258 */
113 __u64 async_enter_timer; /* 0x0260 */
114 __u64 mcck_enter_timer; /* 0x0268 */
115 __u64 exit_timer; /* 0x0270 */
116 __u64 user_timer; /* 0x0278 */
117 __u64 system_timer; /* 0x0280 */
118 __u64 steal_timer; /* 0x0288 */
119 __u64 last_update_timer; /* 0x0290 */
120 __u64 last_update_clock; /* 0x0298 */
121 __u64 int_clock; /* 0x02a0 */
122 __u64 mcck_clock; /* 0x02a8 */
123 __u64 clock_comparator; /* 0x02b0 */
124
125 /* Current process. */
126 __u32 current_task; /* 0x02b8 */
127 __u32 thread_info; /* 0x02bc */
128 __u32 kernel_stack; /* 0x02c0 */
129
130 /* Interrupt, panic and restart stack. */
131 __u32 async_stack; /* 0x02c4 */
132 __u32 panic_stack; /* 0x02c8 */
133 __u32 restart_stack; /* 0x02cc */
134
135 /* Restart function and parameter. */
136 __u32 restart_fn; /* 0x02d0 */
137 __u32 restart_data; /* 0x02d4 */
138 __u32 restart_source; /* 0x02d8 */
139
140 /* Address space pointer. */
141 __u32 kernel_asce; /* 0x02dc */
142 __u32 user_asce; /* 0x02e0 */
143 __u32 current_pid; /* 0x02e4 */
144
145 /* SMP info area */
146 __u32 cpu_nr; /* 0x02e8 */
147 __u32 softirq_pending; /* 0x02ec */
148 __u32 percpu_offset; /* 0x02f0 */
149 __u32 machine_flags; /* 0x02f4 */
150 __u8 pad_0x02f8[0x02fc-0x02f8]; /* 0x02f8 */
151 __u32 spinlock_lockval; /* 0x02fc */
152
153 __u8 pad_0x0300[0x0e00-0x0300]; /* 0x0300 */
154
155 /*
156 * 0xe00 contains the address of the IPL Parameter Information
157 * block. Dump tools need IPIB for IPL after dump.
158 * Note: do not change the position of any fields in 0x0e00-0x0f00
159 */
160 __u32 ipib; /* 0x0e00 */
161 __u32 ipib_checksum; /* 0x0e04 */
162 __u32 vmcore_info; /* 0x0e08 */
163 __u8 pad_0x0e0c[0x0e18-0x0e0c]; /* 0x0e0c */
164 __u32 os_info; /* 0x0e18 */
165 __u8 pad_0x0e1c[0x0f00-0x0e1c]; /* 0x0e1c */
166
167 /* Extended facility list */
168 __u64 stfle_fac_list[32]; /* 0x0f00 */
169} __packed;
170
171#else /* CONFIG_32BIT */
172
173#define LC_ORDER 1 16#define LC_ORDER 1
174#define LC_PAGES 2 17#define LC_PAGES 2
175 18
@@ -354,8 +197,6 @@ struct _lowcore {
354 __u8 vector_save_area[1024]; /* 0x1c00 */ 197 __u8 vector_save_area[1024]; /* 0x1c00 */
355} __packed; 198} __packed;
356 199
357#endif /* CONFIG_32BIT */
358
359#define S390_lowcore (*((struct _lowcore *) 0)) 200#define S390_lowcore (*((struct _lowcore *) 0))
360 201
361extern struct _lowcore *lowcore_ptr[]; 202extern struct _lowcore *lowcore_ptr[];
diff --git a/arch/s390/include/asm/mman.h b/arch/s390/include/asm/mman.h
index 9977e08df5bd..b55a59e1d134 100644
--- a/arch/s390/include/asm/mman.h
+++ b/arch/s390/include/asm/mman.h
@@ -8,7 +8,7 @@
8 8
9#include <uapi/asm/mman.h> 9#include <uapi/asm/mman.h>
10 10
11#if !defined(__ASSEMBLY__) && defined(CONFIG_64BIT) 11#ifndef __ASSEMBLY__
12int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags); 12int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
13#define arch_mmap_check(addr, len, flags) s390_mmap_check(addr, len, flags) 13#define arch_mmap_check(addr, len, flags) s390_mmap_check(addr, len, flags)
14#endif 14#endif
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 8fb3802f8fad..d25d9ff10ba8 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -19,9 +19,7 @@ static inline int init_new_context(struct task_struct *tsk,
19 atomic_set(&mm->context.attach_count, 0); 19 atomic_set(&mm->context.attach_count, 0);
20 mm->context.flush_mm = 0; 20 mm->context.flush_mm = 0;
21 mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS; 21 mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
22#ifdef CONFIG_64BIT
23 mm->context.asce_bits |= _ASCE_TYPE_REGION3; 22 mm->context.asce_bits |= _ASCE_TYPE_REGION3;
24#endif
25 mm->context.has_pgste = 0; 23 mm->context.has_pgste = 0;
26 mm->context.use_skey = 0; 24 mm->context.use_skey = 0;
27 mm->context.asce_limit = STACK_TOP_MAX; 25 mm->context.asce_limit = STACK_TOP_MAX;
@@ -110,10 +108,8 @@ static inline void activate_mm(struct mm_struct *prev,
110static inline void arch_dup_mmap(struct mm_struct *oldmm, 108static inline void arch_dup_mmap(struct mm_struct *oldmm,
111 struct mm_struct *mm) 109 struct mm_struct *mm)
112{ 110{
113#ifdef CONFIG_64BIT
114 if (oldmm->context.asce_limit < mm->context.asce_limit) 111 if (oldmm->context.asce_limit < mm->context.asce_limit)
115 crst_table_downgrade(mm, oldmm->context.asce_limit); 112 crst_table_downgrade(mm, oldmm->context.asce_limit);
116#endif
117} 113}
118 114
119static inline void arch_exit_mmap(struct mm_struct *mm) 115static inline void arch_exit_mmap(struct mm_struct *mm)
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index 933355e0d091..6d6556ca24aa 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -10,8 +10,6 @@
10 */ 10 */
11#define __my_cpu_offset S390_lowcore.percpu_offset 11#define __my_cpu_offset S390_lowcore.percpu_offset
12 12
13#ifdef CONFIG_64BIT
14
15/* 13/*
16 * For 64 bit module code, the module may be more than 4G above the 14 * For 64 bit module code, the module may be more than 4G above the
17 * per cpu area, use weak definitions to force the compiler to 15 * per cpu area, use weak definitions to force the compiler to
@@ -183,8 +181,6 @@
183#define this_cpu_cmpxchg_double_4 arch_this_cpu_cmpxchg_double 181#define this_cpu_cmpxchg_double_4 arch_this_cpu_cmpxchg_double
184#define this_cpu_cmpxchg_double_8 arch_this_cpu_cmpxchg_double 182#define this_cpu_cmpxchg_double_8 arch_this_cpu_cmpxchg_double
185 183
186#endif /* CONFIG_64BIT */
187
188#include <asm-generic/percpu.h> 184#include <asm-generic/percpu.h>
189 185
190#endif /* __ARCH_S390_PERCPU__ */ 186#endif /* __ARCH_S390_PERCPU__ */
diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h
index 159a8ec6da9a..4cb19fe76dd9 100644
--- a/arch/s390/include/asm/perf_event.h
+++ b/arch/s390/include/asm/perf_event.h
@@ -9,8 +9,6 @@
9#ifndef _ASM_S390_PERF_EVENT_H 9#ifndef _ASM_S390_PERF_EVENT_H
10#define _ASM_S390_PERF_EVENT_H 10#define _ASM_S390_PERF_EVENT_H
11 11
12#ifdef CONFIG_64BIT
13
14#include <linux/perf_event.h> 12#include <linux/perf_event.h>
15#include <linux/device.h> 13#include <linux/device.h>
16#include <asm/cpu_mf.h> 14#include <asm/cpu_mf.h>
@@ -92,5 +90,4 @@ struct sf_raw_sample {
92int perf_reserve_sampling(void); 90int perf_reserve_sampling(void);
93void perf_release_sampling(void); 91void perf_release_sampling(void);
94 92
95#endif /* CONFIG_64BIT */
96#endif /* _ASM_S390_PERF_EVENT_H */ 93#endif /* _ASM_S390_PERF_EVENT_H */
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 3009c2ba46d2..51e7fb634ebc 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -33,11 +33,7 @@ static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
33 *s = val; 33 *s = val;
34 n = (n / 256) - 1; 34 n = (n / 256) - 1;
35 asm volatile( 35 asm volatile(
36#ifdef CONFIG_64BIT
37 " mvc 8(248,%0),0(%0)\n" 36 " mvc 8(248,%0),0(%0)\n"
38#else
39 " mvc 4(252,%0),0(%0)\n"
40#endif
41 "0: mvc 256(256,%0),0(%0)\n" 37 "0: mvc 256(256,%0),0(%0)\n"
42 " la %0,256(%0)\n" 38 " la %0,256(%0)\n"
43 " brct %1,0b\n" 39 " brct %1,0b\n"
@@ -50,24 +46,6 @@ static inline void crst_table_init(unsigned long *crst, unsigned long entry)
50 clear_table(crst, entry, sizeof(unsigned long)*2048); 46 clear_table(crst, entry, sizeof(unsigned long)*2048);
51} 47}
52 48
53#ifndef CONFIG_64BIT
54
55static inline unsigned long pgd_entry_type(struct mm_struct *mm)
56{
57 return _SEGMENT_ENTRY_EMPTY;
58}
59
60#define pud_alloc_one(mm,address) ({ BUG(); ((pud_t *)2); })
61#define pud_free(mm, x) do { } while (0)
62
63#define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); })
64#define pmd_free(mm, x) do { } while (0)
65
66#define pgd_populate(mm, pgd, pud) BUG()
67#define pud_populate(mm, pud, pmd) BUG()
68
69#else /* CONFIG_64BIT */
70
71static inline unsigned long pgd_entry_type(struct mm_struct *mm) 49static inline unsigned long pgd_entry_type(struct mm_struct *mm)
72{ 50{
73 if (mm->context.asce_limit <= (1UL << 31)) 51 if (mm->context.asce_limit <= (1UL << 31))
@@ -119,8 +97,6 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
119 pud_val(*pud) = _REGION3_ENTRY | __pa(pmd); 97 pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
120} 98}
121 99
122#endif /* CONFIG_64BIT */
123
124static inline pgd_t *pgd_alloc(struct mm_struct *mm) 100static inline pgd_t *pgd_alloc(struct mm_struct *mm)
125{ 101{
126 spin_lock_init(&mm->context.list_lock); 102 spin_lock_init(&mm->context.list_lock);
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index e08ec38f8c6e..989cfae9e202 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -66,15 +66,9 @@ extern unsigned long zero_page_mask;
66 * table can map 66 * table can map
67 * PGDIR_SHIFT determines what a third-level page table entry can map 67 * PGDIR_SHIFT determines what a third-level page table entry can map
68 */ 68 */
69#ifndef CONFIG_64BIT 69#define PMD_SHIFT 20
70# define PMD_SHIFT 20 70#define PUD_SHIFT 31
71# define PUD_SHIFT 20 71#define PGDIR_SHIFT 42
72# define PGDIR_SHIFT 20
73#else /* CONFIG_64BIT */
74# define PMD_SHIFT 20
75# define PUD_SHIFT 31
76# define PGDIR_SHIFT 42
77#endif /* CONFIG_64BIT */
78 72
79#define PMD_SIZE (1UL << PMD_SHIFT) 73#define PMD_SIZE (1UL << PMD_SHIFT)
80#define PMD_MASK (~(PMD_SIZE-1)) 74#define PMD_MASK (~(PMD_SIZE-1))
@@ -90,15 +84,8 @@ extern unsigned long zero_page_mask;
90 * that leads to 1024 pte per pgd 84 * that leads to 1024 pte per pgd
91 */ 85 */
92#define PTRS_PER_PTE 256 86#define PTRS_PER_PTE 256
93#ifndef CONFIG_64BIT
94#define __PAGETABLE_PUD_FOLDED
95#define PTRS_PER_PMD 1
96#define __PAGETABLE_PMD_FOLDED
97#define PTRS_PER_PUD 1
98#else /* CONFIG_64BIT */
99#define PTRS_PER_PMD 2048 87#define PTRS_PER_PMD 2048
100#define PTRS_PER_PUD 2048 88#define PTRS_PER_PUD 2048
101#endif /* CONFIG_64BIT */
102#define PTRS_PER_PGD 2048 89#define PTRS_PER_PGD 2048
103 90
104#define FIRST_USER_ADDRESS 0UL 91#define FIRST_USER_ADDRESS 0UL
@@ -127,23 +114,19 @@ extern struct page *vmemmap;
127 114
128#define VMEM_MAX_PHYS ((unsigned long) vmemmap) 115#define VMEM_MAX_PHYS ((unsigned long) vmemmap)
129 116
130#ifdef CONFIG_64BIT
131extern unsigned long MODULES_VADDR; 117extern unsigned long MODULES_VADDR;
132extern unsigned long MODULES_END; 118extern unsigned long MODULES_END;
133#define MODULES_VADDR MODULES_VADDR 119#define MODULES_VADDR MODULES_VADDR
134#define MODULES_END MODULES_END 120#define MODULES_END MODULES_END
135#define MODULES_LEN (1UL << 31) 121#define MODULES_LEN (1UL << 31)
136#endif
137 122
138static inline int is_module_addr(void *addr) 123static inline int is_module_addr(void *addr)
139{ 124{
140#ifdef CONFIG_64BIT
141 BUILD_BUG_ON(MODULES_LEN > (1UL << 31)); 125 BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
142 if (addr < (void *)MODULES_VADDR) 126 if (addr < (void *)MODULES_VADDR)
143 return 0; 127 return 0;
144 if (addr > (void *)MODULES_END) 128 if (addr > (void *)MODULES_END)
145 return 0; 129 return 0;
146#endif
147 return 1; 130 return 1;
148} 131}
149 132
@@ -284,56 +267,6 @@ static inline int is_module_addr(void *addr)
284 * pte_swap is true for the bit pattern .10...xxxx10, (pte & 0x603) == 0x402 267 * pte_swap is true for the bit pattern .10...xxxx10, (pte & 0x603) == 0x402
285 */ 268 */
286 269
287#ifndef CONFIG_64BIT
288
289/* Bits in the segment table address-space-control-element */
290#define _ASCE_SPACE_SWITCH 0x80000000UL /* space switch event */
291#define _ASCE_ORIGIN_MASK 0x7ffff000UL /* segment table origin */
292#define _ASCE_PRIVATE_SPACE 0x100 /* private space control */
293#define _ASCE_ALT_EVENT 0x80 /* storage alteration event control */
294#define _ASCE_TABLE_LENGTH 0x7f /* 128 x 64 entries = 8k */
295
296/* Bits in the segment table entry */
297#define _SEGMENT_ENTRY_BITS 0x7fffffffUL /* Valid segment table bits */
298#define _SEGMENT_ENTRY_ORIGIN 0x7fffffc0UL /* page table origin */
299#define _SEGMENT_ENTRY_PROTECT 0x200 /* page protection bit */
300#define _SEGMENT_ENTRY_INVALID 0x20 /* invalid segment table entry */
301#define _SEGMENT_ENTRY_COMMON 0x10 /* common segment bit */
302#define _SEGMENT_ENTRY_PTL 0x0f /* page table length */
303
304#define _SEGMENT_ENTRY_DIRTY 0 /* No sw dirty bit for 31-bit */
305#define _SEGMENT_ENTRY_YOUNG 0 /* No sw young bit for 31-bit */
306#define _SEGMENT_ENTRY_READ 0 /* No sw read bit for 31-bit */
307#define _SEGMENT_ENTRY_WRITE 0 /* No sw write bit for 31-bit */
308#define _SEGMENT_ENTRY_LARGE 0 /* No large pages for 31-bit */
309#define _SEGMENT_ENTRY_BITS_LARGE 0
310#define _SEGMENT_ENTRY_ORIGIN_LARGE 0
311
312#define _SEGMENT_ENTRY (_SEGMENT_ENTRY_PTL)
313#define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INVALID)
314
315/*
316 * Segment table entry encoding (I = invalid, R = read-only bit):
317 * ..R...I.....
318 * prot-none ..1...1.....
319 * read-only ..1...0.....
320 * read-write ..0...0.....
321 * empty ..0...1.....
322 */
323
324/* Page status table bits for virtualization */
325#define PGSTE_ACC_BITS 0xf0000000UL
326#define PGSTE_FP_BIT 0x08000000UL
327#define PGSTE_PCL_BIT 0x00800000UL
328#define PGSTE_HR_BIT 0x00400000UL
329#define PGSTE_HC_BIT 0x00200000UL
330#define PGSTE_GR_BIT 0x00040000UL
331#define PGSTE_GC_BIT 0x00020000UL
332#define PGSTE_UC_BIT 0x00008000UL /* user dirty (migration) */
333#define PGSTE_IN_BIT 0x00004000UL /* IPTE notify bit */
334
335#else /* CONFIG_64BIT */
336
337/* Bits in the segment/region table address-space-control-element */ 270/* Bits in the segment/region table address-space-control-element */
338#define _ASCE_ORIGIN ~0xfffUL/* segment table origin */ 271#define _ASCE_ORIGIN ~0xfffUL/* segment table origin */
339#define _ASCE_PRIVATE_SPACE 0x100 /* private space control */ 272#define _ASCE_PRIVATE_SPACE 0x100 /* private space control */
@@ -417,8 +350,6 @@ static inline int is_module_addr(void *addr)
417#define PGSTE_UC_BIT 0x0000800000000000UL /* user dirty (migration) */ 350#define PGSTE_UC_BIT 0x0000800000000000UL /* user dirty (migration) */
418#define PGSTE_IN_BIT 0x0000400000000000UL /* IPTE notify bit */ 351#define PGSTE_IN_BIT 0x0000400000000000UL /* IPTE notify bit */
419 352
420#endif /* CONFIG_64BIT */
421
422/* Guest Page State used for virtualization */ 353/* Guest Page State used for virtualization */
423#define _PGSTE_GPS_ZERO 0x0000000080000000UL 354#define _PGSTE_GPS_ZERO 0x0000000080000000UL
424#define _PGSTE_GPS_USAGE_MASK 0x0000000003000000UL 355#define _PGSTE_GPS_USAGE_MASK 0x0000000003000000UL
@@ -509,19 +440,6 @@ static inline int mm_use_skey(struct mm_struct *mm)
509/* 440/*
510 * pgd/pmd/pte query functions 441 * pgd/pmd/pte query functions
511 */ 442 */
512#ifndef CONFIG_64BIT
513
514static inline int pgd_present(pgd_t pgd) { return 1; }
515static inline int pgd_none(pgd_t pgd) { return 0; }
516static inline int pgd_bad(pgd_t pgd) { return 0; }
517
518static inline int pud_present(pud_t pud) { return 1; }
519static inline int pud_none(pud_t pud) { return 0; }
520static inline int pud_large(pud_t pud) { return 0; }
521static inline int pud_bad(pud_t pud) { return 0; }
522
523#else /* CONFIG_64BIT */
524
525static inline int pgd_present(pgd_t pgd) 443static inline int pgd_present(pgd_t pgd)
526{ 444{
527 if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2) 445 if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
@@ -583,8 +501,6 @@ static inline int pud_bad(pud_t pud)
583 return (pud_val(pud) & mask) != 0; 501 return (pud_val(pud) & mask) != 0;
584} 502}
585 503
586#endif /* CONFIG_64BIT */
587
588static inline int pmd_present(pmd_t pmd) 504static inline int pmd_present(pmd_t pmd)
589{ 505{
590 return pmd_val(pmd) != _SEGMENT_ENTRY_INVALID; 506 return pmd_val(pmd) != _SEGMENT_ENTRY_INVALID;
@@ -916,18 +832,14 @@ static inline int pte_unused(pte_t pte)
916 832
917static inline void pgd_clear(pgd_t *pgd) 833static inline void pgd_clear(pgd_t *pgd)
918{ 834{
919#ifdef CONFIG_64BIT
920 if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2) 835 if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
921 pgd_val(*pgd) = _REGION2_ENTRY_EMPTY; 836 pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
922#endif
923} 837}
924 838
925static inline void pud_clear(pud_t *pud) 839static inline void pud_clear(pud_t *pud)
926{ 840{
927#ifdef CONFIG_64BIT
928 if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) 841 if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
929 pud_val(*pud) = _REGION3_ENTRY_EMPTY; 842 pud_val(*pud) = _REGION3_ENTRY_EMPTY;
930#endif
931} 843}
932 844
933static inline void pmd_clear(pmd_t *pmdp) 845static inline void pmd_clear(pmd_t *pmdp)
@@ -1026,10 +938,6 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
1026{ 938{
1027 unsigned long pto = (unsigned long) ptep; 939 unsigned long pto = (unsigned long) ptep;
1028 940
1029#ifndef CONFIG_64BIT
1030 /* pto in ESA mode must point to the start of the segment table */
1031 pto &= 0x7ffffc00;
1032#endif
1033 /* Invalidation + global TLB flush for the pte */ 941 /* Invalidation + global TLB flush for the pte */
1034 asm volatile( 942 asm volatile(
1035 " ipte %2,%3" 943 " ipte %2,%3"
@@ -1040,10 +948,6 @@ static inline void __ptep_ipte_local(unsigned long address, pte_t *ptep)
1040{ 948{
1041 unsigned long pto = (unsigned long) ptep; 949 unsigned long pto = (unsigned long) ptep;
1042 950
1043#ifndef CONFIG_64BIT
1044 /* pto in ESA mode must point to the start of the segment table */
1045 pto &= 0x7ffffc00;
1046#endif
1047 /* Invalidation + local TLB flush for the pte */ 951 /* Invalidation + local TLB flush for the pte */
1048 asm volatile( 952 asm volatile(
1049 " .insn rrf,0xb2210000,%2,%3,0,1" 953 " .insn rrf,0xb2210000,%2,%3,0,1"
@@ -1054,10 +958,6 @@ static inline void __ptep_ipte_range(unsigned long address, int nr, pte_t *ptep)
1054{ 958{
1055 unsigned long pto = (unsigned long) ptep; 959 unsigned long pto = (unsigned long) ptep;
1056 960
1057#ifndef CONFIG_64BIT
1058 /* pto in ESA mode must point to the start of the segment table */
1059 pto &= 0x7ffffc00;
1060#endif
1061 /* Invalidate a range of ptes + global TLB flush of the ptes */ 961 /* Invalidate a range of ptes + global TLB flush of the ptes */
1062 do { 962 do {
1063 asm volatile( 963 asm volatile(
@@ -1376,17 +1276,6 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
1376#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) 1276#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
1377#define pgd_offset_k(address) pgd_offset(&init_mm, address) 1277#define pgd_offset_k(address) pgd_offset(&init_mm, address)
1378 1278
1379#ifndef CONFIG_64BIT
1380
1381#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
1382#define pud_deref(pmd) ({ BUG(); 0UL; })
1383#define pgd_deref(pmd) ({ BUG(); 0UL; })
1384
1385#define pud_offset(pgd, address) ((pud_t *) pgd)
1386#define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))
1387
1388#else /* CONFIG_64BIT */
1389
1390#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) 1279#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
1391#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN) 1280#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
1392#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) 1281#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)
@@ -1407,8 +1296,6 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
1407 return pmd + pmd_index(address); 1296 return pmd + pmd_index(address);
1408} 1297}
1409 1298
1410#endif /* CONFIG_64BIT */
1411
1412#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot)) 1299#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
1413#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT) 1300#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
1414#define pte_page(x) pfn_to_page(pte_pfn(x)) 1301#define pte_page(x) pfn_to_page(pte_pfn(x))
@@ -1729,11 +1616,9 @@ static inline int has_transparent_hugepage(void)
1729 * 0000000000111111111122222222223333333333444444444455 5555 5 55566 66 1616 * 0000000000111111111122222222223333333333444444444455 5555 5 55566 66
1730 * 0123456789012345678901234567890123456789012345678901 2345 6 78901 23 1617 * 0123456789012345678901234567890123456789012345678901 2345 6 78901 23
1731 */ 1618 */
1732#ifndef CONFIG_64BIT 1619
1733#define __SWP_OFFSET_MASK (~0UL >> 12)
1734#else
1735#define __SWP_OFFSET_MASK (~0UL >> 11) 1620#define __SWP_OFFSET_MASK (~0UL >> 11)
1736#endif 1621
1737static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) 1622static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
1738{ 1623{
1739 pte_t pte; 1624 pte_t pte;
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index e7cbbdcdee13..dedb6218544b 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -19,7 +19,6 @@
19#define _CIF_ASCE (1<<CIF_ASCE) 19#define _CIF_ASCE (1<<CIF_ASCE)
20#define _CIF_NOHZ_DELAY (1<<CIF_NOHZ_DELAY) 20#define _CIF_NOHZ_DELAY (1<<CIF_NOHZ_DELAY)
21 21
22
23#ifndef __ASSEMBLY__ 22#ifndef __ASSEMBLY__
24 23
25#include <linux/linkage.h> 24#include <linux/linkage.h>
@@ -66,13 +65,6 @@ extern void execve_tail(void);
66/* 65/*
67 * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit. 66 * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit.
68 */ 67 */
69#ifndef CONFIG_64BIT
70
71#define TASK_SIZE (1UL << 31)
72#define TASK_MAX_SIZE (1UL << 31)
73#define TASK_UNMAPPED_BASE (1UL << 30)
74
75#else /* CONFIG_64BIT */
76 68
77#define TASK_SIZE_OF(tsk) ((tsk)->mm->context.asce_limit) 69#define TASK_SIZE_OF(tsk) ((tsk)->mm->context.asce_limit)
78#define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? \ 70#define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? \
@@ -80,15 +72,8 @@ extern void execve_tail(void);
80#define TASK_SIZE TASK_SIZE_OF(current) 72#define TASK_SIZE TASK_SIZE_OF(current)
81#define TASK_MAX_SIZE (1UL << 53) 73#define TASK_MAX_SIZE (1UL << 53)
82 74
83#endif /* CONFIG_64BIT */
84
85#ifndef CONFIG_64BIT
86#define STACK_TOP (1UL << 31)
87#define STACK_TOP_MAX (1UL << 31)
88#else /* CONFIG_64BIT */
89#define STACK_TOP (1UL << (test_thread_flag(TIF_31BIT) ? 31:42)) 75#define STACK_TOP (1UL << (test_thread_flag(TIF_31BIT) ? 31:42))
90#define STACK_TOP_MAX (1UL << 42) 76#define STACK_TOP_MAX (1UL << 42)
91#endif /* CONFIG_64BIT */
92 77
93#define HAVE_ARCH_PICK_MMAP_LAYOUT 78#define HAVE_ARCH_PICK_MMAP_LAYOUT
94 79
@@ -115,10 +100,8 @@ struct thread_struct {
115 /* cpu runtime instrumentation */ 100 /* cpu runtime instrumentation */
116 struct runtime_instr_cb *ri_cb; 101 struct runtime_instr_cb *ri_cb;
117 int ri_signum; 102 int ri_signum;
118#ifdef CONFIG_64BIT
119 unsigned char trap_tdb[256]; /* Transaction abort diagnose block */ 103 unsigned char trap_tdb[256]; /* Transaction abort diagnose block */
120 __vector128 *vxrs; /* Vector register save area */ 104 __vector128 *vxrs; /* Vector register save area */
121#endif
122}; 105};
123 106
124/* Flag to disable transactions. */ 107/* Flag to disable transactions. */
@@ -181,11 +164,7 @@ struct task_struct;
181struct mm_struct; 164struct mm_struct;
182struct seq_file; 165struct seq_file;
183 166
184#ifdef CONFIG_64BIT 167void show_cacheinfo(struct seq_file *m);
185extern void show_cacheinfo(struct seq_file *m);
186#else
187static inline void show_cacheinfo(struct seq_file *m) { }
188#endif
189 168
190/* Free all resources held by a thread. */ 169/* Free all resources held by a thread. */
191extern void release_thread(struct task_struct *); 170extern void release_thread(struct task_struct *);
@@ -229,11 +208,7 @@ static inline void psw_set_key(unsigned int key)
229 */ 208 */
230static inline void __load_psw(psw_t psw) 209static inline void __load_psw(psw_t psw)
231{ 210{
232#ifndef CONFIG_64BIT
233 asm volatile("lpsw %0" : : "Q" (psw) : "cc");
234#else
235 asm volatile("lpswe %0" : : "Q" (psw) : "cc"); 211 asm volatile("lpswe %0" : : "Q" (psw) : "cc");
236#endif
237} 212}
238 213
239/* 214/*
@@ -247,22 +222,12 @@ static inline void __load_psw_mask (unsigned long mask)
247 222
248 psw.mask = mask; 223 psw.mask = mask;
249 224
250#ifndef CONFIG_64BIT
251 asm volatile(
252 " basr %0,0\n"
253 "0: ahi %0,1f-0b\n"
254 " st %0,%O1+4(%R1)\n"
255 " lpsw %1\n"
256 "1:"
257 : "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc");
258#else /* CONFIG_64BIT */
259 asm volatile( 225 asm volatile(
260 " larl %0,1f\n" 226 " larl %0,1f\n"
261 " stg %0,%O1+8(%R1)\n" 227 " stg %0,%O1+8(%R1)\n"
262 " lpswe %1\n" 228 " lpswe %1\n"
263 "1:" 229 "1:"
264 : "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc"); 230 : "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc");
265#endif /* CONFIG_64BIT */
266} 231}
267 232
268/* 233/*
@@ -270,20 +235,12 @@ static inline void __load_psw_mask (unsigned long mask)
270 */ 235 */
271static inline unsigned long __rewind_psw(psw_t psw, unsigned long ilc) 236static inline unsigned long __rewind_psw(psw_t psw, unsigned long ilc)
272{ 237{
273#ifndef CONFIG_64BIT
274 if (psw.addr & PSW_ADDR_AMODE)
275 /* 31 bit mode */
276 return (psw.addr - ilc) | PSW_ADDR_AMODE;
277 /* 24 bit mode */
278 return (psw.addr - ilc) & ((1UL << 24) - 1);
279#else
280 unsigned long mask; 238 unsigned long mask;
281 239
282 mask = (psw.mask & PSW_MASK_EA) ? -1UL : 240 mask = (psw.mask & PSW_MASK_EA) ? -1UL :
283 (psw.mask & PSW_MASK_BA) ? (1UL << 31) - 1 : 241 (psw.mask & PSW_MASK_BA) ? (1UL << 31) - 1 :
284 (1UL << 24) - 1; 242 (1UL << 24) - 1;
285 return (psw.addr - ilc) & mask; 243 return (psw.addr - ilc) & mask;
286#endif
287} 244}
288 245
289/* 246/*
@@ -305,26 +262,6 @@ static inline void __noreturn disabled_wait(unsigned long code)
305 * Store status and then load disabled wait psw, 262 * Store status and then load disabled wait psw,
306 * the processor is dead afterwards 263 * the processor is dead afterwards
307 */ 264 */
308#ifndef CONFIG_64BIT
309 asm volatile(
310 " stctl 0,0,0(%2)\n"
311 " ni 0(%2),0xef\n" /* switch off protection */
312 " lctl 0,0,0(%2)\n"
313 " stpt 0xd8\n" /* store timer */
314 " stckc 0xe0\n" /* store clock comparator */
315 " stpx 0x108\n" /* store prefix register */
316 " stam 0,15,0x120\n" /* store access registers */
317 " std 0,0x160\n" /* store f0 */
318 " std 2,0x168\n" /* store f2 */
319 " std 4,0x170\n" /* store f4 */
320 " std 6,0x178\n" /* store f6 */
321 " stm 0,15,0x180\n" /* store general registers */
322 " stctl 0,15,0x1c0\n" /* store control registers */
323 " oi 0x1c0,0x10\n" /* fake protection bit */
324 " lpsw 0(%1)"
325 : "=m" (ctl_buf)
326 : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc");
327#else /* CONFIG_64BIT */
328 asm volatile( 265 asm volatile(
329 " stctg 0,0,0(%2)\n" 266 " stctg 0,0,0(%2)\n"
330 " ni 4(%2),0xef\n" /* switch off protection */ 267 " ni 4(%2),0xef\n" /* switch off protection */
@@ -357,7 +294,6 @@ static inline void __noreturn disabled_wait(unsigned long code)
357 " lpswe 0(%1)" 294 " lpswe 0(%1)"
358 : "=m" (ctl_buf) 295 : "=m" (ctl_buf)
359 : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc", "0", "1"); 296 : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc", "0", "1");
360#endif /* CONFIG_64BIT */
361 while (1); 297 while (1);
362} 298}
363 299
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index be317feff7ac..6feda2599282 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -40,12 +40,8 @@ struct psw_bits {
40 unsigned long long ri : 1; /* Runtime Instrumentation */ 40 unsigned long long ri : 1; /* Runtime Instrumentation */
41 unsigned long long : 6; 41 unsigned long long : 6;
42 unsigned long long eaba : 2; /* Addressing Mode */ 42 unsigned long long eaba : 2; /* Addressing Mode */
43#ifdef CONFIG_64BIT
44 unsigned long long : 31; 43 unsigned long long : 31;
45 unsigned long long ia : 64;/* Instruction Address */ 44 unsigned long long ia : 64;/* Instruction Address */
46#else
47 unsigned long long ia : 31;/* Instruction Address */
48#endif
49}; 45};
50 46
51enum { 47enum {
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index 06f3034605a1..998b61cd0e56 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -211,11 +211,6 @@ struct qdio_buffer_element {
211 u8 scount; 211 u8 scount;
212 u8 sflags; 212 u8 sflags;
213 u32 length; 213 u32 length;
214#ifdef CONFIG_32BIT
215 /* private: */
216 void *res2;
217 /* public: */
218#endif
219 void *addr; 214 void *addr;
220} __attribute__ ((packed, aligned(16))); 215} __attribute__ ((packed, aligned(16)));
221 216
@@ -232,11 +227,6 @@ struct qdio_buffer {
232 * @sbal: absolute SBAL address 227 * @sbal: absolute SBAL address
233 */ 228 */
234struct sl_element { 229struct sl_element {
235#ifdef CONFIG_32BIT
236 /* private: */
237 unsigned long reserved;
238 /* public: */
239#endif
240 unsigned long sbal; 230 unsigned long sbal;
241} __attribute__ ((packed)); 231} __attribute__ ((packed));
242 232
diff --git a/arch/s390/include/asm/runtime_instr.h b/arch/s390/include/asm/runtime_instr.h
index 830da737ff85..402ad6df4897 100644
--- a/arch/s390/include/asm/runtime_instr.h
+++ b/arch/s390/include/asm/runtime_instr.h
@@ -72,27 +72,19 @@ static inline void store_runtime_instr_cb(struct runtime_instr_cb *cb)
72 72
73static inline void save_ri_cb(struct runtime_instr_cb *cb_prev) 73static inline void save_ri_cb(struct runtime_instr_cb *cb_prev)
74{ 74{
75#ifdef CONFIG_64BIT
76 if (cb_prev) 75 if (cb_prev)
77 store_runtime_instr_cb(cb_prev); 76 store_runtime_instr_cb(cb_prev);
78#endif
79} 77}
80 78
81static inline void restore_ri_cb(struct runtime_instr_cb *cb_next, 79static inline void restore_ri_cb(struct runtime_instr_cb *cb_next,
82 struct runtime_instr_cb *cb_prev) 80 struct runtime_instr_cb *cb_prev)
83{ 81{
84#ifdef CONFIG_64BIT
85 if (cb_next) 82 if (cb_next)
86 load_runtime_instr_cb(cb_next); 83 load_runtime_instr_cb(cb_next);
87 else if (cb_prev) 84 else if (cb_prev)
88 load_runtime_instr_cb(&runtime_instr_empty_cb); 85 load_runtime_instr_cb(&runtime_instr_empty_cb);
89#endif
90} 86}
91 87
92#ifdef CONFIG_64BIT 88void exit_thread_runtime_instr(void);
93extern void exit_thread_runtime_instr(void);
94#else
95static inline void exit_thread_runtime_instr(void) { }
96#endif
97 89
98#endif /* _RUNTIME_INSTR_H */ 90#endif /* _RUNTIME_INSTR_H */
diff --git a/arch/s390/include/asm/rwsem.h b/arch/s390/include/asm/rwsem.h
index 487f9b64efb9..4b43ee7e6776 100644
--- a/arch/s390/include/asm/rwsem.h
+++ b/arch/s390/include/asm/rwsem.h
@@ -39,17 +39,10 @@
39#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead" 39#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
40#endif 40#endif
41 41
42#ifndef CONFIG_64BIT
43#define RWSEM_UNLOCKED_VALUE 0x00000000
44#define RWSEM_ACTIVE_BIAS 0x00000001
45#define RWSEM_ACTIVE_MASK 0x0000ffff
46#define RWSEM_WAITING_BIAS (-0x00010000)
47#else /* CONFIG_64BIT */
48#define RWSEM_UNLOCKED_VALUE 0x0000000000000000L 42#define RWSEM_UNLOCKED_VALUE 0x0000000000000000L
49#define RWSEM_ACTIVE_BIAS 0x0000000000000001L 43#define RWSEM_ACTIVE_BIAS 0x0000000000000001L
50#define RWSEM_ACTIVE_MASK 0x00000000ffffffffL 44#define RWSEM_ACTIVE_MASK 0x00000000ffffffffL
51#define RWSEM_WAITING_BIAS (-0x0000000100000000L) 45#define RWSEM_WAITING_BIAS (-0x0000000100000000L)
52#endif /* CONFIG_64BIT */
53#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS 46#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
54#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) 47#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
55 48
@@ -61,19 +54,11 @@ static inline void __down_read(struct rw_semaphore *sem)
61 signed long old, new; 54 signed long old, new;
62 55
63 asm volatile( 56 asm volatile(
64#ifndef CONFIG_64BIT
65 " l %0,%2\n"
66 "0: lr %1,%0\n"
67 " ahi %1,%4\n"
68 " cs %0,%1,%2\n"
69 " jl 0b"
70#else /* CONFIG_64BIT */
71 " lg %0,%2\n" 57 " lg %0,%2\n"
72 "0: lgr %1,%0\n" 58 "0: lgr %1,%0\n"
73 " aghi %1,%4\n" 59 " aghi %1,%4\n"
74 " csg %0,%1,%2\n" 60 " csg %0,%1,%2\n"
75 " jl 0b" 61 " jl 0b"
76#endif /* CONFIG_64BIT */
77 : "=&d" (old), "=&d" (new), "=Q" (sem->count) 62 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
78 : "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS) 63 : "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
79 : "cc", "memory"); 64 : "cc", "memory");
@@ -89,15 +74,6 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
89 signed long old, new; 74 signed long old, new;
90 75
91 asm volatile( 76 asm volatile(
92#ifndef CONFIG_64BIT
93 " l %0,%2\n"
94 "0: ltr %1,%0\n"
95 " jm 1f\n"
96 " ahi %1,%4\n"
97 " cs %0,%1,%2\n"
98 " jl 0b\n"
99 "1:"
100#else /* CONFIG_64BIT */
101 " lg %0,%2\n" 77 " lg %0,%2\n"
102 "0: ltgr %1,%0\n" 78 "0: ltgr %1,%0\n"
103 " jm 1f\n" 79 " jm 1f\n"
@@ -105,7 +81,6 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
105 " csg %0,%1,%2\n" 81 " csg %0,%1,%2\n"
106 " jl 0b\n" 82 " jl 0b\n"
107 "1:" 83 "1:"
108#endif /* CONFIG_64BIT */
109 : "=&d" (old), "=&d" (new), "=Q" (sem->count) 84 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
110 : "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS) 85 : "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
111 : "cc", "memory"); 86 : "cc", "memory");
@@ -121,19 +96,11 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
121 96
122 tmp = RWSEM_ACTIVE_WRITE_BIAS; 97 tmp = RWSEM_ACTIVE_WRITE_BIAS;
123 asm volatile( 98 asm volatile(
124#ifndef CONFIG_64BIT
125 " l %0,%2\n"
126 "0: lr %1,%0\n"
127 " a %1,%4\n"
128 " cs %0,%1,%2\n"
129 " jl 0b"
130#else /* CONFIG_64BIT */
131 " lg %0,%2\n" 99 " lg %0,%2\n"
132 "0: lgr %1,%0\n" 100 "0: lgr %1,%0\n"
133 " ag %1,%4\n" 101 " ag %1,%4\n"
134 " csg %0,%1,%2\n" 102 " csg %0,%1,%2\n"
135 " jl 0b" 103 " jl 0b"
136#endif /* CONFIG_64BIT */
137 : "=&d" (old), "=&d" (new), "=Q" (sem->count) 104 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
138 : "Q" (sem->count), "m" (tmp) 105 : "Q" (sem->count), "m" (tmp)
139 : "cc", "memory"); 106 : "cc", "memory");
@@ -154,19 +121,11 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
154 signed long old; 121 signed long old;
155 122
156 asm volatile( 123 asm volatile(
157#ifndef CONFIG_64BIT
158 " l %0,%1\n"
159 "0: ltr %0,%0\n"
160 " jnz 1f\n"
161 " cs %0,%3,%1\n"
162 " jl 0b\n"
163#else /* CONFIG_64BIT */
164 " lg %0,%1\n" 124 " lg %0,%1\n"
165 "0: ltgr %0,%0\n" 125 "0: ltgr %0,%0\n"
166 " jnz 1f\n" 126 " jnz 1f\n"
167 " csg %0,%3,%1\n" 127 " csg %0,%3,%1\n"
168 " jl 0b\n" 128 " jl 0b\n"
169#endif /* CONFIG_64BIT */
170 "1:" 129 "1:"
171 : "=&d" (old), "=Q" (sem->count) 130 : "=&d" (old), "=Q" (sem->count)
172 : "Q" (sem->count), "d" (RWSEM_ACTIVE_WRITE_BIAS) 131 : "Q" (sem->count), "d" (RWSEM_ACTIVE_WRITE_BIAS)
@@ -182,19 +141,11 @@ static inline void __up_read(struct rw_semaphore *sem)
182 signed long old, new; 141 signed long old, new;
183 142
184 asm volatile( 143 asm volatile(
185#ifndef CONFIG_64BIT
186 " l %0,%2\n"
187 "0: lr %1,%0\n"
188 " ahi %1,%4\n"
189 " cs %0,%1,%2\n"
190 " jl 0b"
191#else /* CONFIG_64BIT */
192 " lg %0,%2\n" 144 " lg %0,%2\n"
193 "0: lgr %1,%0\n" 145 "0: lgr %1,%0\n"
194 " aghi %1,%4\n" 146 " aghi %1,%4\n"
195 " csg %0,%1,%2\n" 147 " csg %0,%1,%2\n"
196 " jl 0b" 148 " jl 0b"
197#endif /* CONFIG_64BIT */
198 : "=&d" (old), "=&d" (new), "=Q" (sem->count) 149 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
199 : "Q" (sem->count), "i" (-RWSEM_ACTIVE_READ_BIAS) 150 : "Q" (sem->count), "i" (-RWSEM_ACTIVE_READ_BIAS)
200 : "cc", "memory"); 151 : "cc", "memory");
@@ -212,19 +163,11 @@ static inline void __up_write(struct rw_semaphore *sem)
212 163
213 tmp = -RWSEM_ACTIVE_WRITE_BIAS; 164 tmp = -RWSEM_ACTIVE_WRITE_BIAS;
214 asm volatile( 165 asm volatile(
215#ifndef CONFIG_64BIT
216 " l %0,%2\n"
217 "0: lr %1,%0\n"
218 " a %1,%4\n"
219 " cs %0,%1,%2\n"
220 " jl 0b"
221#else /* CONFIG_64BIT */
222 " lg %0,%2\n" 166 " lg %0,%2\n"
223 "0: lgr %1,%0\n" 167 "0: lgr %1,%0\n"
224 " ag %1,%4\n" 168 " ag %1,%4\n"
225 " csg %0,%1,%2\n" 169 " csg %0,%1,%2\n"
226 " jl 0b" 170 " jl 0b"
227#endif /* CONFIG_64BIT */
228 : "=&d" (old), "=&d" (new), "=Q" (sem->count) 171 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
229 : "Q" (sem->count), "m" (tmp) 172 : "Q" (sem->count), "m" (tmp)
230 : "cc", "memory"); 173 : "cc", "memory");
@@ -242,19 +185,11 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
242 185
243 tmp = -RWSEM_WAITING_BIAS; 186 tmp = -RWSEM_WAITING_BIAS;
244 asm volatile( 187 asm volatile(
245#ifndef CONFIG_64BIT
246 " l %0,%2\n"
247 "0: lr %1,%0\n"
248 " a %1,%4\n"
249 " cs %0,%1,%2\n"
250 " jl 0b"
251#else /* CONFIG_64BIT */
252 " lg %0,%2\n" 188 " lg %0,%2\n"
253 "0: lgr %1,%0\n" 189 "0: lgr %1,%0\n"
254 " ag %1,%4\n" 190 " ag %1,%4\n"
255 " csg %0,%1,%2\n" 191 " csg %0,%1,%2\n"
256 " jl 0b" 192 " jl 0b"
257#endif /* CONFIG_64BIT */
258 : "=&d" (old), "=&d" (new), "=Q" (sem->count) 193 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
259 : "Q" (sem->count), "m" (tmp) 194 : "Q" (sem->count), "m" (tmp)
260 : "cc", "memory"); 195 : "cc", "memory");
@@ -270,19 +205,11 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
270 signed long old, new; 205 signed long old, new;
271 206
272 asm volatile( 207 asm volatile(
273#ifndef CONFIG_64BIT
274 " l %0,%2\n"
275 "0: lr %1,%0\n"
276 " ar %1,%4\n"
277 " cs %0,%1,%2\n"
278 " jl 0b"
279#else /* CONFIG_64BIT */
280 " lg %0,%2\n" 208 " lg %0,%2\n"
281 "0: lgr %1,%0\n" 209 "0: lgr %1,%0\n"
282 " agr %1,%4\n" 210 " agr %1,%4\n"
283 " csg %0,%1,%2\n" 211 " csg %0,%1,%2\n"
284 " jl 0b" 212 " jl 0b"
285#endif /* CONFIG_64BIT */
286 : "=&d" (old), "=&d" (new), "=Q" (sem->count) 213 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
287 : "Q" (sem->count), "d" (delta) 214 : "Q" (sem->count), "d" (delta)
288 : "cc", "memory"); 215 : "cc", "memory");
@@ -296,19 +223,11 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
296 signed long old, new; 223 signed long old, new;
297 224
298 asm volatile( 225 asm volatile(
299#ifndef CONFIG_64BIT
300 " l %0,%2\n"
301 "0: lr %1,%0\n"
302 " ar %1,%4\n"
303 " cs %0,%1,%2\n"
304 " jl 0b"
305#else /* CONFIG_64BIT */
306 " lg %0,%2\n" 226 " lg %0,%2\n"
307 "0: lgr %1,%0\n" 227 "0: lgr %1,%0\n"
308 " agr %1,%4\n" 228 " agr %1,%4\n"
309 " csg %0,%1,%2\n" 229 " csg %0,%1,%2\n"
310 " jl 0b" 230 " jl 0b"
311#endif /* CONFIG_64BIT */
312 : "=&d" (old), "=&d" (new), "=Q" (sem->count) 231 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
313 : "Q" (sem->count), "d" (delta) 232 : "Q" (sem->count), "d" (delta)
314 : "cc", "memory"); 233 : "cc", "memory");
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index b8d1e54b4733..b8ffc1bd0a9f 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -15,19 +15,11 @@
15#include <asm/lowcore.h> 15#include <asm/lowcore.h>
16#include <asm/types.h> 16#include <asm/types.h>
17 17
18#ifndef CONFIG_64BIT
19#define IPL_DEVICE (*(unsigned long *) (0x10404))
20#define INITRD_START (*(unsigned long *) (0x1040C))
21#define INITRD_SIZE (*(unsigned long *) (0x10414))
22#define OLDMEM_BASE (*(unsigned long *) (0x1041C))
23#define OLDMEM_SIZE (*(unsigned long *) (0x10424))
24#else /* CONFIG_64BIT */
25#define IPL_DEVICE (*(unsigned long *) (0x10400)) 18#define IPL_DEVICE (*(unsigned long *) (0x10400))
26#define INITRD_START (*(unsigned long *) (0x10408)) 19#define INITRD_START (*(unsigned long *) (0x10408))
27#define INITRD_SIZE (*(unsigned long *) (0x10410)) 20#define INITRD_SIZE (*(unsigned long *) (0x10410))
28#define OLDMEM_BASE (*(unsigned long *) (0x10418)) 21#define OLDMEM_BASE (*(unsigned long *) (0x10418))
29#define OLDMEM_SIZE (*(unsigned long *) (0x10420)) 22#define OLDMEM_SIZE (*(unsigned long *) (0x10420))
30#endif /* CONFIG_64BIT */
31#define COMMAND_LINE ((char *) (0x10480)) 23#define COMMAND_LINE ((char *) (0x10480))
32 24
33extern int memory_end_set; 25extern int memory_end_set;
@@ -68,26 +60,8 @@ extern void detect_memory_memblock(void);
68#define MACHINE_HAS_PFMF MACHINE_HAS_EDAT1 60#define MACHINE_HAS_PFMF MACHINE_HAS_EDAT1
69#define MACHINE_HAS_HPAGE MACHINE_HAS_EDAT1 61#define MACHINE_HAS_HPAGE MACHINE_HAS_EDAT1
70 62
71#ifndef CONFIG_64BIT
72#define MACHINE_HAS_IEEE (S390_lowcore.machine_flags & MACHINE_FLAG_IEEE)
73#define MACHINE_HAS_CSP (S390_lowcore.machine_flags & MACHINE_FLAG_CSP)
74#define MACHINE_HAS_IDTE (0)
75#define MACHINE_HAS_DIAG44 (1)
76#define MACHINE_HAS_MVPG (S390_lowcore.machine_flags & MACHINE_FLAG_MVPG)
77#define MACHINE_HAS_EDAT1 (0)
78#define MACHINE_HAS_EDAT2 (0)
79#define MACHINE_HAS_LPP (0)
80#define MACHINE_HAS_TOPOLOGY (0)
81#define MACHINE_HAS_TE (0)
82#define MACHINE_HAS_TLB_LC (0)
83#define MACHINE_HAS_VX (0)
84#define MACHINE_HAS_CAD (0)
85#else /* CONFIG_64BIT */
86#define MACHINE_HAS_IEEE (1)
87#define MACHINE_HAS_CSP (1)
88#define MACHINE_HAS_IDTE (S390_lowcore.machine_flags & MACHINE_FLAG_IDTE) 63#define MACHINE_HAS_IDTE (S390_lowcore.machine_flags & MACHINE_FLAG_IDTE)
89#define MACHINE_HAS_DIAG44 (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG44) 64#define MACHINE_HAS_DIAG44 (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG44)
90#define MACHINE_HAS_MVPG (1)
91#define MACHINE_HAS_EDAT1 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT1) 65#define MACHINE_HAS_EDAT1 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT1)
92#define MACHINE_HAS_EDAT2 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT2) 66#define MACHINE_HAS_EDAT2 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT2)
93#define MACHINE_HAS_LPP (S390_lowcore.machine_flags & MACHINE_FLAG_LPP) 67#define MACHINE_HAS_LPP (S390_lowcore.machine_flags & MACHINE_FLAG_LPP)
@@ -96,7 +70,6 @@ extern void detect_memory_memblock(void);
96#define MACHINE_HAS_TLB_LC (S390_lowcore.machine_flags & MACHINE_FLAG_TLB_LC) 70#define MACHINE_HAS_TLB_LC (S390_lowcore.machine_flags & MACHINE_FLAG_TLB_LC)
97#define MACHINE_HAS_VX (S390_lowcore.machine_flags & MACHINE_FLAG_VX) 71#define MACHINE_HAS_VX (S390_lowcore.machine_flags & MACHINE_FLAG_VX)
98#define MACHINE_HAS_CAD (S390_lowcore.machine_flags & MACHINE_FLAG_CAD) 72#define MACHINE_HAS_CAD (S390_lowcore.machine_flags & MACHINE_FLAG_CAD)
99#endif /* CONFIG_64BIT */
100 73
101/* 74/*
102 * Console mode. Override with conmode= 75 * Console mode. Override with conmode=
@@ -135,19 +108,11 @@ extern void (*_machine_power_off)(void);
135 108
136#else /* __ASSEMBLY__ */ 109#else /* __ASSEMBLY__ */
137 110
138#ifndef CONFIG_64BIT
139#define IPL_DEVICE 0x10404
140#define INITRD_START 0x1040C
141#define INITRD_SIZE 0x10414
142#define OLDMEM_BASE 0x1041C
143#define OLDMEM_SIZE 0x10424
144#else /* CONFIG_64BIT */
145#define IPL_DEVICE 0x10400 111#define IPL_DEVICE 0x10400
146#define INITRD_START 0x10408 112#define INITRD_START 0x10408
147#define INITRD_SIZE 0x10410 113#define INITRD_SIZE 0x10410
148#define OLDMEM_BASE 0x10418 114#define OLDMEM_BASE 0x10418
149#define OLDMEM_SIZE 0x10420 115#define OLDMEM_SIZE 0x10420
150#endif /* CONFIG_64BIT */
151#define COMMAND_LINE 0x10480 116#define COMMAND_LINE 0x10480
152 117
153#endif /* __ASSEMBLY__ */ 118#endif /* __ASSEMBLY__ */
diff --git a/arch/s390/include/asm/sfp-util.h b/arch/s390/include/asm/sfp-util.h
index 5959bfb3b693..c8b7cf9d6279 100644
--- a/arch/s390/include/asm/sfp-util.h
+++ b/arch/s390/include/asm/sfp-util.h
@@ -51,7 +51,6 @@
51 wl = __wl; \ 51 wl = __wl; \
52}) 52})
53 53
54#ifdef CONFIG_64BIT
55#define udiv_qrnnd(q, r, n1, n0, d) \ 54#define udiv_qrnnd(q, r, n1, n0, d) \
56 do { unsigned long __n; \ 55 do { unsigned long __n; \
57 unsigned int __r, __d; \ 56 unsigned int __r, __d; \
@@ -60,15 +59,6 @@
60 (q) = __n / __d; \ 59 (q) = __n / __d; \
61 (r) = __n % __d; \ 60 (r) = __n % __d; \
62 } while (0) 61 } while (0)
63#else
64#define udiv_qrnnd(q, r, n1, n0, d) \
65 do { unsigned int __r; \
66 (q) = __udiv_qrnnd (&__r, (n1), (n0), (d)); \
67 (r) = __r; \
68 } while (0)
69extern unsigned long __udiv_qrnnd (unsigned int *, unsigned int,
70 unsigned int , unsigned int);
71#endif
72 62
73#define UDIV_NEEDS_NORMALIZATION 0 63#define UDIV_NEEDS_NORMALIZATION 0
74 64
diff --git a/arch/s390/include/asm/sparsemem.h b/arch/s390/include/asm/sparsemem.h
index a60d085ddb4d..487428b6d099 100644
--- a/arch/s390/include/asm/sparsemem.h
+++ b/arch/s390/include/asm/sparsemem.h
@@ -1,16 +1,7 @@
1#ifndef _ASM_S390_SPARSEMEM_H 1#ifndef _ASM_S390_SPARSEMEM_H
2#define _ASM_S390_SPARSEMEM_H 2#define _ASM_S390_SPARSEMEM_H
3 3
4#ifdef CONFIG_64BIT
5
6#define SECTION_SIZE_BITS 28 4#define SECTION_SIZE_BITS 28
7#define MAX_PHYSMEM_BITS 46 5#define MAX_PHYSMEM_BITS 46
8 6
9#else
10
11#define SECTION_SIZE_BITS 25
12#define MAX_PHYSMEM_BITS 31
13
14#endif /* CONFIG_64BIT */
15
16#endif /* _ASM_S390_SPARSEMEM_H */ 7#endif /* _ASM_S390_SPARSEMEM_H */
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index 2542a7e4c8b4..d62e7a69605f 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -18,9 +18,6 @@ static inline int test_fp_ctl(u32 fpc)
18 u32 orig_fpc; 18 u32 orig_fpc;
19 int rc; 19 int rc;
20 20
21 if (!MACHINE_HAS_IEEE)
22 return 0;
23
24 asm volatile( 21 asm volatile(
25 " efpc %1\n" 22 " efpc %1\n"
26 " sfpc %2\n" 23 " sfpc %2\n"
@@ -35,9 +32,6 @@ static inline int test_fp_ctl(u32 fpc)
35 32
36static inline void save_fp_ctl(u32 *fpc) 33static inline void save_fp_ctl(u32 *fpc)
37{ 34{
38 if (!MACHINE_HAS_IEEE)
39 return;
40
41 asm volatile( 35 asm volatile(
42 " stfpc %0\n" 36 " stfpc %0\n"
43 : "+Q" (*fpc)); 37 : "+Q" (*fpc));
@@ -47,9 +41,6 @@ static inline int restore_fp_ctl(u32 *fpc)
47{ 41{
48 int rc; 42 int rc;
49 43
50 if (!MACHINE_HAS_IEEE)
51 return 0;
52
53 asm volatile( 44 asm volatile(
54 " lfpc %1\n" 45 " lfpc %1\n"
55 "0: la %0,0\n" 46 "0: la %0,0\n"
@@ -65,8 +56,6 @@ static inline void save_fp_regs(freg_t *fprs)
65 asm volatile("std 2,%0" : "=Q" (fprs[2])); 56 asm volatile("std 2,%0" : "=Q" (fprs[2]));
66 asm volatile("std 4,%0" : "=Q" (fprs[4])); 57 asm volatile("std 4,%0" : "=Q" (fprs[4]));
67 asm volatile("std 6,%0" : "=Q" (fprs[6])); 58 asm volatile("std 6,%0" : "=Q" (fprs[6]));
68 if (!MACHINE_HAS_IEEE)
69 return;
70 asm volatile("std 1,%0" : "=Q" (fprs[1])); 59 asm volatile("std 1,%0" : "=Q" (fprs[1]));
71 asm volatile("std 3,%0" : "=Q" (fprs[3])); 60 asm volatile("std 3,%0" : "=Q" (fprs[3]));
72 asm volatile("std 5,%0" : "=Q" (fprs[5])); 61 asm volatile("std 5,%0" : "=Q" (fprs[5]));
@@ -87,8 +76,6 @@ static inline void restore_fp_regs(freg_t *fprs)
87 asm volatile("ld 2,%0" : : "Q" (fprs[2])); 76 asm volatile("ld 2,%0" : : "Q" (fprs[2]));
88 asm volatile("ld 4,%0" : : "Q" (fprs[4])); 77 asm volatile("ld 4,%0" : : "Q" (fprs[4]));
89 asm volatile("ld 6,%0" : : "Q" (fprs[6])); 78 asm volatile("ld 6,%0" : : "Q" (fprs[6]));
90 if (!MACHINE_HAS_IEEE)
91 return;
92 asm volatile("ld 1,%0" : : "Q" (fprs[1])); 79 asm volatile("ld 1,%0" : : "Q" (fprs[1]));
93 asm volatile("ld 3,%0" : : "Q" (fprs[3])); 80 asm volatile("ld 3,%0" : : "Q" (fprs[3]));
94 asm volatile("ld 5,%0" : : "Q" (fprs[5])); 81 asm volatile("ld 5,%0" : : "Q" (fprs[5]));
@@ -140,22 +127,18 @@ static inline void restore_vx_regs(__vector128 *vxrs)
140 127
141static inline void save_fp_vx_regs(struct task_struct *task) 128static inline void save_fp_vx_regs(struct task_struct *task)
142{ 129{
143#ifdef CONFIG_64BIT
144 if (task->thread.vxrs) 130 if (task->thread.vxrs)
145 save_vx_regs(task->thread.vxrs); 131 save_vx_regs(task->thread.vxrs);
146 else 132 else
147#endif 133 save_fp_regs(task->thread.fp_regs.fprs);
148 save_fp_regs(task->thread.fp_regs.fprs);
149} 134}
150 135
151static inline void restore_fp_vx_regs(struct task_struct *task) 136static inline void restore_fp_vx_regs(struct task_struct *task)
152{ 137{
153#ifdef CONFIG_64BIT
154 if (task->thread.vxrs) 138 if (task->thread.vxrs)
155 restore_vx_regs(task->thread.vxrs); 139 restore_vx_regs(task->thread.vxrs);
156 else 140 else
157#endif 141 restore_fp_regs(task->thread.fp_regs.fprs);
158 restore_fp_regs(task->thread.fp_regs.fprs);
159} 142}
160 143
161static inline void save_access_regs(unsigned int *acrs) 144static inline void save_access_regs(unsigned int *acrs)
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index ef1df718642d..3df4aa13291d 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -10,13 +10,8 @@
10/* 10/*
11 * Size of kernel stack for each process 11 * Size of kernel stack for each process
12 */ 12 */
13#ifndef CONFIG_64BIT
14#define THREAD_ORDER 1
15#define ASYNC_ORDER 1
16#else /* CONFIG_64BIT */
17#define THREAD_ORDER 2 13#define THREAD_ORDER 2
18#define ASYNC_ORDER 2 14#define ASYNC_ORDER 2
19#endif /* CONFIG_64BIT */
20 15
21#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) 16#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
22#define ASYNC_SIZE (PAGE_SIZE << ASYNC_ORDER) 17#define ASYNC_SIZE (PAGE_SIZE << ASYNC_ORDER)
@@ -99,10 +94,6 @@ static inline struct thread_info *current_thread_info(void)
99#define _TIF_31BIT (1<<TIF_31BIT) 94#define _TIF_31BIT (1<<TIF_31BIT)
100#define _TIF_SINGLE_STEP (1<<TIF_SINGLE_STEP) 95#define _TIF_SINGLE_STEP (1<<TIF_SINGLE_STEP)
101 96
102#ifdef CONFIG_64BIT
103#define is_32bit_task() (test_thread_flag(TIF_31BIT)) 97#define is_32bit_task() (test_thread_flag(TIF_31BIT))
104#else
105#define is_32bit_task() (1)
106#endif
107 98
108#endif /* _ASM_THREAD_INFO_H */ 99#endif /* _ASM_THREAD_INFO_H */
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 06d8741ad6f4..7a92e69c50bc 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -118,12 +118,10 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
118static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, 118static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
119 unsigned long address) 119 unsigned long address)
120{ 120{
121#ifdef CONFIG_64BIT
122 if (tlb->mm->context.asce_limit <= (1UL << 31)) 121 if (tlb->mm->context.asce_limit <= (1UL << 31))
123 return; 122 return;
124 pgtable_pmd_page_dtor(virt_to_page(pmd)); 123 pgtable_pmd_page_dtor(virt_to_page(pmd));
125 tlb_remove_table(tlb, pmd); 124 tlb_remove_table(tlb, pmd);
126#endif
127} 125}
128 126
129/* 127/*
@@ -136,11 +134,9 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
136static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, 134static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
137 unsigned long address) 135 unsigned long address)
138{ 136{
139#ifdef CONFIG_64BIT
140 if (tlb->mm->context.asce_limit <= (1UL << 42)) 137 if (tlb->mm->context.asce_limit <= (1UL << 42))
141 return; 138 return;
142 tlb_remove_table(tlb, pud); 139 tlb_remove_table(tlb, pud);
143#endif
144} 140}
145 141
146#define tlb_start_vma(tlb, vma) do { } while (0) 142#define tlb_start_vma(tlb, vma) do { } while (0)
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index 16c9c88658c8..ca148f7c3eaa 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -49,13 +49,6 @@ static inline void __tlb_flush_global(void)
49 register unsigned long reg4 asm("4"); 49 register unsigned long reg4 asm("4");
50 long dummy; 50 long dummy;
51 51
52#ifndef CONFIG_64BIT
53 if (!MACHINE_HAS_CSP) {
54 smp_ptlb_all();
55 return;
56 }
57#endif /* CONFIG_64BIT */
58
59 dummy = 0; 52 dummy = 0;
60 reg2 = reg3 = 0; 53 reg2 = reg3 = 0;
61 reg4 = ((unsigned long) &dummy) + 1; 54 reg4 = ((unsigned long) &dummy) + 1;
diff --git a/arch/s390/include/asm/types.h b/arch/s390/include/asm/types.h
index dccef3ca91fa..6740f4f9781f 100644
--- a/arch/s390/include/asm/types.h
+++ b/arch/s390/include/asm/types.h
@@ -8,21 +8,4 @@
8 8
9#include <uapi/asm/types.h> 9#include <uapi/asm/types.h>
10 10
11/*
12 * These aren't exported outside the kernel to avoid name space clashes
13 */
14
15#ifndef __ASSEMBLY__
16
17#ifndef CONFIG_64BIT
18typedef union {
19 unsigned long long pair;
20 struct {
21 unsigned long even;
22 unsigned long odd;
23 } subreg;
24} register_pair;
25
26#endif /* ! CONFIG_64BIT */
27#endif /* __ASSEMBLY__ */
28#endif /* _S390_TYPES_H */ 11#endif /* _S390_TYPES_H */
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index 651886353551..91f56b1d8156 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -9,11 +9,7 @@
9#include <uapi/asm/unistd.h> 9#include <uapi/asm/unistd.h>
10 10
11 11
12#ifndef CONFIG_64BIT
13#define __IGNORE_select
14#else
15#define __IGNORE_time 12#define __IGNORE_time
16#endif
17 13
18/* Ignore NUMA system calls. Not wired up on s390. */ 14/* Ignore NUMA system calls. Not wired up on s390. */
19#define __IGNORE_mbind 15#define __IGNORE_mbind
@@ -43,10 +39,6 @@
43#define __ARCH_WANT_SYS_OLDUMOUNT 39#define __ARCH_WANT_SYS_OLDUMOUNT
44#define __ARCH_WANT_SYS_SIGPENDING 40#define __ARCH_WANT_SYS_SIGPENDING
45#define __ARCH_WANT_SYS_SIGPROCMASK 41#define __ARCH_WANT_SYS_SIGPROCMASK
46# ifndef CONFIG_64BIT
47# define __ARCH_WANT_STAT64
48# define __ARCH_WANT_SYS_TIME
49# endif
50# ifdef CONFIG_COMPAT 42# ifdef CONFIG_COMPAT
51# define __ARCH_WANT_COMPAT_SYS_TIME 43# define __ARCH_WANT_COMPAT_SYS_TIME
52# endif 44# endif
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
index a62526d09201..787acd4f9668 100644
--- a/arch/s390/include/asm/vdso.h
+++ b/arch/s390/include/asm/vdso.h
@@ -42,10 +42,8 @@ struct vdso_per_cpu_data {
42 42
43extern struct vdso_data *vdso_data; 43extern struct vdso_data *vdso_data;
44 44
45#ifdef CONFIG_64BIT
46int vdso_alloc_per_cpu(struct _lowcore *lowcore); 45int vdso_alloc_per_cpu(struct _lowcore *lowcore);
47void vdso_free_per_cpu(struct _lowcore *lowcore); 46void vdso_free_per_cpu(struct _lowcore *lowcore);
48#endif
49 47
50#endif /* __ASSEMBLY__ */ 48#endif /* __ASSEMBLY__ */
51#endif /* __S390_VDSO_H__ */ 49#endif /* __S390_VDSO_H__ */
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 31fab2676fe9..d94cbba95c50 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -26,20 +26,16 @@ CFLAGS_dumpstack.o += -fno-optimize-sibling-calls
26# 26#
27CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"' 27CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
28 28
29CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w 29CFLAGS_sysinfo.o += -w
30 30
31obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o 31obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o
32obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o 32obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
33obj-y += debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o 33obj-y += debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o
34obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o 34obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
35obj-y += dumpstack.o 35obj-y += runtime_instr.o cache.o dumpstack.o
36obj-y += entry64.o reipl64.o relocate_kernel64.o
36 37
37obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) 38extra-y += head.o head64.o vmlinux.lds
38obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
39obj-y += $(if $(CONFIG_64BIT),relocate_kernel64.o,relocate_kernel.o)
40
41extra-y += head.o vmlinux.lds
42extra-y += $(if $(CONFIG_64BIT),head64.o,head31.o)
43 39
44obj-$(CONFIG_MODULES) += s390_ksyms.o module.o 40obj-$(CONFIG_MODULES) += s390_ksyms.o module.o
45obj-$(CONFIG_SMP) += smp.o 41obj-$(CONFIG_SMP) += smp.o
@@ -56,13 +52,9 @@ obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o
56obj-$(CONFIG_CRASH_DUMP) += crash_dump.o 52obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
57obj-$(CONFIG_UPROBES) += uprobes.o 53obj-$(CONFIG_UPROBES) += uprobes.o
58 54
59ifdef CONFIG_64BIT 55obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o
60obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o \ 56obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_events.o
61 perf_cpum_cf_events.o
62obj-y += runtime_instr.o cache.o
63endif
64 57
65# vdso 58# vdso
66obj-$(CONFIG_64BIT) += vdso64/ 59obj-y += vdso64/
67obj-$(CONFIG_32BIT) += vdso32/
68obj-$(CONFIG_COMPAT) += vdso32/ 60obj-$(CONFIG_COMPAT) += vdso32/
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index e07e91605353..6e94edd90318 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -166,9 +166,6 @@ int main(void)
166 DEFINE(__LC_FPREGS_SAVE_AREA, offsetof(struct _lowcore, floating_pt_save_area)); 166 DEFINE(__LC_FPREGS_SAVE_AREA, offsetof(struct _lowcore, floating_pt_save_area));
167 DEFINE(__LC_GPREGS_SAVE_AREA, offsetof(struct _lowcore, gpregs_save_area)); 167 DEFINE(__LC_GPREGS_SAVE_AREA, offsetof(struct _lowcore, gpregs_save_area));
168 DEFINE(__LC_CREGS_SAVE_AREA, offsetof(struct _lowcore, cregs_save_area)); 168 DEFINE(__LC_CREGS_SAVE_AREA, offsetof(struct _lowcore, cregs_save_area));
169#ifdef CONFIG_32BIT
170 DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, extended_save_area_addr));
171#else /* CONFIG_32BIT */
172 DEFINE(__LC_DATA_EXC_CODE, offsetof(struct _lowcore, data_exc_code)); 169 DEFINE(__LC_DATA_EXC_CODE, offsetof(struct _lowcore, data_exc_code));
173 DEFINE(__LC_MCCK_FAIL_STOR_ADDR, offsetof(struct _lowcore, failing_storage_address)); 170 DEFINE(__LC_MCCK_FAIL_STOR_ADDR, offsetof(struct _lowcore, failing_storage_address));
174 DEFINE(__LC_EXT_PARAMS2, offsetof(struct _lowcore, ext_params2)); 171 DEFINE(__LC_EXT_PARAMS2, offsetof(struct _lowcore, ext_params2));
@@ -183,6 +180,5 @@ int main(void)
183 DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce)); 180 DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce));
184 DEFINE(__SIE_PROG0C, offsetof(struct kvm_s390_sie_block, prog0c)); 181 DEFINE(__SIE_PROG0C, offsetof(struct kvm_s390_sie_block, prog0c));
185 DEFINE(__SIE_PROG20, offsetof(struct kvm_s390_sie_block, prog20)); 182 DEFINE(__SIE_PROG20, offsetof(struct kvm_s390_sie_block, prog20));
186#endif /* CONFIG_32BIT */
187 return 0; 183 return 0;
188} 184}
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
index f74a53d339b0..daed3fde42ec 100644
--- a/arch/s390/kernel/base.S
+++ b/arch/s390/kernel/base.S
@@ -11,8 +11,6 @@
11#include <asm/ptrace.h> 11#include <asm/ptrace.h>
12#include <asm/sigp.h> 12#include <asm/sigp.h>
13 13
14#ifdef CONFIG_64BIT
15
16ENTRY(s390_base_mcck_handler) 14ENTRY(s390_base_mcck_handler)
17 basr %r13,0 15 basr %r13,0
180: lg %r15,__LC_PANIC_STACK # load panic stack 160: lg %r15,__LC_PANIC_STACK # load panic stack
@@ -131,77 +129,3 @@ ENTRY(diag308_reset)
131.Lfpctl: 129.Lfpctl:
132 .long 0 130 .long 0
133 .previous 131 .previous
134
135#else /* CONFIG_64BIT */
136
137ENTRY(s390_base_mcck_handler)
138 basr %r13,0
1390: l %r15,__LC_PANIC_STACK # load panic stack
140 ahi %r15,-STACK_FRAME_OVERHEAD
141 l %r1,2f-0b(%r13)
142 l %r1,0(%r1)
143 ltr %r1,%r1
144 jz 1f
145 basr %r14,%r1
1461: lm %r0,%r15,__LC_GPREGS_SAVE_AREA
147 lpsw __LC_MCK_OLD_PSW
148
1492: .long s390_base_mcck_handler_fn
150
151 .section .bss
152 .align 4
153 .globl s390_base_mcck_handler_fn
154s390_base_mcck_handler_fn:
155 .long 0
156 .previous
157
158ENTRY(s390_base_ext_handler)
159 stm %r0,%r15,__LC_SAVE_AREA_ASYNC
160 basr %r13,0
1610: ahi %r15,-STACK_FRAME_OVERHEAD
162 l %r1,2f-0b(%r13)
163 l %r1,0(%r1)
164 ltr %r1,%r1
165 jz 1f
166 basr %r14,%r1
1671: lm %r0,%r15,__LC_SAVE_AREA_ASYNC
168 ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit
169 lpsw __LC_EXT_OLD_PSW
170
1712: .long s390_base_ext_handler_fn
172
173 .section .bss
174 .align 4
175 .globl s390_base_ext_handler_fn
176s390_base_ext_handler_fn:
177 .long 0
178 .previous
179
180ENTRY(s390_base_pgm_handler)
181 stm %r0,%r15,__LC_SAVE_AREA_SYNC
182 basr %r13,0
1830: ahi %r15,-STACK_FRAME_OVERHEAD
184 l %r1,2f-0b(%r13)
185 l %r1,0(%r1)
186 ltr %r1,%r1
187 jz 1f
188 basr %r14,%r1
189 lm %r0,%r15,__LC_SAVE_AREA_SYNC
190 lpsw __LC_PGM_OLD_PSW
191
1921: lpsw disabled_wait_psw-0b(%r13)
193
1942: .long s390_base_pgm_handler_fn
195
196disabled_wait_psw:
197 .align 8
198 .long 0x000a0000,0x00000000 + s390_base_pgm_handler
199
200 .section .bss
201 .align 4
202 .globl s390_base_pgm_handler_fn
203s390_base_pgm_handler_fn:
204 .long 0
205 .previous
206
207#endif /* CONFIG_64BIT */
diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c
index d7b0c4d27880..199ec92ef4fe 100644
--- a/arch/s390/kernel/cpcmd.c
+++ b/arch/s390/kernel/cpcmd.c
@@ -27,13 +27,9 @@ static int diag8_noresponse(int cmdlen)
27 register unsigned long reg3 asm ("3") = cmdlen; 27 register unsigned long reg3 asm ("3") = cmdlen;
28 28
29 asm volatile( 29 asm volatile(
30#ifndef CONFIG_64BIT
31 " diag %1,%0,0x8\n"
32#else /* CONFIG_64BIT */
33 " sam31\n" 30 " sam31\n"
34 " diag %1,%0,0x8\n" 31 " diag %1,%0,0x8\n"
35 " sam64\n" 32 " sam64\n"
36#endif /* CONFIG_64BIT */
37 : "+d" (reg3) : "d" (reg2) : "cc"); 33 : "+d" (reg3) : "d" (reg2) : "cc");
38 return reg3; 34 return reg3;
39} 35}
@@ -46,17 +42,11 @@ static int diag8_response(int cmdlen, char *response, int *rlen)
46 register unsigned long reg5 asm ("5") = *rlen; 42 register unsigned long reg5 asm ("5") = *rlen;
47 43
48 asm volatile( 44 asm volatile(
49#ifndef CONFIG_64BIT
50 " diag %2,%0,0x8\n"
51 " brc 8,1f\n"
52 " ar %1,%4\n"
53#else /* CONFIG_64BIT */
54 " sam31\n" 45 " sam31\n"
55 " diag %2,%0,0x8\n" 46 " diag %2,%0,0x8\n"
56 " sam64\n" 47 " sam64\n"
57 " brc 8,1f\n" 48 " brc 8,1f\n"
58 " agr %1,%4\n" 49 " agr %1,%4\n"
59#endif /* CONFIG_64BIT */
60 "1:\n" 50 "1:\n"
61 : "+d" (reg4), "+d" (reg5) 51 : "+d" (reg4), "+d" (reg5)
62 : "d" (reg2), "d" (reg3), "d" (*rlen) : "cc"); 52 : "d" (reg2), "d" (reg3), "d" (*rlen) : "cc");
diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c
index 8237fc07ac79..2f69243bf700 100644
--- a/arch/s390/kernel/diag.c
+++ b/arch/s390/kernel/diag.c
@@ -18,13 +18,9 @@ int diag14(unsigned long rx, unsigned long ry1, unsigned long subcode)
18 int rc = 0; 18 int rc = 0;
19 19
20 asm volatile( 20 asm volatile(
21#ifdef CONFIG_64BIT
22 " sam31\n" 21 " sam31\n"
23 " diag %2,2,0x14\n" 22 " diag %2,2,0x14\n"
24 " sam64\n" 23 " sam64\n"
25#else
26 " diag %2,2,0x14\n"
27#endif
28 " ipm %0\n" 24 " ipm %0\n"
29 " srl %0,28\n" 25 " srl %0,28\n"
30 : "=d" (rc), "+d" (_ry2) 26 : "=d" (rc), "+d" (_ry2)
@@ -52,7 +48,6 @@ int diag210(struct diag210 *addr)
52 spin_lock_irqsave(&diag210_lock, flags); 48 spin_lock_irqsave(&diag210_lock, flags);
53 diag210_tmp = *addr; 49 diag210_tmp = *addr;
54 50
55#ifdef CONFIG_64BIT
56 asm volatile( 51 asm volatile(
57 " lhi %0,-1\n" 52 " lhi %0,-1\n"
58 " sam31\n" 53 " sam31\n"
@@ -62,16 +57,6 @@ int diag210(struct diag210 *addr)
62 "1: sam64\n" 57 "1: sam64\n"
63 EX_TABLE(0b, 1b) 58 EX_TABLE(0b, 1b)
64 : "=&d" (ccode) : "a" (&diag210_tmp) : "cc", "memory"); 59 : "=&d" (ccode) : "a" (&diag210_tmp) : "cc", "memory");
65#else
66 asm volatile(
67 " lhi %0,-1\n"
68 " diag %1,0,0x210\n"
69 "0: ipm %0\n"
70 " srl %0,28\n"
71 "1:\n"
72 EX_TABLE(0b, 1b)
73 : "=&d" (ccode) : "a" (&diag210_tmp) : "cc", "memory");
74#endif
75 60
76 *addr = diag210_tmp; 61 *addr = diag210_tmp;
77 spin_unlock_irqrestore(&diag210_lock, flags); 62 spin_unlock_irqrestore(&diag210_lock, flags);
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index 533430307da8..8140d10c6785 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -32,12 +32,6 @@
32#include <asm/debug.h> 32#include <asm/debug.h>
33#include <asm/irq.h> 33#include <asm/irq.h>
34 34
35#ifndef CONFIG_64BIT
36#define ONELONG "%08lx: "
37#else /* CONFIG_64BIT */
38#define ONELONG "%016lx: "
39#endif /* CONFIG_64BIT */
40
41enum { 35enum {
42 UNUSED, /* Indicates the end of the operand list */ 36 UNUSED, /* Indicates the end of the operand list */
43 R_8, /* GPR starting at position 8 */ 37 R_8, /* GPR starting at position 8 */
@@ -536,12 +530,10 @@ static char *long_insn_name[] = {
536}; 530};
537 531
538static struct s390_insn opcode[] = { 532static struct s390_insn opcode[] = {
539#ifdef CONFIG_64BIT
540 { "bprp", 0xc5, INSTR_MII_UPI }, 533 { "bprp", 0xc5, INSTR_MII_UPI },
541 { "bpp", 0xc7, INSTR_SMI_U0RDP }, 534 { "bpp", 0xc7, INSTR_SMI_U0RDP },
542 { "trtr", 0xd0, INSTR_SS_L0RDRD }, 535 { "trtr", 0xd0, INSTR_SS_L0RDRD },
543 { "lmd", 0xef, INSTR_SS_RRRDRD3 }, 536 { "lmd", 0xef, INSTR_SS_RRRDRD3 },
544#endif
545 { "spm", 0x04, INSTR_RR_R0 }, 537 { "spm", 0x04, INSTR_RR_R0 },
546 { "balr", 0x05, INSTR_RR_RR }, 538 { "balr", 0x05, INSTR_RR_RR },
547 { "bctr", 0x06, INSTR_RR_RR }, 539 { "bctr", 0x06, INSTR_RR_RR },
@@ -725,11 +717,9 @@ static struct s390_insn opcode[] = {
725}; 717};
726 718
727static struct s390_insn opcode_01[] = { 719static struct s390_insn opcode_01[] = {
728#ifdef CONFIG_64BIT
729 { "ptff", 0x04, INSTR_E }, 720 { "ptff", 0x04, INSTR_E },
730 { "pfpo", 0x0a, INSTR_E }, 721 { "pfpo", 0x0a, INSTR_E },
731 { "sam64", 0x0e, INSTR_E }, 722 { "sam64", 0x0e, INSTR_E },
732#endif
733 { "pr", 0x01, INSTR_E }, 723 { "pr", 0x01, INSTR_E },
734 { "upt", 0x02, INSTR_E }, 724 { "upt", 0x02, INSTR_E },
735 { "sckpf", 0x07, INSTR_E }, 725 { "sckpf", 0x07, INSTR_E },
@@ -741,7 +731,6 @@ static struct s390_insn opcode_01[] = {
741}; 731};
742 732
743static struct s390_insn opcode_a5[] = { 733static struct s390_insn opcode_a5[] = {
744#ifdef CONFIG_64BIT
745 { "iihh", 0x00, INSTR_RI_RU }, 734 { "iihh", 0x00, INSTR_RI_RU },
746 { "iihl", 0x01, INSTR_RI_RU }, 735 { "iihl", 0x01, INSTR_RI_RU },
747 { "iilh", 0x02, INSTR_RI_RU }, 736 { "iilh", 0x02, INSTR_RI_RU },
@@ -758,12 +747,10 @@ static struct s390_insn opcode_a5[] = {
758 { "llihl", 0x0d, INSTR_RI_RU }, 747 { "llihl", 0x0d, INSTR_RI_RU },
759 { "llilh", 0x0e, INSTR_RI_RU }, 748 { "llilh", 0x0e, INSTR_RI_RU },
760 { "llill", 0x0f, INSTR_RI_RU }, 749 { "llill", 0x0f, INSTR_RI_RU },
761#endif
762 { "", 0, INSTR_INVALID } 750 { "", 0, INSTR_INVALID }
763}; 751};
764 752
765static struct s390_insn opcode_a7[] = { 753static struct s390_insn opcode_a7[] = {
766#ifdef CONFIG_64BIT
767 { "tmhh", 0x02, INSTR_RI_RU }, 754 { "tmhh", 0x02, INSTR_RI_RU },
768 { "tmhl", 0x03, INSTR_RI_RU }, 755 { "tmhl", 0x03, INSTR_RI_RU },
769 { "brctg", 0x07, INSTR_RI_RP }, 756 { "brctg", 0x07, INSTR_RI_RP },
@@ -771,7 +758,6 @@ static struct s390_insn opcode_a7[] = {
771 { "aghi", 0x0b, INSTR_RI_RI }, 758 { "aghi", 0x0b, INSTR_RI_RI },
772 { "mghi", 0x0d, INSTR_RI_RI }, 759 { "mghi", 0x0d, INSTR_RI_RI },
773 { "cghi", 0x0f, INSTR_RI_RI }, 760 { "cghi", 0x0f, INSTR_RI_RI },
774#endif
775 { "tmlh", 0x00, INSTR_RI_RU }, 761 { "tmlh", 0x00, INSTR_RI_RU },
776 { "tmll", 0x01, INSTR_RI_RU }, 762 { "tmll", 0x01, INSTR_RI_RU },
777 { "brc", 0x04, INSTR_RI_UP }, 763 { "brc", 0x04, INSTR_RI_UP },
@@ -785,18 +771,15 @@ static struct s390_insn opcode_a7[] = {
785}; 771};
786 772
787static struct s390_insn opcode_aa[] = { 773static struct s390_insn opcode_aa[] = {
788#ifdef CONFIG_64BIT
789 { { 0, LONG_INSN_RINEXT }, 0x00, INSTR_RI_RI }, 774 { { 0, LONG_INSN_RINEXT }, 0x00, INSTR_RI_RI },
790 { "rion", 0x01, INSTR_RI_RI }, 775 { "rion", 0x01, INSTR_RI_RI },
791 { "tric", 0x02, INSTR_RI_RI }, 776 { "tric", 0x02, INSTR_RI_RI },
792 { "rioff", 0x03, INSTR_RI_RI }, 777 { "rioff", 0x03, INSTR_RI_RI },
793 { { 0, LONG_INSN_RIEMIT }, 0x04, INSTR_RI_RI }, 778 { { 0, LONG_INSN_RIEMIT }, 0x04, INSTR_RI_RI },
794#endif
795 { "", 0, INSTR_INVALID } 779 { "", 0, INSTR_INVALID }
796}; 780};
797 781
798static struct s390_insn opcode_b2[] = { 782static struct s390_insn opcode_b2[] = {
799#ifdef CONFIG_64BIT
800 { "stckf", 0x7c, INSTR_S_RD }, 783 { "stckf", 0x7c, INSTR_S_RD },
801 { "lpp", 0x80, INSTR_S_RD }, 784 { "lpp", 0x80, INSTR_S_RD },
802 { "lcctl", 0x84, INSTR_S_RD }, 785 { "lcctl", 0x84, INSTR_S_RD },
@@ -819,7 +802,6 @@ static struct s390_insn opcode_b2[] = {
819 { "tend", 0xf8, INSTR_S_00 }, 802 { "tend", 0xf8, INSTR_S_00 },
820 { "niai", 0xfa, INSTR_IE_UU }, 803 { "niai", 0xfa, INSTR_IE_UU },
821 { { 0, LONG_INSN_TABORT }, 0xfc, INSTR_S_RD }, 804 { { 0, LONG_INSN_TABORT }, 0xfc, INSTR_S_RD },
822#endif
823 { "stidp", 0x02, INSTR_S_RD }, 805 { "stidp", 0x02, INSTR_S_RD },
824 { "sck", 0x04, INSTR_S_RD }, 806 { "sck", 0x04, INSTR_S_RD },
825 { "stck", 0x05, INSTR_S_RD }, 807 { "stck", 0x05, INSTR_S_RD },
@@ -908,7 +890,6 @@ static struct s390_insn opcode_b2[] = {
908}; 890};
909 891
910static struct s390_insn opcode_b3[] = { 892static struct s390_insn opcode_b3[] = {
911#ifdef CONFIG_64BIT
912 { "maylr", 0x38, INSTR_RRF_F0FF }, 893 { "maylr", 0x38, INSTR_RRF_F0FF },
913 { "mylr", 0x39, INSTR_RRF_F0FF }, 894 { "mylr", 0x39, INSTR_RRF_F0FF },
914 { "mayr", 0x3a, INSTR_RRF_F0FF }, 895 { "mayr", 0x3a, INSTR_RRF_F0FF },
@@ -996,7 +977,6 @@ static struct s390_insn opcode_b3[] = {
996 { "qaxtr", 0xfd, INSTR_RRF_FUFF }, 977 { "qaxtr", 0xfd, INSTR_RRF_FUFF },
997 { "iextr", 0xfe, INSTR_RRF_F0FR }, 978 { "iextr", 0xfe, INSTR_RRF_F0FR },
998 { "rrxtr", 0xff, INSTR_RRF_FFRU }, 979 { "rrxtr", 0xff, INSTR_RRF_FFRU },
999#endif
1000 { "lpebr", 0x00, INSTR_RRE_FF }, 980 { "lpebr", 0x00, INSTR_RRE_FF },
1001 { "lnebr", 0x01, INSTR_RRE_FF }, 981 { "lnebr", 0x01, INSTR_RRE_FF },
1002 { "ltebr", 0x02, INSTR_RRE_FF }, 982 { "ltebr", 0x02, INSTR_RRE_FF },
@@ -1091,7 +1071,6 @@ static struct s390_insn opcode_b3[] = {
1091}; 1071};
1092 1072
1093static struct s390_insn opcode_b9[] = { 1073static struct s390_insn opcode_b9[] = {
1094#ifdef CONFIG_64BIT
1095 { "lpgr", 0x00, INSTR_RRE_RR }, 1074 { "lpgr", 0x00, INSTR_RRE_RR },
1096 { "lngr", 0x01, INSTR_RRE_RR }, 1075 { "lngr", 0x01, INSTR_RRE_RR },
1097 { "ltgr", 0x02, INSTR_RRE_RR }, 1076 { "ltgr", 0x02, INSTR_RRE_RR },
@@ -1204,7 +1183,6 @@ static struct s390_insn opcode_b9[] = {
1204 { "srk", 0xf9, INSTR_RRF_R0RR2 }, 1183 { "srk", 0xf9, INSTR_RRF_R0RR2 },
1205 { "alrk", 0xfa, INSTR_RRF_R0RR2 }, 1184 { "alrk", 0xfa, INSTR_RRF_R0RR2 },
1206 { "slrk", 0xfb, INSTR_RRF_R0RR2 }, 1185 { "slrk", 0xfb, INSTR_RRF_R0RR2 },
1207#endif
1208 { "kmac", 0x1e, INSTR_RRE_RR }, 1186 { "kmac", 0x1e, INSTR_RRE_RR },
1209 { "lrvr", 0x1f, INSTR_RRE_RR }, 1187 { "lrvr", 0x1f, INSTR_RRE_RR },
1210 { "km", 0x2e, INSTR_RRE_RR }, 1188 { "km", 0x2e, INSTR_RRE_RR },
@@ -1224,7 +1202,6 @@ static struct s390_insn opcode_b9[] = {
1224}; 1202};
1225 1203
1226static struct s390_insn opcode_c0[] = { 1204static struct s390_insn opcode_c0[] = {
1227#ifdef CONFIG_64BIT
1228 { "lgfi", 0x01, INSTR_RIL_RI }, 1205 { "lgfi", 0x01, INSTR_RIL_RI },
1229 { "xihf", 0x06, INSTR_RIL_RU }, 1206 { "xihf", 0x06, INSTR_RIL_RU },
1230 { "xilf", 0x07, INSTR_RIL_RU }, 1207 { "xilf", 0x07, INSTR_RIL_RU },
@@ -1236,7 +1213,6 @@ static struct s390_insn opcode_c0[] = {
1236 { "oilf", 0x0d, INSTR_RIL_RU }, 1213 { "oilf", 0x0d, INSTR_RIL_RU },
1237 { "llihf", 0x0e, INSTR_RIL_RU }, 1214 { "llihf", 0x0e, INSTR_RIL_RU },
1238 { "llilf", 0x0f, INSTR_RIL_RU }, 1215 { "llilf", 0x0f, INSTR_RIL_RU },
1239#endif
1240 { "larl", 0x00, INSTR_RIL_RP }, 1216 { "larl", 0x00, INSTR_RIL_RP },
1241 { "brcl", 0x04, INSTR_RIL_UP }, 1217 { "brcl", 0x04, INSTR_RIL_UP },
1242 { "brasl", 0x05, INSTR_RIL_RP }, 1218 { "brasl", 0x05, INSTR_RIL_RP },
@@ -1244,7 +1220,6 @@ static struct s390_insn opcode_c0[] = {
1244}; 1220};
1245 1221
1246static struct s390_insn opcode_c2[] = { 1222static struct s390_insn opcode_c2[] = {
1247#ifdef CONFIG_64BIT
1248 { "msgfi", 0x00, INSTR_RIL_RI }, 1223 { "msgfi", 0x00, INSTR_RIL_RI },
1249 { "msfi", 0x01, INSTR_RIL_RI }, 1224 { "msfi", 0x01, INSTR_RIL_RI },
1250 { "slgfi", 0x04, INSTR_RIL_RU }, 1225 { "slgfi", 0x04, INSTR_RIL_RU },
@@ -1257,12 +1232,10 @@ static struct s390_insn opcode_c2[] = {
1257 { "cfi", 0x0d, INSTR_RIL_RI }, 1232 { "cfi", 0x0d, INSTR_RIL_RI },
1258 { "clgfi", 0x0e, INSTR_RIL_RU }, 1233 { "clgfi", 0x0e, INSTR_RIL_RU },
1259 { "clfi", 0x0f, INSTR_RIL_RU }, 1234 { "clfi", 0x0f, INSTR_RIL_RU },
1260#endif
1261 { "", 0, INSTR_INVALID } 1235 { "", 0, INSTR_INVALID }
1262}; 1236};
1263 1237
1264static struct s390_insn opcode_c4[] = { 1238static struct s390_insn opcode_c4[] = {
1265#ifdef CONFIG_64BIT
1266 { "llhrl", 0x02, INSTR_RIL_RP }, 1239 { "llhrl", 0x02, INSTR_RIL_RP },
1267 { "lghrl", 0x04, INSTR_RIL_RP }, 1240 { "lghrl", 0x04, INSTR_RIL_RP },
1268 { "lhrl", 0x05, INSTR_RIL_RP }, 1241 { "lhrl", 0x05, INSTR_RIL_RP },
@@ -1274,12 +1247,10 @@ static struct s390_insn opcode_c4[] = {
1274 { "lrl", 0x0d, INSTR_RIL_RP }, 1247 { "lrl", 0x0d, INSTR_RIL_RP },
1275 { { 0, LONG_INSN_LLGFRL }, 0x0e, INSTR_RIL_RP }, 1248 { { 0, LONG_INSN_LLGFRL }, 0x0e, INSTR_RIL_RP },
1276 { "strl", 0x0f, INSTR_RIL_RP }, 1249 { "strl", 0x0f, INSTR_RIL_RP },
1277#endif
1278 { "", 0, INSTR_INVALID } 1250 { "", 0, INSTR_INVALID }
1279}; 1251};
1280 1252
1281static struct s390_insn opcode_c6[] = { 1253static struct s390_insn opcode_c6[] = {
1282#ifdef CONFIG_64BIT
1283 { "exrl", 0x00, INSTR_RIL_RP }, 1254 { "exrl", 0x00, INSTR_RIL_RP },
1284 { "pfdrl", 0x02, INSTR_RIL_UP }, 1255 { "pfdrl", 0x02, INSTR_RIL_UP },
1285 { "cghrl", 0x04, INSTR_RIL_RP }, 1256 { "cghrl", 0x04, INSTR_RIL_RP },
@@ -1292,35 +1263,29 @@ static struct s390_insn opcode_c6[] = {
1292 { "crl", 0x0d, INSTR_RIL_RP }, 1263 { "crl", 0x0d, INSTR_RIL_RP },
1293 { { 0, LONG_INSN_CLGFRL }, 0x0e, INSTR_RIL_RP }, 1264 { { 0, LONG_INSN_CLGFRL }, 0x0e, INSTR_RIL_RP },
1294 { "clrl", 0x0f, INSTR_RIL_RP }, 1265 { "clrl", 0x0f, INSTR_RIL_RP },
1295#endif
1296 { "", 0, INSTR_INVALID } 1266 { "", 0, INSTR_INVALID }
1297}; 1267};
1298 1268
1299static struct s390_insn opcode_c8[] = { 1269static struct s390_insn opcode_c8[] = {
1300#ifdef CONFIG_64BIT
1301 { "mvcos", 0x00, INSTR_SSF_RRDRD }, 1270 { "mvcos", 0x00, INSTR_SSF_RRDRD },
1302 { "ectg", 0x01, INSTR_SSF_RRDRD }, 1271 { "ectg", 0x01, INSTR_SSF_RRDRD },
1303 { "csst", 0x02, INSTR_SSF_RRDRD }, 1272 { "csst", 0x02, INSTR_SSF_RRDRD },
1304 { "lpd", 0x04, INSTR_SSF_RRDRD2 }, 1273 { "lpd", 0x04, INSTR_SSF_RRDRD2 },
1305 { "lpdg", 0x05, INSTR_SSF_RRDRD2 }, 1274 { "lpdg", 0x05, INSTR_SSF_RRDRD2 },
1306#endif
1307 { "", 0, INSTR_INVALID } 1275 { "", 0, INSTR_INVALID }
1308}; 1276};
1309 1277
1310static struct s390_insn opcode_cc[] = { 1278static struct s390_insn opcode_cc[] = {
1311#ifdef CONFIG_64BIT
1312 { "brcth", 0x06, INSTR_RIL_RP }, 1279 { "brcth", 0x06, INSTR_RIL_RP },
1313 { "aih", 0x08, INSTR_RIL_RI }, 1280 { "aih", 0x08, INSTR_RIL_RI },
1314 { "alsih", 0x0a, INSTR_RIL_RI }, 1281 { "alsih", 0x0a, INSTR_RIL_RI },
1315 { { 0, LONG_INSN_ALSIHN }, 0x0b, INSTR_RIL_RI }, 1282 { { 0, LONG_INSN_ALSIHN }, 0x0b, INSTR_RIL_RI },
1316 { "cih", 0x0d, INSTR_RIL_RI }, 1283 { "cih", 0x0d, INSTR_RIL_RI },
1317 { "clih", 0x0f, INSTR_RIL_RI }, 1284 { "clih", 0x0f, INSTR_RIL_RI },
1318#endif
1319 { "", 0, INSTR_INVALID } 1285 { "", 0, INSTR_INVALID }
1320}; 1286};
1321 1287
1322static struct s390_insn opcode_e3[] = { 1288static struct s390_insn opcode_e3[] = {
1323#ifdef CONFIG_64BIT
1324 { "ltg", 0x02, INSTR_RXY_RRRD }, 1289 { "ltg", 0x02, INSTR_RXY_RRRD },
1325 { "lrag", 0x03, INSTR_RXY_RRRD }, 1290 { "lrag", 0x03, INSTR_RXY_RRRD },
1326 { "lg", 0x04, INSTR_RXY_RRRD }, 1291 { "lg", 0x04, INSTR_RXY_RRRD },
@@ -1414,7 +1379,6 @@ static struct s390_insn opcode_e3[] = {
1414 { "clhf", 0xcf, INSTR_RXY_RRRD }, 1379 { "clhf", 0xcf, INSTR_RXY_RRRD },
1415 { { 0, LONG_INSN_MPCIFC }, 0xd0, INSTR_RXY_RRRD }, 1380 { { 0, LONG_INSN_MPCIFC }, 0xd0, INSTR_RXY_RRRD },
1416 { { 0, LONG_INSN_STPCIFC }, 0xd4, INSTR_RXY_RRRD }, 1381 { { 0, LONG_INSN_STPCIFC }, 0xd4, INSTR_RXY_RRRD },
1417#endif
1418 { "lrv", 0x1e, INSTR_RXY_RRRD }, 1382 { "lrv", 0x1e, INSTR_RXY_RRRD },
1419 { "lrvh", 0x1f, INSTR_RXY_RRRD }, 1383 { "lrvh", 0x1f, INSTR_RXY_RRRD },
1420 { "strv", 0x3e, INSTR_RXY_RRRD }, 1384 { "strv", 0x3e, INSTR_RXY_RRRD },
@@ -1426,7 +1390,6 @@ static struct s390_insn opcode_e3[] = {
1426}; 1390};
1427 1391
1428static struct s390_insn opcode_e5[] = { 1392static struct s390_insn opcode_e5[] = {
1429#ifdef CONFIG_64BIT
1430 { "strag", 0x02, INSTR_SSE_RDRD }, 1393 { "strag", 0x02, INSTR_SSE_RDRD },
1431 { "mvhhi", 0x44, INSTR_SIL_RDI }, 1394 { "mvhhi", 0x44, INSTR_SIL_RDI },
1432 { "mvghi", 0x48, INSTR_SIL_RDI }, 1395 { "mvghi", 0x48, INSTR_SIL_RDI },
@@ -1439,7 +1402,6 @@ static struct s390_insn opcode_e5[] = {
1439 { { 0, LONG_INSN_CLFHSI }, 0x5d, INSTR_SIL_RDU }, 1402 { { 0, LONG_INSN_CLFHSI }, 0x5d, INSTR_SIL_RDU },
1440 { { 0, LONG_INSN_TBEGIN }, 0x60, INSTR_SIL_RDU }, 1403 { { 0, LONG_INSN_TBEGIN }, 0x60, INSTR_SIL_RDU },
1441 { { 0, LONG_INSN_TBEGINC }, 0x61, INSTR_SIL_RDU }, 1404 { { 0, LONG_INSN_TBEGINC }, 0x61, INSTR_SIL_RDU },
1442#endif
1443 { "lasp", 0x00, INSTR_SSE_RDRD }, 1405 { "lasp", 0x00, INSTR_SSE_RDRD },
1444 { "tprot", 0x01, INSTR_SSE_RDRD }, 1406 { "tprot", 0x01, INSTR_SSE_RDRD },
1445 { "mvcsk", 0x0e, INSTR_SSE_RDRD }, 1407 { "mvcsk", 0x0e, INSTR_SSE_RDRD },
@@ -1448,7 +1410,6 @@ static struct s390_insn opcode_e5[] = {
1448}; 1410};
1449 1411
1450static struct s390_insn opcode_e7[] = { 1412static struct s390_insn opcode_e7[] = {
1451#ifdef CONFIG_64BIT
1452 { "lcbb", 0x27, INSTR_RXE_RRRDM }, 1413 { "lcbb", 0x27, INSTR_RXE_RRRDM },
1453 { "vgef", 0x13, INSTR_VRV_VVRDM }, 1414 { "vgef", 0x13, INSTR_VRV_VVRDM },
1454 { "vgeg", 0x12, INSTR_VRV_VVRDM }, 1415 { "vgeg", 0x12, INSTR_VRV_VVRDM },
@@ -1588,11 +1549,9 @@ static struct s390_insn opcode_e7[] = {
1588 { "vfsq", 0xce, INSTR_VRR_VV000MM }, 1549 { "vfsq", 0xce, INSTR_VRR_VV000MM },
1589 { "vfs", 0xe2, INSTR_VRR_VVV00MM }, 1550 { "vfs", 0xe2, INSTR_VRR_VVV00MM },
1590 { "vftci", 0x4a, INSTR_VRI_VVIMM }, 1551 { "vftci", 0x4a, INSTR_VRI_VVIMM },
1591#endif
1592}; 1552};
1593 1553
1594static struct s390_insn opcode_eb[] = { 1554static struct s390_insn opcode_eb[] = {
1595#ifdef CONFIG_64BIT
1596 { "lmg", 0x04, INSTR_RSY_RRRD }, 1555 { "lmg", 0x04, INSTR_RSY_RRRD },
1597 { "srag", 0x0a, INSTR_RSY_RRRD }, 1556 { "srag", 0x0a, INSTR_RSY_RRRD },
1598 { "slag", 0x0b, INSTR_RSY_RRRD }, 1557 { "slag", 0x0b, INSTR_RSY_RRRD },
@@ -1659,7 +1618,6 @@ static struct s390_insn opcode_eb[] = {
1659 { "stric", 0x61, INSTR_RSY_RDRM }, 1618 { "stric", 0x61, INSTR_RSY_RDRM },
1660 { "mric", 0x62, INSTR_RSY_RDRM }, 1619 { "mric", 0x62, INSTR_RSY_RDRM },
1661 { { 0, LONG_INSN_STCCTM }, 0x17, INSTR_RSY_RMRD }, 1620 { { 0, LONG_INSN_STCCTM }, 0x17, INSTR_RSY_RMRD },
1662#endif
1663 { "rll", 0x1d, INSTR_RSY_RRRD }, 1621 { "rll", 0x1d, INSTR_RSY_RRRD },
1664 { "mvclu", 0x8e, INSTR_RSY_RRRD }, 1622 { "mvclu", 0x8e, INSTR_RSY_RRRD },
1665 { "tp", 0xc0, INSTR_RSL_R0RD }, 1623 { "tp", 0xc0, INSTR_RSL_R0RD },
@@ -1667,7 +1625,6 @@ static struct s390_insn opcode_eb[] = {
1667}; 1625};
1668 1626
1669static struct s390_insn opcode_ec[] = { 1627static struct s390_insn opcode_ec[] = {
1670#ifdef CONFIG_64BIT
1671 { "brxhg", 0x44, INSTR_RIE_RRP }, 1628 { "brxhg", 0x44, INSTR_RIE_RRP },
1672 { "brxlg", 0x45, INSTR_RIE_RRP }, 1629 { "brxlg", 0x45, INSTR_RIE_RRP },
1673 { { 0, LONG_INSN_RISBLG }, 0x51, INSTR_RIE_RRUUU }, 1630 { { 0, LONG_INSN_RISBLG }, 0x51, INSTR_RIE_RRUUU },
@@ -1701,12 +1658,10 @@ static struct s390_insn opcode_ec[] = {
1701 { "clgib", 0xfd, INSTR_RIS_RURDU }, 1658 { "clgib", 0xfd, INSTR_RIS_RURDU },
1702 { "cib", 0xfe, INSTR_RIS_RURDI }, 1659 { "cib", 0xfe, INSTR_RIS_RURDI },
1703 { "clib", 0xff, INSTR_RIS_RURDU }, 1660 { "clib", 0xff, INSTR_RIS_RURDU },
1704#endif
1705 { "", 0, INSTR_INVALID } 1661 { "", 0, INSTR_INVALID }
1706}; 1662};
1707 1663
1708static struct s390_insn opcode_ed[] = { 1664static struct s390_insn opcode_ed[] = {
1709#ifdef CONFIG_64BIT
1710 { "mayl", 0x38, INSTR_RXF_FRRDF }, 1665 { "mayl", 0x38, INSTR_RXF_FRRDF },
1711 { "myl", 0x39, INSTR_RXF_FRRDF }, 1666 { "myl", 0x39, INSTR_RXF_FRRDF },
1712 { "may", 0x3a, INSTR_RXF_FRRDF }, 1667 { "may", 0x3a, INSTR_RXF_FRRDF },
@@ -1731,7 +1686,6 @@ static struct s390_insn opcode_ed[] = {
1731 { "czxt", 0xa9, INSTR_RSL_LRDFU }, 1686 { "czxt", 0xa9, INSTR_RSL_LRDFU },
1732 { "cdzt", 0xaa, INSTR_RSL_LRDFU }, 1687 { "cdzt", 0xaa, INSTR_RSL_LRDFU },
1733 { "cxzt", 0xab, INSTR_RSL_LRDFU }, 1688 { "cxzt", 0xab, INSTR_RSL_LRDFU },
1734#endif
1735 { "ldeb", 0x04, INSTR_RXE_FRRD }, 1689 { "ldeb", 0x04, INSTR_RXE_FRRD },
1736 { "lxdb", 0x05, INSTR_RXE_FRRD }, 1690 { "lxdb", 0x05, INSTR_RXE_FRRD },
1737 { "lxeb", 0x06, INSTR_RXE_FRRD }, 1691 { "lxeb", 0x06, INSTR_RXE_FRRD },
@@ -2051,7 +2005,7 @@ void show_code(struct pt_regs *regs)
2051 else 2005 else
2052 *ptr++ = ' '; 2006 *ptr++ = ' ';
2053 addr = regs->psw.addr + start - 32; 2007 addr = regs->psw.addr + start - 32;
2054 ptr += sprintf(ptr, ONELONG, addr); 2008 ptr += sprintf(ptr, "%016lx: ", addr);
2055 if (start + opsize >= end) 2009 if (start + opsize >= end)
2056 break; 2010 break;
2057 for (i = 0; i < opsize; i++) 2011 for (i = 0; i < opsize; i++)
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index a99852e96a77..dc8e20473484 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -18,16 +18,6 @@
18#include <asm/dis.h> 18#include <asm/dis.h>
19#include <asm/ipl.h> 19#include <asm/ipl.h>
20 20
21#ifndef CONFIG_64BIT
22#define LONG "%08lx "
23#define FOURLONG "%08lx %08lx %08lx %08lx\n"
24static int kstack_depth_to_print = 12;
25#else /* CONFIG_64BIT */
26#define LONG "%016lx "
27#define FOURLONG "%016lx %016lx %016lx %016lx\n"
28static int kstack_depth_to_print = 20;
29#endif /* CONFIG_64BIT */
30
31/* 21/*
32 * For show_trace we have tree different stack to consider: 22 * For show_trace we have tree different stack to consider:
33 * - the panic stack which is used if the kernel stack has overflown 23 * - the panic stack which is used if the kernel stack has overflown
@@ -115,12 +105,12 @@ void show_stack(struct task_struct *task, unsigned long *sp)
115 else 105 else
116 stack = sp; 106 stack = sp;
117 107
118 for (i = 0; i < kstack_depth_to_print; i++) { 108 for (i = 0; i < 20; i++) {
119 if (((addr_t) stack & (THREAD_SIZE-1)) == 0) 109 if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
120 break; 110 break;
121 if ((i * sizeof(long) % 32) == 0) 111 if ((i * sizeof(long) % 32) == 0)
122 printk("%s ", i == 0 ? "" : "\n"); 112 printk("%s ", i == 0 ? "" : "\n");
123 printk(LONG, *stack++); 113 printk("%016lx ", *stack++);
124 } 114 }
125 printk("\n"); 115 printk("\n");
126 show_trace(task, sp); 116 show_trace(task, sp);
@@ -128,10 +118,8 @@ void show_stack(struct task_struct *task, unsigned long *sp)
128 118
129static void show_last_breaking_event(struct pt_regs *regs) 119static void show_last_breaking_event(struct pt_regs *regs)
130{ 120{
131#ifdef CONFIG_64BIT
132 printk("Last Breaking-Event-Address:\n"); 121 printk("Last Breaking-Event-Address:\n");
133 printk(" [<%016lx>] %pSR\n", regs->args[0], (void *)regs->args[0]); 122 printk(" [<%016lx>] %pSR\n", regs->args[0], (void *)regs->args[0]);
134#endif
135} 123}
136 124
137static inline int mask_bits(struct pt_regs *regs, unsigned long bits) 125static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
@@ -155,16 +143,14 @@ void show_registers(struct pt_regs *regs)
155 mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT), 143 mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
156 mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC), 144 mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
157 mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM)); 145 mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
158#ifdef CONFIG_64BIT
159 printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA)); 146 printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA));
160#endif 147 printk("\n%s GPRS: %016lx %016lx %016lx %016lx\n", mode,
161 printk("\n%s GPRS: " FOURLONG, mode,
162 regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]); 148 regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
163 printk(" " FOURLONG, 149 printk(" %016lx %016lx %016lx %016lx\n",
164 regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]); 150 regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
165 printk(" " FOURLONG, 151 printk(" %016lx %016lx %016lx %016lx\n",
166 regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]); 152 regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
167 printk(" " FOURLONG, 153 printk(" %016lx %016lx %016lx %016lx\n",
168 regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]); 154 regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
169 show_code(regs); 155 show_code(regs);
170} 156}
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 4427ab7ac23a..549a73a4b543 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -64,7 +64,6 @@ asm(
64 " .align 4\n" 64 " .align 4\n"
65 " .type savesys_ipl_nss, @function\n" 65 " .type savesys_ipl_nss, @function\n"
66 "savesys_ipl_nss:\n" 66 "savesys_ipl_nss:\n"
67#ifdef CONFIG_64BIT
68 " stmg 6,15,48(15)\n" 67 " stmg 6,15,48(15)\n"
69 " lgr 14,3\n" 68 " lgr 14,3\n"
70 " sam31\n" 69 " sam31\n"
@@ -72,13 +71,6 @@ asm(
72 " sam64\n" 71 " sam64\n"
73 " lgr 2,14\n" 72 " lgr 2,14\n"
74 " lmg 6,15,48(15)\n" 73 " lmg 6,15,48(15)\n"
75#else
76 " stm 6,15,24(15)\n"
77 " lr 14,3\n"
78 " diag 2,14,0x8\n"
79 " lr 2,14\n"
80 " lm 6,15,24(15)\n"
81#endif
82 " br 14\n" 74 " br 14\n"
83 " .size savesys_ipl_nss, .-savesys_ipl_nss\n" 75 " .size savesys_ipl_nss, .-savesys_ipl_nss\n"
84 " .previous\n"); 76 " .previous\n");
@@ -240,7 +232,6 @@ static noinline __init void detect_machine_type(void)
240 232
241static __init void setup_topology(void) 233static __init void setup_topology(void)
242{ 234{
243#ifdef CONFIG_64BIT
244 int max_mnest; 235 int max_mnest;
245 236
246 if (!test_facility(11)) 237 if (!test_facility(11))
@@ -251,7 +242,6 @@ static __init void setup_topology(void)
251 break; 242 break;
252 } 243 }
253 topology_max_mnest = max_mnest; 244 topology_max_mnest = max_mnest;
254#endif
255} 245}
256 246
257static void early_pgm_check_handler(void) 247static void early_pgm_check_handler(void)
@@ -290,58 +280,6 @@ static noinline __init void setup_facility_list(void)
290 ARRAY_SIZE(S390_lowcore.stfle_fac_list)); 280 ARRAY_SIZE(S390_lowcore.stfle_fac_list));
291} 281}
292 282
293static __init void detect_mvpg(void)
294{
295#ifndef CONFIG_64BIT
296 int rc;
297
298 asm volatile(
299 " la 0,0\n"
300 " mvpg %2,%2\n"
301 "0: la %0,0\n"
302 "1:\n"
303 EX_TABLE(0b,1b)
304 : "=d" (rc) : "0" (-EOPNOTSUPP), "a" (0) : "memory", "cc", "0");
305 if (!rc)
306 S390_lowcore.machine_flags |= MACHINE_FLAG_MVPG;
307#endif
308}
309
310static __init void detect_ieee(void)
311{
312#ifndef CONFIG_64BIT
313 int rc, tmp;
314
315 asm volatile(
316 " efpc %1,0\n"
317 "0: la %0,0\n"
318 "1:\n"
319 EX_TABLE(0b,1b)
320 : "=d" (rc), "=d" (tmp): "0" (-EOPNOTSUPP) : "cc");
321 if (!rc)
322 S390_lowcore.machine_flags |= MACHINE_FLAG_IEEE;
323#endif
324}
325
326static __init void detect_csp(void)
327{
328#ifndef CONFIG_64BIT
329 int rc;
330
331 asm volatile(
332 " la 0,0\n"
333 " la 1,0\n"
334 " la 2,4\n"
335 " csp 0,2\n"
336 "0: la %0,0\n"
337 "1:\n"
338 EX_TABLE(0b,1b)
339 : "=d" (rc) : "0" (-EOPNOTSUPP) : "cc", "0", "1", "2");
340 if (!rc)
341 S390_lowcore.machine_flags |= MACHINE_FLAG_CSP;
342#endif
343}
344
345static __init void detect_diag9c(void) 283static __init void detect_diag9c(void)
346{ 284{
347 unsigned int cpu_address; 285 unsigned int cpu_address;
@@ -360,7 +298,6 @@ static __init void detect_diag9c(void)
360 298
361static __init void detect_diag44(void) 299static __init void detect_diag44(void)
362{ 300{
363#ifdef CONFIG_64BIT
364 int rc; 301 int rc;
365 302
366 asm volatile( 303 asm volatile(
@@ -371,12 +308,10 @@ static __init void detect_diag44(void)
371 : "=d" (rc) : "0" (-EOPNOTSUPP) : "cc"); 308 : "=d" (rc) : "0" (-EOPNOTSUPP) : "cc");
372 if (!rc) 309 if (!rc)
373 S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG44; 310 S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG44;
374#endif
375} 311}
376 312
377static __init void detect_machine_facilities(void) 313static __init void detect_machine_facilities(void)
378{ 314{
379#ifdef CONFIG_64BIT
380 if (test_facility(8)) { 315 if (test_facility(8)) {
381 S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT1; 316 S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT1;
382 __ctl_set_bit(0, 23); 317 __ctl_set_bit(0, 23);
@@ -393,7 +328,6 @@ static __init void detect_machine_facilities(void)
393 S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC; 328 S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
394 if (test_facility(129)) 329 if (test_facility(129))
395 S390_lowcore.machine_flags |= MACHINE_FLAG_VX; 330 S390_lowcore.machine_flags |= MACHINE_FLAG_VX;
396#endif
397} 331}
398 332
399static int __init cad_setup(char *str) 333static int __init cad_setup(char *str)
@@ -501,9 +435,6 @@ void __init startup_init(void)
501 ipl_update_parameters(); 435 ipl_update_parameters();
502 setup_boot_command_line(); 436 setup_boot_command_line();
503 create_kernel_nss(); 437 create_kernel_nss();
504 detect_mvpg();
505 detect_ieee();
506 detect_csp();
507 detect_diag9c(); 438 detect_diag9c();
508 detect_diag44(); 439 detect_diag44();
509 detect_machine_facilities(); 440 detect_machine_facilities();
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
deleted file mode 100644
index 398329b2b518..000000000000
--- a/arch/s390/kernel/entry.S
+++ /dev/null
@@ -1,966 +0,0 @@
1/*
2 * S390 low-level entry points.
3 *
4 * Copyright IBM Corp. 1999, 2012
5 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
6 * Hartmut Penner (hp@de.ibm.com),
7 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
8 * Heiko Carstens <heiko.carstens@de.ibm.com>
9 */
10
11#include <linux/init.h>
12#include <linux/linkage.h>
13#include <asm/processor.h>
14#include <asm/cache.h>
15#include <asm/errno.h>
16#include <asm/ptrace.h>
17#include <asm/thread_info.h>
18#include <asm/asm-offsets.h>
19#include <asm/unistd.h>
20#include <asm/page.h>
21#include <asm/sigp.h>
22#include <asm/irq.h>
23
24__PT_R0 = __PT_GPRS
25__PT_R1 = __PT_GPRS + 4
26__PT_R2 = __PT_GPRS + 8
27__PT_R3 = __PT_GPRS + 12
28__PT_R4 = __PT_GPRS + 16
29__PT_R5 = __PT_GPRS + 20
30__PT_R6 = __PT_GPRS + 24
31__PT_R7 = __PT_GPRS + 28
32__PT_R8 = __PT_GPRS + 32
33__PT_R9 = __PT_GPRS + 36
34__PT_R10 = __PT_GPRS + 40
35__PT_R11 = __PT_GPRS + 44
36__PT_R12 = __PT_GPRS + 48
37__PT_R13 = __PT_GPRS + 524
38__PT_R14 = __PT_GPRS + 56
39__PT_R15 = __PT_GPRS + 60
40
41STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
42STACK_SIZE = 1 << STACK_SHIFT
43STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
44
45_TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED)
46_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
47 _TIF_SYSCALL_TRACEPOINT)
48_CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE)
49_PIF_WORK = (_PIF_PER_TRAP)
50
51#define BASED(name) name-system_call(%r13)
52
53 .macro TRACE_IRQS_ON
54#ifdef CONFIG_TRACE_IRQFLAGS
55 basr %r2,%r0
56 l %r1,BASED(.Lc_hardirqs_on)
57 basr %r14,%r1 # call trace_hardirqs_on_caller
58#endif
59 .endm
60
61 .macro TRACE_IRQS_OFF
62#ifdef CONFIG_TRACE_IRQFLAGS
63 basr %r2,%r0
64 l %r1,BASED(.Lc_hardirqs_off)
65 basr %r14,%r1 # call trace_hardirqs_off_caller
66#endif
67 .endm
68
69 .macro LOCKDEP_SYS_EXIT
70#ifdef CONFIG_LOCKDEP
71 tm __PT_PSW+1(%r11),0x01 # returning to user ?
72 jz .+10
73 l %r1,BASED(.Lc_lockdep_sys_exit)
74 basr %r14,%r1 # call lockdep_sys_exit
75#endif
76 .endm
77
78 .macro CHECK_STACK stacksize,savearea
79#ifdef CONFIG_CHECK_STACK
80 tml %r15,\stacksize - CONFIG_STACK_GUARD
81 la %r14,\savearea
82 jz stack_overflow
83#endif
84 .endm
85
86 .macro SWITCH_ASYNC savearea,stack,shift
87 tmh %r8,0x0001 # interrupting from user ?
88 jnz 1f
89 lr %r14,%r9
90 sl %r14,BASED(.Lc_critical_start)
91 cl %r14,BASED(.Lc_critical_length)
92 jhe 0f
93 la %r11,\savearea # inside critical section, do cleanup
94 bras %r14,cleanup_critical
95 tmh %r8,0x0001 # retest problem state after cleanup
96 jnz 1f
970: l %r14,\stack # are we already on the target stack?
98 slr %r14,%r15
99 sra %r14,\shift
100 jnz 1f
101 CHECK_STACK 1<<\shift,\savearea
102 ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
103 j 2f
1041: l %r15,\stack # load target stack
1052: la %r11,STACK_FRAME_OVERHEAD(%r15)
106 .endm
107
108 .macro ADD64 high,low,timer
109 al \high,\timer
110 al \low,4+\timer
111 brc 12,.+8
112 ahi \high,1
113 .endm
114
115 .macro SUB64 high,low,timer
116 sl \high,\timer
117 sl \low,4+\timer
118 brc 3,.+8
119 ahi \high,-1
120 .endm
121
122 .macro UPDATE_VTIME high,low,enter_timer
123 lm \high,\low,__LC_EXIT_TIMER
124 SUB64 \high,\low,\enter_timer
125 ADD64 \high,\low,__LC_USER_TIMER
126 stm \high,\low,__LC_USER_TIMER
127 lm \high,\low,__LC_LAST_UPDATE_TIMER
128 SUB64 \high,\low,__LC_EXIT_TIMER
129 ADD64 \high,\low,__LC_SYSTEM_TIMER
130 stm \high,\low,__LC_SYSTEM_TIMER
131 mvc __LC_LAST_UPDATE_TIMER(8),\enter_timer
132 .endm
133
134 .macro REENABLE_IRQS
135 st %r8,__LC_RETURN_PSW
136 ni __LC_RETURN_PSW,0xbf
137 ssm __LC_RETURN_PSW
138 .endm
139
140 .section .kprobes.text, "ax"
141
142/*
143 * Scheduler resume function, called by switch_to
144 * gpr2 = (task_struct *) prev
145 * gpr3 = (task_struct *) next
146 * Returns:
147 * gpr2 = prev
148 */
149ENTRY(__switch_to)
150 stm %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
151 st %r15,__THREAD_ksp(%r2) # store kernel stack of prev
152 l %r4,__THREAD_info(%r2) # get thread_info of prev
153 l %r5,__THREAD_info(%r3) # get thread_info of next
154 lr %r15,%r5
155 ahi %r15,STACK_INIT # end of kernel stack of next
156 st %r3,__LC_CURRENT # store task struct of next
157 st %r5,__LC_THREAD_INFO # store thread info of next
158 st %r15,__LC_KERNEL_STACK # store end of kernel stack
159 lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
160 mvc __LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next
161 l %r15,__THREAD_ksp(%r3) # load kernel stack of next
162 lm %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
163 br %r14
164
165.L__critical_start:
166/*
167 * SVC interrupt handler routine. System calls are synchronous events and
168 * are executed with interrupts enabled.
169 */
170
171ENTRY(system_call)
172 stpt __LC_SYNC_ENTER_TIMER
173.Lsysc_stm:
174 stm %r8,%r15,__LC_SAVE_AREA_SYNC
175 l %r12,__LC_THREAD_INFO
176 l %r13,__LC_SVC_NEW_PSW+4
177 lhi %r14,_PIF_SYSCALL
178.Lsysc_per:
179 l %r15,__LC_KERNEL_STACK
180 la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
181.Lsysc_vtime:
182 UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
183 stm %r0,%r7,__PT_R0(%r11)
184 mvc __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC
185 mvc __PT_PSW(8,%r11),__LC_SVC_OLD_PSW
186 mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
187 st %r14,__PT_FLAGS(%r11)
188.Lsysc_do_svc:
189 l %r10,__TI_sysc_table(%r12) # 31 bit system call table
190 lh %r8,__PT_INT_CODE+2(%r11)
191 sla %r8,2 # shift and test for svc0
192 jnz .Lsysc_nr_ok
193 # svc 0: system call number in %r1
194 cl %r1,BASED(.Lnr_syscalls)
195 jnl .Lsysc_nr_ok
196 sth %r1,__PT_INT_CODE+2(%r11)
197 lr %r8,%r1
198 sla %r8,2
199.Lsysc_nr_ok:
200 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
201 st %r2,__PT_ORIG_GPR2(%r11)
202 st %r7,STACK_FRAME_OVERHEAD(%r15)
203 l %r9,0(%r8,%r10) # get system call addr.
204 tm __TI_flags+3(%r12),_TIF_TRACE
205 jnz .Lsysc_tracesys
206 basr %r14,%r9 # call sys_xxxx
207 st %r2,__PT_R2(%r11) # store return value
208
209.Lsysc_return:
210 LOCKDEP_SYS_EXIT
211.Lsysc_tif:
212 tm __PT_PSW+1(%r11),0x01 # returning to user ?
213 jno .Lsysc_restore
214 tm __PT_FLAGS+3(%r11),_PIF_WORK
215 jnz .Lsysc_work
216 tm __TI_flags+3(%r12),_TIF_WORK
217 jnz .Lsysc_work # check for thread work
218 tm __LC_CPU_FLAGS+3,_CIF_WORK
219 jnz .Lsysc_work
220.Lsysc_restore:
221 mvc __LC_RETURN_PSW(8),__PT_PSW(%r11)
222 stpt __LC_EXIT_TIMER
223 lm %r0,%r15,__PT_R0(%r11)
224 lpsw __LC_RETURN_PSW
225.Lsysc_done:
226
227#
228# One of the work bits is on. Find out which one.
229#
230.Lsysc_work:
231 tm __LC_CPU_FLAGS+3,_CIF_MCCK_PENDING
232 jo .Lsysc_mcck_pending
233 tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
234 jo .Lsysc_reschedule
235 tm __PT_FLAGS+3(%r11),_PIF_PER_TRAP
236 jo .Lsysc_singlestep
237 tm __TI_flags+3(%r12),_TIF_SIGPENDING
238 jo .Lsysc_sigpending
239 tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME
240 jo .Lsysc_notify_resume
241 tm __LC_CPU_FLAGS+3,_CIF_ASCE
242 jo .Lsysc_uaccess
243 j .Lsysc_return # beware of critical section cleanup
244
245#
246# _TIF_NEED_RESCHED is set, call schedule
247#
248.Lsysc_reschedule:
249 l %r1,BASED(.Lc_schedule)
250 la %r14,BASED(.Lsysc_return)
251 br %r1 # call schedule
252
253#
254# _CIF_MCCK_PENDING is set, call handler
255#
256.Lsysc_mcck_pending:
257 l %r1,BASED(.Lc_handle_mcck)
258 la %r14,BASED(.Lsysc_return)
259 br %r1 # TIF bit will be cleared by handler
260
261#
262# _CIF_ASCE is set, load user space asce
263#
264.Lsysc_uaccess:
265 ni __LC_CPU_FLAGS+3,255-_CIF_ASCE
266 lctl %c1,%c1,__LC_USER_ASCE # load primary asce
267 j .Lsysc_return
268
269#
270# _TIF_SIGPENDING is set, call do_signal
271#
272.Lsysc_sigpending:
273 lr %r2,%r11 # pass pointer to pt_regs
274 l %r1,BASED(.Lc_do_signal)
275 basr %r14,%r1 # call do_signal
276 tm __PT_FLAGS+3(%r11),_PIF_SYSCALL
277 jno .Lsysc_return
278 lm %r2,%r7,__PT_R2(%r11) # load svc arguments
279 l %r10,__TI_sysc_table(%r12) # 31 bit system call table
280 xr %r8,%r8 # svc 0 returns -ENOSYS
281 clc __PT_INT_CODE+2(2,%r11),BASED(.Lnr_syscalls+2)
282 jnl .Lsysc_nr_ok # invalid svc number -> do svc 0
283 lh %r8,__PT_INT_CODE+2(%r11) # load new svc number
284 sla %r8,2
285 j .Lsysc_nr_ok # restart svc
286
287#
288# _TIF_NOTIFY_RESUME is set, call do_notify_resume
289#
290.Lsysc_notify_resume:
291 lr %r2,%r11 # pass pointer to pt_regs
292 l %r1,BASED(.Lc_do_notify_resume)
293 la %r14,BASED(.Lsysc_return)
294 br %r1 # call do_notify_resume
295
296#
297# _PIF_PER_TRAP is set, call do_per_trap
298#
299.Lsysc_singlestep:
300 ni __PT_FLAGS+3(%r11),255-_PIF_PER_TRAP
301 lr %r2,%r11 # pass pointer to pt_regs
302 l %r1,BASED(.Lc_do_per_trap)
303 la %r14,BASED(.Lsysc_return)
304 br %r1 # call do_per_trap
305
306#
307# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
308# and after the system call
309#
310.Lsysc_tracesys:
311 l %r1,BASED(.Lc_trace_enter)
312 lr %r2,%r11 # pass pointer to pt_regs
313 la %r3,0
314 xr %r0,%r0
315 icm %r0,3,__PT_INT_CODE+2(%r11)
316 st %r0,__PT_R2(%r11)
317 basr %r14,%r1 # call do_syscall_trace_enter
318 cl %r2,BASED(.Lnr_syscalls)
319 jnl .Lsysc_tracenogo
320 lr %r8,%r2
321 sll %r8,2
322 l %r9,0(%r8,%r10)
323.Lsysc_tracego:
324 lm %r3,%r7,__PT_R3(%r11)
325 st %r7,STACK_FRAME_OVERHEAD(%r15)
326 l %r2,__PT_ORIG_GPR2(%r11)
327 basr %r14,%r9 # call sys_xxx
328 st %r2,__PT_R2(%r11) # store return value
329.Lsysc_tracenogo:
330 tm __TI_flags+3(%r12),_TIF_TRACE
331 jz .Lsysc_return
332 l %r1,BASED(.Lc_trace_exit)
333 lr %r2,%r11 # pass pointer to pt_regs
334 la %r14,BASED(.Lsysc_return)
335 br %r1 # call do_syscall_trace_exit
336
337#
338# a new process exits the kernel with ret_from_fork
339#
340ENTRY(ret_from_fork)
341 la %r11,STACK_FRAME_OVERHEAD(%r15)
342 l %r12,__LC_THREAD_INFO
343 l %r13,__LC_SVC_NEW_PSW+4
344 l %r1,BASED(.Lc_schedule_tail)
345 basr %r14,%r1 # call schedule_tail
346 TRACE_IRQS_ON
347 ssm __LC_SVC_NEW_PSW # reenable interrupts
348 tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ?
349 jne .Lsysc_tracenogo
350 # it's a kernel thread
351 lm %r9,%r10,__PT_R9(%r11) # load gprs
352ENTRY(kernel_thread_starter)
353 la %r2,0(%r10)
354 basr %r14,%r9
355 j .Lsysc_tracenogo
356
357/*
358 * Program check handler routine
359 */
360
361ENTRY(pgm_check_handler)
362 stpt __LC_SYNC_ENTER_TIMER
363 stm %r8,%r15,__LC_SAVE_AREA_SYNC
364 l %r12,__LC_THREAD_INFO
365 l %r13,__LC_SVC_NEW_PSW+4
366 lm %r8,%r9,__LC_PGM_OLD_PSW
367 tmh %r8,0x0001 # test problem state bit
368 jnz 1f # -> fault in user space
369 tmh %r8,0x4000 # PER bit set in old PSW ?
370 jnz 0f # -> enabled, can't be a double fault
371 tm __LC_PGM_ILC+3,0x80 # check for per exception
372 jnz .Lpgm_svcper # -> single stepped svc
3730: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
374 ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
375 j 2f
3761: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
377 l %r15,__LC_KERNEL_STACK
3782: la %r11,STACK_FRAME_OVERHEAD(%r15)
379 stm %r0,%r7,__PT_R0(%r11)
380 mvc __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC
381 stm %r8,%r9,__PT_PSW(%r11)
382 mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC
383 mvc __PT_INT_PARM_LONG(4,%r11),__LC_TRANS_EXC_CODE
384 xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11)
385 tm __LC_PGM_ILC+3,0x80 # check for per exception
386 jz 0f
387 l %r1,__TI_task(%r12)
388 tmh %r8,0x0001 # kernel per event ?
389 jz .Lpgm_kprobe
390 oi __PT_FLAGS+3(%r11),_PIF_PER_TRAP
391 mvc __THREAD_per_address(4,%r1),__LC_PER_ADDRESS
392 mvc __THREAD_per_cause(2,%r1),__LC_PER_CODE
393 mvc __THREAD_per_paid(1,%r1),__LC_PER_ACCESS_ID
3940: REENABLE_IRQS
395 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
396 l %r1,BASED(.Lc_jump_table)
397 la %r10,0x7f
398 n %r10,__PT_INT_CODE(%r11)
399 je .Lsysc_return
400 sll %r10,2
401 l %r1,0(%r10,%r1) # load address of handler routine
402 lr %r2,%r11 # pass pointer to pt_regs
403 basr %r14,%r1 # branch to interrupt-handler
404 j .Lsysc_return
405
406#
407# PER event in supervisor state, must be kprobes
408#
409.Lpgm_kprobe:
410 REENABLE_IRQS
411 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
412 l %r1,BASED(.Lc_do_per_trap)
413 lr %r2,%r11 # pass pointer to pt_regs
414 basr %r14,%r1 # call do_per_trap
415 j .Lsysc_return
416
417#
418# single stepped system call
419#
420.Lpgm_svcper:
421 mvc __LC_RETURN_PSW(4),__LC_SVC_NEW_PSW
422 mvc __LC_RETURN_PSW+4(4),BASED(.Lc_sysc_per)
423 lhi %r14,_PIF_SYSCALL | _PIF_PER_TRAP
424 lpsw __LC_RETURN_PSW # branch to .Lsysc_per and enable irqs
425
426/*
427 * IO interrupt handler routine
428 */
429
430ENTRY(io_int_handler)
431 stck __LC_INT_CLOCK
432 stpt __LC_ASYNC_ENTER_TIMER
433 stm %r8,%r15,__LC_SAVE_AREA_ASYNC
434 l %r12,__LC_THREAD_INFO
435 l %r13,__LC_SVC_NEW_PSW+4
436 lm %r8,%r9,__LC_IO_OLD_PSW
437 tmh %r8,0x0001 # interrupting from user ?
438 jz .Lio_skip
439 UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER
440.Lio_skip:
441 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
442 stm %r0,%r7,__PT_R0(%r11)
443 mvc __PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC
444 stm %r8,%r9,__PT_PSW(%r11)
445 mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
446 xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11)
447 TRACE_IRQS_OFF
448 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
449.Lio_loop:
450 l %r1,BASED(.Lc_do_IRQ)
451 lr %r2,%r11 # pass pointer to pt_regs
452 lhi %r3,IO_INTERRUPT
453 tm __PT_INT_CODE+8(%r11),0x80 # adapter interrupt ?
454 jz .Lio_call
455 lhi %r3,THIN_INTERRUPT
456.Lio_call:
457 basr %r14,%r1 # call do_IRQ
458 tm __LC_MACHINE_FLAGS+2,0x10 # MACHINE_FLAG_LPAR
459 jz .Lio_return
460 tpi 0
461 jz .Lio_return
462 mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
463 j .Lio_loop
464.Lio_return:
465 LOCKDEP_SYS_EXIT
466 TRACE_IRQS_ON
467.Lio_tif:
468 tm __TI_flags+3(%r12),_TIF_WORK
469 jnz .Lio_work # there is work to do (signals etc.)
470 tm __LC_CPU_FLAGS+3,_CIF_WORK
471 jnz .Lio_work
472.Lio_restore:
473 mvc __LC_RETURN_PSW(8),__PT_PSW(%r11)
474 stpt __LC_EXIT_TIMER
475 lm %r0,%r15,__PT_R0(%r11)
476 lpsw __LC_RETURN_PSW
477.Lio_done:
478
479#
480# There is work todo, find out in which context we have been interrupted:
481# 1) if we return to user space we can do all _TIF_WORK work
482# 2) if we return to kernel code and preemptive scheduling is enabled check
483# the preemption counter and if it is zero call preempt_schedule_irq
484# Before any work can be done, a switch to the kernel stack is required.
485#
486.Lio_work:
487 tm __PT_PSW+1(%r11),0x01 # returning to user ?
488 jo .Lio_work_user # yes -> do resched & signal
489#ifdef CONFIG_PREEMPT
490 # check for preemptive scheduling
491 icm %r0,15,__TI_precount(%r12)
492 jnz .Lio_restore # preemption disabled
493 tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
494 jno .Lio_restore
495 # switch to kernel stack
496 l %r1,__PT_R15(%r11)
497 ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
498 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
499 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
500 la %r11,STACK_FRAME_OVERHEAD(%r1)
501 lr %r15,%r1
502 # TRACE_IRQS_ON already done at .Lio_return, call
503 # TRACE_IRQS_OFF to keep things symmetrical
504 TRACE_IRQS_OFF
505 l %r1,BASED(.Lc_preempt_irq)
506 basr %r14,%r1 # call preempt_schedule_irq
507 j .Lio_return
508#else
509 j .Lio_restore
510#endif
511
512#
513# Need to do work before returning to userspace, switch to kernel stack
514#
515.Lio_work_user:
516 l %r1,__LC_KERNEL_STACK
517 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
518 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
519 la %r11,STACK_FRAME_OVERHEAD(%r1)
520 lr %r15,%r1
521
522#
523# One of the work bits is on. Find out which one.
524#
525.Lio_work_tif:
526 tm __LC_CPU_FLAGS+3(%r12),_CIF_MCCK_PENDING
527 jo .Lio_mcck_pending
528 tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
529 jo .Lio_reschedule
530 tm __TI_flags+3(%r12),_TIF_SIGPENDING
531 jo .Lio_sigpending
532 tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME
533 jo .Lio_notify_resume
534 tm __LC_CPU_FLAGS+3,_CIF_ASCE
535 jo .Lio_uaccess
536 j .Lio_return # beware of critical section cleanup
537
538#
539# _CIF_MCCK_PENDING is set, call handler
540#
541.Lio_mcck_pending:
542 # TRACE_IRQS_ON already done at .Lio_return
543 l %r1,BASED(.Lc_handle_mcck)
544 basr %r14,%r1 # TIF bit will be cleared by handler
545 TRACE_IRQS_OFF
546 j .Lio_return
547
548#
549# _CIF_ASCE is set, load user space asce
550#
551.Lio_uaccess:
552 ni __LC_CPU_FLAGS+3,255-_CIF_ASCE
553 lctl %c1,%c1,__LC_USER_ASCE # load primary asce
554 j .Lio_return
555
556#
557# _TIF_NEED_RESCHED is set, call schedule
558#
559.Lio_reschedule:
560 # TRACE_IRQS_ON already done at .Lio_return
561 l %r1,BASED(.Lc_schedule)
562 ssm __LC_SVC_NEW_PSW # reenable interrupts
563 basr %r14,%r1 # call scheduler
564 ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
565 TRACE_IRQS_OFF
566 j .Lio_return
567
568#
569# _TIF_SIGPENDING is set, call do_signal
570#
571.Lio_sigpending:
572 # TRACE_IRQS_ON already done at .Lio_return
573 l %r1,BASED(.Lc_do_signal)
574 ssm __LC_SVC_NEW_PSW # reenable interrupts
575 lr %r2,%r11 # pass pointer to pt_regs
576 basr %r14,%r1 # call do_signal
577 ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
578 TRACE_IRQS_OFF
579 j .Lio_return
580
581#
582# _TIF_SIGPENDING is set, call do_signal
583#
584.Lio_notify_resume:
585 # TRACE_IRQS_ON already done at .Lio_return
586 l %r1,BASED(.Lc_do_notify_resume)
587 ssm __LC_SVC_NEW_PSW # reenable interrupts
588 lr %r2,%r11 # pass pointer to pt_regs
589 basr %r14,%r1 # call do_notify_resume
590 ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
591 TRACE_IRQS_OFF
592 j .Lio_return
593
594/*
595 * External interrupt handler routine
596 */
597
598ENTRY(ext_int_handler)
599 stck __LC_INT_CLOCK
600 stpt __LC_ASYNC_ENTER_TIMER
601 stm %r8,%r15,__LC_SAVE_AREA_ASYNC
602 l %r12,__LC_THREAD_INFO
603 l %r13,__LC_SVC_NEW_PSW+4
604 lm %r8,%r9,__LC_EXT_OLD_PSW
605 tmh %r8,0x0001 # interrupting from user ?
606 jz .Lext_skip
607 UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER
608.Lext_skip:
609 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
610 stm %r0,%r7,__PT_R0(%r11)
611 mvc __PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC
612 stm %r8,%r9,__PT_PSW(%r11)
613 mvc __PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR
614 mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
615 xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11)
616 TRACE_IRQS_OFF
617 l %r1,BASED(.Lc_do_IRQ)
618 lr %r2,%r11 # pass pointer to pt_regs
619 lhi %r3,EXT_INTERRUPT
620 basr %r14,%r1 # call do_IRQ
621 j .Lio_return
622
623/*
624 * Load idle PSW. The second "half" of this function is in .Lcleanup_idle.
625 */
626ENTRY(psw_idle)
627 st %r3,__SF_EMPTY(%r15)
628 basr %r1,0
629 la %r1,.Lpsw_idle_lpsw+4-.(%r1)
630 st %r1,__SF_EMPTY+4(%r15)
631 oi __SF_EMPTY+4(%r15),0x80
632 stck __CLOCK_IDLE_ENTER(%r2)
633 stpt __TIMER_IDLE_ENTER(%r2)
634.Lpsw_idle_lpsw:
635 lpsw __SF_EMPTY(%r15)
636 br %r14
637.Lpsw_idle_end:
638
639.L__critical_end:
640
641/*
642 * Machine check handler routines
643 */
644
645ENTRY(mcck_int_handler)
646 stck __LC_MCCK_CLOCK
647 spt __LC_CPU_TIMER_SAVE_AREA # revalidate cpu timer
648 lm %r0,%r15,__LC_GPREGS_SAVE_AREA # revalidate gprs
649 l %r12,__LC_THREAD_INFO
650 l %r13,__LC_SVC_NEW_PSW+4
651 lm %r8,%r9,__LC_MCK_OLD_PSW
652 tm __LC_MCCK_CODE,0x80 # system damage?
653 jo .Lmcck_panic # yes -> rest of mcck code invalid
654 la %r14,__LC_CPU_TIMER_SAVE_AREA
655 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
656 tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid?
657 jo 3f
658 la %r14,__LC_SYNC_ENTER_TIMER
659 clc 0(8,%r14),__LC_ASYNC_ENTER_TIMER
660 jl 0f
661 la %r14,__LC_ASYNC_ENTER_TIMER
6620: clc 0(8,%r14),__LC_EXIT_TIMER
663 jl 1f
664 la %r14,__LC_EXIT_TIMER
6651: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER
666 jl 2f
667 la %r14,__LC_LAST_UPDATE_TIMER
6682: spt 0(%r14)
669 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
6703: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
671 jno .Lmcck_panic # no -> skip cleanup critical
672 tm %r8,0x0001 # interrupting from user ?
673 jz .Lmcck_skip
674 UPDATE_VTIME %r14,%r15,__LC_MCCK_ENTER_TIMER
675.Lmcck_skip:
676 SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+32,__LC_PANIC_STACK,PAGE_SHIFT
677 stm %r0,%r7,__PT_R0(%r11)
678 mvc __PT_R8(32,%r11),__LC_GPREGS_SAVE_AREA+32
679 stm %r8,%r9,__PT_PSW(%r11)
680 xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11)
681 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
682 l %r1,BASED(.Lc_do_machine_check)
683 lr %r2,%r11 # pass pointer to pt_regs
684 basr %r14,%r1 # call s390_do_machine_check
685 tm __PT_PSW+1(%r11),0x01 # returning to user ?
686 jno .Lmcck_return
687 l %r1,__LC_KERNEL_STACK # switch to kernel stack
688 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
689 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
690 la %r11,STACK_FRAME_OVERHEAD(%r15)
691 lr %r15,%r1
692 ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off
693 tm __LC_CPU_FLAGS+3,_CIF_MCCK_PENDING
694 jno .Lmcck_return
695 TRACE_IRQS_OFF
696 l %r1,BASED(.Lc_handle_mcck)
697 basr %r14,%r1 # call s390_handle_mcck
698 TRACE_IRQS_ON
699.Lmcck_return:
700 mvc __LC_RETURN_MCCK_PSW(8),__PT_PSW(%r11) # move return PSW
701 tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
702 jno 0f
703 lm %r0,%r15,__PT_R0(%r11)
704 stpt __LC_EXIT_TIMER
705 lpsw __LC_RETURN_MCCK_PSW
7060: lm %r0,%r15,__PT_R0(%r11)
707 lpsw __LC_RETURN_MCCK_PSW
708
709.Lmcck_panic:
710 l %r14,__LC_PANIC_STACK
711 slr %r14,%r15
712 sra %r14,PAGE_SHIFT
713 jz 0f
714 l %r15,__LC_PANIC_STACK
715 j .Lmcck_skip
7160: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
717 j .Lmcck_skip
718
719#
720# PSW restart interrupt handler
721#
722ENTRY(restart_int_handler)
723 st %r15,__LC_SAVE_AREA_RESTART
724 l %r15,__LC_RESTART_STACK
725 ahi %r15,-__PT_SIZE # create pt_regs on stack
726 xc 0(__PT_SIZE,%r15),0(%r15)
727 stm %r0,%r14,__PT_R0(%r15)
728 mvc __PT_R15(4,%r15),__LC_SAVE_AREA_RESTART
729 mvc __PT_PSW(8,%r15),__LC_RST_OLD_PSW # store restart old psw
730 ahi %r15,-STACK_FRAME_OVERHEAD # create stack frame on stack
731 xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
732 l %r1,__LC_RESTART_FN # load fn, parm & source cpu
733 l %r2,__LC_RESTART_DATA
734 l %r3,__LC_RESTART_SOURCE
735 ltr %r3,%r3 # test source cpu address
736 jm 1f # negative -> skip source stop
7370: sigp %r4,%r3,SIGP_SENSE # sigp sense to source cpu
738 brc 10,0b # wait for status stored
7391: basr %r14,%r1 # call function
740 stap __SF_EMPTY(%r15) # store cpu address
741 lh %r3,__SF_EMPTY(%r15)
7422: sigp %r4,%r3,SIGP_STOP # sigp stop to current cpu
743 brc 2,2b
7443: j 3b
745
746 .section .kprobes.text, "ax"
747
748#ifdef CONFIG_CHECK_STACK
749/*
750 * The synchronous or the asynchronous stack overflowed. We are dead.
751 * No need to properly save the registers, we are going to panic anyway.
752 * Setup a pt_regs so that show_trace can provide a good call trace.
753 */
754stack_overflow:
755 l %r15,__LC_PANIC_STACK # change to panic stack
756 la %r11,STACK_FRAME_OVERHEAD(%r15)
757 stm %r0,%r7,__PT_R0(%r11)
758 stm %r8,%r9,__PT_PSW(%r11)
759 mvc __PT_R8(32,%r11),0(%r14)
760 l %r1,BASED(1f)
761 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
762 lr %r2,%r11 # pass pointer to pt_regs
763 br %r1 # branch to kernel_stack_overflow
7641: .long kernel_stack_overflow
765#endif
766
767.Lcleanup_table:
768 .long system_call + 0x80000000
769 .long .Lsysc_do_svc + 0x80000000
770 .long .Lsysc_tif + 0x80000000
771 .long .Lsysc_restore + 0x80000000
772 .long .Lsysc_done + 0x80000000
773 .long .Lio_tif + 0x80000000
774 .long .Lio_restore + 0x80000000
775 .long .Lio_done + 0x80000000
776 .long psw_idle + 0x80000000
777 .long .Lpsw_idle_end + 0x80000000
778
779cleanup_critical:
780 cl %r9,BASED(.Lcleanup_table) # system_call
781 jl 0f
782 cl %r9,BASED(.Lcleanup_table+4) # .Lsysc_do_svc
783 jl .Lcleanup_system_call
784 cl %r9,BASED(.Lcleanup_table+8) # .Lsysc_tif
785 jl 0f
786 cl %r9,BASED(.Lcleanup_table+12) # .Lsysc_restore
787 jl .Lcleanup_sysc_tif
788 cl %r9,BASED(.Lcleanup_table+16) # .Lsysc_done
789 jl .Lcleanup_sysc_restore
790 cl %r9,BASED(.Lcleanup_table+20) # .Lio_tif
791 jl 0f
792 cl %r9,BASED(.Lcleanup_table+24) # .Lio_restore
793 jl .Lcleanup_io_tif
794 cl %r9,BASED(.Lcleanup_table+28) # .Lio_done
795 jl .Lcleanup_io_restore
796 cl %r9,BASED(.Lcleanup_table+32) # psw_idle
797 jl 0f
798 cl %r9,BASED(.Lcleanup_table+36) # .Lpsw_idle_end
799 jl .Lcleanup_idle
8000: br %r14
801
802.Lcleanup_system_call:
803 # check if stpt has been executed
804 cl %r9,BASED(.Lcleanup_system_call_insn)
805 jh 0f
806 mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
807 chi %r11,__LC_SAVE_AREA_ASYNC
808 je 0f
809 mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
8100: # check if stm has been executed
811 cl %r9,BASED(.Lcleanup_system_call_insn+4)
812 jh 0f
813 mvc __LC_SAVE_AREA_SYNC(32),0(%r11)
8140: # set up saved registers r12, and r13
815 st %r12,16(%r11) # r12 thread-info pointer
816 st %r13,20(%r11) # r13 literal-pool pointer
817 # check if the user time calculation has been done
818 cl %r9,BASED(.Lcleanup_system_call_insn+8)
819 jh 0f
820 l %r10,__LC_EXIT_TIMER
821 l %r15,__LC_EXIT_TIMER+4
822 SUB64 %r10,%r15,__LC_SYNC_ENTER_TIMER
823 ADD64 %r10,%r15,__LC_USER_TIMER
824 st %r10,__LC_USER_TIMER
825 st %r15,__LC_USER_TIMER+4
8260: # check if the system time calculation has been done
827 cl %r9,BASED(.Lcleanup_system_call_insn+12)
828 jh 0f
829 l %r10,__LC_LAST_UPDATE_TIMER
830 l %r15,__LC_LAST_UPDATE_TIMER+4
831 SUB64 %r10,%r15,__LC_EXIT_TIMER
832 ADD64 %r10,%r15,__LC_SYSTEM_TIMER
833 st %r10,__LC_SYSTEM_TIMER
834 st %r15,__LC_SYSTEM_TIMER+4
8350: # update accounting time stamp
836 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
837 # set up saved register 11
838 l %r15,__LC_KERNEL_STACK
839 la %r9,STACK_FRAME_OVERHEAD(%r15)
840 st %r9,12(%r11) # r11 pt_regs pointer
841 # fill pt_regs
842 mvc __PT_R8(32,%r9),__LC_SAVE_AREA_SYNC
843 stm %r0,%r7,__PT_R0(%r9)
844 mvc __PT_PSW(8,%r9),__LC_SVC_OLD_PSW
845 mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC
846 xc __PT_FLAGS(4,%r9),__PT_FLAGS(%r9)
847 mvi __PT_FLAGS+3(%r9),_PIF_SYSCALL
848 # setup saved register 15
849 st %r15,28(%r11) # r15 stack pointer
850 # set new psw address and exit
851 l %r9,BASED(.Lcleanup_table+4) # .Lsysc_do_svc + 0x80000000
852 br %r14
853.Lcleanup_system_call_insn:
854 .long system_call + 0x80000000
855 .long .Lsysc_stm + 0x80000000
856 .long .Lsysc_vtime + 0x80000000 + 36
857 .long .Lsysc_vtime + 0x80000000 + 76
858
859.Lcleanup_sysc_tif:
860 l %r9,BASED(.Lcleanup_table+8) # .Lsysc_tif + 0x80000000
861 br %r14
862
863.Lcleanup_sysc_restore:
864 cl %r9,BASED(.Lcleanup_sysc_restore_insn)
865 jhe 0f
866 l %r9,12(%r11) # get saved pointer to pt_regs
867 mvc __LC_RETURN_PSW(8),__PT_PSW(%r9)
868 mvc 0(32,%r11),__PT_R8(%r9)
869 lm %r0,%r7,__PT_R0(%r9)
8700: lm %r8,%r9,__LC_RETURN_PSW
871 br %r14
872.Lcleanup_sysc_restore_insn:
873 .long .Lsysc_done - 4 + 0x80000000
874
875.Lcleanup_io_tif:
876 l %r9,BASED(.Lcleanup_table+20) # .Lio_tif + 0x80000000
877 br %r14
878
879.Lcleanup_io_restore:
880 cl %r9,BASED(.Lcleanup_io_restore_insn)
881 jhe 0f
882 l %r9,12(%r11) # get saved r11 pointer to pt_regs
883 mvc __LC_RETURN_PSW(8),__PT_PSW(%r9)
884 mvc 0(32,%r11),__PT_R8(%r9)
885 lm %r0,%r7,__PT_R0(%r9)
8860: lm %r8,%r9,__LC_RETURN_PSW
887 br %r14
888.Lcleanup_io_restore_insn:
889 .long .Lio_done - 4 + 0x80000000
890
891.Lcleanup_idle:
892 # copy interrupt clock & cpu timer
893 mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
894 mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
895 chi %r11,__LC_SAVE_AREA_ASYNC
896 je 0f
897 mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
898 mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
8990: # check if stck has been executed
900 cl %r9,BASED(.Lcleanup_idle_insn)
901 jhe 1f
902 mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
903 mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r3)
9041: # account system time going idle
905 lm %r9,%r10,__LC_STEAL_TIMER
906 ADD64 %r9,%r10,__CLOCK_IDLE_ENTER(%r2)
907 SUB64 %r9,%r10,__LC_LAST_UPDATE_CLOCK
908 stm %r9,%r10,__LC_STEAL_TIMER
909 mvc __LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
910 lm %r9,%r10,__LC_SYSTEM_TIMER
911 ADD64 %r9,%r10,__LC_LAST_UPDATE_TIMER
912 SUB64 %r9,%r10,__TIMER_IDLE_ENTER(%r2)
913 stm %r9,%r10,__LC_SYSTEM_TIMER
914 mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
915 # prepare return psw
916 n %r8,BASED(.Lcleanup_idle_wait) # clear irq & wait state bits
917 l %r9,24(%r11) # return from psw_idle
918 br %r14
919.Lcleanup_idle_insn:
920 .long .Lpsw_idle_lpsw + 0x80000000
921.Lcleanup_idle_wait:
922 .long 0xfcfdffff
923
924/*
925 * Integer constants
926 */
927 .align 4
928.Lnr_syscalls:
929 .long NR_syscalls
930.Lvtimer_max:
931 .quad 0x7fffffffffffffff
932
933/*
934 * Symbol constants
935 */
936.Lc_do_machine_check: .long s390_do_machine_check
937.Lc_handle_mcck: .long s390_handle_mcck
938.Lc_do_IRQ: .long do_IRQ
939.Lc_do_signal: .long do_signal
940.Lc_do_notify_resume: .long do_notify_resume
941.Lc_do_per_trap: .long do_per_trap
942.Lc_jump_table: .long pgm_check_table
943.Lc_schedule: .long schedule
944#ifdef CONFIG_PREEMPT
945.Lc_preempt_irq: .long preempt_schedule_irq
946#endif
947.Lc_trace_enter: .long do_syscall_trace_enter
948.Lc_trace_exit: .long do_syscall_trace_exit
949.Lc_schedule_tail: .long schedule_tail
950.Lc_sysc_per: .long .Lsysc_per + 0x80000000
951#ifdef CONFIG_TRACE_IRQFLAGS
952.Lc_hardirqs_on: .long trace_hardirqs_on_caller
953.Lc_hardirqs_off: .long trace_hardirqs_off_caller
954#endif
955#ifdef CONFIG_LOCKDEP
956.Lc_lockdep_sys_exit: .long lockdep_sys_exit
957#endif
958.Lc_critical_start: .long .L__critical_start + 0x80000000
959.Lc_critical_length: .long .L__critical_end - .L__critical_start
960
961 .section .rodata, "a"
962#define SYSCALL(esa,esame,emu) .long esa
963 .globl sys_call_table
964sys_call_table:
965#include "syscalls.S"
966#undef SYSCALL
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 132f4c9ade60..59b7c6470567 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -27,11 +27,7 @@
27#include <asm/thread_info.h> 27#include <asm/thread_info.h>
28#include <asm/page.h> 28#include <asm/page.h>
29 29
30#ifdef CONFIG_64BIT
31#define ARCH_OFFSET 4 30#define ARCH_OFFSET 4
32#else
33#define ARCH_OFFSET 0
34#endif
35 31
36__HEAD 32__HEAD
37 33
@@ -67,7 +63,6 @@ __HEAD
67# subroutine to set architecture mode 63# subroutine to set architecture mode
68# 64#
69.Lsetmode: 65.Lsetmode:
70#ifdef CONFIG_64BIT
71 mvi __LC_AR_MODE_ID,1 # set esame flag 66 mvi __LC_AR_MODE_ID,1 # set esame flag
72 slr %r0,%r0 # set cpuid to zero 67 slr %r0,%r0 # set cpuid to zero
73 lhi %r1,2 # mode 2 = esame (dump) 68 lhi %r1,2 # mode 2 = esame (dump)
@@ -76,16 +71,12 @@ __HEAD
76 .fill 16,4,0x0 71 .fill 16,4,0x0
770: lmh %r0,%r15,0(%r13) # clear high-order half of gprs 720: lmh %r0,%r15,0(%r13) # clear high-order half of gprs
78 sam31 # switch to 31 bit addressing mode 73 sam31 # switch to 31 bit addressing mode
79#else
80 mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0)
81#endif
82 br %r14 74 br %r14
83 75
84# 76#
85# subroutine to wait for end I/O 77# subroutine to wait for end I/O
86# 78#
87.Lirqwait: 79.Lirqwait:
88#ifdef CONFIG_64BIT
89 mvc 0x1f0(16),.Lnewpsw # set up IO interrupt psw 80 mvc 0x1f0(16),.Lnewpsw # set up IO interrupt psw
90 lpsw .Lwaitpsw 81 lpsw .Lwaitpsw
91.Lioint: 82.Lioint:
@@ -93,15 +84,6 @@ __HEAD
93 .align 8 84 .align 8
94.Lnewpsw: 85.Lnewpsw:
95 .quad 0x0000000080000000,.Lioint 86 .quad 0x0000000080000000,.Lioint
96#else
97 mvc 0x78(8),.Lnewpsw # set up IO interrupt psw
98 lpsw .Lwaitpsw
99.Lioint:
100 br %r14
101 .align 8
102.Lnewpsw:
103 .long 0x00080000,0x80000000+.Lioint
104#endif
105.Lwaitpsw: 87.Lwaitpsw:
106 .long 0x020a0000,0x80000000+.Lioint 88 .long 0x020a0000,0x80000000+.Lioint
107 89
@@ -375,7 +357,6 @@ ENTRY(startup)
375ENTRY(startup_kdump) 357ENTRY(startup_kdump)
376 j .Lep_startup_kdump 358 j .Lep_startup_kdump
377.Lep_startup_normal: 359.Lep_startup_normal:
378#ifdef CONFIG_64BIT
379 mvi __LC_AR_MODE_ID,1 # set esame flag 360 mvi __LC_AR_MODE_ID,1 # set esame flag
380 slr %r0,%r0 # set cpuid to zero 361 slr %r0,%r0 # set cpuid to zero
381 lhi %r1,2 # mode 2 = esame (dump) 362 lhi %r1,2 # mode 2 = esame (dump)
@@ -384,9 +365,6 @@ ENTRY(startup_kdump)
384 .fill 16,4,0x0 365 .fill 16,4,0x0
3850: lmh %r0,%r15,0(%r13) # clear high-order half of gprs 3660: lmh %r0,%r15,0(%r13) # clear high-order half of gprs
386 sam31 # switch to 31 bit addressing mode 367 sam31 # switch to 31 bit addressing mode
387#else
388 mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0)
389#endif
390 basr %r13,0 # get base 368 basr %r13,0 # get base
391.LPG0: 369.LPG0:
392 xc 0x200(256),0x200 # partially clear lowcore 370 xc 0x200(256),0x200 # partially clear lowcore
@@ -396,7 +374,6 @@ ENTRY(startup_kdump)
396 spt 6f-.LPG0(%r13) 374 spt 6f-.LPG0(%r13)
397 mvc __LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13) 375 mvc __LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13)
398 xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST 376 xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST
399#ifndef CONFIG_MARCH_G5
400 # check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10} 377 # check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10}
401 .insn s,0xb2b10000,0 # store facilities @ __LC_STFL_FAC_LIST 378 .insn s,0xb2b10000,0 # store facilities @ __LC_STFL_FAC_LIST
402 tm __LC_STFL_FAC_LIST,0x01 # stfle available ? 379 tm __LC_STFL_FAC_LIST,0x01 # stfle available ?
@@ -435,7 +412,6 @@ ENTRY(startup_kdump)
435# the kernel will crash. Format is number of facility words with bits set, 412# the kernel will crash. Format is number of facility words with bits set,
436# followed by the facility words. 413# followed by the facility words.
437 414
438#if defined(CONFIG_64BIT)
439#if defined(CONFIG_MARCH_Z13) 415#if defined(CONFIG_MARCH_Z13)
440 .long 3, 0xc100eff2, 0xf46ce800, 0x00400000 416 .long 3, 0xc100eff2, 0xf46ce800, 0x00400000
441#elif defined(CONFIG_MARCH_ZEC12) 417#elif defined(CONFIG_MARCH_ZEC12)
@@ -451,35 +427,10 @@ ENTRY(startup_kdump)
451#elif defined(CONFIG_MARCH_Z900) 427#elif defined(CONFIG_MARCH_Z900)
452 .long 1, 0xc0000000 428 .long 1, 0xc0000000
453#endif 429#endif
454#else
455#if defined(CONFIG_MARCH_ZEC12)
456 .long 1, 0x8100c880
457#elif defined(CONFIG_MARCH_Z196)
458 .long 1, 0x8100c880
459#elif defined(CONFIG_MARCH_Z10)
460 .long 1, 0x8100c880
461#elif defined(CONFIG_MARCH_Z9_109)
462 .long 1, 0x8100c880
463#elif defined(CONFIG_MARCH_Z990)
464 .long 1, 0x80002000
465#elif defined(CONFIG_MARCH_Z900)
466 .long 1, 0x80000000
467#endif
468#endif
4694: 4304:
470#endif
471
472#ifdef CONFIG_64BIT
473 /* Continue with 64bit startup code in head64.S */ 431 /* Continue with 64bit startup code in head64.S */
474 sam64 # switch to 64 bit mode 432 sam64 # switch to 64 bit mode
475 jg startup_continue 433 jg startup_continue
476#else
477 /* Continue with 31bit startup code in head31.S */
478 l %r13,5f-.LPG0(%r13)
479 b 0(%r13)
480 .align 8
4815: .long startup_continue
482#endif
483 434
484 .align 8 435 .align 8
4856: .long 0x7fffffff,0xffffffff 4366: .long 0x7fffffff,0xffffffff
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S
deleted file mode 100644
index 6dbe80983a24..000000000000
--- a/arch/s390/kernel/head31.S
+++ /dev/null
@@ -1,106 +0,0 @@
1/*
2 * Copyright IBM Corp. 2005, 2010
3 *
4 * Author(s): Hartmut Penner <hp@de.ibm.com>
5 * Martin Schwidefsky <schwidefsky@de.ibm.com>
6 * Rob van der Heij <rvdhei@iae.nl>
7 * Heiko Carstens <heiko.carstens@de.ibm.com>
8 *
9 */
10
11#include <linux/init.h>
12#include <linux/linkage.h>
13#include <asm/asm-offsets.h>
14#include <asm/thread_info.h>
15#include <asm/page.h>
16
17__HEAD
18ENTRY(startup_continue)
19 basr %r13,0 # get base
20.LPG1:
21
22 l %r1,.Lbase_cc-.LPG1(%r13)
23 mvc 0(8,%r1),__LC_LAST_UPDATE_CLOCK
24 lctl %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
25 l %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area
26 # move IPL device to lowcore
27#
28# Setup stack
29#
30 l %r15,.Linittu-.LPG1(%r13)
31 st %r15,__LC_THREAD_INFO # cache thread info in lowcore
32 mvc __LC_CURRENT(4),__TI_task(%r15)
33 ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union+THREAD_SIZE
34 st %r15,__LC_KERNEL_STACK # set end of kernel stack
35 ahi %r15,-96
36#
37# Save ipl parameters, clear bss memory, initialize storage key for kernel pages,
38# and create a kernel NSS if the SAVESYS= parm is defined
39#
40 l %r14,.Lstartup_init-.LPG1(%r13)
41 basr %r14,%r14
42 lpsw .Lentry-.LPG1(13) # jump to _stext in primary-space,
43 # virtual and never return ...
44 .align 8
45.Lentry:.long 0x00080000,0x80000000 + _stext
46.Lctl: .long 0x04b50000 # cr0: various things
47 .long 0 # cr1: primary space segment table
48 .long .Lduct # cr2: dispatchable unit control table
49 .long 0 # cr3: instruction authorization
50 .long 0 # cr4: instruction authorization
51 .long .Lduct # cr5: primary-aste origin
52 .long 0 # cr6: I/O interrupts
53 .long 0 # cr7: secondary space segment table
54 .long 0 # cr8: access registers translation
55 .long 0 # cr9: tracing off
56 .long 0 # cr10: tracing off
57 .long 0 # cr11: tracing off
58 .long 0 # cr12: tracing off
59 .long 0 # cr13: home space segment table
60 .long 0xc0000000 # cr14: machine check handling off
61 .long 0 # cr15: linkage stack operations
62.Lbss_bgn: .long __bss_start
63.Lbss_end: .long _end
64.Lparmaddr: .long PARMAREA
65.Linittu: .long init_thread_union
66.Lstartup_init:
67 .long startup_init
68 .align 64
69.Lduct: .long 0,0,0,0,.Lduald,0,0,0
70 .long 0,0,0,0,0,0,0,0
71 .align 128
72.Lduald:.rept 8
73 .long 0x80000000,0,0,0 # invalid access-list entries
74 .endr
75.Lbase_cc:
76 .long sched_clock_base_cc
77
78ENTRY(_ehead)
79
80 .org 0x100000 - 0x11000 # head.o ends at 0x11000
81#
82# startup-code, running in absolute addressing mode
83#
84ENTRY(_stext)
85 basr %r13,0 # get base
86.LPG3:
87# check control registers
88 stctl %c0,%c15,0(%r15)
89 oi 2(%r15),0x60 # enable sigp emergency & external call
90 oi 0(%r15),0x10 # switch on low address protection
91 lctl %c0,%c15,0(%r15)
92
93#
94 lam 0,15,.Laregs-.LPG3(%r13) # load access regs needed by uaccess
95 l %r14,.Lstart-.LPG3(%r13)
96 basr %r14,%r14 # call start_kernel
97#
98# We returned from start_kernel ?!? PANIK
99#
100 basr %r13,0
101 lpsw .Ldw-.(%r13) # load disabled wait psw
102#
103 .align 8
104.Ldw: .long 0x000a0000,0x00000000
105.Lstart:.long start_kernel
106.Laregs:.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
diff --git a/arch/s390/kernel/head_kdump.S b/arch/s390/kernel/head_kdump.S
index 085a95eb315f..d05950f02c34 100644
--- a/arch/s390/kernel/head_kdump.S
+++ b/arch/s390/kernel/head_kdump.S
@@ -92,17 +92,9 @@ startup_kdump_relocated:
92#else 92#else
93.align 2 93.align 2
94.Lep_startup_kdump: 94.Lep_startup_kdump:
95#ifdef CONFIG_64BIT
96 larl %r13,startup_kdump_crash 95 larl %r13,startup_kdump_crash
97 lpswe 0(%r13) 96 lpswe 0(%r13)
98.align 8 97.align 8
99startup_kdump_crash: 98startup_kdump_crash:
100 .quad 0x0002000080000000,0x0000000000000000 + startup_kdump_crash 99 .quad 0x0002000080000000,0x0000000000000000 + startup_kdump_crash
101#else
102 basr %r13,0
1030: lpsw startup_kdump_crash-0b(%r13)
104.align 8
105startup_kdump_crash:
106 .long 0x000a0000,0x00000000 + startup_kdump_crash
107#endif /* CONFIG_64BIT */
108#endif /* CONFIG_CRASH_DUMP */ 100#endif /* CONFIG_CRASH_DUMP */
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 5c8651f36509..c57951f008c4 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -2062,12 +2062,10 @@ static void do_reset_calls(void)
2062{ 2062{
2063 struct reset_call *reset; 2063 struct reset_call *reset;
2064 2064
2065#ifdef CONFIG_64BIT
2066 if (diag308_set_works) { 2065 if (diag308_set_works) {
2067 diag308_reset(); 2066 diag308_reset();
2068 return; 2067 return;
2069 } 2068 }
2070#endif
2071 list_for_each_entry(reset, &rcall, list) 2069 list_for_each_entry(reset, &rcall, list)
2072 reset->fn(); 2070 reset->fn();
2073} 2071}
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 2ca95862e336..0c1a679314dd 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -38,13 +38,8 @@
38#define DEBUGP(fmt , ...) 38#define DEBUGP(fmt , ...)
39#endif 39#endif
40 40
41#ifndef CONFIG_64BIT
42#define PLT_ENTRY_SIZE 12
43#else /* CONFIG_64BIT */
44#define PLT_ENTRY_SIZE 20 41#define PLT_ENTRY_SIZE 20
45#endif /* CONFIG_64BIT */
46 42
47#ifdef CONFIG_64BIT
48void *module_alloc(unsigned long size) 43void *module_alloc(unsigned long size)
49{ 44{
50 if (PAGE_ALIGN(size) > MODULES_LEN) 45 if (PAGE_ALIGN(size) > MODULES_LEN)
@@ -53,7 +48,6 @@ void *module_alloc(unsigned long size)
53 GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE, 48 GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE,
54 __builtin_return_address(0)); 49 __builtin_return_address(0));
55} 50}
56#endif
57 51
58void module_arch_freeing_init(struct module *mod) 52void module_arch_freeing_init(struct module *mod)
59{ 53{
@@ -323,17 +317,11 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
323 unsigned int *ip; 317 unsigned int *ip;
324 ip = me->module_core + me->arch.plt_offset + 318 ip = me->module_core + me->arch.plt_offset +
325 info->plt_offset; 319 info->plt_offset;
326#ifndef CONFIG_64BIT
327 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
328 ip[1] = 0x100607f1;
329 ip[2] = val;
330#else /* CONFIG_64BIT */
331 ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */ 320 ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
332 ip[1] = 0x100a0004; 321 ip[1] = 0x100a0004;
333 ip[2] = 0x07f10000; 322 ip[2] = 0x07f10000;
334 ip[3] = (unsigned int) (val >> 32); 323 ip[3] = (unsigned int) (val >> 32);
335 ip[4] = (unsigned int) val; 324 ip[4] = (unsigned int) val;
336#endif /* CONFIG_64BIT */
337 info->plt_initialized = 1; 325 info->plt_initialized = 1;
338 } 326 }
339 if (r_type == R_390_PLTOFF16 || 327 if (r_type == R_390_PLTOFF16 ||
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 3f51cf4e8f02..505c17c0ae1a 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -117,55 +117,36 @@ static int notrace s390_revalidate_registers(struct mci *mci)
117 */ 117 */
118 kill_task = 1; 118 kill_task = 1;
119 } 119 }
120#ifndef CONFIG_64BIT 120 fpt_save_area = &S390_lowcore.floating_pt_save_area;
121 fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area;
122 if (!mci->fc) {
123 /*
124 * Floating point control register can't be restored.
125 * Task will be terminated.
126 */
127 asm volatile("lfpc 0(%0)" : : "a" (&zero), "m" (zero));
128 kill_task = 1;
129 } else
130 asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area));
131
121 asm volatile( 132 asm volatile(
122 " ld 0,0(%0)\n" 133 " ld 0,0(%0)\n"
123 " ld 2,8(%0)\n" 134 " ld 1,8(%0)\n"
124 " ld 4,16(%0)\n" 135 " ld 2,16(%0)\n"
125 " ld 6,24(%0)" 136 " ld 3,24(%0)\n"
126 : : "a" (&S390_lowcore.floating_pt_save_area)); 137 " ld 4,32(%0)\n"
127#endif 138 " ld 5,40(%0)\n"
128 139 " ld 6,48(%0)\n"
129 if (MACHINE_HAS_IEEE) { 140 " ld 7,56(%0)\n"
130#ifdef CONFIG_64BIT 141 " ld 8,64(%0)\n"
131 fpt_save_area = &S390_lowcore.floating_pt_save_area; 142 " ld 9,72(%0)\n"
132 fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area; 143 " ld 10,80(%0)\n"
133#else 144 " ld 11,88(%0)\n"
134 fpt_save_area = (void *) S390_lowcore.extended_save_area_addr; 145 " ld 12,96(%0)\n"
135 fpt_creg_save_area = fpt_save_area + 128; 146 " ld 13,104(%0)\n"
136#endif 147 " ld 14,112(%0)\n"
137 if (!mci->fc) { 148 " ld 15,120(%0)\n"
138 /* 149 : : "a" (fpt_save_area));
139 * Floating point control register can't be restored.
140 * Task will be terminated.
141 */
142 asm volatile("lfpc 0(%0)" : : "a" (&zero), "m" (zero));
143 kill_task = 1;
144
145 } else
146 asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area));
147
148 asm volatile(
149 " ld 0,0(%0)\n"
150 " ld 1,8(%0)\n"
151 " ld 2,16(%0)\n"
152 " ld 3,24(%0)\n"
153 " ld 4,32(%0)\n"
154 " ld 5,40(%0)\n"
155 " ld 6,48(%0)\n"
156 " ld 7,56(%0)\n"
157 " ld 8,64(%0)\n"
158 " ld 9,72(%0)\n"
159 " ld 10,80(%0)\n"
160 " ld 11,88(%0)\n"
161 " ld 12,96(%0)\n"
162 " ld 13,104(%0)\n"
163 " ld 14,112(%0)\n"
164 " ld 15,120(%0)\n"
165 : : "a" (fpt_save_area));
166 }
167
168#ifdef CONFIG_64BIT
169 /* Revalidate vector registers */ 150 /* Revalidate vector registers */
170 if (MACHINE_HAS_VX && current->thread.vxrs) { 151 if (MACHINE_HAS_VX && current->thread.vxrs) {
171 if (!mci->vr) { 152 if (!mci->vr) {
@@ -178,7 +159,6 @@ static int notrace s390_revalidate_registers(struct mci *mci)
178 restore_vx_regs((__vector128 *) 159 restore_vx_regs((__vector128 *)
179 S390_lowcore.vector_save_area_addr); 160 S390_lowcore.vector_save_area_addr);
180 } 161 }
181#endif
182 /* Revalidate access registers */ 162 /* Revalidate access registers */
183 asm volatile( 163 asm volatile(
184 " lam 0,15,0(%0)" 164 " lam 0,15,0(%0)"
@@ -198,21 +178,14 @@ static int notrace s390_revalidate_registers(struct mci *mci)
198 */ 178 */
199 s390_handle_damage("invalid control registers."); 179 s390_handle_damage("invalid control registers.");
200 } else { 180 } else {
201#ifdef CONFIG_64BIT
202 asm volatile( 181 asm volatile(
203 " lctlg 0,15,0(%0)" 182 " lctlg 0,15,0(%0)"
204 : : "a" (&S390_lowcore.cregs_save_area)); 183 : : "a" (&S390_lowcore.cregs_save_area));
205#else
206 asm volatile(
207 " lctl 0,15,0(%0)"
208 : : "a" (&S390_lowcore.cregs_save_area));
209#endif
210 } 184 }
211 /* 185 /*
212 * We don't even try to revalidate the TOD register, since we simply 186 * We don't even try to revalidate the TOD register, since we simply
213 * can't write something sensible into that register. 187 * can't write something sensible into that register.
214 */ 188 */
215#ifdef CONFIG_64BIT
216 /* 189 /*
217 * See if we can revalidate the TOD programmable register with its 190 * See if we can revalidate the TOD programmable register with its
218 * old contents (should be zero) otherwise set it to zero. 191 * old contents (should be zero) otherwise set it to zero.
@@ -228,7 +201,6 @@ static int notrace s390_revalidate_registers(struct mci *mci)
228 " sckpf" 201 " sckpf"
229 : : "a" (&S390_lowcore.tod_progreg_save_area) 202 : : "a" (&S390_lowcore.tod_progreg_save_area)
230 : "0", "cc"); 203 : "0", "cc");
231#endif
232 /* Revalidate clock comparator register */ 204 /* Revalidate clock comparator register */
233 set_clock_comparator(S390_lowcore.clock_comparator); 205 set_clock_comparator(S390_lowcore.clock_comparator);
234 /* Check if old PSW is valid */ 206 /* Check if old PSW is valid */
@@ -280,19 +252,11 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
280 if (mci->b) { 252 if (mci->b) {
281 /* Processing backup -> verify if we can survive this */ 253 /* Processing backup -> verify if we can survive this */
282 u64 z_mcic, o_mcic, t_mcic; 254 u64 z_mcic, o_mcic, t_mcic;
283#ifdef CONFIG_64BIT
284 z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<29); 255 z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<29);
285 o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 | 256 o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 |
286 1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 | 257 1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 |
287 1ULL<<30 | 1ULL<<21 | 1ULL<<20 | 1ULL<<17 | 258 1ULL<<30 | 1ULL<<21 | 1ULL<<20 | 1ULL<<17 |
288 1ULL<<16); 259 1ULL<<16);
289#else
290 z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<57 | 1ULL<<50 |
291 1ULL<<29);
292 o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 |
293 1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 |
294 1ULL<<30 | 1ULL<<20 | 1ULL<<17 | 1ULL<<16);
295#endif
296 t_mcic = *(u64 *)mci; 260 t_mcic = *(u64 *)mci;
297 261
298 if (((t_mcic & z_mcic) != 0) || 262 if (((t_mcic & z_mcic) != 0) ||
diff --git a/arch/s390/kernel/pgm_check.S b/arch/s390/kernel/pgm_check.S
index f6f8886399f6..036aa01d06a9 100644
--- a/arch/s390/kernel/pgm_check.S
+++ b/arch/s390/kernel/pgm_check.S
@@ -6,19 +6,13 @@
6 6
7#include <linux/linkage.h> 7#include <linux/linkage.h>
8 8
9#ifdef CONFIG_32BIT
10#define PGM_CHECK_64BIT(handler) .long default_trap_handler
11#else
12#define PGM_CHECK_64BIT(handler) .long handler
13#endif
14
15#define PGM_CHECK(handler) .long handler 9#define PGM_CHECK(handler) .long handler
16#define PGM_CHECK_DEFAULT PGM_CHECK(default_trap_handler) 10#define PGM_CHECK_DEFAULT PGM_CHECK(default_trap_handler)
17 11
18/* 12/*
19 * The program check table contains exactly 128 (0x00-0x7f) entries. Each 13 * The program check table contains exactly 128 (0x00-0x7f) entries. Each
20 * line defines the 31 and/or 64 bit function to be called corresponding 14 * line defines the function to be called corresponding to the program check
21 * to the program check interruption code. 15 * interruption code.
22 */ 16 */
23.section .rodata, "a" 17.section .rodata, "a"
24ENTRY(pgm_check_table) 18ENTRY(pgm_check_table)
@@ -46,10 +40,10 @@ PGM_CHECK_DEFAULT /* 14 */
46PGM_CHECK(operand_exception) /* 15 */ 40PGM_CHECK(operand_exception) /* 15 */
47PGM_CHECK_DEFAULT /* 16 */ 41PGM_CHECK_DEFAULT /* 16 */
48PGM_CHECK_DEFAULT /* 17 */ 42PGM_CHECK_DEFAULT /* 17 */
49PGM_CHECK_64BIT(transaction_exception) /* 18 */ 43PGM_CHECK(transaction_exception) /* 18 */
50PGM_CHECK_DEFAULT /* 19 */ 44PGM_CHECK_DEFAULT /* 19 */
51PGM_CHECK_DEFAULT /* 1a */ 45PGM_CHECK_DEFAULT /* 1a */
52PGM_CHECK_64BIT(vector_exception) /* 1b */ 46PGM_CHECK(vector_exception) /* 1b */
53PGM_CHECK(space_switch_exception) /* 1c */ 47PGM_CHECK(space_switch_exception) /* 1c */
54PGM_CHECK(hfp_sqrt_exception) /* 1d */ 48PGM_CHECK(hfp_sqrt_exception) /* 1d */
55PGM_CHECK_DEFAULT /* 1e */ 49PGM_CHECK_DEFAULT /* 1e */
@@ -78,10 +72,10 @@ PGM_CHECK_DEFAULT /* 34 */
78PGM_CHECK_DEFAULT /* 35 */ 72PGM_CHECK_DEFAULT /* 35 */
79PGM_CHECK_DEFAULT /* 36 */ 73PGM_CHECK_DEFAULT /* 36 */
80PGM_CHECK_DEFAULT /* 37 */ 74PGM_CHECK_DEFAULT /* 37 */
81PGM_CHECK_64BIT(do_dat_exception) /* 38 */ 75PGM_CHECK(do_dat_exception) /* 38 */
82PGM_CHECK_64BIT(do_dat_exception) /* 39 */ 76PGM_CHECK(do_dat_exception) /* 39 */
83PGM_CHECK_64BIT(do_dat_exception) /* 3a */ 77PGM_CHECK(do_dat_exception) /* 3a */
84PGM_CHECK_64BIT(do_dat_exception) /* 3b */ 78PGM_CHECK(do_dat_exception) /* 3b */
85PGM_CHECK_DEFAULT /* 3c */ 79PGM_CHECK_DEFAULT /* 3c */
86PGM_CHECK_DEFAULT /* 3d */ 80PGM_CHECK_DEFAULT /* 3d */
87PGM_CHECK_DEFAULT /* 3e */ 81PGM_CHECK_DEFAULT /* 3e */
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 13fc0978ca7e..dc5edc29b73a 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -79,13 +79,11 @@ void release_thread(struct task_struct *dead_task)
79{ 79{
80} 80}
81 81
82#ifdef CONFIG_64BIT
83void arch_release_task_struct(struct task_struct *tsk) 82void arch_release_task_struct(struct task_struct *tsk)
84{ 83{
85 if (tsk->thread.vxrs) 84 if (tsk->thread.vxrs)
86 kfree(tsk->thread.vxrs); 85 kfree(tsk->thread.vxrs);
87} 86}
88#endif
89 87
90int copy_thread(unsigned long clone_flags, unsigned long new_stackp, 88int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
91 unsigned long arg, struct task_struct *p) 89 unsigned long arg, struct task_struct *p)
@@ -144,19 +142,6 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
144 p->thread.ri_signum = 0; 142 p->thread.ri_signum = 0;
145 frame->childregs.psw.mask &= ~PSW_MASK_RI; 143 frame->childregs.psw.mask &= ~PSW_MASK_RI;
146 144
147#ifndef CONFIG_64BIT
148 /*
149 * save fprs to current->thread.fp_regs to merge them with
150 * the emulated registers and then copy the result to the child.
151 */
152 save_fp_ctl(&current->thread.fp_regs.fpc);
153 save_fp_regs(current->thread.fp_regs.fprs);
154 memcpy(&p->thread.fp_regs, &current->thread.fp_regs,
155 sizeof(s390_fp_regs));
156 /* Set a new TLS ? */
157 if (clone_flags & CLONE_SETTLS)
158 p->thread.acrs[0] = frame->childregs.gprs[6];
159#else /* CONFIG_64BIT */
160 /* Save the fpu registers to new thread structure. */ 145 /* Save the fpu registers to new thread structure. */
161 save_fp_ctl(&p->thread.fp_regs.fpc); 146 save_fp_ctl(&p->thread.fp_regs.fpc);
162 save_fp_regs(p->thread.fp_regs.fprs); 147 save_fp_regs(p->thread.fp_regs.fprs);
@@ -172,15 +157,13 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
172 p->thread.acrs[1] = (unsigned int)tls; 157 p->thread.acrs[1] = (unsigned int)tls;
173 } 158 }
174 } 159 }
175#endif /* CONFIG_64BIT */
176 return 0; 160 return 0;
177} 161}
178 162
179asmlinkage void execve_tail(void) 163asmlinkage void execve_tail(void)
180{ 164{
181 current->thread.fp_regs.fpc = 0; 165 current->thread.fp_regs.fpc = 0;
182 if (MACHINE_HAS_IEEE) 166 asm volatile("sfpc %0,%0" : : "d" (0));
183 asm volatile("sfpc %0,%0" : : "d" (0));
184} 167}
185 168
186/* 169/*
@@ -188,18 +171,8 @@ asmlinkage void execve_tail(void)
188 */ 171 */
189int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs) 172int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs)
190{ 173{
191#ifndef CONFIG_64BIT
192 /*
193 * save fprs to current->thread.fp_regs to merge them with
194 * the emulated registers and then copy the result to the dump.
195 */
196 save_fp_ctl(&current->thread.fp_regs.fpc);
197 save_fp_regs(current->thread.fp_regs.fprs);
198 memcpy(fpregs, &current->thread.fp_regs, sizeof(s390_fp_regs));
199#else /* CONFIG_64BIT */
200 save_fp_ctl(&fpregs->fpc); 174 save_fp_ctl(&fpregs->fpc);
201 save_fp_regs(fpregs->fprs); 175 save_fp_regs(fpregs->fprs);
202#endif /* CONFIG_64BIT */
203 return 1; 176 return 1;
204} 177}
205EXPORT_SYMBOL(dump_fpu); 178EXPORT_SYMBOL(dump_fpu);
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index eabfb4594517..d363c9c322a1 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -44,7 +44,6 @@ void update_cr_regs(struct task_struct *task)
44 struct thread_struct *thread = &task->thread; 44 struct thread_struct *thread = &task->thread;
45 struct per_regs old, new; 45 struct per_regs old, new;
46 46
47#ifdef CONFIG_64BIT
48 /* Take care of the enable/disable of transactional execution. */ 47 /* Take care of the enable/disable of transactional execution. */
49 if (MACHINE_HAS_TE || MACHINE_HAS_VX) { 48 if (MACHINE_HAS_TE || MACHINE_HAS_VX) {
50 unsigned long cr, cr_new; 49 unsigned long cr, cr_new;
@@ -80,7 +79,6 @@ void update_cr_regs(struct task_struct *task)
80 __ctl_load(cr_new, 2, 2); 79 __ctl_load(cr_new, 2, 2);
81 } 80 }
82 } 81 }
83#endif
84 /* Copy user specified PER registers */ 82 /* Copy user specified PER registers */
85 new.control = thread->per_user.control; 83 new.control = thread->per_user.control;
86 new.start = thread->per_user.start; 84 new.start = thread->per_user.start;
@@ -93,10 +91,8 @@ void update_cr_regs(struct task_struct *task)
93 new.control |= PER_EVENT_BRANCH; 91 new.control |= PER_EVENT_BRANCH;
94 else 92 else
95 new.control |= PER_EVENT_IFETCH; 93 new.control |= PER_EVENT_IFETCH;
96#ifdef CONFIG_64BIT
97 new.control |= PER_CONTROL_SUSPENSION; 94 new.control |= PER_CONTROL_SUSPENSION;
98 new.control |= PER_EVENT_TRANSACTION_END; 95 new.control |= PER_EVENT_TRANSACTION_END;
99#endif
100 if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP)) 96 if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP))
101 new.control |= PER_EVENT_IFETCH; 97 new.control |= PER_EVENT_IFETCH;
102 new.start = 0; 98 new.start = 0;
@@ -146,11 +142,7 @@ void ptrace_disable(struct task_struct *task)
146 task->thread.per_flags = 0; 142 task->thread.per_flags = 0;
147} 143}
148 144
149#ifndef CONFIG_64BIT 145#define __ADDR_MASK 7
150# define __ADDR_MASK 3
151#else
152# define __ADDR_MASK 7
153#endif
154 146
155static inline unsigned long __peek_user_per(struct task_struct *child, 147static inline unsigned long __peek_user_per(struct task_struct *child,
156 addr_t addr) 148 addr_t addr)
@@ -223,7 +215,6 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)
223 * access registers are stored in the thread structure 215 * access registers are stored in the thread structure
224 */ 216 */
225 offset = addr - (addr_t) &dummy->regs.acrs; 217 offset = addr - (addr_t) &dummy->regs.acrs;
226#ifdef CONFIG_64BIT
227 /* 218 /*
228 * Very special case: old & broken 64 bit gdb reading 219 * Very special case: old & broken 64 bit gdb reading
229 * from acrs[15]. Result is a 64 bit value. Read the 220 * from acrs[15]. Result is a 64 bit value. Read the
@@ -232,8 +223,7 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)
232 if (addr == (addr_t) &dummy->regs.acrs[15]) 223 if (addr == (addr_t) &dummy->regs.acrs[15])
233 tmp = ((unsigned long) child->thread.acrs[15]) << 32; 224 tmp = ((unsigned long) child->thread.acrs[15]) << 32;
234 else 225 else
235#endif 226 tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);
236 tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);
237 227
238 } else if (addr == (addr_t) &dummy->regs.orig_gpr2) { 228 } else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
239 /* 229 /*
@@ -261,12 +251,10 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)
261 * or the child->thread.vxrs array 251 * or the child->thread.vxrs array
262 */ 252 */
263 offset = addr - (addr_t) &dummy->regs.fp_regs.fprs; 253 offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
264#ifdef CONFIG_64BIT
265 if (child->thread.vxrs) 254 if (child->thread.vxrs)
266 tmp = *(addr_t *) 255 tmp = *(addr_t *)
267 ((addr_t) child->thread.vxrs + 2*offset); 256 ((addr_t) child->thread.vxrs + 2*offset);
268 else 257 else
269#endif
270 tmp = *(addr_t *) 258 tmp = *(addr_t *)
271 ((addr_t) &child->thread.fp_regs.fprs + offset); 259 ((addr_t) &child->thread.fp_regs.fprs + offset);
272 260
@@ -293,11 +281,9 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data)
293 * an alignment of 4. Programmers from hell... 281 * an alignment of 4. Programmers from hell...
294 */ 282 */
295 mask = __ADDR_MASK; 283 mask = __ADDR_MASK;
296#ifdef CONFIG_64BIT
297 if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs && 284 if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
298 addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2) 285 addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
299 mask = 3; 286 mask = 3;
300#endif
301 if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK) 287 if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
302 return -EIO; 288 return -EIO;
303 289
@@ -370,7 +356,6 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
370 * access registers are stored in the thread structure 356 * access registers are stored in the thread structure
371 */ 357 */
372 offset = addr - (addr_t) &dummy->regs.acrs; 358 offset = addr - (addr_t) &dummy->regs.acrs;
373#ifdef CONFIG_64BIT
374 /* 359 /*
375 * Very special case: old & broken 64 bit gdb writing 360 * Very special case: old & broken 64 bit gdb writing
376 * to acrs[15] with a 64 bit value. Ignore the lower 361 * to acrs[15] with a 64 bit value. Ignore the lower
@@ -380,8 +365,7 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
380 if (addr == (addr_t) &dummy->regs.acrs[15]) 365 if (addr == (addr_t) &dummy->regs.acrs[15])
381 child->thread.acrs[15] = (unsigned int) (data >> 32); 366 child->thread.acrs[15] = (unsigned int) (data >> 32);
382 else 367 else
383#endif 368 *(addr_t *)((addr_t) &child->thread.acrs + offset) = data;
384 *(addr_t *)((addr_t) &child->thread.acrs + offset) = data;
385 369
386 } else if (addr == (addr_t) &dummy->regs.orig_gpr2) { 370 } else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
387 /* 371 /*
@@ -411,12 +395,10 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
411 * or the child->thread.vxrs array 395 * or the child->thread.vxrs array
412 */ 396 */
413 offset = addr - (addr_t) &dummy->regs.fp_regs.fprs; 397 offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
414#ifdef CONFIG_64BIT
415 if (child->thread.vxrs) 398 if (child->thread.vxrs)
416 *(addr_t *)((addr_t) 399 *(addr_t *)((addr_t)
417 child->thread.vxrs + 2*offset) = data; 400 child->thread.vxrs + 2*offset) = data;
418 else 401 else
419#endif
420 *(addr_t *)((addr_t) 402 *(addr_t *)((addr_t)
421 &child->thread.fp_regs.fprs + offset) = data; 403 &child->thread.fp_regs.fprs + offset) = data;
422 404
@@ -441,11 +423,9 @@ static int poke_user(struct task_struct *child, addr_t addr, addr_t data)
441 * an alignment of 4. Programmers from hell indeed... 423 * an alignment of 4. Programmers from hell indeed...
442 */ 424 */
443 mask = __ADDR_MASK; 425 mask = __ADDR_MASK;
444#ifdef CONFIG_64BIT
445 if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs && 426 if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
446 addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2) 427 addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
447 mask = 3; 428 mask = 3;
448#endif
449 if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK) 429 if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
450 return -EIO; 430 return -EIO;
451 431
@@ -649,12 +629,10 @@ static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
649 * or the child->thread.vxrs array 629 * or the child->thread.vxrs array
650 */ 630 */
651 offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs; 631 offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
652#ifdef CONFIG_64BIT
653 if (child->thread.vxrs) 632 if (child->thread.vxrs)
654 tmp = *(__u32 *) 633 tmp = *(__u32 *)
655 ((addr_t) child->thread.vxrs + 2*offset); 634 ((addr_t) child->thread.vxrs + 2*offset);
656 else 635 else
657#endif
658 tmp = *(__u32 *) 636 tmp = *(__u32 *)
659 ((addr_t) &child->thread.fp_regs.fprs + offset); 637 ((addr_t) &child->thread.fp_regs.fprs + offset);
660 638
@@ -776,12 +754,10 @@ static int __poke_user_compat(struct task_struct *child,
776 * or the child->thread.vxrs array 754 * or the child->thread.vxrs array
777 */ 755 */
778 offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs; 756 offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
779#ifdef CONFIG_64BIT
780 if (child->thread.vxrs) 757 if (child->thread.vxrs)
781 *(__u32 *)((addr_t) 758 *(__u32 *)((addr_t)
782 child->thread.vxrs + 2*offset) = tmp; 759 child->thread.vxrs + 2*offset) = tmp;
783 else 760 else
784#endif
785 *(__u32 *)((addr_t) 761 *(__u32 *)((addr_t)
786 &child->thread.fp_regs.fprs + offset) = tmp; 762 &child->thread.fp_regs.fprs + offset) = tmp;
787 763
@@ -979,16 +955,13 @@ static int s390_fpregs_get(struct task_struct *target,
979 if (target == current) { 955 if (target == current) {
980 save_fp_ctl(&target->thread.fp_regs.fpc); 956 save_fp_ctl(&target->thread.fp_regs.fpc);
981 save_fp_regs(target->thread.fp_regs.fprs); 957 save_fp_regs(target->thread.fp_regs.fprs);
982 } 958 } else if (target->thread.vxrs) {
983#ifdef CONFIG_64BIT
984 else if (target->thread.vxrs) {
985 int i; 959 int i;
986 960
987 for (i = 0; i < __NUM_VXRS_LOW; i++) 961 for (i = 0; i < __NUM_VXRS_LOW; i++)
988 target->thread.fp_regs.fprs[i] = 962 target->thread.fp_regs.fprs[i] =
989 *(freg_t *)(target->thread.vxrs + i); 963 *(freg_t *)(target->thread.vxrs + i);
990 } 964 }
991#endif
992 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, 965 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
993 &target->thread.fp_regs, 0, -1); 966 &target->thread.fp_regs, 0, -1);
994} 967}
@@ -1026,23 +999,18 @@ static int s390_fpregs_set(struct task_struct *target,
1026 if (target == current) { 999 if (target == current) {
1027 restore_fp_ctl(&target->thread.fp_regs.fpc); 1000 restore_fp_ctl(&target->thread.fp_regs.fpc);
1028 restore_fp_regs(target->thread.fp_regs.fprs); 1001 restore_fp_regs(target->thread.fp_regs.fprs);
1029 } 1002 } else if (target->thread.vxrs) {
1030#ifdef CONFIG_64BIT
1031 else if (target->thread.vxrs) {
1032 int i; 1003 int i;
1033 1004
1034 for (i = 0; i < __NUM_VXRS_LOW; i++) 1005 for (i = 0; i < __NUM_VXRS_LOW; i++)
1035 *(freg_t *)(target->thread.vxrs + i) = 1006 *(freg_t *)(target->thread.vxrs + i) =
1036 target->thread.fp_regs.fprs[i]; 1007 target->thread.fp_regs.fprs[i];
1037 } 1008 }
1038#endif
1039 } 1009 }
1040 1010
1041 return rc; 1011 return rc;
1042} 1012}
1043 1013
1044#ifdef CONFIG_64BIT
1045
1046static int s390_last_break_get(struct task_struct *target, 1014static int s390_last_break_get(struct task_struct *target,
1047 const struct user_regset *regset, 1015 const struct user_regset *regset,
1048 unsigned int pos, unsigned int count, 1016 unsigned int pos, unsigned int count,
@@ -1182,8 +1150,6 @@ static int s390_vxrs_high_set(struct task_struct *target,
1182 return rc; 1150 return rc;
1183} 1151}
1184 1152
1185#endif
1186
1187static int s390_system_call_get(struct task_struct *target, 1153static int s390_system_call_get(struct task_struct *target,
1188 const struct user_regset *regset, 1154 const struct user_regset *regset,
1189 unsigned int pos, unsigned int count, 1155 unsigned int pos, unsigned int count,
@@ -1229,7 +1195,6 @@ static const struct user_regset s390_regsets[] = {
1229 .get = s390_system_call_get, 1195 .get = s390_system_call_get,
1230 .set = s390_system_call_set, 1196 .set = s390_system_call_set,
1231 }, 1197 },
1232#ifdef CONFIG_64BIT
1233 { 1198 {
1234 .core_note_type = NT_S390_LAST_BREAK, 1199 .core_note_type = NT_S390_LAST_BREAK,
1235 .n = 1, 1200 .n = 1,
@@ -1262,7 +1227,6 @@ static const struct user_regset s390_regsets[] = {
1262 .get = s390_vxrs_high_get, 1227 .get = s390_vxrs_high_get,
1263 .set = s390_vxrs_high_set, 1228 .set = s390_vxrs_high_set,
1264 }, 1229 },
1265#endif
1266}; 1230};
1267 1231
1268static const struct user_regset_view user_s390_view = { 1232static const struct user_regset_view user_s390_view = {
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S
deleted file mode 100644
index dd8016b0477e..000000000000
--- a/arch/s390/kernel/reipl.S
+++ /dev/null
@@ -1,92 +0,0 @@
1/*
2 * S390 version
3 * Copyright IBM Corp. 2000
4 * Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com)
5 */
6
7#include <linux/linkage.h>
8#include <asm/asm-offsets.h>
9#include <asm/sigp.h>
10
11#
12# store_status: Empty implementation until kdump is supported on 31 bit
13#
14ENTRY(store_status)
15 br %r14
16
17#
18# do_reipl_asm
19# Parameter: r2 = schid of reipl device
20#
21ENTRY(do_reipl_asm)
22 basr %r13,0
23.Lpg0: lpsw .Lnewpsw-.Lpg0(%r13)
24.Lpg1: # do store status of all registers
25
26 stm %r0,%r15,__LC_GPREGS_SAVE_AREA
27 stctl %c0,%c15,__LC_CREGS_SAVE_AREA
28 stam %a0,%a15,__LC_AREGS_SAVE_AREA
29 l %r10,.Ldump_pfx-.Lpg0(%r13)
30 mvc __LC_PREFIX_SAVE_AREA(4),0(%r10)
31 stckc .Lclkcmp-.Lpg0(%r13)
32 mvc __LC_CLOCK_COMP_SAVE_AREA(8),.Lclkcmp-.Lpg0(%r13)
33 stpt __LC_CPU_TIMER_SAVE_AREA
34 st %r13, __LC_PSW_SAVE_AREA+4
35 lctl %c6,%c6,.Lall-.Lpg0(%r13)
36 lr %r1,%r2
37 mvc __LC_PGM_NEW_PSW(8),.Lpcnew-.Lpg0(%r13)
38 stsch .Lschib-.Lpg0(%r13)
39 oi .Lschib+5-.Lpg0(%r13),0x84
40.Lecs: xi .Lschib+27-.Lpg0(%r13),0x01
41 msch .Lschib-.Lpg0(%r13)
42 lhi %r0,5
43.Lssch: ssch .Liplorb-.Lpg0(%r13)
44 jz .L001
45 brct %r0,.Lssch
46 bas %r14,.Ldisab-.Lpg0(%r13)
47.L001: mvc __LC_IO_NEW_PSW(8),.Lionew-.Lpg0(%r13)
48.Ltpi: lpsw .Lwaitpsw-.Lpg0(%r13)
49.Lcont: c %r1,__LC_SUBCHANNEL_ID
50 jnz .Ltpi
51 clc __LC_IO_INT_PARM(4),.Liplorb-.Lpg0(%r13)
52 jnz .Ltpi
53 tsch .Liplirb-.Lpg0(%r13)
54 tm .Liplirb+9-.Lpg0(%r13),0xbf
55 jz .L002
56 bas %r14,.Ldisab-.Lpg0(%r13)
57.L002: tm .Liplirb+8-.Lpg0(%r13),0xf3
58 jz .L003
59 bas %r14,.Ldisab-.Lpg0(%r13)
60.L003: st %r1,__LC_SUBCHANNEL_ID
61 lpsw 0
62 sigp 0,0,SIGP_RESTART
63.Ldisab: st %r14,.Ldispsw+4-.Lpg0(%r13)
64 lpsw .Ldispsw-.Lpg0(%r13)
65 .align 8
66.Lclkcmp: .quad 0x0000000000000000
67.Lall: .long 0xff000000
68.Ldump_pfx: .long dump_prefix_page
69 .align 8
70.Lnewpsw: .long 0x00080000,0x80000000+.Lpg1
71.Lpcnew: .long 0x00080000,0x80000000+.Lecs
72.Lionew: .long 0x00080000,0x80000000+.Lcont
73.Lwaitpsw: .long 0x020a0000,0x00000000+.Ltpi
74.Ldispsw: .long 0x000a0000,0x00000000
75.Liplccws: .long 0x02000000,0x60000018
76 .long 0x08000008,0x20000001
77.Liplorb: .long 0x0049504c,0x0040ff80
78 .long 0x00000000+.Liplccws
79.Lschib: .long 0x00000000,0x00000000
80 .long 0x00000000,0x00000000
81 .long 0x00000000,0x00000000
82 .long 0x00000000,0x00000000
83 .long 0x00000000,0x00000000
84 .long 0x00000000,0x00000000
85.Liplirb: .long 0x00000000,0x00000000
86 .long 0x00000000,0x00000000
87 .long 0x00000000,0x00000000
88 .long 0x00000000,0x00000000
89 .long 0x00000000,0x00000000
90 .long 0x00000000,0x00000000
91 .long 0x00000000,0x00000000
92 .long 0x00000000,0x00000000
diff --git a/arch/s390/kernel/relocate_kernel.S b/arch/s390/kernel/relocate_kernel.S
deleted file mode 100644
index f4e6f20e117a..000000000000
--- a/arch/s390/kernel/relocate_kernel.S
+++ /dev/null
@@ -1,118 +0,0 @@
1/*
2 * Copyright IBM Corp. 2005
3 *
4 * Author(s): Rolf Adelsberger,
5 * Heiko Carstens <heiko.carstens@de.ibm.com>
6 *
7 */
8
9#include <linux/linkage.h>
10#include <asm/sigp.h>
11
12/*
13 * moves the new kernel to its destination...
14 * %r2 = pointer to first kimage_entry_t
15 * %r3 = start address - where to jump to after the job is done...
16 *
17 * %r5 will be used as temp. storage
18 * %r6 holds the destination address
19 * %r7 = PAGE_SIZE
20 * %r8 holds the source address
21 * %r9 = PAGE_SIZE
22 * %r10 is a page mask
23 */
24
25 .text
26ENTRY(relocate_kernel)
27 basr %r13,0 # base address
28 .base:
29 stnsm sys_msk-.base(%r13),0xfb # disable DAT
30 stctl %c0,%c15,ctlregs-.base(%r13)
31 stm %r0,%r15,gprregs-.base(%r13)
32 la %r1,load_psw-.base(%r13)
33 mvc 0(8,%r0),0(%r1)
34 la %r0,.back-.base(%r13)
35 st %r0,4(%r0)
36 oi 4(%r0),0x80
37 mvc 0x68(8,%r0),0(%r1)
38 la %r0,.back_pgm-.base(%r13)
39 st %r0,0x6c(%r0)
40 oi 0x6c(%r0),0x80
41 lhi %r0,0
42 diag %r0,%r0,0x308
43 .back:
44 basr %r13,0
45 .back_base:
46 oi have_diag308-.back_base(%r13),0x01
47 lctl %c0,%c15,ctlregs-.back_base(%r13)
48 lm %r0,%r15,gprregs-.back_base(%r13)
49 j .start_reloc
50 .back_pgm:
51 lm %r0,%r15,gprregs-.base(%r13)
52 .start_reloc:
53 lhi %r10,-1 # preparing the mask
54 sll %r10,12 # shift it such that it becomes 0xf000
55 .top:
56 lhi %r7,4096 # load PAGE_SIZE in r7
57 lhi %r9,4096 # load PAGE_SIZE in r9
58 l %r5,0(%r2) # read another word for indirection page
59 ahi %r2,4 # increment pointer
60 tml %r5,0x1 # is it a destination page?
61 je .indir_check # NO, goto "indir_check"
62 lr %r6,%r5 # r6 = r5
63 nr %r6,%r10 # mask it out and...
64 j .top # ...next iteration
65 .indir_check:
66 tml %r5,0x2 # is it a indirection page?
67 je .done_test # NO, goto "done_test"
68 nr %r5,%r10 # YES, mask out,
69 lr %r2,%r5 # move it into the right register,
70 j .top # and read next...
71 .done_test:
72 tml %r5,0x4 # is it the done indicator?
73 je .source_test # NO! Well, then it should be the source indicator...
74 j .done # ok, lets finish it here...
75 .source_test:
76 tml %r5,0x8 # it should be a source indicator...
77 je .top # NO, ignore it...
78 lr %r8,%r5 # r8 = r5
79 nr %r8,%r10 # masking
80 0: mvcle %r6,%r8,0x0 # copy PAGE_SIZE bytes from r8 to r6 - pad with 0
81 jo 0b
82 j .top
83 .done:
84 sr %r0,%r0 # clear register r0
85 la %r4,load_psw-.base(%r13) # load psw-address into the register
86 o %r3,4(%r4) # or load address into psw
87 st %r3,4(%r4)
88 mvc 0(8,%r0),0(%r4) # copy psw to absolute address 0
89 tm have_diag308-.base(%r13),0x01
90 jno .no_diag308
91 diag %r0,%r0,0x308
92 .no_diag308:
93 sr %r1,%r1 # clear %r1
94 sr %r2,%r2 # clear %r2
95 sigp %r1,%r2,SIGP_SET_ARCHITECTURE # set cpuid to zero
96 lpsw 0 # hopefully start new kernel...
97
98 .align 8
99 load_psw:
100 .long 0x00080000,0x80000000
101 sys_msk:
102 .quad 0
103 ctlregs:
104 .rept 16
105 .long 0
106 .endr
107 gprregs:
108 .rept 16
109 .long 0
110 .endr
111 have_diag308:
112 .byte 0
113 .align 8
114 relocate_kernel_end:
115 .align 8
116 .globl relocate_kernel_len
117 relocate_kernel_len:
118 .quad relocate_kernel_end - relocate_kernel
diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S
index 7e77e03378f3..43c3169ea49c 100644
--- a/arch/s390/kernel/sclp.S
+++ b/arch/s390/kernel/sclp.S
@@ -36,21 +36,17 @@ _sclp_wait_int:
36 ahi %r15,-96 # create stack frame 36 ahi %r15,-96 # create stack frame
37 la %r8,LC_EXT_NEW_PSW # register int handler 37 la %r8,LC_EXT_NEW_PSW # register int handler
38 la %r9,.LextpswS1-.LbaseS1(%r13) 38 la %r9,.LextpswS1-.LbaseS1(%r13)
39#ifdef CONFIG_64BIT
40 tm LC_AR_MODE_ID,1 39 tm LC_AR_MODE_ID,1
41 jno .Lesa1 40 jno .Lesa1
42 la %r8,LC_EXT_NEW_PSW_64 # register int handler 64 bit 41 la %r8,LC_EXT_NEW_PSW_64 # register int handler 64 bit
43 la %r9,.LextpswS1_64-.LbaseS1(%r13) 42 la %r9,.LextpswS1_64-.LbaseS1(%r13)
44.Lesa1: 43.Lesa1:
45#endif
46 mvc .LoldpswS1-.LbaseS1(16,%r13),0(%r8) 44 mvc .LoldpswS1-.LbaseS1(16,%r13),0(%r8)
47 mvc 0(16,%r8),0(%r9) 45 mvc 0(16,%r8),0(%r9)
48#ifdef CONFIG_64BIT
49 epsw %r6,%r7 # set current addressing mode 46 epsw %r6,%r7 # set current addressing mode
50 nill %r6,0x1 # in new psw (31 or 64 bit mode) 47 nill %r6,0x1 # in new psw (31 or 64 bit mode)
51 nilh %r7,0x8000 48 nilh %r7,0x8000
52 stm %r6,%r7,0(%r8) 49 stm %r6,%r7,0(%r8)
53#endif
54 lhi %r6,0x0200 # cr mask for ext int (cr0.54) 50 lhi %r6,0x0200 # cr mask for ext int (cr0.54)
55 ltr %r2,%r2 51 ltr %r2,%r2
56 jz .LsetctS1 52 jz .LsetctS1
@@ -92,10 +88,8 @@ _sclp_wait_int:
92 .long 0, 0, 0, 0 # old ext int PSW 88 .long 0, 0, 0, 0 # old ext int PSW
93.LextpswS1: 89.LextpswS1:
94 .long 0x00080000, 0x80000000+.LwaitS1 # PSW to handle ext int 90 .long 0x00080000, 0x80000000+.LwaitS1 # PSW to handle ext int
95#ifdef CONFIG_64BIT
96.LextpswS1_64: 91.LextpswS1_64:
97 .quad 0, .LwaitS1 # PSW to handle ext int, 64 bit 92 .quad 0, .LwaitS1 # PSW to handle ext int, 64 bit
98#endif
99.LwaitpswS1: 93.LwaitpswS1:
100 .long 0x010a0000, 0x00000000+.LloopS1 # PSW to wait for ext int 94 .long 0x010a0000, 0x00000000+.LloopS1 # PSW to wait for ext int
101.LtimeS1: 95.LtimeS1:
@@ -272,13 +266,11 @@ _sclp_print:
272ENTRY(_sclp_print_early) 266ENTRY(_sclp_print_early)
273 stm %r6,%r15,24(%r15) # save registers 267 stm %r6,%r15,24(%r15) # save registers
274 ahi %r15,-96 # create stack frame 268 ahi %r15,-96 # create stack frame
275#ifdef CONFIG_64BIT
276 tm LC_AR_MODE_ID,1 269 tm LC_AR_MODE_ID,1
277 jno .Lesa2 270 jno .Lesa2
278 ahi %r15,-80 271 ahi %r15,-80
279 stmh %r6,%r15,96(%r15) # store upper register halves 272 stmh %r6,%r15,96(%r15) # store upper register halves
280.Lesa2: 273.Lesa2:
281#endif
282 lr %r10,%r2 # save string pointer 274 lr %r10,%r2 # save string pointer
283 lhi %r2,0 275 lhi %r2,0
284 bras %r14,_sclp_setup # enable console 276 bras %r14,_sclp_setup # enable console
@@ -291,14 +283,12 @@ ENTRY(_sclp_print_early)
291 lhi %r2,1 283 lhi %r2,1
292 bras %r14,_sclp_setup # disable console 284 bras %r14,_sclp_setup # disable console
293.LendS5: 285.LendS5:
294#ifdef CONFIG_64BIT
295 tm LC_AR_MODE_ID,1 286 tm LC_AR_MODE_ID,1
296 jno .Lesa3 287 jno .Lesa3
297 lgfr %r2,%r2 # sign extend return value 288 lgfr %r2,%r2 # sign extend return value
298 lmh %r6,%r15,96(%r15) # restore upper register halves 289 lmh %r6,%r15,96(%r15) # restore upper register halves
299 ahi %r15,80 290 ahi %r15,80
300.Lesa3: 291.Lesa3:
301#endif
302 lm %r6,%r15,120(%r15) # restore registers 292 lm %r6,%r15,120(%r15) # restore registers
303 br %r14 293 br %r14
304 294
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index a5ea8bc17cb3..7262fe438c99 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -92,10 +92,8 @@ EXPORT_SYMBOL(VMALLOC_END);
92struct page *vmemmap; 92struct page *vmemmap;
93EXPORT_SYMBOL(vmemmap); 93EXPORT_SYMBOL(vmemmap);
94 94
95#ifdef CONFIG_64BIT
96unsigned long MODULES_VADDR; 95unsigned long MODULES_VADDR;
97unsigned long MODULES_END; 96unsigned long MODULES_END;
98#endif
99 97
100/* An array with a pointer to the lowcore of every CPU. */ 98/* An array with a pointer to the lowcore of every CPU. */
101struct _lowcore *lowcore_ptr[NR_CPUS]; 99struct _lowcore *lowcore_ptr[NR_CPUS];
@@ -334,19 +332,10 @@ static void __init setup_lowcore(void)
334 lc->stfl_fac_list = S390_lowcore.stfl_fac_list; 332 lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
335 memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list, 333 memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
336 MAX_FACILITY_BIT/8); 334 MAX_FACILITY_BIT/8);
337#ifndef CONFIG_64BIT
338 if (MACHINE_HAS_IEEE) {
339 lc->extended_save_area_addr = (__u32)
340 __alloc_bootmem_low(PAGE_SIZE, PAGE_SIZE, 0);
341 /* enable extended save area */
342 __ctl_set_bit(14, 29);
343 }
344#else
345 if (MACHINE_HAS_VX) 335 if (MACHINE_HAS_VX)
346 lc->vector_save_area_addr = 336 lc->vector_save_area_addr =
347 (unsigned long) &lc->vector_save_area; 337 (unsigned long) &lc->vector_save_area;
348 lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0]; 338 lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0];
349#endif
350 lc->sync_enter_timer = S390_lowcore.sync_enter_timer; 339 lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
351 lc->async_enter_timer = S390_lowcore.async_enter_timer; 340 lc->async_enter_timer = S390_lowcore.async_enter_timer;
352 lc->exit_timer = S390_lowcore.exit_timer; 341 lc->exit_timer = S390_lowcore.exit_timer;
@@ -450,7 +439,6 @@ static void __init setup_memory_end(void)
450 unsigned long vmax, vmalloc_size, tmp; 439 unsigned long vmax, vmalloc_size, tmp;
451 440
452 /* Choose kernel address space layout: 2, 3, or 4 levels. */ 441 /* Choose kernel address space layout: 2, 3, or 4 levels. */
453#ifdef CONFIG_64BIT
454 vmalloc_size = VMALLOC_END ?: (128UL << 30) - MODULES_LEN; 442 vmalloc_size = VMALLOC_END ?: (128UL << 30) - MODULES_LEN;
455 tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE; 443 tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE;
456 tmp = tmp * (sizeof(struct page) + PAGE_SIZE); 444 tmp = tmp * (sizeof(struct page) + PAGE_SIZE);
@@ -462,12 +450,6 @@ static void __init setup_memory_end(void)
462 MODULES_END = vmax; 450 MODULES_END = vmax;
463 MODULES_VADDR = MODULES_END - MODULES_LEN; 451 MODULES_VADDR = MODULES_END - MODULES_LEN;
464 VMALLOC_END = MODULES_VADDR; 452 VMALLOC_END = MODULES_VADDR;
465#else
466 vmalloc_size = VMALLOC_END ?: 96UL << 20;
467 vmax = 1UL << 31; /* 2-level kernel page table */
468 /* vmalloc area is at the end of the kernel address space. */
469 VMALLOC_END = vmax;
470#endif
471 VMALLOC_START = vmax - vmalloc_size; 453 VMALLOC_START = vmax - vmalloc_size;
472 454
473 /* Split remaining virtual space between 1:1 mapping & vmemmap array */ 455 /* Split remaining virtual space between 1:1 mapping & vmemmap array */
@@ -754,7 +736,6 @@ static void __init setup_hwcaps(void)
754 if (MACHINE_HAS_HPAGE) 736 if (MACHINE_HAS_HPAGE)
755 elf_hwcap |= HWCAP_S390_HPAGE; 737 elf_hwcap |= HWCAP_S390_HPAGE;
756 738
757#if defined(CONFIG_64BIT)
758 /* 739 /*
759 * 64-bit register support for 31-bit processes 740 * 64-bit register support for 31-bit processes
760 * HWCAP_S390_HIGH_GPRS is bit 9. 741 * HWCAP_S390_HIGH_GPRS is bit 9.
@@ -772,22 +753,15 @@ static void __init setup_hwcaps(void)
772 */ 753 */
773 if (test_facility(129)) 754 if (test_facility(129))
774 elf_hwcap |= HWCAP_S390_VXRS; 755 elf_hwcap |= HWCAP_S390_VXRS;
775#endif
776
777 get_cpu_id(&cpu_id); 756 get_cpu_id(&cpu_id);
778 add_device_randomness(&cpu_id, sizeof(cpu_id)); 757 add_device_randomness(&cpu_id, sizeof(cpu_id));
779 switch (cpu_id.machine) { 758 switch (cpu_id.machine) {
780 case 0x9672: 759 case 0x9672:
781#if !defined(CONFIG_64BIT)
782 default: /* Use "g5" as default for 31 bit kernels. */
783#endif
784 strcpy(elf_platform, "g5"); 760 strcpy(elf_platform, "g5");
785 break; 761 break;
786 case 0x2064: 762 case 0x2064:
787 case 0x2066: 763 case 0x2066:
788#if defined(CONFIG_64BIT)
789 default: /* Use "z900" as default for 64 bit kernels. */ 764 default: /* Use "z900" as default for 64 bit kernels. */
790#endif
791 strcpy(elf_platform, "z900"); 765 strcpy(elf_platform, "z900");
792 break; 766 break;
793 case 0x2084: 767 case 0x2084:
@@ -839,19 +813,6 @@ void __init setup_arch(char **cmdline_p)
839 /* 813 /*
840 * print what head.S has found out about the machine 814 * print what head.S has found out about the machine
841 */ 815 */
842#ifndef CONFIG_64BIT
843 if (MACHINE_IS_VM)
844 pr_info("Linux is running as a z/VM "
845 "guest operating system in 31-bit mode\n");
846 else if (MACHINE_IS_LPAR)
847 pr_info("Linux is running natively in 31-bit mode\n");
848 if (MACHINE_HAS_IEEE)
849 pr_info("The hardware system has IEEE compatible "
850 "floating point units\n");
851 else
852 pr_info("The hardware system has no IEEE compatible "
853 "floating point units\n");
854#else /* CONFIG_64BIT */
855 if (MACHINE_IS_VM) 816 if (MACHINE_IS_VM)
856 pr_info("Linux is running as a z/VM " 817 pr_info("Linux is running as a z/VM "
857 "guest operating system in 64-bit mode\n"); 818 "guest operating system in 64-bit mode\n");
@@ -859,7 +820,6 @@ void __init setup_arch(char **cmdline_p)
859 pr_info("Linux is running under KVM in 64-bit mode\n"); 820 pr_info("Linux is running under KVM in 64-bit mode\n");
860 else if (MACHINE_IS_LPAR) 821 else if (MACHINE_IS_LPAR)
861 pr_info("Linux is running natively in 64-bit mode\n"); 822 pr_info("Linux is running natively in 64-bit mode\n");
862#endif /* CONFIG_64BIT */
863 823
864 /* Have one command line that is parsed and saved in /proc/cmdline */ 824 /* Have one command line that is parsed and saved in /proc/cmdline */
865 /* boot_command_line has been already set up in early.c */ 825 /* boot_command_line has been already set up in early.c */
@@ -930,35 +890,3 @@ void __init setup_arch(char **cmdline_p)
930 /* Add system specific data to the random pool */ 890 /* Add system specific data to the random pool */
931 setup_randomness(); 891 setup_randomness();
932} 892}
933
934#ifdef CONFIG_32BIT
935static int no_removal_warning __initdata;
936
937static int __init parse_no_removal_warning(char *str)
938{
939 no_removal_warning = 1;
940 return 0;
941}
942__setup("no_removal_warning", parse_no_removal_warning);
943
944static int __init removal_warning(void)
945{
946 if (no_removal_warning)
947 return 0;
948 printk(KERN_ALERT "\n\n");
949 printk(KERN_CONT "Warning - you are using a 31 bit kernel!\n\n");
950 printk(KERN_CONT "We plan to remove 31 bit kernel support from the kernel sources in March 2015.\n");
951 printk(KERN_CONT "Currently we assume that nobody is using the 31 bit kernel on old 31 bit\n");
952 printk(KERN_CONT "hardware anymore. If you think that the code should not be removed and also\n");
953 printk(KERN_CONT "future versions of the Linux kernel should be able to run in 31 bit mode\n");
954 printk(KERN_CONT "please let us know. Please write to:\n");
955 printk(KERN_CONT "linux390@de.ibm.com (mail address) and/or\n");
956 printk(KERN_CONT "linux-s390@vger.kernel.org (mailing list).\n\n");
957 printk(KERN_CONT "Thank you!\n\n");
958 printk(KERN_CONT "If this kernel runs on a 64 bit machine you may consider using a 64 bit kernel.\n");
959 printk(KERN_CONT "This message can be disabled with the \"no_removal_warning\" kernel parameter.\n");
960 schedule_timeout_uninterruptible(300 * HZ);
961 return 0;
962}
963early_initcall(removal_warning);
964#endif
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index b3ae6f70c6d6..7fec60cb0b75 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -106,7 +106,6 @@ static void store_sigregs(void)
106{ 106{
107 save_access_regs(current->thread.acrs); 107 save_access_regs(current->thread.acrs);
108 save_fp_ctl(&current->thread.fp_regs.fpc); 108 save_fp_ctl(&current->thread.fp_regs.fpc);
109#ifdef CONFIG_64BIT
110 if (current->thread.vxrs) { 109 if (current->thread.vxrs) {
111 int i; 110 int i;
112 111
@@ -115,7 +114,6 @@ static void store_sigregs(void)
115 current->thread.fp_regs.fprs[i] = 114 current->thread.fp_regs.fprs[i] =
116 *(freg_t *)(current->thread.vxrs + i); 115 *(freg_t *)(current->thread.vxrs + i);
117 } else 116 } else
118#endif
119 save_fp_regs(current->thread.fp_regs.fprs); 117 save_fp_regs(current->thread.fp_regs.fprs);
120} 118}
121 119
@@ -124,7 +122,6 @@ static void load_sigregs(void)
124{ 122{
125 restore_access_regs(current->thread.acrs); 123 restore_access_regs(current->thread.acrs);
126 /* restore_fp_ctl is done in restore_sigregs */ 124 /* restore_fp_ctl is done in restore_sigregs */
127#ifdef CONFIG_64BIT
128 if (current->thread.vxrs) { 125 if (current->thread.vxrs) {
129 int i; 126 int i;
130 127
@@ -133,7 +130,6 @@ static void load_sigregs(void)
133 current->thread.fp_regs.fprs[i]; 130 current->thread.fp_regs.fprs[i];
134 restore_vx_regs(current->thread.vxrs); 131 restore_vx_regs(current->thread.vxrs);
135 } else 132 } else
136#endif
137 restore_fp_regs(current->thread.fp_regs.fprs); 133 restore_fp_regs(current->thread.fp_regs.fprs);
138} 134}
139 135
@@ -200,7 +196,6 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
200static int save_sigregs_ext(struct pt_regs *regs, 196static int save_sigregs_ext(struct pt_regs *regs,
201 _sigregs_ext __user *sregs_ext) 197 _sigregs_ext __user *sregs_ext)
202{ 198{
203#ifdef CONFIG_64BIT
204 __u64 vxrs[__NUM_VXRS_LOW]; 199 __u64 vxrs[__NUM_VXRS_LOW];
205 int i; 200 int i;
206 201
@@ -215,14 +210,12 @@ static int save_sigregs_ext(struct pt_regs *regs,
215 sizeof(sregs_ext->vxrs_high))) 210 sizeof(sregs_ext->vxrs_high)))
216 return -EFAULT; 211 return -EFAULT;
217 } 212 }
218#endif
219 return 0; 213 return 0;
220} 214}
221 215
222static int restore_sigregs_ext(struct pt_regs *regs, 216static int restore_sigregs_ext(struct pt_regs *regs,
223 _sigregs_ext __user *sregs_ext) 217 _sigregs_ext __user *sregs_ext)
224{ 218{
225#ifdef CONFIG_64BIT
226 __u64 vxrs[__NUM_VXRS_LOW]; 219 __u64 vxrs[__NUM_VXRS_LOW];
227 int i; 220 int i;
228 221
@@ -237,7 +230,6 @@ static int restore_sigregs_ext(struct pt_regs *regs,
237 for (i = 0; i < __NUM_VXRS_LOW; i++) 230 for (i = 0; i < __NUM_VXRS_LOW; i++)
238 *((__u64 *)(current->thread.vxrs + i) + 1) = vxrs[i]; 231 *((__u64 *)(current->thread.vxrs + i) + 1) = vxrs[i];
239 } 232 }
240#endif
241 return 0; 233 return 0;
242} 234}
243 235
@@ -416,13 +408,11 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
416 * included in the signal frame on a 31-bit system. 408 * included in the signal frame on a 31-bit system.
417 */ 409 */
418 uc_flags = 0; 410 uc_flags = 0;
419#ifdef CONFIG_64BIT
420 if (MACHINE_HAS_VX) { 411 if (MACHINE_HAS_VX) {
421 frame_size += sizeof(_sigregs_ext); 412 frame_size += sizeof(_sigregs_ext);
422 if (current->thread.vxrs) 413 if (current->thread.vxrs)
423 uc_flags |= UC_VXRS; 414 uc_flags |= UC_VXRS;
424 } 415 }
425#endif
426 frame = get_sigframe(&ksig->ka, regs, frame_size); 416 frame = get_sigframe(&ksig->ka, regs, frame_size);
427 if (frame == (void __user *) -1UL) 417 if (frame == (void __user *) -1UL)
428 return -EFAULT; 418 return -EFAULT;
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index db8f1115a3bf..d140160c9aec 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -198,19 +198,11 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
198 lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET; 198 lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET;
199 lc->cpu_nr = cpu; 199 lc->cpu_nr = cpu;
200 lc->spinlock_lockval = arch_spin_lockval(cpu); 200 lc->spinlock_lockval = arch_spin_lockval(cpu);
201#ifndef CONFIG_64BIT
202 if (MACHINE_HAS_IEEE) {
203 lc->extended_save_area_addr = get_zeroed_page(GFP_KERNEL);
204 if (!lc->extended_save_area_addr)
205 goto out;
206 }
207#else
208 if (MACHINE_HAS_VX) 201 if (MACHINE_HAS_VX)
209 lc->vector_save_area_addr = 202 lc->vector_save_area_addr =
210 (unsigned long) &lc->vector_save_area; 203 (unsigned long) &lc->vector_save_area;
211 if (vdso_alloc_per_cpu(lc)) 204 if (vdso_alloc_per_cpu(lc))
212 goto out; 205 goto out;
213#endif
214 lowcore_ptr[cpu] = lc; 206 lowcore_ptr[cpu] = lc;
215 pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc); 207 pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
216 return 0; 208 return 0;
@@ -229,16 +221,7 @@ static void pcpu_free_lowcore(struct pcpu *pcpu)
229{ 221{
230 pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0); 222 pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
231 lowcore_ptr[pcpu - pcpu_devices] = NULL; 223 lowcore_ptr[pcpu - pcpu_devices] = NULL;
232#ifndef CONFIG_64BIT
233 if (MACHINE_HAS_IEEE) {
234 struct _lowcore *lc = pcpu->lowcore;
235
236 free_page((unsigned long) lc->extended_save_area_addr);
237 lc->extended_save_area_addr = 0;
238 }
239#else
240 vdso_free_per_cpu(pcpu->lowcore); 224 vdso_free_per_cpu(pcpu->lowcore);
241#endif
242 if (pcpu == &pcpu_devices[0]) 225 if (pcpu == &pcpu_devices[0])
243 return; 226 return;
244 free_page(pcpu->lowcore->panic_stack-PANIC_FRAME_OFFSET); 227 free_page(pcpu->lowcore->panic_stack-PANIC_FRAME_OFFSET);
@@ -492,22 +475,6 @@ void arch_send_call_function_single_ipi(int cpu)
492 pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single); 475 pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
493} 476}
494 477
495#ifndef CONFIG_64BIT
496/*
497 * this function sends a 'purge tlb' signal to another CPU.
498 */
499static void smp_ptlb_callback(void *info)
500{
501 __tlb_flush_local();
502}
503
504void smp_ptlb_all(void)
505{
506 on_each_cpu(smp_ptlb_callback, NULL, 1);
507}
508EXPORT_SYMBOL(smp_ptlb_all);
509#endif /* ! CONFIG_64BIT */
510
511/* 478/*
512 * this function sends a 'reschedule' IPI to another CPU. 479 * this function sends a 'reschedule' IPI to another CPU.
513 * it goes straight through and wastes no time serializing 480 * it goes straight through and wastes no time serializing
diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c
index 23eb222c1658..f145490cce54 100644
--- a/arch/s390/kernel/sys_s390.c
+++ b/arch/s390/kernel/sys_s390.c
@@ -76,7 +76,6 @@ SYSCALL_DEFINE5(s390_ipc, uint, call, int, first, unsigned long, second,
76 return sys_ipc(call, first, second, third, ptr, third); 76 return sys_ipc(call, first, second, third, ptr, third);
77} 77}
78 78
79#ifdef CONFIG_64BIT
80SYSCALL_DEFINE1(s390_personality, unsigned int, personality) 79SYSCALL_DEFINE1(s390_personality, unsigned int, personality)
81{ 80{
82 unsigned int ret; 81 unsigned int ret;
@@ -90,51 +89,3 @@ SYSCALL_DEFINE1(s390_personality, unsigned int, personality)
90 89
91 return ret; 90 return ret;
92} 91}
93#endif /* CONFIG_64BIT */
94
95/*
96 * Wrapper function for sys_fadvise64/fadvise64_64
97 */
98#ifndef CONFIG_64BIT
99
100SYSCALL_DEFINE5(s390_fadvise64, int, fd, u32, offset_high, u32, offset_low,
101 size_t, len, int, advice)
102{
103 return sys_fadvise64(fd, (u64) offset_high << 32 | offset_low,
104 len, advice);
105}
106
107struct fadvise64_64_args {
108 int fd;
109 long long offset;
110 long long len;
111 int advice;
112};
113
114SYSCALL_DEFINE1(s390_fadvise64_64, struct fadvise64_64_args __user *, args)
115{
116 struct fadvise64_64_args a;
117
118 if ( copy_from_user(&a, args, sizeof(a)) )
119 return -EFAULT;
120 return sys_fadvise64_64(a.fd, a.offset, a.len, a.advice);
121}
122
123/*
124 * This is a wrapper to call sys_fallocate(). For 31 bit s390 the last
125 * 64 bit argument "len" is split into the upper and lower 32 bits. The
126 * system call wrapper in the user space loads the value to %r6/%r7.
127 * The code in entry.S keeps the values in %r2 - %r6 where they are and
128 * stores %r7 to 96(%r15). But the standard C linkage requires that
129 * the whole 64 bit value for len is stored on the stack and doesn't
130 * use %r6 at all. So s390_fallocate has to convert the arguments from
131 * %r2: fd, %r3: mode, %r4/%r5: offset, %r6/96(%r15)-99(%r15): len
132 * to
133 * %r2: fd, %r3: mode, %r4/%r5: offset, 96(%r15)-103(%r15): len
134 */
135SYSCALL_DEFINE5(s390_fallocate, int, fd, int, mode, loff_t, offset,
136 u32, len_high, u32, len_low)
137{
138 return sys_fallocate(fd, mode, offset, ((u64)len_high << 32) | len_low);
139}
140#endif
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index f081cf1157c3..8be11c22ed17 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -26,7 +26,6 @@ int show_unhandled_signals = 1;
26 26
27static inline void __user *get_trap_ip(struct pt_regs *regs) 27static inline void __user *get_trap_ip(struct pt_regs *regs)
28{ 28{
29#ifdef CONFIG_64BIT
30 unsigned long address; 29 unsigned long address;
31 30
32 if (regs->int_code & 0x200) 31 if (regs->int_code & 0x200)
@@ -35,10 +34,6 @@ static inline void __user *get_trap_ip(struct pt_regs *regs)
35 address = regs->psw.addr; 34 address = regs->psw.addr;
36 return (void __user *) 35 return (void __user *)
37 ((address - (regs->int_code >> 16)) & PSW_ADDR_INSN); 36 ((address - (regs->int_code >> 16)) & PSW_ADDR_INSN);
38#else
39 return (void __user *)
40 ((regs->psw.addr - (regs->int_code >> 16)) & PSW_ADDR_INSN);
41#endif
42} 37}
43 38
44static inline void report_user_fault(struct pt_regs *regs, int signr) 39static inline void report_user_fault(struct pt_regs *regs, int signr)
@@ -153,11 +148,8 @@ DO_ERROR_INFO(privileged_op, SIGILL, ILL_PRVOPC,
153 "privileged operation") 148 "privileged operation")
154DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN, 149DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN,
155 "special operation exception") 150 "special operation exception")
156
157#ifdef CONFIG_64BIT
158DO_ERROR_INFO(transaction_exception, SIGILL, ILL_ILLOPN, 151DO_ERROR_INFO(transaction_exception, SIGILL, ILL_ILLOPN,
159 "transaction constraint exception") 152 "transaction constraint exception")
160#endif
161 153
162static inline void do_fp_trap(struct pt_regs *regs, int fpc) 154static inline void do_fp_trap(struct pt_regs *regs, int fpc)
163{ 155{
@@ -211,29 +203,6 @@ void illegal_op(struct pt_regs *regs)
211 } else if (*((__u16 *) opcode) == UPROBE_SWBP_INSN) { 203 } else if (*((__u16 *) opcode) == UPROBE_SWBP_INSN) {
212 is_uprobe_insn = 1; 204 is_uprobe_insn = 1;
213#endif 205#endif
214#ifdef CONFIG_MATHEMU
215 } else if (opcode[0] == 0xb3) {
216 if (get_user(*((__u16 *) (opcode+2)), location+1))
217 return;
218 signal = math_emu_b3(opcode, regs);
219 } else if (opcode[0] == 0xed) {
220 if (get_user(*((__u32 *) (opcode+2)),
221 (__u32 __user *)(location+1)))
222 return;
223 signal = math_emu_ed(opcode, regs);
224 } else if (*((__u16 *) opcode) == 0xb299) {
225 if (get_user(*((__u16 *) (opcode+2)), location+1))
226 return;
227 signal = math_emu_srnm(opcode, regs);
228 } else if (*((__u16 *) opcode) == 0xb29c) {
229 if (get_user(*((__u16 *) (opcode+2)), location+1))
230 return;
231 signal = math_emu_stfpc(opcode, regs);
232 } else if (*((__u16 *) opcode) == 0xb29d) {
233 if (get_user(*((__u16 *) (opcode+2)), location+1))
234 return;
235 signal = math_emu_lfpc(opcode, regs);
236#endif
237 } else 206 } else
238 signal = SIGILL; 207 signal = SIGILL;
239 } 208 }
@@ -247,71 +216,14 @@ void illegal_op(struct pt_regs *regs)
247 3, SIGTRAP) != NOTIFY_STOP) 216 3, SIGTRAP) != NOTIFY_STOP)
248 signal = SIGILL; 217 signal = SIGILL;
249 } 218 }
250
251#ifdef CONFIG_MATHEMU
252 if (signal == SIGFPE)
253 do_fp_trap(regs, current->thread.fp_regs.fpc);
254 else if (signal == SIGSEGV)
255 do_trap(regs, signal, SEGV_MAPERR, "user address fault");
256 else
257#endif
258 if (signal) 219 if (signal)
259 do_trap(regs, signal, ILL_ILLOPC, "illegal operation"); 220 do_trap(regs, signal, ILL_ILLOPC, "illegal operation");
260} 221}
261NOKPROBE_SYMBOL(illegal_op); 222NOKPROBE_SYMBOL(illegal_op);
262 223
263#ifdef CONFIG_MATHEMU
264void specification_exception(struct pt_regs *regs)
265{
266 __u8 opcode[6];
267 __u16 __user *location = NULL;
268 int signal = 0;
269
270 location = (__u16 __user *) get_trap_ip(regs);
271
272 if (user_mode(regs)) {
273 get_user(*((__u16 *) opcode), location);
274 switch (opcode[0]) {
275 case 0x28: /* LDR Rx,Ry */
276 signal = math_emu_ldr(opcode);
277 break;
278 case 0x38: /* LER Rx,Ry */
279 signal = math_emu_ler(opcode);
280 break;
281 case 0x60: /* STD R,D(X,B) */
282 get_user(*((__u16 *) (opcode+2)), location+1);
283 signal = math_emu_std(opcode, regs);
284 break;
285 case 0x68: /* LD R,D(X,B) */
286 get_user(*((__u16 *) (opcode+2)), location+1);
287 signal = math_emu_ld(opcode, regs);
288 break;
289 case 0x70: /* STE R,D(X,B) */
290 get_user(*((__u16 *) (opcode+2)), location+1);
291 signal = math_emu_ste(opcode, regs);
292 break;
293 case 0x78: /* LE R,D(X,B) */
294 get_user(*((__u16 *) (opcode+2)), location+1);
295 signal = math_emu_le(opcode, regs);
296 break;
297 default:
298 signal = SIGILL;
299 break;
300 }
301 } else
302 signal = SIGILL;
303
304 if (signal == SIGFPE)
305 do_fp_trap(regs, current->thread.fp_regs.fpc);
306 else if (signal)
307 do_trap(regs, signal, ILL_ILLOPN, "specification exception");
308}
309#else
310DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN, 224DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN,
311 "specification exception"); 225 "specification exception");
312#endif
313 226
314#ifdef CONFIG_64BIT
315int alloc_vector_registers(struct task_struct *tsk) 227int alloc_vector_registers(struct task_struct *tsk)
316{ 228{
317 __vector128 *vxrs; 229 __vector128 *vxrs;
@@ -377,7 +289,6 @@ static int __init disable_vector_extension(char *str)
377 return 1; 289 return 1;
378} 290}
379__setup("novx", disable_vector_extension); 291__setup("novx", disable_vector_extension);
380#endif
381 292
382void data_exception(struct pt_regs *regs) 293void data_exception(struct pt_regs *regs)
383{ 294{
@@ -386,65 +297,7 @@ void data_exception(struct pt_regs *regs)
386 297
387 location = get_trap_ip(regs); 298 location = get_trap_ip(regs);
388 299
389 if (MACHINE_HAS_IEEE) 300 asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
390 asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
391
392#ifdef CONFIG_MATHEMU
393 else if (user_mode(regs)) {
394 __u8 opcode[6];
395 get_user(*((__u16 *) opcode), location);
396 switch (opcode[0]) {
397 case 0x28: /* LDR Rx,Ry */
398 signal = math_emu_ldr(opcode);
399 break;
400 case 0x38: /* LER Rx,Ry */
401 signal = math_emu_ler(opcode);
402 break;
403 case 0x60: /* STD R,D(X,B) */
404 get_user(*((__u16 *) (opcode+2)), location+1);
405 signal = math_emu_std(opcode, regs);
406 break;
407 case 0x68: /* LD R,D(X,B) */
408 get_user(*((__u16 *) (opcode+2)), location+1);
409 signal = math_emu_ld(opcode, regs);
410 break;
411 case 0x70: /* STE R,D(X,B) */
412 get_user(*((__u16 *) (opcode+2)), location+1);
413 signal = math_emu_ste(opcode, regs);
414 break;
415 case 0x78: /* LE R,D(X,B) */
416 get_user(*((__u16 *) (opcode+2)), location+1);
417 signal = math_emu_le(opcode, regs);
418 break;
419 case 0xb3:
420 get_user(*((__u16 *) (opcode+2)), location+1);
421 signal = math_emu_b3(opcode, regs);
422 break;
423 case 0xed:
424 get_user(*((__u32 *) (opcode+2)),
425 (__u32 __user *)(location+1));
426 signal = math_emu_ed(opcode, regs);
427 break;
428 case 0xb2:
429 if (opcode[1] == 0x99) {
430 get_user(*((__u16 *) (opcode+2)), location+1);
431 signal = math_emu_srnm(opcode, regs);
432 } else if (opcode[1] == 0x9c) {
433 get_user(*((__u16 *) (opcode+2)), location+1);
434 signal = math_emu_stfpc(opcode, regs);
435 } else if (opcode[1] == 0x9d) {
436 get_user(*((__u16 *) (opcode+2)), location+1);
437 signal = math_emu_lfpc(opcode, regs);
438 } else
439 signal = SIGILL;
440 break;
441 default:
442 signal = SIGILL;
443 break;
444 }
445 }
446#endif
447#ifdef CONFIG_64BIT
448 /* Check for vector register enablement */ 301 /* Check for vector register enablement */
449 if (MACHINE_HAS_VX && !current->thread.vxrs && 302 if (MACHINE_HAS_VX && !current->thread.vxrs &&
450 (current->thread.fp_regs.fpc & FPC_DXC_MASK) == 0xfe00) { 303 (current->thread.fp_regs.fpc & FPC_DXC_MASK) == 0xfe00) {
@@ -454,13 +307,11 @@ void data_exception(struct pt_regs *regs)
454 clear_pt_regs_flag(regs, PIF_PER_TRAP); 307 clear_pt_regs_flag(regs, PIF_PER_TRAP);
455 return; 308 return;
456 } 309 }
457#endif
458
459 if (current->thread.fp_regs.fpc & FPC_DXC_MASK) 310 if (current->thread.fp_regs.fpc & FPC_DXC_MASK)
460 signal = SIGFPE; 311 signal = SIGFPE;
461 else 312 else
462 signal = SIGILL; 313 signal = SIGILL;
463 if (signal == SIGFPE) 314 if (signal == SIGFPE)
464 do_fp_trap(regs, current->thread.fp_regs.fpc); 315 do_fp_trap(regs, current->thread.fp_regs.fpc);
465 else if (signal) 316 else if (signal)
466 do_trap(regs, signal, ILL_ILLOPN, "data exception"); 317 do_trap(regs, signal, ILL_ILLOPN, "data exception");
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 0bbb7e027c5a..0d58269ff425 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -32,19 +32,17 @@
32#include <asm/vdso.h> 32#include <asm/vdso.h>
33#include <asm/facility.h> 33#include <asm/facility.h>
34 34
35#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT) 35#ifdef CONFIG_COMPAT
36extern char vdso32_start, vdso32_end; 36extern char vdso32_start, vdso32_end;
37static void *vdso32_kbase = &vdso32_start; 37static void *vdso32_kbase = &vdso32_start;
38static unsigned int vdso32_pages; 38static unsigned int vdso32_pages;
39static struct page **vdso32_pagelist; 39static struct page **vdso32_pagelist;
40#endif 40#endif
41 41
42#ifdef CONFIG_64BIT
43extern char vdso64_start, vdso64_end; 42extern char vdso64_start, vdso64_end;
44static void *vdso64_kbase = &vdso64_start; 43static void *vdso64_kbase = &vdso64_start;
45static unsigned int vdso64_pages; 44static unsigned int vdso64_pages;
46static struct page **vdso64_pagelist; 45static struct page **vdso64_pagelist;
47#endif /* CONFIG_64BIT */
48 46
49/* 47/*
50 * Should the kernel map a VDSO page into processes and pass its 48 * Should the kernel map a VDSO page into processes and pass its
@@ -87,7 +85,6 @@ static void vdso_init_data(struct vdso_data *vd)
87 vd->ectg_available = test_facility(31); 85 vd->ectg_available = test_facility(31);
88} 86}
89 87
90#ifdef CONFIG_64BIT
91/* 88/*
92 * Allocate/free per cpu vdso data. 89 * Allocate/free per cpu vdso data.
93 */ 90 */
@@ -169,7 +166,6 @@ static void vdso_init_cr5(void)
169 cr5 = offsetof(struct _lowcore, paste); 166 cr5 = offsetof(struct _lowcore, paste);
170 __ctl_load(cr5, 5, 5); 167 __ctl_load(cr5, 5, 5);
171} 168}
172#endif /* CONFIG_64BIT */
173 169
174/* 170/*
175 * This is called from binfmt_elf, we create the special vma for the 171 * This is called from binfmt_elf, we create the special vma for the
@@ -191,7 +187,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
191 if (!uses_interp) 187 if (!uses_interp)
192 return 0; 188 return 0;
193 189
194#ifdef CONFIG_64BIT
195 vdso_pagelist = vdso64_pagelist; 190 vdso_pagelist = vdso64_pagelist;
196 vdso_pages = vdso64_pages; 191 vdso_pages = vdso64_pages;
197#ifdef CONFIG_COMPAT 192#ifdef CONFIG_COMPAT
@@ -200,11 +195,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
200 vdso_pages = vdso32_pages; 195 vdso_pages = vdso32_pages;
201 } 196 }
202#endif 197#endif
203#else
204 vdso_pagelist = vdso32_pagelist;
205 vdso_pages = vdso32_pages;
206#endif
207
208 /* 198 /*
209 * vDSO has a problem and was disabled, just don't "enable" it for 199 * vDSO has a problem and was disabled, just don't "enable" it for
210 * the process 200 * the process
@@ -268,7 +258,7 @@ static int __init vdso_init(void)
268 if (!vdso_enabled) 258 if (!vdso_enabled)
269 return 0; 259 return 0;
270 vdso_init_data(vdso_data); 260 vdso_init_data(vdso_data);
271#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT) 261#ifdef CONFIG_COMPAT
272 /* Calculate the size of the 32 bit vDSO */ 262 /* Calculate the size of the 32 bit vDSO */
273 vdso32_pages = ((&vdso32_end - &vdso32_start 263 vdso32_pages = ((&vdso32_end - &vdso32_start
274 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1; 264 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;
@@ -287,7 +277,6 @@ static int __init vdso_init(void)
287 vdso32_pagelist[vdso32_pages] = NULL; 277 vdso32_pagelist[vdso32_pages] = NULL;
288#endif 278#endif
289 279
290#ifdef CONFIG_64BIT
291 /* Calculate the size of the 64 bit vDSO */ 280 /* Calculate the size of the 64 bit vDSO */
292 vdso64_pages = ((&vdso64_end - &vdso64_start 281 vdso64_pages = ((&vdso64_end - &vdso64_start
293 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1; 282 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;
@@ -307,7 +296,6 @@ static int __init vdso_init(void)
307 if (vdso_alloc_per_cpu(&S390_lowcore)) 296 if (vdso_alloc_per_cpu(&S390_lowcore))
308 BUG(); 297 BUG();
309 vdso_init_cr5(); 298 vdso_init_cr5();
310#endif /* CONFIG_64BIT */
311 299
312 get_page(virt_to_page(vdso_data)); 300 get_page(virt_to_page(vdso_data));
313 301
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 35b13ed0af5f..445657fe658c 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -6,17 +6,10 @@
6#include <asm/page.h> 6#include <asm/page.h>
7#include <asm-generic/vmlinux.lds.h> 7#include <asm-generic/vmlinux.lds.h>
8 8
9#ifndef CONFIG_64BIT
10OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390")
11OUTPUT_ARCH(s390:31-bit)
12ENTRY(startup)
13jiffies = jiffies_64 + 4;
14#else
15OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390") 9OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
16OUTPUT_ARCH(s390:64-bit) 10OUTPUT_ARCH(s390:64-bit)
17ENTRY(startup) 11ENTRY(startup)
18jiffies = jiffies_64; 12jiffies = jiffies_64;
19#endif
20 13
21PHDRS { 14PHDRS {
22 text PT_LOAD FLAGS(5); /* R_E */ 15 text PT_LOAD FLAGS(5); /* R_E */
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index a01df233856f..15536da68e18 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -3,8 +3,7 @@
3# 3#
4 4
5lib-y += delay.o string.o uaccess.o find.o 5lib-y += delay.o string.o uaccess.o find.o
6obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o mem32.o 6obj-y += mem64.o
7obj-$(CONFIG_64BIT) += mem64.o
8lib-$(CONFIG_SMP) += spinlock.o 7lib-$(CONFIG_SMP) += spinlock.o
9lib-$(CONFIG_KPROBES) += probes.o 8lib-$(CONFIG_KPROBES) += probes.o
10lib-$(CONFIG_UPROBES) += probes.o 9lib-$(CONFIG_UPROBES) += probes.o
diff --git a/arch/s390/lib/div64.c b/arch/s390/lib/div64.c
deleted file mode 100644
index 261152f83242..000000000000
--- a/arch/s390/lib/div64.c
+++ /dev/null
@@ -1,147 +0,0 @@
1/*
2 * __div64_32 implementation for 31 bit.
3 *
4 * Copyright IBM Corp. 2006
5 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
6 */
7
8#include <linux/types.h>
9#include <linux/module.h>
10
11#ifdef CONFIG_MARCH_G5
12
13/*
14 * Function to divide an unsigned 64 bit integer by an unsigned
15 * 31 bit integer using signed 64/32 bit division.
16 */
17static uint32_t __div64_31(uint64_t *n, uint32_t base)
18{
19 register uint32_t reg2 asm("2");
20 register uint32_t reg3 asm("3");
21 uint32_t *words = (uint32_t *) n;
22 uint32_t tmp;
23
24 /* Special case base==1, remainder = 0, quotient = n */
25 if (base == 1)
26 return 0;
27 /*
28 * Special case base==0 will cause a fixed point divide exception
29 * on the dr instruction and may not happen anyway. For the
30 * following calculation we can assume base > 1. The first
31 * signed 64 / 32 bit division with an upper half of 0 will
32 * give the correct upper half of the 64 bit quotient.
33 */
34 reg2 = 0UL;
35 reg3 = words[0];
36 asm volatile(
37 " dr %0,%2\n"
38 : "+d" (reg2), "+d" (reg3) : "d" (base) : "cc" );
39 words[0] = reg3;
40 reg3 = words[1];
41 /*
42 * To get the lower half of the 64 bit quotient and the 32 bit
43 * remainder we have to use a little trick. Since we only have
44 * a signed division the quotient can get too big. To avoid this
45 * the 64 bit dividend is halved, then the signed division will
46 * work. Afterwards the quotient and the remainder are doubled.
47 * If the last bit of the dividend has been one the remainder
48 * is increased by one then checked against the base. If the
49 * remainder has overflown subtract base and increase the
50 * quotient. Simple, no ?
51 */
52 asm volatile(
53 " nr %2,%1\n"
54 " srdl %0,1\n"
55 " dr %0,%3\n"
56 " alr %0,%0\n"
57 " alr %1,%1\n"
58 " alr %0,%2\n"
59 " clr %0,%3\n"
60 " jl 0f\n"
61 " slr %0,%3\n"
62 " ahi %1,1\n"
63 "0:\n"
64 : "+d" (reg2), "+d" (reg3), "=d" (tmp)
65 : "d" (base), "2" (1UL) : "cc" );
66 words[1] = reg3;
67 return reg2;
68}
69
70/*
71 * Function to divide an unsigned 64 bit integer by an unsigned
72 * 32 bit integer using the unsigned 64/31 bit division.
73 */
74uint32_t __div64_32(uint64_t *n, uint32_t base)
75{
76 uint32_t r;
77
78 /*
79 * If the most significant bit of base is set, divide n by
80 * (base/2). That allows to use 64/31 bit division and gives a
81 * good approximation of the result: n = (base/2)*q + r. The
82 * result needs to be corrected with two simple transformations.
83 * If base is already < 2^31-1 __div64_31 can be used directly.
84 */
85 r = __div64_31(n, ((signed) base < 0) ? (base/2) : base);
86 if ((signed) base < 0) {
87 uint64_t q = *n;
88 /*
89 * First transformation:
90 * n = (base/2)*q + r
91 * = ((base/2)*2)*(q/2) + ((q&1) ? (base/2) : 0) + r
92 * Since r < (base/2), r + (base/2) < base.
93 * With q1 = (q/2) and r1 = r + ((q&1) ? (base/2) : 0)
94 * n = ((base/2)*2)*q1 + r1 with r1 < base.
95 */
96 if (q & 1)
97 r += base/2;
98 q >>= 1;
99 /*
100 * Second transformation. ((base/2)*2) could have lost the
101 * last bit.
102 * n = ((base/2)*2)*q1 + r1
103 * = base*q1 - ((base&1) ? q1 : 0) + r1
104 */
105 if (base & 1) {
106 int64_t rx = r - q;
107 /*
108 * base is >= 2^31. The worst case for the while
109 * loop is n=2^64-1 base=2^31+1. That gives a
110 * maximum for q=(2^64-1)/2^31 = 0x1ffffffff. Since
111 * base >= 2^31 the loop is finished after a maximum
112 * of three iterations.
113 */
114 while (rx < 0) {
115 rx += base;
116 q--;
117 }
118 r = rx;
119 }
120 *n = q;
121 }
122 return r;
123}
124
125#else /* MARCH_G5 */
126
127uint32_t __div64_32(uint64_t *n, uint32_t base)
128{
129 register uint32_t reg2 asm("2");
130 register uint32_t reg3 asm("3");
131 uint32_t *words = (uint32_t *) n;
132
133 reg2 = 0UL;
134 reg3 = words[0];
135 asm volatile(
136 " dlr %0,%2\n"
137 : "+d" (reg2), "+d" (reg3) : "d" (base) : "cc" );
138 words[0] = reg3;
139 reg3 = words[1];
140 asm volatile(
141 " dlr %0,%2\n"
142 : "+d" (reg2), "+d" (reg3) : "d" (base) : "cc" );
143 words[1] = reg3;
144 return reg2;
145}
146
147#endif /* MARCH_G5 */
diff --git a/arch/s390/lib/mem32.S b/arch/s390/lib/mem32.S
deleted file mode 100644
index 14ca9244b615..000000000000
--- a/arch/s390/lib/mem32.S
+++ /dev/null
@@ -1,92 +0,0 @@
1/*
2 * String handling functions.
3 *
4 * Copyright IBM Corp. 2012
5 */
6
7#include <linux/linkage.h>
8
9/*
10 * memset implementation
11 *
12 * This code corresponds to the C construct below. We do distinguish
13 * between clearing (c == 0) and setting a memory array (c != 0) simply
14 * because nearly all memset invocations in the kernel clear memory and
15 * the xc instruction is preferred in such cases.
16 *
17 * void *memset(void *s, int c, size_t n)
18 * {
19 * if (likely(c == 0))
20 * return __builtin_memset(s, 0, n);
21 * return __builtin_memset(s, c, n);
22 * }
23 */
24ENTRY(memset)
25 basr %r5,%r0
26.Lmemset_base:
27 ltr %r4,%r4
28 bzr %r14
29 ltr %r3,%r3
30 jnz .Lmemset_fill
31 ahi %r4,-1
32 lr %r3,%r4
33 srl %r3,8
34 ltr %r3,%r3
35 lr %r1,%r2
36 je .Lmemset_clear_rest
37.Lmemset_clear_loop:
38 xc 0(256,%r1),0(%r1)
39 la %r1,256(%r1)
40 brct %r3,.Lmemset_clear_loop
41.Lmemset_clear_rest:
42 ex %r4,.Lmemset_xc-.Lmemset_base(%r5)
43 br %r14
44.Lmemset_fill:
45 stc %r3,0(%r2)
46 chi %r4,1
47 lr %r1,%r2
48 ber %r14
49 ahi %r4,-2
50 lr %r3,%r4
51 srl %r3,8
52 ltr %r3,%r3
53 je .Lmemset_fill_rest
54.Lmemset_fill_loop:
55 mvc 1(256,%r1),0(%r1)
56 la %r1,256(%r1)
57 brct %r3,.Lmemset_fill_loop
58.Lmemset_fill_rest:
59 ex %r4,.Lmemset_mvc-.Lmemset_base(%r5)
60 br %r14
61.Lmemset_xc:
62 xc 0(1,%r1),0(%r1)
63.Lmemset_mvc:
64 mvc 1(1,%r1),0(%r1)
65
66/*
67 * memcpy implementation
68 *
69 * void *memcpy(void *dest, const void *src, size_t n)
70 */
71ENTRY(memcpy)
72 basr %r5,%r0
73.Lmemcpy_base:
74 ltr %r4,%r4
75 bzr %r14
76 ahi %r4,-1
77 lr %r0,%r4
78 srl %r0,8
79 ltr %r0,%r0
80 lr %r1,%r2
81 jnz .Lmemcpy_loop
82.Lmemcpy_rest:
83 ex %r4,.Lmemcpy_mvc-.Lmemcpy_base(%r5)
84 br %r14
85.Lmemcpy_loop:
86 mvc 0(256,%r1),0(%r3)
87 la %r1,256(%r1)
88 la %r3,256(%r3)
89 brct %r0,.Lmemcpy_loop
90 j .Lmemcpy_rest
91.Lmemcpy_mvc:
92 mvc 0(1,%r1),0(%r3)
diff --git a/arch/s390/lib/qrnnd.S b/arch/s390/lib/qrnnd.S
deleted file mode 100644
index d321329130ec..000000000000
--- a/arch/s390/lib/qrnnd.S
+++ /dev/null
@@ -1,78 +0,0 @@
1# S/390 __udiv_qrnnd
2
3#include <linux/linkage.h>
4
5# r2 : &__r
6# r3 : upper half of 64 bit word n
7# r4 : lower half of 64 bit word n
8# r5 : divisor d
9# the reminder r of the division is to be stored to &__r and
10# the quotient q is to be returned
11
12 .text
13ENTRY(__udiv_qrnnd)
14 st %r2,24(%r15) # store pointer to reminder for later
15 lr %r0,%r3 # reload n
16 lr %r1,%r4
17 ltr %r2,%r5 # reload and test divisor
18 jp 5f
19 # divisor >= 0x80000000
20 srdl %r0,2 # n/4
21 srl %r2,1 # d/2
22 slr %r1,%r2 # special case if last bit of d is set
23 brc 3,0f # (n/4) div (n/2) can overflow by 1
24 ahi %r0,-1 # trick: subtract n/2, then divide
250: dr %r0,%r2 # signed division
26 ahi %r1,1 # trick part 2: add 1 to the quotient
27 # now (n >> 2) = (d >> 1) * %r1 + %r0
28 lhi %r3,1
29 nr %r3,%r1 # test last bit of q
30 jz 1f
31 alr %r0,%r2 # add (d>>1) to r
321: srl %r1,1 # q >>= 1
33 # now (n >> 2) = (d&-2) * %r1 + %r0
34 lhi %r3,1
35 nr %r3,%r5 # test last bit of d
36 jz 2f
37 slr %r0,%r1 # r -= q
38 brc 3,2f # borrow ?
39 alr %r0,%r5 # r += d
40 ahi %r1,-1
412: # now (n >> 2) = d * %r1 + %r0
42 alr %r1,%r1 # q <<= 1
43 alr %r0,%r0 # r <<= 1
44 brc 12,3f # overflow on r ?
45 slr %r0,%r5 # r -= d
46 ahi %r1,1 # q += 1
473: lhi %r3,2
48 nr %r3,%r4 # test next to last bit of n
49 jz 4f
50 ahi %r0,1 # r += 1
514: clr %r0,%r5 # r >= d ?
52 jl 6f
53 slr %r0,%r5 # r -= d
54 ahi %r1,1 # q += 1
55 # now (n >> 1) = d * %r1 + %r0
56 j 6f
575: # divisor < 0x80000000
58 srdl %r0,1
59 dr %r0,%r2 # signed division
60 # now (n >> 1) = d * %r1 + %r0
616: alr %r1,%r1 # q <<= 1
62 alr %r0,%r0 # r <<= 1
63 brc 12,7f # overflow on r ?
64 slr %r0,%r5 # r -= d
65 ahi %r1,1 # q += 1
667: lhi %r3,1
67 nr %r3,%r4 # isolate last bit of n
68 alr %r0,%r3 # r += (n & 1)
69 clr %r0,%r5 # r >= d ?
70 jl 8f
71 slr %r0,%r5 # r -= d
72 ahi %r1,1 # q += 1
738: # now n = d * %r1 + %r0
74 l %r2,24(%r15)
75 st %r0,0(%r2)
76 lr %r2,%r1
77 br %r14
78 .end __udiv_qrnnd
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index 53dd5d7a0c96..4614d415bb58 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -15,20 +15,6 @@
15#include <asm/mmu_context.h> 15#include <asm/mmu_context.h>
16#include <asm/facility.h> 16#include <asm/facility.h>
17 17
18#ifndef CONFIG_64BIT
19#define AHI "ahi"
20#define ALR "alr"
21#define CLR "clr"
22#define LHI "lhi"
23#define SLR "slr"
24#else
25#define AHI "aghi"
26#define ALR "algr"
27#define CLR "clgr"
28#define LHI "lghi"
29#define SLR "slgr"
30#endif
31
32static struct static_key have_mvcos = STATIC_KEY_INIT_FALSE; 18static struct static_key have_mvcos = STATIC_KEY_INIT_FALSE;
33 19
34static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr, 20static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr,
@@ -41,29 +27,29 @@ static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr
41 asm volatile( 27 asm volatile(
42 "0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n" 28 "0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n"
43 "9: jz 7f\n" 29 "9: jz 7f\n"
44 "1:"ALR" %0,%3\n" 30 "1: algr %0,%3\n"
45 " "SLR" %1,%3\n" 31 " slgr %1,%3\n"
46 " "SLR" %2,%3\n" 32 " slgr %2,%3\n"
47 " j 0b\n" 33 " j 0b\n"
48 "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */ 34 "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */
49 " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */ 35 " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */
50 " "SLR" %4,%1\n" 36 " slgr %4,%1\n"
51 " "CLR" %0,%4\n" /* copy crosses next page boundary? */ 37 " clgr %0,%4\n" /* copy crosses next page boundary? */
52 " jnh 4f\n" 38 " jnh 4f\n"
53 "3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n" 39 "3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n"
54 "10:"SLR" %0,%4\n" 40 "10:slgr %0,%4\n"
55 " "ALR" %2,%4\n" 41 " algr %2,%4\n"
56 "4:"LHI" %4,-1\n" 42 "4: lghi %4,-1\n"
57 " "ALR" %4,%0\n" /* copy remaining size, subtract 1 */ 43 " algr %4,%0\n" /* copy remaining size, subtract 1 */
58 " bras %3,6f\n" /* memset loop */ 44 " bras %3,6f\n" /* memset loop */
59 " xc 0(1,%2),0(%2)\n" 45 " xc 0(1,%2),0(%2)\n"
60 "5: xc 0(256,%2),0(%2)\n" 46 "5: xc 0(256,%2),0(%2)\n"
61 " la %2,256(%2)\n" 47 " la %2,256(%2)\n"
62 "6:"AHI" %4,-256\n" 48 "6: aghi %4,-256\n"
63 " jnm 5b\n" 49 " jnm 5b\n"
64 " ex %4,0(%3)\n" 50 " ex %4,0(%3)\n"
65 " j 8f\n" 51 " j 8f\n"
66 "7:"SLR" %0,%0\n" 52 "7:slgr %0,%0\n"
67 "8:\n" 53 "8:\n"
68 EX_TABLE(0b,2b) EX_TABLE(3b,4b) EX_TABLE(9b,2b) EX_TABLE(10b,4b) 54 EX_TABLE(0b,2b) EX_TABLE(3b,4b) EX_TABLE(9b,2b) EX_TABLE(10b,4b)
69 : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2) 55 : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
@@ -82,32 +68,32 @@ static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
82 " sacf 0\n" 68 " sacf 0\n"
83 "0: mvcp 0(%0,%2),0(%1),%3\n" 69 "0: mvcp 0(%0,%2),0(%1),%3\n"
84 "10:jz 8f\n" 70 "10:jz 8f\n"
85 "1:"ALR" %0,%3\n" 71 "1: algr %0,%3\n"
86 " la %1,256(%1)\n" 72 " la %1,256(%1)\n"
87 " la %2,256(%2)\n" 73 " la %2,256(%2)\n"
88 "2: mvcp 0(%0,%2),0(%1),%3\n" 74 "2: mvcp 0(%0,%2),0(%1),%3\n"
89 "11:jnz 1b\n" 75 "11:jnz 1b\n"
90 " j 8f\n" 76 " j 8f\n"
91 "3: la %4,255(%1)\n" /* %4 = ptr + 255 */ 77 "3: la %4,255(%1)\n" /* %4 = ptr + 255 */
92 " "LHI" %3,-4096\n" 78 " lghi %3,-4096\n"
93 " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */ 79 " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
94 " "SLR" %4,%1\n" 80 " slgr %4,%1\n"
95 " "CLR" %0,%4\n" /* copy crosses next page boundary? */ 81 " clgr %0,%4\n" /* copy crosses next page boundary? */
96 " jnh 5f\n" 82 " jnh 5f\n"
97 "4: mvcp 0(%4,%2),0(%1),%3\n" 83 "4: mvcp 0(%4,%2),0(%1),%3\n"
98 "12:"SLR" %0,%4\n" 84 "12:slgr %0,%4\n"
99 " "ALR" %2,%4\n" 85 " algr %2,%4\n"
100 "5:"LHI" %4,-1\n" 86 "5: lghi %4,-1\n"
101 " "ALR" %4,%0\n" /* copy remaining size, subtract 1 */ 87 " algr %4,%0\n" /* copy remaining size, subtract 1 */
102 " bras %3,7f\n" /* memset loop */ 88 " bras %3,7f\n" /* memset loop */
103 " xc 0(1,%2),0(%2)\n" 89 " xc 0(1,%2),0(%2)\n"
104 "6: xc 0(256,%2),0(%2)\n" 90 "6: xc 0(256,%2),0(%2)\n"
105 " la %2,256(%2)\n" 91 " la %2,256(%2)\n"
106 "7:"AHI" %4,-256\n" 92 "7: aghi %4,-256\n"
107 " jnm 6b\n" 93 " jnm 6b\n"
108 " ex %4,0(%3)\n" 94 " ex %4,0(%3)\n"
109 " j 9f\n" 95 " j 9f\n"
110 "8:"SLR" %0,%0\n" 96 "8:slgr %0,%0\n"
111 "9: sacf 768\n" 97 "9: sacf 768\n"
112 EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,5b) 98 EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,5b)
113 EX_TABLE(10b,3b) EX_TABLE(11b,3b) EX_TABLE(12b,5b) 99 EX_TABLE(10b,3b) EX_TABLE(11b,3b) EX_TABLE(12b,5b)
@@ -134,19 +120,19 @@ static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
134 asm volatile( 120 asm volatile(
135 "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n" 121 "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
136 "6: jz 4f\n" 122 "6: jz 4f\n"
137 "1:"ALR" %0,%3\n" 123 "1: algr %0,%3\n"
138 " "SLR" %1,%3\n" 124 " slgr %1,%3\n"
139 " "SLR" %2,%3\n" 125 " slgr %2,%3\n"
140 " j 0b\n" 126 " j 0b\n"
141 "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */ 127 "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */
142 " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */ 128 " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */
143 " "SLR" %4,%1\n" 129 " slgr %4,%1\n"
144 " "CLR" %0,%4\n" /* copy crosses next page boundary? */ 130 " clgr %0,%4\n" /* copy crosses next page boundary? */
145 " jnh 5f\n" 131 " jnh 5f\n"
146 "3: .insn ss,0xc80000000000,0(%4,%1),0(%2),0\n" 132 "3: .insn ss,0xc80000000000,0(%4,%1),0(%2),0\n"
147 "7:"SLR" %0,%4\n" 133 "7: slgr %0,%4\n"
148 " j 5f\n" 134 " j 5f\n"
149 "4:"SLR" %0,%0\n" 135 "4: slgr %0,%0\n"
150 "5:\n" 136 "5:\n"
151 EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b) 137 EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
152 : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2) 138 : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
@@ -165,22 +151,22 @@ static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
165 " sacf 0\n" 151 " sacf 0\n"
166 "0: mvcs 0(%0,%1),0(%2),%3\n" 152 "0: mvcs 0(%0,%1),0(%2),%3\n"
167 "7: jz 5f\n" 153 "7: jz 5f\n"
168 "1:"ALR" %0,%3\n" 154 "1: algr %0,%3\n"
169 " la %1,256(%1)\n" 155 " la %1,256(%1)\n"
170 " la %2,256(%2)\n" 156 " la %2,256(%2)\n"
171 "2: mvcs 0(%0,%1),0(%2),%3\n" 157 "2: mvcs 0(%0,%1),0(%2),%3\n"
172 "8: jnz 1b\n" 158 "8: jnz 1b\n"
173 " j 5f\n" 159 " j 5f\n"
174 "3: la %4,255(%1)\n" /* %4 = ptr + 255 */ 160 "3: la %4,255(%1)\n" /* %4 = ptr + 255 */
175 " "LHI" %3,-4096\n" 161 " lghi %3,-4096\n"
176 " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */ 162 " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
177 " "SLR" %4,%1\n" 163 " slgr %4,%1\n"
178 " "CLR" %0,%4\n" /* copy crosses next page boundary? */ 164 " clgr %0,%4\n" /* copy crosses next page boundary? */
179 " jnh 6f\n" 165 " jnh 6f\n"
180 "4: mvcs 0(%4,%1),0(%2),%3\n" 166 "4: mvcs 0(%4,%1),0(%2),%3\n"
181 "9:"SLR" %0,%4\n" 167 "9: slgr %0,%4\n"
182 " j 6f\n" 168 " j 6f\n"
183 "5:"SLR" %0,%0\n" 169 "5: slgr %0,%0\n"
184 "6: sacf 768\n" 170 "6: sacf 768\n"
185 EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b) 171 EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
186 EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b) 172 EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
@@ -208,11 +194,11 @@ static inline unsigned long copy_in_user_mvcos(void __user *to, const void __use
208 asm volatile( 194 asm volatile(
209 "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n" 195 "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
210 " jz 2f\n" 196 " jz 2f\n"
211 "1:"ALR" %0,%3\n" 197 "1: algr %0,%3\n"
212 " "SLR" %1,%3\n" 198 " slgr %1,%3\n"
213 " "SLR" %2,%3\n" 199 " slgr %2,%3\n"
214 " j 0b\n" 200 " j 0b\n"
215 "2:"SLR" %0,%0\n" 201 "2:slgr %0,%0\n"
216 "3: \n" 202 "3: \n"
217 EX_TABLE(0b,3b) 203 EX_TABLE(0b,3b)
218 : "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2) 204 : "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2)
@@ -228,23 +214,23 @@ static inline unsigned long copy_in_user_mvc(void __user *to, const void __user
228 load_kernel_asce(); 214 load_kernel_asce();
229 asm volatile( 215 asm volatile(
230 " sacf 256\n" 216 " sacf 256\n"
231 " "AHI" %0,-1\n" 217 " aghi %0,-1\n"
232 " jo 5f\n" 218 " jo 5f\n"
233 " bras %3,3f\n" 219 " bras %3,3f\n"
234 "0:"AHI" %0,257\n" 220 "0: aghi %0,257\n"
235 "1: mvc 0(1,%1),0(%2)\n" 221 "1: mvc 0(1,%1),0(%2)\n"
236 " la %1,1(%1)\n" 222 " la %1,1(%1)\n"
237 " la %2,1(%2)\n" 223 " la %2,1(%2)\n"
238 " "AHI" %0,-1\n" 224 " aghi %0,-1\n"
239 " jnz 1b\n" 225 " jnz 1b\n"
240 " j 5f\n" 226 " j 5f\n"
241 "2: mvc 0(256,%1),0(%2)\n" 227 "2: mvc 0(256,%1),0(%2)\n"
242 " la %1,256(%1)\n" 228 " la %1,256(%1)\n"
243 " la %2,256(%2)\n" 229 " la %2,256(%2)\n"
244 "3:"AHI" %0,-256\n" 230 "3: aghi %0,-256\n"
245 " jnm 2b\n" 231 " jnm 2b\n"
246 "4: ex %0,1b-0b(%3)\n" 232 "4: ex %0,1b-0b(%3)\n"
247 "5: "SLR" %0,%0\n" 233 "5: slgr %0,%0\n"
248 "6: sacf 768\n" 234 "6: sacf 768\n"
249 EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b) 235 EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
250 : "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1) 236 : "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1)
@@ -269,18 +255,18 @@ static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size
269 asm volatile( 255 asm volatile(
270 "0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n" 256 "0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n"
271 " jz 4f\n" 257 " jz 4f\n"
272 "1:"ALR" %0,%2\n" 258 "1: algr %0,%2\n"
273 " "SLR" %1,%2\n" 259 " slgr %1,%2\n"
274 " j 0b\n" 260 " j 0b\n"
275 "2: la %3,4095(%1)\n"/* %4 = to + 4095 */ 261 "2: la %3,4095(%1)\n"/* %4 = to + 4095 */
276 " nr %3,%2\n" /* %4 = (to + 4095) & -4096 */ 262 " nr %3,%2\n" /* %4 = (to + 4095) & -4096 */
277 " "SLR" %3,%1\n" 263 " slgr %3,%1\n"
278 " "CLR" %0,%3\n" /* copy crosses next page boundary? */ 264 " clgr %0,%3\n" /* copy crosses next page boundary? */
279 " jnh 5f\n" 265 " jnh 5f\n"
280 "3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n" 266 "3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n"
281 " "SLR" %0,%3\n" 267 " slgr %0,%3\n"
282 " j 5f\n" 268 " j 5f\n"
283 "4:"SLR" %0,%0\n" 269 "4:slgr %0,%0\n"
284 "5:\n" 270 "5:\n"
285 EX_TABLE(0b,2b) EX_TABLE(3b,5b) 271 EX_TABLE(0b,2b) EX_TABLE(3b,5b)
286 : "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2) 272 : "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
@@ -295,28 +281,28 @@ static inline unsigned long clear_user_xc(void __user *to, unsigned long size)
295 load_kernel_asce(); 281 load_kernel_asce();
296 asm volatile( 282 asm volatile(
297 " sacf 256\n" 283 " sacf 256\n"
298 " "AHI" %0,-1\n" 284 " aghi %0,-1\n"
299 " jo 5f\n" 285 " jo 5f\n"
300 " bras %3,3f\n" 286 " bras %3,3f\n"
301 " xc 0(1,%1),0(%1)\n" 287 " xc 0(1,%1),0(%1)\n"
302 "0:"AHI" %0,257\n" 288 "0: aghi %0,257\n"
303 " la %2,255(%1)\n" /* %2 = ptr + 255 */ 289 " la %2,255(%1)\n" /* %2 = ptr + 255 */
304 " srl %2,12\n" 290 " srl %2,12\n"
305 " sll %2,12\n" /* %2 = (ptr + 255) & -4096 */ 291 " sll %2,12\n" /* %2 = (ptr + 255) & -4096 */
306 " "SLR" %2,%1\n" 292 " slgr %2,%1\n"
307 " "CLR" %0,%2\n" /* clear crosses next page boundary? */ 293 " clgr %0,%2\n" /* clear crosses next page boundary? */
308 " jnh 5f\n" 294 " jnh 5f\n"
309 " "AHI" %2,-1\n" 295 " aghi %2,-1\n"
310 "1: ex %2,0(%3)\n" 296 "1: ex %2,0(%3)\n"
311 " "AHI" %2,1\n" 297 " aghi %2,1\n"
312 " "SLR" %0,%2\n" 298 " slgr %0,%2\n"
313 " j 5f\n" 299 " j 5f\n"
314 "2: xc 0(256,%1),0(%1)\n" 300 "2: xc 0(256,%1),0(%1)\n"
315 " la %1,256(%1)\n" 301 " la %1,256(%1)\n"
316 "3:"AHI" %0,-256\n" 302 "3: aghi %0,-256\n"
317 " jnm 2b\n" 303 " jnm 2b\n"
318 "4: ex %0,0(%3)\n" 304 "4: ex %0,0(%3)\n"
319 "5: "SLR" %0,%0\n" 305 "5: slgr %0,%0\n"
320 "6: sacf 768\n" 306 "6: sacf 768\n"
321 EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b) 307 EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
322 : "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2) 308 : "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2)
@@ -341,12 +327,12 @@ static inline unsigned long strnlen_user_srst(const char __user *src,
341 asm volatile( 327 asm volatile(
342 " la %2,0(%1)\n" 328 " la %2,0(%1)\n"
343 " la %3,0(%0,%1)\n" 329 " la %3,0(%0,%1)\n"
344 " "SLR" %0,%0\n" 330 " slgr %0,%0\n"
345 " sacf 256\n" 331 " sacf 256\n"
346 "0: srst %3,%2\n" 332 "0: srst %3,%2\n"
347 " jo 0b\n" 333 " jo 0b\n"
348 " la %0,1(%3)\n" /* strnlen_user results includes \0 */ 334 " la %0,1(%3)\n" /* strnlen_user results includes \0 */
349 " "SLR" %0,%1\n" 335 " slgr %0,%1\n"
350 "1: sacf 768\n" 336 "1: sacf 768\n"
351 EX_TABLE(0b,1b) 337 EX_TABLE(0b,1b)
352 : "+a" (size), "+a" (src), "=a" (tmp1), "=a" (tmp2) 338 : "+a" (size), "+a" (src), "=a" (tmp1), "=a" (tmp2)
@@ -399,7 +385,7 @@ early_param("uaccess_primary", parse_uaccess_pt);
399 385
400static int __init uaccess_init(void) 386static int __init uaccess_init(void)
401{ 387{
402 if (IS_ENABLED(CONFIG_64BIT) && !uaccess_primary && test_facility(27)) 388 if (!uaccess_primary && test_facility(27))
403 static_key_slow_inc(&have_mvcos); 389 static_key_slow_inc(&have_mvcos);
404 return 0; 390 return 0;
405} 391}
diff --git a/arch/s390/lib/ucmpdi2.c b/arch/s390/lib/ucmpdi2.c
deleted file mode 100644
index 3e05ff532582..000000000000
--- a/arch/s390/lib/ucmpdi2.c
+++ /dev/null
@@ -1,26 +0,0 @@
1#include <linux/module.h>
2
3union ull_union {
4 unsigned long long ull;
5 struct {
6 unsigned int high;
7 unsigned int low;
8 } ui;
9};
10
11int __ucmpdi2(unsigned long long a, unsigned long long b)
12{
13 union ull_union au = {.ull = a};
14 union ull_union bu = {.ull = b};
15
16 if (au.ui.high < bu.ui.high)
17 return 0;
18 else if (au.ui.high > bu.ui.high)
19 return 2;
20 if (au.ui.low < bu.ui.low)
21 return 0;
22 else if (au.ui.low > bu.ui.low)
23 return 2;
24 return 1;
25}
26EXPORT_SYMBOL(__ucmpdi2);
diff --git a/arch/s390/math-emu/Makefile b/arch/s390/math-emu/Makefile
deleted file mode 100644
index 51d399549f60..000000000000
--- a/arch/s390/math-emu/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
1#
2# Makefile for the FPU instruction emulation.
3#
4
5obj-$(CONFIG_MATHEMU) := math.o
6
7ccflags-y := -I$(src) -Iinclude/math-emu -w
diff --git a/arch/s390/math-emu/math.c b/arch/s390/math-emu/math.c
deleted file mode 100644
index a6ba0d724335..000000000000
--- a/arch/s390/math-emu/math.c
+++ /dev/null
@@ -1,2255 +0,0 @@
1/*
2 * S390 version
3 * Copyright IBM Corp. 1999, 2001
4 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
5 *
6 * 'math.c' emulates IEEE instructions on a S390 processor
7 * that does not have the IEEE fpu (all processors before G5).
8 */
9
10#include <linux/types.h>
11#include <linux/sched.h>
12#include <linux/mm.h>
13#include <asm/uaccess.h>
14#include <asm/lowcore.h>
15
16#include <asm/sfp-util.h>
17#include <math-emu/soft-fp.h>
18#include <math-emu/single.h>
19#include <math-emu/double.h>
20#include <math-emu/quad.h>
21
22#define FPC_VALID_MASK 0xF8F8FF03
23
24/*
25 * I miss a macro to round a floating point number to the
26 * nearest integer in the same floating point format.
27 */
28#define _FP_TO_FPINT_ROUND(fs, wc, X) \
29 do { \
30 switch (X##_c) \
31 { \
32 case FP_CLS_NORMAL: \
33 if (X##_e > _FP_FRACBITS_##fs + _FP_EXPBIAS_##fs) \
34 { /* floating point number has no bits after the dot. */ \
35 } \
36 else if (X##_e <= _FP_FRACBITS_##fs + _FP_EXPBIAS_##fs && \
37 X##_e > _FP_EXPBIAS_##fs) \
38 { /* some bits before the dot, some after it. */ \
39 _FP_FRAC_SRS_##wc(X, _FP_WFRACBITS_##fs, \
40 X##_e - _FP_EXPBIAS_##fs \
41 + _FP_FRACBITS_##fs); \
42 _FP_ROUND(wc, X); \
43 _FP_FRAC_SLL_##wc(X, X##_e - _FP_EXPBIAS_##fs \
44 + _FP_FRACBITS_##fs); \
45 } \
46 else \
47 { /* all bits after the dot. */ \
48 FP_SET_EXCEPTION(FP_EX_INEXACT); \
49 X##_c = FP_CLS_ZERO; \
50 } \
51 break; \
52 case FP_CLS_NAN: \
53 case FP_CLS_INF: \
54 case FP_CLS_ZERO: \
55 break; \
56 } \
57 } while (0)
58
59#define FP_TO_FPINT_ROUND_S(X) _FP_TO_FPINT_ROUND(S,1,X)
60#define FP_TO_FPINT_ROUND_D(X) _FP_TO_FPINT_ROUND(D,2,X)
61#define FP_TO_FPINT_ROUND_Q(X) _FP_TO_FPINT_ROUND(Q,4,X)
62
63typedef union {
64 long double ld;
65 struct {
66 __u64 high;
67 __u64 low;
68 } w;
69} mathemu_ldcv;
70
71#ifdef CONFIG_SYSCTL
72int sysctl_ieee_emulation_warnings=1;
73#endif
74
75#define mathemu_put_user(x, p) \
76 do { \
77 if (put_user((x),(p))) \
78 return SIGSEGV; \
79 } while (0)
80
81#define mathemu_get_user(x, p) \
82 do { \
83 if (get_user((x),(p))) \
84 return SIGSEGV; \
85 } while (0)
86
87#define mathemu_copy_from_user(d, s, n)\
88 do { \
89 if (copy_from_user((d),(s),(n)) != 0) \
90 return SIGSEGV; \
91 } while (0)
92
93#define mathemu_copy_to_user(d, s, n) \
94 do { \
95 if (copy_to_user((d),(s),(n)) != 0) \
96 return SIGSEGV; \
97 } while (0)
98
99static void display_emulation_not_implemented(struct pt_regs *regs, char *instr)
100{
101 __u16 *location;
102
103#ifdef CONFIG_SYSCTL
104 if(sysctl_ieee_emulation_warnings)
105#endif
106 {
107 location = (__u16 *)(regs->psw.addr-S390_lowcore.pgm_ilc);
108 printk("%s ieee fpu instruction not emulated "
109 "process name: %s pid: %d \n",
110 instr, current->comm, current->pid);
111 printk("%s's PSW: %08lx %08lx\n", instr,
112 (unsigned long) regs->psw.mask,
113 (unsigned long) location);
114 }
115}
116
117static inline void emu_set_CC (struct pt_regs *regs, int cc)
118{
119 regs->psw.mask = (regs->psw.mask & 0xFFFFCFFF) | ((cc&3) << 12);
120}
121
122/*
123 * Set the condition code in the user psw.
124 * 0 : Result is zero
125 * 1 : Result is less than zero
126 * 2 : Result is greater than zero
127 * 3 : Result is NaN or INF
128 */
129static inline void emu_set_CC_cs(struct pt_regs *regs, int class, int sign)
130{
131 switch (class) {
132 case FP_CLS_NORMAL:
133 case FP_CLS_INF:
134 emu_set_CC(regs, sign ? 1 : 2);
135 break;
136 case FP_CLS_ZERO:
137 emu_set_CC(regs, 0);
138 break;
139 case FP_CLS_NAN:
140 emu_set_CC(regs, 3);
141 break;
142 }
143}
144
145/* Add long double */
146static int emu_axbr (struct pt_regs *regs, int rx, int ry) {
147 FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR);
148 FP_DECL_EX;
149 mathemu_ldcv cvt;
150 int mode;
151
152 mode = current->thread.fp_regs.fpc & 3;
153 cvt.w.high = current->thread.fp_regs.fprs[rx].ui;
154 cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui;
155 FP_UNPACK_QP(QA, &cvt.ld);
156 cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
157 cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
158 FP_UNPACK_QP(QB, &cvt.ld);
159 FP_ADD_Q(QR, QA, QB);
160 FP_PACK_QP(&cvt.ld, QR);
161 current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
162 current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
163 emu_set_CC_cs(regs, QR_c, QR_s);
164 return _fex;
165}
166
167/* Add double */
168static int emu_adbr (struct pt_regs *regs, int rx, int ry) {
169 FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
170 FP_DECL_EX;
171 int mode;
172
173 mode = current->thread.fp_regs.fpc & 3;
174 FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
175 FP_UNPACK_DP(DB, &current->thread.fp_regs.fprs[ry].d);
176 FP_ADD_D(DR, DA, DB);
177 FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
178 emu_set_CC_cs(regs, DR_c, DR_s);
179 return _fex;
180}
181
182/* Add double */
183static int emu_adb (struct pt_regs *regs, int rx, double *val) {
184 FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
185 FP_DECL_EX;
186 int mode;
187
188 mode = current->thread.fp_regs.fpc & 3;
189 FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
190 FP_UNPACK_DP(DB, val);
191 FP_ADD_D(DR, DA, DB);
192 FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
193 emu_set_CC_cs(regs, DR_c, DR_s);
194 return _fex;
195}
196
197/* Add float */
198static int emu_aebr (struct pt_regs *regs, int rx, int ry) {
199 FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
200 FP_DECL_EX;
201 int mode;
202
203 mode = current->thread.fp_regs.fpc & 3;
204 FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
205 FP_UNPACK_SP(SB, &current->thread.fp_regs.fprs[ry].f);
206 FP_ADD_S(SR, SA, SB);
207 FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
208 emu_set_CC_cs(regs, SR_c, SR_s);
209 return _fex;
210}
211
212/* Add float */
213static int emu_aeb (struct pt_regs *regs, int rx, float *val) {
214 FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
215 FP_DECL_EX;
216 int mode;
217
218 mode = current->thread.fp_regs.fpc & 3;
219 FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
220 FP_UNPACK_SP(SB, val);
221 FP_ADD_S(SR, SA, SB);
222 FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
223 emu_set_CC_cs(regs, SR_c, SR_s);
224 return _fex;
225}
226
227/* Compare long double */
228static int emu_cxbr (struct pt_regs *regs, int rx, int ry) {
229 FP_DECL_Q(QA); FP_DECL_Q(QB);
230 mathemu_ldcv cvt;
231 int IR;
232
233 cvt.w.high = current->thread.fp_regs.fprs[rx].ui;
234 cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui;
235 FP_UNPACK_RAW_QP(QA, &cvt.ld);
236 cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
237 cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
238 FP_UNPACK_RAW_QP(QB, &cvt.ld);
239 FP_CMP_Q(IR, QA, QB, 3);
240 /*
241 * IR == -1 if DA < DB, IR == 0 if DA == DB,
242 * IR == 1 if DA > DB and IR == 3 if unorderded
243 */
244 emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR);
245 return 0;
246}
247
248/* Compare double */
249static int emu_cdbr (struct pt_regs *regs, int rx, int ry) {
250 FP_DECL_D(DA); FP_DECL_D(DB);
251 int IR;
252
253 FP_UNPACK_RAW_DP(DA, &current->thread.fp_regs.fprs[rx].d);
254 FP_UNPACK_RAW_DP(DB, &current->thread.fp_regs.fprs[ry].d);
255 FP_CMP_D(IR, DA, DB, 3);
256 /*
257 * IR == -1 if DA < DB, IR == 0 if DA == DB,
258 * IR == 1 if DA > DB and IR == 3 if unorderded
259 */
260 emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR);
261 return 0;
262}
263
264/* Compare double */
265static int emu_cdb (struct pt_regs *regs, int rx, double *val) {
266 FP_DECL_D(DA); FP_DECL_D(DB);
267 int IR;
268
269 FP_UNPACK_RAW_DP(DA, &current->thread.fp_regs.fprs[rx].d);
270 FP_UNPACK_RAW_DP(DB, val);
271 FP_CMP_D(IR, DA, DB, 3);
272 /*
273 * IR == -1 if DA < DB, IR == 0 if DA == DB,
274 * IR == 1 if DA > DB and IR == 3 if unorderded
275 */
276 emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR);
277 return 0;
278}
279
280/* Compare float */
281static int emu_cebr (struct pt_regs *regs, int rx, int ry) {
282 FP_DECL_S(SA); FP_DECL_S(SB);
283 int IR;
284
285 FP_UNPACK_RAW_SP(SA, &current->thread.fp_regs.fprs[rx].f);
286 FP_UNPACK_RAW_SP(SB, &current->thread.fp_regs.fprs[ry].f);
287 FP_CMP_S(IR, SA, SB, 3);
288 /*
289 * IR == -1 if DA < DB, IR == 0 if DA == DB,
290 * IR == 1 if DA > DB and IR == 3 if unorderded
291 */
292 emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR);
293 return 0;
294}
295
296/* Compare float */
297static int emu_ceb (struct pt_regs *regs, int rx, float *val) {
298 FP_DECL_S(SA); FP_DECL_S(SB);
299 int IR;
300
301 FP_UNPACK_RAW_SP(SA, &current->thread.fp_regs.fprs[rx].f);
302 FP_UNPACK_RAW_SP(SB, val);
303 FP_CMP_S(IR, SA, SB, 3);
304 /*
305 * IR == -1 if DA < DB, IR == 0 if DA == DB,
306 * IR == 1 if DA > DB and IR == 3 if unorderded
307 */
308 emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR);
309 return 0;
310}
311
312/* Compare and signal long double */
313static int emu_kxbr (struct pt_regs *regs, int rx, int ry) {
314 FP_DECL_Q(QA); FP_DECL_Q(QB);
315 FP_DECL_EX;
316 mathemu_ldcv cvt;
317 int IR;
318
319 cvt.w.high = current->thread.fp_regs.fprs[rx].ui;
320 cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui;
321 FP_UNPACK_RAW_QP(QA, &cvt.ld);
322 cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
323 cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
324 FP_UNPACK_QP(QB, &cvt.ld);
325 FP_CMP_Q(IR, QA, QB, 3);
326 /*
327 * IR == -1 if DA < DB, IR == 0 if DA == DB,
328 * IR == 1 if DA > DB and IR == 3 if unorderded
329 */
330 emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR);
331 if (IR == 3)
332 FP_SET_EXCEPTION (FP_EX_INVALID);
333 return _fex;
334}
335
336/* Compare and signal double */
337static int emu_kdbr (struct pt_regs *regs, int rx, int ry) {
338 FP_DECL_D(DA); FP_DECL_D(DB);
339 FP_DECL_EX;
340 int IR;
341
342 FP_UNPACK_RAW_DP(DA, &current->thread.fp_regs.fprs[rx].d);
343 FP_UNPACK_RAW_DP(DB, &current->thread.fp_regs.fprs[ry].d);
344 FP_CMP_D(IR, DA, DB, 3);
345 /*
346 * IR == -1 if DA < DB, IR == 0 if DA == DB,
347 * IR == 1 if DA > DB and IR == 3 if unorderded
348 */
349 emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR);
350 if (IR == 3)
351 FP_SET_EXCEPTION (FP_EX_INVALID);
352 return _fex;
353}
354
355/* Compare and signal double */
356static int emu_kdb (struct pt_regs *regs, int rx, double *val) {
357 FP_DECL_D(DA); FP_DECL_D(DB);
358 FP_DECL_EX;
359 int IR;
360
361 FP_UNPACK_RAW_DP(DA, &current->thread.fp_regs.fprs[rx].d);
362 FP_UNPACK_RAW_DP(DB, val);
363 FP_CMP_D(IR, DA, DB, 3);
364 /*
365 * IR == -1 if DA < DB, IR == 0 if DA == DB,
366 * IR == 1 if DA > DB and IR == 3 if unorderded
367 */
368 emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR);
369 if (IR == 3)
370 FP_SET_EXCEPTION (FP_EX_INVALID);
371 return _fex;
372}
373
374/* Compare and signal float */
375static int emu_kebr (struct pt_regs *regs, int rx, int ry) {
376 FP_DECL_S(SA); FP_DECL_S(SB);
377 FP_DECL_EX;
378 int IR;
379
380 FP_UNPACK_RAW_SP(SA, &current->thread.fp_regs.fprs[rx].f);
381 FP_UNPACK_RAW_SP(SB, &current->thread.fp_regs.fprs[ry].f);
382 FP_CMP_S(IR, SA, SB, 3);
383 /*
384 * IR == -1 if DA < DB, IR == 0 if DA == DB,
385 * IR == 1 if DA > DB and IR == 3 if unorderded
386 */
387 emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR);
388 if (IR == 3)
389 FP_SET_EXCEPTION (FP_EX_INVALID);
390 return _fex;
391}
392
393/* Compare and signal float */
394static int emu_keb (struct pt_regs *regs, int rx, float *val) {
395 FP_DECL_S(SA); FP_DECL_S(SB);
396 FP_DECL_EX;
397 int IR;
398
399 FP_UNPACK_RAW_SP(SA, &current->thread.fp_regs.fprs[rx].f);
400 FP_UNPACK_RAW_SP(SB, val);
401 FP_CMP_S(IR, SA, SB, 3);
402 /*
403 * IR == -1 if DA < DB, IR == 0 if DA == DB,
404 * IR == 1 if DA > DB and IR == 3 if unorderded
405 */
406 emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR);
407 if (IR == 3)
408 FP_SET_EXCEPTION (FP_EX_INVALID);
409 return _fex;
410}
411
412/* Convert from fixed long double */
413static int emu_cxfbr (struct pt_regs *regs, int rx, int ry) {
414 FP_DECL_Q(QR);
415 FP_DECL_EX;
416 mathemu_ldcv cvt;
417 __s32 si;
418 int mode;
419
420 mode = current->thread.fp_regs.fpc & 3;
421 si = regs->gprs[ry];
422 FP_FROM_INT_Q(QR, si, 32, int);
423 FP_PACK_QP(&cvt.ld, QR);
424 current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
425 current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
426 return _fex;
427}
428
429/* Convert from fixed double */
430static int emu_cdfbr (struct pt_regs *regs, int rx, int ry) {
431 FP_DECL_D(DR);
432 FP_DECL_EX;
433 __s32 si;
434 int mode;
435
436 mode = current->thread.fp_regs.fpc & 3;
437 si = regs->gprs[ry];
438 FP_FROM_INT_D(DR, si, 32, int);
439 FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
440 return _fex;
441}
442
443/* Convert from fixed float */
444static int emu_cefbr (struct pt_regs *regs, int rx, int ry) {
445 FP_DECL_S(SR);
446 FP_DECL_EX;
447 __s32 si;
448 int mode;
449
450 mode = current->thread.fp_regs.fpc & 3;
451 si = regs->gprs[ry];
452 FP_FROM_INT_S(SR, si, 32, int);
453 FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
454 return _fex;
455}
456
457/* Convert to fixed long double */
458static int emu_cfxbr (struct pt_regs *regs, int rx, int ry, int mask) {
459 FP_DECL_Q(QA);
460 FP_DECL_EX;
461 mathemu_ldcv cvt;
462 __s32 si;
463 int mode;
464
465 if (mask == 0)
466 mode = current->thread.fp_regs.fpc & 3;
467 else if (mask == 1)
468 mode = FP_RND_NEAREST;
469 else
470 mode = mask - 4;
471 cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
472 cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
473 FP_UNPACK_QP(QA, &cvt.ld);
474 FP_TO_INT_ROUND_Q(si, QA, 32, 1);
475 regs->gprs[rx] = si;
476 emu_set_CC_cs(regs, QA_c, QA_s);
477 return _fex;
478}
479
480/* Convert to fixed double */
481static int emu_cfdbr (struct pt_regs *regs, int rx, int ry, int mask) {
482 FP_DECL_D(DA);
483 FP_DECL_EX;
484 __s32 si;
485 int mode;
486
487 if (mask == 0)
488 mode = current->thread.fp_regs.fpc & 3;
489 else if (mask == 1)
490 mode = FP_RND_NEAREST;
491 else
492 mode = mask - 4;
493 FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d);
494 FP_TO_INT_ROUND_D(si, DA, 32, 1);
495 regs->gprs[rx] = si;
496 emu_set_CC_cs(regs, DA_c, DA_s);
497 return _fex;
498}
499
500/* Convert to fixed float */
501static int emu_cfebr (struct pt_regs *regs, int rx, int ry, int mask) {
502 FP_DECL_S(SA);
503 FP_DECL_EX;
504 __s32 si;
505 int mode;
506
507 if (mask == 0)
508 mode = current->thread.fp_regs.fpc & 3;
509 else if (mask == 1)
510 mode = FP_RND_NEAREST;
511 else
512 mode = mask - 4;
513 FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f);
514 FP_TO_INT_ROUND_S(si, SA, 32, 1);
515 regs->gprs[rx] = si;
516 emu_set_CC_cs(regs, SA_c, SA_s);
517 return _fex;
518}
519
520/* Divide long double */
521static int emu_dxbr (struct pt_regs *regs, int rx, int ry) {
522 FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR);
523 FP_DECL_EX;
524 mathemu_ldcv cvt;
525 int mode;
526
527 mode = current->thread.fp_regs.fpc & 3;
528 cvt.w.high = current->thread.fp_regs.fprs[rx].ui;
529 cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui;
530 FP_UNPACK_QP(QA, &cvt.ld);
531 cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
532 cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
533 FP_UNPACK_QP(QB, &cvt.ld);
534 FP_DIV_Q(QR, QA, QB);
535 FP_PACK_QP(&cvt.ld, QR);
536 current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
537 current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
538 return _fex;
539}
540
541/* Divide double */
542static int emu_ddbr (struct pt_regs *regs, int rx, int ry) {
543 FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
544 FP_DECL_EX;
545 int mode;
546
547 mode = current->thread.fp_regs.fpc & 3;
548 FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
549 FP_UNPACK_DP(DB, &current->thread.fp_regs.fprs[ry].d);
550 FP_DIV_D(DR, DA, DB);
551 FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
552 return _fex;
553}
554
555/* Divide double */
556static int emu_ddb (struct pt_regs *regs, int rx, double *val) {
557 FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
558 FP_DECL_EX;
559 int mode;
560
561 mode = current->thread.fp_regs.fpc & 3;
562 FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
563 FP_UNPACK_DP(DB, val);
564 FP_DIV_D(DR, DA, DB);
565 FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
566 return _fex;
567}
568
569/* Divide float */
570static int emu_debr (struct pt_regs *regs, int rx, int ry) {
571 FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
572 FP_DECL_EX;
573 int mode;
574
575 mode = current->thread.fp_regs.fpc & 3;
576 FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
577 FP_UNPACK_SP(SB, &current->thread.fp_regs.fprs[ry].f);
578 FP_DIV_S(SR, SA, SB);
579 FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
580 return _fex;
581}
582
583/* Divide float */
584static int emu_deb (struct pt_regs *regs, int rx, float *val) {
585 FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
586 FP_DECL_EX;
587 int mode;
588
589 mode = current->thread.fp_regs.fpc & 3;
590 FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
591 FP_UNPACK_SP(SB, val);
592 FP_DIV_S(SR, SA, SB);
593 FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
594 return _fex;
595}
596
597/* Divide to integer double */
598static int emu_didbr (struct pt_regs *regs, int rx, int ry, int mask) {
599 display_emulation_not_implemented(regs, "didbr");
600 return 0;
601}
602
603/* Divide to integer float */
604static int emu_diebr (struct pt_regs *regs, int rx, int ry, int mask) {
605 display_emulation_not_implemented(regs, "diebr");
606 return 0;
607}
608
609/* Extract fpc */
610static int emu_efpc (struct pt_regs *regs, int rx, int ry) {
611 regs->gprs[rx] = current->thread.fp_regs.fpc;
612 return 0;
613}
614
615/* Load and test long double */
616static int emu_ltxbr (struct pt_regs *regs, int rx, int ry) {
617 s390_fp_regs *fp_regs = &current->thread.fp_regs;
618 mathemu_ldcv cvt;
619 FP_DECL_Q(QA);
620 FP_DECL_EX;
621
622 cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
623 cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
624 FP_UNPACK_QP(QA, &cvt.ld);
625 fp_regs->fprs[rx].ui = fp_regs->fprs[ry].ui;
626 fp_regs->fprs[rx+2].ui = fp_regs->fprs[ry+2].ui;
627 emu_set_CC_cs(regs, QA_c, QA_s);
628 return _fex;
629}
630
631/* Load and test double */
632static int emu_ltdbr (struct pt_regs *regs, int rx, int ry) {
633 s390_fp_regs *fp_regs = &current->thread.fp_regs;
634 FP_DECL_D(DA);
635 FP_DECL_EX;
636
637 FP_UNPACK_DP(DA, &fp_regs->fprs[ry].d);
638 fp_regs->fprs[rx].ui = fp_regs->fprs[ry].ui;
639 emu_set_CC_cs(regs, DA_c, DA_s);
640 return _fex;
641}
642
643/* Load and test double */
644static int emu_ltebr (struct pt_regs *regs, int rx, int ry) {
645 s390_fp_regs *fp_regs = &current->thread.fp_regs;
646 FP_DECL_S(SA);
647 FP_DECL_EX;
648
649 FP_UNPACK_SP(SA, &fp_regs->fprs[ry].f);
650 fp_regs->fprs[rx].ui = fp_regs->fprs[ry].ui;
651 emu_set_CC_cs(regs, SA_c, SA_s);
652 return _fex;
653}
654
655/* Load complement long double */
656static int emu_lcxbr (struct pt_regs *regs, int rx, int ry) {
657 FP_DECL_Q(QA); FP_DECL_Q(QR);
658 FP_DECL_EX;
659 mathemu_ldcv cvt;
660 int mode;
661
662 mode = current->thread.fp_regs.fpc & 3;
663 cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
664 cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
665 FP_UNPACK_QP(QA, &cvt.ld);
666 FP_NEG_Q(QR, QA);
667 FP_PACK_QP(&cvt.ld, QR);
668 current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
669 current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
670 emu_set_CC_cs(regs, QR_c, QR_s);
671 return _fex;
672}
673
674/* Load complement double */
675static int emu_lcdbr (struct pt_regs *regs, int rx, int ry) {
676 FP_DECL_D(DA); FP_DECL_D(DR);
677 FP_DECL_EX;
678 int mode;
679
680 mode = current->thread.fp_regs.fpc & 3;
681 FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d);
682 FP_NEG_D(DR, DA);
683 FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
684 emu_set_CC_cs(regs, DR_c, DR_s);
685 return _fex;
686}
687
688/* Load complement float */
689static int emu_lcebr (struct pt_regs *regs, int rx, int ry) {
690 FP_DECL_S(SA); FP_DECL_S(SR);
691 FP_DECL_EX;
692 int mode;
693
694 mode = current->thread.fp_regs.fpc & 3;
695 FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f);
696 FP_NEG_S(SR, SA);
697 FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
698 emu_set_CC_cs(regs, SR_c, SR_s);
699 return _fex;
700}
701
702/* Load floating point integer long double */
703static int emu_fixbr (struct pt_regs *regs, int rx, int ry, int mask) {
704 s390_fp_regs *fp_regs = &current->thread.fp_regs;
705 FP_DECL_Q(QA);
706 FP_DECL_EX;
707 mathemu_ldcv cvt;
708 __s32 si;
709 int mode;
710
711 if (mask == 0)
712 mode = fp_regs->fpc & 3;
713 else if (mask == 1)
714 mode = FP_RND_NEAREST;
715 else
716 mode = mask - 4;
717 cvt.w.high = fp_regs->fprs[ry].ui;
718 cvt.w.low = fp_regs->fprs[ry+2].ui;
719 FP_UNPACK_QP(QA, &cvt.ld);
720 FP_TO_FPINT_ROUND_Q(QA);
721 FP_PACK_QP(&cvt.ld, QA);
722 fp_regs->fprs[rx].ui = cvt.w.high;
723 fp_regs->fprs[rx+2].ui = cvt.w.low;
724 return _fex;
725}
726
727/* Load floating point integer double */
728static int emu_fidbr (struct pt_regs *regs, int rx, int ry, int mask) {
729 /* FIXME: rounding mode !! */
730 s390_fp_regs *fp_regs = &current->thread.fp_regs;
731 FP_DECL_D(DA);
732 FP_DECL_EX;
733 __s32 si;
734 int mode;
735
736 if (mask == 0)
737 mode = fp_regs->fpc & 3;
738 else if (mask == 1)
739 mode = FP_RND_NEAREST;
740 else
741 mode = mask - 4;
742 FP_UNPACK_DP(DA, &fp_regs->fprs[ry].d);
743 FP_TO_FPINT_ROUND_D(DA);
744 FP_PACK_DP(&fp_regs->fprs[rx].d, DA);
745 return _fex;
746}
747
748/* Load floating point integer float */
749static int emu_fiebr (struct pt_regs *regs, int rx, int ry, int mask) {
750 s390_fp_regs *fp_regs = &current->thread.fp_regs;
751 FP_DECL_S(SA);
752 FP_DECL_EX;
753 __s32 si;
754 int mode;
755
756 if (mask == 0)
757 mode = fp_regs->fpc & 3;
758 else if (mask == 1)
759 mode = FP_RND_NEAREST;
760 else
761 mode = mask - 4;
762 FP_UNPACK_SP(SA, &fp_regs->fprs[ry].f);
763 FP_TO_FPINT_ROUND_S(SA);
764 FP_PACK_SP(&fp_regs->fprs[rx].f, SA);
765 return _fex;
766}
767
768/* Load lengthened double to long double */
769static int emu_lxdbr (struct pt_regs *regs, int rx, int ry) {
770 FP_DECL_D(DA); FP_DECL_Q(QR);
771 FP_DECL_EX;
772 mathemu_ldcv cvt;
773 int mode;
774
775 mode = current->thread.fp_regs.fpc & 3;
776 FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d);
777 FP_CONV (Q, D, 4, 2, QR, DA);
778 FP_PACK_QP(&cvt.ld, QR);
779 current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
780 current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
781 return _fex;
782}
783
784/* Load lengthened double to long double */
785static int emu_lxdb (struct pt_regs *regs, int rx, double *val) {
786 FP_DECL_D(DA); FP_DECL_Q(QR);
787 FP_DECL_EX;
788 mathemu_ldcv cvt;
789 int mode;
790
791 mode = current->thread.fp_regs.fpc & 3;
792 FP_UNPACK_DP(DA, val);
793 FP_CONV (Q, D, 4, 2, QR, DA);
794 FP_PACK_QP(&cvt.ld, QR);
795 current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
796 current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
797 return _fex;
798}
799
800/* Load lengthened float to long double */
801static int emu_lxebr (struct pt_regs *regs, int rx, int ry) {
802 FP_DECL_S(SA); FP_DECL_Q(QR);
803 FP_DECL_EX;
804 mathemu_ldcv cvt;
805 int mode;
806
807 mode = current->thread.fp_regs.fpc & 3;
808 FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f);
809 FP_CONV (Q, S, 4, 1, QR, SA);
810 FP_PACK_QP(&cvt.ld, QR);
811 current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
812 current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
813 return _fex;
814}
815
816/* Load lengthened float to long double */
817static int emu_lxeb (struct pt_regs *regs, int rx, float *val) {
818 FP_DECL_S(SA); FP_DECL_Q(QR);
819 FP_DECL_EX;
820 mathemu_ldcv cvt;
821 int mode;
822
823 mode = current->thread.fp_regs.fpc & 3;
824 FP_UNPACK_SP(SA, val);
825 FP_CONV (Q, S, 4, 1, QR, SA);
826 FP_PACK_QP(&cvt.ld, QR);
827 current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
828 current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
829 return _fex;
830}
831
832/* Load lengthened float to double */
833static int emu_ldebr (struct pt_regs *regs, int rx, int ry) {
834 FP_DECL_S(SA); FP_DECL_D(DR);
835 FP_DECL_EX;
836 int mode;
837
838 mode = current->thread.fp_regs.fpc & 3;
839 FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f);
840 FP_CONV (D, S, 2, 1, DR, SA);
841 FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
842 return _fex;
843}
844
845/* Load lengthened float to double */
846static int emu_ldeb (struct pt_regs *regs, int rx, float *val) {
847 FP_DECL_S(SA); FP_DECL_D(DR);
848 FP_DECL_EX;
849 int mode;
850
851 mode = current->thread.fp_regs.fpc & 3;
852 FP_UNPACK_SP(SA, val);
853 FP_CONV (D, S, 2, 1, DR, SA);
854 FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
855 return _fex;
856}
857
858/* Load negative long double */
859static int emu_lnxbr (struct pt_regs *regs, int rx, int ry) {
860 FP_DECL_Q(QA); FP_DECL_Q(QR);
861 FP_DECL_EX;
862 mathemu_ldcv cvt;
863 int mode;
864
865 mode = current->thread.fp_regs.fpc & 3;
866 cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
867 cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
868 FP_UNPACK_QP(QA, &cvt.ld);
869 if (QA_s == 0) {
870 FP_NEG_Q(QR, QA);
871 FP_PACK_QP(&cvt.ld, QR);
872 current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
873 current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
874 } else {
875 current->thread.fp_regs.fprs[rx].ui =
876 current->thread.fp_regs.fprs[ry].ui;
877 current->thread.fp_regs.fprs[rx+2].ui =
878 current->thread.fp_regs.fprs[ry+2].ui;
879 }
880 emu_set_CC_cs(regs, QR_c, QR_s);
881 return _fex;
882}
883
884/* Load negative double */
885static int emu_lndbr (struct pt_regs *regs, int rx, int ry) {
886 FP_DECL_D(DA); FP_DECL_D(DR);
887 FP_DECL_EX;
888 int mode;
889
890 mode = current->thread.fp_regs.fpc & 3;
891 FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d);
892 if (DA_s == 0) {
893 FP_NEG_D(DR, DA);
894 FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
895 } else
896 current->thread.fp_regs.fprs[rx].ui =
897 current->thread.fp_regs.fprs[ry].ui;
898 emu_set_CC_cs(regs, DR_c, DR_s);
899 return _fex;
900}
901
902/* Load negative float */
903static int emu_lnebr (struct pt_regs *regs, int rx, int ry) {
904 FP_DECL_S(SA); FP_DECL_S(SR);
905 FP_DECL_EX;
906 int mode;
907
908 mode = current->thread.fp_regs.fpc & 3;
909 FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f);
910 if (SA_s == 0) {
911 FP_NEG_S(SR, SA);
912 FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
913 } else
914 current->thread.fp_regs.fprs[rx].ui =
915 current->thread.fp_regs.fprs[ry].ui;
916 emu_set_CC_cs(regs, SR_c, SR_s);
917 return _fex;
918}
919
920/* Load positive long double */
921static int emu_lpxbr (struct pt_regs *regs, int rx, int ry) {
922 FP_DECL_Q(QA); FP_DECL_Q(QR);
923 FP_DECL_EX;
924 mathemu_ldcv cvt;
925 int mode;
926
927 mode = current->thread.fp_regs.fpc & 3;
928 cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
929 cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
930 FP_UNPACK_QP(QA, &cvt.ld);
931 if (QA_s != 0) {
932 FP_NEG_Q(QR, QA);
933 FP_PACK_QP(&cvt.ld, QR);
934 current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
935 current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
936 } else{
937 current->thread.fp_regs.fprs[rx].ui =
938 current->thread.fp_regs.fprs[ry].ui;
939 current->thread.fp_regs.fprs[rx+2].ui =
940 current->thread.fp_regs.fprs[ry+2].ui;
941 }
942 emu_set_CC_cs(regs, QR_c, QR_s);
943 return _fex;
944}
945
946/* Load positive double */
947static int emu_lpdbr (struct pt_regs *regs, int rx, int ry) {
948 FP_DECL_D(DA); FP_DECL_D(DR);
949 FP_DECL_EX;
950 int mode;
951
952 mode = current->thread.fp_regs.fpc & 3;
953 FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d);
954 if (DA_s != 0) {
955 FP_NEG_D(DR, DA);
956 FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
957 } else
958 current->thread.fp_regs.fprs[rx].ui =
959 current->thread.fp_regs.fprs[ry].ui;
960 emu_set_CC_cs(regs, DR_c, DR_s);
961 return _fex;
962}
963
964/* Load positive float */
965static int emu_lpebr (struct pt_regs *regs, int rx, int ry) {
966 FP_DECL_S(SA); FP_DECL_S(SR);
967 FP_DECL_EX;
968 int mode;
969
970 mode = current->thread.fp_regs.fpc & 3;
971 FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f);
972 if (SA_s != 0) {
973 FP_NEG_S(SR, SA);
974 FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
975 } else
976 current->thread.fp_regs.fprs[rx].ui =
977 current->thread.fp_regs.fprs[ry].ui;
978 emu_set_CC_cs(regs, SR_c, SR_s);
979 return _fex;
980}
981
982/* Load rounded long double to double */
983static int emu_ldxbr (struct pt_regs *regs, int rx, int ry) {
984 FP_DECL_Q(QA); FP_DECL_D(DR);
985 FP_DECL_EX;
986 mathemu_ldcv cvt;
987 int mode;
988
989 mode = current->thread.fp_regs.fpc & 3;
990 cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
991 cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
992 FP_UNPACK_QP(QA, &cvt.ld);
993 FP_CONV (D, Q, 2, 4, DR, QA);
994 FP_PACK_DP(&current->thread.fp_regs.fprs[rx].f, DR);
995 return _fex;
996}
997
998/* Load rounded long double to float */
999static int emu_lexbr (struct pt_regs *regs, int rx, int ry) {
1000 FP_DECL_Q(QA); FP_DECL_S(SR);
1001 FP_DECL_EX;
1002 mathemu_ldcv cvt;
1003 int mode;
1004
1005 mode = current->thread.fp_regs.fpc & 3;
1006 cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
1007 cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
1008 FP_UNPACK_QP(QA, &cvt.ld);
1009 FP_CONV (S, Q, 1, 4, SR, QA);
1010 FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
1011 return _fex;
1012}
1013
1014/* Load rounded double to float */
1015static int emu_ledbr (struct pt_regs *regs, int rx, int ry) {
1016 FP_DECL_D(DA); FP_DECL_S(SR);
1017 FP_DECL_EX;
1018 int mode;
1019
1020 mode = current->thread.fp_regs.fpc & 3;
1021 FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d);
1022 FP_CONV (S, D, 1, 2, SR, DA);
1023 FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
1024 return _fex;
1025}
1026
1027/* Multiply long double */
1028static int emu_mxbr (struct pt_regs *regs, int rx, int ry) {
1029 FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR);
1030 FP_DECL_EX;
1031 mathemu_ldcv cvt;
1032 int mode;
1033
1034 mode = current->thread.fp_regs.fpc & 3;
1035 cvt.w.high = current->thread.fp_regs.fprs[rx].ui;
1036 cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui;
1037 FP_UNPACK_QP(QA, &cvt.ld);
1038 cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
1039 cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
1040 FP_UNPACK_QP(QB, &cvt.ld);
1041 FP_MUL_Q(QR, QA, QB);
1042 FP_PACK_QP(&cvt.ld, QR);
1043 current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
1044 current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
1045 return _fex;
1046}
1047
 1048/* Multiply double (MDBR): fprs[rx].d *= fprs[ry].d; returns raised IEEE exception flags */
 1049static int emu_mdbr (struct pt_regs *regs, int rx, int ry) {
 1050 FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
 1051 FP_DECL_EX;
 1052 int mode;
 1053
 /* FPC rounding-mode bits; used by the soft-fp macros (FP_ROUNDMODE presumably expands to 'mode' - do not remove) */
 1054 mode = current->thread.fp_regs.fpc & 3;
 1055 FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
 1056 FP_UNPACK_DP(DB, &current->thread.fp_regs.fprs[ry].d);
 1057 FP_MUL_D(DR, DA, DB);
 1058 FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
 1059 return _fex;
 1060}
1061
 1062/* Multiply double (MDB): fprs[rx].d *= *val (memory operand); returns raised IEEE exception flags */
 1063static int emu_mdb (struct pt_regs *regs, int rx, double *val) {
 1064 FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
 1065 FP_DECL_EX;
 1066 int mode;
 1067
 /* FPC rounding-mode bits; used by the soft-fp macros (FP_ROUNDMODE presumably expands to 'mode' - do not remove) */
 1068 mode = current->thread.fp_regs.fpc & 3;
 1069 FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
 1070 FP_UNPACK_DP(DB, val);
 1071 FP_MUL_D(DR, DA, DB);
 1072 FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
 1073 return _fex;
 1074}
1075
 1076/* Multiply double to long double (MXDBR): fprs[rx:rx+2] = fprs[rx].d * fprs[ry].d widened to 128 bit */
 1077static int emu_mxdbr (struct pt_regs *regs, int rx, int ry) {
 1078 FP_DECL_D(DA); FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR);
 1079 FP_DECL_EX;
 1080 mathemu_ldcv cvt;
 1081 int mode;
 1082
 /* FPC rounding-mode bits; used by the soft-fp macros (FP_ROUNDMODE presumably expands to 'mode' - do not remove) */
 1083 mode = current->thread.fp_regs.fpc & 3;
 /* widen both double operands to 128 bit before multiplying */
 1084 FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
 1085 FP_CONV (Q, D, 4, 2, QA, DA);
 1086 FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d);
 1087 FP_CONV (Q, D, 4, 2, QB, DA);
 1088 FP_MUL_Q(QR, QA, QB);
 1089 FP_PACK_QP(&cvt.ld, QR);
 /* 128-bit result occupies the register pair (rx, rx+2) */
 1090 current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
 1091 current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
 1092 return _fex;
 1093}
1094
 1095/* Multiply double to long double (MXDB, memory operand): fprs[rx:rx+2] *= *val */
 1096static int emu_mxdb (struct pt_regs *regs, int rx, long double *val) {
 1097 FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR);
 1098 FP_DECL_EX;
 1099 mathemu_ldcv cvt;
 1100 int mode;
 1101
 /* FPC rounding-mode bits; used by the soft-fp macros (FP_ROUNDMODE presumably expands to 'mode' - do not remove) */
 1102 mode = current->thread.fp_regs.fpc & 3;
 /* 128-bit operand lives in the register pair (rx, rx+2); *val was pre-widened by the caller */
 1103 cvt.w.high = current->thread.fp_regs.fprs[rx].ui;
 1104 cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui;
 1105 FP_UNPACK_QP(QA, &cvt.ld);
 1106 FP_UNPACK_QP(QB, val);
 1107 FP_MUL_Q(QR, QA, QB);
 1108 FP_PACK_QP(&cvt.ld, QR);
 1109 current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
 1110 current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
 1111 return _fex;
 1112}
1113
 1114/* Multiply float (MEEBR): fprs[rx].f *= fprs[ry].f; returns raised IEEE exception flags */
 1115static int emu_meebr (struct pt_regs *regs, int rx, int ry) {
 1116 FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
 1117 FP_DECL_EX;
 1118 int mode;
 1119
 /* FPC rounding-mode bits; used by the soft-fp macros (FP_ROUNDMODE presumably expands to 'mode' - do not remove) */
 1120 mode = current->thread.fp_regs.fpc & 3;
 1121 FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
 1122 FP_UNPACK_SP(SB, &current->thread.fp_regs.fprs[ry].f);
 1123 FP_MUL_S(SR, SA, SB);
 1124 FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
 1125 return _fex;
 1126}
1127
 1128/* Multiply float (MEEB): fprs[rx].f *= *val (memory operand); returns raised IEEE exception flags */
 1129static int emu_meeb (struct pt_regs *regs, int rx, float *val) {
 1130 FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
 1131 FP_DECL_EX;
 1132 int mode;
 1133
 /* FPC rounding-mode bits; used by the soft-fp macros (FP_ROUNDMODE presumably expands to 'mode' - do not remove) */
 1134 mode = current->thread.fp_regs.fpc & 3;
 1135 FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
 1136 FP_UNPACK_SP(SB, val);
 1137 FP_MUL_S(SR, SA, SB);
 1138 FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
 1139 return _fex;
 1140}
1141
 1142/* Multiply float to double (MDEBR): fprs[rx].d = fprs[rx].f * fprs[ry].f widened to 64 bit */
 1143static int emu_mdebr (struct pt_regs *regs, int rx, int ry) {
 1144 FP_DECL_S(SA); FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
 1145 FP_DECL_EX;
 1146 int mode;
 1147
 /* FPC rounding-mode bits; used by the soft-fp macros (FP_ROUNDMODE presumably expands to 'mode' - do not remove) */
 1148 mode = current->thread.fp_regs.fpc & 3;
 /* widen both float operands to double before multiplying */
 1149 FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
 1150 FP_CONV (D, S, 2, 1, DA, SA);
 1151 FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f);
 1152 FP_CONV (D, S, 2, 1, DB, SA);
 1153 FP_MUL_D(DR, DA, DB);
 1154 FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
 1155 return _fex;
 1156}
1157
 1158/* Multiply float to double (MDEB, memory operand): fprs[rx].d = fprs[rx].f * *val widened to 64 bit */
 1159static int emu_mdeb (struct pt_regs *regs, int rx, float *val) {
 1160 FP_DECL_S(SA); FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
 1161 FP_DECL_EX;
 1162 int mode;
 1163
 /* FPC rounding-mode bits; used by the soft-fp macros (FP_ROUNDMODE presumably expands to 'mode' - do not remove) */
 1164 mode = current->thread.fp_regs.fpc & 3;
 /* widen both float operands to double before multiplying */
 1165 FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
 1166 FP_CONV (D, S, 2, 1, DA, SA);
 1167 FP_UNPACK_SP(SA, val);
 1168 FP_CONV (D, S, 2, 1, DB, SA);
 1169 FP_MUL_D(DR, DA, DB);
 1170 FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
 1171 return _fex;
 1172}
1173
 1174/* Multiply and add double (MADBR): fprs[rz].d += fprs[rx].d * fprs[ry].d */
 1175static int emu_madbr (struct pt_regs *regs, int rx, int ry, int rz) {
 1176 FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DC); FP_DECL_D(DR);
 1177 FP_DECL_EX;
 1178 int mode;
 1179
 /* FPC rounding-mode bits; used by the soft-fp macros (FP_ROUNDMODE presumably expands to 'mode' - do not remove) */
 1180 mode = current->thread.fp_regs.fpc & 3;
 1181 FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
 1182 FP_UNPACK_DP(DB, &current->thread.fp_regs.fprs[ry].d);
 1183 FP_UNPACK_DP(DC, &current->thread.fp_regs.fprs[rz].d);
 /* note: emulated as separate multiply and add (two roundings) */
 1184 FP_MUL_D(DR, DA, DB);
 1185 FP_ADD_D(DR, DR, DC);
 1186 FP_PACK_DP(&current->thread.fp_regs.fprs[rz].d, DR);
 1187 return _fex;
 1188}
1189
 1190/* Multiply and add double (MADB, memory operand): fprs[rz].d += fprs[rx].d * *val */
 1191static int emu_madb (struct pt_regs *regs, int rx, double *val, int rz) {
 1192 FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DC); FP_DECL_D(DR);
 1193 FP_DECL_EX;
 1194 int mode;
 1195
 /* FPC rounding-mode bits; used by the soft-fp macros (FP_ROUNDMODE presumably expands to 'mode' - do not remove) */
 1196 mode = current->thread.fp_regs.fpc & 3;
 1197 FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
 1198 FP_UNPACK_DP(DB, val);
 1199 FP_UNPACK_DP(DC, &current->thread.fp_regs.fprs[rz].d);
 /* note: emulated as separate multiply and add (two roundings) */
 1200 FP_MUL_D(DR, DA, DB);
 1201 FP_ADD_D(DR, DR, DC);
 1202 FP_PACK_DP(&current->thread.fp_regs.fprs[rz].d, DR);
 1203 return _fex;
 1204}
1205
 1206/* Multiply and add float (MAEBR): fprs[rz].f += fprs[rx].f * fprs[ry].f */
 1207static int emu_maebr (struct pt_regs *regs, int rx, int ry, int rz) {
 1208 FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SC); FP_DECL_S(SR);
 1209 FP_DECL_EX;
 1210 int mode;
 1211
 /* FPC rounding-mode bits; used by the soft-fp macros (FP_ROUNDMODE presumably expands to 'mode' - do not remove) */
 1212 mode = current->thread.fp_regs.fpc & 3;
 1213 FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
 1214 FP_UNPACK_SP(SB, &current->thread.fp_regs.fprs[ry].f);
 1215 FP_UNPACK_SP(SC, &current->thread.fp_regs.fprs[rz].f);
 /* note: emulated as separate multiply and add (two roundings) */
 1216 FP_MUL_S(SR, SA, SB);
 1217 FP_ADD_S(SR, SR, SC);
 1218 FP_PACK_SP(&current->thread.fp_regs.fprs[rz].f, SR);
 1219 return _fex;
 1220}
1221
 1222/* Multiply and add float (MAEB, memory operand): fprs[rz].f += fprs[rx].f * *val */
 1223static int emu_maeb (struct pt_regs *regs, int rx, float *val, int rz) {
 1224 FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SC); FP_DECL_S(SR);
 1225 FP_DECL_EX;
 1226 int mode;
 1227
 /* FPC rounding-mode bits; used by the soft-fp macros (FP_ROUNDMODE presumably expands to 'mode' - do not remove) */
 1228 mode = current->thread.fp_regs.fpc & 3;
 1229 FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
 1230 FP_UNPACK_SP(SB, val);
 1231 FP_UNPACK_SP(SC, &current->thread.fp_regs.fprs[rz].f);
 /* note: emulated as separate multiply and add (two roundings) */
 1232 FP_MUL_S(SR, SA, SB);
 1233 FP_ADD_S(SR, SR, SC);
 1234 FP_PACK_SP(&current->thread.fp_regs.fprs[rz].f, SR);
 1235 return _fex;
 1236}
1237
 1238/* Multiply and subtract double (MSDBR): fprs[rz].d = fprs[rx].d * fprs[ry].d - fprs[rz].d */
 1239static int emu_msdbr (struct pt_regs *regs, int rx, int ry, int rz) {
 1240 FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DC); FP_DECL_D(DR);
 1241 FP_DECL_EX;
 1242 int mode;
 1243
 /* FPC rounding-mode bits; used by the soft-fp macros (FP_ROUNDMODE presumably expands to 'mode' - do not remove) */
 1244 mode = current->thread.fp_regs.fpc & 3;
 1245 FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
 1246 FP_UNPACK_DP(DB, &current->thread.fp_regs.fprs[ry].d);
 1247 FP_UNPACK_DP(DC, &current->thread.fp_regs.fprs[rz].d);
 /* note: emulated as separate multiply and subtract (two roundings) */
 1248 FP_MUL_D(DR, DA, DB);
 1249 FP_SUB_D(DR, DR, DC);
 1250 FP_PACK_DP(&current->thread.fp_regs.fprs[rz].d, DR);
 1251 return _fex;
 1252}
1253
 1254/* Multiply and subtract double (MSDB, memory operand): fprs[rz].d = fprs[rx].d * *val - fprs[rz].d */
 1255static int emu_msdb (struct pt_regs *regs, int rx, double *val, int rz) {
 1256 FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DC); FP_DECL_D(DR);
 1257 FP_DECL_EX;
 1258 int mode;
 1259
 /* FPC rounding-mode bits; used by the soft-fp macros (FP_ROUNDMODE presumably expands to 'mode' - do not remove) */
 1260 mode = current->thread.fp_regs.fpc & 3;
 1261 FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
 1262 FP_UNPACK_DP(DB, val);
 1263 FP_UNPACK_DP(DC, &current->thread.fp_regs.fprs[rz].d);
 /* note: emulated as separate multiply and subtract (two roundings) */
 1264 FP_MUL_D(DR, DA, DB);
 1265 FP_SUB_D(DR, DR, DC);
 1266 FP_PACK_DP(&current->thread.fp_regs.fprs[rz].d, DR);
 1267 return _fex;
 1268}
1269
 1270/* Multiply and subtract float (MSEBR): fprs[rz].f = fprs[rx].f * fprs[ry].f - fprs[rz].f */
 1271static int emu_msebr (struct pt_regs *regs, int rx, int ry, int rz) {
 1272 FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SC); FP_DECL_S(SR);
 1273 FP_DECL_EX;
 1274 int mode;
 1275
 /* FPC rounding-mode bits; used by the soft-fp macros (FP_ROUNDMODE presumably expands to 'mode' - do not remove) */
 1276 mode = current->thread.fp_regs.fpc & 3;
 1277 FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
 1278 FP_UNPACK_SP(SB, &current->thread.fp_regs.fprs[ry].f);
 1279 FP_UNPACK_SP(SC, &current->thread.fp_regs.fprs[rz].f);
 /* note: emulated as separate multiply and subtract (two roundings) */
 1280 FP_MUL_S(SR, SA, SB);
 1281 FP_SUB_S(SR, SR, SC);
 1282 FP_PACK_SP(&current->thread.fp_regs.fprs[rz].f, SR);
 1283 return _fex;
 1284}
1285
 1286/* Multiply and subtract float (MSEB, memory operand): fprs[rz].f = fprs[rx].f * *val - fprs[rz].f */
 1287static int emu_mseb (struct pt_regs *regs, int rx, float *val, int rz) {
 1288 FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SC); FP_DECL_S(SR);
 1289 FP_DECL_EX;
 1290 int mode;
 1291
 /* FPC rounding-mode bits; used by the soft-fp macros (FP_ROUNDMODE presumably expands to 'mode' - do not remove) */
 1292 mode = current->thread.fp_regs.fpc & 3;
 1293 FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
 1294 FP_UNPACK_SP(SB, val);
 1295 FP_UNPACK_SP(SC, &current->thread.fp_regs.fprs[rz].f);
 /* note: emulated as separate multiply and subtract (two roundings) */
 1296 FP_MUL_S(SR, SA, SB);
 1297 FP_SUB_S(SR, SR, SC);
 1298 FP_PACK_SP(&current->thread.fp_regs.fprs[rz].f, SR);
 1299 return _fex;
 1300}
1301
1302/* Set floating point control word */
1303static int emu_sfpc (struct pt_regs *regs, int rx, int ry) {
1304 __u32 temp;
1305
1306 temp = regs->gprs[rx];
1307 if ((temp & ~FPC_VALID_MASK) != 0)
1308 return SIGILL;
1309 current->thread.fp_regs.fpc = temp;
1310 return 0;
1311}
1312
 1313/* Square root long double (SQXBR): fprs[rx:rx+2] = sqrt(fprs[ry:ry+2]); also sets the condition code */
 1314static int emu_sqxbr (struct pt_regs *regs, int rx, int ry) {
 1315 FP_DECL_Q(QA); FP_DECL_Q(QR);
 1316 FP_DECL_EX;
 1317 mathemu_ldcv cvt;
 1318 int mode;
 1319
 /* FPC rounding-mode bits; used by the soft-fp macros (FP_ROUNDMODE presumably expands to 'mode' - do not remove) */
 1320 mode = current->thread.fp_regs.fpc & 3;
 /* 128-bit operands live in register pairs (r, r+2) */
 1321 cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
 1322 cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
 1323 FP_UNPACK_QP(QA, &cvt.ld);
 1324 FP_SQRT_Q(QR, QA);
 1325 FP_PACK_QP(&cvt.ld, QR);
 1326 current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
 1327 current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
 /* derive the PSW condition code from the result's class and sign */
 1328 emu_set_CC_cs(regs, QR_c, QR_s);
 1329 return _fex;
 1330}
1331
 1332/* Square root double (SQDBR): fprs[rx].d = sqrt(fprs[ry].d); also sets the condition code */
 1333static int emu_sqdbr (struct pt_regs *regs, int rx, int ry) {
 1334 FP_DECL_D(DA); FP_DECL_D(DR);
 1335 FP_DECL_EX;
 1336 int mode;
 1337
 /* FPC rounding-mode bits; used by the soft-fp macros (FP_ROUNDMODE presumably expands to 'mode' - do not remove) */
 1338 mode = current->thread.fp_regs.fpc & 3;
 1339 FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d);
 1340 FP_SQRT_D(DR, DA);
 1341 FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
 /* derive the PSW condition code from the result's class and sign */
 1342 emu_set_CC_cs(regs, DR_c, DR_s);
 1343 return _fex;
 1344}
1345
 1346/* Square root double (SQDB, memory operand): fprs[rx].d = sqrt(*val); also sets the condition code */
 1347static int emu_sqdb (struct pt_regs *regs, int rx, double *val) {
 1348 FP_DECL_D(DA); FP_DECL_D(DR);
 1349 FP_DECL_EX;
 1350 int mode;
 1351
 /* FPC rounding-mode bits; used by the soft-fp macros (FP_ROUNDMODE presumably expands to 'mode' - do not remove) */
 1352 mode = current->thread.fp_regs.fpc & 3;
 1353 FP_UNPACK_DP(DA, val);
 1354 FP_SQRT_D(DR, DA);
 1355 FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
 /* derive the PSW condition code from the result's class and sign */
 1356 emu_set_CC_cs(regs, DR_c, DR_s);
 1357 return _fex;
 1358}
1359
 1360/* Square root float (SQEBR): fprs[rx].f = sqrt(fprs[ry].f); also sets the condition code */
 1361static int emu_sqebr (struct pt_regs *regs, int rx, int ry) {
 1362 FP_DECL_S(SA); FP_DECL_S(SR);
 1363 FP_DECL_EX;
 1364 int mode;
 1365
 /* FPC rounding-mode bits; used by the soft-fp macros (FP_ROUNDMODE presumably expands to 'mode' - do not remove) */
 1366 mode = current->thread.fp_regs.fpc & 3;
 1367 FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f);
 1368 FP_SQRT_S(SR, SA);
 1369 FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
 /* derive the PSW condition code from the result's class and sign */
 1370 emu_set_CC_cs(regs, SR_c, SR_s);
 1371 return _fex;
 1372}
1373
 1374/* Square root float (SQEB, memory operand): fprs[rx].f = sqrt(*val); also sets the condition code */
 1375static int emu_sqeb (struct pt_regs *regs, int rx, float *val) {
 1376 FP_DECL_S(SA); FP_DECL_S(SR);
 1377 FP_DECL_EX;
 1378 int mode;
 1379
 /* FPC rounding-mode bits; used by the soft-fp macros (FP_ROUNDMODE presumably expands to 'mode' - do not remove) */
 1380 mode = current->thread.fp_regs.fpc & 3;
 1381 FP_UNPACK_SP(SA, val);
 1382 FP_SQRT_S(SR, SA);
 1383 FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
 /* derive the PSW condition code from the result's class and sign */
 1384 emu_set_CC_cs(regs, SR_c, SR_s);
 1385 return _fex;
 1386}
1387
 1388/* Subtract long double (SXBR): fprs[rx:rx+2] -= fprs[ry:ry+2]; also sets the condition code */
 1389static int emu_sxbr (struct pt_regs *regs, int rx, int ry) {
 1390 FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR);
 1391 FP_DECL_EX;
 1392 mathemu_ldcv cvt;
 1393 int mode;
 1394
 /* FPC rounding-mode bits; used by the soft-fp macros (FP_ROUNDMODE presumably expands to 'mode' - do not remove) */
 1395 mode = current->thread.fp_regs.fpc & 3;
 /* 128-bit operands live in register pairs (r, r+2) */
 1396 cvt.w.high = current->thread.fp_regs.fprs[rx].ui;
 1397 cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui;
 1398 FP_UNPACK_QP(QA, &cvt.ld);
 1399 cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
 1400 cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
 1401 FP_UNPACK_QP(QB, &cvt.ld);
 1402 FP_SUB_Q(QR, QA, QB);
 1403 FP_PACK_QP(&cvt.ld, QR);
 1404 current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
 1405 current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
 /* derive the PSW condition code from the result's class and sign */
 1406 emu_set_CC_cs(regs, QR_c, QR_s);
 1407 return _fex;
 1408}
1409
 1410/* Subtract double (SDBR): fprs[rx].d -= fprs[ry].d; also sets the condition code */
 1411static int emu_sdbr (struct pt_regs *regs, int rx, int ry) {
 1412 FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
 1413 FP_DECL_EX;
 1414 int mode;
 1415
 /* FPC rounding-mode bits; used by the soft-fp macros (FP_ROUNDMODE presumably expands to 'mode' - do not remove) */
 1416 mode = current->thread.fp_regs.fpc & 3;
 1417 FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
 1418 FP_UNPACK_DP(DB, &current->thread.fp_regs.fprs[ry].d);
 1419 FP_SUB_D(DR, DA, DB);
 1420 FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
 /* derive the PSW condition code from the result's class and sign */
 1421 emu_set_CC_cs(regs, DR_c, DR_s);
 1422 return _fex;
 1423}
1424
 1425/* Subtract double (SDB, memory operand): fprs[rx].d -= *val; also sets the condition code */
 1426static int emu_sdb (struct pt_regs *regs, int rx, double *val) {
 1427 FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
 1428 FP_DECL_EX;
 1429 int mode;
 1430
 /* FPC rounding-mode bits; used by the soft-fp macros (FP_ROUNDMODE presumably expands to 'mode' - do not remove) */
 1431 mode = current->thread.fp_regs.fpc & 3;
 1432 FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
 1433 FP_UNPACK_DP(DB, val);
 1434 FP_SUB_D(DR, DA, DB);
 1435 FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
 /* derive the PSW condition code from the result's class and sign */
 1436 emu_set_CC_cs(regs, DR_c, DR_s);
 1437 return _fex;
 1438}
1439
 1440/* Subtract float (SEBR): fprs[rx].f -= fprs[ry].f; also sets the condition code */
 1441static int emu_sebr (struct pt_regs *regs, int rx, int ry) {
 1442 FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
 1443 FP_DECL_EX;
 1444 int mode;
 1445
 /* FPC rounding-mode bits; used by the soft-fp macros (FP_ROUNDMODE presumably expands to 'mode' - do not remove) */
 1446 mode = current->thread.fp_regs.fpc & 3;
 1447 FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
 1448 FP_UNPACK_SP(SB, &current->thread.fp_regs.fprs[ry].f);
 1449 FP_SUB_S(SR, SA, SB);
 1450 FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
 /* derive the PSW condition code from the result's class and sign */
 1451 emu_set_CC_cs(regs, SR_c, SR_s);
 1452 return _fex;
 1453}
1454
 1455/* Subtract float (SEB, memory operand): fprs[rx].f -= *val; also sets the condition code */
 1456static int emu_seb (struct pt_regs *regs, int rx, float *val) {
 1457 FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
 1458 FP_DECL_EX;
 1459 int mode;
 1460
 /* FPC rounding-mode bits; used by the soft-fp macros (FP_ROUNDMODE presumably expands to 'mode' - do not remove) */
 1461 mode = current->thread.fp_regs.fpc & 3;
 1462 FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
 1463 FP_UNPACK_SP(SB, val);
 1464 FP_SUB_S(SR, SA, SB);
 1465 FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
 /* derive the PSW condition code from the result's class and sign */
 1466 emu_set_CC_cs(regs, SR_c, SR_s);
 1467 return _fex;
 1468}
1469
 1470/* Test data class long double (TCXB): classify fprs[rx:rx+2] and set the CC to the selected bit of the class mask 'val' */
 1471static int emu_tcxb (struct pt_regs *regs, int rx, long val) {
 1472 FP_DECL_Q(QA);
 1473 mathemu_ldcv cvt;
 1474 int bit;
 1475
 /* raw (unnormalized) unpack - we only inspect exponent/fraction fields */
 1476 cvt.w.high = current->thread.fp_regs.fprs[rx].ui;
 1477 cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui;
 1478 FP_UNPACK_RAW_QP(QA, &cvt.ld);
 1479 switch (QA_e) {
 1480 default:
 1481 bit = 8; /* normalized number */
 1482 break;
 1483 case 0:
 1484 if (_FP_FRAC_ZEROP_4(QA))
 1485 bit = 10; /* zero */
 1486 else
 1487 bit = 6; /* denormalized number */
 1488 break;
 1489 case _FP_EXPMAX_Q:
 1490 if (_FP_FRAC_ZEROP_4(QA))
 1491 bit = 4; /* infinity */
 1492 else if (_FP_FRAC_HIGH_RAW_Q(QA) & _FP_QNANBIT_Q)
 1493 bit = 2; /* quiet NAN */
 1494 else
 1495 bit = 0; /* signaling NAN */
 1496 break;
 1497 }
 /* mask bits come in sign pairs: even bit = negative, odd bit = positive */
 1498 if (!QA_s)
 1499 bit++;
 1500 emu_set_CC(regs, ((__u32) val >> bit) & 1);
 1501 return 0;
 1502}
1503
 1504/* Test data class double (TCDB): classify fprs[rx].d and set the CC to the selected bit of the class mask 'val' */
 1505static int emu_tcdb (struct pt_regs *regs, int rx, long val) {
 1506 FP_DECL_D(DA);
 1507 int bit;
 1508
 /* raw (unnormalized) unpack - we only inspect exponent/fraction fields */
 1509 FP_UNPACK_RAW_DP(DA, &current->thread.fp_regs.fprs[rx].d);
 1510 switch (DA_e) {
 1511 default:
 1512 bit = 8; /* normalized number */
 1513 break;
 1514 case 0:
 1515 if (_FP_FRAC_ZEROP_2(DA))
 1516 bit = 10; /* zero */
 1517 else
 1518 bit = 6; /* denormalized number */
 1519 break;
 1520 case _FP_EXPMAX_D:
 1521 if (_FP_FRAC_ZEROP_2(DA))
 1522 bit = 4; /* infinity */
 1523 else if (_FP_FRAC_HIGH_RAW_D(DA) & _FP_QNANBIT_D)
 1524 bit = 2; /* quiet NAN */
 1525 else
 1526 bit = 0; /* signaling NAN */
 1527 break;
 1528 }
 /* mask bits come in sign pairs: even bit = negative, odd bit = positive */
 1529 if (!DA_s)
 1530 bit++;
 1531 emu_set_CC(regs, ((__u32) val >> bit) & 1);
 1532 return 0;
 1533}
1534
 1535/* Test data class float (TCEB): classify fprs[rx].f and set the CC to the selected bit of the class mask 'val' */
 1536static int emu_tceb (struct pt_regs *regs, int rx, long val) {
 1537 FP_DECL_S(SA);
 1538 int bit;
 1539
 /* raw (unnormalized) unpack - we only inspect exponent/fraction fields */
 1540 FP_UNPACK_RAW_SP(SA, &current->thread.fp_regs.fprs[rx].f);
 1541 switch (SA_e) {
 1542 default:
 1543 bit = 8; /* normalized number */
 1544 break;
 1545 case 0:
 1546 if (_FP_FRAC_ZEROP_1(SA))
 1547 bit = 10; /* zero */
 1548 else
 1549 bit = 6; /* denormalized number */
 1550 break;
 1551 case _FP_EXPMAX_S:
 1552 if (_FP_FRAC_ZEROP_1(SA))
 1553 bit = 4; /* infinity */
 1554 else if (_FP_FRAC_HIGH_RAW_S(SA) & _FP_QNANBIT_S)
 1555 bit = 2; /* quiet NAN */
 1556 else
 1557 bit = 0; /* signaling NAN */
 1558 break;
 1559 }
 /* mask bits come in sign pairs: even bit = negative, odd bit = positive */
 1560 if (!SA_s)
 1561 bit++;
 1562 emu_set_CC(regs, ((__u32) val >> bit) & 1);
 1563 return 0;
 1564}
1565
 /*
  * Reload the real 64-bit FP register 'reg' from the software save area
  * fp_regs.fprs[reg].  reg&9 filters everything but 0,2,4,6 - the only
  * FP registers present without the additional-floating-point facility.
  * The bras/ex pair EXECUTEs the 'ld' template with reg<<4 merged into
  * its register field, so one template serves all four registers.
  */
 1566static inline void emu_load_regd(int reg) {
 1567 if ((reg&9) != 0) /* test if reg in {0,2,4,6} */
 1568 return;
 1569 asm volatile( /* load reg from fp_regs.fprs[reg] */
 1570 " bras 1,0f\n"
 1571 " ld 0,0(%1)\n"
 1572 "0: ex %0,0(1)"
 1573 : /* no output */
 1574 : "a" (reg<<4),"a" (&current->thread.fp_regs.fprs[reg].d)
 1575 : "1");
 1576}
1577
 /*
  * Reload the real 32-bit FP register 'reg' from the software save area
  * fp_regs.fprs[reg].  Same EXECUTE trick as emu_load_regd, but with a
  * short 'le' load; only registers 0,2,4,6 are handled.
  */
 1578static inline void emu_load_rege(int reg) {
 1579 if ((reg&9) != 0) /* test if reg in {0,2,4,6} */
 1580 return;
 1581 asm volatile( /* load reg from fp_regs.fprs[reg] */
 1582 " bras 1,0f\n"
 1583 " le 0,0(%1)\n"
 1584 "0: ex %0,0(1)"
 1585 : /* no output */
 1586 : "a" (reg<<4), "a" (&current->thread.fp_regs.fprs[reg].f)
 1587 : "1");
 1588}
1589
 /*
  * Save the real 64-bit FP register 'reg' into the software save area
  * fp_regs.fprs[reg] so the soft-fp emulation routines can read it.
  * EXECUTEs a 'std' template with reg<<4 merged into its register
  * field; only registers 0,2,4,6 are handled (see reg&9 filter).
  */
 1590static inline void emu_store_regd(int reg) {
 1591 if ((reg&9) != 0) /* test if reg in {0,2,4,6} */
 1592 return;
 1593 asm volatile( /* store reg to fp_regs.fprs[reg] */
 1594 " bras 1,0f\n"
 1595 " std 0,0(%1)\n"
 1596 "0: ex %0,0(1)"
 1597 : /* no output */
 1598 : "a" (reg<<4), "a" (&current->thread.fp_regs.fprs[reg].d)
 1599 : "1");
 1600}
1601
1602
 /*
  * Save the real 32-bit FP register 'reg' into the software save area
  * fp_regs.fprs[reg].  Same EXECUTE trick as emu_store_regd, but with a
  * short 'ste' store; only registers 0,2,4,6 are handled.
  */
 1603static inline void emu_store_rege(int reg) {
 1604 if ((reg&9) != 0) /* test if reg in {0,2,4,6} */
 1605 return;
 1606 asm volatile( /* store reg to fp_regs.fprs[reg] */
 1607 " bras 1,0f\n"
 1608 " ste 0,0(%1)\n"
 1609 "0: ex %0,0(1)"
 1610 : /* no output */
 1611 : "a" (reg<<4), "a" (&current->thread.fp_regs.fprs[reg].f)
 1612 : "1");
 1613}
1614
/*
 * Emulate a 0xb3xx (RRE/RRF binary floating point) instruction.
 * format_table maps the second opcode byte to one of the operand
 * marshalling cases below; jump_table maps it to the emulation routine.
 * Each case saves the affected real FP registers to the software save
 * area, calls the emulation routine, and reloads the result registers.
 * Returns 0 on success, SIGILL for invalid encodings, SIGFPE when a
 * raised IEEE exception is enabled in the FPC.
 */
 1615int math_emu_b3(__u8 *opcode, struct pt_regs * regs) {
 1616 int _fex = 0;
 /* operand-format code per second opcode byte; values are the case labels of the switch below */
 1617 static const __u8 format_table[256] = {
 1618 [0x00] = 0x03,[0x01] = 0x03,[0x02] = 0x03,[0x03] = 0x03,
 1619 [0x04] = 0x0f,[0x05] = 0x0d,[0x06] = 0x0e,[0x07] = 0x0d,
 1620 [0x08] = 0x03,[0x09] = 0x03,[0x0a] = 0x03,[0x0b] = 0x03,
 1621 [0x0c] = 0x0f,[0x0d] = 0x03,[0x0e] = 0x06,[0x0f] = 0x06,
 1622 [0x10] = 0x02,[0x11] = 0x02,[0x12] = 0x02,[0x13] = 0x02,
 1623 [0x14] = 0x03,[0x15] = 0x02,[0x16] = 0x01,[0x17] = 0x03,
 1624 [0x18] = 0x02,[0x19] = 0x02,[0x1a] = 0x02,[0x1b] = 0x02,
 1625 [0x1c] = 0x02,[0x1d] = 0x02,[0x1e] = 0x05,[0x1f] = 0x05,
 1626 [0x40] = 0x01,[0x41] = 0x01,[0x42] = 0x01,[0x43] = 0x01,
 1627 [0x44] = 0x12,[0x45] = 0x0d,[0x46] = 0x11,[0x47] = 0x04,
 1628 [0x48] = 0x01,[0x49] = 0x01,[0x4a] = 0x01,[0x4b] = 0x01,
 1629 [0x4c] = 0x01,[0x4d] = 0x01,[0x53] = 0x06,[0x57] = 0x06,
 1630 [0x5b] = 0x05,[0x5f] = 0x05,[0x84] = 0x13,[0x8c] = 0x13,
 1631 [0x94] = 0x09,[0x95] = 0x08,[0x96] = 0x07,[0x98] = 0x0c,
 1632 [0x99] = 0x0b,[0x9a] = 0x0a
 1633 };
 /* emulation routine per second opcode byte; entries are cast to the proper signature per case */
 1634 static const void *jump_table[256]= {
 1635 [0x00] = emu_lpebr,[0x01] = emu_lnebr,[0x02] = emu_ltebr,
 1636 [0x03] = emu_lcebr,[0x04] = emu_ldebr,[0x05] = emu_lxdbr,
 1637 [0x06] = emu_lxebr,[0x07] = emu_mxdbr,[0x08] = emu_kebr,
 1638 [0x09] = emu_cebr, [0x0a] = emu_aebr, [0x0b] = emu_sebr,
 1639 [0x0c] = emu_mdebr,[0x0d] = emu_debr, [0x0e] = emu_maebr,
 1640 [0x0f] = emu_msebr,[0x10] = emu_lpdbr,[0x11] = emu_lndbr,
 1641 [0x12] = emu_ltdbr,[0x13] = emu_lcdbr,[0x14] = emu_sqebr,
 1642 [0x15] = emu_sqdbr,[0x16] = emu_sqxbr,[0x17] = emu_meebr,
 1643 [0x18] = emu_kdbr, [0x19] = emu_cdbr, [0x1a] = emu_adbr,
 1644 [0x1b] = emu_sdbr, [0x1c] = emu_mdbr, [0x1d] = emu_ddbr,
 1645 [0x1e] = emu_madbr,[0x1f] = emu_msdbr,[0x40] = emu_lpxbr,
 1646 [0x41] = emu_lnxbr,[0x42] = emu_ltxbr,[0x43] = emu_lcxbr,
 1647 [0x44] = emu_ledbr,[0x45] = emu_ldxbr,[0x46] = emu_lexbr,
 1648 [0x47] = emu_fixbr,[0x48] = emu_kxbr, [0x49] = emu_cxbr,
 1649 [0x4a] = emu_axbr, [0x4b] = emu_sxbr, [0x4c] = emu_mxbr,
 1650 [0x4d] = emu_dxbr, [0x53] = emu_diebr,[0x57] = emu_fiebr,
 1651 [0x5b] = emu_didbr,[0x5f] = emu_fidbr,[0x84] = emu_sfpc,
 1652 [0x8c] = emu_efpc, [0x94] = emu_cefbr,[0x95] = emu_cdfbr,
 1653 [0x96] = emu_cxfbr,[0x98] = emu_cfebr,[0x99] = emu_cfdbr,
 1654 [0x9a] = emu_cfxbr
 1655 };
 1656
 1657 switch (format_table[opcode[1]]) {
 1658 case 1: /* RRE format, long double operation */
 /* both operands are register pairs: rx and ry must be even and a valid pair head */
 1659 if (opcode[3] & 0x22)
 1660 return SIGILL;
 1661 emu_store_regd((opcode[3] >> 4) & 15);
 1662 emu_store_regd(((opcode[3] >> 4) & 15) + 2);
 1663 emu_store_regd(opcode[3] & 15);
 1664 emu_store_regd((opcode[3] & 15) + 2);
 1665 /* call the emulation function */
 1666 _fex = ((int (*)(struct pt_regs *,int, int))
 1667 jump_table[opcode[1]])
 1668 (regs, opcode[3] >> 4, opcode[3] & 15);
 1669 emu_load_regd((opcode[3] >> 4) & 15);
 1670 emu_load_regd(((opcode[3] >> 4) & 15) + 2);
 1671 emu_load_regd(opcode[3] & 15);
 1672 emu_load_regd((opcode[3] & 15) + 2);
 1673 break;
 1674 case 2: /* RRE format, double operation */
 1675 emu_store_regd((opcode[3] >> 4) & 15);
 1676 emu_store_regd(opcode[3] & 15);
 1677 /* call the emulation function */
 1678 _fex = ((int (*)(struct pt_regs *, int, int))
 1679 jump_table[opcode[1]])
 1680 (regs, opcode[3] >> 4, opcode[3] & 15);
 1681 emu_load_regd((opcode[3] >> 4) & 15);
 1682 emu_load_regd(opcode[3] & 15);
 1683 break;
 1684 case 3: /* RRE format, float operation */
 1685 emu_store_rege((opcode[3] >> 4) & 15);
 1686 emu_store_rege(opcode[3] & 15);
 1687 /* call the emulation function */
 1688 _fex = ((int (*)(struct pt_regs *, int, int))
 1689 jump_table[opcode[1]])
 1690 (regs, opcode[3] >> 4, opcode[3] & 15);
 1691 emu_load_rege((opcode[3] >> 4) & 15);
 1692 emu_load_rege(opcode[3] & 15);
 1693 break;
 1694 case 4: /* RRF format, long double operation */
 1695 if (opcode[3] & 0x22)
 1696 return SIGILL;
 1697 emu_store_regd((opcode[3] >> 4) & 15);
 1698 emu_store_regd(((opcode[3] >> 4) & 15) + 2);
 1699 emu_store_regd(opcode[3] & 15);
 1700 emu_store_regd((opcode[3] & 15) + 2);
 1701 /* call the emulation function */
 1702 _fex = ((int (*)(struct pt_regs *, int, int, int))
 1703 jump_table[opcode[1]])
 1704 (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4);
 1705 emu_load_regd((opcode[3] >> 4) & 15);
 1706 emu_load_regd(((opcode[3] >> 4) & 15) + 2);
 1707 emu_load_regd(opcode[3] & 15);
 1708 emu_load_regd((opcode[3] & 15) + 2);
 1709 break;
 1710 case 5: /* RRF format, double operation */
 1711 emu_store_regd((opcode[2] >> 4) & 15);
 1712 emu_store_regd((opcode[3] >> 4) & 15);
 1713 emu_store_regd(opcode[3] & 15);
 1714 /* call the emulation function */
 1715 _fex = ((int (*)(struct pt_regs *, int, int, int))
 1716 jump_table[opcode[1]])
 1717 (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4);
 1718 emu_load_regd((opcode[2] >> 4) & 15);
 1719 emu_load_regd((opcode[3] >> 4) & 15);
 1720 emu_load_regd(opcode[3] & 15);
 1721 break;
 1722 case 6: /* RRF format, float operation */
 1723 emu_store_rege((opcode[2] >> 4) & 15);
 1724 emu_store_rege((opcode[3] >> 4) & 15);
 1725 emu_store_rege(opcode[3] & 15);
 1726 /* call the emulation function */
 1727 _fex = ((int (*)(struct pt_regs *, int, int, int))
 1728 jump_table[opcode[1]])
 1729 (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4);
 1730 emu_load_rege((opcode[2] >> 4) & 15);
 1731 emu_load_rege((opcode[3] >> 4) & 15);
 1732 emu_load_rege(opcode[3] & 15);
 1733 break;
 1734 case 7: /* RRE format, cxfbr instruction */
 1735 /* call the emulation function */
 1736 if (opcode[3] & 0x20)
 1737 return SIGILL;
 1738 _fex = ((int (*)(struct pt_regs *, int, int))
 1739 jump_table[opcode[1]])
 1740 (regs, opcode[3] >> 4, opcode[3] & 15);
 1741 emu_load_regd((opcode[3] >> 4) & 15);
 1742 emu_load_regd(((opcode[3] >> 4) & 15) + 2);
 1743 break;
 1744 case 8: /* RRE format, cdfbr instruction */
 1745 /* call the emulation function */
 1746 _fex = ((int (*)(struct pt_regs *, int, int))
 1747 jump_table[opcode[1]])
 1748 (regs, opcode[3] >> 4, opcode[3] & 15);
 1749 emu_load_regd((opcode[3] >> 4) & 15);
 1750 break;
 1751 case 9: /* RRE format, cefbr instruction */
 1752 /* call the emulation function */
 1753 _fex = ((int (*)(struct pt_regs *, int, int))
 1754 jump_table[opcode[1]])
 1755 (regs, opcode[3] >> 4, opcode[3] & 15);
 1756 emu_load_rege((opcode[3] >> 4) & 15);
 1757 break;
 1758 case 10: /* RRF format, cfxbr instruction */
 1759 if ((opcode[2] & 128) == 128 || (opcode[2] & 96) == 32)
 1760 /* mask of { 2,3,8-15 } is invalid */
 1761 return SIGILL;
 1762 if (opcode[3] & 2)
 1763 return SIGILL;
 1764 emu_store_regd(opcode[3] & 15);
 1765 emu_store_regd((opcode[3] & 15) + 2);
 1766 /* call the emulation function */
 1767 _fex = ((int (*)(struct pt_regs *, int, int, int))
 1768 jump_table[opcode[1]])
 1769 (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4);
 1770 break;
 1771 case 11: /* RRF format, cfdbr instruction */
 1772 if ((opcode[2] & 128) == 128 || (opcode[2] & 96) == 32)
 1773 /* mask of { 2,3,8-15 } is invalid */
 1774 return SIGILL;
 1775 emu_store_regd(opcode[3] & 15);
 1776 /* call the emulation function */
 1777 _fex = ((int (*)(struct pt_regs *, int, int, int))
 1778 jump_table[opcode[1]])
 1779 (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4);
 1780 break;
 1781 case 12: /* RRF format, cfebr instruction */
 1782 if ((opcode[2] & 128) == 128 || (opcode[2] & 96) == 32)
 1783 /* mask of { 2,3,8-15 } is invalid */
 1784 return SIGILL;
 1785 emu_store_rege(opcode[3] & 15);
 1786 /* call the emulation function */
 1787 _fex = ((int (*)(struct pt_regs *, int, int, int))
 1788 jump_table[opcode[1]])
 1789 (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4);
 1790 break;
 1791 case 13: /* RRE format, lxdbr & mxdbr (and, per format_table, ldxbr) */
 1792 /* double store but long double load */
 1793 if (opcode[3] & 0x20)
 1794 return SIGILL;
 1795 emu_store_regd((opcode[3] >> 4) & 15);
 1796 emu_store_regd(opcode[3] & 15);
 1797 /* call the emulation function */
 1798 _fex = ((int (*)(struct pt_regs *, int, int))
 1799 jump_table[opcode[1]])
 1800 (regs, opcode[3] >> 4, opcode[3] & 15);
 1801 emu_load_regd((opcode[3] >> 4) & 15);
 1802 emu_load_regd(((opcode[3] >> 4) & 15) + 2);
 1803 break;
 1804 case 14: /* RRE format, lxebr instruction */
 1805 /* float store but long double load */
 1806 if (opcode[3] & 0x20)
 1807 return SIGILL;
 1808 emu_store_rege((opcode[3] >> 4) & 15);
 1809 emu_store_rege(opcode[3] & 15);
 1810 /* call the emulation function */
 1811 _fex = ((int (*)(struct pt_regs *, int, int))
 1812 jump_table[opcode[1]])
 1813 (regs, opcode[3] >> 4, opcode[3] & 15);
 1814 emu_load_regd((opcode[3] >> 4) & 15);
 1815 emu_load_regd(((opcode[3] >> 4) & 15) + 2);
 1816 break;
 1817 case 15: /* RRE format, ldebr & mdebr instruction */
 1818 /* float store but double load */
 1819 emu_store_rege((opcode[3] >> 4) & 15);
 1820 emu_store_rege(opcode[3] & 15);
 1821 /* call the emulation function */
 1822 _fex = ((int (*)(struct pt_regs *, int, int))
 1823 jump_table[opcode[1]])
 1824 (regs, opcode[3] >> 4, opcode[3] & 15);
 1825 emu_load_regd((opcode[3] >> 4) & 15);
 1826 break;
 1827 case 16: /* RRE format, ldxbr instruction */
 /* NOTE(review): no format_table entry maps to 16 - this case looks unreachable; ldxbr (0x45) is routed to case 13. Verify. */
 1828 /* long double store but double load */
 1829 if (opcode[3] & 2)
 1830 return SIGILL;
 1831 emu_store_regd(opcode[3] & 15);
 1832 emu_store_regd((opcode[3] & 15) + 2);
 1833 /* call the emulation function */
 1834 _fex = ((int (*)(struct pt_regs *, int, int))
 1835 jump_table[opcode[1]])
 1836 (regs, opcode[3] >> 4, opcode[3] & 15);
 1837 emu_load_regd((opcode[3] >> 4) & 15);
 1838 break;
 1839 case 17: /* RRE format, lexbr instruction */
 1840 /* long double store but float load */
 1841 if (opcode[3] & 2)
 1842 return SIGILL;
 1843 emu_store_regd(opcode[3] & 15);
 1844 emu_store_regd((opcode[3] & 15) + 2);
 1845 /* call the emulation function */
 1846 _fex = ((int (*)(struct pt_regs *, int, int))
 1847 jump_table[opcode[1]])
 1848 (regs, opcode[3] >> 4, opcode[3] & 15);
 1849 emu_load_rege((opcode[3] >> 4) & 15);
 1850 break;
 1851 case 18: /* RRE format, ledbr instruction */
 1852 /* double store but float load */
 1853 emu_store_regd(opcode[3] & 15);
 1854 /* call the emulation function */
 1855 _fex = ((int (*)(struct pt_regs *, int, int))
 1856 jump_table[opcode[1]])
 1857 (regs, opcode[3] >> 4, opcode[3] & 15);
 1858 emu_load_rege((opcode[3] >> 4) & 15);
 1859 break;
 1860 case 19: /* RRE format, efpc & sfpc instruction */
 1861 /* call the emulation function */
 1862 _fex = ((int (*)(struct pt_regs *, int, int))
 1863 jump_table[opcode[1]])
 1864 (regs, opcode[3] >> 4, opcode[3] & 15);
 1865 break;
 1866 default: /* invalid operation */
 1867 return SIGILL;
 1868 }
 /* merge raised IEEE exception flags into the FPC; deliver SIGFPE if the matching interrupt mask bit (flags << 8) is enabled */
 1869 if (_fex != 0) {
 1870 current->thread.fp_regs.fpc |= _fex;
 1871 if (current->thread.fp_regs.fpc & (_fex << 8))
 1872 return SIGFPE;
 1873 }
 1874 return 0;
 1875}
1876
1877static void* calc_addr(struct pt_regs *regs, int rx, int rb, int disp)
1878{
1879 addr_t addr;
1880
1881 rx &= 15;
1882 rb &= 15;
1883 addr = disp & 0xfff;
1884 addr += (rx != 0) ? regs->gprs[rx] : 0; /* + index */
1885 addr += (rb != 0) ? regs->gprs[rb] : 0; /* + base */
1886 return (void*) addr;
1887}
1888
1889int math_emu_ed(__u8 *opcode, struct pt_regs * regs) {
1890 int _fex = 0;
1891
1892 static const __u8 format_table[256] = {
1893 [0x04] = 0x06,[0x05] = 0x05,[0x06] = 0x07,[0x07] = 0x05,
1894 [0x08] = 0x02,[0x09] = 0x02,[0x0a] = 0x02,[0x0b] = 0x02,
1895 [0x0c] = 0x06,[0x0d] = 0x02,[0x0e] = 0x04,[0x0f] = 0x04,
1896 [0x10] = 0x08,[0x11] = 0x09,[0x12] = 0x0a,[0x14] = 0x02,
1897 [0x15] = 0x01,[0x17] = 0x02,[0x18] = 0x01,[0x19] = 0x01,
1898 [0x1a] = 0x01,[0x1b] = 0x01,[0x1c] = 0x01,[0x1d] = 0x01,
1899 [0x1e] = 0x03,[0x1f] = 0x03,
1900 };
1901 static const void *jump_table[]= {
1902 [0x04] = emu_ldeb,[0x05] = emu_lxdb,[0x06] = emu_lxeb,
1903 [0x07] = emu_mxdb,[0x08] = emu_keb, [0x09] = emu_ceb,
1904 [0x0a] = emu_aeb, [0x0b] = emu_seb, [0x0c] = emu_mdeb,
1905 [0x0d] = emu_deb, [0x0e] = emu_maeb,[0x0f] = emu_mseb,
1906 [0x10] = emu_tceb,[0x11] = emu_tcdb,[0x12] = emu_tcxb,
1907 [0x14] = emu_sqeb,[0x15] = emu_sqdb,[0x17] = emu_meeb,
1908 [0x18] = emu_kdb, [0x19] = emu_cdb, [0x1a] = emu_adb,
1909 [0x1b] = emu_sdb, [0x1c] = emu_mdb, [0x1d] = emu_ddb,
1910 [0x1e] = emu_madb,[0x1f] = emu_msdb
1911 };
1912
1913 switch (format_table[opcode[5]]) {
1914 case 1: /* RXE format, double constant */ {
1915 __u64 *dxb, temp;
1916 __u32 opc;
1917
1918 emu_store_regd((opcode[1] >> 4) & 15);
1919 opc = *((__u32 *) opcode);
1920 dxb = (__u64 *) calc_addr(regs, opc >> 16, opc >> 12, opc);
1921 mathemu_copy_from_user(&temp, dxb, 8);
1922 /* call the emulation function */
1923 _fex = ((int (*)(struct pt_regs *, int, double *))
1924 jump_table[opcode[5]])
1925 (regs, opcode[1] >> 4, (double *) &temp);
1926 emu_load_regd((opcode[1] >> 4) & 15);
1927 break;
1928 }
1929 case 2: /* RXE format, float constant */ {
1930 __u32 *dxb, temp;
1931 __u32 opc;
1932
1933 emu_store_rege((opcode[1] >> 4) & 15);
1934 opc = *((__u32 *) opcode);
1935 dxb = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc);
1936 mathemu_get_user(temp, dxb);
1937 /* call the emulation function */
1938 _fex = ((int (*)(struct pt_regs *, int, float *))
1939 jump_table[opcode[5]])
1940 (regs, opcode[1] >> 4, (float *) &temp);
1941 emu_load_rege((opcode[1] >> 4) & 15);
1942 break;
1943 }
1944 case 3: /* RXF format, double constant */ {
1945 __u64 *dxb, temp;
1946 __u32 opc;
1947
1948 emu_store_regd((opcode[1] >> 4) & 15);
1949 emu_store_regd((opcode[4] >> 4) & 15);
1950 opc = *((__u32 *) opcode);
1951 dxb = (__u64 *) calc_addr(regs, opc >> 16, opc >> 12, opc);
1952 mathemu_copy_from_user(&temp, dxb, 8);
1953 /* call the emulation function */
1954 _fex = ((int (*)(struct pt_regs *, int, double *, int))
1955 jump_table[opcode[5]])
1956 (regs, opcode[1] >> 4, (double *) &temp, opcode[4] >> 4);
1957 emu_load_regd((opcode[1] >> 4) & 15);
1958 break;
1959 }
1960 case 4: /* RXF format, float constant */ {
1961 __u32 *dxb, temp;
1962 __u32 opc;
1963
1964 emu_store_rege((opcode[1] >> 4) & 15);
1965 emu_store_rege((opcode[4] >> 4) & 15);
1966 opc = *((__u32 *) opcode);
1967 dxb = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc);
1968 mathemu_get_user(temp, dxb);
1969 /* call the emulation function */
1970 _fex = ((int (*)(struct pt_regs *, int, float *, int))
1971 jump_table[opcode[5]])
1972 (regs, opcode[1] >> 4, (float *) &temp, opcode[4] >> 4);
1973 emu_load_rege((opcode[4] >> 4) & 15);
1974 break;
1975 }
1976 case 5: /* RXE format, double constant */
1977 /* store double and load long double */
1978 {
1979 __u64 *dxb, temp;
1980 __u32 opc;
1981 if ((opcode[1] >> 4) & 0x20)
1982 return SIGILL;
1983 emu_store_regd((opcode[1] >> 4) & 15);
1984 opc = *((__u32 *) opcode);
1985 dxb = (__u64 *) calc_addr(regs, opc >> 16, opc >> 12, opc);
1986 mathemu_copy_from_user(&temp, dxb, 8);
1987 /* call the emulation function */
1988 _fex = ((int (*)(struct pt_regs *, int, double *))
1989 jump_table[opcode[5]])
1990 (regs, opcode[1] >> 4, (double *) &temp);
1991 emu_load_regd((opcode[1] >> 4) & 15);
1992 emu_load_regd(((opcode[1] >> 4) & 15) + 2);
1993 break;
1994 }
1995 case 6: /* RXE format, float constant */
1996 /* store float and load double */
1997 {
1998 __u32 *dxb, temp;
1999 __u32 opc;
2000 emu_store_rege((opcode[1] >> 4) & 15);
2001 opc = *((__u32 *) opcode);
2002 dxb = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc);
2003 mathemu_get_user(temp, dxb);
2004 /* call the emulation function */
2005 _fex = ((int (*)(struct pt_regs *, int, float *))
2006 jump_table[opcode[5]])
2007 (regs, opcode[1] >> 4, (float *) &temp);
2008 emu_load_regd((opcode[1] >> 4) & 15);
2009 break;
2010 }
2011 case 7: /* RXE format, float constant */
2012 /* store float and load long double */
2013 {
2014 __u32 *dxb, temp;
2015 __u32 opc;
2016 if ((opcode[1] >> 4) & 0x20)
2017 return SIGILL;
2018 emu_store_rege((opcode[1] >> 4) & 15);
2019 opc = *((__u32 *) opcode);
2020 dxb = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc);
2021 mathemu_get_user(temp, dxb);
2022 /* call the emulation function */
2023 _fex = ((int (*)(struct pt_regs *, int, float *))
2024 jump_table[opcode[5]])
2025 (regs, opcode[1] >> 4, (float *) &temp);
2026 emu_load_regd((opcode[1] >> 4) & 15);
2027 emu_load_regd(((opcode[1] >> 4) & 15) + 2);
2028 break;
2029 }
2030 case 8: /* RXE format, RX address used as int value */ {
2031 __u64 dxb;
2032 __u32 opc;
2033
2034 emu_store_rege((opcode[1] >> 4) & 15);
2035 opc = *((__u32 *) opcode);
2036 dxb = (__u64) calc_addr(regs, opc >> 16, opc >> 12, opc);
2037 /* call the emulation function */
2038 _fex = ((int (*)(struct pt_regs *, int, long))
2039 jump_table[opcode[5]])
2040 (regs, opcode[1] >> 4, dxb);
2041 break;
2042 }
2043 case 9: /* RXE format, RX address used as int value */ {
2044 __u64 dxb;
2045 __u32 opc;
2046
2047 emu_store_regd((opcode[1] >> 4) & 15);
2048 opc = *((__u32 *) opcode);
2049 dxb = (__u64) calc_addr(regs, opc >> 16, opc >> 12, opc);
2050 /* call the emulation function */
2051 _fex = ((int (*)(struct pt_regs *, int, long))
2052 jump_table[opcode[5]])
2053 (regs, opcode[1] >> 4, dxb);
2054 break;
2055 }
2056 case 10: /* RXE format, RX address used as int value */ {
2057 __u64 dxb;
2058 __u32 opc;
2059
2060 if ((opcode[1] >> 4) & 2)
2061 return SIGILL;
2062 emu_store_regd((opcode[1] >> 4) & 15);
2063 emu_store_regd(((opcode[1] >> 4) & 15) + 2);
2064 opc = *((__u32 *) opcode);
2065 dxb = (__u64) calc_addr(regs, opc >> 16, opc >> 12, opc);
2066 /* call the emulation function */
2067 _fex = ((int (*)(struct pt_regs *, int, long))
2068 jump_table[opcode[5]])
2069 (regs, opcode[1] >> 4, dxb);
2070 break;
2071 }
2072 default: /* invalid operation */
2073 return SIGILL;
2074 }
2075 if (_fex != 0) {
2076 current->thread.fp_regs.fpc |= _fex;
2077 if (current->thread.fp_regs.fpc & (_fex << 8))
2078 return SIGFPE;
2079 }
2080 return 0;
2081}
2082
2083/*
2084 * Emulate LDR Rx,Ry with Rx or Ry not in {0, 2, 4, 6}
2085 */
/*
 * LDR Rx,Ry emulation: the hardware only implements FP registers
 * 0/2/4/6; every other register number is backed by the software save
 * area current->thread.fp_regs.  When exactly one operand is a real
 * hardware register, the "ex" (execute) instruction patches the
 * register number into a one-instruction load/store template built
 * right after the "bras" — this is why general register 1 is clobbered.
 * Returns 0 (the move itself cannot fault).
 */
2086int math_emu_ldr(__u8 *opcode) {
 2087 s390_fp_regs *fp_regs = &current->thread.fp_regs;
 2088 __u16 opc = *((__u16 *) opcode);
 2089
 2090 if ((opc & 0x90) == 0) { /* test if rx in {0,2,4,6} */
 2091 /* we got an exception therefore ry can't be in {0,2,4,6} */
 2092 asm volatile( /* load rx from fp_regs.fprs[ry] */
 2093 " bras 1,0f\n"
 2094 " ld 0,0(%1)\n"
 2095 "0: ex %0,0(1)" /* ex patches rx into the "ld" template */
 2096 : /* no output */
 2097 : "a" (opc & 0xf0), "a" (&fp_regs->fprs[opc & 0xf].d)
 2098 : "1");
 2099 } else if ((opc & 0x9) == 0) { /* test if ry in {0,2,4,6} */
 2100 asm volatile ( /* store ry to fp_regs.fprs[rx] */
 2101 " bras 1,0f\n"
 2102 " std 0,0(%1)\n"
 2103 "0: ex %0,0(1)" /* ex patches ry into the "std" template */
 2104 : /* no output */
 2105 : "a" ((opc & 0xf) << 4),
 2106 "a" (&fp_regs->fprs[(opc & 0xf0)>>4].d)
 2107 : "1");
 2108 } else /* move fp_regs.fprs[ry] to fp_regs.fprs[rx] */
 2109 fp_regs->fprs[(opc & 0xf0) >> 4] = fp_regs->fprs[opc & 0xf];
 2110 return 0;
 2111}
2112
2113/*
2114 * Emulate LER Rx,Ry with Rx or Ry not in {0, 2, 4, 6}
2115 */
/*
 * LER Rx,Ry emulation: 32-bit (short BFP) counterpart of math_emu_ldr.
 * Same "ex"-patched load/store template technique ("le"/"ste" instead
 * of "ld"/"std"); general register 1 is clobbered by the "bras".
 * Returns 0 (the move itself cannot fault).
 */
2116int math_emu_ler(__u8 *opcode) {
 2117 s390_fp_regs *fp_regs = &current->thread.fp_regs;
 2118 __u16 opc = *((__u16 *) opcode);
 2119
 2120 if ((opc & 0x90) == 0) { /* test if rx in {0,2,4,6} */
 2121 /* we got an exception therefore ry can't be in {0,2,4,6} */
 2122 asm volatile( /* load rx from fp_regs.fprs[ry] */
 2123 " bras 1,0f\n"
 2124 " le 0,0(%1)\n"
 2125 "0: ex %0,0(1)" /* ex patches rx into the "le" template */
 2126 : /* no output */
 2127 : "a" (opc & 0xf0), "a" (&fp_regs->fprs[opc & 0xf].f)
 2128 : "1");
 2129 } else if ((opc & 0x9) == 0) { /* test if ry in {0,2,4,6} */
 2130 asm volatile( /* store ry to fp_regs.fprs[rx] */
 2131 " bras 1,0f\n"
 2132 " ste 0,0(%1)\n"
 2133 "0: ex %0,0(1)" /* ex patches ry into the "ste" template */
 2134 : /* no output */
 2135 : "a" ((opc & 0xf) << 4),
 2136 "a" (&fp_regs->fprs[(opc & 0xf0) >> 4].f)
 2137 : "1");
 2138 } else /* move fp_regs.fprs[ry] to fp_regs.fprs[rx] */
 2139 fp_regs->fprs[(opc & 0xf0) >> 4] = fp_regs->fprs[opc & 0xf];
 2140 return 0;
 2141}
2142
2143/*
2144 * Emulate LD R,D(X,B) with R not in {0, 2, 4, 6}
2145 */
2146int math_emu_ld(__u8 *opcode, struct pt_regs * regs) {
2147 s390_fp_regs *fp_regs = &current->thread.fp_regs;
2148 __u32 opc = *((__u32 *) opcode);
2149 __u64 *dxb;
2150
2151 dxb = (__u64 *) calc_addr(regs, opc >> 16, opc >> 12, opc);
2152 mathemu_copy_from_user(&fp_regs->fprs[(opc >> 20) & 0xf].d, dxb, 8);
2153 return 0;
2154}
2155
2156/*
2157 * Emulate LE R,D(X,B) with R not in {0, 2, 4, 6}
2158 */
2159int math_emu_le(__u8 *opcode, struct pt_regs * regs) {
2160 s390_fp_regs *fp_regs = &current->thread.fp_regs;
2161 __u32 opc = *((__u32 *) opcode);
2162 __u32 *mem, *dxb;
2163
2164 dxb = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc);
2165 mem = (__u32 *) (&fp_regs->fprs[(opc >> 20) & 0xf].f);
2166 mathemu_get_user(mem[0], dxb);
2167 return 0;
2168}
2169
2170/*
2171 * Emulate STD R,D(X,B) with R not in {0, 2, 4, 6}
2172 */
2173int math_emu_std(__u8 *opcode, struct pt_regs * regs) {
2174 s390_fp_regs *fp_regs = &current->thread.fp_regs;
2175 __u32 opc = *((__u32 *) opcode);
2176 __u64 *dxb;
2177
2178 dxb = (__u64 *) calc_addr(regs, opc >> 16, opc >> 12, opc);
2179 mathemu_copy_to_user(dxb, &fp_regs->fprs[(opc >> 20) & 0xf].d, 8);
2180 return 0;
2181}
2182
2183/*
2184 * Emulate STE R,D(X,B) with R not in {0, 2, 4, 6}
2185 */
2186int math_emu_ste(__u8 *opcode, struct pt_regs * regs) {
2187 s390_fp_regs *fp_regs = &current->thread.fp_regs;
2188 __u32 opc = *((__u32 *) opcode);
2189 __u32 *mem, *dxb;
2190
2191 dxb = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc);
2192 mem = (__u32 *) (&fp_regs->fprs[(opc >> 20) & 0xf].f);
2193 mathemu_put_user(mem[0], dxb);
2194 return 0;
2195}
2196
2197/*
2198 * Emulate LFPC D(B)
2199 */
2200int math_emu_lfpc(__u8 *opcode, struct pt_regs *regs) {
2201 __u32 opc = *((__u32 *) opcode);
2202 __u32 *dxb, temp;
2203
2204 dxb= (__u32 *) calc_addr(regs, 0, opc>>12, opc);
2205 mathemu_get_user(temp, dxb);
2206 if ((temp & ~FPC_VALID_MASK) != 0)
2207 return SIGILL;
2208 current->thread.fp_regs.fpc = temp;
2209 return 0;
2210}
2211
2212/*
2213 * Emulate STFPC D(B)
2214 */
2215int math_emu_stfpc(__u8 *opcode, struct pt_regs *regs) {
2216 __u32 opc = *((__u32 *) opcode);
2217 __u32 *dxb;
2218
2219 dxb= (__u32 *) calc_addr(regs, 0, opc>>12, opc);
2220 mathemu_put_user(current->thread.fp_regs.fpc, dxb);
2221 return 0;
2222}
2223
2224/*
2225 * Emulate SRNM D(B)
2226 */
2227int math_emu_srnm(__u8 *opcode, struct pt_regs *regs) {
2228 __u32 opc = *((__u32 *) opcode);
2229 __u32 temp;
2230
2231 temp = calc_addr(regs, 0, opc>>12, opc);
2232 current->thread.fp_regs.fpc &= ~3;
2233 current->thread.fp_regs.fpc |= (temp & 3);
2234 return 0;
2235}
2236
/*
 * Out-of-line 64-bit negation helper normally supplied by libgcc; the
 * comment below suggests it was provided here to work around a
 * toolchain problem.  Implements two's complement negation word-wise:
 * negate the low word, then negate the high word and subtract a borrow
 * whenever the low word was non-zero.
 *
 * NOTE(review): the union punning assumes 32-bit "long" and big-endian
 * word order (s[0] = high word, s[1] = low word), i.e. the s390 31 bit
 * ABI this file targeted — not valid on a 64-bit build.
 */
2237/* broken compiler ... */
2238long long
2239__negdi2 (long long u)
2240{
2241
2242 union lll {
2243 long long ll;
2244 long s[2]; /* [0] = high word, [1] = low word (big endian) */
2245 };
2246
2247 union lll w,uu;
2248
2249 uu.ll = u;
2250
2251 w.s[1] = -uu.s[1]; /* negate low word */
2252 w.s[0] = -uu.s[0] - ((int) w.s[1] != 0); /* negate high word minus borrow */
2253
2254 return w.ll;
2255}
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index d46cadeda204..8556d6be9b54 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -18,9 +18,7 @@ enum address_markers_idx {
18 KERNEL_END_NR, 18 KERNEL_END_NR,
19 VMEMMAP_NR, 19 VMEMMAP_NR,
20 VMALLOC_NR, 20 VMALLOC_NR,
21#ifdef CONFIG_64BIT
22 MODULES_NR, 21 MODULES_NR,
23#endif
24}; 22};
25 23
26static struct addr_marker address_markers[] = { 24static struct addr_marker address_markers[] = {
@@ -29,9 +27,7 @@ static struct addr_marker address_markers[] = {
29 [KERNEL_END_NR] = {(unsigned long)&_end, "Kernel Image End"}, 27 [KERNEL_END_NR] = {(unsigned long)&_end, "Kernel Image End"},
30 [VMEMMAP_NR] = {0, "vmemmap Area"}, 28 [VMEMMAP_NR] = {0, "vmemmap Area"},
31 [VMALLOC_NR] = {0, "vmalloc Area"}, 29 [VMALLOC_NR] = {0, "vmalloc Area"},
32#ifdef CONFIG_64BIT
33 [MODULES_NR] = {0, "Modules Area"}, 30 [MODULES_NR] = {0, "Modules Area"},
34#endif
35 { -1, NULL } 31 { -1, NULL }
36}; 32};
37 33
@@ -127,12 +123,6 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st,
127 } 123 }
128} 124}
129 125
130#ifdef CONFIG_64BIT
131#define _PMD_PROT_MASK _SEGMENT_ENTRY_PROTECT
132#else
133#define _PMD_PROT_MASK 0
134#endif
135
136static void walk_pmd_level(struct seq_file *m, struct pg_state *st, 126static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
137 pud_t *pud, unsigned long addr) 127 pud_t *pud, unsigned long addr)
138{ 128{
@@ -145,7 +135,7 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
145 pmd = pmd_offset(pud, addr); 135 pmd = pmd_offset(pud, addr);
146 if (!pmd_none(*pmd)) { 136 if (!pmd_none(*pmd)) {
147 if (pmd_large(*pmd)) { 137 if (pmd_large(*pmd)) {
148 prot = pmd_val(*pmd) & _PMD_PROT_MASK; 138 prot = pmd_val(*pmd) & _SEGMENT_ENTRY_PROTECT;
149 note_page(m, st, prot, 3); 139 note_page(m, st, prot, 3);
150 } else 140 } else
151 walk_pte_level(m, st, pmd, addr); 141 walk_pte_level(m, st, pmd, addr);
@@ -155,12 +145,6 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
155 } 145 }
156} 146}
157 147
158#ifdef CONFIG_64BIT
159#define _PUD_PROT_MASK _REGION3_ENTRY_RO
160#else
161#define _PUD_PROT_MASK 0
162#endif
163
164static void walk_pud_level(struct seq_file *m, struct pg_state *st, 148static void walk_pud_level(struct seq_file *m, struct pg_state *st,
165 pgd_t *pgd, unsigned long addr) 149 pgd_t *pgd, unsigned long addr)
166{ 150{
@@ -173,7 +157,7 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st,
173 pud = pud_offset(pgd, addr); 157 pud = pud_offset(pgd, addr);
174 if (!pud_none(*pud)) 158 if (!pud_none(*pud))
175 if (pud_large(*pud)) { 159 if (pud_large(*pud)) {
176 prot = pud_val(*pud) & _PUD_PROT_MASK; 160 prot = pud_val(*pud) & _REGION3_ENTRY_RO;
177 note_page(m, st, prot, 2); 161 note_page(m, st, prot, 2);
178 } else 162 } else
179 walk_pmd_level(m, st, pud, addr); 163 walk_pmd_level(m, st, pud, addr);
@@ -230,13 +214,9 @@ static int pt_dump_init(void)
230 * kernel ASCE. We need this to keep the page table walker functions 214 * kernel ASCE. We need this to keep the page table walker functions
231 * from accessing non-existent entries. 215 * from accessing non-existent entries.
232 */ 216 */
233#ifdef CONFIG_32BIT
234 max_addr = 1UL << 31;
235#else
236 max_addr = (S390_lowcore.kernel_asce & _REGION_ENTRY_TYPE_MASK) >> 2; 217 max_addr = (S390_lowcore.kernel_asce & _REGION_ENTRY_TYPE_MASK) >> 2;
237 max_addr = 1UL << (max_addr * 11 + 31); 218 max_addr = 1UL << (max_addr * 11 + 31);
238 address_markers[MODULES_NR].start_address = MODULES_VADDR; 219 address_markers[MODULES_NR].start_address = MODULES_VADDR;
239#endif
240 address_markers[VMEMMAP_NR].start_address = (unsigned long) vmemmap; 220 address_markers[VMEMMAP_NR].start_address = (unsigned long) vmemmap;
241 address_markers[VMALLOC_NR].start_address = VMALLOC_START; 221 address_markers[VMALLOC_NR].start_address = VMALLOC_START;
242 debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops); 222 debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops);
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 519bba716cc3..23c496957c22 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -51,7 +51,6 @@ struct qout64 {
51 struct qrange range[6]; 51 struct qrange range[6];
52}; 52};
53 53
54#ifdef CONFIG_64BIT
55struct qrange_old { 54struct qrange_old {
56 unsigned int start; /* last byte type */ 55 unsigned int start; /* last byte type */
57 unsigned int end; /* last byte reserved */ 56 unsigned int end; /* last byte reserved */
@@ -65,7 +64,6 @@ struct qout64_old {
65 int segrcnt; 64 int segrcnt;
66 struct qrange_old range[6]; 65 struct qrange_old range[6];
67}; 66};
68#endif
69 67
70struct qin64 { 68struct qin64 {
71 char qopcode; 69 char qopcode;
@@ -103,7 +101,6 @@ static int scode_set;
103static int 101static int
104dcss_set_subcodes(void) 102dcss_set_subcodes(void)
105{ 103{
106#ifdef CONFIG_64BIT
107 char *name = kmalloc(8 * sizeof(char), GFP_KERNEL | GFP_DMA); 104 char *name = kmalloc(8 * sizeof(char), GFP_KERNEL | GFP_DMA);
108 unsigned long rx, ry; 105 unsigned long rx, ry;
109 int rc; 106 int rc;
@@ -135,7 +132,6 @@ dcss_set_subcodes(void)
135 segext_scode = DCSS_SEGEXTX; 132 segext_scode = DCSS_SEGEXTX;
136 return 0; 133 return 0;
137 } 134 }
138#endif
139 /* Diag x'64' new subcodes are not supported, set to old subcodes */ 135 /* Diag x'64' new subcodes are not supported, set to old subcodes */
140 loadshr_scode = DCSS_LOADNOLY; 136 loadshr_scode = DCSS_LOADNOLY;
141 loadnsr_scode = DCSS_LOADNSR; 137 loadnsr_scode = DCSS_LOADNSR;
@@ -208,7 +204,6 @@ dcss_diag(int *func, void *parameter,
208 rx = (unsigned long) parameter; 204 rx = (unsigned long) parameter;
209 ry = (unsigned long) *func; 205 ry = (unsigned long) *func;
210 206
211#ifdef CONFIG_64BIT
212 /* 64-bit Diag x'64' new subcode, keep in 64-bit addressing mode */ 207 /* 64-bit Diag x'64' new subcode, keep in 64-bit addressing mode */
213 if (*func > DCSS_SEGEXT) 208 if (*func > DCSS_SEGEXT)
214 asm volatile( 209 asm volatile(
@@ -225,13 +220,6 @@ dcss_diag(int *func, void *parameter,
225 " ipm %2\n" 220 " ipm %2\n"
226 " srl %2,28\n" 221 " srl %2,28\n"
227 : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc"); 222 : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
228#else
229 asm volatile(
230 " diag %0,%1,0x64\n"
231 " ipm %2\n"
232 " srl %2,28\n"
233 : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
234#endif
235 *ret1 = rx; 223 *ret1 = rx;
236 *ret2 = ry; 224 *ret2 = ry;
237 return rc; 225 return rc;
@@ -281,7 +269,6 @@ query_segment_type (struct dcss_segment *seg)
281 goto out_free; 269 goto out_free;
282 } 270 }
283 271
284#ifdef CONFIG_64BIT
285 /* Only old format of output area of Diagnose x'64' is supported, 272 /* Only old format of output area of Diagnose x'64' is supported,
286 copy data for the new format. */ 273 copy data for the new format. */
287 if (segext_scode == DCSS_SEGEXT) { 274 if (segext_scode == DCSS_SEGEXT) {
@@ -307,7 +294,6 @@ query_segment_type (struct dcss_segment *seg)
307 } 294 }
308 kfree(qout_old); 295 kfree(qout_old);
309 } 296 }
310#endif
311 if (qout->segcnt > 6) { 297 if (qout->segcnt > 6) {
312 rc = -EOPNOTSUPP; 298 rc = -EOPNOTSUPP;
313 goto out_free; 299 goto out_free;
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 3ff86533f7db..76515bcea2f1 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -36,15 +36,9 @@
36#include <asm/facility.h> 36#include <asm/facility.h>
37#include "../kernel/entry.h" 37#include "../kernel/entry.h"
38 38
39#ifndef CONFIG_64BIT
40#define __FAIL_ADDR_MASK 0x7ffff000
41#define __SUBCODE_MASK 0x0200
42#define __PF_RES_FIELD 0ULL
43#else /* CONFIG_64BIT */
44#define __FAIL_ADDR_MASK -4096L 39#define __FAIL_ADDR_MASK -4096L
45#define __SUBCODE_MASK 0x0600 40#define __SUBCODE_MASK 0x0600
46#define __PF_RES_FIELD 0x8000000000000000ULL 41#define __PF_RES_FIELD 0x8000000000000000ULL
47#endif /* CONFIG_64BIT */
48 42
49#define VM_FAULT_BADCONTEXT 0x010000 43#define VM_FAULT_BADCONTEXT 0x010000
50#define VM_FAULT_BADMAP 0x020000 44#define VM_FAULT_BADMAP 0x020000
@@ -54,7 +48,6 @@
54 48
55static unsigned long store_indication __read_mostly; 49static unsigned long store_indication __read_mostly;
56 50
57#ifdef CONFIG_64BIT
58static int __init fault_init(void) 51static int __init fault_init(void)
59{ 52{
60 if (test_facility(75)) 53 if (test_facility(75))
@@ -62,7 +55,6 @@ static int __init fault_init(void)
62 return 0; 55 return 0;
63} 56}
64early_initcall(fault_init); 57early_initcall(fault_init);
65#endif
66 58
67static inline int notify_page_fault(struct pt_regs *regs) 59static inline int notify_page_fault(struct pt_regs *regs)
68{ 60{
@@ -133,7 +125,6 @@ static int bad_address(void *p)
133 return probe_kernel_address((unsigned long *)p, dummy); 125 return probe_kernel_address((unsigned long *)p, dummy);
134} 126}
135 127
136#ifdef CONFIG_64BIT
137static void dump_pagetable(unsigned long asce, unsigned long address) 128static void dump_pagetable(unsigned long asce, unsigned long address)
138{ 129{
139 unsigned long *table = __va(asce & PAGE_MASK); 130 unsigned long *table = __va(asce & PAGE_MASK);
@@ -187,33 +178,6 @@ bad:
187 pr_cont("BAD\n"); 178 pr_cont("BAD\n");
188} 179}
189 180
190#else /* CONFIG_64BIT */
191
192static void dump_pagetable(unsigned long asce, unsigned long address)
193{
194 unsigned long *table = __va(asce & PAGE_MASK);
195
196 pr_alert("AS:%08lx ", asce);
197 table = table + ((address >> 20) & 0x7ff);
198 if (bad_address(table))
199 goto bad;
200 pr_cont("S:%08lx ", *table);
201 if (*table & _SEGMENT_ENTRY_INVALID)
202 goto out;
203 table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
204 table = table + ((address >> 12) & 0xff);
205 if (bad_address(table))
206 goto bad;
207 pr_cont("P:%08lx ", *table);
208out:
209 pr_cont("\n");
210 return;
211bad:
212 pr_cont("BAD\n");
213}
214
215#endif /* CONFIG_64BIT */
216
217static void dump_fault_info(struct pt_regs *regs) 181static void dump_fault_info(struct pt_regs *regs)
218{ 182{
219 unsigned long asce; 183 unsigned long asce;
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index 5c586c78ca8d..1eb41bb3010c 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -106,11 +106,9 @@ static inline int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
106 pmd_t *pmdp, pmd; 106 pmd_t *pmdp, pmd;
107 107
108 pmdp = (pmd_t *) pudp; 108 pmdp = (pmd_t *) pudp;
109#ifdef CONFIG_64BIT
110 if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) 109 if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
111 pmdp = (pmd_t *) pud_deref(pud); 110 pmdp = (pmd_t *) pud_deref(pud);
112 pmdp += pmd_index(addr); 111 pmdp += pmd_index(addr);
113#endif
114 do { 112 do {
115 pmd = *pmdp; 113 pmd = *pmdp;
116 barrier(); 114 barrier();
@@ -145,11 +143,9 @@ static inline int gup_pud_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
145 pud_t *pudp, pud; 143 pud_t *pudp, pud;
146 144
147 pudp = (pud_t *) pgdp; 145 pudp = (pud_t *) pgdp;
148#ifdef CONFIG_64BIT
149 if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2) 146 if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
150 pudp = (pud_t *) pgd_deref(pgd); 147 pudp = (pud_t *) pgd_deref(pgd);
151 pudp += pud_index(addr); 148 pudp += pud_index(addr);
152#endif
153 do { 149 do {
154 pud = *pudp; 150 pud = *pudp;
155 barrier(); 151 barrier();
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index d35b15113b17..80875c43a4a4 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -105,7 +105,6 @@ void __init paging_init(void)
105 unsigned long pgd_type, asce_bits; 105 unsigned long pgd_type, asce_bits;
106 106
107 init_mm.pgd = swapper_pg_dir; 107 init_mm.pgd = swapper_pg_dir;
108#ifdef CONFIG_64BIT
109 if (VMALLOC_END > (1UL << 42)) { 108 if (VMALLOC_END > (1UL << 42)) {
110 asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH; 109 asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
111 pgd_type = _REGION2_ENTRY_EMPTY; 110 pgd_type = _REGION2_ENTRY_EMPTY;
@@ -113,10 +112,6 @@ void __init paging_init(void)
113 asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH; 112 asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
114 pgd_type = _REGION3_ENTRY_EMPTY; 113 pgd_type = _REGION3_ENTRY_EMPTY;
115 } 114 }
116#else
117 asce_bits = _ASCE_TABLE_LENGTH;
118 pgd_type = _SEGMENT_ENTRY_EMPTY;
119#endif
120 S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits; 115 S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
121 clear_table((unsigned long *) init_mm.pgd, pgd_type, 116 clear_table((unsigned long *) init_mm.pgd, pgd_type,
122 sizeof(unsigned long)*2048); 117 sizeof(unsigned long)*2048);
diff --git a/arch/s390/mm/mem_detect.c b/arch/s390/mm/mem_detect.c
index 5535cfe0ee11..0f3604395805 100644
--- a/arch/s390/mm/mem_detect.c
+++ b/arch/s390/mm/mem_detect.c
@@ -36,10 +36,6 @@ void __init detect_memory_memblock(void)
36 memsize = rzm * rnmax; 36 memsize = rzm * rnmax;
37 if (!rzm) 37 if (!rzm)
38 rzm = 1ULL << 17; 38 rzm = 1ULL << 17;
39 if (IS_ENABLED(CONFIG_32BIT)) {
40 rzm = min(ADDR2G, rzm);
41 memsize = min(ADDR2G, memsize);
42 }
43 max_physmem_end = memsize; 39 max_physmem_end = memsize;
44 addr = 0; 40 addr = 0;
45 /* keep memblock lists close to the kernel */ 41 /* keep memblock lists close to the kernel */
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 179a2c20b01f..2e8378796e87 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -190,29 +190,6 @@ unsigned long randomize_et_dyn(void)
190 return base + mmap_rnd(); 190 return base + mmap_rnd();
191} 191}
192 192
193#ifndef CONFIG_64BIT
194
195/*
196 * This function, called very early during the creation of a new
197 * process VM image, sets up which VM layout function to use:
198 */
199void arch_pick_mmap_layout(struct mm_struct *mm)
200{
201 /*
202 * Fall back to the standard layout if the personality
203 * bit is set, or if the expected stack growth is unlimited:
204 */
205 if (mmap_is_legacy()) {
206 mm->mmap_base = mmap_base_legacy();
207 mm->get_unmapped_area = arch_get_unmapped_area;
208 } else {
209 mm->mmap_base = mmap_base();
210 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
211 }
212}
213
214#else
215
216int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags) 193int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
217{ 194{
218 if (is_compat_task() || (TASK_SIZE >= (1UL << 53))) 195 if (is_compat_task() || (TASK_SIZE >= (1UL << 53)))
@@ -317,5 +294,3 @@ static int __init setup_mmap_rnd(void)
317 return 0; 294 return 0;
318} 295}
319early_initcall(setup_mmap_rnd); 296early_initcall(setup_mmap_rnd);
320
321#endif
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 426c9d462d1c..749c98407b41 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -109,7 +109,7 @@ static void ipte_range(pte_t *pte, unsigned long address, int nr)
109{ 109{
110 int i; 110 int i;
111 111
112 if (test_facility(13) && IS_ENABLED(CONFIG_64BIT)) { 112 if (test_facility(13)) {
113 __ptep_ipte_range(address, nr - 1, pte); 113 __ptep_ipte_range(address, nr - 1, pte);
114 return; 114 return;
115 } 115 }
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index b2c1542f2ba2..33f589459113 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -27,14 +27,8 @@
27#include <asm/tlbflush.h> 27#include <asm/tlbflush.h>
28#include <asm/mmu_context.h> 28#include <asm/mmu_context.h>
29 29
30#ifndef CONFIG_64BIT
31#define ALLOC_ORDER 1
32#define FRAG_MASK 0x0f
33#else
34#define ALLOC_ORDER 2 30#define ALLOC_ORDER 2
35#define FRAG_MASK 0x03 31#define FRAG_MASK 0x03
36#endif
37
38 32
39unsigned long *crst_table_alloc(struct mm_struct *mm) 33unsigned long *crst_table_alloc(struct mm_struct *mm)
40{ 34{
@@ -50,7 +44,6 @@ void crst_table_free(struct mm_struct *mm, unsigned long *table)
50 free_pages((unsigned long) table, ALLOC_ORDER); 44 free_pages((unsigned long) table, ALLOC_ORDER);
51} 45}
52 46
53#ifdef CONFIG_64BIT
54static void __crst_table_upgrade(void *arg) 47static void __crst_table_upgrade(void *arg)
55{ 48{
56 struct mm_struct *mm = arg; 49 struct mm_struct *mm = arg;
@@ -140,7 +133,6 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
140 if (current->active_mm == mm) 133 if (current->active_mm == mm)
141 set_user_asce(mm); 134 set_user_asce(mm);
142} 135}
143#endif
144 136
145#ifdef CONFIG_PGSTE 137#ifdef CONFIG_PGSTE
146 138
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index b1593c2f751a..ef7d6c8fea66 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -38,12 +38,10 @@ static inline pud_t *vmem_pud_alloc(void)
38{ 38{
39 pud_t *pud = NULL; 39 pud_t *pud = NULL;
40 40
41#ifdef CONFIG_64BIT
42 pud = vmem_alloc_pages(2); 41 pud = vmem_alloc_pages(2);
43 if (!pud) 42 if (!pud)
44 return NULL; 43 return NULL;
45 clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4); 44 clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
46#endif
47 return pud; 45 return pud;
48} 46}
49 47
@@ -51,12 +49,10 @@ static inline pmd_t *vmem_pmd_alloc(void)
51{ 49{
52 pmd_t *pmd = NULL; 50 pmd_t *pmd = NULL;
53 51
54#ifdef CONFIG_64BIT
55 pmd = vmem_alloc_pages(2); 52 pmd = vmem_alloc_pages(2);
56 if (!pmd) 53 if (!pmd)
57 return NULL; 54 return NULL;
58 clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4); 55 clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
59#endif
60 return pmd; 56 return pmd;
61} 57}
62 58
@@ -98,7 +94,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
98 pgd_populate(&init_mm, pg_dir, pu_dir); 94 pgd_populate(&init_mm, pg_dir, pu_dir);
99 } 95 }
100 pu_dir = pud_offset(pg_dir, address); 96 pu_dir = pud_offset(pg_dir, address);
101#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC) 97#ifndef CONFIG_DEBUG_PAGEALLOC
102 if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address && 98 if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
103 !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) { 99 !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) {
104 pud_val(*pu_dir) = __pa(address) | 100 pud_val(*pu_dir) = __pa(address) |
@@ -115,7 +111,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
115 pud_populate(&init_mm, pu_dir, pm_dir); 111 pud_populate(&init_mm, pu_dir, pm_dir);
116 } 112 }
117 pm_dir = pmd_offset(pu_dir, address); 113 pm_dir = pmd_offset(pu_dir, address);
118#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC) 114#ifndef CONFIG_DEBUG_PAGEALLOC
119 if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address && 115 if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
120 !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) { 116 !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) {
121 pmd_val(*pm_dir) = __pa(address) | 117 pmd_val(*pm_dir) = __pa(address) |
@@ -222,7 +218,6 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
222 218
223 pm_dir = pmd_offset(pu_dir, address); 219 pm_dir = pmd_offset(pu_dir, address);
224 if (pmd_none(*pm_dir)) { 220 if (pmd_none(*pm_dir)) {
225#ifdef CONFIG_64BIT
226 /* Use 1MB frames for vmemmap if available. We always 221 /* Use 1MB frames for vmemmap if available. We always
227 * use large frames even if they are only partially 222 * use large frames even if they are only partially
228 * used. 223 * used.
@@ -240,7 +235,6 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
240 address = (address + PMD_SIZE) & PMD_MASK; 235 address = (address + PMD_SIZE) & PMD_MASK;
241 continue; 236 continue;
242 } 237 }
243#endif
244 pt_dir = vmem_pte_alloc(address); 238 pt_dir = vmem_pte_alloc(address);
245 if (!pt_dir) 239 if (!pt_dir)
246 goto out; 240 goto out;
diff --git a/arch/s390/oprofile/Makefile b/arch/s390/oprofile/Makefile
index 524c4b615821..1bd23017191e 100644
--- a/arch/s390/oprofile/Makefile
+++ b/arch/s390/oprofile/Makefile
@@ -7,4 +7,4 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
7 timer_int.o ) 7 timer_int.o )
8 8
9oprofile-y := $(DRIVER_OBJS) init.o backtrace.o 9oprofile-y := $(DRIVER_OBJS) init.o backtrace.o
10oprofile-$(CONFIG_64BIT) += hwsampler.o 10oprofile-y += hwsampler.o
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index 9ffe645d5989..bc927a09a172 100644
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -21,8 +21,6 @@
21 21
22extern void s390_backtrace(struct pt_regs * const regs, unsigned int depth); 22extern void s390_backtrace(struct pt_regs * const regs, unsigned int depth);
23 23
24#ifdef CONFIG_64BIT
25
26#include "hwsampler.h" 24#include "hwsampler.h"
27#include "op_counter.h" 25#include "op_counter.h"
28 26
@@ -495,14 +493,10 @@ static void oprofile_hwsampler_exit(void)
495 hwsampler_shutdown(); 493 hwsampler_shutdown();
496} 494}
497 495
498#endif /* CONFIG_64BIT */
499
500int __init oprofile_arch_init(struct oprofile_operations *ops) 496int __init oprofile_arch_init(struct oprofile_operations *ops)
501{ 497{
502 ops->backtrace = s390_backtrace; 498 ops->backtrace = s390_backtrace;
503 499
504#ifdef CONFIG_64BIT
505
506 /* 500 /*
507 * -ENODEV is not reported to the caller. The module itself 501 * -ENODEV is not reported to the caller. The module itself
508 * will use the timer mode sampling as fallback and this is 502 * will use the timer mode sampling as fallback and this is
@@ -511,14 +505,9 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
511 hwsampler_available = oprofile_hwsampler_init(ops) == 0; 505 hwsampler_available = oprofile_hwsampler_init(ops) == 0;
512 506
513 return 0; 507 return 0;
514#else
515 return -ENODEV;
516#endif
517} 508}
518 509
519void oprofile_arch_exit(void) 510void oprofile_arch_exit(void)
520{ 511{
521#ifdef CONFIG_64BIT
522 oprofile_hwsampler_exit(); 512 oprofile_hwsampler_exit();
523#endif
524} 513}