diff options
Diffstat (limited to 'arch')
106 files changed, 1328 insertions, 7095 deletions
diff --git a/arch/s390/Kbuild b/arch/s390/Kbuild index 647c3eccc3d0..2938934c6518 100644 --- a/arch/s390/Kbuild +++ b/arch/s390/Kbuild | |||
@@ -4,6 +4,5 @@ obj-$(CONFIG_KVM) += kvm/ | |||
4 | obj-$(CONFIG_CRYPTO_HW) += crypto/ | 4 | obj-$(CONFIG_CRYPTO_HW) += crypto/ |
5 | obj-$(CONFIG_S390_HYPFS_FS) += hypfs/ | 5 | obj-$(CONFIG_S390_HYPFS_FS) += hypfs/ |
6 | obj-$(CONFIG_APPLDATA_BASE) += appldata/ | 6 | obj-$(CONFIG_APPLDATA_BASE) += appldata/ |
7 | obj-$(CONFIG_MATHEMU) += math-emu/ | ||
8 | obj-y += net/ | 7 | obj-y += net/ |
9 | obj-$(CONFIG_PCI) += pci/ | 8 | obj-$(CONFIG_PCI) += pci/ |
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 6321fd8bf813..a5ced5c3c1e0 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig | |||
@@ -35,7 +35,7 @@ config GENERIC_BUG_RELATIVE_POINTERS | |||
35 | def_bool y | 35 | def_bool y |
36 | 36 | ||
37 | config ARCH_DMA_ADDR_T_64BIT | 37 | config ARCH_DMA_ADDR_T_64BIT |
38 | def_bool 64BIT | 38 | def_bool y |
39 | 39 | ||
40 | config GENERIC_LOCKBREAK | 40 | config GENERIC_LOCKBREAK |
41 | def_bool y if SMP && PREEMPT | 41 | def_bool y if SMP && PREEMPT |
@@ -59,7 +59,7 @@ config PCI_QUIRKS | |||
59 | def_bool n | 59 | def_bool n |
60 | 60 | ||
61 | config ARCH_SUPPORTS_UPROBES | 61 | config ARCH_SUPPORTS_UPROBES |
62 | def_bool 64BIT | 62 | def_bool y |
63 | 63 | ||
64 | config S390 | 64 | config S390 |
65 | def_bool y | 65 | def_bool y |
@@ -111,19 +111,19 @@ config S390 | |||
111 | select GENERIC_TIME_VSYSCALL | 111 | select GENERIC_TIME_VSYSCALL |
112 | select HAVE_ALIGNED_STRUCT_PAGE if SLUB | 112 | select HAVE_ALIGNED_STRUCT_PAGE if SLUB |
113 | select HAVE_ARCH_AUDITSYSCALL | 113 | select HAVE_ARCH_AUDITSYSCALL |
114 | select HAVE_ARCH_JUMP_LABEL if !MARCH_G5 | 114 | select HAVE_ARCH_JUMP_LABEL |
115 | select HAVE_ARCH_SECCOMP_FILTER | 115 | select HAVE_ARCH_SECCOMP_FILTER |
116 | select HAVE_ARCH_TRACEHOOK | 116 | select HAVE_ARCH_TRACEHOOK |
117 | select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT | 117 | select HAVE_ARCH_TRANSPARENT_HUGEPAGE |
118 | select HAVE_BPF_JIT if 64BIT && PACK_STACK | 118 | select HAVE_BPF_JIT if PACK_STACK |
119 | select HAVE_CMPXCHG_DOUBLE | 119 | select HAVE_CMPXCHG_DOUBLE |
120 | select HAVE_CMPXCHG_LOCAL | 120 | select HAVE_CMPXCHG_LOCAL |
121 | select HAVE_DEBUG_KMEMLEAK | 121 | select HAVE_DEBUG_KMEMLEAK |
122 | select HAVE_DYNAMIC_FTRACE if 64BIT | 122 | select HAVE_DYNAMIC_FTRACE |
123 | select HAVE_DYNAMIC_FTRACE_WITH_REGS if 64BIT | 123 | select HAVE_DYNAMIC_FTRACE_WITH_REGS |
124 | select HAVE_FTRACE_MCOUNT_RECORD | 124 | select HAVE_FTRACE_MCOUNT_RECORD |
125 | select HAVE_FUNCTION_GRAPH_TRACER if 64BIT | 125 | select HAVE_FUNCTION_GRAPH_TRACER |
126 | select HAVE_FUNCTION_TRACER if 64BIT | 126 | select HAVE_FUNCTION_TRACER |
127 | select HAVE_FUTEX_CMPXCHG if FUTEX | 127 | select HAVE_FUTEX_CMPXCHG if FUTEX |
128 | select HAVE_KERNEL_BZIP2 | 128 | select HAVE_KERNEL_BZIP2 |
129 | select HAVE_KERNEL_GZIP | 129 | select HAVE_KERNEL_GZIP |
@@ -133,7 +133,7 @@ config S390 | |||
133 | select HAVE_KERNEL_XZ | 133 | select HAVE_KERNEL_XZ |
134 | select HAVE_KPROBES | 134 | select HAVE_KPROBES |
135 | select HAVE_KRETPROBES | 135 | select HAVE_KRETPROBES |
136 | select HAVE_KVM if 64BIT | 136 | select HAVE_KVM |
137 | select HAVE_LIVEPATCH | 137 | select HAVE_LIVEPATCH |
138 | select HAVE_MEMBLOCK | 138 | select HAVE_MEMBLOCK |
139 | select HAVE_MEMBLOCK_NODE_MAP | 139 | select HAVE_MEMBLOCK_NODE_MAP |
@@ -143,7 +143,6 @@ config S390 | |||
143 | select HAVE_PERF_EVENTS | 143 | select HAVE_PERF_EVENTS |
144 | select HAVE_REGS_AND_STACK_ACCESS_API | 144 | select HAVE_REGS_AND_STACK_ACCESS_API |
145 | select HAVE_SYSCALL_TRACEPOINTS | 145 | select HAVE_SYSCALL_TRACEPOINTS |
146 | select HAVE_UID16 if 32BIT | ||
147 | select HAVE_VIRT_CPU_ACCOUNTING | 146 | select HAVE_VIRT_CPU_ACCOUNTING |
148 | select MODULES_USE_ELF_RELA | 147 | select MODULES_USE_ELF_RELA |
149 | select NO_BOOTMEM | 148 | select NO_BOOTMEM |
@@ -199,18 +198,11 @@ config HAVE_MARCH_Z13_FEATURES | |||
199 | 198 | ||
200 | choice | 199 | choice |
201 | prompt "Processor type" | 200 | prompt "Processor type" |
202 | default MARCH_G5 | 201 | default MARCH_Z900 |
203 | |||
204 | config MARCH_G5 | ||
205 | bool "System/390 model G5 and G6" | ||
206 | depends on !64BIT | ||
207 | help | ||
208 | Select this to build a 31 bit kernel that works | ||
209 | on all ESA/390 and z/Architecture machines. | ||
210 | 202 | ||
211 | config MARCH_Z900 | 203 | config MARCH_Z900 |
212 | bool "IBM zSeries model z800 and z900" | 204 | bool "IBM zSeries model z800 and z900" |
213 | select HAVE_MARCH_Z900_FEATURES if 64BIT | 205 | select HAVE_MARCH_Z900_FEATURES |
214 | help | 206 | help |
215 | Select this to enable optimizations for model z800/z900 (2064 and | 207 | Select this to enable optimizations for model z800/z900 (2064 and |
216 | 2066 series). This will enable some optimizations that are not | 208 | 2066 series). This will enable some optimizations that are not |
@@ -218,7 +210,7 @@ config MARCH_Z900 | |||
218 | 210 | ||
219 | config MARCH_Z990 | 211 | config MARCH_Z990 |
220 | bool "IBM zSeries model z890 and z990" | 212 | bool "IBM zSeries model z890 and z990" |
221 | select HAVE_MARCH_Z990_FEATURES if 64BIT | 213 | select HAVE_MARCH_Z990_FEATURES |
222 | help | 214 | help |
223 | Select this to enable optimizations for model z890/z990 (2084 and | 215 | Select this to enable optimizations for model z890/z990 (2084 and |
224 | 2086 series). The kernel will be slightly faster but will not work | 216 | 2086 series). The kernel will be slightly faster but will not work |
@@ -226,7 +218,7 @@ config MARCH_Z990 | |||
226 | 218 | ||
227 | config MARCH_Z9_109 | 219 | config MARCH_Z9_109 |
228 | bool "IBM System z9" | 220 | bool "IBM System z9" |
229 | select HAVE_MARCH_Z9_109_FEATURES if 64BIT | 221 | select HAVE_MARCH_Z9_109_FEATURES |
230 | help | 222 | help |
231 | Select this to enable optimizations for IBM System z9 (2094 and | 223 | Select this to enable optimizations for IBM System z9 (2094 and |
232 | 2096 series). The kernel will be slightly faster but will not work | 224 | 2096 series). The kernel will be slightly faster but will not work |
@@ -234,7 +226,7 @@ config MARCH_Z9_109 | |||
234 | 226 | ||
235 | config MARCH_Z10 | 227 | config MARCH_Z10 |
236 | bool "IBM System z10" | 228 | bool "IBM System z10" |
237 | select HAVE_MARCH_Z10_FEATURES if 64BIT | 229 | select HAVE_MARCH_Z10_FEATURES |
238 | help | 230 | help |
239 | Select this to enable optimizations for IBM System z10 (2097 and | 231 | Select this to enable optimizations for IBM System z10 (2097 and |
240 | 2098 series). The kernel will be slightly faster but will not work | 232 | 2098 series). The kernel will be slightly faster but will not work |
@@ -242,7 +234,7 @@ config MARCH_Z10 | |||
242 | 234 | ||
243 | config MARCH_Z196 | 235 | config MARCH_Z196 |
244 | bool "IBM zEnterprise 114 and 196" | 236 | bool "IBM zEnterprise 114 and 196" |
245 | select HAVE_MARCH_Z196_FEATURES if 64BIT | 237 | select HAVE_MARCH_Z196_FEATURES |
246 | help | 238 | help |
247 | Select this to enable optimizations for IBM zEnterprise 114 and 196 | 239 | Select this to enable optimizations for IBM zEnterprise 114 and 196 |
248 | (2818 and 2817 series). The kernel will be slightly faster but will | 240 | (2818 and 2817 series). The kernel will be slightly faster but will |
@@ -250,7 +242,7 @@ config MARCH_Z196 | |||
250 | 242 | ||
251 | config MARCH_ZEC12 | 243 | config MARCH_ZEC12 |
252 | bool "IBM zBC12 and zEC12" | 244 | bool "IBM zBC12 and zEC12" |
253 | select HAVE_MARCH_ZEC12_FEATURES if 64BIT | 245 | select HAVE_MARCH_ZEC12_FEATURES |
254 | help | 246 | help |
255 | Select this to enable optimizations for IBM zBC12 and zEC12 (2828 and | 247 | Select this to enable optimizations for IBM zBC12 and zEC12 (2828 and |
256 | 2827 series). The kernel will be slightly faster but will not work on | 248 | 2827 series). The kernel will be slightly faster but will not work on |
@@ -258,7 +250,7 @@ config MARCH_ZEC12 | |||
258 | 250 | ||
259 | config MARCH_Z13 | 251 | config MARCH_Z13 |
260 | bool "IBM z13" | 252 | bool "IBM z13" |
261 | select HAVE_MARCH_Z13_FEATURES if 64BIT | 253 | select HAVE_MARCH_Z13_FEATURES |
262 | help | 254 | help |
263 | Select this to enable optimizations for IBM z13 (2964 series). | 255 | Select this to enable optimizations for IBM z13 (2964 series). |
264 | The kernel will be slightly faster but will not work on older | 256 | The kernel will be slightly faster but will not work on older |
@@ -266,9 +258,6 @@ config MARCH_Z13 | |||
266 | 258 | ||
267 | endchoice | 259 | endchoice |
268 | 260 | ||
269 | config MARCH_G5_TUNE | ||
270 | def_bool TUNE_G5 || MARCH_G5 && TUNE_DEFAULT | ||
271 | |||
272 | config MARCH_Z900_TUNE | 261 | config MARCH_Z900_TUNE |
273 | def_bool TUNE_Z900 || MARCH_Z900 && TUNE_DEFAULT | 262 | def_bool TUNE_Z900 || MARCH_Z900 && TUNE_DEFAULT |
274 | 263 | ||
@@ -307,9 +296,6 @@ config TUNE_DEFAULT | |||
307 | Tune the generated code for the target processor for which the kernel | 296 | Tune the generated code for the target processor for which the kernel |
308 | will be compiled. | 297 | will be compiled. |
309 | 298 | ||
310 | config TUNE_G5 | ||
311 | bool "System/390 model G5 and G6" | ||
312 | |||
313 | config TUNE_Z900 | 299 | config TUNE_Z900 |
314 | bool "IBM zSeries model z800 and z900" | 300 | bool "IBM zSeries model z800 and z900" |
315 | 301 | ||
@@ -335,18 +321,10 @@ endchoice | |||
335 | 321 | ||
336 | config 64BIT | 322 | config 64BIT |
337 | def_bool y | 323 | def_bool y |
338 | prompt "64 bit kernel" | ||
339 | help | ||
340 | Select this option if you have an IBM z/Architecture machine | ||
341 | and want to use the 64 bit addressing mode. | ||
342 | |||
343 | config 32BIT | ||
344 | def_bool y if !64BIT | ||
345 | 324 | ||
346 | config COMPAT | 325 | config COMPAT |
347 | def_bool y | 326 | def_bool y |
348 | prompt "Kernel support for 31 bit emulation" | 327 | prompt "Kernel support for 31 bit emulation" |
349 | depends on 64BIT | ||
350 | select COMPAT_BINFMT_ELF if BINFMT_ELF | 328 | select COMPAT_BINFMT_ELF if BINFMT_ELF |
351 | select ARCH_WANT_OLD_COMPAT_IPC | 329 | select ARCH_WANT_OLD_COMPAT_IPC |
352 | select COMPAT_OLD_SIGACTION | 330 | select COMPAT_OLD_SIGACTION |
@@ -385,8 +363,7 @@ config NR_CPUS | |||
385 | int "Maximum number of CPUs (2-512)" | 363 | int "Maximum number of CPUs (2-512)" |
386 | range 2 512 | 364 | range 2 512 |
387 | depends on SMP | 365 | depends on SMP |
388 | default "32" if !64BIT | 366 | default "64" |
389 | default "64" if 64BIT | ||
390 | help | 367 | help |
391 | This allows you to specify the maximum number of CPUs which this | 368 | This allows you to specify the maximum number of CPUs which this |
392 | kernel will support. The maximum supported value is 512 and the | 369 | kernel will support. The maximum supported value is 512 and the |
@@ -427,15 +404,6 @@ config SCHED_TOPOLOGY | |||
427 | 404 | ||
428 | source kernel/Kconfig.preempt | 405 | source kernel/Kconfig.preempt |
429 | 406 | ||
430 | config MATHEMU | ||
431 | def_bool y | ||
432 | prompt "IEEE FPU emulation" | ||
433 | depends on MARCH_G5 | ||
434 | help | ||
435 | This option is required for IEEE compliant floating point arithmetic | ||
436 | on older ESA/390 machines. Say Y unless you know your machine doesn't | ||
437 | need this. | ||
438 | |||
439 | source kernel/Kconfig.hz | 407 | source kernel/Kconfig.hz |
440 | 408 | ||
441 | endmenu | 409 | endmenu |
@@ -446,7 +414,6 @@ config ARCH_SPARSEMEM_ENABLE | |||
446 | def_bool y | 414 | def_bool y |
447 | select SPARSEMEM_VMEMMAP_ENABLE | 415 | select SPARSEMEM_VMEMMAP_ENABLE |
448 | select SPARSEMEM_VMEMMAP | 416 | select SPARSEMEM_VMEMMAP |
449 | select SPARSEMEM_STATIC if !64BIT | ||
450 | 417 | ||
451 | config ARCH_SPARSEMEM_DEFAULT | 418 | config ARCH_SPARSEMEM_DEFAULT |
452 | def_bool y | 419 | def_bool y |
@@ -462,7 +429,6 @@ config ARCH_ENABLE_MEMORY_HOTREMOVE | |||
462 | 429 | ||
463 | config ARCH_ENABLE_SPLIT_PMD_PTLOCK | 430 | config ARCH_ENABLE_SPLIT_PMD_PTLOCK |
464 | def_bool y | 431 | def_bool y |
465 | depends on 64BIT | ||
466 | 432 | ||
467 | config FORCE_MAX_ZONEORDER | 433 | config FORCE_MAX_ZONEORDER |
468 | int | 434 | int |
@@ -537,7 +503,6 @@ config QDIO | |||
537 | 503 | ||
538 | menuconfig PCI | 504 | menuconfig PCI |
539 | bool "PCI support" | 505 | bool "PCI support" |
540 | depends on 64BIT | ||
541 | select HAVE_DMA_ATTRS | 506 | select HAVE_DMA_ATTRS |
542 | select PCI_MSI | 507 | select PCI_MSI |
543 | help | 508 | help |
@@ -607,7 +572,6 @@ config CHSC_SCH | |||
607 | 572 | ||
608 | config SCM_BUS | 573 | config SCM_BUS |
609 | def_bool y | 574 | def_bool y |
610 | depends on 64BIT | ||
611 | prompt "SCM bus driver" | 575 | prompt "SCM bus driver" |
612 | help | 576 | help |
613 | Bus driver for Storage Class Memory. | 577 | Bus driver for Storage Class Memory. |
@@ -629,7 +593,7 @@ menu "Dump support" | |||
629 | 593 | ||
630 | config CRASH_DUMP | 594 | config CRASH_DUMP |
631 | bool "kernel crash dumps" | 595 | bool "kernel crash dumps" |
632 | depends on 64BIT && SMP | 596 | depends on SMP |
633 | select KEXEC | 597 | select KEXEC |
634 | help | 598 | help |
635 | Generate crash dump after being started by kexec. | 599 | Generate crash dump after being started by kexec. |
@@ -668,7 +632,7 @@ endmenu | |||
668 | menu "Power Management" | 632 | menu "Power Management" |
669 | 633 | ||
670 | config ARCH_HIBERNATION_POSSIBLE | 634 | config ARCH_HIBERNATION_POSSIBLE |
671 | def_bool y if 64BIT | 635 | def_bool y |
672 | 636 | ||
673 | source "kernel/power/Kconfig" | 637 | source "kernel/power/Kconfig" |
674 | 638 | ||
@@ -819,7 +783,6 @@ source "arch/s390/kvm/Kconfig" | |||
819 | config S390_GUEST | 783 | config S390_GUEST |
820 | def_bool y | 784 | def_bool y |
821 | prompt "s390 support for virtio devices" | 785 | prompt "s390 support for virtio devices" |
822 | depends on 64BIT | ||
823 | select TTY | 786 | select TTY |
824 | select VIRTUALIZATION | 787 | select VIRTUALIZATION |
825 | select VIRTIO | 788 | select VIRTIO |
diff --git a/arch/s390/Makefile b/arch/s390/Makefile index acb6859c6a95..667b1bca5681 100644 --- a/arch/s390/Makefile +++ b/arch/s390/Makefile | |||
@@ -13,15 +13,6 @@ | |||
13 | # Copyright (C) 1994 by Linus Torvalds | 13 | # Copyright (C) 1994 by Linus Torvalds |
14 | # | 14 | # |
15 | 15 | ||
16 | ifndef CONFIG_64BIT | ||
17 | LD_BFD := elf32-s390 | ||
18 | LDFLAGS := -m elf_s390 | ||
19 | KBUILD_CFLAGS += -m31 | ||
20 | KBUILD_AFLAGS += -m31 | ||
21 | UTS_MACHINE := s390 | ||
22 | STACK_SIZE := 8192 | ||
23 | CHECKFLAGS += -D__s390__ -msize-long | ||
24 | else | ||
25 | LD_BFD := elf64-s390 | 16 | LD_BFD := elf64-s390 |
26 | LDFLAGS := -m elf64_s390 | 17 | LDFLAGS := -m elf64_s390 |
27 | KBUILD_AFLAGS_MODULE += -fPIC | 18 | KBUILD_AFLAGS_MODULE += -fPIC |
@@ -31,11 +22,9 @@ KBUILD_AFLAGS += -m64 | |||
31 | UTS_MACHINE := s390x | 22 | UTS_MACHINE := s390x |
32 | STACK_SIZE := 16384 | 23 | STACK_SIZE := 16384 |
33 | CHECKFLAGS += -D__s390__ -D__s390x__ | 24 | CHECKFLAGS += -D__s390__ -D__s390x__ |
34 | endif | ||
35 | 25 | ||
36 | export LD_BFD | 26 | export LD_BFD |
37 | 27 | ||
38 | mflags-$(CONFIG_MARCH_G5) := -march=g5 | ||
39 | mflags-$(CONFIG_MARCH_Z900) := -march=z900 | 28 | mflags-$(CONFIG_MARCH_Z900) := -march=z900 |
40 | mflags-$(CONFIG_MARCH_Z990) := -march=z990 | 29 | mflags-$(CONFIG_MARCH_Z990) := -march=z990 |
41 | mflags-$(CONFIG_MARCH_Z9_109) := -march=z9-109 | 30 | mflags-$(CONFIG_MARCH_Z9_109) := -march=z9-109 |
@@ -47,7 +36,6 @@ mflags-$(CONFIG_MARCH_Z13) := -march=z13 | |||
47 | aflags-y += $(mflags-y) | 36 | aflags-y += $(mflags-y) |
48 | cflags-y += $(mflags-y) | 37 | cflags-y += $(mflags-y) |
49 | 38 | ||
50 | cflags-$(CONFIG_MARCH_G5_TUNE) += -mtune=g5 | ||
51 | cflags-$(CONFIG_MARCH_Z900_TUNE) += -mtune=z900 | 39 | cflags-$(CONFIG_MARCH_Z900_TUNE) += -mtune=z900 |
52 | cflags-$(CONFIG_MARCH_Z990_TUNE) += -mtune=z990 | 40 | cflags-$(CONFIG_MARCH_Z990_TUNE) += -mtune=z990 |
53 | cflags-$(CONFIG_MARCH_Z9_109_TUNE) += -mtune=z9-109 | 41 | cflags-$(CONFIG_MARCH_Z9_109_TUNE) += -mtune=z9-109 |
@@ -104,7 +92,7 @@ KBUILD_AFLAGS += $(aflags-y) | |||
104 | OBJCOPYFLAGS := -O binary | 92 | OBJCOPYFLAGS := -O binary |
105 | 93 | ||
106 | head-y := arch/s390/kernel/head.o | 94 | head-y := arch/s390/kernel/head.o |
107 | head-y += arch/s390/kernel/$(if $(CONFIG_64BIT),head64.o,head31.o) | 95 | head-y += arch/s390/kernel/head64.o |
108 | 96 | ||
109 | # See arch/s390/Kbuild for content of core part of the kernel | 97 | # See arch/s390/Kbuild for content of core part of the kernel |
110 | core-y += arch/s390/ | 98 | core-y += arch/s390/ |
@@ -129,9 +117,7 @@ zfcpdump: | |||
129 | $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ | 117 | $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ |
130 | 118 | ||
131 | vdso_install: | 119 | vdso_install: |
132 | ifeq ($(CONFIG_64BIT),y) | ||
133 | $(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso64 $@ | 120 | $(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso64 $@ |
134 | endif | ||
135 | $(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso32 $@ | 121 | $(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso32 $@ |
136 | 122 | ||
137 | archclean: | 123 | archclean: |
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile index f90d1fc6d603..d4788111c161 100644 --- a/arch/s390/boot/compressed/Makefile +++ b/arch/s390/boot/compressed/Makefile | |||
@@ -4,13 +4,11 @@ | |||
4 | # create a compressed vmlinux image from the original vmlinux | 4 | # create a compressed vmlinux image from the original vmlinux |
5 | # | 5 | # |
6 | 6 | ||
7 | BITS := $(if $(CONFIG_64BIT),64,31) | ||
8 | |||
9 | targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 | 7 | targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 |
10 | targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4 | 8 | targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4 |
11 | targets += misc.o piggy.o sizes.h head$(BITS).o | 9 | targets += misc.o piggy.o sizes.h head.o |
12 | 10 | ||
13 | KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 | 11 | KBUILD_CFLAGS := -m64 -D__KERNEL__ $(LINUX_INCLUDE) -O2 |
14 | KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING | 12 | KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING |
15 | KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks | 13 | KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks |
16 | KBUILD_CFLAGS += $(call cc-option,-mpacked-stack) | 14 | KBUILD_CFLAGS += $(call cc-option,-mpacked-stack) |
@@ -19,7 +17,7 @@ KBUILD_CFLAGS += $(call cc-option,-ffreestanding) | |||
19 | GCOV_PROFILE := n | 17 | GCOV_PROFILE := n |
20 | 18 | ||
21 | OBJECTS := $(addprefix $(objtree)/arch/s390/kernel/, head.o sclp.o ebcdic.o) | 19 | OBJECTS := $(addprefix $(objtree)/arch/s390/kernel/, head.o sclp.o ebcdic.o) |
22 | OBJECTS += $(obj)/head$(BITS).o $(obj)/misc.o $(obj)/piggy.o | 20 | OBJECTS += $(obj)/head.o $(obj)/misc.o $(obj)/piggy.o |
23 | 21 | ||
24 | LDFLAGS_vmlinux := --oformat $(LD_BFD) -e startup -T | 22 | LDFLAGS_vmlinux := --oformat $(LD_BFD) -e startup -T |
25 | $(obj)/vmlinux: $(obj)/vmlinux.lds $(OBJECTS) | 23 | $(obj)/vmlinux: $(obj)/vmlinux.lds $(OBJECTS) |
@@ -34,8 +32,8 @@ quiet_cmd_sizes = GEN $@ | |||
34 | $(obj)/sizes.h: vmlinux | 32 | $(obj)/sizes.h: vmlinux |
35 | $(call if_changed,sizes) | 33 | $(call if_changed,sizes) |
36 | 34 | ||
37 | AFLAGS_head$(BITS).o += -I$(obj) | 35 | AFLAGS_head.o += -I$(obj) |
38 | $(obj)/head$(BITS).o: $(obj)/sizes.h | 36 | $(obj)/head.o: $(obj)/sizes.h |
39 | 37 | ||
40 | CFLAGS_misc.o += -I$(obj) | 38 | CFLAGS_misc.o += -I$(obj) |
41 | $(obj)/misc.o: $(obj)/sizes.h | 39 | $(obj)/misc.o: $(obj)/sizes.h |
diff --git a/arch/s390/boot/compressed/head64.S b/arch/s390/boot/compressed/head.S index f86a4eef28a9..f86a4eef28a9 100644 --- a/arch/s390/boot/compressed/head64.S +++ b/arch/s390/boot/compressed/head.S | |||
diff --git a/arch/s390/boot/compressed/head31.S b/arch/s390/boot/compressed/head31.S deleted file mode 100644 index e8c9e18b8039..000000000000 --- a/arch/s390/boot/compressed/head31.S +++ /dev/null | |||
@@ -1,51 +0,0 @@ | |||
1 | /* | ||
2 | * Startup glue code to uncompress the kernel | ||
3 | * | ||
4 | * Copyright IBM Corp. 2010 | ||
5 | * | ||
6 | * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> | ||
7 | */ | ||
8 | |||
9 | #include <linux/init.h> | ||
10 | #include <linux/linkage.h> | ||
11 | #include <asm/asm-offsets.h> | ||
12 | #include <asm/thread_info.h> | ||
13 | #include <asm/page.h> | ||
14 | #include "sizes.h" | ||
15 | |||
16 | __HEAD | ||
17 | ENTRY(startup_continue) | ||
18 | basr %r13,0 # get base | ||
19 | .LPG1: | ||
20 | # setup stack | ||
21 | l %r15,.Lstack-.LPG1(%r13) | ||
22 | ahi %r15,-96 | ||
23 | l %r1,.Ldecompress-.LPG1(%r13) | ||
24 | basr %r14,%r1 | ||
25 | # setup registers for memory mover & branch to target | ||
26 | lr %r4,%r2 | ||
27 | l %r2,.Loffset-.LPG1(%r13) | ||
28 | la %r4,0(%r2,%r4) | ||
29 | l %r3,.Lmvsize-.LPG1(%r13) | ||
30 | lr %r5,%r3 | ||
31 | # move the memory mover someplace safe | ||
32 | la %r1,0x200 | ||
33 | mvc 0(mover_end-mover,%r1),mover-.LPG1(%r13) | ||
34 | # decompress image is started at 0x11000 | ||
35 | lr %r6,%r2 | ||
36 | br %r1 | ||
37 | mover: | ||
38 | mvcle %r2,%r4,0 | ||
39 | jo mover | ||
40 | br %r6 | ||
41 | mover_end: | ||
42 | |||
43 | .align 8 | ||
44 | .Lstack: | ||
45 | .long 0x8000 + (1<<(PAGE_SHIFT+THREAD_ORDER)) | ||
46 | .Ldecompress: | ||
47 | .long decompress_kernel | ||
48 | .Loffset: | ||
49 | .long 0x11000 | ||
50 | .Lmvsize: | ||
51 | .long SZ__bss_start | ||
diff --git a/arch/s390/boot/compressed/vmlinux.lds.S b/arch/s390/boot/compressed/vmlinux.lds.S index 8e1fb8239287..747735f83426 100644 --- a/arch/s390/boot/compressed/vmlinux.lds.S +++ b/arch/s390/boot/compressed/vmlinux.lds.S | |||
@@ -1,12 +1,7 @@ | |||
1 | #include <asm-generic/vmlinux.lds.h> | 1 | #include <asm-generic/vmlinux.lds.h> |
2 | 2 | ||
3 | #ifdef CONFIG_64BIT | ||
4 | OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390") | 3 | OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390") |
5 | OUTPUT_ARCH(s390:64-bit) | 4 | OUTPUT_ARCH(s390:64-bit) |
6 | #else | ||
7 | OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390") | ||
8 | OUTPUT_ARCH(s390:31-bit) | ||
9 | #endif | ||
10 | 5 | ||
11 | ENTRY(startup) | 6 | ENTRY(startup) |
12 | 7 | ||
diff --git a/arch/s390/crypto/crypt_s390.h b/arch/s390/crypto/crypt_s390.h index 6c5cc6da7111..ba3b2aefddf5 100644 --- a/arch/s390/crypto/crypt_s390.h +++ b/arch/s390/crypto/crypt_s390.h | |||
@@ -369,14 +369,10 @@ static inline int crypt_s390_func_available(int func, | |||
369 | 369 | ||
370 | if (facility_mask & CRYPT_S390_MSA && !test_facility(17)) | 370 | if (facility_mask & CRYPT_S390_MSA && !test_facility(17)) |
371 | return 0; | 371 | return 0; |
372 | 372 | if (facility_mask & CRYPT_S390_MSA3 && !test_facility(76)) | |
373 | if (facility_mask & CRYPT_S390_MSA3 && | ||
374 | (!test_facility(2) || !test_facility(76))) | ||
375 | return 0; | 373 | return 0; |
376 | if (facility_mask & CRYPT_S390_MSA4 && | 374 | if (facility_mask & CRYPT_S390_MSA4 && !test_facility(77)) |
377 | (!test_facility(2) || !test_facility(77))) | ||
378 | return 0; | 375 | return 0; |
379 | |||
380 | switch (func & CRYPT_S390_OP_MASK) { | 376 | switch (func & CRYPT_S390_OP_MASK) { |
381 | case CRYPT_S390_KM: | 377 | case CRYPT_S390_KM: |
382 | ret = crypt_s390_km(KM_QUERY, &status, NULL, NULL, 0); | 378 | ret = crypt_s390_km(KM_QUERY, &status, NULL, NULL, 0); |
diff --git a/arch/s390/hypfs/hypfs_diag0c.c b/arch/s390/hypfs/hypfs_diag0c.c index d4c0d3717543..24c747a0fcc3 100644 --- a/arch/s390/hypfs/hypfs_diag0c.c +++ b/arch/s390/hypfs/hypfs_diag0c.c | |||
@@ -19,13 +19,9 @@ | |||
19 | static void diag0c(struct hypfs_diag0c_entry *entry) | 19 | static void diag0c(struct hypfs_diag0c_entry *entry) |
20 | { | 20 | { |
21 | asm volatile ( | 21 | asm volatile ( |
22 | #ifdef CONFIG_64BIT | ||
23 | " sam31\n" | 22 | " sam31\n" |
24 | " diag %0,%0,0x0c\n" | 23 | " diag %0,%0,0x0c\n" |
25 | " sam64\n" | 24 | " sam64\n" |
26 | #else | ||
27 | " diag %0,%0,0x0c\n" | ||
28 | #endif | ||
29 | : /* no output register */ | 25 | : /* no output register */ |
30 | : "a" (entry) | 26 | : "a" (entry) |
31 | : "memory"); | 27 | : "memory"); |
diff --git a/arch/s390/include/asm/appldata.h b/arch/s390/include/asm/appldata.h index 32a705987156..16887c5fd989 100644 --- a/arch/s390/include/asm/appldata.h +++ b/arch/s390/include/asm/appldata.h | |||
@@ -9,28 +9,6 @@ | |||
9 | 9 | ||
10 | #include <asm/io.h> | 10 | #include <asm/io.h> |
11 | 11 | ||
12 | #ifndef CONFIG_64BIT | ||
13 | |||
14 | #define APPLDATA_START_INTERVAL_REC 0x00 /* Function codes for */ | ||
15 | #define APPLDATA_STOP_REC 0x01 /* DIAG 0xDC */ | ||
16 | #define APPLDATA_GEN_EVENT_REC 0x02 | ||
17 | #define APPLDATA_START_CONFIG_REC 0x03 | ||
18 | |||
19 | /* | ||
20 | * Parameter list for DIAGNOSE X'DC' | ||
21 | */ | ||
22 | struct appldata_parameter_list { | ||
23 | u16 diag; /* The DIAGNOSE code X'00DC' */ | ||
24 | u8 function; /* The function code for the DIAGNOSE */ | ||
25 | u8 parlist_length; /* Length of the parameter list */ | ||
26 | u32 product_id_addr; /* Address of the 16-byte product ID */ | ||
27 | u16 reserved; | ||
28 | u16 buffer_length; /* Length of the application data buffer */ | ||
29 | u32 buffer_addr; /* Address of the application data buffer */ | ||
30 | } __attribute__ ((packed)); | ||
31 | |||
32 | #else /* CONFIG_64BIT */ | ||
33 | |||
34 | #define APPLDATA_START_INTERVAL_REC 0x80 | 12 | #define APPLDATA_START_INTERVAL_REC 0x80 |
35 | #define APPLDATA_STOP_REC 0x81 | 13 | #define APPLDATA_STOP_REC 0x81 |
36 | #define APPLDATA_GEN_EVENT_REC 0x82 | 14 | #define APPLDATA_GEN_EVENT_REC 0x82 |
@@ -51,8 +29,6 @@ struct appldata_parameter_list { | |||
51 | u64 buffer_addr; | 29 | u64 buffer_addr; |
52 | } __attribute__ ((packed)); | 30 | } __attribute__ ((packed)); |
53 | 31 | ||
54 | #endif /* CONFIG_64BIT */ | ||
55 | |||
56 | struct appldata_product_id { | 32 | struct appldata_product_id { |
57 | char prod_nr[7]; /* product number */ | 33 | char prod_nr[7]; /* product number */ |
58 | u16 prod_fn; /* product function */ | 34 | u16 prod_fn; /* product function */ |
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h index fa934fe080c1..adbe3802e377 100644 --- a/arch/s390/include/asm/atomic.h +++ b/arch/s390/include/asm/atomic.h | |||
@@ -160,8 +160,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) | |||
160 | 160 | ||
161 | #define ATOMIC64_INIT(i) { (i) } | 161 | #define ATOMIC64_INIT(i) { (i) } |
162 | 162 | ||
163 | #ifdef CONFIG_64BIT | ||
164 | |||
165 | #define __ATOMIC64_NO_BARRIER "\n" | 163 | #define __ATOMIC64_NO_BARRIER "\n" |
166 | 164 | ||
167 | #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES | 165 | #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES |
@@ -274,99 +272,6 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, | |||
274 | 272 | ||
275 | #undef __ATOMIC64_LOOP | 273 | #undef __ATOMIC64_LOOP |
276 | 274 | ||
277 | #else /* CONFIG_64BIT */ | ||
278 | |||
279 | typedef struct { | ||
280 | long long counter; | ||
281 | } atomic64_t; | ||
282 | |||
283 | static inline long long atomic64_read(const atomic64_t *v) | ||
284 | { | ||
285 | register_pair rp; | ||
286 | |||
287 | asm volatile( | ||
288 | " lm %0,%N0,%1" | ||
289 | : "=&d" (rp) : "Q" (v->counter) ); | ||
290 | return rp.pair; | ||
291 | } | ||
292 | |||
293 | static inline void atomic64_set(atomic64_t *v, long long i) | ||
294 | { | ||
295 | register_pair rp = {.pair = i}; | ||
296 | |||
297 | asm volatile( | ||
298 | " stm %1,%N1,%0" | ||
299 | : "=Q" (v->counter) : "d" (rp) ); | ||
300 | } | ||
301 | |||
302 | static inline long long atomic64_xchg(atomic64_t *v, long long new) | ||
303 | { | ||
304 | register_pair rp_new = {.pair = new}; | ||
305 | register_pair rp_old; | ||
306 | |||
307 | asm volatile( | ||
308 | " lm %0,%N0,%1\n" | ||
309 | "0: cds %0,%2,%1\n" | ||
310 | " jl 0b\n" | ||
311 | : "=&d" (rp_old), "+Q" (v->counter) | ||
312 | : "d" (rp_new) | ||
313 | : "cc"); | ||
314 | return rp_old.pair; | ||
315 | } | ||
316 | |||
317 | static inline long long atomic64_cmpxchg(atomic64_t *v, | ||
318 | long long old, long long new) | ||
319 | { | ||
320 | register_pair rp_old = {.pair = old}; | ||
321 | register_pair rp_new = {.pair = new}; | ||
322 | |||
323 | asm volatile( | ||
324 | " cds %0,%2,%1" | ||
325 | : "+&d" (rp_old), "+Q" (v->counter) | ||
326 | : "d" (rp_new) | ||
327 | : "cc"); | ||
328 | return rp_old.pair; | ||
329 | } | ||
330 | |||
331 | |||
332 | static inline long long atomic64_add_return(long long i, atomic64_t *v) | ||
333 | { | ||
334 | long long old, new; | ||
335 | |||
336 | do { | ||
337 | old = atomic64_read(v); | ||
338 | new = old + i; | ||
339 | } while (atomic64_cmpxchg(v, old, new) != old); | ||
340 | return new; | ||
341 | } | ||
342 | |||
343 | static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v) | ||
344 | { | ||
345 | long long old, new; | ||
346 | |||
347 | do { | ||
348 | old = atomic64_read(v); | ||
349 | new = old | mask; | ||
350 | } while (atomic64_cmpxchg(v, old, new) != old); | ||
351 | } | ||
352 | |||
353 | static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v) | ||
354 | { | ||
355 | long long old, new; | ||
356 | |||
357 | do { | ||
358 | old = atomic64_read(v); | ||
359 | new = old & mask; | ||
360 | } while (atomic64_cmpxchg(v, old, new) != old); | ||
361 | } | ||
362 | |||
363 | static inline void atomic64_add(long long i, atomic64_t *v) | ||
364 | { | ||
365 | atomic64_add_return(i, v); | ||
366 | } | ||
367 | |||
368 | #endif /* CONFIG_64BIT */ | ||
369 | |||
370 | static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u) | 275 | static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u) |
371 | { | 276 | { |
372 | long long c, old; | 277 | long long c, old; |
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h index 520542477678..9b68e98a724f 100644 --- a/arch/s390/include/asm/bitops.h +++ b/arch/s390/include/asm/bitops.h | |||
@@ -51,32 +51,6 @@ | |||
51 | 51 | ||
52 | #define __BITOPS_NO_BARRIER "\n" | 52 | #define __BITOPS_NO_BARRIER "\n" |
53 | 53 | ||
54 | #ifndef CONFIG_64BIT | ||
55 | |||
56 | #define __BITOPS_OR "or" | ||
57 | #define __BITOPS_AND "nr" | ||
58 | #define __BITOPS_XOR "xr" | ||
59 | #define __BITOPS_BARRIER "\n" | ||
60 | |||
61 | #define __BITOPS_LOOP(__addr, __val, __op_string, __barrier) \ | ||
62 | ({ \ | ||
63 | unsigned long __old, __new; \ | ||
64 | \ | ||
65 | typecheck(unsigned long *, (__addr)); \ | ||
66 | asm volatile( \ | ||
67 | " l %0,%2\n" \ | ||
68 | "0: lr %1,%0\n" \ | ||
69 | __op_string " %1,%3\n" \ | ||
70 | " cs %0,%1,%2\n" \ | ||
71 | " jl 0b" \ | ||
72 | : "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\ | ||
73 | : "d" (__val) \ | ||
74 | : "cc", "memory"); \ | ||
75 | __old; \ | ||
76 | }) | ||
77 | |||
78 | #else /* CONFIG_64BIT */ | ||
79 | |||
80 | #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES | 54 | #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES |
81 | 55 | ||
82 | #define __BITOPS_OR "laog" | 56 | #define __BITOPS_OR "laog" |
@@ -125,8 +99,6 @@ | |||
125 | 99 | ||
126 | #endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */ | 100 | #endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */ |
127 | 101 | ||
128 | #endif /* CONFIG_64BIT */ | ||
129 | |||
130 | #define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG) | 102 | #define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG) |
131 | 103 | ||
132 | static inline unsigned long * | 104 | static inline unsigned long * |
diff --git a/arch/s390/include/asm/cmpxchg.h b/arch/s390/include/asm/cmpxchg.h index 6259895fcd97..4eadec466b8c 100644 --- a/arch/s390/include/asm/cmpxchg.h +++ b/arch/s390/include/asm/cmpxchg.h | |||
@@ -80,15 +80,10 @@ extern void __cmpxchg_double_called_with_bad_pointer(void); | |||
80 | ({ \ | 80 | ({ \ |
81 | __typeof__(p1) __p1 = (p1); \ | 81 | __typeof__(p1) __p1 = (p1); \ |
82 | __typeof__(p2) __p2 = (p2); \ | 82 | __typeof__(p2) __p2 = (p2); \ |
83 | int __ret; \ | ||
84 | BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long)); \ | 83 | BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long)); \ |
85 | BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long)); \ | 84 | BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long)); \ |
86 | VM_BUG_ON((unsigned long)((__p1) + 1) != (unsigned long)(__p2));\ | 85 | VM_BUG_ON((unsigned long)((__p1) + 1) != (unsigned long)(__p2));\ |
87 | if (sizeof(long) == 4) \ | 86 | __cmpxchg_double_8(__p1, __p2, o1, o2, n1, n2); \ |
88 | __ret = __cmpxchg_double_4(__p1, __p2, o1, o2, n1, n2); \ | ||
89 | else \ | ||
90 | __ret = __cmpxchg_double_8(__p1, __p2, o1, o2, n1, n2); \ | ||
91 | __ret; \ | ||
92 | }) | 87 | }) |
93 | 88 | ||
94 | #define system_has_cmpxchg_double() 1 | 89 | #define system_has_cmpxchg_double() 1 |
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h index b91e960e4045..221b454c734a 100644 --- a/arch/s390/include/asm/cputime.h +++ b/arch/s390/include/asm/cputime.h | |||
@@ -22,15 +22,7 @@ typedef unsigned long long __nocast cputime64_t; | |||
22 | 22 | ||
23 | static inline unsigned long __div(unsigned long long n, unsigned long base) | 23 | static inline unsigned long __div(unsigned long long n, unsigned long base) |
24 | { | 24 | { |
25 | #ifndef CONFIG_64BIT | ||
26 | register_pair rp; | ||
27 | |||
28 | rp.pair = n >> 1; | ||
29 | asm ("dr %0,%1" : "+d" (rp) : "d" (base >> 1)); | ||
30 | return rp.subreg.odd; | ||
31 | #else /* CONFIG_64BIT */ | ||
32 | return n / base; | 25 | return n / base; |
33 | #endif /* CONFIG_64BIT */ | ||
34 | } | 26 | } |
35 | 27 | ||
36 | #define cputime_one_jiffy jiffies_to_cputime(1) | 28 | #define cputime_one_jiffy jiffies_to_cputime(1) |
@@ -101,17 +93,8 @@ static inline void cputime_to_timespec(const cputime_t cputime, | |||
101 | struct timespec *value) | 93 | struct timespec *value) |
102 | { | 94 | { |
103 | unsigned long long __cputime = (__force unsigned long long) cputime; | 95 | unsigned long long __cputime = (__force unsigned long long) cputime; |
104 | #ifndef CONFIG_64BIT | ||
105 | register_pair rp; | ||
106 | |||
107 | rp.pair = __cputime >> 1; | ||
108 | asm ("dr %0,%1" : "+d" (rp) : "d" (CPUTIME_PER_SEC / 2)); | ||
109 | value->tv_nsec = rp.subreg.even * NSEC_PER_USEC / CPUTIME_PER_USEC; | ||
110 | value->tv_sec = rp.subreg.odd; | ||
111 | #else | ||
112 | value->tv_nsec = (__cputime % CPUTIME_PER_SEC) * NSEC_PER_USEC / CPUTIME_PER_USEC; | 96 | value->tv_nsec = (__cputime % CPUTIME_PER_SEC) * NSEC_PER_USEC / CPUTIME_PER_USEC; |
113 | value->tv_sec = __cputime / CPUTIME_PER_SEC; | 97 | value->tv_sec = __cputime / CPUTIME_PER_SEC; |
114 | #endif | ||
115 | } | 98 | } |
116 | 99 | ||
117 | /* | 100 | /* |
@@ -129,17 +112,8 @@ static inline void cputime_to_timeval(const cputime_t cputime, | |||
129 | struct timeval *value) | 112 | struct timeval *value) |
130 | { | 113 | { |
131 | unsigned long long __cputime = (__force unsigned long long) cputime; | 114 | unsigned long long __cputime = (__force unsigned long long) cputime; |
132 | #ifndef CONFIG_64BIT | ||
133 | register_pair rp; | ||
134 | |||
135 | rp.pair = __cputime >> 1; | ||
136 | asm ("dr %0,%1" : "+d" (rp) : "d" (CPUTIME_PER_USEC / 2)); | ||
137 | value->tv_usec = rp.subreg.even / CPUTIME_PER_USEC; | ||
138 | value->tv_sec = rp.subreg.odd; | ||
139 | #else | ||
140 | value->tv_usec = (__cputime % CPUTIME_PER_SEC) / CPUTIME_PER_USEC; | 115 | value->tv_usec = (__cputime % CPUTIME_PER_SEC) / CPUTIME_PER_USEC; |
141 | value->tv_sec = __cputime / CPUTIME_PER_SEC; | 116 | value->tv_sec = __cputime / CPUTIME_PER_SEC; |
142 | #endif | ||
143 | } | 117 | } |
144 | 118 | ||
145 | /* | 119 | /* |
diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h index 31ab9f346d7e..cfad7fca01d6 100644 --- a/arch/s390/include/asm/ctl_reg.h +++ b/arch/s390/include/asm/ctl_reg.h | |||
@@ -9,20 +9,12 @@ | |||
9 | 9 | ||
10 | #include <linux/bug.h> | 10 | #include <linux/bug.h> |
11 | 11 | ||
12 | #ifdef CONFIG_64BIT | ||
13 | # define __CTL_LOAD "lctlg" | ||
14 | # define __CTL_STORE "stctg" | ||
15 | #else | ||
16 | # define __CTL_LOAD "lctl" | ||
17 | # define __CTL_STORE "stctl" | ||
18 | #endif | ||
19 | |||
20 | #define __ctl_load(array, low, high) { \ | 12 | #define __ctl_load(array, low, high) { \ |
21 | typedef struct { char _[sizeof(array)]; } addrtype; \ | 13 | typedef struct { char _[sizeof(array)]; } addrtype; \ |
22 | \ | 14 | \ |
23 | BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\ | 15 | BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\ |
24 | asm volatile( \ | 16 | asm volatile( \ |
25 | __CTL_LOAD " %1,%2,%0\n" \ | 17 | " lctlg %1,%2,%0\n" \ |
26 | : : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high));\ | 18 | : : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high));\ |
27 | } | 19 | } |
28 | 20 | ||
@@ -31,7 +23,7 @@ | |||
31 | \ | 23 | \ |
32 | BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\ | 24 | BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\ |
33 | asm volatile( \ | 25 | asm volatile( \ |
34 | __CTL_STORE " %1,%2,%0\n" \ | 26 | " stctg %1,%2,%0\n" \ |
35 | : "=Q" (*(addrtype *)(&array)) \ | 27 | : "=Q" (*(addrtype *)(&array)) \ |
36 | : "i" (low), "i" (high)); \ | 28 | : "i" (low), "i" (high)); \ |
37 | } | 29 | } |
@@ -60,9 +52,7 @@ void smp_ctl_clear_bit(int cr, int bit); | |||
60 | union ctlreg0 { | 52 | union ctlreg0 { |
61 | unsigned long val; | 53 | unsigned long val; |
62 | struct { | 54 | struct { |
63 | #ifdef CONFIG_64BIT | ||
64 | unsigned long : 32; | 55 | unsigned long : 32; |
65 | #endif | ||
66 | unsigned long : 3; | 56 | unsigned long : 3; |
67 | unsigned long lap : 1; /* Low-address-protection control */ | 57 | unsigned long lap : 1; /* Low-address-protection control */ |
68 | unsigned long : 4; | 58 | unsigned long : 4; |
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h index a5c4978462c1..3ad48f22de78 100644 --- a/arch/s390/include/asm/elf.h +++ b/arch/s390/include/asm/elf.h | |||
@@ -107,11 +107,7 @@ | |||
107 | /* | 107 | /* |
108 | * These are used to set parameters in the core dumps. | 108 | * These are used to set parameters in the core dumps. |
109 | */ | 109 | */ |
110 | #ifndef CONFIG_64BIT | ||
111 | #define ELF_CLASS ELFCLASS32 | ||
112 | #else /* CONFIG_64BIT */ | ||
113 | #define ELF_CLASS ELFCLASS64 | 110 | #define ELF_CLASS ELFCLASS64 |
114 | #endif /* CONFIG_64BIT */ | ||
115 | #define ELF_DATA ELFDATA2MSB | 111 | #define ELF_DATA ELFDATA2MSB |
116 | #define ELF_ARCH EM_S390 | 112 | #define ELF_ARCH EM_S390 |
117 | 113 | ||
diff --git a/arch/s390/include/asm/idals.h b/arch/s390/include/asm/idals.h index ea5a6e45fd93..a7b2d7504049 100644 --- a/arch/s390/include/asm/idals.h +++ b/arch/s390/include/asm/idals.h | |||
@@ -19,11 +19,7 @@ | |||
19 | #include <asm/cio.h> | 19 | #include <asm/cio.h> |
20 | #include <asm/uaccess.h> | 20 | #include <asm/uaccess.h> |
21 | 21 | ||
22 | #ifdef CONFIG_64BIT | ||
23 | #define IDA_SIZE_LOG 12 /* 11 for 2k , 12 for 4k */ | 22 | #define IDA_SIZE_LOG 12 /* 11 for 2k , 12 for 4k */ |
24 | #else | ||
25 | #define IDA_SIZE_LOG 11 /* 11 for 2k , 12 for 4k */ | ||
26 | #endif | ||
27 | #define IDA_BLOCK_SIZE (1L<<IDA_SIZE_LOG) | 23 | #define IDA_BLOCK_SIZE (1L<<IDA_SIZE_LOG) |
28 | 24 | ||
29 | /* | 25 | /* |
@@ -32,11 +28,7 @@ | |||
32 | static inline int | 28 | static inline int |
33 | idal_is_needed(void *vaddr, unsigned int length) | 29 | idal_is_needed(void *vaddr, unsigned int length) |
34 | { | 30 | { |
35 | #ifdef CONFIG_64BIT | ||
36 | return ((__pa(vaddr) + length - 1) >> 31) != 0; | 31 | return ((__pa(vaddr) + length - 1) >> 31) != 0; |
37 | #else | ||
38 | return 0; | ||
39 | #endif | ||
40 | } | 32 | } |
41 | 33 | ||
42 | 34 | ||
@@ -77,7 +69,6 @@ static inline unsigned long *idal_create_words(unsigned long *idaws, | |||
77 | static inline int | 69 | static inline int |
78 | set_normalized_cda(struct ccw1 * ccw, void *vaddr) | 70 | set_normalized_cda(struct ccw1 * ccw, void *vaddr) |
79 | { | 71 | { |
80 | #ifdef CONFIG_64BIT | ||
81 | unsigned int nridaws; | 72 | unsigned int nridaws; |
82 | unsigned long *idal; | 73 | unsigned long *idal; |
83 | 74 | ||
@@ -93,7 +84,6 @@ set_normalized_cda(struct ccw1 * ccw, void *vaddr) | |||
93 | ccw->flags |= CCW_FLAG_IDA; | 84 | ccw->flags |= CCW_FLAG_IDA; |
94 | vaddr = idal; | 85 | vaddr = idal; |
95 | } | 86 | } |
96 | #endif | ||
97 | ccw->cda = (__u32)(unsigned long) vaddr; | 87 | ccw->cda = (__u32)(unsigned long) vaddr; |
98 | return 0; | 88 | return 0; |
99 | } | 89 | } |
@@ -104,12 +94,10 @@ set_normalized_cda(struct ccw1 * ccw, void *vaddr) | |||
104 | static inline void | 94 | static inline void |
105 | clear_normalized_cda(struct ccw1 * ccw) | 95 | clear_normalized_cda(struct ccw1 * ccw) |
106 | { | 96 | { |
107 | #ifdef CONFIG_64BIT | ||
108 | if (ccw->flags & CCW_FLAG_IDA) { | 97 | if (ccw->flags & CCW_FLAG_IDA) { |
109 | kfree((void *)(unsigned long) ccw->cda); | 98 | kfree((void *)(unsigned long) ccw->cda); |
110 | ccw->flags &= ~CCW_FLAG_IDA; | 99 | ccw->flags &= ~CCW_FLAG_IDA; |
111 | } | 100 | } |
112 | #endif | ||
113 | ccw->cda = 0; | 101 | ccw->cda = 0; |
114 | } | 102 | } |
115 | 103 | ||
@@ -181,12 +169,8 @@ idal_buffer_free(struct idal_buffer *ib) | |||
181 | static inline int | 169 | static inline int |
182 | __idal_buffer_is_needed(struct idal_buffer *ib) | 170 | __idal_buffer_is_needed(struct idal_buffer *ib) |
183 | { | 171 | { |
184 | #ifdef CONFIG_64BIT | ||
185 | return ib->size > (4096ul << ib->page_order) || | 172 | return ib->size > (4096ul << ib->page_order) || |
186 | idal_is_needed(ib->data[0], ib->size); | 173 | idal_is_needed(ib->data[0], ib->size); |
187 | #else | ||
188 | return ib->size > (4096ul << ib->page_order); | ||
189 | #endif | ||
190 | } | 174 | } |
191 | 175 | ||
192 | /* | 176 | /* |
diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h index 2b77e235b5fb..69972b7957ee 100644 --- a/arch/s390/include/asm/jump_label.h +++ b/arch/s390/include/asm/jump_label.h | |||
@@ -8,14 +8,6 @@ | |||
8 | #define JUMP_LABEL_NOP_SIZE 6 | 8 | #define JUMP_LABEL_NOP_SIZE 6 |
9 | #define JUMP_LABEL_NOP_OFFSET 2 | 9 | #define JUMP_LABEL_NOP_OFFSET 2 |
10 | 10 | ||
11 | #ifdef CONFIG_64BIT | ||
12 | #define ASM_PTR ".quad" | ||
13 | #define ASM_ALIGN ".balign 8" | ||
14 | #else | ||
15 | #define ASM_PTR ".long" | ||
16 | #define ASM_ALIGN ".balign 4" | ||
17 | #endif | ||
18 | |||
19 | /* | 11 | /* |
20 | * We use a brcl 0,2 instruction for jump labels at compile time so it | 12 | * We use a brcl 0,2 instruction for jump labels at compile time so it |
21 | * can be easily distinguished from a hotpatch generated instruction. | 13 | * can be easily distinguished from a hotpatch generated instruction. |
@@ -24,8 +16,8 @@ static __always_inline bool arch_static_branch(struct static_key *key) | |||
24 | { | 16 | { |
25 | asm_volatile_goto("0: brcl 0,"__stringify(JUMP_LABEL_NOP_OFFSET)"\n" | 17 | asm_volatile_goto("0: brcl 0,"__stringify(JUMP_LABEL_NOP_OFFSET)"\n" |
26 | ".pushsection __jump_table, \"aw\"\n" | 18 | ".pushsection __jump_table, \"aw\"\n" |
27 | ASM_ALIGN "\n" | 19 | ".balign 8\n" |
28 | ASM_PTR " 0b, %l[label], %0\n" | 20 | ".quad 0b, %l[label], %0\n" |
29 | ".popsection\n" | 21 | ".popsection\n" |
30 | : : "X" (key) : : label); | 22 | : : "X" (key) : : label); |
31 | return false; | 23 | return false; |
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h index 34fbcac61133..663f23e37460 100644 --- a/arch/s390/include/asm/lowcore.h +++ b/arch/s390/include/asm/lowcore.h | |||
@@ -13,163 +13,6 @@ | |||
13 | #include <asm/cpu.h> | 13 | #include <asm/cpu.h> |
14 | #include <asm/types.h> | 14 | #include <asm/types.h> |
15 | 15 | ||
16 | #ifdef CONFIG_32BIT | ||
17 | |||
18 | #define LC_ORDER 0 | ||
19 | #define LC_PAGES 1 | ||
20 | |||
21 | struct save_area { | ||
22 | u32 ext_save; | ||
23 | u64 timer; | ||
24 | u64 clk_cmp; | ||
25 | u8 pad1[24]; | ||
26 | u8 psw[8]; | ||
27 | u32 pref_reg; | ||
28 | u8 pad2[20]; | ||
29 | u32 acc_regs[16]; | ||
30 | u64 fp_regs[4]; | ||
31 | u32 gp_regs[16]; | ||
32 | u32 ctrl_regs[16]; | ||
33 | } __packed; | ||
34 | |||
35 | struct save_area_ext { | ||
36 | struct save_area sa; | ||
37 | __vector128 vx_regs[32]; | ||
38 | }; | ||
39 | |||
40 | struct _lowcore { | ||
41 | psw_t restart_psw; /* 0x0000 */ | ||
42 | psw_t restart_old_psw; /* 0x0008 */ | ||
43 | __u8 pad_0x0010[0x0014-0x0010]; /* 0x0010 */ | ||
44 | __u32 ipl_parmblock_ptr; /* 0x0014 */ | ||
45 | psw_t external_old_psw; /* 0x0018 */ | ||
46 | psw_t svc_old_psw; /* 0x0020 */ | ||
47 | psw_t program_old_psw; /* 0x0028 */ | ||
48 | psw_t mcck_old_psw; /* 0x0030 */ | ||
49 | psw_t io_old_psw; /* 0x0038 */ | ||
50 | __u8 pad_0x0040[0x0058-0x0040]; /* 0x0040 */ | ||
51 | psw_t external_new_psw; /* 0x0058 */ | ||
52 | psw_t svc_new_psw; /* 0x0060 */ | ||
53 | psw_t program_new_psw; /* 0x0068 */ | ||
54 | psw_t mcck_new_psw; /* 0x0070 */ | ||
55 | psw_t io_new_psw; /* 0x0078 */ | ||
56 | __u32 ext_params; /* 0x0080 */ | ||
57 | __u16 ext_cpu_addr; /* 0x0084 */ | ||
58 | __u16 ext_int_code; /* 0x0086 */ | ||
59 | __u16 svc_ilc; /* 0x0088 */ | ||
60 | __u16 svc_code; /* 0x008a */ | ||
61 | __u16 pgm_ilc; /* 0x008c */ | ||
62 | __u16 pgm_code; /* 0x008e */ | ||
63 | __u32 trans_exc_code; /* 0x0090 */ | ||
64 | __u16 mon_class_num; /* 0x0094 */ | ||
65 | __u8 per_code; /* 0x0096 */ | ||
66 | __u8 per_atmid; /* 0x0097 */ | ||
67 | __u32 per_address; /* 0x0098 */ | ||
68 | __u32 monitor_code; /* 0x009c */ | ||
69 | __u8 exc_access_id; /* 0x00a0 */ | ||
70 | __u8 per_access_id; /* 0x00a1 */ | ||
71 | __u8 op_access_id; /* 0x00a2 */ | ||
72 | __u8 ar_mode_id; /* 0x00a3 */ | ||
73 | __u8 pad_0x00a4[0x00b8-0x00a4]; /* 0x00a4 */ | ||
74 | __u16 subchannel_id; /* 0x00b8 */ | ||
75 | __u16 subchannel_nr; /* 0x00ba */ | ||
76 | __u32 io_int_parm; /* 0x00bc */ | ||
77 | __u32 io_int_word; /* 0x00c0 */ | ||
78 | __u8 pad_0x00c4[0x00c8-0x00c4]; /* 0x00c4 */ | ||
79 | __u32 stfl_fac_list; /* 0x00c8 */ | ||
80 | __u8 pad_0x00cc[0x00d4-0x00cc]; /* 0x00cc */ | ||
81 | __u32 extended_save_area_addr; /* 0x00d4 */ | ||
82 | __u32 cpu_timer_save_area[2]; /* 0x00d8 */ | ||
83 | __u32 clock_comp_save_area[2]; /* 0x00e0 */ | ||
84 | __u32 mcck_interruption_code[2]; /* 0x00e8 */ | ||
85 | __u8 pad_0x00f0[0x00f4-0x00f0]; /* 0x00f0 */ | ||
86 | __u32 external_damage_code; /* 0x00f4 */ | ||
87 | __u32 failing_storage_address; /* 0x00f8 */ | ||
88 | __u8 pad_0x00fc[0x0100-0x00fc]; /* 0x00fc */ | ||
89 | psw_t psw_save_area; /* 0x0100 */ | ||
90 | __u32 prefixreg_save_area; /* 0x0108 */ | ||
91 | __u8 pad_0x010c[0x0120-0x010c]; /* 0x010c */ | ||
92 | |||
93 | /* CPU register save area: defined by architecture */ | ||
94 | __u32 access_regs_save_area[16]; /* 0x0120 */ | ||
95 | __u32 floating_pt_save_area[8]; /* 0x0160 */ | ||
96 | __u32 gpregs_save_area[16]; /* 0x0180 */ | ||
97 | __u32 cregs_save_area[16]; /* 0x01c0 */ | ||
98 | |||
99 | /* Save areas. */ | ||
100 | __u32 save_area_sync[8]; /* 0x0200 */ | ||
101 | __u32 save_area_async[8]; /* 0x0220 */ | ||
102 | __u32 save_area_restart[1]; /* 0x0240 */ | ||
103 | |||
104 | /* CPU flags. */ | ||
105 | __u32 cpu_flags; /* 0x0244 */ | ||
106 | |||
107 | /* Return psws. */ | ||
108 | psw_t return_psw; /* 0x0248 */ | ||
109 | psw_t return_mcck_psw; /* 0x0250 */ | ||
110 | |||
111 | /* CPU time accounting values */ | ||
112 | __u64 sync_enter_timer; /* 0x0258 */ | ||
113 | __u64 async_enter_timer; /* 0x0260 */ | ||
114 | __u64 mcck_enter_timer; /* 0x0268 */ | ||
115 | __u64 exit_timer; /* 0x0270 */ | ||
116 | __u64 user_timer; /* 0x0278 */ | ||
117 | __u64 system_timer; /* 0x0280 */ | ||
118 | __u64 steal_timer; /* 0x0288 */ | ||
119 | __u64 last_update_timer; /* 0x0290 */ | ||
120 | __u64 last_update_clock; /* 0x0298 */ | ||
121 | __u64 int_clock; /* 0x02a0 */ | ||
122 | __u64 mcck_clock; /* 0x02a8 */ | ||
123 | __u64 clock_comparator; /* 0x02b0 */ | ||
124 | |||
125 | /* Current process. */ | ||
126 | __u32 current_task; /* 0x02b8 */ | ||
127 | __u32 thread_info; /* 0x02bc */ | ||
128 | __u32 kernel_stack; /* 0x02c0 */ | ||
129 | |||
130 | /* Interrupt, panic and restart stack. */ | ||
131 | __u32 async_stack; /* 0x02c4 */ | ||
132 | __u32 panic_stack; /* 0x02c8 */ | ||
133 | __u32 restart_stack; /* 0x02cc */ | ||
134 | |||
135 | /* Restart function and parameter. */ | ||
136 | __u32 restart_fn; /* 0x02d0 */ | ||
137 | __u32 restart_data; /* 0x02d4 */ | ||
138 | __u32 restart_source; /* 0x02d8 */ | ||
139 | |||
140 | /* Address space pointer. */ | ||
141 | __u32 kernel_asce; /* 0x02dc */ | ||
142 | __u32 user_asce; /* 0x02e0 */ | ||
143 | __u32 current_pid; /* 0x02e4 */ | ||
144 | |||
145 | /* SMP info area */ | ||
146 | __u32 cpu_nr; /* 0x02e8 */ | ||
147 | __u32 softirq_pending; /* 0x02ec */ | ||
148 | __u32 percpu_offset; /* 0x02f0 */ | ||
149 | __u32 machine_flags; /* 0x02f4 */ | ||
150 | __u8 pad_0x02f8[0x02fc-0x02f8]; /* 0x02f8 */ | ||
151 | __u32 spinlock_lockval; /* 0x02fc */ | ||
152 | |||
153 | __u8 pad_0x0300[0x0e00-0x0300]; /* 0x0300 */ | ||
154 | |||
155 | /* | ||
156 | * 0xe00 contains the address of the IPL Parameter Information | ||
157 | * block. Dump tools need IPIB for IPL after dump. | ||
158 | * Note: do not change the position of any fields in 0x0e00-0x0f00 | ||
159 | */ | ||
160 | __u32 ipib; /* 0x0e00 */ | ||
161 | __u32 ipib_checksum; /* 0x0e04 */ | ||
162 | __u32 vmcore_info; /* 0x0e08 */ | ||
163 | __u8 pad_0x0e0c[0x0e18-0x0e0c]; /* 0x0e0c */ | ||
164 | __u32 os_info; /* 0x0e18 */ | ||
165 | __u8 pad_0x0e1c[0x0f00-0x0e1c]; /* 0x0e1c */ | ||
166 | |||
167 | /* Extended facility list */ | ||
168 | __u64 stfle_fac_list[32]; /* 0x0f00 */ | ||
169 | } __packed; | ||
170 | |||
171 | #else /* CONFIG_32BIT */ | ||
172 | |||
173 | #define LC_ORDER 1 | 16 | #define LC_ORDER 1 |
174 | #define LC_PAGES 2 | 17 | #define LC_PAGES 2 |
175 | 18 | ||
@@ -354,8 +197,6 @@ struct _lowcore { | |||
354 | __u8 vector_save_area[1024]; /* 0x1c00 */ | 197 | __u8 vector_save_area[1024]; /* 0x1c00 */ |
355 | } __packed; | 198 | } __packed; |
356 | 199 | ||
357 | #endif /* CONFIG_32BIT */ | ||
358 | |||
359 | #define S390_lowcore (*((struct _lowcore *) 0)) | 200 | #define S390_lowcore (*((struct _lowcore *) 0)) |
360 | 201 | ||
361 | extern struct _lowcore *lowcore_ptr[]; | 202 | extern struct _lowcore *lowcore_ptr[]; |
diff --git a/arch/s390/include/asm/mman.h b/arch/s390/include/asm/mman.h index 9977e08df5bd..b55a59e1d134 100644 --- a/arch/s390/include/asm/mman.h +++ b/arch/s390/include/asm/mman.h | |||
@@ -8,7 +8,7 @@ | |||
8 | 8 | ||
9 | #include <uapi/asm/mman.h> | 9 | #include <uapi/asm/mman.h> |
10 | 10 | ||
11 | #if !defined(__ASSEMBLY__) && defined(CONFIG_64BIT) | 11 | #ifndef __ASSEMBLY__ |
12 | int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags); | 12 | int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags); |
13 | #define arch_mmap_check(addr, len, flags) s390_mmap_check(addr, len, flags) | 13 | #define arch_mmap_check(addr, len, flags) s390_mmap_check(addr, len, flags) |
14 | #endif | 14 | #endif |
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h index 8fb3802f8fad..d25d9ff10ba8 100644 --- a/arch/s390/include/asm/mmu_context.h +++ b/arch/s390/include/asm/mmu_context.h | |||
@@ -19,9 +19,7 @@ static inline int init_new_context(struct task_struct *tsk, | |||
19 | atomic_set(&mm->context.attach_count, 0); | 19 | atomic_set(&mm->context.attach_count, 0); |
20 | mm->context.flush_mm = 0; | 20 | mm->context.flush_mm = 0; |
21 | mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS; | 21 | mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS; |
22 | #ifdef CONFIG_64BIT | ||
23 | mm->context.asce_bits |= _ASCE_TYPE_REGION3; | 22 | mm->context.asce_bits |= _ASCE_TYPE_REGION3; |
24 | #endif | ||
25 | mm->context.has_pgste = 0; | 23 | mm->context.has_pgste = 0; |
26 | mm->context.use_skey = 0; | 24 | mm->context.use_skey = 0; |
27 | mm->context.asce_limit = STACK_TOP_MAX; | 25 | mm->context.asce_limit = STACK_TOP_MAX; |
@@ -110,10 +108,8 @@ static inline void activate_mm(struct mm_struct *prev, | |||
110 | static inline void arch_dup_mmap(struct mm_struct *oldmm, | 108 | static inline void arch_dup_mmap(struct mm_struct *oldmm, |
111 | struct mm_struct *mm) | 109 | struct mm_struct *mm) |
112 | { | 110 | { |
113 | #ifdef CONFIG_64BIT | ||
114 | if (oldmm->context.asce_limit < mm->context.asce_limit) | 111 | if (oldmm->context.asce_limit < mm->context.asce_limit) |
115 | crst_table_downgrade(mm, oldmm->context.asce_limit); | 112 | crst_table_downgrade(mm, oldmm->context.asce_limit); |
116 | #endif | ||
117 | } | 113 | } |
118 | 114 | ||
119 | static inline void arch_exit_mmap(struct mm_struct *mm) | 115 | static inline void arch_exit_mmap(struct mm_struct *mm) |
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h index 933355e0d091..6d6556ca24aa 100644 --- a/arch/s390/include/asm/percpu.h +++ b/arch/s390/include/asm/percpu.h | |||
@@ -10,8 +10,6 @@ | |||
10 | */ | 10 | */ |
11 | #define __my_cpu_offset S390_lowcore.percpu_offset | 11 | #define __my_cpu_offset S390_lowcore.percpu_offset |
12 | 12 | ||
13 | #ifdef CONFIG_64BIT | ||
14 | |||
15 | /* | 13 | /* |
16 | * For 64 bit module code, the module may be more than 4G above the | 14 | * For 64 bit module code, the module may be more than 4G above the |
17 | * per cpu area, use weak definitions to force the compiler to | 15 | * per cpu area, use weak definitions to force the compiler to |
@@ -183,8 +181,6 @@ | |||
183 | #define this_cpu_cmpxchg_double_4 arch_this_cpu_cmpxchg_double | 181 | #define this_cpu_cmpxchg_double_4 arch_this_cpu_cmpxchg_double |
184 | #define this_cpu_cmpxchg_double_8 arch_this_cpu_cmpxchg_double | 182 | #define this_cpu_cmpxchg_double_8 arch_this_cpu_cmpxchg_double |
185 | 183 | ||
186 | #endif /* CONFIG_64BIT */ | ||
187 | |||
188 | #include <asm-generic/percpu.h> | 184 | #include <asm-generic/percpu.h> |
189 | 185 | ||
190 | #endif /* __ARCH_S390_PERCPU__ */ | 186 | #endif /* __ARCH_S390_PERCPU__ */ |
diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h index 159a8ec6da9a..4cb19fe76dd9 100644 --- a/arch/s390/include/asm/perf_event.h +++ b/arch/s390/include/asm/perf_event.h | |||
@@ -9,8 +9,6 @@ | |||
9 | #ifndef _ASM_S390_PERF_EVENT_H | 9 | #ifndef _ASM_S390_PERF_EVENT_H |
10 | #define _ASM_S390_PERF_EVENT_H | 10 | #define _ASM_S390_PERF_EVENT_H |
11 | 11 | ||
12 | #ifdef CONFIG_64BIT | ||
13 | |||
14 | #include <linux/perf_event.h> | 12 | #include <linux/perf_event.h> |
15 | #include <linux/device.h> | 13 | #include <linux/device.h> |
16 | #include <asm/cpu_mf.h> | 14 | #include <asm/cpu_mf.h> |
@@ -92,5 +90,4 @@ struct sf_raw_sample { | |||
92 | int perf_reserve_sampling(void); | 90 | int perf_reserve_sampling(void); |
93 | void perf_release_sampling(void); | 91 | void perf_release_sampling(void); |
94 | 92 | ||
95 | #endif /* CONFIG_64BIT */ | ||
96 | #endif /* _ASM_S390_PERF_EVENT_H */ | 93 | #endif /* _ASM_S390_PERF_EVENT_H */ |
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h index 3009c2ba46d2..51e7fb634ebc 100644 --- a/arch/s390/include/asm/pgalloc.h +++ b/arch/s390/include/asm/pgalloc.h | |||
@@ -33,11 +33,7 @@ static inline void clear_table(unsigned long *s, unsigned long val, size_t n) | |||
33 | *s = val; | 33 | *s = val; |
34 | n = (n / 256) - 1; | 34 | n = (n / 256) - 1; |
35 | asm volatile( | 35 | asm volatile( |
36 | #ifdef CONFIG_64BIT | ||
37 | " mvc 8(248,%0),0(%0)\n" | 36 | " mvc 8(248,%0),0(%0)\n" |
38 | #else | ||
39 | " mvc 4(252,%0),0(%0)\n" | ||
40 | #endif | ||
41 | "0: mvc 256(256,%0),0(%0)\n" | 37 | "0: mvc 256(256,%0),0(%0)\n" |
42 | " la %0,256(%0)\n" | 38 | " la %0,256(%0)\n" |
43 | " brct %1,0b\n" | 39 | " brct %1,0b\n" |
@@ -50,24 +46,6 @@ static inline void crst_table_init(unsigned long *crst, unsigned long entry) | |||
50 | clear_table(crst, entry, sizeof(unsigned long)*2048); | 46 | clear_table(crst, entry, sizeof(unsigned long)*2048); |
51 | } | 47 | } |
52 | 48 | ||
53 | #ifndef CONFIG_64BIT | ||
54 | |||
55 | static inline unsigned long pgd_entry_type(struct mm_struct *mm) | ||
56 | { | ||
57 | return _SEGMENT_ENTRY_EMPTY; | ||
58 | } | ||
59 | |||
60 | #define pud_alloc_one(mm,address) ({ BUG(); ((pud_t *)2); }) | ||
61 | #define pud_free(mm, x) do { } while (0) | ||
62 | |||
63 | #define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); }) | ||
64 | #define pmd_free(mm, x) do { } while (0) | ||
65 | |||
66 | #define pgd_populate(mm, pgd, pud) BUG() | ||
67 | #define pud_populate(mm, pud, pmd) BUG() | ||
68 | |||
69 | #else /* CONFIG_64BIT */ | ||
70 | |||
71 | static inline unsigned long pgd_entry_type(struct mm_struct *mm) | 49 | static inline unsigned long pgd_entry_type(struct mm_struct *mm) |
72 | { | 50 | { |
73 | if (mm->context.asce_limit <= (1UL << 31)) | 51 | if (mm->context.asce_limit <= (1UL << 31)) |
@@ -119,8 +97,6 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) | |||
119 | pud_val(*pud) = _REGION3_ENTRY | __pa(pmd); | 97 | pud_val(*pud) = _REGION3_ENTRY | __pa(pmd); |
120 | } | 98 | } |
121 | 99 | ||
122 | #endif /* CONFIG_64BIT */ | ||
123 | |||
124 | static inline pgd_t *pgd_alloc(struct mm_struct *mm) | 100 | static inline pgd_t *pgd_alloc(struct mm_struct *mm) |
125 | { | 101 | { |
126 | spin_lock_init(&mm->context.list_lock); | 102 | spin_lock_init(&mm->context.list_lock); |
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index e08ec38f8c6e..989cfae9e202 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h | |||
@@ -66,15 +66,9 @@ extern unsigned long zero_page_mask; | |||
66 | * table can map | 66 | * table can map |
67 | * PGDIR_SHIFT determines what a third-level page table entry can map | 67 | * PGDIR_SHIFT determines what a third-level page table entry can map |
68 | */ | 68 | */ |
69 | #ifndef CONFIG_64BIT | 69 | #define PMD_SHIFT 20 |
70 | # define PMD_SHIFT 20 | 70 | #define PUD_SHIFT 31 |
71 | # define PUD_SHIFT 20 | 71 | #define PGDIR_SHIFT 42 |
72 | # define PGDIR_SHIFT 20 | ||
73 | #else /* CONFIG_64BIT */ | ||
74 | # define PMD_SHIFT 20 | ||
75 | # define PUD_SHIFT 31 | ||
76 | # define PGDIR_SHIFT 42 | ||
77 | #endif /* CONFIG_64BIT */ | ||
78 | 72 | ||
79 | #define PMD_SIZE (1UL << PMD_SHIFT) | 73 | #define PMD_SIZE (1UL << PMD_SHIFT) |
80 | #define PMD_MASK (~(PMD_SIZE-1)) | 74 | #define PMD_MASK (~(PMD_SIZE-1)) |
@@ -90,15 +84,8 @@ extern unsigned long zero_page_mask; | |||
90 | * that leads to 1024 pte per pgd | 84 | * that leads to 1024 pte per pgd |
91 | */ | 85 | */ |
92 | #define PTRS_PER_PTE 256 | 86 | #define PTRS_PER_PTE 256 |
93 | #ifndef CONFIG_64BIT | ||
94 | #define __PAGETABLE_PUD_FOLDED | ||
95 | #define PTRS_PER_PMD 1 | ||
96 | #define __PAGETABLE_PMD_FOLDED | ||
97 | #define PTRS_PER_PUD 1 | ||
98 | #else /* CONFIG_64BIT */ | ||
99 | #define PTRS_PER_PMD 2048 | 87 | #define PTRS_PER_PMD 2048 |
100 | #define PTRS_PER_PUD 2048 | 88 | #define PTRS_PER_PUD 2048 |
101 | #endif /* CONFIG_64BIT */ | ||
102 | #define PTRS_PER_PGD 2048 | 89 | #define PTRS_PER_PGD 2048 |
103 | 90 | ||
104 | #define FIRST_USER_ADDRESS 0UL | 91 | #define FIRST_USER_ADDRESS 0UL |
@@ -127,23 +114,19 @@ extern struct page *vmemmap; | |||
127 | 114 | ||
128 | #define VMEM_MAX_PHYS ((unsigned long) vmemmap) | 115 | #define VMEM_MAX_PHYS ((unsigned long) vmemmap) |
129 | 116 | ||
130 | #ifdef CONFIG_64BIT | ||
131 | extern unsigned long MODULES_VADDR; | 117 | extern unsigned long MODULES_VADDR; |
132 | extern unsigned long MODULES_END; | 118 | extern unsigned long MODULES_END; |
133 | #define MODULES_VADDR MODULES_VADDR | 119 | #define MODULES_VADDR MODULES_VADDR |
134 | #define MODULES_END MODULES_END | 120 | #define MODULES_END MODULES_END |
135 | #define MODULES_LEN (1UL << 31) | 121 | #define MODULES_LEN (1UL << 31) |
136 | #endif | ||
137 | 122 | ||
138 | static inline int is_module_addr(void *addr) | 123 | static inline int is_module_addr(void *addr) |
139 | { | 124 | { |
140 | #ifdef CONFIG_64BIT | ||
141 | BUILD_BUG_ON(MODULES_LEN > (1UL << 31)); | 125 | BUILD_BUG_ON(MODULES_LEN > (1UL << 31)); |
142 | if (addr < (void *)MODULES_VADDR) | 126 | if (addr < (void *)MODULES_VADDR) |
143 | return 0; | 127 | return 0; |
144 | if (addr > (void *)MODULES_END) | 128 | if (addr > (void *)MODULES_END) |
145 | return 0; | 129 | return 0; |
146 | #endif | ||
147 | return 1; | 130 | return 1; |
148 | } | 131 | } |
149 | 132 | ||
@@ -284,56 +267,6 @@ static inline int is_module_addr(void *addr) | |||
284 | * pte_swap is true for the bit pattern .10...xxxx10, (pte & 0x603) == 0x402 | 267 | * pte_swap is true for the bit pattern .10...xxxx10, (pte & 0x603) == 0x402 |
285 | */ | 268 | */ |
286 | 269 | ||
287 | #ifndef CONFIG_64BIT | ||
288 | |||
289 | /* Bits in the segment table address-space-control-element */ | ||
290 | #define _ASCE_SPACE_SWITCH 0x80000000UL /* space switch event */ | ||
291 | #define _ASCE_ORIGIN_MASK 0x7ffff000UL /* segment table origin */ | ||
292 | #define _ASCE_PRIVATE_SPACE 0x100 /* private space control */ | ||
293 | #define _ASCE_ALT_EVENT 0x80 /* storage alteration event control */ | ||
294 | #define _ASCE_TABLE_LENGTH 0x7f /* 128 x 64 entries = 8k */ | ||
295 | |||
296 | /* Bits in the segment table entry */ | ||
297 | #define _SEGMENT_ENTRY_BITS 0x7fffffffUL /* Valid segment table bits */ | ||
298 | #define _SEGMENT_ENTRY_ORIGIN 0x7fffffc0UL /* page table origin */ | ||
299 | #define _SEGMENT_ENTRY_PROTECT 0x200 /* page protection bit */ | ||
300 | #define _SEGMENT_ENTRY_INVALID 0x20 /* invalid segment table entry */ | ||
301 | #define _SEGMENT_ENTRY_COMMON 0x10 /* common segment bit */ | ||
302 | #define _SEGMENT_ENTRY_PTL 0x0f /* page table length */ | ||
303 | |||
304 | #define _SEGMENT_ENTRY_DIRTY 0 /* No sw dirty bit for 31-bit */ | ||
305 | #define _SEGMENT_ENTRY_YOUNG 0 /* No sw young bit for 31-bit */ | ||
306 | #define _SEGMENT_ENTRY_READ 0 /* No sw read bit for 31-bit */ | ||
307 | #define _SEGMENT_ENTRY_WRITE 0 /* No sw write bit for 31-bit */ | ||
308 | #define _SEGMENT_ENTRY_LARGE 0 /* No large pages for 31-bit */ | ||
309 | #define _SEGMENT_ENTRY_BITS_LARGE 0 | ||
310 | #define _SEGMENT_ENTRY_ORIGIN_LARGE 0 | ||
311 | |||
312 | #define _SEGMENT_ENTRY (_SEGMENT_ENTRY_PTL) | ||
313 | #define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INVALID) | ||
314 | |||
315 | /* | ||
316 | * Segment table entry encoding (I = invalid, R = read-only bit): | ||
317 | * ..R...I..... | ||
318 | * prot-none ..1...1..... | ||
319 | * read-only ..1...0..... | ||
320 | * read-write ..0...0..... | ||
321 | * empty ..0...1..... | ||
322 | */ | ||
323 | |||
324 | /* Page status table bits for virtualization */ | ||
325 | #define PGSTE_ACC_BITS 0xf0000000UL | ||
326 | #define PGSTE_FP_BIT 0x08000000UL | ||
327 | #define PGSTE_PCL_BIT 0x00800000UL | ||
328 | #define PGSTE_HR_BIT 0x00400000UL | ||
329 | #define PGSTE_HC_BIT 0x00200000UL | ||
330 | #define PGSTE_GR_BIT 0x00040000UL | ||
331 | #define PGSTE_GC_BIT 0x00020000UL | ||
332 | #define PGSTE_UC_BIT 0x00008000UL /* user dirty (migration) */ | ||
333 | #define PGSTE_IN_BIT 0x00004000UL /* IPTE notify bit */ | ||
334 | |||
335 | #else /* CONFIG_64BIT */ | ||
336 | |||
337 | /* Bits in the segment/region table address-space-control-element */ | 270 | /* Bits in the segment/region table address-space-control-element */ |
338 | #define _ASCE_ORIGIN ~0xfffUL/* segment table origin */ | 271 | #define _ASCE_ORIGIN ~0xfffUL/* segment table origin */ |
339 | #define _ASCE_PRIVATE_SPACE 0x100 /* private space control */ | 272 | #define _ASCE_PRIVATE_SPACE 0x100 /* private space control */ |
@@ -417,8 +350,6 @@ static inline int is_module_addr(void *addr) | |||
417 | #define PGSTE_UC_BIT 0x0000800000000000UL /* user dirty (migration) */ | 350 | #define PGSTE_UC_BIT 0x0000800000000000UL /* user dirty (migration) */ |
418 | #define PGSTE_IN_BIT 0x0000400000000000UL /* IPTE notify bit */ | 351 | #define PGSTE_IN_BIT 0x0000400000000000UL /* IPTE notify bit */ |
419 | 352 | ||
420 | #endif /* CONFIG_64BIT */ | ||
421 | |||
422 | /* Guest Page State used for virtualization */ | 353 | /* Guest Page State used for virtualization */ |
423 | #define _PGSTE_GPS_ZERO 0x0000000080000000UL | 354 | #define _PGSTE_GPS_ZERO 0x0000000080000000UL |
424 | #define _PGSTE_GPS_USAGE_MASK 0x0000000003000000UL | 355 | #define _PGSTE_GPS_USAGE_MASK 0x0000000003000000UL |
@@ -509,19 +440,6 @@ static inline int mm_use_skey(struct mm_struct *mm) | |||
509 | /* | 440 | /* |
510 | * pgd/pmd/pte query functions | 441 | * pgd/pmd/pte query functions |
511 | */ | 442 | */ |
512 | #ifndef CONFIG_64BIT | ||
513 | |||
514 | static inline int pgd_present(pgd_t pgd) { return 1; } | ||
515 | static inline int pgd_none(pgd_t pgd) { return 0; } | ||
516 | static inline int pgd_bad(pgd_t pgd) { return 0; } | ||
517 | |||
518 | static inline int pud_present(pud_t pud) { return 1; } | ||
519 | static inline int pud_none(pud_t pud) { return 0; } | ||
520 | static inline int pud_large(pud_t pud) { return 0; } | ||
521 | static inline int pud_bad(pud_t pud) { return 0; } | ||
522 | |||
523 | #else /* CONFIG_64BIT */ | ||
524 | |||
525 | static inline int pgd_present(pgd_t pgd) | 443 | static inline int pgd_present(pgd_t pgd) |
526 | { | 444 | { |
527 | if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2) | 445 | if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2) |
@@ -583,8 +501,6 @@ static inline int pud_bad(pud_t pud) | |||
583 | return (pud_val(pud) & mask) != 0; | 501 | return (pud_val(pud) & mask) != 0; |
584 | } | 502 | } |
585 | 503 | ||
586 | #endif /* CONFIG_64BIT */ | ||
587 | |||
588 | static inline int pmd_present(pmd_t pmd) | 504 | static inline int pmd_present(pmd_t pmd) |
589 | { | 505 | { |
590 | return pmd_val(pmd) != _SEGMENT_ENTRY_INVALID; | 506 | return pmd_val(pmd) != _SEGMENT_ENTRY_INVALID; |
@@ -916,18 +832,14 @@ static inline int pte_unused(pte_t pte) | |||
916 | 832 | ||
917 | static inline void pgd_clear(pgd_t *pgd) | 833 | static inline void pgd_clear(pgd_t *pgd) |
918 | { | 834 | { |
919 | #ifdef CONFIG_64BIT | ||
920 | if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2) | 835 | if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2) |
921 | pgd_val(*pgd) = _REGION2_ENTRY_EMPTY; | 836 | pgd_val(*pgd) = _REGION2_ENTRY_EMPTY; |
922 | #endif | ||
923 | } | 837 | } |
924 | 838 | ||
925 | static inline void pud_clear(pud_t *pud) | 839 | static inline void pud_clear(pud_t *pud) |
926 | { | 840 | { |
927 | #ifdef CONFIG_64BIT | ||
928 | if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) | 841 | if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) |
929 | pud_val(*pud) = _REGION3_ENTRY_EMPTY; | 842 | pud_val(*pud) = _REGION3_ENTRY_EMPTY; |
930 | #endif | ||
931 | } | 843 | } |
932 | 844 | ||
933 | static inline void pmd_clear(pmd_t *pmdp) | 845 | static inline void pmd_clear(pmd_t *pmdp) |
@@ -1026,10 +938,6 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep) | |||
1026 | { | 938 | { |
1027 | unsigned long pto = (unsigned long) ptep; | 939 | unsigned long pto = (unsigned long) ptep; |
1028 | 940 | ||
1029 | #ifndef CONFIG_64BIT | ||
1030 | /* pto in ESA mode must point to the start of the segment table */ | ||
1031 | pto &= 0x7ffffc00; | ||
1032 | #endif | ||
1033 | /* Invalidation + global TLB flush for the pte */ | 941 | /* Invalidation + global TLB flush for the pte */ |
1034 | asm volatile( | 942 | asm volatile( |
1035 | " ipte %2,%3" | 943 | " ipte %2,%3" |
@@ -1040,10 +948,6 @@ static inline void __ptep_ipte_local(unsigned long address, pte_t *ptep) | |||
1040 | { | 948 | { |
1041 | unsigned long pto = (unsigned long) ptep; | 949 | unsigned long pto = (unsigned long) ptep; |
1042 | 950 | ||
1043 | #ifndef CONFIG_64BIT | ||
1044 | /* pto in ESA mode must point to the start of the segment table */ | ||
1045 | pto &= 0x7ffffc00; | ||
1046 | #endif | ||
1047 | /* Invalidation + local TLB flush for the pte */ | 951 | /* Invalidation + local TLB flush for the pte */ |
1048 | asm volatile( | 952 | asm volatile( |
1049 | " .insn rrf,0xb2210000,%2,%3,0,1" | 953 | " .insn rrf,0xb2210000,%2,%3,0,1" |
@@ -1054,10 +958,6 @@ static inline void __ptep_ipte_range(unsigned long address, int nr, pte_t *ptep) | |||
1054 | { | 958 | { |
1055 | unsigned long pto = (unsigned long) ptep; | 959 | unsigned long pto = (unsigned long) ptep; |
1056 | 960 | ||
1057 | #ifndef CONFIG_64BIT | ||
1058 | /* pto in ESA mode must point to the start of the segment table */ | ||
1059 | pto &= 0x7ffffc00; | ||
1060 | #endif | ||
1061 | /* Invalidate a range of ptes + global TLB flush of the ptes */ | 961 | /* Invalidate a range of ptes + global TLB flush of the ptes */ |
1062 | do { | 962 | do { |
1063 | asm volatile( | 963 | asm volatile( |
@@ -1376,17 +1276,6 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot) | |||
1376 | #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) | 1276 | #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) |
1377 | #define pgd_offset_k(address) pgd_offset(&init_mm, address) | 1277 | #define pgd_offset_k(address) pgd_offset(&init_mm, address) |
1378 | 1278 | ||
1379 | #ifndef CONFIG_64BIT | ||
1380 | |||
1381 | #define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) | ||
1382 | #define pud_deref(pmd) ({ BUG(); 0UL; }) | ||
1383 | #define pgd_deref(pmd) ({ BUG(); 0UL; }) | ||
1384 | |||
1385 | #define pud_offset(pgd, address) ((pud_t *) pgd) | ||
1386 | #define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address)) | ||
1387 | |||
1388 | #else /* CONFIG_64BIT */ | ||
1389 | |||
1390 | #define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) | 1279 | #define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) |
1391 | #define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN) | 1280 | #define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN) |
1392 | #define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) | 1281 | #define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) |
@@ -1407,8 +1296,6 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) | |||
1407 | return pmd + pmd_index(address); | 1296 | return pmd + pmd_index(address); |
1408 | } | 1297 | } |
1409 | 1298 | ||
1410 | #endif /* CONFIG_64BIT */ | ||
1411 | |||
1412 | #define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot)) | 1299 | #define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot)) |
1413 | #define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT) | 1300 | #define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT) |
1414 | #define pte_page(x) pfn_to_page(pte_pfn(x)) | 1301 | #define pte_page(x) pfn_to_page(pte_pfn(x)) |
@@ -1729,11 +1616,9 @@ static inline int has_transparent_hugepage(void) | |||
1729 | * 0000000000111111111122222222223333333333444444444455 5555 5 55566 66 | 1616 | * 0000000000111111111122222222223333333333444444444455 5555 5 55566 66 |
1730 | * 0123456789012345678901234567890123456789012345678901 2345 6 78901 23 | 1617 | * 0123456789012345678901234567890123456789012345678901 2345 6 78901 23 |
1731 | */ | 1618 | */ |
1732 | #ifndef CONFIG_64BIT | 1619 | |
1733 | #define __SWP_OFFSET_MASK (~0UL >> 12) | ||
1734 | #else | ||
1735 | #define __SWP_OFFSET_MASK (~0UL >> 11) | 1620 | #define __SWP_OFFSET_MASK (~0UL >> 11) |
1736 | #endif | 1621 | |
1737 | static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) | 1622 | static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) |
1738 | { | 1623 | { |
1739 | pte_t pte; | 1624 | pte_t pte; |
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index e7cbbdcdee13..dedb6218544b 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h | |||
@@ -19,7 +19,6 @@ | |||
19 | #define _CIF_ASCE (1<<CIF_ASCE) | 19 | #define _CIF_ASCE (1<<CIF_ASCE) |
20 | #define _CIF_NOHZ_DELAY (1<<CIF_NOHZ_DELAY) | 20 | #define _CIF_NOHZ_DELAY (1<<CIF_NOHZ_DELAY) |
21 | 21 | ||
22 | |||
23 | #ifndef __ASSEMBLY__ | 22 | #ifndef __ASSEMBLY__ |
24 | 23 | ||
25 | #include <linux/linkage.h> | 24 | #include <linux/linkage.h> |
@@ -66,13 +65,6 @@ extern void execve_tail(void); | |||
66 | /* | 65 | /* |
67 | * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit. | 66 | * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit. |
68 | */ | 67 | */ |
69 | #ifndef CONFIG_64BIT | ||
70 | |||
71 | #define TASK_SIZE (1UL << 31) | ||
72 | #define TASK_MAX_SIZE (1UL << 31) | ||
73 | #define TASK_UNMAPPED_BASE (1UL << 30) | ||
74 | |||
75 | #else /* CONFIG_64BIT */ | ||
76 | 68 | ||
77 | #define TASK_SIZE_OF(tsk) ((tsk)->mm->context.asce_limit) | 69 | #define TASK_SIZE_OF(tsk) ((tsk)->mm->context.asce_limit) |
78 | #define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? \ | 70 | #define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? \ |
@@ -80,15 +72,8 @@ extern void execve_tail(void); | |||
80 | #define TASK_SIZE TASK_SIZE_OF(current) | 72 | #define TASK_SIZE TASK_SIZE_OF(current) |
81 | #define TASK_MAX_SIZE (1UL << 53) | 73 | #define TASK_MAX_SIZE (1UL << 53) |
82 | 74 | ||
83 | #endif /* CONFIG_64BIT */ | ||
84 | |||
85 | #ifndef CONFIG_64BIT | ||
86 | #define STACK_TOP (1UL << 31) | ||
87 | #define STACK_TOP_MAX (1UL << 31) | ||
88 | #else /* CONFIG_64BIT */ | ||
89 | #define STACK_TOP (1UL << (test_thread_flag(TIF_31BIT) ? 31:42)) | 75 | #define STACK_TOP (1UL << (test_thread_flag(TIF_31BIT) ? 31:42)) |
90 | #define STACK_TOP_MAX (1UL << 42) | 76 | #define STACK_TOP_MAX (1UL << 42) |
91 | #endif /* CONFIG_64BIT */ | ||
92 | 77 | ||
93 | #define HAVE_ARCH_PICK_MMAP_LAYOUT | 78 | #define HAVE_ARCH_PICK_MMAP_LAYOUT |
94 | 79 | ||
@@ -115,10 +100,8 @@ struct thread_struct { | |||
115 | /* cpu runtime instrumentation */ | 100 | /* cpu runtime instrumentation */ |
116 | struct runtime_instr_cb *ri_cb; | 101 | struct runtime_instr_cb *ri_cb; |
117 | int ri_signum; | 102 | int ri_signum; |
118 | #ifdef CONFIG_64BIT | ||
119 | unsigned char trap_tdb[256]; /* Transaction abort diagnose block */ | 103 | unsigned char trap_tdb[256]; /* Transaction abort diagnose block */ |
120 | __vector128 *vxrs; /* Vector register save area */ | 104 | __vector128 *vxrs; /* Vector register save area */ |
121 | #endif | ||
122 | }; | 105 | }; |
123 | 106 | ||
124 | /* Flag to disable transactions. */ | 107 | /* Flag to disable transactions. */ |
@@ -181,11 +164,7 @@ struct task_struct; | |||
181 | struct mm_struct; | 164 | struct mm_struct; |
182 | struct seq_file; | 165 | struct seq_file; |
183 | 166 | ||
184 | #ifdef CONFIG_64BIT | 167 | void show_cacheinfo(struct seq_file *m); |
185 | extern void show_cacheinfo(struct seq_file *m); | ||
186 | #else | ||
187 | static inline void show_cacheinfo(struct seq_file *m) { } | ||
188 | #endif | ||
189 | 168 | ||
190 | /* Free all resources held by a thread. */ | 169 | /* Free all resources held by a thread. */ |
191 | extern void release_thread(struct task_struct *); | 170 | extern void release_thread(struct task_struct *); |
@@ -229,11 +208,7 @@ static inline void psw_set_key(unsigned int key) | |||
229 | */ | 208 | */ |
230 | static inline void __load_psw(psw_t psw) | 209 | static inline void __load_psw(psw_t psw) |
231 | { | 210 | { |
232 | #ifndef CONFIG_64BIT | ||
233 | asm volatile("lpsw %0" : : "Q" (psw) : "cc"); | ||
234 | #else | ||
235 | asm volatile("lpswe %0" : : "Q" (psw) : "cc"); | 211 | asm volatile("lpswe %0" : : "Q" (psw) : "cc"); |
236 | #endif | ||
237 | } | 212 | } |
238 | 213 | ||
239 | /* | 214 | /* |
@@ -247,22 +222,12 @@ static inline void __load_psw_mask (unsigned long mask) | |||
247 | 222 | ||
248 | psw.mask = mask; | 223 | psw.mask = mask; |
249 | 224 | ||
250 | #ifndef CONFIG_64BIT | ||
251 | asm volatile( | ||
252 | " basr %0,0\n" | ||
253 | "0: ahi %0,1f-0b\n" | ||
254 | " st %0,%O1+4(%R1)\n" | ||
255 | " lpsw %1\n" | ||
256 | "1:" | ||
257 | : "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc"); | ||
258 | #else /* CONFIG_64BIT */ | ||
259 | asm volatile( | 225 | asm volatile( |
260 | " larl %0,1f\n" | 226 | " larl %0,1f\n" |
261 | " stg %0,%O1+8(%R1)\n" | 227 | " stg %0,%O1+8(%R1)\n" |
262 | " lpswe %1\n" | 228 | " lpswe %1\n" |
263 | "1:" | 229 | "1:" |
264 | : "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc"); | 230 | : "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc"); |
265 | #endif /* CONFIG_64BIT */ | ||
266 | } | 231 | } |
267 | 232 | ||
268 | /* | 233 | /* |
@@ -270,20 +235,12 @@ static inline void __load_psw_mask (unsigned long mask) | |||
270 | */ | 235 | */ |
271 | static inline unsigned long __rewind_psw(psw_t psw, unsigned long ilc) | 236 | static inline unsigned long __rewind_psw(psw_t psw, unsigned long ilc) |
272 | { | 237 | { |
273 | #ifndef CONFIG_64BIT | ||
274 | if (psw.addr & PSW_ADDR_AMODE) | ||
275 | /* 31 bit mode */ | ||
276 | return (psw.addr - ilc) | PSW_ADDR_AMODE; | ||
277 | /* 24 bit mode */ | ||
278 | return (psw.addr - ilc) & ((1UL << 24) - 1); | ||
279 | #else | ||
280 | unsigned long mask; | 238 | unsigned long mask; |
281 | 239 | ||
282 | mask = (psw.mask & PSW_MASK_EA) ? -1UL : | 240 | mask = (psw.mask & PSW_MASK_EA) ? -1UL : |
283 | (psw.mask & PSW_MASK_BA) ? (1UL << 31) - 1 : | 241 | (psw.mask & PSW_MASK_BA) ? (1UL << 31) - 1 : |
284 | (1UL << 24) - 1; | 242 | (1UL << 24) - 1; |
285 | return (psw.addr - ilc) & mask; | 243 | return (psw.addr - ilc) & mask; |
286 | #endif | ||
287 | } | 244 | } |
288 | 245 | ||
289 | /* | 246 | /* |
@@ -305,26 +262,6 @@ static inline void __noreturn disabled_wait(unsigned long code) | |||
305 | * Store status and then load disabled wait psw, | 262 | * Store status and then load disabled wait psw, |
306 | * the processor is dead afterwards | 263 | * the processor is dead afterwards |
307 | */ | 264 | */ |
308 | #ifndef CONFIG_64BIT | ||
309 | asm volatile( | ||
310 | " stctl 0,0,0(%2)\n" | ||
311 | " ni 0(%2),0xef\n" /* switch off protection */ | ||
312 | " lctl 0,0,0(%2)\n" | ||
313 | " stpt 0xd8\n" /* store timer */ | ||
314 | " stckc 0xe0\n" /* store clock comparator */ | ||
315 | " stpx 0x108\n" /* store prefix register */ | ||
316 | " stam 0,15,0x120\n" /* store access registers */ | ||
317 | " std 0,0x160\n" /* store f0 */ | ||
318 | " std 2,0x168\n" /* store f2 */ | ||
319 | " std 4,0x170\n" /* store f4 */ | ||
320 | " std 6,0x178\n" /* store f6 */ | ||
321 | " stm 0,15,0x180\n" /* store general registers */ | ||
322 | " stctl 0,15,0x1c0\n" /* store control registers */ | ||
323 | " oi 0x1c0,0x10\n" /* fake protection bit */ | ||
324 | " lpsw 0(%1)" | ||
325 | : "=m" (ctl_buf) | ||
326 | : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc"); | ||
327 | #else /* CONFIG_64BIT */ | ||
328 | asm volatile( | 265 | asm volatile( |
329 | " stctg 0,0,0(%2)\n" | 266 | " stctg 0,0,0(%2)\n" |
330 | " ni 4(%2),0xef\n" /* switch off protection */ | 267 | " ni 4(%2),0xef\n" /* switch off protection */ |
@@ -357,7 +294,6 @@ static inline void __noreturn disabled_wait(unsigned long code) | |||
357 | " lpswe 0(%1)" | 294 | " lpswe 0(%1)" |
358 | : "=m" (ctl_buf) | 295 | : "=m" (ctl_buf) |
359 | : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc", "0", "1"); | 296 | : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc", "0", "1"); |
360 | #endif /* CONFIG_64BIT */ | ||
361 | while (1); | 297 | while (1); |
362 | } | 298 | } |
363 | 299 | ||
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h index be317feff7ac..6feda2599282 100644 --- a/arch/s390/include/asm/ptrace.h +++ b/arch/s390/include/asm/ptrace.h | |||
@@ -40,12 +40,8 @@ struct psw_bits { | |||
40 | unsigned long long ri : 1; /* Runtime Instrumentation */ | 40 | unsigned long long ri : 1; /* Runtime Instrumentation */ |
41 | unsigned long long : 6; | 41 | unsigned long long : 6; |
42 | unsigned long long eaba : 2; /* Addressing Mode */ | 42 | unsigned long long eaba : 2; /* Addressing Mode */ |
43 | #ifdef CONFIG_64BIT | ||
44 | unsigned long long : 31; | 43 | unsigned long long : 31; |
45 | unsigned long long ia : 64;/* Instruction Address */ | 44 | unsigned long long ia : 64;/* Instruction Address */ |
46 | #else | ||
47 | unsigned long long ia : 31;/* Instruction Address */ | ||
48 | #endif | ||
49 | }; | 45 | }; |
50 | 46 | ||
51 | enum { | 47 | enum { |
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h index 06f3034605a1..998b61cd0e56 100644 --- a/arch/s390/include/asm/qdio.h +++ b/arch/s390/include/asm/qdio.h | |||
@@ -211,11 +211,6 @@ struct qdio_buffer_element { | |||
211 | u8 scount; | 211 | u8 scount; |
212 | u8 sflags; | 212 | u8 sflags; |
213 | u32 length; | 213 | u32 length; |
214 | #ifdef CONFIG_32BIT | ||
215 | /* private: */ | ||
216 | void *res2; | ||
217 | /* public: */ | ||
218 | #endif | ||
219 | void *addr; | 214 | void *addr; |
220 | } __attribute__ ((packed, aligned(16))); | 215 | } __attribute__ ((packed, aligned(16))); |
221 | 216 | ||
@@ -232,11 +227,6 @@ struct qdio_buffer { | |||
232 | * @sbal: absolute SBAL address | 227 | * @sbal: absolute SBAL address |
233 | */ | 228 | */ |
234 | struct sl_element { | 229 | struct sl_element { |
235 | #ifdef CONFIG_32BIT | ||
236 | /* private: */ | ||
237 | unsigned long reserved; | ||
238 | /* public: */ | ||
239 | #endif | ||
240 | unsigned long sbal; | 230 | unsigned long sbal; |
241 | } __attribute__ ((packed)); | 231 | } __attribute__ ((packed)); |
242 | 232 | ||
diff --git a/arch/s390/include/asm/runtime_instr.h b/arch/s390/include/asm/runtime_instr.h index 830da737ff85..402ad6df4897 100644 --- a/arch/s390/include/asm/runtime_instr.h +++ b/arch/s390/include/asm/runtime_instr.h | |||
@@ -72,27 +72,19 @@ static inline void store_runtime_instr_cb(struct runtime_instr_cb *cb) | |||
72 | 72 | ||
73 | static inline void save_ri_cb(struct runtime_instr_cb *cb_prev) | 73 | static inline void save_ri_cb(struct runtime_instr_cb *cb_prev) |
74 | { | 74 | { |
75 | #ifdef CONFIG_64BIT | ||
76 | if (cb_prev) | 75 | if (cb_prev) |
77 | store_runtime_instr_cb(cb_prev); | 76 | store_runtime_instr_cb(cb_prev); |
78 | #endif | ||
79 | } | 77 | } |
80 | 78 | ||
81 | static inline void restore_ri_cb(struct runtime_instr_cb *cb_next, | 79 | static inline void restore_ri_cb(struct runtime_instr_cb *cb_next, |
82 | struct runtime_instr_cb *cb_prev) | 80 | struct runtime_instr_cb *cb_prev) |
83 | { | 81 | { |
84 | #ifdef CONFIG_64BIT | ||
85 | if (cb_next) | 82 | if (cb_next) |
86 | load_runtime_instr_cb(cb_next); | 83 | load_runtime_instr_cb(cb_next); |
87 | else if (cb_prev) | 84 | else if (cb_prev) |
88 | load_runtime_instr_cb(&runtime_instr_empty_cb); | 85 | load_runtime_instr_cb(&runtime_instr_empty_cb); |
89 | #endif | ||
90 | } | 86 | } |
91 | 87 | ||
92 | #ifdef CONFIG_64BIT | 88 | void exit_thread_runtime_instr(void); |
93 | extern void exit_thread_runtime_instr(void); | ||
94 | #else | ||
95 | static inline void exit_thread_runtime_instr(void) { } | ||
96 | #endif | ||
97 | 89 | ||
98 | #endif /* _RUNTIME_INSTR_H */ | 90 | #endif /* _RUNTIME_INSTR_H */ |
diff --git a/arch/s390/include/asm/rwsem.h b/arch/s390/include/asm/rwsem.h index 487f9b64efb9..4b43ee7e6776 100644 --- a/arch/s390/include/asm/rwsem.h +++ b/arch/s390/include/asm/rwsem.h | |||
@@ -39,17 +39,10 @@ | |||
39 | #error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead" | 39 | #error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead" |
40 | #endif | 40 | #endif |
41 | 41 | ||
42 | #ifndef CONFIG_64BIT | ||
43 | #define RWSEM_UNLOCKED_VALUE 0x00000000 | ||
44 | #define RWSEM_ACTIVE_BIAS 0x00000001 | ||
45 | #define RWSEM_ACTIVE_MASK 0x0000ffff | ||
46 | #define RWSEM_WAITING_BIAS (-0x00010000) | ||
47 | #else /* CONFIG_64BIT */ | ||
48 | #define RWSEM_UNLOCKED_VALUE 0x0000000000000000L | 42 | #define RWSEM_UNLOCKED_VALUE 0x0000000000000000L |
49 | #define RWSEM_ACTIVE_BIAS 0x0000000000000001L | 43 | #define RWSEM_ACTIVE_BIAS 0x0000000000000001L |
50 | #define RWSEM_ACTIVE_MASK 0x00000000ffffffffL | 44 | #define RWSEM_ACTIVE_MASK 0x00000000ffffffffL |
51 | #define RWSEM_WAITING_BIAS (-0x0000000100000000L) | 45 | #define RWSEM_WAITING_BIAS (-0x0000000100000000L) |
52 | #endif /* CONFIG_64BIT */ | ||
53 | #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS | 46 | #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS |
54 | #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) | 47 | #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) |
55 | 48 | ||
@@ -61,19 +54,11 @@ static inline void __down_read(struct rw_semaphore *sem) | |||
61 | signed long old, new; | 54 | signed long old, new; |
62 | 55 | ||
63 | asm volatile( | 56 | asm volatile( |
64 | #ifndef CONFIG_64BIT | ||
65 | " l %0,%2\n" | ||
66 | "0: lr %1,%0\n" | ||
67 | " ahi %1,%4\n" | ||
68 | " cs %0,%1,%2\n" | ||
69 | " jl 0b" | ||
70 | #else /* CONFIG_64BIT */ | ||
71 | " lg %0,%2\n" | 57 | " lg %0,%2\n" |
72 | "0: lgr %1,%0\n" | 58 | "0: lgr %1,%0\n" |
73 | " aghi %1,%4\n" | 59 | " aghi %1,%4\n" |
74 | " csg %0,%1,%2\n" | 60 | " csg %0,%1,%2\n" |
75 | " jl 0b" | 61 | " jl 0b" |
76 | #endif /* CONFIG_64BIT */ | ||
77 | : "=&d" (old), "=&d" (new), "=Q" (sem->count) | 62 | : "=&d" (old), "=&d" (new), "=Q" (sem->count) |
78 | : "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS) | 63 | : "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS) |
79 | : "cc", "memory"); | 64 | : "cc", "memory"); |
@@ -89,15 +74,6 @@ static inline int __down_read_trylock(struct rw_semaphore *sem) | |||
89 | signed long old, new; | 74 | signed long old, new; |
90 | 75 | ||
91 | asm volatile( | 76 | asm volatile( |
92 | #ifndef CONFIG_64BIT | ||
93 | " l %0,%2\n" | ||
94 | "0: ltr %1,%0\n" | ||
95 | " jm 1f\n" | ||
96 | " ahi %1,%4\n" | ||
97 | " cs %0,%1,%2\n" | ||
98 | " jl 0b\n" | ||
99 | "1:" | ||
100 | #else /* CONFIG_64BIT */ | ||
101 | " lg %0,%2\n" | 77 | " lg %0,%2\n" |
102 | "0: ltgr %1,%0\n" | 78 | "0: ltgr %1,%0\n" |
103 | " jm 1f\n" | 79 | " jm 1f\n" |
@@ -105,7 +81,6 @@ static inline int __down_read_trylock(struct rw_semaphore *sem) | |||
105 | " csg %0,%1,%2\n" | 81 | " csg %0,%1,%2\n" |
106 | " jl 0b\n" | 82 | " jl 0b\n" |
107 | "1:" | 83 | "1:" |
108 | #endif /* CONFIG_64BIT */ | ||
109 | : "=&d" (old), "=&d" (new), "=Q" (sem->count) | 84 | : "=&d" (old), "=&d" (new), "=Q" (sem->count) |
110 | : "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS) | 85 | : "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS) |
111 | : "cc", "memory"); | 86 | : "cc", "memory"); |
@@ -121,19 +96,11 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) | |||
121 | 96 | ||
122 | tmp = RWSEM_ACTIVE_WRITE_BIAS; | 97 | tmp = RWSEM_ACTIVE_WRITE_BIAS; |
123 | asm volatile( | 98 | asm volatile( |
124 | #ifndef CONFIG_64BIT | ||
125 | " l %0,%2\n" | ||
126 | "0: lr %1,%0\n" | ||
127 | " a %1,%4\n" | ||
128 | " cs %0,%1,%2\n" | ||
129 | " jl 0b" | ||
130 | #else /* CONFIG_64BIT */ | ||
131 | " lg %0,%2\n" | 99 | " lg %0,%2\n" |
132 | "0: lgr %1,%0\n" | 100 | "0: lgr %1,%0\n" |
133 | " ag %1,%4\n" | 101 | " ag %1,%4\n" |
134 | " csg %0,%1,%2\n" | 102 | " csg %0,%1,%2\n" |
135 | " jl 0b" | 103 | " jl 0b" |
136 | #endif /* CONFIG_64BIT */ | ||
137 | : "=&d" (old), "=&d" (new), "=Q" (sem->count) | 104 | : "=&d" (old), "=&d" (new), "=Q" (sem->count) |
138 | : "Q" (sem->count), "m" (tmp) | 105 | : "Q" (sem->count), "m" (tmp) |
139 | : "cc", "memory"); | 106 | : "cc", "memory"); |
@@ -154,19 +121,11 @@ static inline int __down_write_trylock(struct rw_semaphore *sem) | |||
154 | signed long old; | 121 | signed long old; |
155 | 122 | ||
156 | asm volatile( | 123 | asm volatile( |
157 | #ifndef CONFIG_64BIT | ||
158 | " l %0,%1\n" | ||
159 | "0: ltr %0,%0\n" | ||
160 | " jnz 1f\n" | ||
161 | " cs %0,%3,%1\n" | ||
162 | " jl 0b\n" | ||
163 | #else /* CONFIG_64BIT */ | ||
164 | " lg %0,%1\n" | 124 | " lg %0,%1\n" |
165 | "0: ltgr %0,%0\n" | 125 | "0: ltgr %0,%0\n" |
166 | " jnz 1f\n" | 126 | " jnz 1f\n" |
167 | " csg %0,%3,%1\n" | 127 | " csg %0,%3,%1\n" |
168 | " jl 0b\n" | 128 | " jl 0b\n" |
169 | #endif /* CONFIG_64BIT */ | ||
170 | "1:" | 129 | "1:" |
171 | : "=&d" (old), "=Q" (sem->count) | 130 | : "=&d" (old), "=Q" (sem->count) |
172 | : "Q" (sem->count), "d" (RWSEM_ACTIVE_WRITE_BIAS) | 131 | : "Q" (sem->count), "d" (RWSEM_ACTIVE_WRITE_BIAS) |
@@ -182,19 +141,11 @@ static inline void __up_read(struct rw_semaphore *sem) | |||
182 | signed long old, new; | 141 | signed long old, new; |
183 | 142 | ||
184 | asm volatile( | 143 | asm volatile( |
185 | #ifndef CONFIG_64BIT | ||
186 | " l %0,%2\n" | ||
187 | "0: lr %1,%0\n" | ||
188 | " ahi %1,%4\n" | ||
189 | " cs %0,%1,%2\n" | ||
190 | " jl 0b" | ||
191 | #else /* CONFIG_64BIT */ | ||
192 | " lg %0,%2\n" | 144 | " lg %0,%2\n" |
193 | "0: lgr %1,%0\n" | 145 | "0: lgr %1,%0\n" |
194 | " aghi %1,%4\n" | 146 | " aghi %1,%4\n" |
195 | " csg %0,%1,%2\n" | 147 | " csg %0,%1,%2\n" |
196 | " jl 0b" | 148 | " jl 0b" |
197 | #endif /* CONFIG_64BIT */ | ||
198 | : "=&d" (old), "=&d" (new), "=Q" (sem->count) | 149 | : "=&d" (old), "=&d" (new), "=Q" (sem->count) |
199 | : "Q" (sem->count), "i" (-RWSEM_ACTIVE_READ_BIAS) | 150 | : "Q" (sem->count), "i" (-RWSEM_ACTIVE_READ_BIAS) |
200 | : "cc", "memory"); | 151 | : "cc", "memory"); |
@@ -212,19 +163,11 @@ static inline void __up_write(struct rw_semaphore *sem) | |||
212 | 163 | ||
213 | tmp = -RWSEM_ACTIVE_WRITE_BIAS; | 164 | tmp = -RWSEM_ACTIVE_WRITE_BIAS; |
214 | asm volatile( | 165 | asm volatile( |
215 | #ifndef CONFIG_64BIT | ||
216 | " l %0,%2\n" | ||
217 | "0: lr %1,%0\n" | ||
218 | " a %1,%4\n" | ||
219 | " cs %0,%1,%2\n" | ||
220 | " jl 0b" | ||
221 | #else /* CONFIG_64BIT */ | ||
222 | " lg %0,%2\n" | 166 | " lg %0,%2\n" |
223 | "0: lgr %1,%0\n" | 167 | "0: lgr %1,%0\n" |
224 | " ag %1,%4\n" | 168 | " ag %1,%4\n" |
225 | " csg %0,%1,%2\n" | 169 | " csg %0,%1,%2\n" |
226 | " jl 0b" | 170 | " jl 0b" |
227 | #endif /* CONFIG_64BIT */ | ||
228 | : "=&d" (old), "=&d" (new), "=Q" (sem->count) | 171 | : "=&d" (old), "=&d" (new), "=Q" (sem->count) |
229 | : "Q" (sem->count), "m" (tmp) | 172 | : "Q" (sem->count), "m" (tmp) |
230 | : "cc", "memory"); | 173 | : "cc", "memory"); |
@@ -242,19 +185,11 @@ static inline void __downgrade_write(struct rw_semaphore *sem) | |||
242 | 185 | ||
243 | tmp = -RWSEM_WAITING_BIAS; | 186 | tmp = -RWSEM_WAITING_BIAS; |
244 | asm volatile( | 187 | asm volatile( |
245 | #ifndef CONFIG_64BIT | ||
246 | " l %0,%2\n" | ||
247 | "0: lr %1,%0\n" | ||
248 | " a %1,%4\n" | ||
249 | " cs %0,%1,%2\n" | ||
250 | " jl 0b" | ||
251 | #else /* CONFIG_64BIT */ | ||
252 | " lg %0,%2\n" | 188 | " lg %0,%2\n" |
253 | "0: lgr %1,%0\n" | 189 | "0: lgr %1,%0\n" |
254 | " ag %1,%4\n" | 190 | " ag %1,%4\n" |
255 | " csg %0,%1,%2\n" | 191 | " csg %0,%1,%2\n" |
256 | " jl 0b" | 192 | " jl 0b" |
257 | #endif /* CONFIG_64BIT */ | ||
258 | : "=&d" (old), "=&d" (new), "=Q" (sem->count) | 193 | : "=&d" (old), "=&d" (new), "=Q" (sem->count) |
259 | : "Q" (sem->count), "m" (tmp) | 194 | : "Q" (sem->count), "m" (tmp) |
260 | : "cc", "memory"); | 195 | : "cc", "memory"); |
@@ -270,19 +205,11 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem) | |||
270 | signed long old, new; | 205 | signed long old, new; |
271 | 206 | ||
272 | asm volatile( | 207 | asm volatile( |
273 | #ifndef CONFIG_64BIT | ||
274 | " l %0,%2\n" | ||
275 | "0: lr %1,%0\n" | ||
276 | " ar %1,%4\n" | ||
277 | " cs %0,%1,%2\n" | ||
278 | " jl 0b" | ||
279 | #else /* CONFIG_64BIT */ | ||
280 | " lg %0,%2\n" | 208 | " lg %0,%2\n" |
281 | "0: lgr %1,%0\n" | 209 | "0: lgr %1,%0\n" |
282 | " agr %1,%4\n" | 210 | " agr %1,%4\n" |
283 | " csg %0,%1,%2\n" | 211 | " csg %0,%1,%2\n" |
284 | " jl 0b" | 212 | " jl 0b" |
285 | #endif /* CONFIG_64BIT */ | ||
286 | : "=&d" (old), "=&d" (new), "=Q" (sem->count) | 213 | : "=&d" (old), "=&d" (new), "=Q" (sem->count) |
287 | : "Q" (sem->count), "d" (delta) | 214 | : "Q" (sem->count), "d" (delta) |
288 | : "cc", "memory"); | 215 | : "cc", "memory"); |
@@ -296,19 +223,11 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem) | |||
296 | signed long old, new; | 223 | signed long old, new; |
297 | 224 | ||
298 | asm volatile( | 225 | asm volatile( |
299 | #ifndef CONFIG_64BIT | ||
300 | " l %0,%2\n" | ||
301 | "0: lr %1,%0\n" | ||
302 | " ar %1,%4\n" | ||
303 | " cs %0,%1,%2\n" | ||
304 | " jl 0b" | ||
305 | #else /* CONFIG_64BIT */ | ||
306 | " lg %0,%2\n" | 226 | " lg %0,%2\n" |
307 | "0: lgr %1,%0\n" | 227 | "0: lgr %1,%0\n" |
308 | " agr %1,%4\n" | 228 | " agr %1,%4\n" |
309 | " csg %0,%1,%2\n" | 229 | " csg %0,%1,%2\n" |
310 | " jl 0b" | 230 | " jl 0b" |
311 | #endif /* CONFIG_64BIT */ | ||
312 | : "=&d" (old), "=&d" (new), "=Q" (sem->count) | 231 | : "=&d" (old), "=&d" (new), "=Q" (sem->count) |
313 | : "Q" (sem->count), "d" (delta) | 232 | : "Q" (sem->count), "d" (delta) |
314 | : "cc", "memory"); | 233 | : "cc", "memory"); |
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h index b8d1e54b4733..b8ffc1bd0a9f 100644 --- a/arch/s390/include/asm/setup.h +++ b/arch/s390/include/asm/setup.h | |||
@@ -15,19 +15,11 @@ | |||
15 | #include <asm/lowcore.h> | 15 | #include <asm/lowcore.h> |
16 | #include <asm/types.h> | 16 | #include <asm/types.h> |
17 | 17 | ||
18 | #ifndef CONFIG_64BIT | ||
19 | #define IPL_DEVICE (*(unsigned long *) (0x10404)) | ||
20 | #define INITRD_START (*(unsigned long *) (0x1040C)) | ||
21 | #define INITRD_SIZE (*(unsigned long *) (0x10414)) | ||
22 | #define OLDMEM_BASE (*(unsigned long *) (0x1041C)) | ||
23 | #define OLDMEM_SIZE (*(unsigned long *) (0x10424)) | ||
24 | #else /* CONFIG_64BIT */ | ||
25 | #define IPL_DEVICE (*(unsigned long *) (0x10400)) | 18 | #define IPL_DEVICE (*(unsigned long *) (0x10400)) |
26 | #define INITRD_START (*(unsigned long *) (0x10408)) | 19 | #define INITRD_START (*(unsigned long *) (0x10408)) |
27 | #define INITRD_SIZE (*(unsigned long *) (0x10410)) | 20 | #define INITRD_SIZE (*(unsigned long *) (0x10410)) |
28 | #define OLDMEM_BASE (*(unsigned long *) (0x10418)) | 21 | #define OLDMEM_BASE (*(unsigned long *) (0x10418)) |
29 | #define OLDMEM_SIZE (*(unsigned long *) (0x10420)) | 22 | #define OLDMEM_SIZE (*(unsigned long *) (0x10420)) |
30 | #endif /* CONFIG_64BIT */ | ||
31 | #define COMMAND_LINE ((char *) (0x10480)) | 23 | #define COMMAND_LINE ((char *) (0x10480)) |
32 | 24 | ||
33 | extern int memory_end_set; | 25 | extern int memory_end_set; |
@@ -68,26 +60,8 @@ extern void detect_memory_memblock(void); | |||
68 | #define MACHINE_HAS_PFMF MACHINE_HAS_EDAT1 | 60 | #define MACHINE_HAS_PFMF MACHINE_HAS_EDAT1 |
69 | #define MACHINE_HAS_HPAGE MACHINE_HAS_EDAT1 | 61 | #define MACHINE_HAS_HPAGE MACHINE_HAS_EDAT1 |
70 | 62 | ||
71 | #ifndef CONFIG_64BIT | ||
72 | #define MACHINE_HAS_IEEE (S390_lowcore.machine_flags & MACHINE_FLAG_IEEE) | ||
73 | #define MACHINE_HAS_CSP (S390_lowcore.machine_flags & MACHINE_FLAG_CSP) | ||
74 | #define MACHINE_HAS_IDTE (0) | ||
75 | #define MACHINE_HAS_DIAG44 (1) | ||
76 | #define MACHINE_HAS_MVPG (S390_lowcore.machine_flags & MACHINE_FLAG_MVPG) | ||
77 | #define MACHINE_HAS_EDAT1 (0) | ||
78 | #define MACHINE_HAS_EDAT2 (0) | ||
79 | #define MACHINE_HAS_LPP (0) | ||
80 | #define MACHINE_HAS_TOPOLOGY (0) | ||
81 | #define MACHINE_HAS_TE (0) | ||
82 | #define MACHINE_HAS_TLB_LC (0) | ||
83 | #define MACHINE_HAS_VX (0) | ||
84 | #define MACHINE_HAS_CAD (0) | ||
85 | #else /* CONFIG_64BIT */ | ||
86 | #define MACHINE_HAS_IEEE (1) | ||
87 | #define MACHINE_HAS_CSP (1) | ||
88 | #define MACHINE_HAS_IDTE (S390_lowcore.machine_flags & MACHINE_FLAG_IDTE) | 63 | #define MACHINE_HAS_IDTE (S390_lowcore.machine_flags & MACHINE_FLAG_IDTE) |
89 | #define MACHINE_HAS_DIAG44 (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG44) | 64 | #define MACHINE_HAS_DIAG44 (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG44) |
90 | #define MACHINE_HAS_MVPG (1) | ||
91 | #define MACHINE_HAS_EDAT1 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT1) | 65 | #define MACHINE_HAS_EDAT1 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT1) |
92 | #define MACHINE_HAS_EDAT2 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT2) | 66 | #define MACHINE_HAS_EDAT2 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT2) |
93 | #define MACHINE_HAS_LPP (S390_lowcore.machine_flags & MACHINE_FLAG_LPP) | 67 | #define MACHINE_HAS_LPP (S390_lowcore.machine_flags & MACHINE_FLAG_LPP) |
@@ -96,7 +70,6 @@ extern void detect_memory_memblock(void); | |||
96 | #define MACHINE_HAS_TLB_LC (S390_lowcore.machine_flags & MACHINE_FLAG_TLB_LC) | 70 | #define MACHINE_HAS_TLB_LC (S390_lowcore.machine_flags & MACHINE_FLAG_TLB_LC) |
97 | #define MACHINE_HAS_VX (S390_lowcore.machine_flags & MACHINE_FLAG_VX) | 71 | #define MACHINE_HAS_VX (S390_lowcore.machine_flags & MACHINE_FLAG_VX) |
98 | #define MACHINE_HAS_CAD (S390_lowcore.machine_flags & MACHINE_FLAG_CAD) | 72 | #define MACHINE_HAS_CAD (S390_lowcore.machine_flags & MACHINE_FLAG_CAD) |
99 | #endif /* CONFIG_64BIT */ | ||
100 | 73 | ||
101 | /* | 74 | /* |
102 | * Console mode. Override with conmode= | 75 | * Console mode. Override with conmode= |
@@ -135,19 +108,11 @@ extern void (*_machine_power_off)(void); | |||
135 | 108 | ||
136 | #else /* __ASSEMBLY__ */ | 109 | #else /* __ASSEMBLY__ */ |
137 | 110 | ||
138 | #ifndef CONFIG_64BIT | ||
139 | #define IPL_DEVICE 0x10404 | ||
140 | #define INITRD_START 0x1040C | ||
141 | #define INITRD_SIZE 0x10414 | ||
142 | #define OLDMEM_BASE 0x1041C | ||
143 | #define OLDMEM_SIZE 0x10424 | ||
144 | #else /* CONFIG_64BIT */ | ||
145 | #define IPL_DEVICE 0x10400 | 111 | #define IPL_DEVICE 0x10400 |
146 | #define INITRD_START 0x10408 | 112 | #define INITRD_START 0x10408 |
147 | #define INITRD_SIZE 0x10410 | 113 | #define INITRD_SIZE 0x10410 |
148 | #define OLDMEM_BASE 0x10418 | 114 | #define OLDMEM_BASE 0x10418 |
149 | #define OLDMEM_SIZE 0x10420 | 115 | #define OLDMEM_SIZE 0x10420 |
150 | #endif /* CONFIG_64BIT */ | ||
151 | #define COMMAND_LINE 0x10480 | 116 | #define COMMAND_LINE 0x10480 |
152 | 117 | ||
153 | #endif /* __ASSEMBLY__ */ | 118 | #endif /* __ASSEMBLY__ */ |
diff --git a/arch/s390/include/asm/sfp-util.h b/arch/s390/include/asm/sfp-util.h index 5959bfb3b693..c8b7cf9d6279 100644 --- a/arch/s390/include/asm/sfp-util.h +++ b/arch/s390/include/asm/sfp-util.h | |||
@@ -51,7 +51,6 @@ | |||
51 | wl = __wl; \ | 51 | wl = __wl; \ |
52 | }) | 52 | }) |
53 | 53 | ||
54 | #ifdef CONFIG_64BIT | ||
55 | #define udiv_qrnnd(q, r, n1, n0, d) \ | 54 | #define udiv_qrnnd(q, r, n1, n0, d) \ |
56 | do { unsigned long __n; \ | 55 | do { unsigned long __n; \ |
57 | unsigned int __r, __d; \ | 56 | unsigned int __r, __d; \ |
@@ -60,15 +59,6 @@ | |||
60 | (q) = __n / __d; \ | 59 | (q) = __n / __d; \ |
61 | (r) = __n % __d; \ | 60 | (r) = __n % __d; \ |
62 | } while (0) | 61 | } while (0) |
63 | #else | ||
64 | #define udiv_qrnnd(q, r, n1, n0, d) \ | ||
65 | do { unsigned int __r; \ | ||
66 | (q) = __udiv_qrnnd (&__r, (n1), (n0), (d)); \ | ||
67 | (r) = __r; \ | ||
68 | } while (0) | ||
69 | extern unsigned long __udiv_qrnnd (unsigned int *, unsigned int, | ||
70 | unsigned int , unsigned int); | ||
71 | #endif | ||
72 | 62 | ||
73 | #define UDIV_NEEDS_NORMALIZATION 0 | 63 | #define UDIV_NEEDS_NORMALIZATION 0 |
74 | 64 | ||
diff --git a/arch/s390/include/asm/sparsemem.h b/arch/s390/include/asm/sparsemem.h index a60d085ddb4d..487428b6d099 100644 --- a/arch/s390/include/asm/sparsemem.h +++ b/arch/s390/include/asm/sparsemem.h | |||
@@ -1,16 +1,7 @@ | |||
1 | #ifndef _ASM_S390_SPARSEMEM_H | 1 | #ifndef _ASM_S390_SPARSEMEM_H |
2 | #define _ASM_S390_SPARSEMEM_H | 2 | #define _ASM_S390_SPARSEMEM_H |
3 | 3 | ||
4 | #ifdef CONFIG_64BIT | ||
5 | |||
6 | #define SECTION_SIZE_BITS 28 | 4 | #define SECTION_SIZE_BITS 28 |
7 | #define MAX_PHYSMEM_BITS 46 | 5 | #define MAX_PHYSMEM_BITS 46 |
8 | 6 | ||
9 | #else | ||
10 | |||
11 | #define SECTION_SIZE_BITS 25 | ||
12 | #define MAX_PHYSMEM_BITS 31 | ||
13 | |||
14 | #endif /* CONFIG_64BIT */ | ||
15 | |||
16 | #endif /* _ASM_S390_SPARSEMEM_H */ | 7 | #endif /* _ASM_S390_SPARSEMEM_H */ |
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h index 2542a7e4c8b4..d62e7a69605f 100644 --- a/arch/s390/include/asm/switch_to.h +++ b/arch/s390/include/asm/switch_to.h | |||
@@ -18,9 +18,6 @@ static inline int test_fp_ctl(u32 fpc) | |||
18 | u32 orig_fpc; | 18 | u32 orig_fpc; |
19 | int rc; | 19 | int rc; |
20 | 20 | ||
21 | if (!MACHINE_HAS_IEEE) | ||
22 | return 0; | ||
23 | |||
24 | asm volatile( | 21 | asm volatile( |
25 | " efpc %1\n" | 22 | " efpc %1\n" |
26 | " sfpc %2\n" | 23 | " sfpc %2\n" |
@@ -35,9 +32,6 @@ static inline int test_fp_ctl(u32 fpc) | |||
35 | 32 | ||
36 | static inline void save_fp_ctl(u32 *fpc) | 33 | static inline void save_fp_ctl(u32 *fpc) |
37 | { | 34 | { |
38 | if (!MACHINE_HAS_IEEE) | ||
39 | return; | ||
40 | |||
41 | asm volatile( | 35 | asm volatile( |
42 | " stfpc %0\n" | 36 | " stfpc %0\n" |
43 | : "+Q" (*fpc)); | 37 | : "+Q" (*fpc)); |
@@ -47,9 +41,6 @@ static inline int restore_fp_ctl(u32 *fpc) | |||
47 | { | 41 | { |
48 | int rc; | 42 | int rc; |
49 | 43 | ||
50 | if (!MACHINE_HAS_IEEE) | ||
51 | return 0; | ||
52 | |||
53 | asm volatile( | 44 | asm volatile( |
54 | " lfpc %1\n" | 45 | " lfpc %1\n" |
55 | "0: la %0,0\n" | 46 | "0: la %0,0\n" |
@@ -65,8 +56,6 @@ static inline void save_fp_regs(freg_t *fprs) | |||
65 | asm volatile("std 2,%0" : "=Q" (fprs[2])); | 56 | asm volatile("std 2,%0" : "=Q" (fprs[2])); |
66 | asm volatile("std 4,%0" : "=Q" (fprs[4])); | 57 | asm volatile("std 4,%0" : "=Q" (fprs[4])); |
67 | asm volatile("std 6,%0" : "=Q" (fprs[6])); | 58 | asm volatile("std 6,%0" : "=Q" (fprs[6])); |
68 | if (!MACHINE_HAS_IEEE) | ||
69 | return; | ||
70 | asm volatile("std 1,%0" : "=Q" (fprs[1])); | 59 | asm volatile("std 1,%0" : "=Q" (fprs[1])); |
71 | asm volatile("std 3,%0" : "=Q" (fprs[3])); | 60 | asm volatile("std 3,%0" : "=Q" (fprs[3])); |
72 | asm volatile("std 5,%0" : "=Q" (fprs[5])); | 61 | asm volatile("std 5,%0" : "=Q" (fprs[5])); |
@@ -87,8 +76,6 @@ static inline void restore_fp_regs(freg_t *fprs) | |||
87 | asm volatile("ld 2,%0" : : "Q" (fprs[2])); | 76 | asm volatile("ld 2,%0" : : "Q" (fprs[2])); |
88 | asm volatile("ld 4,%0" : : "Q" (fprs[4])); | 77 | asm volatile("ld 4,%0" : : "Q" (fprs[4])); |
89 | asm volatile("ld 6,%0" : : "Q" (fprs[6])); | 78 | asm volatile("ld 6,%0" : : "Q" (fprs[6])); |
90 | if (!MACHINE_HAS_IEEE) | ||
91 | return; | ||
92 | asm volatile("ld 1,%0" : : "Q" (fprs[1])); | 79 | asm volatile("ld 1,%0" : : "Q" (fprs[1])); |
93 | asm volatile("ld 3,%0" : : "Q" (fprs[3])); | 80 | asm volatile("ld 3,%0" : : "Q" (fprs[3])); |
94 | asm volatile("ld 5,%0" : : "Q" (fprs[5])); | 81 | asm volatile("ld 5,%0" : : "Q" (fprs[5])); |
@@ -140,22 +127,18 @@ static inline void restore_vx_regs(__vector128 *vxrs) | |||
140 | 127 | ||
141 | static inline void save_fp_vx_regs(struct task_struct *task) | 128 | static inline void save_fp_vx_regs(struct task_struct *task) |
142 | { | 129 | { |
143 | #ifdef CONFIG_64BIT | ||
144 | if (task->thread.vxrs) | 130 | if (task->thread.vxrs) |
145 | save_vx_regs(task->thread.vxrs); | 131 | save_vx_regs(task->thread.vxrs); |
146 | else | 132 | else |
147 | #endif | 133 | save_fp_regs(task->thread.fp_regs.fprs); |
148 | save_fp_regs(task->thread.fp_regs.fprs); | ||
149 | } | 134 | } |
150 | 135 | ||
151 | static inline void restore_fp_vx_regs(struct task_struct *task) | 136 | static inline void restore_fp_vx_regs(struct task_struct *task) |
152 | { | 137 | { |
153 | #ifdef CONFIG_64BIT | ||
154 | if (task->thread.vxrs) | 138 | if (task->thread.vxrs) |
155 | restore_vx_regs(task->thread.vxrs); | 139 | restore_vx_regs(task->thread.vxrs); |
156 | else | 140 | else |
157 | #endif | 141 | restore_fp_regs(task->thread.fp_regs.fprs); |
158 | restore_fp_regs(task->thread.fp_regs.fprs); | ||
159 | } | 142 | } |
160 | 143 | ||
161 | static inline void save_access_regs(unsigned int *acrs) | 144 | static inline void save_access_regs(unsigned int *acrs) |
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h index 5bc12598ae9e..6ba0bf928909 100644 --- a/arch/s390/include/asm/syscall.h +++ b/arch/s390/include/asm/syscall.h | |||
@@ -95,6 +95,6 @@ static inline int syscall_get_arch(void) | |||
95 | if (test_tsk_thread_flag(current, TIF_31BIT)) | 95 | if (test_tsk_thread_flag(current, TIF_31BIT)) |
96 | return AUDIT_ARCH_S390; | 96 | return AUDIT_ARCH_S390; |
97 | #endif | 97 | #endif |
98 | return sizeof(long) == 8 ? AUDIT_ARCH_S390X : AUDIT_ARCH_S390; | 98 | return AUDIT_ARCH_S390X; |
99 | } | 99 | } |
100 | #endif /* _ASM_SYSCALL_H */ | 100 | #endif /* _ASM_SYSCALL_H */ |
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h index ef1df718642d..d532098d98bf 100644 --- a/arch/s390/include/asm/thread_info.h +++ b/arch/s390/include/asm/thread_info.h | |||
@@ -10,13 +10,8 @@ | |||
10 | /* | 10 | /* |
11 | * Size of kernel stack for each process | 11 | * Size of kernel stack for each process |
12 | */ | 12 | */ |
13 | #ifndef CONFIG_64BIT | ||
14 | #define THREAD_ORDER 1 | ||
15 | #define ASYNC_ORDER 1 | ||
16 | #else /* CONFIG_64BIT */ | ||
17 | #define THREAD_ORDER 2 | 13 | #define THREAD_ORDER 2 |
18 | #define ASYNC_ORDER 2 | 14 | #define ASYNC_ORDER 2 |
19 | #endif /* CONFIG_64BIT */ | ||
20 | 15 | ||
21 | #define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) | 16 | #define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) |
22 | #define ASYNC_SIZE (PAGE_SIZE << ASYNC_ORDER) | 17 | #define ASYNC_SIZE (PAGE_SIZE << ASYNC_ORDER) |
@@ -66,6 +61,8 @@ static inline struct thread_info *current_thread_info(void) | |||
66 | return (struct thread_info *) S390_lowcore.thread_info; | 61 | return (struct thread_info *) S390_lowcore.thread_info; |
67 | } | 62 | } |
68 | 63 | ||
64 | void arch_release_task_struct(struct task_struct *tsk); | ||
65 | |||
69 | #define THREAD_SIZE_ORDER THREAD_ORDER | 66 | #define THREAD_SIZE_ORDER THREAD_ORDER |
70 | 67 | ||
71 | #endif | 68 | #endif |
@@ -99,10 +96,6 @@ static inline struct thread_info *current_thread_info(void) | |||
99 | #define _TIF_31BIT (1<<TIF_31BIT) | 96 | #define _TIF_31BIT (1<<TIF_31BIT) |
100 | #define _TIF_SINGLE_STEP (1<<TIF_SINGLE_STEP) | 97 | #define _TIF_SINGLE_STEP (1<<TIF_SINGLE_STEP) |
101 | 98 | ||
102 | #ifdef CONFIG_64BIT | ||
103 | #define is_32bit_task() (test_thread_flag(TIF_31BIT)) | 99 | #define is_32bit_task() (test_thread_flag(TIF_31BIT)) |
104 | #else | ||
105 | #define is_32bit_task() (1) | ||
106 | #endif | ||
107 | 100 | ||
108 | #endif /* _ASM_THREAD_INFO_H */ | 101 | #endif /* _ASM_THREAD_INFO_H */ |
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h index 06d8741ad6f4..7a92e69c50bc 100644 --- a/arch/s390/include/asm/tlb.h +++ b/arch/s390/include/asm/tlb.h | |||
@@ -118,12 +118,10 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, | |||
118 | static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, | 118 | static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, |
119 | unsigned long address) | 119 | unsigned long address) |
120 | { | 120 | { |
121 | #ifdef CONFIG_64BIT | ||
122 | if (tlb->mm->context.asce_limit <= (1UL << 31)) | 121 | if (tlb->mm->context.asce_limit <= (1UL << 31)) |
123 | return; | 122 | return; |
124 | pgtable_pmd_page_dtor(virt_to_page(pmd)); | 123 | pgtable_pmd_page_dtor(virt_to_page(pmd)); |
125 | tlb_remove_table(tlb, pmd); | 124 | tlb_remove_table(tlb, pmd); |
126 | #endif | ||
127 | } | 125 | } |
128 | 126 | ||
129 | /* | 127 | /* |
@@ -136,11 +134,9 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, | |||
136 | static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, | 134 | static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, |
137 | unsigned long address) | 135 | unsigned long address) |
138 | { | 136 | { |
139 | #ifdef CONFIG_64BIT | ||
140 | if (tlb->mm->context.asce_limit <= (1UL << 42)) | 137 | if (tlb->mm->context.asce_limit <= (1UL << 42)) |
141 | return; | 138 | return; |
142 | tlb_remove_table(tlb, pud); | 139 | tlb_remove_table(tlb, pud); |
143 | #endif | ||
144 | } | 140 | } |
145 | 141 | ||
146 | #define tlb_start_vma(tlb, vma) do { } while (0) | 142 | #define tlb_start_vma(tlb, vma) do { } while (0) |
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h index 16c9c88658c8..ca148f7c3eaa 100644 --- a/arch/s390/include/asm/tlbflush.h +++ b/arch/s390/include/asm/tlbflush.h | |||
@@ -49,13 +49,6 @@ static inline void __tlb_flush_global(void) | |||
49 | register unsigned long reg4 asm("4"); | 49 | register unsigned long reg4 asm("4"); |
50 | long dummy; | 50 | long dummy; |
51 | 51 | ||
52 | #ifndef CONFIG_64BIT | ||
53 | if (!MACHINE_HAS_CSP) { | ||
54 | smp_ptlb_all(); | ||
55 | return; | ||
56 | } | ||
57 | #endif /* CONFIG_64BIT */ | ||
58 | |||
59 | dummy = 0; | 52 | dummy = 0; |
60 | reg2 = reg3 = 0; | 53 | reg2 = reg3 = 0; |
61 | reg4 = ((unsigned long) &dummy) + 1; | 54 | reg4 = ((unsigned long) &dummy) + 1; |
diff --git a/arch/s390/include/asm/types.h b/arch/s390/include/asm/types.h index dccef3ca91fa..6740f4f9781f 100644 --- a/arch/s390/include/asm/types.h +++ b/arch/s390/include/asm/types.h | |||
@@ -8,21 +8,4 @@ | |||
8 | 8 | ||
9 | #include <uapi/asm/types.h> | 9 | #include <uapi/asm/types.h> |
10 | 10 | ||
11 | /* | ||
12 | * These aren't exported outside the kernel to avoid name space clashes | ||
13 | */ | ||
14 | |||
15 | #ifndef __ASSEMBLY__ | ||
16 | |||
17 | #ifndef CONFIG_64BIT | ||
18 | typedef union { | ||
19 | unsigned long long pair; | ||
20 | struct { | ||
21 | unsigned long even; | ||
22 | unsigned long odd; | ||
23 | } subreg; | ||
24 | } register_pair; | ||
25 | |||
26 | #endif /* ! CONFIG_64BIT */ | ||
27 | #endif /* __ASSEMBLY__ */ | ||
28 | #endif /* _S390_TYPES_H */ | 11 | #endif /* _S390_TYPES_H */ |
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index cd4c68e0398d..d64a7a62164f 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h | |||
@@ -372,5 +372,6 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo | |||
372 | } | 372 | } |
373 | 373 | ||
374 | int copy_to_user_real(void __user *dest, void *src, unsigned long count); | 374 | int copy_to_user_real(void __user *dest, void *src, unsigned long count); |
375 | void s390_kernel_write(void *dst, const void *src, size_t size); | ||
375 | 376 | ||
376 | #endif /* __S390_UACCESS_H */ | 377 | #endif /* __S390_UACCESS_H */ |
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h index 651886353551..91f56b1d8156 100644 --- a/arch/s390/include/asm/unistd.h +++ b/arch/s390/include/asm/unistd.h | |||
@@ -9,11 +9,7 @@ | |||
9 | #include <uapi/asm/unistd.h> | 9 | #include <uapi/asm/unistd.h> |
10 | 10 | ||
11 | 11 | ||
12 | #ifndef CONFIG_64BIT | ||
13 | #define __IGNORE_select | ||
14 | #else | ||
15 | #define __IGNORE_time | 12 | #define __IGNORE_time |
16 | #endif | ||
17 | 13 | ||
18 | /* Ignore NUMA system calls. Not wired up on s390. */ | 14 | /* Ignore NUMA system calls. Not wired up on s390. */ |
19 | #define __IGNORE_mbind | 15 | #define __IGNORE_mbind |
@@ -43,10 +39,6 @@ | |||
43 | #define __ARCH_WANT_SYS_OLDUMOUNT | 39 | #define __ARCH_WANT_SYS_OLDUMOUNT |
44 | #define __ARCH_WANT_SYS_SIGPENDING | 40 | #define __ARCH_WANT_SYS_SIGPENDING |
45 | #define __ARCH_WANT_SYS_SIGPROCMASK | 41 | #define __ARCH_WANT_SYS_SIGPROCMASK |
46 | # ifndef CONFIG_64BIT | ||
47 | # define __ARCH_WANT_STAT64 | ||
48 | # define __ARCH_WANT_SYS_TIME | ||
49 | # endif | ||
50 | # ifdef CONFIG_COMPAT | 42 | # ifdef CONFIG_COMPAT |
51 | # define __ARCH_WANT_COMPAT_SYS_TIME | 43 | # define __ARCH_WANT_COMPAT_SYS_TIME |
52 | # endif | 44 | # endif |
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h index a62526d09201..787acd4f9668 100644 --- a/arch/s390/include/asm/vdso.h +++ b/arch/s390/include/asm/vdso.h | |||
@@ -42,10 +42,8 @@ struct vdso_per_cpu_data { | |||
42 | 42 | ||
43 | extern struct vdso_data *vdso_data; | 43 | extern struct vdso_data *vdso_data; |
44 | 44 | ||
45 | #ifdef CONFIG_64BIT | ||
46 | int vdso_alloc_per_cpu(struct _lowcore *lowcore); | 45 | int vdso_alloc_per_cpu(struct _lowcore *lowcore); |
47 | void vdso_free_per_cpu(struct _lowcore *lowcore); | 46 | void vdso_free_per_cpu(struct _lowcore *lowcore); |
48 | #endif | ||
49 | 47 | ||
50 | #endif /* __ASSEMBLY__ */ | 48 | #endif /* __ASSEMBLY__ */ |
51 | #endif /* __S390_VDSO_H__ */ | 49 | #endif /* __S390_VDSO_H__ */ |
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index 31fab2676fe9..ffb87617a36c 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile | |||
@@ -26,25 +26,21 @@ CFLAGS_dumpstack.o += -fno-optimize-sibling-calls | |||
26 | # | 26 | # |
27 | CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"' | 27 | CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"' |
28 | 28 | ||
29 | CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w | 29 | CFLAGS_sysinfo.o += -w |
30 | 30 | ||
31 | obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o | 31 | obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o |
32 | obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o | 32 | obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o |
33 | obj-y += debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o | 33 | obj-y += debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o |
34 | obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o | 34 | obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o |
35 | obj-y += dumpstack.o | 35 | obj-y += runtime_instr.o cache.o dumpstack.o |
36 | obj-y += entry.o reipl.o relocate_kernel.o | ||
36 | 37 | ||
37 | obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) | 38 | extra-y += head.o head64.o vmlinux.lds |
38 | obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) | ||
39 | obj-y += $(if $(CONFIG_64BIT),relocate_kernel64.o,relocate_kernel.o) | ||
40 | |||
41 | extra-y += head.o vmlinux.lds | ||
42 | extra-y += $(if $(CONFIG_64BIT),head64.o,head31.o) | ||
43 | 39 | ||
44 | obj-$(CONFIG_MODULES) += s390_ksyms.o module.o | 40 | obj-$(CONFIG_MODULES) += s390_ksyms.o module.o |
45 | obj-$(CONFIG_SMP) += smp.o | 41 | obj-$(CONFIG_SMP) += smp.o |
46 | obj-$(CONFIG_SCHED_BOOK) += topology.o | 42 | obj-$(CONFIG_SCHED_BOOK) += topology.o |
47 | obj-$(CONFIG_HIBERNATION) += suspend.o swsusp_asm64.o | 43 | obj-$(CONFIG_HIBERNATION) += suspend.o swsusp.o |
48 | obj-$(CONFIG_AUDIT) += audit.o | 44 | obj-$(CONFIG_AUDIT) += audit.o |
49 | compat-obj-$(CONFIG_AUDIT) += compat_audit.o | 45 | compat-obj-$(CONFIG_AUDIT) += compat_audit.o |
50 | obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o | 46 | obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o |
@@ -56,13 +52,9 @@ obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o | |||
56 | obj-$(CONFIG_CRASH_DUMP) += crash_dump.o | 52 | obj-$(CONFIG_CRASH_DUMP) += crash_dump.o |
57 | obj-$(CONFIG_UPROBES) += uprobes.o | 53 | obj-$(CONFIG_UPROBES) += uprobes.o |
58 | 54 | ||
59 | ifdef CONFIG_64BIT | 55 | obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o |
60 | obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o \ | 56 | obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_events.o |
61 | perf_cpum_cf_events.o | ||
62 | obj-y += runtime_instr.o cache.o | ||
63 | endif | ||
64 | 57 | ||
65 | # vdso | 58 | # vdso |
66 | obj-$(CONFIG_64BIT) += vdso64/ | 59 | obj-y += vdso64/ |
67 | obj-$(CONFIG_32BIT) += vdso32/ | ||
68 | obj-$(CONFIG_COMPAT) += vdso32/ | 60 | obj-$(CONFIG_COMPAT) += vdso32/ |
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index 8dc4db10d160..f35058da8eaf 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c | |||
@@ -166,9 +166,6 @@ int main(void) | |||
166 | DEFINE(__LC_FPREGS_SAVE_AREA, offsetof(struct _lowcore, floating_pt_save_area)); | 166 | DEFINE(__LC_FPREGS_SAVE_AREA, offsetof(struct _lowcore, floating_pt_save_area)); |
167 | DEFINE(__LC_GPREGS_SAVE_AREA, offsetof(struct _lowcore, gpregs_save_area)); | 167 | DEFINE(__LC_GPREGS_SAVE_AREA, offsetof(struct _lowcore, gpregs_save_area)); |
168 | DEFINE(__LC_CREGS_SAVE_AREA, offsetof(struct _lowcore, cregs_save_area)); | 168 | DEFINE(__LC_CREGS_SAVE_AREA, offsetof(struct _lowcore, cregs_save_area)); |
169 | #ifdef CONFIG_32BIT | ||
170 | DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, extended_save_area_addr)); | ||
171 | #else /* CONFIG_32BIT */ | ||
172 | DEFINE(__LC_DATA_EXC_CODE, offsetof(struct _lowcore, data_exc_code)); | 169 | DEFINE(__LC_DATA_EXC_CODE, offsetof(struct _lowcore, data_exc_code)); |
173 | DEFINE(__LC_MCCK_FAIL_STOR_ADDR, offsetof(struct _lowcore, failing_storage_address)); | 170 | DEFINE(__LC_MCCK_FAIL_STOR_ADDR, offsetof(struct _lowcore, failing_storage_address)); |
174 | DEFINE(__LC_VX_SAVE_AREA_ADDR, offsetof(struct _lowcore, vector_save_area_addr)); | 171 | DEFINE(__LC_VX_SAVE_AREA_ADDR, offsetof(struct _lowcore, vector_save_area_addr)); |
@@ -184,6 +181,5 @@ int main(void) | |||
184 | DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce)); | 181 | DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce)); |
185 | DEFINE(__SIE_PROG0C, offsetof(struct kvm_s390_sie_block, prog0c)); | 182 | DEFINE(__SIE_PROG0C, offsetof(struct kvm_s390_sie_block, prog0c)); |
186 | DEFINE(__SIE_PROG20, offsetof(struct kvm_s390_sie_block, prog20)); | 183 | DEFINE(__SIE_PROG20, offsetof(struct kvm_s390_sie_block, prog20)); |
187 | #endif /* CONFIG_32BIT */ | ||
188 | return 0; | 184 | return 0; |
189 | } | 185 | } |
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S index f74a53d339b0..daed3fde42ec 100644 --- a/arch/s390/kernel/base.S +++ b/arch/s390/kernel/base.S | |||
@@ -11,8 +11,6 @@ | |||
11 | #include <asm/ptrace.h> | 11 | #include <asm/ptrace.h> |
12 | #include <asm/sigp.h> | 12 | #include <asm/sigp.h> |
13 | 13 | ||
14 | #ifdef CONFIG_64BIT | ||
15 | |||
16 | ENTRY(s390_base_mcck_handler) | 14 | ENTRY(s390_base_mcck_handler) |
17 | basr %r13,0 | 15 | basr %r13,0 |
18 | 0: lg %r15,__LC_PANIC_STACK # load panic stack | 16 | 0: lg %r15,__LC_PANIC_STACK # load panic stack |
@@ -131,77 +129,3 @@ ENTRY(diag308_reset) | |||
131 | .Lfpctl: | 129 | .Lfpctl: |
132 | .long 0 | 130 | .long 0 |
133 | .previous | 131 | .previous |
134 | |||
135 | #else /* CONFIG_64BIT */ | ||
136 | |||
137 | ENTRY(s390_base_mcck_handler) | ||
138 | basr %r13,0 | ||
139 | 0: l %r15,__LC_PANIC_STACK # load panic stack | ||
140 | ahi %r15,-STACK_FRAME_OVERHEAD | ||
141 | l %r1,2f-0b(%r13) | ||
142 | l %r1,0(%r1) | ||
143 | ltr %r1,%r1 | ||
144 | jz 1f | ||
145 | basr %r14,%r1 | ||
146 | 1: lm %r0,%r15,__LC_GPREGS_SAVE_AREA | ||
147 | lpsw __LC_MCK_OLD_PSW | ||
148 | |||
149 | 2: .long s390_base_mcck_handler_fn | ||
150 | |||
151 | .section .bss | ||
152 | .align 4 | ||
153 | .globl s390_base_mcck_handler_fn | ||
154 | s390_base_mcck_handler_fn: | ||
155 | .long 0 | ||
156 | .previous | ||
157 | |||
158 | ENTRY(s390_base_ext_handler) | ||
159 | stm %r0,%r15,__LC_SAVE_AREA_ASYNC | ||
160 | basr %r13,0 | ||
161 | 0: ahi %r15,-STACK_FRAME_OVERHEAD | ||
162 | l %r1,2f-0b(%r13) | ||
163 | l %r1,0(%r1) | ||
164 | ltr %r1,%r1 | ||
165 | jz 1f | ||
166 | basr %r14,%r1 | ||
167 | 1: lm %r0,%r15,__LC_SAVE_AREA_ASYNC | ||
168 | ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit | ||
169 | lpsw __LC_EXT_OLD_PSW | ||
170 | |||
171 | 2: .long s390_base_ext_handler_fn | ||
172 | |||
173 | .section .bss | ||
174 | .align 4 | ||
175 | .globl s390_base_ext_handler_fn | ||
176 | s390_base_ext_handler_fn: | ||
177 | .long 0 | ||
178 | .previous | ||
179 | |||
180 | ENTRY(s390_base_pgm_handler) | ||
181 | stm %r0,%r15,__LC_SAVE_AREA_SYNC | ||
182 | basr %r13,0 | ||
183 | 0: ahi %r15,-STACK_FRAME_OVERHEAD | ||
184 | l %r1,2f-0b(%r13) | ||
185 | l %r1,0(%r1) | ||
186 | ltr %r1,%r1 | ||
187 | jz 1f | ||
188 | basr %r14,%r1 | ||
189 | lm %r0,%r15,__LC_SAVE_AREA_SYNC | ||
190 | lpsw __LC_PGM_OLD_PSW | ||
191 | |||
192 | 1: lpsw disabled_wait_psw-0b(%r13) | ||
193 | |||
194 | 2: .long s390_base_pgm_handler_fn | ||
195 | |||
196 | disabled_wait_psw: | ||
197 | .align 8 | ||
198 | .long 0x000a0000,0x00000000 + s390_base_pgm_handler | ||
199 | |||
200 | .section .bss | ||
201 | .align 4 | ||
202 | .globl s390_base_pgm_handler_fn | ||
203 | s390_base_pgm_handler_fn: | ||
204 | .long 0 | ||
205 | .previous | ||
206 | |||
207 | #endif /* CONFIG_64BIT */ | ||
diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c index 0969d113b3d6..bff5e3b6d822 100644 --- a/arch/s390/kernel/cache.c +++ b/arch/s390/kernel/cache.c | |||
@@ -70,6 +70,8 @@ void show_cacheinfo(struct seq_file *m) | |||
70 | struct cacheinfo *cache; | 70 | struct cacheinfo *cache; |
71 | int idx; | 71 | int idx; |
72 | 72 | ||
73 | if (!test_facility(34)) | ||
74 | return; | ||
73 | get_online_cpus(); | 75 | get_online_cpus(); |
74 | this_cpu_ci = get_cpu_cacheinfo(cpumask_any(cpu_online_mask)); | 76 | this_cpu_ci = get_cpu_cacheinfo(cpumask_any(cpu_online_mask)); |
75 | for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) { | 77 | for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) { |
@@ -159,6 +161,8 @@ int populate_cache_leaves(unsigned int cpu) | |||
159 | union cache_topology ct; | 161 | union cache_topology ct; |
160 | enum cache_type ctype; | 162 | enum cache_type ctype; |
161 | 163 | ||
164 | if (!test_facility(34)) | ||
165 | return -EOPNOTSUPP; | ||
162 | ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0); | 166 | ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0); |
163 | for (idx = 0, level = 0; level < this_cpu_ci->num_levels && | 167 | for (idx = 0, level = 0; level < this_cpu_ci->num_levels && |
164 | idx < this_cpu_ci->num_leaves; idx++, level++) { | 168 | idx < this_cpu_ci->num_leaves; idx++, level++) { |
diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c index d7b0c4d27880..199ec92ef4fe 100644 --- a/arch/s390/kernel/cpcmd.c +++ b/arch/s390/kernel/cpcmd.c | |||
@@ -27,13 +27,9 @@ static int diag8_noresponse(int cmdlen) | |||
27 | register unsigned long reg3 asm ("3") = cmdlen; | 27 | register unsigned long reg3 asm ("3") = cmdlen; |
28 | 28 | ||
29 | asm volatile( | 29 | asm volatile( |
30 | #ifndef CONFIG_64BIT | ||
31 | " diag %1,%0,0x8\n" | ||
32 | #else /* CONFIG_64BIT */ | ||
33 | " sam31\n" | 30 | " sam31\n" |
34 | " diag %1,%0,0x8\n" | 31 | " diag %1,%0,0x8\n" |
35 | " sam64\n" | 32 | " sam64\n" |
36 | #endif /* CONFIG_64BIT */ | ||
37 | : "+d" (reg3) : "d" (reg2) : "cc"); | 33 | : "+d" (reg3) : "d" (reg2) : "cc"); |
38 | return reg3; | 34 | return reg3; |
39 | } | 35 | } |
@@ -46,17 +42,11 @@ static int diag8_response(int cmdlen, char *response, int *rlen) | |||
46 | register unsigned long reg5 asm ("5") = *rlen; | 42 | register unsigned long reg5 asm ("5") = *rlen; |
47 | 43 | ||
48 | asm volatile( | 44 | asm volatile( |
49 | #ifndef CONFIG_64BIT | ||
50 | " diag %2,%0,0x8\n" | ||
51 | " brc 8,1f\n" | ||
52 | " ar %1,%4\n" | ||
53 | #else /* CONFIG_64BIT */ | ||
54 | " sam31\n" | 45 | " sam31\n" |
55 | " diag %2,%0,0x8\n" | 46 | " diag %2,%0,0x8\n" |
56 | " sam64\n" | 47 | " sam64\n" |
57 | " brc 8,1f\n" | 48 | " brc 8,1f\n" |
58 | " agr %1,%4\n" | 49 | " agr %1,%4\n" |
59 | #endif /* CONFIG_64BIT */ | ||
60 | "1:\n" | 50 | "1:\n" |
61 | : "+d" (reg4), "+d" (reg5) | 51 | : "+d" (reg4), "+d" (reg5) |
62 | : "d" (reg2), "d" (reg3), "d" (*rlen) : "cc"); | 52 | : "d" (reg2), "d" (reg3), "d" (*rlen) : "cc"); |
diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c index 8237fc07ac79..2f69243bf700 100644 --- a/arch/s390/kernel/diag.c +++ b/arch/s390/kernel/diag.c | |||
@@ -18,13 +18,9 @@ int diag14(unsigned long rx, unsigned long ry1, unsigned long subcode) | |||
18 | int rc = 0; | 18 | int rc = 0; |
19 | 19 | ||
20 | asm volatile( | 20 | asm volatile( |
21 | #ifdef CONFIG_64BIT | ||
22 | " sam31\n" | 21 | " sam31\n" |
23 | " diag %2,2,0x14\n" | 22 | " diag %2,2,0x14\n" |
24 | " sam64\n" | 23 | " sam64\n" |
25 | #else | ||
26 | " diag %2,2,0x14\n" | ||
27 | #endif | ||
28 | " ipm %0\n" | 24 | " ipm %0\n" |
29 | " srl %0,28\n" | 25 | " srl %0,28\n" |
30 | : "=d" (rc), "+d" (_ry2) | 26 | : "=d" (rc), "+d" (_ry2) |
@@ -52,7 +48,6 @@ int diag210(struct diag210 *addr) | |||
52 | spin_lock_irqsave(&diag210_lock, flags); | 48 | spin_lock_irqsave(&diag210_lock, flags); |
53 | diag210_tmp = *addr; | 49 | diag210_tmp = *addr; |
54 | 50 | ||
55 | #ifdef CONFIG_64BIT | ||
56 | asm volatile( | 51 | asm volatile( |
57 | " lhi %0,-1\n" | 52 | " lhi %0,-1\n" |
58 | " sam31\n" | 53 | " sam31\n" |
@@ -62,16 +57,6 @@ int diag210(struct diag210 *addr) | |||
62 | "1: sam64\n" | 57 | "1: sam64\n" |
63 | EX_TABLE(0b, 1b) | 58 | EX_TABLE(0b, 1b) |
64 | : "=&d" (ccode) : "a" (&diag210_tmp) : "cc", "memory"); | 59 | : "=&d" (ccode) : "a" (&diag210_tmp) : "cc", "memory"); |
65 | #else | ||
66 | asm volatile( | ||
67 | " lhi %0,-1\n" | ||
68 | " diag %1,0,0x210\n" | ||
69 | "0: ipm %0\n" | ||
70 | " srl %0,28\n" | ||
71 | "1:\n" | ||
72 | EX_TABLE(0b, 1b) | ||
73 | : "=&d" (ccode) : "a" (&diag210_tmp) : "cc", "memory"); | ||
74 | #endif | ||
75 | 60 | ||
76 | *addr = diag210_tmp; | 61 | *addr = diag210_tmp; |
77 | spin_unlock_irqrestore(&diag210_lock, flags); | 62 | spin_unlock_irqrestore(&diag210_lock, flags); |
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c index 533430307da8..8140d10c6785 100644 --- a/arch/s390/kernel/dis.c +++ b/arch/s390/kernel/dis.c | |||
@@ -32,12 +32,6 @@ | |||
32 | #include <asm/debug.h> | 32 | #include <asm/debug.h> |
33 | #include <asm/irq.h> | 33 | #include <asm/irq.h> |
34 | 34 | ||
35 | #ifndef CONFIG_64BIT | ||
36 | #define ONELONG "%08lx: " | ||
37 | #else /* CONFIG_64BIT */ | ||
38 | #define ONELONG "%016lx: " | ||
39 | #endif /* CONFIG_64BIT */ | ||
40 | |||
41 | enum { | 35 | enum { |
42 | UNUSED, /* Indicates the end of the operand list */ | 36 | UNUSED, /* Indicates the end of the operand list */ |
43 | R_8, /* GPR starting at position 8 */ | 37 | R_8, /* GPR starting at position 8 */ |
@@ -536,12 +530,10 @@ static char *long_insn_name[] = { | |||
536 | }; | 530 | }; |
537 | 531 | ||
538 | static struct s390_insn opcode[] = { | 532 | static struct s390_insn opcode[] = { |
539 | #ifdef CONFIG_64BIT | ||
540 | { "bprp", 0xc5, INSTR_MII_UPI }, | 533 | { "bprp", 0xc5, INSTR_MII_UPI }, |
541 | { "bpp", 0xc7, INSTR_SMI_U0RDP }, | 534 | { "bpp", 0xc7, INSTR_SMI_U0RDP }, |
542 | { "trtr", 0xd0, INSTR_SS_L0RDRD }, | 535 | { "trtr", 0xd0, INSTR_SS_L0RDRD }, |
543 | { "lmd", 0xef, INSTR_SS_RRRDRD3 }, | 536 | { "lmd", 0xef, INSTR_SS_RRRDRD3 }, |
544 | #endif | ||
545 | { "spm", 0x04, INSTR_RR_R0 }, | 537 | { "spm", 0x04, INSTR_RR_R0 }, |
546 | { "balr", 0x05, INSTR_RR_RR }, | 538 | { "balr", 0x05, INSTR_RR_RR }, |
547 | { "bctr", 0x06, INSTR_RR_RR }, | 539 | { "bctr", 0x06, INSTR_RR_RR }, |
@@ -725,11 +717,9 @@ static struct s390_insn opcode[] = { | |||
725 | }; | 717 | }; |
726 | 718 | ||
727 | static struct s390_insn opcode_01[] = { | 719 | static struct s390_insn opcode_01[] = { |
728 | #ifdef CONFIG_64BIT | ||
729 | { "ptff", 0x04, INSTR_E }, | 720 | { "ptff", 0x04, INSTR_E }, |
730 | { "pfpo", 0x0a, INSTR_E }, | 721 | { "pfpo", 0x0a, INSTR_E }, |
731 | { "sam64", 0x0e, INSTR_E }, | 722 | { "sam64", 0x0e, INSTR_E }, |
732 | #endif | ||
733 | { "pr", 0x01, INSTR_E }, | 723 | { "pr", 0x01, INSTR_E }, |
734 | { "upt", 0x02, INSTR_E }, | 724 | { "upt", 0x02, INSTR_E }, |
735 | { "sckpf", 0x07, INSTR_E }, | 725 | { "sckpf", 0x07, INSTR_E }, |
@@ -741,7 +731,6 @@ static struct s390_insn opcode_01[] = { | |||
741 | }; | 731 | }; |
742 | 732 | ||
743 | static struct s390_insn opcode_a5[] = { | 733 | static struct s390_insn opcode_a5[] = { |
744 | #ifdef CONFIG_64BIT | ||
745 | { "iihh", 0x00, INSTR_RI_RU }, | 734 | { "iihh", 0x00, INSTR_RI_RU }, |
746 | { "iihl", 0x01, INSTR_RI_RU }, | 735 | { "iihl", 0x01, INSTR_RI_RU }, |
747 | { "iilh", 0x02, INSTR_RI_RU }, | 736 | { "iilh", 0x02, INSTR_RI_RU }, |
@@ -758,12 +747,10 @@ static struct s390_insn opcode_a5[] = { | |||
758 | { "llihl", 0x0d, INSTR_RI_RU }, | 747 | { "llihl", 0x0d, INSTR_RI_RU }, |
759 | { "llilh", 0x0e, INSTR_RI_RU }, | 748 | { "llilh", 0x0e, INSTR_RI_RU }, |
760 | { "llill", 0x0f, INSTR_RI_RU }, | 749 | { "llill", 0x0f, INSTR_RI_RU }, |
761 | #endif | ||
762 | { "", 0, INSTR_INVALID } | 750 | { "", 0, INSTR_INVALID } |
763 | }; | 751 | }; |
764 | 752 | ||
765 | static struct s390_insn opcode_a7[] = { | 753 | static struct s390_insn opcode_a7[] = { |
766 | #ifdef CONFIG_64BIT | ||
767 | { "tmhh", 0x02, INSTR_RI_RU }, | 754 | { "tmhh", 0x02, INSTR_RI_RU }, |
768 | { "tmhl", 0x03, INSTR_RI_RU }, | 755 | { "tmhl", 0x03, INSTR_RI_RU }, |
769 | { "brctg", 0x07, INSTR_RI_RP }, | 756 | { "brctg", 0x07, INSTR_RI_RP }, |
@@ -771,7 +758,6 @@ static struct s390_insn opcode_a7[] = { | |||
771 | { "aghi", 0x0b, INSTR_RI_RI }, | 758 | { "aghi", 0x0b, INSTR_RI_RI }, |
772 | { "mghi", 0x0d, INSTR_RI_RI }, | 759 | { "mghi", 0x0d, INSTR_RI_RI }, |
773 | { "cghi", 0x0f, INSTR_RI_RI }, | 760 | { "cghi", 0x0f, INSTR_RI_RI }, |
774 | #endif | ||
775 | { "tmlh", 0x00, INSTR_RI_RU }, | 761 | { "tmlh", 0x00, INSTR_RI_RU }, |
776 | { "tmll", 0x01, INSTR_RI_RU }, | 762 | { "tmll", 0x01, INSTR_RI_RU }, |
777 | { "brc", 0x04, INSTR_RI_UP }, | 763 | { "brc", 0x04, INSTR_RI_UP }, |
@@ -785,18 +771,15 @@ static struct s390_insn opcode_a7[] = { | |||
785 | }; | 771 | }; |
786 | 772 | ||
787 | static struct s390_insn opcode_aa[] = { | 773 | static struct s390_insn opcode_aa[] = { |
788 | #ifdef CONFIG_64BIT | ||
789 | { { 0, LONG_INSN_RINEXT }, 0x00, INSTR_RI_RI }, | 774 | { { 0, LONG_INSN_RINEXT }, 0x00, INSTR_RI_RI }, |
790 | { "rion", 0x01, INSTR_RI_RI }, | 775 | { "rion", 0x01, INSTR_RI_RI }, |
791 | { "tric", 0x02, INSTR_RI_RI }, | 776 | { "tric", 0x02, INSTR_RI_RI }, |
792 | { "rioff", 0x03, INSTR_RI_RI }, | 777 | { "rioff", 0x03, INSTR_RI_RI }, |
793 | { { 0, LONG_INSN_RIEMIT }, 0x04, INSTR_RI_RI }, | 778 | { { 0, LONG_INSN_RIEMIT }, 0x04, INSTR_RI_RI }, |
794 | #endif | ||
795 | { "", 0, INSTR_INVALID } | 779 | { "", 0, INSTR_INVALID } |
796 | }; | 780 | }; |
797 | 781 | ||
798 | static struct s390_insn opcode_b2[] = { | 782 | static struct s390_insn opcode_b2[] = { |
799 | #ifdef CONFIG_64BIT | ||
800 | { "stckf", 0x7c, INSTR_S_RD }, | 783 | { "stckf", 0x7c, INSTR_S_RD }, |
801 | { "lpp", 0x80, INSTR_S_RD }, | 784 | { "lpp", 0x80, INSTR_S_RD }, |
802 | { "lcctl", 0x84, INSTR_S_RD }, | 785 | { "lcctl", 0x84, INSTR_S_RD }, |
@@ -819,7 +802,6 @@ static struct s390_insn opcode_b2[] = { | |||
819 | { "tend", 0xf8, INSTR_S_00 }, | 802 | { "tend", 0xf8, INSTR_S_00 }, |
820 | { "niai", 0xfa, INSTR_IE_UU }, | 803 | { "niai", 0xfa, INSTR_IE_UU }, |
821 | { { 0, LONG_INSN_TABORT }, 0xfc, INSTR_S_RD }, | 804 | { { 0, LONG_INSN_TABORT }, 0xfc, INSTR_S_RD }, |
822 | #endif | ||
823 | { "stidp", 0x02, INSTR_S_RD }, | 805 | { "stidp", 0x02, INSTR_S_RD }, |
824 | { "sck", 0x04, INSTR_S_RD }, | 806 | { "sck", 0x04, INSTR_S_RD }, |
825 | { "stck", 0x05, INSTR_S_RD }, | 807 | { "stck", 0x05, INSTR_S_RD }, |
@@ -908,7 +890,6 @@ static struct s390_insn opcode_b2[] = { | |||
908 | }; | 890 | }; |
909 | 891 | ||
910 | static struct s390_insn opcode_b3[] = { | 892 | static struct s390_insn opcode_b3[] = { |
911 | #ifdef CONFIG_64BIT | ||
912 | { "maylr", 0x38, INSTR_RRF_F0FF }, | 893 | { "maylr", 0x38, INSTR_RRF_F0FF }, |
913 | { "mylr", 0x39, INSTR_RRF_F0FF }, | 894 | { "mylr", 0x39, INSTR_RRF_F0FF }, |
914 | { "mayr", 0x3a, INSTR_RRF_F0FF }, | 895 | { "mayr", 0x3a, INSTR_RRF_F0FF }, |
@@ -996,7 +977,6 @@ static struct s390_insn opcode_b3[] = { | |||
996 | { "qaxtr", 0xfd, INSTR_RRF_FUFF }, | 977 | { "qaxtr", 0xfd, INSTR_RRF_FUFF }, |
997 | { "iextr", 0xfe, INSTR_RRF_F0FR }, | 978 | { "iextr", 0xfe, INSTR_RRF_F0FR }, |
998 | { "rrxtr", 0xff, INSTR_RRF_FFRU }, | 979 | { "rrxtr", 0xff, INSTR_RRF_FFRU }, |
999 | #endif | ||
1000 | { "lpebr", 0x00, INSTR_RRE_FF }, | 980 | { "lpebr", 0x00, INSTR_RRE_FF }, |
1001 | { "lnebr", 0x01, INSTR_RRE_FF }, | 981 | { "lnebr", 0x01, INSTR_RRE_FF }, |
1002 | { "ltebr", 0x02, INSTR_RRE_FF }, | 982 | { "ltebr", 0x02, INSTR_RRE_FF }, |
@@ -1091,7 +1071,6 @@ static struct s390_insn opcode_b3[] = { | |||
1091 | }; | 1071 | }; |
1092 | 1072 | ||
1093 | static struct s390_insn opcode_b9[] = { | 1073 | static struct s390_insn opcode_b9[] = { |
1094 | #ifdef CONFIG_64BIT | ||
1095 | { "lpgr", 0x00, INSTR_RRE_RR }, | 1074 | { "lpgr", 0x00, INSTR_RRE_RR }, |
1096 | { "lngr", 0x01, INSTR_RRE_RR }, | 1075 | { "lngr", 0x01, INSTR_RRE_RR }, |
1097 | { "ltgr", 0x02, INSTR_RRE_RR }, | 1076 | { "ltgr", 0x02, INSTR_RRE_RR }, |
@@ -1204,7 +1183,6 @@ static struct s390_insn opcode_b9[] = { | |||
1204 | { "srk", 0xf9, INSTR_RRF_R0RR2 }, | 1183 | { "srk", 0xf9, INSTR_RRF_R0RR2 }, |
1205 | { "alrk", 0xfa, INSTR_RRF_R0RR2 }, | 1184 | { "alrk", 0xfa, INSTR_RRF_R0RR2 }, |
1206 | { "slrk", 0xfb, INSTR_RRF_R0RR2 }, | 1185 | { "slrk", 0xfb, INSTR_RRF_R0RR2 }, |
1207 | #endif | ||
1208 | { "kmac", 0x1e, INSTR_RRE_RR }, | 1186 | { "kmac", 0x1e, INSTR_RRE_RR }, |
1209 | { "lrvr", 0x1f, INSTR_RRE_RR }, | 1187 | { "lrvr", 0x1f, INSTR_RRE_RR }, |
1210 | { "km", 0x2e, INSTR_RRE_RR }, | 1188 | { "km", 0x2e, INSTR_RRE_RR }, |
@@ -1224,7 +1202,6 @@ static struct s390_insn opcode_b9[] = { | |||
1224 | }; | 1202 | }; |
1225 | 1203 | ||
1226 | static struct s390_insn opcode_c0[] = { | 1204 | static struct s390_insn opcode_c0[] = { |
1227 | #ifdef CONFIG_64BIT | ||
1228 | { "lgfi", 0x01, INSTR_RIL_RI }, | 1205 | { "lgfi", 0x01, INSTR_RIL_RI }, |
1229 | { "xihf", 0x06, INSTR_RIL_RU }, | 1206 | { "xihf", 0x06, INSTR_RIL_RU }, |
1230 | { "xilf", 0x07, INSTR_RIL_RU }, | 1207 | { "xilf", 0x07, INSTR_RIL_RU }, |
@@ -1236,7 +1213,6 @@ static struct s390_insn opcode_c0[] = { | |||
1236 | { "oilf", 0x0d, INSTR_RIL_RU }, | 1213 | { "oilf", 0x0d, INSTR_RIL_RU }, |
1237 | { "llihf", 0x0e, INSTR_RIL_RU }, | 1214 | { "llihf", 0x0e, INSTR_RIL_RU }, |
1238 | { "llilf", 0x0f, INSTR_RIL_RU }, | 1215 | { "llilf", 0x0f, INSTR_RIL_RU }, |
1239 | #endif | ||
1240 | { "larl", 0x00, INSTR_RIL_RP }, | 1216 | { "larl", 0x00, INSTR_RIL_RP }, |
1241 | { "brcl", 0x04, INSTR_RIL_UP }, | 1217 | { "brcl", 0x04, INSTR_RIL_UP }, |
1242 | { "brasl", 0x05, INSTR_RIL_RP }, | 1218 | { "brasl", 0x05, INSTR_RIL_RP }, |
@@ -1244,7 +1220,6 @@ static struct s390_insn opcode_c0[] = { | |||
1244 | }; | 1220 | }; |
1245 | 1221 | ||
1246 | static struct s390_insn opcode_c2[] = { | 1222 | static struct s390_insn opcode_c2[] = { |
1247 | #ifdef CONFIG_64BIT | ||
1248 | { "msgfi", 0x00, INSTR_RIL_RI }, | 1223 | { "msgfi", 0x00, INSTR_RIL_RI }, |
1249 | { "msfi", 0x01, INSTR_RIL_RI }, | 1224 | { "msfi", 0x01, INSTR_RIL_RI }, |
1250 | { "slgfi", 0x04, INSTR_RIL_RU }, | 1225 | { "slgfi", 0x04, INSTR_RIL_RU }, |
@@ -1257,12 +1232,10 @@ static struct s390_insn opcode_c2[] = { | |||
1257 | { "cfi", 0x0d, INSTR_RIL_RI }, | 1232 | { "cfi", 0x0d, INSTR_RIL_RI }, |
1258 | { "clgfi", 0x0e, INSTR_RIL_RU }, | 1233 | { "clgfi", 0x0e, INSTR_RIL_RU }, |
1259 | { "clfi", 0x0f, INSTR_RIL_RU }, | 1234 | { "clfi", 0x0f, INSTR_RIL_RU }, |
1260 | #endif | ||
1261 | { "", 0, INSTR_INVALID } | 1235 | { "", 0, INSTR_INVALID } |
1262 | }; | 1236 | }; |
1263 | 1237 | ||
1264 | static struct s390_insn opcode_c4[] = { | 1238 | static struct s390_insn opcode_c4[] = { |
1265 | #ifdef CONFIG_64BIT | ||
1266 | { "llhrl", 0x02, INSTR_RIL_RP }, | 1239 | { "llhrl", 0x02, INSTR_RIL_RP }, |
1267 | { "lghrl", 0x04, INSTR_RIL_RP }, | 1240 | { "lghrl", 0x04, INSTR_RIL_RP }, |
1268 | { "lhrl", 0x05, INSTR_RIL_RP }, | 1241 | { "lhrl", 0x05, INSTR_RIL_RP }, |
@@ -1274,12 +1247,10 @@ static struct s390_insn opcode_c4[] = { | |||
1274 | { "lrl", 0x0d, INSTR_RIL_RP }, | 1247 | { "lrl", 0x0d, INSTR_RIL_RP }, |
1275 | { { 0, LONG_INSN_LLGFRL }, 0x0e, INSTR_RIL_RP }, | 1248 | { { 0, LONG_INSN_LLGFRL }, 0x0e, INSTR_RIL_RP }, |
1276 | { "strl", 0x0f, INSTR_RIL_RP }, | 1249 | { "strl", 0x0f, INSTR_RIL_RP }, |
1277 | #endif | ||
1278 | { "", 0, INSTR_INVALID } | 1250 | { "", 0, INSTR_INVALID } |
1279 | }; | 1251 | }; |
1280 | 1252 | ||
1281 | static struct s390_insn opcode_c6[] = { | 1253 | static struct s390_insn opcode_c6[] = { |
1282 | #ifdef CONFIG_64BIT | ||
1283 | { "exrl", 0x00, INSTR_RIL_RP }, | 1254 | { "exrl", 0x00, INSTR_RIL_RP }, |
1284 | { "pfdrl", 0x02, INSTR_RIL_UP }, | 1255 | { "pfdrl", 0x02, INSTR_RIL_UP }, |
1285 | { "cghrl", 0x04, INSTR_RIL_RP }, | 1256 | { "cghrl", 0x04, INSTR_RIL_RP }, |
@@ -1292,35 +1263,29 @@ static struct s390_insn opcode_c6[] = { | |||
1292 | { "crl", 0x0d, INSTR_RIL_RP }, | 1263 | { "crl", 0x0d, INSTR_RIL_RP }, |
1293 | { { 0, LONG_INSN_CLGFRL }, 0x0e, INSTR_RIL_RP }, | 1264 | { { 0, LONG_INSN_CLGFRL }, 0x0e, INSTR_RIL_RP }, |
1294 | { "clrl", 0x0f, INSTR_RIL_RP }, | 1265 | { "clrl", 0x0f, INSTR_RIL_RP }, |
1295 | #endif | ||
1296 | { "", 0, INSTR_INVALID } | 1266 | { "", 0, INSTR_INVALID } |
1297 | }; | 1267 | }; |
1298 | 1268 | ||
1299 | static struct s390_insn opcode_c8[] = { | 1269 | static struct s390_insn opcode_c8[] = { |
1300 | #ifdef CONFIG_64BIT | ||
1301 | { "mvcos", 0x00, INSTR_SSF_RRDRD }, | 1270 | { "mvcos", 0x00, INSTR_SSF_RRDRD }, |
1302 | { "ectg", 0x01, INSTR_SSF_RRDRD }, | 1271 | { "ectg", 0x01, INSTR_SSF_RRDRD }, |
1303 | { "csst", 0x02, INSTR_SSF_RRDRD }, | 1272 | { "csst", 0x02, INSTR_SSF_RRDRD }, |
1304 | { "lpd", 0x04, INSTR_SSF_RRDRD2 }, | 1273 | { "lpd", 0x04, INSTR_SSF_RRDRD2 }, |
1305 | { "lpdg", 0x05, INSTR_SSF_RRDRD2 }, | 1274 | { "lpdg", 0x05, INSTR_SSF_RRDRD2 }, |
1306 | #endif | ||
1307 | { "", 0, INSTR_INVALID } | 1275 | { "", 0, INSTR_INVALID } |
1308 | }; | 1276 | }; |
1309 | 1277 | ||
1310 | static struct s390_insn opcode_cc[] = { | 1278 | static struct s390_insn opcode_cc[] = { |
1311 | #ifdef CONFIG_64BIT | ||
1312 | { "brcth", 0x06, INSTR_RIL_RP }, | 1279 | { "brcth", 0x06, INSTR_RIL_RP }, |
1313 | { "aih", 0x08, INSTR_RIL_RI }, | 1280 | { "aih", 0x08, INSTR_RIL_RI }, |
1314 | { "alsih", 0x0a, INSTR_RIL_RI }, | 1281 | { "alsih", 0x0a, INSTR_RIL_RI }, |
1315 | { { 0, LONG_INSN_ALSIHN }, 0x0b, INSTR_RIL_RI }, | 1282 | { { 0, LONG_INSN_ALSIHN }, 0x0b, INSTR_RIL_RI }, |
1316 | { "cih", 0x0d, INSTR_RIL_RI }, | 1283 | { "cih", 0x0d, INSTR_RIL_RI }, |
1317 | { "clih", 0x0f, INSTR_RIL_RI }, | 1284 | { "clih", 0x0f, INSTR_RIL_RI }, |
1318 | #endif | ||
1319 | { "", 0, INSTR_INVALID } | 1285 | { "", 0, INSTR_INVALID } |
1320 | }; | 1286 | }; |
1321 | 1287 | ||
1322 | static struct s390_insn opcode_e3[] = { | 1288 | static struct s390_insn opcode_e3[] = { |
1323 | #ifdef CONFIG_64BIT | ||
1324 | { "ltg", 0x02, INSTR_RXY_RRRD }, | 1289 | { "ltg", 0x02, INSTR_RXY_RRRD }, |
1325 | { "lrag", 0x03, INSTR_RXY_RRRD }, | 1290 | { "lrag", 0x03, INSTR_RXY_RRRD }, |
1326 | { "lg", 0x04, INSTR_RXY_RRRD }, | 1291 | { "lg", 0x04, INSTR_RXY_RRRD }, |
@@ -1414,7 +1379,6 @@ static struct s390_insn opcode_e3[] = { | |||
1414 | { "clhf", 0xcf, INSTR_RXY_RRRD }, | 1379 | { "clhf", 0xcf, INSTR_RXY_RRRD }, |
1415 | { { 0, LONG_INSN_MPCIFC }, 0xd0, INSTR_RXY_RRRD }, | 1380 | { { 0, LONG_INSN_MPCIFC }, 0xd0, INSTR_RXY_RRRD }, |
1416 | { { 0, LONG_INSN_STPCIFC }, 0xd4, INSTR_RXY_RRRD }, | 1381 | { { 0, LONG_INSN_STPCIFC }, 0xd4, INSTR_RXY_RRRD }, |
1417 | #endif | ||
1418 | { "lrv", 0x1e, INSTR_RXY_RRRD }, | 1382 | { "lrv", 0x1e, INSTR_RXY_RRRD }, |
1419 | { "lrvh", 0x1f, INSTR_RXY_RRRD }, | 1383 | { "lrvh", 0x1f, INSTR_RXY_RRRD }, |
1420 | { "strv", 0x3e, INSTR_RXY_RRRD }, | 1384 | { "strv", 0x3e, INSTR_RXY_RRRD }, |
@@ -1426,7 +1390,6 @@ static struct s390_insn opcode_e3[] = { | |||
1426 | }; | 1390 | }; |
1427 | 1391 | ||
1428 | static struct s390_insn opcode_e5[] = { | 1392 | static struct s390_insn opcode_e5[] = { |
1429 | #ifdef CONFIG_64BIT | ||
1430 | { "strag", 0x02, INSTR_SSE_RDRD }, | 1393 | { "strag", 0x02, INSTR_SSE_RDRD }, |
1431 | { "mvhhi", 0x44, INSTR_SIL_RDI }, | 1394 | { "mvhhi", 0x44, INSTR_SIL_RDI }, |
1432 | { "mvghi", 0x48, INSTR_SIL_RDI }, | 1395 | { "mvghi", 0x48, INSTR_SIL_RDI }, |
@@ -1439,7 +1402,6 @@ static struct s390_insn opcode_e5[] = { | |||
1439 | { { 0, LONG_INSN_CLFHSI }, 0x5d, INSTR_SIL_RDU }, | 1402 | { { 0, LONG_INSN_CLFHSI }, 0x5d, INSTR_SIL_RDU }, |
1440 | { { 0, LONG_INSN_TBEGIN }, 0x60, INSTR_SIL_RDU }, | 1403 | { { 0, LONG_INSN_TBEGIN }, 0x60, INSTR_SIL_RDU }, |
1441 | { { 0, LONG_INSN_TBEGINC }, 0x61, INSTR_SIL_RDU }, | 1404 | { { 0, LONG_INSN_TBEGINC }, 0x61, INSTR_SIL_RDU }, |
1442 | #endif | ||
1443 | { "lasp", 0x00, INSTR_SSE_RDRD }, | 1405 | { "lasp", 0x00, INSTR_SSE_RDRD }, |
1444 | { "tprot", 0x01, INSTR_SSE_RDRD }, | 1406 | { "tprot", 0x01, INSTR_SSE_RDRD }, |
1445 | { "mvcsk", 0x0e, INSTR_SSE_RDRD }, | 1407 | { "mvcsk", 0x0e, INSTR_SSE_RDRD }, |
@@ -1448,7 +1410,6 @@ static struct s390_insn opcode_e5[] = { | |||
1448 | }; | 1410 | }; |
1449 | 1411 | ||
1450 | static struct s390_insn opcode_e7[] = { | 1412 | static struct s390_insn opcode_e7[] = { |
1451 | #ifdef CONFIG_64BIT | ||
1452 | { "lcbb", 0x27, INSTR_RXE_RRRDM }, | 1413 | { "lcbb", 0x27, INSTR_RXE_RRRDM }, |
1453 | { "vgef", 0x13, INSTR_VRV_VVRDM }, | 1414 | { "vgef", 0x13, INSTR_VRV_VVRDM }, |
1454 | { "vgeg", 0x12, INSTR_VRV_VVRDM }, | 1415 | { "vgeg", 0x12, INSTR_VRV_VVRDM }, |
@@ -1588,11 +1549,9 @@ static struct s390_insn opcode_e7[] = { | |||
1588 | { "vfsq", 0xce, INSTR_VRR_VV000MM }, | 1549 | { "vfsq", 0xce, INSTR_VRR_VV000MM }, |
1589 | { "vfs", 0xe2, INSTR_VRR_VVV00MM }, | 1550 | { "vfs", 0xe2, INSTR_VRR_VVV00MM }, |
1590 | { "vftci", 0x4a, INSTR_VRI_VVIMM }, | 1551 | { "vftci", 0x4a, INSTR_VRI_VVIMM }, |
1591 | #endif | ||
1592 | }; | 1552 | }; |
1593 | 1553 | ||
1594 | static struct s390_insn opcode_eb[] = { | 1554 | static struct s390_insn opcode_eb[] = { |
1595 | #ifdef CONFIG_64BIT | ||
1596 | { "lmg", 0x04, INSTR_RSY_RRRD }, | 1555 | { "lmg", 0x04, INSTR_RSY_RRRD }, |
1597 | { "srag", 0x0a, INSTR_RSY_RRRD }, | 1556 | { "srag", 0x0a, INSTR_RSY_RRRD }, |
1598 | { "slag", 0x0b, INSTR_RSY_RRRD }, | 1557 | { "slag", 0x0b, INSTR_RSY_RRRD }, |
@@ -1659,7 +1618,6 @@ static struct s390_insn opcode_eb[] = { | |||
1659 | { "stric", 0x61, INSTR_RSY_RDRM }, | 1618 | { "stric", 0x61, INSTR_RSY_RDRM }, |
1660 | { "mric", 0x62, INSTR_RSY_RDRM }, | 1619 | { "mric", 0x62, INSTR_RSY_RDRM }, |
1661 | { { 0, LONG_INSN_STCCTM }, 0x17, INSTR_RSY_RMRD }, | 1620 | { { 0, LONG_INSN_STCCTM }, 0x17, INSTR_RSY_RMRD }, |
1662 | #endif | ||
1663 | { "rll", 0x1d, INSTR_RSY_RRRD }, | 1621 | { "rll", 0x1d, INSTR_RSY_RRRD }, |
1664 | { "mvclu", 0x8e, INSTR_RSY_RRRD }, | 1622 | { "mvclu", 0x8e, INSTR_RSY_RRRD }, |
1665 | { "tp", 0xc0, INSTR_RSL_R0RD }, | 1623 | { "tp", 0xc0, INSTR_RSL_R0RD }, |
@@ -1667,7 +1625,6 @@ static struct s390_insn opcode_eb[] = { | |||
1667 | }; | 1625 | }; |
1668 | 1626 | ||
1669 | static struct s390_insn opcode_ec[] = { | 1627 | static struct s390_insn opcode_ec[] = { |
1670 | #ifdef CONFIG_64BIT | ||
1671 | { "brxhg", 0x44, INSTR_RIE_RRP }, | 1628 | { "brxhg", 0x44, INSTR_RIE_RRP }, |
1672 | { "brxlg", 0x45, INSTR_RIE_RRP }, | 1629 | { "brxlg", 0x45, INSTR_RIE_RRP }, |
1673 | { { 0, LONG_INSN_RISBLG }, 0x51, INSTR_RIE_RRUUU }, | 1630 | { { 0, LONG_INSN_RISBLG }, 0x51, INSTR_RIE_RRUUU }, |
@@ -1701,12 +1658,10 @@ static struct s390_insn opcode_ec[] = { | |||
1701 | { "clgib", 0xfd, INSTR_RIS_RURDU }, | 1658 | { "clgib", 0xfd, INSTR_RIS_RURDU }, |
1702 | { "cib", 0xfe, INSTR_RIS_RURDI }, | 1659 | { "cib", 0xfe, INSTR_RIS_RURDI }, |
1703 | { "clib", 0xff, INSTR_RIS_RURDU }, | 1660 | { "clib", 0xff, INSTR_RIS_RURDU }, |
1704 | #endif | ||
1705 | { "", 0, INSTR_INVALID } | 1661 | { "", 0, INSTR_INVALID } |
1706 | }; | 1662 | }; |
1707 | 1663 | ||
1708 | static struct s390_insn opcode_ed[] = { | 1664 | static struct s390_insn opcode_ed[] = { |
1709 | #ifdef CONFIG_64BIT | ||
1710 | { "mayl", 0x38, INSTR_RXF_FRRDF }, | 1665 | { "mayl", 0x38, INSTR_RXF_FRRDF }, |
1711 | { "myl", 0x39, INSTR_RXF_FRRDF }, | 1666 | { "myl", 0x39, INSTR_RXF_FRRDF }, |
1712 | { "may", 0x3a, INSTR_RXF_FRRDF }, | 1667 | { "may", 0x3a, INSTR_RXF_FRRDF }, |
@@ -1731,7 +1686,6 @@ static struct s390_insn opcode_ed[] = { | |||
1731 | { "czxt", 0xa9, INSTR_RSL_LRDFU }, | 1686 | { "czxt", 0xa9, INSTR_RSL_LRDFU }, |
1732 | { "cdzt", 0xaa, INSTR_RSL_LRDFU }, | 1687 | { "cdzt", 0xaa, INSTR_RSL_LRDFU }, |
1733 | { "cxzt", 0xab, INSTR_RSL_LRDFU }, | 1688 | { "cxzt", 0xab, INSTR_RSL_LRDFU }, |
1734 | #endif | ||
1735 | { "ldeb", 0x04, INSTR_RXE_FRRD }, | 1689 | { "ldeb", 0x04, INSTR_RXE_FRRD }, |
1736 | { "lxdb", 0x05, INSTR_RXE_FRRD }, | 1690 | { "lxdb", 0x05, INSTR_RXE_FRRD }, |
1737 | { "lxeb", 0x06, INSTR_RXE_FRRD }, | 1691 | { "lxeb", 0x06, INSTR_RXE_FRRD }, |
@@ -2051,7 +2005,7 @@ void show_code(struct pt_regs *regs) | |||
2051 | else | 2005 | else |
2052 | *ptr++ = ' '; | 2006 | *ptr++ = ' '; |
2053 | addr = regs->psw.addr + start - 32; | 2007 | addr = regs->psw.addr + start - 32; |
2054 | ptr += sprintf(ptr, ONELONG, addr); | 2008 | ptr += sprintf(ptr, "%016lx: ", addr); |
2055 | if (start + opsize >= end) | 2009 | if (start + opsize >= end) |
2056 | break; | 2010 | break; |
2057 | for (i = 0; i < opsize; i++) | 2011 | for (i = 0; i < opsize; i++) |
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c index a99852e96a77..dc8e20473484 100644 --- a/arch/s390/kernel/dumpstack.c +++ b/arch/s390/kernel/dumpstack.c | |||
@@ -18,16 +18,6 @@ | |||
18 | #include <asm/dis.h> | 18 | #include <asm/dis.h> |
19 | #include <asm/ipl.h> | 19 | #include <asm/ipl.h> |
20 | 20 | ||
21 | #ifndef CONFIG_64BIT | ||
22 | #define LONG "%08lx " | ||
23 | #define FOURLONG "%08lx %08lx %08lx %08lx\n" | ||
24 | static int kstack_depth_to_print = 12; | ||
25 | #else /* CONFIG_64BIT */ | ||
26 | #define LONG "%016lx " | ||
27 | #define FOURLONG "%016lx %016lx %016lx %016lx\n" | ||
28 | static int kstack_depth_to_print = 20; | ||
29 | #endif /* CONFIG_64BIT */ | ||
30 | |||
31 | /* | 21 | /* |
32 | * For show_trace we have tree different stack to consider: | 22 | * For show_trace we have tree different stack to consider: |
33 | * - the panic stack which is used if the kernel stack has overflown | 23 | * - the panic stack which is used if the kernel stack has overflown |
@@ -115,12 +105,12 @@ void show_stack(struct task_struct *task, unsigned long *sp) | |||
115 | else | 105 | else |
116 | stack = sp; | 106 | stack = sp; |
117 | 107 | ||
118 | for (i = 0; i < kstack_depth_to_print; i++) { | 108 | for (i = 0; i < 20; i++) { |
119 | if (((addr_t) stack & (THREAD_SIZE-1)) == 0) | 109 | if (((addr_t) stack & (THREAD_SIZE-1)) == 0) |
120 | break; | 110 | break; |
121 | if ((i * sizeof(long) % 32) == 0) | 111 | if ((i * sizeof(long) % 32) == 0) |
122 | printk("%s ", i == 0 ? "" : "\n"); | 112 | printk("%s ", i == 0 ? "" : "\n"); |
123 | printk(LONG, *stack++); | 113 | printk("%016lx ", *stack++); |
124 | } | 114 | } |
125 | printk("\n"); | 115 | printk("\n"); |
126 | show_trace(task, sp); | 116 | show_trace(task, sp); |
@@ -128,10 +118,8 @@ void show_stack(struct task_struct *task, unsigned long *sp) | |||
128 | 118 | ||
129 | static void show_last_breaking_event(struct pt_regs *regs) | 119 | static void show_last_breaking_event(struct pt_regs *regs) |
130 | { | 120 | { |
131 | #ifdef CONFIG_64BIT | ||
132 | printk("Last Breaking-Event-Address:\n"); | 121 | printk("Last Breaking-Event-Address:\n"); |
133 | printk(" [<%016lx>] %pSR\n", regs->args[0], (void *)regs->args[0]); | 122 | printk(" [<%016lx>] %pSR\n", regs->args[0], (void *)regs->args[0]); |
134 | #endif | ||
135 | } | 123 | } |
136 | 124 | ||
137 | static inline int mask_bits(struct pt_regs *regs, unsigned long bits) | 125 | static inline int mask_bits(struct pt_regs *regs, unsigned long bits) |
@@ -155,16 +143,14 @@ void show_registers(struct pt_regs *regs) | |||
155 | mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT), | 143 | mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT), |
156 | mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC), | 144 | mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC), |
157 | mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM)); | 145 | mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM)); |
158 | #ifdef CONFIG_64BIT | ||
159 | printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA)); | 146 | printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA)); |
160 | #endif | 147 | printk("\n%s GPRS: %016lx %016lx %016lx %016lx\n", mode, |
161 | printk("\n%s GPRS: " FOURLONG, mode, | ||
162 | regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]); | 148 | regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]); |
163 | printk(" " FOURLONG, | 149 | printk(" %016lx %016lx %016lx %016lx\n", |
164 | regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]); | 150 | regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]); |
165 | printk(" " FOURLONG, | 151 | printk(" %016lx %016lx %016lx %016lx\n", |
166 | regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]); | 152 | regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]); |
167 | printk(" " FOURLONG, | 153 | printk(" %016lx %016lx %016lx %016lx\n", |
168 | regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]); | 154 | regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]); |
169 | show_code(regs); | 155 | show_code(regs); |
170 | } | 156 | } |
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index 4427ab7ac23a..549a73a4b543 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c | |||
@@ -64,7 +64,6 @@ asm( | |||
64 | " .align 4\n" | 64 | " .align 4\n" |
65 | " .type savesys_ipl_nss, @function\n" | 65 | " .type savesys_ipl_nss, @function\n" |
66 | "savesys_ipl_nss:\n" | 66 | "savesys_ipl_nss:\n" |
67 | #ifdef CONFIG_64BIT | ||
68 | " stmg 6,15,48(15)\n" | 67 | " stmg 6,15,48(15)\n" |
69 | " lgr 14,3\n" | 68 | " lgr 14,3\n" |
70 | " sam31\n" | 69 | " sam31\n" |
@@ -72,13 +71,6 @@ asm( | |||
72 | " sam64\n" | 71 | " sam64\n" |
73 | " lgr 2,14\n" | 72 | " lgr 2,14\n" |
74 | " lmg 6,15,48(15)\n" | 73 | " lmg 6,15,48(15)\n" |
75 | #else | ||
76 | " stm 6,15,24(15)\n" | ||
77 | " lr 14,3\n" | ||
78 | " diag 2,14,0x8\n" | ||
79 | " lr 2,14\n" | ||
80 | " lm 6,15,24(15)\n" | ||
81 | #endif | ||
82 | " br 14\n" | 74 | " br 14\n" |
83 | " .size savesys_ipl_nss, .-savesys_ipl_nss\n" | 75 | " .size savesys_ipl_nss, .-savesys_ipl_nss\n" |
84 | " .previous\n"); | 76 | " .previous\n"); |
@@ -240,7 +232,6 @@ static noinline __init void detect_machine_type(void) | |||
240 | 232 | ||
241 | static __init void setup_topology(void) | 233 | static __init void setup_topology(void) |
242 | { | 234 | { |
243 | #ifdef CONFIG_64BIT | ||
244 | int max_mnest; | 235 | int max_mnest; |
245 | 236 | ||
246 | if (!test_facility(11)) | 237 | if (!test_facility(11)) |
@@ -251,7 +242,6 @@ static __init void setup_topology(void) | |||
251 | break; | 242 | break; |
252 | } | 243 | } |
253 | topology_max_mnest = max_mnest; | 244 | topology_max_mnest = max_mnest; |
254 | #endif | ||
255 | } | 245 | } |
256 | 246 | ||
257 | static void early_pgm_check_handler(void) | 247 | static void early_pgm_check_handler(void) |
@@ -290,58 +280,6 @@ static noinline __init void setup_facility_list(void) | |||
290 | ARRAY_SIZE(S390_lowcore.stfle_fac_list)); | 280 | ARRAY_SIZE(S390_lowcore.stfle_fac_list)); |
291 | } | 281 | } |
292 | 282 | ||
293 | static __init void detect_mvpg(void) | ||
294 | { | ||
295 | #ifndef CONFIG_64BIT | ||
296 | int rc; | ||
297 | |||
298 | asm volatile( | ||
299 | " la 0,0\n" | ||
300 | " mvpg %2,%2\n" | ||
301 | "0: la %0,0\n" | ||
302 | "1:\n" | ||
303 | EX_TABLE(0b,1b) | ||
304 | : "=d" (rc) : "0" (-EOPNOTSUPP), "a" (0) : "memory", "cc", "0"); | ||
305 | if (!rc) | ||
306 | S390_lowcore.machine_flags |= MACHINE_FLAG_MVPG; | ||
307 | #endif | ||
308 | } | ||
309 | |||
310 | static __init void detect_ieee(void) | ||
311 | { | ||
312 | #ifndef CONFIG_64BIT | ||
313 | int rc, tmp; | ||
314 | |||
315 | asm volatile( | ||
316 | " efpc %1,0\n" | ||
317 | "0: la %0,0\n" | ||
318 | "1:\n" | ||
319 | EX_TABLE(0b,1b) | ||
320 | : "=d" (rc), "=d" (tmp): "0" (-EOPNOTSUPP) : "cc"); | ||
321 | if (!rc) | ||
322 | S390_lowcore.machine_flags |= MACHINE_FLAG_IEEE; | ||
323 | #endif | ||
324 | } | ||
325 | |||
326 | static __init void detect_csp(void) | ||
327 | { | ||
328 | #ifndef CONFIG_64BIT | ||
329 | int rc; | ||
330 | |||
331 | asm volatile( | ||
332 | " la 0,0\n" | ||
333 | " la 1,0\n" | ||
334 | " la 2,4\n" | ||
335 | " csp 0,2\n" | ||
336 | "0: la %0,0\n" | ||
337 | "1:\n" | ||
338 | EX_TABLE(0b,1b) | ||
339 | : "=d" (rc) : "0" (-EOPNOTSUPP) : "cc", "0", "1", "2"); | ||
340 | if (!rc) | ||
341 | S390_lowcore.machine_flags |= MACHINE_FLAG_CSP; | ||
342 | #endif | ||
343 | } | ||
344 | |||
345 | static __init void detect_diag9c(void) | 283 | static __init void detect_diag9c(void) |
346 | { | 284 | { |
347 | unsigned int cpu_address; | 285 | unsigned int cpu_address; |
@@ -360,7 +298,6 @@ static __init void detect_diag9c(void) | |||
360 | 298 | ||
361 | static __init void detect_diag44(void) | 299 | static __init void detect_diag44(void) |
362 | { | 300 | { |
363 | #ifdef CONFIG_64BIT | ||
364 | int rc; | 301 | int rc; |
365 | 302 | ||
366 | asm volatile( | 303 | asm volatile( |
@@ -371,12 +308,10 @@ static __init void detect_diag44(void) | |||
371 | : "=d" (rc) : "0" (-EOPNOTSUPP) : "cc"); | 308 | : "=d" (rc) : "0" (-EOPNOTSUPP) : "cc"); |
372 | if (!rc) | 309 | if (!rc) |
373 | S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG44; | 310 | S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG44; |
374 | #endif | ||
375 | } | 311 | } |
376 | 312 | ||
377 | static __init void detect_machine_facilities(void) | 313 | static __init void detect_machine_facilities(void) |
378 | { | 314 | { |
379 | #ifdef CONFIG_64BIT | ||
380 | if (test_facility(8)) { | 315 | if (test_facility(8)) { |
381 | S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT1; | 316 | S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT1; |
382 | __ctl_set_bit(0, 23); | 317 | __ctl_set_bit(0, 23); |
@@ -393,7 +328,6 @@ static __init void detect_machine_facilities(void) | |||
393 | S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC; | 328 | S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC; |
394 | if (test_facility(129)) | 329 | if (test_facility(129)) |
395 | S390_lowcore.machine_flags |= MACHINE_FLAG_VX; | 330 | S390_lowcore.machine_flags |= MACHINE_FLAG_VX; |
396 | #endif | ||
397 | } | 331 | } |
398 | 332 | ||
399 | static int __init cad_setup(char *str) | 333 | static int __init cad_setup(char *str) |
@@ -501,9 +435,6 @@ void __init startup_init(void) | |||
501 | ipl_update_parameters(); | 435 | ipl_update_parameters(); |
502 | setup_boot_command_line(); | 436 | setup_boot_command_line(); |
503 | create_kernel_nss(); | 437 | create_kernel_nss(); |
504 | detect_mvpg(); | ||
505 | detect_ieee(); | ||
506 | detect_csp(); | ||
507 | detect_diag9c(); | 438 | detect_diag9c(); |
508 | detect_diag44(); | 439 | detect_diag44(); |
509 | detect_machine_facilities(); | 440 | detect_machine_facilities(); |
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 398329b2b518..99b44acbfcc7 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S | |||
@@ -22,27 +22,28 @@ | |||
22 | #include <asm/irq.h> | 22 | #include <asm/irq.h> |
23 | 23 | ||
24 | __PT_R0 = __PT_GPRS | 24 | __PT_R0 = __PT_GPRS |
25 | __PT_R1 = __PT_GPRS + 4 | 25 | __PT_R1 = __PT_GPRS + 8 |
26 | __PT_R2 = __PT_GPRS + 8 | 26 | __PT_R2 = __PT_GPRS + 16 |
27 | __PT_R3 = __PT_GPRS + 12 | 27 | __PT_R3 = __PT_GPRS + 24 |
28 | __PT_R4 = __PT_GPRS + 16 | 28 | __PT_R4 = __PT_GPRS + 32 |
29 | __PT_R5 = __PT_GPRS + 20 | 29 | __PT_R5 = __PT_GPRS + 40 |
30 | __PT_R6 = __PT_GPRS + 24 | 30 | __PT_R6 = __PT_GPRS + 48 |
31 | __PT_R7 = __PT_GPRS + 28 | 31 | __PT_R7 = __PT_GPRS + 56 |
32 | __PT_R8 = __PT_GPRS + 32 | 32 | __PT_R8 = __PT_GPRS + 64 |
33 | __PT_R9 = __PT_GPRS + 36 | 33 | __PT_R9 = __PT_GPRS + 72 |
34 | __PT_R10 = __PT_GPRS + 40 | 34 | __PT_R10 = __PT_GPRS + 80 |
35 | __PT_R11 = __PT_GPRS + 44 | 35 | __PT_R11 = __PT_GPRS + 88 |
36 | __PT_R12 = __PT_GPRS + 48 | 36 | __PT_R12 = __PT_GPRS + 96 |
37 | __PT_R13 = __PT_GPRS + 524 | 37 | __PT_R13 = __PT_GPRS + 104 |
38 | __PT_R14 = __PT_GPRS + 56 | 38 | __PT_R14 = __PT_GPRS + 112 |
39 | __PT_R15 = __PT_GPRS + 60 | 39 | __PT_R15 = __PT_GPRS + 120 |
40 | 40 | ||
41 | STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER | 41 | STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER |
42 | STACK_SIZE = 1 << STACK_SHIFT | 42 | STACK_SIZE = 1 << STACK_SHIFT |
43 | STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE | 43 | STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE |
44 | 44 | ||
45 | _TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED) | 45 | _TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ |
46 | _TIF_UPROBE) | ||
46 | _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ | 47 | _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ |
47 | _TIF_SYSCALL_TRACEPOINT) | 48 | _TIF_SYSCALL_TRACEPOINT) |
48 | _CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE) | 49 | _CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE) |
@@ -53,16 +54,14 @@ _PIF_WORK = (_PIF_PER_TRAP) | |||
53 | .macro TRACE_IRQS_ON | 54 | .macro TRACE_IRQS_ON |
54 | #ifdef CONFIG_TRACE_IRQFLAGS | 55 | #ifdef CONFIG_TRACE_IRQFLAGS |
55 | basr %r2,%r0 | 56 | basr %r2,%r0 |
56 | l %r1,BASED(.Lc_hardirqs_on) | 57 | brasl %r14,trace_hardirqs_on_caller |
57 | basr %r14,%r1 # call trace_hardirqs_on_caller | ||
58 | #endif | 58 | #endif |
59 | .endm | 59 | .endm |
60 | 60 | ||
61 | .macro TRACE_IRQS_OFF | 61 | .macro TRACE_IRQS_OFF |
62 | #ifdef CONFIG_TRACE_IRQFLAGS | 62 | #ifdef CONFIG_TRACE_IRQFLAGS |
63 | basr %r2,%r0 | 63 | basr %r2,%r0 |
64 | l %r1,BASED(.Lc_hardirqs_off) | 64 | brasl %r14,trace_hardirqs_off_caller |
65 | basr %r14,%r1 # call trace_hardirqs_off_caller | ||
66 | #endif | 65 | #endif |
67 | .endm | 66 | .endm |
68 | 67 | ||
@@ -70,73 +69,104 @@ _PIF_WORK = (_PIF_PER_TRAP) | |||
70 | #ifdef CONFIG_LOCKDEP | 69 | #ifdef CONFIG_LOCKDEP |
71 | tm __PT_PSW+1(%r11),0x01 # returning to user ? | 70 | tm __PT_PSW+1(%r11),0x01 # returning to user ? |
72 | jz .+10 | 71 | jz .+10 |
73 | l %r1,BASED(.Lc_lockdep_sys_exit) | 72 | brasl %r14,lockdep_sys_exit |
74 | basr %r14,%r1 # call lockdep_sys_exit | 73 | #endif |
74 | .endm | ||
75 | |||
76 | .macro LPP newpp | ||
77 | #if IS_ENABLED(CONFIG_KVM) | ||
78 | tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_LPP | ||
79 | jz .+8 | ||
80 | .insn s,0xb2800000,\newpp | ||
81 | #endif | ||
82 | .endm | ||
83 | |||
84 | .macro HANDLE_SIE_INTERCEPT scratch,reason | ||
85 | #if IS_ENABLED(CONFIG_KVM) | ||
86 | tmhh %r8,0x0001 # interrupting from user ? | ||
87 | jnz .+62 | ||
88 | lgr \scratch,%r9 | ||
89 | slg \scratch,BASED(.Lsie_critical) | ||
90 | clg \scratch,BASED(.Lsie_critical_length) | ||
91 | .if \reason==1 | ||
92 | # Some program interrupts are suppressing (e.g. protection). | ||
93 | # We must also check the instruction after SIE in that case. | ||
94 | # do_protection_exception will rewind to .Lrewind_pad | ||
95 | jh .+42 | ||
96 | .else | ||
97 | jhe .+42 | ||
98 | .endif | ||
99 | lg %r14,__SF_EMPTY(%r15) # get control block pointer | ||
100 | LPP __SF_EMPTY+16(%r15) # set host id | ||
101 | ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE | ||
102 | lctlg %c1,%c1,__LC_USER_ASCE # load primary asce | ||
103 | larl %r9,sie_exit # skip forward to sie_exit | ||
104 | mvi __SF_EMPTY+31(%r15),\reason # set exit reason | ||
75 | #endif | 105 | #endif |
76 | .endm | 106 | .endm |
77 | 107 | ||
78 | .macro CHECK_STACK stacksize,savearea | 108 | .macro CHECK_STACK stacksize,savearea |
79 | #ifdef CONFIG_CHECK_STACK | 109 | #ifdef CONFIG_CHECK_STACK |
80 | tml %r15,\stacksize - CONFIG_STACK_GUARD | 110 | tml %r15,\stacksize - CONFIG_STACK_GUARD |
81 | la %r14,\savearea | 111 | lghi %r14,\savearea |
82 | jz stack_overflow | 112 | jz stack_overflow |
83 | #endif | 113 | #endif |
84 | .endm | 114 | .endm |
85 | 115 | ||
86 | .macro SWITCH_ASYNC savearea,stack,shift | 116 | .macro SWITCH_ASYNC savearea,stack,shift |
87 | tmh %r8,0x0001 # interrupting from user ? | 117 | tmhh %r8,0x0001 # interrupting from user ? |
88 | jnz 1f | 118 | jnz 1f |
89 | lr %r14,%r9 | 119 | lgr %r14,%r9 |
90 | sl %r14,BASED(.Lc_critical_start) | 120 | slg %r14,BASED(.Lcritical_start) |
91 | cl %r14,BASED(.Lc_critical_length) | 121 | clg %r14,BASED(.Lcritical_length) |
92 | jhe 0f | 122 | jhe 0f |
93 | la %r11,\savearea # inside critical section, do cleanup | 123 | lghi %r11,\savearea # inside critical section, do cleanup |
94 | bras %r14,cleanup_critical | 124 | brasl %r14,cleanup_critical |
95 | tmh %r8,0x0001 # retest problem state after cleanup | 125 | tmhh %r8,0x0001 # retest problem state after cleanup |
96 | jnz 1f | 126 | jnz 1f |
97 | 0: l %r14,\stack # are we already on the target stack? | 127 | 0: lg %r14,\stack # are we already on the target stack? |
98 | slr %r14,%r15 | 128 | slgr %r14,%r15 |
99 | sra %r14,\shift | 129 | srag %r14,%r14,\shift |
100 | jnz 1f | 130 | jnz 1f |
101 | CHECK_STACK 1<<\shift,\savearea | 131 | CHECK_STACK 1<<\shift,\savearea |
102 | ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) | 132 | aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) |
103 | j 2f | 133 | j 2f |
104 | 1: l %r15,\stack # load target stack | 134 | 1: lg %r15,\stack # load target stack |
105 | 2: la %r11,STACK_FRAME_OVERHEAD(%r15) | 135 | 2: la %r11,STACK_FRAME_OVERHEAD(%r15) |
106 | .endm | 136 | .endm |
107 | 137 | ||
108 | .macro ADD64 high,low,timer | 138 | .macro UPDATE_VTIME scratch,enter_timer |
109 | al \high,\timer | 139 | lg \scratch,__LC_EXIT_TIMER |
110 | al \low,4+\timer | 140 | slg \scratch,\enter_timer |
111 | brc 12,.+8 | 141 | alg \scratch,__LC_USER_TIMER |
112 | ahi \high,1 | 142 | stg \scratch,__LC_USER_TIMER |
113 | .endm | 143 | lg \scratch,__LC_LAST_UPDATE_TIMER |
114 | 144 | slg \scratch,__LC_EXIT_TIMER | |
115 | .macro SUB64 high,low,timer | 145 | alg \scratch,__LC_SYSTEM_TIMER |
116 | sl \high,\timer | 146 | stg \scratch,__LC_SYSTEM_TIMER |
117 | sl \low,4+\timer | 147 | mvc __LC_LAST_UPDATE_TIMER(8),\enter_timer |
118 | brc 3,.+8 | ||
119 | ahi \high,-1 | ||
120 | .endm | 148 | .endm |
121 | 149 | ||
122 | .macro UPDATE_VTIME high,low,enter_timer | 150 | .macro LAST_BREAK scratch |
123 | lm \high,\low,__LC_EXIT_TIMER | 151 | srag \scratch,%r10,23 |
124 | SUB64 \high,\low,\enter_timer | 152 | jz .+10 |
125 | ADD64 \high,\low,__LC_USER_TIMER | 153 | stg %r10,__TI_last_break(%r12) |
126 | stm \high,\low,__LC_USER_TIMER | ||
127 | lm \high,\low,__LC_LAST_UPDATE_TIMER | ||
128 | SUB64 \high,\low,__LC_EXIT_TIMER | ||
129 | ADD64 \high,\low,__LC_SYSTEM_TIMER | ||
130 | stm \high,\low,__LC_SYSTEM_TIMER | ||
131 | mvc __LC_LAST_UPDATE_TIMER(8),\enter_timer | ||
132 | .endm | 154 | .endm |
133 | 155 | ||
134 | .macro REENABLE_IRQS | 156 | .macro REENABLE_IRQS |
135 | st %r8,__LC_RETURN_PSW | 157 | stg %r8,__LC_RETURN_PSW |
136 | ni __LC_RETURN_PSW,0xbf | 158 | ni __LC_RETURN_PSW,0xbf |
137 | ssm __LC_RETURN_PSW | 159 | ssm __LC_RETURN_PSW |
138 | .endm | 160 | .endm |
139 | 161 | ||
162 | .macro STCK savearea | ||
163 | #ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES | ||
164 | .insn s,0xb27c0000,\savearea # store clock fast | ||
165 | #else | ||
166 | .insn s,0xb2050000,\savearea # store clock | ||
167 | #endif | ||
168 | .endm | ||
169 | |||
140 | .section .kprobes.text, "ax" | 170 | .section .kprobes.text, "ax" |
141 | 171 | ||
142 | /* | 172 | /* |
@@ -147,19 +177,19 @@ _PIF_WORK = (_PIF_PER_TRAP) | |||
147 | * gpr2 = prev | 177 | * gpr2 = prev |
148 | */ | 178 | */ |
149 | ENTRY(__switch_to) | 179 | ENTRY(__switch_to) |
150 | stm %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task | 180 | stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task |
151 | st %r15,__THREAD_ksp(%r2) # store kernel stack of prev | 181 | stg %r15,__THREAD_ksp(%r2) # store kernel stack of prev |
152 | l %r4,__THREAD_info(%r2) # get thread_info of prev | 182 | lg %r4,__THREAD_info(%r2) # get thread_info of prev |
153 | l %r5,__THREAD_info(%r3) # get thread_info of next | 183 | lg %r5,__THREAD_info(%r3) # get thread_info of next |
154 | lr %r15,%r5 | 184 | lgr %r15,%r5 |
155 | ahi %r15,STACK_INIT # end of kernel stack of next | 185 | aghi %r15,STACK_INIT # end of kernel stack of next |
156 | st %r3,__LC_CURRENT # store task struct of next | 186 | stg %r3,__LC_CURRENT # store task struct of next |
157 | st %r5,__LC_THREAD_INFO # store thread info of next | 187 | stg %r5,__LC_THREAD_INFO # store thread info of next |
158 | st %r15,__LC_KERNEL_STACK # store end of kernel stack | 188 | stg %r15,__LC_KERNEL_STACK # store end of kernel stack |
159 | lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4 | 189 | lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4 |
160 | mvc __LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next | 190 | mvc __LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next |
161 | l %r15,__THREAD_ksp(%r3) # load kernel stack of next | 191 | lg %r15,__THREAD_ksp(%r3) # load kernel stack of next |
162 | lm %r6,%r15,__SF_GPRS(%r15) # load gprs of next task | 192 | lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task |
163 | br %r14 | 193 | br %r14 |
164 | 194 | ||
165 | .L__critical_start: | 195 | .L__critical_start: |
@@ -170,75 +200,83 @@ ENTRY(__switch_to) | |||
170 | 200 | ||
171 | ENTRY(system_call) | 201 | ENTRY(system_call) |
172 | stpt __LC_SYNC_ENTER_TIMER | 202 | stpt __LC_SYNC_ENTER_TIMER |
173 | .Lsysc_stm: | 203 | .Lsysc_stmg: |
174 | stm %r8,%r15,__LC_SAVE_AREA_SYNC | 204 | stmg %r8,%r15,__LC_SAVE_AREA_SYNC |
175 | l %r12,__LC_THREAD_INFO | 205 | lg %r10,__LC_LAST_BREAK |
176 | l %r13,__LC_SVC_NEW_PSW+4 | 206 | lg %r12,__LC_THREAD_INFO |
177 | lhi %r14,_PIF_SYSCALL | 207 | lghi %r14,_PIF_SYSCALL |
178 | .Lsysc_per: | 208 | .Lsysc_per: |
179 | l %r15,__LC_KERNEL_STACK | 209 | lg %r15,__LC_KERNEL_STACK |
180 | la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs | 210 | la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs |
181 | .Lsysc_vtime: | 211 | .Lsysc_vtime: |
182 | UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER | 212 | UPDATE_VTIME %r13,__LC_SYNC_ENTER_TIMER |
183 | stm %r0,%r7,__PT_R0(%r11) | 213 | LAST_BREAK %r13 |
184 | mvc __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC | 214 | stmg %r0,%r7,__PT_R0(%r11) |
185 | mvc __PT_PSW(8,%r11),__LC_SVC_OLD_PSW | 215 | mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC |
216 | mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW | ||
186 | mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC | 217 | mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC |
187 | st %r14,__PT_FLAGS(%r11) | 218 | stg %r14,__PT_FLAGS(%r11) |
188 | .Lsysc_do_svc: | 219 | .Lsysc_do_svc: |
189 | l %r10,__TI_sysc_table(%r12) # 31 bit system call table | 220 | lg %r10,__TI_sysc_table(%r12) # address of system call table |
190 | lh %r8,__PT_INT_CODE+2(%r11) | 221 | llgh %r8,__PT_INT_CODE+2(%r11) |
191 | sla %r8,2 # shift and test for svc0 | 222 | slag %r8,%r8,2 # shift and test for svc 0 |
192 | jnz .Lsysc_nr_ok | 223 | jnz .Lsysc_nr_ok |
193 | # svc 0: system call number in %r1 | 224 | # svc 0: system call number in %r1 |
194 | cl %r1,BASED(.Lnr_syscalls) | 225 | llgfr %r1,%r1 # clear high word in r1 |
226 | cghi %r1,NR_syscalls | ||
195 | jnl .Lsysc_nr_ok | 227 | jnl .Lsysc_nr_ok |
196 | sth %r1,__PT_INT_CODE+2(%r11) | 228 | sth %r1,__PT_INT_CODE+2(%r11) |
197 | lr %r8,%r1 | 229 | slag %r8,%r1,2 |
198 | sla %r8,2 | ||
199 | .Lsysc_nr_ok: | 230 | .Lsysc_nr_ok: |
200 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) | 231 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) |
201 | st %r2,__PT_ORIG_GPR2(%r11) | 232 | stg %r2,__PT_ORIG_GPR2(%r11) |
202 | st %r7,STACK_FRAME_OVERHEAD(%r15) | 233 | stg %r7,STACK_FRAME_OVERHEAD(%r15) |
203 | l %r9,0(%r8,%r10) # get system call addr. | 234 | lgf %r9,0(%r8,%r10) # get system call add. |
204 | tm __TI_flags+3(%r12),_TIF_TRACE | 235 | tm __TI_flags+7(%r12),_TIF_TRACE |
205 | jnz .Lsysc_tracesys | 236 | jnz .Lsysc_tracesys |
206 | basr %r14,%r9 # call sys_xxxx | 237 | basr %r14,%r9 # call sys_xxxx |
207 | st %r2,__PT_R2(%r11) # store return value | 238 | stg %r2,__PT_R2(%r11) # store return value |
208 | 239 | ||
209 | .Lsysc_return: | 240 | .Lsysc_return: |
210 | LOCKDEP_SYS_EXIT | 241 | LOCKDEP_SYS_EXIT |
211 | .Lsysc_tif: | 242 | .Lsysc_tif: |
212 | tm __PT_PSW+1(%r11),0x01 # returning to user ? | 243 | tm __PT_PSW+1(%r11),0x01 # returning to user ? |
213 | jno .Lsysc_restore | 244 | jno .Lsysc_restore |
214 | tm __PT_FLAGS+3(%r11),_PIF_WORK | 245 | tm __PT_FLAGS+7(%r11),_PIF_WORK |
215 | jnz .Lsysc_work | 246 | jnz .Lsysc_work |
216 | tm __TI_flags+3(%r12),_TIF_WORK | 247 | tm __TI_flags+7(%r12),_TIF_WORK |
217 | jnz .Lsysc_work # check for thread work | 248 | jnz .Lsysc_work # check for work |
218 | tm __LC_CPU_FLAGS+3,_CIF_WORK | 249 | tm __LC_CPU_FLAGS+7,_CIF_WORK |
219 | jnz .Lsysc_work | 250 | jnz .Lsysc_work |
220 | .Lsysc_restore: | 251 | .Lsysc_restore: |
221 | mvc __LC_RETURN_PSW(8),__PT_PSW(%r11) | 252 | lg %r14,__LC_VDSO_PER_CPU |
253 | lmg %r0,%r10,__PT_R0(%r11) | ||
254 | mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) | ||
222 | stpt __LC_EXIT_TIMER | 255 | stpt __LC_EXIT_TIMER |
223 | lm %r0,%r15,__PT_R0(%r11) | 256 | mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER |
224 | lpsw __LC_RETURN_PSW | 257 | lmg %r11,%r15,__PT_R11(%r11) |
258 | lpswe __LC_RETURN_PSW | ||
225 | .Lsysc_done: | 259 | .Lsysc_done: |
226 | 260 | ||
227 | # | 261 | # |
228 | # One of the work bits is on. Find out which one. | 262 | # One of the work bits is on. Find out which one. |
229 | # | 263 | # |
230 | .Lsysc_work: | 264 | .Lsysc_work: |
231 | tm __LC_CPU_FLAGS+3,_CIF_MCCK_PENDING | 265 | tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING |
232 | jo .Lsysc_mcck_pending | 266 | jo .Lsysc_mcck_pending |
233 | tm __TI_flags+3(%r12),_TIF_NEED_RESCHED | 267 | tm __TI_flags+7(%r12),_TIF_NEED_RESCHED |
234 | jo .Lsysc_reschedule | 268 | jo .Lsysc_reschedule |
235 | tm __PT_FLAGS+3(%r11),_PIF_PER_TRAP | 269 | #ifdef CONFIG_UPROBES |
270 | tm __TI_flags+7(%r12),_TIF_UPROBE | ||
271 | jo .Lsysc_uprobe_notify | ||
272 | #endif | ||
273 | tm __PT_FLAGS+7(%r11),_PIF_PER_TRAP | ||
236 | jo .Lsysc_singlestep | 274 | jo .Lsysc_singlestep |
237 | tm __TI_flags+3(%r12),_TIF_SIGPENDING | 275 | tm __TI_flags+7(%r12),_TIF_SIGPENDING |
238 | jo .Lsysc_sigpending | 276 | jo .Lsysc_sigpending |
239 | tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME | 277 | tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME |
240 | jo .Lsysc_notify_resume | 278 | jo .Lsysc_notify_resume |
241 | tm __LC_CPU_FLAGS+3,_CIF_ASCE | 279 | tm __LC_CPU_FLAGS+7,_CIF_ASCE |
242 | jo .Lsysc_uaccess | 280 | jo .Lsysc_uaccess |
243 | j .Lsysc_return # beware of critical section cleanup | 281 | j .Lsysc_return # beware of critical section cleanup |
244 | 282 | ||
@@ -246,109 +284,109 @@ ENTRY(system_call) | |||
246 | # _TIF_NEED_RESCHED is set, call schedule | 284 | # _TIF_NEED_RESCHED is set, call schedule |
247 | # | 285 | # |
248 | .Lsysc_reschedule: | 286 | .Lsysc_reschedule: |
249 | l %r1,BASED(.Lc_schedule) | 287 | larl %r14,.Lsysc_return |
250 | la %r14,BASED(.Lsysc_return) | 288 | jg schedule |
251 | br %r1 # call schedule | ||
252 | 289 | ||
253 | # | 290 | # |
254 | # _CIF_MCCK_PENDING is set, call handler | 291 | # _CIF_MCCK_PENDING is set, call handler |
255 | # | 292 | # |
256 | .Lsysc_mcck_pending: | 293 | .Lsysc_mcck_pending: |
257 | l %r1,BASED(.Lc_handle_mcck) | 294 | larl %r14,.Lsysc_return |
258 | la %r14,BASED(.Lsysc_return) | 295 | jg s390_handle_mcck # TIF bit will be cleared by handler |
259 | br %r1 # TIF bit will be cleared by handler | ||
260 | 296 | ||
261 | # | 297 | # |
262 | # _CIF_ASCE is set, load user space asce | 298 | # _CIF_ASCE is set, load user space asce |
263 | # | 299 | # |
264 | .Lsysc_uaccess: | 300 | .Lsysc_uaccess: |
265 | ni __LC_CPU_FLAGS+3,255-_CIF_ASCE | 301 | ni __LC_CPU_FLAGS+7,255-_CIF_ASCE |
266 | lctl %c1,%c1,__LC_USER_ASCE # load primary asce | 302 | lctlg %c1,%c1,__LC_USER_ASCE # load primary asce |
267 | j .Lsysc_return | 303 | j .Lsysc_return |
268 | 304 | ||
269 | # | 305 | # |
270 | # _TIF_SIGPENDING is set, call do_signal | 306 | # _TIF_SIGPENDING is set, call do_signal |
271 | # | 307 | # |
272 | .Lsysc_sigpending: | 308 | .Lsysc_sigpending: |
273 | lr %r2,%r11 # pass pointer to pt_regs | 309 | lgr %r2,%r11 # pass pointer to pt_regs |
274 | l %r1,BASED(.Lc_do_signal) | 310 | brasl %r14,do_signal |
275 | basr %r14,%r1 # call do_signal | 311 | tm __PT_FLAGS+7(%r11),_PIF_SYSCALL |
276 | tm __PT_FLAGS+3(%r11),_PIF_SYSCALL | ||
277 | jno .Lsysc_return | 312 | jno .Lsysc_return |
278 | lm %r2,%r7,__PT_R2(%r11) # load svc arguments | 313 | lmg %r2,%r7,__PT_R2(%r11) # load svc arguments |
279 | l %r10,__TI_sysc_table(%r12) # 31 bit system call table | 314 | lg %r10,__TI_sysc_table(%r12) # address of system call table |
280 | xr %r8,%r8 # svc 0 returns -ENOSYS | 315 | lghi %r8,0 # svc 0 returns -ENOSYS |
281 | clc __PT_INT_CODE+2(2,%r11),BASED(.Lnr_syscalls+2) | 316 | llgh %r1,__PT_INT_CODE+2(%r11) # load new svc number |
317 | cghi %r1,NR_syscalls | ||
282 | jnl .Lsysc_nr_ok # invalid svc number -> do svc 0 | 318 | jnl .Lsysc_nr_ok # invalid svc number -> do svc 0 |
283 | lh %r8,__PT_INT_CODE+2(%r11) # load new svc number | 319 | slag %r8,%r1,2 |
284 | sla %r8,2 | ||
285 | j .Lsysc_nr_ok # restart svc | 320 | j .Lsysc_nr_ok # restart svc |
286 | 321 | ||
287 | # | 322 | # |
288 | # _TIF_NOTIFY_RESUME is set, call do_notify_resume | 323 | # _TIF_NOTIFY_RESUME is set, call do_notify_resume |
289 | # | 324 | # |
290 | .Lsysc_notify_resume: | 325 | .Lsysc_notify_resume: |
291 | lr %r2,%r11 # pass pointer to pt_regs | 326 | lgr %r2,%r11 # pass pointer to pt_regs |
292 | l %r1,BASED(.Lc_do_notify_resume) | 327 | larl %r14,.Lsysc_return |
293 | la %r14,BASED(.Lsysc_return) | 328 | jg do_notify_resume |
294 | br %r1 # call do_notify_resume | 329 | |
330 | # | ||
331 | # _TIF_UPROBE is set, call uprobe_notify_resume | ||
332 | # | ||
333 | #ifdef CONFIG_UPROBES | ||
334 | .Lsysc_uprobe_notify: | ||
335 | lgr %r2,%r11 # pass pointer to pt_regs | ||
336 | larl %r14,.Lsysc_return | ||
337 | jg uprobe_notify_resume | ||
338 | #endif | ||
295 | 339 | ||
296 | # | 340 | # |
297 | # _PIF_PER_TRAP is set, call do_per_trap | 341 | # _PIF_PER_TRAP is set, call do_per_trap |
298 | # | 342 | # |
299 | .Lsysc_singlestep: | 343 | .Lsysc_singlestep: |
300 | ni __PT_FLAGS+3(%r11),255-_PIF_PER_TRAP | 344 | ni __PT_FLAGS+7(%r11),255-_PIF_PER_TRAP |
301 | lr %r2,%r11 # pass pointer to pt_regs | 345 | lgr %r2,%r11 # pass pointer to pt_regs |
302 | l %r1,BASED(.Lc_do_per_trap) | 346 | larl %r14,.Lsysc_return |
303 | la %r14,BASED(.Lsysc_return) | 347 | jg do_per_trap |
304 | br %r1 # call do_per_trap | ||
305 | 348 | ||
306 | # | 349 | # |
307 | # call tracehook_report_syscall_entry/tracehook_report_syscall_exit before | 350 | # call tracehook_report_syscall_entry/tracehook_report_syscall_exit before |
308 | # and after the system call | 351 | # and after the system call |
309 | # | 352 | # |
310 | .Lsysc_tracesys: | 353 | .Lsysc_tracesys: |
311 | l %r1,BASED(.Lc_trace_enter) | 354 | lgr %r2,%r11 # pass pointer to pt_regs |
312 | lr %r2,%r11 # pass pointer to pt_regs | ||
313 | la %r3,0 | 355 | la %r3,0 |
314 | xr %r0,%r0 | 356 | llgh %r0,__PT_INT_CODE+2(%r11) |
315 | icm %r0,3,__PT_INT_CODE+2(%r11) | 357 | stg %r0,__PT_R2(%r11) |
316 | st %r0,__PT_R2(%r11) | 358 | brasl %r14,do_syscall_trace_enter |
317 | basr %r14,%r1 # call do_syscall_trace_enter | 359 | lghi %r0,NR_syscalls |
318 | cl %r2,BASED(.Lnr_syscalls) | 360 | clgr %r0,%r2 |
319 | jnl .Lsysc_tracenogo | 361 | jnh .Lsysc_tracenogo |
320 | lr %r8,%r2 | 362 | sllg %r8,%r2,2 |
321 | sll %r8,2 | 363 | lgf %r9,0(%r8,%r10) |
322 | l %r9,0(%r8,%r10) | ||
323 | .Lsysc_tracego: | 364 | .Lsysc_tracego: |
324 | lm %r3,%r7,__PT_R3(%r11) | 365 | lmg %r3,%r7,__PT_R3(%r11) |
325 | st %r7,STACK_FRAME_OVERHEAD(%r15) | 366 | stg %r7,STACK_FRAME_OVERHEAD(%r15) |
326 | l %r2,__PT_ORIG_GPR2(%r11) | 367 | lg %r2,__PT_ORIG_GPR2(%r11) |
327 | basr %r14,%r9 # call sys_xxx | 368 | basr %r14,%r9 # call sys_xxx |
328 | st %r2,__PT_R2(%r11) # store return value | 369 | stg %r2,__PT_R2(%r11) # store return value |
329 | .Lsysc_tracenogo: | 370 | .Lsysc_tracenogo: |
330 | tm __TI_flags+3(%r12),_TIF_TRACE | 371 | tm __TI_flags+7(%r12),_TIF_TRACE |
331 | jz .Lsysc_return | 372 | jz .Lsysc_return |
332 | l %r1,BASED(.Lc_trace_exit) | 373 | lgr %r2,%r11 # pass pointer to pt_regs |
333 | lr %r2,%r11 # pass pointer to pt_regs | 374 | larl %r14,.Lsysc_return |
334 | la %r14,BASED(.Lsysc_return) | 375 | jg do_syscall_trace_exit |
335 | br %r1 # call do_syscall_trace_exit | ||
336 | 376 | ||
337 | # | 377 | # |
338 | # a new process exits the kernel with ret_from_fork | 378 | # a new process exits the kernel with ret_from_fork |
339 | # | 379 | # |
340 | ENTRY(ret_from_fork) | 380 | ENTRY(ret_from_fork) |
341 | la %r11,STACK_FRAME_OVERHEAD(%r15) | 381 | la %r11,STACK_FRAME_OVERHEAD(%r15) |
342 | l %r12,__LC_THREAD_INFO | 382 | lg %r12,__LC_THREAD_INFO |
343 | l %r13,__LC_SVC_NEW_PSW+4 | 383 | brasl %r14,schedule_tail |
344 | l %r1,BASED(.Lc_schedule_tail) | ||
345 | basr %r14,%r1 # call schedule_tail | ||
346 | TRACE_IRQS_ON | 384 | TRACE_IRQS_ON |
347 | ssm __LC_SVC_NEW_PSW # reenable interrupts | 385 | ssm __LC_SVC_NEW_PSW # reenable interrupts |
348 | tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ? | 386 | tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ? |
349 | jne .Lsysc_tracenogo | 387 | jne .Lsysc_tracenogo |
350 | # it's a kernel thread | 388 | # it's a kernel thread |
351 | lm %r9,%r10,__PT_R9(%r11) # load gprs | 389 | lmg %r9,%r10,__PT_R9(%r11) # load gprs |
352 | ENTRY(kernel_thread_starter) | 390 | ENTRY(kernel_thread_starter) |
353 | la %r2,0(%r10) | 391 | la %r2,0(%r10) |
354 | basr %r14,%r9 | 392 | basr %r14,%r9 |
@@ -360,46 +398,54 @@ ENTRY(kernel_thread_starter) | |||
360 | 398 | ||
361 | ENTRY(pgm_check_handler) | 399 | ENTRY(pgm_check_handler) |
362 | stpt __LC_SYNC_ENTER_TIMER | 400 | stpt __LC_SYNC_ENTER_TIMER |
363 | stm %r8,%r15,__LC_SAVE_AREA_SYNC | 401 | stmg %r8,%r15,__LC_SAVE_AREA_SYNC |
364 | l %r12,__LC_THREAD_INFO | 402 | lg %r10,__LC_LAST_BREAK |
365 | l %r13,__LC_SVC_NEW_PSW+4 | 403 | lg %r12,__LC_THREAD_INFO |
366 | lm %r8,%r9,__LC_PGM_OLD_PSW | 404 | larl %r13,system_call |
367 | tmh %r8,0x0001 # test problem state bit | 405 | lmg %r8,%r9,__LC_PGM_OLD_PSW |
406 | HANDLE_SIE_INTERCEPT %r14,1 | ||
407 | tmhh %r8,0x0001 # test problem state bit | ||
368 | jnz 1f # -> fault in user space | 408 | jnz 1f # -> fault in user space |
369 | tmh %r8,0x4000 # PER bit set in old PSW ? | 409 | tmhh %r8,0x4000 # PER bit set in old PSW ? |
370 | jnz 0f # -> enabled, can't be a double fault | 410 | jnz 0f # -> enabled, can't be a double fault |
371 | tm __LC_PGM_ILC+3,0x80 # check for per exception | 411 | tm __LC_PGM_ILC+3,0x80 # check for per exception |
372 | jnz .Lpgm_svcper # -> single stepped svc | 412 | jnz .Lpgm_svcper # -> single stepped svc |
373 | 0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC | 413 | 0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC |
374 | ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) | 414 | aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) |
375 | j 2f | 415 | j 2f |
376 | 1: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER | 416 | 1: UPDATE_VTIME %r14,__LC_SYNC_ENTER_TIMER |
377 | l %r15,__LC_KERNEL_STACK | 417 | LAST_BREAK %r14 |
418 | lg %r15,__LC_KERNEL_STACK | ||
419 | lg %r14,__TI_task(%r12) | ||
420 | lghi %r13,__LC_PGM_TDB | ||
421 | tm __LC_PGM_ILC+2,0x02 # check for transaction abort | ||
422 | jz 2f | ||
423 | mvc __THREAD_trap_tdb(256,%r14),0(%r13) | ||
378 | 2: la %r11,STACK_FRAME_OVERHEAD(%r15) | 424 | 2: la %r11,STACK_FRAME_OVERHEAD(%r15) |
379 | stm %r0,%r7,__PT_R0(%r11) | 425 | stmg %r0,%r7,__PT_R0(%r11) |
380 | mvc __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC | 426 | mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC |
381 | stm %r8,%r9,__PT_PSW(%r11) | 427 | stmg %r8,%r9,__PT_PSW(%r11) |
382 | mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC | 428 | mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC |
383 | mvc __PT_INT_PARM_LONG(4,%r11),__LC_TRANS_EXC_CODE | 429 | mvc __PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE |
384 | xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11) | 430 | xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) |
431 | stg %r10,__PT_ARGS(%r11) | ||
385 | tm __LC_PGM_ILC+3,0x80 # check for per exception | 432 | tm __LC_PGM_ILC+3,0x80 # check for per exception |
386 | jz 0f | 433 | jz 0f |
387 | l %r1,__TI_task(%r12) | 434 | tmhh %r8,0x0001 # kernel per event ? |
388 | tmh %r8,0x0001 # kernel per event ? | ||
389 | jz .Lpgm_kprobe | 435 | jz .Lpgm_kprobe |
390 | oi __PT_FLAGS+3(%r11),_PIF_PER_TRAP | 436 | oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP |
391 | mvc __THREAD_per_address(4,%r1),__LC_PER_ADDRESS | 437 | mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS |
392 | mvc __THREAD_per_cause(2,%r1),__LC_PER_CODE | 438 | mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE |
393 | mvc __THREAD_per_paid(1,%r1),__LC_PER_ACCESS_ID | 439 | mvc __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID |
394 | 0: REENABLE_IRQS | 440 | 0: REENABLE_IRQS |
395 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) | 441 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) |
396 | l %r1,BASED(.Lc_jump_table) | 442 | larl %r1,pgm_check_table |
397 | la %r10,0x7f | 443 | llgh %r10,__PT_INT_CODE+2(%r11) |
398 | n %r10,__PT_INT_CODE(%r11) | 444 | nill %r10,0x007f |
399 | je .Lsysc_return | ||
400 | sll %r10,2 | 445 | sll %r10,2 |
401 | l %r1,0(%r10,%r1) # load address of handler routine | 446 | je .Lsysc_return |
402 | lr %r2,%r11 # pass pointer to pt_regs | 447 | lgf %r1,0(%r10,%r1) # load address of handler routine |
448 | lgr %r2,%r11 # pass pointer to pt_regs | ||
403 | basr %r14,%r1 # branch to interrupt-handler | 449 | basr %r14,%r1 # branch to interrupt-handler |
404 | j .Lsysc_return | 450 | j .Lsysc_return |
405 | 451 | ||
@@ -408,54 +454,55 @@ ENTRY(pgm_check_handler) | |||
408 | # | 454 | # |
409 | .Lpgm_kprobe: | 455 | .Lpgm_kprobe: |
410 | REENABLE_IRQS | 456 | REENABLE_IRQS |
411 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) | 457 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) |
412 | l %r1,BASED(.Lc_do_per_trap) | 458 | lgr %r2,%r11 # pass pointer to pt_regs |
413 | lr %r2,%r11 # pass pointer to pt_regs | 459 | brasl %r14,do_per_trap |
414 | basr %r14,%r1 # call do_per_trap | ||
415 | j .Lsysc_return | 460 | j .Lsysc_return |
416 | 461 | ||
417 | # | 462 | # |
418 | # single stepped system call | 463 | # single stepped system call |
419 | # | 464 | # |
420 | .Lpgm_svcper: | 465 | .Lpgm_svcper: |
421 | mvc __LC_RETURN_PSW(4),__LC_SVC_NEW_PSW | 466 | mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW |
422 | mvc __LC_RETURN_PSW+4(4),BASED(.Lc_sysc_per) | 467 | larl %r14,.Lsysc_per |
423 | lhi %r14,_PIF_SYSCALL | _PIF_PER_TRAP | 468 | stg %r14,__LC_RETURN_PSW+8 |
424 | lpsw __LC_RETURN_PSW # branch to .Lsysc_per and enable irqs | 469 | lghi %r14,_PIF_SYSCALL | _PIF_PER_TRAP |
470 | lpswe __LC_RETURN_PSW # branch to .Lsysc_per and enable irqs | ||
425 | 471 | ||
426 | /* | 472 | /* |
427 | * IO interrupt handler routine | 473 | * IO interrupt handler routine |
428 | */ | 474 | */ |
429 | |||
430 | ENTRY(io_int_handler) | 475 | ENTRY(io_int_handler) |
431 | stck __LC_INT_CLOCK | 476 | STCK __LC_INT_CLOCK |
432 | stpt __LC_ASYNC_ENTER_TIMER | 477 | stpt __LC_ASYNC_ENTER_TIMER |
433 | stm %r8,%r15,__LC_SAVE_AREA_ASYNC | 478 | stmg %r8,%r15,__LC_SAVE_AREA_ASYNC |
434 | l %r12,__LC_THREAD_INFO | 479 | lg %r10,__LC_LAST_BREAK |
435 | l %r13,__LC_SVC_NEW_PSW+4 | 480 | lg %r12,__LC_THREAD_INFO |
436 | lm %r8,%r9,__LC_IO_OLD_PSW | 481 | larl %r13,system_call |
437 | tmh %r8,0x0001 # interrupting from user ? | 482 | lmg %r8,%r9,__LC_IO_OLD_PSW |
483 | HANDLE_SIE_INTERCEPT %r14,2 | ||
484 | SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT | ||
485 | tmhh %r8,0x0001 # interrupting from user? | ||
438 | jz .Lio_skip | 486 | jz .Lio_skip |
439 | UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER | 487 | UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER |
488 | LAST_BREAK %r14 | ||
440 | .Lio_skip: | 489 | .Lio_skip: |
441 | SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT | 490 | stmg %r0,%r7,__PT_R0(%r11) |
442 | stm %r0,%r7,__PT_R0(%r11) | 491 | mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC |
443 | mvc __PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC | 492 | stmg %r8,%r9,__PT_PSW(%r11) |
444 | stm %r8,%r9,__PT_PSW(%r11) | ||
445 | mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID | 493 | mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID |
446 | xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11) | 494 | xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) |
447 | TRACE_IRQS_OFF | 495 | TRACE_IRQS_OFF |
448 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) | 496 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) |
449 | .Lio_loop: | 497 | .Lio_loop: |
450 | l %r1,BASED(.Lc_do_IRQ) | 498 | lgr %r2,%r11 # pass pointer to pt_regs |
451 | lr %r2,%r11 # pass pointer to pt_regs | 499 | lghi %r3,IO_INTERRUPT |
452 | lhi %r3,IO_INTERRUPT | ||
453 | tm __PT_INT_CODE+8(%r11),0x80 # adapter interrupt ? | 500 | tm __PT_INT_CODE+8(%r11),0x80 # adapter interrupt ? |
454 | jz .Lio_call | 501 | jz .Lio_call |
455 | lhi %r3,THIN_INTERRUPT | 502 | lghi %r3,THIN_INTERRUPT |
456 | .Lio_call: | 503 | .Lio_call: |
457 | basr %r14,%r1 # call do_IRQ | 504 | brasl %r14,do_IRQ |
458 | tm __LC_MACHINE_FLAGS+2,0x10 # MACHINE_FLAG_LPAR | 505 | tm __LC_MACHINE_FLAGS+6,0x10 # MACHINE_FLAG_LPAR |
459 | jz .Lio_return | 506 | jz .Lio_return |
460 | tpi 0 | 507 | tpi 0 |
461 | jz .Lio_return | 508 | jz .Lio_return |
@@ -465,21 +512,26 @@ ENTRY(io_int_handler) | |||
465 | LOCKDEP_SYS_EXIT | 512 | LOCKDEP_SYS_EXIT |
466 | TRACE_IRQS_ON | 513 | TRACE_IRQS_ON |
467 | .Lio_tif: | 514 | .Lio_tif: |
468 | tm __TI_flags+3(%r12),_TIF_WORK | 515 | tm __TI_flags+7(%r12),_TIF_WORK |
469 | jnz .Lio_work # there is work to do (signals etc.) | 516 | jnz .Lio_work # there is work to do (signals etc.) |
470 | tm __LC_CPU_FLAGS+3,_CIF_WORK | 517 | tm __LC_CPU_FLAGS+7,_CIF_WORK |
471 | jnz .Lio_work | 518 | jnz .Lio_work |
472 | .Lio_restore: | 519 | .Lio_restore: |
473 | mvc __LC_RETURN_PSW(8),__PT_PSW(%r11) | 520 | lg %r14,__LC_VDSO_PER_CPU |
521 | lmg %r0,%r10,__PT_R0(%r11) | ||
522 | mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) | ||
474 | stpt __LC_EXIT_TIMER | 523 | stpt __LC_EXIT_TIMER |
475 | lm %r0,%r15,__PT_R0(%r11) | 524 | mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER |
476 | lpsw __LC_RETURN_PSW | 525 | lmg %r11,%r15,__PT_R11(%r11) |
526 | lpswe __LC_RETURN_PSW | ||
477 | .Lio_done: | 527 | .Lio_done: |
478 | 528 | ||
479 | # | 529 | # |
480 | # There is work todo, find out in which context we have been interrupted: | 530 | # There is work todo, find out in which context we have been interrupted: |
481 | # 1) if we return to user space we can do all _TIF_WORK work | 531 | # 1) if we return to user space we can do all _TIF_WORK work |
482 | # 2) if we return to kernel code and preemptive scheduling is enabled check | 532 | # 2) if we return to kernel code and kvm is enabled check if we need to |
533 | # modify the psw to leave SIE | ||
534 | # 3) if we return to kernel code and preemptive scheduling is enabled check | ||
483 | # the preemption counter and if it is zero call preempt_schedule_irq | 535 | # the preemption counter and if it is zero call preempt_schedule_irq |
484 | # Before any work can be done, a switch to the kernel stack is required. | 536 | # Before any work can be done, a switch to the kernel stack is required. |
485 | # | 537 | # |
@@ -489,21 +541,20 @@ ENTRY(io_int_handler) | |||
489 | #ifdef CONFIG_PREEMPT | 541 | #ifdef CONFIG_PREEMPT |
490 | # check for preemptive scheduling | 542 | # check for preemptive scheduling |
491 | icm %r0,15,__TI_precount(%r12) | 543 | icm %r0,15,__TI_precount(%r12) |
492 | jnz .Lio_restore # preemption disabled | 544 | jnz .Lio_restore # preemption is disabled |
493 | tm __TI_flags+3(%r12),_TIF_NEED_RESCHED | 545 | tm __TI_flags+7(%r12),_TIF_NEED_RESCHED |
494 | jno .Lio_restore | 546 | jno .Lio_restore |
495 | # switch to kernel stack | 547 | # switch to kernel stack |
496 | l %r1,__PT_R15(%r11) | 548 | lg %r1,__PT_R15(%r11) |
497 | ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) | 549 | aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) |
498 | mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) | 550 | mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) |
499 | xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) | 551 | xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) |
500 | la %r11,STACK_FRAME_OVERHEAD(%r1) | 552 | la %r11,STACK_FRAME_OVERHEAD(%r1) |
501 | lr %r15,%r1 | 553 | lgr %r15,%r1 |
502 | # TRACE_IRQS_ON already done at .Lio_return, call | 554 | # TRACE_IRQS_ON already done at .Lio_return, call |
503 | # TRACE_IRQS_OFF to keep things symmetrical | 555 | # TRACE_IRQS_OFF to keep things symmetrical |
504 | TRACE_IRQS_OFF | 556 | TRACE_IRQS_OFF |
505 | l %r1,BASED(.Lc_preempt_irq) | 557 | brasl %r14,preempt_schedule_irq |
506 | basr %r14,%r1 # call preempt_schedule_irq | ||
507 | j .Lio_return | 558 | j .Lio_return |
508 | #else | 559 | #else |
509 | j .Lio_restore | 560 | j .Lio_restore |
@@ -513,25 +564,25 @@ ENTRY(io_int_handler) | |||
513 | # Need to do work before returning to userspace, switch to kernel stack | 564 | # Need to do work before returning to userspace, switch to kernel stack |
514 | # | 565 | # |
515 | .Lio_work_user: | 566 | .Lio_work_user: |
516 | l %r1,__LC_KERNEL_STACK | 567 | lg %r1,__LC_KERNEL_STACK |
517 | mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) | 568 | mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) |
518 | xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) | 569 | xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) |
519 | la %r11,STACK_FRAME_OVERHEAD(%r1) | 570 | la %r11,STACK_FRAME_OVERHEAD(%r1) |
520 | lr %r15,%r1 | 571 | lgr %r15,%r1 |
521 | 572 | ||
522 | # | 573 | # |
523 | # One of the work bits is on. Find out which one. | 574 | # One of the work bits is on. Find out which one. |
524 | # | 575 | # |
525 | .Lio_work_tif: | 576 | .Lio_work_tif: |
526 | tm __LC_CPU_FLAGS+3(%r12),_CIF_MCCK_PENDING | 577 | tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING |
527 | jo .Lio_mcck_pending | 578 | jo .Lio_mcck_pending |
528 | tm __TI_flags+3(%r12),_TIF_NEED_RESCHED | 579 | tm __TI_flags+7(%r12),_TIF_NEED_RESCHED |
529 | jo .Lio_reschedule | 580 | jo .Lio_reschedule |
530 | tm __TI_flags+3(%r12),_TIF_SIGPENDING | 581 | tm __TI_flags+7(%r12),_TIF_SIGPENDING |
531 | jo .Lio_sigpending | 582 | jo .Lio_sigpending |
532 | tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME | 583 | tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME |
533 | jo .Lio_notify_resume | 584 | jo .Lio_notify_resume |
534 | tm __LC_CPU_FLAGS+3,_CIF_ASCE | 585 | tm __LC_CPU_FLAGS+7,_CIF_ASCE |
535 | jo .Lio_uaccess | 586 | jo .Lio_uaccess |
536 | j .Lio_return # beware of critical section cleanup | 587 | j .Lio_return # beware of critical section cleanup |
537 | 588 | ||
@@ -540,8 +591,7 @@ ENTRY(io_int_handler) | |||
540 | # | 591 | # |
541 | .Lio_mcck_pending: | 592 | .Lio_mcck_pending: |
542 | # TRACE_IRQS_ON already done at .Lio_return | 593 | # TRACE_IRQS_ON already done at .Lio_return |
543 | l %r1,BASED(.Lc_handle_mcck) | 594 | brasl %r14,s390_handle_mcck # TIF bit will be cleared by handler |
544 | basr %r14,%r1 # TIF bit will be cleared by handler | ||
545 | TRACE_IRQS_OFF | 595 | TRACE_IRQS_OFF |
546 | j .Lio_return | 596 | j .Lio_return |
547 | 597 | ||
@@ -549,8 +599,8 @@ ENTRY(io_int_handler) | |||
549 | # _CIF_ASCE is set, load user space asce | 599 | # _CIF_ASCE is set, load user space asce |
550 | # | 600 | # |
551 | .Lio_uaccess: | 601 | .Lio_uaccess: |
552 | ni __LC_CPU_FLAGS+3,255-_CIF_ASCE | 602 | ni __LC_CPU_FLAGS+7,255-_CIF_ASCE |
553 | lctl %c1,%c1,__LC_USER_ASCE # load primary asce | 603 | lctlg %c1,%c1,__LC_USER_ASCE # load primary asce |
554 | j .Lio_return | 604 | j .Lio_return |
555 | 605 | ||
556 | # | 606 | # |
@@ -558,35 +608,32 @@ ENTRY(io_int_handler) | |||
558 | # | 608 | # |
559 | .Lio_reschedule: | 609 | .Lio_reschedule: |
560 | # TRACE_IRQS_ON already done at .Lio_return | 610 | # TRACE_IRQS_ON already done at .Lio_return |
561 | l %r1,BASED(.Lc_schedule) | ||
562 | ssm __LC_SVC_NEW_PSW # reenable interrupts | 611 | ssm __LC_SVC_NEW_PSW # reenable interrupts |
563 | basr %r14,%r1 # call scheduler | 612 | brasl %r14,schedule # call scheduler |
564 | ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts | 613 | ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts |
565 | TRACE_IRQS_OFF | 614 | TRACE_IRQS_OFF |
566 | j .Lio_return | 615 | j .Lio_return |
567 | 616 | ||
568 | # | 617 | # |
569 | # _TIF_SIGPENDING is set, call do_signal | 618 | # _TIF_SIGPENDING or is set, call do_signal |
570 | # | 619 | # |
571 | .Lio_sigpending: | 620 | .Lio_sigpending: |
572 | # TRACE_IRQS_ON already done at .Lio_return | 621 | # TRACE_IRQS_ON already done at .Lio_return |
573 | l %r1,BASED(.Lc_do_signal) | ||
574 | ssm __LC_SVC_NEW_PSW # reenable interrupts | 622 | ssm __LC_SVC_NEW_PSW # reenable interrupts |
575 | lr %r2,%r11 # pass pointer to pt_regs | 623 | lgr %r2,%r11 # pass pointer to pt_regs |
576 | basr %r14,%r1 # call do_signal | 624 | brasl %r14,do_signal |
577 | ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts | 625 | ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts |
578 | TRACE_IRQS_OFF | 626 | TRACE_IRQS_OFF |
579 | j .Lio_return | 627 | j .Lio_return |
580 | 628 | ||
581 | # | 629 | # |
582 | # _TIF_SIGPENDING is set, call do_signal | 630 | # _TIF_NOTIFY_RESUME or is set, call do_notify_resume |
583 | # | 631 | # |
584 | .Lio_notify_resume: | 632 | .Lio_notify_resume: |
585 | # TRACE_IRQS_ON already done at .Lio_return | 633 | # TRACE_IRQS_ON already done at .Lio_return |
586 | l %r1,BASED(.Lc_do_notify_resume) | ||
587 | ssm __LC_SVC_NEW_PSW # reenable interrupts | 634 | ssm __LC_SVC_NEW_PSW # reenable interrupts |
588 | lr %r2,%r11 # pass pointer to pt_regs | 635 | lgr %r2,%r11 # pass pointer to pt_regs |
589 | basr %r14,%r1 # call do_notify_resume | 636 | brasl %r14,do_notify_resume |
590 | ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts | 637 | ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts |
591 | TRACE_IRQS_OFF | 638 | TRACE_IRQS_OFF |
592 | j .Lio_return | 639 | j .Lio_return |
@@ -594,45 +641,47 @@ ENTRY(io_int_handler) | |||
594 | /* | 641 | /* |
595 | * External interrupt handler routine | 642 | * External interrupt handler routine |
596 | */ | 643 | */ |
597 | |||
598 | ENTRY(ext_int_handler) | 644 | ENTRY(ext_int_handler) |
599 | stck __LC_INT_CLOCK | 645 | STCK __LC_INT_CLOCK |
600 | stpt __LC_ASYNC_ENTER_TIMER | 646 | stpt __LC_ASYNC_ENTER_TIMER |
601 | stm %r8,%r15,__LC_SAVE_AREA_ASYNC | 647 | stmg %r8,%r15,__LC_SAVE_AREA_ASYNC |
602 | l %r12,__LC_THREAD_INFO | 648 | lg %r10,__LC_LAST_BREAK |
603 | l %r13,__LC_SVC_NEW_PSW+4 | 649 | lg %r12,__LC_THREAD_INFO |
604 | lm %r8,%r9,__LC_EXT_OLD_PSW | 650 | larl %r13,system_call |
605 | tmh %r8,0x0001 # interrupting from user ? | 651 | lmg %r8,%r9,__LC_EXT_OLD_PSW |
652 | HANDLE_SIE_INTERCEPT %r14,3 | ||
653 | SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT | ||
654 | tmhh %r8,0x0001 # interrupting from user ? | ||
606 | jz .Lext_skip | 655 | jz .Lext_skip |
607 | UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER | 656 | UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER |
657 | LAST_BREAK %r14 | ||
608 | .Lext_skip: | 658 | .Lext_skip: |
609 | SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT | 659 | stmg %r0,%r7,__PT_R0(%r11) |
610 | stm %r0,%r7,__PT_R0(%r11) | 660 | mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC |
611 | mvc __PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC | 661 | stmg %r8,%r9,__PT_PSW(%r11) |
612 | stm %r8,%r9,__PT_PSW(%r11) | 662 | lghi %r1,__LC_EXT_PARAMS2 |
613 | mvc __PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR | 663 | mvc __PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR |
614 | mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS | 664 | mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS |
615 | xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11) | 665 | mvc __PT_INT_PARM_LONG(8,%r11),0(%r1) |
666 | xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) | ||
616 | TRACE_IRQS_OFF | 667 | TRACE_IRQS_OFF |
617 | l %r1,BASED(.Lc_do_IRQ) | 668 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) |
618 | lr %r2,%r11 # pass pointer to pt_regs | 669 | lgr %r2,%r11 # pass pointer to pt_regs |
619 | lhi %r3,EXT_INTERRUPT | 670 | lghi %r3,EXT_INTERRUPT |
620 | basr %r14,%r1 # call do_IRQ | 671 | brasl %r14,do_IRQ |
621 | j .Lio_return | 672 | j .Lio_return |
622 | 673 | ||
623 | /* | 674 | /* |
624 | * Load idle PSW. The second "half" of this function is in .Lcleanup_idle. | 675 | * Load idle PSW. The second "half" of this function is in .Lcleanup_idle. |
625 | */ | 676 | */ |
626 | ENTRY(psw_idle) | 677 | ENTRY(psw_idle) |
627 | st %r3,__SF_EMPTY(%r15) | 678 | stg %r3,__SF_EMPTY(%r15) |
628 | basr %r1,0 | 679 | larl %r1,.Lpsw_idle_lpsw+4 |
629 | la %r1,.Lpsw_idle_lpsw+4-.(%r1) | 680 | stg %r1,__SF_EMPTY+8(%r15) |
630 | st %r1,__SF_EMPTY+4(%r15) | 681 | STCK __CLOCK_IDLE_ENTER(%r2) |
631 | oi __SF_EMPTY+4(%r15),0x80 | ||
632 | stck __CLOCK_IDLE_ENTER(%r2) | ||
633 | stpt __TIMER_IDLE_ENTER(%r2) | 682 | stpt __TIMER_IDLE_ENTER(%r2) |
634 | .Lpsw_idle_lpsw: | 683 | .Lpsw_idle_lpsw: |
635 | lpsw __SF_EMPTY(%r15) | 684 | lpswe __SF_EMPTY(%r15) |
636 | br %r14 | 685 | br %r14 |
637 | .Lpsw_idle_end: | 686 | .Lpsw_idle_end: |
638 | 687 | ||
@@ -641,17 +690,19 @@ ENTRY(psw_idle) | |||
641 | /* | 690 | /* |
642 | * Machine check handler routines | 691 | * Machine check handler routines |
643 | */ | 692 | */ |
644 | |||
645 | ENTRY(mcck_int_handler) | 693 | ENTRY(mcck_int_handler) |
646 | stck __LC_MCCK_CLOCK | 694 | STCK __LC_MCCK_CLOCK |
647 | spt __LC_CPU_TIMER_SAVE_AREA # revalidate cpu timer | 695 | la %r1,4095 # revalidate r1 |
648 | lm %r0,%r15,__LC_GPREGS_SAVE_AREA # revalidate gprs | 696 | spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer |
649 | l %r12,__LC_THREAD_INFO | 697 | lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs |
650 | l %r13,__LC_SVC_NEW_PSW+4 | 698 | lg %r10,__LC_LAST_BREAK |
651 | lm %r8,%r9,__LC_MCK_OLD_PSW | 699 | lg %r12,__LC_THREAD_INFO |
700 | larl %r13,system_call | ||
701 | lmg %r8,%r9,__LC_MCK_OLD_PSW | ||
702 | HANDLE_SIE_INTERCEPT %r14,4 | ||
652 | tm __LC_MCCK_CODE,0x80 # system damage? | 703 | tm __LC_MCCK_CODE,0x80 # system damage? |
653 | jo .Lmcck_panic # yes -> rest of mcck code invalid | 704 | jo .Lmcck_panic # yes -> rest of mcck code invalid |
654 | la %r14,__LC_CPU_TIMER_SAVE_AREA | 705 | lghi %r14,__LC_CPU_TIMER_SAVE_AREA |
655 | mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) | 706 | mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) |
656 | tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid? | 707 | tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid? |
657 | jo 3f | 708 | jo 3f |
@@ -669,76 +720,76 @@ ENTRY(mcck_int_handler) | |||
669 | mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) | 720 | mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) |
670 | 3: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? | 721 | 3: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? |
671 | jno .Lmcck_panic # no -> skip cleanup critical | 722 | jno .Lmcck_panic # no -> skip cleanup critical |
723 | SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_PANIC_STACK,PAGE_SHIFT | ||
672 | tm %r8,0x0001 # interrupting from user ? | 724 | tm %r8,0x0001 # interrupting from user ? |
673 | jz .Lmcck_skip | 725 | jz .Lmcck_skip |
674 | UPDATE_VTIME %r14,%r15,__LC_MCCK_ENTER_TIMER | 726 | UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER |
727 | LAST_BREAK %r14 | ||
675 | .Lmcck_skip: | 728 | .Lmcck_skip: |
676 | SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+32,__LC_PANIC_STACK,PAGE_SHIFT | 729 | lghi %r14,__LC_GPREGS_SAVE_AREA+64 |
677 | stm %r0,%r7,__PT_R0(%r11) | 730 | stmg %r0,%r7,__PT_R0(%r11) |
678 | mvc __PT_R8(32,%r11),__LC_GPREGS_SAVE_AREA+32 | 731 | mvc __PT_R8(64,%r11),0(%r14) |
679 | stm %r8,%r9,__PT_PSW(%r11) | 732 | stmg %r8,%r9,__PT_PSW(%r11) |
680 | xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11) | 733 | xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) |
681 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) | 734 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) |
682 | l %r1,BASED(.Lc_do_machine_check) | 735 | lgr %r2,%r11 # pass pointer to pt_regs |
683 | lr %r2,%r11 # pass pointer to pt_regs | 736 | brasl %r14,s390_do_machine_check |
684 | basr %r14,%r1 # call s390_do_machine_check | ||
685 | tm __PT_PSW+1(%r11),0x01 # returning to user ? | 737 | tm __PT_PSW+1(%r11),0x01 # returning to user ? |
686 | jno .Lmcck_return | 738 | jno .Lmcck_return |
687 | l %r1,__LC_KERNEL_STACK # switch to kernel stack | 739 | lg %r1,__LC_KERNEL_STACK # switch to kernel stack |
688 | mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) | 740 | mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) |
689 | xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) | 741 | xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) |
690 | la %r11,STACK_FRAME_OVERHEAD(%r15) | 742 | la %r11,STACK_FRAME_OVERHEAD(%r1) |
691 | lr %r15,%r1 | 743 | lgr %r15,%r1 |
692 | ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off | 744 | ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off |
693 | tm __LC_CPU_FLAGS+3,_CIF_MCCK_PENDING | 745 | tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING |
694 | jno .Lmcck_return | 746 | jno .Lmcck_return |
695 | TRACE_IRQS_OFF | 747 | TRACE_IRQS_OFF |
696 | l %r1,BASED(.Lc_handle_mcck) | 748 | brasl %r14,s390_handle_mcck |
697 | basr %r14,%r1 # call s390_handle_mcck | ||
698 | TRACE_IRQS_ON | 749 | TRACE_IRQS_ON |
699 | .Lmcck_return: | 750 | .Lmcck_return: |
700 | mvc __LC_RETURN_MCCK_PSW(8),__PT_PSW(%r11) # move return PSW | 751 | lg %r14,__LC_VDSO_PER_CPU |
752 | lmg %r0,%r10,__PT_R0(%r11) | ||
753 | mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW | ||
701 | tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? | 754 | tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? |
702 | jno 0f | 755 | jno 0f |
703 | lm %r0,%r15,__PT_R0(%r11) | ||
704 | stpt __LC_EXIT_TIMER | 756 | stpt __LC_EXIT_TIMER |
705 | lpsw __LC_RETURN_MCCK_PSW | 757 | mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER |
706 | 0: lm %r0,%r15,__PT_R0(%r11) | 758 | 0: lmg %r11,%r15,__PT_R11(%r11) |
707 | lpsw __LC_RETURN_MCCK_PSW | 759 | lpswe __LC_RETURN_MCCK_PSW |
708 | 760 | ||
709 | .Lmcck_panic: | 761 | .Lmcck_panic: |
710 | l %r14,__LC_PANIC_STACK | 762 | lg %r14,__LC_PANIC_STACK |
711 | slr %r14,%r15 | 763 | slgr %r14,%r15 |
712 | sra %r14,PAGE_SHIFT | 764 | srag %r14,%r14,PAGE_SHIFT |
713 | jz 0f | 765 | jz 0f |
714 | l %r15,__LC_PANIC_STACK | 766 | lg %r15,__LC_PANIC_STACK |
715 | j .Lmcck_skip | 767 | 0: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) |
716 | 0: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) | ||
717 | j .Lmcck_skip | 768 | j .Lmcck_skip |
718 | 769 | ||
719 | # | 770 | # |
720 | # PSW restart interrupt handler | 771 | # PSW restart interrupt handler |
721 | # | 772 | # |
722 | ENTRY(restart_int_handler) | 773 | ENTRY(restart_int_handler) |
723 | st %r15,__LC_SAVE_AREA_RESTART | 774 | stg %r15,__LC_SAVE_AREA_RESTART |
724 | l %r15,__LC_RESTART_STACK | 775 | lg %r15,__LC_RESTART_STACK |
725 | ahi %r15,-__PT_SIZE # create pt_regs on stack | 776 | aghi %r15,-__PT_SIZE # create pt_regs on stack |
726 | xc 0(__PT_SIZE,%r15),0(%r15) | 777 | xc 0(__PT_SIZE,%r15),0(%r15) |
727 | stm %r0,%r14,__PT_R0(%r15) | 778 | stmg %r0,%r14,__PT_R0(%r15) |
728 | mvc __PT_R15(4,%r15),__LC_SAVE_AREA_RESTART | 779 | mvc __PT_R15(8,%r15),__LC_SAVE_AREA_RESTART |
729 | mvc __PT_PSW(8,%r15),__LC_RST_OLD_PSW # store restart old psw | 780 | mvc __PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw |
730 | ahi %r15,-STACK_FRAME_OVERHEAD # create stack frame on stack | 781 | aghi %r15,-STACK_FRAME_OVERHEAD # create stack frame on stack |
731 | xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15) | 782 | xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15) |
732 | l %r1,__LC_RESTART_FN # load fn, parm & source cpu | 783 | lg %r1,__LC_RESTART_FN # load fn, parm & source cpu |
733 | l %r2,__LC_RESTART_DATA | 784 | lg %r2,__LC_RESTART_DATA |
734 | l %r3,__LC_RESTART_SOURCE | 785 | lg %r3,__LC_RESTART_SOURCE |
735 | ltr %r3,%r3 # test source cpu address | 786 | ltgr %r3,%r3 # test source cpu address |
736 | jm 1f # negative -> skip source stop | 787 | jm 1f # negative -> skip source stop |
737 | 0: sigp %r4,%r3,SIGP_SENSE # sigp sense to source cpu | 788 | 0: sigp %r4,%r3,SIGP_SENSE # sigp sense to source cpu |
738 | brc 10,0b # wait for status stored | 789 | brc 10,0b # wait for status stored |
739 | 1: basr %r14,%r1 # call function | 790 | 1: basr %r14,%r1 # call function |
740 | stap __SF_EMPTY(%r15) # store cpu address | 791 | stap __SF_EMPTY(%r15) # store cpu address |
741 | lh %r3,__SF_EMPTY(%r15) | 792 | llgh %r3,__SF_EMPTY(%r15) |
742 | 2: sigp %r4,%r3,SIGP_STOP # sigp stop to current cpu | 793 | 2: sigp %r4,%r3,SIGP_STOP # sigp stop to current cpu |
743 | brc 2,2b | 794 | brc 2,2b |
744 | 3: j 3b | 795 | 3: j 3b |
@@ -752,215 +803,257 @@ ENTRY(restart_int_handler) | |||
752 | * Setup a pt_regs so that show_trace can provide a good call trace. | 803 | * Setup a pt_regs so that show_trace can provide a good call trace. |
753 | */ | 804 | */ |
754 | stack_overflow: | 805 | stack_overflow: |
755 | l %r15,__LC_PANIC_STACK # change to panic stack | 806 | lg %r15,__LC_PANIC_STACK # change to panic stack |
756 | la %r11,STACK_FRAME_OVERHEAD(%r15) | 807 | la %r11,STACK_FRAME_OVERHEAD(%r15) |
757 | stm %r0,%r7,__PT_R0(%r11) | 808 | stmg %r0,%r7,__PT_R0(%r11) |
758 | stm %r8,%r9,__PT_PSW(%r11) | 809 | stmg %r8,%r9,__PT_PSW(%r11) |
759 | mvc __PT_R8(32,%r11),0(%r14) | 810 | mvc __PT_R8(64,%r11),0(%r14) |
760 | l %r1,BASED(1f) | 811 | stg %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2 |
761 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) | 812 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) |
762 | lr %r2,%r11 # pass pointer to pt_regs | 813 | lgr %r2,%r11 # pass pointer to pt_regs |
763 | br %r1 # branch to kernel_stack_overflow | 814 | jg kernel_stack_overflow |
764 | 1: .long kernel_stack_overflow | ||
765 | #endif | 815 | #endif |
766 | 816 | ||
817 | .align 8 | ||
767 | .Lcleanup_table: | 818 | .Lcleanup_table: |
768 | .long system_call + 0x80000000 | 819 | .quad system_call |
769 | .long .Lsysc_do_svc + 0x80000000 | 820 | .quad .Lsysc_do_svc |
770 | .long .Lsysc_tif + 0x80000000 | 821 | .quad .Lsysc_tif |
771 | .long .Lsysc_restore + 0x80000000 | 822 | .quad .Lsysc_restore |
772 | .long .Lsysc_done + 0x80000000 | 823 | .quad .Lsysc_done |
773 | .long .Lio_tif + 0x80000000 | 824 | .quad .Lio_tif |
774 | .long .Lio_restore + 0x80000000 | 825 | .quad .Lio_restore |
775 | .long .Lio_done + 0x80000000 | 826 | .quad .Lio_done |
776 | .long psw_idle + 0x80000000 | 827 | .quad psw_idle |
777 | .long .Lpsw_idle_end + 0x80000000 | 828 | .quad .Lpsw_idle_end |
778 | 829 | ||
779 | cleanup_critical: | 830 | cleanup_critical: |
780 | cl %r9,BASED(.Lcleanup_table) # system_call | 831 | clg %r9,BASED(.Lcleanup_table) # system_call |
781 | jl 0f | 832 | jl 0f |
782 | cl %r9,BASED(.Lcleanup_table+4) # .Lsysc_do_svc | 833 | clg %r9,BASED(.Lcleanup_table+8) # .Lsysc_do_svc |
783 | jl .Lcleanup_system_call | 834 | jl .Lcleanup_system_call |
784 | cl %r9,BASED(.Lcleanup_table+8) # .Lsysc_tif | 835 | clg %r9,BASED(.Lcleanup_table+16) # .Lsysc_tif |
785 | jl 0f | 836 | jl 0f |
786 | cl %r9,BASED(.Lcleanup_table+12) # .Lsysc_restore | 837 | clg %r9,BASED(.Lcleanup_table+24) # .Lsysc_restore |
787 | jl .Lcleanup_sysc_tif | 838 | jl .Lcleanup_sysc_tif |
788 | cl %r9,BASED(.Lcleanup_table+16) # .Lsysc_done | 839 | clg %r9,BASED(.Lcleanup_table+32) # .Lsysc_done |
789 | jl .Lcleanup_sysc_restore | 840 | jl .Lcleanup_sysc_restore |
790 | cl %r9,BASED(.Lcleanup_table+20) # .Lio_tif | 841 | clg %r9,BASED(.Lcleanup_table+40) # .Lio_tif |
791 | jl 0f | 842 | jl 0f |
792 | cl %r9,BASED(.Lcleanup_table+24) # .Lio_restore | 843 | clg %r9,BASED(.Lcleanup_table+48) # .Lio_restore |
793 | jl .Lcleanup_io_tif | 844 | jl .Lcleanup_io_tif |
794 | cl %r9,BASED(.Lcleanup_table+28) # .Lio_done | 845 | clg %r9,BASED(.Lcleanup_table+56) # .Lio_done |
795 | jl .Lcleanup_io_restore | 846 | jl .Lcleanup_io_restore |
796 | cl %r9,BASED(.Lcleanup_table+32) # psw_idle | 847 | clg %r9,BASED(.Lcleanup_table+64) # psw_idle |
797 | jl 0f | 848 | jl 0f |
798 | cl %r9,BASED(.Lcleanup_table+36) # .Lpsw_idle_end | 849 | clg %r9,BASED(.Lcleanup_table+72) # .Lpsw_idle_end |
799 | jl .Lcleanup_idle | 850 | jl .Lcleanup_idle |
800 | 0: br %r14 | 851 | 0: br %r14 |
801 | 852 | ||
853 | |||
802 | .Lcleanup_system_call: | 854 | .Lcleanup_system_call: |
803 | # check if stpt has been executed | 855 | # check if stpt has been executed |
804 | cl %r9,BASED(.Lcleanup_system_call_insn) | 856 | clg %r9,BASED(.Lcleanup_system_call_insn) |
805 | jh 0f | 857 | jh 0f |
806 | mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER | 858 | mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER |
807 | chi %r11,__LC_SAVE_AREA_ASYNC | 859 | cghi %r11,__LC_SAVE_AREA_ASYNC |
808 | je 0f | 860 | je 0f |
809 | mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER | 861 | mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER |
810 | 0: # check if stm has been executed | 862 | 0: # check if stmg has been executed |
811 | cl %r9,BASED(.Lcleanup_system_call_insn+4) | 863 | clg %r9,BASED(.Lcleanup_system_call_insn+8) |
812 | jh 0f | 864 | jh 0f |
813 | mvc __LC_SAVE_AREA_SYNC(32),0(%r11) | 865 | mvc __LC_SAVE_AREA_SYNC(64),0(%r11) |
814 | 0: # set up saved registers r12, and r13 | 866 | 0: # check if base register setup + TIF bit load has been done |
815 | st %r12,16(%r11) # r12 thread-info pointer | 867 | clg %r9,BASED(.Lcleanup_system_call_insn+16) |
816 | st %r13,20(%r11) # r13 literal-pool pointer | 868 | jhe 0f |
817 | # check if the user time calculation has been done | 869 | # set up saved registers r10 and r12 |
818 | cl %r9,BASED(.Lcleanup_system_call_insn+8) | 870 | stg %r10,16(%r11) # r10 last break |
871 | stg %r12,32(%r11) # r12 thread-info pointer | ||
872 | 0: # check if the user time update has been done | ||
873 | clg %r9,BASED(.Lcleanup_system_call_insn+24) | ||
819 | jh 0f | 874 | jh 0f |
820 | l %r10,__LC_EXIT_TIMER | 875 | lg %r15,__LC_EXIT_TIMER |
821 | l %r15,__LC_EXIT_TIMER+4 | 876 | slg %r15,__LC_SYNC_ENTER_TIMER |
822 | SUB64 %r10,%r15,__LC_SYNC_ENTER_TIMER | 877 | alg %r15,__LC_USER_TIMER |
823 | ADD64 %r10,%r15,__LC_USER_TIMER | 878 | stg %r15,__LC_USER_TIMER |
824 | st %r10,__LC_USER_TIMER | 879 | 0: # check if the system time update has been done |
825 | st %r15,__LC_USER_TIMER+4 | 880 | clg %r9,BASED(.Lcleanup_system_call_insn+32) |
826 | 0: # check if the system time calculation has been done | ||
827 | cl %r9,BASED(.Lcleanup_system_call_insn+12) | ||
828 | jh 0f | 881 | jh 0f |
829 | l %r10,__LC_LAST_UPDATE_TIMER | 882 | lg %r15,__LC_LAST_UPDATE_TIMER |
830 | l %r15,__LC_LAST_UPDATE_TIMER+4 | 883 | slg %r15,__LC_EXIT_TIMER |
831 | SUB64 %r10,%r15,__LC_EXIT_TIMER | 884 | alg %r15,__LC_SYSTEM_TIMER |
832 | ADD64 %r10,%r15,__LC_SYSTEM_TIMER | 885 | stg %r15,__LC_SYSTEM_TIMER |
833 | st %r10,__LC_SYSTEM_TIMER | ||
834 | st %r15,__LC_SYSTEM_TIMER+4 | ||
835 | 0: # update accounting time stamp | 886 | 0: # update accounting time stamp |
836 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 887 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER |
837 | # set up saved register 11 | 888 | # do LAST_BREAK |
838 | l %r15,__LC_KERNEL_STACK | 889 | lg %r9,16(%r11) |
890 | srag %r9,%r9,23 | ||
891 | jz 0f | ||
892 | mvc __TI_last_break(8,%r12),16(%r11) | ||
893 | 0: # set up saved register r11 | ||
894 | lg %r15,__LC_KERNEL_STACK | ||
839 | la %r9,STACK_FRAME_OVERHEAD(%r15) | 895 | la %r9,STACK_FRAME_OVERHEAD(%r15) |
840 | st %r9,12(%r11) # r11 pt_regs pointer | 896 | stg %r9,24(%r11) # r11 pt_regs pointer |
841 | # fill pt_regs | 897 | # fill pt_regs |
842 | mvc __PT_R8(32,%r9),__LC_SAVE_AREA_SYNC | 898 | mvc __PT_R8(64,%r9),__LC_SAVE_AREA_SYNC |
843 | stm %r0,%r7,__PT_R0(%r9) | 899 | stmg %r0,%r7,__PT_R0(%r9) |
844 | mvc __PT_PSW(8,%r9),__LC_SVC_OLD_PSW | 900 | mvc __PT_PSW(16,%r9),__LC_SVC_OLD_PSW |
845 | mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC | 901 | mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC |
846 | xc __PT_FLAGS(4,%r9),__PT_FLAGS(%r9) | 902 | xc __PT_FLAGS(8,%r9),__PT_FLAGS(%r9) |
847 | mvi __PT_FLAGS+3(%r9),_PIF_SYSCALL | 903 | mvi __PT_FLAGS+7(%r9),_PIF_SYSCALL |
848 | # setup saved register 15 | 904 | # setup saved register r15 |
849 | st %r15,28(%r11) # r15 stack pointer | 905 | stg %r15,56(%r11) # r15 stack pointer |
850 | # set new psw address and exit | 906 | # set new psw address and exit |
851 | l %r9,BASED(.Lcleanup_table+4) # .Lsysc_do_svc + 0x80000000 | 907 | larl %r9,.Lsysc_do_svc |
852 | br %r14 | 908 | br %r14 |
853 | .Lcleanup_system_call_insn: | 909 | .Lcleanup_system_call_insn: |
854 | .long system_call + 0x80000000 | 910 | .quad system_call |
855 | .long .Lsysc_stm + 0x80000000 | 911 | .quad .Lsysc_stmg |
856 | .long .Lsysc_vtime + 0x80000000 + 36 | 912 | .quad .Lsysc_per |
857 | .long .Lsysc_vtime + 0x80000000 + 76 | 913 | .quad .Lsysc_vtime+18 |
914 | .quad .Lsysc_vtime+42 | ||
858 | 915 | ||
859 | .Lcleanup_sysc_tif: | 916 | .Lcleanup_sysc_tif: |
860 | l %r9,BASED(.Lcleanup_table+8) # .Lsysc_tif + 0x80000000 | 917 | larl %r9,.Lsysc_tif |
861 | br %r14 | 918 | br %r14 |
862 | 919 | ||
863 | .Lcleanup_sysc_restore: | 920 | .Lcleanup_sysc_restore: |
864 | cl %r9,BASED(.Lcleanup_sysc_restore_insn) | 921 | clg %r9,BASED(.Lcleanup_sysc_restore_insn) |
865 | jhe 0f | 922 | je 0f |
866 | l %r9,12(%r11) # get saved pointer to pt_regs | 923 | lg %r9,24(%r11) # get saved pointer to pt_regs |
867 | mvc __LC_RETURN_PSW(8),__PT_PSW(%r9) | 924 | mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) |
868 | mvc 0(32,%r11),__PT_R8(%r9) | 925 | mvc 0(64,%r11),__PT_R8(%r9) |
869 | lm %r0,%r7,__PT_R0(%r9) | 926 | lmg %r0,%r7,__PT_R0(%r9) |
870 | 0: lm %r8,%r9,__LC_RETURN_PSW | 927 | 0: lmg %r8,%r9,__LC_RETURN_PSW |
871 | br %r14 | 928 | br %r14 |
872 | .Lcleanup_sysc_restore_insn: | 929 | .Lcleanup_sysc_restore_insn: |
873 | .long .Lsysc_done - 4 + 0x80000000 | 930 | .quad .Lsysc_done - 4 |
874 | 931 | ||
875 | .Lcleanup_io_tif: | 932 | .Lcleanup_io_tif: |
876 | l %r9,BASED(.Lcleanup_table+20) # .Lio_tif + 0x80000000 | 933 | larl %r9,.Lio_tif |
877 | br %r14 | 934 | br %r14 |
878 | 935 | ||
879 | .Lcleanup_io_restore: | 936 | .Lcleanup_io_restore: |
880 | cl %r9,BASED(.Lcleanup_io_restore_insn) | 937 | clg %r9,BASED(.Lcleanup_io_restore_insn) |
881 | jhe 0f | 938 | je 0f |
882 | l %r9,12(%r11) # get saved r11 pointer to pt_regs | 939 | lg %r9,24(%r11) # get saved r11 pointer to pt_regs |
883 | mvc __LC_RETURN_PSW(8),__PT_PSW(%r9) | 940 | mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) |
884 | mvc 0(32,%r11),__PT_R8(%r9) | 941 | mvc 0(64,%r11),__PT_R8(%r9) |
885 | lm %r0,%r7,__PT_R0(%r9) | 942 | lmg %r0,%r7,__PT_R0(%r9) |
886 | 0: lm %r8,%r9,__LC_RETURN_PSW | 943 | 0: lmg %r8,%r9,__LC_RETURN_PSW |
887 | br %r14 | 944 | br %r14 |
888 | .Lcleanup_io_restore_insn: | 945 | .Lcleanup_io_restore_insn: |
889 | .long .Lio_done - 4 + 0x80000000 | 946 | .quad .Lio_done - 4 |
890 | 947 | ||
891 | .Lcleanup_idle: | 948 | .Lcleanup_idle: |
892 | # copy interrupt clock & cpu timer | 949 | # copy interrupt clock & cpu timer |
893 | mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK | 950 | mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK |
894 | mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER | 951 | mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER |
895 | chi %r11,__LC_SAVE_AREA_ASYNC | 952 | cghi %r11,__LC_SAVE_AREA_ASYNC |
896 | je 0f | 953 | je 0f |
897 | mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK | 954 | mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK |
898 | mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER | 955 | mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER |
899 | 0: # check if stck has been executed | 956 | 0: # check if stck & stpt have been executed |
900 | cl %r9,BASED(.Lcleanup_idle_insn) | 957 | clg %r9,BASED(.Lcleanup_idle_insn) |
901 | jhe 1f | 958 | jhe 1f |
902 | mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2) | 959 | mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2) |
903 | mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r3) | 960 | mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2) |
904 | 1: # account system time going idle | 961 | 1: # account system time going idle |
905 | lm %r9,%r10,__LC_STEAL_TIMER | 962 | lg %r9,__LC_STEAL_TIMER |
906 | ADD64 %r9,%r10,__CLOCK_IDLE_ENTER(%r2) | 963 | alg %r9,__CLOCK_IDLE_ENTER(%r2) |
907 | SUB64 %r9,%r10,__LC_LAST_UPDATE_CLOCK | 964 | slg %r9,__LC_LAST_UPDATE_CLOCK |
908 | stm %r9,%r10,__LC_STEAL_TIMER | 965 | stg %r9,__LC_STEAL_TIMER |
909 | mvc __LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2) | 966 | mvc __LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2) |
910 | lm %r9,%r10,__LC_SYSTEM_TIMER | 967 | lg %r9,__LC_SYSTEM_TIMER |
911 | ADD64 %r9,%r10,__LC_LAST_UPDATE_TIMER | 968 | alg %r9,__LC_LAST_UPDATE_TIMER |
912 | SUB64 %r9,%r10,__TIMER_IDLE_ENTER(%r2) | 969 | slg %r9,__TIMER_IDLE_ENTER(%r2) |
913 | stm %r9,%r10,__LC_SYSTEM_TIMER | 970 | stg %r9,__LC_SYSTEM_TIMER |
914 | mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2) | 971 | mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2) |
915 | # prepare return psw | 972 | # prepare return psw |
916 | n %r8,BASED(.Lcleanup_idle_wait) # clear irq & wait state bits | 973 | nihh %r8,0xfcfd # clear irq & wait state bits |
917 | l %r9,24(%r11) # return from psw_idle | 974 | lg %r9,48(%r11) # return from psw_idle |
918 | br %r14 | 975 | br %r14 |
919 | .Lcleanup_idle_insn: | 976 | .Lcleanup_idle_insn: |
920 | .long .Lpsw_idle_lpsw + 0x80000000 | 977 | .quad .Lpsw_idle_lpsw |
921 | .Lcleanup_idle_wait: | ||
922 | .long 0xfcfdffff | ||
923 | 978 | ||
924 | /* | 979 | /* |
925 | * Integer constants | 980 | * Integer constants |
926 | */ | 981 | */ |
927 | .align 4 | 982 | .align 8 |
928 | .Lnr_syscalls: | 983 | .Lcritical_start: |
929 | .long NR_syscalls | 984 | .quad .L__critical_start |
930 | .Lvtimer_max: | 985 | .Lcritical_length: |
931 | .quad 0x7fffffffffffffff | 986 | .quad .L__critical_end - .L__critical_start |
987 | |||
932 | 988 | ||
989 | #if IS_ENABLED(CONFIG_KVM) | ||
933 | /* | 990 | /* |
934 | * Symbol constants | 991 | * sie64a calling convention: |
992 | * %r2 pointer to sie control block | ||
993 | * %r3 guest register save area | ||
935 | */ | 994 | */ |
936 | .Lc_do_machine_check: .long s390_do_machine_check | 995 | ENTRY(sie64a) |
937 | .Lc_handle_mcck: .long s390_handle_mcck | 996 | stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers |
938 | .Lc_do_IRQ: .long do_IRQ | 997 | stg %r2,__SF_EMPTY(%r15) # save control block pointer |
939 | .Lc_do_signal: .long do_signal | 998 | stg %r3,__SF_EMPTY+8(%r15) # save guest register save area |
940 | .Lc_do_notify_resume: .long do_notify_resume | 999 | xc __SF_EMPTY+16(16,%r15),__SF_EMPTY+16(%r15) # host id & reason |
941 | .Lc_do_per_trap: .long do_per_trap | 1000 | lmg %r0,%r13,0(%r3) # load guest gprs 0-13 |
942 | .Lc_jump_table: .long pgm_check_table | 1001 | lg %r14,__LC_GMAP # get gmap pointer |
943 | .Lc_schedule: .long schedule | 1002 | ltgr %r14,%r14 |
944 | #ifdef CONFIG_PREEMPT | 1003 | jz .Lsie_gmap |
945 | .Lc_preempt_irq: .long preempt_schedule_irq | 1004 | lctlg %c1,%c1,__GMAP_ASCE(%r14) # load primary asce |
946 | #endif | 1005 | .Lsie_gmap: |
947 | .Lc_trace_enter: .long do_syscall_trace_enter | 1006 | lg %r14,__SF_EMPTY(%r15) # get control block pointer |
948 | .Lc_trace_exit: .long do_syscall_trace_exit | 1007 | oi __SIE_PROG0C+3(%r14),1 # we are going into SIE now |
949 | .Lc_schedule_tail: .long schedule_tail | 1008 | tm __SIE_PROG20+3(%r14),1 # last exit... |
950 | .Lc_sysc_per: .long .Lsysc_per + 0x80000000 | 1009 | jnz .Lsie_done |
951 | #ifdef CONFIG_TRACE_IRQFLAGS | 1010 | LPP __SF_EMPTY(%r15) # set guest id |
952 | .Lc_hardirqs_on: .long trace_hardirqs_on_caller | 1011 | sie 0(%r14) |
953 | .Lc_hardirqs_off: .long trace_hardirqs_off_caller | 1012 | .Lsie_done: |
954 | #endif | 1013 | LPP __SF_EMPTY+16(%r15) # set host id |
955 | #ifdef CONFIG_LOCKDEP | 1014 | ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE |
956 | .Lc_lockdep_sys_exit: .long lockdep_sys_exit | 1015 | lctlg %c1,%c1,__LC_USER_ASCE # load primary asce |
1016 | # some program checks are suppressing. C code (e.g. do_protection_exception) | ||
1017 | # will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other | ||
1018 | # instructions between sie64a and .Lsie_done should not cause program | ||
1019 | # interrupts. So lets use a nop (47 00 00 00) as a landing pad. | ||
1020 | # See also HANDLE_SIE_INTERCEPT | ||
1021 | .Lrewind_pad: | ||
1022 | nop 0 | ||
1023 | .globl sie_exit | ||
1024 | sie_exit: | ||
1025 | lg %r14,__SF_EMPTY+8(%r15) # load guest register save area | ||
1026 | stmg %r0,%r13,0(%r14) # save guest gprs 0-13 | ||
1027 | lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers | ||
1028 | lg %r2,__SF_EMPTY+24(%r15) # return exit reason code | ||
1029 | br %r14 | ||
1030 | .Lsie_fault: | ||
1031 | lghi %r14,-EFAULT | ||
1032 | stg %r14,__SF_EMPTY+24(%r15) # set exit reason code | ||
1033 | j sie_exit | ||
1034 | |||
1035 | .align 8 | ||
1036 | .Lsie_critical: | ||
1037 | .quad .Lsie_gmap | ||
1038 | .Lsie_critical_length: | ||
1039 | .quad .Lsie_done - .Lsie_gmap | ||
1040 | |||
1041 | EX_TABLE(.Lrewind_pad,.Lsie_fault) | ||
1042 | EX_TABLE(sie_exit,.Lsie_fault) | ||
957 | #endif | 1043 | #endif |
958 | .Lc_critical_start: .long .L__critical_start + 0x80000000 | ||
959 | .Lc_critical_length: .long .L__critical_end - .L__critical_start | ||
960 | 1044 | ||
961 | .section .rodata, "a" | 1045 | .section .rodata, "a" |
962 | #define SYSCALL(esa,esame,emu) .long esa | 1046 | #define SYSCALL(esame,emu) .long esame |
963 | .globl sys_call_table | 1047 | .globl sys_call_table |
964 | sys_call_table: | 1048 | sys_call_table: |
965 | #include "syscalls.S" | 1049 | #include "syscalls.S" |
966 | #undef SYSCALL | 1050 | #undef SYSCALL |
1051 | |||
1052 | #ifdef CONFIG_COMPAT | ||
1053 | |||
1054 | #define SYSCALL(esame,emu) .long emu | ||
1055 | .globl sys_call_table_emu | ||
1056 | sys_call_table_emu: | ||
1057 | #include "syscalls.S" | ||
1058 | #undef SYSCALL | ||
1059 | #endif | ||
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S deleted file mode 100644 index c329446a951d..000000000000 --- a/arch/s390/kernel/entry64.S +++ /dev/null | |||
@@ -1,1059 +0,0 @@ | |||
1 | /* | ||
2 | * S390 low-level entry points. | ||
3 | * | ||
4 | * Copyright IBM Corp. 1999, 2012 | ||
5 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), | ||
6 | * Hartmut Penner (hp@de.ibm.com), | ||
7 | * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), | ||
8 | * Heiko Carstens <heiko.carstens@de.ibm.com> | ||
9 | */ | ||
10 | |||
11 | #include <linux/init.h> | ||
12 | #include <linux/linkage.h> | ||
13 | #include <asm/processor.h> | ||
14 | #include <asm/cache.h> | ||
15 | #include <asm/errno.h> | ||
16 | #include <asm/ptrace.h> | ||
17 | #include <asm/thread_info.h> | ||
18 | #include <asm/asm-offsets.h> | ||
19 | #include <asm/unistd.h> | ||
20 | #include <asm/page.h> | ||
21 | #include <asm/sigp.h> | ||
22 | #include <asm/irq.h> | ||
23 | |||
24 | __PT_R0 = __PT_GPRS | ||
25 | __PT_R1 = __PT_GPRS + 8 | ||
26 | __PT_R2 = __PT_GPRS + 16 | ||
27 | __PT_R3 = __PT_GPRS + 24 | ||
28 | __PT_R4 = __PT_GPRS + 32 | ||
29 | __PT_R5 = __PT_GPRS + 40 | ||
30 | __PT_R6 = __PT_GPRS + 48 | ||
31 | __PT_R7 = __PT_GPRS + 56 | ||
32 | __PT_R8 = __PT_GPRS + 64 | ||
33 | __PT_R9 = __PT_GPRS + 72 | ||
34 | __PT_R10 = __PT_GPRS + 80 | ||
35 | __PT_R11 = __PT_GPRS + 88 | ||
36 | __PT_R12 = __PT_GPRS + 96 | ||
37 | __PT_R13 = __PT_GPRS + 104 | ||
38 | __PT_R14 = __PT_GPRS + 112 | ||
39 | __PT_R15 = __PT_GPRS + 120 | ||
40 | |||
41 | STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER | ||
42 | STACK_SIZE = 1 << STACK_SHIFT | ||
43 | STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE | ||
44 | |||
45 | _TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ | ||
46 | _TIF_UPROBE) | ||
47 | _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ | ||
48 | _TIF_SYSCALL_TRACEPOINT) | ||
49 | _CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE) | ||
50 | _PIF_WORK = (_PIF_PER_TRAP) | ||
51 | |||
52 | #define BASED(name) name-system_call(%r13) | ||
53 | |||
54 | .macro TRACE_IRQS_ON | ||
55 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
56 | basr %r2,%r0 | ||
57 | brasl %r14,trace_hardirqs_on_caller | ||
58 | #endif | ||
59 | .endm | ||
60 | |||
61 | .macro TRACE_IRQS_OFF | ||
62 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
63 | basr %r2,%r0 | ||
64 | brasl %r14,trace_hardirqs_off_caller | ||
65 | #endif | ||
66 | .endm | ||
67 | |||
68 | .macro LOCKDEP_SYS_EXIT | ||
69 | #ifdef CONFIG_LOCKDEP | ||
70 | tm __PT_PSW+1(%r11),0x01 # returning to user ? | ||
71 | jz .+10 | ||
72 | brasl %r14,lockdep_sys_exit | ||
73 | #endif | ||
74 | .endm | ||
75 | |||
76 | .macro LPP newpp | ||
77 | #if IS_ENABLED(CONFIG_KVM) | ||
78 | tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_LPP | ||
79 | jz .+8 | ||
80 | .insn s,0xb2800000,\newpp | ||
81 | #endif | ||
82 | .endm | ||
83 | |||
84 | .macro HANDLE_SIE_INTERCEPT scratch,reason | ||
85 | #if IS_ENABLED(CONFIG_KVM) | ||
86 | tmhh %r8,0x0001 # interrupting from user ? | ||
87 | jnz .+62 | ||
88 | lgr \scratch,%r9 | ||
89 | slg \scratch,BASED(.Lsie_critical) | ||
90 | clg \scratch,BASED(.Lsie_critical_length) | ||
91 | .if \reason==1 | ||
92 | # Some program interrupts are suppressing (e.g. protection). | ||
93 | # We must also check the instruction after SIE in that case. | ||
94 | # do_protection_exception will rewind to .Lrewind_pad | ||
95 | jh .+42 | ||
96 | .else | ||
97 | jhe .+42 | ||
98 | .endif | ||
99 | lg %r14,__SF_EMPTY(%r15) # get control block pointer | ||
100 | LPP __SF_EMPTY+16(%r15) # set host id | ||
101 | ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE | ||
102 | lctlg %c1,%c1,__LC_USER_ASCE # load primary asce | ||
103 | larl %r9,sie_exit # skip forward to sie_exit | ||
104 | mvi __SF_EMPTY+31(%r15),\reason # set exit reason | ||
105 | #endif | ||
106 | .endm | ||
107 | |||
108 | .macro CHECK_STACK stacksize,savearea | ||
109 | #ifdef CONFIG_CHECK_STACK | ||
110 | tml %r15,\stacksize - CONFIG_STACK_GUARD | ||
111 | lghi %r14,\savearea | ||
112 | jz stack_overflow | ||
113 | #endif | ||
114 | .endm | ||
115 | |||
116 | .macro SWITCH_ASYNC savearea,stack,shift | ||
117 | tmhh %r8,0x0001 # interrupting from user ? | ||
118 | jnz 1f | ||
119 | lgr %r14,%r9 | ||
120 | slg %r14,BASED(.Lcritical_start) | ||
121 | clg %r14,BASED(.Lcritical_length) | ||
122 | jhe 0f | ||
123 | lghi %r11,\savearea # inside critical section, do cleanup | ||
124 | brasl %r14,cleanup_critical | ||
125 | tmhh %r8,0x0001 # retest problem state after cleanup | ||
126 | jnz 1f | ||
127 | 0: lg %r14,\stack # are we already on the target stack? | ||
128 | slgr %r14,%r15 | ||
129 | srag %r14,%r14,\shift | ||
130 | jnz 1f | ||
131 | CHECK_STACK 1<<\shift,\savearea | ||
132 | aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) | ||
133 | j 2f | ||
134 | 1: lg %r15,\stack # load target stack | ||
135 | 2: la %r11,STACK_FRAME_OVERHEAD(%r15) | ||
136 | .endm | ||
137 | |||
138 | .macro UPDATE_VTIME scratch,enter_timer | ||
139 | lg \scratch,__LC_EXIT_TIMER | ||
140 | slg \scratch,\enter_timer | ||
141 | alg \scratch,__LC_USER_TIMER | ||
142 | stg \scratch,__LC_USER_TIMER | ||
143 | lg \scratch,__LC_LAST_UPDATE_TIMER | ||
144 | slg \scratch,__LC_EXIT_TIMER | ||
145 | alg \scratch,__LC_SYSTEM_TIMER | ||
146 | stg \scratch,__LC_SYSTEM_TIMER | ||
147 | mvc __LC_LAST_UPDATE_TIMER(8),\enter_timer | ||
148 | .endm | ||
149 | |||
150 | .macro LAST_BREAK scratch | ||
151 | srag \scratch,%r10,23 | ||
152 | jz .+10 | ||
153 | stg %r10,__TI_last_break(%r12) | ||
154 | .endm | ||
155 | |||
156 | .macro REENABLE_IRQS | ||
157 | stg %r8,__LC_RETURN_PSW | ||
158 | ni __LC_RETURN_PSW,0xbf | ||
159 | ssm __LC_RETURN_PSW | ||
160 | .endm | ||
161 | |||
162 | .macro STCK savearea | ||
163 | #ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES | ||
164 | .insn s,0xb27c0000,\savearea # store clock fast | ||
165 | #else | ||
166 | .insn s,0xb2050000,\savearea # store clock | ||
167 | #endif | ||
168 | .endm | ||
169 | |||
170 | .section .kprobes.text, "ax" | ||
171 | |||
172 | /* | ||
173 | * Scheduler resume function, called by switch_to | ||
174 | * gpr2 = (task_struct *) prev | ||
175 | * gpr3 = (task_struct *) next | ||
176 | * Returns: | ||
177 | * gpr2 = prev | ||
178 | */ | ||
179 | ENTRY(__switch_to) | ||
180 | stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task | ||
181 | stg %r15,__THREAD_ksp(%r2) # store kernel stack of prev | ||
182 | lg %r4,__THREAD_info(%r2) # get thread_info of prev | ||
183 | lg %r5,__THREAD_info(%r3) # get thread_info of next | ||
184 | lgr %r15,%r5 | ||
185 | aghi %r15,STACK_INIT # end of kernel stack of next | ||
186 | stg %r3,__LC_CURRENT # store task struct of next | ||
187 | stg %r5,__LC_THREAD_INFO # store thread info of next | ||
188 | stg %r15,__LC_KERNEL_STACK # store end of kernel stack | ||
189 | lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4 | ||
190 | mvc __LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next | ||
191 | lg %r15,__THREAD_ksp(%r3) # load kernel stack of next | ||
192 | lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task | ||
193 | br %r14 | ||
194 | |||
195 | .L__critical_start: | ||
196 | /* | ||
197 | * SVC interrupt handler routine. System calls are synchronous events and | ||
198 | * are executed with interrupts enabled. | ||
199 | */ | ||
200 | |||
201 | ENTRY(system_call) | ||
202 | stpt __LC_SYNC_ENTER_TIMER | ||
203 | .Lsysc_stmg: | ||
204 | stmg %r8,%r15,__LC_SAVE_AREA_SYNC | ||
205 | lg %r10,__LC_LAST_BREAK | ||
206 | lg %r12,__LC_THREAD_INFO | ||
207 | lghi %r14,_PIF_SYSCALL | ||
208 | .Lsysc_per: | ||
209 | lg %r15,__LC_KERNEL_STACK | ||
210 | la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs | ||
211 | .Lsysc_vtime: | ||
212 | UPDATE_VTIME %r13,__LC_SYNC_ENTER_TIMER | ||
213 | LAST_BREAK %r13 | ||
214 | stmg %r0,%r7,__PT_R0(%r11) | ||
215 | mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC | ||
216 | mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW | ||
217 | mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC | ||
218 | stg %r14,__PT_FLAGS(%r11) | ||
219 | .Lsysc_do_svc: | ||
220 | lg %r10,__TI_sysc_table(%r12) # address of system call table | ||
221 | llgh %r8,__PT_INT_CODE+2(%r11) | ||
222 | slag %r8,%r8,2 # shift and test for svc 0 | ||
223 | jnz .Lsysc_nr_ok | ||
224 | # svc 0: system call number in %r1 | ||
225 | llgfr %r1,%r1 # clear high word in r1 | ||
226 | cghi %r1,NR_syscalls | ||
227 | jnl .Lsysc_nr_ok | ||
228 | sth %r1,__PT_INT_CODE+2(%r11) | ||
229 | slag %r8,%r1,2 | ||
230 | .Lsysc_nr_ok: | ||
231 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) | ||
232 | stg %r2,__PT_ORIG_GPR2(%r11) | ||
233 | stg %r7,STACK_FRAME_OVERHEAD(%r15) | ||
234 | lgf %r9,0(%r8,%r10) # get system call add. | ||
235 | tm __TI_flags+7(%r12),_TIF_TRACE | ||
236 | jnz .Lsysc_tracesys | ||
237 | basr %r14,%r9 # call sys_xxxx | ||
238 | stg %r2,__PT_R2(%r11) # store return value | ||
239 | |||
240 | .Lsysc_return: | ||
241 | LOCKDEP_SYS_EXIT | ||
242 | .Lsysc_tif: | ||
243 | tm __PT_PSW+1(%r11),0x01 # returning to user ? | ||
244 | jno .Lsysc_restore | ||
245 | tm __PT_FLAGS+7(%r11),_PIF_WORK | ||
246 | jnz .Lsysc_work | ||
247 | tm __TI_flags+7(%r12),_TIF_WORK | ||
248 | jnz .Lsysc_work # check for work | ||
249 | tm __LC_CPU_FLAGS+7,_CIF_WORK | ||
250 | jnz .Lsysc_work | ||
251 | .Lsysc_restore: | ||
252 | lg %r14,__LC_VDSO_PER_CPU | ||
253 | lmg %r0,%r10,__PT_R0(%r11) | ||
254 | mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) | ||
255 | stpt __LC_EXIT_TIMER | ||
256 | mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER | ||
257 | lmg %r11,%r15,__PT_R11(%r11) | ||
258 | lpswe __LC_RETURN_PSW | ||
259 | .Lsysc_done: | ||
260 | |||
261 | # | ||
262 | # One of the work bits is on. Find out which one. | ||
263 | # | ||
264 | .Lsysc_work: | ||
265 | tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING | ||
266 | jo .Lsysc_mcck_pending | ||
267 | tm __TI_flags+7(%r12),_TIF_NEED_RESCHED | ||
268 | jo .Lsysc_reschedule | ||
269 | #ifdef CONFIG_UPROBES | ||
270 | tm __TI_flags+7(%r12),_TIF_UPROBE | ||
271 | jo .Lsysc_uprobe_notify | ||
272 | #endif | ||
273 | tm __PT_FLAGS+7(%r11),_PIF_PER_TRAP | ||
274 | jo .Lsysc_singlestep | ||
275 | tm __TI_flags+7(%r12),_TIF_SIGPENDING | ||
276 | jo .Lsysc_sigpending | ||
277 | tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME | ||
278 | jo .Lsysc_notify_resume | ||
279 | tm __LC_CPU_FLAGS+7,_CIF_ASCE | ||
280 | jo .Lsysc_uaccess | ||
281 | j .Lsysc_return # beware of critical section cleanup | ||
282 | |||
283 | # | ||
284 | # _TIF_NEED_RESCHED is set, call schedule | ||
285 | # | ||
286 | .Lsysc_reschedule: | ||
287 | larl %r14,.Lsysc_return | ||
288 | jg schedule | ||
289 | |||
290 | # | ||
291 | # _CIF_MCCK_PENDING is set, call handler | ||
292 | # | ||
293 | .Lsysc_mcck_pending: | ||
294 | larl %r14,.Lsysc_return | ||
295 | jg s390_handle_mcck # TIF bit will be cleared by handler | ||
296 | |||
297 | # | ||
298 | # _CIF_ASCE is set, load user space asce | ||
299 | # | ||
300 | .Lsysc_uaccess: | ||
301 | ni __LC_CPU_FLAGS+7,255-_CIF_ASCE | ||
302 | lctlg %c1,%c1,__LC_USER_ASCE # load primary asce | ||
303 | j .Lsysc_return | ||
304 | |||
305 | # | ||
306 | # _TIF_SIGPENDING is set, call do_signal | ||
307 | # | ||
308 | .Lsysc_sigpending: | ||
309 | lgr %r2,%r11 # pass pointer to pt_regs | ||
310 | brasl %r14,do_signal | ||
311 | tm __PT_FLAGS+7(%r11),_PIF_SYSCALL | ||
312 | jno .Lsysc_return | ||
313 | lmg %r2,%r7,__PT_R2(%r11) # load svc arguments | ||
314 | lg %r10,__TI_sysc_table(%r12) # address of system call table | ||
315 | lghi %r8,0 # svc 0 returns -ENOSYS | ||
316 | llgh %r1,__PT_INT_CODE+2(%r11) # load new svc number | ||
317 | cghi %r1,NR_syscalls | ||
318 | jnl .Lsysc_nr_ok # invalid svc number -> do svc 0 | ||
319 | slag %r8,%r1,2 | ||
320 | j .Lsysc_nr_ok # restart svc | ||
321 | |||
322 | # | ||
323 | # _TIF_NOTIFY_RESUME is set, call do_notify_resume | ||
324 | # | ||
325 | .Lsysc_notify_resume: | ||
326 | lgr %r2,%r11 # pass pointer to pt_regs | ||
327 | larl %r14,.Lsysc_return | ||
328 | jg do_notify_resume | ||
329 | |||
330 | # | ||
331 | # _TIF_UPROBE is set, call uprobe_notify_resume | ||
332 | # | ||
333 | #ifdef CONFIG_UPROBES | ||
334 | .Lsysc_uprobe_notify: | ||
335 | lgr %r2,%r11 # pass pointer to pt_regs | ||
336 | larl %r14,.Lsysc_return | ||
337 | jg uprobe_notify_resume | ||
338 | #endif | ||
339 | |||
340 | # | ||
341 | # _PIF_PER_TRAP is set, call do_per_trap | ||
342 | # | ||
343 | .Lsysc_singlestep: | ||
344 | ni __PT_FLAGS+7(%r11),255-_PIF_PER_TRAP | ||
345 | lgr %r2,%r11 # pass pointer to pt_regs | ||
346 | larl %r14,.Lsysc_return | ||
347 | jg do_per_trap | ||
348 | |||
349 | # | ||
350 | # call tracehook_report_syscall_entry/tracehook_report_syscall_exit before | ||
351 | # and after the system call | ||
352 | # | ||
353 | .Lsysc_tracesys: | ||
354 | lgr %r2,%r11 # pass pointer to pt_regs | ||
355 | la %r3,0 | ||
356 | llgh %r0,__PT_INT_CODE+2(%r11) | ||
357 | stg %r0,__PT_R2(%r11) | ||
358 | brasl %r14,do_syscall_trace_enter | ||
359 | lghi %r0,NR_syscalls | ||
360 | clgr %r0,%r2 | ||
361 | jnh .Lsysc_tracenogo | ||
362 | sllg %r8,%r2,2 | ||
363 | lgf %r9,0(%r8,%r10) | ||
364 | .Lsysc_tracego: | ||
365 | lmg %r3,%r7,__PT_R3(%r11) | ||
366 | stg %r7,STACK_FRAME_OVERHEAD(%r15) | ||
367 | lg %r2,__PT_ORIG_GPR2(%r11) | ||
368 | basr %r14,%r9 # call sys_xxx | ||
369 | stg %r2,__PT_R2(%r11) # store return value | ||
370 | .Lsysc_tracenogo: | ||
371 | tm __TI_flags+7(%r12),_TIF_TRACE | ||
372 | jz .Lsysc_return | ||
373 | lgr %r2,%r11 # pass pointer to pt_regs | ||
374 | larl %r14,.Lsysc_return | ||
375 | jg do_syscall_trace_exit | ||
376 | |||
377 | # | ||
378 | # a new process exits the kernel with ret_from_fork | ||
379 | # | ||
380 | ENTRY(ret_from_fork) | ||
381 | la %r11,STACK_FRAME_OVERHEAD(%r15) | ||
382 | lg %r12,__LC_THREAD_INFO | ||
383 | brasl %r14,schedule_tail | ||
384 | TRACE_IRQS_ON | ||
385 | ssm __LC_SVC_NEW_PSW # reenable interrupts | ||
386 | tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ? | ||
387 | jne .Lsysc_tracenogo | ||
388 | # it's a kernel thread | ||
389 | lmg %r9,%r10,__PT_R9(%r11) # load gprs | ||
390 | ENTRY(kernel_thread_starter) | ||
391 | la %r2,0(%r10) | ||
392 | basr %r14,%r9 | ||
393 | j .Lsysc_tracenogo | ||
394 | |||
395 | /* | ||
396 | * Program check handler routine | ||
397 | */ | ||
398 | |||
399 | ENTRY(pgm_check_handler) | ||
400 | stpt __LC_SYNC_ENTER_TIMER | ||
401 | stmg %r8,%r15,__LC_SAVE_AREA_SYNC | ||
402 | lg %r10,__LC_LAST_BREAK | ||
403 | lg %r12,__LC_THREAD_INFO | ||
404 | larl %r13,system_call | ||
405 | lmg %r8,%r9,__LC_PGM_OLD_PSW | ||
406 | HANDLE_SIE_INTERCEPT %r14,1 | ||
407 | tmhh %r8,0x0001 # test problem state bit | ||
408 | jnz 1f # -> fault in user space | ||
409 | tmhh %r8,0x4000 # PER bit set in old PSW ? | ||
410 | jnz 0f # -> enabled, can't be a double fault | ||
411 | tm __LC_PGM_ILC+3,0x80 # check for per exception | ||
412 | jnz .Lpgm_svcper # -> single stepped svc | ||
413 | 0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC | ||
414 | aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) | ||
415 | j 2f | ||
416 | 1: UPDATE_VTIME %r14,__LC_SYNC_ENTER_TIMER | ||
417 | LAST_BREAK %r14 | ||
418 | lg %r15,__LC_KERNEL_STACK | ||
419 | lg %r14,__TI_task(%r12) | ||
420 | lghi %r13,__LC_PGM_TDB | ||
421 | tm __LC_PGM_ILC+2,0x02 # check for transaction abort | ||
422 | jz 2f | ||
423 | mvc __THREAD_trap_tdb(256,%r14),0(%r13) | ||
424 | 2: la %r11,STACK_FRAME_OVERHEAD(%r15) | ||
425 | stmg %r0,%r7,__PT_R0(%r11) | ||
426 | mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC | ||
427 | stmg %r8,%r9,__PT_PSW(%r11) | ||
428 | mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC | ||
429 | mvc __PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE | ||
430 | xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) | ||
431 | stg %r10,__PT_ARGS(%r11) | ||
432 | tm __LC_PGM_ILC+3,0x80 # check for per exception | ||
433 | jz 0f | ||
434 | tmhh %r8,0x0001 # kernel per event ? | ||
435 | jz .Lpgm_kprobe | ||
436 | oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP | ||
437 | mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS | ||
438 | mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE | ||
439 | mvc __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID | ||
440 | 0: REENABLE_IRQS | ||
441 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) | ||
442 | larl %r1,pgm_check_table | ||
443 | llgh %r10,__PT_INT_CODE+2(%r11) | ||
444 | nill %r10,0x007f | ||
445 | sll %r10,2 | ||
446 | je .Lsysc_return | ||
447 | lgf %r1,0(%r10,%r1) # load address of handler routine | ||
448 | lgr %r2,%r11 # pass pointer to pt_regs | ||
449 | basr %r14,%r1 # branch to interrupt-handler | ||
450 | j .Lsysc_return | ||
451 | |||
452 | # | ||
453 | # PER event in supervisor state, must be kprobes | ||
454 | # | ||
455 | .Lpgm_kprobe: | ||
456 | REENABLE_IRQS | ||
457 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) | ||
458 | lgr %r2,%r11 # pass pointer to pt_regs | ||
459 | brasl %r14,do_per_trap | ||
460 | j .Lsysc_return | ||
461 | |||
462 | # | ||
463 | # single stepped system call | ||
464 | # | ||
465 | .Lpgm_svcper: | ||
466 | mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW | ||
467 | larl %r14,.Lsysc_per | ||
468 | stg %r14,__LC_RETURN_PSW+8 | ||
469 | lghi %r14,_PIF_SYSCALL | _PIF_PER_TRAP | ||
470 | lpswe __LC_RETURN_PSW # branch to .Lsysc_per and enable irqs | ||
471 | |||
472 | /* | ||
473 | * IO interrupt handler routine | ||
474 | */ | ||
475 | ENTRY(io_int_handler) | ||
476 | STCK __LC_INT_CLOCK | ||
477 | stpt __LC_ASYNC_ENTER_TIMER | ||
478 | stmg %r8,%r15,__LC_SAVE_AREA_ASYNC | ||
479 | lg %r10,__LC_LAST_BREAK | ||
480 | lg %r12,__LC_THREAD_INFO | ||
481 | larl %r13,system_call | ||
482 | lmg %r8,%r9,__LC_IO_OLD_PSW | ||
483 | HANDLE_SIE_INTERCEPT %r14,2 | ||
484 | SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT | ||
485 | tmhh %r8,0x0001 # interrupting from user? | ||
486 | jz .Lio_skip | ||
487 | UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER | ||
488 | LAST_BREAK %r14 | ||
489 | .Lio_skip: | ||
490 | stmg %r0,%r7,__PT_R0(%r11) | ||
491 | mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC | ||
492 | stmg %r8,%r9,__PT_PSW(%r11) | ||
493 | mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID | ||
494 | xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) | ||
495 | TRACE_IRQS_OFF | ||
496 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) | ||
497 | .Lio_loop: | ||
498 | lgr %r2,%r11 # pass pointer to pt_regs | ||
499 | lghi %r3,IO_INTERRUPT | ||
500 | tm __PT_INT_CODE+8(%r11),0x80 # adapter interrupt ? | ||
501 | jz .Lio_call | ||
502 | lghi %r3,THIN_INTERRUPT | ||
503 | .Lio_call: | ||
504 | brasl %r14,do_IRQ | ||
505 | tm __LC_MACHINE_FLAGS+6,0x10 # MACHINE_FLAG_LPAR | ||
506 | jz .Lio_return | ||
507 | tpi 0 | ||
508 | jz .Lio_return | ||
509 | mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID | ||
510 | j .Lio_loop | ||
511 | .Lio_return: | ||
512 | LOCKDEP_SYS_EXIT | ||
513 | TRACE_IRQS_ON | ||
514 | .Lio_tif: | ||
515 | tm __TI_flags+7(%r12),_TIF_WORK | ||
516 | jnz .Lio_work # there is work to do (signals etc.) | ||
517 | tm __LC_CPU_FLAGS+7,_CIF_WORK | ||
518 | jnz .Lio_work | ||
519 | .Lio_restore: | ||
520 | lg %r14,__LC_VDSO_PER_CPU | ||
521 | lmg %r0,%r10,__PT_R0(%r11) | ||
522 | mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) | ||
523 | stpt __LC_EXIT_TIMER | ||
524 | mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER | ||
525 | lmg %r11,%r15,__PT_R11(%r11) | ||
526 | lpswe __LC_RETURN_PSW | ||
527 | .Lio_done: | ||
528 | |||
529 | # | ||
530 | # There is work todo, find out in which context we have been interrupted: | ||
531 | # 1) if we return to user space we can do all _TIF_WORK work | ||
532 | # 2) if we return to kernel code and kvm is enabled check if we need to | ||
533 | # modify the psw to leave SIE | ||
534 | # 3) if we return to kernel code and preemptive scheduling is enabled check | ||
535 | # the preemption counter and if it is zero call preempt_schedule_irq | ||
536 | # Before any work can be done, a switch to the kernel stack is required. | ||
537 | # | ||
538 | .Lio_work: | ||
539 | tm __PT_PSW+1(%r11),0x01 # returning to user ? | ||
540 | jo .Lio_work_user # yes -> do resched & signal | ||
541 | #ifdef CONFIG_PREEMPT | ||
542 | # check for preemptive scheduling | ||
543 | icm %r0,15,__TI_precount(%r12) | ||
544 | jnz .Lio_restore # preemption is disabled | ||
545 | tm __TI_flags+7(%r12),_TIF_NEED_RESCHED | ||
546 | jno .Lio_restore | ||
547 | # switch to kernel stack | ||
548 | lg %r1,__PT_R15(%r11) | ||
549 | aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) | ||
550 | mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) | ||
551 | xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) | ||
552 | la %r11,STACK_FRAME_OVERHEAD(%r1) | ||
553 | lgr %r15,%r1 | ||
554 | # TRACE_IRQS_ON already done at .Lio_return, call | ||
555 | # TRACE_IRQS_OFF to keep things symmetrical | ||
556 | TRACE_IRQS_OFF | ||
557 | brasl %r14,preempt_schedule_irq | ||
558 | j .Lio_return | ||
559 | #else | ||
560 | j .Lio_restore | ||
561 | #endif | ||
562 | |||
563 | # | ||
564 | # Need to do work before returning to userspace, switch to kernel stack | ||
565 | # | ||
566 | .Lio_work_user: | ||
567 | lg %r1,__LC_KERNEL_STACK | ||
568 | mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) | ||
569 | xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) | ||
570 | la %r11,STACK_FRAME_OVERHEAD(%r1) | ||
571 | lgr %r15,%r1 | ||
572 | |||
573 | # | ||
574 | # One of the work bits is on. Find out which one. | ||
575 | # | ||
576 | .Lio_work_tif: | ||
577 | tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING | ||
578 | jo .Lio_mcck_pending | ||
579 | tm __TI_flags+7(%r12),_TIF_NEED_RESCHED | ||
580 | jo .Lio_reschedule | ||
581 | tm __TI_flags+7(%r12),_TIF_SIGPENDING | ||
582 | jo .Lio_sigpending | ||
583 | tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME | ||
584 | jo .Lio_notify_resume | ||
585 | tm __LC_CPU_FLAGS+7,_CIF_ASCE | ||
586 | jo .Lio_uaccess | ||
587 | j .Lio_return # beware of critical section cleanup | ||
588 | |||
589 | # | ||
590 | # _CIF_MCCK_PENDING is set, call handler | ||
591 | # | ||
592 | .Lio_mcck_pending: | ||
593 | # TRACE_IRQS_ON already done at .Lio_return | ||
594 | brasl %r14,s390_handle_mcck # TIF bit will be cleared by handler | ||
595 | TRACE_IRQS_OFF | ||
596 | j .Lio_return | ||
597 | |||
598 | # | ||
599 | # _CIF_ASCE is set, load user space asce | ||
600 | # | ||
601 | .Lio_uaccess: | ||
602 | ni __LC_CPU_FLAGS+7,255-_CIF_ASCE | ||
603 | lctlg %c1,%c1,__LC_USER_ASCE # load primary asce | ||
604 | j .Lio_return | ||
605 | |||
606 | # | ||
607 | # _TIF_NEED_RESCHED is set, call schedule | ||
608 | # | ||
609 | .Lio_reschedule: | ||
610 | # TRACE_IRQS_ON already done at .Lio_return | ||
611 | ssm __LC_SVC_NEW_PSW # reenable interrupts | ||
612 | brasl %r14,schedule # call scheduler | ||
613 | ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts | ||
614 | TRACE_IRQS_OFF | ||
615 | j .Lio_return | ||
616 | |||
617 | # | ||
618 | # _TIF_SIGPENDING or is set, call do_signal | ||
619 | # | ||
620 | .Lio_sigpending: | ||
621 | # TRACE_IRQS_ON already done at .Lio_return | ||
622 | ssm __LC_SVC_NEW_PSW # reenable interrupts | ||
623 | lgr %r2,%r11 # pass pointer to pt_regs | ||
624 | brasl %r14,do_signal | ||
625 | ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts | ||
626 | TRACE_IRQS_OFF | ||
627 | j .Lio_return | ||
628 | |||
629 | # | ||
630 | # _TIF_NOTIFY_RESUME or is set, call do_notify_resume | ||
631 | # | ||
632 | .Lio_notify_resume: | ||
633 | # TRACE_IRQS_ON already done at .Lio_return | ||
634 | ssm __LC_SVC_NEW_PSW # reenable interrupts | ||
635 | lgr %r2,%r11 # pass pointer to pt_regs | ||
636 | brasl %r14,do_notify_resume | ||
637 | ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts | ||
638 | TRACE_IRQS_OFF | ||
639 | j .Lio_return | ||
640 | |||
641 | /* | ||
642 | * External interrupt handler routine | ||
643 | */ | ||
644 | ENTRY(ext_int_handler) | ||
645 | STCK __LC_INT_CLOCK | ||
646 | stpt __LC_ASYNC_ENTER_TIMER | ||
647 | stmg %r8,%r15,__LC_SAVE_AREA_ASYNC | ||
648 | lg %r10,__LC_LAST_BREAK | ||
649 | lg %r12,__LC_THREAD_INFO | ||
650 | larl %r13,system_call | ||
651 | lmg %r8,%r9,__LC_EXT_OLD_PSW | ||
652 | HANDLE_SIE_INTERCEPT %r14,3 | ||
653 | SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT | ||
654 | tmhh %r8,0x0001 # interrupting from user ? | ||
655 | jz .Lext_skip | ||
656 | UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER | ||
657 | LAST_BREAK %r14 | ||
658 | .Lext_skip: | ||
659 | stmg %r0,%r7,__PT_R0(%r11) | ||
660 | mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC | ||
661 | stmg %r8,%r9,__PT_PSW(%r11) | ||
662 | lghi %r1,__LC_EXT_PARAMS2 | ||
663 | mvc __PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR | ||
664 | mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS | ||
665 | mvc __PT_INT_PARM_LONG(8,%r11),0(%r1) | ||
666 | xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) | ||
667 | TRACE_IRQS_OFF | ||
668 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) | ||
669 | lgr %r2,%r11 # pass pointer to pt_regs | ||
670 | lghi %r3,EXT_INTERRUPT | ||
671 | brasl %r14,do_IRQ | ||
672 | j .Lio_return | ||
673 | |||
674 | /* | ||
675 | * Load idle PSW. The second "half" of this function is in .Lcleanup_idle. | ||
676 | */ | ||
677 | ENTRY(psw_idle) | ||
678 | stg %r3,__SF_EMPTY(%r15) | ||
679 | larl %r1,.Lpsw_idle_lpsw+4 | ||
680 | stg %r1,__SF_EMPTY+8(%r15) | ||
681 | STCK __CLOCK_IDLE_ENTER(%r2) | ||
682 | stpt __TIMER_IDLE_ENTER(%r2) | ||
683 | .Lpsw_idle_lpsw: | ||
684 | lpswe __SF_EMPTY(%r15) | ||
685 | br %r14 | ||
686 | .Lpsw_idle_end: | ||
687 | |||
688 | .L__critical_end: | ||
689 | |||
690 | /* | ||
691 | * Machine check handler routines | ||
692 | */ | ||
693 | ENTRY(mcck_int_handler) | ||
694 | STCK __LC_MCCK_CLOCK | ||
695 | la %r1,4095 # revalidate r1 | ||
696 | spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer | ||
697 | lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs | ||
698 | lg %r10,__LC_LAST_BREAK | ||
699 | lg %r12,__LC_THREAD_INFO | ||
700 | larl %r13,system_call | ||
701 | lmg %r8,%r9,__LC_MCK_OLD_PSW | ||
702 | HANDLE_SIE_INTERCEPT %r14,4 | ||
703 | tm __LC_MCCK_CODE,0x80 # system damage? | ||
704 | jo .Lmcck_panic # yes -> rest of mcck code invalid | ||
705 | lghi %r14,__LC_CPU_TIMER_SAVE_AREA | ||
706 | mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) | ||
707 | tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid? | ||
708 | jo 3f | ||
709 | la %r14,__LC_SYNC_ENTER_TIMER | ||
710 | clc 0(8,%r14),__LC_ASYNC_ENTER_TIMER | ||
711 | jl 0f | ||
712 | la %r14,__LC_ASYNC_ENTER_TIMER | ||
713 | 0: clc 0(8,%r14),__LC_EXIT_TIMER | ||
714 | jl 1f | ||
715 | la %r14,__LC_EXIT_TIMER | ||
716 | 1: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER | ||
717 | jl 2f | ||
718 | la %r14,__LC_LAST_UPDATE_TIMER | ||
719 | 2: spt 0(%r14) | ||
720 | mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) | ||
721 | 3: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? | ||
722 | jno .Lmcck_panic # no -> skip cleanup critical | ||
723 | SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_PANIC_STACK,PAGE_SHIFT | ||
724 | tm %r8,0x0001 # interrupting from user ? | ||
725 | jz .Lmcck_skip | ||
726 | UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER | ||
727 | LAST_BREAK %r14 | ||
728 | .Lmcck_skip: | ||
729 | lghi %r14,__LC_GPREGS_SAVE_AREA+64 | ||
730 | stmg %r0,%r7,__PT_R0(%r11) | ||
731 | mvc __PT_R8(64,%r11),0(%r14) | ||
732 | stmg %r8,%r9,__PT_PSW(%r11) | ||
733 | xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) | ||
734 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) | ||
735 | lgr %r2,%r11 # pass pointer to pt_regs | ||
736 | brasl %r14,s390_do_machine_check | ||
737 | tm __PT_PSW+1(%r11),0x01 # returning to user ? | ||
738 | jno .Lmcck_return | ||
739 | lg %r1,__LC_KERNEL_STACK # switch to kernel stack | ||
740 | mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) | ||
741 | xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) | ||
742 | la %r11,STACK_FRAME_OVERHEAD(%r1) | ||
743 | lgr %r15,%r1 | ||
744 | ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off | ||
745 | tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING | ||
746 | jno .Lmcck_return | ||
747 | TRACE_IRQS_OFF | ||
748 | brasl %r14,s390_handle_mcck | ||
749 | TRACE_IRQS_ON | ||
750 | .Lmcck_return: | ||
751 | lg %r14,__LC_VDSO_PER_CPU | ||
752 | lmg %r0,%r10,__PT_R0(%r11) | ||
753 | mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW | ||
754 | tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? | ||
755 | jno 0f | ||
756 | stpt __LC_EXIT_TIMER | ||
757 | mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER | ||
758 | 0: lmg %r11,%r15,__PT_R11(%r11) | ||
759 | lpswe __LC_RETURN_MCCK_PSW | ||
760 | |||
761 | .Lmcck_panic: | ||
762 | lg %r14,__LC_PANIC_STACK | ||
763 | slgr %r14,%r15 | ||
764 | srag %r14,%r14,PAGE_SHIFT | ||
765 | jz 0f | ||
766 | lg %r15,__LC_PANIC_STACK | ||
767 | 0: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) | ||
768 | j .Lmcck_skip | ||
769 | |||
770 | # | ||
771 | # PSW restart interrupt handler | ||
772 | # | ||
773 | ENTRY(restart_int_handler) | ||
774 | stg %r15,__LC_SAVE_AREA_RESTART | ||
775 | lg %r15,__LC_RESTART_STACK | ||
776 | aghi %r15,-__PT_SIZE # create pt_regs on stack | ||
777 | xc 0(__PT_SIZE,%r15),0(%r15) | ||
778 | stmg %r0,%r14,__PT_R0(%r15) | ||
779 | mvc __PT_R15(8,%r15),__LC_SAVE_AREA_RESTART | ||
780 | mvc __PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw | ||
781 | aghi %r15,-STACK_FRAME_OVERHEAD # create stack frame on stack | ||
782 | xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15) | ||
783 | lg %r1,__LC_RESTART_FN # load fn, parm & source cpu | ||
784 | lg %r2,__LC_RESTART_DATA | ||
785 | lg %r3,__LC_RESTART_SOURCE | ||
786 | ltgr %r3,%r3 # test source cpu address | ||
787 | jm 1f # negative -> skip source stop | ||
788 | 0: sigp %r4,%r3,SIGP_SENSE # sigp sense to source cpu | ||
789 | brc 10,0b # wait for status stored | ||
790 | 1: basr %r14,%r1 # call function | ||
791 | stap __SF_EMPTY(%r15) # store cpu address | ||
792 | llgh %r3,__SF_EMPTY(%r15) | ||
793 | 2: sigp %r4,%r3,SIGP_STOP # sigp stop to current cpu | ||
794 | brc 2,2b | ||
795 | 3: j 3b | ||
796 | |||
797 | .section .kprobes.text, "ax" | ||
798 | |||
799 | #ifdef CONFIG_CHECK_STACK | ||
800 | /* | ||
801 | * The synchronous or the asynchronous stack overflowed. We are dead. | ||
802 | * No need to properly save the registers, we are going to panic anyway. | ||
803 | * Setup a pt_regs so that show_trace can provide a good call trace. | ||
804 | */ | ||
805 | stack_overflow: | ||
806 | lg %r15,__LC_PANIC_STACK # change to panic stack | ||
807 | la %r11,STACK_FRAME_OVERHEAD(%r15) | ||
808 | stmg %r0,%r7,__PT_R0(%r11) | ||
809 | stmg %r8,%r9,__PT_PSW(%r11) | ||
810 | mvc __PT_R8(64,%r11),0(%r14) | ||
811 | stg %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2 | ||
812 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) | ||
813 | lgr %r2,%r11 # pass pointer to pt_regs | ||
814 | jg kernel_stack_overflow | ||
815 | #endif | ||
816 | |||
817 | .align 8 | ||
818 | .Lcleanup_table: | ||
819 | .quad system_call | ||
820 | .quad .Lsysc_do_svc | ||
821 | .quad .Lsysc_tif | ||
822 | .quad .Lsysc_restore | ||
823 | .quad .Lsysc_done | ||
824 | .quad .Lio_tif | ||
825 | .quad .Lio_restore | ||
826 | .quad .Lio_done | ||
827 | .quad psw_idle | ||
828 | .quad .Lpsw_idle_end | ||
829 | |||
830 | cleanup_critical: | ||
831 | clg %r9,BASED(.Lcleanup_table) # system_call | ||
832 | jl 0f | ||
833 | clg %r9,BASED(.Lcleanup_table+8) # .Lsysc_do_svc | ||
834 | jl .Lcleanup_system_call | ||
835 | clg %r9,BASED(.Lcleanup_table+16) # .Lsysc_tif | ||
836 | jl 0f | ||
837 | clg %r9,BASED(.Lcleanup_table+24) # .Lsysc_restore | ||
838 | jl .Lcleanup_sysc_tif | ||
839 | clg %r9,BASED(.Lcleanup_table+32) # .Lsysc_done | ||
840 | jl .Lcleanup_sysc_restore | ||
841 | clg %r9,BASED(.Lcleanup_table+40) # .Lio_tif | ||
842 | jl 0f | ||
843 | clg %r9,BASED(.Lcleanup_table+48) # .Lio_restore | ||
844 | jl .Lcleanup_io_tif | ||
845 | clg %r9,BASED(.Lcleanup_table+56) # .Lio_done | ||
846 | jl .Lcleanup_io_restore | ||
847 | clg %r9,BASED(.Lcleanup_table+64) # psw_idle | ||
848 | jl 0f | ||
849 | clg %r9,BASED(.Lcleanup_table+72) # .Lpsw_idle_end | ||
850 | jl .Lcleanup_idle | ||
851 | 0: br %r14 | ||
852 | |||
853 | |||
854 | .Lcleanup_system_call: | ||
855 | # check if stpt has been executed | ||
856 | clg %r9,BASED(.Lcleanup_system_call_insn) | ||
857 | jh 0f | ||
858 | mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER | ||
859 | cghi %r11,__LC_SAVE_AREA_ASYNC | ||
860 | je 0f | ||
861 | mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER | ||
862 | 0: # check if stmg has been executed | ||
863 | clg %r9,BASED(.Lcleanup_system_call_insn+8) | ||
864 | jh 0f | ||
865 | mvc __LC_SAVE_AREA_SYNC(64),0(%r11) | ||
866 | 0: # check if base register setup + TIF bit load has been done | ||
867 | clg %r9,BASED(.Lcleanup_system_call_insn+16) | ||
868 | jhe 0f | ||
869 | # set up saved registers r10 and r12 | ||
870 | stg %r10,16(%r11) # r10 last break | ||
871 | stg %r12,32(%r11) # r12 thread-info pointer | ||
872 | 0: # check if the user time update has been done | ||
873 | clg %r9,BASED(.Lcleanup_system_call_insn+24) | ||
874 | jh 0f | ||
875 | lg %r15,__LC_EXIT_TIMER | ||
876 | slg %r15,__LC_SYNC_ENTER_TIMER | ||
877 | alg %r15,__LC_USER_TIMER | ||
878 | stg %r15,__LC_USER_TIMER | ||
879 | 0: # check if the system time update has been done | ||
880 | clg %r9,BASED(.Lcleanup_system_call_insn+32) | ||
881 | jh 0f | ||
882 | lg %r15,__LC_LAST_UPDATE_TIMER | ||
883 | slg %r15,__LC_EXIT_TIMER | ||
884 | alg %r15,__LC_SYSTEM_TIMER | ||
885 | stg %r15,__LC_SYSTEM_TIMER | ||
886 | 0: # update accounting time stamp | ||
887 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | ||
888 | # do LAST_BREAK | ||
889 | lg %r9,16(%r11) | ||
890 | srag %r9,%r9,23 | ||
891 | jz 0f | ||
892 | mvc __TI_last_break(8,%r12),16(%r11) | ||
893 | 0: # set up saved register r11 | ||
894 | lg %r15,__LC_KERNEL_STACK | ||
895 | la %r9,STACK_FRAME_OVERHEAD(%r15) | ||
896 | stg %r9,24(%r11) # r11 pt_regs pointer | ||
897 | # fill pt_regs | ||
898 | mvc __PT_R8(64,%r9),__LC_SAVE_AREA_SYNC | ||
899 | stmg %r0,%r7,__PT_R0(%r9) | ||
900 | mvc __PT_PSW(16,%r9),__LC_SVC_OLD_PSW | ||
901 | mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC | ||
902 | xc __PT_FLAGS(8,%r9),__PT_FLAGS(%r9) | ||
903 | mvi __PT_FLAGS+7(%r9),_PIF_SYSCALL | ||
904 | # setup saved register r15 | ||
905 | stg %r15,56(%r11) # r15 stack pointer | ||
906 | # set new psw address and exit | ||
907 | larl %r9,.Lsysc_do_svc | ||
908 | br %r14 | ||
909 | .Lcleanup_system_call_insn: | ||
910 | .quad system_call | ||
911 | .quad .Lsysc_stmg | ||
912 | .quad .Lsysc_per | ||
913 | .quad .Lsysc_vtime+18 | ||
914 | .quad .Lsysc_vtime+42 | ||
915 | |||
916 | .Lcleanup_sysc_tif: | ||
917 | larl %r9,.Lsysc_tif | ||
918 | br %r14 | ||
919 | |||
920 | .Lcleanup_sysc_restore: | ||
921 | clg %r9,BASED(.Lcleanup_sysc_restore_insn) | ||
922 | je 0f | ||
923 | lg %r9,24(%r11) # get saved pointer to pt_regs | ||
924 | mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) | ||
925 | mvc 0(64,%r11),__PT_R8(%r9) | ||
926 | lmg %r0,%r7,__PT_R0(%r9) | ||
927 | 0: lmg %r8,%r9,__LC_RETURN_PSW | ||
928 | br %r14 | ||
929 | .Lcleanup_sysc_restore_insn: | ||
930 | .quad .Lsysc_done - 4 | ||
931 | |||
932 | .Lcleanup_io_tif: | ||
933 | larl %r9,.Lio_tif | ||
934 | br %r14 | ||
935 | |||
936 | .Lcleanup_io_restore: | ||
937 | clg %r9,BASED(.Lcleanup_io_restore_insn) | ||
938 | je 0f | ||
939 | lg %r9,24(%r11) # get saved r11 pointer to pt_regs | ||
940 | mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) | ||
941 | mvc 0(64,%r11),__PT_R8(%r9) | ||
942 | lmg %r0,%r7,__PT_R0(%r9) | ||
943 | 0: lmg %r8,%r9,__LC_RETURN_PSW | ||
944 | br %r14 | ||
945 | .Lcleanup_io_restore_insn: | ||
946 | .quad .Lio_done - 4 | ||
947 | |||
948 | .Lcleanup_idle: | ||
949 | # copy interrupt clock & cpu timer | ||
950 | mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK | ||
951 | mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER | ||
952 | cghi %r11,__LC_SAVE_AREA_ASYNC | ||
953 | je 0f | ||
954 | mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK | ||
955 | mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER | ||
956 | 0: # check if stck & stpt have been executed | ||
957 | clg %r9,BASED(.Lcleanup_idle_insn) | ||
958 | jhe 1f | ||
959 | mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2) | ||
960 | mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2) | ||
961 | 1: # account system time going idle | ||
962 | lg %r9,__LC_STEAL_TIMER | ||
963 | alg %r9,__CLOCK_IDLE_ENTER(%r2) | ||
964 | slg %r9,__LC_LAST_UPDATE_CLOCK | ||
965 | stg %r9,__LC_STEAL_TIMER | ||
966 | mvc __LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2) | ||
967 | lg %r9,__LC_SYSTEM_TIMER | ||
968 | alg %r9,__LC_LAST_UPDATE_TIMER | ||
969 | slg %r9,__TIMER_IDLE_ENTER(%r2) | ||
970 | stg %r9,__LC_SYSTEM_TIMER | ||
971 | mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2) | ||
972 | # prepare return psw | ||
973 | nihh %r8,0xfcfd # clear irq & wait state bits | ||
974 | lg %r9,48(%r11) # return from psw_idle | ||
975 | br %r14 | ||
976 | .Lcleanup_idle_insn: | ||
977 | .quad .Lpsw_idle_lpsw | ||
978 | |||
979 | /* | ||
980 | * Integer constants | ||
981 | */ | ||
982 | .align 8 | ||
983 | .Lcritical_start: | ||
984 | .quad .L__critical_start | ||
985 | .Lcritical_length: | ||
986 | .quad .L__critical_end - .L__critical_start | ||
987 | |||
988 | |||
989 | #if IS_ENABLED(CONFIG_KVM) | ||
990 | /* | ||
991 | * sie64a calling convention: | ||
992 | * %r2 pointer to sie control block | ||
993 | * %r3 guest register save area | ||
994 | */ | ||
995 | ENTRY(sie64a) | ||
996 | stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers | ||
997 | stg %r2,__SF_EMPTY(%r15) # save control block pointer | ||
998 | stg %r3,__SF_EMPTY+8(%r15) # save guest register save area | ||
999 | xc __SF_EMPTY+16(16,%r15),__SF_EMPTY+16(%r15) # host id & reason | ||
1000 | lmg %r0,%r13,0(%r3) # load guest gprs 0-13 | ||
1001 | lg %r14,__LC_GMAP # get gmap pointer | ||
1002 | ltgr %r14,%r14 | ||
1003 | jz .Lsie_gmap | ||
1004 | lctlg %c1,%c1,__GMAP_ASCE(%r14) # load primary asce | ||
1005 | .Lsie_gmap: | ||
1006 | lg %r14,__SF_EMPTY(%r15) # get control block pointer | ||
1007 | oi __SIE_PROG0C+3(%r14),1 # we are going into SIE now | ||
1008 | tm __SIE_PROG20+3(%r14),1 # last exit... | ||
1009 | jnz .Lsie_done | ||
1010 | LPP __SF_EMPTY(%r15) # set guest id | ||
1011 | sie 0(%r14) | ||
1012 | .Lsie_done: | ||
1013 | LPP __SF_EMPTY+16(%r15) # set host id | ||
1014 | ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE | ||
1015 | lctlg %c1,%c1,__LC_USER_ASCE # load primary asce | ||
1016 | # some program checks are suppressing. C code (e.g. do_protection_exception) | ||
1017 | # will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other | ||
1018 | # instructions between sie64a and .Lsie_done should not cause program | ||
1019 | # interrupts. So lets use a nop (47 00 00 00) as a landing pad. | ||
1020 | # See also HANDLE_SIE_INTERCEPT | ||
1021 | .Lrewind_pad: | ||
1022 | nop 0 | ||
1023 | .globl sie_exit | ||
1024 | sie_exit: | ||
1025 | lg %r14,__SF_EMPTY+8(%r15) # load guest register save area | ||
1026 | stmg %r0,%r13,0(%r14) # save guest gprs 0-13 | ||
1027 | lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers | ||
1028 | lg %r2,__SF_EMPTY+24(%r15) # return exit reason code | ||
1029 | br %r14 | ||
1030 | .Lsie_fault: | ||
1031 | lghi %r14,-EFAULT | ||
1032 | stg %r14,__SF_EMPTY+24(%r15) # set exit reason code | ||
1033 | j sie_exit | ||
1034 | |||
1035 | .align 8 | ||
1036 | .Lsie_critical: | ||
1037 | .quad .Lsie_gmap | ||
1038 | .Lsie_critical_length: | ||
1039 | .quad .Lsie_done - .Lsie_gmap | ||
1040 | |||
1041 | EX_TABLE(.Lrewind_pad,.Lsie_fault) | ||
1042 | EX_TABLE(sie_exit,.Lsie_fault) | ||
1043 | #endif | ||
1044 | |||
1045 | .section .rodata, "a" | ||
1046 | #define SYSCALL(esa,esame,emu) .long esame | ||
1047 | .globl sys_call_table | ||
1048 | sys_call_table: | ||
1049 | #include "syscalls.S" | ||
1050 | #undef SYSCALL | ||
1051 | |||
1052 | #ifdef CONFIG_COMPAT | ||
1053 | |||
1054 | #define SYSCALL(esa,esame,emu) .long emu | ||
1055 | .globl sys_call_table_emu | ||
1056 | sys_call_table_emu: | ||
1057 | #include "syscalls.S" | ||
1058 | #undef SYSCALL | ||
1059 | #endif | ||
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c index 6c79f1b44fe7..e0eaf11134b4 100644 --- a/arch/s390/kernel/ftrace.c +++ b/arch/s390/kernel/ftrace.c | |||
@@ -130,8 +130,7 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, | |||
130 | /* Verify that the to be replaced code matches what we expect. */ | 130 | /* Verify that the to be replaced code matches what we expect. */ |
131 | if (memcmp(&orig, &old, sizeof(old))) | 131 | if (memcmp(&orig, &old, sizeof(old))) |
132 | return -EINVAL; | 132 | return -EINVAL; |
133 | if (probe_kernel_write((void *) rec->ip, &new, sizeof(new))) | 133 | s390_kernel_write((void *) rec->ip, &new, sizeof(new)); |
134 | return -EPERM; | ||
135 | return 0; | 134 | return 0; |
136 | } | 135 | } |
137 | 136 | ||
@@ -159,8 +158,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) | |||
159 | /* Verify that the to be replaced code matches what we expect. */ | 158 | /* Verify that the to be replaced code matches what we expect. */ |
160 | if (memcmp(&orig, &old, sizeof(old))) | 159 | if (memcmp(&orig, &old, sizeof(old))) |
161 | return -EINVAL; | 160 | return -EINVAL; |
162 | if (probe_kernel_write((void *) rec->ip, &new, sizeof(new))) | 161 | s390_kernel_write((void *) rec->ip, &new, sizeof(new)); |
163 | return -EPERM; | ||
164 | return 0; | 162 | return 0; |
165 | } | 163 | } |
166 | 164 | ||
@@ -231,14 +229,16 @@ int ftrace_enable_ftrace_graph_caller(void) | |||
231 | { | 229 | { |
232 | u8 op = 0x04; /* set mask field to zero */ | 230 | u8 op = 0x04; /* set mask field to zero */ |
233 | 231 | ||
234 | return probe_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op)); | 232 | s390_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op)); |
233 | return 0; | ||
235 | } | 234 | } |
236 | 235 | ||
237 | int ftrace_disable_ftrace_graph_caller(void) | 236 | int ftrace_disable_ftrace_graph_caller(void) |
238 | { | 237 | { |
239 | u8 op = 0xf4; /* set mask field to all ones */ | 238 | u8 op = 0xf4; /* set mask field to all ones */ |
240 | 239 | ||
241 | return probe_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op)); | 240 | s390_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op)); |
241 | return 0; | ||
242 | } | 242 | } |
243 | 243 | ||
244 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 244 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S index 132f4c9ade60..59b7c6470567 100644 --- a/arch/s390/kernel/head.S +++ b/arch/s390/kernel/head.S | |||
@@ -27,11 +27,7 @@ | |||
27 | #include <asm/thread_info.h> | 27 | #include <asm/thread_info.h> |
28 | #include <asm/page.h> | 28 | #include <asm/page.h> |
29 | 29 | ||
30 | #ifdef CONFIG_64BIT | ||
31 | #define ARCH_OFFSET 4 | 30 | #define ARCH_OFFSET 4 |
32 | #else | ||
33 | #define ARCH_OFFSET 0 | ||
34 | #endif | ||
35 | 31 | ||
36 | __HEAD | 32 | __HEAD |
37 | 33 | ||
@@ -67,7 +63,6 @@ __HEAD | |||
67 | # subroutine to set architecture mode | 63 | # subroutine to set architecture mode |
68 | # | 64 | # |
69 | .Lsetmode: | 65 | .Lsetmode: |
70 | #ifdef CONFIG_64BIT | ||
71 | mvi __LC_AR_MODE_ID,1 # set esame flag | 66 | mvi __LC_AR_MODE_ID,1 # set esame flag |
72 | slr %r0,%r0 # set cpuid to zero | 67 | slr %r0,%r0 # set cpuid to zero |
73 | lhi %r1,2 # mode 2 = esame (dump) | 68 | lhi %r1,2 # mode 2 = esame (dump) |
@@ -76,16 +71,12 @@ __HEAD | |||
76 | .fill 16,4,0x0 | 71 | .fill 16,4,0x0 |
77 | 0: lmh %r0,%r15,0(%r13) # clear high-order half of gprs | 72 | 0: lmh %r0,%r15,0(%r13) # clear high-order half of gprs |
78 | sam31 # switch to 31 bit addressing mode | 73 | sam31 # switch to 31 bit addressing mode |
79 | #else | ||
80 | mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0) | ||
81 | #endif | ||
82 | br %r14 | 74 | br %r14 |
83 | 75 | ||
84 | # | 76 | # |
85 | # subroutine to wait for end I/O | 77 | # subroutine to wait for end I/O |
86 | # | 78 | # |
87 | .Lirqwait: | 79 | .Lirqwait: |
88 | #ifdef CONFIG_64BIT | ||
89 | mvc 0x1f0(16),.Lnewpsw # set up IO interrupt psw | 80 | mvc 0x1f0(16),.Lnewpsw # set up IO interrupt psw |
90 | lpsw .Lwaitpsw | 81 | lpsw .Lwaitpsw |
91 | .Lioint: | 82 | .Lioint: |
@@ -93,15 +84,6 @@ __HEAD | |||
93 | .align 8 | 84 | .align 8 |
94 | .Lnewpsw: | 85 | .Lnewpsw: |
95 | .quad 0x0000000080000000,.Lioint | 86 | .quad 0x0000000080000000,.Lioint |
96 | #else | ||
97 | mvc 0x78(8),.Lnewpsw # set up IO interrupt psw | ||
98 | lpsw .Lwaitpsw | ||
99 | .Lioint: | ||
100 | br %r14 | ||
101 | .align 8 | ||
102 | .Lnewpsw: | ||
103 | .long 0x00080000,0x80000000+.Lioint | ||
104 | #endif | ||
105 | .Lwaitpsw: | 87 | .Lwaitpsw: |
106 | .long 0x020a0000,0x80000000+.Lioint | 88 | .long 0x020a0000,0x80000000+.Lioint |
107 | 89 | ||
@@ -375,7 +357,6 @@ ENTRY(startup) | |||
375 | ENTRY(startup_kdump) | 357 | ENTRY(startup_kdump) |
376 | j .Lep_startup_kdump | 358 | j .Lep_startup_kdump |
377 | .Lep_startup_normal: | 359 | .Lep_startup_normal: |
378 | #ifdef CONFIG_64BIT | ||
379 | mvi __LC_AR_MODE_ID,1 # set esame flag | 360 | mvi __LC_AR_MODE_ID,1 # set esame flag |
380 | slr %r0,%r0 # set cpuid to zero | 361 | slr %r0,%r0 # set cpuid to zero |
381 | lhi %r1,2 # mode 2 = esame (dump) | 362 | lhi %r1,2 # mode 2 = esame (dump) |
@@ -384,9 +365,6 @@ ENTRY(startup_kdump) | |||
384 | .fill 16,4,0x0 | 365 | .fill 16,4,0x0 |
385 | 0: lmh %r0,%r15,0(%r13) # clear high-order half of gprs | 366 | 0: lmh %r0,%r15,0(%r13) # clear high-order half of gprs |
386 | sam31 # switch to 31 bit addressing mode | 367 | sam31 # switch to 31 bit addressing mode |
387 | #else | ||
388 | mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0) | ||
389 | #endif | ||
390 | basr %r13,0 # get base | 368 | basr %r13,0 # get base |
391 | .LPG0: | 369 | .LPG0: |
392 | xc 0x200(256),0x200 # partially clear lowcore | 370 | xc 0x200(256),0x200 # partially clear lowcore |
@@ -396,7 +374,6 @@ ENTRY(startup_kdump) | |||
396 | spt 6f-.LPG0(%r13) | 374 | spt 6f-.LPG0(%r13) |
397 | mvc __LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13) | 375 | mvc __LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13) |
398 | xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST | 376 | xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST |
399 | #ifndef CONFIG_MARCH_G5 | ||
400 | # check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10} | 377 | # check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10} |
401 | .insn s,0xb2b10000,0 # store facilities @ __LC_STFL_FAC_LIST | 378 | .insn s,0xb2b10000,0 # store facilities @ __LC_STFL_FAC_LIST |
402 | tm __LC_STFL_FAC_LIST,0x01 # stfle available ? | 379 | tm __LC_STFL_FAC_LIST,0x01 # stfle available ? |
@@ -435,7 +412,6 @@ ENTRY(startup_kdump) | |||
435 | # the kernel will crash. Format is number of facility words with bits set, | 412 | # the kernel will crash. Format is number of facility words with bits set, |
436 | # followed by the facility words. | 413 | # followed by the facility words. |
437 | 414 | ||
438 | #if defined(CONFIG_64BIT) | ||
439 | #if defined(CONFIG_MARCH_Z13) | 415 | #if defined(CONFIG_MARCH_Z13) |
440 | .long 3, 0xc100eff2, 0xf46ce800, 0x00400000 | 416 | .long 3, 0xc100eff2, 0xf46ce800, 0x00400000 |
441 | #elif defined(CONFIG_MARCH_ZEC12) | 417 | #elif defined(CONFIG_MARCH_ZEC12) |
@@ -451,35 +427,10 @@ ENTRY(startup_kdump) | |||
451 | #elif defined(CONFIG_MARCH_Z900) | 427 | #elif defined(CONFIG_MARCH_Z900) |
452 | .long 1, 0xc0000000 | 428 | .long 1, 0xc0000000 |
453 | #endif | 429 | #endif |
454 | #else | ||
455 | #if defined(CONFIG_MARCH_ZEC12) | ||
456 | .long 1, 0x8100c880 | ||
457 | #elif defined(CONFIG_MARCH_Z196) | ||
458 | .long 1, 0x8100c880 | ||
459 | #elif defined(CONFIG_MARCH_Z10) | ||
460 | .long 1, 0x8100c880 | ||
461 | #elif defined(CONFIG_MARCH_Z9_109) | ||
462 | .long 1, 0x8100c880 | ||
463 | #elif defined(CONFIG_MARCH_Z990) | ||
464 | .long 1, 0x80002000 | ||
465 | #elif defined(CONFIG_MARCH_Z900) | ||
466 | .long 1, 0x80000000 | ||
467 | #endif | ||
468 | #endif | ||
469 | 4: | 430 | 4: |
470 | #endif | ||
471 | |||
472 | #ifdef CONFIG_64BIT | ||
473 | /* Continue with 64bit startup code in head64.S */ | 431 | /* Continue with 64bit startup code in head64.S */ |
474 | sam64 # switch to 64 bit mode | 432 | sam64 # switch to 64 bit mode |
475 | jg startup_continue | 433 | jg startup_continue |
476 | #else | ||
477 | /* Continue with 31bit startup code in head31.S */ | ||
478 | l %r13,5f-.LPG0(%r13) | ||
479 | b 0(%r13) | ||
480 | .align 8 | ||
481 | 5: .long startup_continue | ||
482 | #endif | ||
483 | 434 | ||
484 | .align 8 | 435 | .align 8 |
485 | 6: .long 0x7fffffff,0xffffffff | 436 | 6: .long 0x7fffffff,0xffffffff |
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S deleted file mode 100644 index 6dbe80983a24..000000000000 --- a/arch/s390/kernel/head31.S +++ /dev/null | |||
@@ -1,106 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright IBM Corp. 2005, 2010 | ||
3 | * | ||
4 | * Author(s): Hartmut Penner <hp@de.ibm.com> | ||
5 | * Martin Schwidefsky <schwidefsky@de.ibm.com> | ||
6 | * Rob van der Heij <rvdhei@iae.nl> | ||
7 | * Heiko Carstens <heiko.carstens@de.ibm.com> | ||
8 | * | ||
9 | */ | ||
10 | |||
11 | #include <linux/init.h> | ||
12 | #include <linux/linkage.h> | ||
13 | #include <asm/asm-offsets.h> | ||
14 | #include <asm/thread_info.h> | ||
15 | #include <asm/page.h> | ||
16 | |||
17 | __HEAD | ||
18 | ENTRY(startup_continue) | ||
19 | basr %r13,0 # get base | ||
20 | .LPG1: | ||
21 | |||
22 | l %r1,.Lbase_cc-.LPG1(%r13) | ||
23 | mvc 0(8,%r1),__LC_LAST_UPDATE_CLOCK | ||
24 | lctl %c0,%c15,.Lctl-.LPG1(%r13) # load control registers | ||
25 | l %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area | ||
26 | # move IPL device to lowcore | ||
27 | # | ||
28 | # Setup stack | ||
29 | # | ||
30 | l %r15,.Linittu-.LPG1(%r13) | ||
31 | st %r15,__LC_THREAD_INFO # cache thread info in lowcore | ||
32 | mvc __LC_CURRENT(4),__TI_task(%r15) | ||
33 | ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union+THREAD_SIZE | ||
34 | st %r15,__LC_KERNEL_STACK # set end of kernel stack | ||
35 | ahi %r15,-96 | ||
36 | # | ||
37 | # Save ipl parameters, clear bss memory, initialize storage key for kernel pages, | ||
38 | # and create a kernel NSS if the SAVESYS= parm is defined | ||
39 | # | ||
40 | l %r14,.Lstartup_init-.LPG1(%r13) | ||
41 | basr %r14,%r14 | ||
42 | lpsw .Lentry-.LPG1(13) # jump to _stext in primary-space, | ||
43 | # virtual and never return ... | ||
44 | .align 8 | ||
45 | .Lentry:.long 0x00080000,0x80000000 + _stext | ||
46 | .Lctl: .long 0x04b50000 # cr0: various things | ||
47 | .long 0 # cr1: primary space segment table | ||
48 | .long .Lduct # cr2: dispatchable unit control table | ||
49 | .long 0 # cr3: instruction authorization | ||
50 | .long 0 # cr4: instruction authorization | ||
51 | .long .Lduct # cr5: primary-aste origin | ||
52 | .long 0 # cr6: I/O interrupts | ||
53 | .long 0 # cr7: secondary space segment table | ||
54 | .long 0 # cr8: access registers translation | ||
55 | .long 0 # cr9: tracing off | ||
56 | .long 0 # cr10: tracing off | ||
57 | .long 0 # cr11: tracing off | ||
58 | .long 0 # cr12: tracing off | ||
59 | .long 0 # cr13: home space segment table | ||
60 | .long 0xc0000000 # cr14: machine check handling off | ||
61 | .long 0 # cr15: linkage stack operations | ||
62 | .Lbss_bgn: .long __bss_start | ||
63 | .Lbss_end: .long _end | ||
64 | .Lparmaddr: .long PARMAREA | ||
65 | .Linittu: .long init_thread_union | ||
66 | .Lstartup_init: | ||
67 | .long startup_init | ||
68 | .align 64 | ||
69 | .Lduct: .long 0,0,0,0,.Lduald,0,0,0 | ||
70 | .long 0,0,0,0,0,0,0,0 | ||
71 | .align 128 | ||
72 | .Lduald:.rept 8 | ||
73 | .long 0x80000000,0,0,0 # invalid access-list entries | ||
74 | .endr | ||
75 | .Lbase_cc: | ||
76 | .long sched_clock_base_cc | ||
77 | |||
78 | ENTRY(_ehead) | ||
79 | |||
80 | .org 0x100000 - 0x11000 # head.o ends at 0x11000 | ||
81 | # | ||
82 | # startup-code, running in absolute addressing mode | ||
83 | # | ||
84 | ENTRY(_stext) | ||
85 | basr %r13,0 # get base | ||
86 | .LPG3: | ||
87 | # check control registers | ||
88 | stctl %c0,%c15,0(%r15) | ||
89 | oi 2(%r15),0x60 # enable sigp emergency & external call | ||
90 | oi 0(%r15),0x10 # switch on low address protection | ||
91 | lctl %c0,%c15,0(%r15) | ||
92 | |||
93 | # | ||
94 | lam 0,15,.Laregs-.LPG3(%r13) # load access regs needed by uaccess | ||
95 | l %r14,.Lstart-.LPG3(%r13) | ||
96 | basr %r14,%r14 # call start_kernel | ||
97 | # | ||
98 | # We returned from start_kernel ?!? PANIK | ||
99 | # | ||
100 | basr %r13,0 | ||
101 | lpsw .Ldw-.(%r13) # load disabled wait psw | ||
102 | # | ||
103 | .align 8 | ||
104 | .Ldw: .long 0x000a0000,0x00000000 | ||
105 | .Lstart:.long start_kernel | ||
106 | .Laregs:.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 | ||
diff --git a/arch/s390/kernel/head_kdump.S b/arch/s390/kernel/head_kdump.S index 085a95eb315f..d05950f02c34 100644 --- a/arch/s390/kernel/head_kdump.S +++ b/arch/s390/kernel/head_kdump.S | |||
@@ -92,17 +92,9 @@ startup_kdump_relocated: | |||
92 | #else | 92 | #else |
93 | .align 2 | 93 | .align 2 |
94 | .Lep_startup_kdump: | 94 | .Lep_startup_kdump: |
95 | #ifdef CONFIG_64BIT | ||
96 | larl %r13,startup_kdump_crash | 95 | larl %r13,startup_kdump_crash |
97 | lpswe 0(%r13) | 96 | lpswe 0(%r13) |
98 | .align 8 | 97 | .align 8 |
99 | startup_kdump_crash: | 98 | startup_kdump_crash: |
100 | .quad 0x0002000080000000,0x0000000000000000 + startup_kdump_crash | 99 | .quad 0x0002000080000000,0x0000000000000000 + startup_kdump_crash |
101 | #else | ||
102 | basr %r13,0 | ||
103 | 0: lpsw startup_kdump_crash-0b(%r13) | ||
104 | .align 8 | ||
105 | startup_kdump_crash: | ||
106 | .long 0x000a0000,0x00000000 + startup_kdump_crash | ||
107 | #endif /* CONFIG_64BIT */ | ||
108 | #endif /* CONFIG_CRASH_DUMP */ | 100 | #endif /* CONFIG_CRASH_DUMP */ |
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 5c8651f36509..52fbef91d1d9 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c | |||
@@ -182,24 +182,21 @@ EXPORT_SYMBOL_GPL(diag308); | |||
182 | 182 | ||
183 | /* SYSFS */ | 183 | /* SYSFS */ |
184 | 184 | ||
185 | #define DEFINE_IPL_ATTR_RO(_prefix, _name, _format, _value) \ | 185 | #define IPL_ATTR_SHOW_FN(_prefix, _name, _format, args...) \ |
186 | static ssize_t sys_##_prefix##_##_name##_show(struct kobject *kobj, \ | 186 | static ssize_t sys_##_prefix##_##_name##_show(struct kobject *kobj, \ |
187 | struct kobj_attribute *attr, \ | 187 | struct kobj_attribute *attr, \ |
188 | char *page) \ | 188 | char *page) \ |
189 | { \ | 189 | { \ |
190 | return sprintf(page, _format, _value); \ | 190 | return snprintf(page, PAGE_SIZE, _format, ##args); \ |
191 | } \ | 191 | } |
192 | |||
193 | #define DEFINE_IPL_ATTR_RO(_prefix, _name, _format, _value) \ | ||
194 | IPL_ATTR_SHOW_FN(_prefix, _name, _format, _value) \ | ||
192 | static struct kobj_attribute sys_##_prefix##_##_name##_attr = \ | 195 | static struct kobj_attribute sys_##_prefix##_##_name##_attr = \ |
193 | __ATTR(_name, S_IRUGO, sys_##_prefix##_##_name##_show, NULL); | 196 | __ATTR(_name, S_IRUGO, sys_##_prefix##_##_name##_show, NULL) |
194 | 197 | ||
195 | #define DEFINE_IPL_ATTR_RW(_prefix, _name, _fmt_out, _fmt_in, _value) \ | 198 | #define DEFINE_IPL_ATTR_RW(_prefix, _name, _fmt_out, _fmt_in, _value) \ |
196 | static ssize_t sys_##_prefix##_##_name##_show(struct kobject *kobj, \ | 199 | IPL_ATTR_SHOW_FN(_prefix, _name, _fmt_out, (unsigned long long) _value) \ |
197 | struct kobj_attribute *attr, \ | ||
198 | char *page) \ | ||
199 | { \ | ||
200 | return sprintf(page, _fmt_out, \ | ||
201 | (unsigned long long) _value); \ | ||
202 | } \ | ||
203 | static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \ | 200 | static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \ |
204 | struct kobj_attribute *attr, \ | 201 | struct kobj_attribute *attr, \ |
205 | const char *buf, size_t len) \ | 202 | const char *buf, size_t len) \ |
@@ -213,15 +210,10 @@ static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \ | |||
213 | static struct kobj_attribute sys_##_prefix##_##_name##_attr = \ | 210 | static struct kobj_attribute sys_##_prefix##_##_name##_attr = \ |
214 | __ATTR(_name,(S_IRUGO | S_IWUSR), \ | 211 | __ATTR(_name,(S_IRUGO | S_IWUSR), \ |
215 | sys_##_prefix##_##_name##_show, \ | 212 | sys_##_prefix##_##_name##_show, \ |
216 | sys_##_prefix##_##_name##_store); | 213 | sys_##_prefix##_##_name##_store) |
217 | 214 | ||
218 | #define DEFINE_IPL_ATTR_STR_RW(_prefix, _name, _fmt_out, _fmt_in, _value)\ | 215 | #define DEFINE_IPL_ATTR_STR_RW(_prefix, _name, _fmt_out, _fmt_in, _value)\ |
219 | static ssize_t sys_##_prefix##_##_name##_show(struct kobject *kobj, \ | 216 | IPL_ATTR_SHOW_FN(_prefix, _name, _fmt_out, _value) \ |
220 | struct kobj_attribute *attr, \ | ||
221 | char *page) \ | ||
222 | { \ | ||
223 | return sprintf(page, _fmt_out, _value); \ | ||
224 | } \ | ||
225 | static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \ | 217 | static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \ |
226 | struct kobj_attribute *attr, \ | 218 | struct kobj_attribute *attr, \ |
227 | const char *buf, size_t len) \ | 219 | const char *buf, size_t len) \ |
@@ -233,7 +225,7 @@ static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \ | |||
233 | static struct kobj_attribute sys_##_prefix##_##_name##_attr = \ | 225 | static struct kobj_attribute sys_##_prefix##_##_name##_attr = \ |
234 | __ATTR(_name,(S_IRUGO | S_IWUSR), \ | 226 | __ATTR(_name,(S_IRUGO | S_IWUSR), \ |
235 | sys_##_prefix##_##_name##_show, \ | 227 | sys_##_prefix##_##_name##_show, \ |
236 | sys_##_prefix##_##_name##_store); | 228 | sys_##_prefix##_##_name##_store) |
237 | 229 | ||
238 | static void make_attrs_ro(struct attribute **attrs) | 230 | static void make_attrs_ro(struct attribute **attrs) |
239 | { | 231 | { |
@@ -415,15 +407,9 @@ static ssize_t ipl_parameter_read(struct file *filp, struct kobject *kobj, | |||
415 | return memory_read_from_buffer(buf, count, &off, IPL_PARMBLOCK_START, | 407 | return memory_read_from_buffer(buf, count, &off, IPL_PARMBLOCK_START, |
416 | IPL_PARMBLOCK_SIZE); | 408 | IPL_PARMBLOCK_SIZE); |
417 | } | 409 | } |
418 | 410 | static struct bin_attribute ipl_parameter_attr = | |
419 | static struct bin_attribute ipl_parameter_attr = { | 411 | __BIN_ATTR(binary_parameter, S_IRUGO, ipl_parameter_read, NULL, |
420 | .attr = { | 412 | PAGE_SIZE); |
421 | .name = "binary_parameter", | ||
422 | .mode = S_IRUGO, | ||
423 | }, | ||
424 | .size = PAGE_SIZE, | ||
425 | .read = &ipl_parameter_read, | ||
426 | }; | ||
427 | 413 | ||
428 | static ssize_t ipl_scp_data_read(struct file *filp, struct kobject *kobj, | 414 | static ssize_t ipl_scp_data_read(struct file *filp, struct kobject *kobj, |
429 | struct bin_attribute *attr, char *buf, | 415 | struct bin_attribute *attr, char *buf, |
@@ -434,14 +420,13 @@ static ssize_t ipl_scp_data_read(struct file *filp, struct kobject *kobj, | |||
434 | 420 | ||
435 | return memory_read_from_buffer(buf, count, &off, scp_data, size); | 421 | return memory_read_from_buffer(buf, count, &off, scp_data, size); |
436 | } | 422 | } |
423 | static struct bin_attribute ipl_scp_data_attr = | ||
424 | __BIN_ATTR(scp_data, S_IRUGO, ipl_scp_data_read, NULL, PAGE_SIZE); | ||
437 | 425 | ||
438 | static struct bin_attribute ipl_scp_data_attr = { | 426 | static struct bin_attribute *ipl_fcp_bin_attrs[] = { |
439 | .attr = { | 427 | &ipl_parameter_attr, |
440 | .name = "scp_data", | 428 | &ipl_scp_data_attr, |
441 | .mode = S_IRUGO, | 429 | NULL, |
442 | }, | ||
443 | .size = PAGE_SIZE, | ||
444 | .read = ipl_scp_data_read, | ||
445 | }; | 430 | }; |
446 | 431 | ||
447 | /* FCP ipl device attributes */ | 432 | /* FCP ipl device attributes */ |
@@ -484,6 +469,7 @@ static struct attribute *ipl_fcp_attrs[] = { | |||
484 | 469 | ||
485 | static struct attribute_group ipl_fcp_attr_group = { | 470 | static struct attribute_group ipl_fcp_attr_group = { |
486 | .attrs = ipl_fcp_attrs, | 471 | .attrs = ipl_fcp_attrs, |
472 | .bin_attrs = ipl_fcp_bin_attrs, | ||
487 | }; | 473 | }; |
488 | 474 | ||
489 | /* CCW ipl device attributes */ | 475 | /* CCW ipl device attributes */ |
@@ -540,28 +526,6 @@ static struct attribute_group ipl_unknown_attr_group = { | |||
540 | 526 | ||
541 | static struct kset *ipl_kset; | 527 | static struct kset *ipl_kset; |
542 | 528 | ||
543 | static int __init ipl_register_fcp_files(void) | ||
544 | { | ||
545 | int rc; | ||
546 | |||
547 | rc = sysfs_create_group(&ipl_kset->kobj, &ipl_fcp_attr_group); | ||
548 | if (rc) | ||
549 | goto out; | ||
550 | rc = sysfs_create_bin_file(&ipl_kset->kobj, &ipl_parameter_attr); | ||
551 | if (rc) | ||
552 | goto out_ipl_parm; | ||
553 | rc = sysfs_create_bin_file(&ipl_kset->kobj, &ipl_scp_data_attr); | ||
554 | if (!rc) | ||
555 | goto out; | ||
556 | |||
557 | sysfs_remove_bin_file(&ipl_kset->kobj, &ipl_parameter_attr); | ||
558 | |||
559 | out_ipl_parm: | ||
560 | sysfs_remove_group(&ipl_kset->kobj, &ipl_fcp_attr_group); | ||
561 | out: | ||
562 | return rc; | ||
563 | } | ||
564 | |||
565 | static void __ipl_run(void *unused) | 529 | static void __ipl_run(void *unused) |
566 | { | 530 | { |
567 | diag308(DIAG308_IPL, NULL); | 531 | diag308(DIAG308_IPL, NULL); |
@@ -596,7 +560,7 @@ static int __init ipl_init(void) | |||
596 | break; | 560 | break; |
597 | case IPL_TYPE_FCP: | 561 | case IPL_TYPE_FCP: |
598 | case IPL_TYPE_FCP_DUMP: | 562 | case IPL_TYPE_FCP_DUMP: |
599 | rc = ipl_register_fcp_files(); | 563 | rc = sysfs_create_group(&ipl_kset->kobj, &ipl_fcp_attr_group); |
600 | break; | 564 | break; |
601 | case IPL_TYPE_NSS: | 565 | case IPL_TYPE_NSS: |
602 | rc = sysfs_create_group(&ipl_kset->kobj, &ipl_nss_attr_group); | 566 | rc = sysfs_create_group(&ipl_kset->kobj, &ipl_nss_attr_group); |
@@ -744,15 +708,13 @@ static ssize_t reipl_fcp_scpdata_write(struct file *filp, struct kobject *kobj, | |||
744 | 708 | ||
745 | return count; | 709 | return count; |
746 | } | 710 | } |
711 | static struct bin_attribute sys_reipl_fcp_scp_data_attr = | ||
712 | __BIN_ATTR(scp_data, (S_IRUGO | S_IWUSR), reipl_fcp_scpdata_read, | ||
713 | reipl_fcp_scpdata_write, PAGE_SIZE); | ||
747 | 714 | ||
748 | static struct bin_attribute sys_reipl_fcp_scp_data_attr = { | 715 | static struct bin_attribute *reipl_fcp_bin_attrs[] = { |
749 | .attr = { | 716 | &sys_reipl_fcp_scp_data_attr, |
750 | .name = "scp_data", | 717 | NULL, |
751 | .mode = S_IRUGO | S_IWUSR, | ||
752 | }, | ||
753 | .size = PAGE_SIZE, | ||
754 | .read = reipl_fcp_scpdata_read, | ||
755 | .write = reipl_fcp_scpdata_write, | ||
756 | }; | 718 | }; |
757 | 719 | ||
758 | DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%llx\n", | 720 | DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%llx\n", |
@@ -841,6 +803,7 @@ static struct attribute *reipl_fcp_attrs[] = { | |||
841 | 803 | ||
842 | static struct attribute_group reipl_fcp_attr_group = { | 804 | static struct attribute_group reipl_fcp_attr_group = { |
843 | .attrs = reipl_fcp_attrs, | 805 | .attrs = reipl_fcp_attrs, |
806 | .bin_attrs = reipl_fcp_bin_attrs, | ||
844 | }; | 807 | }; |
845 | 808 | ||
846 | /* CCW reipl device attributes */ | 809 | /* CCW reipl device attributes */ |
@@ -1261,15 +1224,6 @@ static int __init reipl_fcp_init(void) | |||
1261 | return rc; | 1224 | return rc; |
1262 | } | 1225 | } |
1263 | 1226 | ||
1264 | rc = sysfs_create_bin_file(&reipl_fcp_kset->kobj, | ||
1265 | &sys_reipl_fcp_scp_data_attr); | ||
1266 | if (rc) { | ||
1267 | sysfs_remove_group(&reipl_fcp_kset->kobj, &reipl_fcp_attr_group); | ||
1268 | kset_unregister(reipl_fcp_kset); | ||
1269 | free_page((unsigned long) reipl_block_fcp); | ||
1270 | return rc; | ||
1271 | } | ||
1272 | |||
1273 | if (ipl_info.type == IPL_TYPE_FCP) { | 1227 | if (ipl_info.type == IPL_TYPE_FCP) { |
1274 | memcpy(reipl_block_fcp, IPL_PARMBLOCK_START, PAGE_SIZE); | 1228 | memcpy(reipl_block_fcp, IPL_PARMBLOCK_START, PAGE_SIZE); |
1275 | /* | 1229 | /* |
@@ -1713,9 +1667,7 @@ static ssize_t on_reboot_store(struct kobject *kobj, | |||
1713 | { | 1667 | { |
1714 | return set_trigger(buf, &on_reboot_trigger, len); | 1668 | return set_trigger(buf, &on_reboot_trigger, len); |
1715 | } | 1669 | } |
1716 | 1670 | static struct kobj_attribute on_reboot_attr = __ATTR_RW(on_reboot); | |
1717 | static struct kobj_attribute on_reboot_attr = | ||
1718 | __ATTR(on_reboot, 0644, on_reboot_show, on_reboot_store); | ||
1719 | 1671 | ||
1720 | static void do_machine_restart(char *__unused) | 1672 | static void do_machine_restart(char *__unused) |
1721 | { | 1673 | { |
@@ -1741,9 +1693,7 @@ static ssize_t on_panic_store(struct kobject *kobj, | |||
1741 | { | 1693 | { |
1742 | return set_trigger(buf, &on_panic_trigger, len); | 1694 | return set_trigger(buf, &on_panic_trigger, len); |
1743 | } | 1695 | } |
1744 | 1696 | static struct kobj_attribute on_panic_attr = __ATTR_RW(on_panic); | |
1745 | static struct kobj_attribute on_panic_attr = | ||
1746 | __ATTR(on_panic, 0644, on_panic_show, on_panic_store); | ||
1747 | 1697 | ||
1748 | static void do_panic(void) | 1698 | static void do_panic(void) |
1749 | { | 1699 | { |
@@ -1769,9 +1719,7 @@ static ssize_t on_restart_store(struct kobject *kobj, | |||
1769 | { | 1719 | { |
1770 | return set_trigger(buf, &on_restart_trigger, len); | 1720 | return set_trigger(buf, &on_restart_trigger, len); |
1771 | } | 1721 | } |
1772 | 1722 | static struct kobj_attribute on_restart_attr = __ATTR_RW(on_restart); | |
1773 | static struct kobj_attribute on_restart_attr = | ||
1774 | __ATTR(on_restart, 0644, on_restart_show, on_restart_store); | ||
1775 | 1723 | ||
1776 | static void __do_restart(void *ignore) | 1724 | static void __do_restart(void *ignore) |
1777 | { | 1725 | { |
@@ -1808,10 +1756,7 @@ static ssize_t on_halt_store(struct kobject *kobj, | |||
1808 | { | 1756 | { |
1809 | return set_trigger(buf, &on_halt_trigger, len); | 1757 | return set_trigger(buf, &on_halt_trigger, len); |
1810 | } | 1758 | } |
1811 | 1759 | static struct kobj_attribute on_halt_attr = __ATTR_RW(on_halt); | |
1812 | static struct kobj_attribute on_halt_attr = | ||
1813 | __ATTR(on_halt, 0644, on_halt_show, on_halt_store); | ||
1814 | |||
1815 | 1760 | ||
1816 | static void do_machine_halt(void) | 1761 | static void do_machine_halt(void) |
1817 | { | 1762 | { |
@@ -1837,10 +1782,7 @@ static ssize_t on_poff_store(struct kobject *kobj, | |||
1837 | { | 1782 | { |
1838 | return set_trigger(buf, &on_poff_trigger, len); | 1783 | return set_trigger(buf, &on_poff_trigger, len); |
1839 | } | 1784 | } |
1840 | 1785 | static struct kobj_attribute on_poff_attr = __ATTR_RW(on_poff); | |
1841 | static struct kobj_attribute on_poff_attr = | ||
1842 | __ATTR(on_poff, 0644, on_poff_show, on_poff_store); | ||
1843 | |||
1844 | 1786 | ||
1845 | static void do_machine_power_off(void) | 1787 | static void do_machine_power_off(void) |
1846 | { | 1788 | { |
@@ -1850,26 +1792,27 @@ static void do_machine_power_off(void) | |||
1850 | } | 1792 | } |
1851 | void (*_machine_power_off)(void) = do_machine_power_off; | 1793 | void (*_machine_power_off)(void) = do_machine_power_off; |
1852 | 1794 | ||
1795 | static struct attribute *shutdown_action_attrs[] = { | ||
1796 | &on_restart_attr.attr, | ||
1797 | &on_reboot_attr.attr, | ||
1798 | &on_panic_attr.attr, | ||
1799 | &on_halt_attr.attr, | ||
1800 | &on_poff_attr.attr, | ||
1801 | NULL, | ||
1802 | }; | ||
1803 | |||
1804 | static struct attribute_group shutdown_action_attr_group = { | ||
1805 | .attrs = shutdown_action_attrs, | ||
1806 | }; | ||
1807 | |||
1853 | static void __init shutdown_triggers_init(void) | 1808 | static void __init shutdown_triggers_init(void) |
1854 | { | 1809 | { |
1855 | shutdown_actions_kset = kset_create_and_add("shutdown_actions", NULL, | 1810 | shutdown_actions_kset = kset_create_and_add("shutdown_actions", NULL, |
1856 | firmware_kobj); | 1811 | firmware_kobj); |
1857 | if (!shutdown_actions_kset) | 1812 | if (!shutdown_actions_kset) |
1858 | goto fail; | 1813 | goto fail; |
1859 | if (sysfs_create_file(&shutdown_actions_kset->kobj, | 1814 | if (sysfs_create_group(&shutdown_actions_kset->kobj, |
1860 | &on_reboot_attr.attr)) | 1815 | &shutdown_action_attr_group)) |
1861 | goto fail; | ||
1862 | if (sysfs_create_file(&shutdown_actions_kset->kobj, | ||
1863 | &on_panic_attr.attr)) | ||
1864 | goto fail; | ||
1865 | if (sysfs_create_file(&shutdown_actions_kset->kobj, | ||
1866 | &on_halt_attr.attr)) | ||
1867 | goto fail; | ||
1868 | if (sysfs_create_file(&shutdown_actions_kset->kobj, | ||
1869 | &on_poff_attr.attr)) | ||
1870 | goto fail; | ||
1871 | if (sysfs_create_file(&shutdown_actions_kset->kobj, | ||
1872 | &on_restart_attr.attr)) | ||
1873 | goto fail; | 1816 | goto fail; |
1874 | return; | 1817 | return; |
1875 | fail: | 1818 | fail: |
@@ -2062,12 +2005,10 @@ static void do_reset_calls(void) | |||
2062 | { | 2005 | { |
2063 | struct reset_call *reset; | 2006 | struct reset_call *reset; |
2064 | 2007 | ||
2065 | #ifdef CONFIG_64BIT | ||
2066 | if (diag308_set_works) { | 2008 | if (diag308_set_works) { |
2067 | diag308_reset(); | 2009 | diag308_reset(); |
2068 | return; | 2010 | return; |
2069 | } | 2011 | } |
2070 | #endif | ||
2071 | list_for_each_entry(reset, &rcall, list) | 2012 | list_for_each_entry(reset, &rcall, list) |
2072 | reset->fn(); | 2013 | reset->fn(); |
2073 | } | 2014 | } |
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c index f238720690f3..02ab9aa3812e 100644 --- a/arch/s390/kernel/irq.c +++ b/arch/s390/kernel/irq.c | |||
@@ -56,7 +56,7 @@ static const struct irq_class irqclass_main_desc[NR_IRQS_BASE] = { | |||
56 | * /proc/interrupts. | 56 | * /proc/interrupts. |
57 | * In addition this list contains non external / I/O events like NMIs. | 57 | * In addition this list contains non external / I/O events like NMIs. |
58 | */ | 58 | */ |
59 | static const struct irq_class irqclass_sub_desc[NR_ARCH_IRQS] = { | 59 | static const struct irq_class irqclass_sub_desc[] = { |
60 | {.irq = IRQEXT_CLK, .name = "CLK", .desc = "[EXT] Clock Comparator"}, | 60 | {.irq = IRQEXT_CLK, .name = "CLK", .desc = "[EXT] Clock Comparator"}, |
61 | {.irq = IRQEXT_EXC, .name = "EXC", .desc = "[EXT] External Call"}, | 61 | {.irq = IRQEXT_EXC, .name = "EXC", .desc = "[EXT] External Call"}, |
62 | {.irq = IRQEXT_EMS, .name = "EMS", .desc = "[EXT] Emergency Signal"}, | 62 | {.irq = IRQEXT_EMS, .name = "EMS", .desc = "[EXT] Emergency Signal"}, |
@@ -94,6 +94,7 @@ static const struct irq_class irqclass_sub_desc[NR_ARCH_IRQS] = { | |||
94 | 94 | ||
95 | void __init init_IRQ(void) | 95 | void __init init_IRQ(void) |
96 | { | 96 | { |
97 | BUILD_BUG_ON(ARRAY_SIZE(irqclass_sub_desc) != NR_ARCH_IRQS); | ||
97 | init_cio_interrupts(); | 98 | init_cio_interrupts(); |
98 | init_airq_interrupts(); | 99 | init_airq_interrupts(); |
99 | init_ext_interrupts(); | 100 | init_ext_interrupts(); |
diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c index 830066f936c8..a90299600483 100644 --- a/arch/s390/kernel/jump_label.c +++ b/arch/s390/kernel/jump_label.c | |||
@@ -78,7 +78,7 @@ static void __jump_label_transform(struct jump_entry *entry, | |||
78 | if (memcmp((void *)entry->code, &old, sizeof(old))) | 78 | if (memcmp((void *)entry->code, &old, sizeof(old))) |
79 | jump_label_bug(entry, &old, &new); | 79 | jump_label_bug(entry, &old, &new); |
80 | } | 80 | } |
81 | probe_kernel_write((void *)entry->code, &new, sizeof(new)); | 81 | s390_kernel_write((void *)entry->code, &new, sizeof(new)); |
82 | } | 82 | } |
83 | 83 | ||
84 | static int __sm_arch_jump_label_transform(void *data) | 84 | static int __sm_arch_jump_label_transform(void *data) |
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index f516edc1fbe3..389db56a2208 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c | |||
@@ -178,7 +178,7 @@ static int swap_instruction(void *data) | |||
178 | } | 178 | } |
179 | skip_ftrace: | 179 | skip_ftrace: |
180 | kcb->kprobe_status = KPROBE_SWAP_INST; | 180 | kcb->kprobe_status = KPROBE_SWAP_INST; |
181 | probe_kernel_write(p->addr, &new_insn, len); | 181 | s390_kernel_write(p->addr, &new_insn, len); |
182 | kcb->kprobe_status = status; | 182 | kcb->kprobe_status = status; |
183 | return 0; | 183 | return 0; |
184 | } | 184 | } |
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c index 2ca95862e336..0c1a679314dd 100644 --- a/arch/s390/kernel/module.c +++ b/arch/s390/kernel/module.c | |||
@@ -38,13 +38,8 @@ | |||
38 | #define DEBUGP(fmt , ...) | 38 | #define DEBUGP(fmt , ...) |
39 | #endif | 39 | #endif |
40 | 40 | ||
41 | #ifndef CONFIG_64BIT | ||
42 | #define PLT_ENTRY_SIZE 12 | ||
43 | #else /* CONFIG_64BIT */ | ||
44 | #define PLT_ENTRY_SIZE 20 | 41 | #define PLT_ENTRY_SIZE 20 |
45 | #endif /* CONFIG_64BIT */ | ||
46 | 42 | ||
47 | #ifdef CONFIG_64BIT | ||
48 | void *module_alloc(unsigned long size) | 43 | void *module_alloc(unsigned long size) |
49 | { | 44 | { |
50 | if (PAGE_ALIGN(size) > MODULES_LEN) | 45 | if (PAGE_ALIGN(size) > MODULES_LEN) |
@@ -53,7 +48,6 @@ void *module_alloc(unsigned long size) | |||
53 | GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE, | 48 | GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE, |
54 | __builtin_return_address(0)); | 49 | __builtin_return_address(0)); |
55 | } | 50 | } |
56 | #endif | ||
57 | 51 | ||
58 | void module_arch_freeing_init(struct module *mod) | 52 | void module_arch_freeing_init(struct module *mod) |
59 | { | 53 | { |
@@ -323,17 +317,11 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, | |||
323 | unsigned int *ip; | 317 | unsigned int *ip; |
324 | ip = me->module_core + me->arch.plt_offset + | 318 | ip = me->module_core + me->arch.plt_offset + |
325 | info->plt_offset; | 319 | info->plt_offset; |
326 | #ifndef CONFIG_64BIT | ||
327 | ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */ | ||
328 | ip[1] = 0x100607f1; | ||
329 | ip[2] = val; | ||
330 | #else /* CONFIG_64BIT */ | ||
331 | ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */ | 320 | ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */ |
332 | ip[1] = 0x100a0004; | 321 | ip[1] = 0x100a0004; |
333 | ip[2] = 0x07f10000; | 322 | ip[2] = 0x07f10000; |
334 | ip[3] = (unsigned int) (val >> 32); | 323 | ip[3] = (unsigned int) (val >> 32); |
335 | ip[4] = (unsigned int) val; | 324 | ip[4] = (unsigned int) val; |
336 | #endif /* CONFIG_64BIT */ | ||
337 | info->plt_initialized = 1; | 325 | info->plt_initialized = 1; |
338 | } | 326 | } |
339 | if (r_type == R_390_PLTOFF16 || | 327 | if (r_type == R_390_PLTOFF16 || |
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c index 3f51cf4e8f02..505c17c0ae1a 100644 --- a/arch/s390/kernel/nmi.c +++ b/arch/s390/kernel/nmi.c | |||
@@ -117,55 +117,36 @@ static int notrace s390_revalidate_registers(struct mci *mci) | |||
117 | */ | 117 | */ |
118 | kill_task = 1; | 118 | kill_task = 1; |
119 | } | 119 | } |
120 | #ifndef CONFIG_64BIT | 120 | fpt_save_area = &S390_lowcore.floating_pt_save_area; |
121 | fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area; | ||
122 | if (!mci->fc) { | ||
123 | /* | ||
124 | * Floating point control register can't be restored. | ||
125 | * Task will be terminated. | ||
126 | */ | ||
127 | asm volatile("lfpc 0(%0)" : : "a" (&zero), "m" (zero)); | ||
128 | kill_task = 1; | ||
129 | } else | ||
130 | asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area)); | ||
131 | |||
121 | asm volatile( | 132 | asm volatile( |
122 | " ld 0,0(%0)\n" | 133 | " ld 0,0(%0)\n" |
123 | " ld 2,8(%0)\n" | 134 | " ld 1,8(%0)\n" |
124 | " ld 4,16(%0)\n" | 135 | " ld 2,16(%0)\n" |
125 | " ld 6,24(%0)" | 136 | " ld 3,24(%0)\n" |
126 | : : "a" (&S390_lowcore.floating_pt_save_area)); | 137 | " ld 4,32(%0)\n" |
127 | #endif | 138 | " ld 5,40(%0)\n" |
128 | 139 | " ld 6,48(%0)\n" | |
129 | if (MACHINE_HAS_IEEE) { | 140 | " ld 7,56(%0)\n" |
130 | #ifdef CONFIG_64BIT | 141 | " ld 8,64(%0)\n" |
131 | fpt_save_area = &S390_lowcore.floating_pt_save_area; | 142 | " ld 9,72(%0)\n" |
132 | fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area; | 143 | " ld 10,80(%0)\n" |
133 | #else | 144 | " ld 11,88(%0)\n" |
134 | fpt_save_area = (void *) S390_lowcore.extended_save_area_addr; | 145 | " ld 12,96(%0)\n" |
135 | fpt_creg_save_area = fpt_save_area + 128; | 146 | " ld 13,104(%0)\n" |
136 | #endif | 147 | " ld 14,112(%0)\n" |
137 | if (!mci->fc) { | 148 | " ld 15,120(%0)\n" |
138 | /* | 149 | : : "a" (fpt_save_area)); |
139 | * Floating point control register can't be restored. | ||
140 | * Task will be terminated. | ||
141 | */ | ||
142 | asm volatile("lfpc 0(%0)" : : "a" (&zero), "m" (zero)); | ||
143 | kill_task = 1; | ||
144 | |||
145 | } else | ||
146 | asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area)); | ||
147 | |||
148 | asm volatile( | ||
149 | " ld 0,0(%0)\n" | ||
150 | " ld 1,8(%0)\n" | ||
151 | " ld 2,16(%0)\n" | ||
152 | " ld 3,24(%0)\n" | ||
153 | " ld 4,32(%0)\n" | ||
154 | " ld 5,40(%0)\n" | ||
155 | " ld 6,48(%0)\n" | ||
156 | " ld 7,56(%0)\n" | ||
157 | " ld 8,64(%0)\n" | ||
158 | " ld 9,72(%0)\n" | ||
159 | " ld 10,80(%0)\n" | ||
160 | " ld 11,88(%0)\n" | ||
161 | " ld 12,96(%0)\n" | ||
162 | " ld 13,104(%0)\n" | ||
163 | " ld 14,112(%0)\n" | ||
164 | " ld 15,120(%0)\n" | ||
165 | : : "a" (fpt_save_area)); | ||
166 | } | ||
167 | |||
168 | #ifdef CONFIG_64BIT | ||
169 | /* Revalidate vector registers */ | 150 | /* Revalidate vector registers */ |
170 | if (MACHINE_HAS_VX && current->thread.vxrs) { | 151 | if (MACHINE_HAS_VX && current->thread.vxrs) { |
171 | if (!mci->vr) { | 152 | if (!mci->vr) { |
@@ -178,7 +159,6 @@ static int notrace s390_revalidate_registers(struct mci *mci) | |||
178 | restore_vx_regs((__vector128 *) | 159 | restore_vx_regs((__vector128 *) |
179 | S390_lowcore.vector_save_area_addr); | 160 | S390_lowcore.vector_save_area_addr); |
180 | } | 161 | } |
181 | #endif | ||
182 | /* Revalidate access registers */ | 162 | /* Revalidate access registers */ |
183 | asm volatile( | 163 | asm volatile( |
184 | " lam 0,15,0(%0)" | 164 | " lam 0,15,0(%0)" |
@@ -198,21 +178,14 @@ static int notrace s390_revalidate_registers(struct mci *mci) | |||
198 | */ | 178 | */ |
199 | s390_handle_damage("invalid control registers."); | 179 | s390_handle_damage("invalid control registers."); |
200 | } else { | 180 | } else { |
201 | #ifdef CONFIG_64BIT | ||
202 | asm volatile( | 181 | asm volatile( |
203 | " lctlg 0,15,0(%0)" | 182 | " lctlg 0,15,0(%0)" |
204 | : : "a" (&S390_lowcore.cregs_save_area)); | 183 | : : "a" (&S390_lowcore.cregs_save_area)); |
205 | #else | ||
206 | asm volatile( | ||
207 | " lctl 0,15,0(%0)" | ||
208 | : : "a" (&S390_lowcore.cregs_save_area)); | ||
209 | #endif | ||
210 | } | 184 | } |
211 | /* | 185 | /* |
212 | * We don't even try to revalidate the TOD register, since we simply | 186 | * We don't even try to revalidate the TOD register, since we simply |
213 | * can't write something sensible into that register. | 187 | * can't write something sensible into that register. |
214 | */ | 188 | */ |
215 | #ifdef CONFIG_64BIT | ||
216 | /* | 189 | /* |
217 | * See if we can revalidate the TOD programmable register with its | 190 | * See if we can revalidate the TOD programmable register with its |
218 | * old contents (should be zero) otherwise set it to zero. | 191 | * old contents (should be zero) otherwise set it to zero. |
@@ -228,7 +201,6 @@ static int notrace s390_revalidate_registers(struct mci *mci) | |||
228 | " sckpf" | 201 | " sckpf" |
229 | : : "a" (&S390_lowcore.tod_progreg_save_area) | 202 | : : "a" (&S390_lowcore.tod_progreg_save_area) |
230 | : "0", "cc"); | 203 | : "0", "cc"); |
231 | #endif | ||
232 | /* Revalidate clock comparator register */ | 204 | /* Revalidate clock comparator register */ |
233 | set_clock_comparator(S390_lowcore.clock_comparator); | 205 | set_clock_comparator(S390_lowcore.clock_comparator); |
234 | /* Check if old PSW is valid */ | 206 | /* Check if old PSW is valid */ |
@@ -280,19 +252,11 @@ void notrace s390_do_machine_check(struct pt_regs *regs) | |||
280 | if (mci->b) { | 252 | if (mci->b) { |
281 | /* Processing backup -> verify if we can survive this */ | 253 | /* Processing backup -> verify if we can survive this */ |
282 | u64 z_mcic, o_mcic, t_mcic; | 254 | u64 z_mcic, o_mcic, t_mcic; |
283 | #ifdef CONFIG_64BIT | ||
284 | z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<29); | 255 | z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<29); |
285 | o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 | | 256 | o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 | |
286 | 1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 | | 257 | 1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 | |
287 | 1ULL<<30 | 1ULL<<21 | 1ULL<<20 | 1ULL<<17 | | 258 | 1ULL<<30 | 1ULL<<21 | 1ULL<<20 | 1ULL<<17 | |
288 | 1ULL<<16); | 259 | 1ULL<<16); |
289 | #else | ||
290 | z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<57 | 1ULL<<50 | | ||
291 | 1ULL<<29); | ||
292 | o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 | | ||
293 | 1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 | | ||
294 | 1ULL<<30 | 1ULL<<20 | 1ULL<<17 | 1ULL<<16); | ||
295 | #endif | ||
296 | t_mcic = *(u64 *)mci; | 260 | t_mcic = *(u64 *)mci; |
297 | 261 | ||
298 | if (((t_mcic & z_mcic) != 0) || | 262 | if (((t_mcic & z_mcic) != 0) || |
diff --git a/arch/s390/kernel/pgm_check.S b/arch/s390/kernel/pgm_check.S index f6f8886399f6..036aa01d06a9 100644 --- a/arch/s390/kernel/pgm_check.S +++ b/arch/s390/kernel/pgm_check.S | |||
@@ -6,19 +6,13 @@ | |||
6 | 6 | ||
7 | #include <linux/linkage.h> | 7 | #include <linux/linkage.h> |
8 | 8 | ||
9 | #ifdef CONFIG_32BIT | ||
10 | #define PGM_CHECK_64BIT(handler) .long default_trap_handler | ||
11 | #else | ||
12 | #define PGM_CHECK_64BIT(handler) .long handler | ||
13 | #endif | ||
14 | |||
15 | #define PGM_CHECK(handler) .long handler | 9 | #define PGM_CHECK(handler) .long handler |
16 | #define PGM_CHECK_DEFAULT PGM_CHECK(default_trap_handler) | 10 | #define PGM_CHECK_DEFAULT PGM_CHECK(default_trap_handler) |
17 | 11 | ||
18 | /* | 12 | /* |
19 | * The program check table contains exactly 128 (0x00-0x7f) entries. Each | 13 | * The program check table contains exactly 128 (0x00-0x7f) entries. Each |
20 | * line defines the 31 and/or 64 bit function to be called corresponding | 14 | * line defines the function to be called corresponding to the program check |
21 | * to the program check interruption code. | 15 | * interruption code. |
22 | */ | 16 | */ |
23 | .section .rodata, "a" | 17 | .section .rodata, "a" |
24 | ENTRY(pgm_check_table) | 18 | ENTRY(pgm_check_table) |
@@ -46,10 +40,10 @@ PGM_CHECK_DEFAULT /* 14 */ | |||
46 | PGM_CHECK(operand_exception) /* 15 */ | 40 | PGM_CHECK(operand_exception) /* 15 */ |
47 | PGM_CHECK_DEFAULT /* 16 */ | 41 | PGM_CHECK_DEFAULT /* 16 */ |
48 | PGM_CHECK_DEFAULT /* 17 */ | 42 | PGM_CHECK_DEFAULT /* 17 */ |
49 | PGM_CHECK_64BIT(transaction_exception) /* 18 */ | 43 | PGM_CHECK(transaction_exception) /* 18 */ |
50 | PGM_CHECK_DEFAULT /* 19 */ | 44 | PGM_CHECK_DEFAULT /* 19 */ |
51 | PGM_CHECK_DEFAULT /* 1a */ | 45 | PGM_CHECK_DEFAULT /* 1a */ |
52 | PGM_CHECK_64BIT(vector_exception) /* 1b */ | 46 | PGM_CHECK(vector_exception) /* 1b */ |
53 | PGM_CHECK(space_switch_exception) /* 1c */ | 47 | PGM_CHECK(space_switch_exception) /* 1c */ |
54 | PGM_CHECK(hfp_sqrt_exception) /* 1d */ | 48 | PGM_CHECK(hfp_sqrt_exception) /* 1d */ |
55 | PGM_CHECK_DEFAULT /* 1e */ | 49 | PGM_CHECK_DEFAULT /* 1e */ |
@@ -78,10 +72,10 @@ PGM_CHECK_DEFAULT /* 34 */ | |||
78 | PGM_CHECK_DEFAULT /* 35 */ | 72 | PGM_CHECK_DEFAULT /* 35 */ |
79 | PGM_CHECK_DEFAULT /* 36 */ | 73 | PGM_CHECK_DEFAULT /* 36 */ |
80 | PGM_CHECK_DEFAULT /* 37 */ | 74 | PGM_CHECK_DEFAULT /* 37 */ |
81 | PGM_CHECK_64BIT(do_dat_exception) /* 38 */ | 75 | PGM_CHECK(do_dat_exception) /* 38 */ |
82 | PGM_CHECK_64BIT(do_dat_exception) /* 39 */ | 76 | PGM_CHECK(do_dat_exception) /* 39 */ |
83 | PGM_CHECK_64BIT(do_dat_exception) /* 3a */ | 77 | PGM_CHECK(do_dat_exception) /* 3a */ |
84 | PGM_CHECK_64BIT(do_dat_exception) /* 3b */ | 78 | PGM_CHECK(do_dat_exception) /* 3b */ |
85 | PGM_CHECK_DEFAULT /* 3c */ | 79 | PGM_CHECK_DEFAULT /* 3c */ |
86 | PGM_CHECK_DEFAULT /* 3d */ | 80 | PGM_CHECK_DEFAULT /* 3d */ |
87 | PGM_CHECK_DEFAULT /* 3e */ | 81 | PGM_CHECK_DEFAULT /* 3e */ |
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 13fc0978ca7e..dc5edc29b73a 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c | |||
@@ -79,13 +79,11 @@ void release_thread(struct task_struct *dead_task) | |||
79 | { | 79 | { |
80 | } | 80 | } |
81 | 81 | ||
82 | #ifdef CONFIG_64BIT | ||
83 | void arch_release_task_struct(struct task_struct *tsk) | 82 | void arch_release_task_struct(struct task_struct *tsk) |
84 | { | 83 | { |
85 | if (tsk->thread.vxrs) | 84 | if (tsk->thread.vxrs) |
86 | kfree(tsk->thread.vxrs); | 85 | kfree(tsk->thread.vxrs); |
87 | } | 86 | } |
88 | #endif | ||
89 | 87 | ||
90 | int copy_thread(unsigned long clone_flags, unsigned long new_stackp, | 88 | int copy_thread(unsigned long clone_flags, unsigned long new_stackp, |
91 | unsigned long arg, struct task_struct *p) | 89 | unsigned long arg, struct task_struct *p) |
@@ -144,19 +142,6 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp, | |||
144 | p->thread.ri_signum = 0; | 142 | p->thread.ri_signum = 0; |
145 | frame->childregs.psw.mask &= ~PSW_MASK_RI; | 143 | frame->childregs.psw.mask &= ~PSW_MASK_RI; |
146 | 144 | ||
147 | #ifndef CONFIG_64BIT | ||
148 | /* | ||
149 | * save fprs to current->thread.fp_regs to merge them with | ||
150 | * the emulated registers and then copy the result to the child. | ||
151 | */ | ||
152 | save_fp_ctl(¤t->thread.fp_regs.fpc); | ||
153 | save_fp_regs(current->thread.fp_regs.fprs); | ||
154 | memcpy(&p->thread.fp_regs, ¤t->thread.fp_regs, | ||
155 | sizeof(s390_fp_regs)); | ||
156 | /* Set a new TLS ? */ | ||
157 | if (clone_flags & CLONE_SETTLS) | ||
158 | p->thread.acrs[0] = frame->childregs.gprs[6]; | ||
159 | #else /* CONFIG_64BIT */ | ||
160 | /* Save the fpu registers to new thread structure. */ | 145 | /* Save the fpu registers to new thread structure. */ |
161 | save_fp_ctl(&p->thread.fp_regs.fpc); | 146 | save_fp_ctl(&p->thread.fp_regs.fpc); |
162 | save_fp_regs(p->thread.fp_regs.fprs); | 147 | save_fp_regs(p->thread.fp_regs.fprs); |
@@ -172,15 +157,13 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp, | |||
172 | p->thread.acrs[1] = (unsigned int)tls; | 157 | p->thread.acrs[1] = (unsigned int)tls; |
173 | } | 158 | } |
174 | } | 159 | } |
175 | #endif /* CONFIG_64BIT */ | ||
176 | return 0; | 160 | return 0; |
177 | } | 161 | } |
178 | 162 | ||
179 | asmlinkage void execve_tail(void) | 163 | asmlinkage void execve_tail(void) |
180 | { | 164 | { |
181 | current->thread.fp_regs.fpc = 0; | 165 | current->thread.fp_regs.fpc = 0; |
182 | if (MACHINE_HAS_IEEE) | 166 | asm volatile("sfpc %0,%0" : : "d" (0)); |
183 | asm volatile("sfpc %0,%0" : : "d" (0)); | ||
184 | } | 167 | } |
185 | 168 | ||
186 | /* | 169 | /* |
@@ -188,18 +171,8 @@ asmlinkage void execve_tail(void) | |||
188 | */ | 171 | */ |
189 | int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs) | 172 | int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs) |
190 | { | 173 | { |
191 | #ifndef CONFIG_64BIT | ||
192 | /* | ||
193 | * save fprs to current->thread.fp_regs to merge them with | ||
194 | * the emulated registers and then copy the result to the dump. | ||
195 | */ | ||
196 | save_fp_ctl(¤t->thread.fp_regs.fpc); | ||
197 | save_fp_regs(current->thread.fp_regs.fprs); | ||
198 | memcpy(fpregs, ¤t->thread.fp_regs, sizeof(s390_fp_regs)); | ||
199 | #else /* CONFIG_64BIT */ | ||
200 | save_fp_ctl(&fpregs->fpc); | 174 | save_fp_ctl(&fpregs->fpc); |
201 | save_fp_regs(fpregs->fprs); | 175 | save_fp_regs(fpregs->fprs); |
202 | #endif /* CONFIG_64BIT */ | ||
203 | return 1; | 176 | return 1; |
204 | } | 177 | } |
205 | EXPORT_SYMBOL(dump_fpu); | 178 | EXPORT_SYMBOL(dump_fpu); |
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index eabfb4594517..d363c9c322a1 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c | |||
@@ -44,7 +44,6 @@ void update_cr_regs(struct task_struct *task) | |||
44 | struct thread_struct *thread = &task->thread; | 44 | struct thread_struct *thread = &task->thread; |
45 | struct per_regs old, new; | 45 | struct per_regs old, new; |
46 | 46 | ||
47 | #ifdef CONFIG_64BIT | ||
48 | /* Take care of the enable/disable of transactional execution. */ | 47 | /* Take care of the enable/disable of transactional execution. */ |
49 | if (MACHINE_HAS_TE || MACHINE_HAS_VX) { | 48 | if (MACHINE_HAS_TE || MACHINE_HAS_VX) { |
50 | unsigned long cr, cr_new; | 49 | unsigned long cr, cr_new; |
@@ -80,7 +79,6 @@ void update_cr_regs(struct task_struct *task) | |||
80 | __ctl_load(cr_new, 2, 2); | 79 | __ctl_load(cr_new, 2, 2); |
81 | } | 80 | } |
82 | } | 81 | } |
83 | #endif | ||
84 | /* Copy user specified PER registers */ | 82 | /* Copy user specified PER registers */ |
85 | new.control = thread->per_user.control; | 83 | new.control = thread->per_user.control; |
86 | new.start = thread->per_user.start; | 84 | new.start = thread->per_user.start; |
@@ -93,10 +91,8 @@ void update_cr_regs(struct task_struct *task) | |||
93 | new.control |= PER_EVENT_BRANCH; | 91 | new.control |= PER_EVENT_BRANCH; |
94 | else | 92 | else |
95 | new.control |= PER_EVENT_IFETCH; | 93 | new.control |= PER_EVENT_IFETCH; |
96 | #ifdef CONFIG_64BIT | ||
97 | new.control |= PER_CONTROL_SUSPENSION; | 94 | new.control |= PER_CONTROL_SUSPENSION; |
98 | new.control |= PER_EVENT_TRANSACTION_END; | 95 | new.control |= PER_EVENT_TRANSACTION_END; |
99 | #endif | ||
100 | if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP)) | 96 | if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP)) |
101 | new.control |= PER_EVENT_IFETCH; | 97 | new.control |= PER_EVENT_IFETCH; |
102 | new.start = 0; | 98 | new.start = 0; |
@@ -146,11 +142,7 @@ void ptrace_disable(struct task_struct *task) | |||
146 | task->thread.per_flags = 0; | 142 | task->thread.per_flags = 0; |
147 | } | 143 | } |
148 | 144 | ||
149 | #ifndef CONFIG_64BIT | 145 | #define __ADDR_MASK 7 |
150 | # define __ADDR_MASK 3 | ||
151 | #else | ||
152 | # define __ADDR_MASK 7 | ||
153 | #endif | ||
154 | 146 | ||
155 | static inline unsigned long __peek_user_per(struct task_struct *child, | 147 | static inline unsigned long __peek_user_per(struct task_struct *child, |
156 | addr_t addr) | 148 | addr_t addr) |
@@ -223,7 +215,6 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr) | |||
223 | * access registers are stored in the thread structure | 215 | * access registers are stored in the thread structure |
224 | */ | 216 | */ |
225 | offset = addr - (addr_t) &dummy->regs.acrs; | 217 | offset = addr - (addr_t) &dummy->regs.acrs; |
226 | #ifdef CONFIG_64BIT | ||
227 | /* | 218 | /* |
228 | * Very special case: old & broken 64 bit gdb reading | 219 | * Very special case: old & broken 64 bit gdb reading |
229 | * from acrs[15]. Result is a 64 bit value. Read the | 220 | * from acrs[15]. Result is a 64 bit value. Read the |
@@ -232,8 +223,7 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr) | |||
232 | if (addr == (addr_t) &dummy->regs.acrs[15]) | 223 | if (addr == (addr_t) &dummy->regs.acrs[15]) |
233 | tmp = ((unsigned long) child->thread.acrs[15]) << 32; | 224 | tmp = ((unsigned long) child->thread.acrs[15]) << 32; |
234 | else | 225 | else |
235 | #endif | 226 | tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset); |
236 | tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset); | ||
237 | 227 | ||
238 | } else if (addr == (addr_t) &dummy->regs.orig_gpr2) { | 228 | } else if (addr == (addr_t) &dummy->regs.orig_gpr2) { |
239 | /* | 229 | /* |
@@ -261,12 +251,10 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr) | |||
261 | * or the child->thread.vxrs array | 251 | * or the child->thread.vxrs array |
262 | */ | 252 | */ |
263 | offset = addr - (addr_t) &dummy->regs.fp_regs.fprs; | 253 | offset = addr - (addr_t) &dummy->regs.fp_regs.fprs; |
264 | #ifdef CONFIG_64BIT | ||
265 | if (child->thread.vxrs) | 254 | if (child->thread.vxrs) |
266 | tmp = *(addr_t *) | 255 | tmp = *(addr_t *) |
267 | ((addr_t) child->thread.vxrs + 2*offset); | 256 | ((addr_t) child->thread.vxrs + 2*offset); |
268 | else | 257 | else |
269 | #endif | ||
270 | tmp = *(addr_t *) | 258 | tmp = *(addr_t *) |
271 | ((addr_t) &child->thread.fp_regs.fprs + offset); | 259 | ((addr_t) &child->thread.fp_regs.fprs + offset); |
272 | 260 | ||
@@ -293,11 +281,9 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data) | |||
293 | * an alignment of 4. Programmers from hell... | 281 | * an alignment of 4. Programmers from hell... |
294 | */ | 282 | */ |
295 | mask = __ADDR_MASK; | 283 | mask = __ADDR_MASK; |
296 | #ifdef CONFIG_64BIT | ||
297 | if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs && | 284 | if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs && |
298 | addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2) | 285 | addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2) |
299 | mask = 3; | 286 | mask = 3; |
300 | #endif | ||
301 | if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK) | 287 | if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK) |
302 | return -EIO; | 288 | return -EIO; |
303 | 289 | ||
@@ -370,7 +356,6 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) | |||
370 | * access registers are stored in the thread structure | 356 | * access registers are stored in the thread structure |
371 | */ | 357 | */ |
372 | offset = addr - (addr_t) &dummy->regs.acrs; | 358 | offset = addr - (addr_t) &dummy->regs.acrs; |
373 | #ifdef CONFIG_64BIT | ||
374 | /* | 359 | /* |
375 | * Very special case: old & broken 64 bit gdb writing | 360 | * Very special case: old & broken 64 bit gdb writing |
376 | * to acrs[15] with a 64 bit value. Ignore the lower | 361 | * to acrs[15] with a 64 bit value. Ignore the lower |
@@ -380,8 +365,7 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) | |||
380 | if (addr == (addr_t) &dummy->regs.acrs[15]) | 365 | if (addr == (addr_t) &dummy->regs.acrs[15]) |
381 | child->thread.acrs[15] = (unsigned int) (data >> 32); | 366 | child->thread.acrs[15] = (unsigned int) (data >> 32); |
382 | else | 367 | else |
383 | #endif | 368 | *(addr_t *)((addr_t) &child->thread.acrs + offset) = data; |
384 | *(addr_t *)((addr_t) &child->thread.acrs + offset) = data; | ||
385 | 369 | ||
386 | } else if (addr == (addr_t) &dummy->regs.orig_gpr2) { | 370 | } else if (addr == (addr_t) &dummy->regs.orig_gpr2) { |
387 | /* | 371 | /* |
@@ -411,12 +395,10 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) | |||
411 | * or the child->thread.vxrs array | 395 | * or the child->thread.vxrs array |
412 | */ | 396 | */ |
413 | offset = addr - (addr_t) &dummy->regs.fp_regs.fprs; | 397 | offset = addr - (addr_t) &dummy->regs.fp_regs.fprs; |
414 | #ifdef CONFIG_64BIT | ||
415 | if (child->thread.vxrs) | 398 | if (child->thread.vxrs) |
416 | *(addr_t *)((addr_t) | 399 | *(addr_t *)((addr_t) |
417 | child->thread.vxrs + 2*offset) = data; | 400 | child->thread.vxrs + 2*offset) = data; |
418 | else | 401 | else |
419 | #endif | ||
420 | *(addr_t *)((addr_t) | 402 | *(addr_t *)((addr_t) |
421 | &child->thread.fp_regs.fprs + offset) = data; | 403 | &child->thread.fp_regs.fprs + offset) = data; |
422 | 404 | ||
@@ -441,11 +423,9 @@ static int poke_user(struct task_struct *child, addr_t addr, addr_t data) | |||
441 | * an alignment of 4. Programmers from hell indeed... | 423 | * an alignment of 4. Programmers from hell indeed... |
442 | */ | 424 | */ |
443 | mask = __ADDR_MASK; | 425 | mask = __ADDR_MASK; |
444 | #ifdef CONFIG_64BIT | ||
445 | if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs && | 426 | if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs && |
446 | addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2) | 427 | addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2) |
447 | mask = 3; | 428 | mask = 3; |
448 | #endif | ||
449 | if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK) | 429 | if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK) |
450 | return -EIO; | 430 | return -EIO; |
451 | 431 | ||
@@ -649,12 +629,10 @@ static u32 __peek_user_compat(struct task_struct *child, addr_t addr) | |||
649 | * or the child->thread.vxrs array | 629 | * or the child->thread.vxrs array |
650 | */ | 630 | */ |
651 | offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs; | 631 | offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs; |
652 | #ifdef CONFIG_64BIT | ||
653 | if (child->thread.vxrs) | 632 | if (child->thread.vxrs) |
654 | tmp = *(__u32 *) | 633 | tmp = *(__u32 *) |
655 | ((addr_t) child->thread.vxrs + 2*offset); | 634 | ((addr_t) child->thread.vxrs + 2*offset); |
656 | else | 635 | else |
657 | #endif | ||
658 | tmp = *(__u32 *) | 636 | tmp = *(__u32 *) |
659 | ((addr_t) &child->thread.fp_regs.fprs + offset); | 637 | ((addr_t) &child->thread.fp_regs.fprs + offset); |
660 | 638 | ||
@@ -776,12 +754,10 @@ static int __poke_user_compat(struct task_struct *child, | |||
776 | * or the child->thread.vxrs array | 754 | * or the child->thread.vxrs array |
777 | */ | 755 | */ |
778 | offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs; | 756 | offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs; |
779 | #ifdef CONFIG_64BIT | ||
780 | if (child->thread.vxrs) | 757 | if (child->thread.vxrs) |
781 | *(__u32 *)((addr_t) | 758 | *(__u32 *)((addr_t) |
782 | child->thread.vxrs + 2*offset) = tmp; | 759 | child->thread.vxrs + 2*offset) = tmp; |
783 | else | 760 | else |
784 | #endif | ||
785 | *(__u32 *)((addr_t) | 761 | *(__u32 *)((addr_t) |
786 | &child->thread.fp_regs.fprs + offset) = tmp; | 762 | &child->thread.fp_regs.fprs + offset) = tmp; |
787 | 763 | ||
@@ -979,16 +955,13 @@ static int s390_fpregs_get(struct task_struct *target, | |||
979 | if (target == current) { | 955 | if (target == current) { |
980 | save_fp_ctl(&target->thread.fp_regs.fpc); | 956 | save_fp_ctl(&target->thread.fp_regs.fpc); |
981 | save_fp_regs(target->thread.fp_regs.fprs); | 957 | save_fp_regs(target->thread.fp_regs.fprs); |
982 | } | 958 | } else if (target->thread.vxrs) { |
983 | #ifdef CONFIG_64BIT | ||
984 | else if (target->thread.vxrs) { | ||
985 | int i; | 959 | int i; |
986 | 960 | ||
987 | for (i = 0; i < __NUM_VXRS_LOW; i++) | 961 | for (i = 0; i < __NUM_VXRS_LOW; i++) |
988 | target->thread.fp_regs.fprs[i] = | 962 | target->thread.fp_regs.fprs[i] = |
989 | *(freg_t *)(target->thread.vxrs + i); | 963 | *(freg_t *)(target->thread.vxrs + i); |
990 | } | 964 | } |
991 | #endif | ||
992 | return user_regset_copyout(&pos, &count, &kbuf, &ubuf, | 965 | return user_regset_copyout(&pos, &count, &kbuf, &ubuf, |
993 | &target->thread.fp_regs, 0, -1); | 966 | &target->thread.fp_regs, 0, -1); |
994 | } | 967 | } |
@@ -1026,23 +999,18 @@ static int s390_fpregs_set(struct task_struct *target, | |||
1026 | if (target == current) { | 999 | if (target == current) { |
1027 | restore_fp_ctl(&target->thread.fp_regs.fpc); | 1000 | restore_fp_ctl(&target->thread.fp_regs.fpc); |
1028 | restore_fp_regs(target->thread.fp_regs.fprs); | 1001 | restore_fp_regs(target->thread.fp_regs.fprs); |
1029 | } | 1002 | } else if (target->thread.vxrs) { |
1030 | #ifdef CONFIG_64BIT | ||
1031 | else if (target->thread.vxrs) { | ||
1032 | int i; | 1003 | int i; |
1033 | 1004 | ||
1034 | for (i = 0; i < __NUM_VXRS_LOW; i++) | 1005 | for (i = 0; i < __NUM_VXRS_LOW; i++) |
1035 | *(freg_t *)(target->thread.vxrs + i) = | 1006 | *(freg_t *)(target->thread.vxrs + i) = |
1036 | target->thread.fp_regs.fprs[i]; | 1007 | target->thread.fp_regs.fprs[i]; |
1037 | } | 1008 | } |
1038 | #endif | ||
1039 | } | 1009 | } |
1040 | 1010 | ||
1041 | return rc; | 1011 | return rc; |
1042 | } | 1012 | } |
1043 | 1013 | ||
1044 | #ifdef CONFIG_64BIT | ||
1045 | |||
1046 | static int s390_last_break_get(struct task_struct *target, | 1014 | static int s390_last_break_get(struct task_struct *target, |
1047 | const struct user_regset *regset, | 1015 | const struct user_regset *regset, |
1048 | unsigned int pos, unsigned int count, | 1016 | unsigned int pos, unsigned int count, |
@@ -1182,8 +1150,6 @@ static int s390_vxrs_high_set(struct task_struct *target, | |||
1182 | return rc; | 1150 | return rc; |
1183 | } | 1151 | } |
1184 | 1152 | ||
1185 | #endif | ||
1186 | |||
1187 | static int s390_system_call_get(struct task_struct *target, | 1153 | static int s390_system_call_get(struct task_struct *target, |
1188 | const struct user_regset *regset, | 1154 | const struct user_regset *regset, |
1189 | unsigned int pos, unsigned int count, | 1155 | unsigned int pos, unsigned int count, |
@@ -1229,7 +1195,6 @@ static const struct user_regset s390_regsets[] = { | |||
1229 | .get = s390_system_call_get, | 1195 | .get = s390_system_call_get, |
1230 | .set = s390_system_call_set, | 1196 | .set = s390_system_call_set, |
1231 | }, | 1197 | }, |
1232 | #ifdef CONFIG_64BIT | ||
1233 | { | 1198 | { |
1234 | .core_note_type = NT_S390_LAST_BREAK, | 1199 | .core_note_type = NT_S390_LAST_BREAK, |
1235 | .n = 1, | 1200 | .n = 1, |
@@ -1262,7 +1227,6 @@ static const struct user_regset s390_regsets[] = { | |||
1262 | .get = s390_vxrs_high_get, | 1227 | .get = s390_vxrs_high_get, |
1263 | .set = s390_vxrs_high_set, | 1228 | .set = s390_vxrs_high_set, |
1264 | }, | 1229 | }, |
1265 | #endif | ||
1266 | }; | 1230 | }; |
1267 | 1231 | ||
1268 | static const struct user_regset_view user_s390_view = { | 1232 | static const struct user_regset_view user_s390_view = { |
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S index dd8016b0477e..52aab0bd84f8 100644 --- a/arch/s390/kernel/reipl.S +++ b/arch/s390/kernel/reipl.S | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * S390 version | 2 | * Copyright IBM Corp 2000, 2011 |
3 | * Copyright IBM Corp. 2000 | 3 | * Author(s): Holger Smolinski <Holger.Smolinski@de.ibm.com>, |
4 | * Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com) | 4 | * Denis Joseph Barrow, |
5 | */ | 5 | */ |
6 | 6 | ||
7 | #include <linux/linkage.h> | 7 | #include <linux/linkage.h> |
@@ -9,43 +9,90 @@ | |||
9 | #include <asm/sigp.h> | 9 | #include <asm/sigp.h> |
10 | 10 | ||
11 | # | 11 | # |
12 | # store_status: Empty implementation until kdump is supported on 31 bit | 12 | # store_status |
13 | # | ||
14 | # Prerequisites to run this function: | ||
15 | # - Prefix register is set to zero | ||
16 | # - Original prefix register is stored in "dump_prefix_page" | ||
17 | # - Lowcore protection is off | ||
13 | # | 18 | # |
14 | ENTRY(store_status) | 19 | ENTRY(store_status) |
15 | br %r14 | 20 | /* Save register one and load save area base */ |
21 | stg %r1,__LC_SAVE_AREA_RESTART | ||
22 | lghi %r1,SAVE_AREA_BASE | ||
23 | /* General purpose registers */ | ||
24 | stmg %r0,%r15,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
25 | lg %r2,__LC_SAVE_AREA_RESTART | ||
26 | stg %r2,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE+8(%r1) | ||
27 | /* Control registers */ | ||
28 | stctg %c0,%c15,__LC_CREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
29 | /* Access registers */ | ||
30 | stam %a0,%a15,__LC_AREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
31 | /* Floating point registers */ | ||
32 | std %f0, 0x00 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
33 | std %f1, 0x08 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
34 | std %f2, 0x10 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
35 | std %f3, 0x18 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
36 | std %f4, 0x20 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
37 | std %f5, 0x28 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
38 | std %f6, 0x30 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
39 | std %f7, 0x38 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
40 | std %f8, 0x40 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
41 | std %f9, 0x48 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
42 | std %f10,0x50 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
43 | std %f11,0x58 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
44 | std %f12,0x60 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
45 | std %f13,0x68 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
46 | std %f14,0x70 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
47 | std %f15,0x78 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
48 | /* Floating point control register */ | ||
49 | stfpc __LC_FP_CREG_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
50 | /* CPU timer */ | ||
51 | stpt __LC_CPU_TIMER_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
52 | /* Saved prefix register */ | ||
53 | larl %r2,dump_prefix_page | ||
54 | mvc __LC_PREFIX_SAVE_AREA-SAVE_AREA_BASE(4,%r1),0(%r2) | ||
55 | /* Clock comparator - seven bytes */ | ||
56 | larl %r2,.Lclkcmp | ||
57 | stckc 0(%r2) | ||
58 | mvc __LC_CLOCK_COMP_SAVE_AREA-SAVE_AREA_BASE + 1(7,%r1),1(%r2) | ||
59 | /* Program status word */ | ||
60 | epsw %r2,%r3 | ||
61 | st %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 0(%r1) | ||
62 | st %r3,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 4(%r1) | ||
63 | larl %r2,store_status | ||
64 | stg %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 8(%r1) | ||
65 | br %r14 | ||
66 | |||
67 | .section .bss | ||
68 | .align 8 | ||
69 | .Lclkcmp: .quad 0x0000000000000000 | ||
70 | .previous | ||
16 | 71 | ||
17 | # | 72 | # |
18 | # do_reipl_asm | 73 | # do_reipl_asm |
19 | # Parameter: r2 = schid of reipl device | 74 | # Parameter: r2 = schid of reipl device |
20 | # | 75 | # |
76 | |||
21 | ENTRY(do_reipl_asm) | 77 | ENTRY(do_reipl_asm) |
22 | basr %r13,0 | 78 | basr %r13,0 |
23 | .Lpg0: lpsw .Lnewpsw-.Lpg0(%r13) | 79 | .Lpg0: lpswe .Lnewpsw-.Lpg0(%r13) |
24 | .Lpg1: # do store status of all registers | 80 | .Lpg1: brasl %r14,store_status |
25 | 81 | ||
26 | stm %r0,%r15,__LC_GPREGS_SAVE_AREA | 82 | lctlg %c6,%c6,.Lall-.Lpg0(%r13) |
27 | stctl %c0,%c15,__LC_CREGS_SAVE_AREA | 83 | lgr %r1,%r2 |
28 | stam %a0,%a15,__LC_AREGS_SAVE_AREA | 84 | mvc __LC_PGM_NEW_PSW(16),.Lpcnew-.Lpg0(%r13) |
29 | l %r10,.Ldump_pfx-.Lpg0(%r13) | ||
30 | mvc __LC_PREFIX_SAVE_AREA(4),0(%r10) | ||
31 | stckc .Lclkcmp-.Lpg0(%r13) | ||
32 | mvc __LC_CLOCK_COMP_SAVE_AREA(8),.Lclkcmp-.Lpg0(%r13) | ||
33 | stpt __LC_CPU_TIMER_SAVE_AREA | ||
34 | st %r13, __LC_PSW_SAVE_AREA+4 | ||
35 | lctl %c6,%c6,.Lall-.Lpg0(%r13) | ||
36 | lr %r1,%r2 | ||
37 | mvc __LC_PGM_NEW_PSW(8),.Lpcnew-.Lpg0(%r13) | ||
38 | stsch .Lschib-.Lpg0(%r13) | 85 | stsch .Lschib-.Lpg0(%r13) |
39 | oi .Lschib+5-.Lpg0(%r13),0x84 | 86 | oi .Lschib+5-.Lpg0(%r13),0x84 |
40 | .Lecs: xi .Lschib+27-.Lpg0(%r13),0x01 | 87 | .Lecs: xi .Lschib+27-.Lpg0(%r13),0x01 |
41 | msch .Lschib-.Lpg0(%r13) | 88 | msch .Lschib-.Lpg0(%r13) |
42 | lhi %r0,5 | 89 | lghi %r0,5 |
43 | .Lssch: ssch .Liplorb-.Lpg0(%r13) | 90 | .Lssch: ssch .Liplorb-.Lpg0(%r13) |
44 | jz .L001 | 91 | jz .L001 |
45 | brct %r0,.Lssch | 92 | brct %r0,.Lssch |
46 | bas %r14,.Ldisab-.Lpg0(%r13) | 93 | bas %r14,.Ldisab-.Lpg0(%r13) |
47 | .L001: mvc __LC_IO_NEW_PSW(8),.Lionew-.Lpg0(%r13) | 94 | .L001: mvc __LC_IO_NEW_PSW(16),.Lionew-.Lpg0(%r13) |
48 | .Ltpi: lpsw .Lwaitpsw-.Lpg0(%r13) | 95 | .Ltpi: lpswe .Lwaitpsw-.Lpg0(%r13) |
49 | .Lcont: c %r1,__LC_SUBCHANNEL_ID | 96 | .Lcont: c %r1,__LC_SUBCHANNEL_ID |
50 | jnz .Ltpi | 97 | jnz .Ltpi |
51 | clc __LC_IO_INT_PARM(4),.Liplorb-.Lpg0(%r13) | 98 | clc __LC_IO_INT_PARM(4),.Liplorb-.Lpg0(%r13) |
@@ -58,20 +105,36 @@ ENTRY(do_reipl_asm) | |||
58 | jz .L003 | 105 | jz .L003 |
59 | bas %r14,.Ldisab-.Lpg0(%r13) | 106 | bas %r14,.Ldisab-.Lpg0(%r13) |
60 | .L003: st %r1,__LC_SUBCHANNEL_ID | 107 | .L003: st %r1,__LC_SUBCHANNEL_ID |
108 | lhi %r1,0 # mode 0 = esa | ||
109 | slr %r0,%r0 # set cpuid to zero | ||
110 | sigp %r1,%r0,SIGP_SET_ARCHITECTURE # switch to esa mode | ||
61 | lpsw 0 | 111 | lpsw 0 |
62 | sigp 0,0,SIGP_RESTART | 112 | .Ldisab: sll %r14,1 |
63 | .Ldisab: st %r14,.Ldispsw+4-.Lpg0(%r13) | 113 | srl %r14,1 # need to kill hi bit to avoid specification exceptions. |
64 | lpsw .Ldispsw-.Lpg0(%r13) | 114 | st %r14,.Ldispsw+12-.Lpg0(%r13) |
115 | lpswe .Ldispsw-.Lpg0(%r13) | ||
65 | .align 8 | 116 | .align 8 |
66 | .Lclkcmp: .quad 0x0000000000000000 | 117 | .Lall: .quad 0x00000000ff000000 |
67 | .Lall: .long 0xff000000 | 118 | .align 16 |
68 | .Ldump_pfx: .long dump_prefix_page | 119 | /* |
69 | .align 8 | 120 | * These addresses have to be 31 bit otherwise |
70 | .Lnewpsw: .long 0x00080000,0x80000000+.Lpg1 | 121 | * the sigp will throw a specifcation exception |
71 | .Lpcnew: .long 0x00080000,0x80000000+.Lecs | 122 | * when switching to ESA mode as bit 31 be set |
72 | .Lionew: .long 0x00080000,0x80000000+.Lcont | 123 | * in the ESA psw. |
73 | .Lwaitpsw: .long 0x020a0000,0x00000000+.Ltpi | 124 | * Bit 31 of the addresses has to be 0 for the |
74 | .Ldispsw: .long 0x000a0000,0x00000000 | 125 | * 31bit lpswe instruction a fact they appear to have |
126 | * omitted from the pop. | ||
127 | */ | ||
128 | .Lnewpsw: .quad 0x0000000080000000 | ||
129 | .quad .Lpg1 | ||
130 | .Lpcnew: .quad 0x0000000080000000 | ||
131 | .quad .Lecs | ||
132 | .Lionew: .quad 0x0000000080000000 | ||
133 | .quad .Lcont | ||
134 | .Lwaitpsw: .quad 0x0202000080000000 | ||
135 | .quad .Ltpi | ||
136 | .Ldispsw: .quad 0x0002000080000000 | ||
137 | .quad 0x0000000000000000 | ||
75 | .Liplccws: .long 0x02000000,0x60000018 | 138 | .Liplccws: .long 0x02000000,0x60000018 |
76 | .long 0x08000008,0x20000001 | 139 | .long 0x08000008,0x20000001 |
77 | .Liplorb: .long 0x0049504c,0x0040ff80 | 140 | .Liplorb: .long 0x0049504c,0x0040ff80 |
diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S deleted file mode 100644 index dc3b1273c4dc..000000000000 --- a/arch/s390/kernel/reipl64.S +++ /dev/null | |||
@@ -1,155 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright IBM Corp 2000, 2011 | ||
3 | * Author(s): Holger Smolinski <Holger.Smolinski@de.ibm.com>, | ||
4 | * Denis Joseph Barrow, | ||
5 | */ | ||
6 | |||
7 | #include <linux/linkage.h> | ||
8 | #include <asm/asm-offsets.h> | ||
9 | #include <asm/sigp.h> | ||
10 | |||
11 | # | ||
12 | # store_status | ||
13 | # | ||
14 | # Prerequisites to run this function: | ||
15 | # - Prefix register is set to zero | ||
16 | # - Original prefix register is stored in "dump_prefix_page" | ||
17 | # - Lowcore protection is off | ||
18 | # | ||
19 | ENTRY(store_status) | ||
20 | /* Save register one and load save area base */ | ||
21 | stg %r1,__LC_SAVE_AREA_RESTART | ||
22 | lghi %r1,SAVE_AREA_BASE | ||
23 | /* General purpose registers */ | ||
24 | stmg %r0,%r15,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
25 | lg %r2,__LC_SAVE_AREA_RESTART | ||
26 | stg %r2,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE+8(%r1) | ||
27 | /* Control registers */ | ||
28 | stctg %c0,%c15,__LC_CREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
29 | /* Access registers */ | ||
30 | stam %a0,%a15,__LC_AREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
31 | /* Floating point registers */ | ||
32 | std %f0, 0x00 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
33 | std %f1, 0x08 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
34 | std %f2, 0x10 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
35 | std %f3, 0x18 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
36 | std %f4, 0x20 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
37 | std %f5, 0x28 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
38 | std %f6, 0x30 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
39 | std %f7, 0x38 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
40 | std %f8, 0x40 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
41 | std %f9, 0x48 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
42 | std %f10,0x50 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
43 | std %f11,0x58 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
44 | std %f12,0x60 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
45 | std %f13,0x68 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
46 | std %f14,0x70 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
47 | std %f15,0x78 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
48 | /* Floating point control register */ | ||
49 | stfpc __LC_FP_CREG_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
50 | /* CPU timer */ | ||
51 | stpt __LC_CPU_TIMER_SAVE_AREA-SAVE_AREA_BASE(%r1) | ||
52 | /* Saved prefix register */ | ||
53 | larl %r2,dump_prefix_page | ||
54 | mvc __LC_PREFIX_SAVE_AREA-SAVE_AREA_BASE(4,%r1),0(%r2) | ||
55 | /* Clock comparator - seven bytes */ | ||
56 | larl %r2,.Lclkcmp | ||
57 | stckc 0(%r2) | ||
58 | mvc __LC_CLOCK_COMP_SAVE_AREA-SAVE_AREA_BASE + 1(7,%r1),1(%r2) | ||
59 | /* Program status word */ | ||
60 | epsw %r2,%r3 | ||
61 | st %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 0(%r1) | ||
62 | st %r3,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 4(%r1) | ||
63 | larl %r2,store_status | ||
64 | stg %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 8(%r1) | ||
65 | br %r14 | ||
66 | |||
67 | .section .bss | ||
68 | .align 8 | ||
69 | .Lclkcmp: .quad 0x0000000000000000 | ||
70 | .previous | ||
71 | |||
72 | # | ||
73 | # do_reipl_asm | ||
74 | # Parameter: r2 = schid of reipl device | ||
75 | # | ||
76 | |||
77 | ENTRY(do_reipl_asm) | ||
78 | basr %r13,0 | ||
79 | .Lpg0: lpswe .Lnewpsw-.Lpg0(%r13) | ||
80 | .Lpg1: brasl %r14,store_status | ||
81 | |||
82 | lctlg %c6,%c6,.Lall-.Lpg0(%r13) | ||
83 | lgr %r1,%r2 | ||
84 | mvc __LC_PGM_NEW_PSW(16),.Lpcnew-.Lpg0(%r13) | ||
85 | stsch .Lschib-.Lpg0(%r13) | ||
86 | oi .Lschib+5-.Lpg0(%r13),0x84 | ||
87 | .Lecs: xi .Lschib+27-.Lpg0(%r13),0x01 | ||
88 | msch .Lschib-.Lpg0(%r13) | ||
89 | lghi %r0,5 | ||
90 | .Lssch: ssch .Liplorb-.Lpg0(%r13) | ||
91 | jz .L001 | ||
92 | brct %r0,.Lssch | ||
93 | bas %r14,.Ldisab-.Lpg0(%r13) | ||
94 | .L001: mvc __LC_IO_NEW_PSW(16),.Lionew-.Lpg0(%r13) | ||
95 | .Ltpi: lpswe .Lwaitpsw-.Lpg0(%r13) | ||
96 | .Lcont: c %r1,__LC_SUBCHANNEL_ID | ||
97 | jnz .Ltpi | ||
98 | clc __LC_IO_INT_PARM(4),.Liplorb-.Lpg0(%r13) | ||
99 | jnz .Ltpi | ||
100 | tsch .Liplirb-.Lpg0(%r13) | ||
101 | tm .Liplirb+9-.Lpg0(%r13),0xbf | ||
102 | jz .L002 | ||
103 | bas %r14,.Ldisab-.Lpg0(%r13) | ||
104 | .L002: tm .Liplirb+8-.Lpg0(%r13),0xf3 | ||
105 | jz .L003 | ||
106 | bas %r14,.Ldisab-.Lpg0(%r13) | ||
107 | .L003: st %r1,__LC_SUBCHANNEL_ID | ||
108 | lhi %r1,0 # mode 0 = esa | ||
109 | slr %r0,%r0 # set cpuid to zero | ||
110 | sigp %r1,%r0,SIGP_SET_ARCHITECTURE # switch to esa mode | ||
111 | lpsw 0 | ||
112 | .Ldisab: sll %r14,1 | ||
113 | srl %r14,1 # need to kill hi bit to avoid specification exceptions. | ||
114 | st %r14,.Ldispsw+12-.Lpg0(%r13) | ||
115 | lpswe .Ldispsw-.Lpg0(%r13) | ||
116 | .align 8 | ||
117 | .Lall: .quad 0x00000000ff000000 | ||
118 | .align 16 | ||
119 | /* | ||
120 | * These addresses have to be 31 bit otherwise | ||
121 | * the sigp will throw a specifcation exception | ||
122 | * when switching to ESA mode as bit 31 be set | ||
123 | * in the ESA psw. | ||
124 | * Bit 31 of the addresses has to be 0 for the | ||
125 | * 31bit lpswe instruction a fact they appear to have | ||
126 | * omitted from the pop. | ||
127 | */ | ||
128 | .Lnewpsw: .quad 0x0000000080000000 | ||
129 | .quad .Lpg1 | ||
130 | .Lpcnew: .quad 0x0000000080000000 | ||
131 | .quad .Lecs | ||
132 | .Lionew: .quad 0x0000000080000000 | ||
133 | .quad .Lcont | ||
134 | .Lwaitpsw: .quad 0x0202000080000000 | ||
135 | .quad .Ltpi | ||
136 | .Ldispsw: .quad 0x0002000080000000 | ||
137 | .quad 0x0000000000000000 | ||
138 | .Liplccws: .long 0x02000000,0x60000018 | ||
139 | .long 0x08000008,0x20000001 | ||
140 | .Liplorb: .long 0x0049504c,0x0040ff80 | ||
141 | .long 0x00000000+.Liplccws | ||
142 | .Lschib: .long 0x00000000,0x00000000 | ||
143 | .long 0x00000000,0x00000000 | ||
144 | .long 0x00000000,0x00000000 | ||
145 | .long 0x00000000,0x00000000 | ||
146 | .long 0x00000000,0x00000000 | ||
147 | .long 0x00000000,0x00000000 | ||
148 | .Liplirb: .long 0x00000000,0x00000000 | ||
149 | .long 0x00000000,0x00000000 | ||
150 | .long 0x00000000,0x00000000 | ||
151 | .long 0x00000000,0x00000000 | ||
152 | .long 0x00000000,0x00000000 | ||
153 | .long 0x00000000,0x00000000 | ||
154 | .long 0x00000000,0x00000000 | ||
155 | .long 0x00000000,0x00000000 | ||
diff --git a/arch/s390/kernel/relocate_kernel.S b/arch/s390/kernel/relocate_kernel.S index f4e6f20e117a..cfac28330b03 100644 --- a/arch/s390/kernel/relocate_kernel.S +++ b/arch/s390/kernel/relocate_kernel.S | |||
@@ -19,7 +19,8 @@ | |||
19 | * %r7 = PAGE_SIZE | 19 | * %r7 = PAGE_SIZE |
20 | * %r8 holds the source address | 20 | * %r8 holds the source address |
21 | * %r9 = PAGE_SIZE | 21 | * %r9 = PAGE_SIZE |
22 | * %r10 is a page mask | 22 | * |
23 | * 0xf000 is a page_mask | ||
23 | */ | 24 | */ |
24 | 25 | ||
25 | .text | 26 | .text |
@@ -27,46 +28,47 @@ ENTRY(relocate_kernel) | |||
27 | basr %r13,0 # base address | 28 | basr %r13,0 # base address |
28 | .base: | 29 | .base: |
29 | stnsm sys_msk-.base(%r13),0xfb # disable DAT | 30 | stnsm sys_msk-.base(%r13),0xfb # disable DAT |
30 | stctl %c0,%c15,ctlregs-.base(%r13) | 31 | stctg %c0,%c15,ctlregs-.base(%r13) |
31 | stm %r0,%r15,gprregs-.base(%r13) | 32 | stmg %r0,%r15,gprregs-.base(%r13) |
33 | lghi %r0,3 | ||
34 | sllg %r0,%r0,31 | ||
35 | stg %r0,0x1d0(%r0) | ||
36 | la %r0,.back_pgm-.base(%r13) | ||
37 | stg %r0,0x1d8(%r0) | ||
32 | la %r1,load_psw-.base(%r13) | 38 | la %r1,load_psw-.base(%r13) |
33 | mvc 0(8,%r0),0(%r1) | 39 | mvc 0(8,%r0),0(%r1) |
34 | la %r0,.back-.base(%r13) | 40 | la %r0,.back-.base(%r13) |
35 | st %r0,4(%r0) | 41 | st %r0,4(%r0) |
36 | oi 4(%r0),0x80 | 42 | oi 4(%r0),0x80 |
37 | mvc 0x68(8,%r0),0(%r1) | 43 | lghi %r0,0 |
38 | la %r0,.back_pgm-.base(%r13) | ||
39 | st %r0,0x6c(%r0) | ||
40 | oi 0x6c(%r0),0x80 | ||
41 | lhi %r0,0 | ||
42 | diag %r0,%r0,0x308 | 44 | diag %r0,%r0,0x308 |
43 | .back: | 45 | .back: |
46 | lhi %r1,1 # mode 1 = esame | ||
47 | sigp %r1,%r0,SIGP_SET_ARCHITECTURE # switch to esame mode | ||
48 | sam64 # switch to 64 bit addressing mode | ||
44 | basr %r13,0 | 49 | basr %r13,0 |
45 | .back_base: | 50 | .back_base: |
46 | oi have_diag308-.back_base(%r13),0x01 | 51 | oi have_diag308-.back_base(%r13),0x01 |
47 | lctl %c0,%c15,ctlregs-.back_base(%r13) | 52 | lctlg %c0,%c15,ctlregs-.back_base(%r13) |
48 | lm %r0,%r15,gprregs-.back_base(%r13) | 53 | lmg %r0,%r15,gprregs-.back_base(%r13) |
49 | j .start_reloc | 54 | j .top |
50 | .back_pgm: | 55 | .back_pgm: |
51 | lm %r0,%r15,gprregs-.base(%r13) | 56 | lmg %r0,%r15,gprregs-.base(%r13) |
52 | .start_reloc: | ||
53 | lhi %r10,-1 # preparing the mask | ||
54 | sll %r10,12 # shift it such that it becomes 0xf000 | ||
55 | .top: | 57 | .top: |
56 | lhi %r7,4096 # load PAGE_SIZE in r7 | 58 | lghi %r7,4096 # load PAGE_SIZE in r7 |
57 | lhi %r9,4096 # load PAGE_SIZE in r9 | 59 | lghi %r9,4096 # load PAGE_SIZE in r9 |
58 | l %r5,0(%r2) # read another word for indirection page | 60 | lg %r5,0(%r2) # read another word for indirection page |
59 | ahi %r2,4 # increment pointer | 61 | aghi %r2,8 # increment pointer |
60 | tml %r5,0x1 # is it a destination page? | 62 | tml %r5,0x1 # is it a destination page? |
61 | je .indir_check # NO, goto "indir_check" | 63 | je .indir_check # NO, goto "indir_check" |
62 | lr %r6,%r5 # r6 = r5 | 64 | lgr %r6,%r5 # r6 = r5 |
63 | nr %r6,%r10 # mask it out and... | 65 | nill %r6,0xf000 # mask it out and... |
64 | j .top # ...next iteration | 66 | j .top # ...next iteration |
65 | .indir_check: | 67 | .indir_check: |
66 | tml %r5,0x2 # is it a indirection page? | 68 | tml %r5,0x2 # is it a indirection page? |
67 | je .done_test # NO, goto "done_test" | 69 | je .done_test # NO, goto "done_test" |
68 | nr %r5,%r10 # YES, mask out, | 70 | nill %r5,0xf000 # YES, mask out, |
69 | lr %r2,%r5 # move it into the right register, | 71 | lgr %r2,%r5 # move it into the right register, |
70 | j .top # and read next... | 72 | j .top # and read next... |
71 | .done_test: | 73 | .done_test: |
72 | tml %r5,0x4 # is it the done indicator? | 74 | tml %r5,0x4 # is it the done indicator? |
@@ -75,13 +77,13 @@ ENTRY(relocate_kernel) | |||
75 | .source_test: | 77 | .source_test: |
76 | tml %r5,0x8 # it should be a source indicator... | 78 | tml %r5,0x8 # it should be a source indicator... |
77 | je .top # NO, ignore it... | 79 | je .top # NO, ignore it... |
78 | lr %r8,%r5 # r8 = r5 | 80 | lgr %r8,%r5 # r8 = r5 |
79 | nr %r8,%r10 # masking | 81 | nill %r8,0xf000 # masking |
80 | 0: mvcle %r6,%r8,0x0 # copy PAGE_SIZE bytes from r8 to r6 - pad with 0 | 82 | 0: mvcle %r6,%r8,0x0 # copy PAGE_SIZE bytes from r8 to r6 - pad with 0 |
81 | jo 0b | 83 | jo 0b |
82 | j .top | 84 | j .top |
83 | .done: | 85 | .done: |
84 | sr %r0,%r0 # clear register r0 | 86 | sgr %r0,%r0 # clear register r0 |
85 | la %r4,load_psw-.base(%r13) # load psw-address into the register | 87 | la %r4,load_psw-.base(%r13) # load psw-address into the register |
86 | o %r3,4(%r4) # or load address into psw | 88 | o %r3,4(%r4) # or load address into psw |
87 | st %r3,4(%r4) | 89 | st %r3,4(%r4) |
@@ -90,8 +92,9 @@ ENTRY(relocate_kernel) | |||
90 | jno .no_diag308 | 92 | jno .no_diag308 |
91 | diag %r0,%r0,0x308 | 93 | diag %r0,%r0,0x308 |
92 | .no_diag308: | 94 | .no_diag308: |
93 | sr %r1,%r1 # clear %r1 | 95 | sam31 # 31 bit mode |
94 | sr %r2,%r2 # clear %r2 | 96 | sr %r1,%r1 # erase register r1 |
97 | sr %r2,%r2 # erase register r2 | ||
95 | sigp %r1,%r2,SIGP_SET_ARCHITECTURE # set cpuid to zero | 98 | sigp %r1,%r2,SIGP_SET_ARCHITECTURE # set cpuid to zero |
96 | lpsw 0 # hopefully start new kernel... | 99 | lpsw 0 # hopefully start new kernel... |
97 | 100 | ||
@@ -102,11 +105,11 @@ ENTRY(relocate_kernel) | |||
102 | .quad 0 | 105 | .quad 0 |
103 | ctlregs: | 106 | ctlregs: |
104 | .rept 16 | 107 | .rept 16 |
105 | .long 0 | 108 | .quad 0 |
106 | .endr | 109 | .endr |
107 | gprregs: | 110 | gprregs: |
108 | .rept 16 | 111 | .rept 16 |
109 | .long 0 | 112 | .quad 0 |
110 | .endr | 113 | .endr |
111 | have_diag308: | 114 | have_diag308: |
112 | .byte 0 | 115 | .byte 0 |
diff --git a/arch/s390/kernel/relocate_kernel64.S b/arch/s390/kernel/relocate_kernel64.S deleted file mode 100644 index cfac28330b03..000000000000 --- a/arch/s390/kernel/relocate_kernel64.S +++ /dev/null | |||
@@ -1,121 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright IBM Corp. 2005 | ||
3 | * | ||
4 | * Author(s): Rolf Adelsberger, | ||
5 | * Heiko Carstens <heiko.carstens@de.ibm.com> | ||
6 | * | ||
7 | */ | ||
8 | |||
9 | #include <linux/linkage.h> | ||
10 | #include <asm/sigp.h> | ||
11 | |||
12 | /* | ||
13 | * moves the new kernel to its destination... | ||
14 | * %r2 = pointer to first kimage_entry_t | ||
15 | * %r3 = start address - where to jump to after the job is done... | ||
16 | * | ||
17 | * %r5 will be used as temp. storage | ||
18 | * %r6 holds the destination address | ||
19 | * %r7 = PAGE_SIZE | ||
20 | * %r8 holds the source address | ||
21 | * %r9 = PAGE_SIZE | ||
22 | * | ||
23 | * 0xf000 is a page_mask | ||
24 | */ | ||
25 | |||
26 | .text | ||
27 | ENTRY(relocate_kernel) | ||
28 | basr %r13,0 # base address | ||
29 | .base: | ||
30 | stnsm sys_msk-.base(%r13),0xfb # disable DAT | ||
31 | stctg %c0,%c15,ctlregs-.base(%r13) | ||
32 | stmg %r0,%r15,gprregs-.base(%r13) | ||
33 | lghi %r0,3 | ||
34 | sllg %r0,%r0,31 | ||
35 | stg %r0,0x1d0(%r0) | ||
36 | la %r0,.back_pgm-.base(%r13) | ||
37 | stg %r0,0x1d8(%r0) | ||
38 | la %r1,load_psw-.base(%r13) | ||
39 | mvc 0(8,%r0),0(%r1) | ||
40 | la %r0,.back-.base(%r13) | ||
41 | st %r0,4(%r0) | ||
42 | oi 4(%r0),0x80 | ||
43 | lghi %r0,0 | ||
44 | diag %r0,%r0,0x308 | ||
45 | .back: | ||
46 | lhi %r1,1 # mode 1 = esame | ||
47 | sigp %r1,%r0,SIGP_SET_ARCHITECTURE # switch to esame mode | ||
48 | sam64 # switch to 64 bit addressing mode | ||
49 | basr %r13,0 | ||
50 | .back_base: | ||
51 | oi have_diag308-.back_base(%r13),0x01 | ||
52 | lctlg %c0,%c15,ctlregs-.back_base(%r13) | ||
53 | lmg %r0,%r15,gprregs-.back_base(%r13) | ||
54 | j .top | ||
55 | .back_pgm: | ||
56 | lmg %r0,%r15,gprregs-.base(%r13) | ||
57 | .top: | ||
58 | lghi %r7,4096 # load PAGE_SIZE in r7 | ||
59 | lghi %r9,4096 # load PAGE_SIZE in r9 | ||
60 | lg %r5,0(%r2) # read another word for indirection page | ||
61 | aghi %r2,8 # increment pointer | ||
62 | tml %r5,0x1 # is it a destination page? | ||
63 | je .indir_check # NO, goto "indir_check" | ||
64 | lgr %r6,%r5 # r6 = r5 | ||
65 | nill %r6,0xf000 # mask it out and... | ||
66 | j .top # ...next iteration | ||
67 | .indir_check: | ||
68 | tml %r5,0x2 # is it a indirection page? | ||
69 | je .done_test # NO, goto "done_test" | ||
70 | nill %r5,0xf000 # YES, mask out, | ||
71 | lgr %r2,%r5 # move it into the right register, | ||
72 | j .top # and read next... | ||
73 | .done_test: | ||
74 | tml %r5,0x4 # is it the done indicator? | ||
75 | je .source_test # NO! Well, then it should be the source indicator... | ||
76 | j .done # ok, lets finish it here... | ||
77 | .source_test: | ||
78 | tml %r5,0x8 # it should be a source indicator... | ||
79 | je .top # NO, ignore it... | ||
80 | lgr %r8,%r5 # r8 = r5 | ||
81 | nill %r8,0xf000 # masking | ||
82 | 0: mvcle %r6,%r8,0x0 # copy PAGE_SIZE bytes from r8 to r6 - pad with 0 | ||
83 | jo 0b | ||
84 | j .top | ||
85 | .done: | ||
86 | sgr %r0,%r0 # clear register r0 | ||
87 | la %r4,load_psw-.base(%r13) # load psw-address into the register | ||
88 | o %r3,4(%r4) # or load address into psw | ||
89 | st %r3,4(%r4) | ||
90 | mvc 0(8,%r0),0(%r4) # copy psw to absolute address 0 | ||
91 | tm have_diag308-.base(%r13),0x01 | ||
92 | jno .no_diag308 | ||
93 | diag %r0,%r0,0x308 | ||
94 | .no_diag308: | ||
95 | sam31 # 31 bit mode | ||
96 | sr %r1,%r1 # erase register r1 | ||
97 | sr %r2,%r2 # erase register r2 | ||
98 | sigp %r1,%r2,SIGP_SET_ARCHITECTURE # set cpuid to zero | ||
99 | lpsw 0 # hopefully start new kernel... | ||
100 | |||
101 | .align 8 | ||
102 | load_psw: | ||
103 | .long 0x00080000,0x80000000 | ||
104 | sys_msk: | ||
105 | .quad 0 | ||
106 | ctlregs: | ||
107 | .rept 16 | ||
108 | .quad 0 | ||
109 | .endr | ||
110 | gprregs: | ||
111 | .rept 16 | ||
112 | .quad 0 | ||
113 | .endr | ||
114 | have_diag308: | ||
115 | .byte 0 | ||
116 | .align 8 | ||
117 | relocate_kernel_end: | ||
118 | .align 8 | ||
119 | .globl relocate_kernel_len | ||
120 | relocate_kernel_len: | ||
121 | .quad relocate_kernel_end - relocate_kernel | ||
diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S index 7e77e03378f3..43c3169ea49c 100644 --- a/arch/s390/kernel/sclp.S +++ b/arch/s390/kernel/sclp.S | |||
@@ -36,21 +36,17 @@ _sclp_wait_int: | |||
36 | ahi %r15,-96 # create stack frame | 36 | ahi %r15,-96 # create stack frame |
37 | la %r8,LC_EXT_NEW_PSW # register int handler | 37 | la %r8,LC_EXT_NEW_PSW # register int handler |
38 | la %r9,.LextpswS1-.LbaseS1(%r13) | 38 | la %r9,.LextpswS1-.LbaseS1(%r13) |
39 | #ifdef CONFIG_64BIT | ||
40 | tm LC_AR_MODE_ID,1 | 39 | tm LC_AR_MODE_ID,1 |
41 | jno .Lesa1 | 40 | jno .Lesa1 |
42 | la %r8,LC_EXT_NEW_PSW_64 # register int handler 64 bit | 41 | la %r8,LC_EXT_NEW_PSW_64 # register int handler 64 bit |
43 | la %r9,.LextpswS1_64-.LbaseS1(%r13) | 42 | la %r9,.LextpswS1_64-.LbaseS1(%r13) |
44 | .Lesa1: | 43 | .Lesa1: |
45 | #endif | ||
46 | mvc .LoldpswS1-.LbaseS1(16,%r13),0(%r8) | 44 | mvc .LoldpswS1-.LbaseS1(16,%r13),0(%r8) |
47 | mvc 0(16,%r8),0(%r9) | 45 | mvc 0(16,%r8),0(%r9) |
48 | #ifdef CONFIG_64BIT | ||
49 | epsw %r6,%r7 # set current addressing mode | 46 | epsw %r6,%r7 # set current addressing mode |
50 | nill %r6,0x1 # in new psw (31 or 64 bit mode) | 47 | nill %r6,0x1 # in new psw (31 or 64 bit mode) |
51 | nilh %r7,0x8000 | 48 | nilh %r7,0x8000 |
52 | stm %r6,%r7,0(%r8) | 49 | stm %r6,%r7,0(%r8) |
53 | #endif | ||
54 | lhi %r6,0x0200 # cr mask for ext int (cr0.54) | 50 | lhi %r6,0x0200 # cr mask for ext int (cr0.54) |
55 | ltr %r2,%r2 | 51 | ltr %r2,%r2 |
56 | jz .LsetctS1 | 52 | jz .LsetctS1 |
@@ -92,10 +88,8 @@ _sclp_wait_int: | |||
92 | .long 0, 0, 0, 0 # old ext int PSW | 88 | .long 0, 0, 0, 0 # old ext int PSW |
93 | .LextpswS1: | 89 | .LextpswS1: |
94 | .long 0x00080000, 0x80000000+.LwaitS1 # PSW to handle ext int | 90 | .long 0x00080000, 0x80000000+.LwaitS1 # PSW to handle ext int |
95 | #ifdef CONFIG_64BIT | ||
96 | .LextpswS1_64: | 91 | .LextpswS1_64: |
97 | .quad 0, .LwaitS1 # PSW to handle ext int, 64 bit | 92 | .quad 0, .LwaitS1 # PSW to handle ext int, 64 bit |
98 | #endif | ||
99 | .LwaitpswS1: | 93 | .LwaitpswS1: |
100 | .long 0x010a0000, 0x00000000+.LloopS1 # PSW to wait for ext int | 94 | .long 0x010a0000, 0x00000000+.LloopS1 # PSW to wait for ext int |
101 | .LtimeS1: | 95 | .LtimeS1: |
@@ -272,13 +266,11 @@ _sclp_print: | |||
272 | ENTRY(_sclp_print_early) | 266 | ENTRY(_sclp_print_early) |
273 | stm %r6,%r15,24(%r15) # save registers | 267 | stm %r6,%r15,24(%r15) # save registers |
274 | ahi %r15,-96 # create stack frame | 268 | ahi %r15,-96 # create stack frame |
275 | #ifdef CONFIG_64BIT | ||
276 | tm LC_AR_MODE_ID,1 | 269 | tm LC_AR_MODE_ID,1 |
277 | jno .Lesa2 | 270 | jno .Lesa2 |
278 | ahi %r15,-80 | 271 | ahi %r15,-80 |
279 | stmh %r6,%r15,96(%r15) # store upper register halves | 272 | stmh %r6,%r15,96(%r15) # store upper register halves |
280 | .Lesa2: | 273 | .Lesa2: |
281 | #endif | ||
282 | lr %r10,%r2 # save string pointer | 274 | lr %r10,%r2 # save string pointer |
283 | lhi %r2,0 | 275 | lhi %r2,0 |
284 | bras %r14,_sclp_setup # enable console | 276 | bras %r14,_sclp_setup # enable console |
@@ -291,14 +283,12 @@ ENTRY(_sclp_print_early) | |||
291 | lhi %r2,1 | 283 | lhi %r2,1 |
292 | bras %r14,_sclp_setup # disable console | 284 | bras %r14,_sclp_setup # disable console |
293 | .LendS5: | 285 | .LendS5: |
294 | #ifdef CONFIG_64BIT | ||
295 | tm LC_AR_MODE_ID,1 | 286 | tm LC_AR_MODE_ID,1 |
296 | jno .Lesa3 | 287 | jno .Lesa3 |
297 | lgfr %r2,%r2 # sign extend return value | 288 | lgfr %r2,%r2 # sign extend return value |
298 | lmh %r6,%r15,96(%r15) # restore upper register halves | 289 | lmh %r6,%r15,96(%r15) # restore upper register halves |
299 | ahi %r15,80 | 290 | ahi %r15,80 |
300 | .Lesa3: | 291 | .Lesa3: |
301 | #endif | ||
302 | lm %r6,%r15,120(%r15) # restore registers | 292 | lm %r6,%r15,120(%r15) # restore registers |
303 | br %r14 | 293 | br %r14 |
304 | 294 | ||
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index a5ea8bc17cb3..7262fe438c99 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c | |||
@@ -92,10 +92,8 @@ EXPORT_SYMBOL(VMALLOC_END); | |||
92 | struct page *vmemmap; | 92 | struct page *vmemmap; |
93 | EXPORT_SYMBOL(vmemmap); | 93 | EXPORT_SYMBOL(vmemmap); |
94 | 94 | ||
95 | #ifdef CONFIG_64BIT | ||
96 | unsigned long MODULES_VADDR; | 95 | unsigned long MODULES_VADDR; |
97 | unsigned long MODULES_END; | 96 | unsigned long MODULES_END; |
98 | #endif | ||
99 | 97 | ||
100 | /* An array with a pointer to the lowcore of every CPU. */ | 98 | /* An array with a pointer to the lowcore of every CPU. */ |
101 | struct _lowcore *lowcore_ptr[NR_CPUS]; | 99 | struct _lowcore *lowcore_ptr[NR_CPUS]; |
@@ -334,19 +332,10 @@ static void __init setup_lowcore(void) | |||
334 | lc->stfl_fac_list = S390_lowcore.stfl_fac_list; | 332 | lc->stfl_fac_list = S390_lowcore.stfl_fac_list; |
335 | memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list, | 333 | memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list, |
336 | MAX_FACILITY_BIT/8); | 334 | MAX_FACILITY_BIT/8); |
337 | #ifndef CONFIG_64BIT | ||
338 | if (MACHINE_HAS_IEEE) { | ||
339 | lc->extended_save_area_addr = (__u32) | ||
340 | __alloc_bootmem_low(PAGE_SIZE, PAGE_SIZE, 0); | ||
341 | /* enable extended save area */ | ||
342 | __ctl_set_bit(14, 29); | ||
343 | } | ||
344 | #else | ||
345 | if (MACHINE_HAS_VX) | 335 | if (MACHINE_HAS_VX) |
346 | lc->vector_save_area_addr = | 336 | lc->vector_save_area_addr = |
347 | (unsigned long) &lc->vector_save_area; | 337 | (unsigned long) &lc->vector_save_area; |
348 | lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0]; | 338 | lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0]; |
349 | #endif | ||
350 | lc->sync_enter_timer = S390_lowcore.sync_enter_timer; | 339 | lc->sync_enter_timer = S390_lowcore.sync_enter_timer; |
351 | lc->async_enter_timer = S390_lowcore.async_enter_timer; | 340 | lc->async_enter_timer = S390_lowcore.async_enter_timer; |
352 | lc->exit_timer = S390_lowcore.exit_timer; | 341 | lc->exit_timer = S390_lowcore.exit_timer; |
@@ -450,7 +439,6 @@ static void __init setup_memory_end(void) | |||
450 | unsigned long vmax, vmalloc_size, tmp; | 439 | unsigned long vmax, vmalloc_size, tmp; |
451 | 440 | ||
452 | /* Choose kernel address space layout: 2, 3, or 4 levels. */ | 441 | /* Choose kernel address space layout: 2, 3, or 4 levels. */ |
453 | #ifdef CONFIG_64BIT | ||
454 | vmalloc_size = VMALLOC_END ?: (128UL << 30) - MODULES_LEN; | 442 | vmalloc_size = VMALLOC_END ?: (128UL << 30) - MODULES_LEN; |
455 | tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE; | 443 | tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE; |
456 | tmp = tmp * (sizeof(struct page) + PAGE_SIZE); | 444 | tmp = tmp * (sizeof(struct page) + PAGE_SIZE); |
@@ -462,12 +450,6 @@ static void __init setup_memory_end(void) | |||
462 | MODULES_END = vmax; | 450 | MODULES_END = vmax; |
463 | MODULES_VADDR = MODULES_END - MODULES_LEN; | 451 | MODULES_VADDR = MODULES_END - MODULES_LEN; |
464 | VMALLOC_END = MODULES_VADDR; | 452 | VMALLOC_END = MODULES_VADDR; |
465 | #else | ||
466 | vmalloc_size = VMALLOC_END ?: 96UL << 20; | ||
467 | vmax = 1UL << 31; /* 2-level kernel page table */ | ||
468 | /* vmalloc area is at the end of the kernel address space. */ | ||
469 | VMALLOC_END = vmax; | ||
470 | #endif | ||
471 | VMALLOC_START = vmax - vmalloc_size; | 453 | VMALLOC_START = vmax - vmalloc_size; |
472 | 454 | ||
473 | /* Split remaining virtual space between 1:1 mapping & vmemmap array */ | 455 | /* Split remaining virtual space between 1:1 mapping & vmemmap array */ |
@@ -754,7 +736,6 @@ static void __init setup_hwcaps(void) | |||
754 | if (MACHINE_HAS_HPAGE) | 736 | if (MACHINE_HAS_HPAGE) |
755 | elf_hwcap |= HWCAP_S390_HPAGE; | 737 | elf_hwcap |= HWCAP_S390_HPAGE; |
756 | 738 | ||
757 | #if defined(CONFIG_64BIT) | ||
758 | /* | 739 | /* |
759 | * 64-bit register support for 31-bit processes | 740 | * 64-bit register support for 31-bit processes |
760 | * HWCAP_S390_HIGH_GPRS is bit 9. | 741 | * HWCAP_S390_HIGH_GPRS is bit 9. |
@@ -772,22 +753,15 @@ static void __init setup_hwcaps(void) | |||
772 | */ | 753 | */ |
773 | if (test_facility(129)) | 754 | if (test_facility(129)) |
774 | elf_hwcap |= HWCAP_S390_VXRS; | 755 | elf_hwcap |= HWCAP_S390_VXRS; |
775 | #endif | ||
776 | |||
777 | get_cpu_id(&cpu_id); | 756 | get_cpu_id(&cpu_id); |
778 | add_device_randomness(&cpu_id, sizeof(cpu_id)); | 757 | add_device_randomness(&cpu_id, sizeof(cpu_id)); |
779 | switch (cpu_id.machine) { | 758 | switch (cpu_id.machine) { |
780 | case 0x9672: | 759 | case 0x9672: |
781 | #if !defined(CONFIG_64BIT) | ||
782 | default: /* Use "g5" as default for 31 bit kernels. */ | ||
783 | #endif | ||
784 | strcpy(elf_platform, "g5"); | 760 | strcpy(elf_platform, "g5"); |
785 | break; | 761 | break; |
786 | case 0x2064: | 762 | case 0x2064: |
787 | case 0x2066: | 763 | case 0x2066: |
788 | #if defined(CONFIG_64BIT) | ||
789 | default: /* Use "z900" as default for 64 bit kernels. */ | 764 | default: /* Use "z900" as default for 64 bit kernels. */ |
790 | #endif | ||
791 | strcpy(elf_platform, "z900"); | 765 | strcpy(elf_platform, "z900"); |
792 | break; | 766 | break; |
793 | case 0x2084: | 767 | case 0x2084: |
@@ -839,19 +813,6 @@ void __init setup_arch(char **cmdline_p) | |||
839 | /* | 813 | /* |
840 | * print what head.S has found out about the machine | 814 | * print what head.S has found out about the machine |
841 | */ | 815 | */ |
842 | #ifndef CONFIG_64BIT | ||
843 | if (MACHINE_IS_VM) | ||
844 | pr_info("Linux is running as a z/VM " | ||
845 | "guest operating system in 31-bit mode\n"); | ||
846 | else if (MACHINE_IS_LPAR) | ||
847 | pr_info("Linux is running natively in 31-bit mode\n"); | ||
848 | if (MACHINE_HAS_IEEE) | ||
849 | pr_info("The hardware system has IEEE compatible " | ||
850 | "floating point units\n"); | ||
851 | else | ||
852 | pr_info("The hardware system has no IEEE compatible " | ||
853 | "floating point units\n"); | ||
854 | #else /* CONFIG_64BIT */ | ||
855 | if (MACHINE_IS_VM) | 816 | if (MACHINE_IS_VM) |
856 | pr_info("Linux is running as a z/VM " | 817 | pr_info("Linux is running as a z/VM " |
857 | "guest operating system in 64-bit mode\n"); | 818 | "guest operating system in 64-bit mode\n"); |
@@ -859,7 +820,6 @@ void __init setup_arch(char **cmdline_p) | |||
859 | pr_info("Linux is running under KVM in 64-bit mode\n"); | 820 | pr_info("Linux is running under KVM in 64-bit mode\n"); |
860 | else if (MACHINE_IS_LPAR) | 821 | else if (MACHINE_IS_LPAR) |
861 | pr_info("Linux is running natively in 64-bit mode\n"); | 822 | pr_info("Linux is running natively in 64-bit mode\n"); |
862 | #endif /* CONFIG_64BIT */ | ||
863 | 823 | ||
864 | /* Have one command line that is parsed and saved in /proc/cmdline */ | 824 | /* Have one command line that is parsed and saved in /proc/cmdline */ |
865 | /* boot_command_line has been already set up in early.c */ | 825 | /* boot_command_line has been already set up in early.c */ |
@@ -930,35 +890,3 @@ void __init setup_arch(char **cmdline_p) | |||
930 | /* Add system specific data to the random pool */ | 890 | /* Add system specific data to the random pool */ |
931 | setup_randomness(); | 891 | setup_randomness(); |
932 | } | 892 | } |
933 | |||
934 | #ifdef CONFIG_32BIT | ||
935 | static int no_removal_warning __initdata; | ||
936 | |||
937 | static int __init parse_no_removal_warning(char *str) | ||
938 | { | ||
939 | no_removal_warning = 1; | ||
940 | return 0; | ||
941 | } | ||
942 | __setup("no_removal_warning", parse_no_removal_warning); | ||
943 | |||
944 | static int __init removal_warning(void) | ||
945 | { | ||
946 | if (no_removal_warning) | ||
947 | return 0; | ||
948 | printk(KERN_ALERT "\n\n"); | ||
949 | printk(KERN_CONT "Warning - you are using a 31 bit kernel!\n\n"); | ||
950 | printk(KERN_CONT "We plan to remove 31 bit kernel support from the kernel sources in March 2015.\n"); | ||
951 | printk(KERN_CONT "Currently we assume that nobody is using the 31 bit kernel on old 31 bit\n"); | ||
952 | printk(KERN_CONT "hardware anymore. If you think that the code should not be removed and also\n"); | ||
953 | printk(KERN_CONT "future versions of the Linux kernel should be able to run in 31 bit mode\n"); | ||
954 | printk(KERN_CONT "please let us know. Please write to:\n"); | ||
955 | printk(KERN_CONT "linux390@de.ibm.com (mail address) and/or\n"); | ||
956 | printk(KERN_CONT "linux-s390@vger.kernel.org (mailing list).\n\n"); | ||
957 | printk(KERN_CONT "Thank you!\n\n"); | ||
958 | printk(KERN_CONT "If this kernel runs on a 64 bit machine you may consider using a 64 bit kernel.\n"); | ||
959 | printk(KERN_CONT "This message can be disabled with the \"no_removal_warning\" kernel parameter.\n"); | ||
960 | schedule_timeout_uninterruptible(300 * HZ); | ||
961 | return 0; | ||
962 | } | ||
963 | early_initcall(removal_warning); | ||
964 | #endif | ||
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index b3ae6f70c6d6..7fec60cb0b75 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c | |||
@@ -106,7 +106,6 @@ static void store_sigregs(void) | |||
106 | { | 106 | { |
107 | save_access_regs(current->thread.acrs); | 107 | save_access_regs(current->thread.acrs); |
108 | save_fp_ctl(¤t->thread.fp_regs.fpc); | 108 | save_fp_ctl(¤t->thread.fp_regs.fpc); |
109 | #ifdef CONFIG_64BIT | ||
110 | if (current->thread.vxrs) { | 109 | if (current->thread.vxrs) { |
111 | int i; | 110 | int i; |
112 | 111 | ||
@@ -115,7 +114,6 @@ static void store_sigregs(void) | |||
115 | current->thread.fp_regs.fprs[i] = | 114 | current->thread.fp_regs.fprs[i] = |
116 | *(freg_t *)(current->thread.vxrs + i); | 115 | *(freg_t *)(current->thread.vxrs + i); |
117 | } else | 116 | } else |
118 | #endif | ||
119 | save_fp_regs(current->thread.fp_regs.fprs); | 117 | save_fp_regs(current->thread.fp_regs.fprs); |
120 | } | 118 | } |
121 | 119 | ||
@@ -124,7 +122,6 @@ static void load_sigregs(void) | |||
124 | { | 122 | { |
125 | restore_access_regs(current->thread.acrs); | 123 | restore_access_regs(current->thread.acrs); |
126 | /* restore_fp_ctl is done in restore_sigregs */ | 124 | /* restore_fp_ctl is done in restore_sigregs */ |
127 | #ifdef CONFIG_64BIT | ||
128 | if (current->thread.vxrs) { | 125 | if (current->thread.vxrs) { |
129 | int i; | 126 | int i; |
130 | 127 | ||
@@ -133,7 +130,6 @@ static void load_sigregs(void) | |||
133 | current->thread.fp_regs.fprs[i]; | 130 | current->thread.fp_regs.fprs[i]; |
134 | restore_vx_regs(current->thread.vxrs); | 131 | restore_vx_regs(current->thread.vxrs); |
135 | } else | 132 | } else |
136 | #endif | ||
137 | restore_fp_regs(current->thread.fp_regs.fprs); | 133 | restore_fp_regs(current->thread.fp_regs.fprs); |
138 | } | 134 | } |
139 | 135 | ||
@@ -200,7 +196,6 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs) | |||
200 | static int save_sigregs_ext(struct pt_regs *regs, | 196 | static int save_sigregs_ext(struct pt_regs *regs, |
201 | _sigregs_ext __user *sregs_ext) | 197 | _sigregs_ext __user *sregs_ext) |
202 | { | 198 | { |
203 | #ifdef CONFIG_64BIT | ||
204 | __u64 vxrs[__NUM_VXRS_LOW]; | 199 | __u64 vxrs[__NUM_VXRS_LOW]; |
205 | int i; | 200 | int i; |
206 | 201 | ||
@@ -215,14 +210,12 @@ static int save_sigregs_ext(struct pt_regs *regs, | |||
215 | sizeof(sregs_ext->vxrs_high))) | 210 | sizeof(sregs_ext->vxrs_high))) |
216 | return -EFAULT; | 211 | return -EFAULT; |
217 | } | 212 | } |
218 | #endif | ||
219 | return 0; | 213 | return 0; |
220 | } | 214 | } |
221 | 215 | ||
222 | static int restore_sigregs_ext(struct pt_regs *regs, | 216 | static int restore_sigregs_ext(struct pt_regs *regs, |
223 | _sigregs_ext __user *sregs_ext) | 217 | _sigregs_ext __user *sregs_ext) |
224 | { | 218 | { |
225 | #ifdef CONFIG_64BIT | ||
226 | __u64 vxrs[__NUM_VXRS_LOW]; | 219 | __u64 vxrs[__NUM_VXRS_LOW]; |
227 | int i; | 220 | int i; |
228 | 221 | ||
@@ -237,7 +230,6 @@ static int restore_sigregs_ext(struct pt_regs *regs, | |||
237 | for (i = 0; i < __NUM_VXRS_LOW; i++) | 230 | for (i = 0; i < __NUM_VXRS_LOW; i++) |
238 | *((__u64 *)(current->thread.vxrs + i) + 1) = vxrs[i]; | 231 | *((__u64 *)(current->thread.vxrs + i) + 1) = vxrs[i]; |
239 | } | 232 | } |
240 | #endif | ||
241 | return 0; | 233 | return 0; |
242 | } | 234 | } |
243 | 235 | ||
@@ -416,13 +408,11 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, | |||
416 | * included in the signal frame on a 31-bit system. | 408 | * included in the signal frame on a 31-bit system. |
417 | */ | 409 | */ |
418 | uc_flags = 0; | 410 | uc_flags = 0; |
419 | #ifdef CONFIG_64BIT | ||
420 | if (MACHINE_HAS_VX) { | 411 | if (MACHINE_HAS_VX) { |
421 | frame_size += sizeof(_sigregs_ext); | 412 | frame_size += sizeof(_sigregs_ext); |
422 | if (current->thread.vxrs) | 413 | if (current->thread.vxrs) |
423 | uc_flags |= UC_VXRS; | 414 | uc_flags |= UC_VXRS; |
424 | } | 415 | } |
425 | #endif | ||
426 | frame = get_sigframe(&ksig->ka, regs, frame_size); | 416 | frame = get_sigframe(&ksig->ka, regs, frame_size); |
427 | if (frame == (void __user *) -1UL) | 417 | if (frame == (void __user *) -1UL) |
428 | return -EFAULT; | 418 | return -EFAULT; |
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index db8f1115a3bf..efd2c1968000 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c | |||
@@ -198,19 +198,11 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu) | |||
198 | lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET; | 198 | lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET; |
199 | lc->cpu_nr = cpu; | 199 | lc->cpu_nr = cpu; |
200 | lc->spinlock_lockval = arch_spin_lockval(cpu); | 200 | lc->spinlock_lockval = arch_spin_lockval(cpu); |
201 | #ifndef CONFIG_64BIT | ||
202 | if (MACHINE_HAS_IEEE) { | ||
203 | lc->extended_save_area_addr = get_zeroed_page(GFP_KERNEL); | ||
204 | if (!lc->extended_save_area_addr) | ||
205 | goto out; | ||
206 | } | ||
207 | #else | ||
208 | if (MACHINE_HAS_VX) | 201 | if (MACHINE_HAS_VX) |
209 | lc->vector_save_area_addr = | 202 | lc->vector_save_area_addr = |
210 | (unsigned long) &lc->vector_save_area; | 203 | (unsigned long) &lc->vector_save_area; |
211 | if (vdso_alloc_per_cpu(lc)) | 204 | if (vdso_alloc_per_cpu(lc)) |
212 | goto out; | 205 | goto out; |
213 | #endif | ||
214 | lowcore_ptr[cpu] = lc; | 206 | lowcore_ptr[cpu] = lc; |
215 | pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc); | 207 | pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc); |
216 | return 0; | 208 | return 0; |
@@ -229,16 +221,7 @@ static void pcpu_free_lowcore(struct pcpu *pcpu) | |||
229 | { | 221 | { |
230 | pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0); | 222 | pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0); |
231 | lowcore_ptr[pcpu - pcpu_devices] = NULL; | 223 | lowcore_ptr[pcpu - pcpu_devices] = NULL; |
232 | #ifndef CONFIG_64BIT | ||
233 | if (MACHINE_HAS_IEEE) { | ||
234 | struct _lowcore *lc = pcpu->lowcore; | ||
235 | |||
236 | free_page((unsigned long) lc->extended_save_area_addr); | ||
237 | lc->extended_save_area_addr = 0; | ||
238 | } | ||
239 | #else | ||
240 | vdso_free_per_cpu(pcpu->lowcore); | 224 | vdso_free_per_cpu(pcpu->lowcore); |
241 | #endif | ||
242 | if (pcpu == &pcpu_devices[0]) | 225 | if (pcpu == &pcpu_devices[0]) |
243 | return; | 226 | return; |
244 | free_page(pcpu->lowcore->panic_stack-PANIC_FRAME_OFFSET); | 227 | free_page(pcpu->lowcore->panic_stack-PANIC_FRAME_OFFSET); |
@@ -492,22 +475,6 @@ void arch_send_call_function_single_ipi(int cpu) | |||
492 | pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single); | 475 | pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single); |
493 | } | 476 | } |
494 | 477 | ||
495 | #ifndef CONFIG_64BIT | ||
496 | /* | ||
497 | * this function sends a 'purge tlb' signal to another CPU. | ||
498 | */ | ||
499 | static void smp_ptlb_callback(void *info) | ||
500 | { | ||
501 | __tlb_flush_local(); | ||
502 | } | ||
503 | |||
504 | void smp_ptlb_all(void) | ||
505 | { | ||
506 | on_each_cpu(smp_ptlb_callback, NULL, 1); | ||
507 | } | ||
508 | EXPORT_SYMBOL(smp_ptlb_all); | ||
509 | #endif /* ! CONFIG_64BIT */ | ||
510 | |||
511 | /* | 478 | /* |
512 | * this function sends a 'reschedule' IPI to another CPU. | 479 | * this function sends a 'reschedule' IPI to another CPU. |
513 | * it goes straight through and wastes no time serializing | 480 | * it goes straight through and wastes no time serializing |
@@ -851,7 +818,8 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle) | |||
851 | pcpu_prepare_secondary(pcpu, cpu); | 818 | pcpu_prepare_secondary(pcpu, cpu); |
852 | pcpu_attach_task(pcpu, tidle); | 819 | pcpu_attach_task(pcpu, tidle); |
853 | pcpu_start_fn(pcpu, smp_start_secondary, NULL); | 820 | pcpu_start_fn(pcpu, smp_start_secondary, NULL); |
854 | while (!cpu_online(cpu)) | 821 | /* Wait until cpu puts itself in the online & active maps */ |
822 | while (!cpu_online(cpu) || !cpu_active(cpu)) | ||
855 | cpu_relax(); | 823 | cpu_relax(); |
856 | return 0; | 824 | return 0; |
857 | } | 825 | } |
diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c index 1c4c5accd220..d3236c9e226b 100644 --- a/arch/s390/kernel/suspend.c +++ b/arch/s390/kernel/suspend.c | |||
@@ -138,6 +138,8 @@ int pfn_is_nosave(unsigned long pfn) | |||
138 | { | 138 | { |
139 | unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin)); | 139 | unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin)); |
140 | unsigned long nosave_end_pfn = PFN_DOWN(__pa(&__nosave_end)); | 140 | unsigned long nosave_end_pfn = PFN_DOWN(__pa(&__nosave_end)); |
141 | unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1; | ||
142 | unsigned long stext_pfn = PFN_DOWN(__pa(&_stext)); | ||
141 | 143 | ||
142 | /* Always save lowcore pages (LC protection might be enabled). */ | 144 | /* Always save lowcore pages (LC protection might be enabled). */ |
143 | if (pfn <= LC_PAGES) | 145 | if (pfn <= LC_PAGES) |
@@ -145,6 +147,8 @@ int pfn_is_nosave(unsigned long pfn) | |||
145 | if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn) | 147 | if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn) |
146 | return 1; | 148 | return 1; |
147 | /* Skip memory holes and read-only pages (NSS, DCSS, ...). */ | 149 | /* Skip memory holes and read-only pages (NSS, DCSS, ...). */ |
150 | if (pfn >= stext_pfn && pfn <= eshared_pfn) | ||
151 | return ipl_info.type == IPL_TYPE_NSS ? 1 : 0; | ||
148 | if (tprot(PFN_PHYS(pfn))) | 152 | if (tprot(PFN_PHYS(pfn))) |
149 | return 1; | 153 | return 1; |
150 | return 0; | 154 | return 0; |
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp.S index ca6294645dd3..ca6294645dd3 100644 --- a/arch/s390/kernel/swsusp_asm64.S +++ b/arch/s390/kernel/swsusp.S | |||
diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c index 23eb222c1658..f145490cce54 100644 --- a/arch/s390/kernel/sys_s390.c +++ b/arch/s390/kernel/sys_s390.c | |||
@@ -76,7 +76,6 @@ SYSCALL_DEFINE5(s390_ipc, uint, call, int, first, unsigned long, second, | |||
76 | return sys_ipc(call, first, second, third, ptr, third); | 76 | return sys_ipc(call, first, second, third, ptr, third); |
77 | } | 77 | } |
78 | 78 | ||
79 | #ifdef CONFIG_64BIT | ||
80 | SYSCALL_DEFINE1(s390_personality, unsigned int, personality) | 79 | SYSCALL_DEFINE1(s390_personality, unsigned int, personality) |
81 | { | 80 | { |
82 | unsigned int ret; | 81 | unsigned int ret; |
@@ -90,51 +89,3 @@ SYSCALL_DEFINE1(s390_personality, unsigned int, personality) | |||
90 | 89 | ||
91 | return ret; | 90 | return ret; |
92 | } | 91 | } |
93 | #endif /* CONFIG_64BIT */ | ||
94 | |||
95 | /* | ||
96 | * Wrapper function for sys_fadvise64/fadvise64_64 | ||
97 | */ | ||
98 | #ifndef CONFIG_64BIT | ||
99 | |||
100 | SYSCALL_DEFINE5(s390_fadvise64, int, fd, u32, offset_high, u32, offset_low, | ||
101 | size_t, len, int, advice) | ||
102 | { | ||
103 | return sys_fadvise64(fd, (u64) offset_high << 32 | offset_low, | ||
104 | len, advice); | ||
105 | } | ||
106 | |||
107 | struct fadvise64_64_args { | ||
108 | int fd; | ||
109 | long long offset; | ||
110 | long long len; | ||
111 | int advice; | ||
112 | }; | ||
113 | |||
114 | SYSCALL_DEFINE1(s390_fadvise64_64, struct fadvise64_64_args __user *, args) | ||
115 | { | ||
116 | struct fadvise64_64_args a; | ||
117 | |||
118 | if ( copy_from_user(&a, args, sizeof(a)) ) | ||
119 | return -EFAULT; | ||
120 | return sys_fadvise64_64(a.fd, a.offset, a.len, a.advice); | ||
121 | } | ||
122 | |||
123 | /* | ||
124 | * This is a wrapper to call sys_fallocate(). For 31 bit s390 the last | ||
125 | * 64 bit argument "len" is split into the upper and lower 32 bits. The | ||
126 | * system call wrapper in the user space loads the value to %r6/%r7. | ||
127 | * The code in entry.S keeps the values in %r2 - %r6 where they are and | ||
128 | * stores %r7 to 96(%r15). But the standard C linkage requires that | ||
129 | * the whole 64 bit value for len is stored on the stack and doesn't | ||
130 | * use %r6 at all. So s390_fallocate has to convert the arguments from | ||
131 | * %r2: fd, %r3: mode, %r4/%r5: offset, %r6/96(%r15)-99(%r15): len | ||
132 | * to | ||
133 | * %r2: fd, %r3: mode, %r4/%r5: offset, 96(%r15)-103(%r15): len | ||
134 | */ | ||
135 | SYSCALL_DEFINE5(s390_fallocate, int, fd, int, mode, loff_t, offset, | ||
136 | u32, len_high, u32, len_low) | ||
137 | { | ||
138 | return sys_fallocate(fd, mode, offset, ((u64)len_high << 32) | len_low); | ||
139 | } | ||
140 | #endif | ||
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S index 939ec474b1dd..1acad02681c4 100644 --- a/arch/s390/kernel/syscalls.S +++ b/arch/s390/kernel/syscalls.S | |||
@@ -1,365 +1,365 @@ | |||
1 | /* | 1 | /* |
2 | * definitions for sys_call_table, each line represents an | 2 | * definitions for sys_call_table, each line represents an |
3 | * entry in the table in the form | 3 | * entry in the table in the form |
4 | * SYSCALL(31 bit syscall, 64 bit syscall, 31 bit emulated syscall) | 4 | * SYSCALL(64 bit syscall, 31 bit emulated syscall) |
5 | * | 5 | * |
6 | * this file is meant to be included from entry.S and entry64.S | 6 | * this file is meant to be included from entry.S |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #define NI_SYSCALL SYSCALL(sys_ni_syscall,sys_ni_syscall,sys_ni_syscall) | 9 | #define NI_SYSCALL SYSCALL(sys_ni_syscall,sys_ni_syscall) |
10 | 10 | ||
11 | NI_SYSCALL /* 0 */ | 11 | NI_SYSCALL /* 0 */ |
12 | SYSCALL(sys_exit,sys_exit,compat_sys_exit) | 12 | SYSCALL(sys_exit,compat_sys_exit) |
13 | SYSCALL(sys_fork,sys_fork,sys_fork) | 13 | SYSCALL(sys_fork,sys_fork) |
14 | SYSCALL(sys_read,sys_read,compat_sys_s390_read) | 14 | SYSCALL(sys_read,compat_sys_s390_read) |
15 | SYSCALL(sys_write,sys_write,compat_sys_s390_write) | 15 | SYSCALL(sys_write,compat_sys_s390_write) |
16 | SYSCALL(sys_open,sys_open,compat_sys_open) /* 5 */ | 16 | SYSCALL(sys_open,compat_sys_open) /* 5 */ |
17 | SYSCALL(sys_close,sys_close,compat_sys_close) | 17 | SYSCALL(sys_close,compat_sys_close) |
18 | SYSCALL(sys_restart_syscall,sys_restart_syscall,sys_restart_syscall) | 18 | SYSCALL(sys_restart_syscall,sys_restart_syscall) |
19 | SYSCALL(sys_creat,sys_creat,compat_sys_creat) | 19 | SYSCALL(sys_creat,compat_sys_creat) |
20 | SYSCALL(sys_link,sys_link,compat_sys_link) | 20 | SYSCALL(sys_link,compat_sys_link) |
21 | SYSCALL(sys_unlink,sys_unlink,compat_sys_unlink) /* 10 */ | 21 | SYSCALL(sys_unlink,compat_sys_unlink) /* 10 */ |
22 | SYSCALL(sys_execve,sys_execve,compat_sys_execve) | 22 | SYSCALL(sys_execve,compat_sys_execve) |
23 | SYSCALL(sys_chdir,sys_chdir,compat_sys_chdir) | 23 | SYSCALL(sys_chdir,compat_sys_chdir) |
24 | SYSCALL(sys_time,sys_ni_syscall,compat_sys_time) /* old time syscall */ | 24 | SYSCALL(sys_ni_syscall,compat_sys_time) /* old time syscall */ |
25 | SYSCALL(sys_mknod,sys_mknod,compat_sys_mknod) | 25 | SYSCALL(sys_mknod,compat_sys_mknod) |
26 | SYSCALL(sys_chmod,sys_chmod,compat_sys_chmod) /* 15 */ | 26 | SYSCALL(sys_chmod,compat_sys_chmod) /* 15 */ |
27 | SYSCALL(sys_lchown16,sys_ni_syscall,compat_sys_s390_lchown16) /* old lchown16 syscall*/ | 27 | SYSCALL(sys_ni_syscall,compat_sys_s390_lchown16) /* old lchown16 syscall*/ |
28 | NI_SYSCALL /* old break syscall holder */ | 28 | NI_SYSCALL /* old break syscall holder */ |
29 | NI_SYSCALL /* old stat syscall holder */ | 29 | NI_SYSCALL /* old stat syscall holder */ |
30 | SYSCALL(sys_lseek,sys_lseek,compat_sys_lseek) | 30 | SYSCALL(sys_lseek,compat_sys_lseek) |
31 | SYSCALL(sys_getpid,sys_getpid,sys_getpid) /* 20 */ | 31 | SYSCALL(sys_getpid,sys_getpid) /* 20 */ |
32 | SYSCALL(sys_mount,sys_mount,compat_sys_mount) | 32 | SYSCALL(sys_mount,compat_sys_mount) |
33 | SYSCALL(sys_oldumount,sys_oldumount,compat_sys_oldumount) | 33 | SYSCALL(sys_oldumount,compat_sys_oldumount) |
34 | SYSCALL(sys_setuid16,sys_ni_syscall,compat_sys_s390_setuid16) /* old setuid16 syscall*/ | 34 | SYSCALL(sys_ni_syscall,compat_sys_s390_setuid16) /* old setuid16 syscall*/ |
35 | SYSCALL(sys_getuid16,sys_ni_syscall,compat_sys_s390_getuid16) /* old getuid16 syscall*/ | 35 | SYSCALL(sys_ni_syscall,compat_sys_s390_getuid16) /* old getuid16 syscall*/ |
36 | SYSCALL(sys_stime,sys_ni_syscall,compat_sys_stime) /* 25 old stime syscall */ | 36 | SYSCALL(sys_ni_syscall,compat_sys_stime) /* 25 old stime syscall */ |
37 | SYSCALL(sys_ptrace,sys_ptrace,compat_sys_ptrace) | 37 | SYSCALL(sys_ptrace,compat_sys_ptrace) |
38 | SYSCALL(sys_alarm,sys_alarm,compat_sys_alarm) | 38 | SYSCALL(sys_alarm,compat_sys_alarm) |
39 | NI_SYSCALL /* old fstat syscall */ | 39 | NI_SYSCALL /* old fstat syscall */ |
40 | SYSCALL(sys_pause,sys_pause,sys_pause) | 40 | SYSCALL(sys_pause,sys_pause) |
41 | SYSCALL(sys_utime,sys_utime,compat_sys_utime) /* 30 */ | 41 | SYSCALL(sys_utime,compat_sys_utime) /* 30 */ |
42 | NI_SYSCALL /* old stty syscall */ | 42 | NI_SYSCALL /* old stty syscall */ |
43 | NI_SYSCALL /* old gtty syscall */ | 43 | NI_SYSCALL /* old gtty syscall */ |
44 | SYSCALL(sys_access,sys_access,compat_sys_access) | 44 | SYSCALL(sys_access,compat_sys_access) |
45 | SYSCALL(sys_nice,sys_nice,compat_sys_nice) | 45 | SYSCALL(sys_nice,compat_sys_nice) |
46 | NI_SYSCALL /* 35 old ftime syscall */ | 46 | NI_SYSCALL /* 35 old ftime syscall */ |
47 | SYSCALL(sys_sync,sys_sync,sys_sync) | 47 | SYSCALL(sys_sync,sys_sync) |
48 | SYSCALL(sys_kill,sys_kill,compat_sys_kill) | 48 | SYSCALL(sys_kill,compat_sys_kill) |
49 | SYSCALL(sys_rename,sys_rename,compat_sys_rename) | 49 | SYSCALL(sys_rename,compat_sys_rename) |
50 | SYSCALL(sys_mkdir,sys_mkdir,compat_sys_mkdir) | 50 | SYSCALL(sys_mkdir,compat_sys_mkdir) |
51 | SYSCALL(sys_rmdir,sys_rmdir,compat_sys_rmdir) /* 40 */ | 51 | SYSCALL(sys_rmdir,compat_sys_rmdir) /* 40 */ |
52 | SYSCALL(sys_dup,sys_dup,compat_sys_dup) | 52 | SYSCALL(sys_dup,compat_sys_dup) |
53 | SYSCALL(sys_pipe,sys_pipe,compat_sys_pipe) | 53 | SYSCALL(sys_pipe,compat_sys_pipe) |
54 | SYSCALL(sys_times,sys_times,compat_sys_times) | 54 | SYSCALL(sys_times,compat_sys_times) |
55 | NI_SYSCALL /* old prof syscall */ | 55 | NI_SYSCALL /* old prof syscall */ |
56 | SYSCALL(sys_brk,sys_brk,compat_sys_brk) /* 45 */ | 56 | SYSCALL(sys_brk,compat_sys_brk) /* 45 */ |
57 | SYSCALL(sys_setgid16,sys_ni_syscall,compat_sys_s390_setgid16) /* old setgid16 syscall*/ | 57 | SYSCALL(sys_ni_syscall,compat_sys_s390_setgid16) /* old setgid16 syscall*/ |
58 | SYSCALL(sys_getgid16,sys_ni_syscall,compat_sys_s390_getgid16) /* old getgid16 syscall*/ | 58 | SYSCALL(sys_ni_syscall,compat_sys_s390_getgid16) /* old getgid16 syscall*/ |
59 | SYSCALL(sys_signal,sys_signal,compat_sys_signal) | 59 | SYSCALL(sys_signal,compat_sys_signal) |
60 | SYSCALL(sys_geteuid16,sys_ni_syscall,compat_sys_s390_geteuid16) /* old geteuid16 syscall */ | 60 | SYSCALL(sys_ni_syscall,compat_sys_s390_geteuid16) /* old geteuid16 syscall */ |
61 | SYSCALL(sys_getegid16,sys_ni_syscall,compat_sys_s390_getegid16) /* 50 old getegid16 syscall */ | 61 | SYSCALL(sys_ni_syscall,compat_sys_s390_getegid16) /* 50 old getegid16 syscall */ |
62 | SYSCALL(sys_acct,sys_acct,compat_sys_acct) | 62 | SYSCALL(sys_acct,compat_sys_acct) |
63 | SYSCALL(sys_umount,sys_umount,compat_sys_umount) | 63 | SYSCALL(sys_umount,compat_sys_umount) |
64 | NI_SYSCALL /* old lock syscall */ | 64 | NI_SYSCALL /* old lock syscall */ |
65 | SYSCALL(sys_ioctl,sys_ioctl,compat_sys_ioctl) | 65 | SYSCALL(sys_ioctl,compat_sys_ioctl) |
66 | SYSCALL(sys_fcntl,sys_fcntl,compat_sys_fcntl) /* 55 */ | 66 | SYSCALL(sys_fcntl,compat_sys_fcntl) /* 55 */ |
67 | NI_SYSCALL /* intel mpx syscall */ | 67 | NI_SYSCALL /* intel mpx syscall */ |
68 | SYSCALL(sys_setpgid,sys_setpgid,compat_sys_setpgid) | 68 | SYSCALL(sys_setpgid,compat_sys_setpgid) |
69 | NI_SYSCALL /* old ulimit syscall */ | 69 | NI_SYSCALL /* old ulimit syscall */ |
70 | NI_SYSCALL /* old uname syscall */ | 70 | NI_SYSCALL /* old uname syscall */ |
71 | SYSCALL(sys_umask,sys_umask,compat_sys_umask) /* 60 */ | 71 | SYSCALL(sys_umask,compat_sys_umask) /* 60 */ |
72 | SYSCALL(sys_chroot,sys_chroot,compat_sys_chroot) | 72 | SYSCALL(sys_chroot,compat_sys_chroot) |
73 | SYSCALL(sys_ustat,sys_ustat,compat_sys_ustat) | 73 | SYSCALL(sys_ustat,compat_sys_ustat) |
74 | SYSCALL(sys_dup2,sys_dup2,compat_sys_dup2) | 74 | SYSCALL(sys_dup2,compat_sys_dup2) |
75 | SYSCALL(sys_getppid,sys_getppid,sys_getppid) | 75 | SYSCALL(sys_getppid,sys_getppid) |
76 | SYSCALL(sys_getpgrp,sys_getpgrp,sys_getpgrp) /* 65 */ | 76 | SYSCALL(sys_getpgrp,sys_getpgrp) /* 65 */ |
77 | SYSCALL(sys_setsid,sys_setsid,sys_setsid) | 77 | SYSCALL(sys_setsid,sys_setsid) |
78 | SYSCALL(sys_sigaction,sys_sigaction,compat_sys_sigaction) | 78 | SYSCALL(sys_sigaction,compat_sys_sigaction) |
79 | NI_SYSCALL /* old sgetmask syscall*/ | 79 | NI_SYSCALL /* old sgetmask syscall*/ |
80 | NI_SYSCALL /* old ssetmask syscall*/ | 80 | NI_SYSCALL /* old ssetmask syscall*/ |
81 | SYSCALL(sys_setreuid16,sys_ni_syscall,compat_sys_s390_setreuid16) /* old setreuid16 syscall */ | 81 | SYSCALL(sys_ni_syscall,compat_sys_s390_setreuid16) /* old setreuid16 syscall */ |
82 | SYSCALL(sys_setregid16,sys_ni_syscall,compat_sys_s390_setregid16) /* old setregid16 syscall */ | 82 | SYSCALL(sys_ni_syscall,compat_sys_s390_setregid16) /* old setregid16 syscall */ |
83 | SYSCALL(sys_sigsuspend,sys_sigsuspend,compat_sys_sigsuspend) | 83 | SYSCALL(sys_sigsuspend,compat_sys_sigsuspend) |
84 | SYSCALL(sys_sigpending,sys_sigpending,compat_sys_sigpending) | 84 | SYSCALL(sys_sigpending,compat_sys_sigpending) |
85 | SYSCALL(sys_sethostname,sys_sethostname,compat_sys_sethostname) | 85 | SYSCALL(sys_sethostname,compat_sys_sethostname) |
86 | SYSCALL(sys_setrlimit,sys_setrlimit,compat_sys_setrlimit) /* 75 */ | 86 | SYSCALL(sys_setrlimit,compat_sys_setrlimit) /* 75 */ |
87 | SYSCALL(sys_old_getrlimit,sys_getrlimit,compat_sys_old_getrlimit) | 87 | SYSCALL(sys_getrlimit,compat_sys_old_getrlimit) |
88 | SYSCALL(sys_getrusage,sys_getrusage,compat_sys_getrusage) | 88 | SYSCALL(sys_getrusage,compat_sys_getrusage) |
89 | SYSCALL(sys_gettimeofday,sys_gettimeofday,compat_sys_gettimeofday) | 89 | SYSCALL(sys_gettimeofday,compat_sys_gettimeofday) |
90 | SYSCALL(sys_settimeofday,sys_settimeofday,compat_sys_settimeofday) | 90 | SYSCALL(sys_settimeofday,compat_sys_settimeofday) |
91 | SYSCALL(sys_getgroups16,sys_ni_syscall,compat_sys_s390_getgroups16) /* 80 old getgroups16 syscall */ | 91 | SYSCALL(sys_ni_syscall,compat_sys_s390_getgroups16) /* 80 old getgroups16 syscall */ |
92 | SYSCALL(sys_setgroups16,sys_ni_syscall,compat_sys_s390_setgroups16) /* old setgroups16 syscall */ | 92 | SYSCALL(sys_ni_syscall,compat_sys_s390_setgroups16) /* old setgroups16 syscall */ |
93 | NI_SYSCALL /* old select syscall */ | 93 | NI_SYSCALL /* old select syscall */ |
94 | SYSCALL(sys_symlink,sys_symlink,compat_sys_symlink) | 94 | SYSCALL(sys_symlink,compat_sys_symlink) |
95 | NI_SYSCALL /* old lstat syscall */ | 95 | NI_SYSCALL /* old lstat syscall */ |
96 | SYSCALL(sys_readlink,sys_readlink,compat_sys_readlink) /* 85 */ | 96 | SYSCALL(sys_readlink,compat_sys_readlink) /* 85 */ |
97 | SYSCALL(sys_uselib,sys_uselib,compat_sys_uselib) | 97 | SYSCALL(sys_uselib,compat_sys_uselib) |
98 | SYSCALL(sys_swapon,sys_swapon,compat_sys_swapon) | 98 | SYSCALL(sys_swapon,compat_sys_swapon) |
99 | SYSCALL(sys_reboot,sys_reboot,compat_sys_reboot) | 99 | SYSCALL(sys_reboot,compat_sys_reboot) |
100 | SYSCALL(sys_ni_syscall,sys_ni_syscall,compat_sys_old_readdir) /* old readdir syscall */ | 100 | SYSCALL(sys_ni_syscall,compat_sys_old_readdir) /* old readdir syscall */ |
101 | SYSCALL(sys_old_mmap,sys_old_mmap,compat_sys_s390_old_mmap) /* 90 */ | 101 | SYSCALL(sys_old_mmap,compat_sys_s390_old_mmap) /* 90 */ |
102 | SYSCALL(sys_munmap,sys_munmap,compat_sys_munmap) | 102 | SYSCALL(sys_munmap,compat_sys_munmap) |
103 | SYSCALL(sys_truncate,sys_truncate,compat_sys_truncate) | 103 | SYSCALL(sys_truncate,compat_sys_truncate) |
104 | SYSCALL(sys_ftruncate,sys_ftruncate,compat_sys_ftruncate) | 104 | SYSCALL(sys_ftruncate,compat_sys_ftruncate) |
105 | SYSCALL(sys_fchmod,sys_fchmod,compat_sys_fchmod) | 105 | SYSCALL(sys_fchmod,compat_sys_fchmod) |
106 | SYSCALL(sys_fchown16,sys_ni_syscall,compat_sys_s390_fchown16) /* 95 old fchown16 syscall*/ | 106 | SYSCALL(sys_ni_syscall,compat_sys_s390_fchown16) /* 95 old fchown16 syscall*/ |
107 | SYSCALL(sys_getpriority,sys_getpriority,compat_sys_getpriority) | 107 | SYSCALL(sys_getpriority,compat_sys_getpriority) |
108 | SYSCALL(sys_setpriority,sys_setpriority,compat_sys_setpriority) | 108 | SYSCALL(sys_setpriority,compat_sys_setpriority) |
109 | NI_SYSCALL /* old profil syscall */ | 109 | NI_SYSCALL /* old profil syscall */ |
110 | SYSCALL(sys_statfs,sys_statfs,compat_sys_statfs) | 110 | SYSCALL(sys_statfs,compat_sys_statfs) |
111 | SYSCALL(sys_fstatfs,sys_fstatfs,compat_sys_fstatfs) /* 100 */ | 111 | SYSCALL(sys_fstatfs,compat_sys_fstatfs) /* 100 */ |
112 | NI_SYSCALL /* ioperm for i386 */ | 112 | NI_SYSCALL /* ioperm for i386 */ |
113 | SYSCALL(sys_socketcall,sys_socketcall,compat_sys_socketcall) | 113 | SYSCALL(sys_socketcall,compat_sys_socketcall) |
114 | SYSCALL(sys_syslog,sys_syslog,compat_sys_syslog) | 114 | SYSCALL(sys_syslog,compat_sys_syslog) |
115 | SYSCALL(sys_setitimer,sys_setitimer,compat_sys_setitimer) | 115 | SYSCALL(sys_setitimer,compat_sys_setitimer) |
116 | SYSCALL(sys_getitimer,sys_getitimer,compat_sys_getitimer) /* 105 */ | 116 | SYSCALL(sys_getitimer,compat_sys_getitimer) /* 105 */ |
117 | SYSCALL(sys_newstat,sys_newstat,compat_sys_newstat) | 117 | SYSCALL(sys_newstat,compat_sys_newstat) |
118 | SYSCALL(sys_newlstat,sys_newlstat,compat_sys_newlstat) | 118 | SYSCALL(sys_newlstat,compat_sys_newlstat) |
119 | SYSCALL(sys_newfstat,sys_newfstat,compat_sys_newfstat) | 119 | SYSCALL(sys_newfstat,compat_sys_newfstat) |
120 | NI_SYSCALL /* old uname syscall */ | 120 | NI_SYSCALL /* old uname syscall */ |
121 | SYSCALL(sys_lookup_dcookie,sys_lookup_dcookie,compat_sys_lookup_dcookie) /* 110 */ | 121 | SYSCALL(sys_lookup_dcookie,compat_sys_lookup_dcookie) /* 110 */ |
122 | SYSCALL(sys_vhangup,sys_vhangup,sys_vhangup) | 122 | SYSCALL(sys_vhangup,sys_vhangup) |
123 | NI_SYSCALL /* old "idle" system call */ | 123 | NI_SYSCALL /* old "idle" system call */ |
124 | NI_SYSCALL /* vm86old for i386 */ | 124 | NI_SYSCALL /* vm86old for i386 */ |
125 | SYSCALL(sys_wait4,sys_wait4,compat_sys_wait4) | 125 | SYSCALL(sys_wait4,compat_sys_wait4) |
126 | SYSCALL(sys_swapoff,sys_swapoff,compat_sys_swapoff) /* 115 */ | 126 | SYSCALL(sys_swapoff,compat_sys_swapoff) /* 115 */ |
127 | SYSCALL(sys_sysinfo,sys_sysinfo,compat_sys_sysinfo) | 127 | SYSCALL(sys_sysinfo,compat_sys_sysinfo) |
128 | SYSCALL(sys_s390_ipc,sys_s390_ipc,compat_sys_s390_ipc) | 128 | SYSCALL(sys_s390_ipc,compat_sys_s390_ipc) |
129 | SYSCALL(sys_fsync,sys_fsync,compat_sys_fsync) | 129 | SYSCALL(sys_fsync,compat_sys_fsync) |
130 | SYSCALL(sys_sigreturn,sys_sigreturn,compat_sys_sigreturn) | 130 | SYSCALL(sys_sigreturn,compat_sys_sigreturn) |
131 | SYSCALL(sys_clone,sys_clone,compat_sys_clone) /* 120 */ | 131 | SYSCALL(sys_clone,compat_sys_clone) /* 120 */ |
132 | SYSCALL(sys_setdomainname,sys_setdomainname,compat_sys_setdomainname) | 132 | SYSCALL(sys_setdomainname,compat_sys_setdomainname) |
133 | SYSCALL(sys_newuname,sys_newuname,compat_sys_newuname) | 133 | SYSCALL(sys_newuname,compat_sys_newuname) |
134 | NI_SYSCALL /* modify_ldt for i386 */ | 134 | NI_SYSCALL /* modify_ldt for i386 */ |
135 | SYSCALL(sys_adjtimex,sys_adjtimex,compat_sys_adjtimex) | 135 | SYSCALL(sys_adjtimex,compat_sys_adjtimex) |
136 | SYSCALL(sys_mprotect,sys_mprotect,compat_sys_mprotect) /* 125 */ | 136 | SYSCALL(sys_mprotect,compat_sys_mprotect) /* 125 */ |
137 | SYSCALL(sys_sigprocmask,sys_sigprocmask,compat_sys_sigprocmask) | 137 | SYSCALL(sys_sigprocmask,compat_sys_sigprocmask) |
138 | NI_SYSCALL /* old "create module" */ | 138 | NI_SYSCALL /* old "create module" */ |
139 | SYSCALL(sys_init_module,sys_init_module,compat_sys_init_module) | 139 | SYSCALL(sys_init_module,compat_sys_init_module) |
140 | SYSCALL(sys_delete_module,sys_delete_module,compat_sys_delete_module) | 140 | SYSCALL(sys_delete_module,compat_sys_delete_module) |
141 | NI_SYSCALL /* 130: old get_kernel_syms */ | 141 | NI_SYSCALL /* 130: old get_kernel_syms */ |
142 | SYSCALL(sys_quotactl,sys_quotactl,compat_sys_quotactl) | 142 | SYSCALL(sys_quotactl,compat_sys_quotactl) |
143 | SYSCALL(sys_getpgid,sys_getpgid,compat_sys_getpgid) | 143 | SYSCALL(sys_getpgid,compat_sys_getpgid) |
144 | SYSCALL(sys_fchdir,sys_fchdir,compat_sys_fchdir) | 144 | SYSCALL(sys_fchdir,compat_sys_fchdir) |
145 | SYSCALL(sys_bdflush,sys_bdflush,compat_sys_bdflush) | 145 | SYSCALL(sys_bdflush,compat_sys_bdflush) |
146 | SYSCALL(sys_sysfs,sys_sysfs,compat_sys_sysfs) /* 135 */ | 146 | SYSCALL(sys_sysfs,compat_sys_sysfs) /* 135 */ |
147 | SYSCALL(sys_personality,sys_s390_personality,compat_sys_s390_personality) | 147 | SYSCALL(sys_s390_personality,compat_sys_s390_personality) |
148 | NI_SYSCALL /* for afs_syscall */ | 148 | NI_SYSCALL /* for afs_syscall */ |
149 | SYSCALL(sys_setfsuid16,sys_ni_syscall,compat_sys_s390_setfsuid16) /* old setfsuid16 syscall */ | 149 | SYSCALL(sys_ni_syscall,compat_sys_s390_setfsuid16) /* old setfsuid16 syscall */ |
150 | SYSCALL(sys_setfsgid16,sys_ni_syscall,compat_sys_s390_setfsgid16) /* old setfsgid16 syscall */ | 150 | SYSCALL(sys_ni_syscall,compat_sys_s390_setfsgid16) /* old setfsgid16 syscall */ |
151 | SYSCALL(sys_llseek,sys_llseek,compat_sys_llseek) /* 140 */ | 151 | SYSCALL(sys_llseek,compat_sys_llseek) /* 140 */ |
152 | SYSCALL(sys_getdents,sys_getdents,compat_sys_getdents) | 152 | SYSCALL(sys_getdents,compat_sys_getdents) |
153 | SYSCALL(sys_select,sys_select,compat_sys_select) | 153 | SYSCALL(sys_select,compat_sys_select) |
154 | SYSCALL(sys_flock,sys_flock,compat_sys_flock) | 154 | SYSCALL(sys_flock,compat_sys_flock) |
155 | SYSCALL(sys_msync,sys_msync,compat_sys_msync) | 155 | SYSCALL(sys_msync,compat_sys_msync) |
156 | SYSCALL(sys_readv,sys_readv,compat_sys_readv) /* 145 */ | 156 | SYSCALL(sys_readv,compat_sys_readv) /* 145 */ |
157 | SYSCALL(sys_writev,sys_writev,compat_sys_writev) | 157 | SYSCALL(sys_writev,compat_sys_writev) |
158 | SYSCALL(sys_getsid,sys_getsid,compat_sys_getsid) | 158 | SYSCALL(sys_getsid,compat_sys_getsid) |
159 | SYSCALL(sys_fdatasync,sys_fdatasync,compat_sys_fdatasync) | 159 | SYSCALL(sys_fdatasync,compat_sys_fdatasync) |
160 | SYSCALL(sys_sysctl,sys_sysctl,compat_sys_sysctl) | 160 | SYSCALL(sys_sysctl,compat_sys_sysctl) |
161 | SYSCALL(sys_mlock,sys_mlock,compat_sys_mlock) /* 150 */ | 161 | SYSCALL(sys_mlock,compat_sys_mlock) /* 150 */ |
162 | SYSCALL(sys_munlock,sys_munlock,compat_sys_munlock) | 162 | SYSCALL(sys_munlock,compat_sys_munlock) |
163 | SYSCALL(sys_mlockall,sys_mlockall,compat_sys_mlockall) | 163 | SYSCALL(sys_mlockall,compat_sys_mlockall) |
164 | SYSCALL(sys_munlockall,sys_munlockall,sys_munlockall) | 164 | SYSCALL(sys_munlockall,sys_munlockall) |
165 | SYSCALL(sys_sched_setparam,sys_sched_setparam,compat_sys_sched_setparam) | 165 | SYSCALL(sys_sched_setparam,compat_sys_sched_setparam) |
166 | SYSCALL(sys_sched_getparam,sys_sched_getparam,compat_sys_sched_getparam) /* 155 */ | 166 | SYSCALL(sys_sched_getparam,compat_sys_sched_getparam) /* 155 */ |
167 | SYSCALL(sys_sched_setscheduler,sys_sched_setscheduler,compat_sys_sched_setscheduler) | 167 | SYSCALL(sys_sched_setscheduler,compat_sys_sched_setscheduler) |
168 | SYSCALL(sys_sched_getscheduler,sys_sched_getscheduler,compat_sys_sched_getscheduler) | 168 | SYSCALL(sys_sched_getscheduler,compat_sys_sched_getscheduler) |
169 | SYSCALL(sys_sched_yield,sys_sched_yield,sys_sched_yield) | 169 | SYSCALL(sys_sched_yield,sys_sched_yield) |
170 | SYSCALL(sys_sched_get_priority_max,sys_sched_get_priority_max,compat_sys_sched_get_priority_max) | 170 | SYSCALL(sys_sched_get_priority_max,compat_sys_sched_get_priority_max) |
171 | SYSCALL(sys_sched_get_priority_min,sys_sched_get_priority_min,compat_sys_sched_get_priority_min) /* 160 */ | 171 | SYSCALL(sys_sched_get_priority_min,compat_sys_sched_get_priority_min) /* 160 */ |
172 | SYSCALL(sys_sched_rr_get_interval,sys_sched_rr_get_interval,compat_sys_sched_rr_get_interval) | 172 | SYSCALL(sys_sched_rr_get_interval,compat_sys_sched_rr_get_interval) |
173 | SYSCALL(sys_nanosleep,sys_nanosleep,compat_sys_nanosleep) | 173 | SYSCALL(sys_nanosleep,compat_sys_nanosleep) |
174 | SYSCALL(sys_mremap,sys_mremap,compat_sys_mremap) | 174 | SYSCALL(sys_mremap,compat_sys_mremap) |
175 | SYSCALL(sys_setresuid16,sys_ni_syscall,compat_sys_s390_setresuid16) /* old setresuid16 syscall */ | 175 | SYSCALL(sys_ni_syscall,compat_sys_s390_setresuid16) /* old setresuid16 syscall */ |
176 | SYSCALL(sys_getresuid16,sys_ni_syscall,compat_sys_s390_getresuid16) /* 165 old getresuid16 syscall */ | 176 | SYSCALL(sys_ni_syscall,compat_sys_s390_getresuid16) /* 165 old getresuid16 syscall */ |
177 | NI_SYSCALL /* for vm86 */ | 177 | NI_SYSCALL /* for vm86 */ |
178 | NI_SYSCALL /* old sys_query_module */ | 178 | NI_SYSCALL /* old sys_query_module */ |
179 | SYSCALL(sys_poll,sys_poll,compat_sys_poll) | 179 | SYSCALL(sys_poll,compat_sys_poll) |
180 | NI_SYSCALL /* old nfsservctl */ | 180 | NI_SYSCALL /* old nfsservctl */ |
181 | SYSCALL(sys_setresgid16,sys_ni_syscall,compat_sys_s390_setresgid16) /* 170 old setresgid16 syscall */ | 181 | SYSCALL(sys_ni_syscall,compat_sys_s390_setresgid16) /* 170 old setresgid16 syscall */ |
182 | SYSCALL(sys_getresgid16,sys_ni_syscall,compat_sys_s390_getresgid16) /* old getresgid16 syscall */ | 182 | SYSCALL(sys_ni_syscall,compat_sys_s390_getresgid16) /* old getresgid16 syscall */ |
183 | SYSCALL(sys_prctl,sys_prctl,compat_sys_prctl) | 183 | SYSCALL(sys_prctl,compat_sys_prctl) |
184 | SYSCALL(sys_rt_sigreturn,sys_rt_sigreturn,compat_sys_rt_sigreturn) | 184 | SYSCALL(sys_rt_sigreturn,compat_sys_rt_sigreturn) |
185 | SYSCALL(sys_rt_sigaction,sys_rt_sigaction,compat_sys_rt_sigaction) | 185 | SYSCALL(sys_rt_sigaction,compat_sys_rt_sigaction) |
186 | SYSCALL(sys_rt_sigprocmask,sys_rt_sigprocmask,compat_sys_rt_sigprocmask) /* 175 */ | 186 | SYSCALL(sys_rt_sigprocmask,compat_sys_rt_sigprocmask) /* 175 */ |
187 | SYSCALL(sys_rt_sigpending,sys_rt_sigpending,compat_sys_rt_sigpending) | 187 | SYSCALL(sys_rt_sigpending,compat_sys_rt_sigpending) |
188 | SYSCALL(sys_rt_sigtimedwait,sys_rt_sigtimedwait,compat_sys_rt_sigtimedwait) | 188 | SYSCALL(sys_rt_sigtimedwait,compat_sys_rt_sigtimedwait) |
189 | SYSCALL(sys_rt_sigqueueinfo,sys_rt_sigqueueinfo,compat_sys_rt_sigqueueinfo) | 189 | SYSCALL(sys_rt_sigqueueinfo,compat_sys_rt_sigqueueinfo) |
190 | SYSCALL(sys_rt_sigsuspend,sys_rt_sigsuspend,compat_sys_rt_sigsuspend) | 190 | SYSCALL(sys_rt_sigsuspend,compat_sys_rt_sigsuspend) |
191 | SYSCALL(sys_pread64,sys_pread64,compat_sys_s390_pread64) /* 180 */ | 191 | SYSCALL(sys_pread64,compat_sys_s390_pread64) /* 180 */ |
192 | SYSCALL(sys_pwrite64,sys_pwrite64,compat_sys_s390_pwrite64) | 192 | SYSCALL(sys_pwrite64,compat_sys_s390_pwrite64) |
193 | SYSCALL(sys_chown16,sys_ni_syscall,compat_sys_s390_chown16) /* old chown16 syscall */ | 193 | SYSCALL(sys_ni_syscall,compat_sys_s390_chown16) /* old chown16 syscall */ |
194 | SYSCALL(sys_getcwd,sys_getcwd,compat_sys_getcwd) | 194 | SYSCALL(sys_getcwd,compat_sys_getcwd) |
195 | SYSCALL(sys_capget,sys_capget,compat_sys_capget) | 195 | SYSCALL(sys_capget,compat_sys_capget) |
196 | SYSCALL(sys_capset,sys_capset,compat_sys_capset) /* 185 */ | 196 | SYSCALL(sys_capset,compat_sys_capset) /* 185 */ |
197 | SYSCALL(sys_sigaltstack,sys_sigaltstack,compat_sys_sigaltstack) | 197 | SYSCALL(sys_sigaltstack,compat_sys_sigaltstack) |
198 | SYSCALL(sys_sendfile,sys_sendfile64,compat_sys_sendfile) | 198 | SYSCALL(sys_sendfile64,compat_sys_sendfile) |
199 | NI_SYSCALL /* streams1 */ | 199 | NI_SYSCALL /* streams1 */ |
200 | NI_SYSCALL /* streams2 */ | 200 | NI_SYSCALL /* streams2 */ |
201 | SYSCALL(sys_vfork,sys_vfork,sys_vfork) /* 190 */ | 201 | SYSCALL(sys_vfork,sys_vfork) /* 190 */ |
202 | SYSCALL(sys_getrlimit,sys_getrlimit,compat_sys_getrlimit) | 202 | SYSCALL(sys_getrlimit,compat_sys_getrlimit) |
203 | SYSCALL(sys_mmap2,sys_mmap2,compat_sys_s390_mmap2) | 203 | SYSCALL(sys_mmap2,compat_sys_s390_mmap2) |
204 | SYSCALL(sys_truncate64,sys_ni_syscall,compat_sys_s390_truncate64) | 204 | SYSCALL(sys_ni_syscall,compat_sys_s390_truncate64) |
205 | SYSCALL(sys_ftruncate64,sys_ni_syscall,compat_sys_s390_ftruncate64) | 205 | SYSCALL(sys_ni_syscall,compat_sys_s390_ftruncate64) |
206 | SYSCALL(sys_stat64,sys_ni_syscall,compat_sys_s390_stat64) /* 195 */ | 206 | SYSCALL(sys_ni_syscall,compat_sys_s390_stat64) /* 195 */ |
207 | SYSCALL(sys_lstat64,sys_ni_syscall,compat_sys_s390_lstat64) | 207 | SYSCALL(sys_ni_syscall,compat_sys_s390_lstat64) |
208 | SYSCALL(sys_fstat64,sys_ni_syscall,compat_sys_s390_fstat64) | 208 | SYSCALL(sys_ni_syscall,compat_sys_s390_fstat64) |
209 | SYSCALL(sys_lchown,sys_lchown,compat_sys_lchown) | 209 | SYSCALL(sys_lchown,compat_sys_lchown) |
210 | SYSCALL(sys_getuid,sys_getuid,sys_getuid) | 210 | SYSCALL(sys_getuid,sys_getuid) |
211 | SYSCALL(sys_getgid,sys_getgid,sys_getgid) /* 200 */ | 211 | SYSCALL(sys_getgid,sys_getgid) /* 200 */ |
212 | SYSCALL(sys_geteuid,sys_geteuid,sys_geteuid) | 212 | SYSCALL(sys_geteuid,sys_geteuid) |
213 | SYSCALL(sys_getegid,sys_getegid,sys_getegid) | 213 | SYSCALL(sys_getegid,sys_getegid) |
214 | SYSCALL(sys_setreuid,sys_setreuid,compat_sys_setreuid) | 214 | SYSCALL(sys_setreuid,compat_sys_setreuid) |
215 | SYSCALL(sys_setregid,sys_setregid,compat_sys_setregid) | 215 | SYSCALL(sys_setregid,compat_sys_setregid) |
216 | SYSCALL(sys_getgroups,sys_getgroups,compat_sys_getgroups) /* 205 */ | 216 | SYSCALL(sys_getgroups,compat_sys_getgroups) /* 205 */ |
217 | SYSCALL(sys_setgroups,sys_setgroups,compat_sys_setgroups) | 217 | SYSCALL(sys_setgroups,compat_sys_setgroups) |
218 | SYSCALL(sys_fchown,sys_fchown,compat_sys_fchown) | 218 | SYSCALL(sys_fchown,compat_sys_fchown) |
219 | SYSCALL(sys_setresuid,sys_setresuid,compat_sys_setresuid) | 219 | SYSCALL(sys_setresuid,compat_sys_setresuid) |
220 | SYSCALL(sys_getresuid,sys_getresuid,compat_sys_getresuid) | 220 | SYSCALL(sys_getresuid,compat_sys_getresuid) |
221 | SYSCALL(sys_setresgid,sys_setresgid,compat_sys_setresgid) /* 210 */ | 221 | SYSCALL(sys_setresgid,compat_sys_setresgid) /* 210 */ |
222 | SYSCALL(sys_getresgid,sys_getresgid,compat_sys_getresgid) | 222 | SYSCALL(sys_getresgid,compat_sys_getresgid) |
223 | SYSCALL(sys_chown,sys_chown,compat_sys_chown) | 223 | SYSCALL(sys_chown,compat_sys_chown) |
224 | SYSCALL(sys_setuid,sys_setuid,compat_sys_setuid) | 224 | SYSCALL(sys_setuid,compat_sys_setuid) |
225 | SYSCALL(sys_setgid,sys_setgid,compat_sys_setgid) | 225 | SYSCALL(sys_setgid,compat_sys_setgid) |
226 | SYSCALL(sys_setfsuid,sys_setfsuid,compat_sys_setfsuid) /* 215 */ | 226 | SYSCALL(sys_setfsuid,compat_sys_setfsuid) /* 215 */ |
227 | SYSCALL(sys_setfsgid,sys_setfsgid,compat_sys_setfsgid) | 227 | SYSCALL(sys_setfsgid,compat_sys_setfsgid) |
228 | SYSCALL(sys_pivot_root,sys_pivot_root,compat_sys_pivot_root) | 228 | SYSCALL(sys_pivot_root,compat_sys_pivot_root) |
229 | SYSCALL(sys_mincore,sys_mincore,compat_sys_mincore) | 229 | SYSCALL(sys_mincore,compat_sys_mincore) |
230 | SYSCALL(sys_madvise,sys_madvise,compat_sys_madvise) | 230 | SYSCALL(sys_madvise,compat_sys_madvise) |
231 | SYSCALL(sys_getdents64,sys_getdents64,compat_sys_getdents64) /* 220 */ | 231 | SYSCALL(sys_getdents64,compat_sys_getdents64) /* 220 */ |
232 | SYSCALL(sys_fcntl64,sys_ni_syscall,compat_sys_fcntl64) | 232 | SYSCALL(sys_ni_syscall,compat_sys_fcntl64) |
233 | SYSCALL(sys_readahead,sys_readahead,compat_sys_s390_readahead) | 233 | SYSCALL(sys_readahead,compat_sys_s390_readahead) |
234 | SYSCALL(sys_sendfile64,sys_ni_syscall,compat_sys_sendfile64) | 234 | SYSCALL(sys_ni_syscall,compat_sys_sendfile64) |
235 | SYSCALL(sys_setxattr,sys_setxattr,compat_sys_setxattr) | 235 | SYSCALL(sys_setxattr,compat_sys_setxattr) |
236 | SYSCALL(sys_lsetxattr,sys_lsetxattr,compat_sys_lsetxattr) /* 225 */ | 236 | SYSCALL(sys_lsetxattr,compat_sys_lsetxattr) /* 225 */ |
237 | SYSCALL(sys_fsetxattr,sys_fsetxattr,compat_sys_fsetxattr) | 237 | SYSCALL(sys_fsetxattr,compat_sys_fsetxattr) |
238 | SYSCALL(sys_getxattr,sys_getxattr,compat_sys_getxattr) | 238 | SYSCALL(sys_getxattr,compat_sys_getxattr) |
239 | SYSCALL(sys_lgetxattr,sys_lgetxattr,compat_sys_lgetxattr) | 239 | SYSCALL(sys_lgetxattr,compat_sys_lgetxattr) |
240 | SYSCALL(sys_fgetxattr,sys_fgetxattr,compat_sys_fgetxattr) | 240 | SYSCALL(sys_fgetxattr,compat_sys_fgetxattr) |
241 | SYSCALL(sys_listxattr,sys_listxattr,compat_sys_listxattr) /* 230 */ | 241 | SYSCALL(sys_listxattr,compat_sys_listxattr) /* 230 */ |
242 | SYSCALL(sys_llistxattr,sys_llistxattr,compat_sys_llistxattr) | 242 | SYSCALL(sys_llistxattr,compat_sys_llistxattr) |
243 | SYSCALL(sys_flistxattr,sys_flistxattr,compat_sys_flistxattr) | 243 | SYSCALL(sys_flistxattr,compat_sys_flistxattr) |
244 | SYSCALL(sys_removexattr,sys_removexattr,compat_sys_removexattr) | 244 | SYSCALL(sys_removexattr,compat_sys_removexattr) |
245 | SYSCALL(sys_lremovexattr,sys_lremovexattr,compat_sys_lremovexattr) | 245 | SYSCALL(sys_lremovexattr,compat_sys_lremovexattr) |
246 | SYSCALL(sys_fremovexattr,sys_fremovexattr,compat_sys_fremovexattr) /* 235 */ | 246 | SYSCALL(sys_fremovexattr,compat_sys_fremovexattr) /* 235 */ |
247 | SYSCALL(sys_gettid,sys_gettid,sys_gettid) | 247 | SYSCALL(sys_gettid,sys_gettid) |
248 | SYSCALL(sys_tkill,sys_tkill,compat_sys_tkill) | 248 | SYSCALL(sys_tkill,compat_sys_tkill) |
249 | SYSCALL(sys_futex,sys_futex,compat_sys_futex) | 249 | SYSCALL(sys_futex,compat_sys_futex) |
250 | SYSCALL(sys_sched_setaffinity,sys_sched_setaffinity,compat_sys_sched_setaffinity) | 250 | SYSCALL(sys_sched_setaffinity,compat_sys_sched_setaffinity) |
251 | SYSCALL(sys_sched_getaffinity,sys_sched_getaffinity,compat_sys_sched_getaffinity) /* 240 */ | 251 | SYSCALL(sys_sched_getaffinity,compat_sys_sched_getaffinity) /* 240 */ |
252 | SYSCALL(sys_tgkill,sys_tgkill,compat_sys_tgkill) | 252 | SYSCALL(sys_tgkill,compat_sys_tgkill) |
253 | NI_SYSCALL /* reserved for TUX */ | 253 | NI_SYSCALL /* reserved for TUX */ |
254 | SYSCALL(sys_io_setup,sys_io_setup,compat_sys_io_setup) | 254 | SYSCALL(sys_io_setup,compat_sys_io_setup) |
255 | SYSCALL(sys_io_destroy,sys_io_destroy,compat_sys_io_destroy) | 255 | SYSCALL(sys_io_destroy,compat_sys_io_destroy) |
256 | SYSCALL(sys_io_getevents,sys_io_getevents,compat_sys_io_getevents) /* 245 */ | 256 | SYSCALL(sys_io_getevents,compat_sys_io_getevents) /* 245 */ |
257 | SYSCALL(sys_io_submit,sys_io_submit,compat_sys_io_submit) | 257 | SYSCALL(sys_io_submit,compat_sys_io_submit) |
258 | SYSCALL(sys_io_cancel,sys_io_cancel,compat_sys_io_cancel) | 258 | SYSCALL(sys_io_cancel,compat_sys_io_cancel) |
259 | SYSCALL(sys_exit_group,sys_exit_group,compat_sys_exit_group) | 259 | SYSCALL(sys_exit_group,compat_sys_exit_group) |
260 | SYSCALL(sys_epoll_create,sys_epoll_create,compat_sys_epoll_create) | 260 | SYSCALL(sys_epoll_create,compat_sys_epoll_create) |
261 | SYSCALL(sys_epoll_ctl,sys_epoll_ctl,compat_sys_epoll_ctl) /* 250 */ | 261 | SYSCALL(sys_epoll_ctl,compat_sys_epoll_ctl) /* 250 */ |
262 | SYSCALL(sys_epoll_wait,sys_epoll_wait,compat_sys_epoll_wait) | 262 | SYSCALL(sys_epoll_wait,compat_sys_epoll_wait) |
263 | SYSCALL(sys_set_tid_address,sys_set_tid_address,compat_sys_set_tid_address) | 263 | SYSCALL(sys_set_tid_address,compat_sys_set_tid_address) |
264 | SYSCALL(sys_s390_fadvise64,sys_fadvise64_64,compat_sys_s390_fadvise64) | 264 | SYSCALL(sys_fadvise64_64,compat_sys_s390_fadvise64) |
265 | SYSCALL(sys_timer_create,sys_timer_create,compat_sys_timer_create) | 265 | SYSCALL(sys_timer_create,compat_sys_timer_create) |
266 | SYSCALL(sys_timer_settime,sys_timer_settime,compat_sys_timer_settime) /* 255 */ | 266 | SYSCALL(sys_timer_settime,compat_sys_timer_settime) /* 255 */ |
267 | SYSCALL(sys_timer_gettime,sys_timer_gettime,compat_sys_timer_gettime) | 267 | SYSCALL(sys_timer_gettime,compat_sys_timer_gettime) |
268 | SYSCALL(sys_timer_getoverrun,sys_timer_getoverrun,compat_sys_timer_getoverrun) | 268 | SYSCALL(sys_timer_getoverrun,compat_sys_timer_getoverrun) |
269 | SYSCALL(sys_timer_delete,sys_timer_delete,compat_sys_timer_delete) | 269 | SYSCALL(sys_timer_delete,compat_sys_timer_delete) |
270 | SYSCALL(sys_clock_settime,sys_clock_settime,compat_sys_clock_settime) | 270 | SYSCALL(sys_clock_settime,compat_sys_clock_settime) |
271 | SYSCALL(sys_clock_gettime,sys_clock_gettime,compat_sys_clock_gettime) /* 260 */ | 271 | SYSCALL(sys_clock_gettime,compat_sys_clock_gettime) /* 260 */ |
272 | SYSCALL(sys_clock_getres,sys_clock_getres,compat_sys_clock_getres) | 272 | SYSCALL(sys_clock_getres,compat_sys_clock_getres) |
273 | SYSCALL(sys_clock_nanosleep,sys_clock_nanosleep,compat_sys_clock_nanosleep) | 273 | SYSCALL(sys_clock_nanosleep,compat_sys_clock_nanosleep) |
274 | NI_SYSCALL /* reserved for vserver */ | 274 | NI_SYSCALL /* reserved for vserver */ |
275 | SYSCALL(sys_s390_fadvise64_64,sys_ni_syscall,compat_sys_s390_fadvise64_64) | 275 | SYSCALL(sys_ni_syscall,compat_sys_s390_fadvise64_64) |
276 | SYSCALL(sys_statfs64,sys_statfs64,compat_sys_statfs64) | 276 | SYSCALL(sys_statfs64,compat_sys_statfs64) |
277 | SYSCALL(sys_fstatfs64,sys_fstatfs64,compat_sys_fstatfs64) | 277 | SYSCALL(sys_fstatfs64,compat_sys_fstatfs64) |
278 | SYSCALL(sys_remap_file_pages,sys_remap_file_pages,compat_sys_remap_file_pages) | 278 | SYSCALL(sys_remap_file_pages,compat_sys_remap_file_pages) |
279 | NI_SYSCALL /* 268 sys_mbind */ | 279 | NI_SYSCALL /* 268 sys_mbind */ |
280 | NI_SYSCALL /* 269 sys_get_mempolicy */ | 280 | NI_SYSCALL /* 269 sys_get_mempolicy */ |
281 | NI_SYSCALL /* 270 sys_set_mempolicy */ | 281 | NI_SYSCALL /* 270 sys_set_mempolicy */ |
282 | SYSCALL(sys_mq_open,sys_mq_open,compat_sys_mq_open) | 282 | SYSCALL(sys_mq_open,compat_sys_mq_open) |
283 | SYSCALL(sys_mq_unlink,sys_mq_unlink,compat_sys_mq_unlink) | 283 | SYSCALL(sys_mq_unlink,compat_sys_mq_unlink) |
284 | SYSCALL(sys_mq_timedsend,sys_mq_timedsend,compat_sys_mq_timedsend) | 284 | SYSCALL(sys_mq_timedsend,compat_sys_mq_timedsend) |
285 | SYSCALL(sys_mq_timedreceive,sys_mq_timedreceive,compat_sys_mq_timedreceive) | 285 | SYSCALL(sys_mq_timedreceive,compat_sys_mq_timedreceive) |
286 | SYSCALL(sys_mq_notify,sys_mq_notify,compat_sys_mq_notify) /* 275 */ | 286 | SYSCALL(sys_mq_notify,compat_sys_mq_notify) /* 275 */ |
287 | SYSCALL(sys_mq_getsetattr,sys_mq_getsetattr,compat_sys_mq_getsetattr) | 287 | SYSCALL(sys_mq_getsetattr,compat_sys_mq_getsetattr) |
288 | SYSCALL(sys_kexec_load,sys_kexec_load,compat_sys_kexec_load) | 288 | SYSCALL(sys_kexec_load,compat_sys_kexec_load) |
289 | SYSCALL(sys_add_key,sys_add_key,compat_sys_add_key) | 289 | SYSCALL(sys_add_key,compat_sys_add_key) |
290 | SYSCALL(sys_request_key,sys_request_key,compat_sys_request_key) | 290 | SYSCALL(sys_request_key,compat_sys_request_key) |
291 | SYSCALL(sys_keyctl,sys_keyctl,compat_sys_keyctl) /* 280 */ | 291 | SYSCALL(sys_keyctl,compat_sys_keyctl) /* 280 */ |
292 | SYSCALL(sys_waitid,sys_waitid,compat_sys_waitid) | 292 | SYSCALL(sys_waitid,compat_sys_waitid) |
293 | SYSCALL(sys_ioprio_set,sys_ioprio_set,compat_sys_ioprio_set) | 293 | SYSCALL(sys_ioprio_set,compat_sys_ioprio_set) |
294 | SYSCALL(sys_ioprio_get,sys_ioprio_get,compat_sys_ioprio_get) | 294 | SYSCALL(sys_ioprio_get,compat_sys_ioprio_get) |
295 | SYSCALL(sys_inotify_init,sys_inotify_init,sys_inotify_init) | 295 | SYSCALL(sys_inotify_init,sys_inotify_init) |
296 | SYSCALL(sys_inotify_add_watch,sys_inotify_add_watch,compat_sys_inotify_add_watch) /* 285 */ | 296 | SYSCALL(sys_inotify_add_watch,compat_sys_inotify_add_watch) /* 285 */ |
297 | SYSCALL(sys_inotify_rm_watch,sys_inotify_rm_watch,compat_sys_inotify_rm_watch) | 297 | SYSCALL(sys_inotify_rm_watch,compat_sys_inotify_rm_watch) |
298 | NI_SYSCALL /* 287 sys_migrate_pages */ | 298 | NI_SYSCALL /* 287 sys_migrate_pages */ |
299 | SYSCALL(sys_openat,sys_openat,compat_sys_openat) | 299 | SYSCALL(sys_openat,compat_sys_openat) |
300 | SYSCALL(sys_mkdirat,sys_mkdirat,compat_sys_mkdirat) | 300 | SYSCALL(sys_mkdirat,compat_sys_mkdirat) |
301 | SYSCALL(sys_mknodat,sys_mknodat,compat_sys_mknodat) /* 290 */ | 301 | SYSCALL(sys_mknodat,compat_sys_mknodat) /* 290 */ |
302 | SYSCALL(sys_fchownat,sys_fchownat,compat_sys_fchownat) | 302 | SYSCALL(sys_fchownat,compat_sys_fchownat) |
303 | SYSCALL(sys_futimesat,sys_futimesat,compat_sys_futimesat) | 303 | SYSCALL(sys_futimesat,compat_sys_futimesat) |
304 | SYSCALL(sys_fstatat64,sys_newfstatat,compat_sys_s390_fstatat64) | 304 | SYSCALL(sys_newfstatat,compat_sys_s390_fstatat64) |
305 | SYSCALL(sys_unlinkat,sys_unlinkat,compat_sys_unlinkat) | 305 | SYSCALL(sys_unlinkat,compat_sys_unlinkat) |
306 | SYSCALL(sys_renameat,sys_renameat,compat_sys_renameat) /* 295 */ | 306 | SYSCALL(sys_renameat,compat_sys_renameat) /* 295 */ |
307 | SYSCALL(sys_linkat,sys_linkat,compat_sys_linkat) | 307 | SYSCALL(sys_linkat,compat_sys_linkat) |
308 | SYSCALL(sys_symlinkat,sys_symlinkat,compat_sys_symlinkat) | 308 | SYSCALL(sys_symlinkat,compat_sys_symlinkat) |
309 | SYSCALL(sys_readlinkat,sys_readlinkat,compat_sys_readlinkat) | 309 | SYSCALL(sys_readlinkat,compat_sys_readlinkat) |
310 | SYSCALL(sys_fchmodat,sys_fchmodat,compat_sys_fchmodat) | 310 | SYSCALL(sys_fchmodat,compat_sys_fchmodat) |
311 | SYSCALL(sys_faccessat,sys_faccessat,compat_sys_faccessat) /* 300 */ | 311 | SYSCALL(sys_faccessat,compat_sys_faccessat) /* 300 */ |
312 | SYSCALL(sys_pselect6,sys_pselect6,compat_sys_pselect6) | 312 | SYSCALL(sys_pselect6,compat_sys_pselect6) |
313 | SYSCALL(sys_ppoll,sys_ppoll,compat_sys_ppoll) | 313 | SYSCALL(sys_ppoll,compat_sys_ppoll) |
314 | SYSCALL(sys_unshare,sys_unshare,compat_sys_unshare) | 314 | SYSCALL(sys_unshare,compat_sys_unshare) |
315 | SYSCALL(sys_set_robust_list,sys_set_robust_list,compat_sys_set_robust_list) | 315 | SYSCALL(sys_set_robust_list,compat_sys_set_robust_list) |
316 | SYSCALL(sys_get_robust_list,sys_get_robust_list,compat_sys_get_robust_list) | 316 | SYSCALL(sys_get_robust_list,compat_sys_get_robust_list) |
317 | SYSCALL(sys_splice,sys_splice,compat_sys_splice) | 317 | SYSCALL(sys_splice,compat_sys_splice) |
318 | SYSCALL(sys_sync_file_range,sys_sync_file_range,compat_sys_s390_sync_file_range) | 318 | SYSCALL(sys_sync_file_range,compat_sys_s390_sync_file_range) |
319 | SYSCALL(sys_tee,sys_tee,compat_sys_tee) | 319 | SYSCALL(sys_tee,compat_sys_tee) |
320 | SYSCALL(sys_vmsplice,sys_vmsplice,compat_sys_vmsplice) | 320 | SYSCALL(sys_vmsplice,compat_sys_vmsplice) |
321 | NI_SYSCALL /* 310 sys_move_pages */ | 321 | NI_SYSCALL /* 310 sys_move_pages */ |
322 | SYSCALL(sys_getcpu,sys_getcpu,compat_sys_getcpu) | 322 | SYSCALL(sys_getcpu,compat_sys_getcpu) |
323 | SYSCALL(sys_epoll_pwait,sys_epoll_pwait,compat_sys_epoll_pwait) | 323 | SYSCALL(sys_epoll_pwait,compat_sys_epoll_pwait) |
324 | SYSCALL(sys_utimes,sys_utimes,compat_sys_utimes) | 324 | SYSCALL(sys_utimes,compat_sys_utimes) |
325 | SYSCALL(sys_s390_fallocate,sys_fallocate,compat_sys_s390_fallocate) | 325 | SYSCALL(sys_fallocate,compat_sys_s390_fallocate) |
326 | SYSCALL(sys_utimensat,sys_utimensat,compat_sys_utimensat) /* 315 */ | 326 | SYSCALL(sys_utimensat,compat_sys_utimensat) /* 315 */ |
327 | SYSCALL(sys_signalfd,sys_signalfd,compat_sys_signalfd) | 327 | SYSCALL(sys_signalfd,compat_sys_signalfd) |
328 | NI_SYSCALL /* 317 old sys_timer_fd */ | 328 | NI_SYSCALL /* 317 old sys_timer_fd */ |
329 | SYSCALL(sys_eventfd,sys_eventfd,compat_sys_eventfd) | 329 | SYSCALL(sys_eventfd,compat_sys_eventfd) |
330 | SYSCALL(sys_timerfd_create,sys_timerfd_create,compat_sys_timerfd_create) | 330 | SYSCALL(sys_timerfd_create,compat_sys_timerfd_create) |
331 | SYSCALL(sys_timerfd_settime,sys_timerfd_settime,compat_sys_timerfd_settime) /* 320 */ | 331 | SYSCALL(sys_timerfd_settime,compat_sys_timerfd_settime) /* 320 */ |
332 | SYSCALL(sys_timerfd_gettime,sys_timerfd_gettime,compat_sys_timerfd_gettime) | 332 | SYSCALL(sys_timerfd_gettime,compat_sys_timerfd_gettime) |
333 | SYSCALL(sys_signalfd4,sys_signalfd4,compat_sys_signalfd4) | 333 | SYSCALL(sys_signalfd4,compat_sys_signalfd4) |
334 | SYSCALL(sys_eventfd2,sys_eventfd2,compat_sys_eventfd2) | 334 | SYSCALL(sys_eventfd2,compat_sys_eventfd2) |
335 | SYSCALL(sys_inotify_init1,sys_inotify_init1,compat_sys_inotify_init1) | 335 | SYSCALL(sys_inotify_init1,compat_sys_inotify_init1) |
336 | SYSCALL(sys_pipe2,sys_pipe2,compat_sys_pipe2) /* 325 */ | 336 | SYSCALL(sys_pipe2,compat_sys_pipe2) /* 325 */ |
337 | SYSCALL(sys_dup3,sys_dup3,compat_sys_dup3) | 337 | SYSCALL(sys_dup3,compat_sys_dup3) |
338 | SYSCALL(sys_epoll_create1,sys_epoll_create1,compat_sys_epoll_create1) | 338 | SYSCALL(sys_epoll_create1,compat_sys_epoll_create1) |
339 | SYSCALL(sys_preadv,sys_preadv,compat_sys_preadv) | 339 | SYSCALL(sys_preadv,compat_sys_preadv) |
340 | SYSCALL(sys_pwritev,sys_pwritev,compat_sys_pwritev) | 340 | SYSCALL(sys_pwritev,compat_sys_pwritev) |
341 | SYSCALL(sys_rt_tgsigqueueinfo,sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo) /* 330 */ | 341 | SYSCALL(sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo) /* 330 */ |
342 | SYSCALL(sys_perf_event_open,sys_perf_event_open,compat_sys_perf_event_open) | 342 | SYSCALL(sys_perf_event_open,compat_sys_perf_event_open) |
343 | SYSCALL(sys_fanotify_init,sys_fanotify_init,compat_sys_fanotify_init) | 343 | SYSCALL(sys_fanotify_init,compat_sys_fanotify_init) |
344 | SYSCALL(sys_fanotify_mark,sys_fanotify_mark,compat_sys_fanotify_mark) | 344 | SYSCALL(sys_fanotify_mark,compat_sys_fanotify_mark) |
345 | SYSCALL(sys_prlimit64,sys_prlimit64,compat_sys_prlimit64) | 345 | SYSCALL(sys_prlimit64,compat_sys_prlimit64) |
346 | SYSCALL(sys_name_to_handle_at,sys_name_to_handle_at,compat_sys_name_to_handle_at) /* 335 */ | 346 | SYSCALL(sys_name_to_handle_at,compat_sys_name_to_handle_at) /* 335 */ |
347 | SYSCALL(sys_open_by_handle_at,sys_open_by_handle_at,compat_sys_open_by_handle_at) | 347 | SYSCALL(sys_open_by_handle_at,compat_sys_open_by_handle_at) |
348 | SYSCALL(sys_clock_adjtime,sys_clock_adjtime,compat_sys_clock_adjtime) | 348 | SYSCALL(sys_clock_adjtime,compat_sys_clock_adjtime) |
349 | SYSCALL(sys_syncfs,sys_syncfs,compat_sys_syncfs) | 349 | SYSCALL(sys_syncfs,compat_sys_syncfs) |
350 | SYSCALL(sys_setns,sys_setns,compat_sys_setns) | 350 | SYSCALL(sys_setns,compat_sys_setns) |
351 | SYSCALL(sys_process_vm_readv,sys_process_vm_readv,compat_sys_process_vm_readv) /* 340 */ | 351 | SYSCALL(sys_process_vm_readv,compat_sys_process_vm_readv) /* 340 */ |
352 | SYSCALL(sys_process_vm_writev,sys_process_vm_writev,compat_sys_process_vm_writev) | 352 | SYSCALL(sys_process_vm_writev,compat_sys_process_vm_writev) |
353 | SYSCALL(sys_ni_syscall,sys_s390_runtime_instr,compat_sys_s390_runtime_instr) | 353 | SYSCALL(sys_s390_runtime_instr,compat_sys_s390_runtime_instr) |
354 | SYSCALL(sys_kcmp,sys_kcmp,compat_sys_kcmp) | 354 | SYSCALL(sys_kcmp,compat_sys_kcmp) |
355 | SYSCALL(sys_finit_module,sys_finit_module,compat_sys_finit_module) | 355 | SYSCALL(sys_finit_module,compat_sys_finit_module) |
356 | SYSCALL(sys_sched_setattr,sys_sched_setattr,compat_sys_sched_setattr) /* 345 */ | 356 | SYSCALL(sys_sched_setattr,compat_sys_sched_setattr) /* 345 */ |
357 | SYSCALL(sys_sched_getattr,sys_sched_getattr,compat_sys_sched_getattr) | 357 | SYSCALL(sys_sched_getattr,compat_sys_sched_getattr) |
358 | SYSCALL(sys_renameat2,sys_renameat2,compat_sys_renameat2) | 358 | SYSCALL(sys_renameat2,compat_sys_renameat2) |
359 | SYSCALL(sys_seccomp,sys_seccomp,compat_sys_seccomp) | 359 | SYSCALL(sys_seccomp,compat_sys_seccomp) |
360 | SYSCALL(sys_getrandom,sys_getrandom,compat_sys_getrandom) | 360 | SYSCALL(sys_getrandom,compat_sys_getrandom) |
361 | SYSCALL(sys_memfd_create,sys_memfd_create,compat_sys_memfd_create) /* 350 */ | 361 | SYSCALL(sys_memfd_create,compat_sys_memfd_create) /* 350 */ |
362 | SYSCALL(sys_bpf,sys_bpf,compat_sys_bpf) | 362 | SYSCALL(sys_bpf,compat_sys_bpf) |
363 | SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_write,compat_sys_s390_pci_mmio_write) | 363 | SYSCALL(sys_s390_pci_mmio_write,compat_sys_s390_pci_mmio_write) |
364 | SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_read,compat_sys_s390_pci_mmio_read) | 364 | SYSCALL(sys_s390_pci_mmio_read,compat_sys_s390_pci_mmio_read) |
365 | SYSCALL(sys_execveat,sys_execveat,compat_sys_execveat) | 365 | SYSCALL(sys_execveat,compat_sys_execveat) |
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index 14da43b801d9..5728c5bd44a8 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c | |||
@@ -421,7 +421,7 @@ int topology_cpu_init(struct cpu *cpu) | |||
421 | return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group); | 421 | return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group); |
422 | } | 422 | } |
423 | 423 | ||
424 | const struct cpumask *cpu_thread_mask(int cpu) | 424 | static const struct cpumask *cpu_thread_mask(int cpu) |
425 | { | 425 | { |
426 | return &per_cpu(cpu_topology, cpu).thread_mask; | 426 | return &per_cpu(cpu_topology, cpu).thread_mask; |
427 | } | 427 | } |
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index f081cf1157c3..4d96c9f53455 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c | |||
@@ -26,7 +26,6 @@ int show_unhandled_signals = 1; | |||
26 | 26 | ||
27 | static inline void __user *get_trap_ip(struct pt_regs *regs) | 27 | static inline void __user *get_trap_ip(struct pt_regs *regs) |
28 | { | 28 | { |
29 | #ifdef CONFIG_64BIT | ||
30 | unsigned long address; | 29 | unsigned long address; |
31 | 30 | ||
32 | if (regs->int_code & 0x200) | 31 | if (regs->int_code & 0x200) |
@@ -35,10 +34,6 @@ static inline void __user *get_trap_ip(struct pt_regs *regs) | |||
35 | address = regs->psw.addr; | 34 | address = regs->psw.addr; |
36 | return (void __user *) | 35 | return (void __user *) |
37 | ((address - (regs->int_code >> 16)) & PSW_ADDR_INSN); | 36 | ((address - (regs->int_code >> 16)) & PSW_ADDR_INSN); |
38 | #else | ||
39 | return (void __user *) | ||
40 | ((regs->psw.addr - (regs->int_code >> 16)) & PSW_ADDR_INSN); | ||
41 | #endif | ||
42 | } | 37 | } |
43 | 38 | ||
44 | static inline void report_user_fault(struct pt_regs *regs, int signr) | 39 | static inline void report_user_fault(struct pt_regs *regs, int signr) |
@@ -153,11 +148,8 @@ DO_ERROR_INFO(privileged_op, SIGILL, ILL_PRVOPC, | |||
153 | "privileged operation") | 148 | "privileged operation") |
154 | DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN, | 149 | DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN, |
155 | "special operation exception") | 150 | "special operation exception") |
156 | |||
157 | #ifdef CONFIG_64BIT | ||
158 | DO_ERROR_INFO(transaction_exception, SIGILL, ILL_ILLOPN, | 151 | DO_ERROR_INFO(transaction_exception, SIGILL, ILL_ILLOPN, |
159 | "transaction constraint exception") | 152 | "transaction constraint exception") |
160 | #endif | ||
161 | 153 | ||
162 | static inline void do_fp_trap(struct pt_regs *regs, int fpc) | 154 | static inline void do_fp_trap(struct pt_regs *regs, int fpc) |
163 | { | 155 | { |
@@ -182,7 +174,7 @@ static inline void do_fp_trap(struct pt_regs *regs, int fpc) | |||
182 | void translation_exception(struct pt_regs *regs) | 174 | void translation_exception(struct pt_regs *regs) |
183 | { | 175 | { |
184 | /* May never happen. */ | 176 | /* May never happen. */ |
185 | die(regs, "Translation exception"); | 177 | panic("Translation exception"); |
186 | } | 178 | } |
187 | 179 | ||
188 | void illegal_op(struct pt_regs *regs) | 180 | void illegal_op(struct pt_regs *regs) |
@@ -211,29 +203,6 @@ void illegal_op(struct pt_regs *regs) | |||
211 | } else if (*((__u16 *) opcode) == UPROBE_SWBP_INSN) { | 203 | } else if (*((__u16 *) opcode) == UPROBE_SWBP_INSN) { |
212 | is_uprobe_insn = 1; | 204 | is_uprobe_insn = 1; |
213 | #endif | 205 | #endif |
214 | #ifdef CONFIG_MATHEMU | ||
215 | } else if (opcode[0] == 0xb3) { | ||
216 | if (get_user(*((__u16 *) (opcode+2)), location+1)) | ||
217 | return; | ||
218 | signal = math_emu_b3(opcode, regs); | ||
219 | } else if (opcode[0] == 0xed) { | ||
220 | if (get_user(*((__u32 *) (opcode+2)), | ||
221 | (__u32 __user *)(location+1))) | ||
222 | return; | ||
223 | signal = math_emu_ed(opcode, regs); | ||
224 | } else if (*((__u16 *) opcode) == 0xb299) { | ||
225 | if (get_user(*((__u16 *) (opcode+2)), location+1)) | ||
226 | return; | ||
227 | signal = math_emu_srnm(opcode, regs); | ||
228 | } else if (*((__u16 *) opcode) == 0xb29c) { | ||
229 | if (get_user(*((__u16 *) (opcode+2)), location+1)) | ||
230 | return; | ||
231 | signal = math_emu_stfpc(opcode, regs); | ||
232 | } else if (*((__u16 *) opcode) == 0xb29d) { | ||
233 | if (get_user(*((__u16 *) (opcode+2)), location+1)) | ||
234 | return; | ||
235 | signal = math_emu_lfpc(opcode, regs); | ||
236 | #endif | ||
237 | } else | 206 | } else |
238 | signal = SIGILL; | 207 | signal = SIGILL; |
239 | } | 208 | } |
@@ -247,71 +216,14 @@ void illegal_op(struct pt_regs *regs) | |||
247 | 3, SIGTRAP) != NOTIFY_STOP) | 216 | 3, SIGTRAP) != NOTIFY_STOP) |
248 | signal = SIGILL; | 217 | signal = SIGILL; |
249 | } | 218 | } |
250 | |||
251 | #ifdef CONFIG_MATHEMU | ||
252 | if (signal == SIGFPE) | ||
253 | do_fp_trap(regs, current->thread.fp_regs.fpc); | ||
254 | else if (signal == SIGSEGV) | ||
255 | do_trap(regs, signal, SEGV_MAPERR, "user address fault"); | ||
256 | else | ||
257 | #endif | ||
258 | if (signal) | 219 | if (signal) |
259 | do_trap(regs, signal, ILL_ILLOPC, "illegal operation"); | 220 | do_trap(regs, signal, ILL_ILLOPC, "illegal operation"); |
260 | } | 221 | } |
261 | NOKPROBE_SYMBOL(illegal_op); | 222 | NOKPROBE_SYMBOL(illegal_op); |
262 | 223 | ||
263 | #ifdef CONFIG_MATHEMU | ||
264 | void specification_exception(struct pt_regs *regs) | ||
265 | { | ||
266 | __u8 opcode[6]; | ||
267 | __u16 __user *location = NULL; | ||
268 | int signal = 0; | ||
269 | |||
270 | location = (__u16 __user *) get_trap_ip(regs); | ||
271 | |||
272 | if (user_mode(regs)) { | ||
273 | get_user(*((__u16 *) opcode), location); | ||
274 | switch (opcode[0]) { | ||
275 | case 0x28: /* LDR Rx,Ry */ | ||
276 | signal = math_emu_ldr(opcode); | ||
277 | break; | ||
278 | case 0x38: /* LER Rx,Ry */ | ||
279 | signal = math_emu_ler(opcode); | ||
280 | break; | ||
281 | case 0x60: /* STD R,D(X,B) */ | ||
282 | get_user(*((__u16 *) (opcode+2)), location+1); | ||
283 | signal = math_emu_std(opcode, regs); | ||
284 | break; | ||
285 | case 0x68: /* LD R,D(X,B) */ | ||
286 | get_user(*((__u16 *) (opcode+2)), location+1); | ||
287 | signal = math_emu_ld(opcode, regs); | ||
288 | break; | ||
289 | case 0x70: /* STE R,D(X,B) */ | ||
290 | get_user(*((__u16 *) (opcode+2)), location+1); | ||
291 | signal = math_emu_ste(opcode, regs); | ||
292 | break; | ||
293 | case 0x78: /* LE R,D(X,B) */ | ||
294 | get_user(*((__u16 *) (opcode+2)), location+1); | ||
295 | signal = math_emu_le(opcode, regs); | ||
296 | break; | ||
297 | default: | ||
298 | signal = SIGILL; | ||
299 | break; | ||
300 | } | ||
301 | } else | ||
302 | signal = SIGILL; | ||
303 | |||
304 | if (signal == SIGFPE) | ||
305 | do_fp_trap(regs, current->thread.fp_regs.fpc); | ||
306 | else if (signal) | ||
307 | do_trap(regs, signal, ILL_ILLOPN, "specification exception"); | ||
308 | } | ||
309 | #else | ||
310 | DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN, | 224 | DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN, |
311 | "specification exception"); | 225 | "specification exception"); |
312 | #endif | ||
313 | 226 | ||
314 | #ifdef CONFIG_64BIT | ||
315 | int alloc_vector_registers(struct task_struct *tsk) | 227 | int alloc_vector_registers(struct task_struct *tsk) |
316 | { | 228 | { |
317 | __vector128 *vxrs; | 229 | __vector128 *vxrs; |
@@ -377,7 +289,6 @@ static int __init disable_vector_extension(char *str) | |||
377 | return 1; | 289 | return 1; |
378 | } | 290 | } |
379 | __setup("novx", disable_vector_extension); | 291 | __setup("novx", disable_vector_extension); |
380 | #endif | ||
381 | 292 | ||
382 | void data_exception(struct pt_regs *regs) | 293 | void data_exception(struct pt_regs *regs) |
383 | { | 294 | { |
@@ -386,65 +297,7 @@ void data_exception(struct pt_regs *regs) | |||
386 | 297 | ||
387 | location = get_trap_ip(regs); | 298 | location = get_trap_ip(regs); |
388 | 299 | ||
389 | if (MACHINE_HAS_IEEE) | 300 | asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc)); |
390 | asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc)); | ||
391 | |||
392 | #ifdef CONFIG_MATHEMU | ||
393 | else if (user_mode(regs)) { | ||
394 | __u8 opcode[6]; | ||
395 | get_user(*((__u16 *) opcode), location); | ||
396 | switch (opcode[0]) { | ||
397 | case 0x28: /* LDR Rx,Ry */ | ||
398 | signal = math_emu_ldr(opcode); | ||
399 | break; | ||
400 | case 0x38: /* LER Rx,Ry */ | ||
401 | signal = math_emu_ler(opcode); | ||
402 | break; | ||
403 | case 0x60: /* STD R,D(X,B) */ | ||
404 | get_user(*((__u16 *) (opcode+2)), location+1); | ||
405 | signal = math_emu_std(opcode, regs); | ||
406 | break; | ||
407 | case 0x68: /* LD R,D(X,B) */ | ||
408 | get_user(*((__u16 *) (opcode+2)), location+1); | ||
409 | signal = math_emu_ld(opcode, regs); | ||
410 | break; | ||
411 | case 0x70: /* STE R,D(X,B) */ | ||
412 | get_user(*((__u16 *) (opcode+2)), location+1); | ||
413 | signal = math_emu_ste(opcode, regs); | ||
414 | break; | ||
415 | case 0x78: /* LE R,D(X,B) */ | ||
416 | get_user(*((__u16 *) (opcode+2)), location+1); | ||
417 | signal = math_emu_le(opcode, regs); | ||
418 | break; | ||
419 | case 0xb3: | ||
420 | get_user(*((__u16 *) (opcode+2)), location+1); | ||
421 | signal = math_emu_b3(opcode, regs); | ||
422 | break; | ||
423 | case 0xed: | ||
424 | get_user(*((__u32 *) (opcode+2)), | ||
425 | (__u32 __user *)(location+1)); | ||
426 | signal = math_emu_ed(opcode, regs); | ||
427 | break; | ||
428 | case 0xb2: | ||
429 | if (opcode[1] == 0x99) { | ||
430 | get_user(*((__u16 *) (opcode+2)), location+1); | ||
431 | signal = math_emu_srnm(opcode, regs); | ||
432 | } else if (opcode[1] == 0x9c) { | ||
433 | get_user(*((__u16 *) (opcode+2)), location+1); | ||
434 | signal = math_emu_stfpc(opcode, regs); | ||
435 | } else if (opcode[1] == 0x9d) { | ||
436 | get_user(*((__u16 *) (opcode+2)), location+1); | ||
437 | signal = math_emu_lfpc(opcode, regs); | ||
438 | } else | ||
439 | signal = SIGILL; | ||
440 | break; | ||
441 | default: | ||
442 | signal = SIGILL; | ||
443 | break; | ||
444 | } | ||
445 | } | ||
446 | #endif | ||
447 | #ifdef CONFIG_64BIT | ||
448 | /* Check for vector register enablement */ | 301 | /* Check for vector register enablement */ |
449 | if (MACHINE_HAS_VX && !current->thread.vxrs && | 302 | if (MACHINE_HAS_VX && !current->thread.vxrs && |
450 | (current->thread.fp_regs.fpc & FPC_DXC_MASK) == 0xfe00) { | 303 | (current->thread.fp_regs.fpc & FPC_DXC_MASK) == 0xfe00) { |
@@ -454,13 +307,11 @@ void data_exception(struct pt_regs *regs) | |||
454 | clear_pt_regs_flag(regs, PIF_PER_TRAP); | 307 | clear_pt_regs_flag(regs, PIF_PER_TRAP); |
455 | return; | 308 | return; |
456 | } | 309 | } |
457 | #endif | ||
458 | |||
459 | if (current->thread.fp_regs.fpc & FPC_DXC_MASK) | 310 | if (current->thread.fp_regs.fpc & FPC_DXC_MASK) |
460 | signal = SIGFPE; | 311 | signal = SIGFPE; |
461 | else | 312 | else |
462 | signal = SIGILL; | 313 | signal = SIGILL; |
463 | if (signal == SIGFPE) | 314 | if (signal == SIGFPE) |
464 | do_fp_trap(regs, current->thread.fp_regs.fpc); | 315 | do_fp_trap(regs, current->thread.fp_regs.fpc); |
465 | else if (signal) | 316 | else if (signal) |
466 | do_trap(regs, signal, ILL_ILLOPN, "data exception"); | 317 | do_trap(regs, signal, ILL_ILLOPN, "data exception"); |
diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c index cc7328080b60..66956c09d5bf 100644 --- a/arch/s390/kernel/uprobes.c +++ b/arch/s390/kernel/uprobes.c | |||
@@ -188,7 +188,9 @@ static void adjust_psw_addr(psw_t *psw, unsigned long len) | |||
188 | else if (put_user(*(input), __ptr)) \ | 188 | else if (put_user(*(input), __ptr)) \ |
189 | __rc = EMU_ADDRESSING; \ | 189 | __rc = EMU_ADDRESSING; \ |
190 | if (__rc == 0) \ | 190 | if (__rc == 0) \ |
191 | sim_stor_event(regs, __ptr, mask + 1); \ | 191 | sim_stor_event(regs, \ |
192 | (void __force *)__ptr, \ | ||
193 | mask + 1); \ | ||
192 | __rc; \ | 194 | __rc; \ |
193 | }) | 195 | }) |
194 | 196 | ||
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c index 0bbb7e027c5a..0d58269ff425 100644 --- a/arch/s390/kernel/vdso.c +++ b/arch/s390/kernel/vdso.c | |||
@@ -32,19 +32,17 @@ | |||
32 | #include <asm/vdso.h> | 32 | #include <asm/vdso.h> |
33 | #include <asm/facility.h> | 33 | #include <asm/facility.h> |
34 | 34 | ||
35 | #if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT) | 35 | #ifdef CONFIG_COMPAT |
36 | extern char vdso32_start, vdso32_end; | 36 | extern char vdso32_start, vdso32_end; |
37 | static void *vdso32_kbase = &vdso32_start; | 37 | static void *vdso32_kbase = &vdso32_start; |
38 | static unsigned int vdso32_pages; | 38 | static unsigned int vdso32_pages; |
39 | static struct page **vdso32_pagelist; | 39 | static struct page **vdso32_pagelist; |
40 | #endif | 40 | #endif |
41 | 41 | ||
42 | #ifdef CONFIG_64BIT | ||
43 | extern char vdso64_start, vdso64_end; | 42 | extern char vdso64_start, vdso64_end; |
44 | static void *vdso64_kbase = &vdso64_start; | 43 | static void *vdso64_kbase = &vdso64_start; |
45 | static unsigned int vdso64_pages; | 44 | static unsigned int vdso64_pages; |
46 | static struct page **vdso64_pagelist; | 45 | static struct page **vdso64_pagelist; |
47 | #endif /* CONFIG_64BIT */ | ||
48 | 46 | ||
49 | /* | 47 | /* |
50 | * Should the kernel map a VDSO page into processes and pass its | 48 | * Should the kernel map a VDSO page into processes and pass its |
@@ -87,7 +85,6 @@ static void vdso_init_data(struct vdso_data *vd) | |||
87 | vd->ectg_available = test_facility(31); | 85 | vd->ectg_available = test_facility(31); |
88 | } | 86 | } |
89 | 87 | ||
90 | #ifdef CONFIG_64BIT | ||
91 | /* | 88 | /* |
92 | * Allocate/free per cpu vdso data. | 89 | * Allocate/free per cpu vdso data. |
93 | */ | 90 | */ |
@@ -169,7 +166,6 @@ static void vdso_init_cr5(void) | |||
169 | cr5 = offsetof(struct _lowcore, paste); | 166 | cr5 = offsetof(struct _lowcore, paste); |
170 | __ctl_load(cr5, 5, 5); | 167 | __ctl_load(cr5, 5, 5); |
171 | } | 168 | } |
172 | #endif /* CONFIG_64BIT */ | ||
173 | 169 | ||
174 | /* | 170 | /* |
175 | * This is called from binfmt_elf, we create the special vma for the | 171 | * This is called from binfmt_elf, we create the special vma for the |
@@ -191,7 +187,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) | |||
191 | if (!uses_interp) | 187 | if (!uses_interp) |
192 | return 0; | 188 | return 0; |
193 | 189 | ||
194 | #ifdef CONFIG_64BIT | ||
195 | vdso_pagelist = vdso64_pagelist; | 190 | vdso_pagelist = vdso64_pagelist; |
196 | vdso_pages = vdso64_pages; | 191 | vdso_pages = vdso64_pages; |
197 | #ifdef CONFIG_COMPAT | 192 | #ifdef CONFIG_COMPAT |
@@ -200,11 +195,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) | |||
200 | vdso_pages = vdso32_pages; | 195 | vdso_pages = vdso32_pages; |
201 | } | 196 | } |
202 | #endif | 197 | #endif |
203 | #else | ||
204 | vdso_pagelist = vdso32_pagelist; | ||
205 | vdso_pages = vdso32_pages; | ||
206 | #endif | ||
207 | |||
208 | /* | 198 | /* |
209 | * vDSO has a problem and was disabled, just don't "enable" it for | 199 | * vDSO has a problem and was disabled, just don't "enable" it for |
210 | * the process | 200 | * the process |
@@ -268,7 +258,7 @@ static int __init vdso_init(void) | |||
268 | if (!vdso_enabled) | 258 | if (!vdso_enabled) |
269 | return 0; | 259 | return 0; |
270 | vdso_init_data(vdso_data); | 260 | vdso_init_data(vdso_data); |
271 | #if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT) | 261 | #ifdef CONFIG_COMPAT |
272 | /* Calculate the size of the 32 bit vDSO */ | 262 | /* Calculate the size of the 32 bit vDSO */ |
273 | vdso32_pages = ((&vdso32_end - &vdso32_start | 263 | vdso32_pages = ((&vdso32_end - &vdso32_start |
274 | + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1; | 264 | + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1; |
@@ -287,7 +277,6 @@ static int __init vdso_init(void) | |||
287 | vdso32_pagelist[vdso32_pages] = NULL; | 277 | vdso32_pagelist[vdso32_pages] = NULL; |
288 | #endif | 278 | #endif |
289 | 279 | ||
290 | #ifdef CONFIG_64BIT | ||
291 | /* Calculate the size of the 64 bit vDSO */ | 280 | /* Calculate the size of the 64 bit vDSO */ |
292 | vdso64_pages = ((&vdso64_end - &vdso64_start | 281 | vdso64_pages = ((&vdso64_end - &vdso64_start |
293 | + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1; | 282 | + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1; |
@@ -307,7 +296,6 @@ static int __init vdso_init(void) | |||
307 | if (vdso_alloc_per_cpu(&S390_lowcore)) | 296 | if (vdso_alloc_per_cpu(&S390_lowcore)) |
308 | BUG(); | 297 | BUG(); |
309 | vdso_init_cr5(); | 298 | vdso_init_cr5(); |
310 | #endif /* CONFIG_64BIT */ | ||
311 | 299 | ||
312 | get_page(virt_to_page(vdso_data)); | 300 | get_page(virt_to_page(vdso_data)); |
313 | 301 | ||
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index 35b13ed0af5f..445657fe658c 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S | |||
@@ -6,17 +6,10 @@ | |||
6 | #include <asm/page.h> | 6 | #include <asm/page.h> |
7 | #include <asm-generic/vmlinux.lds.h> | 7 | #include <asm-generic/vmlinux.lds.h> |
8 | 8 | ||
9 | #ifndef CONFIG_64BIT | ||
10 | OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390") | ||
11 | OUTPUT_ARCH(s390:31-bit) | ||
12 | ENTRY(startup) | ||
13 | jiffies = jiffies_64 + 4; | ||
14 | #else | ||
15 | OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390") | 9 | OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390") |
16 | OUTPUT_ARCH(s390:64-bit) | 10 | OUTPUT_ARCH(s390:64-bit) |
17 | ENTRY(startup) | 11 | ENTRY(startup) |
18 | jiffies = jiffies_64; | 12 | jiffies = jiffies_64; |
19 | #endif | ||
20 | 13 | ||
21 | PHDRS { | 14 | PHDRS { |
22 | text PT_LOAD FLAGS(5); /* R_E */ | 15 | text PT_LOAD FLAGS(5); /* R_E */ |
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile index a01df233856f..0e8fefe5b0ce 100644 --- a/arch/s390/lib/Makefile +++ b/arch/s390/lib/Makefile | |||
@@ -3,8 +3,7 @@ | |||
3 | # | 3 | # |
4 | 4 | ||
5 | lib-y += delay.o string.o uaccess.o find.o | 5 | lib-y += delay.o string.o uaccess.o find.o |
6 | obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o mem32.o | 6 | obj-y += mem.o |
7 | obj-$(CONFIG_64BIT) += mem64.o | ||
8 | lib-$(CONFIG_SMP) += spinlock.o | 7 | lib-$(CONFIG_SMP) += spinlock.o |
9 | lib-$(CONFIG_KPROBES) += probes.o | 8 | lib-$(CONFIG_KPROBES) += probes.o |
10 | lib-$(CONFIG_UPROBES) += probes.o | 9 | lib-$(CONFIG_UPROBES) += probes.o |
diff --git a/arch/s390/lib/div64.c b/arch/s390/lib/div64.c deleted file mode 100644 index 261152f83242..000000000000 --- a/arch/s390/lib/div64.c +++ /dev/null | |||
@@ -1,147 +0,0 @@ | |||
1 | /* | ||
2 | * __div64_32 implementation for 31 bit. | ||
3 | * | ||
4 | * Copyright IBM Corp. 2006 | ||
5 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), | ||
6 | */ | ||
7 | |||
8 | #include <linux/types.h> | ||
9 | #include <linux/module.h> | ||
10 | |||
11 | #ifdef CONFIG_MARCH_G5 | ||
12 | |||
13 | /* | ||
14 | * Function to divide an unsigned 64 bit integer by an unsigned | ||
15 | * 31 bit integer using signed 64/32 bit division. | ||
16 | */ | ||
17 | static uint32_t __div64_31(uint64_t *n, uint32_t base) | ||
18 | { | ||
19 | register uint32_t reg2 asm("2"); | ||
20 | register uint32_t reg3 asm("3"); | ||
21 | uint32_t *words = (uint32_t *) n; | ||
22 | uint32_t tmp; | ||
23 | |||
24 | /* Special case base==1, remainder = 0, quotient = n */ | ||
25 | if (base == 1) | ||
26 | return 0; | ||
27 | /* | ||
28 | * Special case base==0 will cause a fixed point divide exception | ||
29 | * on the dr instruction and may not happen anyway. For the | ||
30 | * following calculation we can assume base > 1. The first | ||
31 | * signed 64 / 32 bit division with an upper half of 0 will | ||
32 | * give the correct upper half of the 64 bit quotient. | ||
33 | */ | ||
34 | reg2 = 0UL; | ||
35 | reg3 = words[0]; | ||
36 | asm volatile( | ||
37 | " dr %0,%2\n" | ||
38 | : "+d" (reg2), "+d" (reg3) : "d" (base) : "cc" ); | ||
39 | words[0] = reg3; | ||
40 | reg3 = words[1]; | ||
41 | /* | ||
42 | * To get the lower half of the 64 bit quotient and the 32 bit | ||
43 | * remainder we have to use a little trick. Since we only have | ||
44 | * a signed division the quotient can get too big. To avoid this | ||
45 | * the 64 bit dividend is halved, then the signed division will | ||
46 | * work. Afterwards the quotient and the remainder are doubled. | ||
47 | * If the last bit of the dividend has been one the remainder | ||
48 | * is increased by one then checked against the base. If the | ||
49 | * remainder has overflown subtract base and increase the | ||
50 | * quotient. Simple, no ? | ||
51 | */ | ||
52 | asm volatile( | ||
53 | " nr %2,%1\n" | ||
54 | " srdl %0,1\n" | ||
55 | " dr %0,%3\n" | ||
56 | " alr %0,%0\n" | ||
57 | " alr %1,%1\n" | ||
58 | " alr %0,%2\n" | ||
59 | " clr %0,%3\n" | ||
60 | " jl 0f\n" | ||
61 | " slr %0,%3\n" | ||
62 | " ahi %1,1\n" | ||
63 | "0:\n" | ||
64 | : "+d" (reg2), "+d" (reg3), "=d" (tmp) | ||
65 | : "d" (base), "2" (1UL) : "cc" ); | ||
66 | words[1] = reg3; | ||
67 | return reg2; | ||
68 | } | ||
69 | |||
70 | /* | ||
71 | * Function to divide an unsigned 64 bit integer by an unsigned | ||
72 | * 32 bit integer using the unsigned 64/31 bit division. | ||
73 | */ | ||
74 | uint32_t __div64_32(uint64_t *n, uint32_t base) | ||
75 | { | ||
76 | uint32_t r; | ||
77 | |||
78 | /* | ||
79 | * If the most significant bit of base is set, divide n by | ||
80 | * (base/2). That allows to use 64/31 bit division and gives a | ||
81 | * good approximation of the result: n = (base/2)*q + r. The | ||
82 | * result needs to be corrected with two simple transformations. | ||
83 | * If base is already < 2^31-1 __div64_31 can be used directly. | ||
84 | */ | ||
85 | r = __div64_31(n, ((signed) base < 0) ? (base/2) : base); | ||
86 | if ((signed) base < 0) { | ||
87 | uint64_t q = *n; | ||
88 | /* | ||
89 | * First transformation: | ||
90 | * n = (base/2)*q + r | ||
91 | * = ((base/2)*2)*(q/2) + ((q&1) ? (base/2) : 0) + r | ||
92 | * Since r < (base/2), r + (base/2) < base. | ||
93 | * With q1 = (q/2) and r1 = r + ((q&1) ? (base/2) : 0) | ||
94 | * n = ((base/2)*2)*q1 + r1 with r1 < base. | ||
95 | */ | ||
96 | if (q & 1) | ||
97 | r += base/2; | ||
98 | q >>= 1; | ||
99 | /* | ||
100 | * Second transformation. ((base/2)*2) could have lost the | ||
101 | * last bit. | ||
102 | * n = ((base/2)*2)*q1 + r1 | ||
103 | * = base*q1 - ((base&1) ? q1 : 0) + r1 | ||
104 | */ | ||
105 | if (base & 1) { | ||
106 | int64_t rx = r - q; | ||
107 | /* | ||
108 | * base is >= 2^31. The worst case for the while | ||
109 | * loop is n=2^64-1 base=2^31+1. That gives a | ||
110 | * maximum for q=(2^64-1)/2^31 = 0x1ffffffff. Since | ||
111 | * base >= 2^31 the loop is finished after a maximum | ||
112 | * of three iterations. | ||
113 | */ | ||
114 | while (rx < 0) { | ||
115 | rx += base; | ||
116 | q--; | ||
117 | } | ||
118 | r = rx; | ||
119 | } | ||
120 | *n = q; | ||
121 | } | ||
122 | return r; | ||
123 | } | ||
124 | |||
125 | #else /* MARCH_G5 */ | ||
126 | |||
127 | uint32_t __div64_32(uint64_t *n, uint32_t base) | ||
128 | { | ||
129 | register uint32_t reg2 asm("2"); | ||
130 | register uint32_t reg3 asm("3"); | ||
131 | uint32_t *words = (uint32_t *) n; | ||
132 | |||
133 | reg2 = 0UL; | ||
134 | reg3 = words[0]; | ||
135 | asm volatile( | ||
136 | " dlr %0,%2\n" | ||
137 | : "+d" (reg2), "+d" (reg3) : "d" (base) : "cc" ); | ||
138 | words[0] = reg3; | ||
139 | reg3 = words[1]; | ||
140 | asm volatile( | ||
141 | " dlr %0,%2\n" | ||
142 | : "+d" (reg2), "+d" (reg3) : "d" (base) : "cc" ); | ||
143 | words[1] = reg3; | ||
144 | return reg2; | ||
145 | } | ||
146 | |||
147 | #endif /* MARCH_G5 */ | ||
diff --git a/arch/s390/lib/mem64.S b/arch/s390/lib/mem.S index c6d553e85ab1..c6d553e85ab1 100644 --- a/arch/s390/lib/mem64.S +++ b/arch/s390/lib/mem.S | |||
diff --git a/arch/s390/lib/mem32.S b/arch/s390/lib/mem32.S deleted file mode 100644 index 14ca9244b615..000000000000 --- a/arch/s390/lib/mem32.S +++ /dev/null | |||
@@ -1,92 +0,0 @@ | |||
1 | /* | ||
2 | * String handling functions. | ||
3 | * | ||
4 | * Copyright IBM Corp. 2012 | ||
5 | */ | ||
6 | |||
7 | #include <linux/linkage.h> | ||
8 | |||
9 | /* | ||
10 | * memset implementation | ||
11 | * | ||
12 | * This code corresponds to the C construct below. We do distinguish | ||
13 | * between clearing (c == 0) and setting a memory array (c != 0) simply | ||
14 | * because nearly all memset invocations in the kernel clear memory and | ||
15 | * the xc instruction is preferred in such cases. | ||
16 | * | ||
17 | * void *memset(void *s, int c, size_t n) | ||
18 | * { | ||
19 | * if (likely(c == 0)) | ||
20 | * return __builtin_memset(s, 0, n); | ||
21 | * return __builtin_memset(s, c, n); | ||
22 | * } | ||
23 | */ | ||
24 | ENTRY(memset) | ||
25 | basr %r5,%r0 | ||
26 | .Lmemset_base: | ||
27 | ltr %r4,%r4 | ||
28 | bzr %r14 | ||
29 | ltr %r3,%r3 | ||
30 | jnz .Lmemset_fill | ||
31 | ahi %r4,-1 | ||
32 | lr %r3,%r4 | ||
33 | srl %r3,8 | ||
34 | ltr %r3,%r3 | ||
35 | lr %r1,%r2 | ||
36 | je .Lmemset_clear_rest | ||
37 | .Lmemset_clear_loop: | ||
38 | xc 0(256,%r1),0(%r1) | ||
39 | la %r1,256(%r1) | ||
40 | brct %r3,.Lmemset_clear_loop | ||
41 | .Lmemset_clear_rest: | ||
42 | ex %r4,.Lmemset_xc-.Lmemset_base(%r5) | ||
43 | br %r14 | ||
44 | .Lmemset_fill: | ||
45 | stc %r3,0(%r2) | ||
46 | chi %r4,1 | ||
47 | lr %r1,%r2 | ||
48 | ber %r14 | ||
49 | ahi %r4,-2 | ||
50 | lr %r3,%r4 | ||
51 | srl %r3,8 | ||
52 | ltr %r3,%r3 | ||
53 | je .Lmemset_fill_rest | ||
54 | .Lmemset_fill_loop: | ||
55 | mvc 1(256,%r1),0(%r1) | ||
56 | la %r1,256(%r1) | ||
57 | brct %r3,.Lmemset_fill_loop | ||
58 | .Lmemset_fill_rest: | ||
59 | ex %r4,.Lmemset_mvc-.Lmemset_base(%r5) | ||
60 | br %r14 | ||
61 | .Lmemset_xc: | ||
62 | xc 0(1,%r1),0(%r1) | ||
63 | .Lmemset_mvc: | ||
64 | mvc 1(1,%r1),0(%r1) | ||
65 | |||
66 | /* | ||
67 | * memcpy implementation | ||
68 | * | ||
69 | * void *memcpy(void *dest, const void *src, size_t n) | ||
70 | */ | ||
71 | ENTRY(memcpy) | ||
72 | basr %r5,%r0 | ||
73 | .Lmemcpy_base: | ||
74 | ltr %r4,%r4 | ||
75 | bzr %r14 | ||
76 | ahi %r4,-1 | ||
77 | lr %r0,%r4 | ||
78 | srl %r0,8 | ||
79 | ltr %r0,%r0 | ||
80 | lr %r1,%r2 | ||
81 | jnz .Lmemcpy_loop | ||
82 | .Lmemcpy_rest: | ||
83 | ex %r4,.Lmemcpy_mvc-.Lmemcpy_base(%r5) | ||
84 | br %r14 | ||
85 | .Lmemcpy_loop: | ||
86 | mvc 0(256,%r1),0(%r3) | ||
87 | la %r1,256(%r1) | ||
88 | la %r3,256(%r3) | ||
89 | brct %r0,.Lmemcpy_loop | ||
90 | j .Lmemcpy_rest | ||
91 | .Lmemcpy_mvc: | ||
92 | mvc 0(1,%r1),0(%r3) | ||
diff --git a/arch/s390/lib/qrnnd.S b/arch/s390/lib/qrnnd.S deleted file mode 100644 index d321329130ec..000000000000 --- a/arch/s390/lib/qrnnd.S +++ /dev/null | |||
@@ -1,78 +0,0 @@ | |||
1 | # S/390 __udiv_qrnnd | ||
2 | |||
3 | #include <linux/linkage.h> | ||
4 | |||
5 | # r2 : &__r | ||
6 | # r3 : upper half of 64 bit word n | ||
7 | # r4 : lower half of 64 bit word n | ||
8 | # r5 : divisor d | ||
9 | # the reminder r of the division is to be stored to &__r and | ||
10 | # the quotient q is to be returned | ||
11 | |||
12 | .text | ||
13 | ENTRY(__udiv_qrnnd) | ||
14 | st %r2,24(%r15) # store pointer to reminder for later | ||
15 | lr %r0,%r3 # reload n | ||
16 | lr %r1,%r4 | ||
17 | ltr %r2,%r5 # reload and test divisor | ||
18 | jp 5f | ||
19 | # divisor >= 0x80000000 | ||
20 | srdl %r0,2 # n/4 | ||
21 | srl %r2,1 # d/2 | ||
22 | slr %r1,%r2 # special case if last bit of d is set | ||
23 | brc 3,0f # (n/4) div (n/2) can overflow by 1 | ||
24 | ahi %r0,-1 # trick: subtract n/2, then divide | ||
25 | 0: dr %r0,%r2 # signed division | ||
26 | ahi %r1,1 # trick part 2: add 1 to the quotient | ||
27 | # now (n >> 2) = (d >> 1) * %r1 + %r0 | ||
28 | lhi %r3,1 | ||
29 | nr %r3,%r1 # test last bit of q | ||
30 | jz 1f | ||
31 | alr %r0,%r2 # add (d>>1) to r | ||
32 | 1: srl %r1,1 # q >>= 1 | ||
33 | # now (n >> 2) = (d&-2) * %r1 + %r0 | ||
34 | lhi %r3,1 | ||
35 | nr %r3,%r5 # test last bit of d | ||
36 | jz 2f | ||
37 | slr %r0,%r1 # r -= q | ||
38 | brc 3,2f # borrow ? | ||
39 | alr %r0,%r5 # r += d | ||
40 | ahi %r1,-1 | ||
41 | 2: # now (n >> 2) = d * %r1 + %r0 | ||
42 | alr %r1,%r1 # q <<= 1 | ||
43 | alr %r0,%r0 # r <<= 1 | ||
44 | brc 12,3f # overflow on r ? | ||
45 | slr %r0,%r5 # r -= d | ||
46 | ahi %r1,1 # q += 1 | ||
47 | 3: lhi %r3,2 | ||
48 | nr %r3,%r4 # test next to last bit of n | ||
49 | jz 4f | ||
50 | ahi %r0,1 # r += 1 | ||
51 | 4: clr %r0,%r5 # r >= d ? | ||
52 | jl 6f | ||
53 | slr %r0,%r5 # r -= d | ||
54 | ahi %r1,1 # q += 1 | ||
55 | # now (n >> 1) = d * %r1 + %r0 | ||
56 | j 6f | ||
57 | 5: # divisor < 0x80000000 | ||
58 | srdl %r0,1 | ||
59 | dr %r0,%r2 # signed division | ||
60 | # now (n >> 1) = d * %r1 + %r0 | ||
61 | 6: alr %r1,%r1 # q <<= 1 | ||
62 | alr %r0,%r0 # r <<= 1 | ||
63 | brc 12,7f # overflow on r ? | ||
64 | slr %r0,%r5 # r -= d | ||
65 | ahi %r1,1 # q += 1 | ||
66 | 7: lhi %r3,1 | ||
67 | nr %r3,%r4 # isolate last bit of n | ||
68 | alr %r0,%r3 # r += (n & 1) | ||
69 | clr %r0,%r5 # r >= d ? | ||
70 | jl 8f | ||
71 | slr %r0,%r5 # r -= d | ||
72 | ahi %r1,1 # q += 1 | ||
73 | 8: # now n = d * %r1 + %r0 | ||
74 | l %r2,24(%r15) | ||
75 | st %r0,0(%r2) | ||
76 | lr %r2,%r1 | ||
77 | br %r14 | ||
78 | .end __udiv_qrnnd | ||
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c index 53dd5d7a0c96..4614d415bb58 100644 --- a/arch/s390/lib/uaccess.c +++ b/arch/s390/lib/uaccess.c | |||
@@ -15,20 +15,6 @@ | |||
15 | #include <asm/mmu_context.h> | 15 | #include <asm/mmu_context.h> |
16 | #include <asm/facility.h> | 16 | #include <asm/facility.h> |
17 | 17 | ||
18 | #ifndef CONFIG_64BIT | ||
19 | #define AHI "ahi" | ||
20 | #define ALR "alr" | ||
21 | #define CLR "clr" | ||
22 | #define LHI "lhi" | ||
23 | #define SLR "slr" | ||
24 | #else | ||
25 | #define AHI "aghi" | ||
26 | #define ALR "algr" | ||
27 | #define CLR "clgr" | ||
28 | #define LHI "lghi" | ||
29 | #define SLR "slgr" | ||
30 | #endif | ||
31 | |||
32 | static struct static_key have_mvcos = STATIC_KEY_INIT_FALSE; | 18 | static struct static_key have_mvcos = STATIC_KEY_INIT_FALSE; |
33 | 19 | ||
34 | static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr, | 20 | static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr, |
@@ -41,29 +27,29 @@ static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr | |||
41 | asm volatile( | 27 | asm volatile( |
42 | "0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n" | 28 | "0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n" |
43 | "9: jz 7f\n" | 29 | "9: jz 7f\n" |
44 | "1:"ALR" %0,%3\n" | 30 | "1: algr %0,%3\n" |
45 | " "SLR" %1,%3\n" | 31 | " slgr %1,%3\n" |
46 | " "SLR" %2,%3\n" | 32 | " slgr %2,%3\n" |
47 | " j 0b\n" | 33 | " j 0b\n" |
48 | "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */ | 34 | "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */ |
49 | " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */ | 35 | " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */ |
50 | " "SLR" %4,%1\n" | 36 | " slgr %4,%1\n" |
51 | " "CLR" %0,%4\n" /* copy crosses next page boundary? */ | 37 | " clgr %0,%4\n" /* copy crosses next page boundary? */ |
52 | " jnh 4f\n" | 38 | " jnh 4f\n" |
53 | "3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n" | 39 | "3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n" |
54 | "10:"SLR" %0,%4\n" | 40 | "10:slgr %0,%4\n" |
55 | " "ALR" %2,%4\n" | 41 | " algr %2,%4\n" |
56 | "4:"LHI" %4,-1\n" | 42 | "4: lghi %4,-1\n" |
57 | " "ALR" %4,%0\n" /* copy remaining size, subtract 1 */ | 43 | " algr %4,%0\n" /* copy remaining size, subtract 1 */ |
58 | " bras %3,6f\n" /* memset loop */ | 44 | " bras %3,6f\n" /* memset loop */ |
59 | " xc 0(1,%2),0(%2)\n" | 45 | " xc 0(1,%2),0(%2)\n" |
60 | "5: xc 0(256,%2),0(%2)\n" | 46 | "5: xc 0(256,%2),0(%2)\n" |
61 | " la %2,256(%2)\n" | 47 | " la %2,256(%2)\n" |
62 | "6:"AHI" %4,-256\n" | 48 | "6: aghi %4,-256\n" |
63 | " jnm 5b\n" | 49 | " jnm 5b\n" |
64 | " ex %4,0(%3)\n" | 50 | " ex %4,0(%3)\n" |
65 | " j 8f\n" | 51 | " j 8f\n" |
66 | "7:"SLR" %0,%0\n" | 52 | "7:slgr %0,%0\n" |
67 | "8:\n" | 53 | "8:\n" |
68 | EX_TABLE(0b,2b) EX_TABLE(3b,4b) EX_TABLE(9b,2b) EX_TABLE(10b,4b) | 54 | EX_TABLE(0b,2b) EX_TABLE(3b,4b) EX_TABLE(9b,2b) EX_TABLE(10b,4b) |
69 | : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2) | 55 | : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2) |
@@ -82,32 +68,32 @@ static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr, | |||
82 | " sacf 0\n" | 68 | " sacf 0\n" |
83 | "0: mvcp 0(%0,%2),0(%1),%3\n" | 69 | "0: mvcp 0(%0,%2),0(%1),%3\n" |
84 | "10:jz 8f\n" | 70 | "10:jz 8f\n" |
85 | "1:"ALR" %0,%3\n" | 71 | "1: algr %0,%3\n" |
86 | " la %1,256(%1)\n" | 72 | " la %1,256(%1)\n" |
87 | " la %2,256(%2)\n" | 73 | " la %2,256(%2)\n" |
88 | "2: mvcp 0(%0,%2),0(%1),%3\n" | 74 | "2: mvcp 0(%0,%2),0(%1),%3\n" |
89 | "11:jnz 1b\n" | 75 | "11:jnz 1b\n" |
90 | " j 8f\n" | 76 | " j 8f\n" |
91 | "3: la %4,255(%1)\n" /* %4 = ptr + 255 */ | 77 | "3: la %4,255(%1)\n" /* %4 = ptr + 255 */ |
92 | " "LHI" %3,-4096\n" | 78 | " lghi %3,-4096\n" |
93 | " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */ | 79 | " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */ |
94 | " "SLR" %4,%1\n" | 80 | " slgr %4,%1\n" |
95 | " "CLR" %0,%4\n" /* copy crosses next page boundary? */ | 81 | " clgr %0,%4\n" /* copy crosses next page boundary? */ |
96 | " jnh 5f\n" | 82 | " jnh 5f\n" |
97 | "4: mvcp 0(%4,%2),0(%1),%3\n" | 83 | "4: mvcp 0(%4,%2),0(%1),%3\n" |
98 | "12:"SLR" %0,%4\n" | 84 | "12:slgr %0,%4\n" |
99 | " "ALR" %2,%4\n" | 85 | " algr %2,%4\n" |
100 | "5:"LHI" %4,-1\n" | 86 | "5: lghi %4,-1\n" |
101 | " "ALR" %4,%0\n" /* copy remaining size, subtract 1 */ | 87 | " algr %4,%0\n" /* copy remaining size, subtract 1 */ |
102 | " bras %3,7f\n" /* memset loop */ | 88 | " bras %3,7f\n" /* memset loop */ |
103 | " xc 0(1,%2),0(%2)\n" | 89 | " xc 0(1,%2),0(%2)\n" |
104 | "6: xc 0(256,%2),0(%2)\n" | 90 | "6: xc 0(256,%2),0(%2)\n" |
105 | " la %2,256(%2)\n" | 91 | " la %2,256(%2)\n" |
106 | "7:"AHI" %4,-256\n" | 92 | "7: aghi %4,-256\n" |
107 | " jnm 6b\n" | 93 | " jnm 6b\n" |
108 | " ex %4,0(%3)\n" | 94 | " ex %4,0(%3)\n" |
109 | " j 9f\n" | 95 | " j 9f\n" |
110 | "8:"SLR" %0,%0\n" | 96 | "8:slgr %0,%0\n" |
111 | "9: sacf 768\n" | 97 | "9: sacf 768\n" |
112 | EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,5b) | 98 | EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,5b) |
113 | EX_TABLE(10b,3b) EX_TABLE(11b,3b) EX_TABLE(12b,5b) | 99 | EX_TABLE(10b,3b) EX_TABLE(11b,3b) EX_TABLE(12b,5b) |
@@ -134,19 +120,19 @@ static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x, | |||
134 | asm volatile( | 120 | asm volatile( |
135 | "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n" | 121 | "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n" |
136 | "6: jz 4f\n" | 122 | "6: jz 4f\n" |
137 | "1:"ALR" %0,%3\n" | 123 | "1: algr %0,%3\n" |
138 | " "SLR" %1,%3\n" | 124 | " slgr %1,%3\n" |
139 | " "SLR" %2,%3\n" | 125 | " slgr %2,%3\n" |
140 | " j 0b\n" | 126 | " j 0b\n" |
141 | "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */ | 127 | "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */ |
142 | " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */ | 128 | " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */ |
143 | " "SLR" %4,%1\n" | 129 | " slgr %4,%1\n" |
144 | " "CLR" %0,%4\n" /* copy crosses next page boundary? */ | 130 | " clgr %0,%4\n" /* copy crosses next page boundary? */ |
145 | " jnh 5f\n" | 131 | " jnh 5f\n" |
146 | "3: .insn ss,0xc80000000000,0(%4,%1),0(%2),0\n" | 132 | "3: .insn ss,0xc80000000000,0(%4,%1),0(%2),0\n" |
147 | "7:"SLR" %0,%4\n" | 133 | "7: slgr %0,%4\n" |
148 | " j 5f\n" | 134 | " j 5f\n" |
149 | "4:"SLR" %0,%0\n" | 135 | "4: slgr %0,%0\n" |
150 | "5:\n" | 136 | "5:\n" |
151 | EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b) | 137 | EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b) |
152 | : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2) | 138 | : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2) |
@@ -165,22 +151,22 @@ static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x, | |||
165 | " sacf 0\n" | 151 | " sacf 0\n" |
166 | "0: mvcs 0(%0,%1),0(%2),%3\n" | 152 | "0: mvcs 0(%0,%1),0(%2),%3\n" |
167 | "7: jz 5f\n" | 153 | "7: jz 5f\n" |
168 | "1:"ALR" %0,%3\n" | 154 | "1: algr %0,%3\n" |
169 | " la %1,256(%1)\n" | 155 | " la %1,256(%1)\n" |
170 | " la %2,256(%2)\n" | 156 | " la %2,256(%2)\n" |
171 | "2: mvcs 0(%0,%1),0(%2),%3\n" | 157 | "2: mvcs 0(%0,%1),0(%2),%3\n" |
172 | "8: jnz 1b\n" | 158 | "8: jnz 1b\n" |
173 | " j 5f\n" | 159 | " j 5f\n" |
174 | "3: la %4,255(%1)\n" /* %4 = ptr + 255 */ | 160 | "3: la %4,255(%1)\n" /* %4 = ptr + 255 */ |
175 | " "LHI" %3,-4096\n" | 161 | " lghi %3,-4096\n" |
176 | " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */ | 162 | " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */ |
177 | " "SLR" %4,%1\n" | 163 | " slgr %4,%1\n" |
178 | " "CLR" %0,%4\n" /* copy crosses next page boundary? */ | 164 | " clgr %0,%4\n" /* copy crosses next page boundary? */ |
179 | " jnh 6f\n" | 165 | " jnh 6f\n" |
180 | "4: mvcs 0(%4,%1),0(%2),%3\n" | 166 | "4: mvcs 0(%4,%1),0(%2),%3\n" |
181 | "9:"SLR" %0,%4\n" | 167 | "9: slgr %0,%4\n" |
182 | " j 6f\n" | 168 | " j 6f\n" |
183 | "5:"SLR" %0,%0\n" | 169 | "5: slgr %0,%0\n" |
184 | "6: sacf 768\n" | 170 | "6: sacf 768\n" |
185 | EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b) | 171 | EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b) |
186 | EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b) | 172 | EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b) |
@@ -208,11 +194,11 @@ static inline unsigned long copy_in_user_mvcos(void __user *to, const void __use | |||
208 | asm volatile( | 194 | asm volatile( |
209 | "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n" | 195 | "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n" |
210 | " jz 2f\n" | 196 | " jz 2f\n" |
211 | "1:"ALR" %0,%3\n" | 197 | "1: algr %0,%3\n" |
212 | " "SLR" %1,%3\n" | 198 | " slgr %1,%3\n" |
213 | " "SLR" %2,%3\n" | 199 | " slgr %2,%3\n" |
214 | " j 0b\n" | 200 | " j 0b\n" |
215 | "2:"SLR" %0,%0\n" | 201 | "2:slgr %0,%0\n" |
216 | "3: \n" | 202 | "3: \n" |
217 | EX_TABLE(0b,3b) | 203 | EX_TABLE(0b,3b) |
218 | : "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2) | 204 | : "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2) |
@@ -228,23 +214,23 @@ static inline unsigned long copy_in_user_mvc(void __user *to, const void __user | |||
228 | load_kernel_asce(); | 214 | load_kernel_asce(); |
229 | asm volatile( | 215 | asm volatile( |
230 | " sacf 256\n" | 216 | " sacf 256\n" |
231 | " "AHI" %0,-1\n" | 217 | " aghi %0,-1\n" |
232 | " jo 5f\n" | 218 | " jo 5f\n" |
233 | " bras %3,3f\n" | 219 | " bras %3,3f\n" |
234 | "0:"AHI" %0,257\n" | 220 | "0: aghi %0,257\n" |
235 | "1: mvc 0(1,%1),0(%2)\n" | 221 | "1: mvc 0(1,%1),0(%2)\n" |
236 | " la %1,1(%1)\n" | 222 | " la %1,1(%1)\n" |
237 | " la %2,1(%2)\n" | 223 | " la %2,1(%2)\n" |
238 | " "AHI" %0,-1\n" | 224 | " aghi %0,-1\n" |
239 | " jnz 1b\n" | 225 | " jnz 1b\n" |
240 | " j 5f\n" | 226 | " j 5f\n" |
241 | "2: mvc 0(256,%1),0(%2)\n" | 227 | "2: mvc 0(256,%1),0(%2)\n" |
242 | " la %1,256(%1)\n" | 228 | " la %1,256(%1)\n" |
243 | " la %2,256(%2)\n" | 229 | " la %2,256(%2)\n" |
244 | "3:"AHI" %0,-256\n" | 230 | "3: aghi %0,-256\n" |
245 | " jnm 2b\n" | 231 | " jnm 2b\n" |
246 | "4: ex %0,1b-0b(%3)\n" | 232 | "4: ex %0,1b-0b(%3)\n" |
247 | "5: "SLR" %0,%0\n" | 233 | "5: slgr %0,%0\n" |
248 | "6: sacf 768\n" | 234 | "6: sacf 768\n" |
249 | EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b) | 235 | EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b) |
250 | : "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1) | 236 | : "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1) |
@@ -269,18 +255,18 @@ static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size | |||
269 | asm volatile( | 255 | asm volatile( |
270 | "0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n" | 256 | "0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n" |
271 | " jz 4f\n" | 257 | " jz 4f\n" |
272 | "1:"ALR" %0,%2\n" | 258 | "1: algr %0,%2\n" |
273 | " "SLR" %1,%2\n" | 259 | " slgr %1,%2\n" |
274 | " j 0b\n" | 260 | " j 0b\n" |
275 | "2: la %3,4095(%1)\n"/* %4 = to + 4095 */ | 261 | "2: la %3,4095(%1)\n"/* %4 = to + 4095 */ |
276 | " nr %3,%2\n" /* %4 = (to + 4095) & -4096 */ | 262 | " nr %3,%2\n" /* %4 = (to + 4095) & -4096 */ |
277 | " "SLR" %3,%1\n" | 263 | " slgr %3,%1\n" |
278 | " "CLR" %0,%3\n" /* copy crosses next page boundary? */ | 264 | " clgr %0,%3\n" /* copy crosses next page boundary? */ |
279 | " jnh 5f\n" | 265 | " jnh 5f\n" |
280 | "3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n" | 266 | "3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n" |
281 | " "SLR" %0,%3\n" | 267 | " slgr %0,%3\n" |
282 | " j 5f\n" | 268 | " j 5f\n" |
283 | "4:"SLR" %0,%0\n" | 269 | "4:slgr %0,%0\n" |
284 | "5:\n" | 270 | "5:\n" |
285 | EX_TABLE(0b,2b) EX_TABLE(3b,5b) | 271 | EX_TABLE(0b,2b) EX_TABLE(3b,5b) |
286 | : "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2) | 272 | : "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2) |
@@ -295,28 +281,28 @@ static inline unsigned long clear_user_xc(void __user *to, unsigned long size) | |||
295 | load_kernel_asce(); | 281 | load_kernel_asce(); |
296 | asm volatile( | 282 | asm volatile( |
297 | " sacf 256\n" | 283 | " sacf 256\n" |
298 | " "AHI" %0,-1\n" | 284 | " aghi %0,-1\n" |
299 | " jo 5f\n" | 285 | " jo 5f\n" |
300 | " bras %3,3f\n" | 286 | " bras %3,3f\n" |
301 | " xc 0(1,%1),0(%1)\n" | 287 | " xc 0(1,%1),0(%1)\n" |
302 | "0:"AHI" %0,257\n" | 288 | "0: aghi %0,257\n" |
303 | " la %2,255(%1)\n" /* %2 = ptr + 255 */ | 289 | " la %2,255(%1)\n" /* %2 = ptr + 255 */ |
304 | " srl %2,12\n" | 290 | " srl %2,12\n" |
305 | " sll %2,12\n" /* %2 = (ptr + 255) & -4096 */ | 291 | " sll %2,12\n" /* %2 = (ptr + 255) & -4096 */ |
306 | " "SLR" %2,%1\n" | 292 | " slgr %2,%1\n" |
307 | " "CLR" %0,%2\n" /* clear crosses next page boundary? */ | 293 | " clgr %0,%2\n" /* clear crosses next page boundary? */ |
308 | " jnh 5f\n" | 294 | " jnh 5f\n" |
309 | " "AHI" %2,-1\n" | 295 | " aghi %2,-1\n" |
310 | "1: ex %2,0(%3)\n" | 296 | "1: ex %2,0(%3)\n" |
311 | " "AHI" %2,1\n" | 297 | " aghi %2,1\n" |
312 | " "SLR" %0,%2\n" | 298 | " slgr %0,%2\n" |
313 | " j 5f\n" | 299 | " j 5f\n" |
314 | "2: xc 0(256,%1),0(%1)\n" | 300 | "2: xc 0(256,%1),0(%1)\n" |
315 | " la %1,256(%1)\n" | 301 | " la %1,256(%1)\n" |
316 | "3:"AHI" %0,-256\n" | 302 | "3: aghi %0,-256\n" |
317 | " jnm 2b\n" | 303 | " jnm 2b\n" |
318 | "4: ex %0,0(%3)\n" | 304 | "4: ex %0,0(%3)\n" |
319 | "5: "SLR" %0,%0\n" | 305 | "5: slgr %0,%0\n" |
320 | "6: sacf 768\n" | 306 | "6: sacf 768\n" |
321 | EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b) | 307 | EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b) |
322 | : "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2) | 308 | : "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2) |
@@ -341,12 +327,12 @@ static inline unsigned long strnlen_user_srst(const char __user *src, | |||
341 | asm volatile( | 327 | asm volatile( |
342 | " la %2,0(%1)\n" | 328 | " la %2,0(%1)\n" |
343 | " la %3,0(%0,%1)\n" | 329 | " la %3,0(%0,%1)\n" |
344 | " "SLR" %0,%0\n" | 330 | " slgr %0,%0\n" |
345 | " sacf 256\n" | 331 | " sacf 256\n" |
346 | "0: srst %3,%2\n" | 332 | "0: srst %3,%2\n" |
347 | " jo 0b\n" | 333 | " jo 0b\n" |
348 | " la %0,1(%3)\n" /* strnlen_user results includes \0 */ | 334 | " la %0,1(%3)\n" /* strnlen_user results includes \0 */ |
349 | " "SLR" %0,%1\n" | 335 | " slgr %0,%1\n" |
350 | "1: sacf 768\n" | 336 | "1: sacf 768\n" |
351 | EX_TABLE(0b,1b) | 337 | EX_TABLE(0b,1b) |
352 | : "+a" (size), "+a" (src), "=a" (tmp1), "=a" (tmp2) | 338 | : "+a" (size), "+a" (src), "=a" (tmp1), "=a" (tmp2) |
@@ -399,7 +385,7 @@ early_param("uaccess_primary", parse_uaccess_pt); | |||
399 | 385 | ||
400 | static int __init uaccess_init(void) | 386 | static int __init uaccess_init(void) |
401 | { | 387 | { |
402 | if (IS_ENABLED(CONFIG_64BIT) && !uaccess_primary && test_facility(27)) | 388 | if (!uaccess_primary && test_facility(27)) |
403 | static_key_slow_inc(&have_mvcos); | 389 | static_key_slow_inc(&have_mvcos); |
404 | return 0; | 390 | return 0; |
405 | } | 391 | } |
diff --git a/arch/s390/lib/ucmpdi2.c b/arch/s390/lib/ucmpdi2.c deleted file mode 100644 index 3e05ff532582..000000000000 --- a/arch/s390/lib/ucmpdi2.c +++ /dev/null | |||
@@ -1,26 +0,0 @@ | |||
1 | #include <linux/module.h> | ||
2 | |||
3 | union ull_union { | ||
4 | unsigned long long ull; | ||
5 | struct { | ||
6 | unsigned int high; | ||
7 | unsigned int low; | ||
8 | } ui; | ||
9 | }; | ||
10 | |||
11 | int __ucmpdi2(unsigned long long a, unsigned long long b) | ||
12 | { | ||
13 | union ull_union au = {.ull = a}; | ||
14 | union ull_union bu = {.ull = b}; | ||
15 | |||
16 | if (au.ui.high < bu.ui.high) | ||
17 | return 0; | ||
18 | else if (au.ui.high > bu.ui.high) | ||
19 | return 2; | ||
20 | if (au.ui.low < bu.ui.low) | ||
21 | return 0; | ||
22 | else if (au.ui.low > bu.ui.low) | ||
23 | return 2; | ||
24 | return 1; | ||
25 | } | ||
26 | EXPORT_SYMBOL(__ucmpdi2); | ||
diff --git a/arch/s390/math-emu/Makefile b/arch/s390/math-emu/Makefile deleted file mode 100644 index 51d399549f60..000000000000 --- a/arch/s390/math-emu/Makefile +++ /dev/null | |||
@@ -1,7 +0,0 @@ | |||
1 | # | ||
2 | # Makefile for the FPU instruction emulation. | ||
3 | # | ||
4 | |||
5 | obj-$(CONFIG_MATHEMU) := math.o | ||
6 | |||
7 | ccflags-y := -I$(src) -Iinclude/math-emu -w | ||
diff --git a/arch/s390/math-emu/math.c b/arch/s390/math-emu/math.c deleted file mode 100644 index a6ba0d724335..000000000000 --- a/arch/s390/math-emu/math.c +++ /dev/null | |||
@@ -1,2255 +0,0 @@ | |||
1 | /* | ||
2 | * S390 version | ||
3 | * Copyright IBM Corp. 1999, 2001 | ||
4 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), | ||
5 | * | ||
6 | * 'math.c' emulates IEEE instructions on a S390 processor | ||
7 | * that does not have the IEEE fpu (all processors before G5). | ||
8 | */ | ||
9 | |||
10 | #include <linux/types.h> | ||
11 | #include <linux/sched.h> | ||
12 | #include <linux/mm.h> | ||
13 | #include <asm/uaccess.h> | ||
14 | #include <asm/lowcore.h> | ||
15 | |||
16 | #include <asm/sfp-util.h> | ||
17 | #include <math-emu/soft-fp.h> | ||
18 | #include <math-emu/single.h> | ||
19 | #include <math-emu/double.h> | ||
20 | #include <math-emu/quad.h> | ||
21 | |||
22 | #define FPC_VALID_MASK 0xF8F8FF03 | ||
23 | |||
24 | /* | ||
25 | * I miss a macro to round a floating point number to the | ||
26 | * nearest integer in the same floating point format. | ||
27 | */ | ||
28 | #define _FP_TO_FPINT_ROUND(fs, wc, X) \ | ||
29 | do { \ | ||
30 | switch (X##_c) \ | ||
31 | { \ | ||
32 | case FP_CLS_NORMAL: \ | ||
33 | if (X##_e > _FP_FRACBITS_##fs + _FP_EXPBIAS_##fs) \ | ||
34 | { /* floating point number has no bits after the dot. */ \ | ||
35 | } \ | ||
36 | else if (X##_e <= _FP_FRACBITS_##fs + _FP_EXPBIAS_##fs && \ | ||
37 | X##_e > _FP_EXPBIAS_##fs) \ | ||
38 | { /* some bits before the dot, some after it. */ \ | ||
39 | _FP_FRAC_SRS_##wc(X, _FP_WFRACBITS_##fs, \ | ||
40 | X##_e - _FP_EXPBIAS_##fs \ | ||
41 | + _FP_FRACBITS_##fs); \ | ||
42 | _FP_ROUND(wc, X); \ | ||
43 | _FP_FRAC_SLL_##wc(X, X##_e - _FP_EXPBIAS_##fs \ | ||
44 | + _FP_FRACBITS_##fs); \ | ||
45 | } \ | ||
46 | else \ | ||
47 | { /* all bits after the dot. */ \ | ||
48 | FP_SET_EXCEPTION(FP_EX_INEXACT); \ | ||
49 | X##_c = FP_CLS_ZERO; \ | ||
50 | } \ | ||
51 | break; \ | ||
52 | case FP_CLS_NAN: \ | ||
53 | case FP_CLS_INF: \ | ||
54 | case FP_CLS_ZERO: \ | ||
55 | break; \ | ||
56 | } \ | ||
57 | } while (0) | ||
58 | |||
59 | #define FP_TO_FPINT_ROUND_S(X) _FP_TO_FPINT_ROUND(S,1,X) | ||
60 | #define FP_TO_FPINT_ROUND_D(X) _FP_TO_FPINT_ROUND(D,2,X) | ||
61 | #define FP_TO_FPINT_ROUND_Q(X) _FP_TO_FPINT_ROUND(Q,4,X) | ||
62 | |||
63 | typedef union { | ||
64 | long double ld; | ||
65 | struct { | ||
66 | __u64 high; | ||
67 | __u64 low; | ||
68 | } w; | ||
69 | } mathemu_ldcv; | ||
70 | |||
71 | #ifdef CONFIG_SYSCTL | ||
72 | int sysctl_ieee_emulation_warnings=1; | ||
73 | #endif | ||
74 | |||
75 | #define mathemu_put_user(x, p) \ | ||
76 | do { \ | ||
77 | if (put_user((x),(p))) \ | ||
78 | return SIGSEGV; \ | ||
79 | } while (0) | ||
80 | |||
81 | #define mathemu_get_user(x, p) \ | ||
82 | do { \ | ||
83 | if (get_user((x),(p))) \ | ||
84 | return SIGSEGV; \ | ||
85 | } while (0) | ||
86 | |||
87 | #define mathemu_copy_from_user(d, s, n)\ | ||
88 | do { \ | ||
89 | if (copy_from_user((d),(s),(n)) != 0) \ | ||
90 | return SIGSEGV; \ | ||
91 | } while (0) | ||
92 | |||
93 | #define mathemu_copy_to_user(d, s, n) \ | ||
94 | do { \ | ||
95 | if (copy_to_user((d),(s),(n)) != 0) \ | ||
96 | return SIGSEGV; \ | ||
97 | } while (0) | ||
98 | |||
99 | static void display_emulation_not_implemented(struct pt_regs *regs, char *instr) | ||
100 | { | ||
101 | __u16 *location; | ||
102 | |||
103 | #ifdef CONFIG_SYSCTL | ||
104 | if(sysctl_ieee_emulation_warnings) | ||
105 | #endif | ||
106 | { | ||
107 | location = (__u16 *)(regs->psw.addr-S390_lowcore.pgm_ilc); | ||
108 | printk("%s ieee fpu instruction not emulated " | ||
109 | "process name: %s pid: %d \n", | ||
110 | instr, current->comm, current->pid); | ||
111 | printk("%s's PSW: %08lx %08lx\n", instr, | ||
112 | (unsigned long) regs->psw.mask, | ||
113 | (unsigned long) location); | ||
114 | } | ||
115 | } | ||
116 | |||
117 | static inline void emu_set_CC (struct pt_regs *regs, int cc) | ||
118 | { | ||
119 | regs->psw.mask = (regs->psw.mask & 0xFFFFCFFF) | ((cc&3) << 12); | ||
120 | } | ||
121 | |||
122 | /* | ||
123 | * Set the condition code in the user psw. | ||
124 | * 0 : Result is zero | ||
125 | * 1 : Result is less than zero | ||
126 | * 2 : Result is greater than zero | ||
127 | * 3 : Result is NaN or INF | ||
128 | */ | ||
129 | static inline void emu_set_CC_cs(struct pt_regs *regs, int class, int sign) | ||
130 | { | ||
131 | switch (class) { | ||
132 | case FP_CLS_NORMAL: | ||
133 | case FP_CLS_INF: | ||
134 | emu_set_CC(regs, sign ? 1 : 2); | ||
135 | break; | ||
136 | case FP_CLS_ZERO: | ||
137 | emu_set_CC(regs, 0); | ||
138 | break; | ||
139 | case FP_CLS_NAN: | ||
140 | emu_set_CC(regs, 3); | ||
141 | break; | ||
142 | } | ||
143 | } | ||
144 | |||
/* Add long double (AXBR): fprs[rx:rx+2] += fprs[ry:ry+2], CC from the
   result class/sign.  Returns the accumulated IEEE exception flags. */
static int emu_axbr (struct pt_regs *regs, int rx, int ry) {
	FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR);
	FP_DECL_EX;
	mathemu_ldcv cvt;
	int mode;

	/* fpc rounding mode; presumably read by the soft-fp macros
	   (FP_ROUNDMODE) - do not remove */
	mode = current->thread.fp_regs.fpc & 3;
	cvt.w.high = current->thread.fp_regs.fprs[rx].ui;
	cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui;
	FP_UNPACK_QP(QA, &cvt.ld);
	cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
	cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
	FP_UNPACK_QP(QB, &cvt.ld);
	FP_ADD_Q(QR, QA, QB);
	FP_PACK_QP(&cvt.ld, QR);
	current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
	current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
	emu_set_CC_cs(regs, QR_c, QR_s);
	return _fex;
}
166 | |||
167 | /* Add double */ | ||
168 | static int emu_adbr (struct pt_regs *regs, int rx, int ry) { | ||
169 | FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR); | ||
170 | FP_DECL_EX; | ||
171 | int mode; | ||
172 | |||
173 | mode = current->thread.fp_regs.fpc & 3; | ||
174 | FP_UNPACK_DP(DA, ¤t->thread.fp_regs.fprs[rx].d); | ||
175 | FP_UNPACK_DP(DB, ¤t->thread.fp_regs.fprs[ry].d); | ||
176 | FP_ADD_D(DR, DA, DB); | ||
177 | FP_PACK_DP(¤t->thread.fp_regs.fprs[rx].d, DR); | ||
178 | emu_set_CC_cs(regs, DR_c, DR_s); | ||
179 | return _fex; | ||
180 | } | ||
181 | |||
182 | /* Add double */ | ||
183 | static int emu_adb (struct pt_regs *regs, int rx, double *val) { | ||
184 | FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR); | ||
185 | FP_DECL_EX; | ||
186 | int mode; | ||
187 | |||
188 | mode = current->thread.fp_regs.fpc & 3; | ||
189 | FP_UNPACK_DP(DA, ¤t->thread.fp_regs.fprs[rx].d); | ||
190 | FP_UNPACK_DP(DB, val); | ||
191 | FP_ADD_D(DR, DA, DB); | ||
192 | FP_PACK_DP(¤t->thread.fp_regs.fprs[rx].d, DR); | ||
193 | emu_set_CC_cs(regs, DR_c, DR_s); | ||
194 | return _fex; | ||
195 | } | ||
196 | |||
197 | /* Add float */ | ||
198 | static int emu_aebr (struct pt_regs *regs, int rx, int ry) { | ||
199 | FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR); | ||
200 | FP_DECL_EX; | ||
201 | int mode; | ||
202 | |||
203 | mode = current->thread.fp_regs.fpc & 3; | ||
204 | FP_UNPACK_SP(SA, ¤t->thread.fp_regs.fprs[rx].f); | ||
205 | FP_UNPACK_SP(SB, ¤t->thread.fp_regs.fprs[ry].f); | ||
206 | FP_ADD_S(SR, SA, SB); | ||
207 | FP_PACK_SP(¤t->thread.fp_regs.fprs[rx].f, SR); | ||
208 | emu_set_CC_cs(regs, SR_c, SR_s); | ||
209 | return _fex; | ||
210 | } | ||
211 | |||
212 | /* Add float */ | ||
213 | static int emu_aeb (struct pt_regs *regs, int rx, float *val) { | ||
214 | FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR); | ||
215 | FP_DECL_EX; | ||
216 | int mode; | ||
217 | |||
218 | mode = current->thread.fp_regs.fpc & 3; | ||
219 | FP_UNPACK_SP(SA, ¤t->thread.fp_regs.fprs[rx].f); | ||
220 | FP_UNPACK_SP(SB, val); | ||
221 | FP_ADD_S(SR, SA, SB); | ||
222 | FP_PACK_SP(¤t->thread.fp_regs.fprs[rx].f, SR); | ||
223 | emu_set_CC_cs(regs, SR_c, SR_s); | ||
224 | return _fex; | ||
225 | } | ||
226 | |||
/* Compare long double (CXBR, quiet): set CC from fprs[rx:rx+2] <=>
   fprs[ry:ry+2].  Raw unpack: no exceptions are raised here. */
static int emu_cxbr (struct pt_regs *regs, int rx, int ry) {
	FP_DECL_Q(QA); FP_DECL_Q(QB);
	mathemu_ldcv cvt;
	int IR;

	cvt.w.high = current->thread.fp_regs.fprs[rx].ui;
	cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui;
	FP_UNPACK_RAW_QP(QA, &cvt.ld);
	cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
	cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
	FP_UNPACK_RAW_QP(QB, &cvt.ld);
	FP_CMP_Q(IR, QA, QB, 3);
	/*
	 * IR == -1 if QA < QB, IR == 0 if QA == QB,
	 * IR == 1 if QA > QB and IR == 3 if unordered
	 */
	emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR);
	return 0;
}
247 | |||
248 | /* Compare double */ | ||
249 | static int emu_cdbr (struct pt_regs *regs, int rx, int ry) { | ||
250 | FP_DECL_D(DA); FP_DECL_D(DB); | ||
251 | int IR; | ||
252 | |||
253 | FP_UNPACK_RAW_DP(DA, ¤t->thread.fp_regs.fprs[rx].d); | ||
254 | FP_UNPACK_RAW_DP(DB, ¤t->thread.fp_regs.fprs[ry].d); | ||
255 | FP_CMP_D(IR, DA, DB, 3); | ||
256 | /* | ||
257 | * IR == -1 if DA < DB, IR == 0 if DA == DB, | ||
258 | * IR == 1 if DA > DB and IR == 3 if unorderded | ||
259 | */ | ||
260 | emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR); | ||
261 | return 0; | ||
262 | } | ||
263 | |||
264 | /* Compare double */ | ||
265 | static int emu_cdb (struct pt_regs *regs, int rx, double *val) { | ||
266 | FP_DECL_D(DA); FP_DECL_D(DB); | ||
267 | int IR; | ||
268 | |||
269 | FP_UNPACK_RAW_DP(DA, ¤t->thread.fp_regs.fprs[rx].d); | ||
270 | FP_UNPACK_RAW_DP(DB, val); | ||
271 | FP_CMP_D(IR, DA, DB, 3); | ||
272 | /* | ||
273 | * IR == -1 if DA < DB, IR == 0 if DA == DB, | ||
274 | * IR == 1 if DA > DB and IR == 3 if unorderded | ||
275 | */ | ||
276 | emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR); | ||
277 | return 0; | ||
278 | } | ||
279 | |||
280 | /* Compare float */ | ||
281 | static int emu_cebr (struct pt_regs *regs, int rx, int ry) { | ||
282 | FP_DECL_S(SA); FP_DECL_S(SB); | ||
283 | int IR; | ||
284 | |||
285 | FP_UNPACK_RAW_SP(SA, ¤t->thread.fp_regs.fprs[rx].f); | ||
286 | FP_UNPACK_RAW_SP(SB, ¤t->thread.fp_regs.fprs[ry].f); | ||
287 | FP_CMP_S(IR, SA, SB, 3); | ||
288 | /* | ||
289 | * IR == -1 if DA < DB, IR == 0 if DA == DB, | ||
290 | * IR == 1 if DA > DB and IR == 3 if unorderded | ||
291 | */ | ||
292 | emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR); | ||
293 | return 0; | ||
294 | } | ||
295 | |||
296 | /* Compare float */ | ||
297 | static int emu_ceb (struct pt_regs *regs, int rx, float *val) { | ||
298 | FP_DECL_S(SA); FP_DECL_S(SB); | ||
299 | int IR; | ||
300 | |||
301 | FP_UNPACK_RAW_SP(SA, ¤t->thread.fp_regs.fprs[rx].f); | ||
302 | FP_UNPACK_RAW_SP(SB, val); | ||
303 | FP_CMP_S(IR, SA, SB, 3); | ||
304 | /* | ||
305 | * IR == -1 if DA < DB, IR == 0 if DA == DB, | ||
306 | * IR == 1 if DA > DB and IR == 3 if unorderded | ||
307 | */ | ||
308 | emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR); | ||
309 | return 0; | ||
310 | } | ||
311 | |||
312 | /* Compare and signal long double */ | ||
313 | static int emu_kxbr (struct pt_regs *regs, int rx, int ry) { | ||
314 | FP_DECL_Q(QA); FP_DECL_Q(QB); | ||
315 | FP_DECL_EX; | ||
316 | mathemu_ldcv cvt; | ||
317 | int IR; | ||
318 | |||
319 | cvt.w.high = current->thread.fp_regs.fprs[rx].ui; | ||
320 | cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui; | ||
321 | FP_UNPACK_RAW_QP(QA, &cvt.ld); | ||
322 | cvt.w.high = current->thread.fp_regs.fprs[ry].ui; | ||
323 | cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui; | ||
324 | FP_UNPACK_QP(QB, &cvt.ld); | ||
325 | FP_CMP_Q(IR, QA, QB, 3); | ||
326 | /* | ||
327 | * IR == -1 if DA < DB, IR == 0 if DA == DB, | ||
328 | * IR == 1 if DA > DB and IR == 3 if unorderded | ||
329 | */ | ||
330 | emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR); | ||
331 | if (IR == 3) | ||
332 | FP_SET_EXCEPTION (FP_EX_INVALID); | ||
333 | return _fex; | ||
334 | } | ||
335 | |||
336 | /* Compare and signal double */ | ||
337 | static int emu_kdbr (struct pt_regs *regs, int rx, int ry) { | ||
338 | FP_DECL_D(DA); FP_DECL_D(DB); | ||
339 | FP_DECL_EX; | ||
340 | int IR; | ||
341 | |||
342 | FP_UNPACK_RAW_DP(DA, ¤t->thread.fp_regs.fprs[rx].d); | ||
343 | FP_UNPACK_RAW_DP(DB, ¤t->thread.fp_regs.fprs[ry].d); | ||
344 | FP_CMP_D(IR, DA, DB, 3); | ||
345 | /* | ||
346 | * IR == -1 if DA < DB, IR == 0 if DA == DB, | ||
347 | * IR == 1 if DA > DB and IR == 3 if unorderded | ||
348 | */ | ||
349 | emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR); | ||
350 | if (IR == 3) | ||
351 | FP_SET_EXCEPTION (FP_EX_INVALID); | ||
352 | return _fex; | ||
353 | } | ||
354 | |||
355 | /* Compare and signal double */ | ||
356 | static int emu_kdb (struct pt_regs *regs, int rx, double *val) { | ||
357 | FP_DECL_D(DA); FP_DECL_D(DB); | ||
358 | FP_DECL_EX; | ||
359 | int IR; | ||
360 | |||
361 | FP_UNPACK_RAW_DP(DA, ¤t->thread.fp_regs.fprs[rx].d); | ||
362 | FP_UNPACK_RAW_DP(DB, val); | ||
363 | FP_CMP_D(IR, DA, DB, 3); | ||
364 | /* | ||
365 | * IR == -1 if DA < DB, IR == 0 if DA == DB, | ||
366 | * IR == 1 if DA > DB and IR == 3 if unorderded | ||
367 | */ | ||
368 | emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR); | ||
369 | if (IR == 3) | ||
370 | FP_SET_EXCEPTION (FP_EX_INVALID); | ||
371 | return _fex; | ||
372 | } | ||
373 | |||
374 | /* Compare and signal float */ | ||
375 | static int emu_kebr (struct pt_regs *regs, int rx, int ry) { | ||
376 | FP_DECL_S(SA); FP_DECL_S(SB); | ||
377 | FP_DECL_EX; | ||
378 | int IR; | ||
379 | |||
380 | FP_UNPACK_RAW_SP(SA, ¤t->thread.fp_regs.fprs[rx].f); | ||
381 | FP_UNPACK_RAW_SP(SB, ¤t->thread.fp_regs.fprs[ry].f); | ||
382 | FP_CMP_S(IR, SA, SB, 3); | ||
383 | /* | ||
384 | * IR == -1 if DA < DB, IR == 0 if DA == DB, | ||
385 | * IR == 1 if DA > DB and IR == 3 if unorderded | ||
386 | */ | ||
387 | emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR); | ||
388 | if (IR == 3) | ||
389 | FP_SET_EXCEPTION (FP_EX_INVALID); | ||
390 | return _fex; | ||
391 | } | ||
392 | |||
393 | /* Compare and signal float */ | ||
394 | static int emu_keb (struct pt_regs *regs, int rx, float *val) { | ||
395 | FP_DECL_S(SA); FP_DECL_S(SB); | ||
396 | FP_DECL_EX; | ||
397 | int IR; | ||
398 | |||
399 | FP_UNPACK_RAW_SP(SA, ¤t->thread.fp_regs.fprs[rx].f); | ||
400 | FP_UNPACK_RAW_SP(SB, val); | ||
401 | FP_CMP_S(IR, SA, SB, 3); | ||
402 | /* | ||
403 | * IR == -1 if DA < DB, IR == 0 if DA == DB, | ||
404 | * IR == 1 if DA > DB and IR == 3 if unorderded | ||
405 | */ | ||
406 | emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR); | ||
407 | if (IR == 3) | ||
408 | FP_SET_EXCEPTION (FP_EX_INVALID); | ||
409 | return _fex; | ||
410 | } | ||
411 | |||
/* Convert 32-bit int to long double (CXFBR):
   fprs[rx:rx+2] = (long double) gprs[ry]. */
static int emu_cxfbr (struct pt_regs *regs, int rx, int ry) {
	FP_DECL_Q(QR);
	FP_DECL_EX;
	mathemu_ldcv cvt;
	__s32 si;
	int mode;

	/* fpc rounding mode; presumably read by the soft-fp macros */
	mode = current->thread.fp_regs.fpc & 3;
	si = regs->gprs[ry];
	FP_FROM_INT_Q(QR, si, 32, int);
	FP_PACK_QP(&cvt.ld, QR);
	current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
	current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
	return _fex;
}
428 | |||
429 | /* Convert from fixed double */ | ||
430 | static int emu_cdfbr (struct pt_regs *regs, int rx, int ry) { | ||
431 | FP_DECL_D(DR); | ||
432 | FP_DECL_EX; | ||
433 | __s32 si; | ||
434 | int mode; | ||
435 | |||
436 | mode = current->thread.fp_regs.fpc & 3; | ||
437 | si = regs->gprs[ry]; | ||
438 | FP_FROM_INT_D(DR, si, 32, int); | ||
439 | FP_PACK_DP(¤t->thread.fp_regs.fprs[rx].d, DR); | ||
440 | return _fex; | ||
441 | } | ||
442 | |||
443 | /* Convert from fixed float */ | ||
444 | static int emu_cefbr (struct pt_regs *regs, int rx, int ry) { | ||
445 | FP_DECL_S(SR); | ||
446 | FP_DECL_EX; | ||
447 | __s32 si; | ||
448 | int mode; | ||
449 | |||
450 | mode = current->thread.fp_regs.fpc & 3; | ||
451 | si = regs->gprs[ry]; | ||
452 | FP_FROM_INT_S(SR, si, 32, int); | ||
453 | FP_PACK_SP(¤t->thread.fp_regs.fprs[rx].f, SR); | ||
454 | return _fex; | ||
455 | } | ||
456 | |||
/* Convert long double to fixed 32-bit (CFXBR):
   gprs[rx] = (int) fprs[ry:ry+2]; mask selects the rounding mode. */
static int emu_cfxbr (struct pt_regs *regs, int rx, int ry, int mask) {
	FP_DECL_Q(QA);
	FP_DECL_EX;
	mathemu_ldcv cvt;
	__s32 si;
	int mode;

	if (mask == 0)
		mode = current->thread.fp_regs.fpc & 3;
	else if (mask == 1)
		mode = FP_RND_NEAREST;
	else
		/* NOTE(review): presumably M3 values 4-7 map to the soft-fp
		   rounding modes 0-3 - confirm against the architecture */
		mode = mask - 4;
	cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
	cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
	FP_UNPACK_QP(QA, &cvt.ld);
	FP_TO_INT_ROUND_Q(si, QA, 32, 1);	/* 1 = signed result */
	regs->gprs[rx] = si;
	emu_set_CC_cs(regs, QA_c, QA_s);
	return _fex;
}
479 | |||
480 | /* Convert to fixed double */ | ||
481 | static int emu_cfdbr (struct pt_regs *regs, int rx, int ry, int mask) { | ||
482 | FP_DECL_D(DA); | ||
483 | FP_DECL_EX; | ||
484 | __s32 si; | ||
485 | int mode; | ||
486 | |||
487 | if (mask == 0) | ||
488 | mode = current->thread.fp_regs.fpc & 3; | ||
489 | else if (mask == 1) | ||
490 | mode = FP_RND_NEAREST; | ||
491 | else | ||
492 | mode = mask - 4; | ||
493 | FP_UNPACK_DP(DA, ¤t->thread.fp_regs.fprs[ry].d); | ||
494 | FP_TO_INT_ROUND_D(si, DA, 32, 1); | ||
495 | regs->gprs[rx] = si; | ||
496 | emu_set_CC_cs(regs, DA_c, DA_s); | ||
497 | return _fex; | ||
498 | } | ||
499 | |||
500 | /* Convert to fixed float */ | ||
501 | static int emu_cfebr (struct pt_regs *regs, int rx, int ry, int mask) { | ||
502 | FP_DECL_S(SA); | ||
503 | FP_DECL_EX; | ||
504 | __s32 si; | ||
505 | int mode; | ||
506 | |||
507 | if (mask == 0) | ||
508 | mode = current->thread.fp_regs.fpc & 3; | ||
509 | else if (mask == 1) | ||
510 | mode = FP_RND_NEAREST; | ||
511 | else | ||
512 | mode = mask - 4; | ||
513 | FP_UNPACK_SP(SA, ¤t->thread.fp_regs.fprs[ry].f); | ||
514 | FP_TO_INT_ROUND_S(si, SA, 32, 1); | ||
515 | regs->gprs[rx] = si; | ||
516 | emu_set_CC_cs(regs, SA_c, SA_s); | ||
517 | return _fex; | ||
518 | } | ||
519 | |||
/* Divide long double (DXBR): fprs[rx:rx+2] /= fprs[ry:ry+2].
   No CC update for divide. */
static int emu_dxbr (struct pt_regs *regs, int rx, int ry) {
	FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR);
	FP_DECL_EX;
	mathemu_ldcv cvt;
	int mode;

	/* fpc rounding mode; presumably read by the soft-fp macros */
	mode = current->thread.fp_regs.fpc & 3;
	cvt.w.high = current->thread.fp_regs.fprs[rx].ui;
	cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui;
	FP_UNPACK_QP(QA, &cvt.ld);
	cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
	cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
	FP_UNPACK_QP(QB, &cvt.ld);
	FP_DIV_Q(QR, QA, QB);
	FP_PACK_QP(&cvt.ld, QR);
	current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
	current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
	return _fex;
}
540 | |||
541 | /* Divide double */ | ||
542 | static int emu_ddbr (struct pt_regs *regs, int rx, int ry) { | ||
543 | FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR); | ||
544 | FP_DECL_EX; | ||
545 | int mode; | ||
546 | |||
547 | mode = current->thread.fp_regs.fpc & 3; | ||
548 | FP_UNPACK_DP(DA, ¤t->thread.fp_regs.fprs[rx].d); | ||
549 | FP_UNPACK_DP(DB, ¤t->thread.fp_regs.fprs[ry].d); | ||
550 | FP_DIV_D(DR, DA, DB); | ||
551 | FP_PACK_DP(¤t->thread.fp_regs.fprs[rx].d, DR); | ||
552 | return _fex; | ||
553 | } | ||
554 | |||
555 | /* Divide double */ | ||
556 | static int emu_ddb (struct pt_regs *regs, int rx, double *val) { | ||
557 | FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR); | ||
558 | FP_DECL_EX; | ||
559 | int mode; | ||
560 | |||
561 | mode = current->thread.fp_regs.fpc & 3; | ||
562 | FP_UNPACK_DP(DA, ¤t->thread.fp_regs.fprs[rx].d); | ||
563 | FP_UNPACK_DP(DB, val); | ||
564 | FP_DIV_D(DR, DA, DB); | ||
565 | FP_PACK_DP(¤t->thread.fp_regs.fprs[rx].d, DR); | ||
566 | return _fex; | ||
567 | } | ||
568 | |||
569 | /* Divide float */ | ||
570 | static int emu_debr (struct pt_regs *regs, int rx, int ry) { | ||
571 | FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR); | ||
572 | FP_DECL_EX; | ||
573 | int mode; | ||
574 | |||
575 | mode = current->thread.fp_regs.fpc & 3; | ||
576 | FP_UNPACK_SP(SA, ¤t->thread.fp_regs.fprs[rx].f); | ||
577 | FP_UNPACK_SP(SB, ¤t->thread.fp_regs.fprs[ry].f); | ||
578 | FP_DIV_S(SR, SA, SB); | ||
579 | FP_PACK_SP(¤t->thread.fp_regs.fprs[rx].f, SR); | ||
580 | return _fex; | ||
581 | } | ||
582 | |||
583 | /* Divide float */ | ||
584 | static int emu_deb (struct pt_regs *regs, int rx, float *val) { | ||
585 | FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR); | ||
586 | FP_DECL_EX; | ||
587 | int mode; | ||
588 | |||
589 | mode = current->thread.fp_regs.fpc & 3; | ||
590 | FP_UNPACK_SP(SA, ¤t->thread.fp_regs.fprs[rx].f); | ||
591 | FP_UNPACK_SP(SB, val); | ||
592 | FP_DIV_S(SR, SA, SB); | ||
593 | FP_PACK_SP(¤t->thread.fp_regs.fprs[rx].f, SR); | ||
594 | return _fex; | ||
595 | } | ||
596 | |||
/* Divide to integer double (DIDBR): not implemented - only logs a
   warning and leaves registers untouched. */
static int emu_didbr (struct pt_regs *regs, int rx, int ry, int mask) {
	display_emulation_not_implemented(regs, "didbr");
	return 0;
}
602 | |||
/* Divide to integer float (DIEBR): not implemented - only logs a
   warning and leaves registers untouched. */
static int emu_diebr (struct pt_regs *regs, int rx, int ry, int mask) {
	display_emulation_not_implemented(regs, "diebr");
	return 0;
}
608 | |||
/* Extract fpc (EFPC): copy the FP control register into gprs[rx].
   ry is unused for this instruction. */
static int emu_efpc (struct pt_regs *regs, int rx, int ry) {
	regs->gprs[rx] = current->thread.fp_regs.fpc;
	return 0;
}
614 | |||
615 | /* Load and test long double */ | ||
616 | static int emu_ltxbr (struct pt_regs *regs, int rx, int ry) { | ||
617 | s390_fp_regs *fp_regs = ¤t->thread.fp_regs; | ||
618 | mathemu_ldcv cvt; | ||
619 | FP_DECL_Q(QA); | ||
620 | FP_DECL_EX; | ||
621 | |||
622 | cvt.w.high = current->thread.fp_regs.fprs[ry].ui; | ||
623 | cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui; | ||
624 | FP_UNPACK_QP(QA, &cvt.ld); | ||
625 | fp_regs->fprs[rx].ui = fp_regs->fprs[ry].ui; | ||
626 | fp_regs->fprs[rx+2].ui = fp_regs->fprs[ry+2].ui; | ||
627 | emu_set_CC_cs(regs, QA_c, QA_s); | ||
628 | return _fex; | ||
629 | } | ||
630 | |||
631 | /* Load and test double */ | ||
632 | static int emu_ltdbr (struct pt_regs *regs, int rx, int ry) { | ||
633 | s390_fp_regs *fp_regs = ¤t->thread.fp_regs; | ||
634 | FP_DECL_D(DA); | ||
635 | FP_DECL_EX; | ||
636 | |||
637 | FP_UNPACK_DP(DA, &fp_regs->fprs[ry].d); | ||
638 | fp_regs->fprs[rx].ui = fp_regs->fprs[ry].ui; | ||
639 | emu_set_CC_cs(regs, DA_c, DA_s); | ||
640 | return _fex; | ||
641 | } | ||
642 | |||
643 | /* Load and test double */ | ||
644 | static int emu_ltebr (struct pt_regs *regs, int rx, int ry) { | ||
645 | s390_fp_regs *fp_regs = ¤t->thread.fp_regs; | ||
646 | FP_DECL_S(SA); | ||
647 | FP_DECL_EX; | ||
648 | |||
649 | FP_UNPACK_SP(SA, &fp_regs->fprs[ry].f); | ||
650 | fp_regs->fprs[rx].ui = fp_regs->fprs[ry].ui; | ||
651 | emu_set_CC_cs(regs, SA_c, SA_s); | ||
652 | return _fex; | ||
653 | } | ||
654 | |||
/* Load complement long double (LCXBR): fprs[rx:rx+2] = -fprs[ry:ry+2],
   CC from the negated result. */
static int emu_lcxbr (struct pt_regs *regs, int rx, int ry) {
	FP_DECL_Q(QA); FP_DECL_Q(QR);
	FP_DECL_EX;
	mathemu_ldcv cvt;
	int mode;

	/* fpc rounding mode; presumably read by the soft-fp macros */
	mode = current->thread.fp_regs.fpc & 3;
	cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
	cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
	FP_UNPACK_QP(QA, &cvt.ld);
	FP_NEG_Q(QR, QA);
	FP_PACK_QP(&cvt.ld, QR);
	current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
	current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
	emu_set_CC_cs(regs, QR_c, QR_s);
	return _fex;
}
673 | |||
674 | /* Load complement double */ | ||
675 | static int emu_lcdbr (struct pt_regs *regs, int rx, int ry) { | ||
676 | FP_DECL_D(DA); FP_DECL_D(DR); | ||
677 | FP_DECL_EX; | ||
678 | int mode; | ||
679 | |||
680 | mode = current->thread.fp_regs.fpc & 3; | ||
681 | FP_UNPACK_DP(DA, ¤t->thread.fp_regs.fprs[ry].d); | ||
682 | FP_NEG_D(DR, DA); | ||
683 | FP_PACK_DP(¤t->thread.fp_regs.fprs[rx].d, DR); | ||
684 | emu_set_CC_cs(regs, DR_c, DR_s); | ||
685 | return _fex; | ||
686 | } | ||
687 | |||
688 | /* Load complement float */ | ||
689 | static int emu_lcebr (struct pt_regs *regs, int rx, int ry) { | ||
690 | FP_DECL_S(SA); FP_DECL_S(SR); | ||
691 | FP_DECL_EX; | ||
692 | int mode; | ||
693 | |||
694 | mode = current->thread.fp_regs.fpc & 3; | ||
695 | FP_UNPACK_SP(SA, ¤t->thread.fp_regs.fprs[ry].f); | ||
696 | FP_NEG_S(SR, SA); | ||
697 | FP_PACK_SP(¤t->thread.fp_regs.fprs[rx].f, SR); | ||
698 | emu_set_CC_cs(regs, SR_c, SR_s); | ||
699 | return _fex; | ||
700 | } | ||
701 | |||
702 | /* Load floating point integer long double */ | ||
703 | static int emu_fixbr (struct pt_regs *regs, int rx, int ry, int mask) { | ||
704 | s390_fp_regs *fp_regs = ¤t->thread.fp_regs; | ||
705 | FP_DECL_Q(QA); | ||
706 | FP_DECL_EX; | ||
707 | mathemu_ldcv cvt; | ||
708 | __s32 si; | ||
709 | int mode; | ||
710 | |||
711 | if (mask == 0) | ||
712 | mode = fp_regs->fpc & 3; | ||
713 | else if (mask == 1) | ||
714 | mode = FP_RND_NEAREST; | ||
715 | else | ||
716 | mode = mask - 4; | ||
717 | cvt.w.high = fp_regs->fprs[ry].ui; | ||
718 | cvt.w.low = fp_regs->fprs[ry+2].ui; | ||
719 | FP_UNPACK_QP(QA, &cvt.ld); | ||
720 | FP_TO_FPINT_ROUND_Q(QA); | ||
721 | FP_PACK_QP(&cvt.ld, QA); | ||
722 | fp_regs->fprs[rx].ui = cvt.w.high; | ||
723 | fp_regs->fprs[rx+2].ui = cvt.w.low; | ||
724 | return _fex; | ||
725 | } | ||
726 | |||
727 | /* Load floating point integer double */ | ||
728 | static int emu_fidbr (struct pt_regs *regs, int rx, int ry, int mask) { | ||
729 | /* FIXME: rounding mode !! */ | ||
730 | s390_fp_regs *fp_regs = ¤t->thread.fp_regs; | ||
731 | FP_DECL_D(DA); | ||
732 | FP_DECL_EX; | ||
733 | __s32 si; | ||
734 | int mode; | ||
735 | |||
736 | if (mask == 0) | ||
737 | mode = fp_regs->fpc & 3; | ||
738 | else if (mask == 1) | ||
739 | mode = FP_RND_NEAREST; | ||
740 | else | ||
741 | mode = mask - 4; | ||
742 | FP_UNPACK_DP(DA, &fp_regs->fprs[ry].d); | ||
743 | FP_TO_FPINT_ROUND_D(DA); | ||
744 | FP_PACK_DP(&fp_regs->fprs[rx].d, DA); | ||
745 | return _fex; | ||
746 | } | ||
747 | |||
748 | /* Load floating point integer float */ | ||
749 | static int emu_fiebr (struct pt_regs *regs, int rx, int ry, int mask) { | ||
750 | s390_fp_regs *fp_regs = ¤t->thread.fp_regs; | ||
751 | FP_DECL_S(SA); | ||
752 | FP_DECL_EX; | ||
753 | __s32 si; | ||
754 | int mode; | ||
755 | |||
756 | if (mask == 0) | ||
757 | mode = fp_regs->fpc & 3; | ||
758 | else if (mask == 1) | ||
759 | mode = FP_RND_NEAREST; | ||
760 | else | ||
761 | mode = mask - 4; | ||
762 | FP_UNPACK_SP(SA, &fp_regs->fprs[ry].f); | ||
763 | FP_TO_FPINT_ROUND_S(SA); | ||
764 | FP_PACK_SP(&fp_regs->fprs[rx].f, SA); | ||
765 | return _fex; | ||
766 | } | ||
767 | |||
768 | /* Load lengthened double to long double */ | ||
769 | static int emu_lxdbr (struct pt_regs *regs, int rx, int ry) { | ||
770 | FP_DECL_D(DA); FP_DECL_Q(QR); | ||
771 | FP_DECL_EX; | ||
772 | mathemu_ldcv cvt; | ||
773 | int mode; | ||
774 | |||
775 | mode = current->thread.fp_regs.fpc & 3; | ||
776 | FP_UNPACK_DP(DA, ¤t->thread.fp_regs.fprs[ry].d); | ||
777 | FP_CONV (Q, D, 4, 2, QR, DA); | ||
778 | FP_PACK_QP(&cvt.ld, QR); | ||
779 | current->thread.fp_regs.fprs[rx].ui = cvt.w.high; | ||
780 | current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low; | ||
781 | return _fex; | ||
782 | } | ||
783 | |||
/* Load lengthened double (memory) to long double (LXDB):
   fprs[rx:rx+2] = (long double) *val. */
static int emu_lxdb (struct pt_regs *regs, int rx, double *val) {
	FP_DECL_D(DA); FP_DECL_Q(QR);
	FP_DECL_EX;
	mathemu_ldcv cvt;
	int mode;

	/* fpc rounding mode; presumably read by the soft-fp macros */
	mode = current->thread.fp_regs.fpc & 3;
	FP_UNPACK_DP(DA, val);
	FP_CONV (Q, D, 4, 2, QR, DA);
	FP_PACK_QP(&cvt.ld, QR);
	current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
	current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
	return _fex;
}
799 | |||
800 | /* Load lengthened float to long double */ | ||
801 | static int emu_lxebr (struct pt_regs *regs, int rx, int ry) { | ||
802 | FP_DECL_S(SA); FP_DECL_Q(QR); | ||
803 | FP_DECL_EX; | ||
804 | mathemu_ldcv cvt; | ||
805 | int mode; | ||
806 | |||
807 | mode = current->thread.fp_regs.fpc & 3; | ||
808 | FP_UNPACK_SP(SA, ¤t->thread.fp_regs.fprs[ry].f); | ||
809 | FP_CONV (Q, S, 4, 1, QR, SA); | ||
810 | FP_PACK_QP(&cvt.ld, QR); | ||
811 | current->thread.fp_regs.fprs[rx].ui = cvt.w.high; | ||
812 | current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low; | ||
813 | return _fex; | ||
814 | } | ||
815 | |||
/* Load lengthened float (memory) to long double (LXEB):
   fprs[rx:rx+2] = (long double) *val. */
static int emu_lxeb (struct pt_regs *regs, int rx, float *val) {
	FP_DECL_S(SA); FP_DECL_Q(QR);
	FP_DECL_EX;
	mathemu_ldcv cvt;
	int mode;

	/* fpc rounding mode; presumably read by the soft-fp macros */
	mode = current->thread.fp_regs.fpc & 3;
	FP_UNPACK_SP(SA, val);
	FP_CONV (Q, S, 4, 1, QR, SA);
	FP_PACK_QP(&cvt.ld, QR);
	current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
	current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
	return _fex;
}
831 | |||
832 | /* Load lengthened float to double */ | ||
833 | static int emu_ldebr (struct pt_regs *regs, int rx, int ry) { | ||
834 | FP_DECL_S(SA); FP_DECL_D(DR); | ||
835 | FP_DECL_EX; | ||
836 | int mode; | ||
837 | |||
838 | mode = current->thread.fp_regs.fpc & 3; | ||
839 | FP_UNPACK_SP(SA, ¤t->thread.fp_regs.fprs[ry].f); | ||
840 | FP_CONV (D, S, 2, 1, DR, SA); | ||
841 | FP_PACK_DP(¤t->thread.fp_regs.fprs[rx].d, DR); | ||
842 | return _fex; | ||
843 | } | ||
844 | |||
845 | /* Load lengthened float to double */ | ||
846 | static int emu_ldeb (struct pt_regs *regs, int rx, float *val) { | ||
847 | FP_DECL_S(SA); FP_DECL_D(DR); | ||
848 | FP_DECL_EX; | ||
849 | int mode; | ||
850 | |||
851 | mode = current->thread.fp_regs.fpc & 3; | ||
852 | FP_UNPACK_SP(SA, val); | ||
853 | FP_CONV (D, S, 2, 1, DR, SA); | ||
854 | FP_PACK_DP(¤t->thread.fp_regs.fprs[rx].d, DR); | ||
855 | return _fex; | ||
856 | } | ||
857 | |||
858 | /* Load negative long double */ | ||
859 | static int emu_lnxbr (struct pt_regs *regs, int rx, int ry) { | ||
860 | FP_DECL_Q(QA); FP_DECL_Q(QR); | ||
861 | FP_DECL_EX; | ||
862 | mathemu_ldcv cvt; | ||
863 | int mode; | ||
864 | |||
865 | mode = current->thread.fp_regs.fpc & 3; | ||
866 | cvt.w.high = current->thread.fp_regs.fprs[ry].ui; | ||
867 | cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui; | ||
868 | FP_UNPACK_QP(QA, &cvt.ld); | ||
869 | if (QA_s == 0) { | ||
870 | FP_NEG_Q(QR, QA); | ||
871 | FP_PACK_QP(&cvt.ld, QR); | ||
872 | current->thread.fp_regs.fprs[rx].ui = cvt.w.high; | ||
873 | current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low; | ||
874 | } else { | ||
875 | current->thread.fp_regs.fprs[rx].ui = | ||
876 | current->thread.fp_regs.fprs[ry].ui; | ||
877 | current->thread.fp_regs.fprs[rx+2].ui = | ||
878 | current->thread.fp_regs.fprs[ry+2].ui; | ||
879 | } | ||
880 | emu_set_CC_cs(regs, QR_c, QR_s); | ||
881 | return _fex; | ||
882 | } | ||
883 | |||
884 | /* Load negative double */ | ||
885 | static int emu_lndbr (struct pt_regs *regs, int rx, int ry) { | ||
886 | FP_DECL_D(DA); FP_DECL_D(DR); | ||
887 | FP_DECL_EX; | ||
888 | int mode; | ||
889 | |||
890 | mode = current->thread.fp_regs.fpc & 3; | ||
891 | FP_UNPACK_DP(DA, ¤t->thread.fp_regs.fprs[ry].d); | ||
892 | if (DA_s == 0) { | ||
893 | FP_NEG_D(DR, DA); | ||
894 | FP_PACK_DP(¤t->thread.fp_regs.fprs[rx].d, DR); | ||
895 | } else | ||
896 | current->thread.fp_regs.fprs[rx].ui = | ||
897 | current->thread.fp_regs.fprs[ry].ui; | ||
898 | emu_set_CC_cs(regs, DR_c, DR_s); | ||
899 | return _fex; | ||
900 | } | ||
901 | |||
902 | /* Load negative float */ | ||
903 | static int emu_lnebr (struct pt_regs *regs, int rx, int ry) { | ||
904 | FP_DECL_S(SA); FP_DECL_S(SR); | ||
905 | FP_DECL_EX; | ||
906 | int mode; | ||
907 | |||
908 | mode = current->thread.fp_regs.fpc & 3; | ||
909 | FP_UNPACK_SP(SA, ¤t->thread.fp_regs.fprs[ry].f); | ||
910 | if (SA_s == 0) { | ||
911 | FP_NEG_S(SR, SA); | ||
912 | FP_PACK_SP(¤t->thread.fp_regs.fprs[rx].f, SR); | ||
913 | } else | ||
914 | current->thread.fp_regs.fprs[rx].ui = | ||
915 | current->thread.fp_regs.fprs[ry].ui; | ||
916 | emu_set_CC_cs(regs, SR_c, SR_s); | ||
917 | return _fex; | ||
918 | } | ||
919 | |||
920 | /* Load positive long double */ | ||
921 | static int emu_lpxbr (struct pt_regs *regs, int rx, int ry) { | ||
922 | FP_DECL_Q(QA); FP_DECL_Q(QR); | ||
923 | FP_DECL_EX; | ||
924 | mathemu_ldcv cvt; | ||
925 | int mode; | ||
926 | |||
927 | mode = current->thread.fp_regs.fpc & 3; | ||
928 | cvt.w.high = current->thread.fp_regs.fprs[ry].ui; | ||
929 | cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui; | ||
930 | FP_UNPACK_QP(QA, &cvt.ld); | ||
931 | if (QA_s != 0) { | ||
932 | FP_NEG_Q(QR, QA); | ||
933 | FP_PACK_QP(&cvt.ld, QR); | ||
934 | current->thread.fp_regs.fprs[rx].ui = cvt.w.high; | ||
935 | current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low; | ||
936 | } else{ | ||
937 | current->thread.fp_regs.fprs[rx].ui = | ||
938 | current->thread.fp_regs.fprs[ry].ui; | ||
939 | current->thread.fp_regs.fprs[rx+2].ui = | ||
940 | current->thread.fp_regs.fprs[ry+2].ui; | ||
941 | } | ||
942 | emu_set_CC_cs(regs, QR_c, QR_s); | ||
943 | return _fex; | ||
944 | } | ||
945 | |||
946 | /* Load positive double */ | ||
947 | static int emu_lpdbr (struct pt_regs *regs, int rx, int ry) { | ||
948 | FP_DECL_D(DA); FP_DECL_D(DR); | ||
949 | FP_DECL_EX; | ||
950 | int mode; | ||
951 | |||
952 | mode = current->thread.fp_regs.fpc & 3; | ||
953 | FP_UNPACK_DP(DA, ¤t->thread.fp_regs.fprs[ry].d); | ||
954 | if (DA_s != 0) { | ||
955 | FP_NEG_D(DR, DA); | ||
956 | FP_PACK_DP(¤t->thread.fp_regs.fprs[rx].d, DR); | ||
957 | } else | ||
958 | current->thread.fp_regs.fprs[rx].ui = | ||
959 | current->thread.fp_regs.fprs[ry].ui; | ||
960 | emu_set_CC_cs(regs, DR_c, DR_s); | ||
961 | return _fex; | ||
962 | } | ||
963 | |||
964 | /* Load positive float */ | ||
965 | static int emu_lpebr (struct pt_regs *regs, int rx, int ry) { | ||
966 | FP_DECL_S(SA); FP_DECL_S(SR); | ||
967 | FP_DECL_EX; | ||
968 | int mode; | ||
969 | |||
970 | mode = current->thread.fp_regs.fpc & 3; | ||
971 | FP_UNPACK_SP(SA, ¤t->thread.fp_regs.fprs[ry].f); | ||
972 | if (SA_s != 0) { | ||
973 | FP_NEG_S(SR, SA); | ||
974 | FP_PACK_SP(¤t->thread.fp_regs.fprs[rx].f, SR); | ||
975 | } else | ||
976 | current->thread.fp_regs.fprs[rx].ui = | ||
977 | current->thread.fp_regs.fprs[ry].ui; | ||
978 | emu_set_CC_cs(regs, SR_c, SR_s); | ||
979 | return _fex; | ||
980 | } | ||
981 | |||
982 | /* Load rounded long double to double */ | ||
983 | static int emu_ldxbr (struct pt_regs *regs, int rx, int ry) { | ||
984 | FP_DECL_Q(QA); FP_DECL_D(DR); | ||
985 | FP_DECL_EX; | ||
986 | mathemu_ldcv cvt; | ||
987 | int mode; | ||
988 | |||
989 | mode = current->thread.fp_regs.fpc & 3; | ||
990 | cvt.w.high = current->thread.fp_regs.fprs[ry].ui; | ||
991 | cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui; | ||
992 | FP_UNPACK_QP(QA, &cvt.ld); | ||
993 | FP_CONV (D, Q, 2, 4, DR, QA); | ||
994 | FP_PACK_DP(¤t->thread.fp_regs.fprs[rx].f, DR); | ||
995 | return _fex; | ||
996 | } | ||
997 | |||
998 | /* Load rounded long double to float */ | ||
999 | static int emu_lexbr (struct pt_regs *regs, int rx, int ry) { | ||
1000 | FP_DECL_Q(QA); FP_DECL_S(SR); | ||
1001 | FP_DECL_EX; | ||
1002 | mathemu_ldcv cvt; | ||
1003 | int mode; | ||
1004 | |||
1005 | mode = current->thread.fp_regs.fpc & 3; | ||
1006 | cvt.w.high = current->thread.fp_regs.fprs[ry].ui; | ||
1007 | cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui; | ||
1008 | FP_UNPACK_QP(QA, &cvt.ld); | ||
1009 | FP_CONV (S, Q, 1, 4, SR, QA); | ||
1010 | FP_PACK_SP(¤t->thread.fp_regs.fprs[rx].f, SR); | ||
1011 | return _fex; | ||
1012 | } | ||
1013 | |||
1014 | /* Load rounded double to float */ | ||
1015 | static int emu_ledbr (struct pt_regs *regs, int rx, int ry) { | ||
1016 | FP_DECL_D(DA); FP_DECL_S(SR); | ||
1017 | FP_DECL_EX; | ||
1018 | int mode; | ||
1019 | |||
1020 | mode = current->thread.fp_regs.fpc & 3; | ||
1021 | FP_UNPACK_DP(DA, ¤t->thread.fp_regs.fprs[ry].d); | ||
1022 | FP_CONV (S, D, 1, 2, SR, DA); | ||
1023 | FP_PACK_SP(¤t->thread.fp_regs.fprs[rx].f, SR); | ||
1024 | return _fex; | ||
1025 | } | ||
1026 | |||
/* Multiply long double (MXBR): fprs[rx:rx+2] *= fprs[ry:ry+2].
   No CC update for multiply. */
static int emu_mxbr (struct pt_regs *regs, int rx, int ry) {
	FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR);
	FP_DECL_EX;
	mathemu_ldcv cvt;
	int mode;

	/* fpc rounding mode; presumably read by the soft-fp macros */
	mode = current->thread.fp_regs.fpc & 3;
	cvt.w.high = current->thread.fp_regs.fprs[rx].ui;
	cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui;
	FP_UNPACK_QP(QA, &cvt.ld);
	cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
	cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
	FP_UNPACK_QP(QB, &cvt.ld);
	FP_MUL_Q(QR, QA, QB);
	FP_PACK_QP(&cvt.ld, QR);
	current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
	current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
	return _fex;
}
1047 | |||
/* Multiply double (MDBR): FPR[rx] *= FPR[ry]. */
static int emu_mdbr (struct pt_regs *regs, int rx, int ry) {
	FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
	FP_DECL_EX;
	int mode;

	/* Rounding mode (low two FPC bits); presumably consumed by the
	 * FP_* macros via FP_ROUNDMODE. */
	mode = current->thread.fp_regs.fpc & 3;
	FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
	FP_UNPACK_DP(DB, &current->thread.fp_regs.fprs[ry].d);
	FP_MUL_D(DR, DA, DB);
	FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
	return _fex;	/* IEEE exception flags raised by the macros */
}
1061 | |||
/* Multiply double, storage operand (MDB): FPR[rx] *= *val.
 * val points at the second operand already fetched from memory by the
 * dispatcher. */
static int emu_mdb (struct pt_regs *regs, int rx, double *val) {
	FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
	FP_DECL_EX;
	int mode;

	/* Rounding mode (low two FPC bits); presumably consumed by the
	 * FP_* macros via FP_ROUNDMODE. */
	mode = current->thread.fp_regs.fpc & 3;
	FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
	FP_UNPACK_DP(DB, val);
	FP_MUL_D(DR, DA, DB);
	FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
	return _fex;	/* IEEE exception flags raised by the macros */
}
1075 | |||
/* Multiply double to long double (MXDBR):
 * FPR pair rx/rx+2 = (long double) FPR[rx] * (long double) FPR[ry]. */
static int emu_mxdbr (struct pt_regs *regs, int rx, int ry) {
	FP_DECL_D(DA); FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR);
	FP_DECL_EX;
	mathemu_ldcv cvt;
	int mode;

	/* Rounding mode (low two FPC bits); presumably consumed by the
	 * FP_* macros via FP_ROUNDMODE. */
	mode = current->thread.fp_regs.fpc & 3;
	/* Widen both double operands to quad before multiplying;
	 * DA is reused as a scratch unpack slot for each operand. */
	FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
	FP_CONV (Q, D, 4, 2, QA, DA);
	FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d);
	FP_CONV (Q, D, 4, 2, QB, DA);
	FP_MUL_Q(QR, QA, QB);
	/* Scatter the 128-bit product into the register pair rx/rx+2. */
	FP_PACK_QP(&cvt.ld, QR);
	current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
	current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
	return _fex;	/* IEEE exception flags raised by the macros */
}
1094 | |||
/* Multiply to long double, storage operand (MXDB):
 * FPR pair rx/rx+2 = (pair rx/rx+2) * *val.
 * NOTE(review): unlike emu_mxdbr above, the first operand here is taken
 * as a full 128-bit register pair and val is a 128-bit value — presumably
 * the dispatcher has already widened the storage operand; confirm against
 * the caller. */
static int emu_mxdb (struct pt_regs *regs, int rx, long double *val) {
	FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR);
	FP_DECL_EX;
	mathemu_ldcv cvt;
	int mode;

	/* Rounding mode (low two FPC bits); presumably consumed by the
	 * FP_* macros via FP_ROUNDMODE. */
	mode = current->thread.fp_regs.fpc & 3;
	cvt.w.high = current->thread.fp_regs.fprs[rx].ui;
	cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui;
	FP_UNPACK_QP(QA, &cvt.ld);
	FP_UNPACK_QP(QB, val);
	FP_MUL_Q(QR, QA, QB);
	FP_PACK_QP(&cvt.ld, QR);
	current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
	current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
	return _fex;	/* IEEE exception flags raised by the macros */
}
1113 | |||
/* Multiply float (MEEBR): FPR[rx] *= FPR[ry]. */
static int emu_meebr (struct pt_regs *regs, int rx, int ry) {
	FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
	FP_DECL_EX;
	int mode;

	/* Rounding mode (low two FPC bits); presumably consumed by the
	 * FP_* macros via FP_ROUNDMODE. */
	mode = current->thread.fp_regs.fpc & 3;
	FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
	FP_UNPACK_SP(SB, &current->thread.fp_regs.fprs[ry].f);
	FP_MUL_S(SR, SA, SB);
	FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
	return _fex;	/* IEEE exception flags raised by the macros */
}
1127 | |||
/* Multiply float, storage operand (MEEB): FPR[rx] *= *val. */
static int emu_meeb (struct pt_regs *regs, int rx, float *val) {
	FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
	FP_DECL_EX;
	int mode;

	/* Rounding mode (low two FPC bits); presumably consumed by the
	 * FP_* macros via FP_ROUNDMODE. */
	mode = current->thread.fp_regs.fpc & 3;
	FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
	FP_UNPACK_SP(SB, val);
	FP_MUL_S(SR, SA, SB);
	FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
	return _fex;	/* IEEE exception flags raised by the macros */
}
1141 | |||
/* Multiply float to double (MDEBR):
 * FPR[rx] = (double) FPR[rx] * (double) FPR[ry]. */
static int emu_mdebr (struct pt_regs *regs, int rx, int ry) {
	FP_DECL_S(SA); FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
	FP_DECL_EX;
	int mode;

	/* Rounding mode (low two FPC bits); presumably consumed by the
	 * FP_* macros via FP_ROUNDMODE. */
	mode = current->thread.fp_regs.fpc & 3;
	/* Widen both float operands to double; SA is reused as scratch. */
	FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
	FP_CONV (D, S, 2, 1, DA, SA);
	FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f);
	FP_CONV (D, S, 2, 1, DB, SA);
	FP_MUL_D(DR, DA, DB);
	FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
	return _fex;	/* IEEE exception flags raised by the macros */
}
1157 | |||
/* Multiply float to double, storage operand (MDEB):
 * FPR[rx] = (double) FPR[rx] * (double) *val. */
static int emu_mdeb (struct pt_regs *regs, int rx, float *val) {
	FP_DECL_S(SA); FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
	FP_DECL_EX;
	int mode;

	/* Rounding mode (low two FPC bits); presumably consumed by the
	 * FP_* macros via FP_ROUNDMODE. */
	mode = current->thread.fp_regs.fpc & 3;
	/* Widen both float operands to double; SA is reused as scratch. */
	FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
	FP_CONV (D, S, 2, 1, DA, SA);
	FP_UNPACK_SP(SA, val);
	FP_CONV (D, S, 2, 1, DB, SA);
	FP_MUL_D(DR, DA, DB);
	FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
	return _fex;	/* IEEE exception flags raised by the macros */
}
1173 | |||
/* Multiply and add double (MADBR): FPR[rz] += FPR[rx] * FPR[ry].
 * Note the result register is rz, not rx. */
static int emu_madbr (struct pt_regs *regs, int rx, int ry, int rz) {
	FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DC); FP_DECL_D(DR);
	FP_DECL_EX;
	int mode;

	/* Rounding mode (low two FPC bits); presumably consumed by the
	 * FP_* macros via FP_ROUNDMODE. */
	mode = current->thread.fp_regs.fpc & 3;
	FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
	FP_UNPACK_DP(DB, &current->thread.fp_regs.fprs[ry].d);
	FP_UNPACK_DP(DC, &current->thread.fp_regs.fprs[rz].d);
	FP_MUL_D(DR, DA, DB);
	FP_ADD_D(DR, DR, DC);
	FP_PACK_DP(&current->thread.fp_regs.fprs[rz].d, DR);
	return _fex;	/* IEEE exception flags raised by the macros */
}
1189 | |||
/* Multiply and add double, storage operand (MADB):
 * FPR[rz] += FPR[rx] * *val. */
static int emu_madb (struct pt_regs *regs, int rx, double *val, int rz) {
	FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DC); FP_DECL_D(DR);
	FP_DECL_EX;
	int mode;

	/* Rounding mode (low two FPC bits); presumably consumed by the
	 * FP_* macros via FP_ROUNDMODE. */
	mode = current->thread.fp_regs.fpc & 3;
	FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
	FP_UNPACK_DP(DB, val);
	FP_UNPACK_DP(DC, &current->thread.fp_regs.fprs[rz].d);
	FP_MUL_D(DR, DA, DB);
	FP_ADD_D(DR, DR, DC);
	FP_PACK_DP(&current->thread.fp_regs.fprs[rz].d, DR);
	return _fex;	/* IEEE exception flags raised by the macros */
}
1205 | |||
/* Multiply and add float (MAEBR): FPR[rz] += FPR[rx] * FPR[ry]. */
static int emu_maebr (struct pt_regs *regs, int rx, int ry, int rz) {
	FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SC); FP_DECL_S(SR);
	FP_DECL_EX;
	int mode;

	/* Rounding mode (low two FPC bits); presumably consumed by the
	 * FP_* macros via FP_ROUNDMODE. */
	mode = current->thread.fp_regs.fpc & 3;
	FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
	FP_UNPACK_SP(SB, &current->thread.fp_regs.fprs[ry].f);
	FP_UNPACK_SP(SC, &current->thread.fp_regs.fprs[rz].f);
	FP_MUL_S(SR, SA, SB);
	FP_ADD_S(SR, SR, SC);
	FP_PACK_SP(&current->thread.fp_regs.fprs[rz].f, SR);
	return _fex;	/* IEEE exception flags raised by the macros */
}
1221 | |||
/* Multiply and add float, storage operand (MAEB):
 * FPR[rz] += FPR[rx] * *val. */
static int emu_maeb (struct pt_regs *regs, int rx, float *val, int rz) {
	FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SC); FP_DECL_S(SR);
	FP_DECL_EX;
	int mode;

	/* Rounding mode (low two FPC bits); presumably consumed by the
	 * FP_* macros via FP_ROUNDMODE. */
	mode = current->thread.fp_regs.fpc & 3;
	FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
	FP_UNPACK_SP(SB, val);
	FP_UNPACK_SP(SC, &current->thread.fp_regs.fprs[rz].f);
	FP_MUL_S(SR, SA, SB);
	FP_ADD_S(SR, SR, SC);
	FP_PACK_SP(&current->thread.fp_regs.fprs[rz].f, SR);
	return _fex;	/* IEEE exception flags raised by the macros */
}
1237 | |||
/* Multiply and subtract double (MSDBR):
 * FPR[rz] = FPR[rx] * FPR[ry] - FPR[rz]. */
static int emu_msdbr (struct pt_regs *regs, int rx, int ry, int rz) {
	FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DC); FP_DECL_D(DR);
	FP_DECL_EX;
	int mode;

	/* Rounding mode (low two FPC bits); presumably consumed by the
	 * FP_* macros via FP_ROUNDMODE. */
	mode = current->thread.fp_regs.fpc & 3;
	FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
	FP_UNPACK_DP(DB, &current->thread.fp_regs.fprs[ry].d);
	FP_UNPACK_DP(DC, &current->thread.fp_regs.fprs[rz].d);
	FP_MUL_D(DR, DA, DB);
	FP_SUB_D(DR, DR, DC);
	FP_PACK_DP(&current->thread.fp_regs.fprs[rz].d, DR);
	return _fex;	/* IEEE exception flags raised by the macros */
}
1253 | |||
/* Multiply and subtract double, storage operand (MSDB):
 * FPR[rz] = FPR[rx] * *val - FPR[rz]. */
static int emu_msdb (struct pt_regs *regs, int rx, double *val, int rz) {
	FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DC); FP_DECL_D(DR);
	FP_DECL_EX;
	int mode;

	/* Rounding mode (low two FPC bits); presumably consumed by the
	 * FP_* macros via FP_ROUNDMODE. */
	mode = current->thread.fp_regs.fpc & 3;
	FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
	FP_UNPACK_DP(DB, val);
	FP_UNPACK_DP(DC, &current->thread.fp_regs.fprs[rz].d);
	FP_MUL_D(DR, DA, DB);
	FP_SUB_D(DR, DR, DC);
	FP_PACK_DP(&current->thread.fp_regs.fprs[rz].d, DR);
	return _fex;	/* IEEE exception flags raised by the macros */
}
1269 | |||
/* Multiply and subtract float (MSEBR):
 * FPR[rz] = FPR[rx] * FPR[ry] - FPR[rz]. */
static int emu_msebr (struct pt_regs *regs, int rx, int ry, int rz) {
	FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SC); FP_DECL_S(SR);
	FP_DECL_EX;
	int mode;

	/* Rounding mode (low two FPC bits); presumably consumed by the
	 * FP_* macros via FP_ROUNDMODE. */
	mode = current->thread.fp_regs.fpc & 3;
	FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
	FP_UNPACK_SP(SB, &current->thread.fp_regs.fprs[ry].f);
	FP_UNPACK_SP(SC, &current->thread.fp_regs.fprs[rz].f);
	FP_MUL_S(SR, SA, SB);
	FP_SUB_S(SR, SR, SC);
	FP_PACK_SP(&current->thread.fp_regs.fprs[rz].f, SR);
	return _fex;	/* IEEE exception flags raised by the macros */
}
1285 | |||
/* Multiply and subtract float, storage operand (MSEB):
 * FPR[rz] = FPR[rx] * *val - FPR[rz]. */
static int emu_mseb (struct pt_regs *regs, int rx, float *val, int rz) {
	FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SC); FP_DECL_S(SR);
	FP_DECL_EX;
	int mode;

	/* Rounding mode (low two FPC bits); presumably consumed by the
	 * FP_* macros via FP_ROUNDMODE. */
	mode = current->thread.fp_regs.fpc & 3;
	FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
	FP_UNPACK_SP(SB, val);
	FP_UNPACK_SP(SC, &current->thread.fp_regs.fprs[rz].f);
	FP_MUL_S(SR, SA, SB);
	FP_SUB_S(SR, SR, SC);
	FP_PACK_SP(&current->thread.fp_regs.fprs[rz].f, SR);
	return _fex;	/* IEEE exception flags raised by the macros */
}
1301 | |||
1302 | /* Set floating point control word */ | ||
1303 | static int emu_sfpc (struct pt_regs *regs, int rx, int ry) { | ||
1304 | __u32 temp; | ||
1305 | |||
1306 | temp = regs->gprs[rx]; | ||
1307 | if ((temp & ~FPC_VALID_MASK) != 0) | ||
1308 | return SIGILL; | ||
1309 | current->thread.fp_regs.fpc = temp; | ||
1310 | return 0; | ||
1311 | } | ||
1312 | |||
/* Square root long double (SQXBR):
 * FPR pair rx/rx+2 = sqrt(FPR pair ry/ry+2); also updates the CC. */
static int emu_sqxbr (struct pt_regs *regs, int rx, int ry) {
	FP_DECL_Q(QA); FP_DECL_Q(QR);
	FP_DECL_EX;
	mathemu_ldcv cvt;
	int mode;

	/* Rounding mode (low two FPC bits); presumably consumed by the
	 * FP_* macros via FP_ROUNDMODE. */
	mode = current->thread.fp_regs.fpc & 3;
	/* Gather the 128-bit operand from the pair ry/ry+2. */
	cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
	cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
	FP_UNPACK_QP(QA, &cvt.ld);
	FP_SQRT_Q(QR, QA);
	FP_PACK_QP(&cvt.ld, QR);
	current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
	current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
	/* Condition code from the result's class and sign (macro-generated
	 * QR_c / QR_s fields). */
	emu_set_CC_cs(regs, QR_c, QR_s);
	return _fex;	/* IEEE exception flags raised by the macros */
}
1331 | |||
/* Square root double (SQDBR): FPR[rx] = sqrt(FPR[ry]); also sets the CC. */
static int emu_sqdbr (struct pt_regs *regs, int rx, int ry) {
	FP_DECL_D(DA); FP_DECL_D(DR);
	FP_DECL_EX;
	int mode;

	/* Rounding mode (low two FPC bits); presumably consumed by the
	 * FP_* macros via FP_ROUNDMODE. */
	mode = current->thread.fp_regs.fpc & 3;
	FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d);
	FP_SQRT_D(DR, DA);
	FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
	/* Condition code from result class/sign (macro fields DR_c/DR_s). */
	emu_set_CC_cs(regs, DR_c, DR_s);
	return _fex;	/* IEEE exception flags raised by the macros */
}
1345 | |||
/* Square root double, storage operand (SQDB): FPR[rx] = sqrt(*val). */
static int emu_sqdb (struct pt_regs *regs, int rx, double *val) {
	FP_DECL_D(DA); FP_DECL_D(DR);
	FP_DECL_EX;
	int mode;

	/* Rounding mode (low two FPC bits); presumably consumed by the
	 * FP_* macros via FP_ROUNDMODE. */
	mode = current->thread.fp_regs.fpc & 3;
	FP_UNPACK_DP(DA, val);
	FP_SQRT_D(DR, DA);
	FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
	/* Condition code from result class/sign (macro fields DR_c/DR_s). */
	emu_set_CC_cs(regs, DR_c, DR_s);
	return _fex;	/* IEEE exception flags raised by the macros */
}
1359 | |||
/* Square root float (SQEBR): FPR[rx] = sqrtf(FPR[ry]); also sets the CC. */
static int emu_sqebr (struct pt_regs *regs, int rx, int ry) {
	FP_DECL_S(SA); FP_DECL_S(SR);
	FP_DECL_EX;
	int mode;

	/* Rounding mode (low two FPC bits); presumably consumed by the
	 * FP_* macros via FP_ROUNDMODE. */
	mode = current->thread.fp_regs.fpc & 3;
	FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f);
	FP_SQRT_S(SR, SA);
	FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
	/* Condition code from result class/sign (macro fields SR_c/SR_s). */
	emu_set_CC_cs(regs, SR_c, SR_s);
	return _fex;	/* IEEE exception flags raised by the macros */
}
1373 | |||
/* Square root float, storage operand (SQEB): FPR[rx] = sqrtf(*val). */
static int emu_sqeb (struct pt_regs *regs, int rx, float *val) {
	FP_DECL_S(SA); FP_DECL_S(SR);
	FP_DECL_EX;
	int mode;

	/* Rounding mode (low two FPC bits); presumably consumed by the
	 * FP_* macros via FP_ROUNDMODE. */
	mode = current->thread.fp_regs.fpc & 3;
	FP_UNPACK_SP(SA, val);
	FP_SQRT_S(SR, SA);
	FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
	/* Condition code from result class/sign (macro fields SR_c/SR_s). */
	emu_set_CC_cs(regs, SR_c, SR_s);
	return _fex;	/* IEEE exception flags raised by the macros */
}
1387 | |||
/* Subtract long double (SXBR):
 * FPR pair rx/rx+2 -= FPR pair ry/ry+2; also sets the CC. */
static int emu_sxbr (struct pt_regs *regs, int rx, int ry) {
	FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR);
	FP_DECL_EX;
	mathemu_ldcv cvt;
	int mode;

	/* Rounding mode (low two FPC bits); presumably consumed by the
	 * FP_* macros via FP_ROUNDMODE. */
	mode = current->thread.fp_regs.fpc & 3;
	/* Minuend from pair rx/rx+2. */
	cvt.w.high = current->thread.fp_regs.fprs[rx].ui;
	cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui;
	FP_UNPACK_QP(QA, &cvt.ld);
	/* Subtrahend from pair ry/ry+2. */
	cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
	cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
	FP_UNPACK_QP(QB, &cvt.ld);
	FP_SUB_Q(QR, QA, QB);
	FP_PACK_QP(&cvt.ld, QR);
	current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
	current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
	/* Condition code from result class/sign (macro fields QR_c/QR_s). */
	emu_set_CC_cs(regs, QR_c, QR_s);
	return _fex;	/* IEEE exception flags raised by the macros */
}
1409 | |||
/* Subtract double (SDBR): FPR[rx] -= FPR[ry]; also sets the CC. */
static int emu_sdbr (struct pt_regs *regs, int rx, int ry) {
	FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
	FP_DECL_EX;
	int mode;

	/* Rounding mode (low two FPC bits); presumably consumed by the
	 * FP_* macros via FP_ROUNDMODE. */
	mode = current->thread.fp_regs.fpc & 3;
	FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
	FP_UNPACK_DP(DB, &current->thread.fp_regs.fprs[ry].d);
	FP_SUB_D(DR, DA, DB);
	FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
	/* Condition code from result class/sign (macro fields DR_c/DR_s). */
	emu_set_CC_cs(regs, DR_c, DR_s);
	return _fex;	/* IEEE exception flags raised by the macros */
}
1424 | |||
/* Subtract double, storage operand (SDB): FPR[rx] -= *val; sets the CC. */
static int emu_sdb (struct pt_regs *regs, int rx, double *val) {
	FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
	FP_DECL_EX;
	int mode;

	/* Rounding mode (low two FPC bits); presumably consumed by the
	 * FP_* macros via FP_ROUNDMODE. */
	mode = current->thread.fp_regs.fpc & 3;
	FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
	FP_UNPACK_DP(DB, val);
	FP_SUB_D(DR, DA, DB);
	FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
	/* Condition code from result class/sign (macro fields DR_c/DR_s). */
	emu_set_CC_cs(regs, DR_c, DR_s);
	return _fex;	/* IEEE exception flags raised by the macros */
}
1439 | |||
/* Subtract float (SEBR): FPR[rx] -= FPR[ry]; also sets the CC. */
static int emu_sebr (struct pt_regs *regs, int rx, int ry) {
	FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
	FP_DECL_EX;
	int mode;

	/* Rounding mode (low two FPC bits); presumably consumed by the
	 * FP_* macros via FP_ROUNDMODE. */
	mode = current->thread.fp_regs.fpc & 3;
	FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
	FP_UNPACK_SP(SB, &current->thread.fp_regs.fprs[ry].f);
	FP_SUB_S(SR, SA, SB);
	FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
	/* Condition code from result class/sign (macro fields SR_c/SR_s). */
	emu_set_CC_cs(regs, SR_c, SR_s);
	return _fex;	/* IEEE exception flags raised by the macros */
}
1454 | |||
/* Subtract float, storage operand (SEB): FPR[rx] -= *val; sets the CC. */
static int emu_seb (struct pt_regs *regs, int rx, float *val) {
	FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
	FP_DECL_EX;
	int mode;

	/* Rounding mode (low two FPC bits); presumably consumed by the
	 * FP_* macros via FP_ROUNDMODE. */
	mode = current->thread.fp_regs.fpc & 3;
	FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
	FP_UNPACK_SP(SB, val);
	FP_SUB_S(SR, SA, SB);
	FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
	/* Condition code from result class/sign (macro fields SR_c/SR_s). */
	emu_set_CC_cs(regs, SR_c, SR_s);
	return _fex;	/* IEEE exception flags raised by the macros */
}
1469 | |||
/* Test data class long double (TCXB).
 * Classifies the 128-bit value in FPR pair rx/rx+2 into one of twelve
 * class bits (sNaN=0/1, qNaN=2/3, inf=4/5, denorm=6/7, normal=8/9,
 * zero=10/11; the higher index of each pair for positive values) and sets
 * the condition code to the corresponding bit of the mask 'val'.
 * Never raises an exception; always returns 0. */
static int emu_tcxb (struct pt_regs *regs, int rx, long val) {
	FP_DECL_Q(QA);
	mathemu_ldcv cvt;
	int bit;

	cvt.w.high = current->thread.fp_regs.fprs[rx].ui;
	cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui;
	/* Raw unpack: no exceptions, NaN payloads preserved. */
	FP_UNPACK_RAW_QP(QA, &cvt.ld);
	switch (QA_e) {
	default:
		bit = 8;		/* normalized number */
		break;
	case 0:
		if (_FP_FRAC_ZEROP_4(QA))
			bit = 10;	/* zero */
		else
			bit = 6;	/* denormalized number */
		break;
	case _FP_EXPMAX_Q:
		if (_FP_FRAC_ZEROP_4(QA))
			bit = 4;	/* infinity */
		else if (_FP_FRAC_HIGH_RAW_Q(QA) & _FP_QNANBIT_Q)
			bit = 2;	/* quiet NAN */
		else
			bit = 0;	/* signaling NAN */
		break;
	}
	/* Positive values select the odd member of each class-bit pair. */
	if (!QA_s)
		bit++;
	emu_set_CC(regs, ((__u32) val >> bit) & 1);
	return 0;
}
1503 | |||
/* Test data class double (TCDB).
 * Same classification scheme as emu_tcxb, for the double in FPR[rx]:
 * pick a class bit, then set the CC to that bit of the mask 'val'.
 * Never raises an exception; always returns 0. */
static int emu_tcdb (struct pt_regs *regs, int rx, long val) {
	FP_DECL_D(DA);
	int bit;

	/* Raw unpack: no exceptions, NaN payloads preserved. */
	FP_UNPACK_RAW_DP(DA, &current->thread.fp_regs.fprs[rx].d);
	switch (DA_e) {
	default:
		bit = 8;		/* normalized number */
		break;
	case 0:
		if (_FP_FRAC_ZEROP_2(DA))
			bit = 10;	/* zero */
		else
			bit = 6;	/* denormalized number */
		break;
	case _FP_EXPMAX_D:
		if (_FP_FRAC_ZEROP_2(DA))
			bit = 4;	/* infinity */
		else if (_FP_FRAC_HIGH_RAW_D(DA) & _FP_QNANBIT_D)
			bit = 2;	/* quiet NAN */
		else
			bit = 0;	/* signaling NAN */
		break;
	}
	/* Positive values select the odd member of each class-bit pair. */
	if (!DA_s)
		bit++;
	emu_set_CC(regs, ((__u32) val >> bit) & 1);
	return 0;
}
1534 | |||
/* Test data class float (TCEB).
 * Same classification scheme as emu_tcxb, for the float in FPR[rx]:
 * pick a class bit, then set the CC to that bit of the mask 'val'.
 * Never raises an exception; always returns 0. */
static int emu_tceb (struct pt_regs *regs, int rx, long val) {
	FP_DECL_S(SA);
	int bit;

	/* Raw unpack: no exceptions, NaN payloads preserved. */
	FP_UNPACK_RAW_SP(SA, &current->thread.fp_regs.fprs[rx].f);
	switch (SA_e) {
	default:
		bit = 8;		/* normalized number */
		break;
	case 0:
		if (_FP_FRAC_ZEROP_1(SA))
			bit = 10;	/* zero */
		else
			bit = 6;	/* denormalized number */
		break;
	case _FP_EXPMAX_S:
		if (_FP_FRAC_ZEROP_1(SA))
			bit = 4;	/* infinity */
		else if (_FP_FRAC_HIGH_RAW_S(SA) & _FP_QNANBIT_S)
			bit = 2;	/* quiet NAN */
		else
			bit = 0;	/* signaling NAN */
		break;
	}
	/* Positive values select the odd member of each class-bit pair. */
	if (!SA_s)
		bit++;
	emu_set_CC(regs, ((__u32) val >> bit) & 1);
	return 0;
}
1565 | |||
/* Reload hardware FPR <reg> (double) from the software image in
 * current->thread.fp_regs.fprs[reg].  Only registers 0/2/4/6 are touched
 * — presumably the FPRs that exist on the pre-AFP hardware this emulator
 * targets; all other register numbers are silently ignored.
 * The EX trick below executes the "ld 0,0(%1)" template with reg<<4
 * OR-ed into its second byte, i.e. with <reg> substituted as the target
 * register of the load. */
static inline void emu_load_regd(int reg) {
	if ((reg&9) != 0)	/* test if reg in {0,2,4,6} */
		return;
	asm volatile(		/* load reg from fp_regs.fprs[reg] */
		"	bras	1,0f\n"		/* r1 = address of template */
		"	ld	0,0(%1)\n"	/* template, skipped by bras */
		"0:	ex	%0,0(1)"	/* execute with R1 field = reg */
		: /* no output */
		: "a" (reg<<4),"a" (&current->thread.fp_regs.fprs[reg].d)
		: "1");
}
1577 | |||
/* Reload hardware FPR <reg> (float) from the software image in
 * current->thread.fp_regs.fprs[reg].  Only registers 0/2/4/6 are touched;
 * see emu_load_regd() for the EX-template technique used here with "le". */
static inline void emu_load_rege(int reg) {
	if ((reg&9) != 0)	/* test if reg in {0,2,4,6} */
		return;
	asm volatile(		/* load reg from fp_regs.fprs[reg] */
		"	bras	1,0f\n"		/* r1 = address of template */
		"	le	0,0(%1)\n"	/* template, skipped by bras */
		"0:	ex	%0,0(1)"	/* execute with R1 field = reg */
		: /* no output */
		: "a" (reg<<4), "a" (&current->thread.fp_regs.fprs[reg].f)
		: "1");
}
1589 | |||
/* Save hardware FPR <reg> (double) into the software image in
 * current->thread.fp_regs.fprs[reg].  Only registers 0/2/4/6 are touched;
 * see emu_load_regd() for the EX-template technique used here with "std". */
static inline void emu_store_regd(int reg) {
	if ((reg&9) != 0)	/* test if reg in {0,2,4,6} */
		return;
	asm volatile(		/* store reg to fp_regs.fprs[reg] */
		"	bras	1,0f\n"		/* r1 = address of template */
		"	std	0,0(%1)\n"	/* template, skipped by bras */
		"0:	ex	%0,0(1)"	/* execute with R1 field = reg */
		: /* no output */
		: "a" (reg<<4), "a" (&current->thread.fp_regs.fprs[reg].d)
		: "1");
}
1601 | |||
1602 | |||
/* Save hardware FPR <reg> (float) into the software image in
 * current->thread.fp_regs.fprs[reg].  Only registers 0/2/4/6 are touched;
 * see emu_load_regd() for the EX-template technique used here with "ste". */
static inline void emu_store_rege(int reg) {
	if ((reg&9) != 0)	/* test if reg in {0,2,4,6} */
		return;
	asm volatile(		/* store reg to fp_regs.fprs[reg] */
		"	bras	1,0f\n"		/* r1 = address of template */
		"	ste	0,0(%1)\n"	/* template, skipped by bras */
		"0:	ex	%0,0(1)"	/* execute with R1 field = reg */
		: /* no output */
		: "a" (reg<<4), "a" (&current->thread.fp_regs.fprs[reg].f)
		: "1");
}
1614 | |||
1615 | int math_emu_b3(__u8 *opcode, struct pt_regs * regs) { | ||
1616 | int _fex = 0; | ||
1617 | static const __u8 format_table[256] = { | ||
1618 | [0x00] = 0x03,[0x01] = 0x03,[0x02] = 0x03,[0x03] = 0x03, | ||
1619 | [0x04] = 0x0f,[0x05] = 0x0d,[0x06] = 0x0e,[0x07] = 0x0d, | ||
1620 | [0x08] = 0x03,[0x09] = 0x03,[0x0a] = 0x03,[0x0b] = 0x03, | ||
1621 | [0x0c] = 0x0f,[0x0d] = 0x03,[0x0e] = 0x06,[0x0f] = 0x06, | ||
1622 | [0x10] = 0x02,[0x11] = 0x02,[0x12] = 0x02,[0x13] = 0x02, | ||
1623 | [0x14] = 0x03,[0x15] = 0x02,[0x16] = 0x01,[0x17] = 0x03, | ||
1624 | [0x18] = 0x02,[0x19] = 0x02,[0x1a] = 0x02,[0x1b] = 0x02, | ||
1625 | [0x1c] = 0x02,[0x1d] = 0x02,[0x1e] = 0x05,[0x1f] = 0x05, | ||
1626 | [0x40] = 0x01,[0x41] = 0x01,[0x42] = 0x01,[0x43] = 0x01, | ||
1627 | [0x44] = 0x12,[0x45] = 0x0d,[0x46] = 0x11,[0x47] = 0x04, | ||
1628 | [0x48] = 0x01,[0x49] = 0x01,[0x4a] = 0x01,[0x4b] = 0x01, | ||
1629 | [0x4c] = 0x01,[0x4d] = 0x01,[0x53] = 0x06,[0x57] = 0x06, | ||
1630 | [0x5b] = 0x05,[0x5f] = 0x05,[0x84] = 0x13,[0x8c] = 0x13, | ||
1631 | [0x94] = 0x09,[0x95] = 0x08,[0x96] = 0x07,[0x98] = 0x0c, | ||
1632 | [0x99] = 0x0b,[0x9a] = 0x0a | ||
1633 | }; | ||
1634 | static const void *jump_table[256]= { | ||
1635 | [0x00] = emu_lpebr,[0x01] = emu_lnebr,[0x02] = emu_ltebr, | ||
1636 | [0x03] = emu_lcebr,[0x04] = emu_ldebr,[0x05] = emu_lxdbr, | ||
1637 | [0x06] = emu_lxebr,[0x07] = emu_mxdbr,[0x08] = emu_kebr, | ||
1638 | [0x09] = emu_cebr, [0x0a] = emu_aebr, [0x0b] = emu_sebr, | ||
1639 | [0x0c] = emu_mdebr,[0x0d] = emu_debr, [0x0e] = emu_maebr, | ||
1640 | [0x0f] = emu_msebr,[0x10] = emu_lpdbr,[0x11] = emu_lndbr, | ||
1641 | [0x12] = emu_ltdbr,[0x13] = emu_lcdbr,[0x14] = emu_sqebr, | ||
1642 | [0x15] = emu_sqdbr,[0x16] = emu_sqxbr,[0x17] = emu_meebr, | ||
1643 | [0x18] = emu_kdbr, [0x19] = emu_cdbr, [0x1a] = emu_adbr, | ||
1644 | [0x1b] = emu_sdbr, [0x1c] = emu_mdbr, [0x1d] = emu_ddbr, | ||
1645 | [0x1e] = emu_madbr,[0x1f] = emu_msdbr,[0x40] = emu_lpxbr, | ||
1646 | [0x41] = emu_lnxbr,[0x42] = emu_ltxbr,[0x43] = emu_lcxbr, | ||
1647 | [0x44] = emu_ledbr,[0x45] = emu_ldxbr,[0x46] = emu_lexbr, | ||
1648 | [0x47] = emu_fixbr,[0x48] = emu_kxbr, [0x49] = emu_cxbr, | ||
1649 | [0x4a] = emu_axbr, [0x4b] = emu_sxbr, [0x4c] = emu_mxbr, | ||
1650 | [0x4d] = emu_dxbr, [0x53] = emu_diebr,[0x57] = emu_fiebr, | ||
1651 | [0x5b] = emu_didbr,[0x5f] = emu_fidbr,[0x84] = emu_sfpc, | ||
1652 | [0x8c] = emu_efpc, [0x94] = emu_cefbr,[0x95] = emu_cdfbr, | ||
1653 | [0x96] = emu_cxfbr,[0x98] = emu_cfebr,[0x99] = emu_cfdbr, | ||
1654 | [0x9a] = emu_cfxbr | ||
1655 | }; | ||
1656 | |||
1657 | switch (format_table[opcode[1]]) { | ||
1658 | case 1: /* RRE format, long double operation */ | ||
1659 | if (opcode[3] & 0x22) | ||
1660 | return SIGILL; | ||
1661 | emu_store_regd((opcode[3] >> 4) & 15); | ||
1662 | emu_store_regd(((opcode[3] >> 4) & 15) + 2); | ||
1663 | emu_store_regd(opcode[3] & 15); | ||
1664 | emu_store_regd((opcode[3] & 15) + 2); | ||
1665 | /* call the emulation function */ | ||
1666 | _fex = ((int (*)(struct pt_regs *,int, int)) | ||
1667 | jump_table[opcode[1]]) | ||
1668 | (regs, opcode[3] >> 4, opcode[3] & 15); | ||
1669 | emu_load_regd((opcode[3] >> 4) & 15); | ||
1670 | emu_load_regd(((opcode[3] >> 4) & 15) + 2); | ||
1671 | emu_load_regd(opcode[3] & 15); | ||
1672 | emu_load_regd((opcode[3] & 15) + 2); | ||
1673 | break; | ||
1674 | case 2: /* RRE format, double operation */ | ||
1675 | emu_store_regd((opcode[3] >> 4) & 15); | ||
1676 | emu_store_regd(opcode[3] & 15); | ||
1677 | /* call the emulation function */ | ||
1678 | _fex = ((int (*)(struct pt_regs *, int, int)) | ||
1679 | jump_table[opcode[1]]) | ||
1680 | (regs, opcode[3] >> 4, opcode[3] & 15); | ||
1681 | emu_load_regd((opcode[3] >> 4) & 15); | ||
1682 | emu_load_regd(opcode[3] & 15); | ||
1683 | break; | ||
1684 | case 3: /* RRE format, float operation */ | ||
1685 | emu_store_rege((opcode[3] >> 4) & 15); | ||
1686 | emu_store_rege(opcode[3] & 15); | ||
1687 | /* call the emulation function */ | ||
1688 | _fex = ((int (*)(struct pt_regs *, int, int)) | ||
1689 | jump_table[opcode[1]]) | ||
1690 | (regs, opcode[3] >> 4, opcode[3] & 15); | ||
1691 | emu_load_rege((opcode[3] >> 4) & 15); | ||
1692 | emu_load_rege(opcode[3] & 15); | ||
1693 | break; | ||
1694 | case 4: /* RRF format, long double operation */ | ||
1695 | if (opcode[3] & 0x22) | ||
1696 | return SIGILL; | ||
1697 | emu_store_regd((opcode[3] >> 4) & 15); | ||
1698 | emu_store_regd(((opcode[3] >> 4) & 15) + 2); | ||
1699 | emu_store_regd(opcode[3] & 15); | ||
1700 | emu_store_regd((opcode[3] & 15) + 2); | ||
1701 | /* call the emulation function */ | ||
1702 | _fex = ((int (*)(struct pt_regs *, int, int, int)) | ||
1703 | jump_table[opcode[1]]) | ||
1704 | (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4); | ||
1705 | emu_load_regd((opcode[3] >> 4) & 15); | ||
1706 | emu_load_regd(((opcode[3] >> 4) & 15) + 2); | ||
1707 | emu_load_regd(opcode[3] & 15); | ||
1708 | emu_load_regd((opcode[3] & 15) + 2); | ||
1709 | break; | ||
1710 | case 5: /* RRF format, double operation */ | ||
1711 | emu_store_regd((opcode[2] >> 4) & 15); | ||
1712 | emu_store_regd((opcode[3] >> 4) & 15); | ||
1713 | emu_store_regd(opcode[3] & 15); | ||
1714 | /* call the emulation function */ | ||
1715 | _fex = ((int (*)(struct pt_regs *, int, int, int)) | ||
1716 | jump_table[opcode[1]]) | ||
1717 | (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4); | ||
1718 | emu_load_regd((opcode[2] >> 4) & 15); | ||
1719 | emu_load_regd((opcode[3] >> 4) & 15); | ||
1720 | emu_load_regd(opcode[3] & 15); | ||
1721 | break; | ||
1722 | case 6: /* RRF format, float operation */ | ||
1723 | emu_store_rege((opcode[2] >> 4) & 15); | ||
1724 | emu_store_rege((opcode[3] >> 4) & 15); | ||
1725 | emu_store_rege(opcode[3] & 15); | ||
1726 | /* call the emulation function */ | ||
1727 | _fex = ((int (*)(struct pt_regs *, int, int, int)) | ||
1728 | jump_table[opcode[1]]) | ||
1729 | (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4); | ||
1730 | emu_load_rege((opcode[2] >> 4) & 15); | ||
1731 | emu_load_rege((opcode[3] >> 4) & 15); | ||
1732 | emu_load_rege(opcode[3] & 15); | ||
1733 | break; | ||
1734 | case 7: /* RRE format, cxfbr instruction */ | ||
1735 | /* call the emulation function */ | ||
1736 | if (opcode[3] & 0x20) | ||
1737 | return SIGILL; | ||
1738 | _fex = ((int (*)(struct pt_regs *, int, int)) | ||
1739 | jump_table[opcode[1]]) | ||
1740 | (regs, opcode[3] >> 4, opcode[3] & 15); | ||
1741 | emu_load_regd((opcode[3] >> 4) & 15); | ||
1742 | emu_load_regd(((opcode[3] >> 4) & 15) + 2); | ||
1743 | break; | ||
1744 | case 8: /* RRE format, cdfbr instruction */ | ||
1745 | /* call the emulation function */ | ||
1746 | _fex = ((int (*)(struct pt_regs *, int, int)) | ||
1747 | jump_table[opcode[1]]) | ||
1748 | (regs, opcode[3] >> 4, opcode[3] & 15); | ||
1749 | emu_load_regd((opcode[3] >> 4) & 15); | ||
1750 | break; | ||
1751 | case 9: /* RRE format, cefbr instruction */ | ||
1752 | /* call the emulation function */ | ||
1753 | _fex = ((int (*)(struct pt_regs *, int, int)) | ||
1754 | jump_table[opcode[1]]) | ||
1755 | (regs, opcode[3] >> 4, opcode[3] & 15); | ||
1756 | emu_load_rege((opcode[3] >> 4) & 15); | ||
1757 | break; | ||
1758 | case 10: /* RRF format, cfxbr instruction */ | ||
1759 | if ((opcode[2] & 128) == 128 || (opcode[2] & 96) == 32) | ||
1760 | /* mask of { 2,3,8-15 } is invalid */ | ||
1761 | return SIGILL; | ||
1762 | if (opcode[3] & 2) | ||
1763 | return SIGILL; | ||
1764 | emu_store_regd(opcode[3] & 15); | ||
1765 | emu_store_regd((opcode[3] & 15) + 2); | ||
1766 | /* call the emulation function */ | ||
1767 | _fex = ((int (*)(struct pt_regs *, int, int, int)) | ||
1768 | jump_table[opcode[1]]) | ||
1769 | (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4); | ||
1770 | break; | ||
1771 | case 11: /* RRF format, cfdbr instruction */ | ||
1772 | if ((opcode[2] & 128) == 128 || (opcode[2] & 96) == 32) | ||
1773 | /* mask of { 2,3,8-15 } is invalid */ | ||
1774 | return SIGILL; | ||
1775 | emu_store_regd(opcode[3] & 15); | ||
1776 | /* call the emulation function */ | ||
1777 | _fex = ((int (*)(struct pt_regs *, int, int, int)) | ||
1778 | jump_table[opcode[1]]) | ||
1779 | (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4); | ||
1780 | break; | ||
1781 | case 12: /* RRF format, cfebr instruction */ | ||
1782 | if ((opcode[2] & 128) == 128 || (opcode[2] & 96) == 32) | ||
1783 | /* mask of { 2,3,8-15 } is invalid */ | ||
1784 | return SIGILL; | ||
1785 | emu_store_rege(opcode[3] & 15); | ||
1786 | /* call the emulation function */ | ||
1787 | _fex = ((int (*)(struct pt_regs *, int, int, int)) | ||
1788 | jump_table[opcode[1]]) | ||
1789 | (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4); | ||
1790 | break; | ||
1791 | case 13: /* RRE format, ldxbr & mdxbr instruction */ | ||
1792 | /* double store but long double load */ | ||
1793 | if (opcode[3] & 0x20) | ||
1794 | return SIGILL; | ||
1795 | emu_store_regd((opcode[3] >> 4) & 15); | ||
1796 | emu_store_regd(opcode[3] & 15); | ||
1797 | /* call the emulation function */ | ||
1798 | _fex = ((int (*)(struct pt_regs *, int, int)) | ||
1799 | jump_table[opcode[1]]) | ||
1800 | (regs, opcode[3] >> 4, opcode[3] & 15); | ||
1801 | emu_load_regd((opcode[3] >> 4) & 15); | ||
1802 | emu_load_regd(((opcode[3] >> 4) & 15) + 2); | ||
1803 | break; | ||
1804 | case 14: /* RRE format, ldxbr & mdxbr instruction */ | ||
1805 | /* float store but long double load */ | ||
1806 | if (opcode[3] & 0x20) | ||
1807 | return SIGILL; | ||
1808 | emu_store_rege((opcode[3] >> 4) & 15); | ||
1809 | emu_store_rege(opcode[3] & 15); | ||
1810 | /* call the emulation function */ | ||
1811 | _fex = ((int (*)(struct pt_regs *, int, int)) | ||
1812 | jump_table[opcode[1]]) | ||
1813 | (regs, opcode[3] >> 4, opcode[3] & 15); | ||
1814 | emu_load_regd((opcode[3] >> 4) & 15); | ||
1815 | emu_load_regd(((opcode[3] >> 4) & 15) + 2); | ||
1816 | break; | ||
1817 | case 15: /* RRE format, ldebr & mdebr instruction */ | ||
1818 | /* float store but double load */ | ||
1819 | emu_store_rege((opcode[3] >> 4) & 15); | ||
1820 | emu_store_rege(opcode[3] & 15); | ||
1821 | /* call the emulation function */ | ||
1822 | _fex = ((int (*)(struct pt_regs *, int, int)) | ||
1823 | jump_table[opcode[1]]) | ||
1824 | (regs, opcode[3] >> 4, opcode[3] & 15); | ||
1825 | emu_load_regd((opcode[3] >> 4) & 15); | ||
1826 | break; | ||
1827 | case 16: /* RRE format, ldxbr instruction */ | ||
1828 | /* long double store but double load */ | ||
1829 | if (opcode[3] & 2) | ||
1830 | return SIGILL; | ||
1831 | emu_store_regd(opcode[3] & 15); | ||
1832 | emu_store_regd((opcode[3] & 15) + 2); | ||
1833 | /* call the emulation function */ | ||
1834 | _fex = ((int (*)(struct pt_regs *, int, int)) | ||
1835 | jump_table[opcode[1]]) | ||
1836 | (regs, opcode[3] >> 4, opcode[3] & 15); | ||
1837 | emu_load_regd((opcode[3] >> 4) & 15); | ||
1838 | break; | ||
1839 | case 17: /* RRE format, ldxbr instruction */ | ||
1840 | /* long double store but float load */ | ||
1841 | if (opcode[3] & 2) | ||
1842 | return SIGILL; | ||
1843 | emu_store_regd(opcode[3] & 15); | ||
1844 | emu_store_regd((opcode[3] & 15) + 2); | ||
1845 | /* call the emulation function */ | ||
1846 | _fex = ((int (*)(struct pt_regs *, int, int)) | ||
1847 | jump_table[opcode[1]]) | ||
1848 | (regs, opcode[3] >> 4, opcode[3] & 15); | ||
1849 | emu_load_rege((opcode[3] >> 4) & 15); | ||
1850 | break; | ||
1851 | case 18: /* RRE format, ledbr instruction */ | ||
1852 | /* double store but float load */ | ||
1853 | emu_store_regd(opcode[3] & 15); | ||
1854 | /* call the emulation function */ | ||
1855 | _fex = ((int (*)(struct pt_regs *, int, int)) | ||
1856 | jump_table[opcode[1]]) | ||
1857 | (regs, opcode[3] >> 4, opcode[3] & 15); | ||
1858 | emu_load_rege((opcode[3] >> 4) & 15); | ||
1859 | break; | ||
1860 | case 19: /* RRE format, efpc & sfpc instruction */ | ||
1861 | /* call the emulation function */ | ||
1862 | _fex = ((int (*)(struct pt_regs *, int, int)) | ||
1863 | jump_table[opcode[1]]) | ||
1864 | (regs, opcode[3] >> 4, opcode[3] & 15); | ||
1865 | break; | ||
1866 | default: /* invalid operation */ | ||
1867 | return SIGILL; | ||
1868 | } | ||
1869 | if (_fex != 0) { | ||
1870 | current->thread.fp_regs.fpc |= _fex; | ||
1871 | if (current->thread.fp_regs.fpc & (_fex << 8)) | ||
1872 | return SIGFPE; | ||
1873 | } | ||
1874 | return 0; | ||
1875 | } | ||
1876 | |||
1877 | static void* calc_addr(struct pt_regs *regs, int rx, int rb, int disp) | ||
1878 | { | ||
1879 | addr_t addr; | ||
1880 | |||
1881 | rx &= 15; | ||
1882 | rb &= 15; | ||
1883 | addr = disp & 0xfff; | ||
1884 | addr += (rx != 0) ? regs->gprs[rx] : 0; /* + index */ | ||
1885 | addr += (rb != 0) ? regs->gprs[rb] : 0; /* + base */ | ||
1886 | return (void*) addr; | ||
1887 | } | ||
1888 | |||
1889 | int math_emu_ed(__u8 *opcode, struct pt_regs * regs) { | ||
1890 | int _fex = 0; | ||
1891 | |||
1892 | static const __u8 format_table[256] = { | ||
1893 | [0x04] = 0x06,[0x05] = 0x05,[0x06] = 0x07,[0x07] = 0x05, | ||
1894 | [0x08] = 0x02,[0x09] = 0x02,[0x0a] = 0x02,[0x0b] = 0x02, | ||
1895 | [0x0c] = 0x06,[0x0d] = 0x02,[0x0e] = 0x04,[0x0f] = 0x04, | ||
1896 | [0x10] = 0x08,[0x11] = 0x09,[0x12] = 0x0a,[0x14] = 0x02, | ||
1897 | [0x15] = 0x01,[0x17] = 0x02,[0x18] = 0x01,[0x19] = 0x01, | ||
1898 | [0x1a] = 0x01,[0x1b] = 0x01,[0x1c] = 0x01,[0x1d] = 0x01, | ||
1899 | [0x1e] = 0x03,[0x1f] = 0x03, | ||
1900 | }; | ||
1901 | static const void *jump_table[]= { | ||
1902 | [0x04] = emu_ldeb,[0x05] = emu_lxdb,[0x06] = emu_lxeb, | ||
1903 | [0x07] = emu_mxdb,[0x08] = emu_keb, [0x09] = emu_ceb, | ||
1904 | [0x0a] = emu_aeb, [0x0b] = emu_seb, [0x0c] = emu_mdeb, | ||
1905 | [0x0d] = emu_deb, [0x0e] = emu_maeb,[0x0f] = emu_mseb, | ||
1906 | [0x10] = emu_tceb,[0x11] = emu_tcdb,[0x12] = emu_tcxb, | ||
1907 | [0x14] = emu_sqeb,[0x15] = emu_sqdb,[0x17] = emu_meeb, | ||
1908 | [0x18] = emu_kdb, [0x19] = emu_cdb, [0x1a] = emu_adb, | ||
1909 | [0x1b] = emu_sdb, [0x1c] = emu_mdb, [0x1d] = emu_ddb, | ||
1910 | [0x1e] = emu_madb,[0x1f] = emu_msdb | ||
1911 | }; | ||
1912 | |||
1913 | switch (format_table[opcode[5]]) { | ||
1914 | case 1: /* RXE format, double constant */ { | ||
1915 | __u64 *dxb, temp; | ||
1916 | __u32 opc; | ||
1917 | |||
1918 | emu_store_regd((opcode[1] >> 4) & 15); | ||
1919 | opc = *((__u32 *) opcode); | ||
1920 | dxb = (__u64 *) calc_addr(regs, opc >> 16, opc >> 12, opc); | ||
1921 | mathemu_copy_from_user(&temp, dxb, 8); | ||
1922 | /* call the emulation function */ | ||
1923 | _fex = ((int (*)(struct pt_regs *, int, double *)) | ||
1924 | jump_table[opcode[5]]) | ||
1925 | (regs, opcode[1] >> 4, (double *) &temp); | ||
1926 | emu_load_regd((opcode[1] >> 4) & 15); | ||
1927 | break; | ||
1928 | } | ||
1929 | case 2: /* RXE format, float constant */ { | ||
1930 | __u32 *dxb, temp; | ||
1931 | __u32 opc; | ||
1932 | |||
1933 | emu_store_rege((opcode[1] >> 4) & 15); | ||
1934 | opc = *((__u32 *) opcode); | ||
1935 | dxb = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc); | ||
1936 | mathemu_get_user(temp, dxb); | ||
1937 | /* call the emulation function */ | ||
1938 | _fex = ((int (*)(struct pt_regs *, int, float *)) | ||
1939 | jump_table[opcode[5]]) | ||
1940 | (regs, opcode[1] >> 4, (float *) &temp); | ||
1941 | emu_load_rege((opcode[1] >> 4) & 15); | ||
1942 | break; | ||
1943 | } | ||
1944 | case 3: /* RXF format, double constant */ { | ||
1945 | __u64 *dxb, temp; | ||
1946 | __u32 opc; | ||
1947 | |||
1948 | emu_store_regd((opcode[1] >> 4) & 15); | ||
1949 | emu_store_regd((opcode[4] >> 4) & 15); | ||
1950 | opc = *((__u32 *) opcode); | ||
1951 | dxb = (__u64 *) calc_addr(regs, opc >> 16, opc >> 12, opc); | ||
1952 | mathemu_copy_from_user(&temp, dxb, 8); | ||
1953 | /* call the emulation function */ | ||
1954 | _fex = ((int (*)(struct pt_regs *, int, double *, int)) | ||
1955 | jump_table[opcode[5]]) | ||
1956 | (regs, opcode[1] >> 4, (double *) &temp, opcode[4] >> 4); | ||
1957 | emu_load_regd((opcode[1] >> 4) & 15); | ||
1958 | break; | ||
1959 | } | ||
1960 | case 4: /* RXF format, float constant */ { | ||
1961 | __u32 *dxb, temp; | ||
1962 | __u32 opc; | ||
1963 | |||
1964 | emu_store_rege((opcode[1] >> 4) & 15); | ||
1965 | emu_store_rege((opcode[4] >> 4) & 15); | ||
1966 | opc = *((__u32 *) opcode); | ||
1967 | dxb = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc); | ||
1968 | mathemu_get_user(temp, dxb); | ||
1969 | /* call the emulation function */ | ||
1970 | _fex = ((int (*)(struct pt_regs *, int, float *, int)) | ||
1971 | jump_table[opcode[5]]) | ||
1972 | (regs, opcode[1] >> 4, (float *) &temp, opcode[4] >> 4); | ||
1973 | emu_load_rege((opcode[4] >> 4) & 15); | ||
1974 | break; | ||
1975 | } | ||
1976 | case 5: /* RXE format, double constant */ | ||
1977 | /* store double and load long double */ | ||
1978 | { | ||
1979 | __u64 *dxb, temp; | ||
1980 | __u32 opc; | ||
1981 | if ((opcode[1] >> 4) & 0x20) | ||
1982 | return SIGILL; | ||
1983 | emu_store_regd((opcode[1] >> 4) & 15); | ||
1984 | opc = *((__u32 *) opcode); | ||
1985 | dxb = (__u64 *) calc_addr(regs, opc >> 16, opc >> 12, opc); | ||
1986 | mathemu_copy_from_user(&temp, dxb, 8); | ||
1987 | /* call the emulation function */ | ||
1988 | _fex = ((int (*)(struct pt_regs *, int, double *)) | ||
1989 | jump_table[opcode[5]]) | ||
1990 | (regs, opcode[1] >> 4, (double *) &temp); | ||
1991 | emu_load_regd((opcode[1] >> 4) & 15); | ||
1992 | emu_load_regd(((opcode[1] >> 4) & 15) + 2); | ||
1993 | break; | ||
1994 | } | ||
1995 | case 6: /* RXE format, float constant */ | ||
1996 | /* store float and load double */ | ||
1997 | { | ||
1998 | __u32 *dxb, temp; | ||
1999 | __u32 opc; | ||
2000 | emu_store_rege((opcode[1] >> 4) & 15); | ||
2001 | opc = *((__u32 *) opcode); | ||
2002 | dxb = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc); | ||
2003 | mathemu_get_user(temp, dxb); | ||
2004 | /* call the emulation function */ | ||
2005 | _fex = ((int (*)(struct pt_regs *, int, float *)) | ||
2006 | jump_table[opcode[5]]) | ||
2007 | (regs, opcode[1] >> 4, (float *) &temp); | ||
2008 | emu_load_regd((opcode[1] >> 4) & 15); | ||
2009 | break; | ||
2010 | } | ||
2011 | case 7: /* RXE format, float constant */ | ||
2012 | /* store float and load long double */ | ||
2013 | { | ||
2014 | __u32 *dxb, temp; | ||
2015 | __u32 opc; | ||
2016 | if ((opcode[1] >> 4) & 0x20) | ||
2017 | return SIGILL; | ||
2018 | emu_store_rege((opcode[1] >> 4) & 15); | ||
2019 | opc = *((__u32 *) opcode); | ||
2020 | dxb = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc); | ||
2021 | mathemu_get_user(temp, dxb); | ||
2022 | /* call the emulation function */ | ||
2023 | _fex = ((int (*)(struct pt_regs *, int, float *)) | ||
2024 | jump_table[opcode[5]]) | ||
2025 | (regs, opcode[1] >> 4, (float *) &temp); | ||
2026 | emu_load_regd((opcode[1] >> 4) & 15); | ||
2027 | emu_load_regd(((opcode[1] >> 4) & 15) + 2); | ||
2028 | break; | ||
2029 | } | ||
2030 | case 8: /* RXE format, RX address used as int value */ { | ||
2031 | __u64 dxb; | ||
2032 | __u32 opc; | ||
2033 | |||
2034 | emu_store_rege((opcode[1] >> 4) & 15); | ||
2035 | opc = *((__u32 *) opcode); | ||
2036 | dxb = (__u64) calc_addr(regs, opc >> 16, opc >> 12, opc); | ||
2037 | /* call the emulation function */ | ||
2038 | _fex = ((int (*)(struct pt_regs *, int, long)) | ||
2039 | jump_table[opcode[5]]) | ||
2040 | (regs, opcode[1] >> 4, dxb); | ||
2041 | break; | ||
2042 | } | ||
2043 | case 9: /* RXE format, RX address used as int value */ { | ||
2044 | __u64 dxb; | ||
2045 | __u32 opc; | ||
2046 | |||
2047 | emu_store_regd((opcode[1] >> 4) & 15); | ||
2048 | opc = *((__u32 *) opcode); | ||
2049 | dxb = (__u64) calc_addr(regs, opc >> 16, opc >> 12, opc); | ||
2050 | /* call the emulation function */ | ||
2051 | _fex = ((int (*)(struct pt_regs *, int, long)) | ||
2052 | jump_table[opcode[5]]) | ||
2053 | (regs, opcode[1] >> 4, dxb); | ||
2054 | break; | ||
2055 | } | ||
2056 | case 10: /* RXE format, RX address used as int value */ { | ||
2057 | __u64 dxb; | ||
2058 | __u32 opc; | ||
2059 | |||
2060 | if ((opcode[1] >> 4) & 2) | ||
2061 | return SIGILL; | ||
2062 | emu_store_regd((opcode[1] >> 4) & 15); | ||
2063 | emu_store_regd(((opcode[1] >> 4) & 15) + 2); | ||
2064 | opc = *((__u32 *) opcode); | ||
2065 | dxb = (__u64) calc_addr(regs, opc >> 16, opc >> 12, opc); | ||
2066 | /* call the emulation function */ | ||
2067 | _fex = ((int (*)(struct pt_regs *, int, long)) | ||
2068 | jump_table[opcode[5]]) | ||
2069 | (regs, opcode[1] >> 4, dxb); | ||
2070 | break; | ||
2071 | } | ||
2072 | default: /* invalid operation */ | ||
2073 | return SIGILL; | ||
2074 | } | ||
2075 | if (_fex != 0) { | ||
2076 | current->thread.fp_regs.fpc |= _fex; | ||
2077 | if (current->thread.fp_regs.fpc & (_fex << 8)) | ||
2078 | return SIGFPE; | ||
2079 | } | ||
2080 | return 0; | ||
2081 | } | ||
2082 | |||
2083 | /* | ||
2084 | * Emulate LDR Rx,Ry with Rx or Ry not in {0, 2, 4, 6} | ||
2085 | */ | ||
2086 | int math_emu_ldr(__u8 *opcode) { | ||
2087 | s390_fp_regs *fp_regs = ¤t->thread.fp_regs; | ||
2088 | __u16 opc = *((__u16 *) opcode); | ||
2089 | |||
2090 | if ((opc & 0x90) == 0) { /* test if rx in {0,2,4,6} */ | ||
2091 | /* we got an exception therefore ry can't be in {0,2,4,6} */ | ||
2092 | asm volatile( /* load rx from fp_regs.fprs[ry] */ | ||
2093 | " bras 1,0f\n" | ||
2094 | " ld 0,0(%1)\n" | ||
2095 | "0: ex %0,0(1)" | ||
2096 | : /* no output */ | ||
2097 | : "a" (opc & 0xf0), "a" (&fp_regs->fprs[opc & 0xf].d) | ||
2098 | : "1"); | ||
2099 | } else if ((opc & 0x9) == 0) { /* test if ry in {0,2,4,6} */ | ||
2100 | asm volatile ( /* store ry to fp_regs.fprs[rx] */ | ||
2101 | " bras 1,0f\n" | ||
2102 | " std 0,0(%1)\n" | ||
2103 | "0: ex %0,0(1)" | ||
2104 | : /* no output */ | ||
2105 | : "a" ((opc & 0xf) << 4), | ||
2106 | "a" (&fp_regs->fprs[(opc & 0xf0)>>4].d) | ||
2107 | : "1"); | ||
2108 | } else /* move fp_regs.fprs[ry] to fp_regs.fprs[rx] */ | ||
2109 | fp_regs->fprs[(opc & 0xf0) >> 4] = fp_regs->fprs[opc & 0xf]; | ||
2110 | return 0; | ||
2111 | } | ||
2112 | |||
2113 | /* | ||
2114 | * Emulate LER Rx,Ry with Rx or Ry not in {0, 2, 4, 6} | ||
2115 | */ | ||
2116 | int math_emu_ler(__u8 *opcode) { | ||
2117 | s390_fp_regs *fp_regs = ¤t->thread.fp_regs; | ||
2118 | __u16 opc = *((__u16 *) opcode); | ||
2119 | |||
2120 | if ((opc & 0x90) == 0) { /* test if rx in {0,2,4,6} */ | ||
2121 | /* we got an exception therefore ry can't be in {0,2,4,6} */ | ||
2122 | asm volatile( /* load rx from fp_regs.fprs[ry] */ | ||
2123 | " bras 1,0f\n" | ||
2124 | " le 0,0(%1)\n" | ||
2125 | "0: ex %0,0(1)" | ||
2126 | : /* no output */ | ||
2127 | : "a" (opc & 0xf0), "a" (&fp_regs->fprs[opc & 0xf].f) | ||
2128 | : "1"); | ||
2129 | } else if ((opc & 0x9) == 0) { /* test if ry in {0,2,4,6} */ | ||
2130 | asm volatile( /* store ry to fp_regs.fprs[rx] */ | ||
2131 | " bras 1,0f\n" | ||
2132 | " ste 0,0(%1)\n" | ||
2133 | "0: ex %0,0(1)" | ||
2134 | : /* no output */ | ||
2135 | : "a" ((opc & 0xf) << 4), | ||
2136 | "a" (&fp_regs->fprs[(opc & 0xf0) >> 4].f) | ||
2137 | : "1"); | ||
2138 | } else /* move fp_regs.fprs[ry] to fp_regs.fprs[rx] */ | ||
2139 | fp_regs->fprs[(opc & 0xf0) >> 4] = fp_regs->fprs[opc & 0xf]; | ||
2140 | return 0; | ||
2141 | } | ||
2142 | |||
2143 | /* | ||
2144 | * Emulate LD R,D(X,B) with R not in {0, 2, 4, 6} | ||
2145 | */ | ||
2146 | int math_emu_ld(__u8 *opcode, struct pt_regs * regs) { | ||
2147 | s390_fp_regs *fp_regs = ¤t->thread.fp_regs; | ||
2148 | __u32 opc = *((__u32 *) opcode); | ||
2149 | __u64 *dxb; | ||
2150 | |||
2151 | dxb = (__u64 *) calc_addr(regs, opc >> 16, opc >> 12, opc); | ||
2152 | mathemu_copy_from_user(&fp_regs->fprs[(opc >> 20) & 0xf].d, dxb, 8); | ||
2153 | return 0; | ||
2154 | } | ||
2155 | |||
2156 | /* | ||
2157 | * Emulate LE R,D(X,B) with R not in {0, 2, 4, 6} | ||
2158 | */ | ||
2159 | int math_emu_le(__u8 *opcode, struct pt_regs * regs) { | ||
2160 | s390_fp_regs *fp_regs = ¤t->thread.fp_regs; | ||
2161 | __u32 opc = *((__u32 *) opcode); | ||
2162 | __u32 *mem, *dxb; | ||
2163 | |||
2164 | dxb = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc); | ||
2165 | mem = (__u32 *) (&fp_regs->fprs[(opc >> 20) & 0xf].f); | ||
2166 | mathemu_get_user(mem[0], dxb); | ||
2167 | return 0; | ||
2168 | } | ||
2169 | |||
2170 | /* | ||
2171 | * Emulate STD R,D(X,B) with R not in {0, 2, 4, 6} | ||
2172 | */ | ||
2173 | int math_emu_std(__u8 *opcode, struct pt_regs * regs) { | ||
2174 | s390_fp_regs *fp_regs = ¤t->thread.fp_regs; | ||
2175 | __u32 opc = *((__u32 *) opcode); | ||
2176 | __u64 *dxb; | ||
2177 | |||
2178 | dxb = (__u64 *) calc_addr(regs, opc >> 16, opc >> 12, opc); | ||
2179 | mathemu_copy_to_user(dxb, &fp_regs->fprs[(opc >> 20) & 0xf].d, 8); | ||
2180 | return 0; | ||
2181 | } | ||
2182 | |||
2183 | /* | ||
2184 | * Emulate STE R,D(X,B) with R not in {0, 2, 4, 6} | ||
2185 | */ | ||
2186 | int math_emu_ste(__u8 *opcode, struct pt_regs * regs) { | ||
2187 | s390_fp_regs *fp_regs = ¤t->thread.fp_regs; | ||
2188 | __u32 opc = *((__u32 *) opcode); | ||
2189 | __u32 *mem, *dxb; | ||
2190 | |||
2191 | dxb = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc); | ||
2192 | mem = (__u32 *) (&fp_regs->fprs[(opc >> 20) & 0xf].f); | ||
2193 | mathemu_put_user(mem[0], dxb); | ||
2194 | return 0; | ||
2195 | } | ||
2196 | |||
2197 | /* | ||
2198 | * Emulate LFPC D(B) | ||
2199 | */ | ||
2200 | int math_emu_lfpc(__u8 *opcode, struct pt_regs *regs) { | ||
2201 | __u32 opc = *((__u32 *) opcode); | ||
2202 | __u32 *dxb, temp; | ||
2203 | |||
2204 | dxb= (__u32 *) calc_addr(regs, 0, opc>>12, opc); | ||
2205 | mathemu_get_user(temp, dxb); | ||
2206 | if ((temp & ~FPC_VALID_MASK) != 0) | ||
2207 | return SIGILL; | ||
2208 | current->thread.fp_regs.fpc = temp; | ||
2209 | return 0; | ||
2210 | } | ||
2211 | |||
2212 | /* | ||
2213 | * Emulate STFPC D(B) | ||
2214 | */ | ||
2215 | int math_emu_stfpc(__u8 *opcode, struct pt_regs *regs) { | ||
2216 | __u32 opc = *((__u32 *) opcode); | ||
2217 | __u32 *dxb; | ||
2218 | |||
2219 | dxb= (__u32 *) calc_addr(regs, 0, opc>>12, opc); | ||
2220 | mathemu_put_user(current->thread.fp_regs.fpc, dxb); | ||
2221 | return 0; | ||
2222 | } | ||
2223 | |||
2224 | /* | ||
2225 | * Emulate SRNM D(B) | ||
2226 | */ | ||
2227 | int math_emu_srnm(__u8 *opcode, struct pt_regs *regs) { | ||
2228 | __u32 opc = *((__u32 *) opcode); | ||
2229 | __u32 temp; | ||
2230 | |||
2231 | temp = calc_addr(regs, 0, opc>>12, opc); | ||
2232 | current->thread.fp_regs.fpc &= ~3; | ||
2233 | current->thread.fp_regs.fpc |= (temp & 3); | ||
2234 | return 0; | ||
2235 | } | ||
2236 | |||
2237 | /* broken compiler ... */ | ||
2238 | long long | ||
2239 | __negdi2 (long long u) | ||
2240 | { | ||
2241 | |||
2242 | union lll { | ||
2243 | long long ll; | ||
2244 | long s[2]; | ||
2245 | }; | ||
2246 | |||
2247 | union lll w,uu; | ||
2248 | |||
2249 | uu.ll = u; | ||
2250 | |||
2251 | w.s[1] = -uu.s[1]; | ||
2252 | w.s[0] = -uu.s[0] - ((int) w.s[1] != 0); | ||
2253 | |||
2254 | return w.ll; | ||
2255 | } | ||
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c index d46cadeda204..8556d6be9b54 100644 --- a/arch/s390/mm/dump_pagetables.c +++ b/arch/s390/mm/dump_pagetables.c | |||
@@ -18,9 +18,7 @@ enum address_markers_idx { | |||
18 | KERNEL_END_NR, | 18 | KERNEL_END_NR, |
19 | VMEMMAP_NR, | 19 | VMEMMAP_NR, |
20 | VMALLOC_NR, | 20 | VMALLOC_NR, |
21 | #ifdef CONFIG_64BIT | ||
22 | MODULES_NR, | 21 | MODULES_NR, |
23 | #endif | ||
24 | }; | 22 | }; |
25 | 23 | ||
26 | static struct addr_marker address_markers[] = { | 24 | static struct addr_marker address_markers[] = { |
@@ -29,9 +27,7 @@ static struct addr_marker address_markers[] = { | |||
29 | [KERNEL_END_NR] = {(unsigned long)&_end, "Kernel Image End"}, | 27 | [KERNEL_END_NR] = {(unsigned long)&_end, "Kernel Image End"}, |
30 | [VMEMMAP_NR] = {0, "vmemmap Area"}, | 28 | [VMEMMAP_NR] = {0, "vmemmap Area"}, |
31 | [VMALLOC_NR] = {0, "vmalloc Area"}, | 29 | [VMALLOC_NR] = {0, "vmalloc Area"}, |
32 | #ifdef CONFIG_64BIT | ||
33 | [MODULES_NR] = {0, "Modules Area"}, | 30 | [MODULES_NR] = {0, "Modules Area"}, |
34 | #endif | ||
35 | { -1, NULL } | 31 | { -1, NULL } |
36 | }; | 32 | }; |
37 | 33 | ||
@@ -127,12 +123,6 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st, | |||
127 | } | 123 | } |
128 | } | 124 | } |
129 | 125 | ||
130 | #ifdef CONFIG_64BIT | ||
131 | #define _PMD_PROT_MASK _SEGMENT_ENTRY_PROTECT | ||
132 | #else | ||
133 | #define _PMD_PROT_MASK 0 | ||
134 | #endif | ||
135 | |||
136 | static void walk_pmd_level(struct seq_file *m, struct pg_state *st, | 126 | static void walk_pmd_level(struct seq_file *m, struct pg_state *st, |
137 | pud_t *pud, unsigned long addr) | 127 | pud_t *pud, unsigned long addr) |
138 | { | 128 | { |
@@ -145,7 +135,7 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st, | |||
145 | pmd = pmd_offset(pud, addr); | 135 | pmd = pmd_offset(pud, addr); |
146 | if (!pmd_none(*pmd)) { | 136 | if (!pmd_none(*pmd)) { |
147 | if (pmd_large(*pmd)) { | 137 | if (pmd_large(*pmd)) { |
148 | prot = pmd_val(*pmd) & _PMD_PROT_MASK; | 138 | prot = pmd_val(*pmd) & _SEGMENT_ENTRY_PROTECT; |
149 | note_page(m, st, prot, 3); | 139 | note_page(m, st, prot, 3); |
150 | } else | 140 | } else |
151 | walk_pte_level(m, st, pmd, addr); | 141 | walk_pte_level(m, st, pmd, addr); |
@@ -155,12 +145,6 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st, | |||
155 | } | 145 | } |
156 | } | 146 | } |
157 | 147 | ||
158 | #ifdef CONFIG_64BIT | ||
159 | #define _PUD_PROT_MASK _REGION3_ENTRY_RO | ||
160 | #else | ||
161 | #define _PUD_PROT_MASK 0 | ||
162 | #endif | ||
163 | |||
164 | static void walk_pud_level(struct seq_file *m, struct pg_state *st, | 148 | static void walk_pud_level(struct seq_file *m, struct pg_state *st, |
165 | pgd_t *pgd, unsigned long addr) | 149 | pgd_t *pgd, unsigned long addr) |
166 | { | 150 | { |
@@ -173,7 +157,7 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st, | |||
173 | pud = pud_offset(pgd, addr); | 157 | pud = pud_offset(pgd, addr); |
174 | if (!pud_none(*pud)) | 158 | if (!pud_none(*pud)) |
175 | if (pud_large(*pud)) { | 159 | if (pud_large(*pud)) { |
176 | prot = pud_val(*pud) & _PUD_PROT_MASK; | 160 | prot = pud_val(*pud) & _REGION3_ENTRY_RO; |
177 | note_page(m, st, prot, 2); | 161 | note_page(m, st, prot, 2); |
178 | } else | 162 | } else |
179 | walk_pmd_level(m, st, pud, addr); | 163 | walk_pmd_level(m, st, pud, addr); |
@@ -230,13 +214,9 @@ static int pt_dump_init(void) | |||
230 | * kernel ASCE. We need this to keep the page table walker functions | 214 | * kernel ASCE. We need this to keep the page table walker functions |
231 | * from accessing non-existent entries. | 215 | * from accessing non-existent entries. |
232 | */ | 216 | */ |
233 | #ifdef CONFIG_32BIT | ||
234 | max_addr = 1UL << 31; | ||
235 | #else | ||
236 | max_addr = (S390_lowcore.kernel_asce & _REGION_ENTRY_TYPE_MASK) >> 2; | 217 | max_addr = (S390_lowcore.kernel_asce & _REGION_ENTRY_TYPE_MASK) >> 2; |
237 | max_addr = 1UL << (max_addr * 11 + 31); | 218 | max_addr = 1UL << (max_addr * 11 + 31); |
238 | address_markers[MODULES_NR].start_address = MODULES_VADDR; | 219 | address_markers[MODULES_NR].start_address = MODULES_VADDR; |
239 | #endif | ||
240 | address_markers[VMEMMAP_NR].start_address = (unsigned long) vmemmap; | 220 | address_markers[VMEMMAP_NR].start_address = (unsigned long) vmemmap; |
241 | address_markers[VMALLOC_NR].start_address = VMALLOC_START; | 221 | address_markers[VMALLOC_NR].start_address = VMALLOC_START; |
242 | debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops); | 222 | debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops); |
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c index 519bba716cc3..23c496957c22 100644 --- a/arch/s390/mm/extmem.c +++ b/arch/s390/mm/extmem.c | |||
@@ -51,7 +51,6 @@ struct qout64 { | |||
51 | struct qrange range[6]; | 51 | struct qrange range[6]; |
52 | }; | 52 | }; |
53 | 53 | ||
54 | #ifdef CONFIG_64BIT | ||
55 | struct qrange_old { | 54 | struct qrange_old { |
56 | unsigned int start; /* last byte type */ | 55 | unsigned int start; /* last byte type */ |
57 | unsigned int end; /* last byte reserved */ | 56 | unsigned int end; /* last byte reserved */ |
@@ -65,7 +64,6 @@ struct qout64_old { | |||
65 | int segrcnt; | 64 | int segrcnt; |
66 | struct qrange_old range[6]; | 65 | struct qrange_old range[6]; |
67 | }; | 66 | }; |
68 | #endif | ||
69 | 67 | ||
70 | struct qin64 { | 68 | struct qin64 { |
71 | char qopcode; | 69 | char qopcode; |
@@ -103,7 +101,6 @@ static int scode_set; | |||
103 | static int | 101 | static int |
104 | dcss_set_subcodes(void) | 102 | dcss_set_subcodes(void) |
105 | { | 103 | { |
106 | #ifdef CONFIG_64BIT | ||
107 | char *name = kmalloc(8 * sizeof(char), GFP_KERNEL | GFP_DMA); | 104 | char *name = kmalloc(8 * sizeof(char), GFP_KERNEL | GFP_DMA); |
108 | unsigned long rx, ry; | 105 | unsigned long rx, ry; |
109 | int rc; | 106 | int rc; |
@@ -135,7 +132,6 @@ dcss_set_subcodes(void) | |||
135 | segext_scode = DCSS_SEGEXTX; | 132 | segext_scode = DCSS_SEGEXTX; |
136 | return 0; | 133 | return 0; |
137 | } | 134 | } |
138 | #endif | ||
139 | /* Diag x'64' new subcodes are not supported, set to old subcodes */ | 135 | /* Diag x'64' new subcodes are not supported, set to old subcodes */ |
140 | loadshr_scode = DCSS_LOADNOLY; | 136 | loadshr_scode = DCSS_LOADNOLY; |
141 | loadnsr_scode = DCSS_LOADNSR; | 137 | loadnsr_scode = DCSS_LOADNSR; |
@@ -208,7 +204,6 @@ dcss_diag(int *func, void *parameter, | |||
208 | rx = (unsigned long) parameter; | 204 | rx = (unsigned long) parameter; |
209 | ry = (unsigned long) *func; | 205 | ry = (unsigned long) *func; |
210 | 206 | ||
211 | #ifdef CONFIG_64BIT | ||
212 | /* 64-bit Diag x'64' new subcode, keep in 64-bit addressing mode */ | 207 | /* 64-bit Diag x'64' new subcode, keep in 64-bit addressing mode */ |
213 | if (*func > DCSS_SEGEXT) | 208 | if (*func > DCSS_SEGEXT) |
214 | asm volatile( | 209 | asm volatile( |
@@ -225,13 +220,6 @@ dcss_diag(int *func, void *parameter, | |||
225 | " ipm %2\n" | 220 | " ipm %2\n" |
226 | " srl %2,28\n" | 221 | " srl %2,28\n" |
227 | : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc"); | 222 | : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc"); |
228 | #else | ||
229 | asm volatile( | ||
230 | " diag %0,%1,0x64\n" | ||
231 | " ipm %2\n" | ||
232 | " srl %2,28\n" | ||
233 | : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc"); | ||
234 | #endif | ||
235 | *ret1 = rx; | 223 | *ret1 = rx; |
236 | *ret2 = ry; | 224 | *ret2 = ry; |
237 | return rc; | 225 | return rc; |
@@ -281,7 +269,6 @@ query_segment_type (struct dcss_segment *seg) | |||
281 | goto out_free; | 269 | goto out_free; |
282 | } | 270 | } |
283 | 271 | ||
284 | #ifdef CONFIG_64BIT | ||
285 | /* Only old format of output area of Diagnose x'64' is supported, | 272 | /* Only old format of output area of Diagnose x'64' is supported, |
286 | copy data for the new format. */ | 273 | copy data for the new format. */ |
287 | if (segext_scode == DCSS_SEGEXT) { | 274 | if (segext_scode == DCSS_SEGEXT) { |
@@ -307,7 +294,6 @@ query_segment_type (struct dcss_segment *seg) | |||
307 | } | 294 | } |
308 | kfree(qout_old); | 295 | kfree(qout_old); |
309 | } | 296 | } |
310 | #endif | ||
311 | if (qout->segcnt > 6) { | 297 | if (qout->segcnt > 6) { |
312 | rc = -EOPNOTSUPP; | 298 | rc = -EOPNOTSUPP; |
313 | goto out_free; | 299 | goto out_free; |
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 3ff86533f7db..76515bcea2f1 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c | |||
@@ -36,15 +36,9 @@ | |||
36 | #include <asm/facility.h> | 36 | #include <asm/facility.h> |
37 | #include "../kernel/entry.h" | 37 | #include "../kernel/entry.h" |
38 | 38 | ||
39 | #ifndef CONFIG_64BIT | ||
40 | #define __FAIL_ADDR_MASK 0x7ffff000 | ||
41 | #define __SUBCODE_MASK 0x0200 | ||
42 | #define __PF_RES_FIELD 0ULL | ||
43 | #else /* CONFIG_64BIT */ | ||
44 | #define __FAIL_ADDR_MASK -4096L | 39 | #define __FAIL_ADDR_MASK -4096L |
45 | #define __SUBCODE_MASK 0x0600 | 40 | #define __SUBCODE_MASK 0x0600 |
46 | #define __PF_RES_FIELD 0x8000000000000000ULL | 41 | #define __PF_RES_FIELD 0x8000000000000000ULL |
47 | #endif /* CONFIG_64BIT */ | ||
48 | 42 | ||
49 | #define VM_FAULT_BADCONTEXT 0x010000 | 43 | #define VM_FAULT_BADCONTEXT 0x010000 |
50 | #define VM_FAULT_BADMAP 0x020000 | 44 | #define VM_FAULT_BADMAP 0x020000 |
@@ -54,7 +48,6 @@ | |||
54 | 48 | ||
55 | static unsigned long store_indication __read_mostly; | 49 | static unsigned long store_indication __read_mostly; |
56 | 50 | ||
57 | #ifdef CONFIG_64BIT | ||
58 | static int __init fault_init(void) | 51 | static int __init fault_init(void) |
59 | { | 52 | { |
60 | if (test_facility(75)) | 53 | if (test_facility(75)) |
@@ -62,7 +55,6 @@ static int __init fault_init(void) | |||
62 | return 0; | 55 | return 0; |
63 | } | 56 | } |
64 | early_initcall(fault_init); | 57 | early_initcall(fault_init); |
65 | #endif | ||
66 | 58 | ||
67 | static inline int notify_page_fault(struct pt_regs *regs) | 59 | static inline int notify_page_fault(struct pt_regs *regs) |
68 | { | 60 | { |
@@ -133,7 +125,6 @@ static int bad_address(void *p) | |||
133 | return probe_kernel_address((unsigned long *)p, dummy); | 125 | return probe_kernel_address((unsigned long *)p, dummy); |
134 | } | 126 | } |
135 | 127 | ||
136 | #ifdef CONFIG_64BIT | ||
137 | static void dump_pagetable(unsigned long asce, unsigned long address) | 128 | static void dump_pagetable(unsigned long asce, unsigned long address) |
138 | { | 129 | { |
139 | unsigned long *table = __va(asce & PAGE_MASK); | 130 | unsigned long *table = __va(asce & PAGE_MASK); |
@@ -187,33 +178,6 @@ bad: | |||
187 | pr_cont("BAD\n"); | 178 | pr_cont("BAD\n"); |
188 | } | 179 | } |
189 | 180 | ||
190 | #else /* CONFIG_64BIT */ | ||
191 | |||
192 | static void dump_pagetable(unsigned long asce, unsigned long address) | ||
193 | { | ||
194 | unsigned long *table = __va(asce & PAGE_MASK); | ||
195 | |||
196 | pr_alert("AS:%08lx ", asce); | ||
197 | table = table + ((address >> 20) & 0x7ff); | ||
198 | if (bad_address(table)) | ||
199 | goto bad; | ||
200 | pr_cont("S:%08lx ", *table); | ||
201 | if (*table & _SEGMENT_ENTRY_INVALID) | ||
202 | goto out; | ||
203 | table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN); | ||
204 | table = table + ((address >> 12) & 0xff); | ||
205 | if (bad_address(table)) | ||
206 | goto bad; | ||
207 | pr_cont("P:%08lx ", *table); | ||
208 | out: | ||
209 | pr_cont("\n"); | ||
210 | return; | ||
211 | bad: | ||
212 | pr_cont("BAD\n"); | ||
213 | } | ||
214 | |||
215 | #endif /* CONFIG_64BIT */ | ||
216 | |||
217 | static void dump_fault_info(struct pt_regs *regs) | 181 | static void dump_fault_info(struct pt_regs *regs) |
218 | { | 182 | { |
219 | unsigned long asce; | 183 | unsigned long asce; |
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c index 5c586c78ca8d..1eb41bb3010c 100644 --- a/arch/s390/mm/gup.c +++ b/arch/s390/mm/gup.c | |||
@@ -106,11 +106,9 @@ static inline int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, | |||
106 | pmd_t *pmdp, pmd; | 106 | pmd_t *pmdp, pmd; |
107 | 107 | ||
108 | pmdp = (pmd_t *) pudp; | 108 | pmdp = (pmd_t *) pudp; |
109 | #ifdef CONFIG_64BIT | ||
110 | if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) | 109 | if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) |
111 | pmdp = (pmd_t *) pud_deref(pud); | 110 | pmdp = (pmd_t *) pud_deref(pud); |
112 | pmdp += pmd_index(addr); | 111 | pmdp += pmd_index(addr); |
113 | #endif | ||
114 | do { | 112 | do { |
115 | pmd = *pmdp; | 113 | pmd = *pmdp; |
116 | barrier(); | 114 | barrier(); |
@@ -145,11 +143,9 @@ static inline int gup_pud_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, | |||
145 | pud_t *pudp, pud; | 143 | pud_t *pudp, pud; |
146 | 144 | ||
147 | pudp = (pud_t *) pgdp; | 145 | pudp = (pud_t *) pgdp; |
148 | #ifdef CONFIG_64BIT | ||
149 | if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2) | 146 | if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2) |
150 | pudp = (pud_t *) pgd_deref(pgd); | 147 | pudp = (pud_t *) pgd_deref(pgd); |
151 | pudp += pud_index(addr); | 148 | pudp += pud_index(addr); |
152 | #endif | ||
153 | do { | 149 | do { |
154 | pud = *pudp; | 150 | pud = *pudp; |
155 | barrier(); | 151 | barrier(); |
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index d35b15113b17..80875c43a4a4 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c | |||
@@ -105,7 +105,6 @@ void __init paging_init(void) | |||
105 | unsigned long pgd_type, asce_bits; | 105 | unsigned long pgd_type, asce_bits; |
106 | 106 | ||
107 | init_mm.pgd = swapper_pg_dir; | 107 | init_mm.pgd = swapper_pg_dir; |
108 | #ifdef CONFIG_64BIT | ||
109 | if (VMALLOC_END > (1UL << 42)) { | 108 | if (VMALLOC_END > (1UL << 42)) { |
110 | asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH; | 109 | asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH; |
111 | pgd_type = _REGION2_ENTRY_EMPTY; | 110 | pgd_type = _REGION2_ENTRY_EMPTY; |
@@ -113,10 +112,6 @@ void __init paging_init(void) | |||
113 | asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH; | 112 | asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH; |
114 | pgd_type = _REGION3_ENTRY_EMPTY; | 113 | pgd_type = _REGION3_ENTRY_EMPTY; |
115 | } | 114 | } |
116 | #else | ||
117 | asce_bits = _ASCE_TABLE_LENGTH; | ||
118 | pgd_type = _SEGMENT_ENTRY_EMPTY; | ||
119 | #endif | ||
120 | S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits; | 115 | S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits; |
121 | clear_table((unsigned long *) init_mm.pgd, pgd_type, | 116 | clear_table((unsigned long *) init_mm.pgd, pgd_type, |
122 | sizeof(unsigned long)*2048); | 117 | sizeof(unsigned long)*2048); |
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c index 2eb34bdfc613..8a993a53fcd6 100644 --- a/arch/s390/mm/maccess.c +++ b/arch/s390/mm/maccess.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Access kernel memory without faulting -- s390 specific implementation. | 2 | * Access kernel memory without faulting -- s390 specific implementation. |
3 | * | 3 | * |
4 | * Copyright IBM Corp. 2009 | 4 | * Copyright IBM Corp. 2009, 2015 |
5 | * | 5 | * |
6 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>, | 6 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>, |
7 | * | 7 | * |
@@ -16,51 +16,55 @@ | |||
16 | #include <asm/ctl_reg.h> | 16 | #include <asm/ctl_reg.h> |
17 | #include <asm/io.h> | 17 | #include <asm/io.h> |
18 | 18 | ||
19 | /* | 19 | static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t size) |
20 | * This function writes to kernel memory bypassing DAT and possible | ||
21 | * write protection. It copies one to four bytes from src to dst | ||
22 | * using the stura instruction. | ||
23 | * Returns the number of bytes copied or -EFAULT. | ||
24 | */ | ||
25 | static long probe_kernel_write_odd(void *dst, const void *src, size_t size) | ||
26 | { | 20 | { |
27 | unsigned long count, aligned; | 21 | unsigned long aligned, offset, count; |
28 | int offset, mask; | 22 | char tmp[8]; |
29 | int rc = -EFAULT; | ||
30 | 23 | ||
31 | aligned = (unsigned long) dst & ~3UL; | 24 | aligned = (unsigned long) dst & ~7UL; |
32 | offset = (unsigned long) dst & 3; | 25 | offset = (unsigned long) dst & 7UL; |
33 | count = min_t(unsigned long, 4 - offset, size); | 26 | size = min(8UL - offset, size); |
34 | mask = (0xf << (4 - count)) & 0xf; | 27 | count = size - 1; |
35 | mask >>= offset; | ||
36 | asm volatile( | 28 | asm volatile( |
37 | " bras 1,0f\n" | 29 | " bras 1,0f\n" |
38 | " icm 0,0,0(%3)\n" | 30 | " mvc 0(1,%4),0(%5)\n" |
39 | "0: l 0,0(%1)\n" | 31 | "0: mvc 0(8,%3),0(%0)\n" |
40 | " lra %1,0(%1)\n" | 32 | " ex %1,0(1)\n" |
41 | "1: ex %2,0(1)\n" | 33 | " lg %1,0(%3)\n" |
42 | "2: stura 0,%1\n" | 34 | " lra %0,0(%0)\n" |
43 | " la %0,0\n" | 35 | " sturg %1,%0\n" |
44 | "3:\n" | 36 | : "+&a" (aligned), "+&a" (count), "=m" (tmp) |
45 | EX_TABLE(0b,3b) EX_TABLE(1b,3b) EX_TABLE(2b,3b) | 37 | : "a" (&tmp), "a" (&tmp[offset]), "a" (src) |
46 | : "+d" (rc), "+a" (aligned) | 38 | : "cc", "memory", "1"); |
47 | : "a" (mask), "a" (src) : "cc", "memory", "0", "1"); | 39 | return size; |
48 | return rc ? rc : count; | ||
49 | } | 40 | } |
50 | 41 | ||
51 | long probe_kernel_write(void *dst, const void *src, size_t size) | 42 | /* |
43 | * s390_kernel_write - write to kernel memory bypassing DAT | ||
44 | * @dst: destination address | ||
45 | * @src: source address | ||
46 | * @size: number of bytes to copy | ||
47 | * | ||
48 | * This function writes to kernel memory bypassing DAT and possible page table | ||
49 | * write protection. It writes to the destination using the sturg instruction. | ||
50 | * Therefore we have a read-modify-write sequence: the function reads eight | ||
51 | * bytes from destination at an eight byte boundary, modifies the bytes | ||
52 | * requested and writes the result back in a loop. | ||
53 | * | ||
54 | * Note: this means that this function may not be called concurrently on | ||
55 | * several cpus with overlapping words, since this may potentially | ||
56 | * cause data corruption. | ||
57 | */ | ||
58 | void notrace s390_kernel_write(void *dst, const void *src, size_t size) | ||
52 | { | 59 | { |
53 | long copied = 0; | 60 | long copied; |
54 | 61 | ||
55 | while (size) { | 62 | while (size) { |
56 | copied = probe_kernel_write_odd(dst, src, size); | 63 | copied = s390_kernel_write_odd(dst, src, size); |
57 | if (copied < 0) | ||
58 | break; | ||
59 | dst += copied; | 64 | dst += copied; |
60 | src += copied; | 65 | src += copied; |
61 | size -= copied; | 66 | size -= copied; |
62 | } | 67 | } |
63 | return copied < 0 ? -EFAULT : 0; | ||
64 | } | 68 | } |
65 | 69 | ||
66 | static int __memcpy_real(void *dest, void *src, size_t count) | 70 | static int __memcpy_real(void *dest, void *src, size_t count) |
diff --git a/arch/s390/mm/mem_detect.c b/arch/s390/mm/mem_detect.c index 5535cfe0ee11..0f3604395805 100644 --- a/arch/s390/mm/mem_detect.c +++ b/arch/s390/mm/mem_detect.c | |||
@@ -36,10 +36,6 @@ void __init detect_memory_memblock(void) | |||
36 | memsize = rzm * rnmax; | 36 | memsize = rzm * rnmax; |
37 | if (!rzm) | 37 | if (!rzm) |
38 | rzm = 1ULL << 17; | 38 | rzm = 1ULL << 17; |
39 | if (IS_ENABLED(CONFIG_32BIT)) { | ||
40 | rzm = min(ADDR2G, rzm); | ||
41 | memsize = min(ADDR2G, memsize); | ||
42 | } | ||
43 | max_physmem_end = memsize; | 39 | max_physmem_end = memsize; |
44 | addr = 0; | 40 | addr = 0; |
45 | /* keep memblock lists close to the kernel */ | 41 | /* keep memblock lists close to the kernel */ |
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c index bb3367c5cb0b..6e552af08c76 100644 --- a/arch/s390/mm/mmap.c +++ b/arch/s390/mm/mmap.c | |||
@@ -32,7 +32,7 @@ | |||
32 | #include <asm/pgalloc.h> | 32 | #include <asm/pgalloc.h> |
33 | 33 | ||
34 | unsigned long mmap_rnd_mask; | 34 | unsigned long mmap_rnd_mask; |
35 | unsigned long mmap_align_mask; | 35 | static unsigned long mmap_align_mask; |
36 | 36 | ||
37 | static unsigned long stack_maxrandom_size(void) | 37 | static unsigned long stack_maxrandom_size(void) |
38 | { | 38 | { |
@@ -177,34 +177,6 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, | |||
177 | return addr; | 177 | return addr; |
178 | } | 178 | } |
179 | 179 | ||
180 | #ifndef CONFIG_64BIT | ||
181 | |||
182 | /* | ||
183 | * This function, called very early during the creation of a new | ||
184 | * process VM image, sets up which VM layout function to use: | ||
185 | */ | ||
186 | void arch_pick_mmap_layout(struct mm_struct *mm) | ||
187 | { | ||
188 | unsigned long random_factor = 0UL; | ||
189 | |||
190 | if (current->flags & PF_RANDOMIZE) | ||
191 | random_factor = arch_mmap_rnd(); | ||
192 | |||
193 | /* | ||
194 | * Fall back to the standard layout if the personality | ||
195 | * bit is set, or if the expected stack growth is unlimited: | ||
196 | */ | ||
197 | if (mmap_is_legacy()) { | ||
198 | mm->mmap_base = mmap_base_legacy(random_factor); | ||
199 | mm->get_unmapped_area = arch_get_unmapped_area; | ||
200 | } else { | ||
201 | mm->mmap_base = mmap_base(random_factor); | ||
202 | mm->get_unmapped_area = arch_get_unmapped_area_topdown; | ||
203 | } | ||
204 | } | ||
205 | |||
206 | #else | ||
207 | |||
208 | int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags) | 180 | int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags) |
209 | { | 181 | { |
210 | if (is_compat_task() || (TASK_SIZE >= (1UL << 53))) | 182 | if (is_compat_task() || (TASK_SIZE >= (1UL << 53))) |
@@ -314,5 +286,3 @@ static int __init setup_mmap_rnd(void) | |||
314 | return 0; | 286 | return 0; |
315 | } | 287 | } |
316 | early_initcall(setup_mmap_rnd); | 288 | early_initcall(setup_mmap_rnd); |
317 | |||
318 | #endif | ||
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c index 426c9d462d1c..749c98407b41 100644 --- a/arch/s390/mm/pageattr.c +++ b/arch/s390/mm/pageattr.c | |||
@@ -109,7 +109,7 @@ static void ipte_range(pte_t *pte, unsigned long address, int nr) | |||
109 | { | 109 | { |
110 | int i; | 110 | int i; |
111 | 111 | ||
112 | if (test_facility(13) && IS_ENABLED(CONFIG_64BIT)) { | 112 | if (test_facility(13)) { |
113 | __ptep_ipte_range(address, nr - 1, pte); | 113 | __ptep_ipte_range(address, nr - 1, pte); |
114 | return; | 114 | return; |
115 | } | 115 | } |
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index b2c1542f2ba2..33f589459113 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c | |||
@@ -27,14 +27,8 @@ | |||
27 | #include <asm/tlbflush.h> | 27 | #include <asm/tlbflush.h> |
28 | #include <asm/mmu_context.h> | 28 | #include <asm/mmu_context.h> |
29 | 29 | ||
30 | #ifndef CONFIG_64BIT | ||
31 | #define ALLOC_ORDER 1 | ||
32 | #define FRAG_MASK 0x0f | ||
33 | #else | ||
34 | #define ALLOC_ORDER 2 | 30 | #define ALLOC_ORDER 2 |
35 | #define FRAG_MASK 0x03 | 31 | #define FRAG_MASK 0x03 |
36 | #endif | ||
37 | |||
38 | 32 | ||
39 | unsigned long *crst_table_alloc(struct mm_struct *mm) | 33 | unsigned long *crst_table_alloc(struct mm_struct *mm) |
40 | { | 34 | { |
@@ -50,7 +44,6 @@ void crst_table_free(struct mm_struct *mm, unsigned long *table) | |||
50 | free_pages((unsigned long) table, ALLOC_ORDER); | 44 | free_pages((unsigned long) table, ALLOC_ORDER); |
51 | } | 45 | } |
52 | 46 | ||
53 | #ifdef CONFIG_64BIT | ||
54 | static void __crst_table_upgrade(void *arg) | 47 | static void __crst_table_upgrade(void *arg) |
55 | { | 48 | { |
56 | struct mm_struct *mm = arg; | 49 | struct mm_struct *mm = arg; |
@@ -140,7 +133,6 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit) | |||
140 | if (current->active_mm == mm) | 133 | if (current->active_mm == mm) |
141 | set_user_asce(mm); | 134 | set_user_asce(mm); |
142 | } | 135 | } |
143 | #endif | ||
144 | 136 | ||
145 | #ifdef CONFIG_PGSTE | 137 | #ifdef CONFIG_PGSTE |
146 | 138 | ||
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c index b1593c2f751a..ef7d6c8fea66 100644 --- a/arch/s390/mm/vmem.c +++ b/arch/s390/mm/vmem.c | |||
@@ -38,12 +38,10 @@ static inline pud_t *vmem_pud_alloc(void) | |||
38 | { | 38 | { |
39 | pud_t *pud = NULL; | 39 | pud_t *pud = NULL; |
40 | 40 | ||
41 | #ifdef CONFIG_64BIT | ||
42 | pud = vmem_alloc_pages(2); | 41 | pud = vmem_alloc_pages(2); |
43 | if (!pud) | 42 | if (!pud) |
44 | return NULL; | 43 | return NULL; |
45 | clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4); | 44 | clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4); |
46 | #endif | ||
47 | return pud; | 45 | return pud; |
48 | } | 46 | } |
49 | 47 | ||
@@ -51,12 +49,10 @@ static inline pmd_t *vmem_pmd_alloc(void) | |||
51 | { | 49 | { |
52 | pmd_t *pmd = NULL; | 50 | pmd_t *pmd = NULL; |
53 | 51 | ||
54 | #ifdef CONFIG_64BIT | ||
55 | pmd = vmem_alloc_pages(2); | 52 | pmd = vmem_alloc_pages(2); |
56 | if (!pmd) | 53 | if (!pmd) |
57 | return NULL; | 54 | return NULL; |
58 | clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4); | 55 | clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4); |
59 | #endif | ||
60 | return pmd; | 56 | return pmd; |
61 | } | 57 | } |
62 | 58 | ||
@@ -98,7 +94,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro) | |||
98 | pgd_populate(&init_mm, pg_dir, pu_dir); | 94 | pgd_populate(&init_mm, pg_dir, pu_dir); |
99 | } | 95 | } |
100 | pu_dir = pud_offset(pg_dir, address); | 96 | pu_dir = pud_offset(pg_dir, address); |
101 | #if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC) | 97 | #ifndef CONFIG_DEBUG_PAGEALLOC |
102 | if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address && | 98 | if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address && |
103 | !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) { | 99 | !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) { |
104 | pud_val(*pu_dir) = __pa(address) | | 100 | pud_val(*pu_dir) = __pa(address) | |
@@ -115,7 +111,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro) | |||
115 | pud_populate(&init_mm, pu_dir, pm_dir); | 111 | pud_populate(&init_mm, pu_dir, pm_dir); |
116 | } | 112 | } |
117 | pm_dir = pmd_offset(pu_dir, address); | 113 | pm_dir = pmd_offset(pu_dir, address); |
118 | #if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC) | 114 | #ifndef CONFIG_DEBUG_PAGEALLOC |
119 | if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address && | 115 | if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address && |
120 | !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) { | 116 | !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) { |
121 | pmd_val(*pm_dir) = __pa(address) | | 117 | pmd_val(*pm_dir) = __pa(address) | |
@@ -222,7 +218,6 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) | |||
222 | 218 | ||
223 | pm_dir = pmd_offset(pu_dir, address); | 219 | pm_dir = pmd_offset(pu_dir, address); |
224 | if (pmd_none(*pm_dir)) { | 220 | if (pmd_none(*pm_dir)) { |
225 | #ifdef CONFIG_64BIT | ||
226 | /* Use 1MB frames for vmemmap if available. We always | 221 | /* Use 1MB frames for vmemmap if available. We always |
227 | * use large frames even if they are only partially | 222 | * use large frames even if they are only partially |
228 | * used. | 223 | * used. |
@@ -240,7 +235,6 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) | |||
240 | address = (address + PMD_SIZE) & PMD_MASK; | 235 | address = (address + PMD_SIZE) & PMD_MASK; |
241 | continue; | 236 | continue; |
242 | } | 237 | } |
243 | #endif | ||
244 | pt_dir = vmem_pte_alloc(address); | 238 | pt_dir = vmem_pte_alloc(address); |
245 | if (!pt_dir) | 239 | if (!pt_dir) |
246 | goto out; | 240 | goto out; |
diff --git a/arch/s390/oprofile/Makefile b/arch/s390/oprofile/Makefile index 524c4b615821..1bd23017191e 100644 --- a/arch/s390/oprofile/Makefile +++ b/arch/s390/oprofile/Makefile | |||
@@ -7,4 +7,4 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \ | |||
7 | timer_int.o ) | 7 | timer_int.o ) |
8 | 8 | ||
9 | oprofile-y := $(DRIVER_OBJS) init.o backtrace.o | 9 | oprofile-y := $(DRIVER_OBJS) init.o backtrace.o |
10 | oprofile-$(CONFIG_64BIT) += hwsampler.o | 10 | oprofile-y += hwsampler.o |
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c index 9ffe645d5989..bc927a09a172 100644 --- a/arch/s390/oprofile/init.c +++ b/arch/s390/oprofile/init.c | |||
@@ -21,8 +21,6 @@ | |||
21 | 21 | ||
22 | extern void s390_backtrace(struct pt_regs * const regs, unsigned int depth); | 22 | extern void s390_backtrace(struct pt_regs * const regs, unsigned int depth); |
23 | 23 | ||
24 | #ifdef CONFIG_64BIT | ||
25 | |||
26 | #include "hwsampler.h" | 24 | #include "hwsampler.h" |
27 | #include "op_counter.h" | 25 | #include "op_counter.h" |
28 | 26 | ||
@@ -495,14 +493,10 @@ static void oprofile_hwsampler_exit(void) | |||
495 | hwsampler_shutdown(); | 493 | hwsampler_shutdown(); |
496 | } | 494 | } |
497 | 495 | ||
498 | #endif /* CONFIG_64BIT */ | ||
499 | |||
500 | int __init oprofile_arch_init(struct oprofile_operations *ops) | 496 | int __init oprofile_arch_init(struct oprofile_operations *ops) |
501 | { | 497 | { |
502 | ops->backtrace = s390_backtrace; | 498 | ops->backtrace = s390_backtrace; |
503 | 499 | ||
504 | #ifdef CONFIG_64BIT | ||
505 | |||
506 | /* | 500 | /* |
507 | * -ENODEV is not reported to the caller. The module itself | 501 | * -ENODEV is not reported to the caller. The module itself |
508 | * will use the timer mode sampling as fallback and this is | 502 | * will use the timer mode sampling as fallback and this is |
@@ -511,14 +505,9 @@ int __init oprofile_arch_init(struct oprofile_operations *ops) | |||
511 | hwsampler_available = oprofile_hwsampler_init(ops) == 0; | 505 | hwsampler_available = oprofile_hwsampler_init(ops) == 0; |
512 | 506 | ||
513 | return 0; | 507 | return 0; |
514 | #else | ||
515 | return -ENODEV; | ||
516 | #endif | ||
517 | } | 508 | } |
518 | 509 | ||
519 | void oprofile_arch_exit(void) | 510 | void oprofile_arch_exit(void) |
520 | { | 511 | { |
521 | #ifdef CONFIG_64BIT | ||
522 | oprofile_hwsampler_exit(); | 512 | oprofile_hwsampler_exit(); |
523 | #endif | ||
524 | } | 513 | } |
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index b2c76f64c530..98336200c7b2 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c | |||
@@ -913,8 +913,7 @@ static int __init pci_base_init(void) | |||
913 | if (!s390_pci_probe) | 913 | if (!s390_pci_probe) |
914 | return 0; | 914 | return 0; |
915 | 915 | ||
916 | if (!test_facility(2) || !test_facility(69) | 916 | if (!test_facility(69) || !test_facility(71) || !test_facility(72)) |
917 | || !test_facility(71) || !test_facility(72)) | ||
918 | return 0; | 917 | return 0; |
919 | 918 | ||
920 | rc = zpci_debug_init(); | 919 | rc = zpci_debug_init(); |