author     Michal Marek <mmarek@suse.cz>  2010-08-04 07:59:13 -0400
committer  Michal Marek <mmarek@suse.cz>  2010-08-04 07:59:13 -0400
commit     772320e84588dcbe1600ffb83e5f328f2209ac2a (patch)
tree       a7de21b79340aeaa17c58126f6b801b82c77b53a /arch/s390
parent     1ce53adf13a54375d2a5c7cdbe341b2558389615 (diff)
parent     9fe6206f400646a2322096b56c59891d530e8d51 (diff)
Merge commit 'v2.6.35' into kbuild/kbuild
Conflicts:
	arch/powerpc/Makefile
Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/Kconfig | 22
-rw-r--r--  arch/s390/Kconfig.debug | 13
-rw-r--r--  arch/s390/Makefile | 19
-rw-r--r--  arch/s390/appldata/appldata_mem.c | 1
-rw-r--r--  arch/s390/appldata/appldata_net_sum.c | 1
-rw-r--r--  arch/s390/appldata/appldata_os.c | 2
-rw-r--r--  arch/s390/boot/Makefile | 8
-rw-r--r--  arch/s390/boot/compressed/Makefile | 63
-rw-r--r--  arch/s390/boot/compressed/head31.S | 51
-rw-r--r--  arch/s390/boot/compressed/head64.S | 48
-rw-r--r--  arch/s390/boot/compressed/misc.c | 162
-rw-r--r--  arch/s390/boot/compressed/vmlinux.lds.S | 55
-rw-r--r--  arch/s390/boot/compressed/vmlinux.scr | 10
-rw-r--r--  arch/s390/crypto/aes_s390.c | 6
-rw-r--r--  arch/s390/crypto/prng.c | 1
-rw-r--r--  arch/s390/crypto/sha_common.c | 2
-rw-r--r--  arch/s390/defconfig | 64
-rw-r--r--  arch/s390/hypfs/hypfs.h | 4
-rw-r--r--  arch/s390/hypfs/hypfs_diag.c | 128
-rw-r--r--  arch/s390/hypfs/hypfs_vm.c | 87
-rw-r--r--  arch/s390/hypfs/inode.c | 86
-rw-r--r--  arch/s390/include/asm/atomic.h | 105
-rw-r--r--  arch/s390/include/asm/bitops.h | 83
-rw-r--r--  arch/s390/include/asm/bug.h | 18
-rw-r--r--  arch/s390/include/asm/ccwdev.h | 10
-rw-r--r--  arch/s390/include/asm/cio.h | 2
-rw-r--r--  arch/s390/include/asm/compat.h | 3
-rw-r--r--  arch/s390/include/asm/cputime.h | 9
-rw-r--r--  arch/s390/include/asm/crw.h | 1
-rw-r--r--  arch/s390/include/asm/etr.h | 12
-rw-r--r--  arch/s390/include/asm/irqflags.h | 36
-rw-r--r--  arch/s390/include/asm/lowcore.h | 341
-rw-r--r--  arch/s390/include/asm/page.h | 3
-rw-r--r--  arch/s390/include/asm/pgtable.h | 8
-rw-r--r--  arch/s390/include/asm/processor.h | 18
-rw-r--r--  arch/s390/include/asm/ptrace.h | 19
-rw-r--r--  arch/s390/include/asm/qdio.h | 12
-rw-r--r--  arch/s390/include/asm/rwsem.h | 147
-rw-r--r--  arch/s390/include/asm/scatterlist.h | 2
-rw-r--r--  arch/s390/include/asm/setup.h | 14
-rw-r--r--  arch/s390/include/asm/sigp.h | 142
-rw-r--r--  arch/s390/include/asm/smp.h | 38
-rw-r--r--  arch/s390/include/asm/spinlock.h | 18
-rw-r--r--  arch/s390/include/asm/swab.h | 16
-rw-r--r--  arch/s390/include/asm/syscall.h | 7
-rw-r--r--  arch/s390/include/asm/sysinfo.h | 3
-rw-r--r--  arch/s390/include/asm/system.h | 182
-rw-r--r--  arch/s390/include/asm/thread_info.h | 5
-rw-r--r--  arch/s390/include/asm/timex.h | 30
-rw-r--r--  arch/s390/include/asm/topology.h | 2
-rw-r--r--  arch/s390/include/asm/uaccess.h | 12
-rw-r--r--  arch/s390/include/asm/unistd.h | 1
-rw-r--r--  arch/s390/include/asm/vdso.h | 3
-rw-r--r--  arch/s390/kernel/Makefile | 3
-rw-r--r--  arch/s390/kernel/asm-offsets.c | 104
-rw-r--r--  arch/s390/kernel/base.S | 2
-rw-r--r--  arch/s390/kernel/compat_linux.c | 2
-rw-r--r--  arch/s390/kernel/compat_wrapper.S | 2
-rw-r--r--  arch/s390/kernel/debug.c | 1
-rw-r--r--  arch/s390/kernel/dis.c | 369
-rw-r--r--  arch/s390/kernel/early.c | 29
-rw-r--r--  arch/s390/kernel/entry.S | 329
-rw-r--r--  arch/s390/kernel/entry.h | 10
-rw-r--r--  arch/s390/kernel/entry64.S | 623
-rw-r--r--  arch/s390/kernel/ftrace.c | 12
-rw-r--r--  arch/s390/kernel/head.S | 67
-rw-r--r--  arch/s390/kernel/head31.S | 18
-rw-r--r--  arch/s390/kernel/head64.S | 96
-rw-r--r--  arch/s390/kernel/ipl.c | 56
-rw-r--r--  arch/s390/kernel/kprobes.c | 4
-rw-r--r--  arch/s390/kernel/machine_kexec.c | 10
-rw-r--r--  arch/s390/kernel/module.c | 6
-rw-r--r--  arch/s390/kernel/nmi.c | 3
-rw-r--r--  arch/s390/kernel/process.c | 2
-rw-r--r--  arch/s390/kernel/processor.c | 37
-rw-r--r--  arch/s390/kernel/ptrace.c | 131
-rw-r--r--  arch/s390/kernel/reipl.S | 2
-rw-r--r--  arch/s390/kernel/reipl64.S | 2
-rw-r--r--  arch/s390/kernel/s390_ext.c | 3
-rw-r--r--  arch/s390/kernel/sclp.S | 38
-rw-r--r--  arch/s390/kernel/setup.c | 41
-rw-r--r--  arch/s390/kernel/signal.c | 2
-rw-r--r--  arch/s390/kernel/smp.c | 131
-rw-r--r--  arch/s390/kernel/switch_cpu.S | 58
-rw-r--r--  arch/s390/kernel/switch_cpu64.S | 51
-rw-r--r--  arch/s390/kernel/swsusp_asm64.S | 5
-rw-r--r--  arch/s390/kernel/sys_s390.c | 43
-rw-r--r--  arch/s390/kernel/syscalls.S | 6
-rw-r--r--  arch/s390/kernel/sysinfo.c | 1
-rw-r--r--  arch/s390/kernel/time.c | 88
-rw-r--r--  arch/s390/kernel/topology.c | 10
-rw-r--r--  arch/s390/kernel/traps.c | 31
-rw-r--r--  arch/s390/kernel/vdso.c | 5
-rw-r--r--  arch/s390/kernel/vdso32/clock_gettime.S | 12
-rw-r--r--  arch/s390/kernel/vdso32/gettimeofday.S | 6
-rw-r--r--  arch/s390/kernel/vdso64/clock_gettime.S | 4
-rw-r--r--  arch/s390/kernel/vdso64/gettimeofday.S | 2
-rw-r--r--  arch/s390/kernel/vtime.c | 15
-rw-r--r--  arch/s390/kvm/Kconfig | 1
-rw-r--r--  arch/s390/kvm/diag.c | 4
-rw-r--r--  arch/s390/kvm/intercept.c | 18
-rw-r--r--  arch/s390/kvm/interrupt.c | 13
-rw-r--r--  arch/s390/kvm/kvm-s390.c | 57
-rw-r--r--  arch/s390/kvm/kvm-s390.h | 10
-rw-r--r--  arch/s390/kvm/priv.c | 3
-rw-r--r--  arch/s390/kvm/sie64a.S | 75
-rw-r--r--  arch/s390/kvm/sigp.c | 7
-rw-r--r--  arch/s390/lib/Makefile | 1
-rw-r--r--  arch/s390/lib/spinlock.c | 53
-rw-r--r--  arch/s390/lib/usercopy.c | 8
-rw-r--r--  arch/s390/mm/cmm.c | 110
-rw-r--r--  arch/s390/mm/extmem.c | 31
-rw-r--r--  arch/s390/mm/fault.c | 37
-rw-r--r--  arch/s390/mm/init.c | 34
-rw-r--r--  arch/s390/mm/maccess.c | 26
-rw-r--r--  arch/s390/mm/page-states.c | 1
-rw-r--r--  arch/s390/mm/pgtable.c | 2
-rw-r--r--  arch/s390/mm/vmem.c | 12
118 files changed, 3052 insertions(+), 2058 deletions(-)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index c80235206c01..bee1c0f794cf 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -54,6 +54,9 @@ config GENERIC_BUG
54 depends on BUG 54 depends on BUG
55 default y 55 default y
56 56
57config GENERIC_BUG_RELATIVE_POINTERS
58 def_bool y
59
57config NO_IOMEM 60config NO_IOMEM
58 def_bool y 61 def_bool y
59 62
@@ -87,6 +90,7 @@ config S390
87 select HAVE_SYSCALL_TRACEPOINTS 90 select HAVE_SYSCALL_TRACEPOINTS
88 select HAVE_DYNAMIC_FTRACE 91 select HAVE_DYNAMIC_FTRACE
89 select HAVE_FUNCTION_GRAPH_TRACER 92 select HAVE_FUNCTION_GRAPH_TRACER
93 select HAVE_REGS_AND_STACK_ACCESS_API
90 select HAVE_DEFAULT_NO_SPIN_MUTEXES 94 select HAVE_DEFAULT_NO_SPIN_MUTEXES
91 select HAVE_OPROFILE 95 select HAVE_OPROFILE
92 select HAVE_KPROBES 96 select HAVE_KPROBES
@@ -95,6 +99,10 @@ config S390
95 select HAVE_ARCH_TRACEHOOK 99 select HAVE_ARCH_TRACEHOOK
96 select INIT_ALL_POSSIBLE 100 select INIT_ALL_POSSIBLE
97 select HAVE_PERF_EVENTS 101 select HAVE_PERF_EVENTS
102 select HAVE_KERNEL_GZIP
103 select HAVE_KERNEL_BZIP2
104 select HAVE_KERNEL_LZMA
105 select HAVE_KERNEL_LZO
98 select ARCH_INLINE_SPIN_TRYLOCK 106 select ARCH_INLINE_SPIN_TRYLOCK
99 select ARCH_INLINE_SPIN_TRYLOCK_BH 107 select ARCH_INLINE_SPIN_TRYLOCK_BH
100 select ARCH_INLINE_SPIN_LOCK 108 select ARCH_INLINE_SPIN_LOCK
@@ -437,13 +445,6 @@ config FORCE_MAX_ZONEORDER
437 int 445 int
438 default "9" 446 default "9"
439 447
440config PROCESS_DEBUG
441 bool "Show crashed user process info"
442 help
443 Say Y to print all process fault locations to the console. This is
444 a debugging option; you probably do not want to set it unless you
445 are an S390 port maintainer.
446
447config PFAULT 448config PFAULT
448 bool "Pseudo page fault support" 449 bool "Pseudo page fault support"
449 help 450 help
@@ -479,13 +480,6 @@ config CMM
479 Everybody who wants to run Linux under VM should select this 480 Everybody who wants to run Linux under VM should select this
480 option. 481 option.
481 482
482config CMM_PROC
483 bool "/proc interface to cooperative memory management"
484 depends on CMM
485 help
486 Select this option to enable the /proc interface to the
487 cooperative memory management.
488
489config CMM_IUCV 483config CMM_IUCV
490 bool "IUCV special message interface to cooperative memory management" 484 bool "IUCV special message interface to cooperative memory management"
491 depends on CMM && (SMSGIUCV=y || CMM=SMSGIUCV) 485 depends on CMM && (SMSGIUCV=y || CMM=SMSGIUCV)
diff --git a/arch/s390/Kconfig.debug b/arch/s390/Kconfig.debug
index 2283933a9a93..45e0c6199f36 100644
--- a/arch/s390/Kconfig.debug
+++ b/arch/s390/Kconfig.debug
@@ -6,4 +6,17 @@ config TRACE_IRQFLAGS_SUPPORT
6 6
7source "lib/Kconfig.debug" 7source "lib/Kconfig.debug"
8 8
9config DEBUG_STRICT_USER_COPY_CHECKS
10 bool "Strict user copy size checks"
11 ---help---
12 Enabling this option turns a certain set of sanity checks for user
13 copy operations into compile time warnings.
14
15 The copy_from_user() etc checks are there to help test if there
16 are sufficient security checks on the length argument of
17 the copy operation, by having gcc prove that the argument is
18 within bounds.
19
20 If unsure, or if you run an older (pre 4.4) gcc, say N.
21
9endmenu 22endmenu
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 83ef8724c833..0c9e6c6d2a64 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -14,6 +14,7 @@
14# 14#
15 15
16ifndef CONFIG_64BIT 16ifndef CONFIG_64BIT
17LD_BFD := elf32-s390
17LDFLAGS := -m elf_s390 18LDFLAGS := -m elf_s390
18KBUILD_CFLAGS += -m31 19KBUILD_CFLAGS += -m31
19KBUILD_AFLAGS += -m31 20KBUILD_AFLAGS += -m31
@@ -21,6 +22,7 @@ UTS_MACHINE := s390
21STACK_SIZE := 8192 22STACK_SIZE := 8192
22CHECKFLAGS += -D__s390__ -msize-long 23CHECKFLAGS += -D__s390__ -msize-long
23else 24else
25LD_BFD := elf64-s390
24LDFLAGS := -m elf64_s390 26LDFLAGS := -m elf64_s390
25KBUILD_AFLAGS_MODULE += -fpic -D__PIC__ 27KBUILD_AFLAGS_MODULE += -fpic -D__PIC__
26KBUILD_CFLAGS_MODULE += -fpic -D__PIC__ 28KBUILD_CFLAGS_MODULE += -fpic -D__PIC__
@@ -31,6 +33,8 @@ STACK_SIZE := 16384
31CHECKFLAGS += -D__s390__ -D__s390x__ 33CHECKFLAGS += -D__s390__ -D__s390x__
32endif 34endif
33 35
36export LD_BFD
37
34cflags-$(CONFIG_MARCH_G5) += $(call cc-option,-march=g5) 38cflags-$(CONFIG_MARCH_G5) += $(call cc-option,-march=g5)
35cflags-$(CONFIG_MARCH_Z900) += $(call cc-option,-march=z900) 39cflags-$(CONFIG_MARCH_Z900) += $(call cc-option,-march=z900)
36cflags-$(CONFIG_MARCH_Z990) += $(call cc-option,-march=z990) 40cflags-$(CONFIG_MARCH_Z990) += $(call cc-option,-march=z990)
@@ -86,7 +90,9 @@ KBUILD_AFLAGS += $(aflags-y)
86OBJCOPYFLAGS := -O binary 90OBJCOPYFLAGS := -O binary
87LDFLAGS_vmlinux := -e start 91LDFLAGS_vmlinux := -e start
88 92
89head-y := arch/s390/kernel/head.o arch/s390/kernel/init_task.o 93head-y := arch/s390/kernel/head.o
94head-y += arch/s390/kernel/$(if $(CONFIG_64BIT),head64.o,head31.o)
95head-y += arch/s390/kernel/init_task.o
90 96
91core-y += arch/s390/mm/ arch/s390/kernel/ arch/s390/crypto/ \ 97core-y += arch/s390/mm/ arch/s390/kernel/ arch/s390/crypto/ \
92 arch/s390/appldata/ arch/s390/hypfs/ arch/s390/kvm/ 98 arch/s390/appldata/ arch/s390/hypfs/ arch/s390/kvm/
@@ -100,21 +106,28 @@ drivers-$(CONFIG_OPROFILE) += arch/s390/oprofile/
100 106
101boot := arch/s390/boot 107boot := arch/s390/boot
102 108
103all: image 109all: image bzImage
104 110
105install: vmlinux 111install: vmlinux
106 $(Q)$(MAKE) $(build)=$(boot) $@ 112 $(Q)$(MAKE) $(build)=$(boot) $@
107 113
108image: vmlinux 114image bzImage: vmlinux
109 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ 115 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
110 116
111zfcpdump: 117zfcpdump:
112 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ 118 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
113 119
120vdso_install:
121ifeq ($(CONFIG_64BIT),y)
122 $(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso64 $@
123endif
124 $(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso32 $@
125
114archclean: 126archclean:
115 $(Q)$(MAKE) $(clean)=$(boot) 127 $(Q)$(MAKE) $(clean)=$(boot)
116 128
117# Don't use tabs in echo arguments 129# Don't use tabs in echo arguments
118define archhelp 130define archhelp
119 echo '* image - Kernel image for IPL ($(boot)/image)' 131 echo '* image - Kernel image for IPL ($(boot)/image)'
132 echo '* bzImage - Compressed kernel image for IPL ($(boot)/bzImage)'
120endef 133endef
diff --git a/arch/s390/appldata/appldata_mem.c b/arch/s390/appldata/appldata_mem.c
index 4188cbe63a54..e43fe7537031 100644
--- a/arch/s390/appldata/appldata_mem.c
+++ b/arch/s390/appldata/appldata_mem.c
@@ -11,7 +11,6 @@
11 11
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/slab.h>
15#include <linux/errno.h> 14#include <linux/errno.h>
16#include <linux/kernel_stat.h> 15#include <linux/kernel_stat.h>
17#include <linux/pagemap.h> 16#include <linux/pagemap.h>
diff --git a/arch/s390/appldata/appldata_net_sum.c b/arch/s390/appldata/appldata_net_sum.c
index 4ce7fa95880f..9a9586f4103f 100644
--- a/arch/s390/appldata/appldata_net_sum.c
+++ b/arch/s390/appldata/appldata_net_sum.c
@@ -12,7 +12,6 @@
12 12
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/slab.h>
16#include <linux/errno.h> 15#include <linux/errno.h>
17#include <linux/kernel_stat.h> 16#include <linux/kernel_stat.h>
18#include <linux/netdevice.h> 17#include <linux/netdevice.h>
diff --git a/arch/s390/appldata/appldata_os.c b/arch/s390/appldata/appldata_os.c
index 55c80ffd42b9..92f1cb745d69 100644
--- a/arch/s390/appldata/appldata_os.c
+++ b/arch/s390/appldata/appldata_os.c
@@ -181,7 +181,7 @@ static int __init appldata_os_init(void)
181 goto out; 181 goto out;
182 } 182 }
183 183
184 appldata_os_data = kzalloc(max_size, GFP_DMA); 184 appldata_os_data = kzalloc(max_size, GFP_KERNEL | GFP_DMA);
185 if (appldata_os_data == NULL) { 185 if (appldata_os_data == NULL) {
186 rc = -ENOMEM; 186 rc = -ENOMEM;
187 goto out; 187 goto out;
diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile
index 4d97eef36b8d..8800cf090694 100644
--- a/arch/s390/boot/Makefile
+++ b/arch/s390/boot/Makefile
@@ -9,10 +9,18 @@ COMPILE_VERSION := __linux_compile_version_id__`hostname | \
9EXTRA_CFLAGS := -DCOMPILE_VERSION=$(COMPILE_VERSION) -gstabs -I. 9EXTRA_CFLAGS := -DCOMPILE_VERSION=$(COMPILE_VERSION) -gstabs -I.
10 10
11targets := image 11targets := image
12targets += bzImage
13subdir- := compressed
12 14
13$(obj)/image: vmlinux FORCE 15$(obj)/image: vmlinux FORCE
14 $(call if_changed,objcopy) 16 $(call if_changed,objcopy)
15 17
18$(obj)/bzImage: $(obj)/compressed/vmlinux FORCE
19 $(call if_changed,objcopy)
20
21$(obj)/compressed/vmlinux: FORCE
22 $(Q)$(MAKE) $(build)=$(obj)/compressed $@
23
16install: $(CONFIGURE) $(obj)/image 24install: $(CONFIGURE) $(obj)/image
17 sh -x $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/image \ 25 sh -x $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/image \
18 System.map Kerntypes "$(INSTALL_PATH)" 26 System.map Kerntypes "$(INSTALL_PATH)"
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
new file mode 100644
index 000000000000..1c999f726a58
--- /dev/null
+++ b/arch/s390/boot/compressed/Makefile
@@ -0,0 +1,63 @@
1#
2# linux/arch/s390/boot/compressed/Makefile
3#
4# create a compressed vmlinux image from the original vmlinux
5#
6
7BITS := $(if $(CONFIG_64BIT),64,31)
8
9targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 \
10 vmlinux.bin.lzma vmlinux.bin.lzo misc.o piggy.o sizes.h head$(BITS).o
11
12KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
13KBUILD_CFLAGS += $(cflags-y)
14KBUILD_CFLAGS += $(call cc-option,-mpacked-stack)
15KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
16
17GCOV_PROFILE := n
18
19OBJECTS := $(addprefix $(objtree)/arch/s390/kernel/, head.o sclp.o ebcdic.o)
20OBJECTS += $(obj)/head$(BITS).o $(obj)/misc.o $(obj)/piggy.o
21
22LDFLAGS_vmlinux := --oformat $(LD_BFD) -e startup -T
23$(obj)/vmlinux: $(obj)/vmlinux.lds $(OBJECTS)
24 $(call if_changed,ld)
25 @:
26
27sed-sizes := -e 's/^\([0-9a-fA-F]*\) . \(__bss_start\|_end\)$$/\#define SZ\2 0x\1/p'
28
29quiet_cmd_sizes = GEN $@
30 cmd_sizes = $(NM) $< | sed -n $(sed-sizes) > $@
31
32$(obj)/sizes.h: vmlinux
33 $(call if_changed,sizes)
34
35AFLAGS_head$(BITS).o += -I$(obj)
36$(obj)/head$(BITS).o: $(obj)/sizes.h
37
38CFLAGS_misc.o += -I$(obj)
39$(obj)/misc.o: $(obj)/sizes.h
40
41OBJCOPYFLAGS_vmlinux.bin := -R .comment -S
42$(obj)/vmlinux.bin: vmlinux
43 $(call if_changed,objcopy)
44
45vmlinux.bin.all-y := $(obj)/vmlinux.bin
46
47suffix-$(CONFIG_KERNEL_GZIP) := gz
48suffix-$(CONFIG_KERNEL_BZIP2) := bz2
49suffix-$(CONFIG_KERNEL_LZMA) := lzma
50suffix-$(CONFIG_KERNEL_LZO) := lzo
51
52$(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y)
53 $(call if_changed,gzip)
54$(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y)
55 $(call if_changed,bzip2)
56$(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y)
57 $(call if_changed,lzma)
58$(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y)
59 $(call if_changed,lzo)
60
61LDFLAGS_piggy.o := -r --format binary --oformat $(LD_BFD) -T
62$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix-y)
63 $(call if_changed,ld)
diff --git a/arch/s390/boot/compressed/head31.S b/arch/s390/boot/compressed/head31.S
new file mode 100644
index 000000000000..2a5523a32bcc
--- /dev/null
+++ b/arch/s390/boot/compressed/head31.S
@@ -0,0 +1,51 @@
1/*
2 * Startup glue code to uncompress the kernel
3 *
4 * Copyright IBM Corp. 2010
5 *
6 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
7 */
8
9#include <linux/init.h>
10#include <asm/asm-offsets.h>
11#include <asm/thread_info.h>
12#include <asm/page.h>
13#include "sizes.h"
14
15__HEAD
16 .globl startup_continue
17startup_continue:
18 basr %r13,0 # get base
19.LPG1:
20 # setup stack
21 l %r15,.Lstack-.LPG1(%r13)
22 ahi %r15,-96
23 l %r1,.Ldecompress-.LPG1(%r13)
24 basr %r14,%r1
25 # setup registers for memory mover & branch to target
26 lr %r4,%r2
27 l %r2,.Loffset-.LPG1(%r13)
28 la %r4,0(%r2,%r4)
29 l %r3,.Lmvsize-.LPG1(%r13)
30 lr %r5,%r3
31 # move the memory mover someplace safe
32 la %r1,0x200
33 mvc 0(mover_end-mover,%r1),mover-.LPG1(%r13)
34 # decompress image is started at 0x11000
35 lr %r6,%r2
36 br %r1
37mover:
38 mvcle %r2,%r4,0
39 jo mover
40 br %r6
41mover_end:
42
43 .align 8
44.Lstack:
45 .long 0x8000 + (1<<(PAGE_SHIFT+THREAD_ORDER))
46.Ldecompress:
47 .long decompress_kernel
48.Loffset:
49 .long 0x11000
50.Lmvsize:
51 .long SZ__bss_start
diff --git a/arch/s390/boot/compressed/head64.S b/arch/s390/boot/compressed/head64.S
new file mode 100644
index 000000000000..2982cb140550
--- /dev/null
+++ b/arch/s390/boot/compressed/head64.S
@@ -0,0 +1,48 @@
1/*
2 * Startup glue code to uncompress the kernel
3 *
4 * Copyright IBM Corp. 2010
5 *
6 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
7 */
8
9#include <linux/init.h>
10#include <asm/asm-offsets.h>
11#include <asm/thread_info.h>
12#include <asm/page.h>
13#include "sizes.h"
14
15__HEAD
16 .globl startup_continue
17startup_continue:
18 basr %r13,0 # get base
19.LPG1:
20 # setup stack
21 lg %r15,.Lstack-.LPG1(%r13)
22 aghi %r15,-160
23 brasl %r14,decompress_kernel
24 # setup registers for memory mover & branch to target
25 lgr %r4,%r2
26 lg %r2,.Loffset-.LPG1(%r13)
27 la %r4,0(%r2,%r4)
28 lg %r3,.Lmvsize-.LPG1(%r13)
29 lgr %r5,%r3
30 # move the memory mover someplace safe
31 la %r1,0x200
32 mvc 0(mover_end-mover,%r1),mover-.LPG1(%r13)
33 # decompress image is started at 0x11000
34 lgr %r6,%r2
35 br %r1
36mover:
37 mvcle %r2,%r4,0
38 jo mover
39 br %r6
40mover_end:
41
42 .align 8
43.Lstack:
44 .quad 0x8000 + (1<<(PAGE_SHIFT+THREAD_ORDER))
45.Loffset:
46 .quad 0x11000
47.Lmvsize:
48 .quad SZ__bss_start
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
new file mode 100644
index 000000000000..0851eb1e919e
--- /dev/null
+++ b/arch/s390/boot/compressed/misc.c
@@ -0,0 +1,162 @@
1/*
2 * Definitions and wrapper functions for kernel decompressor
3 *
4 * Copyright IBM Corp. 2010
5 *
6 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
7 */
8
9#include <asm/uaccess.h>
10#include <asm/page.h>
11#include <asm/ipl.h>
12#include "sizes.h"
13
14/*
15 * gzip declarations
16 */
17#define STATIC static
18
19#undef memset
20#undef memcpy
21#undef memmove
22#define memzero(s, n) memset((s), 0, (n))
23
24/* Symbols defined by linker scripts */
25extern char input_data[];
26extern int input_len;
27extern char _text, _end;
28extern char _bss, _ebss;
29
30static void error(char *m);
31
32static unsigned long free_mem_ptr;
33static unsigned long free_mem_end_ptr;
34
35#ifdef CONFIG_HAVE_KERNEL_BZIP2
36#define HEAP_SIZE 0x400000
37#else
38#define HEAP_SIZE 0x10000
39#endif
40
41#ifdef CONFIG_KERNEL_GZIP
42#include "../../../../lib/decompress_inflate.c"
43#endif
44
45#ifdef CONFIG_KERNEL_BZIP2
46#include "../../../../lib/decompress_bunzip2.c"
47#endif
48
49#ifdef CONFIG_KERNEL_LZMA
50#include "../../../../lib/decompress_unlzma.c"
51#endif
52
53#ifdef CONFIG_KERNEL_LZO
54#include "../../../../lib/decompress_unlzo.c"
55#endif
56
57extern _sclp_print_early(const char *);
58
59int puts(const char *s)
60{
61 _sclp_print_early(s);
62 return 0;
63}
64
65void *memset(void *s, int c, size_t n)
66{
67 char *xs;
68
69 if (c == 0)
70 return __builtin_memset(s, 0, n);
71
72 xs = (char *) s;
73 if (n > 0)
74 do {
75 *xs++ = c;
76 } while (--n > 0);
77 return s;
78}
79
80void *memcpy(void *__dest, __const void *__src, size_t __n)
81{
82 return __builtin_memcpy(__dest, __src, __n);
83}
84
85void *memmove(void *__dest, __const void *__src, size_t __n)
86{
87 char *d;
88 const char *s;
89
90 if (__dest <= __src)
91 return __builtin_memcpy(__dest, __src, __n);
92 d = __dest + __n;
93 s = __src + __n;
94 while (__n--)
95 *--d = *--s;
96 return __dest;
97}
98
99static void error(char *x)
100{
101 unsigned long long psw = 0x000a0000deadbeefULL;
102
103 puts("\n\n");
104 puts(x);
105 puts("\n\n -- System halted");
106
107 asm volatile("lpsw %0" : : "Q" (psw));
108}
109
110/*
111 * Safe guard the ipl parameter block against a memory area that will be
112 * overwritten. The validity check for the ipl parameter block is complex
113 * (see cio_get_iplinfo and ipl_save_parameters) but if the pointer to
114 * the ipl parameter block intersects with the passed memory area we can
115 * safely assume that we can read from that memory. In that case just copy
116 * the memory to IPL_PARMBLOCK_ORIGIN even if there is no ipl parameter
117 * block.
118 */
119static void check_ipl_parmblock(void *start, unsigned long size)
120{
121 void *src, *dst;
122
123 src = (void *)(unsigned long) S390_lowcore.ipl_parmblock_ptr;
124 if (src + PAGE_SIZE <= start || src >= start + size)
125 return;
126 dst = (void *) IPL_PARMBLOCK_ORIGIN;
127 memmove(dst, src, PAGE_SIZE);
128 S390_lowcore.ipl_parmblock_ptr = IPL_PARMBLOCK_ORIGIN;
129}
130
131unsigned long decompress_kernel(void)
132{
133 unsigned long output_addr;
134 unsigned char *output;
135
136 check_ipl_parmblock((void *) 0, (unsigned long) output + SZ__bss_start);
137 memset(&_bss, 0, &_ebss - &_bss);
138 free_mem_ptr = (unsigned long)&_end;
139 free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
140 output = (unsigned char *) ((free_mem_end_ptr + 4095UL) & -4096UL);
141
142#ifdef CONFIG_BLK_DEV_INITRD
143 /*
144 * Move the initrd right behind the end of the decompressed
145 * kernel image.
146 */
147 if (INITRD_START && INITRD_SIZE &&
148 INITRD_START < (unsigned long) output + SZ__bss_start) {
149 check_ipl_parmblock(output + SZ__bss_start,
150 INITRD_START + INITRD_SIZE);
151 memmove(output + SZ__bss_start,
152 (void *) INITRD_START, INITRD_SIZE);
153 INITRD_START = (unsigned long) output + SZ__bss_start;
154 }
155#endif
156
157 puts("Uncompressing Linux... ");
158 decompress(input_data, input_len, NULL, NULL, output, NULL, error);
159 puts("Ok, booting the kernel.\n");
160 return (unsigned long) output;
161}
162
diff --git a/arch/s390/boot/compressed/vmlinux.lds.S b/arch/s390/boot/compressed/vmlinux.lds.S
new file mode 100644
index 000000000000..d80f79d8dd9c
--- /dev/null
+++ b/arch/s390/boot/compressed/vmlinux.lds.S
@@ -0,0 +1,55 @@
1#include <asm-generic/vmlinux.lds.h>
2
3#ifdef CONFIG_64BIT
4OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
5OUTPUT_ARCH(s390:64-bit)
6#else
7OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390")
8OUTPUT_ARCH(s390)
9#endif
10
11ENTRY(startup)
12
13SECTIONS
14{
15 /* Be careful parts of head_64.S assume startup_32 is at
16 * address 0.
17 */
18 . = 0;
19 .head.text : {
20 _head = . ;
21 HEAD_TEXT
22 _ehead = . ;
23 }
24 .rodata.compressed : {
25 *(.rodata.compressed)
26 }
27 .text : {
28 _text = .; /* Text */
29 *(.text)
30 *(.text.*)
31 _etext = . ;
32 }
33 .rodata : {
34 _rodata = . ;
35 *(.rodata) /* read-only data */
36 *(.rodata.*)
37 _erodata = . ;
38 }
39 .data : {
40 _data = . ;
41 *(.data)
42 *(.data.*)
43 _edata = . ;
44 }
45 . = ALIGN(256);
46 .bss : {
47 _bss = . ;
48 *(.bss)
49 *(.bss.*)
50 *(COMMON)
51 . = ALIGN(8); /* For convenience during zeroing */
52 _ebss = .;
53 }
54 _end = .;
55}
diff --git a/arch/s390/boot/compressed/vmlinux.scr b/arch/s390/boot/compressed/vmlinux.scr
new file mode 100644
index 000000000000..f02382ae5c48
--- /dev/null
+++ b/arch/s390/boot/compressed/vmlinux.scr
@@ -0,0 +1,10 @@
1SECTIONS
2{
3 .rodata.compressed : {
4 input_len = .;
5 LONG(input_data_end - input_data) input_data = .;
6 *(.data)
7 output_len = . - 4;
8 input_data_end = .;
9 }
10}
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 6be4503201ac..58f46734465f 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -78,14 +78,14 @@ static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
78 struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); 78 struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
79 int ret; 79 int ret;
80 80
81 sctx->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; 81 sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
82 sctx->fallback.blk->base.crt_flags |= (tfm->crt_flags & 82 sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
83 CRYPTO_TFM_REQ_MASK); 83 CRYPTO_TFM_REQ_MASK);
84 84
85 ret = crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len); 85 ret = crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
86 if (ret) { 86 if (ret) {
87 tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; 87 tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
88 tfm->crt_flags |= (sctx->fallback.blk->base.crt_flags & 88 tfm->crt_flags |= (sctx->fallback.cip->base.crt_flags &
89 CRYPTO_TFM_RES_MASK); 89 CRYPTO_TFM_RES_MASK);
90 } 90 }
91 return ret; 91 return ret;
diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
index a3209906739e..aa819dac2360 100644
--- a/arch/s390/crypto/prng.c
+++ b/arch/s390/crypto/prng.c
@@ -10,6 +10,7 @@
10#include <linux/module.h> 10#include <linux/module.h>
11#include <linux/moduleparam.h> 11#include <linux/moduleparam.h>
12#include <linux/random.h> 12#include <linux/random.h>
13#include <linux/slab.h>
13#include <asm/debug.h> 14#include <asm/debug.h>
14#include <asm/uaccess.h> 15#include <asm/uaccess.h>
15 16
diff --git a/arch/s390/crypto/sha_common.c b/arch/s390/crypto/sha_common.c
index 7903ec47e6b9..f42dbabc0d30 100644
--- a/arch/s390/crypto/sha_common.c
+++ b/arch/s390/crypto/sha_common.c
@@ -79,7 +79,7 @@ int s390_sha_final(struct shash_desc *desc, u8 *out)
79 memset(ctx->buf + index, 0x00, end - index - 8); 79 memset(ctx->buf + index, 0x00, end - index - 8);
80 80
81 /* 81 /*
82 * Append message length. Well, SHA-512 wants a 128 bit lenght value, 82 * Append message length. Well, SHA-512 wants a 128 bit length value,
83 * nevertheless we use u64, should be enough for now... 83 * nevertheless we use u64, should be enough for now...
84 */ 84 */
85 bits = ctx->count * 8; 85 bits = ctx->count * 8;
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index b416aa11b91e..253f158db668 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.33-rc2 3# Linux kernel version: 2.6.35-rc1
4# Mon Jan 4 09:03:07 2010 4# Fri Jun 4 11:32:40 2010
5# 5#
6CONFIG_SCHED_MC=y 6CONFIG_SCHED_MC=y
7CONFIG_MMU=y 7CONFIG_MMU=y
@@ -17,6 +17,7 @@ CONFIG_GENERIC_TIME=y
17CONFIG_GENERIC_TIME_VSYSCALL=y 17CONFIG_GENERIC_TIME_VSYSCALL=y
18CONFIG_GENERIC_CLOCKEVENTS=y 18CONFIG_GENERIC_CLOCKEVENTS=y
19CONFIG_GENERIC_BUG=y 19CONFIG_GENERIC_BUG=y
20CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y
20CONFIG_NO_IOMEM=y 21CONFIG_NO_IOMEM=y
21CONFIG_NO_DMA=y 22CONFIG_NO_DMA=y
22CONFIG_GENERIC_LOCKBREAK=y 23CONFIG_GENERIC_LOCKBREAK=y
@@ -34,8 +35,17 @@ CONFIG_CONSTRUCTORS=y
34CONFIG_EXPERIMENTAL=y 35CONFIG_EXPERIMENTAL=y
35CONFIG_LOCK_KERNEL=y 36CONFIG_LOCK_KERNEL=y
36CONFIG_INIT_ENV_ARG_LIMIT=32 37CONFIG_INIT_ENV_ARG_LIMIT=32
38CONFIG_CROSS_COMPILE=""
37CONFIG_LOCALVERSION="" 39CONFIG_LOCALVERSION=""
38CONFIG_LOCALVERSION_AUTO=y 40CONFIG_LOCALVERSION_AUTO=y
41CONFIG_HAVE_KERNEL_GZIP=y
42CONFIG_HAVE_KERNEL_BZIP2=y
43CONFIG_HAVE_KERNEL_LZMA=y
44CONFIG_HAVE_KERNEL_LZO=y
45CONFIG_KERNEL_GZIP=y
46# CONFIG_KERNEL_BZIP2 is not set
47# CONFIG_KERNEL_LZMA is not set
48# CONFIG_KERNEL_LZO is not set
39CONFIG_SWAP=y 49CONFIG_SWAP=y
40CONFIG_SYSVIPC=y 50CONFIG_SYSVIPC=y
41CONFIG_SYSVIPC_SYSCTL=y 51CONFIG_SYSVIPC_SYSCTL=y
@@ -55,15 +65,11 @@ CONFIG_TREE_RCU=y
55# CONFIG_RCU_TRACE is not set 65# CONFIG_RCU_TRACE is not set
56CONFIG_RCU_FANOUT=64 66CONFIG_RCU_FANOUT=64
57# CONFIG_RCU_FANOUT_EXACT is not set 67# CONFIG_RCU_FANOUT_EXACT is not set
68# CONFIG_RCU_FAST_NO_HZ is not set
58# CONFIG_TREE_RCU_TRACE is not set 69# CONFIG_TREE_RCU_TRACE is not set
59CONFIG_IKCONFIG=y 70CONFIG_IKCONFIG=y
60CONFIG_IKCONFIG_PROC=y 71CONFIG_IKCONFIG_PROC=y
61CONFIG_LOG_BUF_SHIFT=17 72CONFIG_LOG_BUF_SHIFT=17
62CONFIG_GROUP_SCHED=y
63CONFIG_FAIR_GROUP_SCHED=y
64# CONFIG_RT_GROUP_SCHED is not set
65CONFIG_USER_SCHED=y
66# CONFIG_CGROUP_SCHED is not set
67CONFIG_CGROUPS=y 73CONFIG_CGROUPS=y
68# CONFIG_CGROUP_DEBUG is not set 74# CONFIG_CGROUP_DEBUG is not set
69CONFIG_CGROUP_NS=y 75CONFIG_CGROUP_NS=y
@@ -72,6 +78,8 @@ CONFIG_CGROUP_NS=y
72# CONFIG_CPUSETS is not set 78# CONFIG_CPUSETS is not set
73# CONFIG_CGROUP_CPUACCT is not set 79# CONFIG_CGROUP_CPUACCT is not set
74# CONFIG_RESOURCE_COUNTERS is not set 80# CONFIG_RESOURCE_COUNTERS is not set
81# CONFIG_CGROUP_SCHED is not set
82# CONFIG_BLK_CGROUP is not set
75CONFIG_SYSFS_DEPRECATED=y 83CONFIG_SYSFS_DEPRECATED=y
76CONFIG_SYSFS_DEPRECATED_V2=y 84CONFIG_SYSFS_DEPRECATED_V2=y
77# CONFIG_RELAY is not set 85# CONFIG_RELAY is not set
@@ -86,6 +94,7 @@ CONFIG_INITRAMFS_SOURCE=""
86CONFIG_RD_GZIP=y 94CONFIG_RD_GZIP=y
87CONFIG_RD_BZIP2=y 95CONFIG_RD_BZIP2=y
88CONFIG_RD_LZMA=y 96CONFIG_RD_LZMA=y
97CONFIG_RD_LZO=y
89# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set 98# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
90CONFIG_SYSCTL=y 99CONFIG_SYSCTL=y
91CONFIG_ANON_INODES=y 100CONFIG_ANON_INODES=y
@@ -119,6 +128,7 @@ CONFIG_SLAB=y
119# CONFIG_SLUB is not set 128# CONFIG_SLUB is not set
120# CONFIG_SLOB is not set 129# CONFIG_SLOB is not set
121# CONFIG_PROFILING is not set 130# CONFIG_PROFILING is not set
131CONFIG_TRACEPOINTS=y
122CONFIG_HAVE_OPROFILE=y 132CONFIG_HAVE_OPROFILE=y
123CONFIG_KPROBES=y 133CONFIG_KPROBES=y
124CONFIG_HAVE_SYSCALL_WRAPPERS=y 134CONFIG_HAVE_SYSCALL_WRAPPERS=y
@@ -127,6 +137,7 @@ CONFIG_HAVE_KPROBES=y
127CONFIG_HAVE_KRETPROBES=y 137CONFIG_HAVE_KRETPROBES=y
128CONFIG_HAVE_ARCH_TRACEHOOK=y 138CONFIG_HAVE_ARCH_TRACEHOOK=y
129CONFIG_USE_GENERIC_SMP_HELPERS=y 139CONFIG_USE_GENERIC_SMP_HELPERS=y
140CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
130CONFIG_HAVE_DEFAULT_NO_SPIN_MUTEXES=y 141CONFIG_HAVE_DEFAULT_NO_SPIN_MUTEXES=y
131 142
132# 143#
@@ -149,7 +160,6 @@ CONFIG_STOP_MACHINE=y
149CONFIG_BLOCK=y 160CONFIG_BLOCK=y
150CONFIG_BLK_DEV_BSG=y 161CONFIG_BLK_DEV_BSG=y
151# CONFIG_BLK_DEV_INTEGRITY is not set 162# CONFIG_BLK_DEV_INTEGRITY is not set
152# CONFIG_BLK_CGROUP is not set
153CONFIG_BLOCK_COMPAT=y 163CONFIG_BLOCK_COMPAT=y
154 164
155# 165#
@@ -158,7 +168,6 @@ CONFIG_BLOCK_COMPAT=y
158CONFIG_IOSCHED_NOOP=y 168CONFIG_IOSCHED_NOOP=y
159CONFIG_IOSCHED_DEADLINE=y 169CONFIG_IOSCHED_DEADLINE=y
160CONFIG_IOSCHED_CFQ=y 170CONFIG_IOSCHED_CFQ=y
161# CONFIG_CFQ_GROUP_IOSCHED is not set
162CONFIG_DEFAULT_DEADLINE=y 171CONFIG_DEFAULT_DEADLINE=y
163# CONFIG_DEFAULT_CFQ is not set 172# CONFIG_DEFAULT_CFQ is not set
164# CONFIG_DEFAULT_NOOP is not set 173# CONFIG_DEFAULT_NOOP is not set
@@ -311,7 +320,6 @@ CONFIG_COMPAT_BINFMT_ELF=y
311# CONFIG_HAVE_AOUT is not set 320# CONFIG_HAVE_AOUT is not set
312CONFIG_BINFMT_MISC=m 321CONFIG_BINFMT_MISC=m
313CONFIG_FORCE_MAX_ZONEORDER=9 322CONFIG_FORCE_MAX_ZONEORDER=9
314# CONFIG_PROCESS_DEBUG is not set
315CONFIG_PFAULT=y 323CONFIG_PFAULT=y
316# CONFIG_SHARED_KERNEL is not set 324# CONFIG_SHARED_KERNEL is not set
317# CONFIG_CMM is not set 325# CONFIG_CMM is not set
@@ -338,13 +346,13 @@ CONFIG_PM_SLEEP=y
338CONFIG_HIBERNATION=y 346CONFIG_HIBERNATION=y
339CONFIG_PM_STD_PARTITION="" 347CONFIG_PM_STD_PARTITION=""
340# CONFIG_PM_RUNTIME is not set 348# CONFIG_PM_RUNTIME is not set
349CONFIG_PM_OPS=y
341CONFIG_NET=y 350CONFIG_NET=y
342 351
343# 352#
344# Networking options 353# Networking options
345# 354#
346CONFIG_PACKET=y 355CONFIG_PACKET=y
347# CONFIG_PACKET_MMAP is not set
348CONFIG_UNIX=y 356CONFIG_UNIX=y
349CONFIG_XFRM=y 357CONFIG_XFRM=y
350# CONFIG_XFRM_USER is not set 358# CONFIG_XFRM_USER is not set
@@ -448,6 +456,7 @@ CONFIG_NF_CONNTRACK=m
448# CONFIG_IP6_NF_IPTABLES is not set 456# CONFIG_IP6_NF_IPTABLES is not set
449# CONFIG_IP_DCCP is not set 457# CONFIG_IP_DCCP is not set
450CONFIG_IP_SCTP=m 458CONFIG_IP_SCTP=m
459# CONFIG_NET_SCTPPROBE is not set
451# CONFIG_SCTP_DBG_MSG is not set 460# CONFIG_SCTP_DBG_MSG is not set
452# CONFIG_SCTP_DBG_OBJCNT is not set 461# CONFIG_SCTP_DBG_OBJCNT is not set
453# CONFIG_SCTP_HMAC_NONE is not set 462# CONFIG_SCTP_HMAC_NONE is not set
@@ -456,6 +465,7 @@ CONFIG_SCTP_HMAC_MD5=y
456# CONFIG_RDS is not set 465# CONFIG_RDS is not set
457# CONFIG_TIPC is not set 466# CONFIG_TIPC is not set
458# CONFIG_ATM is not set 467# CONFIG_ATM is not set
468# CONFIG_L2TP is not set
459# CONFIG_BRIDGE is not set 469# CONFIG_BRIDGE is not set
460# CONFIG_VLAN_8021Q is not set 470# CONFIG_VLAN_8021Q is not set
461# CONFIG_DECNET is not set 471# CONFIG_DECNET is not set
@@ -516,12 +526,14 @@ CONFIG_NET_ACT_NAT=m
516# CONFIG_NET_CLS_IND is not set 526# CONFIG_NET_CLS_IND is not set
517CONFIG_NET_SCH_FIFO=y 527CONFIG_NET_SCH_FIFO=y
518# CONFIG_DCB is not set 528# CONFIG_DCB is not set
529CONFIG_RPS=y
519 530
520# 531#
521# Network testing 532# Network testing
522# 533#
523# CONFIG_NET_PKTGEN is not set 534# CONFIG_NET_PKTGEN is not set
524# CONFIG_NET_TCPPROBE is not set 535# CONFIG_NET_TCPPROBE is not set
536# CONFIG_NET_DROP_MONITOR is not set
525CONFIG_CAN=m 537CONFIG_CAN=m
526CONFIG_CAN_RAW=m 538CONFIG_CAN_RAW=m
527CONFIG_CAN_BCM=m 539CONFIG_CAN_BCM=m
@@ -536,6 +548,7 @@ CONFIG_CAN_VCAN=m
536# CONFIG_WIMAX is not set 548# CONFIG_WIMAX is not set
537# CONFIG_RFKILL is not set 549# CONFIG_RFKILL is not set
538# CONFIG_NET_9P is not set 550# CONFIG_NET_9P is not set
551# CONFIG_CAIF is not set
539# CONFIG_PCMCIA is not set 552# CONFIG_PCMCIA is not set
540CONFIG_CCW=y 553CONFIG_CCW=y
541 554
@@ -598,6 +611,7 @@ CONFIG_MISC_DEVICES=y
598# 611#
599# SCSI device support 612# SCSI device support
600# 613#
614CONFIG_SCSI_MOD=y
601# CONFIG_RAID_ATTRS is not set 615# CONFIG_RAID_ATTRS is not set
602CONFIG_SCSI=y 616CONFIG_SCSI=y
603# CONFIG_SCSI_DMA is not set 617# CONFIG_SCSI_DMA is not set
@@ -717,6 +731,7 @@ CONFIG_VIRTIO_NET=m
717# Character devices 731# Character devices
718# 732#
719CONFIG_DEVKMEM=y 733CONFIG_DEVKMEM=y
734# CONFIG_N_GSM is not set
720CONFIG_UNIX98_PTYS=y 735CONFIG_UNIX98_PTYS=y
721# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set 736# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
722CONFIG_LEGACY_PTYS=y 737CONFIG_LEGACY_PTYS=y
@@ -764,6 +779,7 @@ CONFIG_S390_TAPE_34XX=m
764# CONFIG_MONREADER is not set 779# CONFIG_MONREADER is not set
765CONFIG_MONWRITER=m 780CONFIG_MONWRITER=m
766CONFIG_S390_VMUR=m 781CONFIG_S390_VMUR=m
782# CONFIG_RAMOOPS is not set
767 783
768# 784#
769# PPS support 785# PPS support
@@ -777,10 +793,6 @@ CONFIG_S390_VMUR=m
777# CONFIG_NEW_LEDS is not set 793# CONFIG_NEW_LEDS is not set
778CONFIG_ACCESSIBILITY=y 794CONFIG_ACCESSIBILITY=y
779# CONFIG_AUXDISPLAY is not set 795# CONFIG_AUXDISPLAY is not set
780
781#
782# TI VLYNQ
783#
784# CONFIG_STAGING is not set 796# CONFIG_STAGING is not set
785 797
786# 798#
@@ -856,6 +868,7 @@ CONFIG_MISC_FILESYSTEMS=y
856# CONFIG_BEFS_FS is not set 868# CONFIG_BEFS_FS is not set
857# CONFIG_BFS_FS is not set 869# CONFIG_BFS_FS is not set
858# CONFIG_EFS_FS is not set 870# CONFIG_EFS_FS is not set
871# CONFIG_LOGFS is not set
859# CONFIG_CRAMFS is not set 872# CONFIG_CRAMFS is not set
860# CONFIG_SQUASHFS is not set 873# CONFIG_SQUASHFS is not set
861# CONFIG_VXFS_FS is not set 874# CONFIG_VXFS_FS is not set
@@ -884,6 +897,7 @@ CONFIG_SUNRPC=y
884# CONFIG_RPCSEC_GSS_KRB5 is not set 897# CONFIG_RPCSEC_GSS_KRB5 is not set
885# CONFIG_RPCSEC_GSS_SPKM3 is not set 898# CONFIG_RPCSEC_GSS_SPKM3 is not set
886# CONFIG_SMB_FS is not set 899# CONFIG_SMB_FS is not set
900# CONFIG_CEPH_FS is not set
887# CONFIG_CIFS is not set 901# CONFIG_CIFS is not set
888# CONFIG_NCP_FS is not set 902# CONFIG_NCP_FS is not set
889# CONFIG_CODA_FS is not set 903# CONFIG_CODA_FS is not set
@@ -945,6 +959,7 @@ CONFIG_DEBUG_MUTEXES=y
945# CONFIG_LOCK_STAT is not set 959# CONFIG_LOCK_STAT is not set
946CONFIG_DEBUG_SPINLOCK_SLEEP=y 960CONFIG_DEBUG_SPINLOCK_SLEEP=y
947# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set 961# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
962CONFIG_STACKTRACE=y
948# CONFIG_DEBUG_KOBJECT is not set 963# CONFIG_DEBUG_KOBJECT is not set
949CONFIG_DEBUG_BUGVERBOSE=y 964CONFIG_DEBUG_BUGVERBOSE=y
950# CONFIG_DEBUG_INFO is not set 965# CONFIG_DEBUG_INFO is not set
@@ -962,16 +977,22 @@ CONFIG_DEBUG_MEMORY_INIT=y
962# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set 977# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
963CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y 978CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
964# CONFIG_LKDTM is not set 979# CONFIG_LKDTM is not set
980# CONFIG_CPU_NOTIFIER_ERROR_INJECT is not set
965# CONFIG_FAULT_INJECTION is not set 981# CONFIG_FAULT_INJECTION is not set
966# CONFIG_LATENCYTOP is not set 982# CONFIG_LATENCYTOP is not set
967CONFIG_SYSCTL_SYSCALL_CHECK=y 983CONFIG_SYSCTL_SYSCALL_CHECK=y
968# CONFIG_DEBUG_PAGEALLOC is not set 984# CONFIG_DEBUG_PAGEALLOC is not set
985CONFIG_NOP_TRACER=y
969CONFIG_HAVE_FUNCTION_TRACER=y 986CONFIG_HAVE_FUNCTION_TRACER=y
970CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y 987CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
971CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y 988CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
972CONFIG_HAVE_DYNAMIC_FTRACE=y 989CONFIG_HAVE_DYNAMIC_FTRACE=y
973CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y 990CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
974CONFIG_HAVE_SYSCALL_TRACEPOINTS=y 991CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
992CONFIG_RING_BUFFER=y
993CONFIG_EVENT_TRACING=y
994CONFIG_CONTEXT_SWITCH_TRACER=y
995CONFIG_TRACING=y
975CONFIG_TRACING_SUPPORT=y 996CONFIG_TRACING_SUPPORT=y
976CONFIG_FTRACE=y 997CONFIG_FTRACE=y
977# CONFIG_FUNCTION_TRACER is not set 998# CONFIG_FUNCTION_TRACER is not set
@@ -988,10 +1009,16 @@ CONFIG_BRANCH_PROFILE_NONE=y
988# CONFIG_KMEMTRACE is not set 1009# CONFIG_KMEMTRACE is not set
989# CONFIG_WORKQUEUE_TRACER is not set 1010# CONFIG_WORKQUEUE_TRACER is not set
990# CONFIG_BLK_DEV_IO_TRACE is not set 1011# CONFIG_BLK_DEV_IO_TRACE is not set
1012CONFIG_KPROBE_EVENT=y
1013# CONFIG_RING_BUFFER_BENCHMARK is not set
991# CONFIG_DYNAMIC_DEBUG is not set 1014# CONFIG_DYNAMIC_DEBUG is not set
1015# CONFIG_ATOMIC64_SELFTEST is not set
992CONFIG_SAMPLES=y 1016CONFIG_SAMPLES=y
1017# CONFIG_SAMPLE_TRACEPOINTS is not set
1018# CONFIG_SAMPLE_TRACE_EVENTS is not set
993# CONFIG_SAMPLE_KOBJECT is not set 1019# CONFIG_SAMPLE_KOBJECT is not set
994# CONFIG_SAMPLE_KPROBES is not set 1020# CONFIG_SAMPLE_KPROBES is not set
1021# CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is not set
995 1022
996# 1023#
997# Security options 1024# Security options
@@ -1025,6 +1052,7 @@ CONFIG_CRYPTO_MANAGER=y
1025CONFIG_CRYPTO_MANAGER2=y 1052CONFIG_CRYPTO_MANAGER2=y
1026CONFIG_CRYPTO_GF128MUL=m 1053CONFIG_CRYPTO_GF128MUL=m
1027# CONFIG_CRYPTO_NULL is not set 1054# CONFIG_CRYPTO_NULL is not set
1055# CONFIG_CRYPTO_PCRYPT is not set
1028CONFIG_CRYPTO_WORKQUEUE=y 1056CONFIG_CRYPTO_WORKQUEUE=y
1029# CONFIG_CRYPTO_CRYPTD is not set 1057# CONFIG_CRYPTO_CRYPTD is not set
1030CONFIG_CRYPTO_AUTHENC=m 1058CONFIG_CRYPTO_AUTHENC=m
@@ -1112,7 +1140,7 @@ CONFIG_CRYPTO_SHA512_S390=m
1112# CONFIG_CRYPTO_DES_S390 is not set 1140# CONFIG_CRYPTO_DES_S390 is not set
1113# CONFIG_CRYPTO_AES_S390 is not set 1141# CONFIG_CRYPTO_AES_S390 is not set
1114CONFIG_S390_PRNG=m 1142CONFIG_S390_PRNG=m
1115# CONFIG_BINARY_PRINTF is not set 1143CONFIG_BINARY_PRINTF=y
1116 1144
1117# 1145#
1118# Library routines 1146# Library routines
@@ -1129,14 +1157,16 @@ CONFIG_LIBCRC32C=m
1129CONFIG_ZLIB_INFLATE=y 1157CONFIG_ZLIB_INFLATE=y
1130CONFIG_ZLIB_DEFLATE=m 1158CONFIG_ZLIB_DEFLATE=m
1131CONFIG_LZO_COMPRESS=m 1159CONFIG_LZO_COMPRESS=m
1132CONFIG_LZO_DECOMPRESS=m 1160CONFIG_LZO_DECOMPRESS=y
1133CONFIG_DECOMPRESS_GZIP=y 1161CONFIG_DECOMPRESS_GZIP=y
1134CONFIG_DECOMPRESS_BZIP2=y 1162CONFIG_DECOMPRESS_BZIP2=y
1135CONFIG_DECOMPRESS_LZMA=y 1163CONFIG_DECOMPRESS_LZMA=y
1164CONFIG_DECOMPRESS_LZO=y
1136CONFIG_NLATTR=y 1165CONFIG_NLATTR=y
1137CONFIG_HAVE_KVM=y 1166CONFIG_HAVE_KVM=y
1138CONFIG_VIRTUALIZATION=y 1167CONFIG_VIRTUALIZATION=y
1139CONFIG_KVM=m 1168CONFIG_KVM=m
1169# CONFIG_VHOST_NET is not set
1140CONFIG_VIRTIO=y 1170CONFIG_VIRTIO=y
1141CONFIG_VIRTIO_RING=y 1171CONFIG_VIRTIO_RING=y
1142CONFIG_VIRTIO_BALLOON=m 1172CONFIG_VIRTIO_BALLOON=m
diff --git a/arch/s390/hypfs/hypfs.h b/arch/s390/hypfs/hypfs.h
index aea572009d60..fa487d4cc08b 100644
--- a/arch/s390/hypfs/hypfs.h
+++ b/arch/s390/hypfs/hypfs.h
@@ -11,6 +11,7 @@
11 11
12#include <linux/fs.h> 12#include <linux/fs.h>
13#include <linux/types.h> 13#include <linux/types.h>
14#include <linux/debugfs.h>
14 15
15#define REG_FILE_MODE 0440 16#define REG_FILE_MODE 0440
16#define UPDATE_FILE_MODE 0220 17#define UPDATE_FILE_MODE 0220
@@ -34,6 +35,9 @@ extern int hypfs_diag_create_files(struct super_block *sb, struct dentry *root);
34 35
35/* VM Hypervisor */ 36/* VM Hypervisor */
36extern int hypfs_vm_init(void); 37extern int hypfs_vm_init(void);
38extern void hypfs_vm_exit(void);
37extern int hypfs_vm_create_files(struct super_block *sb, struct dentry *root); 39extern int hypfs_vm_create_files(struct super_block *sb, struct dentry *root);
38 40
41/* Directory for debugfs files */
42extern struct dentry *hypfs_dbfs_dir;
39#endif /* _HYPFS_H_ */ 43#endif /* _HYPFS_H_ */
diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
index 2b92d501425f..1211bb1d2f24 100644
--- a/arch/s390/hypfs/hypfs_diag.c
+++ b/arch/s390/hypfs/hypfs_diag.c
@@ -12,10 +12,10 @@
12 12
13#include <linux/types.h> 13#include <linux/types.h>
14#include <linux/errno.h> 14#include <linux/errno.h>
15#include <linux/gfp.h>
16#include <linux/slab.h> 15#include <linux/slab.h>
17#include <linux/string.h> 16#include <linux/string.h>
18#include <linux/vmalloc.h> 17#include <linux/vmalloc.h>
18#include <linux/mm.h>
19#include <asm/ebcdic.h> 19#include <asm/ebcdic.h>
20#include "hypfs.h" 20#include "hypfs.h"
21 21
@@ -23,6 +23,8 @@
23#define CPU_NAME_LEN 16 /* type name len of cpus in diag224 name table */ 23#define CPU_NAME_LEN 16 /* type name len of cpus in diag224 name table */
24#define TMP_SIZE 64 /* size of temporary buffers */ 24#define TMP_SIZE 64 /* size of temporary buffers */
25 25
26#define DBFS_D204_HDR_VERSION 0
27
26/* diag 204 subcodes */ 28/* diag 204 subcodes */
27enum diag204_sc { 29enum diag204_sc {
28 SUBC_STIB4 = 4, 30 SUBC_STIB4 = 4,
@@ -48,6 +50,8 @@ static void *diag204_buf; /* 4K aligned buffer for diag204 data */
48static void *diag204_buf_vmalloc; /* vmalloc pointer for diag204 data */ 50static void *diag204_buf_vmalloc; /* vmalloc pointer for diag204 data */
49static int diag204_buf_pages; /* number of pages for diag204 data */ 51static int diag204_buf_pages; /* number of pages for diag204 data */
50 52
53static struct dentry *dbfs_d204_file;
54
51/* 55/*
52 * DIAG 204 data structures and member access functions. 56 * DIAG 204 data structures and member access functions.
53 * 57 *
@@ -365,18 +369,21 @@ static void diag204_free_buffer(void)
365 } else { 369 } else {
366 free_pages((unsigned long) diag204_buf, 0); 370 free_pages((unsigned long) diag204_buf, 0);
367 } 371 }
368 diag204_buf_pages = 0;
369 diag204_buf = NULL; 372 diag204_buf = NULL;
370} 373}
371 374
375static void *page_align_ptr(void *ptr)
376{
377 return (void *) PAGE_ALIGN((unsigned long) ptr);
378}
379
372static void *diag204_alloc_vbuf(int pages) 380static void *diag204_alloc_vbuf(int pages)
373{ 381{
374 /* The buffer has to be page aligned! */ 382 /* The buffer has to be page aligned! */
375 diag204_buf_vmalloc = vmalloc(PAGE_SIZE * (pages + 1)); 383 diag204_buf_vmalloc = vmalloc(PAGE_SIZE * (pages + 1));
376 if (!diag204_buf_vmalloc) 384 if (!diag204_buf_vmalloc)
377 return ERR_PTR(-ENOMEM); 385 return ERR_PTR(-ENOMEM);
378 diag204_buf = (void*)((unsigned long)diag204_buf_vmalloc 386 diag204_buf = page_align_ptr(diag204_buf_vmalloc);
379 & ~0xfffUL) + 0x1000;
380 diag204_buf_pages = pages; 387 diag204_buf_pages = pages;
381 return diag204_buf; 388 return diag204_buf;
382} 389}
@@ -469,17 +476,26 @@ fail_alloc:
469 return rc; 476 return rc;
470} 477}
471 478
479static int diag204_do_store(void *buf, int pages)
480{
481 int rc;
482
483 rc = diag204((unsigned long) diag204_store_sc |
484 (unsigned long) diag204_info_type, pages, buf);
485 return rc < 0 ? -ENOSYS : 0;
486}
487
472static void *diag204_store(void) 488static void *diag204_store(void)
473{ 489{
474 void *buf; 490 void *buf;
475 int pages; 491 int pages, rc;
476 492
477 buf = diag204_get_buffer(diag204_info_type, &pages); 493 buf = diag204_get_buffer(diag204_info_type, &pages);
478 if (IS_ERR(buf)) 494 if (IS_ERR(buf))
479 goto out; 495 goto out;
480 if (diag204((unsigned long)diag204_store_sc | 496 rc = diag204_do_store(buf, pages);
481 (unsigned long)diag204_info_type, pages, buf) < 0) 497 if (rc)
482 return ERR_PTR(-ENOSYS); 498 return ERR_PTR(rc);
483out: 499out:
484 return buf; 500 return buf;
485} 501}
@@ -488,7 +504,7 @@ out:
488 504
489static int diag224(void *ptr) 505static int diag224(void *ptr)
490{ 506{
491 int rc = -ENOTSUPP; 507 int rc = -EOPNOTSUPP;
492 508
493 asm volatile( 509 asm volatile(
494 " diag %1,%2,0x224\n" 510 " diag %1,%2,0x224\n"
@@ -507,7 +523,7 @@ static int diag224_get_name_table(void)
507 return -ENOMEM; 523 return -ENOMEM;
508 if (diag224(diag224_cpu_names)) { 524 if (diag224(diag224_cpu_names)) {
509 kfree(diag224_cpu_names); 525 kfree(diag224_cpu_names);
510 return -ENOTSUPP; 526 return -EOPNOTSUPP;
511 } 527 }
512 EBCASC(diag224_cpu_names + 16, (*diag224_cpu_names + 1) * 16); 528 EBCASC(diag224_cpu_names + 16, (*diag224_cpu_names + 1) * 16);
513 return 0; 529 return 0;
@@ -527,6 +543,92 @@ static int diag224_idx2name(int index, char *name)
527 return 0; 543 return 0;
528} 544}
529 545
546struct dbfs_d204_hdr {
547 u64 len; /* Length of d204 buffer without header */
548 u16 version; /* Version of header */
549 u8 sc; /* Used subcode */
550 char reserved[53];
551} __attribute__ ((packed));
552
553struct dbfs_d204 {
554 struct dbfs_d204_hdr hdr; /* 64 byte header */
555 char buf[]; /* d204 buffer */
556} __attribute__ ((packed));
557
558struct dbfs_d204_private {
559 struct dbfs_d204 *d204; /* Aligned d204 data with header */
560 void *base; /* Base pointer (needed for vfree) */
561};
562
563static int dbfs_d204_open(struct inode *inode, struct file *file)
564{
565 struct dbfs_d204_private *data;
566 struct dbfs_d204 *d204;
567 int rc, buf_size;
568
569 data = kzalloc(sizeof(*data), GFP_KERNEL);
570 if (!data)
571 return -ENOMEM;
572 buf_size = PAGE_SIZE * (diag204_buf_pages + 1) + sizeof(d204->hdr);
573 data->base = vmalloc(buf_size);
574 if (!data->base) {
575 rc = -ENOMEM;
576 goto fail_kfree_data;
577 }
578 memset(data->base, 0, buf_size);
579 d204 = page_align_ptr(data->base + sizeof(d204->hdr))
580 - sizeof(d204->hdr);
581 rc = diag204_do_store(&d204->buf, diag204_buf_pages);
582 if (rc)
583 goto fail_vfree_base;
584 d204->hdr.version = DBFS_D204_HDR_VERSION;
585 d204->hdr.len = PAGE_SIZE * diag204_buf_pages;
586 d204->hdr.sc = diag204_store_sc;
587 data->d204 = d204;
588 file->private_data = data;
589 return nonseekable_open(inode, file);
590
591fail_vfree_base:
592 vfree(data->base);
593fail_kfree_data:
594 kfree(data);
595 return rc;
596}
597
598static int dbfs_d204_release(struct inode *inode, struct file *file)
599{
600 struct dbfs_d204_private *data = file->private_data;
601
602 vfree(data->base);
603 kfree(data);
604 return 0;
605}
606
607static ssize_t dbfs_d204_read(struct file *file, char __user *buf,
608 size_t size, loff_t *ppos)
609{
610 struct dbfs_d204_private *data = file->private_data;
611
612 return simple_read_from_buffer(buf, size, ppos, data->d204,
613 data->d204->hdr.len +
614 sizeof(data->d204->hdr));
615}
616
617static const struct file_operations dbfs_d204_ops = {
618 .open = dbfs_d204_open,
619 .read = dbfs_d204_read,
620 .release = dbfs_d204_release,
621};
622
623static int hypfs_dbfs_init(void)
624{
625 dbfs_d204_file = debugfs_create_file("diag_204", 0400, hypfs_dbfs_dir,
626 NULL, &dbfs_d204_ops);
627 if (IS_ERR(dbfs_d204_file))
628 return PTR_ERR(dbfs_d204_file);
629 return 0;
630}
631
530__init int hypfs_diag_init(void) 632__init int hypfs_diag_init(void)
531{ 633{
532 int rc; 634 int rc;
@@ -541,11 +643,17 @@ __init int hypfs_diag_init(void)
541 pr_err("The hardware system does not provide all " 643 pr_err("The hardware system does not provide all "
542 "functions required by hypfs\n"); 644 "functions required by hypfs\n");
543 } 645 }
646 if (diag204_info_type == INFO_EXT) {
647 rc = hypfs_dbfs_init();
648 if (rc)
649 diag204_free_buffer();
650 }
544 return rc; 651 return rc;
545} 652}
546 653
547void hypfs_diag_exit(void) 654void hypfs_diag_exit(void)
548{ 655{
656 debugfs_remove(dbfs_d204_file);
549 diag224_delete_name_table(); 657 diag224_delete_name_table();
550 diag204_free_buffer(); 658 diag204_free_buffer();
551} 659}
diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c
index f0b0d31f0b48..ee5ab1a578e7 100644
--- a/arch/s390/hypfs/hypfs_vm.c
+++ b/arch/s390/hypfs/hypfs_vm.c
@@ -10,14 +10,18 @@
10#include <linux/string.h> 10#include <linux/string.h>
11#include <linux/vmalloc.h> 11#include <linux/vmalloc.h>
12#include <asm/ebcdic.h> 12#include <asm/ebcdic.h>
13#include <asm/timex.h>
13#include "hypfs.h" 14#include "hypfs.h"
14 15
15#define NAME_LEN 8 16#define NAME_LEN 8
17#define DBFS_D2FC_HDR_VERSION 0
16 18
17static char local_guest[] = " "; 19static char local_guest[] = " ";
18static char all_guests[] = "* "; 20static char all_guests[] = "* ";
19static char *guest_query; 21static char *guest_query;
20 22
23static struct dentry *dbfs_d2fc_file;
24
21struct diag2fc_data { 25struct diag2fc_data {
22 __u32 version; 26 __u32 version;
23 __u32 flags; 27 __u32 flags;
@@ -76,23 +80,26 @@ static int diag2fc(int size, char* query, void *addr)
76 return -residual_cnt; 80 return -residual_cnt;
77} 81}
78 82
79static struct diag2fc_data *diag2fc_store(char *query, int *count) 83/*
84 * Allocate buffer for "query" and store diag 2fc at "offset"
85 */
86static void *diag2fc_store(char *query, unsigned int *count, int offset)
80{ 87{
88 void *data;
81 int size; 89 int size;
82 struct diag2fc_data *data;
83 90
84 do { 91 do {
85 size = diag2fc(0, query, NULL); 92 size = diag2fc(0, query, NULL);
86 if (size < 0) 93 if (size < 0)
87 return ERR_PTR(-EACCES); 94 return ERR_PTR(-EACCES);
88 data = vmalloc(size); 95 data = vmalloc(size + offset);
89 if (!data) 96 if (!data)
90 return ERR_PTR(-ENOMEM); 97 return ERR_PTR(-ENOMEM);
91 if (diag2fc(size, query, data) == 0) 98 if (diag2fc(size, query, data + offset) == 0)
92 break; 99 break;
93 vfree(data); 100 vfree(data);
94 } while (1); 101 } while (1);
95 *count = (size / sizeof(*data)); 102 *count = (size / sizeof(struct diag2fc_data));
96 103
97 return data; 104 return data;
98} 105}
@@ -168,9 +175,10 @@ int hypfs_vm_create_files(struct super_block *sb, struct dentry *root)
168{ 175{
169 struct dentry *dir, *file; 176 struct dentry *dir, *file;
170 struct diag2fc_data *data; 177 struct diag2fc_data *data;
171 int rc, i, count = 0; 178 unsigned int count = 0;
179 int rc, i;
172 180
173 data = diag2fc_store(guest_query, &count); 181 data = diag2fc_store(guest_query, &count, 0);
174 if (IS_ERR(data)) 182 if (IS_ERR(data))
175 return PTR_ERR(data); 183 return PTR_ERR(data);
176 184
@@ -218,8 +226,61 @@ failed:
218 return rc; 226 return rc;
219} 227}
220 228
229struct dbfs_d2fc_hdr {
230 u64 len; /* Length of d2fc buffer without header */
231 u16 version; /* Version of header */
232 char tod_ext[16]; /* TOD clock for d2fc */
233 u64 count; /* Number of VM guests in d2fc buffer */
234 char reserved[30];
235} __attribute__ ((packed));
236
237struct dbfs_d2fc {
238 struct dbfs_d2fc_hdr hdr; /* 64 byte header */
239 char buf[]; /* d2fc buffer */
240} __attribute__ ((packed));
241
242static int dbfs_d2fc_open(struct inode *inode, struct file *file)
243{
244 struct dbfs_d2fc *data;
245 unsigned int count;
246
247 data = diag2fc_store(guest_query, &count, sizeof(data->hdr));
248 if (IS_ERR(data))
249 return PTR_ERR(data);
250 get_clock_ext(data->hdr.tod_ext);
251 data->hdr.len = count * sizeof(struct diag2fc_data);
252 data->hdr.version = DBFS_D2FC_HDR_VERSION;
253 data->hdr.count = count;
254 memset(&data->hdr.reserved, 0, sizeof(data->hdr.reserved));
255 file->private_data = data;
256 return nonseekable_open(inode, file);
257}
258
259static int dbfs_d2fc_release(struct inode *inode, struct file *file)
260{
261 diag2fc_free(file->private_data);
262 return 0;
263}
264
265static ssize_t dbfs_d2fc_read(struct file *file, char __user *buf,
266 size_t size, loff_t *ppos)
267{
268 struct dbfs_d2fc *data = file->private_data;
269
270 return simple_read_from_buffer(buf, size, ppos, data, data->hdr.len +
271 sizeof(struct dbfs_d2fc_hdr));
272}
273
274static const struct file_operations dbfs_d2fc_ops = {
275 .open = dbfs_d2fc_open,
276 .read = dbfs_d2fc_read,
277 .release = dbfs_d2fc_release,
278};
279
221int hypfs_vm_init(void) 280int hypfs_vm_init(void)
222{ 281{
282 if (!MACHINE_IS_VM)
283 return 0;
223 if (diag2fc(0, all_guests, NULL) > 0) 284 if (diag2fc(0, all_guests, NULL) > 0)
224 guest_query = all_guests; 285 guest_query = all_guests;
225 else if (diag2fc(0, local_guest, NULL) > 0) 286 else if (diag2fc(0, local_guest, NULL) > 0)
@@ -227,5 +288,17 @@ int hypfs_vm_init(void)
227 else 288 else
228 return -EACCES; 289 return -EACCES;
229 290
291 dbfs_d2fc_file = debugfs_create_file("diag_2fc", 0400, hypfs_dbfs_dir,
292 NULL, &dbfs_d2fc_ops);
293 if (IS_ERR(dbfs_d2fc_file))
294 return PTR_ERR(dbfs_d2fc_file);
295
230 return 0; 296 return 0;
231} 297}
298
299void hypfs_vm_exit(void)
300{
301 if (!MACHINE_IS_VM)
302 return;
303 debugfs_remove(dbfs_d2fc_file);
304}
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 341aff2687a5..6b120f073043 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -14,8 +14,8 @@
14#include <linux/fs.h> 14#include <linux/fs.h>
15#include <linux/namei.h> 15#include <linux/namei.h>
16#include <linux/vfs.h> 16#include <linux/vfs.h>
17#include <linux/slab.h>
17#include <linux/pagemap.h> 18#include <linux/pagemap.h>
18#include <linux/gfp.h>
19#include <linux/time.h> 19#include <linux/time.h>
20#include <linux/parser.h> 20#include <linux/parser.h>
21#include <linux/sysfs.h> 21#include <linux/sysfs.h>
@@ -46,6 +46,8 @@ static const struct super_operations hypfs_s_ops;
46/* start of list of all dentries, which have to be deleted on update */ 46/* start of list of all dentries, which have to be deleted on update */
47static struct dentry *hypfs_last_dentry; 47static struct dentry *hypfs_last_dentry;
48 48
49struct dentry *hypfs_dbfs_dir;
50
49static void hypfs_update_update(struct super_block *sb) 51static void hypfs_update_update(struct super_block *sb)
50{ 52{
51 struct hypfs_sb_info *sb_info = sb->s_fs_info; 53 struct hypfs_sb_info *sb_info = sb->s_fs_info;
@@ -145,7 +147,7 @@ static int hypfs_open(struct inode *inode, struct file *filp)
145 } 147 }
146 mutex_unlock(&fs_info->lock); 148 mutex_unlock(&fs_info->lock);
147 } 149 }
148 return 0; 150 return nonseekable_open(inode, filp);
149} 151}
150 152
151static ssize_t hypfs_aio_read(struct kiocb *iocb, const struct iovec *iov, 153static ssize_t hypfs_aio_read(struct kiocb *iocb, const struct iovec *iov,
@@ -288,46 +290,30 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
288 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 290 sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
289 sb->s_magic = HYPFS_MAGIC; 291 sb->s_magic = HYPFS_MAGIC;
290 sb->s_op = &hypfs_s_ops; 292 sb->s_op = &hypfs_s_ops;
291 if (hypfs_parse_options(data, sb)) { 293 if (hypfs_parse_options(data, sb))
292 rc = -EINVAL; 294 return -EINVAL;
293 goto err_alloc;
294 }
295 root_inode = hypfs_make_inode(sb, S_IFDIR | 0755); 295 root_inode = hypfs_make_inode(sb, S_IFDIR | 0755);
296 if (!root_inode) { 296 if (!root_inode)
297 rc = -ENOMEM; 297 return -ENOMEM;
298 goto err_alloc;
299 }
300 root_inode->i_op = &simple_dir_inode_operations; 298 root_inode->i_op = &simple_dir_inode_operations;
301 root_inode->i_fop = &simple_dir_operations; 299 root_inode->i_fop = &simple_dir_operations;
302 root_dentry = d_alloc_root(root_inode); 300 sb->s_root = root_dentry = d_alloc_root(root_inode);
303 if (!root_dentry) { 301 if (!root_dentry) {
304 iput(root_inode); 302 iput(root_inode);
305 rc = -ENOMEM; 303 return -ENOMEM;
306 goto err_alloc;
307 } 304 }
308 if (MACHINE_IS_VM) 305 if (MACHINE_IS_VM)
309 rc = hypfs_vm_create_files(sb, root_dentry); 306 rc = hypfs_vm_create_files(sb, root_dentry);
310 else 307 else
311 rc = hypfs_diag_create_files(sb, root_dentry); 308 rc = hypfs_diag_create_files(sb, root_dentry);
312 if (rc) 309 if (rc)
313 goto err_tree; 310 return rc;
314 sbi->update_file = hypfs_create_update_file(sb, root_dentry); 311 sbi->update_file = hypfs_create_update_file(sb, root_dentry);
315 if (IS_ERR(sbi->update_file)) { 312 if (IS_ERR(sbi->update_file))
316 rc = PTR_ERR(sbi->update_file); 313 return PTR_ERR(sbi->update_file);
317 goto err_tree;
318 }
319 hypfs_update_update(sb); 314 hypfs_update_update(sb);
320 sb->s_root = root_dentry;
321 pr_info("Hypervisor filesystem mounted\n"); 315 pr_info("Hypervisor filesystem mounted\n");
322 return 0; 316 return 0;
323
324err_tree:
325 hypfs_delete_tree(root_dentry);
326 d_genocide(root_dentry);
327 dput(root_dentry);
328err_alloc:
329 kfree(sbi);
330 return rc;
331} 317}
332 318
333static int hypfs_get_super(struct file_system_type *fst, int flags, 319static int hypfs_get_super(struct file_system_type *fst, int flags,
@@ -340,12 +326,12 @@ static void hypfs_kill_super(struct super_block *sb)
340{ 326{
341 struct hypfs_sb_info *sb_info = sb->s_fs_info; 327 struct hypfs_sb_info *sb_info = sb->s_fs_info;
342 328
343 if (sb->s_root) { 329 if (sb->s_root)
344 hypfs_delete_tree(sb->s_root); 330 hypfs_delete_tree(sb->s_root);
331 if (sb_info->update_file)
345 hypfs_remove(sb_info->update_file); 332 hypfs_remove(sb_info->update_file);
346 kfree(sb->s_fs_info); 333 kfree(sb->s_fs_info);
347 sb->s_fs_info = NULL; 334 sb->s_fs_info = NULL;
348 }
349 kill_litter_super(sb); 335 kill_litter_super(sb);
350} 336}
351 337
@@ -484,20 +470,22 @@ static int __init hypfs_init(void)
484{ 470{
485 int rc; 471 int rc;
486 472
487 if (MACHINE_IS_VM) { 473 hypfs_dbfs_dir = debugfs_create_dir("s390_hypfs", NULL);
488 if (hypfs_vm_init()) 474 if (IS_ERR(hypfs_dbfs_dir))
489 /* no diag 2fc, just exit */ 475 return PTR_ERR(hypfs_dbfs_dir);
490 return -ENODATA; 476
491 } else { 477 if (hypfs_diag_init()) {
492 if (hypfs_diag_init()) { 478 rc = -ENODATA;
493 rc = -ENODATA; 479 goto fail_debugfs_remove;
494 goto fail_diag; 480 }
495 } 481 if (hypfs_vm_init()) {
482 rc = -ENODATA;
483 goto fail_hypfs_diag_exit;
496 } 484 }
497 s390_kobj = kobject_create_and_add("s390", hypervisor_kobj); 485 s390_kobj = kobject_create_and_add("s390", hypervisor_kobj);
498 if (!s390_kobj) { 486 if (!s390_kobj) {
499 rc = -ENOMEM; 487 rc = -ENOMEM;
500 goto fail_sysfs; 488 goto fail_hypfs_vm_exit;
501 } 489 }
502 rc = register_filesystem(&hypfs_type); 490 rc = register_filesystem(&hypfs_type);
503 if (rc) 491 if (rc)
@@ -506,18 +494,22 @@ static int __init hypfs_init(void)
506 494
507fail_filesystem: 495fail_filesystem:
508 kobject_put(s390_kobj); 496 kobject_put(s390_kobj);
509fail_sysfs: 497fail_hypfs_vm_exit:
510 if (!MACHINE_IS_VM) 498 hypfs_vm_exit();
511 hypfs_diag_exit(); 499fail_hypfs_diag_exit:
512fail_diag: 500 hypfs_diag_exit();
501fail_debugfs_remove:
502 debugfs_remove(hypfs_dbfs_dir);
503
513 pr_err("Initialization of hypfs failed with rc=%i\n", rc); 504 pr_err("Initialization of hypfs failed with rc=%i\n", rc);
514 return rc; 505 return rc;
515} 506}
516 507
517static void __exit hypfs_exit(void) 508static void __exit hypfs_exit(void)
518{ 509{
519 if (!MACHINE_IS_VM) 510 hypfs_diag_exit();
520 hypfs_diag_exit(); 511 hypfs_vm_exit();
512 debugfs_remove(hypfs_dbfs_dir);
521 unregister_filesystem(&hypfs_type); 513 unregister_filesystem(&hypfs_type);
522 kobject_put(s390_kobj); 514 kobject_put(s390_kobj);
523} 515}
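
The reworked hypfs_init()/hypfs_exit() above pairs each setup step with a matching teardown and unwinds them in reverse order through the fail_* labels. A standalone sketch of that unwinding idiom follows; the demo_* step names and the simulated failure are invented and only mirror the structure, not hypfs itself.

/* Standalone sketch of goto-based error unwinding: later steps are undone first. */
#include <stdio.h>

static int setup_debugfs(void) { puts("debugfs dir created");  return 0; }
static void undo_debugfs(void) { puts("debugfs dir removed");  }
static int setup_diag(void)    { puts("diag initialized");     return 0; }
static void undo_diag(void)    { puts("diag exited");          }
static int setup_vm(void)      { puts("vm init failed");       return -1; /* simulate failure */ }

static int demo_init(void)
{
        int rc;

        rc = setup_debugfs();
        if (rc)
                goto fail;
        rc = setup_diag();
        if (rc)
                goto fail_debugfs;
        rc = setup_vm();
        if (rc)
                goto fail_diag;
        return 0;

fail_diag:              /* the most recent successful step is undone first ... */
        undo_diag();
fail_debugfs:           /* ... then the earlier ones */
        undo_debugfs();
fail:
        return rc;
}

int main(void)
{
        return demo_init() ? 1 : 0;
}
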
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index 2a113d6a7dfd..76daea117181 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -15,11 +15,10 @@
15 15
16#include <linux/compiler.h> 16#include <linux/compiler.h>
17#include <linux/types.h> 17#include <linux/types.h>
18#include <asm/system.h>
18 19
19#define ATOMIC_INIT(i) { (i) } 20#define ATOMIC_INIT(i) { (i) }
20 21
21#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
22
23#define __CS_LOOP(ptr, op_val, op_string) ({ \ 22#define __CS_LOOP(ptr, op_val, op_string) ({ \
24 int old_val, new_val; \ 23 int old_val, new_val; \
25 asm volatile( \ 24 asm volatile( \
@@ -35,26 +34,6 @@
35 new_val; \ 34 new_val; \
36}) 35})
37 36
38#else /* __GNUC__ */
39
40#define __CS_LOOP(ptr, op_val, op_string) ({ \
41 int old_val, new_val; \
42 asm volatile( \
43 " l %0,0(%3)\n" \
44 "0: lr %1,%0\n" \
45 op_string " %1,%4\n" \
46 " cs %0,%1,0(%3)\n" \
47 " jl 0b" \
48 : "=&d" (old_val), "=&d" (new_val), \
49 "=m" (((atomic_t *)(ptr))->counter) \
50 : "a" (ptr), "d" (op_val), \
51 "m" (((atomic_t *)(ptr))->counter) \
52 : "cc", "memory"); \
53 new_val; \
54})
55
56#endif /* __GNUC__ */
57
58static inline int atomic_read(const atomic_t *v) 37static inline int atomic_read(const atomic_t *v)
59{ 38{
60 barrier(); 39 barrier();
@@ -101,19 +80,11 @@ static inline void atomic_set_mask(unsigned long mask, atomic_t *v)
101 80
102static inline int atomic_cmpxchg(atomic_t *v, int old, int new) 81static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
103{ 82{
104#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
105 asm volatile( 83 asm volatile(
106 " cs %0,%2,%1" 84 " cs %0,%2,%1"
107 : "+d" (old), "=Q" (v->counter) 85 : "+d" (old), "=Q" (v->counter)
108 : "d" (new), "Q" (v->counter) 86 : "d" (new), "Q" (v->counter)
109 : "cc", "memory"); 87 : "cc", "memory");
110#else /* __GNUC__ */
111 asm volatile(
112 " cs %0,%3,0(%2)"
113 : "+d" (old), "=m" (v->counter)
114 : "a" (v), "d" (new), "m" (v->counter)
115 : "cc", "memory");
116#endif /* __GNUC__ */
117 return old; 88 return old;
118} 89}
119 90
@@ -140,8 +111,6 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
140 111
141#ifdef CONFIG_64BIT 112#ifdef CONFIG_64BIT
142 113
143#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
144
145#define __CSG_LOOP(ptr, op_val, op_string) ({ \ 114#define __CSG_LOOP(ptr, op_val, op_string) ({ \
146 long long old_val, new_val; \ 115 long long old_val, new_val; \
147 asm volatile( \ 116 asm volatile( \
@@ -157,26 +126,6 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
157 new_val; \ 126 new_val; \
158}) 127})
159 128
160#else /* __GNUC__ */
161
162#define __CSG_LOOP(ptr, op_val, op_string) ({ \
163 long long old_val, new_val; \
164 asm volatile( \
165 " lg %0,0(%3)\n" \
166 "0: lgr %1,%0\n" \
167 op_string " %1,%4\n" \
168 " csg %0,%1,0(%3)\n" \
169 " jl 0b" \
170 : "=&d" (old_val), "=&d" (new_val), \
171 "=m" (((atomic_t *)(ptr))->counter) \
172 : "a" (ptr), "d" (op_val), \
173 "m" (((atomic_t *)(ptr))->counter) \
174 : "cc", "memory"); \
175 new_val; \
176})
177
178#endif /* __GNUC__ */
179
180static inline long long atomic64_read(const atomic64_t *v) 129static inline long long atomic64_read(const atomic64_t *v)
181{ 130{
182 barrier(); 131 barrier();
@@ -214,19 +163,11 @@ static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
214static inline long long atomic64_cmpxchg(atomic64_t *v, 163static inline long long atomic64_cmpxchg(atomic64_t *v,
215 long long old, long long new) 164 long long old, long long new)
216{ 165{
217#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
218 asm volatile( 166 asm volatile(
219 " csg %0,%2,%1" 167 " csg %0,%2,%1"
220 : "+d" (old), "=Q" (v->counter) 168 : "+d" (old), "=Q" (v->counter)
221 : "d" (new), "Q" (v->counter) 169 : "d" (new), "Q" (v->counter)
222 : "cc", "memory"); 170 : "cc", "memory");
223#else /* __GNUC__ */
224 asm volatile(
225 " csg %0,%3,0(%2)"
226 : "+d" (old), "=m" (v->counter)
227 : "a" (v), "d" (new), "m" (v->counter)
228 : "cc", "memory");
229#endif /* __GNUC__ */
230 return old; 171 return old;
231} 172}
232 173
@@ -243,10 +184,8 @@ static inline long long atomic64_read(const atomic64_t *v)
243 register_pair rp; 184 register_pair rp;
244 185
245 asm volatile( 186 asm volatile(
246 " lm %0,%N0,0(%1)" 187 " lm %0,%N0,%1"
247 : "=&d" (rp) 188 : "=&d" (rp) : "Q" (v->counter) );
248 : "a" (&v->counter), "m" (v->counter)
249 );
250 return rp.pair; 189 return rp.pair;
251} 190}
252 191
@@ -255,10 +194,8 @@ static inline void atomic64_set(atomic64_t *v, long long i)
255 register_pair rp = {.pair = i}; 194 register_pair rp = {.pair = i};
256 195
257 asm volatile( 196 asm volatile(
258 " stm %1,%N1,0(%2)" 197 " stm %1,%N1,%0"
259 : "=m" (v->counter) 198 : "=Q" (v->counter) : "d" (rp) );
260 : "d" (rp), "a" (&v->counter)
261 );
262} 199}
263 200
264static inline long long atomic64_xchg(atomic64_t *v, long long new) 201static inline long long atomic64_xchg(atomic64_t *v, long long new)
@@ -267,11 +204,11 @@ static inline long long atomic64_xchg(atomic64_t *v, long long new)
267 register_pair rp_old; 204 register_pair rp_old;
268 205
269 asm volatile( 206 asm volatile(
270 " lm %0,%N0,0(%2)\n" 207 " lm %0,%N0,%1\n"
271 "0: cds %0,%3,0(%2)\n" 208 "0: cds %0,%2,%1\n"
272 " jl 0b\n" 209 " jl 0b\n"
273 : "=&d" (rp_old), "+m" (v->counter) 210 : "=&d" (rp_old), "=Q" (v->counter)
274 : "a" (&v->counter), "d" (rp_new) 211 : "d" (rp_new), "Q" (v->counter)
275 : "cc"); 212 : "cc");
276 return rp_old.pair; 213 return rp_old.pair;
277} 214}
@@ -283,9 +220,9 @@ static inline long long atomic64_cmpxchg(atomic64_t *v,
283 register_pair rp_new = {.pair = new}; 220 register_pair rp_new = {.pair = new};
284 221
285 asm volatile( 222 asm volatile(
286 " cds %0,%3,0(%2)" 223 " cds %0,%2,%1"
287 : "+&d" (rp_old), "+m" (v->counter) 224 : "+&d" (rp_old), "=Q" (v->counter)
288 : "a" (&v->counter), "d" (rp_new) 225 : "d" (rp_new), "Q" (v->counter)
289 : "cc"); 226 : "cc");
290 return rp_old.pair; 227 return rp_old.pair;
291} 228}
@@ -338,6 +275,7 @@ static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
338static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u) 275static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
339{ 276{
340 long long c, old; 277 long long c, old;
278
341 c = atomic64_read(v); 279 c = atomic64_read(v);
342 for (;;) { 280 for (;;) {
343 if (unlikely(c == u)) 281 if (unlikely(c == u))
@@ -350,6 +288,23 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
350 return c != u; 288 return c != u;
351} 289}
352 290
291static inline long long atomic64_dec_if_positive(atomic64_t *v)
292{
293 long long c, old, dec;
294
295 c = atomic64_read(v);
296 for (;;) {
297 dec = c - 1;
298 if (unlikely(dec < 0))
299 break;
300 old = atomic64_cmpxchg((v), c, dec);
301 if (likely(old == c))
302 break;
303 c = old;
304 }
305 return dec;
306}
307
353#define atomic64_add(_i, _v) atomic64_add_return(_i, _v) 308#define atomic64_add(_i, _v) atomic64_add_return(_i, _v)
354#define atomic64_add_negative(_i, _v) (atomic64_add_return(_i, _v) < 0) 309#define atomic64_add_negative(_i, _v) (atomic64_add_return(_i, _v) < 0)
355#define atomic64_inc(_v) atomic64_add_return(1, _v) 310#define atomic64_inc(_v) atomic64_add_return(1, _v)
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index b30606f6d523..2e05972c5085 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -71,8 +71,6 @@ extern const char _sb_findmap[];
71#define __BITOPS_AND "nr" 71#define __BITOPS_AND "nr"
72#define __BITOPS_XOR "xr" 72#define __BITOPS_XOR "xr"
73 73
74#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
75
76#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ 74#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \
77 asm volatile( \ 75 asm volatile( \
78 " l %0,%2\n" \ 76 " l %0,%2\n" \
@@ -85,22 +83,6 @@ extern const char _sb_findmap[];
85 : "d" (__val), "Q" (*(unsigned long *) __addr) \ 83 : "d" (__val), "Q" (*(unsigned long *) __addr) \
86 : "cc"); 84 : "cc");
87 85
88#else /* __GNUC__ */
89
90#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \
91 asm volatile( \
92 " l %0,0(%4)\n" \
93 "0: lr %1,%0\n" \
94 __op_string " %1,%3\n" \
95 " cs %0,%1,0(%4)\n" \
96 " jl 0b" \
97 : "=&d" (__old), "=&d" (__new), \
98 "=m" (*(unsigned long *) __addr) \
99 : "d" (__val), "a" (__addr), \
100 "m" (*(unsigned long *) __addr) : "cc");
101
102#endif /* __GNUC__ */
103
104#else /* __s390x__ */ 86#else /* __s390x__ */
105 87
106#define __BITOPS_ALIGN 7 88#define __BITOPS_ALIGN 7
@@ -109,8 +91,6 @@ extern const char _sb_findmap[];
109#define __BITOPS_AND "ngr" 91#define __BITOPS_AND "ngr"
110#define __BITOPS_XOR "xgr" 92#define __BITOPS_XOR "xgr"
111 93
112#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
113
114#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ 94#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \
115 asm volatile( \ 95 asm volatile( \
116 " lg %0,%2\n" \ 96 " lg %0,%2\n" \
@@ -123,23 +103,6 @@ extern const char _sb_findmap[];
123 : "d" (__val), "Q" (*(unsigned long *) __addr) \ 103 : "d" (__val), "Q" (*(unsigned long *) __addr) \
124 : "cc"); 104 : "cc");
125 105
126#else /* __GNUC__ */
127
128#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \
129 asm volatile( \
130 " lg %0,0(%4)\n" \
131 "0: lgr %1,%0\n" \
132 __op_string " %1,%3\n" \
133 " csg %0,%1,0(%4)\n" \
134 " jl 0b" \
135 : "=&d" (__old), "=&d" (__new), \
136 "=m" (*(unsigned long *) __addr) \
137 : "d" (__val), "a" (__addr), \
138 "m" (*(unsigned long *) __addr) : "cc");
139
140
141#endif /* __GNUC__ */
142
143#endif /* __s390x__ */ 106#endif /* __s390x__ */
144 107
145#define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE) 108#define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE)
@@ -261,9 +224,8 @@ static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
261 224
262 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 225 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
263 asm volatile( 226 asm volatile(
264 " oc 0(1,%1),0(%2)" 227 " oc %O0(1,%R0),%1"
265 : "=m" (*(char *) addr) : "a" (addr), 228 : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" );
266 "a" (_oi_bitmap + (nr & 7)), "m" (*(char *) addr) : "cc" );
267} 229}
268 230
269static inline void 231static inline void
@@ -290,9 +252,8 @@ __clear_bit(unsigned long nr, volatile unsigned long *ptr)
290 252
291 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 253 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
292 asm volatile( 254 asm volatile(
293 " nc 0(1,%1),0(%2)" 255 " nc %O0(1,%R0),%1"
294 : "=m" (*(char *) addr) : "a" (addr), 256 : "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc" );
295 "a" (_ni_bitmap + (nr & 7)), "m" (*(char *) addr) : "cc");
296} 257}
297 258
298static inline void 259static inline void
@@ -318,9 +279,8 @@ static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
318 279
319 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 280 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
320 asm volatile( 281 asm volatile(
321 " xc 0(1,%1),0(%2)" 282 " xc %O0(1,%R0),%1"
322 : "=m" (*(char *) addr) : "a" (addr), 283 : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" );
323 "a" (_oi_bitmap + (nr & 7)), "m" (*(char *) addr) : "cc" );
324} 284}
325 285
326static inline void 286static inline void
@@ -349,10 +309,9 @@ test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr)
349 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 309 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
350 ch = *(unsigned char *) addr; 310 ch = *(unsigned char *) addr;
351 asm volatile( 311 asm volatile(
352 " oc 0(1,%1),0(%2)" 312 " oc %O0(1,%R0),%1"
353 : "=m" (*(char *) addr) 313 : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
354 : "a" (addr), "a" (_oi_bitmap + (nr & 7)), 314 : "cc", "memory");
355 "m" (*(char *) addr) : "cc", "memory");
356 return (ch >> (nr & 7)) & 1; 315 return (ch >> (nr & 7)) & 1;
357} 316}
358#define __test_and_set_bit(X,Y) test_and_set_bit_simple(X,Y) 317#define __test_and_set_bit(X,Y) test_and_set_bit_simple(X,Y)
@@ -369,10 +328,9 @@ test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr)
369 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 328 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
370 ch = *(unsigned char *) addr; 329 ch = *(unsigned char *) addr;
371 asm volatile( 330 asm volatile(
372 " nc 0(1,%1),0(%2)" 331 " nc %O0(1,%R0),%1"
373 : "=m" (*(char *) addr) 332 : "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7])
374 : "a" (addr), "a" (_ni_bitmap + (nr & 7)), 333 : "cc", "memory");
375 "m" (*(char *) addr) : "cc", "memory");
376 return (ch >> (nr & 7)) & 1; 334 return (ch >> (nr & 7)) & 1;
377} 335}
378#define __test_and_clear_bit(X,Y) test_and_clear_bit_simple(X,Y) 336#define __test_and_clear_bit(X,Y) test_and_clear_bit_simple(X,Y)
@@ -389,10 +347,9 @@ test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr)
389 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 347 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
390 ch = *(unsigned char *) addr; 348 ch = *(unsigned char *) addr;
391 asm volatile( 349 asm volatile(
392 " xc 0(1,%1),0(%2)" 350 " xc %O0(1,%R0),%1"
393 : "=m" (*(char *) addr) 351 : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
394 : "a" (addr), "a" (_oi_bitmap + (nr & 7)), 352 : "cc", "memory");
395 "m" (*(char *) addr) : "cc", "memory");
396 return (ch >> (nr & 7)) & 1; 353 return (ch >> (nr & 7)) & 1;
397} 354}
398#define __test_and_change_bit(X,Y) test_and_change_bit_simple(X,Y) 355#define __test_and_change_bit(X,Y) test_and_change_bit_simple(X,Y)
@@ -591,11 +548,11 @@ static inline unsigned long __load_ulong_le(const unsigned long *p,
591 p = (unsigned long *)((unsigned long) p + offset); 548 p = (unsigned long *)((unsigned long) p + offset);
592#ifndef __s390x__ 549#ifndef __s390x__
593 asm volatile( 550 asm volatile(
594 " ic %0,0(%1)\n" 551 " ic %0,%O1(%R1)\n"
595 " icm %0,2,1(%1)\n" 552 " icm %0,2,%O1+1(%R1)\n"
596 " icm %0,4,2(%1)\n" 553 " icm %0,4,%O1+2(%R1)\n"
597 " icm %0,8,3(%1)" 554 " icm %0,8,%O1+3(%R1)"
598 : "=&d" (word) : "a" (p), "m" (*p) : "cc"); 555 : "=&d" (word) : "Q" (*p) : "cc");
599#else 556#else
600 asm volatile( 557 asm volatile(
601 " lrvg %0,%1" 558 " lrvg %0,%1"
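
Most of the bitops changes above switch the inline assemblies to "Q" memory operands, but the byte address they work on is still computed by hand as addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3). The XOR flips the byte index within the word so that Linux's little-endian bit numbering lands on the correct byte of a big-endian stored word. The following standalone C program only illustrates that mapping (64-bit words assumed); it is not s390 code.

/* Worked example of the nr -> byte mapping used by the s390 byte-wise bitops. */
#include <stdio.h>

#define WORDSIZE 64

static unsigned long byte_of_bit(unsigned long nr)
{
        return (nr ^ (WORDSIZE - 8)) >> 3;      /* byte offset from the bitmap base */
}

int main(void)
{
        /* bit 0 lives in byte 7 of the first big-endian doubleword,
         * bit 63 in byte 0, bit 64 in byte 15 of the array, ... */
        printf("bit  0 -> byte %lu, mask 0x%02x\n", byte_of_bit(0),  1u << (0 & 7));
        printf("bit 63 -> byte %lu, mask 0x%02x\n", byte_of_bit(63), 1u << (63 & 7));
        printf("bit 64 -> byte %lu, mask 0x%02x\n", byte_of_bit(64), 1u << (64 & 7));
        return 0;
}
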
diff --git a/arch/s390/include/asm/bug.h b/arch/s390/include/asm/bug.h
index b1066b9fb5f8..bf90d1fd97a5 100644
--- a/arch/s390/include/asm/bug.h
+++ b/arch/s390/include/asm/bug.h
@@ -5,12 +5,6 @@
5 5
6#ifdef CONFIG_BUG 6#ifdef CONFIG_BUG
7 7
8#ifdef CONFIG_64BIT
9#define S390_LONG ".quad"
10#else
11#define S390_LONG ".long"
12#endif
13
14#ifdef CONFIG_DEBUG_BUGVERBOSE 8#ifdef CONFIG_DEBUG_BUGVERBOSE
15 9
16#define __EMIT_BUG(x) do { \ 10#define __EMIT_BUG(x) do { \
@@ -21,7 +15,7 @@
21 "2: .asciz \""__FILE__"\"\n" \ 15 "2: .asciz \""__FILE__"\"\n" \
22 ".previous\n" \ 16 ".previous\n" \
23 ".section __bug_table,\"a\"\n" \ 17 ".section __bug_table,\"a\"\n" \
24 "3:\t" S390_LONG "\t1b,2b\n" \ 18 "3: .long 1b-3b,2b-3b\n" \
25 " .short %0,%1\n" \ 19 " .short %0,%1\n" \
26 " .org 3b+%2\n" \ 20 " .org 3b+%2\n" \
27 ".previous\n" \ 21 ".previous\n" \
@@ -37,7 +31,7 @@
37 "0: j 0b+2\n" \ 31 "0: j 0b+2\n" \
38 "1:\n" \ 32 "1:\n" \
39 ".section __bug_table,\"a\"\n" \ 33 ".section __bug_table,\"a\"\n" \
40 "2:\t" S390_LONG "\t1b\n" \ 34 "2: .long 1b-2b\n" \
41 " .short %0\n" \ 35 " .short %0\n" \
42 " .org 2b+%1\n" \ 36 " .org 2b+%1\n" \
43 ".previous\n" \ 37 ".previous\n" \
@@ -52,18 +46,18 @@
52 unreachable(); \ 46 unreachable(); \
53} while (0) 47} while (0)
54 48
55#define __WARN() do { \ 49#define __WARN_TAINT(taint) do { \
56 __EMIT_BUG(BUGFLAG_WARNING); \ 50 __EMIT_BUG(BUGFLAG_TAINT(taint)); \
57} while (0) 51} while (0)
58 52
59#define WARN_ON(x) ({ \ 53#define WARN_ON(x) ({ \
60 int __ret_warn_on = !!(x); \ 54 int __ret_warn_on = !!(x); \
61 if (__builtin_constant_p(__ret_warn_on)) { \ 55 if (__builtin_constant_p(__ret_warn_on)) { \
62 if (__ret_warn_on) \ 56 if (__ret_warn_on) \
63 __EMIT_BUG(BUGFLAG_WARNING); \ 57 __WARN(); \
64 } else { \ 58 } else { \
65 if (unlikely(__ret_warn_on)) \ 59 if (unlikely(__ret_warn_on)) \
66 __EMIT_BUG(BUGFLAG_WARNING); \ 60 __WARN(); \
67 } \ 61 } \
68 unlikely(__ret_warn_on); \ 62 unlikely(__ret_warn_on); \
69}) 63})
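
The bug-table entries above switch from S390_LONG absolute addresses to 32-bit self-relative offsets (".long 1b-3b" / ".long 1b-2b"), which shrinks each entry on 64-bit kernels (a 4-byte offset instead of an 8-byte address); the decoder only has to add the entry's own address back. The standalone program below illustrates that encoding with invented variable names; the real decoding happens in the generic BUG handling code.

/* Illustration of a self-relative 32-bit pointer: delta stored, base added back. */
#include <stdint.h>
#include <stdio.h>

static char code_bytes[16];             /* stands in for the 1b: location */
static int32_t bug_entry;               /* stands in for the ".long 1b-2b" word */

int main(void)
{
        uintptr_t target = (uintptr_t)&code_bytes[4];
        uintptr_t entry  = (uintptr_t)&bug_entry;

        bug_entry = (int32_t)(target - entry);          /* what the assembler emits */
        printf("recovered %p, expected %p\n",
               (void *)(entry + (intptr_t)bug_entry), (void *)target);
        return 0;
}
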
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index f4bd346a52d3..1c0030f9b890 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -91,6 +91,14 @@ struct ccw_device {
91 void (*handler) (struct ccw_device *, unsigned long, struct irb *); 91 void (*handler) (struct ccw_device *, unsigned long, struct irb *);
92}; 92};
93 93
94/*
95 * Possible CIO actions triggered by the unit check handler.
96 */
97enum uc_todo {
98 UC_TODO_RETRY,
99 UC_TODO_RETRY_ON_NEW_PATH,
100 UC_TODO_STOP
101};
94 102
95/** 103/**
96 * struct ccw driver - device driver for channel attached devices 104 * struct ccw driver - device driver for channel attached devices
@@ -107,6 +115,7 @@ struct ccw_device {
107 * @freeze: callback for freezing during hibernation snapshotting 115 * @freeze: callback for freezing during hibernation snapshotting
108 * @thaw: undo work done in @freeze 116 * @thaw: undo work done in @freeze
109 * @restore: callback for restoring after hibernation 117 * @restore: callback for restoring after hibernation
118 * @uc_handler: callback for unit check handler
110 * @driver: embedded device driver structure 119 * @driver: embedded device driver structure
111 * @name: device driver name 120 * @name: device driver name
112 */ 121 */
@@ -124,6 +133,7 @@ struct ccw_driver {
124 int (*freeze)(struct ccw_device *); 133 int (*freeze)(struct ccw_device *);
125 int (*thaw) (struct ccw_device *); 134 int (*thaw) (struct ccw_device *);
126 int (*restore)(struct ccw_device *); 135 int (*restore)(struct ccw_device *);
136 enum uc_todo (*uc_handler) (struct ccw_device *, struct irb *);
127 struct device_driver driver; 137 struct device_driver driver;
128 char *name; 138 char *name;
129}; 139};
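
The new uc_handler callback lets a CCW device driver look at a unit check before common I/O decides how to proceed, answering with one of the enum uc_todo values added above. A hedged sketch of how a driver might wire it up follows; demo_uc_handler and demo_driver are invented names, and a real driver would also fill in .ids, .probe and the other callbacks.

/* Sketch only: demo_* names are invented, the driver is never registered here. */
#include <asm/ccwdev.h>
#include <asm/cio.h>

static enum uc_todo demo_uc_handler(struct ccw_device *cdev, struct irb *irb)
{
        /*
         * A real handler would inspect the sense data in irb and pick
         * UC_TODO_RETRY, UC_TODO_RETRY_ON_NEW_PATH or UC_TODO_STOP based
         * on what the device reported.
         */
        return UC_TODO_RETRY;
}

static struct ccw_driver demo_driver = {
        .name       = "demo",
        .uc_handler = demo_uc_handler,
        /* .ids, .probe, .set_online, ... omitted */
};
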
diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h
index e85679af54dd..e34347d567a6 100644
--- a/arch/s390/include/asm/cio.h
+++ b/arch/s390/include/asm/cio.h
@@ -20,7 +20,7 @@
20/** 20/**
21 * struct ccw1 - channel command word 21 * struct ccw1 - channel command word
22 * @cmd_code: command code 22 * @cmd_code: command code
23 * @flags: flags, like IDA adressing, etc. 23 * @flags: flags, like IDA addressing, etc.
24 * @count: byte count 24 * @count: byte count
25 * @cda: data address 25 * @cda: data address
26 * 26 *
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
index 01a08020bc0e..104f2007f097 100644
--- a/arch/s390/include/asm/compat.h
+++ b/arch/s390/include/asm/compat.h
@@ -35,7 +35,8 @@
35 35
36extern long psw32_user_bits; 36extern long psw32_user_bits;
37 37
38#define COMPAT_USER_HZ 100 38#define COMPAT_USER_HZ 100
39#define COMPAT_UTS_MACHINE "s390\0\0\0\0"
39 40
40typedef u32 compat_size_t; 41typedef u32 compat_size_t;
41typedef s32 compat_ssize_t; 42typedef s32 compat_ssize_t;
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index 258ba88b7b50..8b1a52a137c5 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -188,15 +188,16 @@ struct s390_idle_data {
188 188
189DECLARE_PER_CPU(struct s390_idle_data, s390_idle); 189DECLARE_PER_CPU(struct s390_idle_data, s390_idle);
190 190
191void vtime_start_cpu(void); 191void vtime_start_cpu(__u64 int_clock, __u64 enter_timer);
192cputime64_t s390_get_idle_time(int cpu); 192cputime64_t s390_get_idle_time(int cpu);
193 193
194#define arch_idle_time(cpu) s390_get_idle_time(cpu) 194#define arch_idle_time(cpu) s390_get_idle_time(cpu)
195 195
196static inline void s390_idle_check(void) 196static inline void s390_idle_check(struct pt_regs *regs, __u64 int_clock,
197 __u64 enter_timer)
197{ 198{
198 if ((&__get_cpu_var(s390_idle))->idle_enter != 0ULL) 199 if (regs->psw.mask & PSW_MASK_WAIT)
199 vtime_start_cpu(); 200 vtime_start_cpu(int_clock, enter_timer);
200} 201}
201 202
202static inline int s390_nohz_delay(int cpu) 203static inline int s390_nohz_delay(int cpu)
diff --git a/arch/s390/include/asm/crw.h b/arch/s390/include/asm/crw.h
index 2185a6d619d3..749a97e61bea 100644
--- a/arch/s390/include/asm/crw.h
+++ b/arch/s390/include/asm/crw.h
@@ -32,6 +32,7 @@ typedef void (*crw_handler_t)(struct crw *, struct crw *, int);
32extern int crw_register_handler(int rsc, crw_handler_t handler); 32extern int crw_register_handler(int rsc, crw_handler_t handler);
33extern void crw_unregister_handler(int rsc); 33extern void crw_unregister_handler(int rsc);
34extern void crw_handle_channel_report(void); 34extern void crw_handle_channel_report(void);
35void crw_wait_for_channel_report(void);
35 36
36#define NR_RSCS 16 37#define NR_RSCS 16
37 38
diff --git a/arch/s390/include/asm/etr.h b/arch/s390/include/asm/etr.h
index 80ef58c61970..538e1b36a726 100644
--- a/arch/s390/include/asm/etr.h
+++ b/arch/s390/include/asm/etr.h
@@ -145,11 +145,11 @@ static inline int etr_setr(struct etr_eacr *ctrl)
145 int rc = -ENOSYS; 145 int rc = -ENOSYS;
146 146
147 asm volatile( 147 asm volatile(
148 " .insn s,0xb2160000,0(%2)\n" 148 " .insn s,0xb2160000,%1\n"
149 "0: la %0,0\n" 149 "0: la %0,0\n"
150 "1:\n" 150 "1:\n"
151 EX_TABLE(0b,1b) 151 EX_TABLE(0b,1b)
152 : "+d" (rc) : "m" (*ctrl), "a" (ctrl)); 152 : "+d" (rc) : "Q" (*ctrl));
153 return rc; 153 return rc;
154} 154}
155 155
@@ -159,11 +159,11 @@ static inline int etr_stetr(struct etr_aib *aib)
159 int rc = -ENOSYS; 159 int rc = -ENOSYS;
160 160
161 asm volatile( 161 asm volatile(
162 " .insn s,0xb2170000,0(%2)\n" 162 " .insn s,0xb2170000,%1\n"
163 "0: la %0,0\n" 163 "0: la %0,0\n"
164 "1:\n" 164 "1:\n"
165 EX_TABLE(0b,1b) 165 EX_TABLE(0b,1b)
166 : "+d" (rc) : "m" (*aib), "a" (aib)); 166 : "+d" (rc) : "Q" (*aib));
167 return rc; 167 return rc;
168} 168}
169 169
@@ -174,11 +174,11 @@ static inline int etr_steai(struct etr_aib *aib, unsigned int func)
174 int rc = -ENOSYS; 174 int rc = -ENOSYS;
175 175
176 asm volatile( 176 asm volatile(
177 " .insn s,0xb2b30000,0(%2)\n" 177 " .insn s,0xb2b30000,%1\n"
178 "0: la %0,0\n" 178 "0: la %0,0\n"
179 "1:\n" 179 "1:\n"
180 EX_TABLE(0b,1b) 180 EX_TABLE(0b,1b)
181 : "+d" (rc) : "m" (*aib), "a" (aib), "d" (reg0)); 181 : "+d" (rc) : "Q" (*aib), "d" (reg0));
182 return rc; 182 return rc;
183} 183}
184 184
diff --git a/arch/s390/include/asm/irqflags.h b/arch/s390/include/asm/irqflags.h
index c2fb432f576a..15b3ac253898 100644
--- a/arch/s390/include/asm/irqflags.h
+++ b/arch/s390/include/asm/irqflags.h
@@ -8,8 +8,6 @@
8 8
9#include <linux/types.h> 9#include <linux/types.h>
10 10
11#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
12
13/* store then or system mask. */ 11/* store then or system mask. */
14#define __raw_local_irq_stosm(__or) \ 12#define __raw_local_irq_stosm(__or) \
15({ \ 13({ \
@@ -36,40 +34,6 @@
36 asm volatile("ssm %0" : : "Q" (__mask) : "memory"); \ 34 asm volatile("ssm %0" : : "Q" (__mask) : "memory"); \
37}) 35})
38 36
39#else /* __GNUC__ */
40
41/* store then or system mask. */
42#define __raw_local_irq_stosm(__or) \
43({ \
44 unsigned long __mask; \
45 asm volatile( \
46 " stosm 0(%1),%2" \
47 : "=m" (__mask) \
48 : "a" (&__mask), "i" (__or) : "memory"); \
49 __mask; \
50})
51
52/* store then and system mask. */
53#define __raw_local_irq_stnsm(__and) \
54({ \
55 unsigned long __mask; \
56 asm volatile( \
57 " stnsm 0(%1),%2" \
58 : "=m" (__mask) \
59 : "a" (&__mask), "i" (__and) : "memory"); \
60 __mask; \
61})
62
63/* set system mask. */
64#define __raw_local_irq_ssm(__mask) \
65({ \
66 asm volatile( \
67 " ssm 0(%0)" \
68 : : "a" (&__mask), "m" (__mask) : "memory"); \
69})
70
71#endif /* __GNUC__ */
72
73/* interrupt control.. */ 37/* interrupt control.. */
74static inline unsigned long raw_local_irq_enable(void) 38static inline unsigned long raw_local_irq_enable(void)
75{ 39{
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index f2ef4b619ce1..0f97ef2d92ac 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -1,141 +1,16 @@
1/* 1/*
2 * include/asm-s390/lowcore.h 2 * Copyright IBM Corp. 1999,2010
3 * 3 * Author(s): Hartmut Penner <hp@de.ibm.com>,
4 * S390 version 4 * Martin Schwidefsky <schwidefsky@de.ibm.com>,
5 * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation 5 * Denis Joseph Barrow,
6 * Author(s): Hartmut Penner (hp@de.ibm.com),
7 * Martin Schwidefsky (schwidefsky@de.ibm.com),
8 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
9 */ 6 */
10 7
11#ifndef _ASM_S390_LOWCORE_H 8#ifndef _ASM_S390_LOWCORE_H
12#define _ASM_S390_LOWCORE_H 9#define _ASM_S390_LOWCORE_H
13 10
14#define __LC_IPL_PARMBLOCK_PTR 0x0014
15#define __LC_EXT_PARAMS 0x0080
16#define __LC_CPU_ADDRESS 0x0084
17#define __LC_EXT_INT_CODE 0x0086
18
19#define __LC_SVC_ILC 0x0088
20#define __LC_SVC_INT_CODE 0x008a
21#define __LC_PGM_ILC 0x008c
22#define __LC_PGM_INT_CODE 0x008e
23
24#define __LC_PER_ATMID 0x0096
25#define __LC_PER_ADDRESS 0x0098
26#define __LC_PER_ACCESS_ID 0x00a1
27#define __LC_AR_MODE_ID 0x00a3
28
29#define __LC_SUBCHANNEL_ID 0x00b8
30#define __LC_SUBCHANNEL_NR 0x00ba
31#define __LC_IO_INT_PARM 0x00bc
32#define __LC_IO_INT_WORD 0x00c0
33#define __LC_STFL_FAC_LIST 0x00c8
34#define __LC_MCCK_CODE 0x00e8
35
36#define __LC_DUMP_REIPL 0x0e00
37
38#ifndef __s390x__
39#define __LC_EXT_OLD_PSW 0x0018
40#define __LC_SVC_OLD_PSW 0x0020
41#define __LC_PGM_OLD_PSW 0x0028
42#define __LC_MCK_OLD_PSW 0x0030
43#define __LC_IO_OLD_PSW 0x0038
44#define __LC_EXT_NEW_PSW 0x0058
45#define __LC_SVC_NEW_PSW 0x0060
46#define __LC_PGM_NEW_PSW 0x0068
47#define __LC_MCK_NEW_PSW 0x0070
48#define __LC_IO_NEW_PSW 0x0078
49#define __LC_SAVE_AREA 0x0200
50#define __LC_RETURN_PSW 0x0240
51#define __LC_RETURN_MCCK_PSW 0x0248
52#define __LC_SYNC_ENTER_TIMER 0x0250
53#define __LC_ASYNC_ENTER_TIMER 0x0258
54#define __LC_EXIT_TIMER 0x0260
55#define __LC_USER_TIMER 0x0268
56#define __LC_SYSTEM_TIMER 0x0270
57#define __LC_STEAL_TIMER 0x0278
58#define __LC_LAST_UPDATE_TIMER 0x0280
59#define __LC_LAST_UPDATE_CLOCK 0x0288
60#define __LC_CURRENT 0x0290
61#define __LC_THREAD_INFO 0x0294
62#define __LC_KERNEL_STACK 0x0298
63#define __LC_ASYNC_STACK 0x029c
64#define __LC_PANIC_STACK 0x02a0
65#define __LC_KERNEL_ASCE 0x02a4
66#define __LC_USER_ASCE 0x02a8
67#define __LC_USER_EXEC_ASCE 0x02ac
68#define __LC_CPUID 0x02b0
69#define __LC_INT_CLOCK 0x02c8
70#define __LC_MACHINE_FLAGS 0x02d8
71#define __LC_FTRACE_FUNC 0x02dc
72#define __LC_IRB 0x0300
73#define __LC_PFAULT_INTPARM 0x0080
74#define __LC_CPU_TIMER_SAVE_AREA 0x00d8
75#define __LC_CLOCK_COMP_SAVE_AREA 0x00e0
76#define __LC_PSW_SAVE_AREA 0x0100
77#define __LC_PREFIX_SAVE_AREA 0x0108
78#define __LC_AREGS_SAVE_AREA 0x0120
79#define __LC_FPREGS_SAVE_AREA 0x0160
80#define __LC_GPREGS_SAVE_AREA 0x0180
81#define __LC_CREGS_SAVE_AREA 0x01c0
82#else /* __s390x__ */
83#define __LC_LAST_BREAK 0x0110
84#define __LC_EXT_OLD_PSW 0x0130
85#define __LC_SVC_OLD_PSW 0x0140
86#define __LC_PGM_OLD_PSW 0x0150
87#define __LC_MCK_OLD_PSW 0x0160
88#define __LC_IO_OLD_PSW 0x0170
89#define __LC_RESTART_PSW 0x01a0
90#define __LC_EXT_NEW_PSW 0x01b0
91#define __LC_SVC_NEW_PSW 0x01c0
92#define __LC_PGM_NEW_PSW 0x01d0
93#define __LC_MCK_NEW_PSW 0x01e0
94#define __LC_IO_NEW_PSW 0x01f0
95#define __LC_SAVE_AREA 0x0200
96#define __LC_RETURN_PSW 0x0280
97#define __LC_RETURN_MCCK_PSW 0x0290
98#define __LC_SYNC_ENTER_TIMER 0x02a0
99#define __LC_ASYNC_ENTER_TIMER 0x02a8
100#define __LC_EXIT_TIMER 0x02b0
101#define __LC_USER_TIMER 0x02b8
102#define __LC_SYSTEM_TIMER 0x02c0
103#define __LC_STEAL_TIMER 0x02c8
104#define __LC_LAST_UPDATE_TIMER 0x02d0
105#define __LC_LAST_UPDATE_CLOCK 0x02d8
106#define __LC_CURRENT 0x02e0
107#define __LC_THREAD_INFO 0x02e8
108#define __LC_KERNEL_STACK 0x02f0
109#define __LC_ASYNC_STACK 0x02f8
110#define __LC_PANIC_STACK 0x0300
111#define __LC_KERNEL_ASCE 0x0308
112#define __LC_USER_ASCE 0x0310
113#define __LC_USER_EXEC_ASCE 0x0318
114#define __LC_CPUID 0x0320
115#define __LC_INT_CLOCK 0x0340
116#define __LC_VDSO_PER_CPU 0x0350
117#define __LC_MACHINE_FLAGS 0x0358
118#define __LC_FTRACE_FUNC 0x0360
119#define __LC_IRB 0x0380
120#define __LC_PASTE 0x03c0
121#define __LC_PFAULT_INTPARM 0x11b8
122#define __LC_FPREGS_SAVE_AREA 0x1200
123#define __LC_GPREGS_SAVE_AREA 0x1280
124#define __LC_PSW_SAVE_AREA 0x1300
125#define __LC_PREFIX_SAVE_AREA 0x1318
126#define __LC_FP_CREG_SAVE_AREA 0x131c
127#define __LC_TODREG_SAVE_AREA 0x1324
128#define __LC_CPU_TIMER_SAVE_AREA 0x1328
129#define __LC_CLOCK_COMP_SAVE_AREA 0x1331
130#define __LC_AREGS_SAVE_AREA 0x1340
131#define __LC_CREGS_SAVE_AREA 0x1380
132#endif /* __s390x__ */
133
134#ifndef __ASSEMBLY__
135
136#include <asm/cpu.h>
137#include <asm/ptrace.h>
138#include <linux/types.h> 11#include <linux/types.h>
12#include <asm/ptrace.h>
13#include <asm/cpu.h>
139 14
140void restart_int_handler(void); 15void restart_int_handler(void);
141void ext_int_handler(void); 16void ext_int_handler(void);
@@ -144,7 +19,12 @@ void pgm_check_handler(void);
144void mcck_int_handler(void); 19void mcck_int_handler(void);
145void io_int_handler(void); 20void io_int_handler(void);
146 21
147struct save_area_s390 { 22#ifdef CONFIG_32BIT
23
24#define LC_ORDER 0
25#define LC_PAGES 1
26
27struct save_area {
148 u32 ext_save; 28 u32 ext_save;
149 u64 timer; 29 u64 timer;
150 u64 clk_cmp; 30 u64 clk_cmp;
@@ -156,54 +36,13 @@ struct save_area_s390 {
156 u64 fp_regs[4]; 36 u64 fp_regs[4];
157 u32 gp_regs[16]; 37 u32 gp_regs[16];
158 u32 ctrl_regs[16]; 38 u32 ctrl_regs[16];
159} __attribute__((packed)); 39} __packed;
160
161struct save_area_s390x {
162 u64 fp_regs[16];
163 u64 gp_regs[16];
164 u8 psw[16];
165 u8 pad1[8];
166 u32 pref_reg;
167 u32 fp_ctrl_reg;
168 u8 pad2[4];
169 u32 tod_reg;
170 u64 timer;
171 u64 clk_cmp;
172 u8 pad3[8];
173 u32 acc_regs[16];
174 u64 ctrl_regs[16];
175} __attribute__((packed));
176
177union save_area {
178 struct save_area_s390 s390;
179 struct save_area_s390x s390x;
180};
181
182#define SAVE_AREA_BASE_S390 0xd4
183#define SAVE_AREA_BASE_S390X 0x1200
184
185#ifndef __s390x__
186#define SAVE_AREA_SIZE sizeof(struct save_area_s390)
187#define SAVE_AREA_BASE SAVE_AREA_BASE_S390
188#else
189#define SAVE_AREA_SIZE sizeof(struct save_area_s390x)
190#define SAVE_AREA_BASE SAVE_AREA_BASE_S390X
191#endif
192
193#ifndef __s390x__
194#define LC_ORDER 0
195#else
196#define LC_ORDER 1
197#endif
198
199#define LC_PAGES (1UL << LC_ORDER)
200 40
201struct _lowcore 41struct _lowcore {
202{
203#ifndef __s390x__
204 /* 0x0000 - 0x01ff: defined by architecture */
205 psw_t restart_psw; /* 0x0000 */ 42 psw_t restart_psw; /* 0x0000 */
206 __u32 ccw2[4]; /* 0x0008 */ 43 psw_t restart_old_psw; /* 0x0008 */
44 __u8 pad_0x0010[0x0014-0x0010]; /* 0x0010 */
45 __u32 ipl_parmblock_ptr; /* 0x0014 */
207 psw_t external_old_psw; /* 0x0018 */ 46 psw_t external_old_psw; /* 0x0018 */
208 psw_t svc_old_psw; /* 0x0020 */ 47 psw_t svc_old_psw; /* 0x0020 */
209 psw_t program_old_psw; /* 0x0028 */ 48 psw_t program_old_psw; /* 0x0028 */
@@ -229,7 +68,9 @@ struct _lowcore
229 __u32 monitor_code; /* 0x009c */ 68 __u32 monitor_code; /* 0x009c */
230 __u8 exc_access_id; /* 0x00a0 */ 69 __u8 exc_access_id; /* 0x00a0 */
231 __u8 per_access_id; /* 0x00a1 */ 70 __u8 per_access_id; /* 0x00a1 */
232 __u8 pad_0x00a2[0x00b8-0x00a2]; /* 0x00a2 */ 71 __u8 op_access_id; /* 0x00a2 */
72 __u8 ar_access_id; /* 0x00a3 */
73 __u8 pad_0x00a4[0x00b8-0x00a4]; /* 0x00a4 */
233 __u16 subchannel_id; /* 0x00b8 */ 74 __u16 subchannel_id; /* 0x00b8 */
234 __u16 subchannel_nr; /* 0x00ba */ 75 __u16 subchannel_nr; /* 0x00ba */
235 __u32 io_int_parm; /* 0x00bc */ 76 __u32 io_int_parm; /* 0x00bc */
@@ -245,8 +86,9 @@ struct _lowcore
245 __u32 external_damage_code; /* 0x00f4 */ 86 __u32 external_damage_code; /* 0x00f4 */
246 __u32 failing_storage_address; /* 0x00f8 */ 87 __u32 failing_storage_address; /* 0x00f8 */
247 __u8 pad_0x00fc[0x0100-0x00fc]; /* 0x00fc */ 88 __u8 pad_0x00fc[0x0100-0x00fc]; /* 0x00fc */
248 __u32 st_status_fixed_logout[4]; /* 0x0100 */ 89 psw_t psw_save_area; /* 0x0100 */
249 __u8 pad_0x0110[0x0120-0x0110]; /* 0x0110 */ 90 __u32 prefixreg_save_area; /* 0x0108 */
91 __u8 pad_0x010c[0x0120-0x010c]; /* 0x010c */
250 92
251 /* CPU register save area: defined by architecture */ 93 /* CPU register save area: defined by architecture */
252 __u32 access_regs_save_area[16]; /* 0x0120 */ 94 __u32 access_regs_save_area[16]; /* 0x0120 */
@@ -262,43 +104,44 @@ struct _lowcore
262 /* CPU time accounting values */ 104 /* CPU time accounting values */
263 __u64 sync_enter_timer; /* 0x0250 */ 105 __u64 sync_enter_timer; /* 0x0250 */
264 __u64 async_enter_timer; /* 0x0258 */ 106 __u64 async_enter_timer; /* 0x0258 */
265 __u64 exit_timer; /* 0x0260 */ 107 __u64 mcck_enter_timer; /* 0x0260 */
266 __u64 user_timer; /* 0x0268 */ 108 __u64 exit_timer; /* 0x0268 */
267 __u64 system_timer; /* 0x0270 */ 109 __u64 user_timer; /* 0x0270 */
268 __u64 steal_timer; /* 0x0278 */ 110 __u64 system_timer; /* 0x0278 */
269 __u64 last_update_timer; /* 0x0280 */ 111 __u64 steal_timer; /* 0x0280 */
270 __u64 last_update_clock; /* 0x0288 */ 112 __u64 last_update_timer; /* 0x0288 */
113 __u64 last_update_clock; /* 0x0290 */
271 114
272 /* Current process. */ 115 /* Current process. */
273 __u32 current_task; /* 0x0290 */ 116 __u32 current_task; /* 0x0298 */
274 __u32 thread_info; /* 0x0294 */ 117 __u32 thread_info; /* 0x029c */
275 __u32 kernel_stack; /* 0x0298 */ 118 __u32 kernel_stack; /* 0x02a0 */
276 119
277 /* Interrupt and panic stack. */ 120 /* Interrupt and panic stack. */
278 __u32 async_stack; /* 0x029c */ 121 __u32 async_stack; /* 0x02a4 */
279 __u32 panic_stack; /* 0x02a0 */ 122 __u32 panic_stack; /* 0x02a8 */
280 123
281 /* Address space pointer. */ 124 /* Address space pointer. */
282 __u32 kernel_asce; /* 0x02a4 */ 125 __u32 kernel_asce; /* 0x02ac */
283 __u32 user_asce; /* 0x02a8 */ 126 __u32 user_asce; /* 0x02b0 */
284 __u32 user_exec_asce; /* 0x02ac */ 127 __u32 user_exec_asce; /* 0x02b4 */
285 128
286 /* SMP info area */ 129 /* SMP info area */
287 struct cpuid cpu_id; /* 0x02b0 */
288 __u32 cpu_nr; /* 0x02b8 */ 130 __u32 cpu_nr; /* 0x02b8 */
289 __u32 softirq_pending; /* 0x02bc */ 131 __u32 softirq_pending; /* 0x02bc */
290 __u32 percpu_offset; /* 0x02c0 */ 132 __u32 percpu_offset; /* 0x02c0 */
291 __u32 ext_call_fast; /* 0x02c4 */ 133 __u32 ext_call_fast; /* 0x02c4 */
292 __u64 int_clock; /* 0x02c8 */ 134 __u64 int_clock; /* 0x02c8 */
293 __u64 clock_comparator; /* 0x02d0 */ 135 __u64 mcck_clock; /* 0x02d0 */
294 __u32 machine_flags; /* 0x02d8 */ 136 __u64 clock_comparator; /* 0x02d8 */
295 __u32 ftrace_func; /* 0x02dc */ 137 __u32 machine_flags; /* 0x02e0 */
296 __u8 pad_0x02f0[0x0300-0x02f0]; /* 0x02f0 */ 138 __u32 ftrace_func; /* 0x02e4 */
139 __u8 pad_0x02e8[0x0300-0x02e8]; /* 0x02e8 */
297 140
298 /* Interrupt response block */ 141 /* Interrupt response block */
299 __u8 irb[64]; /* 0x0300 */ 142 __u8 irb[64]; /* 0x0300 */
300 143
301 __u8 pad_0x0400[0x0e00-0x0400]; /* 0x0400 */ 144 __u8 pad_0x0340[0x0e00-0x0340]; /* 0x0340 */
302 145
303 /* 146 /*
304 * 0xe00 contains the address of the IPL Parameter Information 147 * 0xe00 contains the address of the IPL Parameter Information
@@ -310,10 +153,32 @@ struct _lowcore
310 153
311 /* Align to the top 1k of prefix area */ 154 /* Align to the top 1k of prefix area */
312 __u8 pad_0x0e08[0x1000-0x0e08]; /* 0x0e08 */ 155 __u8 pad_0x0e08[0x1000-0x0e08]; /* 0x0e08 */
313#else /* !__s390x__ */ 156} __packed;
314 /* 0x0000 - 0x01ff: defined by architecture */ 157
315 __u32 ccw1[2]; /* 0x0000 */ 158#else /* CONFIG_32BIT */
316 __u32 ccw2[4]; /* 0x0008 */ 159
160#define LC_ORDER 1
161#define LC_PAGES 2
162
163struct save_area {
164 u64 fp_regs[16];
165 u64 gp_regs[16];
166 u8 psw[16];
167 u8 pad1[8];
168 u32 pref_reg;
169 u32 fp_ctrl_reg;
170 u8 pad2[4];
171 u32 tod_reg;
172 u64 timer;
173 u64 clk_cmp;
174 u8 pad3[8];
175 u32 acc_regs[16];
176 u64 ctrl_regs[16];
177} __packed;
178
179struct _lowcore {
180 __u8 pad_0x0000[0x0014-0x0000]; /* 0x0000 */
181 __u32 ipl_parmblock_ptr; /* 0x0014 */
317 __u8 pad_0x0018[0x0080-0x0018]; /* 0x0018 */ 182 __u8 pad_0x0018[0x0080-0x0018]; /* 0x0018 */
318 __u32 ext_params; /* 0x0080 */ 183 __u32 ext_params; /* 0x0080 */
319 __u16 cpu_addr; /* 0x0084 */ 184 __u16 cpu_addr; /* 0x0084 */
@@ -325,14 +190,14 @@ struct _lowcore
325 __u32 data_exc_code; /* 0x0090 */ 190 __u32 data_exc_code; /* 0x0090 */
326 __u16 mon_class_num; /* 0x0094 */ 191 __u16 mon_class_num; /* 0x0094 */
327 __u16 per_perc_atmid; /* 0x0096 */ 192 __u16 per_perc_atmid; /* 0x0096 */
328 addr_t per_address; /* 0x0098 */ 193 __u64 per_address; /* 0x0098 */
329 __u8 exc_access_id; /* 0x00a0 */ 194 __u8 exc_access_id; /* 0x00a0 */
330 __u8 per_access_id; /* 0x00a1 */ 195 __u8 per_access_id; /* 0x00a1 */
331 __u8 op_access_id; /* 0x00a2 */ 196 __u8 op_access_id; /* 0x00a2 */
332 __u8 ar_access_id; /* 0x00a3 */ 197 __u8 ar_access_id; /* 0x00a3 */
333 __u8 pad_0x00a4[0x00a8-0x00a4]; /* 0x00a4 */ 198 __u8 pad_0x00a4[0x00a8-0x00a4]; /* 0x00a4 */
334 addr_t trans_exc_code; /* 0x00a8 */ 199 __u64 trans_exc_code; /* 0x00a8 */
335 addr_t monitor_code; /* 0x00b0 */ 200 __u64 monitor_code; /* 0x00b0 */
336 __u16 subchannel_id; /* 0x00b8 */ 201 __u16 subchannel_id; /* 0x00b8 */
337 __u16 subchannel_nr; /* 0x00ba */ 202 __u16 subchannel_nr; /* 0x00ba */
338 __u32 io_int_parm; /* 0x00bc */ 203 __u32 io_int_parm; /* 0x00bc */
@@ -343,8 +208,10 @@ struct _lowcore
343 __u32 mcck_interruption_code[2]; /* 0x00e8 */ 208 __u32 mcck_interruption_code[2]; /* 0x00e8 */
344 __u8 pad_0x00f0[0x00f4-0x00f0]; /* 0x00f0 */ 209 __u8 pad_0x00f0[0x00f4-0x00f0]; /* 0x00f0 */
345 __u32 external_damage_code; /* 0x00f4 */ 210 __u32 external_damage_code; /* 0x00f4 */
346 addr_t failing_storage_address; /* 0x00f8 */ 211 __u64 failing_storage_address; /* 0x00f8 */
347 __u8 pad_0x0100[0x0120-0x0100]; /* 0x0100 */ 212 __u8 pad_0x0100[0x0110-0x0100]; /* 0x0100 */
213 __u64 breaking_event_addr; /* 0x0110 */
214 __u8 pad_0x0118[0x0120-0x0118]; /* 0x0118 */
348 psw_t restart_old_psw; /* 0x0120 */ 215 psw_t restart_old_psw; /* 0x0120 */
349 psw_t external_old_psw; /* 0x0130 */ 216 psw_t external_old_psw; /* 0x0130 */
350 psw_t svc_old_psw; /* 0x0140 */ 217 psw_t svc_old_psw; /* 0x0140 */
@@ -367,39 +234,41 @@ struct _lowcore
367 /* CPU accounting and timing values. */ 234 /* CPU accounting and timing values. */
368 __u64 sync_enter_timer; /* 0x02a0 */ 235 __u64 sync_enter_timer; /* 0x02a0 */
369 __u64 async_enter_timer; /* 0x02a8 */ 236 __u64 async_enter_timer; /* 0x02a8 */
370 __u64 exit_timer; /* 0x02b0 */ 237 __u64 mcck_enter_timer; /* 0x02b0 */
371 __u64 user_timer; /* 0x02b8 */ 238 __u64 exit_timer; /* 0x02b8 */
372 __u64 system_timer; /* 0x02c0 */ 239 __u64 user_timer; /* 0x02c0 */
373 __u64 steal_timer; /* 0x02c8 */ 240 __u64 system_timer; /* 0x02c8 */
374 __u64 last_update_timer; /* 0x02d0 */ 241 __u64 steal_timer; /* 0x02d0 */
375 __u64 last_update_clock; /* 0x02d8 */ 242 __u64 last_update_timer; /* 0x02d8 */
243 __u64 last_update_clock; /* 0x02e0 */
376 244
377 /* Current process. */ 245 /* Current process. */
378 __u64 current_task; /* 0x02e0 */ 246 __u64 current_task; /* 0x02e8 */
379 __u64 thread_info; /* 0x02e8 */ 247 __u64 thread_info; /* 0x02f0 */
380 __u64 kernel_stack; /* 0x02f0 */ 248 __u64 kernel_stack; /* 0x02f8 */
381 249
382 /* Interrupt and panic stack. */ 250 /* Interrupt and panic stack. */
383 __u64 async_stack; /* 0x02f8 */ 251 __u64 async_stack; /* 0x0300 */
384 __u64 panic_stack; /* 0x0300 */ 252 __u64 panic_stack; /* 0x0308 */
385 253
386 /* Address space pointer. */ 254 /* Address space pointer. */
387 __u64 kernel_asce; /* 0x0308 */ 255 __u64 kernel_asce; /* 0x0310 */
388 __u64 user_asce; /* 0x0310 */ 256 __u64 user_asce; /* 0x0318 */
389 __u64 user_exec_asce; /* 0x0318 */ 257 __u64 user_exec_asce; /* 0x0320 */
390 258
391 /* SMP info area */ 259 /* SMP info area */
392 struct cpuid cpu_id; /* 0x0320 */
393 __u32 cpu_nr; /* 0x0328 */ 260 __u32 cpu_nr; /* 0x0328 */
394 __u32 softirq_pending; /* 0x032c */ 261 __u32 softirq_pending; /* 0x032c */
395 __u64 percpu_offset; /* 0x0330 */ 262 __u64 percpu_offset; /* 0x0330 */
396 __u64 ext_call_fast; /* 0x0338 */ 263 __u64 ext_call_fast; /* 0x0338 */
397 __u64 int_clock; /* 0x0340 */ 264 __u64 int_clock; /* 0x0340 */
398 __u64 clock_comparator; /* 0x0348 */ 265 __u64 mcck_clock; /* 0x0348 */
399 __u64 vdso_per_cpu_data; /* 0x0350 */ 266 __u64 clock_comparator; /* 0x0350 */
400 __u64 machine_flags; /* 0x0358 */ 267 __u64 vdso_per_cpu_data; /* 0x0358 */
401 __u64 ftrace_func; /* 0x0360 */ 268 __u64 machine_flags; /* 0x0360 */
402 __u8 pad_0x0368[0x0380-0x0368]; /* 0x0368 */ 269 __u64 ftrace_func; /* 0x0368 */
270 __u64 sie_hook; /* 0x0370 */
271 __u64 cmf_hpp; /* 0x0378 */
403 272
404 /* Interrupt response block. */ 273 /* Interrupt response block. */
405 __u8 irb[64]; /* 0x0380 */ 274 __u8 irb[64]; /* 0x0380 */
@@ -425,7 +294,7 @@ struct _lowcore
425 /* CPU register save area: defined by architecture */ 294 /* CPU register save area: defined by architecture */
426 __u64 floating_pt_save_area[16]; /* 0x1200 */ 295 __u64 floating_pt_save_area[16]; /* 0x1200 */
427 __u64 gpregs_save_area[16]; /* 0x1280 */ 296 __u64 gpregs_save_area[16]; /* 0x1280 */
428 __u32 st_status_fixed_logout[4]; /* 0x1300 */ 297 psw_t psw_save_area; /* 0x1300 */
429 __u8 pad_0x1310[0x1318-0x1310]; /* 0x1310 */ 298 __u8 pad_0x1310[0x1318-0x1310]; /* 0x1310 */
430 __u32 prefixreg_save_area; /* 0x1318 */ 299 __u32 prefixreg_save_area; /* 0x1318 */
431 __u32 fpt_creg_save_area; /* 0x131c */ 300 __u32 fpt_creg_save_area; /* 0x131c */
@@ -439,10 +308,12 @@ struct _lowcore
439 308
440 /* align to the top of the prefix area */ 309 /* align to the top of the prefix area */
441 __u8 pad_0x1400[0x2000-0x1400]; /* 0x1400 */ 310 __u8 pad_0x1400[0x2000-0x1400]; /* 0x1400 */
442#endif /* !__s390x__ */ 311} __packed;
443} __attribute__((packed)); /* End structure*/ 312
313#endif /* CONFIG_32BIT */
444 314
445#define S390_lowcore (*((struct _lowcore *) 0)) 315#define S390_lowcore (*((struct _lowcore *) 0))
316
446extern struct _lowcore *lowcore_ptr[]; 317extern struct _lowcore *lowcore_ptr[];
447 318
448static inline void set_prefix(__u32 address) 319static inline void set_prefix(__u32 address)
@@ -458,6 +329,4 @@ static inline __u32 store_prefix(void)
458 return address; 329 return address;
459} 330}
460 331
461#endif 332#endif /* _ASM_S390_LOWCORE_H */
462
463#endif
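
The rewritten lowcore definition above drops the long list of hand-maintained __LC_* offsets and instead encodes every architectural offset directly in struct _lowcore, with pad_0xNNNN members filling the gaps. One benefit is that the layout can be verified at compile time. The standalone sketch below shows such a check; demo_lowcore and its two fields are illustrative only, and a kernel would use BUILD_BUG_ON for the same effect.

/* Compile-time layout check for a fixed-offset structure (illustrative only). */
#include <stddef.h>

struct demo_lowcore {
        unsigned char   pad_0x0000[0x0014];             /* 0x0000 */
        unsigned int    ipl_parmblock_ptr;              /* 0x0014 */
        unsigned char   pad_0x0018[0x0080 - 0x0018];    /* 0x0018 */
        unsigned int    ext_params;                     /* 0x0080 */
} __attribute__((packed));

/* Fails to compile if a field drifts away from its documented offset. */
_Static_assert(offsetof(struct demo_lowcore, ipl_parmblock_ptr) == 0x0014,
               "ipl_parmblock_ptr moved");
_Static_assert(offsetof(struct demo_lowcore, ext_params) == 0x0080,
               "ext_params moved");

int main(void) { return 0; }
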
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 5e9daf5d7f22..af650fb47206 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -107,9 +107,6 @@ typedef pte_t *pgtable_t;
107#define __pgd(x) ((pgd_t) { (x) } ) 107#define __pgd(x) ((pgd_t) { (x) } )
108#define __pgprot(x) ((pgprot_t) { (x) } ) 108#define __pgprot(x) ((pgprot_t) { (x) } )
109 109
110/* default storage key used for all pages */
111extern unsigned int default_storage_key;
112
113static inline void 110static inline void
114page_set_storage_key(unsigned long addr, unsigned int skey) 111page_set_storage_key(unsigned long addr, unsigned int skey)
115{ 112{
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index e2fa79cf0614..89a504c3f12e 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -43,7 +43,7 @@ extern void vmem_map_init(void);
43 * The S390 doesn't have any external MMU info: the kernel page 43 * The S390 doesn't have any external MMU info: the kernel page
44 * tables contain all the necessary information. 44 * tables contain all the necessary information.
45 */ 45 */
46#define update_mmu_cache(vma, address, pte) do { } while (0) 46#define update_mmu_cache(vma, address, ptep) do { } while (0)
47 47
48/* 48/*
49 * ZERO_PAGE is a global shared page that is always zero: used 49 * ZERO_PAGE is a global shared page that is always zero: used
@@ -105,7 +105,7 @@ extern char empty_zero_page[PAGE_SIZE];
105#ifndef __ASSEMBLY__ 105#ifndef __ASSEMBLY__
106/* 106/*
107 * The vmalloc area will always be on the topmost area of the kernel 107 * The vmalloc area will always be on the topmost area of the kernel
108 * mapping. We reserve 96MB (31bit) / 1GB (64bit) for vmalloc, 108 * mapping. We reserve 96MB (31bit) / 128GB (64bit) for vmalloc,
109 * which should be enough for any sane case. 109 * which should be enough for any sane case.
110 * By putting vmalloc at the top, we maximise the gap between physical 110 * By putting vmalloc at the top, we maximise the gap between physical
111 * memory and vmalloc to catch misplaced memory accesses. As a side 111 * memory and vmalloc to catch misplaced memory accesses. As a side
@@ -120,8 +120,8 @@ extern unsigned long VMALLOC_START;
120#define VMALLOC_END 0x7e000000UL 120#define VMALLOC_END 0x7e000000UL
121#define VMEM_MAP_END 0x80000000UL 121#define VMEM_MAP_END 0x80000000UL
122#else /* __s390x__ */ 122#else /* __s390x__ */
123#define VMALLOC_SIZE (1UL << 30) 123#define VMALLOC_SIZE (128UL << 30)
124#define VMALLOC_END 0x3e040000000UL 124#define VMALLOC_END 0x3e000000000UL
125#define VMEM_MAP_END 0x40000000000UL 125#define VMEM_MAP_END 0x40000000000UL
126#endif /* __s390x__ */ 126#endif /* __s390x__ */
127 127
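
The 64-bit vmalloc area above grows from 1 GB to 128 GB and VMALLOC_END moves down to 0x3e000000000. The short program below is standalone arithmetic only, checking the window the new constants allow; the kernel itself sets the actual VMALLOC_START at boot depending on memory size.

/* Sanity check on the new VMALLOC_SIZE/VMALLOC_END constants (arithmetic only). */
#include <stdio.h>

int main(void)
{
        unsigned long long vmalloc_size = 128ULL << 30;         /* VMALLOC_SIZE: 128 GB */
        unsigned long long vmalloc_end  = 0x3e000000000ULL;     /* VMALLOC_END */

        /* lowest possible start if the full 128 GB window is used */
        printf("vmalloc window: [%#llx, %#llx)\n",
               vmalloc_end - vmalloc_size, vmalloc_end);
        return 0;
}
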
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index b42715458312..73e259834e10 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -28,7 +28,7 @@
28 28
29static inline void get_cpu_id(struct cpuid *ptr) 29static inline void get_cpu_id(struct cpuid *ptr)
30{ 30{
31 asm volatile("stidp 0(%1)" : "=m" (*ptr) : "a" (ptr)); 31 asm volatile("stidp %0" : "=Q" (*ptr));
32} 32}
33 33
34extern void s390_adjust_jiffies(void); 34extern void s390_adjust_jiffies(void);
@@ -184,9 +184,9 @@ static inline void psw_set_key(unsigned int key)
184static inline void __load_psw(psw_t psw) 184static inline void __load_psw(psw_t psw)
185{ 185{
186#ifndef __s390x__ 186#ifndef __s390x__
187 asm volatile("lpsw 0(%0)" : : "a" (&psw), "m" (psw) : "cc"); 187 asm volatile("lpsw %0" : : "Q" (psw) : "cc");
188#else 188#else
189 asm volatile("lpswe 0(%0)" : : "a" (&psw), "m" (psw) : "cc"); 189 asm volatile("lpswe %0" : : "Q" (psw) : "cc");
190#endif 190#endif
191} 191}
192 192
@@ -206,17 +206,17 @@ static inline void __load_psw_mask (unsigned long mask)
206 asm volatile( 206 asm volatile(
207 " basr %0,0\n" 207 " basr %0,0\n"
208 "0: ahi %0,1f-0b\n" 208 "0: ahi %0,1f-0b\n"
209 " st %0,4(%1)\n" 209 " st %0,%O1+4(%R1)\n"
210 " lpsw 0(%1)\n" 210 " lpsw %1\n"
211 "1:" 211 "1:"
212 : "=&d" (addr) : "a" (&psw), "m" (psw) : "memory", "cc"); 212 : "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc");
213#else /* __s390x__ */ 213#else /* __s390x__ */
214 asm volatile( 214 asm volatile(
215 " larl %0,1f\n" 215 " larl %0,1f\n"
216 " stg %0,8(%1)\n" 216 " stg %0,%O1+8(%R1)\n"
217 " lpswe 0(%1)\n" 217 " lpswe %1\n"
218 "1:" 218 "1:"
219 : "=&d" (addr) : "a" (&psw), "m" (psw) : "memory", "cc"); 219 : "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc");
220#endif /* __s390x__ */ 220#endif /* __s390x__ */
221} 221}
222 222
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index 95dcf183a28d..e2c218dc68a6 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -328,8 +328,8 @@ struct pt_regs
328 psw_t psw; 328 psw_t psw;
329 unsigned long gprs[NUM_GPRS]; 329 unsigned long gprs[NUM_GPRS];
330 unsigned long orig_gpr2; 330 unsigned long orig_gpr2;
331 unsigned short svcnr;
332 unsigned short ilc; 331 unsigned short ilc;
332 unsigned short svcnr;
333}; 333};
334#endif 334#endif
335 335
@@ -436,6 +436,7 @@ typedef struct
436#define PTRACE_PEEKDATA_AREA 0x5003 436#define PTRACE_PEEKDATA_AREA 0x5003
437#define PTRACE_POKETEXT_AREA 0x5004 437#define PTRACE_POKETEXT_AREA 0x5004
438#define PTRACE_POKEDATA_AREA 0x5005 438#define PTRACE_POKEDATA_AREA 0x5005
439#define PTRACE_GET_LAST_BREAK 0x5006
439 440
440/* 441/*
441 * PT_PROT definition is loosely based on hppa bsd definition in 442 * PT_PROT definition is loosely based on hppa bsd definition in
@@ -489,16 +490,24 @@ struct user_regs_struct
489 * These are defined as per linux/ptrace.h, which see. 490 * These are defined as per linux/ptrace.h, which see.
490 */ 491 */
491#define arch_has_single_step() (1) 492#define arch_has_single_step() (1)
492struct task_struct; 493extern void show_regs(struct pt_regs * regs);
493extern void user_enable_single_step(struct task_struct *);
494extern void user_disable_single_step(struct task_struct *);
495 494
496#define user_mode(regs) (((regs)->psw.mask & PSW_MASK_PSTATE) != 0) 495#define user_mode(regs) (((regs)->psw.mask & PSW_MASK_PSTATE) != 0)
497#define instruction_pointer(regs) ((regs)->psw.addr & PSW_ADDR_INSN) 496#define instruction_pointer(regs) ((regs)->psw.addr & PSW_ADDR_INSN)
498#define user_stack_pointer(regs)((regs)->gprs[15]) 497#define user_stack_pointer(regs)((regs)->gprs[15])
499#define regs_return_value(regs)((regs)->gprs[2]) 498#define regs_return_value(regs)((regs)->gprs[2])
500#define profile_pc(regs) instruction_pointer(regs) 499#define profile_pc(regs) instruction_pointer(regs)
501extern void show_regs(struct pt_regs * regs); 500
501int regs_query_register_offset(const char *name);
502const char *regs_query_register_name(unsigned int offset);
503unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset);
504unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n);
505
506static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
507{
508 return regs->gprs[15] & PSW_ADDR_INSN;
509}
510
502#endif /* __KERNEL__ */ 511#endif /* __KERNEL__ */
503#endif /* __ASSEMBLY__ */ 512#endif /* __ASSEMBLY__ */
504 513
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index 79d849f014f0..0eaae6260274 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -13,7 +13,8 @@
13#include <asm/cio.h> 13#include <asm/cio.h>
14#include <asm/ccwdev.h> 14#include <asm/ccwdev.h>
15 15
16#define QDIO_MAX_QUEUES_PER_IRQ 32 16/* only use 4 queues to save some cachelines */
17#define QDIO_MAX_QUEUES_PER_IRQ 4
17#define QDIO_MAX_BUFFERS_PER_Q 128 18#define QDIO_MAX_BUFFERS_PER_Q 128
18#define QDIO_MAX_BUFFERS_MASK (QDIO_MAX_BUFFERS_PER_Q - 1) 19#define QDIO_MAX_BUFFERS_MASK (QDIO_MAX_BUFFERS_PER_Q - 1)
19#define QDIO_MAX_ELEMENTS_PER_BUFFER 16 20#define QDIO_MAX_ELEMENTS_PER_BUFFER 16
@@ -320,11 +321,6 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int,
320#define QDIO_ERROR_ACTIVATE_CHECK_CONDITION 0x40 321#define QDIO_ERROR_ACTIVATE_CHECK_CONDITION 0x40
321#define QDIO_ERROR_SLSB_STATE 0x80 322#define QDIO_ERROR_SLSB_STATE 0x80
322 323
323/* for qdio_initialize */
324#define QDIO_INBOUND_0COPY_SBALS 0x01
325#define QDIO_OUTBOUND_0COPY_SBALS 0x02
326#define QDIO_USE_OUTBOUND_PCIS 0x04
327
328/* for qdio_cleanup */ 324/* for qdio_cleanup */
329#define QDIO_FLAG_CLEANUP_USING_CLEAR 0x01 325#define QDIO_FLAG_CLEANUP_USING_CLEAR 0x01
330#define QDIO_FLAG_CLEANUP_USING_HALT 0x02 326#define QDIO_FLAG_CLEANUP_USING_HALT 0x02
@@ -343,7 +339,6 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int,
343 * @input_handler: handler to be called for input queues 339 * @input_handler: handler to be called for input queues
344 * @output_handler: handler to be called for output queues 340 * @output_handler: handler to be called for output queues
345 * @int_parm: interruption parameter 341 * @int_parm: interruption parameter
346 * @flags: initialization flags
347 * @input_sbal_addr_array: address of no_input_qs * 128 pointers 342 * @input_sbal_addr_array: address of no_input_qs * 128 pointers
348 * @output_sbal_addr_array: address of no_output_qs * 128 pointers 343 * @output_sbal_addr_array: address of no_output_qs * 128 pointers
349 */ 344 */
@@ -360,7 +355,6 @@ struct qdio_initialize {
360 qdio_handler_t *input_handler; 355 qdio_handler_t *input_handler;
361 qdio_handler_t *output_handler; 356 qdio_handler_t *output_handler;
362 unsigned long int_parm; 357 unsigned long int_parm;
363 unsigned long flags;
364 void **input_sbal_addr_array; 358 void **input_sbal_addr_array;
365 void **output_sbal_addr_array; 359 void **output_sbal_addr_array;
366}; 360};
@@ -374,14 +368,12 @@ struct qdio_initialize {
374#define QDIO_FLAG_SYNC_OUTPUT 0x02 368#define QDIO_FLAG_SYNC_OUTPUT 0x02
375#define QDIO_FLAG_PCI_OUT 0x10 369#define QDIO_FLAG_PCI_OUT 0x10
376 370
377extern int qdio_initialize(struct qdio_initialize *);
378extern int qdio_allocate(struct qdio_initialize *); 371extern int qdio_allocate(struct qdio_initialize *);
379extern int qdio_establish(struct qdio_initialize *); 372extern int qdio_establish(struct qdio_initialize *);
380extern int qdio_activate(struct ccw_device *); 373extern int qdio_activate(struct ccw_device *);
381 374
382extern int do_QDIO(struct ccw_device *cdev, unsigned int callflags, 375extern int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
383 int q_nr, unsigned int bufnr, unsigned int count); 376 int q_nr, unsigned int bufnr, unsigned int count);
384extern int qdio_cleanup(struct ccw_device*, int);
385extern int qdio_shutdown(struct ccw_device*, int); 377extern int qdio_shutdown(struct ccw_device*, int);
386extern int qdio_free(struct ccw_device *); 378extern int qdio_free(struct ccw_device *);
387extern int qdio_get_ssqd_desc(struct ccw_device *dev, struct qdio_ssqd_desc*); 379extern int qdio_get_ssqd_desc(struct ccw_device *dev, struct qdio_ssqd_desc*);
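Reviewer note: with qdio_initialize() and qdio_cleanup() removed, drivers drive the split steps themselves. A rough sketch of the resulting bring-up/teardown order, using only the functions still declared in this header (error handling trimmed, init_data assumed to be filled in by the caller):

	static int example_qdio_bringup(struct ccw_device *cdev,
					struct qdio_initialize *init_data)
	{
		int rc;

		rc = qdio_allocate(init_data);		/* was half of qdio_initialize() */
		if (rc)
			return rc;
		rc = qdio_establish(init_data);		/* was the other half */
		if (rc)
			goto out_free;
		rc = qdio_activate(cdev);
		if (rc)
			goto out_shutdown;
		return 0;

	out_shutdown:
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
	out_free:
		qdio_free(cdev);
		return rc;
	}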
diff --git a/arch/s390/include/asm/rwsem.h b/arch/s390/include/asm/rwsem.h
index 9d2a17971805..423fdda2322d 100644
--- a/arch/s390/include/asm/rwsem.h
+++ b/arch/s390/include/asm/rwsem.h
@@ -124,21 +124,21 @@ static inline void __down_read(struct rw_semaphore *sem)
124 124
125 asm volatile( 125 asm volatile(
126#ifndef __s390x__ 126#ifndef __s390x__
127 " l %0,0(%3)\n" 127 " l %0,%2\n"
128 "0: lr %1,%0\n" 128 "0: lr %1,%0\n"
129 " ahi %1,%5\n" 129 " ahi %1,%4\n"
130 " cs %0,%1,0(%3)\n" 130 " cs %0,%1,%2\n"
131 " jl 0b" 131 " jl 0b"
132#else /* __s390x__ */ 132#else /* __s390x__ */
133 " lg %0,0(%3)\n" 133 " lg %0,%2\n"
134 "0: lgr %1,%0\n" 134 "0: lgr %1,%0\n"
135 " aghi %1,%5\n" 135 " aghi %1,%4\n"
136 " csg %0,%1,0(%3)\n" 136 " csg %0,%1,%2\n"
137 " jl 0b" 137 " jl 0b"
138#endif /* __s390x__ */ 138#endif /* __s390x__ */
139 : "=&d" (old), "=&d" (new), "=m" (sem->count) 139 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
140 : "a" (&sem->count), "m" (sem->count), 140 : "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
141 "i" (RWSEM_ACTIVE_READ_BIAS) : "cc", "memory"); 141 : "cc", "memory");
142 if (old < 0) 142 if (old < 0)
143 rwsem_down_read_failed(sem); 143 rwsem_down_read_failed(sem);
144} 144}
@@ -152,25 +152,25 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
152 152
153 asm volatile( 153 asm volatile(
154#ifndef __s390x__ 154#ifndef __s390x__
155 " l %0,0(%3)\n" 155 " l %0,%2\n"
156 "0: ltr %1,%0\n" 156 "0: ltr %1,%0\n"
157 " jm 1f\n" 157 " jm 1f\n"
158 " ahi %1,%5\n" 158 " ahi %1,%4\n"
159 " cs %0,%1,0(%3)\n" 159 " cs %0,%1,%2\n"
160 " jl 0b\n" 160 " jl 0b\n"
161 "1:" 161 "1:"
162#else /* __s390x__ */ 162#else /* __s390x__ */
163 " lg %0,0(%3)\n" 163 " lg %0,%2\n"
164 "0: ltgr %1,%0\n" 164 "0: ltgr %1,%0\n"
165 " jm 1f\n" 165 " jm 1f\n"
166 " aghi %1,%5\n" 166 " aghi %1,%4\n"
167 " csg %0,%1,0(%3)\n" 167 " csg %0,%1,%2\n"
168 " jl 0b\n" 168 " jl 0b\n"
169 "1:" 169 "1:"
170#endif /* __s390x__ */ 170#endif /* __s390x__ */
171 : "=&d" (old), "=&d" (new), "=m" (sem->count) 171 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
172 : "a" (&sem->count), "m" (sem->count), 172 : "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
173 "i" (RWSEM_ACTIVE_READ_BIAS) : "cc", "memory"); 173 : "cc", "memory");
174 return old >= 0 ? 1 : 0; 174 return old >= 0 ? 1 : 0;
175} 175}
176 176
@@ -184,20 +184,20 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
184 tmp = RWSEM_ACTIVE_WRITE_BIAS; 184 tmp = RWSEM_ACTIVE_WRITE_BIAS;
185 asm volatile( 185 asm volatile(
186#ifndef __s390x__ 186#ifndef __s390x__
187 " l %0,0(%3)\n" 187 " l %0,%2\n"
188 "0: lr %1,%0\n" 188 "0: lr %1,%0\n"
189 " a %1,%5\n" 189 " a %1,%4\n"
190 " cs %0,%1,0(%3)\n" 190 " cs %0,%1,%2\n"
191 " jl 0b" 191 " jl 0b"
192#else /* __s390x__ */ 192#else /* __s390x__ */
193 " lg %0,0(%3)\n" 193 " lg %0,%2\n"
194 "0: lgr %1,%0\n" 194 "0: lgr %1,%0\n"
195 " ag %1,%5\n" 195 " ag %1,%4\n"
196 " csg %0,%1,0(%3)\n" 196 " csg %0,%1,%2\n"
197 " jl 0b" 197 " jl 0b"
198#endif /* __s390x__ */ 198#endif /* __s390x__ */
199 : "=&d" (old), "=&d" (new), "=m" (sem->count) 199 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
200 : "a" (&sem->count), "m" (sem->count), "m" (tmp) 200 : "Q" (sem->count), "m" (tmp)
201 : "cc", "memory"); 201 : "cc", "memory");
202 if (old != 0) 202 if (old != 0)
203 rwsem_down_write_failed(sem); 203 rwsem_down_write_failed(sem);
@@ -217,22 +217,22 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
217 217
218 asm volatile( 218 asm volatile(
219#ifndef __s390x__ 219#ifndef __s390x__
220 " l %0,0(%2)\n" 220 " l %0,%1\n"
221 "0: ltr %0,%0\n" 221 "0: ltr %0,%0\n"
222 " jnz 1f\n" 222 " jnz 1f\n"
223 " cs %0,%4,0(%2)\n" 223 " cs %0,%3,%1\n"
224 " jl 0b\n" 224 " jl 0b\n"
225#else /* __s390x__ */ 225#else /* __s390x__ */
226 " lg %0,0(%2)\n" 226 " lg %0,%1\n"
227 "0: ltgr %0,%0\n" 227 "0: ltgr %0,%0\n"
228 " jnz 1f\n" 228 " jnz 1f\n"
229 " csg %0,%4,0(%2)\n" 229 " csg %0,%3,%1\n"
230 " jl 0b\n" 230 " jl 0b\n"
231#endif /* __s390x__ */ 231#endif /* __s390x__ */
232 "1:" 232 "1:"
233 : "=&d" (old), "=m" (sem->count) 233 : "=&d" (old), "=Q" (sem->count)
234 : "a" (&sem->count), "m" (sem->count), 234 : "Q" (sem->count), "d" (RWSEM_ACTIVE_WRITE_BIAS)
235 "d" (RWSEM_ACTIVE_WRITE_BIAS) : "cc", "memory"); 235 : "cc", "memory");
236 return (old == RWSEM_UNLOCKED_VALUE) ? 1 : 0; 236 return (old == RWSEM_UNLOCKED_VALUE) ? 1 : 0;
237} 237}
238 238
@@ -245,21 +245,20 @@ static inline void __up_read(struct rw_semaphore *sem)
245 245
246 asm volatile( 246 asm volatile(
247#ifndef __s390x__ 247#ifndef __s390x__
248 " l %0,0(%3)\n" 248 " l %0,%2\n"
249 "0: lr %1,%0\n" 249 "0: lr %1,%0\n"
250 " ahi %1,%5\n" 250 " ahi %1,%4\n"
251 " cs %0,%1,0(%3)\n" 251 " cs %0,%1,%2\n"
252 " jl 0b" 252 " jl 0b"
253#else /* __s390x__ */ 253#else /* __s390x__ */
254 " lg %0,0(%3)\n" 254 " lg %0,%2\n"
255 "0: lgr %1,%0\n" 255 "0: lgr %1,%0\n"
256 " aghi %1,%5\n" 256 " aghi %1,%4\n"
257 " csg %0,%1,0(%3)\n" 257 " csg %0,%1,%2\n"
258 " jl 0b" 258 " jl 0b"
259#endif /* __s390x__ */ 259#endif /* __s390x__ */
260 : "=&d" (old), "=&d" (new), "=m" (sem->count) 260 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
261 : "a" (&sem->count), "m" (sem->count), 261 : "Q" (sem->count), "i" (-RWSEM_ACTIVE_READ_BIAS)
262 "i" (-RWSEM_ACTIVE_READ_BIAS)
263 : "cc", "memory"); 262 : "cc", "memory");
264 if (new < 0) 263 if (new < 0)
265 if ((new & RWSEM_ACTIVE_MASK) == 0) 264 if ((new & RWSEM_ACTIVE_MASK) == 0)
@@ -276,20 +275,20 @@ static inline void __up_write(struct rw_semaphore *sem)
276 tmp = -RWSEM_ACTIVE_WRITE_BIAS; 275 tmp = -RWSEM_ACTIVE_WRITE_BIAS;
277 asm volatile( 276 asm volatile(
278#ifndef __s390x__ 277#ifndef __s390x__
279 " l %0,0(%3)\n" 278 " l %0,%2\n"
280 "0: lr %1,%0\n" 279 "0: lr %1,%0\n"
281 " a %1,%5\n" 280 " a %1,%4\n"
282 " cs %0,%1,0(%3)\n" 281 " cs %0,%1,%2\n"
283 " jl 0b" 282 " jl 0b"
284#else /* __s390x__ */ 283#else /* __s390x__ */
285 " lg %0,0(%3)\n" 284 " lg %0,%2\n"
286 "0: lgr %1,%0\n" 285 "0: lgr %1,%0\n"
287 " ag %1,%5\n" 286 " ag %1,%4\n"
288 " csg %0,%1,0(%3)\n" 287 " csg %0,%1,%2\n"
289 " jl 0b" 288 " jl 0b"
290#endif /* __s390x__ */ 289#endif /* __s390x__ */
291 : "=&d" (old), "=&d" (new), "=m" (sem->count) 290 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
292 : "a" (&sem->count), "m" (sem->count), "m" (tmp) 291 : "Q" (sem->count), "m" (tmp)
293 : "cc", "memory"); 292 : "cc", "memory");
294 if (new < 0) 293 if (new < 0)
295 if ((new & RWSEM_ACTIVE_MASK) == 0) 294 if ((new & RWSEM_ACTIVE_MASK) == 0)
@@ -306,20 +305,20 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
306 tmp = -RWSEM_WAITING_BIAS; 305 tmp = -RWSEM_WAITING_BIAS;
307 asm volatile( 306 asm volatile(
308#ifndef __s390x__ 307#ifndef __s390x__
309 " l %0,0(%3)\n" 308 " l %0,%2\n"
310 "0: lr %1,%0\n" 309 "0: lr %1,%0\n"
311 " a %1,%5\n" 310 " a %1,%4\n"
312 " cs %0,%1,0(%3)\n" 311 " cs %0,%1,%2\n"
313 " jl 0b" 312 " jl 0b"
314#else /* __s390x__ */ 313#else /* __s390x__ */
315 " lg %0,0(%3)\n" 314 " lg %0,%2\n"
316 "0: lgr %1,%0\n" 315 "0: lgr %1,%0\n"
317 " ag %1,%5\n" 316 " ag %1,%4\n"
318 " csg %0,%1,0(%3)\n" 317 " csg %0,%1,%2\n"
319 " jl 0b" 318 " jl 0b"
320#endif /* __s390x__ */ 319#endif /* __s390x__ */
321 : "=&d" (old), "=&d" (new), "=m" (sem->count) 320 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
322 : "a" (&sem->count), "m" (sem->count), "m" (tmp) 321 : "Q" (sem->count), "m" (tmp)
323 : "cc", "memory"); 322 : "cc", "memory");
324 if (new > 1) 323 if (new > 1)
325 rwsem_downgrade_wake(sem); 324 rwsem_downgrade_wake(sem);
@@ -334,20 +333,20 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
334 333
335 asm volatile( 334 asm volatile(
336#ifndef __s390x__ 335#ifndef __s390x__
337 " l %0,0(%3)\n" 336 " l %0,%2\n"
338 "0: lr %1,%0\n" 337 "0: lr %1,%0\n"
339 " ar %1,%5\n" 338 " ar %1,%4\n"
340 " cs %0,%1,0(%3)\n" 339 " cs %0,%1,%2\n"
341 " jl 0b" 340 " jl 0b"
342#else /* __s390x__ */ 341#else /* __s390x__ */
343 " lg %0,0(%3)\n" 342 " lg %0,%2\n"
344 "0: lgr %1,%0\n" 343 "0: lgr %1,%0\n"
345 " agr %1,%5\n" 344 " agr %1,%4\n"
346 " csg %0,%1,0(%3)\n" 345 " csg %0,%1,%2\n"
347 " jl 0b" 346 " jl 0b"
348#endif /* __s390x__ */ 347#endif /* __s390x__ */
349 : "=&d" (old), "=&d" (new), "=m" (sem->count) 348 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
350 : "a" (&sem->count), "m" (sem->count), "d" (delta) 349 : "Q" (sem->count), "d" (delta)
351 : "cc", "memory"); 350 : "cc", "memory");
352} 351}
353 352
@@ -360,20 +359,20 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
360 359
361 asm volatile( 360 asm volatile(
362#ifndef __s390x__ 361#ifndef __s390x__
363 " l %0,0(%3)\n" 362 " l %0,%2\n"
364 "0: lr %1,%0\n" 363 "0: lr %1,%0\n"
365 " ar %1,%5\n" 364 " ar %1,%4\n"
366 " cs %0,%1,0(%3)\n" 365 " cs %0,%1,%2\n"
367 " jl 0b" 366 " jl 0b"
368#else /* __s390x__ */ 367#else /* __s390x__ */
369 " lg %0,0(%3)\n" 368 " lg %0,%2\n"
370 "0: lgr %1,%0\n" 369 "0: lgr %1,%0\n"
371 " agr %1,%5\n" 370 " agr %1,%4\n"
372 " csg %0,%1,0(%3)\n" 371 " csg %0,%1,%2\n"
373 " jl 0b" 372 " jl 0b"
374#endif /* __s390x__ */ 373#endif /* __s390x__ */
375 : "=&d" (old), "=&d" (new), "=m" (sem->count) 374 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
376 : "a" (&sem->count), "m" (sem->count), "d" (delta) 375 : "Q" (sem->count), "d" (delta)
377 : "cc", "memory"); 376 : "cc", "memory");
378 return new; 377 return new;
379} 378}
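Reviewer note: every hunk in this file follows the same pattern: the explicit address operand ("a" (&sem->count)) plus "m" constraint is replaced by a single "Q" memory constraint, so the compiler emits the base/displacement form itself. A standalone sketch of the idiom, mirroring rwsem_atomic_update() above (the helper itself is made up):

	static inline int example_fetch_add(int *counter, int delta)
	{
		int old, new;

		asm volatile(
			"	l	%0,%2\n"
			"0:	lr	%1,%0\n"
			"	ar	%1,%4\n"
			"	cs	%0,%1,%2\n"	/* retry until the compare-and-swap wins */
			"	jl	0b"
			: "=&d" (old), "=&d" (new), "=Q" (*counter)
			: "Q" (*counter), "d" (delta)
			: "cc", "memory");
		return new;
	}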
diff --git a/arch/s390/include/asm/scatterlist.h b/arch/s390/include/asm/scatterlist.h
index 35d786fe93ae..be44d94cba54 100644
--- a/arch/s390/include/asm/scatterlist.h
+++ b/arch/s390/include/asm/scatterlist.h
@@ -1 +1,3 @@
1#define ISA_DMA_THRESHOLD (~0UL)
2
1#include <asm-generic/scatterlist.h> 3#include <asm-generic/scatterlist.h>
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 52a779c337e8..25e831d58e1e 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -2,7 +2,7 @@
2 * include/asm-s390/setup.h 2 * include/asm-s390/setup.h
3 * 3 *
4 * S390 version 4 * S390 version
5 * Copyright IBM Corp. 1999,2006 5 * Copyright IBM Corp. 1999,2010
6 */ 6 */
7 7
8#ifndef _ASM_S390_SETUP_H 8#ifndef _ASM_S390_SETUP_H
@@ -14,14 +14,14 @@
14 14
15#ifdef __KERNEL__ 15#ifdef __KERNEL__
16 16
17#include <asm/lowcore.h>
18#include <asm/types.h>
19
20#define PARMAREA 0x10400 17#define PARMAREA 0x10400
21#define MEMORY_CHUNKS 256 18#define MEMORY_CHUNKS 256
22 19
23#ifndef __ASSEMBLY__ 20#ifndef __ASSEMBLY__
24 21
22#include <asm/lowcore.h>
23#include <asm/types.h>
24
25#ifndef __s390x__ 25#ifndef __s390x__
26#define IPL_DEVICE (*(unsigned long *) (0x10404)) 26#define IPL_DEVICE (*(unsigned long *) (0x10404))
27#define INITRD_START (*(unsigned long *) (0x1040C)) 27#define INITRD_START (*(unsigned long *) (0x1040C))
@@ -71,9 +71,13 @@ extern unsigned int user_mode;
71#define MACHINE_FLAG_KVM (1UL << 9) 71#define MACHINE_FLAG_KVM (1UL << 9)
72#define MACHINE_FLAG_HPAGE (1UL << 10) 72#define MACHINE_FLAG_HPAGE (1UL << 10)
73#define MACHINE_FLAG_PFMF (1UL << 11) 73#define MACHINE_FLAG_PFMF (1UL << 11)
74#define MACHINE_FLAG_LPAR (1UL << 12)
75#define MACHINE_FLAG_SPP (1UL << 13)
74 76
75#define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM) 77#define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM)
76#define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM) 78#define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
79#define MACHINE_IS_LPAR (S390_lowcore.machine_flags & MACHINE_FLAG_LPAR)
80
77#define MACHINE_HAS_DIAG9C (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG9C) 81#define MACHINE_HAS_DIAG9C (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG9C)
78 82
79#ifndef __s390x__ 83#ifndef __s390x__
@@ -85,6 +89,7 @@ extern unsigned int user_mode;
85#define MACHINE_HAS_MVCOS (0) 89#define MACHINE_HAS_MVCOS (0)
86#define MACHINE_HAS_HPAGE (0) 90#define MACHINE_HAS_HPAGE (0)
87#define MACHINE_HAS_PFMF (0) 91#define MACHINE_HAS_PFMF (0)
92#define MACHINE_HAS_SPP (0)
88#else /* __s390x__ */ 93#else /* __s390x__ */
89#define MACHINE_HAS_IEEE (1) 94#define MACHINE_HAS_IEEE (1)
90#define MACHINE_HAS_CSP (1) 95#define MACHINE_HAS_CSP (1)
@@ -94,6 +99,7 @@ extern unsigned int user_mode;
94#define MACHINE_HAS_MVCOS (S390_lowcore.machine_flags & MACHINE_FLAG_MVCOS) 99#define MACHINE_HAS_MVCOS (S390_lowcore.machine_flags & MACHINE_FLAG_MVCOS)
95#define MACHINE_HAS_HPAGE (S390_lowcore.machine_flags & MACHINE_FLAG_HPAGE) 100#define MACHINE_HAS_HPAGE (S390_lowcore.machine_flags & MACHINE_FLAG_HPAGE)
96#define MACHINE_HAS_PFMF (S390_lowcore.machine_flags & MACHINE_FLAG_PFMF) 101#define MACHINE_HAS_PFMF (S390_lowcore.machine_flags & MACHINE_FLAG_PFMF)
102#define MACHINE_HAS_SPP (S390_lowcore.machine_flags & MACHINE_FLAG_SPP)
97#endif /* __s390x__ */ 103#endif /* __s390x__ */
98 104
99#define ZFCPDUMP_HSA_SIZE (32UL<<20) 105#define ZFCPDUMP_HSA_SIZE (32UL<<20)
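Reviewer note: the LPAR and SPP flags slot into the existing machine-flag scheme. A trivial, purely illustrative consumer (the messages are made up):

	static void example_report_environment(void)
	{
		if (MACHINE_IS_VM)
			pr_info("running as a z/VM guest\n");
		else if (MACHINE_IS_KVM)
			pr_info("running as a KVM guest\n");
		else if (MACHINE_IS_LPAR)
			pr_info("running in an LPAR\n");
		if (MACHINE_HAS_SPP)
			pr_info("set-program-parameter facility available\n");
	}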
diff --git a/arch/s390/include/asm/sigp.h b/arch/s390/include/asm/sigp.h
index f72d611f7e13..e3bffd4e2d66 100644
--- a/arch/s390/include/asm/sigp.h
+++ b/arch/s390/include/asm/sigp.h
@@ -1,24 +1,19 @@
1/* 1/*
2 * include/asm-s390/sigp.h 2 * Routines and structures for signalling other processors.
3 * 3 *
4 * S390 version 4 * Copyright IBM Corp. 1999,2010
5 * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation 5 * Author(s): Denis Joseph Barrow,
6 * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), 6 * Martin Schwidefsky <schwidefsky@de.ibm.com>,
7 * Martin Schwidefsky (schwidefsky@de.ibm.com) 7 * Heiko Carstens <heiko.carstens@de.ibm.com>,
8 * Heiko Carstens (heiko.carstens@de.ibm.com)
9 *
10 * sigp.h by D.J. Barrow (c) IBM 1999
11 * contains routines / structures for signalling other S/390 processors in an
12 * SMP configuration.
13 */ 8 */
14 9
15#ifndef __SIGP__ 10#ifndef __ASM_SIGP_H
16#define __SIGP__ 11#define __ASM_SIGP_H
17 12
18#include <asm/system.h> 13#include <asm/system.h>
19 14
20/* get real cpu address from logical cpu number */ 15/* Get real cpu address from logical cpu number. */
21extern int __cpu_logical_map[]; 16extern unsigned short __cpu_logical_map[];
22 17
23static inline int cpu_logical_map(int cpu) 18static inline int cpu_logical_map(int cpu)
24{ 19{
@@ -29,107 +24,108 @@ static inline int cpu_logical_map(int cpu)
29#endif 24#endif
30} 25}
31 26
32typedef enum 27enum {
33{ 28 sigp_sense = 1,
34 sigp_unassigned=0x0, 29 sigp_external_call = 2,
35 sigp_sense, 30 sigp_emergency_signal = 3,
36 sigp_external_call, 31 sigp_start = 4,
37 sigp_emergency_signal, 32 sigp_stop = 5,
38 sigp_start, 33 sigp_restart = 6,
39 sigp_stop, 34 sigp_stop_and_store_status = 9,
40 sigp_restart, 35 sigp_initial_cpu_reset = 11,
41 sigp_unassigned1, 36 sigp_cpu_reset = 12,
42 sigp_unassigned2, 37 sigp_set_prefix = 13,
43 sigp_stop_and_store_status, 38 sigp_store_status_at_address = 14,
44 sigp_unassigned3, 39 sigp_store_extended_status_at_address = 15,
45 sigp_initial_cpu_reset, 40 sigp_set_architecture = 18,
46 sigp_cpu_reset, 41 sigp_conditional_emergency_signal = 19,
47 sigp_set_prefix, 42 sigp_sense_running = 21,
48 sigp_store_status_at_address, 43};
49 sigp_store_extended_status_at_address 44
50} sigp_order_code; 45enum {
51 46 sigp_order_code_accepted = 0,
52typedef __u32 sigp_status_word; 47 sigp_status_stored = 1,
53 48 sigp_busy = 2,
54typedef enum 49 sigp_not_operational = 3,
55{ 50};
56 sigp_order_code_accepted=0,
57 sigp_status_stored,
58 sigp_busy,
59 sigp_not_operational
60} sigp_ccode;
61
62 51
63/* 52/*
64 * Definitions for the external call 53 * Definitions for external call.
65 */ 54 */
66 55enum {
67/* 'Bit' signals, asynchronous */ 56 ec_schedule = 0,
68typedef enum
69{
70 ec_schedule=0,
71 ec_call_function, 57 ec_call_function,
72 ec_call_function_single, 58 ec_call_function_single,
73 ec_bit_last 59};
74} ec_bit_sig;
75 60
76/* 61/*
77 * Signal processor 62 * Signal processor.
78 */ 63 */
79static inline sigp_ccode 64static inline int raw_sigp(u16 cpu, int order)
80signal_processor(__u16 cpu_addr, sigp_order_code order_code)
81{ 65{
82 register unsigned long reg1 asm ("1") = 0; 66 register unsigned long reg1 asm ("1") = 0;
83 sigp_ccode ccode; 67 int ccode;
84 68
85 asm volatile( 69 asm volatile(
86 " sigp %1,%2,0(%3)\n" 70 " sigp %1,%2,0(%3)\n"
87 " ipm %0\n" 71 " ipm %0\n"
88 " srl %0,28\n" 72 " srl %0,28\n"
89 : "=d" (ccode) 73 : "=d" (ccode)
90 : "d" (reg1), "d" (cpu_logical_map(cpu_addr)), 74 : "d" (reg1), "d" (cpu),
91 "a" (order_code) : "cc" , "memory"); 75 "a" (order) : "cc" , "memory");
92 return ccode; 76 return ccode;
93} 77}
94 78
95/* 79/*
96 * Signal processor with parameter 80 * Signal processor with parameter.
97 */ 81 */
98static inline sigp_ccode 82static inline int raw_sigp_p(u32 parameter, u16 cpu, int order)
99signal_processor_p(__u32 parameter, __u16 cpu_addr, sigp_order_code order_code)
100{ 83{
101 register unsigned int reg1 asm ("1") = parameter; 84 register unsigned int reg1 asm ("1") = parameter;
102 sigp_ccode ccode; 85 int ccode;
103 86
104 asm volatile( 87 asm volatile(
105 " sigp %1,%2,0(%3)\n" 88 " sigp %1,%2,0(%3)\n"
106 " ipm %0\n" 89 " ipm %0\n"
107 " srl %0,28\n" 90 " srl %0,28\n"
108 : "=d" (ccode) 91 : "=d" (ccode)
109 : "d" (reg1), "d" (cpu_logical_map(cpu_addr)), 92 : "d" (reg1), "d" (cpu),
110 "a" (order_code) : "cc" , "memory"); 93 "a" (order) : "cc" , "memory");
111 return ccode; 94 return ccode;
112} 95}
113 96
114/* 97/*
115 * Signal processor with parameter and return status 98 * Signal processor with parameter and return status.
116 */ 99 */
117static inline sigp_ccode 100static inline int raw_sigp_ps(u32 *status, u32 parm, u16 cpu, int order)
118signal_processor_ps(__u32 *statusptr, __u32 parameter, __u16 cpu_addr,
119 sigp_order_code order_code)
120{ 101{
121 register unsigned int reg1 asm ("1") = parameter; 102 register unsigned int reg1 asm ("1") = parm;
122 sigp_ccode ccode; 103 int ccode;
123 104
124 asm volatile( 105 asm volatile(
125 " sigp %1,%2,0(%3)\n" 106 " sigp %1,%2,0(%3)\n"
126 " ipm %0\n" 107 " ipm %0\n"
127 " srl %0,28\n" 108 " srl %0,28\n"
128 : "=d" (ccode), "+d" (reg1) 109 : "=d" (ccode), "+d" (reg1)
129 : "d" (cpu_logical_map(cpu_addr)), "a" (order_code) 110 : "d" (cpu), "a" (order)
130 : "cc" , "memory"); 111 : "cc" , "memory");
131 *statusptr = reg1; 112 *status = reg1;
132 return ccode; 113 return ccode;
133} 114}
134 115
135#endif /* __SIGP__ */ 116static inline int sigp(int cpu, int order)
117{
118 return raw_sigp(cpu_logical_map(cpu), order);
119}
120
121static inline int sigp_p(u32 parameter, int cpu, int order)
122{
123 return raw_sigp_p(parameter, cpu_logical_map(cpu), order);
124}
125
126static inline int sigp_ps(u32 *status, u32 parm, int cpu, int order)
127{
128 return raw_sigp_ps(status, parm, cpu_logical_map(cpu), order);
129}
130
131#endif /* __ASM_SIGP_H */
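Reviewer note: the typed enums and the raw_sigp*/sigp* split replace the old sigp_order_code/sigp_ccode typedefs; sigp() takes a logical cpu number, raw_sigp() a real cpu address. A hedged sketch of typical call sites; the busy-retry policy and the set-prefix-then-restart sequence are assumptions modelled on the smp code, not part of this header:

	/* Retry a SIGP order while the target cpu reports busy. */
	static int example_sigp_retry(int cpu, int order)
	{
		int cc;

		do {
			cc = sigp(cpu, order);
		} while (cc == sigp_busy);
		return cc;
	}

	/* Point a cpu's prefix register at its lowcore, then restart it.
	 * lowcore_addr is a hypothetical parameter. */
	static void example_start_cpu(int cpu, u32 lowcore_addr)
	{
		sigp_p(lowcore_addr, cpu, sigp_set_prefix);
		example_sigp_retry(cpu, sigp_restart);
	}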
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index 2ab1141eeb50..edc03cb9cd79 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -29,7 +29,43 @@ extern int smp_cpu_polarization[];
29extern void arch_send_call_function_single_ipi(int cpu); 29extern void arch_send_call_function_single_ipi(int cpu);
30extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); 30extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
31 31
32extern union save_area *zfcpdump_save_areas[NR_CPUS + 1]; 32extern struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
33
34extern void smp_switch_to_ipl_cpu(void (*func)(void *), void *);
35extern void smp_switch_to_cpu(void (*)(void *), void *, unsigned long sp,
36 int from, int to);
37extern void smp_restart_cpu(void);
38
39/*
40 * returns 1 if (virtual) cpu is scheduled
41 * returns 0 otherwise
42 */
43static inline int smp_vcpu_scheduled(int cpu)
44{
45 u32 status;
46
47 switch (sigp_ps(&status, 0, cpu, sigp_sense_running)) {
48 case sigp_status_stored:
49 /* Check for running status */
50 if (status & 0x400)
51 return 0;
52 break;
53 case sigp_not_operational:
54 return 0;
55 default:
56 break;
57 }
58 return 1;
59}
60
61#else /* CONFIG_SMP */
62
63static inline void smp_switch_to_ipl_cpu(void (*func)(void *), void *data)
64{
65 func(data);
66}
67
68#define smp_vcpu_scheduled (1)
33 69
34#endif /* CONFIG_SMP */ 70#endif /* CONFIG_SMP */
35 71
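Reviewer note: smp_vcpu_scheduled() uses the new sigp_sense_running order to ask whether the hypervisor currently has the target virtual cpu dispatched. A sketch of the kind of caller it is meant for; the lock word and retry count are illustrative only:

	/* Bounded spin that gives up early when the lock holder's vcpu is not
	 * running, so the caller can yield instead of burning its time slice. */
	static inline int example_try_spin(volatile unsigned int *lock, int owner_cpu)
	{
		int retries = 1000;

		while (*lock && retries--) {
			if (!smp_vcpu_scheduled(owner_cpu))
				return 0;	/* owner preempted: caller should yield */
			cpu_relax();
		}
		return *lock == 0;
	}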
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index a587907d77f3..56612fc8186e 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -13,8 +13,6 @@
13 13
14#include <linux/smp.h> 14#include <linux/smp.h>
15 15
16#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
17
18static inline int 16static inline int
19_raw_compare_and_swap(volatile unsigned int *lock, 17_raw_compare_and_swap(volatile unsigned int *lock,
20 unsigned int old, unsigned int new) 18 unsigned int old, unsigned int new)
@@ -27,22 +25,6 @@ _raw_compare_and_swap(volatile unsigned int *lock,
27 return old; 25 return old;
28} 26}
29 27
30#else /* __GNUC__ */
31
32static inline int
33_raw_compare_and_swap(volatile unsigned int *lock,
34 unsigned int old, unsigned int new)
35{
36 asm volatile(
37 " cs %0,%3,0(%4)"
38 : "=d" (old), "=m" (*lock)
39 : "0" (old), "d" (new), "a" (lock), "m" (*lock)
40 : "cc", "memory" );
41 return old;
42}
43
44#endif /* __GNUC__ */
45
46/* 28/*
47 * Simple spin lock operations. There are two variants, one clears IRQ's 29 * Simple spin lock operations. There are two variants, one clears IRQ's
48 * on the local processor, one does not. 30 * on the local processor, one does not.
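Reviewer note: with the pre-3.3 fallback gone, _raw_compare_and_swap() is the single CS-based primitive the lock code builds on. A sketch of a trylock expressed with it; the owner tag and the bare lock word are assumptions, not the real arch_spinlock_t layout:

	/* Acquire 'lock' by swapping in a non-zero owner tag iff it is free. */
	static inline int example_trylock(volatile unsigned int *lock)
	{
		unsigned int tag = ~smp_processor_id();	/* any non-zero per-cpu value */

		return _raw_compare_and_swap(lock, 0, tag) == 0;
	}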
diff --git a/arch/s390/include/asm/swab.h b/arch/s390/include/asm/swab.h
index eb18dc1f327b..6bdee21c077e 100644
--- a/arch/s390/include/asm/swab.h
+++ b/arch/s390/include/asm/swab.h
@@ -47,11 +47,11 @@ static inline __u32 __arch_swab32p(const __u32 *x)
47 47
48 asm volatile( 48 asm volatile(
49#ifndef __s390x__ 49#ifndef __s390x__
50 " icm %0,8,3(%1)\n" 50 " icm %0,8,%O1+3(%R1)\n"
51 " icm %0,4,2(%1)\n" 51 " icm %0,4,%O1+2(%R1)\n"
52 " icm %0,2,1(%1)\n" 52 " icm %0,2,%O1+1(%R1)\n"
53 " ic %0,0(%1)" 53 " ic %0,%1"
54 : "=&d" (result) : "a" (x), "m" (*x) : "cc"); 54 : "=&d" (result) : "Q" (*x) : "cc");
55#else /* __s390x__ */ 55#else /* __s390x__ */
56 " lrv %0,%1" 56 " lrv %0,%1"
57 : "=d" (result) : "m" (*x)); 57 : "=d" (result) : "m" (*x));
@@ -77,9 +77,9 @@ static inline __u16 __arch_swab16p(const __u16 *x)
77 77
78 asm volatile( 78 asm volatile(
79#ifndef __s390x__ 79#ifndef __s390x__
80 " icm %0,2,1(%1)\n" 80 " icm %0,2,%O+1(%R1)\n"
81 " ic %0,0(%1)\n" 81 " ic %0,%1\n"
82 : "=&d" (result) : "a" (x), "m" (*x) : "cc"); 82 : "=&d" (result) : "Q" (*x) : "cc");
83#else /* __s390x__ */ 83#else /* __s390x__ */
84 " lrvh %0,%1" 84 " lrvh %0,%1"
85 : "=d" (result) : "m" (*x)); 85 : "=d" (result) : "m" (*x));
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
index e0a73d3eb837..8429686951f9 100644
--- a/arch/s390/include/asm/syscall.h
+++ b/arch/s390/include/asm/syscall.h
@@ -15,6 +15,13 @@
15#include <linux/sched.h> 15#include <linux/sched.h>
16#include <asm/ptrace.h> 16#include <asm/ptrace.h>
17 17
18/*
19 * The syscall table always contains 32 bit pointers since we know that the
20 * address of the function to be called is (way) below 4GB. So the "int"
21 * type here is what we want [need] for both 32 bit and 64 bit systems.
22 */
23extern const unsigned int sys_call_table[];
24
18static inline long syscall_get_nr(struct task_struct *task, 25static inline long syscall_get_nr(struct task_struct *task,
19 struct pt_regs *regs) 26 struct pt_regs *regs)
20{ 27{
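Reviewer note: the sys_call_table declaration makes the 32-bit-entry trick visible to C code. A purely illustrative helper showing how an entry would be widened back into a callable pointer; the kernel's dispatchers do this in assembler, not through a function like this:

	typedef long (*example_syscall_fn)(unsigned long, unsigned long, unsigned long,
					   unsigned long, unsigned long, unsigned long);

	static inline example_syscall_fn example_syscall_entry(unsigned int nr)
	{
		/* zero-extend the 32 bit table entry to a full pointer */
		return (example_syscall_fn)(unsigned long) sys_call_table[nr];
	}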
diff --git a/arch/s390/include/asm/sysinfo.h b/arch/s390/include/asm/sysinfo.h
index 9d70057d828c..22bdb2a0ee5f 100644
--- a/arch/s390/include/asm/sysinfo.h
+++ b/arch/s390/include/asm/sysinfo.h
@@ -87,7 +87,8 @@ struct sysinfo_2_2_2 {
87 87
88struct sysinfo_3_2_2 { 88struct sysinfo_3_2_2 {
89 char reserved_0[31]; 89 char reserved_0[31];
90 unsigned char count; 90 unsigned char :4;
91 unsigned char count:4;
91 struct { 92 struct {
92 char reserved_0[4]; 93 char reserved_0[4];
93 unsigned short cpus_total; 94 unsigned short cpus_total;
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
index 379661d2f81a..cef66210c846 100644
--- a/arch/s390/include/asm/system.h
+++ b/arch/s390/include/asm/system.h
@@ -24,65 +24,65 @@ extern struct task_struct *__switch_to(void *, void *);
24static inline void save_fp_regs(s390_fp_regs *fpregs) 24static inline void save_fp_regs(s390_fp_regs *fpregs)
25{ 25{
26 asm volatile( 26 asm volatile(
27 " std 0,8(%1)\n" 27 " std 0,%O0+8(%R0)\n"
28 " std 2,24(%1)\n" 28 " std 2,%O0+24(%R0)\n"
29 " std 4,40(%1)\n" 29 " std 4,%O0+40(%R0)\n"
30 " std 6,56(%1)" 30 " std 6,%O0+56(%R0)"
31 : "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory"); 31 : "=Q" (*fpregs) : "Q" (*fpregs));
32 if (!MACHINE_HAS_IEEE) 32 if (!MACHINE_HAS_IEEE)
33 return; 33 return;
34 asm volatile( 34 asm volatile(
35 " stfpc 0(%1)\n" 35 " stfpc %0\n"
36 " std 1,16(%1)\n" 36 " std 1,%O0+16(%R0)\n"
37 " std 3,32(%1)\n" 37 " std 3,%O0+32(%R0)\n"
38 " std 5,48(%1)\n" 38 " std 5,%O0+48(%R0)\n"
39 " std 7,64(%1)\n" 39 " std 7,%O0+64(%R0)\n"
40 " std 8,72(%1)\n" 40 " std 8,%O0+72(%R0)\n"
41 " std 9,80(%1)\n" 41 " std 9,%O0+80(%R0)\n"
42 " std 10,88(%1)\n" 42 " std 10,%O0+88(%R0)\n"
43 " std 11,96(%1)\n" 43 " std 11,%O0+96(%R0)\n"
44 " std 12,104(%1)\n" 44 " std 12,%O0+104(%R0)\n"
45 " std 13,112(%1)\n" 45 " std 13,%O0+112(%R0)\n"
46 " std 14,120(%1)\n" 46 " std 14,%O0+120(%R0)\n"
47 " std 15,128(%1)\n" 47 " std 15,%O0+128(%R0)\n"
48 : "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory"); 48 : "=Q" (*fpregs) : "Q" (*fpregs));
49} 49}
50 50
51static inline void restore_fp_regs(s390_fp_regs *fpregs) 51static inline void restore_fp_regs(s390_fp_regs *fpregs)
52{ 52{
53 asm volatile( 53 asm volatile(
54 " ld 0,8(%0)\n" 54 " ld 0,%O0+8(%R0)\n"
55 " ld 2,24(%0)\n" 55 " ld 2,%O0+24(%R0)\n"
56 " ld 4,40(%0)\n" 56 " ld 4,%O0+40(%R0)\n"
57 " ld 6,56(%0)" 57 " ld 6,%O0+56(%R0)"
58 : : "a" (fpregs), "m" (*fpregs)); 58 : : "Q" (*fpregs));
59 if (!MACHINE_HAS_IEEE) 59 if (!MACHINE_HAS_IEEE)
60 return; 60 return;
61 asm volatile( 61 asm volatile(
62 " lfpc 0(%0)\n" 62 " lfpc %0\n"
63 " ld 1,16(%0)\n" 63 " ld 1,%O0+16(%R0)\n"
64 " ld 3,32(%0)\n" 64 " ld 3,%O0+32(%R0)\n"
65 " ld 5,48(%0)\n" 65 " ld 5,%O0+48(%R0)\n"
66 " ld 7,64(%0)\n" 66 " ld 7,%O0+64(%R0)\n"
67 " ld 8,72(%0)\n" 67 " ld 8,%O0+72(%R0)\n"
68 " ld 9,80(%0)\n" 68 " ld 9,%O0+80(%R0)\n"
69 " ld 10,88(%0)\n" 69 " ld 10,%O0+88(%R0)\n"
70 " ld 11,96(%0)\n" 70 " ld 11,%O0+96(%R0)\n"
71 " ld 12,104(%0)\n" 71 " ld 12,%O0+104(%R0)\n"
72 " ld 13,112(%0)\n" 72 " ld 13,%O0+112(%R0)\n"
73 " ld 14,120(%0)\n" 73 " ld 14,%O0+120(%R0)\n"
74 " ld 15,128(%0)\n" 74 " ld 15,%O0+128(%R0)\n"
75 : : "a" (fpregs), "m" (*fpregs)); 75 : : "Q" (*fpregs));
76} 76}
77 77
78static inline void save_access_regs(unsigned int *acrs) 78static inline void save_access_regs(unsigned int *acrs)
79{ 79{
80 asm volatile("stam 0,15,0(%0)" : : "a" (acrs) : "memory"); 80 asm volatile("stam 0,15,%0" : "=Q" (*acrs));
81} 81}
82 82
83static inline void restore_access_regs(unsigned int *acrs) 83static inline void restore_access_regs(unsigned int *acrs)
84{ 84{
85 asm volatile("lam 0,15,0(%0)" : : "a" (acrs)); 85 asm volatile("lam 0,15,%0" : : "Q" (*acrs));
86} 86}
87 87
88#define switch_to(prev,next,last) do { \ 88#define switch_to(prev,next,last) do { \
@@ -110,6 +110,7 @@ extern void pfault_fini(void);
110#endif /* CONFIG_PFAULT */ 110#endif /* CONFIG_PFAULT */
111 111
112extern void cmma_init(void); 112extern void cmma_init(void);
113extern int memcpy_real(void *, void *, size_t);
113 114
114#define finish_arch_switch(prev) do { \ 115#define finish_arch_switch(prev) do { \
115 set_fs(current->thread.mm_segment); \ 116 set_fs(current->thread.mm_segment); \
@@ -139,48 +140,48 @@ static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
139 shift = (3 ^ (addr & 3)) << 3; 140 shift = (3 ^ (addr & 3)) << 3;
140 addr ^= addr & 3; 141 addr ^= addr & 3;
141 asm volatile( 142 asm volatile(
142 " l %0,0(%4)\n" 143 " l %0,%4\n"
143 "0: lr 0,%0\n" 144 "0: lr 0,%0\n"
144 " nr 0,%3\n" 145 " nr 0,%3\n"
145 " or 0,%2\n" 146 " or 0,%2\n"
146 " cs %0,0,0(%4)\n" 147 " cs %0,0,%4\n"
147 " jl 0b\n" 148 " jl 0b\n"
148 : "=&d" (old), "=m" (*(int *) addr) 149 : "=&d" (old), "=Q" (*(int *) addr)
149 : "d" (x << shift), "d" (~(255 << shift)), "a" (addr), 150 : "d" (x << shift), "d" (~(255 << shift)),
150 "m" (*(int *) addr) : "memory", "cc", "0"); 151 "Q" (*(int *) addr) : "memory", "cc", "0");
151 return old >> shift; 152 return old >> shift;
152 case 2: 153 case 2:
153 addr = (unsigned long) ptr; 154 addr = (unsigned long) ptr;
154 shift = (2 ^ (addr & 2)) << 3; 155 shift = (2 ^ (addr & 2)) << 3;
155 addr ^= addr & 2; 156 addr ^= addr & 2;
156 asm volatile( 157 asm volatile(
157 " l %0,0(%4)\n" 158 " l %0,%4\n"
158 "0: lr 0,%0\n" 159 "0: lr 0,%0\n"
159 " nr 0,%3\n" 160 " nr 0,%3\n"
160 " or 0,%2\n" 161 " or 0,%2\n"
161 " cs %0,0,0(%4)\n" 162 " cs %0,0,%4\n"
162 " jl 0b\n" 163 " jl 0b\n"
163 : "=&d" (old), "=m" (*(int *) addr) 164 : "=&d" (old), "=Q" (*(int *) addr)
164 : "d" (x << shift), "d" (~(65535 << shift)), "a" (addr), 165 : "d" (x << shift), "d" (~(65535 << shift)),
165 "m" (*(int *) addr) : "memory", "cc", "0"); 166 "Q" (*(int *) addr) : "memory", "cc", "0");
166 return old >> shift; 167 return old >> shift;
167 case 4: 168 case 4:
168 asm volatile( 169 asm volatile(
169 " l %0,0(%3)\n" 170 " l %0,%3\n"
170 "0: cs %0,%2,0(%3)\n" 171 "0: cs %0,%2,%3\n"
171 " jl 0b\n" 172 " jl 0b\n"
172 : "=&d" (old), "=m" (*(int *) ptr) 173 : "=&d" (old), "=Q" (*(int *) ptr)
173 : "d" (x), "a" (ptr), "m" (*(int *) ptr) 174 : "d" (x), "Q" (*(int *) ptr)
174 : "memory", "cc"); 175 : "memory", "cc");
175 return old; 176 return old;
176#ifdef __s390x__ 177#ifdef __s390x__
177 case 8: 178 case 8:
178 asm volatile( 179 asm volatile(
179 " lg %0,0(%3)\n" 180 " lg %0,%3\n"
180 "0: csg %0,%2,0(%3)\n" 181 "0: csg %0,%2,%3\n"
181 " jl 0b\n" 182 " jl 0b\n"
182 : "=&d" (old), "=m" (*(long *) ptr) 183 : "=&d" (old), "=m" (*(long *) ptr)
183 : "d" (x), "a" (ptr), "m" (*(long *) ptr) 184 : "d" (x), "Q" (*(long *) ptr)
184 : "memory", "cc"); 185 : "memory", "cc");
185 return old; 186 return old;
186#endif /* __s390x__ */ 187#endif /* __s390x__ */
@@ -215,20 +216,20 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
215 shift = (3 ^ (addr & 3)) << 3; 216 shift = (3 ^ (addr & 3)) << 3;
216 addr ^= addr & 3; 217 addr ^= addr & 3;
217 asm volatile( 218 asm volatile(
218 " l %0,0(%4)\n" 219 " l %0,%2\n"
219 "0: nr %0,%5\n" 220 "0: nr %0,%5\n"
220 " lr %1,%0\n" 221 " lr %1,%0\n"
221 " or %0,%2\n" 222 " or %0,%3\n"
222 " or %1,%3\n" 223 " or %1,%4\n"
223 " cs %0,%1,0(%4)\n" 224 " cs %0,%1,%2\n"
224 " jnl 1f\n" 225 " jnl 1f\n"
225 " xr %1,%0\n" 226 " xr %1,%0\n"
226 " nr %1,%5\n" 227 " nr %1,%5\n"
227 " jnz 0b\n" 228 " jnz 0b\n"
228 "1:" 229 "1:"
229 : "=&d" (prev), "=&d" (tmp) 230 : "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) ptr)
230 : "d" (old << shift), "d" (new << shift), "a" (ptr), 231 : "d" (old << shift), "d" (new << shift),
231 "d" (~(255 << shift)) 232 "d" (~(255 << shift)), "Q" (*(int *) ptr)
232 : "memory", "cc"); 233 : "memory", "cc");
233 return prev >> shift; 234 return prev >> shift;
234 case 2: 235 case 2:
@@ -236,33 +237,35 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
236 shift = (2 ^ (addr & 2)) << 3; 237 shift = (2 ^ (addr & 2)) << 3;
237 addr ^= addr & 2; 238 addr ^= addr & 2;
238 asm volatile( 239 asm volatile(
239 " l %0,0(%4)\n" 240 " l %0,%2\n"
240 "0: nr %0,%5\n" 241 "0: nr %0,%5\n"
241 " lr %1,%0\n" 242 " lr %1,%0\n"
242 " or %0,%2\n" 243 " or %0,%3\n"
243 " or %1,%3\n" 244 " or %1,%4\n"
244 " cs %0,%1,0(%4)\n" 245 " cs %0,%1,%2\n"
245 " jnl 1f\n" 246 " jnl 1f\n"
246 " xr %1,%0\n" 247 " xr %1,%0\n"
247 " nr %1,%5\n" 248 " nr %1,%5\n"
248 " jnz 0b\n" 249 " jnz 0b\n"
249 "1:" 250 "1:"
250 : "=&d" (prev), "=&d" (tmp) 251 : "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) ptr)
251 : "d" (old << shift), "d" (new << shift), "a" (ptr), 252 : "d" (old << shift), "d" (new << shift),
252 "d" (~(65535 << shift)) 253 "d" (~(65535 << shift)), "Q" (*(int *) ptr)
253 : "memory", "cc"); 254 : "memory", "cc");
254 return prev >> shift; 255 return prev >> shift;
255 case 4: 256 case 4:
256 asm volatile( 257 asm volatile(
257 " cs %0,%2,0(%3)\n" 258 " cs %0,%3,%1\n"
258 : "=&d" (prev) : "0" (old), "d" (new), "a" (ptr) 259 : "=&d" (prev), "=Q" (*(int *) ptr)
260 : "0" (old), "d" (new), "Q" (*(int *) ptr)
259 : "memory", "cc"); 261 : "memory", "cc");
260 return prev; 262 return prev;
261#ifdef __s390x__ 263#ifdef __s390x__
262 case 8: 264 case 8:
263 asm volatile( 265 asm volatile(
264 " csg %0,%2,0(%3)\n" 266 " csg %0,%3,%1\n"
265 : "=&d" (prev) : "0" (old), "d" (new), "a" (ptr) 267 : "=&d" (prev), "=Q" (*(long *) ptr)
268 : "0" (old), "d" (new), "Q" (*(long *) ptr)
266 : "memory", "cc"); 269 : "memory", "cc");
267 return prev; 270 return prev;
268#endif /* __s390x__ */ 271#endif /* __s390x__ */
@@ -302,17 +305,17 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
302#define __ctl_load(array, low, high) ({ \ 305#define __ctl_load(array, low, high) ({ \
303 typedef struct { char _[sizeof(array)]; } addrtype; \ 306 typedef struct { char _[sizeof(array)]; } addrtype; \
304 asm volatile( \ 307 asm volatile( \
305 " lctlg %1,%2,0(%0)\n" \ 308 " lctlg %1,%2,%0\n" \
306 : : "a" (&array), "i" (low), "i" (high), \ 309 : : "Q" (*(addrtype *)(&array)), \
307 "m" (*(addrtype *)(&array))); \ 310 "i" (low), "i" (high)); \
308 }) 311 })
309 312
310#define __ctl_store(array, low, high) ({ \ 313#define __ctl_store(array, low, high) ({ \
311 typedef struct { char _[sizeof(array)]; } addrtype; \ 314 typedef struct { char _[sizeof(array)]; } addrtype; \
312 asm volatile( \ 315 asm volatile( \
313 " stctg %2,%3,0(%1)\n" \ 316 " stctg %1,%2,%0\n" \
314 : "=m" (*(addrtype *)(&array)) \ 317 : "=Q" (*(addrtype *)(&array)) \
315 : "a" (&array), "i" (low), "i" (high)); \ 318 : "i" (low), "i" (high)); \
316 }) 319 })
317 320
318#else /* __s390x__ */ 321#else /* __s390x__ */
@@ -320,17 +323,17 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
320#define __ctl_load(array, low, high) ({ \ 323#define __ctl_load(array, low, high) ({ \
321 typedef struct { char _[sizeof(array)]; } addrtype; \ 324 typedef struct { char _[sizeof(array)]; } addrtype; \
322 asm volatile( \ 325 asm volatile( \
323 " lctl %1,%2,0(%0)\n" \ 326 " lctl %1,%2,%0\n" \
324 : : "a" (&array), "i" (low), "i" (high), \ 327 : : "Q" (*(addrtype *)(&array)), \
325 "m" (*(addrtype *)(&array))); \ 328 "i" (low), "i" (high)); \
326}) 329})
327 330
328#define __ctl_store(array, low, high) ({ \ 331#define __ctl_store(array, low, high) ({ \
329 typedef struct { char _[sizeof(array)]; } addrtype; \ 332 typedef struct { char _[sizeof(array)]; } addrtype; \
330 asm volatile( \ 333 asm volatile( \
331 " stctl %2,%3,0(%1)\n" \ 334 " stctl %1,%2,%0\n" \
332 : "=m" (*(addrtype *)(&array)) \ 335 : "=Q" (*(addrtype *)(&array)) \
333 : "a" (&array), "i" (low), "i" (high)); \ 336 : "i" (low), "i" (high)); \
334 }) 337 })
335 338
336#endif /* __s390x__ */ 339#endif /* __s390x__ */
@@ -456,11 +459,6 @@ extern void (*_machine_power_off)(void);
456 459
457#define arch_align_stack(x) (x) 460#define arch_align_stack(x) (x)
458 461
459#ifdef CONFIG_TRACE_IRQFLAGS
460extern psw_t sysc_restore_trace_psw;
461extern psw_t io_restore_trace_psw;
462#endif
463
464static inline int tprot(unsigned long addr) 462static inline int tprot(unsigned long addr)
465{ 463{
466 int rc = -EFAULT; 464 int rc = -EFAULT;
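Reviewer note: the control-register macros now hand the whole array to the assembler through a single "Q" operand instead of passing its address in a register; their calling convention is unchanged. A small sketch in the style of __ctl_set_bit (the chosen register and bit are examples, not meaningful):

	static inline void example_toggle_cr0_bit(unsigned int bit)
	{
		unsigned long reg;

		__ctl_store(reg, 0, 0);		/* fetch control register 0 */
		reg ^= 1UL << bit;		/* flip the requested bit (example only) */
		__ctl_load(reg, 0, 0);		/* write it back */
	}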
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 66069e736842..5baf0230b29b 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -50,6 +50,7 @@ struct thread_info {
50 struct restart_block restart_block; 50 struct restart_block restart_block;
51 __u64 user_timer; 51 __u64 user_timer;
52 __u64 system_timer; 52 __u64 system_timer;
53 unsigned long last_break; /* last breaking-event-address. */
53}; 54};
54 55
55/* 56/*
@@ -73,7 +74,7 @@ struct thread_info {
73/* how to get the thread information struct from C */ 74/* how to get the thread information struct from C */
74static inline struct thread_info *current_thread_info(void) 75static inline struct thread_info *current_thread_info(void)
75{ 76{
76 return (struct thread_info *)((*(unsigned long *) __LC_KERNEL_STACK)-THREAD_SIZE); 77 return (struct thread_info *)(S390_lowcore.kernel_stack - THREAD_SIZE);
77} 78}
78 79
79#define THREAD_SIZE_ORDER THREAD_ORDER 80#define THREAD_SIZE_ORDER THREAD_ORDER
@@ -96,7 +97,7 @@ static inline struct thread_info *current_thread_info(void)
96#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling 97#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling
97 TIF_NEED_RESCHED */ 98 TIF_NEED_RESCHED */
98#define TIF_31BIT 17 /* 32bit process */ 99#define TIF_31BIT 17 /* 32bit process */
99#define TIF_MEMDIE 18 100#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
100#define TIF_RESTORE_SIGMASK 19 /* restore signal mask in do_signal() */ 101#define TIF_RESTORE_SIGMASK 19 /* restore signal mask in do_signal() */
101#define TIF_FREEZE 20 /* thread is freezing for suspend */ 102#define TIF_FREEZE 20 /* thread is freezing for suspend */
102 103
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 68d9fea34b4b..09d345a701dc 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -20,10 +20,10 @@ static inline int set_clock(__u64 time)
20 int cc; 20 int cc;
21 21
22 asm volatile( 22 asm volatile(
23 " sck 0(%2)\n" 23 " sck %1\n"
24 " ipm %0\n" 24 " ipm %0\n"
25 " srl %0,28\n" 25 " srl %0,28\n"
26 : "=d" (cc) : "m" (time), "a" (&time) : "cc"); 26 : "=d" (cc) : "Q" (time) : "cc");
27 return cc; 27 return cc;
28} 28}
29 29
@@ -32,21 +32,21 @@ static inline int store_clock(__u64 *time)
32 int cc; 32 int cc;
33 33
34 asm volatile( 34 asm volatile(
35 " stck 0(%2)\n" 35 " stck %1\n"
36 " ipm %0\n" 36 " ipm %0\n"
37 " srl %0,28\n" 37 " srl %0,28\n"
38 : "=d" (cc), "=m" (*time) : "a" (time) : "cc"); 38 : "=d" (cc), "=Q" (*time) : : "cc");
39 return cc; 39 return cc;
40} 40}
41 41
42static inline void set_clock_comparator(__u64 time) 42static inline void set_clock_comparator(__u64 time)
43{ 43{
44 asm volatile("sckc 0(%1)" : : "m" (time), "a" (&time)); 44 asm volatile("sckc %0" : : "Q" (time));
45} 45}
46 46
47static inline void store_clock_comparator(__u64 *time) 47static inline void store_clock_comparator(__u64 *time)
48{ 48{
49 asm volatile("stckc 0(%1)" : "=m" (*time) : "a" (time)); 49 asm volatile("stckc %0" : "=Q" (*time));
50} 50}
51 51
52#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */ 52#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
@@ -57,25 +57,19 @@ static inline unsigned long long get_clock (void)
57{ 57{
58 unsigned long long clk; 58 unsigned long long clk;
59 59
60#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
61 asm volatile("stck %0" : "=Q" (clk) : : "cc"); 60 asm volatile("stck %0" : "=Q" (clk) : : "cc");
62#else /* __GNUC__ */
63 asm volatile("stck 0(%1)" : "=m" (clk) : "a" (&clk) : "cc");
64#endif /* __GNUC__ */
65 return clk; 61 return clk;
66} 62}
67 63
64static inline void get_clock_ext(char *clk)
65{
66 asm volatile("stcke %0" : "=Q" (*clk) : : "cc");
67}
68
68static inline unsigned long long get_clock_xt(void) 69static inline unsigned long long get_clock_xt(void)
69{ 70{
70 unsigned char clk[16]; 71 unsigned char clk[16];
71 72 get_clock_ext(clk);
72#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
73 asm volatile("stcke %0" : "=Q" (clk) : : "cc");
74#else /* __GNUC__ */
75 asm volatile("stcke 0(%1)" : "=m" (clk)
76 : "a" (clk) : "cc");
77#endif /* __GNUC__ */
78
79 return *((unsigned long long *)&clk[1]); 73 return *((unsigned long long *)&clk[1]);
80} 74}
81 75
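Reviewer note: get_clock_ext() factors out the stcke so that get_clock_xt() no longer needs its own compiler-version split. For reference, a sketch of turning the TOD value into microseconds (epoch correction deliberately ignored):

	/* TOD clock bit 51 ticks once per microsecond, so shifting the 64 bit
	 * value right by 12 yields microseconds. */
	static inline unsigned long long example_tod_to_us(void)
	{
		return get_clock() >> 12;
	}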
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index 6e7211abd950..dc8a67297d0f 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -7,8 +7,10 @@
7 7
8const struct cpumask *cpu_coregroup_mask(unsigned int cpu); 8const struct cpumask *cpu_coregroup_mask(unsigned int cpu);
9 9
10extern unsigned char cpu_core_id[NR_CPUS];
10extern cpumask_t cpu_core_map[NR_CPUS]; 11extern cpumask_t cpu_core_map[NR_CPUS];
11 12
13#define topology_core_id(cpu) (cpu_core_id[cpu])
12#define topology_core_cpumask(cpu) (&cpu_core_map[cpu]) 14#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
13 15
14int topology_set_cpu_management(int fc); 16int topology_set_cpu_management(int fc);
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index cbf0a8745bf4..d6b1ed0ec52b 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -265,6 +265,12 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
265 return uaccess.copy_from_user(n, from, to); 265 return uaccess.copy_from_user(n, from, to);
266} 266}
267 267
268extern void copy_from_user_overflow(void)
269#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
270__compiletime_warning("copy_from_user() buffer size is not provably correct")
271#endif
272;
273
268/** 274/**
269 * copy_from_user: - Copy a block of data from user space. 275 * copy_from_user: - Copy a block of data from user space.
270 * @to: Destination address, in kernel space. 276 * @to: Destination address, in kernel space.
@@ -284,7 +290,13 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
284static inline unsigned long __must_check 290static inline unsigned long __must_check
285copy_from_user(void *to, const void __user *from, unsigned long n) 291copy_from_user(void *to, const void __user *from, unsigned long n)
286{ 292{
293 unsigned int sz = __compiletime_object_size(to);
294
287 might_fault(); 295 might_fault();
296 if (unlikely(sz != -1 && sz < n)) {
297 copy_from_user_overflow();
298 return n;
299 }
288 if (access_ok(VERIFY_READ, from, n)) 300 if (access_ok(VERIFY_READ, from, n))
289 n = __copy_from_user(to, from, n); 301 n = __copy_from_user(to, from, n);
290 else 302 else
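Reviewer note: the added check compares the compile-time-known size of the destination object against n; if n is provably too large the copy is refused (n is returned unchanged) and, with CONFIG_DEBUG_STRICT_USER_COPY_CHECKS, a build warning is emitted via copy_from_user_overflow(). A sketch of the kind of bug it is meant to catch (helper and sizes are made up):

	/* Deliberately buggy sketch: asks for 8 bytes into a 4 byte object, so
	 * the new check fires and copy_from_user() reports all bytes as failed. */
	static int example_broken_copy(const void __user *arg)
	{
		unsigned int val;

		if (copy_from_user(&val, arg, 2 * sizeof(val)))
			return -EFAULT;
		return 0;
	}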
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index 6e9f049fa823..5f0075150a65 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -392,6 +392,7 @@
392#define __ARCH_WANT_SYS_LLSEEK 392#define __ARCH_WANT_SYS_LLSEEK
393#define __ARCH_WANT_SYS_NICE 393#define __ARCH_WANT_SYS_NICE
394#define __ARCH_WANT_SYS_OLD_GETRLIMIT 394#define __ARCH_WANT_SYS_OLD_GETRLIMIT
395#define __ARCH_WANT_SYS_OLD_MMAP
395#define __ARCH_WANT_SYS_OLDUMOUNT 396#define __ARCH_WANT_SYS_OLDUMOUNT
396#define __ARCH_WANT_SYS_SIGPENDING 397#define __ARCH_WANT_SYS_SIGPENDING
397#define __ARCH_WANT_SYS_SIGPROCMASK 398#define __ARCH_WANT_SYS_SIGPROCMASK
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
index 7bdd7c8ebc91..533f35751aeb 100644
--- a/arch/s390/include/asm/vdso.h
+++ b/arch/s390/include/asm/vdso.h
@@ -7,7 +7,7 @@
7#define VDSO32_LBASE 0 7#define VDSO32_LBASE 0
8#define VDSO64_LBASE 0 8#define VDSO64_LBASE 0
9 9
10#define VDSO_VERSION_STRING LINUX_2.6.26 10#define VDSO_VERSION_STRING LINUX_2.6.29
11 11
12#ifndef __ASSEMBLY__ 12#ifndef __ASSEMBLY__
13 13
@@ -29,6 +29,7 @@ struct vdso_data {
29 __u32 tz_minuteswest; /* Minutes west of Greenwich 0x30 */ 29 __u32 tz_minuteswest; /* Minutes west of Greenwich 0x30 */
30 __u32 tz_dsttime; /* Type of dst correction 0x34 */ 30 __u32 tz_dsttime; /* Type of dst correction 0x34 */
31 __u32 ectg_available; 31 __u32 ectg_available;
32 __u32 ntp_mult; /* NTP adjusted multiplier 0x3C */
32}; 33};
33 34
34struct vdso_per_cpu_data { 35struct vdso_per_cpu_data {
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 683f6381cc59..64230bc392fa 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -29,9 +29,12 @@ obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o)
29obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) 29obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
30 30
31extra-y += head.o init_task.o vmlinux.lds 31extra-y += head.o init_task.o vmlinux.lds
32extra-y += $(if $(CONFIG_64BIT),head64.o,head31.o)
32 33
33obj-$(CONFIG_MODULES) += s390_ksyms.o module.o 34obj-$(CONFIG_MODULES) += s390_ksyms.o module.o
34obj-$(CONFIG_SMP) += smp.o topology.o 35obj-$(CONFIG_SMP) += smp.o topology.o
36obj-$(CONFIG_SMP) += $(if $(CONFIG_64BIT),switch_cpu64.o, \
37 switch_cpu.o)
35obj-$(CONFIG_HIBERNATION) += suspend.o swsusp_asm64.o 38obj-$(CONFIG_HIBERNATION) += suspend.o swsusp_asm64.o
36obj-$(CONFIG_AUDIT) += audit.o 39obj-$(CONFIG_AUDIT) += audit.o
37compat-obj-$(CONFIG_AUDIT) += compat_audit.o 40compat-obj-$(CONFIG_AUDIT) += compat_audit.o
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 63e46433e81d..5232278d79ad 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -4,18 +4,27 @@
4 * and format the required data. 4 * and format the required data.
5 */ 5 */
6 6
7#include <linux/sched.h> 7#define ASM_OFFSETS_C
8
8#include <linux/kbuild.h> 9#include <linux/kbuild.h>
10#include <linux/sched.h>
9#include <asm/vdso.h> 11#include <asm/vdso.h>
10#include <asm/sigp.h> 12#include <asm/sigp.h>
11 13
14/*
15 * Make sure that the compiler is new enough. We want a compiler that
16 * is known to work with the "Q" assembler constraint.
17 */
18#if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
19#error Your compiler is too old; please use version 3.3.3 or newer
20#endif
21
12int main(void) 22int main(void)
13{ 23{
14 DEFINE(__THREAD_info, offsetof(struct task_struct, stack)); 24 DEFINE(__THREAD_info, offsetof(struct task_struct, stack));
15 DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp)); 25 DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp));
16 DEFINE(__THREAD_per, offsetof(struct task_struct, thread.per_info)); 26 DEFINE(__THREAD_per, offsetof(struct task_struct, thread.per_info));
17 DEFINE(__THREAD_mm_segment, 27 DEFINE(__THREAD_mm_segment, offsetof(struct task_struct, thread.mm_segment));
18 offsetof(struct task_struct, thread.mm_segment));
19 BLANK(); 28 BLANK();
20 DEFINE(__TASK_pid, offsetof(struct task_struct, pid)); 29 DEFINE(__TASK_pid, offsetof(struct task_struct, pid));
21 BLANK(); 30 BLANK();
@@ -30,6 +39,7 @@ int main(void)
30 DEFINE(__TI_precount, offsetof(struct thread_info, preempt_count)); 39 DEFINE(__TI_precount, offsetof(struct thread_info, preempt_count));
31 DEFINE(__TI_user_timer, offsetof(struct thread_info, user_timer)); 40 DEFINE(__TI_user_timer, offsetof(struct thread_info, user_timer));
32 DEFINE(__TI_system_timer, offsetof(struct thread_info, system_timer)); 41 DEFINE(__TI_system_timer, offsetof(struct thread_info, system_timer));
42 DEFINE(__TI_last_break, offsetof(struct thread_info, last_break));
33 BLANK(); 43 BLANK();
34 DEFINE(__PT_ARGS, offsetof(struct pt_regs, args)); 44 DEFINE(__PT_ARGS, offsetof(struct pt_regs, args));
35 DEFINE(__PT_PSW, offsetof(struct pt_regs, psw)); 45 DEFINE(__PT_PSW, offsetof(struct pt_regs, psw));
@@ -52,18 +62,98 @@ int main(void)
52 DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec)); 62 DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec));
53 DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest)); 63 DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest));
54 DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available)); 64 DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available));
55 DEFINE(__VDSO_ECTG_BASE, 65 DEFINE(__VDSO_NTP_MULT, offsetof(struct vdso_data, ntp_mult));
56 offsetof(struct vdso_per_cpu_data, ectg_timer_base)); 66 DEFINE(__VDSO_ECTG_BASE, offsetof(struct vdso_per_cpu_data, ectg_timer_base));
57 DEFINE(__VDSO_ECTG_USER, 67 DEFINE(__VDSO_ECTG_USER, offsetof(struct vdso_per_cpu_data, ectg_user_time));
58 offsetof(struct vdso_per_cpu_data, ectg_user_time));
59 /* constants used by the vdso */ 68 /* constants used by the vdso */
60 DEFINE(CLOCK_REALTIME, CLOCK_REALTIME); 69 DEFINE(CLOCK_REALTIME, CLOCK_REALTIME);
61 DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC); 70 DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC);
62 DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC); 71 DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
72 BLANK();
63 /* constants for SIGP */ 73 /* constants for SIGP */
64 DEFINE(__SIGP_STOP, sigp_stop); 74 DEFINE(__SIGP_STOP, sigp_stop);
65 DEFINE(__SIGP_RESTART, sigp_restart); 75 DEFINE(__SIGP_RESTART, sigp_restart);
66 DEFINE(__SIGP_SENSE, sigp_sense); 76 DEFINE(__SIGP_SENSE, sigp_sense);
67 DEFINE(__SIGP_INITIAL_CPU_RESET, sigp_initial_cpu_reset); 77 DEFINE(__SIGP_INITIAL_CPU_RESET, sigp_initial_cpu_reset);
78 BLANK();
79 /* lowcore offsets */
80 DEFINE(__LC_EXT_PARAMS, offsetof(struct _lowcore, ext_params));
81 DEFINE(__LC_CPU_ADDRESS, offsetof(struct _lowcore, cpu_addr));
82 DEFINE(__LC_EXT_INT_CODE, offsetof(struct _lowcore, ext_int_code));
83 DEFINE(__LC_SVC_ILC, offsetof(struct _lowcore, svc_ilc));
84 DEFINE(__LC_SVC_INT_CODE, offsetof(struct _lowcore, svc_code));
85 DEFINE(__LC_PGM_ILC, offsetof(struct _lowcore, pgm_ilc));
86 DEFINE(__LC_PGM_INT_CODE, offsetof(struct _lowcore, pgm_code));
87 DEFINE(__LC_PER_ATMID, offsetof(struct _lowcore, per_perc_atmid));
88 DEFINE(__LC_PER_ADDRESS, offsetof(struct _lowcore, per_address));
89 DEFINE(__LC_PER_ACCESS_ID, offsetof(struct _lowcore, per_access_id));
90 DEFINE(__LC_AR_MODE_ID, offsetof(struct _lowcore, ar_access_id));
91 DEFINE(__LC_SUBCHANNEL_ID, offsetof(struct _lowcore, subchannel_id));
92 DEFINE(__LC_SUBCHANNEL_NR, offsetof(struct _lowcore, subchannel_nr));
93 DEFINE(__LC_IO_INT_PARM, offsetof(struct _lowcore, io_int_parm));
94 DEFINE(__LC_IO_INT_WORD, offsetof(struct _lowcore, io_int_word));
95 DEFINE(__LC_STFL_FAC_LIST, offsetof(struct _lowcore, stfl_fac_list));
96 DEFINE(__LC_MCCK_CODE, offsetof(struct _lowcore, mcck_interruption_code));
97 DEFINE(__LC_DUMP_REIPL, offsetof(struct _lowcore, ipib));
98 BLANK();
99 DEFINE(__LC_RST_NEW_PSW, offsetof(struct _lowcore, restart_psw));
100 DEFINE(__LC_RST_OLD_PSW, offsetof(struct _lowcore, restart_old_psw));
101 DEFINE(__LC_EXT_OLD_PSW, offsetof(struct _lowcore, external_old_psw));
102 DEFINE(__LC_SVC_OLD_PSW, offsetof(struct _lowcore, svc_old_psw));
103 DEFINE(__LC_PGM_OLD_PSW, offsetof(struct _lowcore, program_old_psw));
104 DEFINE(__LC_MCK_OLD_PSW, offsetof(struct _lowcore, mcck_old_psw));
105 DEFINE(__LC_IO_OLD_PSW, offsetof(struct _lowcore, io_old_psw));
106 DEFINE(__LC_EXT_NEW_PSW, offsetof(struct _lowcore, external_new_psw));
107 DEFINE(__LC_SVC_NEW_PSW, offsetof(struct _lowcore, svc_new_psw));
108 DEFINE(__LC_PGM_NEW_PSW, offsetof(struct _lowcore, program_new_psw));
109 DEFINE(__LC_MCK_NEW_PSW, offsetof(struct _lowcore, mcck_new_psw));
110 DEFINE(__LC_IO_NEW_PSW, offsetof(struct _lowcore, io_new_psw));
111 DEFINE(__LC_SAVE_AREA, offsetof(struct _lowcore, save_area));
112 DEFINE(__LC_RETURN_PSW, offsetof(struct _lowcore, return_psw));
113 DEFINE(__LC_RETURN_MCCK_PSW, offsetof(struct _lowcore, return_mcck_psw));
114 DEFINE(__LC_SYNC_ENTER_TIMER, offsetof(struct _lowcore, sync_enter_timer));
115 DEFINE(__LC_ASYNC_ENTER_TIMER, offsetof(struct _lowcore, async_enter_timer));
116 DEFINE(__LC_MCCK_ENTER_TIMER, offsetof(struct _lowcore, mcck_enter_timer));
117 DEFINE(__LC_EXIT_TIMER, offsetof(struct _lowcore, exit_timer));
118 DEFINE(__LC_USER_TIMER, offsetof(struct _lowcore, user_timer));
119 DEFINE(__LC_SYSTEM_TIMER, offsetof(struct _lowcore, system_timer));
120 DEFINE(__LC_STEAL_TIMER, offsetof(struct _lowcore, steal_timer));
121 DEFINE(__LC_LAST_UPDATE_TIMER, offsetof(struct _lowcore, last_update_timer));
122 DEFINE(__LC_LAST_UPDATE_CLOCK, offsetof(struct _lowcore, last_update_clock));
123 DEFINE(__LC_CURRENT, offsetof(struct _lowcore, current_task));
124 DEFINE(__LC_THREAD_INFO, offsetof(struct _lowcore, thread_info));
125 DEFINE(__LC_KERNEL_STACK, offsetof(struct _lowcore, kernel_stack));
126 DEFINE(__LC_ASYNC_STACK, offsetof(struct _lowcore, async_stack));
127 DEFINE(__LC_PANIC_STACK, offsetof(struct _lowcore, panic_stack));
128 DEFINE(__LC_KERNEL_ASCE, offsetof(struct _lowcore, kernel_asce));
129 DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce));
130 DEFINE(__LC_USER_EXEC_ASCE, offsetof(struct _lowcore, user_exec_asce));
131 DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock));
132 DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock));
133 DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags));
134 DEFINE(__LC_FTRACE_FUNC, offsetof(struct _lowcore, ftrace_func));
135 DEFINE(__LC_IRB, offsetof(struct _lowcore, irb));
136 DEFINE(__LC_CPU_TIMER_SAVE_AREA, offsetof(struct _lowcore, cpu_timer_save_area));
137 DEFINE(__LC_CLOCK_COMP_SAVE_AREA, offsetof(struct _lowcore, clock_comp_save_area));
138 DEFINE(__LC_PSW_SAVE_AREA, offsetof(struct _lowcore, psw_save_area));
139 DEFINE(__LC_PREFIX_SAVE_AREA, offsetof(struct _lowcore, prefixreg_save_area));
140 DEFINE(__LC_AREGS_SAVE_AREA, offsetof(struct _lowcore, access_regs_save_area));
141 DEFINE(__LC_FPREGS_SAVE_AREA, offsetof(struct _lowcore, floating_pt_save_area));
142 DEFINE(__LC_GPREGS_SAVE_AREA, offsetof(struct _lowcore, gpregs_save_area));
143 DEFINE(__LC_CREGS_SAVE_AREA, offsetof(struct _lowcore, cregs_save_area));
144#ifdef CONFIG_32BIT
145 DEFINE(__LC_PFAULT_INTPARM, offsetof(struct _lowcore, ext_params));
146 DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, extended_save_area_addr));
147#else /* CONFIG_32BIT */
148 DEFINE(__LC_PFAULT_INTPARM, offsetof(struct _lowcore, ext_params2));
149 DEFINE(__LC_EXT_PARAMS2, offsetof(struct _lowcore, ext_params2));
150 DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, floating_pt_save_area));
151 DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste));
152 DEFINE(__LC_FP_CREG_SAVE_AREA, offsetof(struct _lowcore, fpt_creg_save_area));
153 DEFINE(__LC_LAST_BREAK, offsetof(struct _lowcore, breaking_event_addr));
154 DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
155 DEFINE(__LC_SIE_HOOK, offsetof(struct _lowcore, sie_hook));
156 DEFINE(__LC_CMF_HPP, offsetof(struct _lowcore, cmf_hpp));
157#endif /* CONFIG_32BIT */
68 return 0; 158 return 0;
69} 159}
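Reviewer note: for context on where these constants go, kbuild compiles this file, extracts each DEFINE() into the generated asm-offsets.h, and the entry/head assembler sources then address lowcore fields by name. A sketch of one generated line; the numeric offset is made up for illustration:

	/* Generated by kbuild from the DEFINE() above; value illustrative only. */
	#define __LC_KERNEL_STACK	0x0d40	/* offsetof(struct _lowcore, kernel_stack) */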
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
index dc7e5259770f..15e46ca94335 100644
--- a/arch/s390/kernel/base.S
+++ b/arch/s390/kernel/base.S
@@ -6,8 +6,8 @@
6 * Michael Holzheu <holzheu@de.ibm.com> 6 * Michael Holzheu <holzheu@de.ibm.com>
7 */ 7 */
8 8
9#include <asm/asm-offsets.h>
9#include <asm/ptrace.h> 10#include <asm/ptrace.h>
10#include <asm/lowcore.h>
11 11
12#ifdef CONFIG_64BIT 12#ifdef CONFIG_64BIT
13 13
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 11c3aba664ea..73b624ed9cd8 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -29,7 +29,6 @@
29#include <linux/sem.h> 29#include <linux/sem.h>
30#include <linux/msg.h> 30#include <linux/msg.h>
31#include <linux/shm.h> 31#include <linux/shm.h>
32#include <linux/slab.h>
33#include <linux/uio.h> 32#include <linux/uio.h>
34#include <linux/quota.h> 33#include <linux/quota.h>
35#include <linux/module.h> 34#include <linux/module.h>
@@ -52,6 +51,7 @@
52#include <linux/ptrace.h> 51#include <linux/ptrace.h>
53#include <linux/fadvise.h> 52#include <linux/fadvise.h>
54#include <linux/ipc.h> 53#include <linux/ipc.h>
54#include <linux/slab.h>
55 55
56#include <asm/types.h> 56#include <asm/types.h>
57#include <asm/uaccess.h> 57#include <asm/uaccess.h>
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index 30de2d0e52bb..672ce52341b4 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -547,7 +547,7 @@ sys32_setdomainname_wrapper:
547 .globl sys32_newuname_wrapper 547 .globl sys32_newuname_wrapper
548sys32_newuname_wrapper: 548sys32_newuname_wrapper:
549 llgtr %r2,%r2 # struct new_utsname * 549 llgtr %r2,%r2 # struct new_utsname *
550 jg sys_s390_newuname # branch to system call 550 jg sys_newuname # branch to system call
551 551
552 .globl compat_sys_adjtimex_wrapper 552 .globl compat_sys_adjtimex_wrapper
553compat_sys_adjtimex_wrapper: 553compat_sys_adjtimex_wrapper:
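
Note: each wrapper above sanitizes 31-bit user pointers before jumping to the 64-bit system call; llgtr keeps only the low 31 bits of the register and zeroes everything above them. A tiny worked example of that masking (compat_ptr_to_64 is a made-up helper, not a kernel interface):

#include <stdio.h>

/* what "llgtr %r2,%r2" does to a pointer from a 31-bit compat task:
 * keep the low 31 bits, zero the rest */
static unsigned long compat_ptr_to_64(unsigned int user_ptr)
{
	return (unsigned long)(user_ptr & 0x7fffffffU);
}

int main(void)
{
	unsigned int p = 0x80123456;	/* top bit set in the 31-bit ABI */

	printf("%#lx\n", compat_ptr_to_64(p));	/* prints 0x123456 */
	return 0;
}
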
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index 0168472b2fdf..98192261491d 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -655,6 +655,7 @@ found:
655 p_info->act_entry_offset = 0; 655 p_info->act_entry_offset = 0;
656 file->private_data = p_info; 656 file->private_data = p_info;
657 debug_info_get(debug_info); 657 debug_info_get(debug_info);
658 nonseekable_open(inode, file);
658out: 659out:
659 mutex_unlock(&debug_mutex); 660 mutex_unlock(&debug_mutex);
660 return rc; 661 return rc;
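
Note: the debug.c hunk above adds a nonseekable_open() call to the debug feature's open handler. A hedged sketch of the usual pattern in a hypothetical driver (demo_open and demo_fops are made up; only nonseekable_open() and no_llseek are real kernel interfaces):

#include <linux/fs.h>
#include <linux/module.h>

static int demo_open(struct inode *inode, struct file *file)
{
	/* ... allocate and stash per-open state in file->private_data ... */
	return nonseekable_open(inode, file);	/* mark file non-seekable */
}

static const struct file_operations demo_fops = {
	.owner  = THIS_MODULE,
	.open   = demo_open,
	.llseek = no_llseek,	/* usually paired with nonseekable_open */
};
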
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index db943a7ec513..b39b27d68b45 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -86,10 +86,17 @@ enum {
86 U4_12, /* 4 bit unsigned value starting at 12 */ 86 U4_12, /* 4 bit unsigned value starting at 12 */
87 U4_16, /* 4 bit unsigned value starting at 16 */ 87 U4_16, /* 4 bit unsigned value starting at 16 */
88 U4_20, /* 4 bit unsigned value starting at 20 */ 88 U4_20, /* 4 bit unsigned value starting at 20 */
89 U4_32, /* 4 bit unsigned value starting at 32 */
89 U8_8, /* 8 bit unsigned value starting at 8 */ 90 U8_8, /* 8 bit unsigned value starting at 8 */
90 U8_16, /* 8 bit unsigned value starting at 16 */ 91 U8_16, /* 8 bit unsigned value starting at 16 */
92 U8_24, /* 8 bit unsigned value starting at 24 */
93 U8_32, /* 8 bit unsigned value starting at 32 */
94 I8_8, /* 8 bit signed value starting at 8 */
95 I8_32, /* 8 bit signed value starting at 32 */
91 I16_16, /* 16 bit signed value starting at 16 */ 96 I16_16, /* 16 bit signed value starting at 16 */
 97 I16_32, /* 16 bit signed value starting at 32 */
92 U16_16, /* 16 bit unsigned value starting at 16 */ 98 U16_16, /* 16 bit unsigned value starting at 16 */
 99 U16_32, /* 16 bit unsigned value starting at 32 */
93 J16_16, /* PC relative jump offset at 16 */ 100 J16_16, /* PC relative jump offset at 16 */
94 J32_16, /* PC relative long offset at 16 */ 101 J32_16, /* PC relative long offset at 16 */
95 I32_16, /* 32 bit signed value starting at 16 */ 102 I32_16, /* 32 bit signed value starting at 16 */
@@ -104,21 +111,37 @@ enum {
104 */ 111 */
105enum { 112enum {
106 INSTR_INVALID, 113 INSTR_INVALID,
107 INSTR_E, INSTR_RIE_RRP, INSTR_RIL_RI, INSTR_RIL_RP, INSTR_RIL_RU, 114 INSTR_E,
108 INSTR_RIL_UP, INSTR_RI_RI, INSTR_RI_RP, INSTR_RI_RU, INSTR_RI_UP, 115 INSTR_RIE_R0IU, INSTR_RIE_R0UU, INSTR_RIE_RRP, INSTR_RIE_RRPU,
116 INSTR_RIE_RRUUU, INSTR_RIE_RUPI, INSTR_RIE_RUPU,
117 INSTR_RIL_RI, INSTR_RIL_RP, INSTR_RIL_RU, INSTR_RIL_UP,
118 INSTR_RIS_R0RDU, INSTR_RIS_R0UU, INSTR_RIS_RURDI, INSTR_RIS_RURDU,
119 INSTR_RI_RI, INSTR_RI_RP, INSTR_RI_RU, INSTR_RI_UP,
109 INSTR_RRE_00, INSTR_RRE_0R, INSTR_RRE_AA, INSTR_RRE_AR, INSTR_RRE_F0, 120 INSTR_RRE_00, INSTR_RRE_0R, INSTR_RRE_AA, INSTR_RRE_AR, INSTR_RRE_F0,
110 INSTR_RRE_FF, INSTR_RRE_R0, INSTR_RRE_RA, INSTR_RRE_RF, INSTR_RRE_RR, 121 INSTR_RRE_FF, INSTR_RRE_FR, INSTR_RRE_R0, INSTR_RRE_RA, INSTR_RRE_RF,
111 INSTR_RRE_RR_OPT, INSTR_RRF_F0FF, INSTR_RRF_FUFF, INSTR_RRF_M0RR, 122 INSTR_RRE_RR, INSTR_RRE_RR_OPT,
112 INSTR_RRF_R0RR, INSTR_RRF_RURR, INSTR_RRF_U0FF, INSTR_RRF_U0RF, 123 INSTR_RRF_0UFF, INSTR_RRF_F0FF, INSTR_RRF_F0FF2, INSTR_RRF_F0FR,
124 INSTR_RRF_FFRU, INSTR_RRF_FUFF, INSTR_RRF_M0RR, INSTR_RRF_R0RR,
125 INSTR_RRF_RURR, INSTR_RRF_U0FF, INSTR_RRF_U0RF, INSTR_RRF_U0RR,
126 INSTR_RRF_UUFF, INSTR_RRR_F0FF, INSTR_RRS_RRRDU,
113 INSTR_RR_FF, INSTR_RR_R0, INSTR_RR_RR, INSTR_RR_U0, INSTR_RR_UR, 127 INSTR_RR_FF, INSTR_RR_R0, INSTR_RR_RR, INSTR_RR_U0, INSTR_RR_UR,
114 INSTR_RSE_CCRD, INSTR_RSE_RRRD, INSTR_RSE_RURD, INSTR_RSI_RRP, 128 INSTR_RSE_CCRD, INSTR_RSE_RRRD, INSTR_RSE_RURD,
115 INSTR_RSL_R0RD, INSTR_RSY_AARD, INSTR_RSY_CCRD, INSTR_RSY_RRRD, 129 INSTR_RSI_RRP,
116 INSTR_RSY_RURD, INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD, 130 INSTR_RSL_R0RD,
117 INSTR_RS_RRRD, INSTR_RS_RURD, INSTR_RXE_FRRD, INSTR_RXE_RRRD, 131 INSTR_RSY_AARD, INSTR_RSY_CCRD, INSTR_RSY_RRRD, INSTR_RSY_RURD,
118 INSTR_RXF_FRRDF, INSTR_RXY_FRRD, INSTR_RXY_RRRD, INSTR_RX_FRRD, 132 INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD, INSTR_RS_RRRD,
119 INSTR_RX_RRRD, INSTR_RX_URRD, INSTR_SIY_URD, INSTR_SI_URD, 133 INSTR_RS_RURD,
120 INSTR_SSE_RDRD, INSTR_SSF_RRDRD, INSTR_SS_L0RDRD, INSTR_SS_LIRDRD, 134 INSTR_RXE_FRRD, INSTR_RXE_RRRD,
121 INSTR_SS_LLRDRD, INSTR_SS_RRRDRD, INSTR_SS_RRRDRD2, INSTR_SS_RRRDRD3, 135 INSTR_RXF_FRRDF,
136 INSTR_RXY_FRRD, INSTR_RXY_RRRD, INSTR_RXY_URRD,
137 INSTR_RX_FRRD, INSTR_RX_RRRD, INSTR_RX_URRD,
138 INSTR_SIL_RDI, INSTR_SIL_RDU,
139 INSTR_SIY_IRD, INSTR_SIY_URD,
140 INSTR_SI_URD,
141 INSTR_SSE_RDRD,
142 INSTR_SSF_RRDRD,
143 INSTR_SS_L0RDRD, INSTR_SS_LIRDRD, INSTR_SS_LLRDRD, INSTR_SS_RRRDRD,
144 INSTR_SS_RRRDRD2, INSTR_SS_RRRDRD3,
122 INSTR_S_00, INSTR_S_RD, 145 INSTR_S_00, INSTR_S_RD,
123}; 146};
124 147
@@ -129,7 +152,7 @@ struct operand {
129}; 152};
130 153
131struct insn { 154struct insn {
132 const char name[5]; 155 const char name[6];
133 unsigned char opfrag; 156 unsigned char opfrag;
134 unsigned char format; 157 unsigned char format;
135}; 158};
@@ -170,11 +193,16 @@ static const struct operand operands[] =
170 [U4_12] = { 4, 12, 0 }, 193 [U4_12] = { 4, 12, 0 },
171 [U4_16] = { 4, 16, 0 }, 194 [U4_16] = { 4, 16, 0 },
172 [U4_20] = { 4, 20, 0 }, 195 [U4_20] = { 4, 20, 0 },
196 [U4_32] = { 4, 32, 0 },
173 [U8_8] = { 8, 8, 0 }, 197 [U8_8] = { 8, 8, 0 },
174 [U8_16] = { 8, 16, 0 }, 198 [U8_16] = { 8, 16, 0 },
199 [U8_24] = { 8, 24, 0 },
200 [U8_32] = { 8, 32, 0 },
175 [I16_16] = { 16, 16, OPERAND_SIGNED }, 201 [I16_16] = { 16, 16, OPERAND_SIGNED },
176 [U16_16] = { 16, 16, 0 }, 202 [U16_16] = { 16, 16, 0 },
203 [U16_32] = { 16, 32, 0 },
177 [J16_16] = { 16, 16, OPERAND_PCREL }, 204 [J16_16] = { 16, 16, OPERAND_PCREL },
205 [I16_32] = { 16, 32, OPERAND_SIGNED },
178 [J32_16] = { 32, 16, OPERAND_PCREL }, 206 [J32_16] = { 32, 16, OPERAND_PCREL },
179 [I32_16] = { 32, 16, OPERAND_SIGNED }, 207 [I32_16] = { 32, 16, OPERAND_SIGNED },
180 [U32_16] = { 32, 16, 0 }, 208 [U32_16] = { 32, 16, 0 },
@@ -183,82 +211,93 @@ static const struct operand operands[] =
183}; 211};
184 212
185static const unsigned char formats[][7] = { 213static const unsigned char formats[][7] = {
186 [INSTR_E] = { 0xff, 0,0,0,0,0,0 }, /* e.g. pr */ 214 [INSTR_E] = { 0xff, 0,0,0,0,0,0 },
187 [INSTR_RIE_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 }, /* e.g. brxhg */ 215 [INSTR_RIE_R0UU] = { 0xff, R_8,U16_16,U4_32,0,0,0 },
188 [INSTR_RIL_RP] = { 0x0f, R_8,J32_16,0,0,0,0 }, /* e.g. brasl */ 216 [INSTR_RIE_RRPU] = { 0xff, R_8,R_12,U4_32,J16_16,0,0 },
189 [INSTR_RIL_UP] = { 0x0f, U4_8,J32_16,0,0,0,0 }, /* e.g. brcl */ 217 [INSTR_RIE_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 },
190 [INSTR_RIL_RI] = { 0x0f, R_8,I32_16,0,0,0,0 }, /* e.g. afi */ 218 [INSTR_RIE_RRUUU] = { 0xff, R_8,R_12,U8_16,U8_24,U8_32,0 },
191 [INSTR_RIL_RU] = { 0x0f, R_8,U32_16,0,0,0,0 }, /* e.g. alfi */ 219 [INSTR_RIE_RUPI] = { 0xff, R_8,I8_32,U4_12,J16_16,0,0 },
192 [INSTR_RI_RI] = { 0x0f, R_8,I16_16,0,0,0,0 }, /* e.g. ahi */ 220 [INSTR_RIL_RI] = { 0x0f, R_8,I32_16,0,0,0,0 },
193 [INSTR_RI_RP] = { 0x0f, R_8,J16_16,0,0,0,0 }, /* e.g. brct */ 221 [INSTR_RIL_RP] = { 0x0f, R_8,J32_16,0,0,0,0 },
194 [INSTR_RI_RU] = { 0x0f, R_8,U16_16,0,0,0,0 }, /* e.g. tml */ 222 [INSTR_RIL_RU] = { 0x0f, R_8,U32_16,0,0,0,0 },
195 [INSTR_RI_UP] = { 0x0f, U4_8,J16_16,0,0,0,0 }, /* e.g. brc */ 223 [INSTR_RIL_UP] = { 0x0f, U4_8,J32_16,0,0,0,0 },
196 [INSTR_RRE_00] = { 0xff, 0,0,0,0,0,0 }, /* e.g. palb */ 224 [INSTR_RIS_R0RDU] = { 0xff, R_8,U8_32,D_20,B_16,0,0 },
197 [INSTR_RRE_0R] = { 0xff, R_28,0,0,0,0,0 }, /* e.g. tb */ 225 [INSTR_RIS_RURDI] = { 0xff, R_8,I8_32,U4_12,D_20,B_16,0 },
198 [INSTR_RRE_AA] = { 0xff, A_24,A_28,0,0,0,0 }, /* e.g. cpya */ 226 [INSTR_RIS_RURDU] = { 0xff, R_8,U8_32,U4_12,D_20,B_16,0 },
199 [INSTR_RRE_AR] = { 0xff, A_24,R_28,0,0,0,0 }, /* e.g. sar */ 227 [INSTR_RI_RI] = { 0x0f, R_8,I16_16,0,0,0,0 },
200 [INSTR_RRE_F0] = { 0xff, F_24,0,0,0,0,0 }, /* e.g. sqer */ 228 [INSTR_RI_RP] = { 0x0f, R_8,J16_16,0,0,0,0 },
201 [INSTR_RRE_FF] = { 0xff, F_24,F_28,0,0,0,0 }, /* e.g. debr */ 229 [INSTR_RI_RU] = { 0x0f, R_8,U16_16,0,0,0,0 },
202 [INSTR_RRE_R0] = { 0xff, R_24,0,0,0,0,0 }, /* e.g. ipm */ 230 [INSTR_RI_UP] = { 0x0f, U4_8,J16_16,0,0,0,0 },
203 [INSTR_RRE_RA] = { 0xff, R_24,A_28,0,0,0,0 }, /* e.g. ear */ 231 [INSTR_RRE_00] = { 0xff, 0,0,0,0,0,0 },
204 [INSTR_RRE_RF] = { 0xff, R_24,F_28,0,0,0,0 }, /* e.g. cefbr */ 232 [INSTR_RRE_0R] = { 0xff, R_28,0,0,0,0,0 },
205 [INSTR_RRE_RR] = { 0xff, R_24,R_28,0,0,0,0 }, /* e.g. lura */ 233 [INSTR_RRE_AA] = { 0xff, A_24,A_28,0,0,0,0 },
206 [INSTR_RRE_RR_OPT]= { 0xff, R_24,RO_28,0,0,0,0 }, /* efpc, sfpc */ 234 [INSTR_RRE_AR] = { 0xff, A_24,R_28,0,0,0,0 },
207 [INSTR_RRF_F0FF] = { 0xff, F_16,F_24,F_28,0,0,0 }, /* e.g. madbr */ 235 [INSTR_RRE_F0] = { 0xff, F_24,0,0,0,0,0 },
208 [INSTR_RRF_FUFF] = { 0xff, F_24,F_16,F_28,U4_20,0,0 },/* e.g. didbr */ 236 [INSTR_RRE_FF] = { 0xff, F_24,F_28,0,0,0,0 },
209 [INSTR_RRF_RURR] = { 0xff, R_24,R_28,R_16,U4_20,0,0 },/* e.g. .insn */ 237 [INSTR_RRE_FR] = { 0xff, F_24,R_28,0,0,0,0 },
210 [INSTR_RRF_R0RR] = { 0xff, R_24,R_16,R_28,0,0,0 }, /* e.g. idte */ 238 [INSTR_RRE_R0] = { 0xff, R_24,0,0,0,0,0 },
211 [INSTR_RRF_U0FF] = { 0xff, F_24,U4_16,F_28,0,0,0 }, /* e.g. fixr */ 239 [INSTR_RRE_RA] = { 0xff, R_24,A_28,0,0,0,0 },
212 [INSTR_RRF_U0RF] = { 0xff, R_24,U4_16,F_28,0,0,0 }, /* e.g. cfebr */ 240 [INSTR_RRE_RF] = { 0xff, R_24,F_28,0,0,0,0 },
213 [INSTR_RRF_M0RR] = { 0xff, R_24,R_28,M_16,0,0,0 }, /* e.g. sske */ 241 [INSTR_RRE_RR] = { 0xff, R_24,R_28,0,0,0,0 },
214 [INSTR_RR_FF] = { 0xff, F_8,F_12,0,0,0,0 }, /* e.g. adr */ 242 [INSTR_RRE_RR_OPT]= { 0xff, R_24,RO_28,0,0,0,0 },
215 [INSTR_RR_R0] = { 0xff, R_8, 0,0,0,0,0 }, /* e.g. spm */ 243 [INSTR_RRF_0UFF] = { 0xff, F_24,F_28,U4_20,0,0,0 },
216 [INSTR_RR_RR] = { 0xff, R_8,R_12,0,0,0,0 }, /* e.g. lr */ 244 [INSTR_RRF_F0FF2] = { 0xff, F_24,F_16,F_28,0,0,0 },
217 [INSTR_RR_U0] = { 0xff, U8_8, 0,0,0,0,0 }, /* e.g. svc */ 245 [INSTR_RRF_F0FF] = { 0xff, F_16,F_24,F_28,0,0,0 },
218 [INSTR_RR_UR] = { 0xff, U4_8,R_12,0,0,0,0 }, /* e.g. bcr */ 246 [INSTR_RRF_F0FR] = { 0xff, F_24,F_16,R_28,0,0,0 },
219 [INSTR_RSE_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 }, /* e.g. lmh */ 247 [INSTR_RRF_FFRU] = { 0xff, F_24,F_16,R_28,U4_20,0,0 },
220 [INSTR_RSE_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 }, /* e.g. lmh */ 248 [INSTR_RRF_FUFF] = { 0xff, F_24,F_16,F_28,U4_20,0,0 },
221 [INSTR_RSE_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 }, /* e.g. icmh */ 249 [INSTR_RRF_M0RR] = { 0xff, R_24,R_28,M_16,0,0,0 },
222 [INSTR_RSL_R0RD] = { 0xff, R_8,D_20,B_16,0,0,0 }, /* e.g. tp */ 250 [INSTR_RRF_R0RR] = { 0xff, R_24,R_16,R_28,0,0,0 },
223 [INSTR_RSI_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 }, /* e.g. brxh */ 251 [INSTR_RRF_RURR] = { 0xff, R_24,R_28,R_16,U4_20,0,0 },
224 [INSTR_RSY_RRRD] = { 0xff, R_8,R_12,D20_20,B_16,0,0 },/* e.g. stmy */ 252 [INSTR_RRF_U0FF] = { 0xff, F_24,U4_16,F_28,0,0,0 },
253 [INSTR_RRF_U0RF] = { 0xff, R_24,U4_16,F_28,0,0,0 },
254 [INSTR_RRF_U0RR] = { 0xff, R_24,R_28,U4_16,0,0,0 },
255 [INSTR_RRF_UUFF] = { 0xff, F_24,U4_16,F_28,U4_20,0,0 },
256 [INSTR_RRR_F0FF] = { 0xff, F_24,F_28,F_16,0,0,0 },
257 [INSTR_RRS_RRRDU] = { 0xff, R_8,R_12,U4_32,D_20,B_16,0 },
258 [INSTR_RR_FF] = { 0xff, F_8,F_12,0,0,0,0 },
259 [INSTR_RR_R0] = { 0xff, R_8, 0,0,0,0,0 },
260 [INSTR_RR_RR] = { 0xff, R_8,R_12,0,0,0,0 },
261 [INSTR_RR_U0] = { 0xff, U8_8, 0,0,0,0,0 },
262 [INSTR_RR_UR] = { 0xff, U4_8,R_12,0,0,0,0 },
263 [INSTR_RSE_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 },
264 [INSTR_RSE_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 },
265 [INSTR_RSE_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 },
266 [INSTR_RSI_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 },
267 [INSTR_RSL_R0RD] = { 0xff, D_20,L4_8,B_16,0,0,0 },
268 [INSTR_RSY_AARD] = { 0xff, A_8,A_12,D20_20,B_16,0,0 },
269 [INSTR_RSY_CCRD] = { 0xff, C_8,C_12,D20_20,B_16,0,0 },
270 [INSTR_RSY_RRRD] = { 0xff, R_8,R_12,D20_20,B_16,0,0 },
225 [INSTR_RSY_RURD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 }, 271 [INSTR_RSY_RURD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 },
226 /* e.g. icmh */ 272 [INSTR_RS_AARD] = { 0xff, A_8,A_12,D_20,B_16,0,0 },
227 [INSTR_RSY_AARD] = { 0xff, A_8,A_12,D20_20,B_16,0,0 },/* e.g. lamy */ 273 [INSTR_RS_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 },
228 [INSTR_RSY_CCRD] = { 0xff, C_8,C_12,D20_20,B_16,0,0 },/* e.g. lamy */ 274 [INSTR_RS_R0RD] = { 0xff, R_8,D_20,B_16,0,0,0 },
229 [INSTR_RS_AARD] = { 0xff, A_8,A_12,D_20,B_16,0,0 }, /* e.g. lam */ 275 [INSTR_RS_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 },
230 [INSTR_RS_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 }, /* e.g. lctl */ 276 [INSTR_RS_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 },
231 [INSTR_RS_R0RD] = { 0xff, R_8,D_20,B_16,0,0,0 }, /* e.g. sll */ 277 [INSTR_RXE_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 },
232 [INSTR_RS_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 }, /* e.g. cs */ 278 [INSTR_RXE_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 },
233 [INSTR_RS_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 }, /* e.g. icm */
234 [INSTR_RXE_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 }, /* e.g. axbr */
235 [INSTR_RXE_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 }, /* e.g. lg */
236 [INSTR_RXF_FRRDF] = { 0xff, F_32,F_8,D_20,X_12,B_16,0 }, 279 [INSTR_RXF_FRRDF] = { 0xff, F_32,F_8,D_20,X_12,B_16,0 },
237 /* e.g. madb */ 280 [INSTR_RXY_FRRD] = { 0xff, F_8,D20_20,X_12,B_16,0,0 },
238 [INSTR_RXY_RRRD] = { 0xff, R_8,D20_20,X_12,B_16,0,0 },/* e.g. ly */ 281 [INSTR_RXY_RRRD] = { 0xff, R_8,D20_20,X_12,B_16,0,0 },
239 [INSTR_RXY_FRRD] = { 0xff, F_8,D20_20,X_12,B_16,0,0 },/* e.g. ley */ 282 [INSTR_RXY_URRD] = { 0xff, U4_8,D20_20,X_12,B_16,0,0 },
240 [INSTR_RX_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 }, /* e.g. ae */ 283 [INSTR_RX_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 },
241 [INSTR_RX_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 }, /* e.g. l */ 284 [INSTR_RX_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 },
242 [INSTR_RX_URRD] = { 0xff, U4_8,D_20,X_12,B_16,0,0 }, /* e.g. bc */ 285 [INSTR_RX_URRD] = { 0xff, U4_8,D_20,X_12,B_16,0,0 },
243 [INSTR_SI_URD] = { 0xff, D_20,B_16,U8_8,0,0,0 }, /* e.g. cli */ 286 [INSTR_SIL_RDI] = { 0xff, D_20,B_16,I16_32,0,0,0 },
244 [INSTR_SIY_URD] = { 0xff, D20_20,B_16,U8_8,0,0,0 }, /* e.g. tmy */ 287 [INSTR_SIL_RDU] = { 0xff, D_20,B_16,U16_32,0,0,0 },
245 [INSTR_SSE_RDRD] = { 0xff, D_20,B_16,D_36,B_32,0,0 }, /* e.g. mvsdk */ 288 [INSTR_SIY_IRD] = { 0xff, D20_20,B_16,I8_8,0,0,0 },
289 [INSTR_SIY_URD] = { 0xff, D20_20,B_16,U8_8,0,0,0 },
290 [INSTR_SI_URD] = { 0xff, D_20,B_16,U8_8,0,0,0 },
291 [INSTR_SSE_RDRD] = { 0xff, D_20,B_16,D_36,B_32,0,0 },
292 [INSTR_SSF_RRDRD] = { 0x00, D_20,B_16,D_36,B_32,R_8,0 },
246 [INSTR_SS_L0RDRD] = { 0xff, D_20,L8_8,B_16,D_36,B_32,0 }, 293 [INSTR_SS_L0RDRD] = { 0xff, D_20,L8_8,B_16,D_36,B_32,0 },
247 /* e.g. mvc */
248 [INSTR_SS_LIRDRD] = { 0xff, D_20,L4_8,B_16,D_36,B_32,U4_12 }, 294 [INSTR_SS_LIRDRD] = { 0xff, D_20,L4_8,B_16,D_36,B_32,U4_12 },
249 /* e.g. srp */
250 [INSTR_SS_LLRDRD] = { 0xff, D_20,L4_8,B_16,D_36,L4_12,B_32 }, 295 [INSTR_SS_LLRDRD] = { 0xff, D_20,L4_8,B_16,D_36,L4_12,B_32 },
251 /* e.g. pack */
252 [INSTR_SS_RRRDRD] = { 0xff, D_20,R_8,B_16,D_36,B_32,R_12 },
253 /* e.g. mvck */
254 [INSTR_SS_RRRDRD2]= { 0xff, R_8,D_20,B_16,R_12,D_36,B_32 }, 296 [INSTR_SS_RRRDRD2]= { 0xff, R_8,D_20,B_16,R_12,D_36,B_32 },
255 /* e.g. plo */
256 [INSTR_SS_RRRDRD3]= { 0xff, R_8,R_12,D_20,B_16,D_36,B_32 }, 297 [INSTR_SS_RRRDRD3]= { 0xff, R_8,R_12,D_20,B_16,D_36,B_32 },
257 /* e.g. lmd */ 298 [INSTR_SS_RRRDRD] = { 0xff, D_20,R_8,B_16,D_36,B_32,R_12 },
258 [INSTR_S_00] = { 0xff, 0,0,0,0,0,0 }, /* e.g. hsch */ 299 [INSTR_S_00] = { 0xff, 0,0,0,0,0,0 },
259 [INSTR_S_RD] = { 0xff, D_20,B_16,0,0,0,0 }, /* e.g. lpsw */ 300 [INSTR_S_RD] = { 0xff, D_20,B_16,0,0,0,0 },
260 [INSTR_SSF_RRDRD] = { 0x00, D_20,B_16,D_36,B_32,R_8,0 },
261 /* e.g. mvcos */
262}; 301};
263 302
264static struct insn opcode[] = { 303static struct insn opcode[] = {
@@ -454,6 +493,8 @@ static struct insn opcode[] = {
454static struct insn opcode_01[] = { 493static struct insn opcode_01[] = {
455#ifdef CONFIG_64BIT 494#ifdef CONFIG_64BIT
456 { "sam64", 0x0e, INSTR_E }, 495 { "sam64", 0x0e, INSTR_E },
496 { "pfpo", 0x0a, INSTR_E },
497 { "ptff", 0x04, INSTR_E },
457#endif 498#endif
458 { "pr", 0x01, INSTR_E }, 499 { "pr", 0x01, INSTR_E },
459 { "upt", 0x02, INSTR_E }, 500 { "upt", 0x02, INSTR_E },
@@ -519,6 +560,8 @@ static struct insn opcode_b2[] = {
519 { "cutfu", 0xa7, INSTR_RRF_M0RR }, 560 { "cutfu", 0xa7, INSTR_RRF_M0RR },
520 { "stfle", 0xb0, INSTR_S_RD }, 561 { "stfle", 0xb0, INSTR_S_RD },
521 { "lpswe", 0xb2, INSTR_S_RD }, 562 { "lpswe", 0xb2, INSTR_S_RD },
563 { "srnmt", 0xb9, INSTR_S_RD },
564 { "lfas", 0xbd, INSTR_S_RD },
522#endif 565#endif
523 { "stidp", 0x02, INSTR_S_RD }, 566 { "stidp", 0x02, INSTR_S_RD },
524 { "sck", 0x04, INSTR_S_RD }, 567 { "sck", 0x04, INSTR_S_RD },
@@ -589,7 +632,6 @@ static struct insn opcode_b2[] = {
589 { "clst", 0x5d, INSTR_RRE_RR }, 632 { "clst", 0x5d, INSTR_RRE_RR },
590 { "srst", 0x5e, INSTR_RRE_RR }, 633 { "srst", 0x5e, INSTR_RRE_RR },
591 { "cmpsc", 0x63, INSTR_RRE_RR }, 634 { "cmpsc", 0x63, INSTR_RRE_RR },
592 { "cmpsc", 0x63, INSTR_RRE_RR },
593 { "siga", 0x74, INSTR_S_RD }, 635 { "siga", 0x74, INSTR_S_RD },
594 { "xsch", 0x76, INSTR_S_00 }, 636 { "xsch", 0x76, INSTR_S_00 },
595 { "rp", 0x77, INSTR_S_RD }, 637 { "rp", 0x77, INSTR_S_RD },
@@ -630,6 +672,57 @@ static struct insn opcode_b3[] = {
630 { "cger", 0xc8, INSTR_RRF_U0RF }, 672 { "cger", 0xc8, INSTR_RRF_U0RF },
631 { "cgdr", 0xc9, INSTR_RRF_U0RF }, 673 { "cgdr", 0xc9, INSTR_RRF_U0RF },
632 { "cgxr", 0xca, INSTR_RRF_U0RF }, 674 { "cgxr", 0xca, INSTR_RRF_U0RF },
675 { "lpdfr", 0x70, INSTR_RRE_FF },
676 { "lndfr", 0x71, INSTR_RRE_FF },
677 { "cpsdr", 0x72, INSTR_RRF_F0FF2 },
678 { "lcdfr", 0x73, INSTR_RRE_FF },
679 { "ldgr", 0xc1, INSTR_RRE_FR },
680 { "lgdr", 0xcd, INSTR_RRE_RF },
681 { "adtr", 0xd2, INSTR_RRR_F0FF },
682 { "axtr", 0xda, INSTR_RRR_F0FF },
683 { "cdtr", 0xe4, INSTR_RRE_FF },
684 { "cxtr", 0xec, INSTR_RRE_FF },
685 { "kdtr", 0xe0, INSTR_RRE_FF },
686 { "kxtr", 0xe8, INSTR_RRE_FF },
687 { "cedtr", 0xf4, INSTR_RRE_FF },
688 { "cextr", 0xfc, INSTR_RRE_FF },
689 { "cdgtr", 0xf1, INSTR_RRE_FR },
690 { "cxgtr", 0xf9, INSTR_RRE_FR },
691 { "cdstr", 0xf3, INSTR_RRE_FR },
692 { "cxstr", 0xfb, INSTR_RRE_FR },
693 { "cdutr", 0xf2, INSTR_RRE_FR },
694 { "cxutr", 0xfa, INSTR_RRE_FR },
695 { "cgdtr", 0xe1, INSTR_RRF_U0RF },
696 { "cgxtr", 0xe9, INSTR_RRF_U0RF },
697 { "csdtr", 0xe3, INSTR_RRE_RF },
698 { "csxtr", 0xeb, INSTR_RRE_RF },
699 { "cudtr", 0xe2, INSTR_RRE_RF },
700 { "cuxtr", 0xea, INSTR_RRE_RF },
701 { "ddtr", 0xd1, INSTR_RRR_F0FF },
702 { "dxtr", 0xd9, INSTR_RRR_F0FF },
703 { "eedtr", 0xe5, INSTR_RRE_RF },
704 { "eextr", 0xed, INSTR_RRE_RF },
705 { "esdtr", 0xe7, INSTR_RRE_RF },
706 { "esxtr", 0xef, INSTR_RRE_RF },
707 { "iedtr", 0xf6, INSTR_RRF_F0FR },
708 { "iextr", 0xfe, INSTR_RRF_F0FR },
709 { "ltdtr", 0xd6, INSTR_RRE_FF },
710 { "ltxtr", 0xde, INSTR_RRE_FF },
711 { "fidtr", 0xd7, INSTR_RRF_UUFF },
712 { "fixtr", 0xdf, INSTR_RRF_UUFF },
713 { "ldetr", 0xd4, INSTR_RRF_0UFF },
714 { "lxdtr", 0xdc, INSTR_RRF_0UFF },
715 { "ledtr", 0xd5, INSTR_RRF_UUFF },
716 { "ldxtr", 0xdd, INSTR_RRF_UUFF },
717 { "mdtr", 0xd0, INSTR_RRR_F0FF },
718 { "mxtr", 0xd8, INSTR_RRR_F0FF },
719 { "qadtr", 0xf5, INSTR_RRF_FUFF },
720 { "qaxtr", 0xfd, INSTR_RRF_FUFF },
721 { "rrdtr", 0xf7, INSTR_RRF_FFRU },
722 { "rrxtr", 0xff, INSTR_RRF_FFRU },
723 { "sfasr", 0x85, INSTR_RRE_R0 },
724 { "sdtr", 0xd3, INSTR_RRR_F0FF },
725 { "sxtr", 0xdb, INSTR_RRR_F0FF },
633#endif 726#endif
634 { "lpebr", 0x00, INSTR_RRE_FF }, 727 { "lpebr", 0x00, INSTR_RRE_FF },
635 { "lnebr", 0x01, INSTR_RRE_FF }, 728 { "lnebr", 0x01, INSTR_RRE_FF },
@@ -780,6 +873,14 @@ static struct insn opcode_b9[] = {
780 { "cu24", 0xb1, INSTR_RRF_M0RR }, 873 { "cu24", 0xb1, INSTR_RRF_M0RR },
781 { "cu41", 0xb2, INSTR_RRF_M0RR }, 874 { "cu41", 0xb2, INSTR_RRF_M0RR },
782 { "cu42", 0xb3, INSTR_RRF_M0RR }, 875 { "cu42", 0xb3, INSTR_RRF_M0RR },
876 { "crt", 0x72, INSTR_RRF_U0RR },
877 { "cgrt", 0x60, INSTR_RRF_U0RR },
878 { "clrt", 0x73, INSTR_RRF_U0RR },
879 { "clgrt", 0x61, INSTR_RRF_U0RR },
880 { "ptf", 0xa2, INSTR_RRE_R0 },
881 { "pfmf", 0xaf, INSTR_RRE_RR },
882 { "trte", 0xbf, INSTR_RRF_M0RR },
883 { "trtre", 0xbd, INSTR_RRF_M0RR },
783#endif 884#endif
784 { "kmac", 0x1e, INSTR_RRE_RR }, 885 { "kmac", 0x1e, INSTR_RRE_RR },
785 { "lrvr", 0x1f, INSTR_RRE_RR }, 886 { "lrvr", 0x1f, INSTR_RRE_RR },
@@ -835,6 +936,43 @@ static struct insn opcode_c2[] = {
835 { "cfi", 0x0d, INSTR_RIL_RI }, 936 { "cfi", 0x0d, INSTR_RIL_RI },
836 { "clgfi", 0x0e, INSTR_RIL_RU }, 937 { "clgfi", 0x0e, INSTR_RIL_RU },
837 { "clfi", 0x0f, INSTR_RIL_RU }, 938 { "clfi", 0x0f, INSTR_RIL_RU },
939 { "msfi", 0x01, INSTR_RIL_RI },
940 { "msgfi", 0x00, INSTR_RIL_RI },
941#endif
942 { "", 0, INSTR_INVALID }
943};
944
945static struct insn opcode_c4[] = {
946#ifdef CONFIG_64BIT
947 { "lrl", 0x0d, INSTR_RIL_RP },
948 { "lgrl", 0x08, INSTR_RIL_RP },
949 { "lgfrl", 0x0c, INSTR_RIL_RP },
950 { "lhrl", 0x05, INSTR_RIL_RP },
951 { "lghrl", 0x04, INSTR_RIL_RP },
952 { "llgfrl", 0x0e, INSTR_RIL_RP },
953 { "llhrl", 0x02, INSTR_RIL_RP },
954 { "llghrl", 0x06, INSTR_RIL_RP },
955 { "strl", 0x0f, INSTR_RIL_RP },
956 { "stgrl", 0x0b, INSTR_RIL_RP },
957 { "sthrl", 0x07, INSTR_RIL_RP },
958#endif
959 { "", 0, INSTR_INVALID }
960};
961
962static struct insn opcode_c6[] = {
963#ifdef CONFIG_64BIT
964 { "crl", 0x0d, INSTR_RIL_RP },
965 { "cgrl", 0x08, INSTR_RIL_RP },
966 { "cgfrl", 0x0c, INSTR_RIL_RP },
967 { "chrl", 0x05, INSTR_RIL_RP },
968 { "cghrl", 0x04, INSTR_RIL_RP },
969 { "clrl", 0x0f, INSTR_RIL_RP },
970 { "clgrl", 0x0a, INSTR_RIL_RP },
971 { "clgfrl", 0x0e, INSTR_RIL_RP },
972 { "clhrl", 0x07, INSTR_RIL_RP },
973 { "clghrl", 0x06, INSTR_RIL_RP },
974 { "pfdrl", 0x02, INSTR_RIL_UP },
975 { "exrl", 0x00, INSTR_RIL_RP },
838#endif 976#endif
839 { "", 0, INSTR_INVALID } 977 { "", 0, INSTR_INVALID }
840}; 978};
@@ -842,6 +980,8 @@ static struct insn opcode_c2[] = {
842static struct insn opcode_c8[] = { 980static struct insn opcode_c8[] = {
843#ifdef CONFIG_64BIT 981#ifdef CONFIG_64BIT
844 { "mvcos", 0x00, INSTR_SSF_RRDRD }, 982 { "mvcos", 0x00, INSTR_SSF_RRDRD },
983 { "ectg", 0x01, INSTR_SSF_RRDRD },
984 { "csst", 0x02, INSTR_SSF_RRDRD },
845#endif 985#endif
846 { "", 0, INSTR_INVALID } 986 { "", 0, INSTR_INVALID }
847}; 987};
@@ -917,6 +1057,12 @@ static struct insn opcode_e3[] = {
917 { "llgh", 0x91, INSTR_RXY_RRRD }, 1057 { "llgh", 0x91, INSTR_RXY_RRRD },
918 { "llc", 0x94, INSTR_RXY_RRRD }, 1058 { "llc", 0x94, INSTR_RXY_RRRD },
919 { "llh", 0x95, INSTR_RXY_RRRD }, 1059 { "llh", 0x95, INSTR_RXY_RRRD },
1060 { "cgh", 0x34, INSTR_RXY_RRRD },
1061 { "laey", 0x75, INSTR_RXY_RRRD },
1062 { "ltgf", 0x32, INSTR_RXY_RRRD },
1063 { "mfy", 0x5c, INSTR_RXY_RRRD },
1064 { "mhy", 0x7c, INSTR_RXY_RRRD },
1065 { "pfd", 0x36, INSTR_RXY_URRD },
920#endif 1066#endif
921 { "lrv", 0x1e, INSTR_RXY_RRRD }, 1067 { "lrv", 0x1e, INSTR_RXY_RRRD },
922 { "lrvh", 0x1f, INSTR_RXY_RRRD }, 1068 { "lrvh", 0x1f, INSTR_RXY_RRRD },
@@ -931,6 +1077,15 @@ static struct insn opcode_e3[] = {
931static struct insn opcode_e5[] = { 1077static struct insn opcode_e5[] = {
932#ifdef CONFIG_64BIT 1078#ifdef CONFIG_64BIT
933 { "strag", 0x02, INSTR_SSE_RDRD }, 1079 { "strag", 0x02, INSTR_SSE_RDRD },
1080 { "chhsi", 0x54, INSTR_SIL_RDI },
1081 { "chsi", 0x5c, INSTR_SIL_RDI },
1082 { "cghsi", 0x58, INSTR_SIL_RDI },
1083 { "clhhsi", 0x55, INSTR_SIL_RDU },
1084 { "clfhsi", 0x5d, INSTR_SIL_RDU },
1085 { "clghsi", 0x59, INSTR_SIL_RDU },
1086 { "mvhhi", 0x44, INSTR_SIL_RDI },
1087 { "mvhi", 0x4c, INSTR_SIL_RDI },
1088 { "mvghi", 0x48, INSTR_SIL_RDI },
934#endif 1089#endif
935 { "lasp", 0x00, INSTR_SSE_RDRD }, 1090 { "lasp", 0x00, INSTR_SSE_RDRD },
936 { "tprot", 0x01, INSTR_SSE_RDRD }, 1091 { "tprot", 0x01, INSTR_SSE_RDRD },
@@ -977,6 +1132,11 @@ static struct insn opcode_eb[] = {
977 { "lmy", 0x98, INSTR_RSY_RRRD }, 1132 { "lmy", 0x98, INSTR_RSY_RRRD },
978 { "lamy", 0x9a, INSTR_RSY_AARD }, 1133 { "lamy", 0x9a, INSTR_RSY_AARD },
979 { "stamy", 0x9b, INSTR_RSY_AARD }, 1134 { "stamy", 0x9b, INSTR_RSY_AARD },
1135 { "asi", 0x6a, INSTR_SIY_IRD },
1136 { "agsi", 0x7a, INSTR_SIY_IRD },
1137 { "alsi", 0x6e, INSTR_SIY_IRD },
1138 { "algsi", 0x7e, INSTR_SIY_IRD },
1139 { "ecag", 0x4c, INSTR_RSY_RRRD },
980#endif 1140#endif
981 { "rll", 0x1d, INSTR_RSY_RRRD }, 1141 { "rll", 0x1d, INSTR_RSY_RRRD },
982 { "mvclu", 0x8e, INSTR_RSY_RRRD }, 1142 { "mvclu", 0x8e, INSTR_RSY_RRRD },
@@ -988,6 +1148,30 @@ static struct insn opcode_ec[] = {
988#ifdef CONFIG_64BIT 1148#ifdef CONFIG_64BIT
989 { "brxhg", 0x44, INSTR_RIE_RRP }, 1149 { "brxhg", 0x44, INSTR_RIE_RRP },
990 { "brxlg", 0x45, INSTR_RIE_RRP }, 1150 { "brxlg", 0x45, INSTR_RIE_RRP },
1151 { "crb", 0xf6, INSTR_RRS_RRRDU },
1152 { "cgrb", 0xe4, INSTR_RRS_RRRDU },
1153 { "crj", 0x76, INSTR_RIE_RRPU },
1154 { "cgrj", 0x64, INSTR_RIE_RRPU },
1155 { "cib", 0xfe, INSTR_RIS_RURDI },
1156 { "cgib", 0xfc, INSTR_RIS_RURDI },
1157 { "cij", 0x7e, INSTR_RIE_RUPI },
1158 { "cgij", 0x7c, INSTR_RIE_RUPI },
1159 { "cit", 0x72, INSTR_RIE_R0IU },
1160 { "cgit", 0x70, INSTR_RIE_R0IU },
1161 { "clrb", 0xf7, INSTR_RRS_RRRDU },
1162 { "clgrb", 0xe5, INSTR_RRS_RRRDU },
1163 { "clrj", 0x77, INSTR_RIE_RRPU },
1164 { "clgrj", 0x65, INSTR_RIE_RRPU },
1165 { "clib", 0xff, INSTR_RIS_RURDU },
1166 { "clgib", 0xfd, INSTR_RIS_RURDU },
1167 { "clij", 0x7f, INSTR_RIE_RUPU },
1168 { "clgij", 0x7d, INSTR_RIE_RUPU },
1169 { "clfit", 0x73, INSTR_RIE_R0UU },
1170 { "clgit", 0x71, INSTR_RIE_R0UU },
1171 { "rnsbg", 0x54, INSTR_RIE_RRUUU },
1172 { "rxsbg", 0x57, INSTR_RIE_RRUUU },
1173 { "rosbg", 0x56, INSTR_RIE_RRUUU },
1174 { "risbg", 0x55, INSTR_RIE_RRUUU },
991#endif 1175#endif
992 { "", 0, INSTR_INVALID } 1176 { "", 0, INSTR_INVALID }
993}; 1177};
@@ -1004,6 +1188,16 @@ static struct insn opcode_ed[] = {
1004 { "ldy", 0x65, INSTR_RXY_FRRD }, 1188 { "ldy", 0x65, INSTR_RXY_FRRD },
1005 { "stey", 0x66, INSTR_RXY_FRRD }, 1189 { "stey", 0x66, INSTR_RXY_FRRD },
1006 { "stdy", 0x67, INSTR_RXY_FRRD }, 1190 { "stdy", 0x67, INSTR_RXY_FRRD },
1191 { "sldt", 0x40, INSTR_RXF_FRRDF },
1192 { "slxt", 0x48, INSTR_RXF_FRRDF },
1193 { "srdt", 0x41, INSTR_RXF_FRRDF },
1194 { "srxt", 0x49, INSTR_RXF_FRRDF },
1195 { "tdcet", 0x50, INSTR_RXE_FRRD },
1196 { "tdcdt", 0x54, INSTR_RXE_FRRD },
1197 { "tdcxt", 0x58, INSTR_RXE_FRRD },
1198 { "tdget", 0x51, INSTR_RXE_FRRD },
1199 { "tdgdt", 0x55, INSTR_RXE_FRRD },
1200 { "tdgxt", 0x59, INSTR_RXE_FRRD },
1007#endif 1201#endif
1008 { "ldeb", 0x04, INSTR_RXE_FRRD }, 1202 { "ldeb", 0x04, INSTR_RXE_FRRD },
1009 { "lxdb", 0x05, INSTR_RXE_FRRD }, 1203 { "lxdb", 0x05, INSTR_RXE_FRRD },
@@ -1037,6 +1231,7 @@ static struct insn opcode_ed[] = {
1037 { "mae", 0x2e, INSTR_RXF_FRRDF }, 1231 { "mae", 0x2e, INSTR_RXF_FRRDF },
1038 { "mse", 0x2f, INSTR_RXF_FRRDF }, 1232 { "mse", 0x2f, INSTR_RXF_FRRDF },
1039 { "sqe", 0x34, INSTR_RXE_FRRD }, 1233 { "sqe", 0x34, INSTR_RXE_FRRD },
1234 { "sqd", 0x35, INSTR_RXE_FRRD },
1040 { "mee", 0x37, INSTR_RXE_FRRD }, 1235 { "mee", 0x37, INSTR_RXE_FRRD },
1041 { "mad", 0x3e, INSTR_RXF_FRRDF }, 1236 { "mad", 0x3e, INSTR_RXF_FRRDF },
1042 { "msd", 0x3f, INSTR_RXF_FRRDF }, 1237 { "msd", 0x3f, INSTR_RXF_FRRDF },
@@ -1117,6 +1312,12 @@ static struct insn *find_insn(unsigned char *code)
1117 case 0xc2: 1312 case 0xc2:
1118 table = opcode_c2; 1313 table = opcode_c2;
1119 break; 1314 break;
1315 case 0xc4:
1316 table = opcode_c4;
1317 break;
1318 case 0xc6:
1319 table = opcode_c6;
1320 break;
1120 case 0xc8: 1321 case 0xc8:
1121 table = opcode_c8; 1322 table = opcode_c8;
1122 break; 1323 break;
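
Note: the dis.c changes above grow the disassembler's operand and format tables; each operands[] entry is a { bits, shift, flags } descriptor naming a bit field inside the instruction. A simplified, stand-alone model of how such a descriptor can be applied to raw instruction bytes (extract_operand and the OPERAND_SIGNED value are made up for this sketch; the real decoder additionally classifies registers, displacements and PC-relative targets):

#include <stdio.h>

#define OPERAND_SIGNED 0x04	/* flag value chosen for this sketch */

struct operand {
	unsigned char bits;	/* width of the field */
	unsigned char shift;	/* bit offset from the start of the insn */
	unsigned char flags;
};

static long long extract_operand(const unsigned char *code,
				 const struct operand *op)
{
	unsigned long long val = 0;
	int i;

	for (i = 0; i < op->bits; i++) {	/* gather bits, MSB first */
		int bit = op->shift + i;
		val = (val << 1) | ((code[bit / 8] >> (7 - bit % 8)) & 1);
	}
	if ((op->flags & OPERAND_SIGNED) && (val >> (op->bits - 1)))
		return (long long)val - (1LL << op->bits);	/* sign extend */
	return (long long)val;
}

int main(void)
{
	/* "ahi %r1,-2" encodes as a7 1a ff fe */
	const unsigned char insn[] = { 0xa7, 0x1a, 0xff, 0xfe };
	const struct operand r_8    = {  4,  8, 0 };		  /* like R_8 */
	const struct operand i16_16 = { 16, 16, OPERAND_SIGNED }; /* like I16_16 */

	printf("register:  %lld\n", extract_operand(insn, &r_8));	/* 1 */
	printf("immediate: %lld\n", extract_operand(insn, &i16_16));	/* -2 */
	return 0;
}
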
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index e49e9e0c69fd..c00856ad4e5a 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -82,7 +82,8 @@ asm(
82 " lm 6,15,24(15)\n" 82 " lm 6,15,24(15)\n"
83#endif 83#endif
84 " br 14\n" 84 " br 14\n"
85 " .size savesys_ipl_nss, .-savesys_ipl_nss\n"); 85 " .size savesys_ipl_nss, .-savesys_ipl_nss\n"
86 " .previous\n");
86 87
87static __initdata char upper_command_line[COMMAND_LINE_SIZE]; 88static __initdata char upper_command_line[COMMAND_LINE_SIZE];
88 89
@@ -214,10 +215,13 @@ static __initdata struct sysinfo_3_2_2 vmms __aligned(PAGE_SIZE);
214 215
215static noinline __init void detect_machine_type(void) 216static noinline __init void detect_machine_type(void)
216{ 217{
217 /* No VM information? Looks like LPAR */ 218 /* Check current-configuration-level */
218 if (stsi(&vmms, 3, 2, 2) == -ENOSYS) 219 if ((stsi(NULL, 0, 0, 0) >> 28) <= 2) {
220 S390_lowcore.machine_flags |= MACHINE_FLAG_LPAR;
219 return; 221 return;
220 if (!vmms.count) 222 }
223 /* Get virtual-machine cpu information. */
224 if (stsi(&vmms, 3, 2, 2) == -ENOSYS || !vmms.count)
221 return; 225 return;
222 226
223 /* Running under KVM? If not we assume z/VM */ 227 /* Running under KVM? If not we assume z/VM */
@@ -352,6 +356,7 @@ static __init void detect_machine_facilities(void)
352{ 356{
353#ifdef CONFIG_64BIT 357#ifdef CONFIG_64BIT
354 unsigned int facilities; 358 unsigned int facilities;
359 unsigned long long facility_bits;
355 360
356 facilities = stfl(); 361 facilities = stfl();
357 if (facilities & (1 << 28)) 362 if (facilities & (1 << 28))
@@ -360,6 +365,9 @@ static __init void detect_machine_facilities(void)
360 S390_lowcore.machine_flags |= MACHINE_FLAG_PFMF; 365 S390_lowcore.machine_flags |= MACHINE_FLAG_PFMF;
361 if (facilities & (1 << 4)) 366 if (facilities & (1 << 4))
362 S390_lowcore.machine_flags |= MACHINE_FLAG_MVCOS; 367 S390_lowcore.machine_flags |= MACHINE_FLAG_MVCOS;
368 if ((stfle(&facility_bits, 1) > 0) &&
369 (facility_bits & (1ULL << (63 - 40))))
370 S390_lowcore.machine_flags |= MACHINE_FLAG_SPP;
363#endif 371#endif
364} 372}
365 373
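
Note: the facility check added above tests bit 40 of the STFLE result with 1ULL << (63 - 40), because facility bits are numbered from the leftmost bit of the doubleword. A small worked example of that numbering (test_facility_bit is a made-up helper, not a kernel interface; facility 40 is the one the hunk maps to MACHINE_FLAG_SPP):

#include <stdio.h>

static int test_facility_bit(unsigned long long facility_word, int nr)
{
	return (facility_word >> (63 - nr)) & 1;	/* nr counted from the MSB */
}

int main(void)
{
	/* pretend STFLE stored a doubleword with only facility 40 set */
	unsigned long long fac = 1ULL << (63 - 40);

	printf("facility 40: %d\n", test_facility_bit(fac, 40));	/* 1 */
	printf("facility 41: %d\n", test_facility_bit(fac, 41));	/* 0 */
	return 0;
}
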
@@ -402,8 +410,19 @@ static void __init append_to_cmdline(size_t (*ipl_data)(char *, size_t))
402 410
403static void __init setup_boot_command_line(void) 411static void __init setup_boot_command_line(void)
404{ 412{
413 int i;
414
415 /* convert arch command line to ascii */
416 for (i = 0; i < ARCH_COMMAND_LINE_SIZE; i++)
417 if (COMMAND_LINE[i] & 0x80)
418 break;
419 if (i < ARCH_COMMAND_LINE_SIZE)
420 EBCASC(COMMAND_LINE, ARCH_COMMAND_LINE_SIZE);
421 COMMAND_LINE[ARCH_COMMAND_LINE_SIZE-1] = 0;
422
405 /* copy arch command line */ 423 /* copy arch command line */
406 strlcpy(boot_command_line, COMMAND_LINE, ARCH_COMMAND_LINE_SIZE); 424 strlcpy(boot_command_line, strstrip(COMMAND_LINE),
425 ARCH_COMMAND_LINE_SIZE);
407 426
408 /* append IPL PARM data to the boot command line */ 427 /* append IPL PARM data to the boot command line */
409 if (MACHINE_IS_VM) 428 if (MACHINE_IS_VM)
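
Note: the setup_boot_command_line() change above converts the IPL-supplied command line from EBCDIC when any byte has the top bit set, then terminates and strips it. A hedged sketch of that heuristic in isolation (normalize_cmdline is a hypothetical name; EBCASC() is the real in-place conversion macro from <asm/ebcdic.h>):

#include <linux/init.h>
#include <asm/ebcdic.h>

static void __init normalize_cmdline(char *cmdline, int size)
{
	int i;

	/* printable ASCII never has the top bit set */
	for (i = 0; i < size; i++)
		if (cmdline[i] & 0x80)
			break;
	if (i < size)			/* found one: assume EBCDIC */
		EBCASC(cmdline, size);	/* convert to ASCII in place */
	cmdline[size - 1] = 0;		/* force termination */
}
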
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index e8ef21c51bbe..bea9ee37ac9d 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -13,7 +13,6 @@
13#include <linux/linkage.h> 13#include <linux/linkage.h>
14#include <linux/init.h> 14#include <linux/init.h>
15#include <asm/cache.h> 15#include <asm/cache.h>
16#include <asm/lowcore.h>
17#include <asm/errno.h> 16#include <asm/errno.h>
18#include <asm/ptrace.h> 17#include <asm/ptrace.h>
19#include <asm/thread_info.h> 18#include <asm/thread_info.h>
@@ -74,21 +73,24 @@ STACK_SIZE = 1 << STACK_SHIFT
74 basr %r14,%r1 73 basr %r14,%r1
75 .endm 74 .endm
76 75
77 .macro TRACE_IRQS_CHECK 76 .macro TRACE_IRQS_CHECK_ON
78 basr %r2,%r0
79 tm SP_PSW(%r15),0x03 # irqs enabled? 77 tm SP_PSW(%r15),0x03 # irqs enabled?
80 jz 0f 78 bz BASED(0f)
81 l %r1,BASED(.Ltrace_irq_on_caller) 79 TRACE_IRQS_ON
82 basr %r14,%r1 800:
83 j 1f 81 .endm
840: l %r1,BASED(.Ltrace_irq_off_caller) 82
85 basr %r14,%r1 83 .macro TRACE_IRQS_CHECK_OFF
861: 84 tm SP_PSW(%r15),0x03 # irqs enabled?
85 bz BASED(0f)
86 TRACE_IRQS_OFF
870:
87 .endm 88 .endm
88#else 89#else
89#define TRACE_IRQS_ON 90#define TRACE_IRQS_ON
90#define TRACE_IRQS_OFF 91#define TRACE_IRQS_OFF
91#define TRACE_IRQS_CHECK 92#define TRACE_IRQS_CHECK_ON
93#define TRACE_IRQS_CHECK_OFF
92#endif 94#endif
93 95
94#ifdef CONFIG_LOCKDEP 96#ifdef CONFIG_LOCKDEP
@@ -178,9 +180,9 @@ STACK_SIZE = 1 << STACK_SHIFT
178 s %r15,BASED(.Lc_spsize) # make room for registers & psw 180 s %r15,BASED(.Lc_spsize) # make room for registers & psw
179 mvc SP_PSW(8,%r15),0(%r12) # move user PSW to stack 181 mvc SP_PSW(8,%r15),0(%r12) # move user PSW to stack
180 st %r2,SP_ORIG_R2(%r15) # store original content of gpr 2 182 st %r2,SP_ORIG_R2(%r15) # store original content of gpr 2
181 icm %r12,3,__LC_SVC_ILC 183 icm %r12,12,__LC_SVC_ILC
182 stm %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack 184 stm %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack
183 st %r12,SP_SVCNR(%r15) 185 st %r12,SP_ILC(%r15)
184 mvc SP_R12(16,%r15),\savearea # move %r12-%r15 to stack 186 mvc SP_R12(16,%r15),\savearea # move %r12-%r15 to stack
185 la %r12,0 187 la %r12,0
186 st %r12,__SF_BACKCHAIN(%r15) # clear back chain 188 st %r12,__SF_BACKCHAIN(%r15) # clear back chain
@@ -274,66 +276,45 @@ sysc_do_restart:
274 st %r2,SP_R2(%r15) # store return value (change R2 on stack) 276 st %r2,SP_R2(%r15) # store return value (change R2 on stack)
275 277
276sysc_return: 278sysc_return:
279 LOCKDEP_SYS_EXIT
280sysc_tif:
277 tm __TI_flags+3(%r9),_TIF_WORK_SVC 281 tm __TI_flags+3(%r9),_TIF_WORK_SVC
278 bnz BASED(sysc_work) # there is work to do (signals etc.) 282 bnz BASED(sysc_work) # there is work to do (signals etc.)
279sysc_restore: 283sysc_restore:
280#ifdef CONFIG_TRACE_IRQFLAGS
281 la %r1,BASED(sysc_restore_trace_psw_addr)
282 l %r1,0(%r1)
283 lpsw 0(%r1)
284sysc_restore_trace:
285 TRACE_IRQS_CHECK
286 LOCKDEP_SYS_EXIT
287#endif
288sysc_leave:
289 RESTORE_ALL __LC_RETURN_PSW,1 284 RESTORE_ALL __LC_RETURN_PSW,1
290sysc_done: 285sysc_done:
291 286
292#ifdef CONFIG_TRACE_IRQFLAGS
293sysc_restore_trace_psw_addr:
294 .long sysc_restore_trace_psw
295
296 .section .data,"aw",@progbits
297 .align 8
298 .globl sysc_restore_trace_psw
299sysc_restore_trace_psw:
300 .long 0, sysc_restore_trace + 0x80000000
301 .previous
302#endif
303
304# 287#
305# recheck if there is more work to do 288# There is work to do, but first we need to check if we return to userspace.
306#
307sysc_work_loop:
308 tm __TI_flags+3(%r9),_TIF_WORK_SVC
309 bz BASED(sysc_restore) # there is no work to do
310#
311# One of the work bits is on. Find out which one.
312# 289#
313sysc_work: 290sysc_work:
314 tm SP_PSW+1(%r15),0x01 # returning to user ? 291 tm SP_PSW+1(%r15),0x01 # returning to user ?
315 bno BASED(sysc_restore) 292 bno BASED(sysc_restore)
293
294#
295# One of the work bits is on. Find out which one.
296#
297sysc_work_tif:
316 tm __TI_flags+3(%r9),_TIF_MCCK_PENDING 298 tm __TI_flags+3(%r9),_TIF_MCCK_PENDING
317 bo BASED(sysc_mcck_pending) 299 bo BASED(sysc_mcck_pending)
318 tm __TI_flags+3(%r9),_TIF_NEED_RESCHED 300 tm __TI_flags+3(%r9),_TIF_NEED_RESCHED
319 bo BASED(sysc_reschedule) 301 bo BASED(sysc_reschedule)
320 tm __TI_flags+3(%r9),_TIF_SIGPENDING 302 tm __TI_flags+3(%r9),_TIF_SIGPENDING
321 bnz BASED(sysc_sigpending) 303 bo BASED(sysc_sigpending)
322 tm __TI_flags+3(%r9),_TIF_NOTIFY_RESUME 304 tm __TI_flags+3(%r9),_TIF_NOTIFY_RESUME
323 bnz BASED(sysc_notify_resume) 305 bo BASED(sysc_notify_resume)
324 tm __TI_flags+3(%r9),_TIF_RESTART_SVC 306 tm __TI_flags+3(%r9),_TIF_RESTART_SVC
325 bo BASED(sysc_restart) 307 bo BASED(sysc_restart)
326 tm __TI_flags+3(%r9),_TIF_SINGLE_STEP 308 tm __TI_flags+3(%r9),_TIF_SINGLE_STEP
327 bo BASED(sysc_singlestep) 309 bo BASED(sysc_singlestep)
328 b BASED(sysc_restore) 310 b BASED(sysc_return) # beware of critical section cleanup
329sysc_work_done:
330 311
331# 312#
332# _TIF_NEED_RESCHED is set, call schedule 313# _TIF_NEED_RESCHED is set, call schedule
333# 314#
334sysc_reschedule: 315sysc_reschedule:
335 l %r1,BASED(.Lschedule) 316 l %r1,BASED(.Lschedule)
336 la %r14,BASED(sysc_work_loop) 317 la %r14,BASED(sysc_return)
337 br %r1 # call scheduler 318 br %r1 # call scheduler
338 319
339# 320#
@@ -341,7 +322,7 @@ sysc_reschedule:
341# 322#
342sysc_mcck_pending: 323sysc_mcck_pending:
343 l %r1,BASED(.Ls390_handle_mcck) 324 l %r1,BASED(.Ls390_handle_mcck)
344 la %r14,BASED(sysc_work_loop) 325 la %r14,BASED(sysc_return)
345 br %r1 # TIF bit will be cleared by handler 326 br %r1 # TIF bit will be cleared by handler
346 327
347# 328#
@@ -356,7 +337,7 @@ sysc_sigpending:
356 bo BASED(sysc_restart) 337 bo BASED(sysc_restart)
357 tm __TI_flags+3(%r9),_TIF_SINGLE_STEP 338 tm __TI_flags+3(%r9),_TIF_SINGLE_STEP
358 bo BASED(sysc_singlestep) 339 bo BASED(sysc_singlestep)
359 b BASED(sysc_work_loop) 340 b BASED(sysc_return)
360 341
361# 342#
362# _TIF_NOTIFY_RESUME is set, call do_notify_resume 343# _TIF_NOTIFY_RESUME is set, call do_notify_resume
@@ -364,7 +345,7 @@ sysc_sigpending:
364sysc_notify_resume: 345sysc_notify_resume:
365 la %r2,SP_PTREGS(%r15) # load pt_regs 346 la %r2,SP_PTREGS(%r15) # load pt_regs
366 l %r1,BASED(.Ldo_notify_resume) 347 l %r1,BASED(.Ldo_notify_resume)
367 la %r14,BASED(sysc_work_loop) 348 la %r14,BASED(sysc_return)
368 br %r1 # call do_notify_resume 349 br %r1 # call do_notify_resume
369 350
370 351
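
Note: the reworked sysc_return/sysc_work path above tests the TIF work bits in a fixed priority order and lets every handler branch back to sysc_return, so remaining bits are re-checked before leaving the kernel. An illustrative C rendering of that dispatch (the TIF bit values and the handle() stub are placeholders; the real code also handles _TIF_RESTART_SVC and _TIF_SINGLE_STEP):

#include <stdio.h>

enum {					/* bit positions are placeholders */
	TIF_MCCK_PENDING  = 1 << 0,
	TIF_NEED_RESCHED  = 1 << 1,
	TIF_SIGPENDING    = 1 << 2,
	TIF_NOTIFY_RESUME = 1 << 3,
};

static void handle(unsigned int *flags, unsigned int bit, const char *what)
{
	printf("handling %s\n", what);
	*flags &= ~bit;			/* real handlers clear their own bit */
}

static void sysc_work(unsigned int *flags, int returning_to_user)
{
	if (!returning_to_user)
		return;			/* straight to sysc_restore */
	while (*flags) {		/* each handler branches back here */
		if (*flags & TIF_MCCK_PENDING)
			handle(flags, TIF_MCCK_PENDING, "machine check");
		else if (*flags & TIF_NEED_RESCHED)
			handle(flags, TIF_NEED_RESCHED, "reschedule");
		else if (*flags & TIF_SIGPENDING)
			handle(flags, TIF_SIGPENDING, "signals");
		else if (*flags & TIF_NOTIFY_RESUME)
			handle(flags, TIF_NOTIFY_RESUME, "notify resume");
	}
}

int main(void)
{
	unsigned int flags = TIF_SIGPENDING | TIF_NEED_RESCHED;

	sysc_work(&flags, 1);
	return 0;
}
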
@@ -459,11 +440,13 @@ kernel_execve:
459 br %r14 440 br %r14
460 # execve succeeded. 441 # execve succeeded.
4610: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts 4420: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts
443 TRACE_IRQS_OFF
462 l %r15,__LC_KERNEL_STACK # load ksp 444 l %r15,__LC_KERNEL_STACK # load ksp
463 s %r15,BASED(.Lc_spsize) # make room for registers & psw 445 s %r15,BASED(.Lc_spsize) # make room for registers & psw
464 l %r9,__LC_THREAD_INFO 446 l %r9,__LC_THREAD_INFO
465 mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs 447 mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs
466 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) 448 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
449 TRACE_IRQS_ON
467 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts 450 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
468 l %r1,BASED(.Lexecve_tail) 451 l %r1,BASED(.Lexecve_tail)
469 basr %r14,%r1 452 basr %r14,%r1
@@ -500,8 +483,8 @@ pgm_check_handler:
500 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 483 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
501 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 484 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
502pgm_no_vtime: 485pgm_no_vtime:
486 TRACE_IRQS_CHECK_OFF
503 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct 487 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
504 TRACE_IRQS_OFF
505 l %r3,__LC_PGM_ILC # load program interruption code 488 l %r3,__LC_PGM_ILC # load program interruption code
506 la %r8,0x7f 489 la %r8,0x7f
507 nr %r8,%r3 490 nr %r8,%r3
@@ -510,8 +493,10 @@ pgm_do_call:
510 sll %r8,2 493 sll %r8,2
511 l %r7,0(%r8,%r7) # load address of handler routine 494 l %r7,0(%r8,%r7) # load address of handler routine
512 la %r2,SP_PTREGS(%r15) # address of register-save area 495 la %r2,SP_PTREGS(%r15) # address of register-save area
513 la %r14,BASED(sysc_return) 496 basr %r14,%r7 # branch to interrupt-handler
514 br %r7 # branch to interrupt-handler 497pgm_exit:
498 TRACE_IRQS_CHECK_ON
499 b BASED(sysc_return)
515 500
516# 501#
517# handle per exception 502# handle per exception
@@ -538,20 +523,28 @@ pgm_per_std:
538 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 523 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
539 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 524 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
540pgm_no_vtime2: 525pgm_no_vtime2:
526 TRACE_IRQS_CHECK_OFF
541 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct 527 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
542 TRACE_IRQS_OFF
543 l %r1,__TI_task(%r9) 528 l %r1,__TI_task(%r9)
529 tm SP_PSW+1(%r15),0x01 # kernel per event ?
530 bz BASED(kernel_per)
544 mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID 531 mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
545 mvc __THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS 532 mvc __THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS
546 mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID 533 mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
547 oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP 534 oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
548 tm SP_PSW+1(%r15),0x01 # kernel per event ?
549 bz BASED(kernel_per)
550 l %r3,__LC_PGM_ILC # load program interruption code 535 l %r3,__LC_PGM_ILC # load program interruption code
551 la %r8,0x7f 536 la %r8,0x7f
552 nr %r8,%r3 # clear per-event-bit and ilc 537 nr %r8,%r3 # clear per-event-bit and ilc
553 be BASED(sysc_return) # only per or per+check ? 538 be BASED(pgm_exit2) # only per or per+check ?
554 b BASED(pgm_do_call) 539 l %r7,BASED(.Ljump_table)
540 sll %r8,2
541 l %r7,0(%r8,%r7) # load address of handler routine
542 la %r2,SP_PTREGS(%r15) # address of register-save area
543 basr %r14,%r7 # branch to interrupt-handler
544pgm_exit2:
545 TRACE_IRQS_ON
546 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
547 b BASED(sysc_return)
555 548
556# 549#
557# it was a single stepped SVC that is causing all the trouble 550# it was a single stepped SVC that is causing all the trouble
@@ -571,8 +564,8 @@ pgm_svcper:
571 mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID 564 mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID
572 oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP 565 oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
573 TRACE_IRQS_ON 566 TRACE_IRQS_ON
574 lm %r2,%r6,SP_R2(%r15) # load svc arguments
575 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts 567 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
568 lm %r2,%r6,SP_R2(%r15) # load svc arguments
576 b BASED(sysc_do_svc) 569 b BASED(sysc_do_svc)
577 570
578# 571#
@@ -583,8 +576,8 @@ kernel_per:
583 mvi SP_SVCNR+1(%r15),0xff 576 mvi SP_SVCNR+1(%r15),0xff
584 la %r2,SP_PTREGS(%r15) # address of register-save area 577 la %r2,SP_PTREGS(%r15) # address of register-save area
585 l %r1,BASED(.Lhandle_per) # load adr. of per handler 578 l %r1,BASED(.Lhandle_per) # load adr. of per handler
586 la %r14,BASED(sysc_restore)# load adr. of system return 579 basr %r14,%r1 # branch to do_single_step
587 br %r1 # branch to do_single_step 580 b BASED(pgm_exit)
588 581
589/* 582/*
590 * IO interrupt handler routine 583 * IO interrupt handler routine
@@ -603,134 +596,126 @@ io_int_handler:
603 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 596 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
604 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER 597 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
605io_no_vtime: 598io_no_vtime:
606 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
607 TRACE_IRQS_OFF 599 TRACE_IRQS_OFF
600 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
608 l %r1,BASED(.Ldo_IRQ) # load address of do_IRQ 601 l %r1,BASED(.Ldo_IRQ) # load address of do_IRQ
609 la %r2,SP_PTREGS(%r15) # address of register-save area 602 la %r2,SP_PTREGS(%r15) # address of register-save area
610 basr %r14,%r1 # branch to standard irq handler 603 basr %r14,%r1 # branch to standard irq handler
611io_return: 604io_return:
605 LOCKDEP_SYS_EXIT
606 TRACE_IRQS_ON
607io_tif:
612 tm __TI_flags+3(%r9),_TIF_WORK_INT 608 tm __TI_flags+3(%r9),_TIF_WORK_INT
613 bnz BASED(io_work) # there is work to do (signals etc.) 609 bnz BASED(io_work) # there is work to do (signals etc.)
614io_restore: 610io_restore:
615#ifdef CONFIG_TRACE_IRQFLAGS
616 la %r1,BASED(io_restore_trace_psw_addr)
617 l %r1,0(%r1)
618 lpsw 0(%r1)
619io_restore_trace:
620 TRACE_IRQS_CHECK
621 LOCKDEP_SYS_EXIT
622#endif
623io_leave:
624 RESTORE_ALL __LC_RETURN_PSW,0 611 RESTORE_ALL __LC_RETURN_PSW,0
625io_done: 612io_done:
626 613
627#ifdef CONFIG_TRACE_IRQFLAGS
628io_restore_trace_psw_addr:
629 .long io_restore_trace_psw
630
631 .section .data,"aw",@progbits
632 .align 8
633 .globl io_restore_trace_psw
634io_restore_trace_psw:
635 .long 0, io_restore_trace + 0x80000000
636 .previous
637#endif
638
639# 614#
 640# switch to kernel stack, then check the TIF bits 615# There is work to do, find out in which context we have been interrupted:
616# 1) if we return to user space we can do all _TIF_WORK_INT work
617# 2) if we return to kernel code and preemptive scheduling is enabled check
618# the preemption counter and if it is zero call preempt_schedule_irq
619# Before any work can be done, a switch to the kernel stack is required.
641# 620#
642io_work: 621io_work:
643 tm SP_PSW+1(%r15),0x01 # returning to user ? 622 tm SP_PSW+1(%r15),0x01 # returning to user ?
644#ifndef CONFIG_PREEMPT 623 bo BASED(io_work_user) # yes -> do resched & signal
645 bno BASED(io_restore) # no-> skip resched & signal 624#ifdef CONFIG_PREEMPT
646#else
647 bnz BASED(io_work_user) # no -> check for preemptive scheduling
648 # check for preemptive scheduling 625 # check for preemptive scheduling
649 icm %r0,15,__TI_precount(%r9) 626 icm %r0,15,__TI_precount(%r9)
650 bnz BASED(io_restore) # preemption disabled 627 bnz BASED(io_restore) # preemption disabled
628 tm __TI_flags+3(%r9),_TIF_NEED_RESCHED
629 bno BASED(io_restore)
630 # switch to kernel stack
651 l %r1,SP_R15(%r15) 631 l %r1,SP_R15(%r15)
652 s %r1,BASED(.Lc_spsize) 632 s %r1,BASED(.Lc_spsize)
653 mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) 633 mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
654 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain 634 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
655 lr %r15,%r1 635 lr %r15,%r1
656io_resume_loop: 636 # TRACE_IRQS_ON already done at io_return, call
657 tm __TI_flags+3(%r9),_TIF_NEED_RESCHED 637 # TRACE_IRQS_OFF to keep things symmetrical
658 bno BASED(io_restore) 638 TRACE_IRQS_OFF
659 l %r1,BASED(.Lpreempt_schedule_irq) 639 l %r1,BASED(.Lpreempt_schedule_irq)
660 la %r14,BASED(io_resume_loop) 640 basr %r14,%r1 # call preempt_schedule_irq
661 br %r1 # call schedule 641 b BASED(io_return)
642#else
643 b BASED(io_restore)
662#endif 644#endif
663 645
646#
647# Need to do work before returning to userspace, switch to kernel stack
648#
664io_work_user: 649io_work_user:
665 l %r1,__LC_KERNEL_STACK 650 l %r1,__LC_KERNEL_STACK
666 s %r1,BASED(.Lc_spsize) 651 s %r1,BASED(.Lc_spsize)
667 mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) 652 mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
668 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain 653 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
669 lr %r15,%r1 654 lr %r15,%r1
655
670# 656#
671# One of the work bits is on. Find out which one. 657# One of the work bits is on. Find out which one.
672# Checked are: _TIF_SIGPENDING, _TIF_NEED_RESCHED 658# Checked are: _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_NEED_RESCHED
673# and _TIF_MCCK_PENDING 659# and _TIF_MCCK_PENDING
674# 660#
675io_work_loop: 661io_work_tif:
676 tm __TI_flags+3(%r9),_TIF_MCCK_PENDING 662 tm __TI_flags+3(%r9),_TIF_MCCK_PENDING
677 bo BASED(io_mcck_pending) 663 bo BASED(io_mcck_pending)
678 tm __TI_flags+3(%r9),_TIF_NEED_RESCHED 664 tm __TI_flags+3(%r9),_TIF_NEED_RESCHED
679 bo BASED(io_reschedule) 665 bo BASED(io_reschedule)
680 tm __TI_flags+3(%r9),_TIF_SIGPENDING 666 tm __TI_flags+3(%r9),_TIF_SIGPENDING
681 bnz BASED(io_sigpending) 667 bo BASED(io_sigpending)
682 tm __TI_flags+3(%r9),_TIF_NOTIFY_RESUME 668 tm __TI_flags+3(%r9),_TIF_NOTIFY_RESUME
683 bnz BASED(io_notify_resume) 669 bo BASED(io_notify_resume)
684 b BASED(io_restore) 670 b BASED(io_return) # beware of critical section cleanup
685io_work_done:
686 671
687# 672#
688# _TIF_MCCK_PENDING is set, call handler 673# _TIF_MCCK_PENDING is set, call handler
689# 674#
690io_mcck_pending: 675io_mcck_pending:
676 # TRACE_IRQS_ON already done at io_return
691 l %r1,BASED(.Ls390_handle_mcck) 677 l %r1,BASED(.Ls390_handle_mcck)
692 basr %r14,%r1 # TIF bit will be cleared by handler 678 basr %r14,%r1 # TIF bit will be cleared by handler
693 b BASED(io_work_loop) 679 TRACE_IRQS_OFF
680 b BASED(io_return)
694 681
695# 682#
696# _TIF_NEED_RESCHED is set, call schedule 683# _TIF_NEED_RESCHED is set, call schedule
697# 684#
698io_reschedule: 685io_reschedule:
699 TRACE_IRQS_ON 686 # TRACE_IRQS_ON already done at io_return
700 l %r1,BASED(.Lschedule) 687 l %r1,BASED(.Lschedule)
701 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts 688 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
702 basr %r14,%r1 # call scheduler 689 basr %r14,%r1 # call scheduler
703 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts 690 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
704 TRACE_IRQS_OFF 691 TRACE_IRQS_OFF
705 tm __TI_flags+3(%r9),_TIF_WORK_INT 692 b BASED(io_return)
706 bz BASED(io_restore) # there is no work to do
707 b BASED(io_work_loop)
708 693
709# 694#
710# _TIF_SIGPENDING is set, call do_signal 695# _TIF_SIGPENDING is set, call do_signal
711# 696#
712io_sigpending: 697io_sigpending:
713 TRACE_IRQS_ON 698 # TRACE_IRQS_ON already done at io_return
714 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts 699 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
715 la %r2,SP_PTREGS(%r15) # load pt_regs 700 la %r2,SP_PTREGS(%r15) # load pt_regs
716 l %r1,BASED(.Ldo_signal) 701 l %r1,BASED(.Ldo_signal)
717 basr %r14,%r1 # call do_signal 702 basr %r14,%r1 # call do_signal
718 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts 703 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
719 TRACE_IRQS_OFF 704 TRACE_IRQS_OFF
720 b BASED(io_work_loop) 705 b BASED(io_return)
721 706
722# 707#
723# _TIF_SIGPENDING is set, call do_signal 708# _TIF_SIGPENDING is set, call do_signal
724# 709#
725io_notify_resume: 710io_notify_resume:
726 TRACE_IRQS_ON 711 # TRACE_IRQS_ON already done at io_return
727 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts 712 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
728 la %r2,SP_PTREGS(%r15) # load pt_regs 713 la %r2,SP_PTREGS(%r15) # load pt_regs
729 l %r1,BASED(.Ldo_notify_resume) 714 l %r1,BASED(.Ldo_notify_resume)
 730 basr %r14,%r1 # call do_notify_resume 715 basr %r14,%r1 # call do_notify_resume
731 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts 716 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
732 TRACE_IRQS_OFF 717 TRACE_IRQS_OFF
733 b BASED(io_work_loop) 718 b BASED(io_return)
734 719
735/* 720/*
736 * External interrupt handler routine 721 * External interrupt handler routine
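
Note: the io_work comment in the hunk above distinguishes returning to user space (full TIF work handling on the kernel stack) from returning to kernel code (only kernel preemption, and only when the preempt counter is zero and a reschedule was requested). A plain-C sketch of that decision, with made-up struct and function names:

#include <stdbool.h>
#include <stdio.h>

struct irq_return_state {
	bool to_user;		/* PSW problem-state bit on the stack */
	int  preempt_count;	/* __TI_precount in the assembly */
	bool need_resched;	/* TIF_NEED_RESCHED */
	bool preempt_enabled;	/* CONFIG_PREEMPT */
};

static const char *io_return_action(const struct irq_return_state *s)
{
	if (s->to_user)
		return "switch to kernel stack, run TIF work (io_work_user)";
	if (s->preempt_enabled && s->preempt_count == 0 && s->need_resched)
		return "call preempt_schedule_irq on the kernel stack";
	return "restore registers and return (io_restore)";
}

int main(void)
{
	struct irq_return_state s = { false, 0, true, true };

	printf("%s\n", io_return_action(&s));
	return 0;
}
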
@@ -765,15 +750,14 @@ __critical_end:
765 750
766 .globl mcck_int_handler 751 .globl mcck_int_handler
767mcck_int_handler: 752mcck_int_handler:
768 stck __LC_INT_CLOCK 753 stck __LC_MCCK_CLOCK
769 spt __LC_CPU_TIMER_SAVE_AREA # revalidate cpu timer 754 spt __LC_CPU_TIMER_SAVE_AREA # revalidate cpu timer
770 lm %r0,%r15,__LC_GPREGS_SAVE_AREA # revalidate gprs 755 lm %r0,%r15,__LC_GPREGS_SAVE_AREA # revalidate gprs
771 SAVE_ALL_BASE __LC_SAVE_AREA+32 756 SAVE_ALL_BASE __LC_SAVE_AREA+32
772 la %r12,__LC_MCK_OLD_PSW 757 la %r12,__LC_MCK_OLD_PSW
773 tm __LC_MCCK_CODE,0x80 # system damage? 758 tm __LC_MCCK_CODE,0x80 # system damage?
774 bo BASED(mcck_int_main) # yes -> rest of mcck code invalid 759 bo BASED(mcck_int_main) # yes -> rest of mcck code invalid
775 mvc __LC_SAVE_AREA+52(8),__LC_ASYNC_ENTER_TIMER 760 mvc __LC_MCCK_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA
776 mvc __LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA
777 tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid? 761 tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid?
778 bo BASED(1f) 762 bo BASED(1f)
779 la %r14,__LC_SYNC_ENTER_TIMER 763 la %r14,__LC_SYNC_ENTER_TIMER
@@ -787,7 +771,7 @@ mcck_int_handler:
787 bl BASED(0f) 771 bl BASED(0f)
788 la %r14,__LC_LAST_UPDATE_TIMER 772 la %r14,__LC_LAST_UPDATE_TIMER
7890: spt 0(%r14) 7730: spt 0(%r14)
790 mvc __LC_ASYNC_ENTER_TIMER(8),0(%r14) 774 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
7911: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? 7751: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
792 bno BASED(mcck_int_main) # no -> skip cleanup critical 776 bno BASED(mcck_int_main) # no -> skip cleanup critical
793 tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit 777 tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit
@@ -809,9 +793,9 @@ mcck_int_main:
809 bno BASED(mcck_no_vtime) # no -> skip cleanup critical 793 bno BASED(mcck_no_vtime) # no -> skip cleanup critical
810 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 794 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
811 bz BASED(mcck_no_vtime) 795 bz BASED(mcck_no_vtime)
812 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER 796 UPDATE_VTIME __LC_EXIT_TIMER,__LC_MCCK_ENTER_TIMER,__LC_USER_TIMER
813 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 797 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
814 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER 798 mvc __LC_LAST_UPDATE_TIMER(8),__LC_MCCK_ENTER_TIMER
815mcck_no_vtime: 799mcck_no_vtime:
816 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct 800 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
817 la %r2,SP_PTREGS(%r15) # load pt_regs 801 la %r2,SP_PTREGS(%r15) # load pt_regs
@@ -834,7 +818,6 @@ mcck_no_vtime:
834mcck_return: 818mcck_return:
835 mvc __LC_RETURN_MCCK_PSW(8),SP_PSW(%r15) # move return PSW 819 mvc __LC_RETURN_MCCK_PSW(8),SP_PSW(%r15) # move return PSW
836 ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit 820 ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit
837 mvc __LC_ASYNC_ENTER_TIMER(8),__LC_SAVE_AREA+52
838 tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? 821 tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
839 bno BASED(0f) 822 bno BASED(0f)
840 lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 823 lm %r0,%r15,SP_R0(%r15) # load gprs 0-15
@@ -918,18 +901,14 @@ stack_overflow:
918 901
919cleanup_table_system_call: 902cleanup_table_system_call:
920 .long system_call + 0x80000000, sysc_do_svc + 0x80000000 903 .long system_call + 0x80000000, sysc_do_svc + 0x80000000
921cleanup_table_sysc_return: 904cleanup_table_sysc_tif:
922 .long sysc_return + 0x80000000, sysc_leave + 0x80000000 905 .long sysc_tif + 0x80000000, sysc_restore + 0x80000000
923cleanup_table_sysc_leave: 906cleanup_table_sysc_restore:
924 .long sysc_leave + 0x80000000, sysc_done + 0x80000000 907 .long sysc_restore + 0x80000000, sysc_done + 0x80000000
925cleanup_table_sysc_work_loop: 908cleanup_table_io_tif:
926 .long sysc_work_loop + 0x80000000, sysc_work_done + 0x80000000 909 .long io_tif + 0x80000000, io_restore + 0x80000000
927cleanup_table_io_return: 910cleanup_table_io_restore:
928 .long io_return + 0x80000000, io_leave + 0x80000000 911 .long io_restore + 0x80000000, io_done + 0x80000000
929cleanup_table_io_leave:
930 .long io_leave + 0x80000000, io_done + 0x80000000
931cleanup_table_io_work_loop:
932 .long io_work_loop + 0x80000000, io_work_done + 0x80000000
933 912
934cleanup_critical: 913cleanup_critical:
935 clc 4(4,%r12),BASED(cleanup_table_system_call) 914 clc 4(4,%r12),BASED(cleanup_table_system_call)
@@ -937,49 +916,40 @@ cleanup_critical:
937 clc 4(4,%r12),BASED(cleanup_table_system_call+4) 916 clc 4(4,%r12),BASED(cleanup_table_system_call+4)
938 bl BASED(cleanup_system_call) 917 bl BASED(cleanup_system_call)
9390: 9180:
940 clc 4(4,%r12),BASED(cleanup_table_sysc_return) 919 clc 4(4,%r12),BASED(cleanup_table_sysc_tif)
941 bl BASED(0f)
942 clc 4(4,%r12),BASED(cleanup_table_sysc_return+4)
943 bl BASED(cleanup_sysc_return)
9440:
945 clc 4(4,%r12),BASED(cleanup_table_sysc_leave)
946 bl BASED(0f) 920 bl BASED(0f)
947 clc 4(4,%r12),BASED(cleanup_table_sysc_leave+4) 921 clc 4(4,%r12),BASED(cleanup_table_sysc_tif+4)
948 bl BASED(cleanup_sysc_leave) 922 bl BASED(cleanup_sysc_tif)
9490: 9230:
950 clc 4(4,%r12),BASED(cleanup_table_sysc_work_loop) 924 clc 4(4,%r12),BASED(cleanup_table_sysc_restore)
951 bl BASED(0f) 925 bl BASED(0f)
952 clc 4(4,%r12),BASED(cleanup_table_sysc_work_loop+4) 926 clc 4(4,%r12),BASED(cleanup_table_sysc_restore+4)
953 bl BASED(cleanup_sysc_return) 927 bl BASED(cleanup_sysc_restore)
9540: 9280:
955 clc 4(4,%r12),BASED(cleanup_table_io_return) 929 clc 4(4,%r12),BASED(cleanup_table_io_tif)
956 bl BASED(0f) 930 bl BASED(0f)
957 clc 4(4,%r12),BASED(cleanup_table_io_return+4) 931 clc 4(4,%r12),BASED(cleanup_table_io_tif+4)
958 bl BASED(cleanup_io_return) 932 bl BASED(cleanup_io_tif)
9590: 9330:
960 clc 4(4,%r12),BASED(cleanup_table_io_leave) 934 clc 4(4,%r12),BASED(cleanup_table_io_restore)
961 bl BASED(0f) 935 bl BASED(0f)
962 clc 4(4,%r12),BASED(cleanup_table_io_leave+4) 936 clc 4(4,%r12),BASED(cleanup_table_io_restore+4)
963 bl BASED(cleanup_io_leave) 937 bl BASED(cleanup_io_restore)
9640:
965 clc 4(4,%r12),BASED(cleanup_table_io_work_loop)
966 bl BASED(0f)
967 clc 4(4,%r12),BASED(cleanup_table_io_work_loop+4)
968 bl BASED(cleanup_io_return)
9690: 9380:
970 br %r14 939 br %r14
971 940
972cleanup_system_call: 941cleanup_system_call:
973 mvc __LC_RETURN_PSW(8),0(%r12) 942 mvc __LC_RETURN_PSW(8),0(%r12)
974 c %r12,BASED(.Lmck_old_psw)
975 be BASED(0f)
976 la %r12,__LC_SAVE_AREA+16
977 b BASED(1f)
9780: la %r12,__LC_SAVE_AREA+32
9791:
980 clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+4) 943 clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+4)
981 bh BASED(0f) 944 bh BASED(0f)
945 mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
946 c %r12,BASED(.Lmck_old_psw)
947 be BASED(0f)
982 mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER 948 mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
9490: c %r12,BASED(.Lmck_old_psw)
950 la %r12,__LC_SAVE_AREA+32
951 be BASED(0f)
952 la %r12,__LC_SAVE_AREA+16
9830: clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+8) 9530: clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+8)
984 bhe BASED(cleanup_vtime) 954 bhe BASED(cleanup_vtime)
985 clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn) 955 clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn)
@@ -1012,55 +982,54 @@ cleanup_system_call_insn:
1012 .long sysc_stime + 0x80000000 982 .long sysc_stime + 0x80000000
1013 .long sysc_update + 0x80000000 983 .long sysc_update + 0x80000000
1014 984
1015cleanup_sysc_return: 985cleanup_sysc_tif:
1016 mvc __LC_RETURN_PSW(4),0(%r12) 986 mvc __LC_RETURN_PSW(4),0(%r12)
1017 mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_sysc_return) 987 mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_sysc_tif)
1018 la %r12,__LC_RETURN_PSW 988 la %r12,__LC_RETURN_PSW
1019 br %r14 989 br %r14
1020 990
1021cleanup_sysc_leave: 991cleanup_sysc_restore:
1022 clc 4(4,%r12),BASED(cleanup_sysc_leave_insn) 992 clc 4(4,%r12),BASED(cleanup_sysc_restore_insn)
1023 be BASED(2f) 993 be BASED(2f)
994 mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
995 c %r12,BASED(.Lmck_old_psw)
996 be BASED(0f)
1024 mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER 997 mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
1025 clc 4(4,%r12),BASED(cleanup_sysc_leave_insn+4) 9980: clc 4(4,%r12),BASED(cleanup_sysc_restore_insn+4)
1026 be BASED(2f) 999 be BASED(2f)
1027 mvc __LC_RETURN_PSW(8),SP_PSW(%r15) 1000 mvc __LC_RETURN_PSW(8),SP_PSW(%r15)
1028 c %r12,BASED(.Lmck_old_psw) 1001 c %r12,BASED(.Lmck_old_psw)
1029 bne BASED(0f) 1002 la %r12,__LC_SAVE_AREA+32
1030 mvc __LC_SAVE_AREA+32(16),SP_R12(%r15) 1003 be BASED(1f)
1031 b BASED(1f) 1004 la %r12,__LC_SAVE_AREA+16
10320: mvc __LC_SAVE_AREA+16(16),SP_R12(%r15) 10051: mvc 0(16,%r12),SP_R12(%r15)
10331: lm %r0,%r11,SP_R0(%r15) 1006 lm %r0,%r11,SP_R0(%r15)
1034 l %r15,SP_R15(%r15) 1007 l %r15,SP_R15(%r15)
10352: la %r12,__LC_RETURN_PSW 10082: la %r12,__LC_RETURN_PSW
1036 br %r14 1009 br %r14
1037cleanup_sysc_leave_insn: 1010cleanup_sysc_restore_insn:
1038 .long sysc_done - 4 + 0x80000000 1011 .long sysc_done - 4 + 0x80000000
1039 .long sysc_done - 8 + 0x80000000 1012 .long sysc_done - 8 + 0x80000000
1040 1013
1041cleanup_io_return: 1014cleanup_io_tif:
1042 mvc __LC_RETURN_PSW(4),0(%r12) 1015 mvc __LC_RETURN_PSW(4),0(%r12)
1043 mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_io_work_loop) 1016 mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_io_tif)
1044 la %r12,__LC_RETURN_PSW 1017 la %r12,__LC_RETURN_PSW
1045 br %r14 1018 br %r14
1046 1019
1047cleanup_io_leave: 1020cleanup_io_restore:
1048 clc 4(4,%r12),BASED(cleanup_io_leave_insn) 1021 clc 4(4,%r12),BASED(cleanup_io_restore_insn)
1049 be BASED(2f) 1022 be BASED(1f)
1050 mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER 1023 mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
1051 clc 4(4,%r12),BASED(cleanup_io_leave_insn+4) 1024 clc 4(4,%r12),BASED(cleanup_io_restore_insn+4)
1052 be BASED(2f) 1025 be BASED(1f)
1053 mvc __LC_RETURN_PSW(8),SP_PSW(%r15) 1026 mvc __LC_RETURN_PSW(8),SP_PSW(%r15)
1054 c %r12,BASED(.Lmck_old_psw)
1055 bne BASED(0f)
1056 mvc __LC_SAVE_AREA+32(16),SP_R12(%r15) 1027 mvc __LC_SAVE_AREA+32(16),SP_R12(%r15)
1057 b BASED(1f) 1028 lm %r0,%r11,SP_R0(%r15)
10580: mvc __LC_SAVE_AREA+16(16),SP_R12(%r15)
10591: lm %r0,%r11,SP_R0(%r15)
1060 l %r15,SP_R15(%r15) 1029 l %r15,SP_R15(%r15)
10612: la %r12,__LC_RETURN_PSW 10301: la %r12,__LC_RETURN_PSW
1062 br %r14 1031 br %r14
1063cleanup_io_leave_insn: 1032cleanup_io_restore_insn:
1064 .long io_done - 4 + 0x80000000 1033 .long io_done - 4 + 0x80000000
1065 .long io_done - 8 + 0x80000000 1034 .long io_done - 8 + 0x80000000
1066 1035
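The 31-bit entry.S hunks above rework the critical-section cleanup: each cleanup_table_* pair now brackets a range of the renamed labels (sysc_tif..sysc_restore, sysc_restore..sysc_done, io_tif..io_restore, io_restore..io_done), and cleanup_critical compares the interrupted address against each pair before branching to the matching fixup routine. As a rough illustration of that table walk (the names and types below are invented for the sketch, not taken from the kernel), in C:

#include <stddef.h>

/* One entry per critical range: [start, end) plus the routine that
 * repairs the partially-completed state for that range. */
struct cleanup_range {
	unsigned long start;
	unsigned long end;
	void (*fixup)(void);
};

/* Models the clc/bl chain in cleanup_critical: find the range that
 * contains the interrupted address and run its fixup. */
static void run_cleanup(const struct cleanup_range *table, size_t n,
			unsigned long addr)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (addr >= table[i].start && addr < table[i].end) {
			table[i].fixup();
			break;
		}
	}
}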
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index e1e5e767ab56..eb15c12ec158 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -24,17 +24,13 @@ int __cpuinit start_secondary(void *cpuvoid);
24void __init startup_init(void); 24void __init startup_init(void);
25void die(const char * str, struct pt_regs * regs, long err); 25void die(const char * str, struct pt_regs * regs, long err);
26 26
27struct new_utsname; 27struct s390_mmap_arg_struct;
28struct mmap_arg_struct;
29struct fadvise64_64_args; 28struct fadvise64_64_args;
30struct old_sigaction; 29struct old_sigaction;
31struct sel_arg_struct;
32 30
33long sys_mmap2(struct mmap_arg_struct __user *arg); 31long sys_mmap2(struct s390_mmap_arg_struct __user *arg);
34long sys_s390_old_mmap(struct mmap_arg_struct __user *arg); 32long sys_s390_ipc(uint call, int first, unsigned long second,
35long sys_ipc(uint call, int first, unsigned long second,
36 unsigned long third, void __user *ptr); 33 unsigned long third, void __user *ptr);
37long sys_s390_newuname(struct new_utsname __user *name);
38long sys_s390_personality(unsigned long personality); 34long sys_s390_personality(unsigned long personality);
39long sys_s390_fadvise64(int fd, u32 offset_high, u32 offset_low, 35long sys_s390_fadvise64(int fd, u32 offset_high, u32 offset_low,
40 size_t len, int advice); 36 size_t len, int advice);
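The header now declares the s390-private entry points with arch-prefixed names (struct s390_mmap_arg_struct, sys_s390_ipc) instead of the generic ones. sys_mmap2 keeps its single-pointer calling convention: user space passes one pointer to a block holding the mmap parameters. A hedged sketch of that convention follows; the field names and types are assumptions for illustration only, not the real struct s390_mmap_arg_struct definition:

#include <string.h>

/* Assumed layout of the user-supplied argument block (illustrative only;
 * the real struct s390_mmap_arg_struct may differ). */
struct mmap_arg_block {
	unsigned long addr;
	unsigned long len;
	unsigned long prot;
	unsigned long flags;
	unsigned long fd;
	unsigned long offset;
};

/* Pack six separate arguments into one block, the way a user-space
 * wrapper would before issuing the single-pointer mmap2 svc. */
static void pack_mmap_args(struct mmap_arg_block *b, unsigned long addr,
			   unsigned long len, unsigned long prot,
			   unsigned long flags, unsigned long fd,
			   unsigned long offset)
{
	memset(b, 0, sizeof(*b));
	b->addr = addr;
	b->len = len;
	b->prot = prot;
	b->flags = flags;
	b->fd = fd;
	b->offset = offset;
}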
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index f33658f09dd7..8bccec15ea90 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -2,18 +2,16 @@
2 * arch/s390/kernel/entry64.S 2 * arch/s390/kernel/entry64.S
3 * S390 low-level entry points. 3 * S390 low-level entry points.
4 * 4 *
5 * Copyright (C) IBM Corp. 1999,2006 5 * Copyright (C) IBM Corp. 1999,2010
6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), 6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
7 * Hartmut Penner (hp@de.ibm.com), 7 * Hartmut Penner (hp@de.ibm.com),
8 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), 8 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
9 * Heiko Carstens <heiko.carstens@de.ibm.com> 9 * Heiko Carstens <heiko.carstens@de.ibm.com>
10 */ 10 */
11 11
12#include <linux/sys.h>
13#include <linux/linkage.h> 12#include <linux/linkage.h>
14#include <linux/init.h> 13#include <linux/init.h>
15#include <asm/cache.h> 14#include <asm/cache.h>
16#include <asm/lowcore.h>
17#include <asm/errno.h> 15#include <asm/errno.h>
18#include <asm/ptrace.h> 16#include <asm/ptrace.h>
19#include <asm/thread_info.h> 17#include <asm/thread_info.h>
@@ -61,30 +59,45 @@ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
61 59
62#define BASED(name) name-system_call(%r13) 60#define BASED(name) name-system_call(%r13)
63 61
62 .macro HANDLE_SIE_INTERCEPT
63#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
64 lg %r3,__LC_SIE_HOOK
65 ltgr %r3,%r3
66 jz 0f
67 basr %r14,%r3
680:
69#endif
70 .endm
71
64#ifdef CONFIG_TRACE_IRQFLAGS 72#ifdef CONFIG_TRACE_IRQFLAGS
65 .macro TRACE_IRQS_ON 73 .macro TRACE_IRQS_ON
66 basr %r2,%r0 74 basr %r2,%r0
67 brasl %r14,trace_hardirqs_on_caller 75 brasl %r14,trace_hardirqs_on_caller
68 .endm 76 .endm
69 77
70 .macro TRACE_IRQS_OFF 78 .macro TRACE_IRQS_OFF
71 basr %r2,%r0 79 basr %r2,%r0
72 brasl %r14,trace_hardirqs_off_caller 80 brasl %r14,trace_hardirqs_off_caller
73 .endm 81 .endm
74 82
75 .macro TRACE_IRQS_CHECK 83 .macro TRACE_IRQS_CHECK_ON
76 basr %r2,%r0
77 tm SP_PSW(%r15),0x03 # irqs enabled? 84 tm SP_PSW(%r15),0x03 # irqs enabled?
78 jz 0f 85 jz 0f
79 brasl %r14,trace_hardirqs_on_caller 86 TRACE_IRQS_ON
80 j 1f 870:
810: brasl %r14,trace_hardirqs_off_caller 88 .endm
821: 89
90 .macro TRACE_IRQS_CHECK_OFF
91 tm SP_PSW(%r15),0x03 # irqs enabled?
92 jz 0f
93 TRACE_IRQS_OFF
940:
83 .endm 95 .endm
84#else 96#else
85#define TRACE_IRQS_ON 97#define TRACE_IRQS_ON
86#define TRACE_IRQS_OFF 98#define TRACE_IRQS_OFF
87#define TRACE_IRQS_CHECK 99#define TRACE_IRQS_CHECK_ON
100#define TRACE_IRQS_CHECK_OFF
88#endif 101#endif
89 102
90#ifdef CONFIG_LOCKDEP 103#ifdef CONFIG_LOCKDEP
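The single TRACE_IRQS_CHECK macro above is split into TRACE_IRQS_CHECK_ON and TRACE_IRQS_CHECK_OFF; both test the interrupt-mask bits (0x03) in the saved PSW and only call the tracing hook when the interrupted context had interrupts enabled. A small C model of that behaviour (the hook bodies are stubs, not the kernel's lockdep code):

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the kernel's irq-flags tracing hooks. */
static void trace_hardirqs_on(void)  { puts("hardirqs on");  }
static void trace_hardirqs_off(void) { puts("hardirqs off"); }

/* Models TRACE_IRQS_CHECK_ON/OFF: psw_irqs_enabled reflects the 0x03
 * mask bits tested by "tm SP_PSW(%r15),0x03"; the hook is skipped when
 * the interrupted context already had interrupts disabled. */
static void trace_irqs_check(bool psw_irqs_enabled, bool turning_on)
{
	if (!psw_irqs_enabled)
		return;
	if (turning_on)
		trace_hardirqs_on();
	else
		trace_hardirqs_off();
}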
@@ -113,31 +126,35 @@ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
113 * R15 - kernel stack pointer 126 * R15 - kernel stack pointer
114 */ 127 */
115 128
116 .macro SAVE_ALL_BASE savearea
117 stmg %r12,%r15,\savearea
118 larl %r13,system_call
119 .endm
120
121 .macro SAVE_ALL_SVC psworg,savearea 129 .macro SAVE_ALL_SVC psworg,savearea
122 la %r12,\psworg 130 stmg %r11,%r15,\savearea
123 lg %r15,__LC_KERNEL_STACK # problem state -> load ksp 131 lg %r15,__LC_KERNEL_STACK # problem state -> load ksp
132 aghi %r15,-SP_SIZE # make room for registers & psw
133 lg %r11,__LC_LAST_BREAK
124 .endm 134 .endm
125 135
126 .macro SAVE_ALL_SYNC psworg,savearea 136 .macro SAVE_ALL_PGM psworg,savearea
127 la %r12,\psworg 137 stmg %r11,%r15,\savearea
128 tm \psworg+1,0x01 # test problem state bit 138 tm \psworg+1,0x01 # test problem state bit
129 jz 2f # skip stack setup save
130 lg %r15,__LC_KERNEL_STACK # problem state -> load ksp
131#ifdef CONFIG_CHECK_STACK 139#ifdef CONFIG_CHECK_STACK
132 j 3f 140 jnz 1f
1332: tml %r15,STACK_SIZE - CONFIG_STACK_GUARD 141 tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
134 jz stack_overflow 142 jnz 2f
1353: 143 la %r12,\psworg
144 j stack_overflow
145#else
146 jz 2f
136#endif 147#endif
1372: 1481: lg %r15,__LC_KERNEL_STACK # problem state -> load ksp
1492: aghi %r15,-SP_SIZE # make room for registers & psw
150 larl %r13,system_call
151 lg %r11,__LC_LAST_BREAK
138 .endm 152 .endm
139 153
140 .macro SAVE_ALL_ASYNC psworg,savearea 154 .macro SAVE_ALL_ASYNC psworg,savearea
155 stmg %r11,%r15,\savearea
156 larl %r13,system_call
157 lg %r11,__LC_LAST_BREAK
141 la %r12,\psworg 158 la %r12,\psworg
142 tm \psworg+1,0x01 # test problem state bit 159 tm \psworg+1,0x01 # test problem state bit
143 jnz 1f # from user -> load kernel stack 160 jnz 1f # from user -> load kernel stack
@@ -151,27 +168,23 @@ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
1510: lg %r14,__LC_ASYNC_STACK # are we already on the async. stack ? 1680: lg %r14,__LC_ASYNC_STACK # are we already on the async. stack ?
152 slgr %r14,%r15 169 slgr %r14,%r15
153 srag %r14,%r14,STACK_SHIFT 170 srag %r14,%r14,STACK_SHIFT
154 jz 2f
1551: lg %r15,__LC_ASYNC_STACK # load async stack
156#ifdef CONFIG_CHECK_STACK 171#ifdef CONFIG_CHECK_STACK
157 j 3f 172 jnz 1f
1582: tml %r15,STACK_SIZE - CONFIG_STACK_GUARD 173 tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
159 jz stack_overflow 174 jnz 2f
1603: 175 j stack_overflow
176#else
177 jz 2f
161#endif 178#endif
1622: 1791: lg %r15,__LC_ASYNC_STACK # load async stack
1802: aghi %r15,-SP_SIZE # make room for registers & psw
163 .endm 181 .endm
164 182
165 .macro CREATE_STACK_FRAME psworg,savearea 183 .macro CREATE_STACK_FRAME savearea
166 aghi %r15,-SP_SIZE # make room for registers & psw 184 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
167 mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack
168 stg %r2,SP_ORIG_R2(%r15) # store original content of gpr 2 185 stg %r2,SP_ORIG_R2(%r15) # store original content of gpr 2
169 icm %r12,3,__LC_SVC_ILC 186 mvc SP_R11(40,%r15),\savearea # move %r11-%r15 to stack
170 stmg %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack 187 stmg %r0,%r10,SP_R0(%r15) # store gprs %r0-%r10 to kernel stack
171 st %r12,SP_SVCNR(%r15)
172 mvc SP_R12(32,%r15),\savearea # move %r12-%r15 to stack
173 la %r12,0
174 stg %r12,__SF_BACKCHAIN(%r15)
175 .endm 188 .endm
176 189
177 .macro RESTORE_ALL psworg,sync 190 .macro RESTORE_ALL psworg,sync
@@ -187,6 +200,13 @@ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
187 lpswe \psworg # back to caller 200 lpswe \psworg # back to caller
188 .endm 201 .endm
189 202
203 .macro LAST_BREAK
204 srag %r10,%r11,23
205 jz 0f
206 stg %r11,__TI_last_break(%r12)
2070:
208 .endm
209
190/* 210/*
191 * Scheduler resume function, called by switch_to 211 * Scheduler resume function, called by switch_to
192 * gpr2 = (task_struct *) prev 212 * gpr2 = (task_struct *) prev
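The new LAST_BREAK macro records the last breaking-event address (held in %r11, which the reworked entry macros now load from __LC_LAST_BREAK) into the thread_info structure, but only when shifting the address right by 23 bits leaves a non-zero value, i.e. small addresses are skipped. A one-function C model of that test (the struct and field are simplified stand-ins):

#include <stdint.h>

struct thread_info_model {
	uint64_t last_break;	/* simplified stand-in for __TI_last_break */
};

/* Models the LAST_BREAK macro: "srag %r10,%r11,23 / jz" skips the store
 * when the address is below 1UL << 23. */
static void record_last_break(struct thread_info_model *ti, uint64_t addr)
{
	if (addr >> 23)
		ti->last_break = addr;
}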
@@ -232,143 +252,129 @@ __critical_start:
232system_call: 252system_call:
233 stpt __LC_SYNC_ENTER_TIMER 253 stpt __LC_SYNC_ENTER_TIMER
234sysc_saveall: 254sysc_saveall:
235 SAVE_ALL_BASE __LC_SAVE_AREA
236 SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA 255 SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
237 CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA 256 CREATE_STACK_FRAME __LC_SAVE_AREA
238 llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore 257 mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW
258 mvc SP_ILC(4,%r15),__LC_SVC_ILC
259 stg %r7,SP_ARGS(%r15)
260 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
239sysc_vtime: 261sysc_vtime:
240 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER 262 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
241sysc_stime: 263sysc_stime:
242 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 264 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
243sysc_update: 265sysc_update:
244 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 266 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
267 LAST_BREAK
245sysc_do_svc: 268sysc_do_svc:
246 lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct 269 llgh %r7,SP_SVCNR(%r15)
247 ltgr %r7,%r7 # test for svc 0 270 slag %r7,%r7,2 # shift and test for svc 0
248 jnz sysc_nr_ok 271 jnz sysc_nr_ok
249 # svc 0: system call number in %r1 272 # svc 0: system call number in %r1
250 cl %r1,BASED(.Lnr_syscalls) 273 llgfr %r1,%r1 # clear high word in r1
274 cghi %r1,NR_syscalls
251 jnl sysc_nr_ok 275 jnl sysc_nr_ok
252 lgfr %r7,%r1 # clear high word in r1 276 sth %r1,SP_SVCNR(%r15)
277 slag %r7,%r1,2 # shift and test for svc 0
253sysc_nr_ok: 278sysc_nr_ok:
254 mvc SP_ARGS(8,%r15),SP_R7(%r15)
255sysc_do_restart:
256 sth %r7,SP_SVCNR(%r15)
257 sllg %r7,%r7,2 # svc number * 4
258 larl %r10,sys_call_table 279 larl %r10,sys_call_table
259#ifdef CONFIG_COMPAT 280#ifdef CONFIG_COMPAT
260 tm __TI_flags+5(%r9),(_TIF_31BIT>>16) # running in 31 bit mode ? 281 tm __TI_flags+5(%r12),(_TIF_31BIT>>16) # running in 31 bit mode ?
261 jno sysc_noemu 282 jno sysc_noemu
262 larl %r10,sys_call_table_emu # use 31 bit emulation system calls 283 larl %r10,sys_call_table_emu # use 31 bit emulation system calls
263sysc_noemu: 284sysc_noemu:
264#endif 285#endif
265 tm __TI_flags+6(%r9),_TIF_SYSCALL 286 tm __TI_flags+6(%r12),_TIF_SYSCALL
266 lgf %r8,0(%r7,%r10) # load address of system call routine 287 lgf %r8,0(%r7,%r10) # load address of system call routine
267 jnz sysc_tracesys 288 jnz sysc_tracesys
268 basr %r14,%r8 # call sys_xxxx 289 basr %r14,%r8 # call sys_xxxx
269 stg %r2,SP_R2(%r15) # store return value (change R2 on stack) 290 stg %r2,SP_R2(%r15) # store return value (change R2 on stack)
270 291
271sysc_return: 292sysc_return:
272 tm __TI_flags+7(%r9),_TIF_WORK_SVC 293 LOCKDEP_SYS_EXIT
294sysc_tif:
295 tm __TI_flags+7(%r12),_TIF_WORK_SVC
273 jnz sysc_work # there is work to do (signals etc.) 296 jnz sysc_work # there is work to do (signals etc.)
274sysc_restore: 297sysc_restore:
275#ifdef CONFIG_TRACE_IRQFLAGS
276 larl %r1,sysc_restore_trace_psw
277 lpswe 0(%r1)
278sysc_restore_trace:
279 TRACE_IRQS_CHECK
280 LOCKDEP_SYS_EXIT
281#endif
282sysc_leave:
283 RESTORE_ALL __LC_RETURN_PSW,1 298 RESTORE_ALL __LC_RETURN_PSW,1
284sysc_done: 299sysc_done:
285 300
286#ifdef CONFIG_TRACE_IRQFLAGS
287 .section .data,"aw",@progbits
288 .align 8
289 .globl sysc_restore_trace_psw
290sysc_restore_trace_psw:
291 .quad 0, sysc_restore_trace
292 .previous
293#endif
294
295#
296# recheck if there is more work to do
297# 301#
298sysc_work_loop: 302# There is work to do, but first we need to check if we return to userspace.
299 tm __TI_flags+7(%r9),_TIF_WORK_SVC
300 jz sysc_restore # there is no work to do
301#
302# One of the work bits is on. Find out which one.
303# 303#
304sysc_work: 304sysc_work:
305 tm SP_PSW+1(%r15),0x01 # returning to user ? 305 tm SP_PSW+1(%r15),0x01 # returning to user ?
306 jno sysc_restore 306 jno sysc_restore
307 tm __TI_flags+7(%r9),_TIF_MCCK_PENDING 307
308#
309# One of the work bits is on. Find out which one.
310#
311sysc_work_tif:
312 tm __TI_flags+7(%r12),_TIF_MCCK_PENDING
308 jo sysc_mcck_pending 313 jo sysc_mcck_pending
309 tm __TI_flags+7(%r9),_TIF_NEED_RESCHED 314 tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
310 jo sysc_reschedule 315 jo sysc_reschedule
311 tm __TI_flags+7(%r9),_TIF_SIGPENDING 316 tm __TI_flags+7(%r12),_TIF_SIGPENDING
312 jnz sysc_sigpending 317 jo sysc_sigpending
313 tm __TI_flags+7(%r9),_TIF_NOTIFY_RESUME 318 tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME
314 jnz sysc_notify_resume 319 jo sysc_notify_resume
315 tm __TI_flags+7(%r9),_TIF_RESTART_SVC 320 tm __TI_flags+7(%r12),_TIF_RESTART_SVC
316 jo sysc_restart 321 jo sysc_restart
317 tm __TI_flags+7(%r9),_TIF_SINGLE_STEP 322 tm __TI_flags+7(%r12),_TIF_SINGLE_STEP
318 jo sysc_singlestep 323 jo sysc_singlestep
319 j sysc_restore 324 j sysc_return # beware of critical section cleanup
320sysc_work_done:
321 325
322# 326#
323# _TIF_NEED_RESCHED is set, call schedule 327# _TIF_NEED_RESCHED is set, call schedule
324# 328#
325sysc_reschedule: 329sysc_reschedule:
326 larl %r14,sysc_work_loop 330 larl %r14,sysc_return
327 jg schedule # return point is sysc_return 331 jg schedule # return point is sysc_return
328 332
329# 333#
330# _TIF_MCCK_PENDING is set, call handler 334# _TIF_MCCK_PENDING is set, call handler
331# 335#
332sysc_mcck_pending: 336sysc_mcck_pending:
333 larl %r14,sysc_work_loop 337 larl %r14,sysc_return
334 jg s390_handle_mcck # TIF bit will be cleared by handler 338 jg s390_handle_mcck # TIF bit will be cleared by handler
335 339
336# 340#
337# _TIF_SIGPENDING is set, call do_signal 341# _TIF_SIGPENDING is set, call do_signal
338# 342#
339sysc_sigpending: 343sysc_sigpending:
340 ni __TI_flags+7(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP 344 ni __TI_flags+7(%r12),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
341 la %r2,SP_PTREGS(%r15) # load pt_regs 345 la %r2,SP_PTREGS(%r15) # load pt_regs
342 brasl %r14,do_signal # call do_signal 346 brasl %r14,do_signal # call do_signal
343 tm __TI_flags+7(%r9),_TIF_RESTART_SVC 347 tm __TI_flags+7(%r12),_TIF_RESTART_SVC
344 jo sysc_restart 348 jo sysc_restart
345 tm __TI_flags+7(%r9),_TIF_SINGLE_STEP 349 tm __TI_flags+7(%r12),_TIF_SINGLE_STEP
346 jo sysc_singlestep 350 jo sysc_singlestep
347 j sysc_work_loop 351 j sysc_return
348 352
349# 353#
350# _TIF_NOTIFY_RESUME is set, call do_notify_resume 354# _TIF_NOTIFY_RESUME is set, call do_notify_resume
351# 355#
352sysc_notify_resume: 356sysc_notify_resume:
353 la %r2,SP_PTREGS(%r15) # load pt_regs 357 la %r2,SP_PTREGS(%r15) # load pt_regs
354 larl %r14,sysc_work_loop 358 larl %r14,sysc_return
355 jg do_notify_resume # call do_notify_resume 359 jg do_notify_resume # call do_notify_resume
356 360
357# 361#
358# _TIF_RESTART_SVC is set, set up registers and restart svc 362# _TIF_RESTART_SVC is set, set up registers and restart svc
359# 363#
360sysc_restart: 364sysc_restart:
361 ni __TI_flags+7(%r9),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC 365 ni __TI_flags+7(%r12),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC
362 lg %r7,SP_R2(%r15) # load new svc number 366 lg %r7,SP_R2(%r15) # load new svc number
363 mvc SP_R2(8,%r15),SP_ORIG_R2(%r15) # restore first argument 367 mvc SP_R2(8,%r15),SP_ORIG_R2(%r15) # restore first argument
364 lmg %r2,%r6,SP_R2(%r15) # load svc arguments 368 lmg %r2,%r6,SP_R2(%r15) # load svc arguments
365 j sysc_do_restart # restart svc 369 sth %r7,SP_SVCNR(%r15)
370 slag %r7,%r7,2
371 j sysc_nr_ok # restart svc
366 372
367# 373#
368# _TIF_SINGLE_STEP is set, call do_single_step 374# _TIF_SINGLE_STEP is set, call do_single_step
369# 375#
370sysc_singlestep: 376sysc_singlestep:
371 ni __TI_flags+7(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP 377 ni __TI_flags+7(%r12),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
372 xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number 378 xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number
373 la %r2,SP_PTREGS(%r15) # address of register-save area 379 la %r2,SP_PTREGS(%r15) # address of register-save area
374 larl %r14,sysc_return # load adr. of system return 380 larl %r14,sysc_return # load adr. of system return
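With the work-loop labels gone, every work handler above now returns to sysc_return, where LOCKDEP_SYS_EXIT runs and the TIF bits are tested again at sysc_tif; sysc_work only dispatches when the saved PSW says the CPU is going back to user space. A compact C model of that re-check-and-dispatch flow (the flag values and handler bodies are illustrative stubs, not the real asm-offsets or routines):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative bit values, not the kernel's definitions. */
#define TIF_MCCK_PENDING	(1u << 0)
#define TIF_NEED_RESCHED	(1u << 1)
#define TIF_SIGPENDING		(1u << 2)
#define TIF_NOTIFY_RESUME	(1u << 3)
#define TIF_WORK_MASK		(TIF_MCCK_PENDING | TIF_NEED_RESCHED | \
				 TIF_SIGPENDING | TIF_NOTIFY_RESUME)

/* Stub handlers; each clears its own bit the way the real routines do. */
static void handle_mcck(uint32_t *f)	{ *f &= ~TIF_MCCK_PENDING; }
static void reschedule(uint32_t *f)	{ *f &= ~TIF_NEED_RESCHED; }
static void handle_signal(uint32_t *f)	{ *f &= ~TIF_SIGPENDING; }
static void notify_resume(uint32_t *f)	{ *f &= ~TIF_NOTIFY_RESUME; }

/* Models sysc_return/sysc_tif/sysc_work: work is only done when returning
 * to user space, one bit is handled per pass, and control always comes
 * back to the common exit check until no work bits remain. */
static void syscall_exit(uint32_t *tif, bool returning_to_user)
{
	while (returning_to_user && (*tif & TIF_WORK_MASK)) {
		if (*tif & TIF_MCCK_PENDING)
			handle_mcck(tif);
		else if (*tif & TIF_NEED_RESCHED)
			reschedule(tif);
		else if (*tif & TIF_SIGPENDING)
			handle_signal(tif);
		else
			notify_resume(tif);
	}
	/* sysc_restore: restore registers and lpswe back to the caller. */
}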
@@ -381,8 +387,8 @@ sysc_singlestep:
381sysc_tracesys: 387sysc_tracesys:
382 la %r2,SP_PTREGS(%r15) # load pt_regs 388 la %r2,SP_PTREGS(%r15) # load pt_regs
383 la %r3,0 389 la %r3,0
384 srl %r7,2 390 llgh %r0,SP_SVCNR(%r15)
385 stg %r7,SP_R2(%r15) 391 stg %r0,SP_R2(%r15)
386 brasl %r14,do_syscall_trace_enter 392 brasl %r14,do_syscall_trace_enter
387 lghi %r0,NR_syscalls 393 lghi %r0,NR_syscalls
388 clgr %r0,%r2 394 clgr %r0,%r2
@@ -395,7 +401,7 @@ sysc_tracego:
395 basr %r14,%r8 # call sys_xxx 401 basr %r14,%r8 # call sys_xxx
396 stg %r2,SP_R2(%r15) # store return value 402 stg %r2,SP_R2(%r15) # store return value
397sysc_tracenogo: 403sysc_tracenogo:
398 tm __TI_flags+6(%r9),_TIF_SYSCALL 404 tm __TI_flags+6(%r12),_TIF_SYSCALL
399 jz sysc_return 405 jz sysc_return
400 la %r2,SP_PTREGS(%r15) # load pt_regs 406 la %r2,SP_PTREGS(%r15) # load pt_regs
401 larl %r14,sysc_return # return point is sysc_return 407 larl %r14,sysc_return # return point is sysc_return
@@ -407,7 +413,7 @@ sysc_tracenogo:
407 .globl ret_from_fork 413 .globl ret_from_fork
408ret_from_fork: 414ret_from_fork:
409 lg %r13,__LC_SVC_NEW_PSW+8 415 lg %r13,__LC_SVC_NEW_PSW+8
410 lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct 416 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
411 tm SP_PSW+1(%r15),0x01 # forking a kernel thread ? 417 tm SP_PSW+1(%r15),0x01 # forking a kernel thread ?
412 jo 0f 418 jo 0f
413 stg %r15,SP_R15(%r15) # store stack pointer for new kthread 419 stg %r15,SP_R15(%r15) # store stack pointer for new kthread
@@ -437,12 +443,14 @@ kernel_execve:
437 br %r14 443 br %r14
438 # execve succeeded. 444 # execve succeeded.
4390: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts 4450: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts
446# TRACE_IRQS_OFF
440 lg %r15,__LC_KERNEL_STACK # load ksp 447 lg %r15,__LC_KERNEL_STACK # load ksp
441 aghi %r15,-SP_SIZE # make room for registers & psw 448 aghi %r15,-SP_SIZE # make room for registers & psw
442 lg %r13,__LC_SVC_NEW_PSW+8 449 lg %r13,__LC_SVC_NEW_PSW+8
443 lg %r9,__LC_THREAD_INFO
444 mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs 450 mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs
451 lg %r12,__LC_THREAD_INFO
445 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 452 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
453# TRACE_IRQS_ON
446 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts 454 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
447 brasl %r14,execve_tail 455 brasl %r14,execve_tail
448 j sysc_return 456 j sysc_return
@@ -467,20 +475,23 @@ pgm_check_handler:
467 * for LPSW?). 475 * for LPSW?).
468 */ 476 */
469 stpt __LC_SYNC_ENTER_TIMER 477 stpt __LC_SYNC_ENTER_TIMER
470 SAVE_ALL_BASE __LC_SAVE_AREA
471 tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception 478 tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
472 jnz pgm_per # got per exception -> special case 479 jnz pgm_per # got per exception -> special case
473 SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA 480 SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA
474 CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA 481 CREATE_STACK_FRAME __LC_SAVE_AREA
482 xc SP_ILC(4,%r15),SP_ILC(%r15)
483 mvc SP_PSW(16,%r15),__LC_PGM_OLD_PSW
484 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
475 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 485 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
476 jz pgm_no_vtime 486 jz pgm_no_vtime
477 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER 487 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
478 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 488 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
479 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 489 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
490 LAST_BREAK
480pgm_no_vtime: 491pgm_no_vtime:
481 lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct 492 HANDLE_SIE_INTERCEPT
482 mvc SP_ARGS(8,%r15),__LC_LAST_BREAK 493 TRACE_IRQS_CHECK_OFF
483 TRACE_IRQS_OFF 494 stg %r11,SP_ARGS(%r15)
484 lgf %r3,__LC_PGM_ILC # load program interruption code 495 lgf %r3,__LC_PGM_ILC # load program interruption code
485 lghi %r8,0x7f 496 lghi %r8,0x7f
486 ngr %r8,%r3 497 ngr %r8,%r3
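HANDLE_SIE_INTERCEPT, added on the program-check, I/O, external and machine-check paths, replaces the open-coded sie_opcode comparison that the old io_work code carried: when KVM is configured it loads a hook address from the lowcore (__LC_SIE_HOOK) and calls it if one is installed, presumably so KVM can arrange to leave SIE, which the removed code used to do by advancing the PSW itself. A minimal C sketch of that "call the hook if registered" pattern:

/* Stand-in for the lowcore field at __LC_SIE_HOOK; NULL means no hook. */
typedef void (*sie_hook_fn)(void);
static sie_hook_fn sie_hook;

/* Models HANDLE_SIE_INTERCEPT: ltgr/jz skips the call when no hook is
 * installed, otherwise basr branches to it. */
static void handle_sie_intercept(void)
{
	if (sie_hook)
		sie_hook();
}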
@@ -489,8 +500,10 @@ pgm_do_call:
489 larl %r1,pgm_check_table 500 larl %r1,pgm_check_table
490 lg %r1,0(%r8,%r1) # load address of handler routine 501 lg %r1,0(%r8,%r1) # load address of handler routine
491 la %r2,SP_PTREGS(%r15) # address of register-save area 502 la %r2,SP_PTREGS(%r15) # address of register-save area
492 larl %r14,sysc_return 503 basr %r14,%r1 # branch to interrupt-handler
493 br %r1 # branch to interrupt-handler 504pgm_exit:
505 TRACE_IRQS_CHECK_ON
506 j sysc_return
494 507
495# 508#
496# handle per exception 509# handle per exception
@@ -502,55 +515,68 @@ pgm_per:
502 clc __LC_PGM_OLD_PSW(16),__LC_SVC_NEW_PSW 515 clc __LC_PGM_OLD_PSW(16),__LC_SVC_NEW_PSW
503 je pgm_svcper 516 je pgm_svcper
504# no interesting special case, ignore PER event 517# no interesting special case, ignore PER event
505 lmg %r12,%r15,__LC_SAVE_AREA
506 lpswe __LC_PGM_OLD_PSW 518 lpswe __LC_PGM_OLD_PSW
507 519
508# 520#
509# Normal per exception 521# Normal per exception
510# 522#
511pgm_per_std: 523pgm_per_std:
512 SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA 524 SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA
513 CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA 525 CREATE_STACK_FRAME __LC_SAVE_AREA
526 mvc SP_PSW(16,%r15),__LC_PGM_OLD_PSW
527 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
514 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 528 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
515 jz pgm_no_vtime2 529 jz pgm_no_vtime2
516 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER 530 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
517 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 531 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
518 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 532 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
533 LAST_BREAK
519pgm_no_vtime2: 534pgm_no_vtime2:
520 lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct 535 HANDLE_SIE_INTERCEPT
521 TRACE_IRQS_OFF 536 TRACE_IRQS_CHECK_OFF
522 lg %r1,__TI_task(%r9) 537 lg %r1,__TI_task(%r12)
523 tm SP_PSW+1(%r15),0x01 # kernel per event ? 538 tm SP_PSW+1(%r15),0x01 # kernel per event ?
524 jz kernel_per 539 jz kernel_per
525 mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID 540 mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
526 mvc __THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS 541 mvc __THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS
527 mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID 542 mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
528 oi __TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP 543 oi __TI_flags+7(%r12),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
529 lgf %r3,__LC_PGM_ILC # load program interruption code 544 lgf %r3,__LC_PGM_ILC # load program interruption code
530 lghi %r8,0x7f 545 lghi %r8,0x7f
531 ngr %r8,%r3 # clear per-event-bit and ilc 546 ngr %r8,%r3 # clear per-event-bit and ilc
532 je sysc_return 547 je pgm_exit2
533 j pgm_do_call 548 sll %r8,3
549 larl %r1,pgm_check_table
550 lg %r1,0(%r8,%r1) # load address of handler routine
551 la %r2,SP_PTREGS(%r15) # address of register-save area
552 basr %r14,%r1 # branch to interrupt-handler
553pgm_exit2:
554 TRACE_IRQS_ON
555 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
556 j sysc_return
534 557
535# 558#
536# it was a single stepped SVC that is causing all the trouble 559# it was a single stepped SVC that is causing all the trouble
537# 560#
538pgm_svcper: 561pgm_svcper:
539 SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA 562 SAVE_ALL_PGM __LC_SVC_OLD_PSW,__LC_SAVE_AREA
540 CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA 563 CREATE_STACK_FRAME __LC_SAVE_AREA
564 mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW
565 mvc SP_ILC(4,%r15),__LC_SVC_ILC
566 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
541 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER 567 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
542 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 568 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
543 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 569 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
544 llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore 570 LAST_BREAK
545 lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct 571 TRACE_IRQS_OFF
546 lg %r8,__TI_task(%r9) 572 lg %r8,__TI_task(%r12)
547 mvc __THREAD_per+__PER_atmid(2,%r8),__LC_PER_ATMID 573 mvc __THREAD_per+__PER_atmid(2,%r8),__LC_PER_ATMID
548 mvc __THREAD_per+__PER_address(8,%r8),__LC_PER_ADDRESS 574 mvc __THREAD_per+__PER_address(8,%r8),__LC_PER_ADDRESS
549 mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID 575 mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID
550 oi __TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP 576 oi __TI_flags+7(%r12),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
551 TRACE_IRQS_ON 577 TRACE_IRQS_ON
552 lmg %r2,%r6,SP_R2(%r15) # load svc arguments
553 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts 578 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
579 lmg %r2,%r6,SP_R2(%r15) # load svc arguments
554 j sysc_do_svc 580 j sysc_do_svc
555 581
556# 582#
@@ -559,8 +585,8 @@ pgm_svcper:
559kernel_per: 585kernel_per:
560 xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number 586 xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number
561 la %r2,SP_PTREGS(%r15) # address of register-save area 587 la %r2,SP_PTREGS(%r15) # address of register-save area
562 larl %r14,sysc_restore # load adr. of system ret, no work 588 brasl %r14,do_single_step
563 jg do_single_step # branch to do_single_step 589 j pgm_exit
564 590
565/* 591/*
566 * IO interrupt handler routine 592 * IO interrupt handler routine
@@ -569,162 +595,133 @@ kernel_per:
569io_int_handler: 595io_int_handler:
570 stck __LC_INT_CLOCK 596 stck __LC_INT_CLOCK
571 stpt __LC_ASYNC_ENTER_TIMER 597 stpt __LC_ASYNC_ENTER_TIMER
572 SAVE_ALL_BASE __LC_SAVE_AREA+32 598 SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+40
573 SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+32 599 CREATE_STACK_FRAME __LC_SAVE_AREA+40
574 CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+32 600 mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack
601 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
575 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 602 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
576 jz io_no_vtime 603 jz io_no_vtime
577 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER 604 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
578 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 605 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
579 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER 606 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
607 LAST_BREAK
580io_no_vtime: 608io_no_vtime:
581 lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct 609 HANDLE_SIE_INTERCEPT
582 TRACE_IRQS_OFF 610 TRACE_IRQS_OFF
583 la %r2,SP_PTREGS(%r15) # address of register-save area 611 la %r2,SP_PTREGS(%r15) # address of register-save area
584 brasl %r14,do_IRQ # call standard irq handler 612 brasl %r14,do_IRQ # call standard irq handler
585io_return: 613io_return:
586 tm __TI_flags+7(%r9),_TIF_WORK_INT 614 LOCKDEP_SYS_EXIT
615 TRACE_IRQS_ON
616io_tif:
617 tm __TI_flags+7(%r12),_TIF_WORK_INT
587 jnz io_work # there is work to do (signals etc.) 618 jnz io_work # there is work to do (signals etc.)
588io_restore: 619io_restore:
589#ifdef CONFIG_TRACE_IRQFLAGS
590 larl %r1,io_restore_trace_psw
591 lpswe 0(%r1)
592io_restore_trace:
593 TRACE_IRQS_CHECK
594 LOCKDEP_SYS_EXIT
595#endif
596io_leave:
597 RESTORE_ALL __LC_RETURN_PSW,0 620 RESTORE_ALL __LC_RETURN_PSW,0
598io_done: 621io_done:
599 622
600#ifdef CONFIG_TRACE_IRQFLAGS
601 .section .data,"aw",@progbits
602 .align 8
603 .globl io_restore_trace_psw
604io_restore_trace_psw:
605 .quad 0, io_restore_trace
606 .previous
607#endif
608
609# 623#
610# There is work todo, we need to check if we return to userspace, then 624# There is work todo, find out in which context we have been interrupted:
611# check, if we are in SIE, if yes leave it 625# 1) if we return to user space we can do all _TIF_WORK_INT work
626# 2) if we return to kernel code and kvm is enabled check if we need to
627# modify the psw to leave SIE
628# 3) if we return to kernel code and preemptive scheduling is enabled check
629# the preemption counter and if it is zero call preempt_schedule_irq
630# Before any work can be done, a switch to the kernel stack is required.
612# 631#
613io_work: 632io_work:
614 tm SP_PSW+1(%r15),0x01 # returning to user ? 633 tm SP_PSW+1(%r15),0x01 # returning to user ?
615#ifndef CONFIG_PREEMPT 634 jo io_work_user # yes -> do resched & signal
616#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) 635#ifdef CONFIG_PREEMPT
617 jnz io_work_user # yes -> no need to check for SIE
618 la %r1, BASED(sie_opcode) # we return to kernel here
619 lg %r2, SP_PSW+8(%r15)
620 clc 0(2,%r1), 0(%r2) # is current instruction = SIE?
621 jne io_restore # no-> return to kernel
622 lg %r1, SP_PSW+8(%r15) # yes-> add 4 bytes to leave SIE
623 aghi %r1, 4
624 stg %r1, SP_PSW+8(%r15)
625 j io_restore # return to kernel
626#else
627 jno io_restore # no-> skip resched & signal
628#endif
629#else
630 jnz io_work_user # yes -> do resched & signal
631#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
632 la %r1, BASED(sie_opcode)
633 lg %r2, SP_PSW+8(%r15)
634 clc 0(2,%r1), 0(%r2) # is current instruction = SIE?
635 jne 0f # no -> leave PSW alone
636 lg %r1, SP_PSW+8(%r15) # yes-> add 4 bytes to leave SIE
637 aghi %r1, 4
638 stg %r1, SP_PSW+8(%r15)
6390:
640#endif
641 # check for preemptive scheduling 636 # check for preemptive scheduling
642 icm %r0,15,__TI_precount(%r9) 637 icm %r0,15,__TI_precount(%r12)
643 jnz io_restore # preemption is disabled 638 jnz io_restore # preemption is disabled
639 tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
640 jno io_restore
644 # switch to kernel stack 641 # switch to kernel stack
645 lg %r1,SP_R15(%r15) 642 lg %r1,SP_R15(%r15)
646 aghi %r1,-SP_SIZE 643 aghi %r1,-SP_SIZE
647 mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) 644 mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
648 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain 645 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
649 lgr %r15,%r1 646 lgr %r15,%r1
650io_resume_loop: 647 # TRACE_IRQS_ON already done at io_return, call
651 tm __TI_flags+7(%r9),_TIF_NEED_RESCHED 648 # TRACE_IRQS_OFF to keep things symmetrical
652 jno io_restore 649 TRACE_IRQS_OFF
653 larl %r14,io_resume_loop 650 brasl %r14,preempt_schedule_irq
654 jg preempt_schedule_irq 651 j io_return
652#else
653 j io_restore
655#endif 654#endif
656 655
656#
657# Need to do work before returning to userspace, switch to kernel stack
658#
657io_work_user: 659io_work_user:
658 lg %r1,__LC_KERNEL_STACK 660 lg %r1,__LC_KERNEL_STACK
659 aghi %r1,-SP_SIZE 661 aghi %r1,-SP_SIZE
660 mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) 662 mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
661 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain 663 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
662 lgr %r15,%r1 664 lgr %r15,%r1
665
663# 666#
664# One of the work bits is on. Find out which one. 667# One of the work bits is on. Find out which one.
665# Checked are: _TIF_SIGPENDING, _TIF_RESTORE_SIGPENDING, _TIF_NEED_RESCHED 668# Checked are: _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_NEED_RESCHED
666# and _TIF_MCCK_PENDING 669# and _TIF_MCCK_PENDING
667# 670#
668io_work_loop: 671io_work_tif:
669 tm __TI_flags+7(%r9),_TIF_MCCK_PENDING 672 tm __TI_flags+7(%r12),_TIF_MCCK_PENDING
670 jo io_mcck_pending 673 jo io_mcck_pending
671 tm __TI_flags+7(%r9),_TIF_NEED_RESCHED 674 tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
672 jo io_reschedule 675 jo io_reschedule
673 tm __TI_flags+7(%r9),_TIF_SIGPENDING 676 tm __TI_flags+7(%r12),_TIF_SIGPENDING
674 jnz io_sigpending 677 jo io_sigpending
675 tm __TI_flags+7(%r9),_TIF_NOTIFY_RESUME 678 tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME
676 jnz io_notify_resume 679 jo io_notify_resume
677 j io_restore 680 j io_return # beware of critical section cleanup
678io_work_done:
679
680#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
681sie_opcode:
682 .long 0xb2140000
683#endif
684 681
685# 682#
686# _TIF_MCCK_PENDING is set, call handler 683# _TIF_MCCK_PENDING is set, call handler
687# 684#
688io_mcck_pending: 685io_mcck_pending:
686 # TRACE_IRQS_ON already done at io_return
689 brasl %r14,s390_handle_mcck # TIF bit will be cleared by handler 687 brasl %r14,s390_handle_mcck # TIF bit will be cleared by handler
690 j io_work_loop 688 TRACE_IRQS_OFF
689 j io_return
691 690
692# 691#
693# _TIF_NEED_RESCHED is set, call schedule 692# _TIF_NEED_RESCHED is set, call schedule
694# 693#
695io_reschedule: 694io_reschedule:
696 TRACE_IRQS_ON 695 # TRACE_IRQS_ON already done at io_return
697 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts 696 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
698 brasl %r14,schedule # call scheduler 697 brasl %r14,schedule # call scheduler
699 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts 698 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
700 TRACE_IRQS_OFF 699 TRACE_IRQS_OFF
701 tm __TI_flags+7(%r9),_TIF_WORK_INT 700 j io_return
702 jz io_restore # there is no work to do
703 j io_work_loop
704 701
705# 702#
706# _TIF_SIGPENDING or is set, call do_signal 703# _TIF_SIGPENDING or is set, call do_signal
707# 704#
708io_sigpending: 705io_sigpending:
709 TRACE_IRQS_ON 706 # TRACE_IRQS_ON already done at io_return
710 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts 707 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
711 la %r2,SP_PTREGS(%r15) # load pt_regs 708 la %r2,SP_PTREGS(%r15) # load pt_regs
712 brasl %r14,do_signal # call do_signal 709 brasl %r14,do_signal # call do_signal
713 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts 710 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
714 TRACE_IRQS_OFF 711 TRACE_IRQS_OFF
715 j io_work_loop 712 j io_return
716 713
717# 714#
718# _TIF_NOTIFY_RESUME or is set, call do_notify_resume 715# _TIF_NOTIFY_RESUME or is set, call do_notify_resume
719# 716#
720io_notify_resume: 717io_notify_resume:
721 TRACE_IRQS_ON 718 # TRACE_IRQS_ON already done at io_return
722 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts 719 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
723 la %r2,SP_PTREGS(%r15) # load pt_regs 720 la %r2,SP_PTREGS(%r15) # load pt_regs
724 brasl %r14,do_notify_resume # call do_notify_resume 721 brasl %r14,do_notify_resume # call do_notify_resume
725 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts 722 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
726 TRACE_IRQS_OFF 723 TRACE_IRQS_OFF
727 j io_work_loop 724 j io_return
728 725
729/* 726/*
730 * External interrupt handler routine 727 * External interrupt handler routine
@@ -733,16 +730,18 @@ io_notify_resume:
733ext_int_handler: 730ext_int_handler:
734 stck __LC_INT_CLOCK 731 stck __LC_INT_CLOCK
735 stpt __LC_ASYNC_ENTER_TIMER 732 stpt __LC_ASYNC_ENTER_TIMER
736 SAVE_ALL_BASE __LC_SAVE_AREA+32 733 SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+40
737 SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32 734 CREATE_STACK_FRAME __LC_SAVE_AREA+40
738 CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32 735 mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack
736 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
739 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 737 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
740 jz ext_no_vtime 738 jz ext_no_vtime
741 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER 739 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
742 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 740 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
743 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER 741 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
742 LAST_BREAK
744ext_no_vtime: 743ext_no_vtime:
745 lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct 744 HANDLE_SIE_INTERCEPT
746 TRACE_IRQS_OFF 745 TRACE_IRQS_OFF
747 la %r2,SP_PTREGS(%r15) # address of register-save area 746 la %r2,SP_PTREGS(%r15) # address of register-save area
748 llgh %r3,__LC_EXT_INT_CODE # get interruption code 747 llgh %r3,__LC_EXT_INT_CODE # get interruption code
@@ -756,17 +755,18 @@ __critical_end:
756 */ 755 */
757 .globl mcck_int_handler 756 .globl mcck_int_handler
758mcck_int_handler: 757mcck_int_handler:
759 stck __LC_INT_CLOCK 758 stck __LC_MCCK_CLOCK
760 la %r1,4095 # revalidate r1 759 la %r1,4095 # revalidate r1
761 spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer 760 spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer
762 lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs 761 lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
763 SAVE_ALL_BASE __LC_SAVE_AREA+64 762 stmg %r11,%r15,__LC_SAVE_AREA+80
763 larl %r13,system_call
764 lg %r11,__LC_LAST_BREAK
764 la %r12,__LC_MCK_OLD_PSW 765 la %r12,__LC_MCK_OLD_PSW
765 tm __LC_MCCK_CODE,0x80 # system damage? 766 tm __LC_MCCK_CODE,0x80 # system damage?
766 jo mcck_int_main # yes -> rest of mcck code invalid 767 jo mcck_int_main # yes -> rest of mcck code invalid
767 la %r14,4095 768 la %r14,4095
768 mvc __LC_SAVE_AREA+104(8),__LC_ASYNC_ENTER_TIMER 769 mvc __LC_MCCK_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA-4095(%r14)
769 mvc __LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA-4095(%r14)
770 tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid? 770 tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid?
771 jo 1f 771 jo 1f
772 la %r14,__LC_SYNC_ENTER_TIMER 772 la %r14,__LC_SYNC_ENTER_TIMER
@@ -780,7 +780,7 @@ mcck_int_handler:
780 jl 0f 780 jl 0f
781 la %r14,__LC_LAST_UPDATE_TIMER 781 la %r14,__LC_LAST_UPDATE_TIMER
7820: spt 0(%r14) 7820: spt 0(%r14)
783 mvc __LC_ASYNC_ENTER_TIMER(8),0(%r14) 783 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
7841: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? 7841: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
785 jno mcck_int_main # no -> skip cleanup critical 785 jno mcck_int_main # no -> skip cleanup critical
786 tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit 786 tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit
@@ -796,16 +796,19 @@ mcck_int_main:
796 srag %r14,%r14,PAGE_SHIFT 796 srag %r14,%r14,PAGE_SHIFT
797 jz 0f 797 jz 0f
798 lg %r15,__LC_PANIC_STACK # load panic stack 798 lg %r15,__LC_PANIC_STACK # load panic stack
7990: CREATE_STACK_FRAME __LC_MCK_OLD_PSW,__LC_SAVE_AREA+64 7990: aghi %r15,-SP_SIZE # make room for registers & psw
800 CREATE_STACK_FRAME __LC_SAVE_AREA+80
801 mvc SP_PSW(16,%r15),0(%r12)
802 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
800 tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid? 803 tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid?
801 jno mcck_no_vtime # no -> no timer update 804 jno mcck_no_vtime # no -> no timer update
802 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 805 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
803 jz mcck_no_vtime 806 jz mcck_no_vtime
804 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER 807 UPDATE_VTIME __LC_EXIT_TIMER,__LC_MCCK_ENTER_TIMER,__LC_USER_TIMER
805 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 808 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
806 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER 809 mvc __LC_LAST_UPDATE_TIMER(8),__LC_MCCK_ENTER_TIMER
810 LAST_BREAK
807mcck_no_vtime: 811mcck_no_vtime:
808 lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
809 la %r2,SP_PTREGS(%r15) # load pt_regs 812 la %r2,SP_PTREGS(%r15) # load pt_regs
810 brasl %r14,s390_do_machine_check 813 brasl %r14,s390_do_machine_check
811 tm SP_PSW+1(%r15),0x01 # returning to user ? 814 tm SP_PSW+1(%r15),0x01 # returning to user ?
@@ -816,8 +819,9 @@ mcck_no_vtime:
816 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain 819 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
817 lgr %r15,%r1 820 lgr %r15,%r1
818 stosm __SF_EMPTY(%r15),0x04 # turn dat on 821 stosm __SF_EMPTY(%r15),0x04 # turn dat on
819 tm __TI_flags+7(%r9),_TIF_MCCK_PENDING 822 tm __TI_flags+7(%r12),_TIF_MCCK_PENDING
820 jno mcck_return 823 jno mcck_return
824 HANDLE_SIE_INTERCEPT
821 TRACE_IRQS_OFF 825 TRACE_IRQS_OFF
822 brasl %r14,s390_handle_mcck 826 brasl %r14,s390_handle_mcck
823 TRACE_IRQS_ON 827 TRACE_IRQS_ON
@@ -825,11 +829,11 @@ mcck_return:
825 mvc __LC_RETURN_MCCK_PSW(16),SP_PSW(%r15) # move return PSW 829 mvc __LC_RETURN_MCCK_PSW(16),SP_PSW(%r15) # move return PSW
826 ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit 830 ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit
827 lmg %r0,%r15,SP_R0(%r15) # load gprs 0-15 831 lmg %r0,%r15,SP_R0(%r15) # load gprs 0-15
828 mvc __LC_ASYNC_ENTER_TIMER(8),__LC_SAVE_AREA+104
829 tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? 832 tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
830 jno 0f 833 jno 0f
831 stpt __LC_EXIT_TIMER 834 stpt __LC_EXIT_TIMER
8320: lpswe __LC_RETURN_MCCK_PSW # back to caller 8350: lpswe __LC_RETURN_MCCK_PSW # back to caller
836mcck_done:
833 837
834/* 838/*
835 * Restart interruption handler, kick starter for additional CPUs 839 * Restart interruption handler, kick starter for additional CPUs
@@ -885,14 +889,14 @@ stack_overflow:
885 lg %r15,__LC_PANIC_STACK # change to panic stack 889 lg %r15,__LC_PANIC_STACK # change to panic stack
886 aghi %r15,-SP_SIZE 890 aghi %r15,-SP_SIZE
887 mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack 891 mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack
888 stmg %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack 892 stmg %r0,%r10,SP_R0(%r15) # store gprs %r0-%r10 to kernel stack
889 la %r1,__LC_SAVE_AREA 893 la %r1,__LC_SAVE_AREA
890 chi %r12,__LC_SVC_OLD_PSW 894 chi %r12,__LC_SVC_OLD_PSW
891 je 0f 895 je 0f
892 chi %r12,__LC_PGM_OLD_PSW 896 chi %r12,__LC_PGM_OLD_PSW
893 je 0f 897 je 0f
894 la %r1,__LC_SAVE_AREA+32 898 la %r1,__LC_SAVE_AREA+40
8950: mvc SP_R12(32,%r15),0(%r1) # move %r12-%r15 to stack 8990: mvc SP_R11(40,%r15),0(%r1) # move %r11-%r15 to stack
896 mvc SP_ARGS(8,%r15),__LC_LAST_BREAK 900 mvc SP_ARGS(8,%r15),__LC_LAST_BREAK
897 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # clear back chain 901 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # clear back chain
898 la %r2,SP_PTREGS(%r15) # load pt_regs 902 la %r2,SP_PTREGS(%r15) # load pt_regs
@@ -901,18 +905,14 @@ stack_overflow:
901 905
902cleanup_table_system_call: 906cleanup_table_system_call:
903 .quad system_call, sysc_do_svc 907 .quad system_call, sysc_do_svc
904cleanup_table_sysc_return: 908cleanup_table_sysc_tif:
905 .quad sysc_return, sysc_leave 909 .quad sysc_tif, sysc_restore
906cleanup_table_sysc_leave: 910cleanup_table_sysc_restore:
907 .quad sysc_leave, sysc_done 911 .quad sysc_restore, sysc_done
908cleanup_table_sysc_work_loop: 912cleanup_table_io_tif:
909 .quad sysc_work_loop, sysc_work_done 913 .quad io_tif, io_restore
910cleanup_table_io_return: 914cleanup_table_io_restore:
911 .quad io_return, io_leave 915 .quad io_restore, io_done
912cleanup_table_io_leave:
913 .quad io_leave, io_done
914cleanup_table_io_work_loop:
915 .quad io_work_loop, io_work_done
916 916
917cleanup_critical: 917cleanup_critical:
918 clc 8(8,%r12),BASED(cleanup_table_system_call) 918 clc 8(8,%r12),BASED(cleanup_table_system_call)
@@ -920,61 +920,54 @@ cleanup_critical:
920 clc 8(8,%r12),BASED(cleanup_table_system_call+8) 920 clc 8(8,%r12),BASED(cleanup_table_system_call+8)
921 jl cleanup_system_call 921 jl cleanup_system_call
9220: 9220:
923 clc 8(8,%r12),BASED(cleanup_table_sysc_return) 923 clc 8(8,%r12),BASED(cleanup_table_sysc_tif)
924 jl 0f 924 jl 0f
925 clc 8(8,%r12),BASED(cleanup_table_sysc_return+8) 925 clc 8(8,%r12),BASED(cleanup_table_sysc_tif+8)
926 jl cleanup_sysc_return 926 jl cleanup_sysc_tif
9270: 9270:
928 clc 8(8,%r12),BASED(cleanup_table_sysc_leave) 928 clc 8(8,%r12),BASED(cleanup_table_sysc_restore)
929 jl 0f 929 jl 0f
930 clc 8(8,%r12),BASED(cleanup_table_sysc_leave+8) 930 clc 8(8,%r12),BASED(cleanup_table_sysc_restore+8)
931 jl cleanup_sysc_leave 931 jl cleanup_sysc_restore
9320: 9320:
933 clc 8(8,%r12),BASED(cleanup_table_sysc_work_loop) 933 clc 8(8,%r12),BASED(cleanup_table_io_tif)
934 jl 0f 934 jl 0f
935 clc 8(8,%r12),BASED(cleanup_table_sysc_work_loop+8) 935 clc 8(8,%r12),BASED(cleanup_table_io_tif+8)
936 jl cleanup_sysc_return 936 jl cleanup_io_tif
9370: 9370:
938 clc 8(8,%r12),BASED(cleanup_table_io_return) 938 clc 8(8,%r12),BASED(cleanup_table_io_restore)
939 jl 0f 939 jl 0f
940 clc 8(8,%r12),BASED(cleanup_table_io_return+8) 940 clc 8(8,%r12),BASED(cleanup_table_io_restore+8)
941 jl cleanup_io_return 941 jl cleanup_io_restore
9420:
943 clc 8(8,%r12),BASED(cleanup_table_io_leave)
944 jl 0f
945 clc 8(8,%r12),BASED(cleanup_table_io_leave+8)
946 jl cleanup_io_leave
9470:
948 clc 8(8,%r12),BASED(cleanup_table_io_work_loop)
949 jl 0f
950 clc 8(8,%r12),BASED(cleanup_table_io_work_loop+8)
951 jl cleanup_io_return
9520: 9420:
953 br %r14 943 br %r14
954 944
955cleanup_system_call: 945cleanup_system_call:
956 mvc __LC_RETURN_PSW(16),0(%r12) 946 mvc __LC_RETURN_PSW(16),0(%r12)
957 cghi %r12,__LC_MCK_OLD_PSW
958 je 0f
959 la %r12,__LC_SAVE_AREA+32
960 j 1f
9610: la %r12,__LC_SAVE_AREA+64
9621:
963 clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+8) 947 clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+8)
964 jh 0f 948 jh 0f
949 mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
950 cghi %r12,__LC_MCK_OLD_PSW
951 je 0f
965 mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER 952 mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
9530: cghi %r12,__LC_MCK_OLD_PSW
954 la %r12,__LC_SAVE_AREA+80
955 je 0f
956 la %r12,__LC_SAVE_AREA+40
9660: clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+16) 9570: clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+16)
967 jhe cleanup_vtime 958 jhe cleanup_vtime
968 clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn) 959 clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn)
969 jh 0f 960 jh 0f
970 mvc __LC_SAVE_AREA(32),0(%r12) 961 mvc __LC_SAVE_AREA(40),0(%r12)
9710: stg %r13,8(%r12) 9620: lg %r15,__LC_KERNEL_STACK # problem state -> load ksp
972 stg %r12,__LC_SAVE_AREA+96 # argh 963 aghi %r15,-SP_SIZE # make room for registers & psw
973 SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA 964 stg %r15,32(%r12)
974 CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA 965 stg %r11,0(%r12)
975 lg %r12,__LC_SAVE_AREA+96 # argh 966 CREATE_STACK_FRAME __LC_SAVE_AREA
976 stg %r15,24(%r12) 967 mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW
977 llgh %r7,__LC_SVC_INT_CODE 968 mvc SP_ILC(4,%r15),__LC_SVC_ILC
969 stg %r7,SP_ARGS(%r15)
970 mvc 8(8,%r12),__LC_THREAD_INFO
978cleanup_vtime: 971cleanup_vtime:
979 clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24) 972 clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24)
980 jhe cleanup_stime 973 jhe cleanup_stime
@@ -985,7 +978,11 @@ cleanup_stime:
985 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 978 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
986cleanup_update: 979cleanup_update:
987 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 980 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
988 mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_system_call+8) 981 srag %r12,%r11,23
982 lg %r12,__LC_THREAD_INFO
983 jz 0f
984 stg %r11,__TI_last_break(%r12)
9850: mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_system_call+8)
989 la %r12,__LC_RETURN_PSW 986 la %r12,__LC_RETURN_PSW
990 br %r14 987 br %r14
991cleanup_system_call_insn: 988cleanup_system_call_insn:
@@ -995,55 +992,54 @@ cleanup_system_call_insn:
995 .quad sysc_stime 992 .quad sysc_stime
996 .quad sysc_update 993 .quad sysc_update
997 994
998cleanup_sysc_return: 995cleanup_sysc_tif:
999 mvc __LC_RETURN_PSW(8),0(%r12) 996 mvc __LC_RETURN_PSW(8),0(%r12)
1000 mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_sysc_return) 997 mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_sysc_tif)
1001 la %r12,__LC_RETURN_PSW 998 la %r12,__LC_RETURN_PSW
1002 br %r14 999 br %r14
1003 1000
1004cleanup_sysc_leave: 1001cleanup_sysc_restore:
1005 clc 8(8,%r12),BASED(cleanup_sysc_leave_insn) 1002 clc 8(8,%r12),BASED(cleanup_sysc_restore_insn)
1006 je 3f 1003 je 2f
1007 clc 8(8,%r12),BASED(cleanup_sysc_leave_insn+8) 1004 clc 8(8,%r12),BASED(cleanup_sysc_restore_insn+8)
1008 jhe 0f 1005 jhe 0f
1006 mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
1007 cghi %r12,__LC_MCK_OLD_PSW
1008 je 0f
1009 mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER 1009 mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
10100: mvc __LC_RETURN_PSW(16),SP_PSW(%r15) 10100: mvc __LC_RETURN_PSW(16),SP_PSW(%r15)
1011 cghi %r12,__LC_MCK_OLD_PSW 1011 cghi %r12,__LC_MCK_OLD_PSW
1012 jne 1f 1012 la %r12,__LC_SAVE_AREA+80
1013 mvc __LC_SAVE_AREA+64(32),SP_R12(%r15) 1013 je 1f
1014 j 2f 1014 la %r12,__LC_SAVE_AREA+40
10151: mvc __LC_SAVE_AREA+32(32),SP_R12(%r15) 10151: mvc 0(40,%r12),SP_R11(%r15)
10162: lmg %r0,%r11,SP_R0(%r15) 1016 lmg %r0,%r10,SP_R0(%r15)
1017 lg %r15,SP_R15(%r15) 1017 lg %r15,SP_R15(%r15)
10183: la %r12,__LC_RETURN_PSW 10182: la %r12,__LC_RETURN_PSW
1019 br %r14 1019 br %r14
1020cleanup_sysc_leave_insn: 1020cleanup_sysc_restore_insn:
1021 .quad sysc_done - 4 1021 .quad sysc_done - 4
1022 .quad sysc_done - 16 1022 .quad sysc_done - 16
1023 1023
1024cleanup_io_return: 1024cleanup_io_tif:
1025 mvc __LC_RETURN_PSW(8),0(%r12) 1025 mvc __LC_RETURN_PSW(8),0(%r12)
1026 mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_io_work_loop) 1026 mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_io_tif)
1027 la %r12,__LC_RETURN_PSW 1027 la %r12,__LC_RETURN_PSW
1028 br %r14 1028 br %r14
1029 1029
1030cleanup_io_leave: 1030cleanup_io_restore:
1031 clc 8(8,%r12),BASED(cleanup_io_leave_insn) 1031 clc 8(8,%r12),BASED(cleanup_io_restore_insn)
1032 je 3f 1032 je 1f
1033 clc 8(8,%r12),BASED(cleanup_io_leave_insn+8) 1033 clc 8(8,%r12),BASED(cleanup_io_restore_insn+8)
1034 jhe 0f 1034 jhe 0f
1035 mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER 1035 mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
10360: mvc __LC_RETURN_PSW(16),SP_PSW(%r15) 10360: mvc __LC_RETURN_PSW(16),SP_PSW(%r15)
1037 cghi %r12,__LC_MCK_OLD_PSW 1037 mvc __LC_SAVE_AREA+80(40),SP_R11(%r15)
1038 jne 1f 1038 lmg %r0,%r10,SP_R0(%r15)
1039 mvc __LC_SAVE_AREA+64(32),SP_R12(%r15)
1040 j 2f
10411: mvc __LC_SAVE_AREA+32(32),SP_R12(%r15)
10422: lmg %r0,%r11,SP_R0(%r15)
1043 lg %r15,SP_R15(%r15) 1039 lg %r15,SP_R15(%r15)
10443: la %r12,__LC_RETURN_PSW 10401: la %r12,__LC_RETURN_PSW
1045 br %r14 1041 br %r14
1046cleanup_io_leave_insn: 1042cleanup_io_restore_insn:
1047 .quad io_done - 4 1043 .quad io_done - 4
1048 .quad io_done - 16 1044 .quad io_done - 16
1049 1045
@@ -1051,13 +1047,6 @@ cleanup_io_leave_insn:
1051 * Integer constants 1047 * Integer constants
1052 */ 1048 */
1053 .align 4 1049 .align 4
1054.Lconst:
1055.Lnr_syscalls: .long NR_syscalls
1056.L0x0130: .short 0x130
1057.L0x0140: .short 0x140
1058.L0x0150: .short 0x150
1059.L0x0160: .short 0x160
1060.L0x0170: .short 0x170
1061.Lcritical_start: 1050.Lcritical_start:
1062 .quad __critical_start 1051 .quad __critical_start
1063.Lcritical_end: 1052.Lcritical_end:
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 5a82bc68193e..6a83d0581317 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -13,7 +13,7 @@
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/types.h> 14#include <linux/types.h>
15#include <trace/syscall.h> 15#include <trace/syscall.h>
16#include <asm/lowcore.h> 16#include <asm/asm-offsets.h>
17 17
18#ifdef CONFIG_DYNAMIC_FTRACE 18#ifdef CONFIG_DYNAMIC_FTRACE
19 19
@@ -200,13 +200,3 @@ out:
200 return parent; 200 return parent;
201} 201}
202#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 202#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
203
204#ifdef CONFIG_FTRACE_SYSCALLS
205
206extern unsigned int sys_call_table[];
207
208unsigned long __init arch_syscall_addr(int nr)
209{
210 return (unsigned long)sys_call_table[nr];
211}
212#endif
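
With the arch-private arch_syscall_addr() dropped above, the syscall-address lookup presumably falls back to a weak helper in the common ftrace syscall code. A hedged sketch of what such a fallback looks like — the weak qualifier, exact location and table element type are assumptions, not taken from this patch:

/*
 * Sketch of an overridable (weak) syscall-address lookup equivalent to the
 * removed arch copy; sys_call_table is assumed to hold long-sized entries.
 */
extern unsigned long sys_call_table[];

unsigned long __attribute__((weak)) arch_syscall_addr(int nr)
{
	return sys_call_table[nr];	/* address of the nr-th handler */
}
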
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index c52b4f7742fa..51838ad42d56 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright IBM Corp. 1999,2009 2 * Copyright IBM Corp. 1999,2010
3 * 3 *
4 * Author(s): Hartmut Penner <hp@de.ibm.com> 4 * Author(s): Hartmut Penner <hp@de.ibm.com>
5 * Martin Schwidefsky <schwidefsky@de.ibm.com> 5 * Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -22,12 +22,9 @@
22 */ 22 */
23 23
24#include <linux/init.h> 24#include <linux/init.h>
25#include <asm/setup.h>
26#include <asm/lowcore.h>
27#include <asm/asm-offsets.h> 25#include <asm/asm-offsets.h>
28#include <asm/thread_info.h> 26#include <asm/thread_info.h>
29#include <asm/page.h> 27#include <asm/page.h>
30#include <asm/cpu.h>
31 28
32#ifdef CONFIG_64BIT 29#ifdef CONFIG_64BIT
33#define ARCH_OFFSET 4 30#define ARCH_OFFSET 4
@@ -288,19 +285,7 @@ iplstart:
288 bz .Lagain1 # skip dataset trailer 285 bz .Lagain1 # skip dataset trailer
289 la %r5,0(%r4,%r2) 286 la %r5,0(%r4,%r2)
290 lr %r3,%r2 287 lr %r3,%r2
291.Lidebc: 288 la %r3,COMMAND_LINE-PARMAREA(%r12) # load adr. of command line
292 tm 0(%r5),0x80 # high order bit set ?
293 bo .Ldocv # yes -> convert from EBCDIC
294 ahi %r5,-1
295 bct %r3,.Lidebc
296 b .Lnocv
297.Ldocv:
298 l %r3,.Lcvtab
299 tr 0(256,%r4),0(%r3) # convert parameters to ascii
300 tr 256(256,%r4),0(%r3)
301 tr 512(256,%r4),0(%r3)
302 tr 768(122,%r4),0(%r3)
303.Lnocv: la %r3,COMMAND_LINE-PARMAREA(%r12) # load adr. of command line
304 mvc 0(256,%r3),0(%r4) 289 mvc 0(256,%r3),0(%r4)
305 mvc 256(256,%r3),256(%r4) 290 mvc 256(256,%r3),256(%r4)
306 mvc 512(256,%r3),512(%r4) 291 mvc 512(256,%r3),512(%r4)
@@ -343,8 +328,8 @@ iplstart:
343# 328#
344# reset files in VM reader 329# reset files in VM reader
345# 330#
346 stidp __LC_CPUID # store cpuid 331 stidp __LC_SAVE_AREA # store cpuid
347 tm __LC_CPUID,0xff # running VM ? 332 tm __LC_SAVE_AREA,0xff # running VM ?
348 bno .Lnoreset 333 bno .Lnoreset
349 la %r2,.Lreset 334 la %r2,.Lreset
350 lhi %r3,26 335 lhi %r3,26
@@ -384,7 +369,6 @@ iplstart:
384.Linitrd:.long _end + 0x400000 # default address of initrd 369.Linitrd:.long _end + 0x400000 # default address of initrd
385.Lparm: .long PARMAREA 370.Lparm: .long PARMAREA
386.Lstartup: .long startup 371.Lstartup: .long startup
387.Lcvtab:.long _ebcasc # ebcdic to ascii table
388.Lreset:.byte 0xc3,0xc8,0xc1,0xd5,0xc7,0xc5,0x40,0xd9,0xc4,0xd9,0x40 372.Lreset:.byte 0xc3,0xc8,0xc1,0xd5,0xc7,0xc5,0x40,0xd9,0xc4,0xd9,0x40
389 .byte 0xc1,0xd3,0xd3,0x40,0xd2,0xc5,0xc5,0xd7,0x40,0xd5,0xd6 373 .byte 0xc1,0xd3,0xd3,0x40,0xd2,0xc5,0xc5,0xd7,0x40,0xd5,0xd6
390 .byte 0xc8,0xd6,0xd3,0xc4 # "change rdr all keep nohold" 374 .byte 0xc8,0xd6,0xd3,0xc4 # "change rdr all keep nohold"
@@ -417,13 +401,10 @@ start:
417.sk8x8: 401.sk8x8:
418 mvc 0(240,%r8),0(%r9) # copy iplparms into buffer 402 mvc 0(240,%r8),0(%r9) # copy iplparms into buffer
419.gotr: 403.gotr:
420 l %r10,.tbl # EBCDIC to ASCII table
421 tr 0(240,%r8),0(%r10)
422 slr %r0,%r0 404 slr %r0,%r0
423 st %r0,INITRD_SIZE+ARCH_OFFSET-PARMAREA(%r11) 405 st %r0,INITRD_SIZE+ARCH_OFFSET-PARMAREA(%r11)
424 st %r0,INITRD_START+ARCH_OFFSET-PARMAREA(%r11) 406 st %r0,INITRD_START+ARCH_OFFSET-PARMAREA(%r11)
425 j startup # continue with startup 407 j startup # continue with startup
426.tbl: .long _ebcasc # translate table
427.cmd: .long COMMAND_LINE # address of command line buffer 408.cmd: .long COMMAND_LINE # address of command line buffer
428.parm: .long PARMAREA 409.parm: .long PARMAREA
429.lowcase: 410.lowcase:
@@ -467,16 +448,15 @@ start:
467# or linload or SALIPL 448# or linload or SALIPL
468# 449#
469 .org 0x10000 450 .org 0x10000
470startup:basr %r13,0 # get base 451 .globl startup
452startup:
453 basr %r13,0 # get base
471.LPG0: 454.LPG0:
472 xc 0x200(256),0x200 # partially clear lowcore 455 xc 0x200(256),0x200 # partially clear lowcore
473 xc 0x300(256),0x300 456 xc 0x300(256),0x300
474 l %r1,5f-.LPG0(%r13) 457 stck __LC_LAST_UPDATE_CLOCK
475 stck 0(%r1) 458 spt 5f-.LPG0(%r13)
476 spt 6f-.LPG0(%r13) 459 mvc __LC_LAST_UPDATE_TIMER(8),5f-.LPG0(%r13)
477 mvc __LC_LAST_UPDATE_CLOCK(8),0(%r1)
478 mvc __LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13)
479 mvc __LC_EXIT_TIMER(8),5f-.LPG0(%r13)
480#ifndef CONFIG_MARCH_G5 460#ifndef CONFIG_MARCH_G5
481 # check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10} 461 # check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10}
482 xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST 462 xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST
@@ -494,7 +474,6 @@ startup:basr %r13,0 # get base
494 cl %r0,2f+12-.LPG0(%r13) 474 cl %r0,2f+12-.LPG0(%r13)
495 je 3f 475 je 3f
4961: l %r15,.Lstack-.LPG0(%r13) 4761: l %r15,.Lstack-.LPG0(%r13)
497 ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union+THREAD_SIZE
498 ahi %r15,-96 477 ahi %r15,-96
499 la %r2,.Lals_string-.LPG0(%r13) 478 la %r2,.Lals_string-.LPG0(%r13)
500 l %r3,.Lsclp_print-.LPG0(%r13) 479 l %r3,.Lsclp_print-.LPG0(%r13)
@@ -505,7 +484,7 @@ startup:basr %r13,0 # get base
505.Lsclp_print: 484.Lsclp_print:
506 .long _sclp_print_early 485 .long _sclp_print_early
507.Lstack: 486.Lstack:
508 .long init_thread_union 487 .long 0x8000 + (1<<(PAGE_SHIFT+THREAD_ORDER))
509 .align 16 488 .align 16
5102: .long 0x000a0000,0x8badcccc 4892: .long 0x000a0000,0x8badcccc
511#if defined(CONFIG_64BIT) 490#if defined(CONFIG_64BIT)
@@ -532,13 +511,25 @@ startup:basr %r13,0 # get base
5323: 5113:
533#endif 512#endif
534 513
514#ifdef CONFIG_64BIT
515 mvi __LC_AR_MODE_ID,1 # set esame flag
516 slr %r0,%r0 # set cpuid to zero
517 lhi %r1,2 # mode 2 = esame (dump)
518 sigp %r1,%r0,0x12 # switch to esame mode
519 sam64 # switch to 64 bit mode
520 larl %r13,4f
521 lmh %r0,%r15,0(%r13) # clear high-order half
522 jg startup_continue
5234: .fill 16,4,0x0
524#else
525 mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0)
535 l %r13,4f-.LPG0(%r13) 526 l %r13,4f-.LPG0(%r13)
536 b 0(%r13) 527 b 0(%r13)
537 .align 4 528 .align 8
5384: .long startup_continue 5294: .long startup_continue
5395: .long sched_clock_base_cc 530#endif
540 .align 8 531 .align 8
5416: .long 0x7fffffff,0xffffffff 5325: .long 0x7fffffff,0xffffffff
542 533
543# 534#
544# params at 10400 (setup.h) 535# params at 10400 (setup.h)
@@ -552,8 +543,4 @@ startup:basr %r13,0 # get base
552 .byte "root=/dev/ram0 ro" 543 .byte "root=/dev/ram0 ro"
553 .byte 0 544 .byte 0
554 545
555#ifdef CONFIG_64BIT 546 .org 0x11000
556#include "head64.S"
557#else
558#include "head31.S"
559#endif
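
The .Lidebc/.Ldocv loop and the _ebcasc/.tbl references removed above performed a table-driven EBCDIC-to-ASCII conversion of the IPL parameters (only when the text looked like EBCDIC). In C terms, the removed TR-based translation amounted to the following sketch; the function name and loop are illustrative, the 256-byte table stands in for the _ebcasc table the old code pointed at:

/*
 * Illustrative C equivalent of the removed TR-based conversion: translate a
 * buffer in place through a 256-entry translation table.
 */
static void translate_in_place(unsigned char *buf, unsigned long len,
			       const unsigned char table[256])
{
	unsigned long i;

	for (i = 0; i < len; i++)
		buf[i] = table[buf[i]];		/* what TR does, byte by byte */
}
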
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S
index 602b508cd4c4..b8f8dc126102 100644
--- a/arch/s390/kernel/head31.S
+++ b/arch/s390/kernel/head31.S
@@ -1,7 +1,7 @@
1/* 1/*
2 * arch/s390/kernel/head31.S 2 * arch/s390/kernel/head31.S
3 * 3 *
4 * Copyright (C) IBM Corp. 2005,2006 4 * Copyright (C) IBM Corp. 2005,2010
5 * 5 *
6 * Author(s): Hartmut Penner <hp@de.ibm.com> 6 * Author(s): Hartmut Penner <hp@de.ibm.com>
7 * Martin Schwidefsky <schwidefsky@de.ibm.com> 7 * Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -10,13 +10,19 @@
10 * 10 *
11 */ 11 */
12 12
13 .org 0x11000 13#include <linux/init.h>
14#include <asm/asm-offsets.h>
15#include <asm/thread_info.h>
16#include <asm/page.h>
14 17
18__HEAD
19 .globl startup_continue
15startup_continue: 20startup_continue:
16 basr %r13,0 # get base 21 basr %r13,0 # get base
17.LPG1: 22.LPG1:
18 23
19 mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0) 24 l %r1,.Lbase_cc-.LPG1(%r13)
25 mvc 0(8,%r1),__LC_LAST_UPDATE_CLOCK
20 lctl %c0,%c15,.Lctl-.LPG1(%r13) # load control registers 26 lctl %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
21 l %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area 27 l %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area
22 # move IPL device to lowcore 28 # move IPL device to lowcore
@@ -69,12 +75,14 @@ startup_continue:
69.Lduald:.rept 8 75.Lduald:.rept 8
70 .long 0x80000000,0,0,0 # invalid access-list entries 76 .long 0x80000000,0,0,0 # invalid access-list entries
71 .endr 77 .endr
78.Lbase_cc:
79 .long sched_clock_base_cc
72 80
73 .org 0x12000
74 .globl _ehead 81 .globl _ehead
75_ehead: 82_ehead:
83
76#ifdef CONFIG_SHARED_KERNEL 84#ifdef CONFIG_SHARED_KERNEL
77 .org 0x100000 85 .org 0x100000 - 0x11000 # head.o ends at 0x11000
78#endif 86#endif
79 87
80# 88#
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index d984a2a380c3..cdef68717416 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -1,7 +1,7 @@
1/* 1/*
2 * arch/s390/kernel/head64.S 2 * arch/s390/kernel/head64.S
3 * 3 *
4 * Copyright (C) IBM Corp. 1999,2006 4 * Copyright (C) IBM Corp. 1999,2010
5 * 5 *
6 * Author(s): Hartmut Penner <hp@de.ibm.com> 6 * Author(s): Hartmut Penner <hp@de.ibm.com>
7 * Martin Schwidefsky <schwidefsky@de.ibm.com> 7 * Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -10,81 +10,17 @@
10 * 10 *
11 */ 11 */
12 12
13 .org 0x11000 13#include <linux/init.h>
14#include <asm/asm-offsets.h>
15#include <asm/thread_info.h>
16#include <asm/page.h>
14 17
18__HEAD
19 .globl startup_continue
15startup_continue: 20startup_continue:
16 basr %r13,0 # get base 21 larl %r1,sched_clock_base_cc
17.LPG1: sll %r13,1 # remove high order bit 22 mvc 0(8,%r1),__LC_LAST_UPDATE_CLOCK
18 srl %r13,1 23 larl %r13,.LPG1 # get base
19
20#ifdef CONFIG_ZFCPDUMP
21
22 # check if we have been ipled using zfcp dump:
23
24 tm 0xb9,0x01 # test if subchannel is enabled
25 jno .nodump # subchannel disabled
26 l %r1,0xb8
27 la %r5,.Lipl_schib-.LPG1(%r13)
28 stsch 0(%r5) # get schib of subchannel
29 jne .nodump # schib not available
30 tm 5(%r5),0x01 # devno valid?
31 jno .nodump
32 tm 4(%r5),0x80 # qdio capable device?
33 jno .nodump
34 l %r2,20(%r0) # address of ipl parameter block
35 lhi %r3,0
36 ic %r3,0x148(%r2) # get opt field
37 chi %r3,0x20 # load with dump?
38 jne .nodump
39
40 # store all prefix registers in case of load with dump:
41
42 la %r7,0 # base register for 0 page
43 la %r8,0 # first cpu
44 l %r11,.Lpref_arr_ptr-.LPG1(%r13) # address of prefix array
45 ahi %r11,4 # skip boot cpu
46 lr %r12,%r11
47 ahi %r12,(CONFIG_NR_CPUS*4) # end of prefix array
48 stap .Lcurrent_cpu+2-.LPG1(%r13) # store current cpu addr
491:
50 cl %r8,.Lcurrent_cpu-.LPG1(%r13) # is ipl cpu ?
51 je 4f # if yes get next cpu
522:
53 lr %r9,%r7
54 sigp %r9,%r8,0x9 # stop & store status of cpu
55 brc 8,3f # accepted
56 brc 4,4f # status stored: next cpu
57 brc 2,2b # busy: try again
58 brc 1,4f # not op: next cpu
593:
60 mvc 0(4,%r11),264(%r7) # copy prefix register to prefix array
61 ahi %r11,4 # next element in prefix array
62 clr %r11,%r12
63 je 5f # no more space in prefix array
644:
65 ahi %r8,1 # next cpu (r8 += 1)
66 chi %r8,MAX_CPU_ADDRESS # is last possible cpu ?
67 jle 1b # jump if not last cpu
685:
69 lhi %r1,2 # mode 2 = esame (dump)
70 j 6f
71 .align 4
72.Lipl_schib:
73 .rept 13
74 .long 0
75 .endr
76.nodump:
77 lhi %r1,1 # mode 1 = esame (normal ipl)
786:
79#else
80 lhi %r1,1 # mode 1 = esame (normal ipl)
81#endif /* CONFIG_ZFCPDUMP */
82 mvi __LC_AR_MODE_ID,1 # set esame flag
83 slr %r0,%r0 # set cpuid to zero
84 sigp %r1,%r0,0x12 # switch to esame mode
85 sam64 # switch to 64 bit mode
86 llgfr %r13,%r13 # clear high-order half of base reg
87 lmh %r0,%r15,.Lzero64-.LPG1(%r13) # clear high-order half
88 lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers 24 lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
89 lg %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area 25 lg %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area
90 # move IPL device to lowcore 26 # move IPL device to lowcore
@@ -108,6 +44,7 @@ startup_continue:
108 lpswe .Lentry-.LPG1(13) # jump to _stext in primary-space, 44 lpswe .Lentry-.LPG1(13) # jump to _stext in primary-space,
109 # virtual and never return ... 45 # virtual and never return ...
110 .align 16 46 .align 16
47.LPG1:
111.Lentry:.quad 0x0000000180000000,_stext 48.Lentry:.quad 0x0000000180000000,_stext
112.Lctl: .quad 0x04350002 # cr0: various things 49.Lctl: .quad 0x04350002 # cr0: various things
113 .quad 0 # cr1: primary space segment table 50 .quad 0 # cr1: primary space segment table
@@ -129,13 +66,6 @@ startup_continue:
129.L4malign:.quad 0xffffffffffc00000 66.L4malign:.quad 0xffffffffffc00000
130.Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8 67.Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8
131.Lnop: .long 0x07000700 68.Lnop: .long 0x07000700
132.Lzero64:.fill 16,4,0x0
133#ifdef CONFIG_ZFCPDUMP
134.Lcurrent_cpu:
135 .long 0x0
136.Lpref_arr_ptr:
137 .long zfcpdump_prefix_array
138#endif /* CONFIG_ZFCPDUMP */
139.Lparmaddr: 69.Lparmaddr:
140 .quad PARMAREA 70 .quad PARMAREA
141 .align 64 71 .align 64
@@ -146,11 +76,11 @@ startup_continue:
146 .long 0x80000000,0,0,0 # invalid access-list entries 76 .long 0x80000000,0,0,0 # invalid access-list entries
147 .endr 77 .endr
148 78
149 .org 0x12000
150 .globl _ehead 79 .globl _ehead
151_ehead: 80_ehead:
81
152#ifdef CONFIG_SHARED_KERNEL 82#ifdef CONFIG_SHARED_KERNEL
153 .org 0x100000 83 .org 0x100000 - 0x11000 # head.o ends at 0x11000
154#endif 84#endif
155 85
156# 86#
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 4d73296fed74..a689070be287 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -15,6 +15,7 @@
15#include <linux/reboot.h> 15#include <linux/reboot.h>
16#include <linux/ctype.h> 16#include <linux/ctype.h>
17#include <linux/fs.h> 17#include <linux/fs.h>
18#include <linux/gfp.h>
18#include <asm/ipl.h> 19#include <asm/ipl.h>
19#include <asm/smp.h> 20#include <asm/smp.h>
20#include <asm/setup.h> 21#include <asm/setup.h>
@@ -402,8 +403,9 @@ static ssize_t sys_ipl_device_show(struct kobject *kobj,
402static struct kobj_attribute sys_ipl_device_attr = 403static struct kobj_attribute sys_ipl_device_attr =
403 __ATTR(device, S_IRUGO, sys_ipl_device_show, NULL); 404 __ATTR(device, S_IRUGO, sys_ipl_device_show, NULL);
404 405
405static ssize_t ipl_parameter_read(struct kobject *kobj, struct bin_attribute *attr, 406static ssize_t ipl_parameter_read(struct file *filp, struct kobject *kobj,
406 char *buf, loff_t off, size_t count) 407 struct bin_attribute *attr, char *buf,
408 loff_t off, size_t count)
407{ 409{
408 return memory_read_from_buffer(buf, count, &off, IPL_PARMBLOCK_START, 410 return memory_read_from_buffer(buf, count, &off, IPL_PARMBLOCK_START,
409 IPL_PARMBLOCK_SIZE); 411 IPL_PARMBLOCK_SIZE);
@@ -418,8 +420,9 @@ static struct bin_attribute ipl_parameter_attr = {
418 .read = &ipl_parameter_read, 420 .read = &ipl_parameter_read,
419}; 421};
420 422
421static ssize_t ipl_scp_data_read(struct kobject *kobj, struct bin_attribute *attr, 423static ssize_t ipl_scp_data_read(struct file *filp, struct kobject *kobj,
422 char *buf, loff_t off, size_t count) 424 struct bin_attribute *attr, char *buf,
425 loff_t off, size_t count)
423{ 426{
424 unsigned int size = IPL_PARMBLOCK_START->ipl_info.fcp.scp_data_len; 427 unsigned int size = IPL_PARMBLOCK_START->ipl_info.fcp.scp_data_len;
425 void *scp_data = &IPL_PARMBLOCK_START->ipl_info.fcp.scp_data; 428 void *scp_data = &IPL_PARMBLOCK_START->ipl_info.fcp.scp_data;
@@ -553,7 +556,7 @@ out:
553 return rc; 556 return rc;
554} 557}
555 558
556static void ipl_run(struct shutdown_trigger *trigger) 559static void __ipl_run(void *unused)
557{ 560{
558 diag308(DIAG308_IPL, NULL); 561 diag308(DIAG308_IPL, NULL);
559 if (MACHINE_IS_VM) 562 if (MACHINE_IS_VM)
@@ -562,6 +565,11 @@ static void ipl_run(struct shutdown_trigger *trigger)
562 reipl_ccw_dev(&ipl_info.data.ccw.dev_id); 565 reipl_ccw_dev(&ipl_info.data.ccw.dev_id);
563} 566}
564 567
568static void ipl_run(struct shutdown_trigger *trigger)
569{
570 smp_switch_to_ipl_cpu(__ipl_run, NULL);
571}
572
565static int __init ipl_init(void) 573static int __init ipl_init(void)
566{ 574{
567 int rc; 575 int rc;
@@ -688,7 +696,7 @@ static struct kobj_attribute sys_reipl_ccw_vmparm_attr =
688 696
689/* FCP reipl device attributes */ 697/* FCP reipl device attributes */
690 698
691static ssize_t reipl_fcp_scpdata_read(struct kobject *kobj, 699static ssize_t reipl_fcp_scpdata_read(struct file *filp, struct kobject *kobj,
692 struct bin_attribute *attr, 700 struct bin_attribute *attr,
693 char *buf, loff_t off, size_t count) 701 char *buf, loff_t off, size_t count)
694{ 702{
@@ -698,7 +706,7 @@ static ssize_t reipl_fcp_scpdata_read(struct kobject *kobj,
698 return memory_read_from_buffer(buf, count, &off, scp_data, size); 706 return memory_read_from_buffer(buf, count, &off, scp_data, size);
699} 707}
700 708
701static ssize_t reipl_fcp_scpdata_write(struct kobject *kobj, 709static ssize_t reipl_fcp_scpdata_write(struct file *filp, struct kobject *kobj,
702 struct bin_attribute *attr, 710 struct bin_attribute *attr,
703 char *buf, loff_t off, size_t count) 711 char *buf, loff_t off, size_t count)
704{ 712{
@@ -1039,7 +1047,7 @@ static void get_ipl_string(char *dst, struct ipl_parameter_block *ipb,
1039 sprintf(dst + pos, " PARM %s", vmparm); 1047 sprintf(dst + pos, " PARM %s", vmparm);
1040} 1048}
1041 1049
1042static void reipl_run(struct shutdown_trigger *trigger) 1050static void __reipl_run(void *unused)
1043{ 1051{
1044 struct ccw_dev_id devid; 1052 struct ccw_dev_id devid;
1045 static char buf[128]; 1053 static char buf[128];
@@ -1087,6 +1095,11 @@ static void reipl_run(struct shutdown_trigger *trigger)
1087 disabled_wait((unsigned long) __builtin_return_address(0)); 1095 disabled_wait((unsigned long) __builtin_return_address(0));
1088} 1096}
1089 1097
1098static void reipl_run(struct shutdown_trigger *trigger)
1099{
1100 smp_switch_to_ipl_cpu(__reipl_run, NULL);
1101}
1102
1090static void reipl_block_ccw_init(struct ipl_parameter_block *ipb) 1103static void reipl_block_ccw_init(struct ipl_parameter_block *ipb)
1091{ 1104{
1092 ipb->hdr.len = IPL_PARM_BLK_CCW_LEN; 1105 ipb->hdr.len = IPL_PARM_BLK_CCW_LEN;
@@ -1369,20 +1382,18 @@ static struct kobj_attribute dump_type_attr =
1369 1382
1370static struct kset *dump_kset; 1383static struct kset *dump_kset;
1371 1384
1372static void dump_run(struct shutdown_trigger *trigger) 1385static void __dump_run(void *unused)
1373{ 1386{
1374 struct ccw_dev_id devid; 1387 struct ccw_dev_id devid;
1375 static char buf[100]; 1388 static char buf[100];
1376 1389
1377 switch (dump_method) { 1390 switch (dump_method) {
1378 case DUMP_METHOD_CCW_CIO: 1391 case DUMP_METHOD_CCW_CIO:
1379 smp_send_stop();
1380 devid.devno = dump_block_ccw->ipl_info.ccw.devno; 1392 devid.devno = dump_block_ccw->ipl_info.ccw.devno;
1381 devid.ssid = 0; 1393 devid.ssid = 0;
1382 reipl_ccw_dev(&devid); 1394 reipl_ccw_dev(&devid);
1383 break; 1395 break;
1384 case DUMP_METHOD_CCW_VM: 1396 case DUMP_METHOD_CCW_VM:
1385 smp_send_stop();
1386 sprintf(buf, "STORE STATUS"); 1397 sprintf(buf, "STORE STATUS");
1387 __cpcmd(buf, NULL, 0, NULL); 1398 __cpcmd(buf, NULL, 0, NULL);
1388 sprintf(buf, "IPL %X", dump_block_ccw->ipl_info.ccw.devno); 1399 sprintf(buf, "IPL %X", dump_block_ccw->ipl_info.ccw.devno);
@@ -1396,10 +1407,17 @@ static void dump_run(struct shutdown_trigger *trigger)
1396 diag308(DIAG308_SET, dump_block_fcp); 1407 diag308(DIAG308_SET, dump_block_fcp);
1397 diag308(DIAG308_DUMP, NULL); 1408 diag308(DIAG308_DUMP, NULL);
1398 break; 1409 break;
1399 case DUMP_METHOD_NONE: 1410 default:
1400 return; 1411 break;
1401 } 1412 }
1402 printk(KERN_EMERG "Dump failed!\n"); 1413}
1414
1415static void dump_run(struct shutdown_trigger *trigger)
1416{
1417 if (dump_method == DUMP_METHOD_NONE)
1418 return;
1419 smp_send_stop();
1420 smp_switch_to_ipl_cpu(__dump_run, NULL);
1403} 1421}
1404 1422
1405static int __init dump_ccw_init(void) 1423static int __init dump_ccw_init(void)
@@ -1577,7 +1595,7 @@ static void vmcmd_run(struct shutdown_trigger *trigger)
1577static int vmcmd_init(void) 1595static int vmcmd_init(void)
1578{ 1596{
1579 if (!MACHINE_IS_VM) 1597 if (!MACHINE_IS_VM)
1580 return -ENOTSUPP; 1598 return -EOPNOTSUPP;
1581 vmcmd_kset = kset_create_and_add("vmcmd", NULL, firmware_kobj); 1599 vmcmd_kset = kset_create_and_add("vmcmd", NULL, firmware_kobj);
1582 if (!vmcmd_kset) 1600 if (!vmcmd_kset)
1583 return -ENOMEM; 1601 return -ENOMEM;
@@ -1595,7 +1613,7 @@ static void stop_run(struct shutdown_trigger *trigger)
1595{ 1613{
1596 if (strcmp(trigger->name, ON_PANIC_STR) == 0) 1614 if (strcmp(trigger->name, ON_PANIC_STR) == 0)
1597 disabled_wait((unsigned long) __builtin_return_address(0)); 1615 disabled_wait((unsigned long) __builtin_return_address(0));
1598 while (signal_processor(smp_processor_id(), sigp_stop) == sigp_busy) 1616 while (sigp(smp_processor_id(), sigp_stop) == sigp_busy)
1599 cpu_relax(); 1617 cpu_relax();
1600 for (;;); 1618 for (;;);
1601} 1619}
@@ -1902,7 +1920,6 @@ void __init ipl_update_parameters(void)
1902void __init ipl_save_parameters(void) 1920void __init ipl_save_parameters(void)
1903{ 1921{
1904 struct cio_iplinfo iplinfo; 1922 struct cio_iplinfo iplinfo;
1905 unsigned int *ipl_ptr;
1906 void *src, *dst; 1923 void *src, *dst;
1907 1924
1908 if (cio_get_iplinfo(&iplinfo)) 1925 if (cio_get_iplinfo(&iplinfo))
@@ -1913,11 +1930,10 @@ void __init ipl_save_parameters(void)
1913 if (!iplinfo.is_qdio) 1930 if (!iplinfo.is_qdio)
1914 return; 1931 return;
1915 ipl_flags |= IPL_PARMBLOCK_VALID; 1932 ipl_flags |= IPL_PARMBLOCK_VALID;
1916 ipl_ptr = (unsigned int *)__LC_IPL_PARMBLOCK_PTR; 1933 src = (void *)(unsigned long)S390_lowcore.ipl_parmblock_ptr;
1917 src = (void *)(unsigned long)*ipl_ptr;
1918 dst = (void *)IPL_PARMBLOCK_ORIGIN; 1934 dst = (void *)IPL_PARMBLOCK_ORIGIN;
1919 memmove(dst, src, PAGE_SIZE); 1935 memmove(dst, src, PAGE_SIZE);
1920 *ipl_ptr = IPL_PARMBLOCK_ORIGIN; 1936 S390_lowcore.ipl_parmblock_ptr = IPL_PARMBLOCK_ORIGIN;
1921} 1937}
1922 1938
1923static LIST_HEAD(rcall); 1939static LIST_HEAD(rcall);
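
The sysfs binary-attribute callbacks above pick up a leading struct file * argument, matching the updated struct bin_attribute read/write prototypes in this kernel. A minimal hedged sketch of a read-only binary attribute written against the new signature; the attribute name and backing buffer are made up for illustration:

#include <linux/kobject.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/sysfs.h>

/* Hypothetical backing buffer, not part of this patch. */
static char example_blob[64];

static ssize_t example_read(struct file *filp, struct kobject *kobj,
			    struct bin_attribute *attr, char *buf,
			    loff_t off, size_t count)
{
	/* Copy at most count bytes starting at off out of the blob. */
	return memory_read_from_buffer(buf, count, &off, example_blob,
				       sizeof(example_blob));
}

static struct bin_attribute example_attr = {
	.attr	= { .name = "example", .mode = S_IRUGO },
	.size	= sizeof(example_blob),
	.read	= example_read,
};

Such an attribute would then be registered with sysfs_create_bin_file(parent_kobj, &example_attr).
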
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 86783efa24ee..2a3d2bf6f083 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -29,6 +29,7 @@
29#include <asm/cacheflush.h> 29#include <asm/cacheflush.h>
30#include <asm/sections.h> 30#include <asm/sections.h>
31#include <linux/module.h> 31#include <linux/module.h>
32#include <linux/slab.h>
32 33
33DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; 34DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
34DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); 35DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
@@ -62,6 +63,8 @@ int __kprobes is_prohibited_opcode(kprobe_opcode_t *instruction)
62 case 0x0b: /* bsm */ 63 case 0x0b: /* bsm */
63 case 0x83: /* diag */ 64 case 0x83: /* diag */
64 case 0x44: /* ex */ 65 case 0x44: /* ex */
66 case 0xac: /* stnsm */
67 case 0xad: /* stosm */
65 return -EINVAL; 68 return -EINVAL;
66 } 69 }
67 switch (*(__u16 *) instruction) { 70 switch (*(__u16 *) instruction) {
@@ -71,6 +74,7 @@ int __kprobes is_prohibited_opcode(kprobe_opcode_t *instruction)
71 case 0xb258: /* bsg */ 74 case 0xb258: /* bsg */
72 case 0xb218: /* pc */ 75 case 0xb218: /* pc */
73 case 0xb228: /* pt */ 76 case 0xb228: /* pt */
77 case 0xb98d: /* epsw */
74 return -EINVAL; 78 return -EINVAL;
75 } 79 }
76 return 0; 80 return 0;
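
With stnsm, stosm and epsw added to the prohibited-opcode checks above, a probe placed on one of those instructions is refused at registration time, exactly like ex or diag. A minimal hedged sketch of how that surfaces to a kprobes user; the probed symbol is an arbitrary example:

#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>

static int example_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("hit %s\n", p->symbol_name);
	return 0;			/* continue normal execution */
}

static struct kprobe example_kp = {
	.symbol_name	= "do_fork",	/* arbitrary example symbol */
	.pre_handler	= example_pre,
};

static int __init example_init(void)
{
	/*
	 * register_kprobe() returns -EINVAL when the instruction at the
	 * probe point is one of the opcodes rejected above.
	 */
	return register_kprobe(&example_kp);
}

static void __exit example_exit(void)
{
	unregister_kprobe(&example_kp);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
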
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 131d7ee8b416..a922d51df6bf 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -54,11 +54,11 @@ void machine_shutdown(void)
54{ 54{
55} 55}
56 56
57void machine_kexec(struct kimage *image) 57static void __machine_kexec(void *data)
58{ 58{
59 relocate_kernel_t data_mover; 59 relocate_kernel_t data_mover;
60 struct kimage *image = data;
60 61
61 smp_send_stop();
62 pfault_fini(); 62 pfault_fini();
63 s390_reset_system(); 63 s390_reset_system();
64 64
@@ -68,3 +68,9 @@ void machine_kexec(struct kimage *image)
68 (*data_mover)(&image->head, image->start); 68 (*data_mover)(&image->head, image->start);
69 for (;;); 69 for (;;);
70} 70}
71
72void machine_kexec(struct kimage *image)
73{
74 smp_send_stop();
75 smp_switch_to_ipl_cpu(__machine_kexec, image);
76}
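
machine_kexec() now follows the same split already used in ipl.c: stop the other CPUs, then hand the actual work to the IPL CPU via smp_switch_to_ipl_cpu(). The two-step shape generalizes to any shutdown-time action; a hedged sketch with a hypothetical action (declarations assumed to come from asm/smp.h):

/* Hypothetical shutdown action following the same pattern. */
static void __example_shutdown(void *data)
{
	/* Runs on the IPL (boot) CPU once the others are stopped. */
	(void) data;
}

static void example_shutdown(void *data)
{
	smp_send_stop();			/* quiesce the other CPUs */
	smp_switch_to_ipl_cpu(__example_shutdown, data);
}
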
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 639380a0c45c..22cfd634c355 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -55,8 +55,10 @@ void *module_alloc(unsigned long size)
55/* Free memory returned from module_alloc */ 55/* Free memory returned from module_alloc */
56void module_free(struct module *mod, void *module_region) 56void module_free(struct module *mod, void *module_region)
57{ 57{
58 vfree(mod->arch.syminfo); 58 if (mod) {
59 mod->arch.syminfo = NULL; 59 vfree(mod->arch.syminfo);
60 mod->arch.syminfo = NULL;
61 }
60 vfree(module_region); 62 vfree(module_region);
61} 63}
62 64
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 015e27da40eb..ac151399ef34 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -255,7 +255,8 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
255 int umode; 255 int umode;
256 256
257 nmi_enter(); 257 nmi_enter();
258 s390_idle_check(); 258 s390_idle_check(regs, S390_lowcore.mcck_clock,
259 S390_lowcore.mcck_enter_timer);
259 260
260 mci = (struct mci *) &S390_lowcore.mcck_interruption_code; 261 mci = (struct mci *) &S390_lowcore.mcck_interruption_code;
261 mcck = &__get_cpu_var(cpu_mcck); 262 mcck = &__get_cpu_var(cpu_mcck);
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 00b6d1d292f2..1039fdea15b5 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -16,9 +16,9 @@
16#include <linux/fs.h> 16#include <linux/fs.h>
17#include <linux/smp.h> 17#include <linux/smp.h>
18#include <linux/stddef.h> 18#include <linux/stddef.h>
19#include <linux/slab.h>
19#include <linux/unistd.h> 20#include <linux/unistd.h>
20#include <linux/ptrace.h> 21#include <linux/ptrace.h>
21#include <linux/slab.h>
22#include <linux/vmalloc.h> 22#include <linux/vmalloc.h>
23#include <linux/user.h> 23#include <linux/user.h>
24#include <linux/interrupt.h> 24#include <linux/interrupt.h>
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 0729f36c2fe3..ecb2d02b02e4 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -18,24 +18,42 @@
18#include <asm/lowcore.h> 18#include <asm/lowcore.h>
19#include <asm/param.h> 19#include <asm/param.h>
20 20
21static DEFINE_PER_CPU(struct cpuid, cpu_id);
22
23/*
24 * cpu_init - initializes state that is per-CPU.
25 */
26void __cpuinit cpu_init(void)
27{
28 struct cpuid *id = &per_cpu(cpu_id, smp_processor_id());
29
30 get_cpu_id(id);
31 atomic_inc(&init_mm.mm_count);
32 current->active_mm = &init_mm;
33 BUG_ON(current->mm);
34 enter_lazy_tlb(&init_mm, current);
35}
36
37/*
38 * print_cpu_info - print basic information about a cpu
39 */
21void __cpuinit print_cpu_info(void) 40void __cpuinit print_cpu_info(void)
22{ 41{
42 struct cpuid *id = &per_cpu(cpu_id, smp_processor_id());
43
23 pr_info("Processor %d started, address %d, identification %06X\n", 44 pr_info("Processor %d started, address %d, identification %06X\n",
24 S390_lowcore.cpu_nr, S390_lowcore.cpu_addr, 45 S390_lowcore.cpu_nr, S390_lowcore.cpu_addr, id->ident);
25 S390_lowcore.cpu_id.ident);
26} 46}
27 47
28/* 48/*
29 * show_cpuinfo - Get information on one CPU for use by procfs. 49 * show_cpuinfo - Get information on one CPU for use by procfs.
30 */ 50 */
31
32static int show_cpuinfo(struct seq_file *m, void *v) 51static int show_cpuinfo(struct seq_file *m, void *v)
33{ 52{
34 static const char *hwcap_str[10] = { 53 static const char *hwcap_str[10] = {
35 "esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp", 54 "esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp",
36 "edat", "etf3eh", "highgprs" 55 "edat", "etf3eh", "highgprs"
37 }; 56 };
38 struct _lowcore *lc;
39 unsigned long n = (unsigned long) v - 1; 57 unsigned long n = (unsigned long) v - 1;
40 int i; 58 int i;
41 59
@@ -55,19 +73,12 @@ static int show_cpuinfo(struct seq_file *m, void *v)
55 } 73 }
56 74
57 if (cpu_online(n)) { 75 if (cpu_online(n)) {
58#ifdef CONFIG_SMP 76 struct cpuid *id = &per_cpu(cpu_id, n);
59 lc = (smp_processor_id() == n) ?
60 &S390_lowcore : lowcore_ptr[n];
61#else
62 lc = &S390_lowcore;
63#endif
64 seq_printf(m, "processor %li: " 77 seq_printf(m, "processor %li: "
65 "version = %02X, " 78 "version = %02X, "
66 "identification = %06X, " 79 "identification = %06X, "
67 "machine = %04X\n", 80 "machine = %04X\n",
68 n, lc->cpu_id.version, 81 n, id->version, id->ident, id->machine);
69 lc->cpu_id.ident,
70 lc->cpu_id.machine);
71 } 82 }
72 preempt_enable(); 83 preempt_enable();
73 return 0; 84 return 0;
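
cpu_init() and /proc/cpuinfo now read the CPU identification from a per-CPU variable instead of reaching into another CPU's lowcore. The underlying per-CPU idiom, as a hedged sketch; the variable and function names are illustrative, and the header providing struct cpuid/get_cpu_id() is an assumption:

#include <linux/percpu.h>
#include <asm/processor.h>	/* assumed: struct cpuid, get_cpu_id() on s390 */

static DEFINE_PER_CPU(struct cpuid, example_cpu_id);

static void example_record_cpu_id(void)
{
	/* get_cpu_var() disables preemption while this CPU's copy is used. */
	struct cpuid *id = &get_cpu_var(example_cpu_id);

	get_cpu_id(id);			/* STIDP: store CPU identification */
	put_cpu_var(example_cpu_id);
}

Remote, read-only access from another CPU — as show_cpuinfo() does above — uses per_cpu(example_cpu_id, n) with preemption disabled.
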
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 7cf464234419..83339d33c4b1 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -57,6 +57,7 @@
57enum s390_regset { 57enum s390_regset {
58 REGSET_GENERAL, 58 REGSET_GENERAL,
59 REGSET_FP, 59 REGSET_FP,
60 REGSET_LAST_BREAK,
60 REGSET_GENERAL_EXTENDED, 61 REGSET_GENERAL_EXTENDED,
61}; 62};
62 63
@@ -381,6 +382,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
381 copied += sizeof(unsigned long); 382 copied += sizeof(unsigned long);
382 } 383 }
383 return 0; 384 return 0;
385 case PTRACE_GET_LAST_BREAK:
386 put_user(task_thread_info(child)->last_break,
387 (unsigned long __user *) data);
388 return 0;
384 default: 389 default:
385 /* Removing high order bit from addr (only for 31 bit). */ 390 /* Removing high order bit from addr (only for 31 bit). */
386 addr &= PSW_ADDR_INSN; 391 addr &= PSW_ADDR_INSN;
@@ -633,6 +638,10 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
633 copied += sizeof(unsigned int); 638 copied += sizeof(unsigned int);
634 } 639 }
635 return 0; 640 return 0;
641 case PTRACE_GET_LAST_BREAK:
642 put_user(task_thread_info(child)->last_break,
643 (unsigned int __user *) data);
644 return 0;
636 } 645 }
637 return compat_ptrace_request(child, request, addr, data); 646 return compat_ptrace_request(child, request, addr, data);
638} 647}
@@ -640,7 +649,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
640 649
641asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) 650asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
642{ 651{
643 long ret; 652 long ret = 0;
644 653
645 /* Do the secure computing check first. */ 654 /* Do the secure computing check first. */
646 secure_computing(regs->gprs[2]); 655 secure_computing(regs->gprs[2]);
@@ -649,7 +658,6 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
649 * The sysc_tracesys code in entry.S stored the system 658 * The sysc_tracesys code in entry.S stored the system
650 * call number to gprs[2]. 659 * call number to gprs[2].
651 */ 660 */
652 ret = regs->gprs[2];
653 if (test_thread_flag(TIF_SYSCALL_TRACE) && 661 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
654 (tracehook_report_syscall_entry(regs) || 662 (tracehook_report_syscall_entry(regs) ||
655 regs->gprs[2] >= NR_syscalls)) { 663 regs->gprs[2] >= NR_syscalls)) {
@@ -671,7 +679,7 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
671 regs->gprs[2], regs->orig_gpr2, 679 regs->gprs[2], regs->orig_gpr2,
672 regs->gprs[3], regs->gprs[4], 680 regs->gprs[3], regs->gprs[4],
673 regs->gprs[5]); 681 regs->gprs[5]);
674 return ret; 682 return ret ?: regs->gprs[2];
675} 683}
676 684
677asmlinkage void do_syscall_trace_exit(struct pt_regs *regs) 685asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
@@ -798,6 +806,28 @@ static int s390_fpregs_set(struct task_struct *target,
798 return rc; 806 return rc;
799} 807}
800 808
809#ifdef CONFIG_64BIT
810
811static int s390_last_break_get(struct task_struct *target,
812 const struct user_regset *regset,
813 unsigned int pos, unsigned int count,
814 void *kbuf, void __user *ubuf)
815{
816 if (count > 0) {
817 if (kbuf) {
818 unsigned long *k = kbuf;
819 *k = task_thread_info(target)->last_break;
820 } else {
821 unsigned long __user *u = ubuf;
822 if (__put_user(task_thread_info(target)->last_break, u))
823 return -EFAULT;
824 }
825 }
826 return 0;
827}
828
829#endif
830
801static const struct user_regset s390_regsets[] = { 831static const struct user_regset s390_regsets[] = {
802 [REGSET_GENERAL] = { 832 [REGSET_GENERAL] = {
803 .core_note_type = NT_PRSTATUS, 833 .core_note_type = NT_PRSTATUS,
@@ -815,6 +845,15 @@ static const struct user_regset s390_regsets[] = {
815 .get = s390_fpregs_get, 845 .get = s390_fpregs_get,
816 .set = s390_fpregs_set, 846 .set = s390_fpregs_set,
817 }, 847 },
848#ifdef CONFIG_64BIT
849 [REGSET_LAST_BREAK] = {
850 .core_note_type = NT_S390_LAST_BREAK,
851 .n = 1,
852 .size = sizeof(long),
853 .align = sizeof(long),
854 .get = s390_last_break_get,
855 },
856#endif
818}; 857};
819 858
820static const struct user_regset_view user_s390_view = { 859static const struct user_regset_view user_s390_view = {
@@ -949,6 +988,27 @@ static int s390_compat_regs_high_set(struct task_struct *target,
949 return rc; 988 return rc;
950} 989}
951 990
991static int s390_compat_last_break_get(struct task_struct *target,
992 const struct user_regset *regset,
993 unsigned int pos, unsigned int count,
994 void *kbuf, void __user *ubuf)
995{
996 compat_ulong_t last_break;
997
998 if (count > 0) {
999 last_break = task_thread_info(target)->last_break;
1000 if (kbuf) {
1001 unsigned long *k = kbuf;
1002 *k = last_break;
1003 } else {
1004 unsigned long __user *u = ubuf;
1005 if (__put_user(last_break, u))
1006 return -EFAULT;
1007 }
1008 }
1009 return 0;
1010}
1011
952static const struct user_regset s390_compat_regsets[] = { 1012static const struct user_regset s390_compat_regsets[] = {
953 [REGSET_GENERAL] = { 1013 [REGSET_GENERAL] = {
954 .core_note_type = NT_PRSTATUS, 1014 .core_note_type = NT_PRSTATUS,
@@ -966,6 +1026,13 @@ static const struct user_regset s390_compat_regsets[] = {
966 .get = s390_fpregs_get, 1026 .get = s390_fpregs_get,
967 .set = s390_fpregs_set, 1027 .set = s390_fpregs_set,
968 }, 1028 },
1029 [REGSET_LAST_BREAK] = {
1030 .core_note_type = NT_S390_LAST_BREAK,
1031 .n = 1,
1032 .size = sizeof(long),
1033 .align = sizeof(long),
1034 .get = s390_compat_last_break_get,
1035 },
969 [REGSET_GENERAL_EXTENDED] = { 1036 [REGSET_GENERAL_EXTENDED] = {
970 .core_note_type = NT_S390_HIGH_GPRS, 1037 .core_note_type = NT_S390_HIGH_GPRS,
971 .n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t), 1038 .n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t),
@@ -992,3 +1059,61 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
992#endif 1059#endif
993 return &user_s390_view; 1060 return &user_s390_view;
994} 1061}
1062
1063static const char *gpr_names[NUM_GPRS] = {
1064 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
1065 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
1066};
1067
1068unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset)
1069{
1070 if (offset >= NUM_GPRS)
1071 return 0;
1072 return regs->gprs[offset];
1073}
1074
1075int regs_query_register_offset(const char *name)
1076{
1077 unsigned long offset;
1078
1079 if (!name || *name != 'r')
1080 return -EINVAL;
1081 if (strict_strtoul(name + 1, 10, &offset))
1082 return -EINVAL;
1083 if (offset >= NUM_GPRS)
1084 return -EINVAL;
1085 return offset;
1086}
1087
1088const char *regs_query_register_name(unsigned int offset)
1089{
1090 if (offset >= NUM_GPRS)
1091 return NULL;
1092 return gpr_names[offset];
1093}
1094
1095static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
1096{
1097 unsigned long ksp = kernel_stack_pointer(regs);
1098
1099 return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1));
1100}
1101
1102/**
1103 * regs_get_kernel_stack_nth() - get Nth entry of the stack
1104 * @regs:pt_regs which contains kernel stack pointer.
1105 * @n:stack entry number.
1106 *
1107 * regs_get_kernel_stack_nth() returns the @n-th entry of the kernel stack which
1108 * is specified by @regs. If the @n-th entry is NOT in the kernel stack,
1109 * this returns 0.
1110 */
1111unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
1112{
1113 unsigned long addr;
1114
1115 addr = kernel_stack_pointer(regs) + n * sizeof(long);
1116 if (!regs_within_kernel_stack(regs, addr))
1117 return 0;
1118 return *(unsigned long *)addr;
1119}
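
PTRACE_GET_LAST_BREAK copies the tracee's last breaking-event address into a user-supplied buffer passed as the data argument. A hedged user-space sketch — the request constant is expected to come from the s390 asm/ptrace.h, and the interplay between sys/ptrace.h and asm/ptrace.h varies by libc, so treat this as a sketch rather than a recipe:

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>

#include <asm/ptrace.h>		/* PTRACE_GET_LAST_BREAK on s390 */

/* Read the last breaking-event address of an already-traced, stopped task. */
static int read_last_break(pid_t pid, unsigned long *last_break)
{
	/* The kernel put_user()s the value into the buffer passed as data. */
	if (ptrace(PTRACE_GET_LAST_BREAK, pid, NULL, last_break) == -1) {
		perror("PTRACE_GET_LAST_BREAK");
		return -1;
	}
	return 0;
}
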
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S
index 2f481cc3d1c9..cb899d9f8505 100644
--- a/arch/s390/kernel/reipl.S
+++ b/arch/s390/kernel/reipl.S
@@ -6,7 +6,7 @@
6 * Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com) 6 * Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com)
7 */ 7 */
8 8
9#include <asm/lowcore.h> 9#include <asm/asm-offsets.h>
10 10
11# 11#
12# do_reipl_asm 12# do_reipl_asm
diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S
index 774147824c3d..5e73dee63baa 100644
--- a/arch/s390/kernel/reipl64.S
+++ b/arch/s390/kernel/reipl64.S
@@ -4,7 +4,7 @@
4 * Denis Joseph Barrow, 4 * Denis Joseph Barrow,
5 */ 5 */
6 6
7#include <asm/lowcore.h> 7#include <asm/asm-offsets.h>
8 8
9# 9#
10# do_reipl_asm 10# do_reipl_asm
diff --git a/arch/s390/kernel/s390_ext.c b/arch/s390/kernel/s390_ext.c
index 59618bcd99b7..9ce641b5291f 100644
--- a/arch/s390/kernel/s390_ext.c
+++ b/arch/s390/kernel/s390_ext.c
@@ -120,7 +120,8 @@ void __irq_entry do_extint(struct pt_regs *regs, unsigned short code)
120 struct pt_regs *old_regs; 120 struct pt_regs *old_regs;
121 121
122 old_regs = set_irq_regs(regs); 122 old_regs = set_irq_regs(regs);
123 s390_idle_check(); 123 s390_idle_check(regs, S390_lowcore.int_clock,
124 S390_lowcore.async_enter_timer);
124 irq_enter(); 125 irq_enter();
125 if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator) 126 if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
126 /* Serve timer interrupts first. */ 127 /* Serve timer interrupts first. */
diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S
index e27ca63076d1..2e82fdd89320 100644
--- a/arch/s390/kernel/sclp.S
+++ b/arch/s390/kernel/sclp.S
@@ -9,8 +9,10 @@
9 */ 9 */
10 10
11LC_EXT_NEW_PSW = 0x58 # addr of ext int handler 11LC_EXT_NEW_PSW = 0x58 # addr of ext int handler
12LC_EXT_NEW_PSW_64 = 0x1b0 # addr of ext int handler 64 bit
12LC_EXT_INT_PARAM = 0x80 # addr of ext int parameter 13LC_EXT_INT_PARAM = 0x80 # addr of ext int parameter
13LC_EXT_INT_CODE = 0x86 # addr of ext int code 14LC_EXT_INT_CODE = 0x86 # addr of ext int code
15LC_AR_MODE_ID = 0xa3
14 16
15# 17#
16# Subroutine which waits synchronously until either an external interruption 18# Subroutine which waits synchronously until either an external interruption
@@ -30,8 +32,16 @@ _sclp_wait_int:
30.LbaseS1: 32.LbaseS1:
31 ahi %r15,-96 # create stack frame 33 ahi %r15,-96 # create stack frame
32 la %r8,LC_EXT_NEW_PSW # register int handler 34 la %r8,LC_EXT_NEW_PSW # register int handler
33 mvc .LoldpswS1-.LbaseS1(8,%r13),0(%r8) 35 la %r9,.LextpswS1-.LbaseS1(%r13)
34 mvc 0(8,%r8),.LextpswS1-.LbaseS1(%r13) 36#ifdef CONFIG_64BIT
37 tm LC_AR_MODE_ID,1
38 jno .Lesa1
39 la %r8,LC_EXT_NEW_PSW_64 # register int handler 64 bit
40 la %r9,.LextpswS1_64-.LbaseS1(%r13)
41.Lesa1:
42#endif
43 mvc .LoldpswS1-.LbaseS1(16,%r13),0(%r8)
44 mvc 0(16,%r8),0(%r9)
35 lhi %r6,0x0200 # cr mask for ext int (cr0.54) 45 lhi %r6,0x0200 # cr mask for ext int (cr0.54)
36 ltr %r2,%r2 46 ltr %r2,%r2
37 jz .LsetctS1 47 jz .LsetctS1
@@ -64,15 +74,19 @@ _sclp_wait_int:
64.LtimeoutS1: 74.LtimeoutS1:
65 lctl %c0,%c0,.LctlS1-.LbaseS1(%r13) # restore interrupt setting 75 lctl %c0,%c0,.LctlS1-.LbaseS1(%r13) # restore interrupt setting
66 # restore old handler 76 # restore old handler
67 mvc 0(8,%r8),.LoldpswS1-.LbaseS1(%r13) 77 mvc 0(16,%r8),.LoldpswS1-.LbaseS1(%r13)
68 lm %r6,%r15,120(%r15) # restore registers 78 lm %r6,%r15,120(%r15) # restore registers
69 br %r14 # return to caller 79 br %r14 # return to caller
70 80
71 .align 8 81 .align 8
72.LoldpswS1: 82.LoldpswS1:
73 .long 0, 0 # old ext int PSW 83 .long 0, 0, 0, 0 # old ext int PSW
74.LextpswS1: 84.LextpswS1:
75 .long 0x00080000, 0x80000000+.LwaitS1 # PSW to handle ext int 85 .long 0x00080000, 0x80000000+.LwaitS1 # PSW to handle ext int
86#ifdef CONFIG_64BIT
87.LextpswS1_64:
88 .quad 0x0000000180000000, .LwaitS1 # PSW to handle ext int, 64 bit
89#endif
76.LwaitpswS1: 90.LwaitpswS1:
77 .long 0x010a0000, 0x00000000+.LloopS1 # PSW to wait for ext int 91 .long 0x010a0000, 0x00000000+.LloopS1 # PSW to wait for ext int
78.LtimeS1: 92.LtimeS1:
@@ -221,7 +235,7 @@ _sclp_print:
221 lh %r9,0(%r8) # update sccb length 235 lh %r9,0(%r8) # update sccb length
222 ar %r9,%r6 236 ar %r9,%r6
223 sth %r9,0(%r8) 237 sth %r9,0(%r8)
224 ar %r7,%r6 # update current mto adress 238 ar %r7,%r6 # update current mto address
225 ltr %r0,%r0 # more characters? 239 ltr %r0,%r0 # more characters?
226 jnz .LinitmtoS4 240 jnz .LinitmtoS4
227 l %r2,.LwritedataS4-.LbaseS4(%r13)# write data 241 l %r2,.LwritedataS4-.LbaseS4(%r13)# write data
@@ -250,6 +264,13 @@ _sclp_print:
250_sclp_print_early: 264_sclp_print_early:
251 stm %r6,%r15,24(%r15) # save registers 265 stm %r6,%r15,24(%r15) # save registers
252 ahi %r15,-96 # create stack frame 266 ahi %r15,-96 # create stack frame
267#ifdef CONFIG_64BIT
268 tm LC_AR_MODE_ID,1
269 jno .Lesa2
270 ahi %r15,-80
271 stmh %r6,%r15,96(%r15) # store upper register halves
272.Lesa2:
273#endif
253 lr %r10,%r2 # save string pointer 274 lr %r10,%r2 # save string pointer
254 lhi %r2,0 275 lhi %r2,0
255 bras %r14,_sclp_setup # enable console 276 bras %r14,_sclp_setup # enable console
@@ -262,6 +283,13 @@ _sclp_print_early:
262 lhi %r2,1 283 lhi %r2,1
263 bras %r14,_sclp_setup # disable console 284 bras %r14,_sclp_setup # disable console
264.LendS5: 285.LendS5:
286#ifdef CONFIG_64BIT
287 tm LC_AR_MODE_ID,1
288 jno .Lesa3
289 lmh %r6,%r15,96(%r15) # restore upper register halves
290 ahi %r15,80
291.Lesa3:
292#endif
265 lm %r6,%r15,120(%r15) # restore registers 293 lm %r6,%r15,120(%r15) # restore registers
266 br %r14 294 br %r14
267 295
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 8d8957b38ab3..c8e8e1354e1d 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -2,7 +2,7 @@
2 * arch/s390/kernel/setup.c 2 * arch/s390/kernel/setup.c
3 * 3 *
4 * S390 version 4 * S390 version
5 * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation 5 * Copyright (C) IBM Corp. 1999,2010
6 * Author(s): Hartmut Penner (hp@de.ibm.com), 6 * Author(s): Hartmut Penner (hp@de.ibm.com),
7 * Martin Schwidefsky (schwidefsky@de.ibm.com) 7 * Martin Schwidefsky (schwidefsky@de.ibm.com)
8 * 8 *
@@ -25,7 +25,6 @@
25#include <linux/stddef.h> 25#include <linux/stddef.h>
26#include <linux/unistd.h> 26#include <linux/unistd.h>
27#include <linux/ptrace.h> 27#include <linux/ptrace.h>
28#include <linux/slab.h>
29#include <linux/user.h> 28#include <linux/user.h>
30#include <linux/tty.h> 29#include <linux/tty.h>
31#include <linux/ioport.h> 30#include <linux/ioport.h>
@@ -114,22 +113,6 @@ static struct resource data_resource = {
114}; 113};
115 114
116/* 115/*
117 * cpu_init() initializes state that is per-CPU.
118 */
119void __cpuinit cpu_init(void)
120{
121 /*
122 * Store processor id in lowcore (used e.g. in timer_interrupt)
123 */
124 get_cpu_id(&S390_lowcore.cpu_id);
125
126 atomic_inc(&init_mm.mm_count);
127 current->active_mm = &init_mm;
128 BUG_ON(current->mm);
129 enter_lazy_tlb(&init_mm, current);
130}
131
132/*
133 * condev= and conmode= setup parameter. 116 * condev= and conmode= setup parameter.
134 */ 117 */
135 118
@@ -386,25 +369,18 @@ static void setup_addressing_mode(void)
386 pr_info("Address spaces switched, " 369 pr_info("Address spaces switched, "
387 "mvcos not available\n"); 370 "mvcos not available\n");
388 } 371 }
389#ifdef CONFIG_TRACE_IRQFLAGS
390 sysc_restore_trace_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK;
391 io_restore_trace_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK;
392#endif
393} 372}
394 373
395static void __init 374static void __init
396setup_lowcore(void) 375setup_lowcore(void)
397{ 376{
398 struct _lowcore *lc; 377 struct _lowcore *lc;
399 int lc_pages;
400 378
401 /* 379 /*
402 * Setup lowcore for boot cpu 380 * Setup lowcore for boot cpu
403 */ 381 */
404 lc_pages = sizeof(void *) == 8 ? 2 : 1; 382 BUILD_BUG_ON(sizeof(struct _lowcore) != LC_PAGES * 4096);
405 lc = (struct _lowcore *) 383 lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0);
406 __alloc_bootmem(lc_pages * PAGE_SIZE, lc_pages * PAGE_SIZE, 0);
407 memset(lc, 0, lc_pages * PAGE_SIZE);
408 lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; 384 lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
409 lc->restart_psw.addr = 385 lc->restart_psw.addr =
410 PSW_ADDR_AMODE | (unsigned long) restart_int_handler; 386 PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
@@ -436,11 +412,12 @@ setup_lowcore(void)
436#ifndef CONFIG_64BIT 412#ifndef CONFIG_64BIT
437 if (MACHINE_HAS_IEEE) { 413 if (MACHINE_HAS_IEEE) {
438 lc->extended_save_area_addr = (__u32) 414 lc->extended_save_area_addr = (__u32)
439 __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0); 415 __alloc_bootmem_low(PAGE_SIZE, PAGE_SIZE, 0);
440 /* enable extended save area */ 416 /* enable extended save area */
441 __ctl_set_bit(14, 29); 417 __ctl_set_bit(14, 29);
442 } 418 }
443#else 419#else
420 lc->cmf_hpp = -1ULL;
444 lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0]; 421 lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0];
445#endif 422#endif
446 lc->sync_enter_timer = S390_lowcore.sync_enter_timer; 423 lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
@@ -699,6 +676,7 @@ static void __init setup_hwcaps(void)
699 static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 }; 676 static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 };
700 unsigned long long facility_list_extended; 677 unsigned long long facility_list_extended;
701 unsigned int facility_list; 678 unsigned int facility_list;
679 struct cpuid cpu_id;
702 int i; 680 int i;
703 681
704 facility_list = stfl(); 682 facility_list = stfl();
@@ -760,7 +738,8 @@ static void __init setup_hwcaps(void)
760 */ 738 */
761 elf_hwcap |= HWCAP_S390_HIGH_GPRS; 739 elf_hwcap |= HWCAP_S390_HIGH_GPRS;
762 740
763 switch (S390_lowcore.cpu_id.machine) { 741 get_cpu_id(&cpu_id);
742 switch (cpu_id.machine) {
764 case 0x9672: 743 case 0x9672:
765#if !defined(CONFIG_64BIT) 744#if !defined(CONFIG_64BIT)
766 default: /* Use "g5" as default for 31 bit kernels. */ 745 default: /* Use "g5" as default for 31 bit kernels. */
@@ -804,7 +783,7 @@ setup_arch(char **cmdline_p)
804 if (MACHINE_IS_VM) 783 if (MACHINE_IS_VM)
805 pr_info("Linux is running as a z/VM " 784 pr_info("Linux is running as a z/VM "
806 "guest operating system in 31-bit mode\n"); 785 "guest operating system in 31-bit mode\n");
807 else 786 else if (MACHINE_IS_LPAR)
808 pr_info("Linux is running natively in 31-bit mode\n"); 787 pr_info("Linux is running natively in 31-bit mode\n");
809 if (MACHINE_HAS_IEEE) 788 if (MACHINE_HAS_IEEE)
810 pr_info("The hardware system has IEEE compatible " 789 pr_info("The hardware system has IEEE compatible "
@@ -818,7 +797,7 @@ setup_arch(char **cmdline_p)
818 "guest operating system in 64-bit mode\n"); 797 "guest operating system in 64-bit mode\n");
819 else if (MACHINE_IS_KVM) 798 else if (MACHINE_IS_KVM)
820 pr_info("Linux is running under KVM in 64-bit mode\n"); 799 pr_info("Linux is running under KVM in 64-bit mode\n");
821 else 800 else if (MACHINE_IS_LPAR)
822 pr_info("Linux is running natively in 64-bit mode\n"); 801 pr_info("Linux is running natively in 64-bit mode\n");
823#endif /* CONFIG_64BIT */ 802#endif /* CONFIG_64BIT */
824 803
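
setup_lowcore() now sizes the boot-CPU lowcore from LC_PAGES, allocates it with __alloc_bootmem_low(), and turns the size assumption into a compile-time check. The BUILD_BUG_ON() idiom in isolation, as a tiny sketch with a made-up structure:

#include <linux/kernel.h>	/* BUILD_BUG_ON() */

/* Made-up structure whose size other code depends on. */
struct example_block {
	unsigned char bytes[2 * 4096];
};

static inline void example_layout_check(void)
{
	/* Compilation fails if the layout ever stops filling two 4KB pages. */
	BUILD_BUG_ON(sizeof(struct example_block) != 2 * 4096);
}
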
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 6289945562b0..ee7ac8b11782 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -313,6 +313,7 @@ static int setup_frame(int sig, struct k_sigaction *ka,
313 To avoid breaking binary compatibility, they are passed as args. */ 313 To avoid breaking binary compatibility, they are passed as args. */
314 regs->gprs[4] = current->thread.trap_no; 314 regs->gprs[4] = current->thread.trap_no;
315 regs->gprs[5] = current->thread.prot_addr; 315 regs->gprs[5] = current->thread.prot_addr;
316 regs->gprs[6] = task_thread_info(current)->last_break;
316 317
317 /* Place signal number on stack to allow backtrace from handler. */ 318 /* Place signal number on stack to allow backtrace from handler. */
318 if (__put_user(regs->gprs[2], (int __user *) &frame->signo)) 319 if (__put_user(regs->gprs[2], (int __user *) &frame->signo))
@@ -376,6 +377,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
376 regs->gprs[2] = map_signal(sig); 377 regs->gprs[2] = map_signal(sig);
377 regs->gprs[3] = (unsigned long) &frame->info; 378 regs->gprs[3] = (unsigned long) &frame->info;
378 regs->gprs[4] = (unsigned long) &frame->uc; 379 regs->gprs[4] = (unsigned long) &frame->uc;
380 regs->gprs[5] = task_thread_info(current)->last_break;
379 return 0; 381 return 0;
380 382
381give_sigsegv: 383give_sigsegv:
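
Alongside the trap number and protection address, the thread's last breaking-event address is now handed to signal handlers in a register (gpr6 for the classic frame, gpr5 for the RT frame); as the comment above notes, these travel as extra arguments to preserve binary compatibility. Since the s390 C ABI passes the first integer arguments in r2-r6, an old-style handler can declare extra parameters to read them. A heavily hedged user-space sketch; the prototype, names and types are illustrative, not an API definition:

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

/*
 * Illustrative only: on s390 the kernel places extra fault details in the
 * argument registers, so a non-SA_SIGINFO handler may declare additional
 * parameters to pick them up. Names and the void * type are assumptions.
 */
static void segv_handler(int sig, void *sigctx, long trap_no,
			 unsigned long prot_addr, unsigned long last_break)
{
	fprintf(stderr, "sig %d: trap %ld, addr %#lx, last break %#lx\n",
		sig, trap_no, prot_addr, last_break);
	_exit(1);
}

Installing such a handler needs a cast through the usual sighandler_t, since the extra parameters fall outside the portable prototype.
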
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 76a6fdd46c45..541053ed234e 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -36,6 +36,8 @@
36#include <linux/cpu.h> 36#include <linux/cpu.h>
37#include <linux/timex.h> 37#include <linux/timex.h>
38#include <linux/bootmem.h> 38#include <linux/bootmem.h>
39#include <linux/slab.h>
40#include <asm/asm-offsets.h>
39#include <asm/ipl.h> 41#include <asm/ipl.h>
40#include <asm/setup.h> 42#include <asm/setup.h>
41#include <asm/sigp.h> 43#include <asm/sigp.h>
@@ -53,7 +55,7 @@
53#include "entry.h" 55#include "entry.h"
54 56
55/* logical cpu to cpu address */ 57/* logical cpu to cpu address */
56int __cpu_logical_map[NR_CPUS]; 58unsigned short __cpu_logical_map[NR_CPUS];
57 59
58static struct task_struct *current_set[NR_CPUS]; 60static struct task_struct *current_set[NR_CPUS];
59 61
@@ -72,13 +74,13 @@ static int cpu_management;
72 74
73static DEFINE_PER_CPU(struct cpu, cpu_devices); 75static DEFINE_PER_CPU(struct cpu, cpu_devices);
74 76
75static void smp_ext_bitcall(int, ec_bit_sig); 77static void smp_ext_bitcall(int, int);
76 78
77static int cpu_stopped(int cpu) 79static int raw_cpu_stopped(int cpu)
78{ 80{
79 __u32 status; 81 u32 status;
80 82
81 switch (signal_processor_ps(&status, 0, cpu, sigp_sense)) { 83 switch (raw_sigp_ps(&status, 0, cpu, sigp_sense)) {
82 case sigp_status_stored: 84 case sigp_status_stored:
83 /* Check for stopped and check stop state */ 85 /* Check for stopped and check stop state */
84 if (status & 0x50) 86 if (status & 0x50)
@@ -90,6 +92,44 @@ static int cpu_stopped(int cpu)
90 return 0; 92 return 0;
91} 93}
92 94
95static inline int cpu_stopped(int cpu)
96{
97 return raw_cpu_stopped(cpu_logical_map(cpu));
98}
99
100void smp_switch_to_ipl_cpu(void (*func)(void *), void *data)
101{
102 struct _lowcore *lc, *current_lc;
103 struct stack_frame *sf;
104 struct pt_regs *regs;
105 unsigned long sp;
106
107 if (smp_processor_id() == 0)
108 func(data);
109 __load_psw_mask(PSW_BASE_BITS | PSW_DEFAULT_KEY);
110 /* Disable lowcore protection */
111 __ctl_clear_bit(0, 28);
112 current_lc = lowcore_ptr[smp_processor_id()];
113 lc = lowcore_ptr[0];
114 if (!lc)
115 lc = current_lc;
116 lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
117 lc->restart_psw.addr = PSW_ADDR_AMODE | (unsigned long) smp_restart_cpu;
118 if (!cpu_online(0))
119 smp_switch_to_cpu(func, data, 0, stap(), __cpu_logical_map[0]);
120 while (sigp(0, sigp_stop_and_store_status) == sigp_busy)
121 cpu_relax();
122 sp = lc->panic_stack;
123 sp -= sizeof(struct pt_regs);
124 regs = (struct pt_regs *) sp;
125 memcpy(&regs->gprs, &current_lc->gpregs_save_area, sizeof(regs->gprs));
126 regs->psw = lc->psw_save_area;
127 sp -= STACK_FRAME_OVERHEAD;
128 sf = (struct stack_frame *) sp;
129 sf->back_chain = regs->gprs[15];
130 smp_switch_to_cpu(func, data, sp, stap(), __cpu_logical_map[0]);
131}
132
93void smp_send_stop(void) 133void smp_send_stop(void)
94{ 134{
95 int cpu, rc; 135 int cpu, rc;
@@ -103,7 +143,7 @@ void smp_send_stop(void)
103 if (cpu == smp_processor_id()) 143 if (cpu == smp_processor_id())
104 continue; 144 continue;
105 do { 145 do {
106 rc = signal_processor(cpu, sigp_stop); 146 rc = sigp(cpu, sigp_stop);
107 } while (rc == sigp_busy); 147 } while (rc == sigp_busy);
108 148
109 while (!cpu_stopped(cpu)) 149 while (!cpu_stopped(cpu))
@@ -139,13 +179,13 @@ static void do_ext_call_interrupt(__u16 code)
139 * Send an external call sigp to another cpu and return without waiting 179 * Send an external call sigp to another cpu and return without waiting
140 * for its completion. 180 * for its completion.
141 */ 181 */
142static void smp_ext_bitcall(int cpu, ec_bit_sig sig) 182static void smp_ext_bitcall(int cpu, int sig)
143{ 183{
144 /* 184 /*
145 * Set signaling bit in lowcore of target cpu and kick it 185 * Set signaling bit in lowcore of target cpu and kick it
146 */ 186 */
147 set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast); 187 set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
148 while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy) 188 while (sigp(cpu, sigp_emergency_signal) == sigp_busy)
149 udelay(10); 189 udelay(10);
150} 190}
151 191
@@ -239,24 +279,8 @@ void smp_ctl_clear_bit(int cr, int bit)
239} 279}
240EXPORT_SYMBOL(smp_ctl_clear_bit); 280EXPORT_SYMBOL(smp_ctl_clear_bit);
241 281
242/*
243 * In early ipl state a temp. logically cpu number is needed, so the sigp
244 * functions can be used to sense other cpus. Since NR_CPUS is >= 2 on
245 * CONFIG_SMP and the ipl cpu is logical cpu 0, it must be 1.
246 */
247#define CPU_INIT_NO 1
248
249#ifdef CONFIG_ZFCPDUMP 282#ifdef CONFIG_ZFCPDUMP
250 283
251/*
252 * zfcpdump_prefix_array holds prefix registers for the following scenario:
253 * 64 bit zfcpdump kernel and 31 bit kernel which is to be dumped. We have to
254 * save its prefix registers, since they get lost, when switching from 31 bit
255 * to 64 bit.
256 */
257unsigned int zfcpdump_prefix_array[NR_CPUS + 1] \
258 __attribute__((__section__(".data")));
259
260static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) 284static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
261{ 285{
262 if (ipl_info.type != IPL_TYPE_FCP_DUMP) 286 if (ipl_info.type != IPL_TYPE_FCP_DUMP)
@@ -266,21 +290,15 @@ static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
266 "the dump\n", cpu, NR_CPUS - 1); 290 "the dump\n", cpu, NR_CPUS - 1);
267 return; 291 return;
268 } 292 }
269 zfcpdump_save_areas[cpu] = kmalloc(sizeof(union save_area), GFP_KERNEL); 293 zfcpdump_save_areas[cpu] = kmalloc(sizeof(struct save_area), GFP_KERNEL);
270 __cpu_logical_map[CPU_INIT_NO] = (__u16) phy_cpu; 294 while (raw_sigp(phy_cpu, sigp_stop_and_store_status) == sigp_busy)
271 while (signal_processor(CPU_INIT_NO, sigp_stop_and_store_status) ==
272 sigp_busy)
273 cpu_relax(); 295 cpu_relax();
274 memcpy(zfcpdump_save_areas[cpu], 296 memcpy_real(zfcpdump_save_areas[cpu],
275 (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE, 297 (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE,
276 SAVE_AREA_SIZE); 298 sizeof(struct save_area));
277#ifdef CONFIG_64BIT
278 /* copy original prefix register */
279 zfcpdump_save_areas[cpu]->s390x.pref_reg = zfcpdump_prefix_array[cpu];
280#endif
281} 299}
282 300
283union save_area *zfcpdump_save_areas[NR_CPUS + 1]; 301struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
284EXPORT_SYMBOL_GPL(zfcpdump_save_areas); 302EXPORT_SYMBOL_GPL(zfcpdump_save_areas);
285 303
286#else 304#else
@@ -389,8 +407,7 @@ static void __init smp_detect_cpus(void)
389 for (cpu = 0; cpu <= MAX_CPU_ADDRESS; cpu++) { 407 for (cpu = 0; cpu <= MAX_CPU_ADDRESS; cpu++) {
390 if (cpu == boot_cpu_addr) 408 if (cpu == boot_cpu_addr)
391 continue; 409 continue;
392 __cpu_logical_map[CPU_INIT_NO] = cpu; 410 if (!raw_cpu_stopped(cpu))
393 if (!cpu_stopped(CPU_INIT_NO))
394 continue; 411 continue;
395 smp_get_save_area(c_cpus, cpu); 412 smp_get_save_area(c_cpus, cpu);
396 c_cpus++; 413 c_cpus++;
@@ -413,8 +430,7 @@ static void __init smp_detect_cpus(void)
413 cpu_addr = info->cpu[cpu].address; 430 cpu_addr = info->cpu[cpu].address;
414 if (cpu_addr == boot_cpu_addr) 431 if (cpu_addr == boot_cpu_addr)
415 continue; 432 continue;
416 __cpu_logical_map[CPU_INIT_NO] = cpu_addr; 433 if (!raw_cpu_stopped(cpu_addr)) {
417 if (!cpu_stopped(CPU_INIT_NO)) {
418 s_cpus++; 434 s_cpus++;
419 continue; 435 continue;
420 } 436 }
@@ -533,18 +549,18 @@ static void smp_free_lowcore(int cpu)
533/* Upping and downing of CPUs */ 549/* Upping and downing of CPUs */
534int __cpuinit __cpu_up(unsigned int cpu) 550int __cpuinit __cpu_up(unsigned int cpu)
535{ 551{
536 struct task_struct *idle;
537 struct _lowcore *cpu_lowcore; 552 struct _lowcore *cpu_lowcore;
553 struct task_struct *idle;
538 struct stack_frame *sf; 554 struct stack_frame *sf;
539 sigp_ccode ccode;
540 u32 lowcore; 555 u32 lowcore;
556 int ccode;
541 557
542 if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED) 558 if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED)
543 return -EIO; 559 return -EIO;
544 if (smp_alloc_lowcore(cpu)) 560 if (smp_alloc_lowcore(cpu))
545 return -ENOMEM; 561 return -ENOMEM;
546 do { 562 do {
547 ccode = signal_processor(cpu, sigp_initial_cpu_reset); 563 ccode = sigp(cpu, sigp_initial_cpu_reset);
548 if (ccode == sigp_busy) 564 if (ccode == sigp_busy)
549 udelay(10); 565 udelay(10);
550 if (ccode == sigp_not_operational) 566 if (ccode == sigp_not_operational)
@@ -552,7 +568,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
552 } while (ccode == sigp_busy); 568 } while (ccode == sigp_busy);
553 569
554 lowcore = (u32)(unsigned long)lowcore_ptr[cpu]; 570 lowcore = (u32)(unsigned long)lowcore_ptr[cpu];
555 while (signal_processor_p(lowcore, cpu, sigp_set_prefix) == sigp_busy) 571 while (sigp_p(lowcore, cpu, sigp_set_prefix) == sigp_busy)
556 udelay(10); 572 udelay(10);
557 573
558 idle = current_set[cpu]; 574 idle = current_set[cpu];
@@ -578,7 +594,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
578 cpu_lowcore->ftrace_func = S390_lowcore.ftrace_func; 594 cpu_lowcore->ftrace_func = S390_lowcore.ftrace_func;
579 eieio(); 595 eieio();
580 596
581 while (signal_processor(cpu, sigp_restart) == sigp_busy) 597 while (sigp(cpu, sigp_restart) == sigp_busy)
582 udelay(10); 598 udelay(10);
583 599
584 while (!cpu_online(cpu)) 600 while (!cpu_online(cpu))
@@ -640,7 +656,7 @@ void __cpu_die(unsigned int cpu)
640 /* Wait until target cpu is down */ 656 /* Wait until target cpu is down */
641 while (!cpu_stopped(cpu)) 657 while (!cpu_stopped(cpu))
642 cpu_relax(); 658 cpu_relax();
643 while (signal_processor_p(0, cpu, sigp_set_prefix) == sigp_busy) 659 while (sigp_p(0, cpu, sigp_set_prefix) == sigp_busy)
644 udelay(10); 660 udelay(10);
645 smp_free_lowcore(cpu); 661 smp_free_lowcore(cpu);
646 pr_info("Processor %d stopped\n", cpu); 662 pr_info("Processor %d stopped\n", cpu);
@@ -649,7 +665,7 @@ void __cpu_die(unsigned int cpu)
649void cpu_die(void) 665void cpu_die(void)
650{ 666{
651 idle_task_exit(); 667 idle_task_exit();
652 while (signal_processor(smp_processor_id(), sigp_stop) == sigp_busy) 668 while (sigp(smp_processor_id(), sigp_stop) == sigp_busy)
653 cpu_relax(); 669 cpu_relax();
654 for (;;); 670 for (;;);
655} 671}
@@ -765,7 +781,8 @@ static ssize_t cpu_configure_store(struct sys_device *dev,
765 get_online_cpus(); 781 get_online_cpus();
766 mutex_lock(&smp_cpu_state_mutex); 782 mutex_lock(&smp_cpu_state_mutex);
767 rc = -EBUSY; 783 rc = -EBUSY;
768 if (cpu_online(cpu)) 784 /* disallow configuration changes of online cpus and cpu 0 */
785 if (cpu_online(cpu) || cpu == 0)
769 goto out; 786 goto out;
770 rc = 0; 787 rc = 0;
771 switch (val) { 788 switch (val) {
@@ -927,21 +944,21 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self,
927 struct cpu *c = &per_cpu(cpu_devices, cpu); 944 struct cpu *c = &per_cpu(cpu_devices, cpu);
928 struct sys_device *s = &c->sysdev; 945 struct sys_device *s = &c->sysdev;
929 struct s390_idle_data *idle; 946 struct s390_idle_data *idle;
947 int err = 0;
930 948
931 switch (action) { 949 switch (action) {
932 case CPU_ONLINE: 950 case CPU_ONLINE:
933 case CPU_ONLINE_FROZEN: 951 case CPU_ONLINE_FROZEN:
934 idle = &per_cpu(s390_idle, cpu); 952 idle = &per_cpu(s390_idle, cpu);
935 memset(idle, 0, sizeof(struct s390_idle_data)); 953 memset(idle, 0, sizeof(struct s390_idle_data));
936 if (sysfs_create_group(&s->kobj, &cpu_online_attr_group)) 954 err = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
937 return NOTIFY_BAD;
938 break; 955 break;
939 case CPU_DEAD: 956 case CPU_DEAD:
940 case CPU_DEAD_FROZEN: 957 case CPU_DEAD_FROZEN:
941 sysfs_remove_group(&s->kobj, &cpu_online_attr_group); 958 sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
942 break; 959 break;
943 } 960 }
944 return NOTIFY_OK; 961 return notifier_from_errno(err);
945} 962}
946 963
947static struct notifier_block __cpuinitdata smp_cpu_nb = { 964static struct notifier_block __cpuinitdata smp_cpu_nb = {
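
Editor's note: the hunk above stops returning NOTIFY_BAD directly and instead funnels the sysfs error through notifier_from_errno(), so the caller can tell "failed with this errno" apart from a plain veto. A rough user-space sketch of that encode/decode idea; the constants and bit packing here are illustrative only and do not claim to match the real <linux/notifier.h> helpers:

    #include <errno.h>
    #include <stdio.h>

    #define SKETCH_NOTIFY_OK   0x0001
    #define SKETCH_STOP_MASK   0x8000   /* "stop calling further notifiers" flag */

    /* Fold a 0-or-negative errno into a notifier-style return value. */
    static int sketch_from_errno(int err)
    {
        return err ? (SKETCH_STOP_MASK | -err) : SKETCH_NOTIFY_OK;
    }

    /* Recover the errno on the caller's side. */
    static int sketch_to_errno(int ret)
    {
        return (ret & SKETCH_STOP_MASK) ? -(ret & ~SKETCH_STOP_MASK) : 0;
    }

    int main(void)
    {
        int ret = sketch_from_errno(-ENOMEM);   /* e.g. sysfs_create_group() failed */
        printf("notifier ret=%#x, errno back=%d\n", ret, sketch_to_errno(ret));
        return 0;
    }
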
@@ -1004,7 +1021,9 @@ out:
1004 return rc; 1021 return rc;
1005} 1022}
1006 1023
1007static ssize_t __ref rescan_store(struct sysdev_class *class, const char *buf, 1024static ssize_t __ref rescan_store(struct sysdev_class *class,
1025 struct sysdev_class_attribute *attr,
1026 const char *buf,
1008 size_t count) 1027 size_t count)
1009{ 1028{
1010 int rc; 1029 int rc;
@@ -1015,7 +1034,9 @@ static ssize_t __ref rescan_store(struct sysdev_class *class, const char *buf,
1015static SYSDEV_CLASS_ATTR(rescan, 0200, NULL, rescan_store); 1034static SYSDEV_CLASS_ATTR(rescan, 0200, NULL, rescan_store);
1016#endif /* CONFIG_HOTPLUG_CPU */ 1035#endif /* CONFIG_HOTPLUG_CPU */
1017 1036
1018static ssize_t dispatching_show(struct sysdev_class *class, char *buf) 1037static ssize_t dispatching_show(struct sysdev_class *class,
1038 struct sysdev_class_attribute *attr,
1039 char *buf)
1019{ 1040{
1020 ssize_t count; 1041 ssize_t count;
1021 1042
@@ -1025,7 +1046,9 @@ static ssize_t dispatching_show(struct sysdev_class *class, char *buf)
1025 return count; 1046 return count;
1026} 1047}
1027 1048
1028static ssize_t dispatching_store(struct sysdev_class *dev, const char *buf, 1049static ssize_t dispatching_store(struct sysdev_class *dev,
1050 struct sysdev_class_attribute *attr,
1051 const char *buf,
1029 size_t count) 1052 size_t count)
1030{ 1053{
1031 int val, rc; 1054 int val, rc;
diff --git a/arch/s390/kernel/switch_cpu.S b/arch/s390/kernel/switch_cpu.S
new file mode 100644
index 000000000000..469f11b574fa
--- /dev/null
+++ b/arch/s390/kernel/switch_cpu.S
@@ -0,0 +1,58 @@
1/*
2 * 31-bit switch cpu code
3 *
4 * Copyright IBM Corp. 2009
5 *
6 */
7
8#include <asm/asm-offsets.h>
9#include <asm/ptrace.h>
10
11# smp_switch_to_cpu switches to destination cpu and executes the passed function
12# Parameter: %r2 - function to call
13# %r3 - function parameter
14# %r4 - stack pointer
15# %r5 - current cpu
16# %r6 - destination cpu
17
18 .section .text
19 .align 4
20 .globl smp_switch_to_cpu
21smp_switch_to_cpu:
22 stm %r6,%r15,__SF_GPRS(%r15)
23 lr %r1,%r15
24 ahi %r15,-STACK_FRAME_OVERHEAD
25 st %r1,__SF_BACKCHAIN(%r15)
26 basr %r13,0
270: la %r1,.gprregs_addr-0b(%r13)
28 l %r1,0(%r1)
29 stm %r0,%r15,0(%r1)
301: sigp %r0,%r6,__SIGP_RESTART /* start destination CPU */
31 brc 2,1b /* busy, try again */
322: sigp %r0,%r5,__SIGP_STOP /* stop current CPU */
33 brc 2,2b /* busy, try again */
343: j 3b
35
36 .globl smp_restart_cpu
37smp_restart_cpu:
38 basr %r13,0
390: la %r1,.gprregs_addr-0b(%r13)
40 l %r1,0(%r1)
41 lm %r0,%r15,0(%r1)
421: sigp %r0,%r5,__SIGP_SENSE /* Wait for calling CPU */
43 brc 10,1b /* busy, accepted (status 0), running */
44 tmll %r0,0x40 /* Test if calling CPU is stopped */
45 jz 1b
46 ltr %r4,%r4 /* New stack ? */
47 jz 1f
48 lr %r15,%r4
491: basr %r14,%r2
50
51.gprregs_addr:
52 .long .gprregs
53
54 .section .data,"aw",@progbits
55.gprregs:
56 .rept 16
57 .long 0
58 .endr
diff --git a/arch/s390/kernel/switch_cpu64.S b/arch/s390/kernel/switch_cpu64.S
new file mode 100644
index 000000000000..d94aacc898cb
--- /dev/null
+++ b/arch/s390/kernel/switch_cpu64.S
@@ -0,0 +1,51 @@
1/*
2 * 64-bit switch cpu code
3 *
4 * Copyright IBM Corp. 2009
5 *
6 */
7
8#include <asm/asm-offsets.h>
9#include <asm/ptrace.h>
10
11# smp_switch_to_cpu switches to destination cpu and executes the passed function
12# Parameter: %r2 - function to call
13# %r3 - function parameter
14# %r4 - stack pointer
15# %r5 - current cpu
16# %r6 - destination cpu
17
18 .section .text
19 .align 4
20 .globl smp_switch_to_cpu
21smp_switch_to_cpu:
22 stmg %r6,%r15,__SF_GPRS(%r15)
23 lgr %r1,%r15
24 aghi %r15,-STACK_FRAME_OVERHEAD
25 stg %r1,__SF_BACKCHAIN(%r15)
26 larl %r1,.gprregs
27 stmg %r0,%r15,0(%r1)
281: sigp %r0,%r6,__SIGP_RESTART /* start destination CPU */
29 brc 2,1b /* busy, try again */
302: sigp %r0,%r5,__SIGP_STOP /* stop current CPU */
31 brc 2,2b /* busy, try again */
323: j 3b
33
34 .globl smp_restart_cpu
35smp_restart_cpu:
36 larl %r1,.gprregs
37 lmg %r0,%r15,0(%r1)
381: sigp %r0,%r5,__SIGP_SENSE /* Wait for calling CPU */
39 brc 10,1b /* busy, accepted (status 0), running */
40 tmll %r0,0x40 /* Test if calling CPU is stopped */
41 jz 1b
42 ltgr %r4,%r4 /* New stack ? */
43 jz 1f
44 lgr %r15,%r4
451: basr %r14,%r2
46
47 .section .data,"aw",@progbits
48.gprregs:
49 .rept 16
50 .quad 0
51 .endr
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S
index e5cd623cb025..1f066e46e83e 100644
--- a/arch/s390/kernel/swsusp_asm64.S
+++ b/arch/s390/kernel/swsusp_asm64.S
@@ -176,7 +176,7 @@ pgm_check_entry:
176 cgr %r1,%r2 176 cgr %r1,%r2
177 je restore_registers /* r1 = r2 -> nothing to do */ 177 je restore_registers /* r1 = r2 -> nothing to do */
178 larl %r4,.Lrestart_suspend_psw /* Set new restart PSW */ 178 larl %r4,.Lrestart_suspend_psw /* Set new restart PSW */
179 mvc __LC_RESTART_PSW(16,%r0),0(%r4) 179 mvc __LC_RST_NEW_PSW(16,%r0),0(%r4)
1803: 1803:
181 sigp %r9,%r1,__SIGP_INITIAL_CPU_RESET 181 sigp %r9,%r1,__SIGP_INITIAL_CPU_RESET
182 brc 8,4f /* accepted */ 182 brc 8,4f /* accepted */
@@ -256,6 +256,9 @@ restore_registers:
256 lghi %r2,0 256 lghi %r2,0
257 brasl %r14,arch_set_page_states 257 brasl %r14,arch_set_page_states
258 258
259 /* Reinitialize the channel subsystem */
260 brasl %r14,channel_subsystem_reinit
261
259 /* Return 0 */ 262 /* Return 0 */
260 lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15) 263 lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
261 lghi %r2,0 264 lghi %r2,0
diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c
index 86a74c9c9e63..7b6b0f81a283 100644
--- a/arch/s390/kernel/sys_s390.c
+++ b/arch/s390/kernel/sys_s390.c
@@ -33,13 +33,12 @@
33#include "entry.h" 33#include "entry.h"
34 34
35/* 35/*
36 * Perform the select(nd, in, out, ex, tv) and mmap() system 36 * Perform the mmap() system call. Linux for S/390 isn't able to handle more
37 * calls. Linux for S/390 isn't able to handle more than 5 37 * than 5 system call parameters, so this system call uses a memory block
38 * system call parameters, so these system calls used a memory 38 * for parameter passing.
39 * block for parameter passing..
40 */ 39 */
41 40
42struct mmap_arg_struct { 41struct s390_mmap_arg_struct {
43 unsigned long addr; 42 unsigned long addr;
44 unsigned long len; 43 unsigned long len;
45 unsigned long prot; 44 unsigned long prot;
@@ -48,9 +47,9 @@ struct mmap_arg_struct {
48 unsigned long offset; 47 unsigned long offset;
49}; 48};
50 49
51SYSCALL_DEFINE1(mmap2, struct mmap_arg_struct __user *, arg) 50SYSCALL_DEFINE1(mmap2, struct s390_mmap_arg_struct __user *, arg)
52{ 51{
53 struct mmap_arg_struct a; 52 struct s390_mmap_arg_struct a;
54 int error = -EFAULT; 53 int error = -EFAULT;
55 54
56 if (copy_from_user(&a, arg, sizeof(a))) 55 if (copy_from_user(&a, arg, sizeof(a)))
@@ -60,29 +59,12 @@ out:
60 return error; 59 return error;
61} 60}
62 61
63SYSCALL_DEFINE1(s390_old_mmap, struct mmap_arg_struct __user *, arg)
64{
65 struct mmap_arg_struct a;
66 long error = -EFAULT;
67
68 if (copy_from_user(&a, arg, sizeof(a)))
69 goto out;
70
71 error = -EINVAL;
72 if (a.offset & ~PAGE_MASK)
73 goto out;
74
75 error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
76out:
77 return error;
78}
79
80/* 62/*
81 * sys_ipc() is the de-multiplexer for the SysV IPC calls.. 63 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
82 * 64 *
83 * This is really horribly ugly. 65 * This is really horribly ugly.
84 */ 66 */
85SYSCALL_DEFINE5(ipc, uint, call, int, first, unsigned long, second, 67SYSCALL_DEFINE5(s390_ipc, uint, call, int, first, unsigned long, second,
86 unsigned long, third, void __user *, ptr) 68 unsigned long, third, void __user *, ptr)
87{ 69{
88 struct ipc_kludge tmp; 70 struct ipc_kludge tmp;
@@ -149,17 +131,6 @@ SYSCALL_DEFINE5(ipc, uint, call, int, first, unsigned long, second,
149} 131}
150 132
151#ifdef CONFIG_64BIT 133#ifdef CONFIG_64BIT
152SYSCALL_DEFINE1(s390_newuname, struct new_utsname __user *, name)
153{
154 int ret = sys_newuname(name);
155
156 if (personality(current->personality) == PER_LINUX32 && !ret) {
157 ret = copy_to_user(name->machine, "s390\0\0\0\0", 8);
158 if (ret) ret = -EFAULT;
159 }
160 return ret;
161}
162
163SYSCALL_DEFINE1(s390_personality, unsigned long, personality) 134SYSCALL_DEFINE1(s390_personality, unsigned long, personality)
164{ 135{
165 int ret; 136 int ret;
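
Editor's note: as the reworked comment above says, s390 cannot pass more than five scalar system call parameters, so the six mmap() arguments travel through one user-space structure and a single pointer. A self-contained sketch of that packing, with a stub standing in for the kernel side (the real path goes through copy_from_user() and sys_mmap_pgoff()):

    #include <stdio.h>

    /* Same layout idea as s390_mmap_arg_struct above. */
    struct mmap_args {
        unsigned long addr;
        unsigned long len;
        unsigned long prot;
        unsigned long flags;
        unsigned long fd;
        unsigned long offset;
    };

    /* Stub for the kernel side: unpack the block and act on it. */
    static long handle_mmap_block(const struct mmap_args *a)
    {
        printf("mmap(addr=%#lx, len=%lu, prot=%#lx, flags=%#lx, fd=%ld, off=%lu)\n",
               a->addr, a->len, a->prot, a->flags, (long)a->fd, a->offset);
        return 0;
    }

    int main(void)
    {
        struct mmap_args a = { .addr = 0, .len = 4096, .prot = 3,
                               .flags = 0x22, .fd = (unsigned long)-1, .offset = 0 };
        return (int)handle_mmap_block(&a);   /* one pointer instead of six arguments */
    }
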
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 30eca070d426..201ce6bed34e 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -98,7 +98,7 @@ SYSCALL(sys_uselib,sys_uselib,sys32_uselib_wrapper)
98SYSCALL(sys_swapon,sys_swapon,sys32_swapon_wrapper) 98SYSCALL(sys_swapon,sys_swapon,sys32_swapon_wrapper)
99SYSCALL(sys_reboot,sys_reboot,sys32_reboot_wrapper) 99SYSCALL(sys_reboot,sys_reboot,sys32_reboot_wrapper)
100SYSCALL(sys_ni_syscall,sys_ni_syscall,old32_readdir_wrapper) /* old readdir syscall */ 100SYSCALL(sys_ni_syscall,sys_ni_syscall,old32_readdir_wrapper) /* old readdir syscall */
101SYSCALL(sys_s390_old_mmap,sys_s390_old_mmap,old32_mmap_wrapper) /* 90 */ 101SYSCALL(sys_old_mmap,sys_old_mmap,old32_mmap_wrapper) /* 90 */
102SYSCALL(sys_munmap,sys_munmap,sys32_munmap_wrapper) 102SYSCALL(sys_munmap,sys_munmap,sys32_munmap_wrapper)
103SYSCALL(sys_truncate,sys_truncate,sys32_truncate_wrapper) 103SYSCALL(sys_truncate,sys_truncate,sys32_truncate_wrapper)
104SYSCALL(sys_ftruncate,sys_ftruncate,sys32_ftruncate_wrapper) 104SYSCALL(sys_ftruncate,sys_ftruncate,sys32_ftruncate_wrapper)
@@ -125,12 +125,12 @@ NI_SYSCALL /* vm86old for i386 */
125SYSCALL(sys_wait4,sys_wait4,compat_sys_wait4_wrapper) 125SYSCALL(sys_wait4,sys_wait4,compat_sys_wait4_wrapper)
126SYSCALL(sys_swapoff,sys_swapoff,sys32_swapoff_wrapper) /* 115 */ 126SYSCALL(sys_swapoff,sys_swapoff,sys32_swapoff_wrapper) /* 115 */
127SYSCALL(sys_sysinfo,sys_sysinfo,compat_sys_sysinfo_wrapper) 127SYSCALL(sys_sysinfo,sys_sysinfo,compat_sys_sysinfo_wrapper)
128SYSCALL(sys_ipc,sys_ipc,sys32_ipc_wrapper) 128SYSCALL(sys_s390_ipc,sys_s390_ipc,sys32_ipc_wrapper)
129SYSCALL(sys_fsync,sys_fsync,sys32_fsync_wrapper) 129SYSCALL(sys_fsync,sys_fsync,sys32_fsync_wrapper)
130SYSCALL(sys_sigreturn,sys_sigreturn,sys32_sigreturn) 130SYSCALL(sys_sigreturn,sys_sigreturn,sys32_sigreturn)
131SYSCALL(sys_clone,sys_clone,sys_clone_wrapper) /* 120 */ 131SYSCALL(sys_clone,sys_clone,sys_clone_wrapper) /* 120 */
132SYSCALL(sys_setdomainname,sys_setdomainname,sys32_setdomainname_wrapper) 132SYSCALL(sys_setdomainname,sys_setdomainname,sys32_setdomainname_wrapper)
133SYSCALL(sys_newuname,sys_s390_newuname,sys32_newuname_wrapper) 133SYSCALL(sys_newuname,sys_newuname,sys32_newuname_wrapper)
134NI_SYSCALL /* modify_ldt for i386 */ 134NI_SYSCALL /* modify_ldt for i386 */
135SYSCALL(sys_adjtimex,sys_adjtimex,compat_sys_adjtimex_wrapper) 135SYSCALL(sys_adjtimex,sys_adjtimex,compat_sys_adjtimex_wrapper)
136SYSCALL(sys_mprotect,sys_mprotect,sys32_mprotect_wrapper) /* 125 */ 136SYSCALL(sys_mprotect,sys_mprotect,sys32_mprotect_wrapper) /* 125 */
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
index b5e75e1061c8..a0ffc7717ed6 100644
--- a/arch/s390/kernel/sysinfo.c
+++ b/arch/s390/kernel/sysinfo.c
@@ -11,6 +11,7 @@
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/delay.h> 12#include <linux/delay.h>
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/slab.h>
14#include <asm/ebcdic.h> 15#include <asm/ebcdic.h>
15#include <asm/sysinfo.h> 16#include <asm/sysinfo.h>
16#include <asm/cpcmd.h> 17#include <asm/cpcmd.h>
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 65065ac48ed3..15a7536452d5 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -36,6 +36,7 @@
36#include <linux/notifier.h> 36#include <linux/notifier.h>
37#include <linux/clocksource.h> 37#include <linux/clocksource.h>
38#include <linux/clockchips.h> 38#include <linux/clockchips.h>
39#include <linux/gfp.h>
39#include <asm/uaccess.h> 40#include <asm/uaccess.h>
40#include <asm/delay.h> 41#include <asm/delay.h>
41#include <asm/s390_ext.h> 42#include <asm/s390_ext.h>
@@ -51,14 +52,6 @@
51#define USECS_PER_JIFFY ((unsigned long) 1000000/HZ) 52#define USECS_PER_JIFFY ((unsigned long) 1000000/HZ)
52#define CLK_TICKS_PER_JIFFY ((unsigned long) USECS_PER_JIFFY << 12) 53#define CLK_TICKS_PER_JIFFY ((unsigned long) USECS_PER_JIFFY << 12)
53 54
54/*
55 * Create a small time difference between the timer interrupts
56 * on the different cpus to avoid lock contention.
57 */
58#define CPU_DEVIATION (smp_processor_id() << 12)
59
60#define TICK_SIZE tick
61
62u64 sched_clock_base_cc = -1; /* Force to data section. */ 55u64 sched_clock_base_cc = -1; /* Force to data section. */
63EXPORT_SYMBOL_GPL(sched_clock_base_cc); 56EXPORT_SYMBOL_GPL(sched_clock_base_cc);
64 57
@@ -81,15 +74,15 @@ unsigned long long monotonic_clock(void)
81} 74}
82EXPORT_SYMBOL(monotonic_clock); 75EXPORT_SYMBOL(monotonic_clock);
83 76
84void tod_to_timeval(__u64 todval, struct timespec *xtime) 77void tod_to_timeval(__u64 todval, struct timespec *xt)
85{ 78{
86 unsigned long long sec; 79 unsigned long long sec;
87 80
88 sec = todval >> 12; 81 sec = todval >> 12;
89 do_div(sec, 1000000); 82 do_div(sec, 1000000);
90 xtime->tv_sec = sec; 83 xt->tv_sec = sec;
91 todval -= (sec * 1000000) << 12; 84 todval -= (sec * 1000000) << 12;
92 xtime->tv_nsec = ((todval * 1000) >> 12); 85 xt->tv_nsec = ((todval * 1000) >> 12);
93} 86}
94EXPORT_SYMBOL(tod_to_timeval); 87EXPORT_SYMBOL(tod_to_timeval);
95 88
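
Editor's note: the arithmetic in tod_to_timeval() above relies on the s390 TOD clock ticking in units of 2^-12 microseconds, so a right shift by 12 yields microseconds. A user-space rendition of the same conversion (plain division instead of do_div(), struct timespec from <time.h>):

    #include <stdio.h>
    #include <time.h>

    static void tod_to_timespec_sketch(unsigned long long todval, struct timespec *xt)
    {
        unsigned long long sec = todval >> 12;       /* TOD units -> microseconds */

        sec /= 1000000;                              /* -> whole seconds */
        xt->tv_sec = (time_t)sec;
        todval -= (sec * 1000000) << 12;             /* sub-second part, in TOD units */
        xt->tv_nsec = (long)((todval * 1000) >> 12); /* TOD units -> nanoseconds */
    }

    int main(void)
    {
        struct timespec ts;

        tod_to_timespec_sketch(1500000ULL << 12, &ts);  /* 1.5 s worth of TOD units */
        printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);  /* 1.500000000 */
        return 0;
    }
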
@@ -224,10 +217,11 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
224 ++vdso_data->tb_update_count; 217 ++vdso_data->tb_update_count;
225 smp_wmb(); 218 smp_wmb();
226 vdso_data->xtime_tod_stamp = clock->cycle_last; 219 vdso_data->xtime_tod_stamp = clock->cycle_last;
227 vdso_data->xtime_clock_sec = xtime.tv_sec; 220 vdso_data->xtime_clock_sec = wall_time->tv_sec;
228 vdso_data->xtime_clock_nsec = xtime.tv_nsec; 221 vdso_data->xtime_clock_nsec = wall_time->tv_nsec;
229 vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec; 222 vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
230 vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec; 223 vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
224 vdso_data->ntp_mult = mult;
231 smp_wmb(); 225 smp_wmb();
232 ++vdso_data->tb_update_count; 226 ++vdso_data->tb_update_count;
233} 227}
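
Editor's note: the bracketing ++tb_update_count / smp_wmb() pairs above implement a seqcount-style publication: the vdso reader retries whenever it observes an odd count, or a count that changed while it was copying. A compact C11 sketch of the same protocol, with seq_cst atomics standing in for the kernel's explicit barriers:

    #include <stdatomic.h>
    #include <stdio.h>

    struct vclock {
        _Atomic unsigned seq;                 /* like vdso_data->tb_update_count */
        _Atomic unsigned long long xtime_sec;
        _Atomic unsigned long long xtime_nsec;
    };

    static void writer_update(struct vclock *vc, unsigned long long sec,
                              unsigned long long nsec)
    {
        vc->seq++;                            /* odd: update in progress */
        vc->xtime_sec = sec;
        vc->xtime_nsec = nsec;
        vc->seq++;                            /* even again: snapshot consistent */
    }

    static void reader_snapshot(struct vclock *vc,
                                unsigned long long *sec, unsigned long long *nsec)
    {
        unsigned before, after;

        do {
            before = vc->seq;
            *sec = vc->xtime_sec;
            *nsec = vc->xtime_nsec;
            after = vc->seq;
        } while ((before & 1) || before != after);   /* retry on a torn update */
    }

    int main(void)
    {
        struct vclock vc = { 0 };
        unsigned long long s, ns;

        writer_update(&vc, 1234, 500000000ULL);
        reader_snapshot(&vc, &s, &ns);
        printf("%llu.%09llu\n", s, ns);
        return 0;
    }
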
@@ -397,7 +391,6 @@ static void __init time_init_wq(void)
397 if (time_sync_wq) 391 if (time_sync_wq)
398 return; 392 return;
399 time_sync_wq = create_singlethread_workqueue("timesync"); 393 time_sync_wq = create_singlethread_workqueue("timesync");
400 stop_machine_create();
401} 394}
402 395
403/* 396/*
@@ -531,8 +524,11 @@ void etr_switch_to_local(void)
531 if (!etr_eacr.sl) 524 if (!etr_eacr.sl)
532 return; 525 return;
533 disable_sync_clock(NULL); 526 disable_sync_clock(NULL);
534 set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events); 527 if (!test_and_set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events)) {
535 queue_work(time_sync_wq, &etr_work); 528 etr_eacr.es = etr_eacr.sl = 0;
529 etr_setr(&etr_eacr);
530 queue_work(time_sync_wq, &etr_work);
531 }
536} 532}
537 533
538/* 534/*
@@ -546,8 +542,11 @@ void etr_sync_check(void)
546 if (!etr_eacr.es) 542 if (!etr_eacr.es)
547 return; 543 return;
548 disable_sync_clock(NULL); 544 disable_sync_clock(NULL);
549 set_bit(ETR_EVENT_SYNC_CHECK, &etr_events); 545 if (!test_and_set_bit(ETR_EVENT_SYNC_CHECK, &etr_events)) {
550 queue_work(time_sync_wq, &etr_work); 546 etr_eacr.es = 0;
547 etr_setr(&etr_eacr);
548 queue_work(time_sync_wq, &etr_work);
549 }
551} 550}
552 551
553/* 552/*
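
Editor's note: both ETR hunks above switch from an unconditional set_bit() plus queue_work() to a test_and_set_bit() guard, so the sync work is queued exactly once per event; later callers find the bit already set and back off. A small C11 sketch of that "first caller queues the work" idiom, with queue_work() replaced by a stub:

    #include <stdatomic.h>
    #include <stdio.h>

    #define EVENT_SYNC_CHECK 0   /* bit number, as in the etr_events bitmap above */

    static _Atomic unsigned long events;

    /* Stand-in for queue_work(time_sync_wq, &etr_work). */
    static void queue_sync_work(void)
    {
        puts("sync work queued");
    }

    /* Returns nonzero if the bit was already set (someone else is handling it). */
    static int test_and_set_event(int bit)
    {
        unsigned long mask = 1UL << bit;
        return (atomic_fetch_or(&events, mask) & mask) != 0;
    }

    static void on_sync_check(void)
    {
        if (!test_and_set_event(EVENT_SYNC_CHECK))
            queue_sync_work();      /* only the first caller gets here */
    }

    int main(void)
    {
        on_sync_check();   /* queues the work */
        on_sync_check();   /* bit already set: nothing is queued twice */
        return 0;
    }
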
@@ -909,7 +908,7 @@ static struct etr_eacr etr_handle_update(struct etr_aib *aib,
909 * Do not try to get the alternate port aib if the clock 908 * Do not try to get the alternate port aib if the clock
910 * is not in sync yet. 909 * is not in sync yet.
911 */ 910 */
912 if (!check_sync_clock()) 911 if (!eacr.es || !check_sync_clock())
913 return eacr; 912 return eacr;
914 913
915 /* 914 /*
@@ -1071,7 +1070,7 @@ static void etr_work_fn(struct work_struct *work)
1071 * If the clock is in sync just update the eacr and return. 1070 * If the clock is in sync just update the eacr and return.
1072 * If there is no valid sync port wait for a port update. 1071 * If there is no valid sync port wait for a port update.
1073 */ 1072 */
1074 if (check_sync_clock() || sync_port < 0) { 1073 if ((eacr.es && check_sync_clock()) || sync_port < 0) {
1075 etr_update_eacr(eacr); 1074 etr_update_eacr(eacr);
1076 etr_set_tolec_timeout(now); 1075 etr_set_tolec_timeout(now);
1077 goto out_unlock; 1076 goto out_unlock;
@@ -1124,14 +1123,18 @@ static struct sys_device etr_port1_dev = {
1124/* 1123/*
1125 * ETR class attributes 1124 * ETR class attributes
1126 */ 1125 */
1127static ssize_t etr_stepping_port_show(struct sysdev_class *class, char *buf) 1126static ssize_t etr_stepping_port_show(struct sysdev_class *class,
1127 struct sysdev_class_attribute *attr,
1128 char *buf)
1128{ 1129{
1129 return sprintf(buf, "%i\n", etr_port0.esw.p); 1130 return sprintf(buf, "%i\n", etr_port0.esw.p);
1130} 1131}
1131 1132
1132static SYSDEV_CLASS_ATTR(stepping_port, 0400, etr_stepping_port_show, NULL); 1133static SYSDEV_CLASS_ATTR(stepping_port, 0400, etr_stepping_port_show, NULL);
1133 1134
1134static ssize_t etr_stepping_mode_show(struct sysdev_class *class, char *buf) 1135static ssize_t etr_stepping_mode_show(struct sysdev_class *class,
1136 struct sysdev_class_attribute *attr,
1137 char *buf)
1135{ 1138{
1136 char *mode_str; 1139 char *mode_str;
1137 1140
@@ -1592,7 +1595,9 @@ static struct sysdev_class stp_sysclass = {
1592 .name = "stp", 1595 .name = "stp",
1593}; 1596};
1594 1597
1595static ssize_t stp_ctn_id_show(struct sysdev_class *class, char *buf) 1598static ssize_t stp_ctn_id_show(struct sysdev_class *class,
1599 struct sysdev_class_attribute *attr,
1600 char *buf)
1596{ 1601{
1597 if (!stp_online) 1602 if (!stp_online)
1598 return -ENODATA; 1603 return -ENODATA;
@@ -1602,7 +1607,9 @@ static ssize_t stp_ctn_id_show(struct sysdev_class *class, char *buf)
1602 1607
1603static SYSDEV_CLASS_ATTR(ctn_id, 0400, stp_ctn_id_show, NULL); 1608static SYSDEV_CLASS_ATTR(ctn_id, 0400, stp_ctn_id_show, NULL);
1604 1609
1605static ssize_t stp_ctn_type_show(struct sysdev_class *class, char *buf) 1610static ssize_t stp_ctn_type_show(struct sysdev_class *class,
1611 struct sysdev_class_attribute *attr,
1612 char *buf)
1606{ 1613{
1607 if (!stp_online) 1614 if (!stp_online)
1608 return -ENODATA; 1615 return -ENODATA;
@@ -1611,7 +1618,9 @@ static ssize_t stp_ctn_type_show(struct sysdev_class *class, char *buf)
1611 1618
1612static SYSDEV_CLASS_ATTR(ctn_type, 0400, stp_ctn_type_show, NULL); 1619static SYSDEV_CLASS_ATTR(ctn_type, 0400, stp_ctn_type_show, NULL);
1613 1620
1614static ssize_t stp_dst_offset_show(struct sysdev_class *class, char *buf) 1621static ssize_t stp_dst_offset_show(struct sysdev_class *class,
1622 struct sysdev_class_attribute *attr,
1623 char *buf)
1615{ 1624{
1616 if (!stp_online || !(stp_info.vbits & 0x2000)) 1625 if (!stp_online || !(stp_info.vbits & 0x2000))
1617 return -ENODATA; 1626 return -ENODATA;
@@ -1620,7 +1629,9 @@ static ssize_t stp_dst_offset_show(struct sysdev_class *class, char *buf)
1620 1629
1621static SYSDEV_CLASS_ATTR(dst_offset, 0400, stp_dst_offset_show, NULL); 1630static SYSDEV_CLASS_ATTR(dst_offset, 0400, stp_dst_offset_show, NULL);
1622 1631
1623static ssize_t stp_leap_seconds_show(struct sysdev_class *class, char *buf) 1632static ssize_t stp_leap_seconds_show(struct sysdev_class *class,
1633 struct sysdev_class_attribute *attr,
1634 char *buf)
1624{ 1635{
1625 if (!stp_online || !(stp_info.vbits & 0x8000)) 1636 if (!stp_online || !(stp_info.vbits & 0x8000))
1626 return -ENODATA; 1637 return -ENODATA;
@@ -1629,7 +1640,9 @@ static ssize_t stp_leap_seconds_show(struct sysdev_class *class, char *buf)
1629 1640
1630static SYSDEV_CLASS_ATTR(leap_seconds, 0400, stp_leap_seconds_show, NULL); 1641static SYSDEV_CLASS_ATTR(leap_seconds, 0400, stp_leap_seconds_show, NULL);
1631 1642
1632static ssize_t stp_stratum_show(struct sysdev_class *class, char *buf) 1643static ssize_t stp_stratum_show(struct sysdev_class *class,
1644 struct sysdev_class_attribute *attr,
1645 char *buf)
1633{ 1646{
1634 if (!stp_online) 1647 if (!stp_online)
1635 return -ENODATA; 1648 return -ENODATA;
@@ -1638,7 +1651,9 @@ static ssize_t stp_stratum_show(struct sysdev_class *class, char *buf)
1638 1651
1639static SYSDEV_CLASS_ATTR(stratum, 0400, stp_stratum_show, NULL); 1652static SYSDEV_CLASS_ATTR(stratum, 0400, stp_stratum_show, NULL);
1640 1653
1641static ssize_t stp_time_offset_show(struct sysdev_class *class, char *buf) 1654static ssize_t stp_time_offset_show(struct sysdev_class *class,
1655 struct sysdev_class_attribute *attr,
1656 char *buf)
1642{ 1657{
1643 if (!stp_online || !(stp_info.vbits & 0x0800)) 1658 if (!stp_online || !(stp_info.vbits & 0x0800))
1644 return -ENODATA; 1659 return -ENODATA;
@@ -1647,7 +1662,9 @@ static ssize_t stp_time_offset_show(struct sysdev_class *class, char *buf)
1647 1662
1648static SYSDEV_CLASS_ATTR(time_offset, 0400, stp_time_offset_show, NULL); 1663static SYSDEV_CLASS_ATTR(time_offset, 0400, stp_time_offset_show, NULL);
1649 1664
1650static ssize_t stp_time_zone_offset_show(struct sysdev_class *class, char *buf) 1665static ssize_t stp_time_zone_offset_show(struct sysdev_class *class,
1666 struct sysdev_class_attribute *attr,
1667 char *buf)
1651{ 1668{
1652 if (!stp_online || !(stp_info.vbits & 0x4000)) 1669 if (!stp_online || !(stp_info.vbits & 0x4000))
1653 return -ENODATA; 1670 return -ENODATA;
@@ -1657,7 +1674,9 @@ static ssize_t stp_time_zone_offset_show(struct sysdev_class *class, char *buf)
1657static SYSDEV_CLASS_ATTR(time_zone_offset, 0400, 1674static SYSDEV_CLASS_ATTR(time_zone_offset, 0400,
1658 stp_time_zone_offset_show, NULL); 1675 stp_time_zone_offset_show, NULL);
1659 1676
1660static ssize_t stp_timing_mode_show(struct sysdev_class *class, char *buf) 1677static ssize_t stp_timing_mode_show(struct sysdev_class *class,
1678 struct sysdev_class_attribute *attr,
1679 char *buf)
1661{ 1680{
1662 if (!stp_online) 1681 if (!stp_online)
1663 return -ENODATA; 1682 return -ENODATA;
@@ -1666,7 +1685,9 @@ static ssize_t stp_timing_mode_show(struct sysdev_class *class, char *buf)
1666 1685
1667static SYSDEV_CLASS_ATTR(timing_mode, 0400, stp_timing_mode_show, NULL); 1686static SYSDEV_CLASS_ATTR(timing_mode, 0400, stp_timing_mode_show, NULL);
1668 1687
1669static ssize_t stp_timing_state_show(struct sysdev_class *class, char *buf) 1688static ssize_t stp_timing_state_show(struct sysdev_class *class,
1689 struct sysdev_class_attribute *attr,
1690 char *buf)
1670{ 1691{
1671 if (!stp_online) 1692 if (!stp_online)
1672 return -ENODATA; 1693 return -ENODATA;
@@ -1675,12 +1696,15 @@ static ssize_t stp_timing_state_show(struct sysdev_class *class, char *buf)
1675 1696
1676static SYSDEV_CLASS_ATTR(timing_state, 0400, stp_timing_state_show, NULL); 1697static SYSDEV_CLASS_ATTR(timing_state, 0400, stp_timing_state_show, NULL);
1677 1698
1678static ssize_t stp_online_show(struct sysdev_class *class, char *buf) 1699static ssize_t stp_online_show(struct sysdev_class *class,
1700 struct sysdev_class_attribute *attr,
1701 char *buf)
1679{ 1702{
1680 return sprintf(buf, "%i\n", stp_online); 1703 return sprintf(buf, "%i\n", stp_online);
1681} 1704}
1682 1705
1683static ssize_t stp_online_store(struct sysdev_class *class, 1706static ssize_t stp_online_store(struct sysdev_class *class,
1707 struct sysdev_class_attribute *attr,
1684 const char *buf, size_t count) 1708 const char *buf, size_t count)
1685{ 1709{
1686 unsigned int value; 1710 unsigned int value;
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 14ef6f05e432..bcef00766a64 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -37,7 +37,8 @@ struct tl_cpu {
37}; 37};
38 38
39struct tl_container { 39struct tl_container {
40 unsigned char reserved[8]; 40 unsigned char reserved[7];
41 unsigned char id;
41}; 42};
42 43
43union tl_entry { 44union tl_entry {
@@ -58,6 +59,7 @@ struct tl_info {
58 59
59struct core_info { 60struct core_info {
60 struct core_info *next; 61 struct core_info *next;
62 unsigned char id;
61 cpumask_t mask; 63 cpumask_t mask;
62}; 64};
63 65
@@ -73,6 +75,7 @@ static DECLARE_WORK(topology_work, topology_work_fn);
73static DEFINE_SPINLOCK(topology_lock); 75static DEFINE_SPINLOCK(topology_lock);
74 76
75cpumask_t cpu_core_map[NR_CPUS]; 77cpumask_t cpu_core_map[NR_CPUS];
78unsigned char cpu_core_id[NR_CPUS];
76 79
77static cpumask_t cpu_coregroup_map(unsigned int cpu) 80static cpumask_t cpu_coregroup_map(unsigned int cpu)
78{ 81{
@@ -116,6 +119,7 @@ static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core)
116 for_each_present_cpu(lcpu) { 119 for_each_present_cpu(lcpu) {
117 if (cpu_logical_map(lcpu) == rcpu) { 120 if (cpu_logical_map(lcpu) == rcpu) {
118 cpu_set(lcpu, core->mask); 121 cpu_set(lcpu, core->mask);
122 cpu_core_id[lcpu] = core->id;
119 smp_cpu_polarization[lcpu] = tl_cpu->pp; 123 smp_cpu_polarization[lcpu] = tl_cpu->pp;
120 } 124 }
121 } 125 }
@@ -158,6 +162,7 @@ static void tl_to_cores(struct tl_info *info)
158 break; 162 break;
159 case 1: 163 case 1:
160 core = core->next; 164 core = core->next;
165 core->id = tle->container.id;
161 break; 166 break;
162 case 0: 167 case 0:
163 add_cpus_to_core(&tle->cpu, core); 168 add_cpus_to_core(&tle->cpu, core);
@@ -165,10 +170,11 @@ static void tl_to_cores(struct tl_info *info)
165 default: 170 default:
166 clear_cores(); 171 clear_cores();
167 machine_has_topology = 0; 172 machine_has_topology = 0;
168 return; 173 goto out;
169 } 174 }
170 tle = next_tle(tle); 175 tle = next_tle(tle);
171 } 176 }
177out:
172 spin_unlock_irq(&topology_lock); 178 spin_unlock_irq(&topology_lock);
173} 179}
174 180
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 6e7ad63854c0..5d8f0f3d0250 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -46,13 +46,7 @@
46 46
47pgm_check_handler_t *pgm_check_table[128]; 47pgm_check_handler_t *pgm_check_table[128];
48 48
49#ifdef CONFIG_SYSCTL 49int show_unhandled_signals;
50#ifdef CONFIG_PROCESS_DEBUG
51int sysctl_userprocess_debug = 1;
52#else
53int sysctl_userprocess_debug = 0;
54#endif
55#endif
56 50
57extern pgm_check_handler_t do_protection_exception; 51extern pgm_check_handler_t do_protection_exception;
58extern pgm_check_handler_t do_dat_exception; 52extern pgm_check_handler_t do_dat_exception;
@@ -315,18 +309,19 @@ void die(const char * str, struct pt_regs * regs, long err)
315 do_exit(SIGSEGV); 309 do_exit(SIGSEGV);
316} 310}
317 311
318static void inline 312static void inline report_user_fault(struct pt_regs *regs, long int_code,
319report_user_fault(long interruption_code, struct pt_regs *regs) 313 int signr)
320{ 314{
321#if defined(CONFIG_SYSCTL) 315 if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
322 if (!sysctl_userprocess_debug)
323 return; 316 return;
324#endif 317 if (!unhandled_signal(current, signr))
325#if defined(CONFIG_SYSCTL) || defined(CONFIG_PROCESS_DEBUG) 318 return;
326 printk("User process fault: interruption code 0x%lX\n", 319 if (!printk_ratelimit())
327 interruption_code); 320 return;
321 printk("User process fault: interruption code 0x%lX ", int_code);
322 print_vma_addr("in ", regs->psw.addr & PSW_ADDR_INSN);
323 printk("\n");
328 show_regs(regs); 324 show_regs(regs);
329#endif
330} 325}
331 326
332int is_valid_bugaddr(unsigned long addr) 327int is_valid_bugaddr(unsigned long addr)
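
Editor's note: the rewritten report_user_fault() above prints a dump only when three gates pass: the global show_unhandled_signals switch (init is exempt), the signal really being unhandled by the task, and a printk rate limit. A user-space sketch of that decision chain, with the kernel predicates reduced to stubs and a deliberately crude one-per-second rate limiter:

    #include <stdio.h>
    #include <time.h>

    static int show_unhandled_signals = 1;      /* sysctl-controlled in the kernel */

    /* Stubs for the kernel predicates used above. */
    static int task_is_init(void)           { return 0; }
    static int signal_is_unhandled(int sig) { (void)sig; return 1; }

    /* Crude stand-in for printk_ratelimit(): at most one report per second. */
    static int ratelimit_ok(void)
    {
        static time_t last;
        time_t now = time(NULL);
        if (now == last)
            return 0;
        last = now;
        return 1;
    }

    static void report_user_fault_sketch(long int_code, int signr)
    {
        if (!task_is_init() && !show_unhandled_signals)
            return;
        if (!signal_is_unhandled(signr))
            return;
        if (!ratelimit_ok())
            return;
        printf("User process fault: interruption code 0x%lX (signal %d)\n",
               int_code, signr);
    }

    int main(void)
    {
        report_user_fault_sketch(0x10, 11);   /* printed */
        report_user_fault_sketch(0x10, 11);   /* suppressed by the rate limit */
        return 0;
    }
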
@@ -354,7 +349,7 @@ static void __kprobes inline do_trap(long interruption_code, int signr,
354 349
355 tsk->thread.trap_no = interruption_code & 0xffff; 350 tsk->thread.trap_no = interruption_code & 0xffff;
356 force_sig_info(signr, info, tsk); 351 force_sig_info(signr, info, tsk);
357 report_user_fault(interruption_code, regs); 352 report_user_fault(regs, interruption_code, signr);
358 } else { 353 } else {
359 const struct exception_table_entry *fixup; 354 const struct exception_table_entry *fixup;
360 fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN); 355 fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
@@ -390,8 +385,8 @@ static void default_trap_handler(struct pt_regs * regs, long interruption_code)
390{ 385{
391 if (regs->psw.mask & PSW_MASK_PSTATE) { 386 if (regs->psw.mask & PSW_MASK_PSTATE) {
392 local_irq_enable(); 387 local_irq_enable();
388 report_user_fault(regs, interruption_code, SIGSEGV);
393 do_exit(SIGSEGV); 389 do_exit(SIGSEGV);
394 report_user_fault(interruption_code, regs);
395 } else 390 } else
396 die("Unknown program exception", regs, interruption_code); 391 die("Unknown program exception", regs, interruption_code);
397} 392}
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 5f99e66c51c3..6b83870507d5 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -23,6 +23,7 @@
23#include <linux/security.h> 23#include <linux/security.h>
24#include <linux/bootmem.h> 24#include <linux/bootmem.h>
25#include <linux/compat.h> 25#include <linux/compat.h>
26#include <asm/asm-offsets.h>
26#include <asm/pgtable.h> 27#include <asm/pgtable.h>
27#include <asm/system.h> 28#include <asm/system.h>
28#include <asm/processor.h> 29#include <asm/processor.h>
@@ -101,11 +102,7 @@ static void vdso_init_per_cpu_data(int cpu, struct vdso_per_cpu_data *vpcd)
101/* 102/*
102 * Allocate/free per cpu vdso data. 103 * Allocate/free per cpu vdso data.
103 */ 104 */
104#ifdef CONFIG_64BIT
105#define SEGMENT_ORDER 2 105#define SEGMENT_ORDER 2
106#else
107#define SEGMENT_ORDER 1
108#endif
109 106
110int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore) 107int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore)
111{ 108{
diff --git a/arch/s390/kernel/vdso32/clock_gettime.S b/arch/s390/kernel/vdso32/clock_gettime.S
index 4a98909a8310..969643954273 100644
--- a/arch/s390/kernel/vdso32/clock_gettime.S
+++ b/arch/s390/kernel/vdso32/clock_gettime.S
@@ -38,13 +38,13 @@ __kernel_clock_gettime:
38 sl %r1,__VDSO_XTIME_STAMP+4(%r5) 38 sl %r1,__VDSO_XTIME_STAMP+4(%r5)
39 brc 3,2f 39 brc 3,2f
40 ahi %r0,-1 40 ahi %r0,-1
412: mhi %r0,1000 /* cyc2ns(clock,cycle_delta) */ 412: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */
42 lr %r2,%r0 42 lr %r2,%r0
43 lhi %r0,1000 43 l %r0,__VDSO_NTP_MULT(%r5)
44 ltr %r1,%r1 44 ltr %r1,%r1
45 mr %r0,%r0 45 mr %r0,%r0
46 jnm 3f 46 jnm 3f
47 ahi %r0,1000 47 a %r0,__VDSO_NTP_MULT(%r5)
483: alr %r0,%r2 483: alr %r0,%r2
49 srdl %r0,12 49 srdl %r0,12
50 al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */ 50 al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */
@@ -86,13 +86,13 @@ __kernel_clock_gettime:
86 sl %r1,__VDSO_XTIME_STAMP+4(%r5) 86 sl %r1,__VDSO_XTIME_STAMP+4(%r5)
87 brc 3,12f 87 brc 3,12f
88 ahi %r0,-1 88 ahi %r0,-1
8912: mhi %r0,1000 /* cyc2ns(clock,cycle_delta) */ 8912: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */
90 lr %r2,%r0 90 lr %r2,%r0
91 lhi %r0,1000 91 l %r0,__VDSO_NTP_MULT(%r5)
92 ltr %r1,%r1 92 ltr %r1,%r1
93 mr %r0,%r0 93 mr %r0,%r0
94 jnm 13f 94 jnm 13f
95 ahi %r0,1000 95 a %r0,__VDSO_NTP_MULT(%r5)
9613: alr %r0,%r2 9613: alr %r0,%r2
97 srdl %r0,12 97 srdl %r0,12
98 al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */ 98 al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */
diff --git a/arch/s390/kernel/vdso32/gettimeofday.S b/arch/s390/kernel/vdso32/gettimeofday.S
index ad8acfc949fb..2d3633175e3b 100644
--- a/arch/s390/kernel/vdso32/gettimeofday.S
+++ b/arch/s390/kernel/vdso32/gettimeofday.S
@@ -35,13 +35,13 @@ __kernel_gettimeofday:
35 sl %r1,__VDSO_XTIME_STAMP+4(%r5) 35 sl %r1,__VDSO_XTIME_STAMP+4(%r5)
36 brc 3,3f 36 brc 3,3f
37 ahi %r0,-1 37 ahi %r0,-1
383: mhi %r0,1000 /* cyc2ns(clock,cycle_delta) */ 383: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */
39 st %r0,24(%r15) 39 st %r0,24(%r15)
40 lhi %r0,1000 40 l %r0,__VDSO_NTP_MULT(%r5)
41 ltr %r1,%r1 41 ltr %r1,%r1
42 mr %r0,%r0 42 mr %r0,%r0
43 jnm 4f 43 jnm 4f
44 ahi %r0,1000 44 a %r0,__VDSO_NTP_MULT(%r5)
454: al %r0,24(%r15) 454: al %r0,24(%r15)
46 srdl %r0,12 46 srdl %r0,12
47 al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */ 47 al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */
diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S
index 49106c6e6f88..f40467884a03 100644
--- a/arch/s390/kernel/vdso64/clock_gettime.S
+++ b/arch/s390/kernel/vdso64/clock_gettime.S
@@ -36,7 +36,7 @@ __kernel_clock_gettime:
36 stck 48(%r15) /* Store TOD clock */ 36 stck 48(%r15) /* Store TOD clock */
37 lg %r1,48(%r15) 37 lg %r1,48(%r15)
38 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ 38 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
39 mghi %r1,1000 39 msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */
40 srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */ 40 srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */
41 alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */ 41 alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */
42 lg %r0,__VDSO_XTIME_SEC(%r5) 42 lg %r0,__VDSO_XTIME_SEC(%r5)
@@ -64,7 +64,7 @@ __kernel_clock_gettime:
64 stck 48(%r15) /* Store TOD clock */ 64 stck 48(%r15) /* Store TOD clock */
65 lg %r1,48(%r15) 65 lg %r1,48(%r15)
66 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ 66 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
67 mghi %r1,1000 67 msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */
68 srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */ 68 srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */
69 alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */ 69 alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */
70 lg %r0,__VDSO_XTIME_SEC(%r5) 70 lg %r0,__VDSO_XTIME_SEC(%r5)
diff --git a/arch/s390/kernel/vdso64/gettimeofday.S b/arch/s390/kernel/vdso64/gettimeofday.S
index f873e75634e1..36ee674722ec 100644
--- a/arch/s390/kernel/vdso64/gettimeofday.S
+++ b/arch/s390/kernel/vdso64/gettimeofday.S
@@ -31,7 +31,7 @@ __kernel_gettimeofday:
31 stck 48(%r15) /* Store TOD clock */ 31 stck 48(%r15) /* Store TOD clock */
32 lg %r1,48(%r15) 32 lg %r1,48(%r15)
33 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ 33 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
34 mghi %r1,1000 34 msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */
35 srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */ 35 srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */
36 alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime.tv_nsec */ 36 alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime.tv_nsec */
37 lg %r0,__VDSO_XTIME_SEC(%r5) /* xtime.tv_sec */ 37 lg %r0,__VDSO_XTIME_SEC(%r5) /* xtime.tv_sec */
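
Editor's note: all of the vdso hunks above replace the hard-wired factor of 1000 (the removed mhi/mghi instructions) with a multiplication by the NTP-adjusted factor exported as ntp_mult, i.e. nanoseconds = (TOD delta * mult) >> 12. A tiny arithmetic sketch of the two variants:

    #include <stdio.h>

    /* Old fixed scaling: 1 TOD unit = 2^-12 us, so *1000 >> 12 gives nanoseconds. */
    static unsigned long long cyc2ns_fixed(unsigned long long tod_delta)
    {
        return (tod_delta * 1000) >> 12;
    }

    /* New scaling: the multiplier comes from the timekeeping code (ntp_mult in
     * vdso_data) and already folds in the NTP frequency correction. */
    static unsigned long long cyc2ns_ntp(unsigned long long tod_delta, unsigned mult)
    {
        return (tod_delta * mult) >> 12;
    }

    int main(void)
    {
        unsigned long long delta = 4096;   /* exactly 1 us worth of TOD units */

        printf("fixed: %llu ns\n", cyc2ns_fixed(delta));        /* 1000 ns */
        printf("ntp:   %llu ns\n", cyc2ns_ntp(delta, 1000));    /* same with mult 1000 */
        printf("ntp:   %llu ns\n", cyc2ns_ntp(delta, 1001));    /* slightly adjusted */
        return 0;
    }
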
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index b59a812a010e..3479f1b0d4e0 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -121,32 +121,35 @@ void account_system_vtime(struct task_struct *tsk)
121} 121}
122EXPORT_SYMBOL_GPL(account_system_vtime); 122EXPORT_SYMBOL_GPL(account_system_vtime);
123 123
124void vtime_start_cpu(void) 124void vtime_start_cpu(__u64 int_clock, __u64 enter_timer)
125{ 125{
126 struct s390_idle_data *idle = &__get_cpu_var(s390_idle); 126 struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
127 struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer); 127 struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer);
128 __u64 idle_time, expires; 128 __u64 idle_time, expires;
129 129
130 if (idle->idle_enter == 0ULL)
131 return;
132
130 /* Account time spent with enabled wait psw loaded as idle time. */ 133 /* Account time spent with enabled wait psw loaded as idle time. */
131 idle_time = S390_lowcore.int_clock - idle->idle_enter; 134 idle_time = int_clock - idle->idle_enter;
132 account_idle_time(idle_time); 135 account_idle_time(idle_time);
133 S390_lowcore.steal_timer += 136 S390_lowcore.steal_timer +=
134 idle->idle_enter - S390_lowcore.last_update_clock; 137 idle->idle_enter - S390_lowcore.last_update_clock;
135 S390_lowcore.last_update_clock = S390_lowcore.int_clock; 138 S390_lowcore.last_update_clock = int_clock;
136 139
137 /* Account system time spent going idle. */ 140 /* Account system time spent going idle. */
138 S390_lowcore.system_timer += S390_lowcore.last_update_timer - vq->idle; 141 S390_lowcore.system_timer += S390_lowcore.last_update_timer - vq->idle;
139 S390_lowcore.last_update_timer = S390_lowcore.async_enter_timer; 142 S390_lowcore.last_update_timer = enter_timer;
140 143
141 /* Restart vtime CPU timer */ 144 /* Restart vtime CPU timer */
142 if (vq->do_spt) { 145 if (vq->do_spt) {
143 /* Program old expire value but first save progress. */ 146 /* Program old expire value but first save progress. */
144 expires = vq->idle - S390_lowcore.async_enter_timer; 147 expires = vq->idle - enter_timer;
145 expires += get_vtimer(); 148 expires += get_vtimer();
146 set_vtimer(expires); 149 set_vtimer(expires);
147 } else { 150 } else {
148 /* Don't account the CPU timer delta while the cpu was idle. */ 151 /* Don't account the CPU timer delta while the cpu was idle. */
149 vq->elapsed -= vq->idle - S390_lowcore.async_enter_timer; 152 vq->elapsed -= vq->idle - enter_timer;
150 } 153 }
151 154
152 idle->sequence++; 155 idle->sequence++;
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
index 6ee55ae84ce2..a7251580891c 100644
--- a/arch/s390/kvm/Kconfig
+++ b/arch/s390/kvm/Kconfig
@@ -35,6 +35,7 @@ config KVM
35 35
36# OK, it's a little counter-intuitive to do this, but it puts it neatly under 36# OK, it's a little counter-intuitive to do this, but it puts it neatly under
37# the virtualization menu. 37# the virtualization menu.
38source drivers/vhost/Kconfig
38source drivers/virtio/Kconfig 39source drivers/virtio/Kconfig
39 40
40endif # VIRTUALIZATION 41endif # VIRTUALIZATION
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index 8300309698fa..9e4c84187cf5 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -39,7 +39,7 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
39 vcpu->run->s390_reset_flags = 0; 39 vcpu->run->s390_reset_flags = 0;
40 break; 40 break;
41 default: 41 default:
42 return -ENOTSUPP; 42 return -EOPNOTSUPP;
43 } 43 }
44 44
45 atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); 45 atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
@@ -62,6 +62,6 @@ int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
62 case 0x308: 62 case 0x308:
63 return __diag_ipl_functions(vcpu); 63 return __diag_ipl_functions(vcpu);
64 default: 64 default:
65 return -ENOTSUPP; 65 return -EOPNOTSUPP;
66 } 66 }
67} 67}
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index b40096494e46..3ddc30895e31 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -32,7 +32,7 @@ static int handle_lctlg(struct kvm_vcpu *vcpu)
32 32
33 vcpu->stat.instruction_lctlg++; 33 vcpu->stat.instruction_lctlg++;
34 if ((vcpu->arch.sie_block->ipb & 0xff) != 0x2f) 34 if ((vcpu->arch.sie_block->ipb & 0xff) != 0x2f)
35 return -ENOTSUPP; 35 return -EOPNOTSUPP;
36 36
37 useraddr = disp2; 37 useraddr = disp2;
38 if (base2) 38 if (base2)
@@ -138,7 +138,7 @@ static int handle_stop(struct kvm_vcpu *vcpu)
138 rc = __kvm_s390_vcpu_store_status(vcpu, 138 rc = __kvm_s390_vcpu_store_status(vcpu,
139 KVM_S390_STORE_STATUS_NOADDR); 139 KVM_S390_STORE_STATUS_NOADDR);
140 if (rc >= 0) 140 if (rc >= 0)
141 rc = -ENOTSUPP; 141 rc = -EOPNOTSUPP;
142 } 142 }
143 143
144 if (vcpu->arch.local_int.action_bits & ACTION_RELOADVCPU_ON_STOP) { 144 if (vcpu->arch.local_int.action_bits & ACTION_RELOADVCPU_ON_STOP) {
@@ -150,7 +150,7 @@ static int handle_stop(struct kvm_vcpu *vcpu)
150 if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) { 150 if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) {
151 vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP; 151 vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP;
152 VCPU_EVENT(vcpu, 3, "%s", "cpu stopped"); 152 VCPU_EVENT(vcpu, 3, "%s", "cpu stopped");
153 rc = -ENOTSUPP; 153 rc = -EOPNOTSUPP;
154 } 154 }
155 155
156 spin_unlock_bh(&vcpu->arch.local_int.lock); 156 spin_unlock_bh(&vcpu->arch.local_int.lock);
@@ -171,9 +171,9 @@ static int handle_validity(struct kvm_vcpu *vcpu)
171 2*PAGE_SIZE); 171 2*PAGE_SIZE);
172 if (rc) 172 if (rc)
173 /* user will receive sigsegv, exit to user */ 173 /* user will receive sigsegv, exit to user */
174 rc = -ENOTSUPP; 174 rc = -EOPNOTSUPP;
175 } else 175 } else
176 rc = -ENOTSUPP; 176 rc = -EOPNOTSUPP;
177 177
178 if (rc) 178 if (rc)
179 VCPU_EVENT(vcpu, 2, "unhandled validity intercept code %d", 179 VCPU_EVENT(vcpu, 2, "unhandled validity intercept code %d",
@@ -189,7 +189,7 @@ static int handle_instruction(struct kvm_vcpu *vcpu)
189 handler = instruction_handlers[vcpu->arch.sie_block->ipa >> 8]; 189 handler = instruction_handlers[vcpu->arch.sie_block->ipa >> 8];
190 if (handler) 190 if (handler)
191 return handler(vcpu); 191 return handler(vcpu);
192 return -ENOTSUPP; 192 return -EOPNOTSUPP;
193} 193}
194 194
195static int handle_prog(struct kvm_vcpu *vcpu) 195static int handle_prog(struct kvm_vcpu *vcpu)
@@ -206,7 +206,7 @@ static int handle_instruction_and_prog(struct kvm_vcpu *vcpu)
206 rc = handle_instruction(vcpu); 206 rc = handle_instruction(vcpu);
207 rc2 = handle_prog(vcpu); 207 rc2 = handle_prog(vcpu);
208 208
209 if (rc == -ENOTSUPP) 209 if (rc == -EOPNOTSUPP)
210 vcpu->arch.sie_block->icptcode = 0x04; 210 vcpu->arch.sie_block->icptcode = 0x04;
211 if (rc) 211 if (rc)
212 return rc; 212 return rc;
@@ -231,9 +231,9 @@ int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
231 u8 code = vcpu->arch.sie_block->icptcode; 231 u8 code = vcpu->arch.sie_block->icptcode;
232 232
233 if (code & 3 || (code >> 2) >= ARRAY_SIZE(intercept_funcs)) 233 if (code & 3 || (code >> 2) >= ARRAY_SIZE(intercept_funcs))
234 return -ENOTSUPP; 234 return -EOPNOTSUPP;
235 func = intercept_funcs[code >> 2]; 235 func = intercept_funcs[code >> 2];
236 if (func) 236 if (func)
237 return func(vcpu); 237 return func(vcpu);
238 return -ENOTSUPP; 238 return -EOPNOTSUPP;
239} 239}
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 43486c2408e1..35c21bf910c5 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -10,12 +10,13 @@
10 * Author(s): Carsten Otte <cotte@de.ibm.com> 10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 */ 11 */
12 12
13#include <asm/lowcore.h>
14#include <asm/uaccess.h>
15#include <linux/hrtimer.h>
16#include <linux/interrupt.h> 13#include <linux/interrupt.h>
17#include <linux/kvm_host.h> 14#include <linux/kvm_host.h>
15#include <linux/hrtimer.h>
18#include <linux/signal.h> 16#include <linux/signal.h>
17#include <linux/slab.h>
18#include <asm/asm-offsets.h>
19#include <asm/uaccess.h>
19#include "kvm-s390.h" 20#include "kvm-s390.h"
20#include "gaccess.h" 21#include "gaccess.h"
21 22
@@ -187,8 +188,8 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
187 if (rc == -EFAULT) 188 if (rc == -EFAULT)
188 exception = 1; 189 exception = 1;
189 190
190 rc = put_guest_u64(vcpu, __LC_PFAULT_INTPARM, 191 rc = put_guest_u64(vcpu, __LC_EXT_PARAMS2,
191 inti->ext.ext_params2); 192 inti->ext.ext_params2);
192 if (rc == -EFAULT) 193 if (rc == -EFAULT)
193 exception = 1; 194 exception = 1;
194 break; 195 break;
@@ -342,7 +343,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
342 if (psw_interrupts_disabled(vcpu)) { 343 if (psw_interrupts_disabled(vcpu)) {
343 VCPU_EVENT(vcpu, 3, "%s", "disabled wait"); 344 VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
344 __unset_cpu_idle(vcpu); 345 __unset_cpu_idle(vcpu);
345 return -ENOTSUPP; /* disabled wait */ 346 return -EOPNOTSUPP; /* disabled wait */
346 } 347 }
347 348
348 if (psw_extint_disabled(vcpu) || 349 if (psw_extint_disabled(vcpu) ||
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index f8bcaefd7d34..ae3705816878 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -23,6 +23,7 @@
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/timer.h> 25#include <linux/timer.h>
26#include <asm/asm-offsets.h>
26#include <asm/lowcore.h> 27#include <asm/lowcore.h>
27#include <asm/pgtable.h> 28#include <asm/pgtable.h>
28#include <asm/nmi.h> 29#include <asm/nmi.h>
@@ -241,6 +242,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
241 kvm_free_physmem(kvm); 242 kvm_free_physmem(kvm);
242 free_page((unsigned long)(kvm->arch.sca)); 243 free_page((unsigned long)(kvm->arch.sca));
243 debug_unregister(kvm->arch.dbf); 244 debug_unregister(kvm->arch.dbf);
245 cleanup_srcu_struct(&kvm->srcu);
244 kfree(kvm); 246 kfree(kvm);
245} 247}
246 248
@@ -339,11 +341,13 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
339 341
340 rc = kvm_vcpu_init(vcpu, kvm, id); 342 rc = kvm_vcpu_init(vcpu, kvm, id);
341 if (rc) 343 if (rc)
342 goto out_free_cpu; 344 goto out_free_sie_block;
343 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu, 345 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
344 vcpu->arch.sie_block); 346 vcpu->arch.sie_block);
345 347
346 return vcpu; 348 return vcpu;
349out_free_sie_block:
350 free_page((unsigned long)(vcpu->arch.sie_block));
347out_free_cpu: 351out_free_cpu:
348 kfree(vcpu); 352 kfree(vcpu);
349out_nomem: 353out_nomem:
@@ -543,7 +547,7 @@ rerun_vcpu:
543 rc = -EINTR; 547 rc = -EINTR;
544 } 548 }
545 549
546 if (rc == -ENOTSUPP) { 550 if (rc == -EOPNOTSUPP) {
547 /* intercept cannot be handled in-kernel, prepare kvm-run */ 551 /* intercept cannot be handled in-kernel, prepare kvm-run */
548 kvm_run->exit_reason = KVM_EXIT_S390_SIEIC; 552 kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
549 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode; 553 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
@@ -603,45 +607,45 @@ int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
603 } else 607 } else
604 prefix = 0; 608 prefix = 0;
605 609
606 if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, fp_regs), 610 if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
607 vcpu->arch.guest_fpregs.fprs, 128, prefix)) 611 vcpu->arch.guest_fpregs.fprs, 128, prefix))
608 return -EFAULT; 612 return -EFAULT;
609 613
610 if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, gp_regs), 614 if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
611 vcpu->arch.guest_gprs, 128, prefix)) 615 vcpu->arch.guest_gprs, 128, prefix))
612 return -EFAULT; 616 return -EFAULT;
613 617
614 if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, psw), 618 if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
615 &vcpu->arch.sie_block->gpsw, 16, prefix)) 619 &vcpu->arch.sie_block->gpsw, 16, prefix))
616 return -EFAULT; 620 return -EFAULT;
617 621
618 if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, pref_reg), 622 if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
619 &vcpu->arch.sie_block->prefix, 4, prefix)) 623 &vcpu->arch.sie_block->prefix, 4, prefix))
620 return -EFAULT; 624 return -EFAULT;
621 625
622 if (__guestcopy(vcpu, 626 if (__guestcopy(vcpu,
623 addr + offsetof(struct save_area_s390x, fp_ctrl_reg), 627 addr + offsetof(struct save_area, fp_ctrl_reg),
624 &vcpu->arch.guest_fpregs.fpc, 4, prefix)) 628 &vcpu->arch.guest_fpregs.fpc, 4, prefix))
625 return -EFAULT; 629 return -EFAULT;
626 630
627 if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, tod_reg), 631 if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
628 &vcpu->arch.sie_block->todpr, 4, prefix)) 632 &vcpu->arch.sie_block->todpr, 4, prefix))
629 return -EFAULT; 633 return -EFAULT;
630 634
631 if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, timer), 635 if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
632 &vcpu->arch.sie_block->cputm, 8, prefix)) 636 &vcpu->arch.sie_block->cputm, 8, prefix))
633 return -EFAULT; 637 return -EFAULT;
634 638
635 if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, clk_cmp), 639 if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
636 &vcpu->arch.sie_block->ckc, 8, prefix)) 640 &vcpu->arch.sie_block->ckc, 8, prefix))
637 return -EFAULT; 641 return -EFAULT;
638 642
639 if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, acc_regs), 643 if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
640 &vcpu->arch.guest_acrs, 64, prefix)) 644 &vcpu->arch.guest_acrs, 64, prefix))
641 return -EFAULT; 645 return -EFAULT;
642 646
643 if (__guestcopy(vcpu, 647 if (__guestcopy(vcpu,
644 addr + offsetof(struct save_area_s390x, ctrl_regs), 648 addr + offsetof(struct save_area, ctrl_regs),
645 &vcpu->arch.sie_block->gcr, 128, prefix)) 649 &vcpu->arch.sie_block->gcr, 128, prefix))
646 return -EFAULT; 650 return -EFAULT;
647 return 0; 651 return 0;
@@ -689,14 +693,12 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
689} 693}
690 694
691/* Section: memory related */ 695/* Section: memory related */
692int kvm_arch_set_memory_region(struct kvm *kvm, 696int kvm_arch_prepare_memory_region(struct kvm *kvm,
693 struct kvm_userspace_memory_region *mem, 697 struct kvm_memory_slot *memslot,
694 struct kvm_memory_slot old, 698 struct kvm_memory_slot old,
695 int user_alloc) 699 struct kvm_userspace_memory_region *mem,
700 int user_alloc)
696{ 701{
697 int i;
698 struct kvm_vcpu *vcpu;
699
700 /* A few sanity checks. We can have exactly one memory slot which has 702 /* A few sanity checks. We can have exactly one memory slot which has
701 to start at guest virtual zero and which has to be located at a 703 to start at guest virtual zero and which has to be located at a
702 page boundary in userland and which has to end at a page boundary. 704 page boundary in userland and which has to end at a page boundary.
@@ -719,14 +721,23 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
719 if (!user_alloc) 721 if (!user_alloc)
720 return -EINVAL; 722 return -EINVAL;
721 723
724 return 0;
725}
726
727void kvm_arch_commit_memory_region(struct kvm *kvm,
728 struct kvm_userspace_memory_region *mem,
729 struct kvm_memory_slot old,
730 int user_alloc)
731{
732 int i;
733 struct kvm_vcpu *vcpu;
734
722 /* request update of sie control block for all available vcpus */ 735 /* request update of sie control block for all available vcpus */
723 kvm_for_each_vcpu(i, vcpu, kvm) { 736 kvm_for_each_vcpu(i, vcpu, kvm) {
724 if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests)) 737 if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
725 continue; 738 continue;
726 kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP); 739 kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
727 } 740 }
728
729 return 0;
730} 741}
731 742
732void kvm_arch_flush_shadow(struct kvm *kvm) 743void kvm_arch_flush_shadow(struct kvm *kvm)
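
Editor's note: the hunk above splits the old single-step kvm_arch_set_memory_region() into a prepare phase (validation, may fail) and a commit phase (applies the side effects, cannot fail). The general shape of such a two-phase update, reduced to a generic sketch with hypothetical names and simplified checks drawn from the sanity-check comment above:

    #include <stdio.h>

    struct mem_slot { unsigned long guest_addr, size, host_addr; };

    /* Phase 1: validate only; nothing is modified, so failing here is cheap. */
    static int region_prepare(const struct mem_slot *slot)
    {
        if (slot->guest_addr != 0)
            return -1;                      /* slot must start at guest address 0 */
        if (slot->size == 0 || (slot->size & 0xfff))
            return -1;                      /* must be page aligned */
        return 0;
    }

    /* Phase 2: apply the change; by design this step cannot fail.
     * The real commit additionally kicks every vcpu to reload its SIE block. */
    static void region_commit(struct mem_slot *cur, const struct mem_slot *slot)
    {
        *cur = *slot;
    }

    int main(void)
    {
        struct mem_slot current_slot = { 0 };
        struct mem_slot update = { .guest_addr = 0, .size = 1 << 20,
                                   .host_addr = 0x10000 };

        if (region_prepare(&update) == 0)
            region_commit(&current_slot, &update);
        printf("slot size now %lu bytes\n", current_slot.size);
        return 0;
    }
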
@@ -741,7 +752,7 @@ gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
741static int __init kvm_s390_init(void) 752static int __init kvm_s390_init(void)
742{ 753{
743 int ret; 754 int ret;
744 ret = kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE); 755 ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
745 if (ret) 756 if (ret)
746 return ret; 757 return ret;
747 758
@@ -750,7 +761,7 @@ static int __init kvm_s390_init(void)
750 * to hold the maximum amount of facilites. On the other hand, we 761 * to hold the maximum amount of facilites. On the other hand, we
751 * only set facilities that are known to work in KVM. 762 * only set facilities that are known to work in KVM.
752 */ 763 */
753 facilities = (unsigned long long *) get_zeroed_page(GFP_DMA); 764 facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
754 if (!facilities) { 765 if (!facilities) {
755 kvm_exit(); 766 kvm_exit();
756 return -ENOMEM; 767 return -ENOMEM;
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 06cce8285ba0..cfa9d1777457 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -67,10 +67,14 @@ static inline long kvm_s390_vcpu_get_memsize(struct kvm_vcpu *vcpu)
67 67
68static inline void kvm_s390_vcpu_set_mem(struct kvm_vcpu *vcpu) 68static inline void kvm_s390_vcpu_set_mem(struct kvm_vcpu *vcpu)
69{ 69{
70 int idx;
70 struct kvm_memory_slot *mem; 71 struct kvm_memory_slot *mem;
72 struct kvm_memslots *memslots;
71 73
72 down_read(&vcpu->kvm->slots_lock); 74 idx = srcu_read_lock(&vcpu->kvm->srcu);
73 mem = &vcpu->kvm->memslots[0]; 75 memslots = kvm_memslots(vcpu->kvm);
76
77 mem = &memslots->memslots[0];
74 78
75 vcpu->arch.sie_block->gmsor = mem->userspace_addr; 79 vcpu->arch.sie_block->gmsor = mem->userspace_addr;
76 vcpu->arch.sie_block->gmslm = 80 vcpu->arch.sie_block->gmslm =
@@ -78,7 +82,7 @@ static inline void kvm_s390_vcpu_set_mem(struct kvm_vcpu *vcpu)
78 (mem->npages << PAGE_SHIFT) + 82 (mem->npages << PAGE_SHIFT) +
79 VIRTIODESCSPACE - 1ul; 83 VIRTIODESCSPACE - 1ul;
80 84
81 up_read(&vcpu->kvm->slots_lock); 85 srcu_read_unlock(&vcpu->kvm->srcu, idx);
82} 86}
83 87
84/* implemented in priv.c */ 88/* implemented in priv.c */
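The slots_lock to SRCU conversion above follows one reader pattern: enter an SRCU read-side critical section, fetch the RCU-published slot array through kvm_memslots(), and leave the section when done. A minimal sketch with an assumed helper name; only the locking calls and the fields it touches mirror the interface used in the hunk.

	static unsigned long first_slot_base_sketch(struct kvm *kvm)
	{
		struct kvm_memslots *slots;
		unsigned long base;
		int idx;

		idx = srcu_read_lock(&kvm->srcu);	/* sleepable read side */
		slots = kvm_memslots(kvm);		/* RCU-published array */
		base = slots->memslots[0].userspace_addr;
		srcu_read_unlock(&kvm->srcu, idx);
		return base;
	}
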
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index d426aac8095d..44205507717c 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -12,6 +12,7 @@
12 */ 12 */
13 13
14#include <linux/kvm.h> 14#include <linux/kvm.h>
15#include <linux/gfp.h>
15#include <linux/errno.h> 16#include <linux/errno.h>
16#include <asm/current.h> 17#include <asm/current.h>
17#include <asm/debug.h> 18#include <asm/debug.h>
@@ -323,5 +324,5 @@ int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
323 else 324 else
324 return handler(vcpu); 325 return handler(vcpu);
325 } 326 }
326 return -ENOTSUPP; 327 return -EOPNOTSUPP;
327} 328}
diff --git a/arch/s390/kvm/sie64a.S b/arch/s390/kvm/sie64a.S
index 934fd6a885f6..7e9d30d567b0 100644
--- a/arch/s390/kvm/sie64a.S
+++ b/arch/s390/kvm/sie64a.S
@@ -1,20 +1,58 @@
1/* 1/*
2 * sie64a.S - low level sie call 2 * sie64a.S - low level sie call
3 * 3 *
4 * Copyright IBM Corp. 2008 4 * Copyright IBM Corp. 2008,2010
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only) 7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation. 8 * as published by the Free Software Foundation.
9 * 9 *
10 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> 10 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
11 * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
11 */ 12 */
12 13
13#include <linux/errno.h> 14#include <linux/errno.h>
14#include <asm/asm-offsets.h> 15#include <asm/asm-offsets.h>
16#include <asm/setup.h>
17#include <asm/asm-offsets.h>
18#include <asm/ptrace.h>
19#include <asm/thread_info.h>
20
21_TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING)
22
23/*
24 * offsets into stackframe
25 * SP_ = offsets into stack sie64 is called with
26 * SPI_ = offsets into irq stack
27 */
28SP_GREGS = __SF_EMPTY
29SP_HOOK = __SF_EMPTY+8
30SP_GPP = __SF_EMPTY+16
31SPI_PSW = STACK_FRAME_OVERHEAD + __PT_PSW
32
15 33
16SP_R5 = 5 * 8 # offset into stackframe 34 .macro SPP newpp
17SP_R6 = 6 * 8 35 tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_SPP
36 jz 0f
37 .insn s,0xb2800000,\newpp
380:
39 .endm
40
41sie_irq_handler:
42 SPP __LC_CMF_HPP # set host id
43 larl %r2,sie_inst
44 clg %r2,SPI_PSW+8(0,%r15) # intercepted sie
45 jne 1f
46 xc __LC_SIE_HOOK(8),__LC_SIE_HOOK
47 lg %r2,__LC_THREAD_INFO # pointer thread_info struct
48 tm __TI_flags+7(%r2),_TIF_EXIT_SIE
49 jz 0f
50 larl %r2,sie_exit # work pending, leave sie
51 stg %r2,__LC_RETURN_PSW+8
52 br %r14
530: larl %r2,sie_reenter # re-enter with guest id
54 stg %r2,__LC_RETURN_PSW+8
551: br %r14
18 56
19/* 57/*
20 * sie64a calling convention: 58 * sie64a calling convention:
@@ -23,23 +61,34 @@ SP_R6 = 6 * 8
23 */ 61 */
24 .globl sie64a 62 .globl sie64a
25sie64a: 63sie64a:
26 lgr %r5,%r3 64 stg %r3,SP_GREGS(%r15) # save guest register save area
27 stmg %r5,%r14,SP_R5(%r15) # save register on entry 65 stmg %r6,%r14,__SF_GPRS(%r15) # save registers on entry
28 lgr %r14,%r2 # pointer to sie control block 66 lgr %r14,%r2 # pointer to sie control block
29 lmg %r0,%r13,0(%r3) # load guest gprs 0-13 67 larl %r5,sie_irq_handler
68 stg %r2,SP_GPP(%r15)
69 stg %r5,SP_HOOK(%r15) # save hook target
70 lmg %r0,%r13,0(%r3) # load guest gprs 0-13
71sie_reenter:
72 mvc __LC_SIE_HOOK(8),SP_HOOK(%r15)
73 SPP SP_GPP(%r15) # set guest id
30sie_inst: 74sie_inst:
31 sie 0(%r14) 75 sie 0(%r14)
32 lg %r14,SP_R5(%r15) 76 xc __LC_SIE_HOOK(8),__LC_SIE_HOOK
33 stmg %r0,%r13,0(%r14) # save guest gprs 0-13 77 SPP __LC_CMF_HPP # set host id
78sie_exit:
79 lg %r14,SP_GREGS(%r15)
80 stmg %r0,%r13,0(%r14) # save guest gprs 0-13
34 lghi %r2,0 81 lghi %r2,0
35 lmg %r6,%r14,SP_R6(%r15) 82 lmg %r6,%r14,__SF_GPRS(%r15)
36 br %r14 83 br %r14
37 84
38sie_err: 85sie_err:
39 lg %r14,SP_R5(%r15) 86 xc __LC_SIE_HOOK(8),__LC_SIE_HOOK
40 stmg %r0,%r13,0(%r14) # save guest gprs 0-13 87 SPP __LC_CMF_HPP # set host id
88 lg %r14,SP_GREGS(%r15)
89 stmg %r0,%r13,0(%r14) # save guest gprs 0-13
41 lghi %r2,-EFAULT 90 lghi %r2,-EFAULT
42 lmg %r6,%r14,SP_R6(%r15) 91 lmg %r6,%r14,__SF_GPRS(%r15)
43 br %r14 92 br %r14
44 93
45 .section __ex_table,"a" 94 .section __ex_table,"a"
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 15ee1111de58..702276f5e2fa 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -14,6 +14,7 @@
14 14
15#include <linux/kvm.h> 15#include <linux/kvm.h>
16#include <linux/kvm_host.h> 16#include <linux/kvm_host.h>
17#include <linux/slab.h>
17#include "gaccess.h" 18#include "gaccess.h"
18#include "kvm-s390.h" 19#include "kvm-s390.h"
19 20
@@ -112,7 +113,7 @@ static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
112{ 113{
113 struct kvm_s390_interrupt_info *inti; 114 struct kvm_s390_interrupt_info *inti;
114 115
115 inti = kzalloc(sizeof(*inti), GFP_KERNEL); 116 inti = kzalloc(sizeof(*inti), GFP_ATOMIC);
116 if (!inti) 117 if (!inti)
117 return -ENOMEM; 118 return -ENOMEM;
118 inti->type = KVM_S390_SIGP_STOP; 119 inti->type = KVM_S390_SIGP_STOP;
@@ -172,7 +173,7 @@ static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
172 rc = 0; /* order accepted */ 173 rc = 0; /* order accepted */
173 break; 174 break;
174 default: 175 default:
175 rc = -ENOTSUPP; 176 rc = -EOPNOTSUPP;
176 } 177 }
177 return rc; 178 return rc;
178} 179}
@@ -293,7 +294,7 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
293 vcpu->stat.instruction_sigp_restart++; 294 vcpu->stat.instruction_sigp_restart++;
294 /* user space must know about restart */ 295 /* user space must know about restart */
295 default: 296 default:
296 return -ENOTSUPP; 297 return -EOPNOTSUPP;
297 } 298 }
298 299
299 if (rc < 0) 300 if (rc < 0)
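The kzalloc() switch from GFP_KERNEL to GFP_ATOMIC above reflects the calling context: the interrupt-info object is allocated while a spinlock is held, where a sleeping allocation is not allowed. A minimal sketch of that constraint; the helper name and list head are placeholders, not the sigp.c ones.

	static int queue_stop_request_sketch(struct list_head *pending)
	{
		struct kvm_s390_interrupt_info *inti;

		/* caller holds a spinlock: GFP_KERNEL could sleep,
		 * GFP_ATOMIC may fail but never sleeps */
		inti = kzalloc(sizeof(*inti), GFP_ATOMIC);
		if (!inti)
			return -ENOMEM;
		inti->type = KVM_S390_SIGP_STOP;
		list_add_tail(&inti->list, pending);
		return 0;
	}
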
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index 97975ec7a274..761ab8b56afc 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -3,6 +3,7 @@
3# 3#
4 4
5lib-y += delay.o string.o uaccess_std.o uaccess_pt.o 5lib-y += delay.o string.o uaccess_std.o uaccess_pt.o
6obj-y += usercopy.o
6obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o 7obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o
7lib-$(CONFIG_64BIT) += uaccess_mvcos.o 8lib-$(CONFIG_64BIT) += uaccess_mvcos.o
8lib-$(CONFIG_SMP) += spinlock.o 9lib-$(CONFIG_SMP) += spinlock.o
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index cff327f109a8..91754ffb9203 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -43,16 +43,24 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
43{ 43{
44 int count = spin_retry; 44 int count = spin_retry;
45 unsigned int cpu = ~smp_processor_id(); 45 unsigned int cpu = ~smp_processor_id();
46 unsigned int owner;
46 47
47 while (1) { 48 while (1) {
48 if (count-- <= 0) { 49 owner = lp->owner_cpu;
49 unsigned int owner = lp->owner_cpu; 50 if (!owner || smp_vcpu_scheduled(~owner)) {
50 if (owner != 0) 51 for (count = spin_retry; count > 0; count--) {
51 _raw_yield_cpu(~owner); 52 if (arch_spin_is_locked(lp))
52 count = spin_retry; 53 continue;
54 if (_raw_compare_and_swap(&lp->owner_cpu, 0,
55 cpu) == 0)
56 return;
57 }
58 if (MACHINE_IS_LPAR)
59 continue;
53 } 60 }
54 if (arch_spin_is_locked(lp)) 61 owner = lp->owner_cpu;
55 continue; 62 if (owner)
63 _raw_yield_cpu(~owner);
56 if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) 64 if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
57 return; 65 return;
58 } 66 }
@@ -63,17 +71,27 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
63{ 71{
64 int count = spin_retry; 72 int count = spin_retry;
65 unsigned int cpu = ~smp_processor_id(); 73 unsigned int cpu = ~smp_processor_id();
74 unsigned int owner;
66 75
67 local_irq_restore(flags); 76 local_irq_restore(flags);
68 while (1) { 77 while (1) {
69 if (count-- <= 0) { 78 owner = lp->owner_cpu;
70 unsigned int owner = lp->owner_cpu; 79 if (!owner || smp_vcpu_scheduled(~owner)) {
71 if (owner != 0) 80 for (count = spin_retry; count > 0; count--) {
72 _raw_yield_cpu(~owner); 81 if (arch_spin_is_locked(lp))
73 count = spin_retry; 82 continue;
83 local_irq_disable();
84 if (_raw_compare_and_swap(&lp->owner_cpu, 0,
85 cpu) == 0)
86 return;
87 local_irq_restore(flags);
88 }
89 if (MACHINE_IS_LPAR)
90 continue;
74 } 91 }
75 if (arch_spin_is_locked(lp)) 92 owner = lp->owner_cpu;
76 continue; 93 if (owner)
94 _raw_yield_cpu(~owner);
77 local_irq_disable(); 95 local_irq_disable();
78 if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) 96 if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
79 return; 97 return;
@@ -100,8 +118,11 @@ EXPORT_SYMBOL(arch_spin_trylock_retry);
100void arch_spin_relax(arch_spinlock_t *lock) 118void arch_spin_relax(arch_spinlock_t *lock)
101{ 119{
102 unsigned int cpu = lock->owner_cpu; 120 unsigned int cpu = lock->owner_cpu;
103 if (cpu != 0) 121 if (cpu != 0) {
104 _raw_yield_cpu(~cpu); 122 if (MACHINE_IS_VM || MACHINE_IS_KVM ||
123 !smp_vcpu_scheduled(~cpu))
124 _raw_yield_cpu(~cpu);
125 }
105} 126}
106EXPORT_SYMBOL(arch_spin_relax); 127EXPORT_SYMBOL(arch_spin_relax);
107 128
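The reworked arch_spin_lock_wait() above changes the spinning policy: spin only while the lock holder's virtual CPU is actually scheduled by the hypervisor, otherwise hand the time slice to that CPU straight away (and keep spinning unconditionally on LPAR). A condensed sketch of that decision, not the literal loop:

	static void spin_or_yield_sketch(arch_spinlock_t *lp, unsigned int cpu)
	{
		unsigned int owner = lp->owner_cpu;
		int count;

		if (!owner || smp_vcpu_scheduled(~owner)) {
			/* holder is running somewhere: spinning can succeed */
			for (count = spin_retry; count > 0; count--) {
				if (arch_spin_is_locked(lp))
					continue;
				if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
					return;
			}
		}
		/* holder is preempted (or we gave up): donate our time slice */
		owner = lp->owner_cpu;
		if (owner)
			_raw_yield_cpu(~owner);
	}
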
diff --git a/arch/s390/lib/usercopy.c b/arch/s390/lib/usercopy.c
new file mode 100644
index 000000000000..14b363fec8a2
--- /dev/null
+++ b/arch/s390/lib/usercopy.c
@@ -0,0 +1,8 @@
1#include <linux/module.h>
2#include <linux/bug.h>
3
4void copy_from_user_overflow(void)
5{
6 WARN(1, "Buffer overflow detected!\n");
7}
8EXPORT_SYMBOL(copy_from_user_overflow);
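The new copy_from_user_overflow() above is the out-of-line target for the compile-time buffer-size check in the uaccess layer. A simplified sketch of such a call site, assuming the usual __builtin_object_size() pattern; the wrapper name is made up and not the real header code.

	static inline unsigned long
	checked_copy_from_user(void *to, const void __user *from, unsigned long n)
	{
		unsigned long sz = __builtin_object_size(to, 0);

		if (sz != (unsigned long) -1 && n > sz) {
			copy_from_user_overflow();	/* WARN defined above */
			return n;			/* report: nothing copied */
		}
		return copy_from_user(to, from, n);
	}
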
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index 76a3637b88e0..eb6a2ef5f82e 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -1,17 +1,16 @@
1/* 1/*
2 * arch/s390/mm/cmm.c 2 * Collaborative memory management interface.
3 * 3 *
4 * S390 version 4 * Copyright IBM Corp 2003,2010
5 * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation 5 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
7 * 6 *
8 * Collaborative memory management interface.
9 */ 7 */
10 8
11#include <linux/errno.h> 9#include <linux/errno.h>
12#include <linux/fs.h> 10#include <linux/fs.h>
13#include <linux/init.h> 11#include <linux/init.h>
14#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/gfp.h>
15#include <linux/sched.h> 14#include <linux/sched.h>
16#include <linux/sysctl.h> 15#include <linux/sysctl.h>
17#include <linux/ctype.h> 16#include <linux/ctype.h>
@@ -19,9 +18,9 @@
19#include <linux/kthread.h> 18#include <linux/kthread.h>
20#include <linux/oom.h> 19#include <linux/oom.h>
21#include <linux/suspend.h> 20#include <linux/suspend.h>
21#include <linux/uaccess.h>
22 22
23#include <asm/pgalloc.h> 23#include <asm/pgalloc.h>
24#include <asm/uaccess.h>
25#include <asm/diag.h> 24#include <asm/diag.h>
26 25
27static char *sender = "VMRMSVM"; 26static char *sender = "VMRMSVM";
@@ -52,14 +51,14 @@ static struct cmm_page_array *cmm_timed_page_list;
52static DEFINE_SPINLOCK(cmm_lock); 51static DEFINE_SPINLOCK(cmm_lock);
53 52
54static struct task_struct *cmm_thread_ptr; 53static struct task_struct *cmm_thread_ptr;
55static wait_queue_head_t cmm_thread_wait; 54static DECLARE_WAIT_QUEUE_HEAD(cmm_thread_wait);
56static struct timer_list cmm_timer; 55static DEFINE_TIMER(cmm_timer, NULL, 0, 0);
57 56
58static void cmm_timer_fn(unsigned long); 57static void cmm_timer_fn(unsigned long);
59static void cmm_set_timer(void); 58static void cmm_set_timer(void);
60 59
61static long 60static long cmm_alloc_pages(long nr, long *counter,
62cmm_alloc_pages(long nr, long *counter, struct cmm_page_array **list) 61 struct cmm_page_array **list)
63{ 62{
64 struct cmm_page_array *pa, *npa; 63 struct cmm_page_array *pa, *npa;
65 unsigned long addr; 64 unsigned long addr;
@@ -98,8 +97,7 @@ cmm_alloc_pages(long nr, long *counter, struct cmm_page_array **list)
98 return nr; 97 return nr;
99} 98}
100 99
101static long 100static long cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
102cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
103{ 101{
104 struct cmm_page_array *pa; 102 struct cmm_page_array *pa;
105 unsigned long addr; 103 unsigned long addr;
@@ -139,11 +137,10 @@ static int cmm_oom_notify(struct notifier_block *self,
139} 137}
140 138
141static struct notifier_block cmm_oom_nb = { 139static struct notifier_block cmm_oom_nb = {
142 .notifier_call = cmm_oom_notify 140 .notifier_call = cmm_oom_notify,
143}; 141};
144 142
145static int 143static int cmm_thread(void *dummy)
146cmm_thread(void *dummy)
147{ 144{
148 int rc; 145 int rc;
149 146
@@ -169,7 +166,7 @@ cmm_thread(void *dummy)
169 cmm_timed_pages_target = cmm_timed_pages; 166 cmm_timed_pages_target = cmm_timed_pages;
170 } else if (cmm_timed_pages_target < cmm_timed_pages) { 167 } else if (cmm_timed_pages_target < cmm_timed_pages) {
171 cmm_free_pages(1, &cmm_timed_pages, 168 cmm_free_pages(1, &cmm_timed_pages,
172 &cmm_timed_page_list); 169 &cmm_timed_page_list);
173 } 170 }
174 if (cmm_timed_pages > 0 && !timer_pending(&cmm_timer)) 171 if (cmm_timed_pages > 0 && !timer_pending(&cmm_timer))
175 cmm_set_timer(); 172 cmm_set_timer();
@@ -177,14 +174,12 @@ cmm_thread(void *dummy)
177 return 0; 174 return 0;
178} 175}
179 176
180static void 177static void cmm_kick_thread(void)
181cmm_kick_thread(void)
182{ 178{
183 wake_up(&cmm_thread_wait); 179 wake_up(&cmm_thread_wait);
184} 180}
185 181
186static void 182static void cmm_set_timer(void)
187cmm_set_timer(void)
188{ 183{
189 if (cmm_timed_pages_target <= 0 || cmm_timeout_seconds <= 0) { 184 if (cmm_timed_pages_target <= 0 || cmm_timeout_seconds <= 0) {
190 if (timer_pending(&cmm_timer)) 185 if (timer_pending(&cmm_timer))
@@ -201,8 +196,7 @@ cmm_set_timer(void)
201 add_timer(&cmm_timer); 196 add_timer(&cmm_timer);
202} 197}
203 198
204static void 199static void cmm_timer_fn(unsigned long ignored)
205cmm_timer_fn(unsigned long ignored)
206{ 200{
207 long nr; 201 long nr;
208 202
@@ -215,57 +209,49 @@ cmm_timer_fn(unsigned long ignored)
215 cmm_set_timer(); 209 cmm_set_timer();
216} 210}
217 211
218void 212static void cmm_set_pages(long nr)
219cmm_set_pages(long nr)
220{ 213{
221 cmm_pages_target = nr; 214 cmm_pages_target = nr;
222 cmm_kick_thread(); 215 cmm_kick_thread();
223} 216}
224 217
225long 218static long cmm_get_pages(void)
226cmm_get_pages(void)
227{ 219{
228 return cmm_pages; 220 return cmm_pages;
229} 221}
230 222
231void 223static void cmm_add_timed_pages(long nr)
232cmm_add_timed_pages(long nr)
233{ 224{
234 cmm_timed_pages_target += nr; 225 cmm_timed_pages_target += nr;
235 cmm_kick_thread(); 226 cmm_kick_thread();
236} 227}
237 228
238long 229static long cmm_get_timed_pages(void)
239cmm_get_timed_pages(void)
240{ 230{
241 return cmm_timed_pages; 231 return cmm_timed_pages;
242} 232}
243 233
244void 234static void cmm_set_timeout(long nr, long seconds)
245cmm_set_timeout(long nr, long seconds)
246{ 235{
247 cmm_timeout_pages = nr; 236 cmm_timeout_pages = nr;
248 cmm_timeout_seconds = seconds; 237 cmm_timeout_seconds = seconds;
249 cmm_set_timer(); 238 cmm_set_timer();
250} 239}
251 240
252static int 241static int cmm_skip_blanks(char *cp, char **endp)
253cmm_skip_blanks(char *cp, char **endp)
254{ 242{
255 char *str; 243 char *str;
256 244
257 for (str = cp; *str == ' ' || *str == '\t'; str++); 245 for (str = cp; *str == ' ' || *str == '\t'; str++)
246 ;
258 *endp = str; 247 *endp = str;
259 return str != cp; 248 return str != cp;
260} 249}
261 250
262#ifdef CONFIG_CMM_PROC
263
264static struct ctl_table cmm_table[]; 251static struct ctl_table cmm_table[];
265 252
266static int 253static int cmm_pages_handler(ctl_table *ctl, int write, void __user *buffer,
267cmm_pages_handler(ctl_table *ctl, int write, 254 size_t *lenp, loff_t *ppos)
268 void __user *buffer, size_t *lenp, loff_t *ppos)
269{ 255{
270 char buf[16], *p; 256 char buf[16], *p;
271 long nr; 257 long nr;
@@ -304,9 +290,8 @@ cmm_pages_handler(ctl_table *ctl, int write,
304 return 0; 290 return 0;
305} 291}
306 292
307static int 293static int cmm_timeout_handler(ctl_table *ctl, int write, void __user *buffer,
308cmm_timeout_handler(ctl_table *ctl, int write, 294 size_t *lenp, loff_t *ppos)
309 void __user *buffer, size_t *lenp, loff_t *ppos)
310{ 295{
311 char buf[64], *p; 296 char buf[64], *p;
312 long nr, seconds; 297 long nr, seconds;
@@ -369,12 +354,10 @@ static struct ctl_table cmm_dir_table[] = {
369 }, 354 },
370 { } 355 { }
371}; 356};
372#endif
373 357
374#ifdef CONFIG_CMM_IUCV 358#ifdef CONFIG_CMM_IUCV
375#define SMSG_PREFIX "CMM" 359#define SMSG_PREFIX "CMM"
376static void 360static void cmm_smsg_target(const char *from, char *msg)
377cmm_smsg_target(char *from, char *msg)
378{ 361{
379 long nr, seconds; 362 long nr, seconds;
380 363
@@ -444,16 +427,13 @@ static struct notifier_block cmm_power_notifier = {
444 .notifier_call = cmm_power_event, 427 .notifier_call = cmm_power_event,
445}; 428};
446 429
447static int 430static int cmm_init(void)
448cmm_init (void)
449{ 431{
450 int rc = -ENOMEM; 432 int rc = -ENOMEM;
451 433
452#ifdef CONFIG_CMM_PROC
453 cmm_sysctl_header = register_sysctl_table(cmm_dir_table); 434 cmm_sysctl_header = register_sysctl_table(cmm_dir_table);
454 if (!cmm_sysctl_header) 435 if (!cmm_sysctl_header)
455 goto out_sysctl; 436 goto out_sysctl;
456#endif
457#ifdef CONFIG_CMM_IUCV 437#ifdef CONFIG_CMM_IUCV
458 rc = smsg_register_callback(SMSG_PREFIX, cmm_smsg_target); 438 rc = smsg_register_callback(SMSG_PREFIX, cmm_smsg_target);
459 if (rc < 0) 439 if (rc < 0)
@@ -465,8 +445,6 @@ cmm_init (void)
465 rc = register_pm_notifier(&cmm_power_notifier); 445 rc = register_pm_notifier(&cmm_power_notifier);
466 if (rc) 446 if (rc)
467 goto out_pm; 447 goto out_pm;
468 init_waitqueue_head(&cmm_thread_wait);
469 init_timer(&cmm_timer);
470 cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread"); 448 cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
471 rc = IS_ERR(cmm_thread_ptr) ? PTR_ERR(cmm_thread_ptr) : 0; 449 rc = IS_ERR(cmm_thread_ptr) ? PTR_ERR(cmm_thread_ptr) : 0;
472 if (rc) 450 if (rc)
@@ -482,36 +460,26 @@ out_oom_notify:
482 smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target); 460 smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target);
483out_smsg: 461out_smsg:
484#endif 462#endif
485#ifdef CONFIG_CMM_PROC
486 unregister_sysctl_table(cmm_sysctl_header); 463 unregister_sysctl_table(cmm_sysctl_header);
487out_sysctl: 464out_sysctl:
488#endif 465 del_timer_sync(&cmm_timer);
489 return rc; 466 return rc;
490} 467}
468module_init(cmm_init);
491 469
492static void 470static void cmm_exit(void)
493cmm_exit(void)
494{ 471{
495 kthread_stop(cmm_thread_ptr);
496 unregister_pm_notifier(&cmm_power_notifier);
497 unregister_oom_notifier(&cmm_oom_nb);
498 cmm_free_pages(cmm_pages, &cmm_pages, &cmm_page_list);
499 cmm_free_pages(cmm_timed_pages, &cmm_timed_pages, &cmm_timed_page_list);
500#ifdef CONFIG_CMM_PROC
501 unregister_sysctl_table(cmm_sysctl_header); 472 unregister_sysctl_table(cmm_sysctl_header);
502#endif
503#ifdef CONFIG_CMM_IUCV 473#ifdef CONFIG_CMM_IUCV
504 smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target); 474 smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target);
505#endif 475#endif
476 unregister_pm_notifier(&cmm_power_notifier);
477 unregister_oom_notifier(&cmm_oom_nb);
478 kthread_stop(cmm_thread_ptr);
479 del_timer_sync(&cmm_timer);
480 cmm_free_pages(cmm_pages, &cmm_pages, &cmm_page_list);
481 cmm_free_pages(cmm_timed_pages, &cmm_timed_pages, &cmm_timed_page_list);
506} 482}
507
508module_init(cmm_init);
509module_exit(cmm_exit); 483module_exit(cmm_exit);
510 484
511EXPORT_SYMBOL(cmm_set_pages);
512EXPORT_SYMBOL(cmm_get_pages);
513EXPORT_SYMBOL(cmm_add_timed_pages);
514EXPORT_SYMBOL(cmm_get_timed_pages);
515EXPORT_SYMBOL(cmm_set_timeout);
516
517MODULE_LICENSE("GPL"); 485MODULE_LICENSE("GPL");
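Among the cmm.c changes above, the wait queue and timer move from runtime init_waitqueue_head()/init_timer() calls to static initializers, which is why those two lines disappear from cmm_init(). A minimal sketch of that pattern with placeholder names; the 2.6.3x-era DEFINE_TIMER() takes (name, function, expires, data).

	static void demo_timer_fn(unsigned long data);

	static DECLARE_WAIT_QUEUE_HEAD(demo_wait);		/* ready at build time */
	static DEFINE_TIMER(demo_timer, demo_timer_fn, 0, 0);	/* no init_timer() needed */

	static void demo_timer_fn(unsigned long data)
	{
		wake_up(&demo_wait);
	}
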
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 5c8457129603..3cc95dd0a3a6 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -105,7 +105,7 @@ static int
105dcss_set_subcodes(void) 105dcss_set_subcodes(void)
106{ 106{
107#ifdef CONFIG_64BIT 107#ifdef CONFIG_64BIT
108 char *name = kmalloc(8 * sizeof(char), GFP_DMA); 108 char *name = kmalloc(8 * sizeof(char), GFP_KERNEL | GFP_DMA);
109 unsigned long rx, ry; 109 unsigned long rx, ry;
110 int rc; 110 int rc;
111 111
@@ -252,12 +252,13 @@ dcss_diag_translate_rc (int vm_rc) {
252static int 252static int
253query_segment_type (struct dcss_segment *seg) 253query_segment_type (struct dcss_segment *seg)
254{ 254{
255 struct qin64 *qin = kmalloc (sizeof(struct qin64), GFP_DMA);
256 struct qout64 *qout = kmalloc (sizeof(struct qout64), GFP_DMA);
257
258 int diag_cc, rc, i;
259 unsigned long dummy, vmrc; 255 unsigned long dummy, vmrc;
256 int diag_cc, rc, i;
257 struct qout64 *qout;
258 struct qin64 *qin;
260 259
260 qin = kmalloc(sizeof(*qin), GFP_KERNEL | GFP_DMA);
261 qout = kmalloc(sizeof(*qout), GFP_KERNEL | GFP_DMA);
261 if ((qin == NULL) || (qout == NULL)) { 262 if ((qin == NULL) || (qout == NULL)) {
262 rc = -ENOMEM; 263 rc = -ENOMEM;
263 goto out_free; 264 goto out_free;
@@ -286,7 +287,7 @@ query_segment_type (struct dcss_segment *seg)
286 copy data for the new format. */ 287 copy data for the new format. */
287 if (segext_scode == DCSS_SEGEXT) { 288 if (segext_scode == DCSS_SEGEXT) {
288 struct qout64_old *qout_old; 289 struct qout64_old *qout_old;
289 qout_old = kzalloc(sizeof(struct qout64_old), GFP_DMA); 290 qout_old = kzalloc(sizeof(*qout_old), GFP_KERNEL | GFP_DMA);
290 if (qout_old == NULL) { 291 if (qout_old == NULL) {
291 rc = -ENOMEM; 292 rc = -ENOMEM;
292 goto out_free; 293 goto out_free;
@@ -309,7 +310,7 @@ query_segment_type (struct dcss_segment *seg)
309 } 310 }
310#endif 311#endif
311 if (qout->segcnt > 6) { 312 if (qout->segcnt > 6) {
312 rc = -ENOTSUPP; 313 rc = -EOPNOTSUPP;
313 goto out_free; 314 goto out_free;
314 } 315 }
315 316
@@ -324,11 +325,11 @@ query_segment_type (struct dcss_segment *seg)
324 for (i=0; i<qout->segcnt; i++) { 325 for (i=0; i<qout->segcnt; i++) {
325 if (((qout->range[i].start & 0xff) != SEG_TYPE_EW) && 326 if (((qout->range[i].start & 0xff) != SEG_TYPE_EW) &&
326 ((qout->range[i].start & 0xff) != SEG_TYPE_EN)) { 327 ((qout->range[i].start & 0xff) != SEG_TYPE_EN)) {
327 rc = -ENOTSUPP; 328 rc = -EOPNOTSUPP;
328 goto out_free; 329 goto out_free;
329 } 330 }
330 if (start != qout->range[i].start >> PAGE_SHIFT) { 331 if (start != qout->range[i].start >> PAGE_SHIFT) {
331 rc = -ENOTSUPP; 332 rc = -EOPNOTSUPP;
332 goto out_free; 333 goto out_free;
333 } 334 }
334 start = (qout->range[i].end >> PAGE_SHIFT) + 1; 335 start = (qout->range[i].end >> PAGE_SHIFT) + 1;
@@ -357,7 +358,7 @@ query_segment_type (struct dcss_segment *seg)
357 * -ENOSYS : we are not running on VM 358 * -ENOSYS : we are not running on VM
358 * -EIO : could not perform query diagnose 359 * -EIO : could not perform query diagnose
359 * -ENOENT : no such segment 360 * -ENOENT : no such segment
360 * -ENOTSUPP: multi-part segment cannot be used with linux 361 * -EOPNOTSUPP: multi-part segment cannot be used with linux
361 * -ENOMEM : out of memory 362 * -ENOMEM : out of memory
362 * 0 .. 6 : type of segment as defined in include/asm-s390/extmem.h 363 * 0 .. 6 : type of segment as defined in include/asm-s390/extmem.h
363 */ 364 */
@@ -407,11 +408,11 @@ segment_overlaps_others (struct dcss_segment *seg)
407static int 408static int
408__segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long *end) 409__segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long *end)
409{ 410{
410 struct dcss_segment *seg = kmalloc(sizeof(struct dcss_segment),
411 GFP_DMA);
412 int rc, diag_cc;
413 unsigned long start_addr, end_addr, dummy; 411 unsigned long start_addr, end_addr, dummy;
412 struct dcss_segment *seg;
413 int rc, diag_cc;
414 414
415 seg = kmalloc(sizeof(*seg), GFP_KERNEL | GFP_DMA);
415 if (seg == NULL) { 416 if (seg == NULL) {
416 rc = -ENOMEM; 417 rc = -ENOMEM;
417 goto out; 418 goto out;
@@ -515,7 +516,7 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
515 * -ENOSYS : we are not running on VM 516 * -ENOSYS : we are not running on VM
516 * -EIO : could not perform query or load diagnose 517 * -EIO : could not perform query or load diagnose
517 * -ENOENT : no such segment 518 * -ENOENT : no such segment
518 * -ENOTSUPP: multi-part segment cannot be used with linux 519 * -EOPNOTSUPP: multi-part segment cannot be used with linux
519 * -ENOSPC : segment cannot be used (overlaps with storage) 520 * -ENOSPC : segment cannot be used (overlaps with storage)
520 * -EBUSY : segment can temporarily not be used (overlaps with dcss) 521 * -EBUSY : segment can temporarily not be used (overlaps with dcss)
521 * -ERANGE : segment cannot be used (exceeds kernel mapping range) 522 * -ERANGE : segment cannot be used (exceeds kernel mapping range)
@@ -742,7 +743,7 @@ void segment_warning(int rc, char *seg_name)
742 pr_err("Loading or querying DCSS %s resulted in a " 743 pr_err("Loading or querying DCSS %s resulted in a "
743 "hardware error\n", seg_name); 744 "hardware error\n", seg_name);
744 break; 745 break;
745 case -ENOTSUPP: 746 case -EOPNOTSUPP:
746 pr_err("DCSS %s has multiple page ranges and cannot be " 747 pr_err("DCSS %s has multiple page ranges and cannot be "
747 "loaded or queried\n", seg_name); 748 "loaded or queried\n", seg_name);
748 break; 749 break;
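The recurring GFP_DMA -> GFP_KERNEL | GFP_DMA change in this file is a flag-usage fix: GFP_DMA only selects the 31-bit addressable zone and carries no allocation-context bits, so it is meant to be combined with GFP_KERNEL (or another context flag). A one-line sketch with an assumed helper name:

	static void *alloc_diag_buffer(size_t size)
	{
		/* below 2 GB for the diagnose interface, may sleep/reclaim */
		return kmalloc(size, GFP_KERNEL | GFP_DMA);
	}
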
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index fc102e70d9c2..2505b2ea0ef1 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -30,6 +30,7 @@
30#include <linux/kprobes.h> 30#include <linux/kprobes.h>
31#include <linux/uaccess.h> 31#include <linux/uaccess.h>
32#include <linux/hugetlb.h> 32#include <linux/hugetlb.h>
33#include <asm/asm-offsets.h>
33#include <asm/system.h> 34#include <asm/system.h>
34#include <asm/pgtable.h> 35#include <asm/pgtable.h>
35#include <asm/s390_ext.h> 36#include <asm/s390_ext.h>
@@ -47,10 +48,6 @@
47#define __PF_RES_FIELD 0x8000000000000000ULL 48#define __PF_RES_FIELD 0x8000000000000000ULL
48#endif /* CONFIG_64BIT */ 49#endif /* CONFIG_64BIT */
49 50
50#ifdef CONFIG_SYSCTL
51extern int sysctl_userprocess_debug;
52#endif
53
54#define VM_FAULT_BADCONTEXT 0x010000 51#define VM_FAULT_BADCONTEXT 0x010000
55#define VM_FAULT_BADMAP 0x020000 52#define VM_FAULT_BADMAP 0x020000
56#define VM_FAULT_BADACCESS 0x040000 53#define VM_FAULT_BADACCESS 0x040000
@@ -59,15 +56,13 @@ static inline int notify_page_fault(struct pt_regs *regs)
59{ 56{
60 int ret = 0; 57 int ret = 0;
61 58
62#ifdef CONFIG_KPROBES
63 /* kprobe_running() needs smp_processor_id() */ 59 /* kprobe_running() needs smp_processor_id() */
64 if (!user_mode(regs)) { 60 if (kprobes_built_in() && !user_mode(regs)) {
65 preempt_disable(); 61 preempt_disable();
66 if (kprobe_running() && kprobe_fault_handler(regs, 14)) 62 if (kprobe_running() && kprobe_fault_handler(regs, 14))
67 ret = 1; 63 ret = 1;
68 preempt_enable(); 64 preempt_enable();
69 } 65 }
70#endif
71 return ret; 66 return ret;
72} 67}
73 68
@@ -121,6 +116,22 @@ static inline int user_space_fault(unsigned long trans_exc_code)
121 return trans_exc_code != 3; 116 return trans_exc_code != 3;
122} 117}
123 118
119static inline void report_user_fault(struct pt_regs *regs, long int_code,
120 int signr, unsigned long address)
121{
122 if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
123 return;
124 if (!unhandled_signal(current, signr))
125 return;
126 if (!printk_ratelimit())
127 return;
128 printk("User process fault: interruption code 0x%lX ", int_code);
129 print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN);
130 printk("\n");
131 printk("failing address: %lX\n", address);
132 show_regs(regs);
133}
134
124/* 135/*
125 * Send SIGSEGV to task. This is an external routine 136 * Send SIGSEGV to task. This is an external routine
126 * to keep the stack usage of do_page_fault small. 137 * to keep the stack usage of do_page_fault small.
@@ -134,17 +145,7 @@ static noinline void do_sigsegv(struct pt_regs *regs, long int_code,
134 address = trans_exc_code & __FAIL_ADDR_MASK; 145 address = trans_exc_code & __FAIL_ADDR_MASK;
135 current->thread.prot_addr = address; 146 current->thread.prot_addr = address;
136 current->thread.trap_no = int_code; 147 current->thread.trap_no = int_code;
137#if defined(CONFIG_SYSCTL) || defined(CONFIG_PROCESS_DEBUG) 148 report_user_fault(regs, int_code, SIGSEGV, address);
138#if defined(CONFIG_SYSCTL)
139 if (sysctl_userprocess_debug)
140#endif
141 {
142 printk("User process fault: interruption code 0x%lX\n",
143 int_code);
144 printk("failing address: %lX\n", address);
145 show_regs(regs);
146 }
147#endif
148 si.si_signo = SIGSEGV; 149 si.si_signo = SIGSEGV;
149 si.si_code = si_code; 150 si.si_code = si_code;
150 si.si_addr = (void __user *) address; 151 si.si_addr = (void __user *) address;
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 765647952221..acc91c75bc94 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -26,6 +26,7 @@
26#include <linux/pfn.h> 26#include <linux/pfn.h>
27#include <linux/poison.h> 27#include <linux/poison.h>
28#include <linux/initrd.h> 28#include <linux/initrd.h>
29#include <linux/gfp.h>
29#include <asm/processor.h> 30#include <asm/processor.h>
30#include <asm/system.h> 31#include <asm/system.h>
31#include <asm/uaccess.h> 32#include <asm/uaccess.h>
@@ -143,33 +144,34 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
143} 144}
144#endif 145#endif
145 146
146void free_initmem(void) 147void free_init_pages(char *what, unsigned long begin, unsigned long end)
147{ 148{
148 unsigned long addr; 149 unsigned long addr = begin;
149 150
150 addr = (unsigned long)(&__init_begin); 151 if (begin >= end)
151 for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) { 152 return;
153 for (; addr < end; addr += PAGE_SIZE) {
152 ClearPageReserved(virt_to_page(addr)); 154 ClearPageReserved(virt_to_page(addr));
153 init_page_count(virt_to_page(addr)); 155 init_page_count(virt_to_page(addr));
154 memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); 156 memset((void *)(addr & PAGE_MASK), POISON_FREE_INITMEM,
157 PAGE_SIZE);
155 free_page(addr); 158 free_page(addr);
156 totalram_pages++; 159 totalram_pages++;
157 } 160 }
158 printk ("Freeing unused kernel memory: %ldk freed\n", 161 printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
159 ((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10); 162}
163
164void free_initmem(void)
165{
166 free_init_pages("unused kernel memory",
167 (unsigned long)&__init_begin,
168 (unsigned long)&__init_end);
160} 169}
161 170
162#ifdef CONFIG_BLK_DEV_INITRD 171#ifdef CONFIG_BLK_DEV_INITRD
163void free_initrd_mem(unsigned long start, unsigned long end) 172void free_initrd_mem(unsigned long start, unsigned long end)
164{ 173{
165 if (start < end) 174 free_init_pages("initrd memory", start, end);
166 printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
167 for (; start < end; start += PAGE_SIZE) {
168 ClearPageReserved(virt_to_page(start));
169 init_page_count(virt_to_page(start));
170 free_page(start);
171 totalram_pages++;
172 }
173} 175}
174#endif 176#endif
175 177
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index 81756271dc44..a8c2af8c650f 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -59,3 +59,29 @@ long probe_kernel_write(void *dst, void *src, size_t size)
59 } 59 }
60 return copied < 0 ? -EFAULT : 0; 60 return copied < 0 ? -EFAULT : 0;
61} 61}
62
63int memcpy_real(void *dest, void *src, size_t count)
64{
65 register unsigned long _dest asm("2") = (unsigned long) dest;
66 register unsigned long _len1 asm("3") = (unsigned long) count;
67 register unsigned long _src asm("4") = (unsigned long) src;
68 register unsigned long _len2 asm("5") = (unsigned long) count;
69 unsigned long flags;
70 int rc = -EFAULT;
71
72 if (!count)
73 return 0;
74 flags = __raw_local_irq_stnsm(0xf8UL);
75 asm volatile (
76 "0: mvcle %1,%2,0x0\n"
77 "1: jo 0b\n"
78 " lhi %0,0x0\n"
79 "2:\n"
80 EX_TABLE(1b,2b)
81 : "+d" (rc), "+d" (_dest), "+d" (_src), "+d" (_len1),
82 "+d" (_len2), "=m" (*((long *) dest))
83 : "m" (*((long *) src))
84 : "cc", "memory");
85 __raw_local_irq_ssm(flags);
86 return rc;
87}
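The new memcpy_real() above copies with DAT temporarily disabled (via the stnsm/ssm pair) so that storage can be accessed by its real address during the MVCLE loop. A minimal usage sketch; the helper name and address argument are placeholders, not a meaningful lowcore offset.

	static int read_real_sketch(unsigned long real_addr, void *buf, size_t len)
	{
		/* returns 0 on success, -EFAULT if the copy faulted */
		return memcpy_real(buf, (void *) real_addr, len);
	}
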
diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c
index 098923ae458f..a90d45e9dfb0 100644
--- a/arch/s390/mm/page-states.c
+++ b/arch/s390/mm/page-states.c
@@ -10,6 +10,7 @@
10#include <linux/errno.h> 10#include <linux/errno.h>
11#include <linux/types.h> 11#include <linux/types.h>
12#include <linux/mm.h> 12#include <linux/mm.h>
13#include <linux/gfp.h>
13#include <linux/init.h> 14#include <linux/init.h>
14 15
15#define ESSA_SET_STABLE 1 16#define ESSA_SET_STABLE 1
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index ad621e06ada3..8d999249d357 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -6,11 +6,11 @@
6#include <linux/sched.h> 6#include <linux/sched.h>
7#include <linux/kernel.h> 7#include <linux/kernel.h>
8#include <linux/errno.h> 8#include <linux/errno.h>
9#include <linux/gfp.h>
9#include <linux/mm.h> 10#include <linux/mm.h>
10#include <linux/swap.h> 11#include <linux/swap.h>
11#include <linux/smp.h> 12#include <linux/smp.h>
12#include <linux/highmem.h> 13#include <linux/highmem.h>
13#include <linux/slab.h>
14#include <linux/pagemap.h> 14#include <linux/pagemap.h>
15#include <linux/spinlock.h> 15#include <linux/spinlock.h>
16#include <linux/module.h> 16#include <linux/module.h>
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 300ab012b0fd..90165e7ca04e 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -11,6 +11,7 @@
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/list.h> 12#include <linux/list.h>
13#include <linux/hugetlb.h> 13#include <linux/hugetlb.h>
14#include <linux/slab.h>
14#include <asm/pgalloc.h> 15#include <asm/pgalloc.h>
15#include <asm/pgtable.h> 16#include <asm/pgtable.h>
16#include <asm/setup.h> 17#include <asm/setup.h>
@@ -70,12 +71,8 @@ static pte_t __ref *vmem_pte_alloc(void)
70 pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t)); 71 pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
71 if (!pte) 72 if (!pte)
72 return NULL; 73 return NULL;
73 if (MACHINE_HAS_HPAGE) 74 clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
74 clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY | _PAGE_CO, 75 PTRS_PER_PTE * sizeof(pte_t));
75 PTRS_PER_PTE * sizeof(pte_t));
76 else
77 clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
78 PTRS_PER_PTE * sizeof(pte_t));
79 return pte; 76 return pte;
80} 77}
81 78
@@ -116,8 +113,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
116 if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) && 113 if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) &&
117 (address + HPAGE_SIZE <= start + size) && 114 (address + HPAGE_SIZE <= start + size) &&
118 (address >= HPAGE_SIZE)) { 115 (address >= HPAGE_SIZE)) {
119 pte_val(pte) |= _SEGMENT_ENTRY_LARGE | 116 pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
120 _SEGMENT_ENTRY_CO;
121 pmd_val(*pm_dir) = pte_val(pte); 117 pmd_val(*pm_dir) = pte_val(pte);
122 address += HPAGE_SIZE - PAGE_SIZE; 118 address += HPAGE_SIZE - PAGE_SIZE;
123 continue; 119 continue;