-rw-r--r--  Documentation/s390/CommonIO | 6
-rw-r--r--  Documentation/s390/driver-model.txt | 4
-rw-r--r--  arch/s390/Kconfig | 6
-rw-r--r--  arch/s390/Kconfig.debug | 13
-rw-r--r--  arch/s390/Makefile | 13
-rw-r--r--  arch/s390/boot/Makefile | 8
-rw-r--r--  arch/s390/boot/compressed/Makefile | 60
-rw-r--r--  arch/s390/boot/compressed/head31.S | 51
-rw-r--r--  arch/s390/boot/compressed/head64.S | 48
-rw-r--r--  arch/s390/boot/compressed/misc.c | 158
-rw-r--r--  arch/s390/boot/compressed/vmlinux.lds.S | 55
-rw-r--r--  arch/s390/boot/compressed/vmlinux.scr | 10
-rw-r--r--  arch/s390/defconfig | 7
-rw-r--r--  arch/s390/hypfs/hypfs_diag.c | 4
-rw-r--r--  arch/s390/include/asm/atomic.h | 86
-rw-r--r--  arch/s390/include/asm/bitops.h | 83
-rw-r--r--  arch/s390/include/asm/bug.h | 10
-rw-r--r--  arch/s390/include/asm/crw.h | 1
-rw-r--r--  arch/s390/include/asm/etr.h | 12
-rw-r--r--  arch/s390/include/asm/irqflags.h | 36
-rw-r--r--  arch/s390/include/asm/lowcore.h | 250
-rw-r--r--  arch/s390/include/asm/page.h | 3
-rw-r--r--  arch/s390/include/asm/processor.h | 18
-rw-r--r--  arch/s390/include/asm/qdio.h | 3
-rw-r--r--  arch/s390/include/asm/rwsem.h | 147
-rw-r--r--  arch/s390/include/asm/setup.h | 9
-rw-r--r--  arch/s390/include/asm/sigp.h | 142
-rw-r--r--  arch/s390/include/asm/smp.h | 38
-rw-r--r--  arch/s390/include/asm/spinlock.h | 18
-rw-r--r--  arch/s390/include/asm/swab.h | 16
-rw-r--r--  arch/s390/include/asm/sysinfo.h | 3
-rw-r--r--  arch/s390/include/asm/system.h | 168
-rw-r--r--  arch/s390/include/asm/thread_info.h | 2
-rw-r--r--  arch/s390/include/asm/timex.h | 22
-rw-r--r--  arch/s390/include/asm/uaccess.h | 12
-rw-r--r--  arch/s390/include/asm/vdso.h | 2
-rw-r--r--  arch/s390/kernel/Makefile | 3
-rw-r--r--  arch/s390/kernel/asm-offsets.c | 99
-rw-r--r--  arch/s390/kernel/base.S | 2
-rw-r--r--  arch/s390/kernel/dis.c | 369
-rw-r--r--  arch/s390/kernel/early.c | 22
-rw-r--r--  arch/s390/kernel/entry.S | 1
-rw-r--r--  arch/s390/kernel/entry64.S | 2
-rw-r--r--  arch/s390/kernel/ftrace.c | 2
-rw-r--r--  arch/s390/kernel/head.S | 60
-rw-r--r--  arch/s390/kernel/head31.S | 16
-rw-r--r--  arch/s390/kernel/head64.S | 92
-rw-r--r--  arch/s390/kernel/ipl.c | 41
-rw-r--r--  arch/s390/kernel/machine_kexec.c | 10
-rw-r--r--  arch/s390/kernel/reipl.S | 2
-rw-r--r--  arch/s390/kernel/reipl64.S | 2
-rw-r--r--  arch/s390/kernel/sclp.S | 36
-rw-r--r--  arch/s390/kernel/setup.c | 11
-rw-r--r--  arch/s390/kernel/smp.c | 108
-rw-r--r--  arch/s390/kernel/switch_cpu.S | 58
-rw-r--r--  arch/s390/kernel/switch_cpu64.S | 51
-rw-r--r--  arch/s390/kernel/swsusp_asm64.S | 2
-rw-r--r--  arch/s390/kernel/time.c | 8
-rw-r--r--  arch/s390/kernel/vdso.c | 1
-rw-r--r--  arch/s390/kvm/diag.c | 4
-rw-r--r--  arch/s390/kvm/intercept.c | 18
-rw-r--r--  arch/s390/kvm/interrupt.c | 12
-rw-r--r--  arch/s390/kvm/kvm-s390.c | 23
-rw-r--r--  arch/s390/kvm/priv.c | 2
-rw-r--r--  arch/s390/kvm/sigp.c | 4
-rw-r--r--  arch/s390/lib/Makefile | 2
-rw-r--r--  arch/s390/lib/spinlock.c | 53
-rw-r--r--  arch/s390/lib/usercopy.c | 8
-rw-r--r--  arch/s390/mm/extmem.c | 12
-rw-r--r--  arch/s390/mm/fault.c | 5
-rw-r--r--  arch/s390/mm/init.c | 33
-rw-r--r--  drivers/s390/block/dasd.c | 48
-rw-r--r--  drivers/s390/block/dasd_devmap.c | 13
-rw-r--r--  drivers/s390/block/dasd_genhd.c | 1
-rw-r--r--  drivers/s390/block/dasd_int.h | 1
-rw-r--r--  drivers/s390/block/dasd_proc.c | 109
-rw-r--r--  drivers/s390/char/zcore.c | 163
-rw-r--r--  drivers/s390/cio/ccwreq.c | 2
-rw-r--r--  drivers/s390/cio/chsc.c | 2
-rw-r--r--  drivers/s390/cio/chsc_sch.c | 4
-rw-r--r--  drivers/s390/cio/cio.c | 14
-rw-r--r--  drivers/s390/cio/crw.c | 29
-rw-r--r--  drivers/s390/cio/css.c | 79
-rw-r--r--  drivers/s390/cio/css.h | 5
-rw-r--r--  drivers/s390/cio/device.c | 160
-rw-r--r--  drivers/s390/cio/device.h | 3
-rw-r--r--  drivers/s390/cio/device_fsm.c | 43
-rw-r--r--  drivers/s390/cio/qdio.h | 92
-rw-r--r--  drivers/s390/cio/qdio_debug.c | 23
-rw-r--r--  drivers/s390/cio/qdio_main.c | 28
-rw-r--r--  drivers/s390/cio/qdio_setup.c | 20
-rw-r--r--  drivers/s390/cio/qdio_thinint.c | 4
-rw-r--r--  drivers/s390/crypto/zcrypt_api.c | 158
-rw-r--r--  drivers/s390/kvm/kvm_virtio.c | 4
-rw-r--r--  include/linux/elf.h | 5
95 files changed, 2207 insertions, 1501 deletions
diff --git a/Documentation/s390/CommonIO b/Documentation/s390/CommonIO
index 339207d11d95..d378cba66456 100644
--- a/Documentation/s390/CommonIO
+++ b/Documentation/s390/CommonIO
@@ -87,6 +87,12 @@ Command line parameters
87 compatibility, by the device number in hexadecimal (0xabcd or abcd). Device 87 compatibility, by the device number in hexadecimal (0xabcd or abcd). Device
88 numbers given as 0xabcd will be interpreted as 0.0.abcd. 88 numbers given as 0xabcd will be interpreted as 0.0.abcd.
89 89
90* /proc/cio_settle
91
92 A write request to this file is blocked until all queued cio actions are
93 handled. This will allow userspace to wait for pending work affecting
94 device availability after changing cio_ignore or the hardware configuration.
95
90* For some of the information present in the /proc filesystem in 2.4 (namely, 96* For some of the information present in the /proc filesystem in 2.4 (namely,
91 /proc/subchannels and /proc/chpids), see driver-model.txt. 97 /proc/subchannels and /proc/chpids), see driver-model.txt.
92 Information formerly in /proc/irq_count is now in /proc/interrupts. 98 Information formerly in /proc/irq_count is now in /proc/interrupts.
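The /proc/cio_settle interface added above is synchronous: the write only returns once all queued cio actions have been handled. A minimal userspace sketch of how a tool could use it after editing cio_ignore or changing the hardware configuration (the value written is an assumption; the text only requires a write request):

/* Sketch: block until pending common-I/O work has settled. */
#include <fcntl.h>
#include <unistd.h>

static int cio_settle(void)
{
        int fd, rc;

        fd = open("/proc/cio_settle", O_WRONLY);
        if (fd < 0)
                return -1;
        rc = write(fd, "1", 1); /* blocks until queued cio actions are done */
        close(fd);
        return rc < 0 ? -1 : 0;
}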
diff --git a/Documentation/s390/driver-model.txt b/Documentation/s390/driver-model.txt
index bde473df748d..ed265cf54cde 100644
--- a/Documentation/s390/driver-model.txt
+++ b/Documentation/s390/driver-model.txt
@@ -223,8 +223,8 @@ touched by the driver - it should use the ccwgroup device's driver_data for its
223private data. 223private data.
224 224
225To implement a ccwgroup driver, please refer to include/asm/ccwgroup.h. Keep in 225To implement a ccwgroup driver, please refer to include/asm/ccwgroup.h. Keep in
226mind that most drivers will need to implement both a ccwgroup and a ccw driver 226mind that most drivers will need to implement both a ccwgroup and a ccw
227(unless you have a meta ccw driver, like cu3088 for lcs and ctc). 227driver.
228 228
229 229
2302. Channel paths 2302. Channel paths
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index c80235206c01..19deda8d8875 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -54,6 +54,9 @@ config GENERIC_BUG
54 depends on BUG 54 depends on BUG
55 default y 55 default y
56 56
57config GENERIC_BUG_RELATIVE_POINTERS
58 def_bool y
59
57config NO_IOMEM 60config NO_IOMEM
58 def_bool y 61 def_bool y
59 62
@@ -95,6 +98,9 @@ config S390
95 select HAVE_ARCH_TRACEHOOK 98 select HAVE_ARCH_TRACEHOOK
96 select INIT_ALL_POSSIBLE 99 select INIT_ALL_POSSIBLE
97 select HAVE_PERF_EVENTS 100 select HAVE_PERF_EVENTS
101 select HAVE_KERNEL_GZIP
102 select HAVE_KERNEL_BZIP2
103 select HAVE_KERNEL_LZMA
98 select ARCH_INLINE_SPIN_TRYLOCK 104 select ARCH_INLINE_SPIN_TRYLOCK
99 select ARCH_INLINE_SPIN_TRYLOCK_BH 105 select ARCH_INLINE_SPIN_TRYLOCK_BH
100 select ARCH_INLINE_SPIN_LOCK 106 select ARCH_INLINE_SPIN_LOCK
diff --git a/arch/s390/Kconfig.debug b/arch/s390/Kconfig.debug
index 2283933a9a93..45e0c6199f36 100644
--- a/arch/s390/Kconfig.debug
+++ b/arch/s390/Kconfig.debug
@@ -6,4 +6,17 @@ config TRACE_IRQFLAGS_SUPPORT
6 6
7source "lib/Kconfig.debug" 7source "lib/Kconfig.debug"
8 8
9config DEBUG_STRICT_USER_COPY_CHECKS
10 bool "Strict user copy size checks"
11 ---help---
12 Enabling this option turns a certain set of sanity checks for user
13 copy operations into compile time warnings.
14
15 The copy_from_user() etc checks are there to help test if there
16 are sufficient security checks on the length argument of
17 the copy operation, by having gcc prove that the argument is
18 within bounds.
19
20 If unsure, or if you run an older (pre 4.4) gcc, say N.
21
9endmenu 22endmenu
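For context, the strict user copy checks above target call sites where the length handed to copy_from_user() cannot be proven by gcc (4.4 or newer) to fit the destination object. A hedged sketch of the two kinds of call sites, using hypothetical driver code that is not part of this patch; with the option enabled the first variant is expected to produce a compile-time warning, the second to compile cleanly:

#include <linux/uaccess.h>
#include <linux/errno.h>

static char kbuf[64];

/* 'len' comes straight from userspace and is never bounded, so gcc
 * cannot prove it fits into kbuf -- the strict check flags this. */
static long copy_unchecked(const char __user *ubuf, size_t len)
{
        return copy_from_user(kbuf, ubuf, len) ? -EFAULT : 0;
}

/* Explicit bound on 'len': provably within sizeof(kbuf). */
static long copy_checked(const char __user *ubuf, size_t len)
{
        if (len > sizeof(kbuf))
                return -EINVAL;
        return copy_from_user(kbuf, ubuf, len) ? -EFAULT : 0;
}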
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index fc8fb20e7fc0..0da10746e0e5 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -14,6 +14,7 @@
14# 14#
15 15
16ifndef CONFIG_64BIT 16ifndef CONFIG_64BIT
17LD_BFD := elf32-s390
17LDFLAGS := -m elf_s390 18LDFLAGS := -m elf_s390
18KBUILD_CFLAGS += -m31 19KBUILD_CFLAGS += -m31
19KBUILD_AFLAGS += -m31 20KBUILD_AFLAGS += -m31
@@ -21,6 +22,7 @@ UTS_MACHINE := s390
21STACK_SIZE := 8192 22STACK_SIZE := 8192
22CHECKFLAGS += -D__s390__ -msize-long 23CHECKFLAGS += -D__s390__ -msize-long
23else 24else
25LD_BFD := elf64-s390
24LDFLAGS := -m elf64_s390 26LDFLAGS := -m elf64_s390
25MODFLAGS += -fpic -D__PIC__ 27MODFLAGS += -fpic -D__PIC__
26KBUILD_CFLAGS += -m64 28KBUILD_CFLAGS += -m64
@@ -30,6 +32,8 @@ STACK_SIZE := 16384
30CHECKFLAGS += -D__s390__ -D__s390x__ 32CHECKFLAGS += -D__s390__ -D__s390x__
31endif 33endif
32 34
35export LD_BFD
36
33cflags-$(CONFIG_MARCH_G5) += $(call cc-option,-march=g5) 37cflags-$(CONFIG_MARCH_G5) += $(call cc-option,-march=g5)
34cflags-$(CONFIG_MARCH_Z900) += $(call cc-option,-march=z900) 38cflags-$(CONFIG_MARCH_Z900) += $(call cc-option,-march=z900)
35cflags-$(CONFIG_MARCH_Z990) += $(call cc-option,-march=z990) 39cflags-$(CONFIG_MARCH_Z990) += $(call cc-option,-march=z990)
@@ -85,7 +89,9 @@ KBUILD_AFLAGS += $(aflags-y)
85OBJCOPYFLAGS := -O binary 89OBJCOPYFLAGS := -O binary
86LDFLAGS_vmlinux := -e start 90LDFLAGS_vmlinux := -e start
87 91
88head-y := arch/s390/kernel/head.o arch/s390/kernel/init_task.o 92head-y := arch/s390/kernel/head.o
93head-y += arch/s390/kernel/$(if $(CONFIG_64BIT),head64.o,head31.o)
94head-y += arch/s390/kernel/init_task.o
89 95
90core-y += arch/s390/mm/ arch/s390/kernel/ arch/s390/crypto/ \ 96core-y += arch/s390/mm/ arch/s390/kernel/ arch/s390/crypto/ \
91 arch/s390/appldata/ arch/s390/hypfs/ arch/s390/kvm/ 97 arch/s390/appldata/ arch/s390/hypfs/ arch/s390/kvm/
@@ -99,12 +105,12 @@ drivers-$(CONFIG_OPROFILE) += arch/s390/oprofile/
99 105
100boot := arch/s390/boot 106boot := arch/s390/boot
101 107
102all: image 108all: image bzImage
103 109
104install: vmlinux 110install: vmlinux
105 $(Q)$(MAKE) $(build)=$(boot) $@ 111 $(Q)$(MAKE) $(build)=$(boot) $@
106 112
107image: vmlinux 113image bzImage: vmlinux
108 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ 114 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
109 115
110zfcpdump: 116zfcpdump:
@@ -116,4 +122,5 @@ archclean:
116# Don't use tabs in echo arguments 122# Don't use tabs in echo arguments
117define archhelp 123define archhelp
118 echo '* image - Kernel image for IPL ($(boot)/image)' 124 echo '* image - Kernel image for IPL ($(boot)/image)'
125 echo '* bzImage - Compressed kernel image for IPL ($(boot)/bzImage)'
119endef 126endef
diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile
index 4d97eef36b8d..8800cf090694 100644
--- a/arch/s390/boot/Makefile
+++ b/arch/s390/boot/Makefile
@@ -9,10 +9,18 @@ COMPILE_VERSION := __linux_compile_version_id__`hostname | \
9EXTRA_CFLAGS := -DCOMPILE_VERSION=$(COMPILE_VERSION) -gstabs -I. 9EXTRA_CFLAGS := -DCOMPILE_VERSION=$(COMPILE_VERSION) -gstabs -I.
10 10
11targets := image 11targets := image
12targets += bzImage
13subdir- := compressed
12 14
13$(obj)/image: vmlinux FORCE 15$(obj)/image: vmlinux FORCE
14 $(call if_changed,objcopy) 16 $(call if_changed,objcopy)
15 17
18$(obj)/bzImage: $(obj)/compressed/vmlinux FORCE
19 $(call if_changed,objcopy)
20
21$(obj)/compressed/vmlinux: FORCE
22 $(Q)$(MAKE) $(build)=$(obj)/compressed $@
23
16install: $(CONFIGURE) $(obj)/image 24install: $(CONFIGURE) $(obj)/image
17 sh -x $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/image \ 25 sh -x $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/image \
18 System.map Kerntypes "$(INSTALL_PATH)" 26 System.map Kerntypes "$(INSTALL_PATH)"
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
new file mode 100644
index 000000000000..6e4a67ad07e1
--- /dev/null
+++ b/arch/s390/boot/compressed/Makefile
@@ -0,0 +1,60 @@
1#
2# linux/arch/s390/boot/compressed/Makefile
3#
4# create a compressed vmlinux image from the original vmlinux
5#
6
7BITS := $(if $(CONFIG_64BIT),64,31)
8
9targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 \
10 vmlinux.bin.lzma misc.o piggy.o sizes.h head$(BITS).o
11
12KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
13KBUILD_CFLAGS += $(cflags-y)
14KBUILD_CFLAGS += $(call cc-option,-mpacked-stack)
15KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
16
17GCOV_PROFILE := n
18
19OBJECTS := $(addprefix $(objtree)/arch/s390/kernel/, head.o sclp.o ebcdic.o)
20OBJECTS += $(obj)/head$(BITS).o $(obj)/misc.o $(obj)/piggy.o
21
22LDFLAGS_vmlinux := --oformat $(LD_BFD) -e startup -T
23$(obj)/vmlinux: $(obj)/vmlinux.lds $(OBJECTS)
24 $(call if_changed,ld)
25 @:
26
27sed-sizes := -e 's/^\([0-9a-fA-F]*\) . \(__bss_start\|_end\)$$/\#define SZ\2 0x\1/p'
28
29quiet_cmd_sizes = GEN $@
30 cmd_sizes = $(NM) $< | sed -n $(sed-sizes) > $@
31
32$(obj)/sizes.h: vmlinux
33 $(call if_changed,sizes)
34
35AFLAGS_head$(BITS).o += -I$(obj)
36$(obj)/head$(BITS).o: $(obj)/sizes.h
37
38CFLAGS_misc.o += -I$(obj)
39$(obj)/misc.o: $(obj)/sizes.h
40
41OBJCOPYFLAGS_vmlinux.bin := -R .comment -S
42$(obj)/vmlinux.bin: vmlinux
43 $(call if_changed,objcopy)
44
45vmlinux.bin.all-y := $(obj)/vmlinux.bin
46
47suffix-$(CONFIG_KERNEL_GZIP) := gz
48suffix-$(CONFIG_KERNEL_BZIP2) := bz2
49suffix-$(CONFIG_KERNEL_LZMA) := lzma
50
51$(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y)
52 $(call if_changed,gzip)
53$(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y)
54 $(call if_changed,bzip2)
55$(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y)
56 $(call if_changed,lzma)
57
58LDFLAGS_piggy.o := -r --format binary --oformat $(LD_BFD) -T
59$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix-y)
60 $(call if_changed,ld)
diff --git a/arch/s390/boot/compressed/head31.S b/arch/s390/boot/compressed/head31.S
new file mode 100644
index 000000000000..2a5523a32bcc
--- /dev/null
+++ b/arch/s390/boot/compressed/head31.S
@@ -0,0 +1,51 @@
1/*
2 * Startup glue code to uncompress the kernel
3 *
4 * Copyright IBM Corp. 2010
5 *
6 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
7 */
8
9#include <linux/init.h>
10#include <asm/asm-offsets.h>
11#include <asm/thread_info.h>
12#include <asm/page.h>
13#include "sizes.h"
14
15__HEAD
16 .globl startup_continue
17startup_continue:
18 basr %r13,0 # get base
19.LPG1:
20 # setup stack
21 l %r15,.Lstack-.LPG1(%r13)
22 ahi %r15,-96
23 l %r1,.Ldecompress-.LPG1(%r13)
24 basr %r14,%r1
25 # setup registers for memory mover & branch to target
26 lr %r4,%r2
27 l %r2,.Loffset-.LPG1(%r13)
28 la %r4,0(%r2,%r4)
29 l %r3,.Lmvsize-.LPG1(%r13)
30 lr %r5,%r3
31 # move the memory mover someplace safe
32 la %r1,0x200
33 mvc 0(mover_end-mover,%r1),mover-.LPG1(%r13)
34 # decompress image is started at 0x11000
35 lr %r6,%r2
36 br %r1
37mover:
38 mvcle %r2,%r4,0
39 jo mover
40 br %r6
41mover_end:
42
43 .align 8
44.Lstack:
45 .long 0x8000 + (1<<(PAGE_SHIFT+THREAD_ORDER))
46.Ldecompress:
47 .long decompress_kernel
48.Loffset:
49 .long 0x11000
50.Lmvsize:
51 .long SZ__bss_start
diff --git a/arch/s390/boot/compressed/head64.S b/arch/s390/boot/compressed/head64.S
new file mode 100644
index 000000000000..2982cb140550
--- /dev/null
+++ b/arch/s390/boot/compressed/head64.S
@@ -0,0 +1,48 @@
1/*
2 * Startup glue code to uncompress the kernel
3 *
4 * Copyright IBM Corp. 2010
5 *
6 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
7 */
8
9#include <linux/init.h>
10#include <asm/asm-offsets.h>
11#include <asm/thread_info.h>
12#include <asm/page.h>
13#include "sizes.h"
14
15__HEAD
16 .globl startup_continue
17startup_continue:
18 basr %r13,0 # get base
19.LPG1:
20 # setup stack
21 lg %r15,.Lstack-.LPG1(%r13)
22 aghi %r15,-160
23 brasl %r14,decompress_kernel
24 # setup registers for memory mover & branch to target
25 lgr %r4,%r2
26 lg %r2,.Loffset-.LPG1(%r13)
27 la %r4,0(%r2,%r4)
28 lg %r3,.Lmvsize-.LPG1(%r13)
29 lgr %r5,%r3
30 # move the memory mover someplace safe
31 la %r1,0x200
32 mvc 0(mover_end-mover,%r1),mover-.LPG1(%r13)
33 # decompress image is started at 0x11000
34 lgr %r6,%r2
35 br %r1
36mover:
37 mvcle %r2,%r4,0
38 jo mover
39 br %r6
40mover_end:
41
42 .align 8
43.Lstack:
44 .quad 0x8000 + (1<<(PAGE_SHIFT+THREAD_ORDER))
45.Loffset:
46 .quad 0x11000
47.Lmvsize:
48 .quad SZ__bss_start
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
new file mode 100644
index 000000000000..a97d69525829
--- /dev/null
+++ b/arch/s390/boot/compressed/misc.c
@@ -0,0 +1,158 @@
1/*
2 * Definitions and wrapper functions for kernel decompressor
3 *
4 * Copyright IBM Corp. 2010
5 *
6 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
7 */
8
9#include <asm/uaccess.h>
10#include <asm/page.h>
11#include <asm/ipl.h>
12#include "sizes.h"
13
14/*
15 * gzip declarations
16 */
17#define STATIC static
18
19#undef memset
20#undef memcpy
21#undef memmove
22#define memzero(s, n) memset((s), 0, (n))
23
24/* Symbols defined by linker scripts */
25extern char input_data[];
26extern int input_len;
27extern int _text;
28extern int _end;
29
30static void error(char *m);
31
32static unsigned long free_mem_ptr;
33static unsigned long free_mem_end_ptr;
34
35#ifdef CONFIG_HAVE_KERNEL_BZIP2
36#define HEAP_SIZE 0x400000
37#else
38#define HEAP_SIZE 0x10000
39#endif
40
41#ifdef CONFIG_KERNEL_GZIP
42#include "../../../../lib/decompress_inflate.c"
43#endif
44
45#ifdef CONFIG_KERNEL_BZIP2
46#include "../../../../lib/decompress_bunzip2.c"
47#endif
48
49#ifdef CONFIG_KERNEL_LZMA
50#include "../../../../lib/decompress_unlzma.c"
51#endif
52
53extern _sclp_print_early(const char *);
54
55int puts(const char *s)
56{
57 _sclp_print_early(s);
58 return 0;
59}
60
61void *memset(void *s, int c, size_t n)
62{
63 char *xs;
64
65 if (c == 0)
66 return __builtin_memset(s, 0, n);
67
68 xs = (char *) s;
69 if (n > 0)
70 do {
71 *xs++ = c;
72 } while (--n > 0);
73 return s;
74}
75
76void *memcpy(void *__dest, __const void *__src, size_t __n)
77{
78 return __builtin_memcpy(__dest, __src, __n);
79}
80
81void *memmove(void *__dest, __const void *__src, size_t __n)
82{
83 char *d;
84 const char *s;
85
86 if (__dest <= __src)
87 return __builtin_memcpy(__dest, __src, __n);
88 d = __dest + __n;
89 s = __src + __n;
90 while (__n--)
91 *--d = *--s;
92 return __dest;
93}
94
95static void error(char *x)
96{
97 unsigned long long psw = 0x000a0000deadbeefULL;
98
99 puts("\n\n");
100 puts(x);
101 puts("\n\n -- System halted");
102
103 asm volatile("lpsw %0" : : "Q" (psw));
104}
105
106/*
107 * Safe guard the ipl parameter block against a memory area that will be
108 * overwritten. The validity check for the ipl parameter block is complex
109 * (see cio_get_iplinfo and ipl_save_parameters) but if the pointer to
110 * the ipl parameter block intersects with the passed memory area we can
111 * safely assume that we can read from that memory. In that case just copy
112 * the memory to IPL_PARMBLOCK_ORIGIN even if there is no ipl parameter
113 * block.
114 */
115static void check_ipl_parmblock(void *start, unsigned long size)
116{
117 void *src, *dst;
118
119 src = (void *)(unsigned long) S390_lowcore.ipl_parmblock_ptr;
120 if (src + PAGE_SIZE <= start || src >= start + size)
121 return;
122 dst = (void *) IPL_PARMBLOCK_ORIGIN;
123 memmove(dst, src, PAGE_SIZE);
124 S390_lowcore.ipl_parmblock_ptr = IPL_PARMBLOCK_ORIGIN;
125}
126
127unsigned long decompress_kernel(void)
128{
129 unsigned long output_addr;
130 unsigned char *output;
131
132 free_mem_ptr = (unsigned long)&_end;
133 free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
134 output = (unsigned char *) ((free_mem_end_ptr + 4095UL) & -4096UL);
135
136 check_ipl_parmblock((void *) 0, (unsigned long) output + SZ__bss_start);
137
138#ifdef CONFIG_BLK_DEV_INITRD
139 /*
140 * Move the initrd right behind the end of the decompressed
141 * kernel image.
142 */
143 if (INITRD_START && INITRD_SIZE &&
144 INITRD_START < (unsigned long) output + SZ__bss_start) {
145 check_ipl_parmblock(output + SZ__bss_start,
146 INITRD_START + INITRD_SIZE);
147 memmove(output + SZ__bss_start,
148 (void *) INITRD_START, INITRD_SIZE);
149 INITRD_START = (unsigned long) output + SZ__bss_start;
150 }
151#endif
152
153 puts("Uncompressing Linux... ");
154 decompress(input_data, input_len, NULL, NULL, output, NULL, error);
155 puts("Ok, booting the kernel.\n");
156 return (unsigned long) output;
157}
158
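In decompress_kernel() above, the decompression target is placed just past a heap reserved behind _end and rounded up to the next 4 KiB boundary with (x + 4095) & -4096. A small worked example of that rounding, using assumed addresses rather than values from a real build:

#include <stdio.h>

int main(void)
{
        unsigned long end  = 0x521234UL;  /* assumed address of _end      */
        unsigned long heap = 0x10000UL;   /* HEAP_SIZE without bzip2      */
        /* round the first free byte up to the next 4 KiB boundary */
        unsigned long output = (end + heap + 4095UL) & -4096UL;

        printf("output = %#lx\n", output); /* prints output = 0x532000 */
        return 0;
}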
diff --git a/arch/s390/boot/compressed/vmlinux.lds.S b/arch/s390/boot/compressed/vmlinux.lds.S
new file mode 100644
index 000000000000..d80f79d8dd9c
--- /dev/null
+++ b/arch/s390/boot/compressed/vmlinux.lds.S
@@ -0,0 +1,55 @@
1#include <asm-generic/vmlinux.lds.h>
2
3#ifdef CONFIG_64BIT
4OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
5OUTPUT_ARCH(s390:64-bit)
6#else
7OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390")
8OUTPUT_ARCH(s390)
9#endif
10
11ENTRY(startup)
12
13SECTIONS
14{
15 /* Be careful parts of head_64.S assume startup_32 is at
16 * address 0.
17 */
18 . = 0;
19 .head.text : {
20 _head = . ;
21 HEAD_TEXT
22 _ehead = . ;
23 }
24 .rodata.compressed : {
25 *(.rodata.compressed)
26 }
27 .text : {
28 _text = .; /* Text */
29 *(.text)
30 *(.text.*)
31 _etext = . ;
32 }
33 .rodata : {
34 _rodata = . ;
35 *(.rodata) /* read-only data */
36 *(.rodata.*)
37 _erodata = . ;
38 }
39 .data : {
40 _data = . ;
41 *(.data)
42 *(.data.*)
43 _edata = . ;
44 }
45 . = ALIGN(256);
46 .bss : {
47 _bss = . ;
48 *(.bss)
49 *(.bss.*)
50 *(COMMON)
51 . = ALIGN(8); /* For convenience during zeroing */
52 _ebss = .;
53 }
54 _end = .;
55}
diff --git a/arch/s390/boot/compressed/vmlinux.scr b/arch/s390/boot/compressed/vmlinux.scr
new file mode 100644
index 000000000000..f02382ae5c48
--- /dev/null
+++ b/arch/s390/boot/compressed/vmlinux.scr
@@ -0,0 +1,10 @@
1SECTIONS
2{
3 .rodata.compressed : {
4 input_len = .;
5 LONG(input_data_end - input_data) input_data = .;
6 *(.data)
7 output_len = . - 4;
8 input_data_end = .;
9 }
10}
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index b416aa11b91e..7ae71cc56973 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -36,6 +36,13 @@ CONFIG_LOCK_KERNEL=y
36CONFIG_INIT_ENV_ARG_LIMIT=32 36CONFIG_INIT_ENV_ARG_LIMIT=32
37CONFIG_LOCALVERSION="" 37CONFIG_LOCALVERSION=""
38CONFIG_LOCALVERSION_AUTO=y 38CONFIG_LOCALVERSION_AUTO=y
39CONFIG_HAVE_KERNEL_GZIP=y
40CONFIG_HAVE_KERNEL_BZIP2=y
41CONFIG_HAVE_KERNEL_LZMA=y
42CONFIG_KERNEL_GZIP=y
43# CONFIG_KERNEL_BZIP2 is not set
44# CONFIG_KERNEL_LZMA is not set
45# CONFIG_KERNEL_LZO is not set
39CONFIG_SWAP=y 46CONFIG_SWAP=y
40CONFIG_SYSVIPC=y 47CONFIG_SYSVIPC=y
41CONFIG_SYSVIPC_SYSCTL=y 48CONFIG_SYSVIPC_SYSCTL=y
diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
index 2b92d501425f..87cf523192e9 100644
--- a/arch/s390/hypfs/hypfs_diag.c
+++ b/arch/s390/hypfs/hypfs_diag.c
@@ -488,7 +488,7 @@ out:
488 488
489static int diag224(void *ptr) 489static int diag224(void *ptr)
490{ 490{
491 int rc = -ENOTSUPP; 491 int rc = -EOPNOTSUPP;
492 492
493 asm volatile( 493 asm volatile(
494 " diag %1,%2,0x224\n" 494 " diag %1,%2,0x224\n"
@@ -507,7 +507,7 @@ static int diag224_get_name_table(void)
507 return -ENOMEM; 507 return -ENOMEM;
508 if (diag224(diag224_cpu_names)) { 508 if (diag224(diag224_cpu_names)) {
509 kfree(diag224_cpu_names); 509 kfree(diag224_cpu_names);
510 return -ENOTSUPP; 510 return -EOPNOTSUPP;
511 } 511 }
512 EBCASC(diag224_cpu_names + 16, (*diag224_cpu_names + 1) * 16); 512 EBCASC(diag224_cpu_names + 16, (*diag224_cpu_names + 1) * 16);
513 return 0; 513 return 0;
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index 2a113d6a7dfd..451bfbb9db3d 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -18,8 +18,6 @@
18 18
19#define ATOMIC_INIT(i) { (i) } 19#define ATOMIC_INIT(i) { (i) }
20 20
21#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
22
23#define __CS_LOOP(ptr, op_val, op_string) ({ \ 21#define __CS_LOOP(ptr, op_val, op_string) ({ \
24 int old_val, new_val; \ 22 int old_val, new_val; \
25 asm volatile( \ 23 asm volatile( \
@@ -35,26 +33,6 @@
35 new_val; \ 33 new_val; \
36}) 34})
37 35
38#else /* __GNUC__ */
39
40#define __CS_LOOP(ptr, op_val, op_string) ({ \
41 int old_val, new_val; \
42 asm volatile( \
43 " l %0,0(%3)\n" \
44 "0: lr %1,%0\n" \
45 op_string " %1,%4\n" \
46 " cs %0,%1,0(%3)\n" \
47 " jl 0b" \
48 : "=&d" (old_val), "=&d" (new_val), \
49 "=m" (((atomic_t *)(ptr))->counter) \
50 : "a" (ptr), "d" (op_val), \
51 "m" (((atomic_t *)(ptr))->counter) \
52 : "cc", "memory"); \
53 new_val; \
54})
55
56#endif /* __GNUC__ */
57
58static inline int atomic_read(const atomic_t *v) 36static inline int atomic_read(const atomic_t *v)
59{ 37{
60 barrier(); 38 barrier();
@@ -101,19 +79,11 @@ static inline void atomic_set_mask(unsigned long mask, atomic_t *v)
101 79
102static inline int atomic_cmpxchg(atomic_t *v, int old, int new) 80static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
103{ 81{
104#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
105 asm volatile( 82 asm volatile(
106 " cs %0,%2,%1" 83 " cs %0,%2,%1"
107 : "+d" (old), "=Q" (v->counter) 84 : "+d" (old), "=Q" (v->counter)
108 : "d" (new), "Q" (v->counter) 85 : "d" (new), "Q" (v->counter)
109 : "cc", "memory"); 86 : "cc", "memory");
110#else /* __GNUC__ */
111 asm volatile(
112 " cs %0,%3,0(%2)"
113 : "+d" (old), "=m" (v->counter)
114 : "a" (v), "d" (new), "m" (v->counter)
115 : "cc", "memory");
116#endif /* __GNUC__ */
117 return old; 87 return old;
118} 88}
119 89
@@ -140,8 +110,6 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
140 110
141#ifdef CONFIG_64BIT 111#ifdef CONFIG_64BIT
142 112
143#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
144
145#define __CSG_LOOP(ptr, op_val, op_string) ({ \ 113#define __CSG_LOOP(ptr, op_val, op_string) ({ \
146 long long old_val, new_val; \ 114 long long old_val, new_val; \
147 asm volatile( \ 115 asm volatile( \
@@ -157,26 +125,6 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
157 new_val; \ 125 new_val; \
158}) 126})
159 127
160#else /* __GNUC__ */
161
162#define __CSG_LOOP(ptr, op_val, op_string) ({ \
163 long long old_val, new_val; \
164 asm volatile( \
165 " lg %0,0(%3)\n" \
166 "0: lgr %1,%0\n" \
167 op_string " %1,%4\n" \
168 " csg %0,%1,0(%3)\n" \
169 " jl 0b" \
170 : "=&d" (old_val), "=&d" (new_val), \
171 "=m" (((atomic_t *)(ptr))->counter) \
172 : "a" (ptr), "d" (op_val), \
173 "m" (((atomic_t *)(ptr))->counter) \
174 : "cc", "memory"); \
175 new_val; \
176})
177
178#endif /* __GNUC__ */
179
180static inline long long atomic64_read(const atomic64_t *v) 128static inline long long atomic64_read(const atomic64_t *v)
181{ 129{
182 barrier(); 130 barrier();
@@ -214,19 +162,11 @@ static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
214static inline long long atomic64_cmpxchg(atomic64_t *v, 162static inline long long atomic64_cmpxchg(atomic64_t *v,
215 long long old, long long new) 163 long long old, long long new)
216{ 164{
217#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
218 asm volatile( 165 asm volatile(
219 " csg %0,%2,%1" 166 " csg %0,%2,%1"
220 : "+d" (old), "=Q" (v->counter) 167 : "+d" (old), "=Q" (v->counter)
221 : "d" (new), "Q" (v->counter) 168 : "d" (new), "Q" (v->counter)
222 : "cc", "memory"); 169 : "cc", "memory");
223#else /* __GNUC__ */
224 asm volatile(
225 " csg %0,%3,0(%2)"
226 : "+d" (old), "=m" (v->counter)
227 : "a" (v), "d" (new), "m" (v->counter)
228 : "cc", "memory");
229#endif /* __GNUC__ */
230 return old; 170 return old;
231} 171}
232 172
@@ -243,10 +183,8 @@ static inline long long atomic64_read(const atomic64_t *v)
243 register_pair rp; 183 register_pair rp;
244 184
245 asm volatile( 185 asm volatile(
246 " lm %0,%N0,0(%1)" 186 " lm %0,%N0,%1"
247 : "=&d" (rp) 187 : "=&d" (rp) : "Q" (v->counter) );
248 : "a" (&v->counter), "m" (v->counter)
249 );
250 return rp.pair; 188 return rp.pair;
251} 189}
252 190
@@ -255,10 +193,8 @@ static inline void atomic64_set(atomic64_t *v, long long i)
255 register_pair rp = {.pair = i}; 193 register_pair rp = {.pair = i};
256 194
257 asm volatile( 195 asm volatile(
258 " stm %1,%N1,0(%2)" 196 " stm %1,%N1,%0"
259 : "=m" (v->counter) 197 : "=Q" (v->counter) : "d" (rp) );
260 : "d" (rp), "a" (&v->counter)
261 );
262} 198}
263 199
264static inline long long atomic64_xchg(atomic64_t *v, long long new) 200static inline long long atomic64_xchg(atomic64_t *v, long long new)
@@ -267,11 +203,11 @@ static inline long long atomic64_xchg(atomic64_t *v, long long new)
267 register_pair rp_old; 203 register_pair rp_old;
268 204
269 asm volatile( 205 asm volatile(
270 " lm %0,%N0,0(%2)\n" 206 " lm %0,%N0,%1\n"
271 "0: cds %0,%3,0(%2)\n" 207 "0: cds %0,%2,%1\n"
272 " jl 0b\n" 208 " jl 0b\n"
273 : "=&d" (rp_old), "+m" (v->counter) 209 : "=&d" (rp_old), "=Q" (v->counter)
274 : "a" (&v->counter), "d" (rp_new) 210 : "d" (rp_new), "Q" (v->counter)
275 : "cc"); 211 : "cc");
276 return rp_old.pair; 212 return rp_old.pair;
277} 213}
@@ -283,9 +219,9 @@ static inline long long atomic64_cmpxchg(atomic64_t *v,
283 register_pair rp_new = {.pair = new}; 219 register_pair rp_new = {.pair = new};
284 220
285 asm volatile( 221 asm volatile(
286 " cds %0,%3,0(%2)" 222 " cds %0,%2,%1"
287 : "+&d" (rp_old), "+m" (v->counter) 223 : "+&d" (rp_old), "=Q" (v->counter)
288 : "a" (&v->counter), "d" (rp_new) 224 : "d" (rp_new), "Q" (v->counter)
289 : "cc"); 225 : "cc");
290 return rp_old.pair; 226 return rp_old.pair;
291} 227}
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index b30606f6d523..2e05972c5085 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -71,8 +71,6 @@ extern const char _sb_findmap[];
71#define __BITOPS_AND "nr" 71#define __BITOPS_AND "nr"
72#define __BITOPS_XOR "xr" 72#define __BITOPS_XOR "xr"
73 73
74#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
75
76#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ 74#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \
77 asm volatile( \ 75 asm volatile( \
78 " l %0,%2\n" \ 76 " l %0,%2\n" \
@@ -85,22 +83,6 @@ extern const char _sb_findmap[];
85 : "d" (__val), "Q" (*(unsigned long *) __addr) \ 83 : "d" (__val), "Q" (*(unsigned long *) __addr) \
86 : "cc"); 84 : "cc");
87 85
88#else /* __GNUC__ */
89
90#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \
91 asm volatile( \
92 " l %0,0(%4)\n" \
93 "0: lr %1,%0\n" \
94 __op_string " %1,%3\n" \
95 " cs %0,%1,0(%4)\n" \
96 " jl 0b" \
97 : "=&d" (__old), "=&d" (__new), \
98 "=m" (*(unsigned long *) __addr) \
99 : "d" (__val), "a" (__addr), \
100 "m" (*(unsigned long *) __addr) : "cc");
101
102#endif /* __GNUC__ */
103
104#else /* __s390x__ */ 86#else /* __s390x__ */
105 87
106#define __BITOPS_ALIGN 7 88#define __BITOPS_ALIGN 7
@@ -109,8 +91,6 @@ extern const char _sb_findmap[];
109#define __BITOPS_AND "ngr" 91#define __BITOPS_AND "ngr"
110#define __BITOPS_XOR "xgr" 92#define __BITOPS_XOR "xgr"
111 93
112#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
113
114#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ 94#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \
115 asm volatile( \ 95 asm volatile( \
116 " lg %0,%2\n" \ 96 " lg %0,%2\n" \
@@ -123,23 +103,6 @@ extern const char _sb_findmap[];
123 : "d" (__val), "Q" (*(unsigned long *) __addr) \ 103 : "d" (__val), "Q" (*(unsigned long *) __addr) \
124 : "cc"); 104 : "cc");
125 105
126#else /* __GNUC__ */
127
128#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \
129 asm volatile( \
130 " lg %0,0(%4)\n" \
131 "0: lgr %1,%0\n" \
132 __op_string " %1,%3\n" \
133 " csg %0,%1,0(%4)\n" \
134 " jl 0b" \
135 : "=&d" (__old), "=&d" (__new), \
136 "=m" (*(unsigned long *) __addr) \
137 : "d" (__val), "a" (__addr), \
138 "m" (*(unsigned long *) __addr) : "cc");
139
140
141#endif /* __GNUC__ */
142
143#endif /* __s390x__ */ 106#endif /* __s390x__ */
144 107
145#define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE) 108#define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE)
@@ -261,9 +224,8 @@ static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
261 224
262 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 225 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
263 asm volatile( 226 asm volatile(
264 " oc 0(1,%1),0(%2)" 227 " oc %O0(1,%R0),%1"
265 : "=m" (*(char *) addr) : "a" (addr), 228 : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" );
266 "a" (_oi_bitmap + (nr & 7)), "m" (*(char *) addr) : "cc" );
267} 229}
268 230
269static inline void 231static inline void
@@ -290,9 +252,8 @@ __clear_bit(unsigned long nr, volatile unsigned long *ptr)
290 252
291 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 253 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
292 asm volatile( 254 asm volatile(
293 " nc 0(1,%1),0(%2)" 255 " nc %O0(1,%R0),%1"
294 : "=m" (*(char *) addr) : "a" (addr), 256 : "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc" );
295 "a" (_ni_bitmap + (nr & 7)), "m" (*(char *) addr) : "cc");
296} 257}
297 258
298static inline void 259static inline void
@@ -318,9 +279,8 @@ static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
318 279
319 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 280 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
320 asm volatile( 281 asm volatile(
321 " xc 0(1,%1),0(%2)" 282 " xc %O0(1,%R0),%1"
322 : "=m" (*(char *) addr) : "a" (addr), 283 : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" );
323 "a" (_oi_bitmap + (nr & 7)), "m" (*(char *) addr) : "cc" );
324} 284}
325 285
326static inline void 286static inline void
@@ -349,10 +309,9 @@ test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr)
349 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 309 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
350 ch = *(unsigned char *) addr; 310 ch = *(unsigned char *) addr;
351 asm volatile( 311 asm volatile(
352 " oc 0(1,%1),0(%2)" 312 " oc %O0(1,%R0),%1"
353 : "=m" (*(char *) addr) 313 : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
354 : "a" (addr), "a" (_oi_bitmap + (nr & 7)), 314 : "cc", "memory");
355 "m" (*(char *) addr) : "cc", "memory");
356 return (ch >> (nr & 7)) & 1; 315 return (ch >> (nr & 7)) & 1;
357} 316}
358#define __test_and_set_bit(X,Y) test_and_set_bit_simple(X,Y) 317#define __test_and_set_bit(X,Y) test_and_set_bit_simple(X,Y)
@@ -369,10 +328,9 @@ test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr)
369 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 328 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
370 ch = *(unsigned char *) addr; 329 ch = *(unsigned char *) addr;
371 asm volatile( 330 asm volatile(
372 " nc 0(1,%1),0(%2)" 331 " nc %O0(1,%R0),%1"
373 : "=m" (*(char *) addr) 332 : "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7])
374 : "a" (addr), "a" (_ni_bitmap + (nr & 7)), 333 : "cc", "memory");
375 "m" (*(char *) addr) : "cc", "memory");
376 return (ch >> (nr & 7)) & 1; 334 return (ch >> (nr & 7)) & 1;
377} 335}
378#define __test_and_clear_bit(X,Y) test_and_clear_bit_simple(X,Y) 336#define __test_and_clear_bit(X,Y) test_and_clear_bit_simple(X,Y)
@@ -389,10 +347,9 @@ test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr)
389 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 347 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
390 ch = *(unsigned char *) addr; 348 ch = *(unsigned char *) addr;
391 asm volatile( 349 asm volatile(
392 " xc 0(1,%1),0(%2)" 350 " xc %O0(1,%R0),%1"
393 : "=m" (*(char *) addr) 351 : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
394 : "a" (addr), "a" (_oi_bitmap + (nr & 7)), 352 : "cc", "memory");
395 "m" (*(char *) addr) : "cc", "memory");
396 return (ch >> (nr & 7)) & 1; 353 return (ch >> (nr & 7)) & 1;
397} 354}
398#define __test_and_change_bit(X,Y) test_and_change_bit_simple(X,Y) 355#define __test_and_change_bit(X,Y) test_and_change_bit_simple(X,Y)
@@ -591,11 +548,11 @@ static inline unsigned long __load_ulong_le(const unsigned long *p,
591 p = (unsigned long *)((unsigned long) p + offset); 548 p = (unsigned long *)((unsigned long) p + offset);
592#ifndef __s390x__ 549#ifndef __s390x__
593 asm volatile( 550 asm volatile(
594 " ic %0,0(%1)\n" 551 " ic %0,%O1(%R1)\n"
595 " icm %0,2,1(%1)\n" 552 " icm %0,2,%O1+1(%R1)\n"
596 " icm %0,4,2(%1)\n" 553 " icm %0,4,%O1+2(%R1)\n"
597 " icm %0,8,3(%1)" 554 " icm %0,8,%O1+3(%R1)"
598 : "=&d" (word) : "a" (p), "m" (*p) : "cc"); 555 : "=&d" (word) : "Q" (*p) : "cc");
599#else 556#else
600 asm volatile( 557 asm volatile(
601 " lrvg %0,%1" 558 " lrvg %0,%1"
diff --git a/arch/s390/include/asm/bug.h b/arch/s390/include/asm/bug.h
index b1066b9fb5f8..9beeb9db9b23 100644
--- a/arch/s390/include/asm/bug.h
+++ b/arch/s390/include/asm/bug.h
@@ -5,12 +5,6 @@
5 5
6#ifdef CONFIG_BUG 6#ifdef CONFIG_BUG
7 7
8#ifdef CONFIG_64BIT
9#define S390_LONG ".quad"
10#else
11#define S390_LONG ".long"
12#endif
13
14#ifdef CONFIG_DEBUG_BUGVERBOSE 8#ifdef CONFIG_DEBUG_BUGVERBOSE
15 9
16#define __EMIT_BUG(x) do { \ 10#define __EMIT_BUG(x) do { \
@@ -21,7 +15,7 @@
21 "2: .asciz \""__FILE__"\"\n" \ 15 "2: .asciz \""__FILE__"\"\n" \
22 ".previous\n" \ 16 ".previous\n" \
23 ".section __bug_table,\"a\"\n" \ 17 ".section __bug_table,\"a\"\n" \
24 "3:\t" S390_LONG "\t1b,2b\n" \ 18 "3: .long 1b-3b,2b-3b\n" \
25 " .short %0,%1\n" \ 19 " .short %0,%1\n" \
26 " .org 3b+%2\n" \ 20 " .org 3b+%2\n" \
27 ".previous\n" \ 21 ".previous\n" \
@@ -37,7 +31,7 @@
37 "0: j 0b+2\n" \ 31 "0: j 0b+2\n" \
38 "1:\n" \ 32 "1:\n" \
39 ".section __bug_table,\"a\"\n" \ 33 ".section __bug_table,\"a\"\n" \
40 "2:\t" S390_LONG "\t1b\n" \ 34 "2: .long 1b-2b\n" \
41 " .short %0\n" \ 35 " .short %0\n" \
42 " .org 2b+%1\n" \ 36 " .org 2b+%1\n" \
43 ".previous\n" \ 37 ".previous\n" \
diff --git a/arch/s390/include/asm/crw.h b/arch/s390/include/asm/crw.h
index 2185a6d619d3..749a97e61bea 100644
--- a/arch/s390/include/asm/crw.h
+++ b/arch/s390/include/asm/crw.h
@@ -32,6 +32,7 @@ typedef void (*crw_handler_t)(struct crw *, struct crw *, int);
32extern int crw_register_handler(int rsc, crw_handler_t handler); 32extern int crw_register_handler(int rsc, crw_handler_t handler);
33extern void crw_unregister_handler(int rsc); 33extern void crw_unregister_handler(int rsc);
34extern void crw_handle_channel_report(void); 34extern void crw_handle_channel_report(void);
35void crw_wait_for_channel_report(void);
35 36
36#define NR_RSCS 16 37#define NR_RSCS 16
37 38
diff --git a/arch/s390/include/asm/etr.h b/arch/s390/include/asm/etr.h
index 80ef58c61970..538e1b36a726 100644
--- a/arch/s390/include/asm/etr.h
+++ b/arch/s390/include/asm/etr.h
@@ -145,11 +145,11 @@ static inline int etr_setr(struct etr_eacr *ctrl)
145 int rc = -ENOSYS; 145 int rc = -ENOSYS;
146 146
147 asm volatile( 147 asm volatile(
148 " .insn s,0xb2160000,0(%2)\n" 148 " .insn s,0xb2160000,%1\n"
149 "0: la %0,0\n" 149 "0: la %0,0\n"
150 "1:\n" 150 "1:\n"
151 EX_TABLE(0b,1b) 151 EX_TABLE(0b,1b)
152 : "+d" (rc) : "m" (*ctrl), "a" (ctrl)); 152 : "+d" (rc) : "Q" (*ctrl));
153 return rc; 153 return rc;
154} 154}
155 155
@@ -159,11 +159,11 @@ static inline int etr_stetr(struct etr_aib *aib)
159 int rc = -ENOSYS; 159 int rc = -ENOSYS;
160 160
161 asm volatile( 161 asm volatile(
162 " .insn s,0xb2170000,0(%2)\n" 162 " .insn s,0xb2170000,%1\n"
163 "0: la %0,0\n" 163 "0: la %0,0\n"
164 "1:\n" 164 "1:\n"
165 EX_TABLE(0b,1b) 165 EX_TABLE(0b,1b)
166 : "+d" (rc) : "m" (*aib), "a" (aib)); 166 : "+d" (rc) : "Q" (*aib));
167 return rc; 167 return rc;
168} 168}
169 169
@@ -174,11 +174,11 @@ static inline int etr_steai(struct etr_aib *aib, unsigned int func)
174 int rc = -ENOSYS; 174 int rc = -ENOSYS;
175 175
176 asm volatile( 176 asm volatile(
177 " .insn s,0xb2b30000,0(%2)\n" 177 " .insn s,0xb2b30000,%1\n"
178 "0: la %0,0\n" 178 "0: la %0,0\n"
179 "1:\n" 179 "1:\n"
180 EX_TABLE(0b,1b) 180 EX_TABLE(0b,1b)
181 : "+d" (rc) : "m" (*aib), "a" (aib), "d" (reg0)); 181 : "+d" (rc) : "Q" (*aib), "d" (reg0));
182 return rc; 182 return rc;
183} 183}
184 184
diff --git a/arch/s390/include/asm/irqflags.h b/arch/s390/include/asm/irqflags.h
index c2fb432f576a..15b3ac253898 100644
--- a/arch/s390/include/asm/irqflags.h
+++ b/arch/s390/include/asm/irqflags.h
@@ -8,8 +8,6 @@
8 8
9#include <linux/types.h> 9#include <linux/types.h>
10 10
11#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
12
13/* store then or system mask. */ 11/* store then or system mask. */
14#define __raw_local_irq_stosm(__or) \ 12#define __raw_local_irq_stosm(__or) \
15({ \ 13({ \
@@ -36,40 +34,6 @@
36 asm volatile("ssm %0" : : "Q" (__mask) : "memory"); \ 34 asm volatile("ssm %0" : : "Q" (__mask) : "memory"); \
37}) 35})
38 36
39#else /* __GNUC__ */
40
41/* store then or system mask. */
42#define __raw_local_irq_stosm(__or) \
43({ \
44 unsigned long __mask; \
45 asm volatile( \
46 " stosm 0(%1),%2" \
47 : "=m" (__mask) \
48 : "a" (&__mask), "i" (__or) : "memory"); \
49 __mask; \
50})
51
52/* store then and system mask. */
53#define __raw_local_irq_stnsm(__and) \
54({ \
55 unsigned long __mask; \
56 asm volatile( \
57 " stnsm 0(%1),%2" \
58 : "=m" (__mask) \
59 : "a" (&__mask), "i" (__and) : "memory"); \
60 __mask; \
61})
62
63/* set system mask. */
64#define __raw_local_irq_ssm(__mask) \
65({ \
66 asm volatile( \
67 " ssm 0(%0)" \
68 : : "a" (&__mask), "m" (__mask) : "memory"); \
69})
70
71#endif /* __GNUC__ */
72
73/* interrupt control.. */ 37/* interrupt control.. */
74static inline unsigned long raw_local_irq_enable(void) 38static inline unsigned long raw_local_irq_enable(void)
75{ 39{
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index c25dfac7dd76..05527c040b7a 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -1,141 +1,16 @@
1/* 1/*
2 * include/asm-s390/lowcore.h 2 * Copyright IBM Corp. 1999,2010
3 * 3 * Author(s): Hartmut Penner <hp@de.ibm.com>,
4 * S390 version 4 * Martin Schwidefsky <schwidefsky@de.ibm.com>,
5 * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation 5 * Denis Joseph Barrow,
6 * Author(s): Hartmut Penner (hp@de.ibm.com),
7 * Martin Schwidefsky (schwidefsky@de.ibm.com),
8 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
9 */ 6 */
10 7
11#ifndef _ASM_S390_LOWCORE_H 8#ifndef _ASM_S390_LOWCORE_H
12#define _ASM_S390_LOWCORE_H 9#define _ASM_S390_LOWCORE_H
13 10
14#define __LC_IPL_PARMBLOCK_PTR 0x0014
15#define __LC_EXT_PARAMS 0x0080
16#define __LC_CPU_ADDRESS 0x0084
17#define __LC_EXT_INT_CODE 0x0086
18
19#define __LC_SVC_ILC 0x0088
20#define __LC_SVC_INT_CODE 0x008a
21#define __LC_PGM_ILC 0x008c
22#define __LC_PGM_INT_CODE 0x008e
23
24#define __LC_PER_ATMID 0x0096
25#define __LC_PER_ADDRESS 0x0098
26#define __LC_PER_ACCESS_ID 0x00a1
27#define __LC_AR_MODE_ID 0x00a3
28
29#define __LC_SUBCHANNEL_ID 0x00b8
30#define __LC_SUBCHANNEL_NR 0x00ba
31#define __LC_IO_INT_PARM 0x00bc
32#define __LC_IO_INT_WORD 0x00c0
33#define __LC_STFL_FAC_LIST 0x00c8
34#define __LC_MCCK_CODE 0x00e8
35
36#define __LC_DUMP_REIPL 0x0e00
37
38#ifndef __s390x__
39#define __LC_EXT_OLD_PSW 0x0018
40#define __LC_SVC_OLD_PSW 0x0020
41#define __LC_PGM_OLD_PSW 0x0028
42#define __LC_MCK_OLD_PSW 0x0030
43#define __LC_IO_OLD_PSW 0x0038
44#define __LC_EXT_NEW_PSW 0x0058
45#define __LC_SVC_NEW_PSW 0x0060
46#define __LC_PGM_NEW_PSW 0x0068
47#define __LC_MCK_NEW_PSW 0x0070
48#define __LC_IO_NEW_PSW 0x0078
49#define __LC_SAVE_AREA 0x0200
50#define __LC_RETURN_PSW 0x0240
51#define __LC_RETURN_MCCK_PSW 0x0248
52#define __LC_SYNC_ENTER_TIMER 0x0250
53#define __LC_ASYNC_ENTER_TIMER 0x0258
54#define __LC_EXIT_TIMER 0x0260
55#define __LC_USER_TIMER 0x0268
56#define __LC_SYSTEM_TIMER 0x0270
57#define __LC_STEAL_TIMER 0x0278
58#define __LC_LAST_UPDATE_TIMER 0x0280
59#define __LC_LAST_UPDATE_CLOCK 0x0288
60#define __LC_CURRENT 0x0290
61#define __LC_THREAD_INFO 0x0294
62#define __LC_KERNEL_STACK 0x0298
63#define __LC_ASYNC_STACK 0x029c
64#define __LC_PANIC_STACK 0x02a0
65#define __LC_KERNEL_ASCE 0x02a4
66#define __LC_USER_ASCE 0x02a8
67#define __LC_USER_EXEC_ASCE 0x02ac
68#define __LC_CPUID 0x02b0
69#define __LC_INT_CLOCK 0x02c8
70#define __LC_MACHINE_FLAGS 0x02d8
71#define __LC_FTRACE_FUNC 0x02dc
72#define __LC_IRB 0x0300
73#define __LC_PFAULT_INTPARM 0x0080
74#define __LC_CPU_TIMER_SAVE_AREA 0x00d8
75#define __LC_CLOCK_COMP_SAVE_AREA 0x00e0
76#define __LC_PSW_SAVE_AREA 0x0100
77#define __LC_PREFIX_SAVE_AREA 0x0108
78#define __LC_AREGS_SAVE_AREA 0x0120
79#define __LC_FPREGS_SAVE_AREA 0x0160
80#define __LC_GPREGS_SAVE_AREA 0x0180
81#define __LC_CREGS_SAVE_AREA 0x01c0
82#else /* __s390x__ */
83#define __LC_LAST_BREAK 0x0110
84#define __LC_EXT_OLD_PSW 0x0130
85#define __LC_SVC_OLD_PSW 0x0140
86#define __LC_PGM_OLD_PSW 0x0150
87#define __LC_MCK_OLD_PSW 0x0160
88#define __LC_IO_OLD_PSW 0x0170
89#define __LC_RESTART_PSW 0x01a0
90#define __LC_EXT_NEW_PSW 0x01b0
91#define __LC_SVC_NEW_PSW 0x01c0
92#define __LC_PGM_NEW_PSW 0x01d0
93#define __LC_MCK_NEW_PSW 0x01e0
94#define __LC_IO_NEW_PSW 0x01f0
95#define __LC_SAVE_AREA 0x0200
96#define __LC_RETURN_PSW 0x0280
97#define __LC_RETURN_MCCK_PSW 0x0290
98#define __LC_SYNC_ENTER_TIMER 0x02a0
99#define __LC_ASYNC_ENTER_TIMER 0x02a8
100#define __LC_EXIT_TIMER 0x02b0
101#define __LC_USER_TIMER 0x02b8
102#define __LC_SYSTEM_TIMER 0x02c0
103#define __LC_STEAL_TIMER 0x02c8
104#define __LC_LAST_UPDATE_TIMER 0x02d0
105#define __LC_LAST_UPDATE_CLOCK 0x02d8
106#define __LC_CURRENT 0x02e0
107#define __LC_THREAD_INFO 0x02e8
108#define __LC_KERNEL_STACK 0x02f0
109#define __LC_ASYNC_STACK 0x02f8
110#define __LC_PANIC_STACK 0x0300
111#define __LC_KERNEL_ASCE 0x0308
112#define __LC_USER_ASCE 0x0310
113#define __LC_USER_EXEC_ASCE 0x0318
114#define __LC_CPUID 0x0320
115#define __LC_INT_CLOCK 0x0340
116#define __LC_VDSO_PER_CPU 0x0350
117#define __LC_MACHINE_FLAGS 0x0358
118#define __LC_FTRACE_FUNC 0x0360
119#define __LC_IRB 0x0380
120#define __LC_PASTE 0x03c0
121#define __LC_PFAULT_INTPARM 0x11b8
122#define __LC_FPREGS_SAVE_AREA 0x1200
123#define __LC_GPREGS_SAVE_AREA 0x1280
124#define __LC_PSW_SAVE_AREA 0x1300
125#define __LC_PREFIX_SAVE_AREA 0x1318
126#define __LC_FP_CREG_SAVE_AREA 0x131c
127#define __LC_TODREG_SAVE_AREA 0x1324
128#define __LC_CPU_TIMER_SAVE_AREA 0x1328
129#define __LC_CLOCK_COMP_SAVE_AREA 0x1331
130#define __LC_AREGS_SAVE_AREA 0x1340
131#define __LC_CREGS_SAVE_AREA 0x1380
132#endif /* __s390x__ */
133
134#ifndef __ASSEMBLY__
135
136#include <asm/cpu.h>
137#include <asm/ptrace.h>
138#include <linux/types.h> 11#include <linux/types.h>
12#include <asm/ptrace.h>
13#include <asm/cpu.h>
139 14
140void restart_int_handler(void); 15void restart_int_handler(void);
141void ext_int_handler(void); 16void ext_int_handler(void);
@@ -144,7 +19,12 @@ void pgm_check_handler(void);
144void mcck_int_handler(void); 19void mcck_int_handler(void);
145void io_int_handler(void); 20void io_int_handler(void);
146 21
147struct save_area_s390 { 22#ifdef CONFIG_32BIT
23
24#define LC_ORDER 0
25#define LC_PAGES 1
26
27struct save_area {
148 u32 ext_save; 28 u32 ext_save;
149 u64 timer; 29 u64 timer;
150 u64 clk_cmp; 30 u64 clk_cmp;
@@ -156,54 +36,13 @@ struct save_area_s390 {
156 u64 fp_regs[4]; 36 u64 fp_regs[4];
157 u32 gp_regs[16]; 37 u32 gp_regs[16];
158 u32 ctrl_regs[16]; 38 u32 ctrl_regs[16];
159} __attribute__((packed)); 39} __packed;
160 40
161struct save_area_s390x { 41struct _lowcore {
162 u64 fp_regs[16];
163 u64 gp_regs[16];
164 u8 psw[16];
165 u8 pad1[8];
166 u32 pref_reg;
167 u32 fp_ctrl_reg;
168 u8 pad2[4];
169 u32 tod_reg;
170 u64 timer;
171 u64 clk_cmp;
172 u8 pad3[8];
173 u32 acc_regs[16];
174 u64 ctrl_regs[16];
175} __attribute__((packed));
176
177union save_area {
178 struct save_area_s390 s390;
179 struct save_area_s390x s390x;
180};
181
182#define SAVE_AREA_BASE_S390 0xd4
183#define SAVE_AREA_BASE_S390X 0x1200
184
185#ifndef __s390x__
186#define SAVE_AREA_SIZE sizeof(struct save_area_s390)
187#define SAVE_AREA_BASE SAVE_AREA_BASE_S390
188#else
189#define SAVE_AREA_SIZE sizeof(struct save_area_s390x)
190#define SAVE_AREA_BASE SAVE_AREA_BASE_S390X
191#endif
192
193#ifndef __s390x__
194#define LC_ORDER 0
195#else
196#define LC_ORDER 1
197#endif
198
199#define LC_PAGES (1UL << LC_ORDER)
200
201struct _lowcore
202{
203#ifndef __s390x__
204 /* 0x0000 - 0x01ff: defined by architecture */
205 psw_t restart_psw; /* 0x0000 */ 42 psw_t restart_psw; /* 0x0000 */
206 __u32 ccw2[4]; /* 0x0008 */ 43 psw_t restart_old_psw; /* 0x0008 */
44 __u8 pad_0x0010[0x0014-0x0010]; /* 0x0010 */
45 __u32 ipl_parmblock_ptr; /* 0x0014 */
207 psw_t external_old_psw; /* 0x0018 */ 46 psw_t external_old_psw; /* 0x0018 */
208 psw_t svc_old_psw; /* 0x0020 */ 47 psw_t svc_old_psw; /* 0x0020 */
209 psw_t program_old_psw; /* 0x0028 */ 48 psw_t program_old_psw; /* 0x0028 */
@@ -229,7 +68,9 @@ struct _lowcore
229 __u32 monitor_code; /* 0x009c */ 68 __u32 monitor_code; /* 0x009c */
230 __u8 exc_access_id; /* 0x00a0 */ 69 __u8 exc_access_id; /* 0x00a0 */
231 __u8 per_access_id; /* 0x00a1 */ 70 __u8 per_access_id; /* 0x00a1 */
232 __u8 pad_0x00a2[0x00b8-0x00a2]; /* 0x00a2 */ 71 __u8 op_access_id; /* 0x00a2 */
72 __u8 ar_access_id; /* 0x00a3 */
73 __u8 pad_0x00a4[0x00b8-0x00a4]; /* 0x00a4 */
233 __u16 subchannel_id; /* 0x00b8 */ 74 __u16 subchannel_id; /* 0x00b8 */
234 __u16 subchannel_nr; /* 0x00ba */ 75 __u16 subchannel_nr; /* 0x00ba */
235 __u32 io_int_parm; /* 0x00bc */ 76 __u32 io_int_parm; /* 0x00bc */
@@ -245,8 +86,9 @@ struct _lowcore
245 __u32 external_damage_code; /* 0x00f4 */ 86 __u32 external_damage_code; /* 0x00f4 */
246 __u32 failing_storage_address; /* 0x00f8 */ 87 __u32 failing_storage_address; /* 0x00f8 */
247 __u8 pad_0x00fc[0x0100-0x00fc]; /* 0x00fc */ 88 __u8 pad_0x00fc[0x0100-0x00fc]; /* 0x00fc */
248 __u32 st_status_fixed_logout[4]; /* 0x0100 */ 89 psw_t psw_save_area; /* 0x0100 */
249 __u8 pad_0x0110[0x0120-0x0110]; /* 0x0110 */ 90 __u32 prefixreg_save_area; /* 0x0108 */
91 __u8 pad_0x010c[0x0120-0x010c]; /* 0x010c */
250 92
251 /* CPU register save area: defined by architecture */ 93 /* CPU register save area: defined by architecture */
252 __u32 access_regs_save_area[16]; /* 0x0120 */ 94 __u32 access_regs_save_area[16]; /* 0x0120 */
@@ -310,10 +152,32 @@ struct _lowcore
310 152
311 /* Align to the top 1k of prefix area */ 153 /* Align to the top 1k of prefix area */
312 __u8 pad_0x0e08[0x1000-0x0e08]; /* 0x0e08 */ 154 __u8 pad_0x0e08[0x1000-0x0e08]; /* 0x0e08 */
313#else /* !__s390x__ */ 155} __packed;
314 /* 0x0000 - 0x01ff: defined by architecture */ 156
315 __u32 ccw1[2]; /* 0x0000 */ 157#else /* CONFIG_32BIT */
316 __u32 ccw2[4]; /* 0x0008 */ 158
159#define LC_ORDER 1
160#define LC_PAGES 2
161
162struct save_area {
163 u64 fp_regs[16];
164 u64 gp_regs[16];
165 u8 psw[16];
166 u8 pad1[8];
167 u32 pref_reg;
168 u32 fp_ctrl_reg;
169 u8 pad2[4];
170 u32 tod_reg;
171 u64 timer;
172 u64 clk_cmp;
173 u8 pad3[8];
174 u32 acc_regs[16];
175 u64 ctrl_regs[16];
176} __packed;
177
178struct _lowcore {
179 __u8 pad_0x0000[0x0014-0x0000]; /* 0x0000 */
180 __u32 ipl_parmblock_ptr; /* 0x0014 */
317 __u8 pad_0x0018[0x0080-0x0018]; /* 0x0018 */ 181 __u8 pad_0x0018[0x0080-0x0018]; /* 0x0018 */
318 __u32 ext_params; /* 0x0080 */ 182 __u32 ext_params; /* 0x0080 */
319 __u16 cpu_addr; /* 0x0084 */ 183 __u16 cpu_addr; /* 0x0084 */
@@ -344,7 +208,9 @@ struct _lowcore
344 __u8 pad_0x00f0[0x00f4-0x00f0]; /* 0x00f0 */ 208 __u8 pad_0x00f0[0x00f4-0x00f0]; /* 0x00f0 */
345 __u32 external_damage_code; /* 0x00f4 */ 209 __u32 external_damage_code; /* 0x00f4 */
346 addr_t failing_storage_address; /* 0x00f8 */ 210 addr_t failing_storage_address; /* 0x00f8 */
347 __u8 pad_0x0100[0x0120-0x0100]; /* 0x0100 */ 211 __u8 pad_0x0100[0x0110-0x0100]; /* 0x0100 */
212 __u64 breaking_event_addr; /* 0x0110 */
213 __u8 pad_0x0118[0x0120-0x0118]; /* 0x0118 */
348 psw_t restart_old_psw; /* 0x0120 */ 214 psw_t restart_old_psw; /* 0x0120 */
349 psw_t external_old_psw; /* 0x0130 */ 215 psw_t external_old_psw; /* 0x0130 */
350 psw_t svc_old_psw; /* 0x0140 */ 216 psw_t svc_old_psw; /* 0x0140 */
@@ -425,7 +291,7 @@ struct _lowcore
425 /* CPU register save area: defined by architecture */ 291 /* CPU register save area: defined by architecture */
426 __u64 floating_pt_save_area[16]; /* 0x1200 */ 292 __u64 floating_pt_save_area[16]; /* 0x1200 */
427 __u64 gpregs_save_area[16]; /* 0x1280 */ 293 __u64 gpregs_save_area[16]; /* 0x1280 */
428 __u32 st_status_fixed_logout[4]; /* 0x1300 */ 294 psw_t psw_save_area; /* 0x1300 */
429 __u8 pad_0x1310[0x1318-0x1310]; /* 0x1310 */ 295 __u8 pad_0x1310[0x1318-0x1310]; /* 0x1310 */
430 __u32 prefixreg_save_area; /* 0x1318 */ 296 __u32 prefixreg_save_area; /* 0x1318 */
431 __u32 fpt_creg_save_area; /* 0x131c */ 297 __u32 fpt_creg_save_area; /* 0x131c */
@@ -439,10 +305,12 @@ struct _lowcore
439 305
440 /* align to the top of the prefix area */ 306 /* align to the top of the prefix area */
441 __u8 pad_0x1400[0x2000-0x1400]; /* 0x1400 */ 307 __u8 pad_0x1400[0x2000-0x1400]; /* 0x1400 */
442#endif /* !__s390x__ */ 308} __packed;
443} __attribute__((packed)); /* End structure*/ 309
310#endif /* CONFIG_32BIT */
444 311
445#define S390_lowcore (*((struct _lowcore *) 0)) 312#define S390_lowcore (*((struct _lowcore *) 0))
313
446extern struct _lowcore *lowcore_ptr[]; 314extern struct _lowcore *lowcore_ptr[];
447 315
448static inline void set_prefix(__u32 address) 316static inline void set_prefix(__u32 address)
@@ -458,6 +326,4 @@ static inline __u32 store_prefix(void)
458 return address; 326 return address;
459} 327}
460 328
461#endif 329#endif /* _ASM_S390_LOWCORE_H */
462
463#endif
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 5e9daf5d7f22..af650fb47206 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -107,9 +107,6 @@ typedef pte_t *pgtable_t;
107#define __pgd(x) ((pgd_t) { (x) } ) 107#define __pgd(x) ((pgd_t) { (x) } )
108#define __pgprot(x) ((pgprot_t) { (x) } ) 108#define __pgprot(x) ((pgprot_t) { (x) } )
109 109
110/* default storage key used for all pages */
111extern unsigned int default_storage_key;
112
113static inline void 110static inline void
114page_set_storage_key(unsigned long addr, unsigned int skey) 111page_set_storage_key(unsigned long addr, unsigned int skey)
115{ 112{
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index b42715458312..73e259834e10 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -28,7 +28,7 @@
28 28
29static inline void get_cpu_id(struct cpuid *ptr) 29static inline void get_cpu_id(struct cpuid *ptr)
30{ 30{
31 asm volatile("stidp 0(%1)" : "=m" (*ptr) : "a" (ptr)); 31 asm volatile("stidp %0" : "=Q" (*ptr));
32} 32}
33 33
34extern void s390_adjust_jiffies(void); 34extern void s390_adjust_jiffies(void);
@@ -184,9 +184,9 @@ static inline void psw_set_key(unsigned int key)
184static inline void __load_psw(psw_t psw) 184static inline void __load_psw(psw_t psw)
185{ 185{
186#ifndef __s390x__ 186#ifndef __s390x__
187 asm volatile("lpsw 0(%0)" : : "a" (&psw), "m" (psw) : "cc"); 187 asm volatile("lpsw %0" : : "Q" (psw) : "cc");
188#else 188#else
189 asm volatile("lpswe 0(%0)" : : "a" (&psw), "m" (psw) : "cc"); 189 asm volatile("lpswe %0" : : "Q" (psw) : "cc");
190#endif 190#endif
191} 191}
192 192
@@ -206,17 +206,17 @@ static inline void __load_psw_mask (unsigned long mask)
206 asm volatile( 206 asm volatile(
207 " basr %0,0\n" 207 " basr %0,0\n"
208 "0: ahi %0,1f-0b\n" 208 "0: ahi %0,1f-0b\n"
209 " st %0,4(%1)\n" 209 " st %0,%O1+4(%R1)\n"
210 " lpsw 0(%1)\n" 210 " lpsw %1\n"
211 "1:" 211 "1:"
212 : "=&d" (addr) : "a" (&psw), "m" (psw) : "memory", "cc"); 212 : "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc");
213#else /* __s390x__ */ 213#else /* __s390x__ */
214 asm volatile( 214 asm volatile(
215 " larl %0,1f\n" 215 " larl %0,1f\n"
216 " stg %0,8(%1)\n" 216 " stg %0,%O1+8(%R1)\n"
217 " lpswe 0(%1)\n" 217 " lpswe %1\n"
218 "1:" 218 "1:"
219 : "=&d" (addr) : "a" (&psw), "m" (psw) : "memory", "cc"); 219 : "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc");
220#endif /* __s390x__ */ 220#endif /* __s390x__ */
221} 221}
222 222
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index 79d849f014f0..c666bfe5e984 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -13,7 +13,8 @@
13#include <asm/cio.h> 13#include <asm/cio.h>
14#include <asm/ccwdev.h> 14#include <asm/ccwdev.h>
15 15
16#define QDIO_MAX_QUEUES_PER_IRQ 32 16/* only use 4 queues to save some cachelines */
17#define QDIO_MAX_QUEUES_PER_IRQ 4
17#define QDIO_MAX_BUFFERS_PER_Q 128 18#define QDIO_MAX_BUFFERS_PER_Q 128
18#define QDIO_MAX_BUFFERS_MASK (QDIO_MAX_BUFFERS_PER_Q - 1) 19#define QDIO_MAX_BUFFERS_MASK (QDIO_MAX_BUFFERS_PER_Q - 1)
19#define QDIO_MAX_ELEMENTS_PER_BUFFER 16 20#define QDIO_MAX_ELEMENTS_PER_BUFFER 16
diff --git a/arch/s390/include/asm/rwsem.h b/arch/s390/include/asm/rwsem.h
index 9d2a17971805..423fdda2322d 100644
--- a/arch/s390/include/asm/rwsem.h
+++ b/arch/s390/include/asm/rwsem.h
@@ -124,21 +124,21 @@ static inline void __down_read(struct rw_semaphore *sem)
124 124
125 asm volatile( 125 asm volatile(
126#ifndef __s390x__ 126#ifndef __s390x__
127 " l %0,0(%3)\n" 127 " l %0,%2\n"
128 "0: lr %1,%0\n" 128 "0: lr %1,%0\n"
129 " ahi %1,%5\n" 129 " ahi %1,%4\n"
130 " cs %0,%1,0(%3)\n" 130 " cs %0,%1,%2\n"
131 " jl 0b" 131 " jl 0b"
132#else /* __s390x__ */ 132#else /* __s390x__ */
133 " lg %0,0(%3)\n" 133 " lg %0,%2\n"
134 "0: lgr %1,%0\n" 134 "0: lgr %1,%0\n"
135 " aghi %1,%5\n" 135 " aghi %1,%4\n"
136 " csg %0,%1,0(%3)\n" 136 " csg %0,%1,%2\n"
137 " jl 0b" 137 " jl 0b"
138#endif /* __s390x__ */ 138#endif /* __s390x__ */
139 : "=&d" (old), "=&d" (new), "=m" (sem->count) 139 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
140 : "a" (&sem->count), "m" (sem->count), 140 : "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
141 "i" (RWSEM_ACTIVE_READ_BIAS) : "cc", "memory"); 141 : "cc", "memory");
142 if (old < 0) 142 if (old < 0)
143 rwsem_down_read_failed(sem); 143 rwsem_down_read_failed(sem);
144} 144}
@@ -152,25 +152,25 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
152 152
153 asm volatile( 153 asm volatile(
154#ifndef __s390x__ 154#ifndef __s390x__
155 " l %0,0(%3)\n" 155 " l %0,%2\n"
156 "0: ltr %1,%0\n" 156 "0: ltr %1,%0\n"
157 " jm 1f\n" 157 " jm 1f\n"
158 " ahi %1,%5\n" 158 " ahi %1,%4\n"
159 " cs %0,%1,0(%3)\n" 159 " cs %0,%1,%2\n"
160 " jl 0b\n" 160 " jl 0b\n"
161 "1:" 161 "1:"
162#else /* __s390x__ */ 162#else /* __s390x__ */
163 " lg %0,0(%3)\n" 163 " lg %0,%2\n"
164 "0: ltgr %1,%0\n" 164 "0: ltgr %1,%0\n"
165 " jm 1f\n" 165 " jm 1f\n"
166 " aghi %1,%5\n" 166 " aghi %1,%4\n"
167 " csg %0,%1,0(%3)\n" 167 " csg %0,%1,%2\n"
168 " jl 0b\n" 168 " jl 0b\n"
169 "1:" 169 "1:"
170#endif /* __s390x__ */ 170#endif /* __s390x__ */
171 : "=&d" (old), "=&d" (new), "=m" (sem->count) 171 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
172 : "a" (&sem->count), "m" (sem->count), 172 : "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
173 "i" (RWSEM_ACTIVE_READ_BIAS) : "cc", "memory"); 173 : "cc", "memory");
174 return old >= 0 ? 1 : 0; 174 return old >= 0 ? 1 : 0;
175} 175}
176 176
@@ -184,20 +184,20 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
184 tmp = RWSEM_ACTIVE_WRITE_BIAS; 184 tmp = RWSEM_ACTIVE_WRITE_BIAS;
185 asm volatile( 185 asm volatile(
186#ifndef __s390x__ 186#ifndef __s390x__
187 " l %0,0(%3)\n" 187 " l %0,%2\n"
188 "0: lr %1,%0\n" 188 "0: lr %1,%0\n"
189 " a %1,%5\n" 189 " a %1,%4\n"
190 " cs %0,%1,0(%3)\n" 190 " cs %0,%1,%2\n"
191 " jl 0b" 191 " jl 0b"
192#else /* __s390x__ */ 192#else /* __s390x__ */
193 " lg %0,0(%3)\n" 193 " lg %0,%2\n"
194 "0: lgr %1,%0\n" 194 "0: lgr %1,%0\n"
195 " ag %1,%5\n" 195 " ag %1,%4\n"
196 " csg %0,%1,0(%3)\n" 196 " csg %0,%1,%2\n"
197 " jl 0b" 197 " jl 0b"
198#endif /* __s390x__ */ 198#endif /* __s390x__ */
199 : "=&d" (old), "=&d" (new), "=m" (sem->count) 199 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
200 : "a" (&sem->count), "m" (sem->count), "m" (tmp) 200 : "Q" (sem->count), "m" (tmp)
201 : "cc", "memory"); 201 : "cc", "memory");
202 if (old != 0) 202 if (old != 0)
203 rwsem_down_write_failed(sem); 203 rwsem_down_write_failed(sem);
@@ -217,22 +217,22 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
217 217
218 asm volatile( 218 asm volatile(
219#ifndef __s390x__ 219#ifndef __s390x__
220 " l %0,0(%2)\n" 220 " l %0,%1\n"
221 "0: ltr %0,%0\n" 221 "0: ltr %0,%0\n"
222 " jnz 1f\n" 222 " jnz 1f\n"
223 " cs %0,%4,0(%2)\n" 223 " cs %0,%3,%1\n"
224 " jl 0b\n" 224 " jl 0b\n"
225#else /* __s390x__ */ 225#else /* __s390x__ */
226 " lg %0,0(%2)\n" 226 " lg %0,%1\n"
227 "0: ltgr %0,%0\n" 227 "0: ltgr %0,%0\n"
228 " jnz 1f\n" 228 " jnz 1f\n"
229 " csg %0,%4,0(%2)\n" 229 " csg %0,%3,%1\n"
230 " jl 0b\n" 230 " jl 0b\n"
231#endif /* __s390x__ */ 231#endif /* __s390x__ */
232 "1:" 232 "1:"
233 : "=&d" (old), "=m" (sem->count) 233 : "=&d" (old), "=Q" (sem->count)
234 : "a" (&sem->count), "m" (sem->count), 234 : "Q" (sem->count), "d" (RWSEM_ACTIVE_WRITE_BIAS)
235 "d" (RWSEM_ACTIVE_WRITE_BIAS) : "cc", "memory"); 235 : "cc", "memory");
236 return (old == RWSEM_UNLOCKED_VALUE) ? 1 : 0; 236 return (old == RWSEM_UNLOCKED_VALUE) ? 1 : 0;
237} 237}
238 238
@@ -245,21 +245,20 @@ static inline void __up_read(struct rw_semaphore *sem)
245 245
246 asm volatile( 246 asm volatile(
247#ifndef __s390x__ 247#ifndef __s390x__
248 " l %0,0(%3)\n" 248 " l %0,%2\n"
249 "0: lr %1,%0\n" 249 "0: lr %1,%0\n"
250 " ahi %1,%5\n" 250 " ahi %1,%4\n"
251 " cs %0,%1,0(%3)\n" 251 " cs %0,%1,%2\n"
252 " jl 0b" 252 " jl 0b"
253#else /* __s390x__ */ 253#else /* __s390x__ */
254 " lg %0,0(%3)\n" 254 " lg %0,%2\n"
255 "0: lgr %1,%0\n" 255 "0: lgr %1,%0\n"
256 " aghi %1,%5\n" 256 " aghi %1,%4\n"
257 " csg %0,%1,0(%3)\n" 257 " csg %0,%1,%2\n"
258 " jl 0b" 258 " jl 0b"
259#endif /* __s390x__ */ 259#endif /* __s390x__ */
260 : "=&d" (old), "=&d" (new), "=m" (sem->count) 260 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
261 : "a" (&sem->count), "m" (sem->count), 261 : "Q" (sem->count), "i" (-RWSEM_ACTIVE_READ_BIAS)
262 "i" (-RWSEM_ACTIVE_READ_BIAS)
263 : "cc", "memory"); 262 : "cc", "memory");
264 if (new < 0) 263 if (new < 0)
265 if ((new & RWSEM_ACTIVE_MASK) == 0) 264 if ((new & RWSEM_ACTIVE_MASK) == 0)
@@ -276,20 +275,20 @@ static inline void __up_write(struct rw_semaphore *sem)
276 tmp = -RWSEM_ACTIVE_WRITE_BIAS; 275 tmp = -RWSEM_ACTIVE_WRITE_BIAS;
277 asm volatile( 276 asm volatile(
278#ifndef __s390x__ 277#ifndef __s390x__
279 " l %0,0(%3)\n" 278 " l %0,%2\n"
280 "0: lr %1,%0\n" 279 "0: lr %1,%0\n"
281 " a %1,%5\n" 280 " a %1,%4\n"
282 " cs %0,%1,0(%3)\n" 281 " cs %0,%1,%2\n"
283 " jl 0b" 282 " jl 0b"
284#else /* __s390x__ */ 283#else /* __s390x__ */
285 " lg %0,0(%3)\n" 284 " lg %0,%2\n"
286 "0: lgr %1,%0\n" 285 "0: lgr %1,%0\n"
287 " ag %1,%5\n" 286 " ag %1,%4\n"
288 " csg %0,%1,0(%3)\n" 287 " csg %0,%1,%2\n"
289 " jl 0b" 288 " jl 0b"
290#endif /* __s390x__ */ 289#endif /* __s390x__ */
291 : "=&d" (old), "=&d" (new), "=m" (sem->count) 290 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
292 : "a" (&sem->count), "m" (sem->count), "m" (tmp) 291 : "Q" (sem->count), "m" (tmp)
293 : "cc", "memory"); 292 : "cc", "memory");
294 if (new < 0) 293 if (new < 0)
295 if ((new & RWSEM_ACTIVE_MASK) == 0) 294 if ((new & RWSEM_ACTIVE_MASK) == 0)
@@ -306,20 +305,20 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
306 tmp = -RWSEM_WAITING_BIAS; 305 tmp = -RWSEM_WAITING_BIAS;
307 asm volatile( 306 asm volatile(
308#ifndef __s390x__ 307#ifndef __s390x__
309 " l %0,0(%3)\n" 308 " l %0,%2\n"
310 "0: lr %1,%0\n" 309 "0: lr %1,%0\n"
311 " a %1,%5\n" 310 " a %1,%4\n"
312 " cs %0,%1,0(%3)\n" 311 " cs %0,%1,%2\n"
313 " jl 0b" 312 " jl 0b"
314#else /* __s390x__ */ 313#else /* __s390x__ */
315 " lg %0,0(%3)\n" 314 " lg %0,%2\n"
316 "0: lgr %1,%0\n" 315 "0: lgr %1,%0\n"
317 " ag %1,%5\n" 316 " ag %1,%4\n"
318 " csg %0,%1,0(%3)\n" 317 " csg %0,%1,%2\n"
319 " jl 0b" 318 " jl 0b"
320#endif /* __s390x__ */ 319#endif /* __s390x__ */
321 : "=&d" (old), "=&d" (new), "=m" (sem->count) 320 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
322 : "a" (&sem->count), "m" (sem->count), "m" (tmp) 321 : "Q" (sem->count), "m" (tmp)
323 : "cc", "memory"); 322 : "cc", "memory");
324 if (new > 1) 323 if (new > 1)
325 rwsem_downgrade_wake(sem); 324 rwsem_downgrade_wake(sem);
@@ -334,20 +333,20 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
334 333
335 asm volatile( 334 asm volatile(
336#ifndef __s390x__ 335#ifndef __s390x__
337 " l %0,0(%3)\n" 336 " l %0,%2\n"
338 "0: lr %1,%0\n" 337 "0: lr %1,%0\n"
339 " ar %1,%5\n" 338 " ar %1,%4\n"
340 " cs %0,%1,0(%3)\n" 339 " cs %0,%1,%2\n"
341 " jl 0b" 340 " jl 0b"
342#else /* __s390x__ */ 341#else /* __s390x__ */
343 " lg %0,0(%3)\n" 342 " lg %0,%2\n"
344 "0: lgr %1,%0\n" 343 "0: lgr %1,%0\n"
345 " agr %1,%5\n" 344 " agr %1,%4\n"
346 " csg %0,%1,0(%3)\n" 345 " csg %0,%1,%2\n"
347 " jl 0b" 346 " jl 0b"
348#endif /* __s390x__ */ 347#endif /* __s390x__ */
349 : "=&d" (old), "=&d" (new), "=m" (sem->count) 348 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
350 : "a" (&sem->count), "m" (sem->count), "d" (delta) 349 : "Q" (sem->count), "d" (delta)
351 : "cc", "memory"); 350 : "cc", "memory");
352} 351}
353 352
@@ -360,20 +359,20 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
360 359
361 asm volatile( 360 asm volatile(
362#ifndef __s390x__ 361#ifndef __s390x__
363 " l %0,0(%3)\n" 362 " l %0,%2\n"
364 "0: lr %1,%0\n" 363 "0: lr %1,%0\n"
365 " ar %1,%5\n" 364 " ar %1,%4\n"
366 " cs %0,%1,0(%3)\n" 365 " cs %0,%1,%2\n"
367 " jl 0b" 366 " jl 0b"
368#else /* __s390x__ */ 367#else /* __s390x__ */
369 " lg %0,0(%3)\n" 368 " lg %0,%2\n"
370 "0: lgr %1,%0\n" 369 "0: lgr %1,%0\n"
371 " agr %1,%5\n" 370 " agr %1,%4\n"
372 " csg %0,%1,0(%3)\n" 371 " csg %0,%1,%2\n"
373 " jl 0b" 372 " jl 0b"
374#endif /* __s390x__ */ 373#endif /* __s390x__ */
375 : "=&d" (old), "=&d" (new), "=m" (sem->count) 374 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
376 : "a" (&sem->count), "m" (sem->count), "d" (delta) 375 : "Q" (sem->count), "d" (delta)
377 : "cc", "memory"); 376 : "cc", "memory");
378 return new; 377 return new;
379} 378}
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 52a779c337e8..9ab6bd3a65d1 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -14,14 +14,14 @@
14 14
15#ifdef __KERNEL__ 15#ifdef __KERNEL__
16 16
17#include <asm/lowcore.h>
18#include <asm/types.h>
19
20#define PARMAREA 0x10400 17#define PARMAREA 0x10400
21#define MEMORY_CHUNKS 256 18#define MEMORY_CHUNKS 256
22 19
23#ifndef __ASSEMBLY__ 20#ifndef __ASSEMBLY__
24 21
22#include <asm/lowcore.h>
23#include <asm/types.h>
24
25#ifndef __s390x__ 25#ifndef __s390x__
26#define IPL_DEVICE (*(unsigned long *) (0x10404)) 26#define IPL_DEVICE (*(unsigned long *) (0x10404))
27#define INITRD_START (*(unsigned long *) (0x1040C)) 27#define INITRD_START (*(unsigned long *) (0x1040C))
@@ -71,9 +71,12 @@ extern unsigned int user_mode;
71#define MACHINE_FLAG_KVM (1UL << 9) 71#define MACHINE_FLAG_KVM (1UL << 9)
72#define MACHINE_FLAG_HPAGE (1UL << 10) 72#define MACHINE_FLAG_HPAGE (1UL << 10)
73#define MACHINE_FLAG_PFMF (1UL << 11) 73#define MACHINE_FLAG_PFMF (1UL << 11)
74#define MACHINE_FLAG_LPAR (1UL << 12)
74 75
75#define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM) 76#define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM)
76#define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM) 77#define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
78#define MACHINE_IS_LPAR (S390_lowcore.machine_flags & MACHINE_FLAG_LPAR)
79
77#define MACHINE_HAS_DIAG9C (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG9C) 80#define MACHINE_HAS_DIAG9C (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG9C)
78 81
79#ifndef __s390x__ 82#ifndef __s390x__
diff --git a/arch/s390/include/asm/sigp.h b/arch/s390/include/asm/sigp.h
index f72d611f7e13..e3bffd4e2d66 100644
--- a/arch/s390/include/asm/sigp.h
+++ b/arch/s390/include/asm/sigp.h
@@ -1,24 +1,19 @@
1/* 1/*
2 * include/asm-s390/sigp.h 2 * Routines and structures for signalling other processors.
3 * 3 *
4 * S390 version 4 * Copyright IBM Corp. 1999,2010
5 * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation 5 * Author(s): Denis Joseph Barrow,
6 * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), 6 * Martin Schwidefsky <schwidefsky@de.ibm.com>,
7 * Martin Schwidefsky (schwidefsky@de.ibm.com) 7 * Heiko Carstens <heiko.carstens@de.ibm.com>,
8 * Heiko Carstens (heiko.carstens@de.ibm.com)
9 *
10 * sigp.h by D.J. Barrow (c) IBM 1999
11 * contains routines / structures for signalling other S/390 processors in an
12 * SMP configuration.
13 */ 8 */
14 9
15#ifndef __SIGP__ 10#ifndef __ASM_SIGP_H
16#define __SIGP__ 11#define __ASM_SIGP_H
17 12
18#include <asm/system.h> 13#include <asm/system.h>
19 14
20/* get real cpu address from logical cpu number */ 15/* Get real cpu address from logical cpu number. */
21extern int __cpu_logical_map[]; 16extern unsigned short __cpu_logical_map[];
22 17
23static inline int cpu_logical_map(int cpu) 18static inline int cpu_logical_map(int cpu)
24{ 19{
@@ -29,107 +24,108 @@ static inline int cpu_logical_map(int cpu)
29#endif 24#endif
30} 25}
31 26
32typedef enum 27enum {
33{ 28 sigp_sense = 1,
34 sigp_unassigned=0x0, 29 sigp_external_call = 2,
35 sigp_sense, 30 sigp_emergency_signal = 3,
36 sigp_external_call, 31 sigp_start = 4,
37 sigp_emergency_signal, 32 sigp_stop = 5,
38 sigp_start, 33 sigp_restart = 6,
39 sigp_stop, 34 sigp_stop_and_store_status = 9,
40 sigp_restart, 35 sigp_initial_cpu_reset = 11,
41 sigp_unassigned1, 36 sigp_cpu_reset = 12,
42 sigp_unassigned2, 37 sigp_set_prefix = 13,
43 sigp_stop_and_store_status, 38 sigp_store_status_at_address = 14,
44 sigp_unassigned3, 39 sigp_store_extended_status_at_address = 15,
45 sigp_initial_cpu_reset, 40 sigp_set_architecture = 18,
46 sigp_cpu_reset, 41 sigp_conditional_emergency_signal = 19,
47 sigp_set_prefix, 42 sigp_sense_running = 21,
48 sigp_store_status_at_address, 43};
49 sigp_store_extended_status_at_address 44
50} sigp_order_code; 45enum {
51 46 sigp_order_code_accepted = 0,
52typedef __u32 sigp_status_word; 47 sigp_status_stored = 1,
53 48 sigp_busy = 2,
54typedef enum 49 sigp_not_operational = 3,
55{ 50};
56 sigp_order_code_accepted=0,
57 sigp_status_stored,
58 sigp_busy,
59 sigp_not_operational
60} sigp_ccode;
61
62 51
63/* 52/*
64 * Definitions for the external call 53 * Definitions for external call.
65 */ 54 */
66 55enum {
67/* 'Bit' signals, asynchronous */ 56 ec_schedule = 0,
68typedef enum
69{
70 ec_schedule=0,
71 ec_call_function, 57 ec_call_function,
72 ec_call_function_single, 58 ec_call_function_single,
73 ec_bit_last 59};
74} ec_bit_sig;
75 60
76/* 61/*
77 * Signal processor 62 * Signal processor.
78 */ 63 */
79static inline sigp_ccode 64static inline int raw_sigp(u16 cpu, int order)
80signal_processor(__u16 cpu_addr, sigp_order_code order_code)
81{ 65{
82 register unsigned long reg1 asm ("1") = 0; 66 register unsigned long reg1 asm ("1") = 0;
83 sigp_ccode ccode; 67 int ccode;
84 68
85 asm volatile( 69 asm volatile(
86 " sigp %1,%2,0(%3)\n" 70 " sigp %1,%2,0(%3)\n"
87 " ipm %0\n" 71 " ipm %0\n"
88 " srl %0,28\n" 72 " srl %0,28\n"
89 : "=d" (ccode) 73 : "=d" (ccode)
90 : "d" (reg1), "d" (cpu_logical_map(cpu_addr)), 74 : "d" (reg1), "d" (cpu),
91 "a" (order_code) : "cc" , "memory"); 75 "a" (order) : "cc" , "memory");
92 return ccode; 76 return ccode;
93} 77}
94 78
95/* 79/*
96 * Signal processor with parameter 80 * Signal processor with parameter.
97 */ 81 */
98static inline sigp_ccode 82static inline int raw_sigp_p(u32 parameter, u16 cpu, int order)
99signal_processor_p(__u32 parameter, __u16 cpu_addr, sigp_order_code order_code)
100{ 83{
101 register unsigned int reg1 asm ("1") = parameter; 84 register unsigned int reg1 asm ("1") = parameter;
102 sigp_ccode ccode; 85 int ccode;
103 86
104 asm volatile( 87 asm volatile(
105 " sigp %1,%2,0(%3)\n" 88 " sigp %1,%2,0(%3)\n"
106 " ipm %0\n" 89 " ipm %0\n"
107 " srl %0,28\n" 90 " srl %0,28\n"
108 : "=d" (ccode) 91 : "=d" (ccode)
109 : "d" (reg1), "d" (cpu_logical_map(cpu_addr)), 92 : "d" (reg1), "d" (cpu),
110 "a" (order_code) : "cc" , "memory"); 93 "a" (order) : "cc" , "memory");
111 return ccode; 94 return ccode;
112} 95}
113 96
114/* 97/*
115 * Signal processor with parameter and return status 98 * Signal processor with parameter and return status.
116 */ 99 */
117static inline sigp_ccode 100static inline int raw_sigp_ps(u32 *status, u32 parm, u16 cpu, int order)
118signal_processor_ps(__u32 *statusptr, __u32 parameter, __u16 cpu_addr,
119 sigp_order_code order_code)
120{ 101{
121 register unsigned int reg1 asm ("1") = parameter; 102 register unsigned int reg1 asm ("1") = parm;
122 sigp_ccode ccode; 103 int ccode;
123 104
124 asm volatile( 105 asm volatile(
125 " sigp %1,%2,0(%3)\n" 106 " sigp %1,%2,0(%3)\n"
126 " ipm %0\n" 107 " ipm %0\n"
127 " srl %0,28\n" 108 " srl %0,28\n"
128 : "=d" (ccode), "+d" (reg1) 109 : "=d" (ccode), "+d" (reg1)
129 : "d" (cpu_logical_map(cpu_addr)), "a" (order_code) 110 : "d" (cpu), "a" (order)
130 : "cc" , "memory"); 111 : "cc" , "memory");
131 *statusptr = reg1; 112 *status = reg1;
132 return ccode; 113 return ccode;
133} 114}
134 115
135#endif /* __SIGP__ */ 116static inline int sigp(int cpu, int order)
117{
118 return raw_sigp(cpu_logical_map(cpu), order);
119}
120
121static inline int sigp_p(u32 parameter, int cpu, int order)
122{
123 return raw_sigp_p(parameter, cpu_logical_map(cpu), order);
124}
125
126static inline int sigp_ps(u32 *status, u32 parm, int cpu, int order)
127{
128 return raw_sigp_ps(status, parm, cpu_logical_map(cpu), order);
129}
130
131#endif /* __ASM_SIGP_H */
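
The sigp.h rewrite separates the raw operation from the CPU-number translation: raw_sigp(), raw_sigp_p() and raw_sigp_ps() take a physical CPU address, while the sigp(), sigp_p() and sigp_ps() wrappers first run the logical CPU number through cpu_logical_map(). A hedged usage sketch; the stop-and-retry caller below is made up, only the sigp() call, the order and the condition codes come from the header (kernel context assumed, needs <linux/delay.h> and <linux/errno.h>):

    /* Made-up caller illustrating the new interface. */
    static int demo_stop_cpu(int cpu)
    {
    	int cc;

    	do {
    		cc = sigp(cpu, sigp_stop);  /* logical cpu -> physical, then SIGP */
    		if (cc == sigp_busy)
    			udelay(10);         /* target busy with a previous order */
    	} while (cc == sigp_busy);

    	return (cc == sigp_order_code_accepted) ? 0 : -EIO;
    }
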
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index 2ab1141eeb50..edc03cb9cd79 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -29,7 +29,43 @@ extern int smp_cpu_polarization[];
29extern void arch_send_call_function_single_ipi(int cpu); 29extern void arch_send_call_function_single_ipi(int cpu);
30extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); 30extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
31 31
32extern union save_area *zfcpdump_save_areas[NR_CPUS + 1]; 32extern struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
33
34extern void smp_switch_to_ipl_cpu(void (*func)(void *), void *);
35extern void smp_switch_to_cpu(void (*)(void *), void *, unsigned long sp,
36 int from, int to);
37extern void smp_restart_cpu(void);
38
39/*
40 * returns 1 if (virtual) cpu is scheduled
41 * returns 0 otherwise
42 */
43static inline int smp_vcpu_scheduled(int cpu)
44{
45 u32 status;
46
47 switch (sigp_ps(&status, 0, cpu, sigp_sense_running)) {
48 case sigp_status_stored:
49 /* Check for running status */
50 if (status & 0x400)
51 return 0;
52 break;
53 case sigp_not_operational:
54 return 0;
55 default:
56 break;
57 }
58 return 1;
59}
60
61#else /* CONFIG_SMP */
62
63static inline void smp_switch_to_ipl_cpu(void (*func)(void *), void *data)
64{
65 func(data);
66}
67
68#define smp_vcpu_scheduled (1)
33 69
34#endif /* CONFIG_SMP */ 70#endif /* CONFIG_SMP */
35 71
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index a587907d77f3..56612fc8186e 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -13,8 +13,6 @@
13 13
14#include <linux/smp.h> 14#include <linux/smp.h>
15 15
16#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
17
18static inline int 16static inline int
19_raw_compare_and_swap(volatile unsigned int *lock, 17_raw_compare_and_swap(volatile unsigned int *lock,
20 unsigned int old, unsigned int new) 18 unsigned int old, unsigned int new)
@@ -27,22 +25,6 @@ _raw_compare_and_swap(volatile unsigned int *lock,
27 return old; 25 return old;
28} 26}
29 27
30#else /* __GNUC__ */
31
32static inline int
33_raw_compare_and_swap(volatile unsigned int *lock,
34 unsigned int old, unsigned int new)
35{
36 asm volatile(
37 " cs %0,%3,0(%4)"
38 : "=d" (old), "=m" (*lock)
39 : "0" (old), "d" (new), "a" (lock), "m" (*lock)
40 : "cc", "memory" );
41 return old;
42}
43
44#endif /* __GNUC__ */
45
46/* 28/*
47 * Simple spin lock operations. There are two variants, one clears IRQ's 29 * Simple spin lock operations. There are two variants, one clears IRQ's
48 * on the local processor, one does not. 30 * on the local processor, one does not.
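
With the old-compiler fallback removed, _raw_compare_and_swap() is the single primitive the spinlock code builds on, and together with smp_vcpu_scheduled() from the smp.h hunk above it lets a waiter choose between plain spinning and yielding the virtual CPU. A rough sketch of such a wait loop; the real arch_spin_lock_wait() in arch/s390/lib/spinlock.c differs in detail (it stores the complemented CPU number in owner_cpu, so ~owner recovers the CPU id, and it uses a retry budget):

    /* Rough sketch only, not the actual arch_spin_lock_wait();
     * "self" stands for the complemented CPU id used by the real code. */
    static void demo_spin_lock(arch_spinlock_t *lp, unsigned int self)
    {
    	unsigned int owner;

    	while (1) {
    		if (_raw_compare_and_swap(&lp->owner_cpu, 0, self) == 0)
    			return;                    /* lock was free, now ours */
    		owner = lp->owner_cpu;
    		if (owner && !smp_vcpu_scheduled(~owner))
    			smp_yield_cpu(~owner);     /* owner's vcpu is preempted */
    		else
    			cpu_relax();               /* owner runs, keep spinning */
    	}
    }
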
diff --git a/arch/s390/include/asm/swab.h b/arch/s390/include/asm/swab.h
index eb18dc1f327b..6bdee21c077e 100644
--- a/arch/s390/include/asm/swab.h
+++ b/arch/s390/include/asm/swab.h
@@ -47,11 +47,11 @@ static inline __u32 __arch_swab32p(const __u32 *x)
47 47
48 asm volatile( 48 asm volatile(
49#ifndef __s390x__ 49#ifndef __s390x__
50 " icm %0,8,3(%1)\n" 50 " icm %0,8,%O1+3(%R1)\n"
51 " icm %0,4,2(%1)\n" 51 " icm %0,4,%O1+2(%R1)\n"
52 " icm %0,2,1(%1)\n" 52 " icm %0,2,%O1+1(%R1)\n"
53 " ic %0,0(%1)" 53 " ic %0,%1"
54 : "=&d" (result) : "a" (x), "m" (*x) : "cc"); 54 : "=&d" (result) : "Q" (*x) : "cc");
55#else /* __s390x__ */ 55#else /* __s390x__ */
56 " lrv %0,%1" 56 " lrv %0,%1"
57 : "=d" (result) : "m" (*x)); 57 : "=d" (result) : "m" (*x));
@@ -77,9 +77,9 @@ static inline __u16 __arch_swab16p(const __u16 *x)
77 77
78 asm volatile( 78 asm volatile(
79#ifndef __s390x__ 79#ifndef __s390x__
80 " icm %0,2,1(%1)\n" 80 " icm %0,2,%O+1(%R1)\n"
81 " ic %0,0(%1)\n" 81 " ic %0,%1\n"
82 : "=&d" (result) : "a" (x), "m" (*x) : "cc"); 82 : "=&d" (result) : "Q" (*x) : "cc");
83#else /* __s390x__ */ 83#else /* __s390x__ */
84 " lrvh %0,%1" 84 " lrvh %0,%1"
85 : "=d" (result) : "m" (*x)); 85 : "=d" (result) : "m" (*x));
diff --git a/arch/s390/include/asm/sysinfo.h b/arch/s390/include/asm/sysinfo.h
index 9d70057d828c..22bdb2a0ee5f 100644
--- a/arch/s390/include/asm/sysinfo.h
+++ b/arch/s390/include/asm/sysinfo.h
@@ -87,7 +87,8 @@ struct sysinfo_2_2_2 {
87 87
88struct sysinfo_3_2_2 { 88struct sysinfo_3_2_2 {
89 char reserved_0[31]; 89 char reserved_0[31];
90 unsigned char count; 90 unsigned char :4;
91 unsigned char count:4;
91 struct { 92 struct {
92 char reserved_0[4]; 93 char reserved_0[4];
93 unsigned short cpus_total; 94 unsigned short cpus_total;
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
index 379661d2f81a..67ee6c3c6bb3 100644
--- a/arch/s390/include/asm/system.h
+++ b/arch/s390/include/asm/system.h
@@ -24,65 +24,65 @@ extern struct task_struct *__switch_to(void *, void *);
24static inline void save_fp_regs(s390_fp_regs *fpregs) 24static inline void save_fp_regs(s390_fp_regs *fpregs)
25{ 25{
26 asm volatile( 26 asm volatile(
27 " std 0,8(%1)\n" 27 " std 0,%O0+8(%R0)\n"
28 " std 2,24(%1)\n" 28 " std 2,%O0+24(%R0)\n"
29 " std 4,40(%1)\n" 29 " std 4,%O0+40(%R0)\n"
30 " std 6,56(%1)" 30 " std 6,%O0+56(%R0)"
31 : "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory"); 31 : "=Q" (*fpregs) : "Q" (*fpregs));
32 if (!MACHINE_HAS_IEEE) 32 if (!MACHINE_HAS_IEEE)
33 return; 33 return;
34 asm volatile( 34 asm volatile(
35 " stfpc 0(%1)\n" 35 " stfpc %0\n"
36 " std 1,16(%1)\n" 36 " std 1,%O0+16(%R0)\n"
37 " std 3,32(%1)\n" 37 " std 3,%O0+32(%R0)\n"
38 " std 5,48(%1)\n" 38 " std 5,%O0+48(%R0)\n"
39 " std 7,64(%1)\n" 39 " std 7,%O0+64(%R0)\n"
40 " std 8,72(%1)\n" 40 " std 8,%O0+72(%R0)\n"
41 " std 9,80(%1)\n" 41 " std 9,%O0+80(%R0)\n"
42 " std 10,88(%1)\n" 42 " std 10,%O0+88(%R0)\n"
43 " std 11,96(%1)\n" 43 " std 11,%O0+96(%R0)\n"
44 " std 12,104(%1)\n" 44 " std 12,%O0+104(%R0)\n"
45 " std 13,112(%1)\n" 45 " std 13,%O0+112(%R0)\n"
46 " std 14,120(%1)\n" 46 " std 14,%O0+120(%R0)\n"
47 " std 15,128(%1)\n" 47 " std 15,%O0+128(%R0)\n"
48 : "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory"); 48 : "=Q" (*fpregs) : "Q" (*fpregs));
49} 49}
50 50
51static inline void restore_fp_regs(s390_fp_regs *fpregs) 51static inline void restore_fp_regs(s390_fp_regs *fpregs)
52{ 52{
53 asm volatile( 53 asm volatile(
54 " ld 0,8(%0)\n" 54 " ld 0,%O0+8(%R0)\n"
55 " ld 2,24(%0)\n" 55 " ld 2,%O0+24(%R0)\n"
56 " ld 4,40(%0)\n" 56 " ld 4,%O0+40(%R0)\n"
57 " ld 6,56(%0)" 57 " ld 6,%O0+56(%R0)"
58 : : "a" (fpregs), "m" (*fpregs)); 58 : : "Q" (*fpregs));
59 if (!MACHINE_HAS_IEEE) 59 if (!MACHINE_HAS_IEEE)
60 return; 60 return;
61 asm volatile( 61 asm volatile(
62 " lfpc 0(%0)\n" 62 " lfpc %0\n"
63 " ld 1,16(%0)\n" 63 " ld 1,%O0+16(%R0)\n"
64 " ld 3,32(%0)\n" 64 " ld 3,%O0+32(%R0)\n"
65 " ld 5,48(%0)\n" 65 " ld 5,%O0+48(%R0)\n"
66 " ld 7,64(%0)\n" 66 " ld 7,%O0+64(%R0)\n"
67 " ld 8,72(%0)\n" 67 " ld 8,%O0+72(%R0)\n"
68 " ld 9,80(%0)\n" 68 " ld 9,%O0+80(%R0)\n"
69 " ld 10,88(%0)\n" 69 " ld 10,%O0+88(%R0)\n"
70 " ld 11,96(%0)\n" 70 " ld 11,%O0+96(%R0)\n"
71 " ld 12,104(%0)\n" 71 " ld 12,%O0+104(%R0)\n"
72 " ld 13,112(%0)\n" 72 " ld 13,%O0+112(%R0)\n"
73 " ld 14,120(%0)\n" 73 " ld 14,%O0+120(%R0)\n"
74 " ld 15,128(%0)\n" 74 " ld 15,%O0+128(%R0)\n"
75 : : "a" (fpregs), "m" (*fpregs)); 75 : : "Q" (*fpregs));
76} 76}
77 77
78static inline void save_access_regs(unsigned int *acrs) 78static inline void save_access_regs(unsigned int *acrs)
79{ 79{
80 asm volatile("stam 0,15,0(%0)" : : "a" (acrs) : "memory"); 80 asm volatile("stam 0,15,%0" : "=Q" (*acrs));
81} 81}
82 82
83static inline void restore_access_regs(unsigned int *acrs) 83static inline void restore_access_regs(unsigned int *acrs)
84{ 84{
85 asm volatile("lam 0,15,0(%0)" : : "a" (acrs)); 85 asm volatile("lam 0,15,%0" : : "Q" (*acrs));
86} 86}
87 87
88#define switch_to(prev,next,last) do { \ 88#define switch_to(prev,next,last) do { \
@@ -139,48 +139,48 @@ static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
139 shift = (3 ^ (addr & 3)) << 3; 139 shift = (3 ^ (addr & 3)) << 3;
140 addr ^= addr & 3; 140 addr ^= addr & 3;
141 asm volatile( 141 asm volatile(
142 " l %0,0(%4)\n" 142 " l %0,%4\n"
143 "0: lr 0,%0\n" 143 "0: lr 0,%0\n"
144 " nr 0,%3\n" 144 " nr 0,%3\n"
145 " or 0,%2\n" 145 " or 0,%2\n"
146 " cs %0,0,0(%4)\n" 146 " cs %0,0,%4\n"
147 " jl 0b\n" 147 " jl 0b\n"
148 : "=&d" (old), "=m" (*(int *) addr) 148 : "=&d" (old), "=Q" (*(int *) addr)
149 : "d" (x << shift), "d" (~(255 << shift)), "a" (addr), 149 : "d" (x << shift), "d" (~(255 << shift)),
150 "m" (*(int *) addr) : "memory", "cc", "0"); 150 "Q" (*(int *) addr) : "memory", "cc", "0");
151 return old >> shift; 151 return old >> shift;
152 case 2: 152 case 2:
153 addr = (unsigned long) ptr; 153 addr = (unsigned long) ptr;
154 shift = (2 ^ (addr & 2)) << 3; 154 shift = (2 ^ (addr & 2)) << 3;
155 addr ^= addr & 2; 155 addr ^= addr & 2;
156 asm volatile( 156 asm volatile(
157 " l %0,0(%4)\n" 157 " l %0,%4\n"
158 "0: lr 0,%0\n" 158 "0: lr 0,%0\n"
159 " nr 0,%3\n" 159 " nr 0,%3\n"
160 " or 0,%2\n" 160 " or 0,%2\n"
161 " cs %0,0,0(%4)\n" 161 " cs %0,0,%4\n"
162 " jl 0b\n" 162 " jl 0b\n"
163 : "=&d" (old), "=m" (*(int *) addr) 163 : "=&d" (old), "=Q" (*(int *) addr)
164 : "d" (x << shift), "d" (~(65535 << shift)), "a" (addr), 164 : "d" (x << shift), "d" (~(65535 << shift)),
165 "m" (*(int *) addr) : "memory", "cc", "0"); 165 "Q" (*(int *) addr) : "memory", "cc", "0");
166 return old >> shift; 166 return old >> shift;
167 case 4: 167 case 4:
168 asm volatile( 168 asm volatile(
169 " l %0,0(%3)\n" 169 " l %0,%3\n"
170 "0: cs %0,%2,0(%3)\n" 170 "0: cs %0,%2,%3\n"
171 " jl 0b\n" 171 " jl 0b\n"
172 : "=&d" (old), "=m" (*(int *) ptr) 172 : "=&d" (old), "=Q" (*(int *) ptr)
173 : "d" (x), "a" (ptr), "m" (*(int *) ptr) 173 : "d" (x), "Q" (*(int *) ptr)
174 : "memory", "cc"); 174 : "memory", "cc");
175 return old; 175 return old;
176#ifdef __s390x__ 176#ifdef __s390x__
177 case 8: 177 case 8:
178 asm volatile( 178 asm volatile(
179 " lg %0,0(%3)\n" 179 " lg %0,%3\n"
180 "0: csg %0,%2,0(%3)\n" 180 "0: csg %0,%2,%3\n"
181 " jl 0b\n" 181 " jl 0b\n"
182 : "=&d" (old), "=m" (*(long *) ptr) 182 : "=&d" (old), "=m" (*(long *) ptr)
183 : "d" (x), "a" (ptr), "m" (*(long *) ptr) 183 : "d" (x), "Q" (*(long *) ptr)
184 : "memory", "cc"); 184 : "memory", "cc");
185 return old; 185 return old;
186#endif /* __s390x__ */ 186#endif /* __s390x__ */
@@ -215,20 +215,20 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
215 shift = (3 ^ (addr & 3)) << 3; 215 shift = (3 ^ (addr & 3)) << 3;
216 addr ^= addr & 3; 216 addr ^= addr & 3;
217 asm volatile( 217 asm volatile(
218 " l %0,0(%4)\n" 218 " l %0,%2\n"
219 "0: nr %0,%5\n" 219 "0: nr %0,%5\n"
220 " lr %1,%0\n" 220 " lr %1,%0\n"
221 " or %0,%2\n" 221 " or %0,%2\n"
222 " or %1,%3\n" 222 " or %1,%3\n"
223 " cs %0,%1,0(%4)\n" 223 " cs %0,%1,%2\n"
224 " jnl 1f\n" 224 " jnl 1f\n"
225 " xr %1,%0\n" 225 " xr %1,%0\n"
226 " nr %1,%5\n" 226 " nr %1,%5\n"
227 " jnz 0b\n" 227 " jnz 0b\n"
228 "1:" 228 "1:"
229 : "=&d" (prev), "=&d" (tmp) 229 : "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) ptr)
230 : "d" (old << shift), "d" (new << shift), "a" (ptr), 230 : "d" (old << shift), "d" (new << shift),
231 "d" (~(255 << shift)) 231 "d" (~(255 << shift)), "Q" (*(int *) ptr)
232 : "memory", "cc"); 232 : "memory", "cc");
233 return prev >> shift; 233 return prev >> shift;
234 case 2: 234 case 2:
@@ -236,33 +236,35 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
236 shift = (2 ^ (addr & 2)) << 3; 236 shift = (2 ^ (addr & 2)) << 3;
237 addr ^= addr & 2; 237 addr ^= addr & 2;
238 asm volatile( 238 asm volatile(
239 " l %0,0(%4)\n" 239 " l %0,%2\n"
240 "0: nr %0,%5\n" 240 "0: nr %0,%5\n"
241 " lr %1,%0\n" 241 " lr %1,%0\n"
242 " or %0,%2\n" 242 " or %0,%2\n"
243 " or %1,%3\n" 243 " or %1,%3\n"
244 " cs %0,%1,0(%4)\n" 244 " cs %0,%1,%2\n"
245 " jnl 1f\n" 245 " jnl 1f\n"
246 " xr %1,%0\n" 246 " xr %1,%0\n"
247 " nr %1,%5\n" 247 " nr %1,%5\n"
248 " jnz 0b\n" 248 " jnz 0b\n"
249 "1:" 249 "1:"
250 : "=&d" (prev), "=&d" (tmp) 250 : "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) ptr)
251 : "d" (old << shift), "d" (new << shift), "a" (ptr), 251 : "d" (old << shift), "d" (new << shift),
252 "d" (~(65535 << shift)) 252 "d" (~(65535 << shift)), "Q" (*(int *) ptr)
253 : "memory", "cc"); 253 : "memory", "cc");
254 return prev >> shift; 254 return prev >> shift;
255 case 4: 255 case 4:
256 asm volatile( 256 asm volatile(
257 " cs %0,%2,0(%3)\n" 257 " cs %0,%3,%1\n"
258 : "=&d" (prev) : "0" (old), "d" (new), "a" (ptr) 258 : "=&d" (prev), "=Q" (*(int *) ptr)
259 : "0" (old), "d" (new), "Q" (*(int *) ptr)
259 : "memory", "cc"); 260 : "memory", "cc");
260 return prev; 261 return prev;
261#ifdef __s390x__ 262#ifdef __s390x__
262 case 8: 263 case 8:
263 asm volatile( 264 asm volatile(
264 " csg %0,%2,0(%3)\n" 265 " csg %0,%3,%1\n"
265 : "=&d" (prev) : "0" (old), "d" (new), "a" (ptr) 266 : "=&d" (prev), "=Q" (*(long *) ptr)
267 : "0" (old), "d" (new), "Q" (*(long *) ptr)
266 : "memory", "cc"); 268 : "memory", "cc");
267 return prev; 269 return prev;
268#endif /* __s390x__ */ 270#endif /* __s390x__ */
@@ -302,17 +304,17 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
302#define __ctl_load(array, low, high) ({ \ 304#define __ctl_load(array, low, high) ({ \
303 typedef struct { char _[sizeof(array)]; } addrtype; \ 305 typedef struct { char _[sizeof(array)]; } addrtype; \
304 asm volatile( \ 306 asm volatile( \
305 " lctlg %1,%2,0(%0)\n" \ 307 " lctlg %1,%2,%0\n" \
306 : : "a" (&array), "i" (low), "i" (high), \ 308 : : "Q" (*(addrtype *)(&array)), \
307 "m" (*(addrtype *)(&array))); \ 309 "i" (low), "i" (high)); \
308 }) 310 })
309 311
310#define __ctl_store(array, low, high) ({ \ 312#define __ctl_store(array, low, high) ({ \
311 typedef struct { char _[sizeof(array)]; } addrtype; \ 313 typedef struct { char _[sizeof(array)]; } addrtype; \
312 asm volatile( \ 314 asm volatile( \
313 " stctg %2,%3,0(%1)\n" \ 315 " stctg %1,%2,%0\n" \
314 : "=m" (*(addrtype *)(&array)) \ 316 : "=Q" (*(addrtype *)(&array)) \
315 : "a" (&array), "i" (low), "i" (high)); \ 317 : "i" (low), "i" (high)); \
316 }) 318 })
317 319
318#else /* __s390x__ */ 320#else /* __s390x__ */
@@ -320,17 +322,17 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
320#define __ctl_load(array, low, high) ({ \ 322#define __ctl_load(array, low, high) ({ \
321 typedef struct { char _[sizeof(array)]; } addrtype; \ 323 typedef struct { char _[sizeof(array)]; } addrtype; \
322 asm volatile( \ 324 asm volatile( \
323 " lctl %1,%2,0(%0)\n" \ 325 " lctl %1,%2,%0\n" \
324 : : "a" (&array), "i" (low), "i" (high), \ 326 : : "Q" (*(addrtype *)(&array)), \
325 "m" (*(addrtype *)(&array))); \ 327 "i" (low), "i" (high)); \
326}) 328})
327 329
328#define __ctl_store(array, low, high) ({ \ 330#define __ctl_store(array, low, high) ({ \
329 typedef struct { char _[sizeof(array)]; } addrtype; \ 331 typedef struct { char _[sizeof(array)]; } addrtype; \
330 asm volatile( \ 332 asm volatile( \
331 " stctl %2,%3,0(%1)\n" \ 333 " stctl %1,%2,%0\n" \
332 : "=m" (*(addrtype *)(&array)) \ 334 : "=Q" (*(addrtype *)(&array)) \
333 : "a" (&array), "i" (low), "i" (high)); \ 335 : "i" (low), "i" (high)); \
334 }) 336 })
335 337
336#endif /* __s390x__ */ 338#endif /* __s390x__ */
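
The __ctl_load()/__ctl_store() macros keep their calling convention; only the operands change from an address register plus "m" to a single "Q" memory operand covering the whole array. Typical usage, with an illustrative bit (which control register and bit to touch depends entirely on the caller):

    /* Read CR0, set an (illustrative, placeholder) bit, write it back. */
    static void demo_set_cr0_bit(void)
    {
    	unsigned long cr0;

    	__ctl_store(cr0, 0, 0);  /* stctg/stctl 0,0,... : read CR0  */
    	cr0 |= 1UL << 4;         /* placeholder bit only            */
    	__ctl_load(cr0, 0, 0);   /* lctlg/lctl 0,0,...  : write CR0 */
    }
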
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 66069e736842..34f0873d6525 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -73,7 +73,7 @@ struct thread_info {
73/* how to get the thread information struct from C */ 73/* how to get the thread information struct from C */
74static inline struct thread_info *current_thread_info(void) 74static inline struct thread_info *current_thread_info(void)
75{ 75{
76 return (struct thread_info *)((*(unsigned long *) __LC_KERNEL_STACK)-THREAD_SIZE); 76 return (struct thread_info *)(S390_lowcore.kernel_stack - THREAD_SIZE);
77} 77}
78 78
79#define THREAD_SIZE_ORDER THREAD_ORDER 79#define THREAD_SIZE_ORDER THREAD_ORDER
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 68d9fea34b4b..f174bdaa6b59 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -20,10 +20,10 @@ static inline int set_clock(__u64 time)
20 int cc; 20 int cc;
21 21
22 asm volatile( 22 asm volatile(
23 " sck 0(%2)\n" 23 " sck %1\n"
24 " ipm %0\n" 24 " ipm %0\n"
25 " srl %0,28\n" 25 " srl %0,28\n"
26 : "=d" (cc) : "m" (time), "a" (&time) : "cc"); 26 : "=d" (cc) : "Q" (time) : "cc");
27 return cc; 27 return cc;
28} 28}
29 29
@@ -32,21 +32,21 @@ static inline int store_clock(__u64 *time)
32 int cc; 32 int cc;
33 33
34 asm volatile( 34 asm volatile(
35 " stck 0(%2)\n" 35 " stck %1\n"
36 " ipm %0\n" 36 " ipm %0\n"
37 " srl %0,28\n" 37 " srl %0,28\n"
38 : "=d" (cc), "=m" (*time) : "a" (time) : "cc"); 38 : "=d" (cc), "=Q" (*time) : : "cc");
39 return cc; 39 return cc;
40} 40}
41 41
42static inline void set_clock_comparator(__u64 time) 42static inline void set_clock_comparator(__u64 time)
43{ 43{
44 asm volatile("sckc 0(%1)" : : "m" (time), "a" (&time)); 44 asm volatile("sckc %0" : : "Q" (time));
45} 45}
46 46
47static inline void store_clock_comparator(__u64 *time) 47static inline void store_clock_comparator(__u64 *time)
48{ 48{
49 asm volatile("stckc 0(%1)" : "=m" (*time) : "a" (time)); 49 asm volatile("stckc %0" : "=Q" (*time));
50} 50}
51 51
52#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */ 52#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
@@ -57,11 +57,7 @@ static inline unsigned long long get_clock (void)
57{ 57{
58 unsigned long long clk; 58 unsigned long long clk;
59 59
60#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
61 asm volatile("stck %0" : "=Q" (clk) : : "cc"); 60 asm volatile("stck %0" : "=Q" (clk) : : "cc");
62#else /* __GNUC__ */
63 asm volatile("stck 0(%1)" : "=m" (clk) : "a" (&clk) : "cc");
64#endif /* __GNUC__ */
65 return clk; 61 return clk;
66} 62}
67 63
@@ -69,13 +65,7 @@ static inline unsigned long long get_clock_xt(void)
69{ 65{
70 unsigned char clk[16]; 66 unsigned char clk[16];
71 67
72#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
73 asm volatile("stcke %0" : "=Q" (clk) : : "cc"); 68 asm volatile("stcke %0" : "=Q" (clk) : : "cc");
74#else /* __GNUC__ */
75 asm volatile("stcke 0(%1)" : "=m" (clk)
76 : "a" (clk) : "cc");
77#endif /* __GNUC__ */
78
79 return *((unsigned long long *)&clk[1]); 69 return *((unsigned long long *)&clk[1]);
80} 70}
81 71
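
get_clock() and get_clock_xt() now use stck/stcke with a "Q" output unconditionally, since the pre-3.3.3 compiler fallback is gone. The result is the raw TOD clock, where bit 51 ticks once per microsecond, so one TOD unit is 125/512 ns; a small, hedged example of timing a section with it (the conversion is the standard one, the helper itself is made up):

    /* Made-up helper: measure fn() with the TOD clock, return nanoseconds. */
    static unsigned long long demo_time_section(void (*fn)(void))
    {
    	unsigned long long start, delta;

    	start = get_clock();
    	fn();
    	delta = get_clock() - start;        /* TOD units        */
    	return (delta * 125) >> 9;          /* 1 unit = 125/512 ns */
    }
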
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index cbf0a8745bf4..d6b1ed0ec52b 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -265,6 +265,12 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
265 return uaccess.copy_from_user(n, from, to); 265 return uaccess.copy_from_user(n, from, to);
266} 266}
267 267
268extern void copy_from_user_overflow(void)
269#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
270__compiletime_warning("copy_from_user() buffer size is not provably correct")
271#endif
272;
273
268/** 274/**
269 * copy_from_user: - Copy a block of data from user space. 275 * copy_from_user: - Copy a block of data from user space.
270 * @to: Destination address, in kernel space. 276 * @to: Destination address, in kernel space.
@@ -284,7 +290,13 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
284static inline unsigned long __must_check 290static inline unsigned long __must_check
285copy_from_user(void *to, const void __user *from, unsigned long n) 291copy_from_user(void *to, const void __user *from, unsigned long n)
286{ 292{
293 unsigned int sz = __compiletime_object_size(to);
294
287 might_fault(); 295 might_fault();
296 if (unlikely(sz != -1 && sz < n)) {
297 copy_from_user_overflow();
298 return n;
299 }
288 if (access_ok(VERIFY_READ, from, n)) 300 if (access_ok(VERIFY_READ, from, n))
289 n = __copy_from_user(to, from, n); 301 n = __copy_from_user(to, from, n);
290 else 302 else
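
The new check in copy_from_user() compares the compile-time size of the destination (via __compiletime_object_size(), -1 when unknown) with the requested length; with CONFIG_DEBUG_STRICT_USER_COPY_CHECKS set, a provably oversized copy additionally triggers the copy_from_user_overflow() warning at build time, and at run time the copy is refused by returning n. A hypothetical buggy caller the check would catch (not from this patch):

    struct demo_req {
    	char name[16];
    };

    static long demo_ioctl_copy(void __user *arg)
    {
    	struct demo_req req;

    	/* Bug on purpose: asks for 32 bytes into a 16 byte object.
    	 * sz = 16, n = 32 -> build-time warning, copy_from_user() returns 32. */
    	if (copy_from_user(&req, arg, 2 * sizeof(req)))
    		return -EFAULT;
    	return 0;
    }
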
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
index 7bdd7c8ebc91..4a76d9480cce 100644
--- a/arch/s390/include/asm/vdso.h
+++ b/arch/s390/include/asm/vdso.h
@@ -7,7 +7,7 @@
7#define VDSO32_LBASE 0 7#define VDSO32_LBASE 0
8#define VDSO64_LBASE 0 8#define VDSO64_LBASE 0
9 9
10#define VDSO_VERSION_STRING LINUX_2.6.26 10#define VDSO_VERSION_STRING LINUX_2.6.29
11 11
12#ifndef __ASSEMBLY__ 12#ifndef __ASSEMBLY__
13 13
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 683f6381cc59..64230bc392fa 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -29,9 +29,12 @@ obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o)
29obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) 29obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
30 30
31extra-y += head.o init_task.o vmlinux.lds 31extra-y += head.o init_task.o vmlinux.lds
32extra-y += $(if $(CONFIG_64BIT),head64.o,head31.o)
32 33
33obj-$(CONFIG_MODULES) += s390_ksyms.o module.o 34obj-$(CONFIG_MODULES) += s390_ksyms.o module.o
34obj-$(CONFIG_SMP) += smp.o topology.o 35obj-$(CONFIG_SMP) += smp.o topology.o
36obj-$(CONFIG_SMP) += $(if $(CONFIG_64BIT),switch_cpu64.o, \
37 switch_cpu.o)
35obj-$(CONFIG_HIBERNATION) += suspend.o swsusp_asm64.o 38obj-$(CONFIG_HIBERNATION) += suspend.o swsusp_asm64.o
36obj-$(CONFIG_AUDIT) += audit.o 39obj-$(CONFIG_AUDIT) += audit.o
37compat-obj-$(CONFIG_AUDIT) += compat_audit.o 40compat-obj-$(CONFIG_AUDIT) += compat_audit.o
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 63e46433e81d..08db736dded0 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -4,18 +4,27 @@
4 * and format the required data. 4 * and format the required data.
5 */ 5 */
6 6
7#include <linux/sched.h> 7#define ASM_OFFSETS_C
8
8#include <linux/kbuild.h> 9#include <linux/kbuild.h>
10#include <linux/sched.h>
9#include <asm/vdso.h> 11#include <asm/vdso.h>
10#include <asm/sigp.h> 12#include <asm/sigp.h>
11 13
14/*
15 * Make sure that the compiler is new enough. We want a compiler that
16 * is known to work with the "Q" assembler constraint.
17 */
18#if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
19#error Your compiler is too old; please use version 3.3.3 or newer
20#endif
21
12int main(void) 22int main(void)
13{ 23{
14 DEFINE(__THREAD_info, offsetof(struct task_struct, stack)); 24 DEFINE(__THREAD_info, offsetof(struct task_struct, stack));
15 DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp)); 25 DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp));
16 DEFINE(__THREAD_per, offsetof(struct task_struct, thread.per_info)); 26 DEFINE(__THREAD_per, offsetof(struct task_struct, thread.per_info));
17 DEFINE(__THREAD_mm_segment, 27 DEFINE(__THREAD_mm_segment, offsetof(struct task_struct, thread.mm_segment));
18 offsetof(struct task_struct, thread.mm_segment));
19 BLANK(); 28 BLANK();
20 DEFINE(__TASK_pid, offsetof(struct task_struct, pid)); 29 DEFINE(__TASK_pid, offsetof(struct task_struct, pid));
21 BLANK(); 30 BLANK();
@@ -52,18 +61,94 @@ int main(void)
52 DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec)); 61 DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec));
53 DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest)); 62 DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest));
54 DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available)); 63 DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available));
55 DEFINE(__VDSO_ECTG_BASE, 64 DEFINE(__VDSO_ECTG_BASE, offsetof(struct vdso_per_cpu_data, ectg_timer_base));
56 offsetof(struct vdso_per_cpu_data, ectg_timer_base)); 65 DEFINE(__VDSO_ECTG_USER, offsetof(struct vdso_per_cpu_data, ectg_user_time));
57 DEFINE(__VDSO_ECTG_USER,
58 offsetof(struct vdso_per_cpu_data, ectg_user_time));
59 /* constants used by the vdso */ 66 /* constants used by the vdso */
60 DEFINE(CLOCK_REALTIME, CLOCK_REALTIME); 67 DEFINE(CLOCK_REALTIME, CLOCK_REALTIME);
61 DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC); 68 DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC);
62 DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC); 69 DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
70 BLANK();
63 /* constants for SIGP */ 71 /* constants for SIGP */
64 DEFINE(__SIGP_STOP, sigp_stop); 72 DEFINE(__SIGP_STOP, sigp_stop);
65 DEFINE(__SIGP_RESTART, sigp_restart); 73 DEFINE(__SIGP_RESTART, sigp_restart);
66 DEFINE(__SIGP_SENSE, sigp_sense); 74 DEFINE(__SIGP_SENSE, sigp_sense);
67 DEFINE(__SIGP_INITIAL_CPU_RESET, sigp_initial_cpu_reset); 75 DEFINE(__SIGP_INITIAL_CPU_RESET, sigp_initial_cpu_reset);
76 BLANK();
77 /* lowcore offsets */
78 DEFINE(__LC_EXT_PARAMS, offsetof(struct _lowcore, ext_params));
79 DEFINE(__LC_CPU_ADDRESS, offsetof(struct _lowcore, cpu_addr));
80 DEFINE(__LC_EXT_INT_CODE, offsetof(struct _lowcore, ext_int_code));
81 DEFINE(__LC_SVC_ILC, offsetof(struct _lowcore, svc_ilc));
82 DEFINE(__LC_SVC_INT_CODE, offsetof(struct _lowcore, svc_code));
83 DEFINE(__LC_PGM_ILC, offsetof(struct _lowcore, pgm_ilc));
84 DEFINE(__LC_PGM_INT_CODE, offsetof(struct _lowcore, pgm_code));
85 DEFINE(__LC_PER_ATMID, offsetof(struct _lowcore, per_perc_atmid));
86 DEFINE(__LC_PER_ADDRESS, offsetof(struct _lowcore, per_address));
87 DEFINE(__LC_PER_ACCESS_ID, offsetof(struct _lowcore, per_access_id));
88 DEFINE(__LC_AR_MODE_ID, offsetof(struct _lowcore, ar_access_id));
89 DEFINE(__LC_SUBCHANNEL_ID, offsetof(struct _lowcore, subchannel_id));
90 DEFINE(__LC_SUBCHANNEL_NR, offsetof(struct _lowcore, subchannel_nr));
91 DEFINE(__LC_IO_INT_PARM, offsetof(struct _lowcore, io_int_parm));
92 DEFINE(__LC_IO_INT_WORD, offsetof(struct _lowcore, io_int_word));
93 DEFINE(__LC_STFL_FAC_LIST, offsetof(struct _lowcore, stfl_fac_list));
94 DEFINE(__LC_MCCK_CODE, offsetof(struct _lowcore, mcck_interruption_code));
95 DEFINE(__LC_DUMP_REIPL, offsetof(struct _lowcore, ipib));
96 BLANK();
97 DEFINE(__LC_RST_NEW_PSW, offsetof(struct _lowcore, restart_psw));
98 DEFINE(__LC_RST_OLD_PSW, offsetof(struct _lowcore, restart_old_psw));
99 DEFINE(__LC_EXT_OLD_PSW, offsetof(struct _lowcore, external_old_psw));
100 DEFINE(__LC_SVC_OLD_PSW, offsetof(struct _lowcore, svc_old_psw));
101 DEFINE(__LC_PGM_OLD_PSW, offsetof(struct _lowcore, program_old_psw));
102 DEFINE(__LC_MCK_OLD_PSW, offsetof(struct _lowcore, mcck_old_psw));
103 DEFINE(__LC_IO_OLD_PSW, offsetof(struct _lowcore, io_old_psw));
104 DEFINE(__LC_EXT_NEW_PSW, offsetof(struct _lowcore, external_new_psw));
105 DEFINE(__LC_SVC_NEW_PSW, offsetof(struct _lowcore, svc_new_psw));
106 DEFINE(__LC_PGM_NEW_PSW, offsetof(struct _lowcore, program_new_psw));
107 DEFINE(__LC_MCK_NEW_PSW, offsetof(struct _lowcore, mcck_new_psw));
108 DEFINE(__LC_IO_NEW_PSW, offsetof(struct _lowcore, io_new_psw));
109 DEFINE(__LC_SAVE_AREA, offsetof(struct _lowcore, save_area));
110 DEFINE(__LC_RETURN_PSW, offsetof(struct _lowcore, return_psw));
111 DEFINE(__LC_RETURN_MCCK_PSW, offsetof(struct _lowcore, return_mcck_psw));
112 DEFINE(__LC_SYNC_ENTER_TIMER, offsetof(struct _lowcore, sync_enter_timer));
113 DEFINE(__LC_ASYNC_ENTER_TIMER, offsetof(struct _lowcore, async_enter_timer));
114 DEFINE(__LC_EXIT_TIMER, offsetof(struct _lowcore, exit_timer));
115 DEFINE(__LC_USER_TIMER, offsetof(struct _lowcore, user_timer));
116 DEFINE(__LC_SYSTEM_TIMER, offsetof(struct _lowcore, system_timer));
117 DEFINE(__LC_STEAL_TIMER, offsetof(struct _lowcore, steal_timer));
118 DEFINE(__LC_LAST_UPDATE_TIMER, offsetof(struct _lowcore, last_update_timer));
119 DEFINE(__LC_LAST_UPDATE_CLOCK, offsetof(struct _lowcore, last_update_clock));
120 DEFINE(__LC_CURRENT, offsetof(struct _lowcore, current_task));
121 DEFINE(__LC_THREAD_INFO, offsetof(struct _lowcore, thread_info));
122 DEFINE(__LC_KERNEL_STACK, offsetof(struct _lowcore, kernel_stack));
123 DEFINE(__LC_ASYNC_STACK, offsetof(struct _lowcore, async_stack));
124 DEFINE(__LC_PANIC_STACK, offsetof(struct _lowcore, panic_stack));
125 DEFINE(__LC_KERNEL_ASCE, offsetof(struct _lowcore, kernel_asce));
126 DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce));
127 DEFINE(__LC_USER_EXEC_ASCE, offsetof(struct _lowcore, user_exec_asce));
128 DEFINE(__LC_CPUID, offsetof(struct _lowcore, cpu_id));
129 DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock));
130 DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags));
131 DEFINE(__LC_FTRACE_FUNC, offsetof(struct _lowcore, ftrace_func));
132 DEFINE(__LC_IRB, offsetof(struct _lowcore, irb));
133 DEFINE(__LC_CPU_TIMER_SAVE_AREA, offsetof(struct _lowcore, cpu_timer_save_area));
134 DEFINE(__LC_CLOCK_COMP_SAVE_AREA, offsetof(struct _lowcore, clock_comp_save_area));
135 DEFINE(__LC_PSW_SAVE_AREA, offsetof(struct _lowcore, psw_save_area));
136 DEFINE(__LC_PREFIX_SAVE_AREA, offsetof(struct _lowcore, prefixreg_save_area));
137 DEFINE(__LC_AREGS_SAVE_AREA, offsetof(struct _lowcore, access_regs_save_area));
138 DEFINE(__LC_FPREGS_SAVE_AREA, offsetof(struct _lowcore, floating_pt_save_area));
139 DEFINE(__LC_GPREGS_SAVE_AREA, offsetof(struct _lowcore, gpregs_save_area));
140 DEFINE(__LC_CREGS_SAVE_AREA, offsetof(struct _lowcore, cregs_save_area));
141#ifdef CONFIG_32BIT
142 DEFINE(__LC_PFAULT_INTPARM, offsetof(struct _lowcore, ext_params));
143 DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, extended_save_area_addr));
144#else /* CONFIG_32BIT */
145 DEFINE(__LC_PFAULT_INTPARM, offsetof(struct _lowcore, ext_params2));
146 DEFINE(__LC_EXT_PARAMS2, offsetof(struct _lowcore, ext_params2));
147 DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, floating_pt_save_area));
148 DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste));
149 DEFINE(__LC_FP_CREG_SAVE_AREA, offsetof(struct _lowcore, fpt_creg_save_area));
150 DEFINE(__LC_LAST_BREAK, offsetof(struct _lowcore, breaking_event_addr));
151 DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
152#endif /* CONFIG_32BIT */
68 return 0; 153 return 0;
69} 154}
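
asm-offsets.c is never linked into the kernel; it exists only so that each DEFINE() (from <linux/kbuild.h>) leaves a marker in the generated assembler output, which Kbuild turns into a header of plain #defines. That is how the __LC_* lowcore offsets added above become usable from entry.S, base.S and the other assembly files without hard-coded numbers. Roughly, with placeholder values:

    /* <linux/kbuild.h> defines the marker roughly as: */
    #define DEFINE(sym, val) \
    	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

    /*
     * So
     *	DEFINE(__LC_KERNEL_STACK, offsetof(struct _lowcore, kernel_stack));
     * emits a "->__LC_KERNEL_STACK <offset> ..." line into asm-offsets.s,
     * which the build rewrites into
     *	#define __LC_KERNEL_STACK <offset>
     * in the generated asm-offsets.h, so assembly can then write e.g.
     *	lg	%r15,__LC_KERNEL_STACK
     */
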
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
index dc7e5259770f..15e46ca94335 100644
--- a/arch/s390/kernel/base.S
+++ b/arch/s390/kernel/base.S
@@ -6,8 +6,8 @@
6 * Michael Holzheu <holzheu@de.ibm.com> 6 * Michael Holzheu <holzheu@de.ibm.com>
7 */ 7 */
8 8
9#include <asm/asm-offsets.h>
9#include <asm/ptrace.h> 10#include <asm/ptrace.h>
10#include <asm/lowcore.h>
11 11
12#ifdef CONFIG_64BIT 12#ifdef CONFIG_64BIT
13 13
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index db943a7ec513..b39b27d68b45 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -86,10 +86,17 @@ enum {
86 U4_12, /* 4 bit unsigned value starting at 12 */ 86 U4_12, /* 4 bit unsigned value starting at 12 */
87 U4_16, /* 4 bit unsigned value starting at 16 */ 87 U4_16, /* 4 bit unsigned value starting at 16 */
88 U4_20, /* 4 bit unsigned value starting at 20 */ 88 U4_20, /* 4 bit unsigned value starting at 20 */
89 U4_32, /* 4 bit unsigned value starting at 32 */
89 U8_8, /* 8 bit unsigned value starting at 8 */ 90 U8_8, /* 8 bit unsigned value starting at 8 */
90 U8_16, /* 8 bit unsigned value starting at 16 */ 91 U8_16, /* 8 bit unsigned value starting at 16 */
92 U8_24, /* 8 bit unsigned value starting at 24 */
93 U8_32, /* 8 bit unsigned value starting at 32 */
94 I8_8, /* 8 bit signed value starting at 8 */
95 I8_32, /* 8 bit signed value starting at 32 */
91 I16_16, /* 16 bit signed value starting at 16 */ 96 I16_16, /* 16 bit signed value starting at 16 */
97 I16_32, /* 16 bit signed value starting at 32 */
92 U16_16, /* 16 bit unsigned value starting at 16 */ 98 U16_16, /* 16 bit unsigned value starting at 16 */
99 U16_32, /* 16 bit unsigned value starting at 32 */
93 J16_16, /* PC relative jump offset at 16 */ 100 J16_16, /* PC relative jump offset at 16 */
94 J32_16, /* PC relative long offset at 16 */ 101 J32_16, /* PC relative long offset at 16 */
95 I32_16, /* 32 bit signed value starting at 16 */ 102 I32_16, /* 32 bit signed value starting at 16 */
@@ -104,21 +111,37 @@ enum {
104 */ 111 */
105enum { 112enum {
106 INSTR_INVALID, 113 INSTR_INVALID,
107 INSTR_E, INSTR_RIE_RRP, INSTR_RIL_RI, INSTR_RIL_RP, INSTR_RIL_RU, 114 INSTR_E,
108 INSTR_RIL_UP, INSTR_RI_RI, INSTR_RI_RP, INSTR_RI_RU, INSTR_RI_UP, 115 INSTR_RIE_R0IU, INSTR_RIE_R0UU, INSTR_RIE_RRP, INSTR_RIE_RRPU,
116 INSTR_RIE_RRUUU, INSTR_RIE_RUPI, INSTR_RIE_RUPU,
117 INSTR_RIL_RI, INSTR_RIL_RP, INSTR_RIL_RU, INSTR_RIL_UP,
118 INSTR_RIS_R0RDU, INSTR_RIS_R0UU, INSTR_RIS_RURDI, INSTR_RIS_RURDU,
119 INSTR_RI_RI, INSTR_RI_RP, INSTR_RI_RU, INSTR_RI_UP,
109 INSTR_RRE_00, INSTR_RRE_0R, INSTR_RRE_AA, INSTR_RRE_AR, INSTR_RRE_F0, 120 INSTR_RRE_00, INSTR_RRE_0R, INSTR_RRE_AA, INSTR_RRE_AR, INSTR_RRE_F0,
110 INSTR_RRE_FF, INSTR_RRE_R0, INSTR_RRE_RA, INSTR_RRE_RF, INSTR_RRE_RR, 121 INSTR_RRE_FF, INSTR_RRE_FR, INSTR_RRE_R0, INSTR_RRE_RA, INSTR_RRE_RF,
111 INSTR_RRE_RR_OPT, INSTR_RRF_F0FF, INSTR_RRF_FUFF, INSTR_RRF_M0RR, 122 INSTR_RRE_RR, INSTR_RRE_RR_OPT,
112 INSTR_RRF_R0RR, INSTR_RRF_RURR, INSTR_RRF_U0FF, INSTR_RRF_U0RF, 123 INSTR_RRF_0UFF, INSTR_RRF_F0FF, INSTR_RRF_F0FF2, INSTR_RRF_F0FR,
124 INSTR_RRF_FFRU, INSTR_RRF_FUFF, INSTR_RRF_M0RR, INSTR_RRF_R0RR,
125 INSTR_RRF_RURR, INSTR_RRF_U0FF, INSTR_RRF_U0RF, INSTR_RRF_U0RR,
126 INSTR_RRF_UUFF, INSTR_RRR_F0FF, INSTR_RRS_RRRDU,
113 INSTR_RR_FF, INSTR_RR_R0, INSTR_RR_RR, INSTR_RR_U0, INSTR_RR_UR, 127 INSTR_RR_FF, INSTR_RR_R0, INSTR_RR_RR, INSTR_RR_U0, INSTR_RR_UR,
114 INSTR_RSE_CCRD, INSTR_RSE_RRRD, INSTR_RSE_RURD, INSTR_RSI_RRP, 128 INSTR_RSE_CCRD, INSTR_RSE_RRRD, INSTR_RSE_RURD,
115 INSTR_RSL_R0RD, INSTR_RSY_AARD, INSTR_RSY_CCRD, INSTR_RSY_RRRD, 129 INSTR_RSI_RRP,
116 INSTR_RSY_RURD, INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD, 130 INSTR_RSL_R0RD,
117 INSTR_RS_RRRD, INSTR_RS_RURD, INSTR_RXE_FRRD, INSTR_RXE_RRRD, 131 INSTR_RSY_AARD, INSTR_RSY_CCRD, INSTR_RSY_RRRD, INSTR_RSY_RURD,
118 INSTR_RXF_FRRDF, INSTR_RXY_FRRD, INSTR_RXY_RRRD, INSTR_RX_FRRD, 132 INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD, INSTR_RS_RRRD,
119 INSTR_RX_RRRD, INSTR_RX_URRD, INSTR_SIY_URD, INSTR_SI_URD, 133 INSTR_RS_RURD,
120 INSTR_SSE_RDRD, INSTR_SSF_RRDRD, INSTR_SS_L0RDRD, INSTR_SS_LIRDRD, 134 INSTR_RXE_FRRD, INSTR_RXE_RRRD,
121 INSTR_SS_LLRDRD, INSTR_SS_RRRDRD, INSTR_SS_RRRDRD2, INSTR_SS_RRRDRD3, 135 INSTR_RXF_FRRDF,
136 INSTR_RXY_FRRD, INSTR_RXY_RRRD, INSTR_RXY_URRD,
137 INSTR_RX_FRRD, INSTR_RX_RRRD, INSTR_RX_URRD,
138 INSTR_SIL_RDI, INSTR_SIL_RDU,
139 INSTR_SIY_IRD, INSTR_SIY_URD,
140 INSTR_SI_URD,
141 INSTR_SSE_RDRD,
142 INSTR_SSF_RRDRD,
143 INSTR_SS_L0RDRD, INSTR_SS_LIRDRD, INSTR_SS_LLRDRD, INSTR_SS_RRRDRD,
144 INSTR_SS_RRRDRD2, INSTR_SS_RRRDRD3,
122 INSTR_S_00, INSTR_S_RD, 145 INSTR_S_00, INSTR_S_RD,
123}; 146};
124 147
@@ -129,7 +152,7 @@ struct operand {
129}; 152};
130 153
131struct insn { 154struct insn {
132 const char name[5]; 155 const char name[6];
133 unsigned char opfrag; 156 unsigned char opfrag;
134 unsigned char format; 157 unsigned char format;
135}; 158};
@@ -170,11 +193,16 @@ static const struct operand operands[] =
170 [U4_12] = { 4, 12, 0 }, 193 [U4_12] = { 4, 12, 0 },
171 [U4_16] = { 4, 16, 0 }, 194 [U4_16] = { 4, 16, 0 },
172 [U4_20] = { 4, 20, 0 }, 195 [U4_20] = { 4, 20, 0 },
196 [U4_32] = { 4, 32, 0 },
173 [U8_8] = { 8, 8, 0 }, 197 [U8_8] = { 8, 8, 0 },
174 [U8_16] = { 8, 16, 0 }, 198 [U8_16] = { 8, 16, 0 },
199 [U8_24] = { 8, 24, 0 },
200 [U8_32] = { 8, 32, 0 },
175 [I16_16] = { 16, 16, OPERAND_SIGNED }, 201 [I16_16] = { 16, 16, OPERAND_SIGNED },
176 [U16_16] = { 16, 16, 0 }, 202 [U16_16] = { 16, 16, 0 },
203 [U16_32] = { 16, 32, 0 },
177 [J16_16] = { 16, 16, OPERAND_PCREL }, 204 [J16_16] = { 16, 16, OPERAND_PCREL },
205 [I16_32] = { 16, 32, OPERAND_SIGNED },
178 [J32_16] = { 32, 16, OPERAND_PCREL }, 206 [J32_16] = { 32, 16, OPERAND_PCREL },
179 [I32_16] = { 32, 16, OPERAND_SIGNED }, 207 [I32_16] = { 32, 16, OPERAND_SIGNED },
180 [U32_16] = { 32, 16, 0 }, 208 [U32_16] = { 32, 16, 0 },
@@ -183,82 +211,93 @@ static const struct operand operands[] =
183}; 211};
184 212
185static const unsigned char formats[][7] = { 213static const unsigned char formats[][7] = {
186 [INSTR_E] = { 0xff, 0,0,0,0,0,0 }, /* e.g. pr */ 214 [INSTR_E] = { 0xff, 0,0,0,0,0,0 },
187 [INSTR_RIE_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 }, /* e.g. brxhg */ 215 [INSTR_RIE_R0UU] = { 0xff, R_8,U16_16,U4_32,0,0,0 },
188 [INSTR_RIL_RP] = { 0x0f, R_8,J32_16,0,0,0,0 }, /* e.g. brasl */ 216 [INSTR_RIE_RRPU] = { 0xff, R_8,R_12,U4_32,J16_16,0,0 },
189 [INSTR_RIL_UP] = { 0x0f, U4_8,J32_16,0,0,0,0 }, /* e.g. brcl */ 217 [INSTR_RIE_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 },
190 [INSTR_RIL_RI] = { 0x0f, R_8,I32_16,0,0,0,0 }, /* e.g. afi */ 218 [INSTR_RIE_RRUUU] = { 0xff, R_8,R_12,U8_16,U8_24,U8_32,0 },
191 [INSTR_RIL_RU] = { 0x0f, R_8,U32_16,0,0,0,0 }, /* e.g. alfi */ 219 [INSTR_RIE_RUPI] = { 0xff, R_8,I8_32,U4_12,J16_16,0,0 },
192 [INSTR_RI_RI] = { 0x0f, R_8,I16_16,0,0,0,0 }, /* e.g. ahi */ 220 [INSTR_RIL_RI] = { 0x0f, R_8,I32_16,0,0,0,0 },
193 [INSTR_RI_RP] = { 0x0f, R_8,J16_16,0,0,0,0 }, /* e.g. brct */ 221 [INSTR_RIL_RP] = { 0x0f, R_8,J32_16,0,0,0,0 },
194 [INSTR_RI_RU] = { 0x0f, R_8,U16_16,0,0,0,0 }, /* e.g. tml */ 222 [INSTR_RIL_RU] = { 0x0f, R_8,U32_16,0,0,0,0 },
195 [INSTR_RI_UP] = { 0x0f, U4_8,J16_16,0,0,0,0 }, /* e.g. brc */ 223 [INSTR_RIL_UP] = { 0x0f, U4_8,J32_16,0,0,0,0 },
196 [INSTR_RRE_00] = { 0xff, 0,0,0,0,0,0 }, /* e.g. palb */ 224 [INSTR_RIS_R0RDU] = { 0xff, R_8,U8_32,D_20,B_16,0,0 },
197 [INSTR_RRE_0R] = { 0xff, R_28,0,0,0,0,0 }, /* e.g. tb */ 225 [INSTR_RIS_RURDI] = { 0xff, R_8,I8_32,U4_12,D_20,B_16,0 },
198 [INSTR_RRE_AA] = { 0xff, A_24,A_28,0,0,0,0 }, /* e.g. cpya */ 226 [INSTR_RIS_RURDU] = { 0xff, R_8,U8_32,U4_12,D_20,B_16,0 },
199 [INSTR_RRE_AR] = { 0xff, A_24,R_28,0,0,0,0 }, /* e.g. sar */ 227 [INSTR_RI_RI] = { 0x0f, R_8,I16_16,0,0,0,0 },
200 [INSTR_RRE_F0] = { 0xff, F_24,0,0,0,0,0 }, /* e.g. sqer */ 228 [INSTR_RI_RP] = { 0x0f, R_8,J16_16,0,0,0,0 },
201 [INSTR_RRE_FF] = { 0xff, F_24,F_28,0,0,0,0 }, /* e.g. debr */ 229 [INSTR_RI_RU] = { 0x0f, R_8,U16_16,0,0,0,0 },
202 [INSTR_RRE_R0] = { 0xff, R_24,0,0,0,0,0 }, /* e.g. ipm */ 230 [INSTR_RI_UP] = { 0x0f, U4_8,J16_16,0,0,0,0 },
203 [INSTR_RRE_RA] = { 0xff, R_24,A_28,0,0,0,0 }, /* e.g. ear */ 231 [INSTR_RRE_00] = { 0xff, 0,0,0,0,0,0 },
204 [INSTR_RRE_RF] = { 0xff, R_24,F_28,0,0,0,0 }, /* e.g. cefbr */ 232 [INSTR_RRE_0R] = { 0xff, R_28,0,0,0,0,0 },
205 [INSTR_RRE_RR] = { 0xff, R_24,R_28,0,0,0,0 }, /* e.g. lura */ 233 [INSTR_RRE_AA] = { 0xff, A_24,A_28,0,0,0,0 },
206 [INSTR_RRE_RR_OPT]= { 0xff, R_24,RO_28,0,0,0,0 }, /* efpc, sfpc */ 234 [INSTR_RRE_AR] = { 0xff, A_24,R_28,0,0,0,0 },
207 [INSTR_RRF_F0FF] = { 0xff, F_16,F_24,F_28,0,0,0 }, /* e.g. madbr */ 235 [INSTR_RRE_F0] = { 0xff, F_24,0,0,0,0,0 },
208 [INSTR_RRF_FUFF] = { 0xff, F_24,F_16,F_28,U4_20,0,0 },/* e.g. didbr */ 236 [INSTR_RRE_FF] = { 0xff, F_24,F_28,0,0,0,0 },
209 [INSTR_RRF_RURR] = { 0xff, R_24,R_28,R_16,U4_20,0,0 },/* e.g. .insn */ 237 [INSTR_RRE_FR] = { 0xff, F_24,R_28,0,0,0,0 },
210 [INSTR_RRF_R0RR] = { 0xff, R_24,R_16,R_28,0,0,0 }, /* e.g. idte */ 238 [INSTR_RRE_R0] = { 0xff, R_24,0,0,0,0,0 },
211 [INSTR_RRF_U0FF] = { 0xff, F_24,U4_16,F_28,0,0,0 }, /* e.g. fixr */ 239 [INSTR_RRE_RA] = { 0xff, R_24,A_28,0,0,0,0 },
212 [INSTR_RRF_U0RF] = { 0xff, R_24,U4_16,F_28,0,0,0 }, /* e.g. cfebr */ 240 [INSTR_RRE_RF] = { 0xff, R_24,F_28,0,0,0,0 },
213 [INSTR_RRF_M0RR] = { 0xff, R_24,R_28,M_16,0,0,0 }, /* e.g. sske */ 241 [INSTR_RRE_RR] = { 0xff, R_24,R_28,0,0,0,0 },
214 [INSTR_RR_FF] = { 0xff, F_8,F_12,0,0,0,0 }, /* e.g. adr */ 242 [INSTR_RRE_RR_OPT]= { 0xff, R_24,RO_28,0,0,0,0 },
215 [INSTR_RR_R0] = { 0xff, R_8, 0,0,0,0,0 }, /* e.g. spm */ 243 [INSTR_RRF_0UFF] = { 0xff, F_24,F_28,U4_20,0,0,0 },
216 [INSTR_RR_RR] = { 0xff, R_8,R_12,0,0,0,0 }, /* e.g. lr */ 244 [INSTR_RRF_F0FF2] = { 0xff, F_24,F_16,F_28,0,0,0 },
217 [INSTR_RR_U0] = { 0xff, U8_8, 0,0,0,0,0 }, /* e.g. svc */ 245 [INSTR_RRF_F0FF] = { 0xff, F_16,F_24,F_28,0,0,0 },
218 [INSTR_RR_UR] = { 0xff, U4_8,R_12,0,0,0,0 }, /* e.g. bcr */ 246 [INSTR_RRF_F0FR] = { 0xff, F_24,F_16,R_28,0,0,0 },
219 [INSTR_RSE_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 }, /* e.g. lmh */ 247 [INSTR_RRF_FFRU] = { 0xff, F_24,F_16,R_28,U4_20,0,0 },
220 [INSTR_RSE_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 }, /* e.g. lmh */ 248 [INSTR_RRF_FUFF] = { 0xff, F_24,F_16,F_28,U4_20,0,0 },
221 [INSTR_RSE_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 }, /* e.g. icmh */ 249 [INSTR_RRF_M0RR] = { 0xff, R_24,R_28,M_16,0,0,0 },
222 [INSTR_RSL_R0RD] = { 0xff, R_8,D_20,B_16,0,0,0 }, /* e.g. tp */ 250 [INSTR_RRF_R0RR] = { 0xff, R_24,R_16,R_28,0,0,0 },
223 [INSTR_RSI_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 }, /* e.g. brxh */ 251 [INSTR_RRF_RURR] = { 0xff, R_24,R_28,R_16,U4_20,0,0 },
224 [INSTR_RSY_RRRD] = { 0xff, R_8,R_12,D20_20,B_16,0,0 },/* e.g. stmy */ 252 [INSTR_RRF_U0FF] = { 0xff, F_24,U4_16,F_28,0,0,0 },
253 [INSTR_RRF_U0RF] = { 0xff, R_24,U4_16,F_28,0,0,0 },
254 [INSTR_RRF_U0RR] = { 0xff, R_24,R_28,U4_16,0,0,0 },
255 [INSTR_RRF_UUFF] = { 0xff, F_24,U4_16,F_28,U4_20,0,0 },
256 [INSTR_RRR_F0FF] = { 0xff, F_24,F_28,F_16,0,0,0 },
257 [INSTR_RRS_RRRDU] = { 0xff, R_8,R_12,U4_32,D_20,B_16,0 },
258 [INSTR_RR_FF] = { 0xff, F_8,F_12,0,0,0,0 },
259 [INSTR_RR_R0] = { 0xff, R_8, 0,0,0,0,0 },
260 [INSTR_RR_RR] = { 0xff, R_8,R_12,0,0,0,0 },
261 [INSTR_RR_U0] = { 0xff, U8_8, 0,0,0,0,0 },
262 [INSTR_RR_UR] = { 0xff, U4_8,R_12,0,0,0,0 },
263 [INSTR_RSE_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 },
264 [INSTR_RSE_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 },
265 [INSTR_RSE_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 },
266 [INSTR_RSI_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 },
267 [INSTR_RSL_R0RD] = { 0xff, D_20,L4_8,B_16,0,0,0 },
268 [INSTR_RSY_AARD] = { 0xff, A_8,A_12,D20_20,B_16,0,0 },
269 [INSTR_RSY_CCRD] = { 0xff, C_8,C_12,D20_20,B_16,0,0 },
270 [INSTR_RSY_RRRD] = { 0xff, R_8,R_12,D20_20,B_16,0,0 },
225 [INSTR_RSY_RURD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 }, 271 [INSTR_RSY_RURD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 },
226 /* e.g. icmh */ 272 [INSTR_RS_AARD] = { 0xff, A_8,A_12,D_20,B_16,0,0 },
227 [INSTR_RSY_AARD] = { 0xff, A_8,A_12,D20_20,B_16,0,0 },/* e.g. lamy */ 273 [INSTR_RS_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 },
228 [INSTR_RSY_CCRD] = { 0xff, C_8,C_12,D20_20,B_16,0,0 },/* e.g. lamy */ 274 [INSTR_RS_R0RD] = { 0xff, R_8,D_20,B_16,0,0,0 },
229 [INSTR_RS_AARD] = { 0xff, A_8,A_12,D_20,B_16,0,0 }, /* e.g. lam */ 275 [INSTR_RS_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 },
230 [INSTR_RS_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 }, /* e.g. lctl */ 276 [INSTR_RS_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 },
231 [INSTR_RS_R0RD] = { 0xff, R_8,D_20,B_16,0,0,0 }, /* e.g. sll */ 277 [INSTR_RXE_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 },
232 [INSTR_RS_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 }, /* e.g. cs */ 278 [INSTR_RXE_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 },
233 [INSTR_RS_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 }, /* e.g. icm */
234 [INSTR_RXE_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 }, /* e.g. axbr */
235 [INSTR_RXE_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 }, /* e.g. lg */
236 [INSTR_RXF_FRRDF] = { 0xff, F_32,F_8,D_20,X_12,B_16,0 }, 279 [INSTR_RXF_FRRDF] = { 0xff, F_32,F_8,D_20,X_12,B_16,0 },
237 /* e.g. madb */ 280 [INSTR_RXY_FRRD] = { 0xff, F_8,D20_20,X_12,B_16,0,0 },
238 [INSTR_RXY_RRRD] = { 0xff, R_8,D20_20,X_12,B_16,0,0 },/* e.g. ly */ 281 [INSTR_RXY_RRRD] = { 0xff, R_8,D20_20,X_12,B_16,0,0 },
239 [INSTR_RXY_FRRD] = { 0xff, F_8,D20_20,X_12,B_16,0,0 },/* e.g. ley */ 282 [INSTR_RXY_URRD] = { 0xff, U4_8,D20_20,X_12,B_16,0,0 },
240 [INSTR_RX_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 }, /* e.g. ae */ 283 [INSTR_RX_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 },
241 [INSTR_RX_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 }, /* e.g. l */ 284 [INSTR_RX_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 },
242 [INSTR_RX_URRD] = { 0xff, U4_8,D_20,X_12,B_16,0,0 }, /* e.g. bc */ 285 [INSTR_RX_URRD] = { 0xff, U4_8,D_20,X_12,B_16,0,0 },
243 [INSTR_SI_URD] = { 0xff, D_20,B_16,U8_8,0,0,0 }, /* e.g. cli */ 286 [INSTR_SIL_RDI] = { 0xff, D_20,B_16,I16_32,0,0,0 },
244 [INSTR_SIY_URD] = { 0xff, D20_20,B_16,U8_8,0,0,0 }, /* e.g. tmy */ 287 [INSTR_SIL_RDU] = { 0xff, D_20,B_16,U16_32,0,0,0 },
245 [INSTR_SSE_RDRD] = { 0xff, D_20,B_16,D_36,B_32,0,0 }, /* e.g. mvsdk */ 288 [INSTR_SIY_IRD] = { 0xff, D20_20,B_16,I8_8,0,0,0 },
289 [INSTR_SIY_URD] = { 0xff, D20_20,B_16,U8_8,0,0,0 },
290 [INSTR_SI_URD] = { 0xff, D_20,B_16,U8_8,0,0,0 },
291 [INSTR_SSE_RDRD] = { 0xff, D_20,B_16,D_36,B_32,0,0 },
292 [INSTR_SSF_RRDRD] = { 0x00, D_20,B_16,D_36,B_32,R_8,0 },
246 [INSTR_SS_L0RDRD] = { 0xff, D_20,L8_8,B_16,D_36,B_32,0 }, 293 [INSTR_SS_L0RDRD] = { 0xff, D_20,L8_8,B_16,D_36,B_32,0 },
247 /* e.g. mvc */
248 [INSTR_SS_LIRDRD] = { 0xff, D_20,L4_8,B_16,D_36,B_32,U4_12 }, 294 [INSTR_SS_LIRDRD] = { 0xff, D_20,L4_8,B_16,D_36,B_32,U4_12 },
249 /* e.g. srp */
250 [INSTR_SS_LLRDRD] = { 0xff, D_20,L4_8,B_16,D_36,L4_12,B_32 }, 295 [INSTR_SS_LLRDRD] = { 0xff, D_20,L4_8,B_16,D_36,L4_12,B_32 },
251 /* e.g. pack */
252 [INSTR_SS_RRRDRD] = { 0xff, D_20,R_8,B_16,D_36,B_32,R_12 },
253 /* e.g. mvck */
254 [INSTR_SS_RRRDRD2]= { 0xff, R_8,D_20,B_16,R_12,D_36,B_32 }, 296 [INSTR_SS_RRRDRD2]= { 0xff, R_8,D_20,B_16,R_12,D_36,B_32 },
255 /* e.g. plo */
256 [INSTR_SS_RRRDRD3]= { 0xff, R_8,R_12,D_20,B_16,D_36,B_32 }, 297 [INSTR_SS_RRRDRD3]= { 0xff, R_8,R_12,D_20,B_16,D_36,B_32 },
257 /* e.g. lmd */ 298 [INSTR_SS_RRRDRD] = { 0xff, D_20,R_8,B_16,D_36,B_32,R_12 },
258 [INSTR_S_00] = { 0xff, 0,0,0,0,0,0 }, /* e.g. hsch */ 299 [INSTR_S_00] = { 0xff, 0,0,0,0,0,0 },
259 [INSTR_S_RD] = { 0xff, D_20,B_16,0,0,0,0 }, /* e.g. lpsw */ 300 [INSTR_S_RD] = { 0xff, D_20,B_16,0,0,0,0 },
260 [INSTR_SSF_RRDRD] = { 0x00, D_20,B_16,D_36,B_32,R_8,0 },
261 /* e.g. mvcos */
262}; 301};
263 302
264static struct insn opcode[] = { 303static struct insn opcode[] = {
@@ -454,6 +493,8 @@ static struct insn opcode[] = {
454static struct insn opcode_01[] = { 493static struct insn opcode_01[] = {
455#ifdef CONFIG_64BIT 494#ifdef CONFIG_64BIT
456 { "sam64", 0x0e, INSTR_E }, 495 { "sam64", 0x0e, INSTR_E },
496 { "pfpo", 0x0a, INSTR_E },
497 { "ptff", 0x04, INSTR_E },
457#endif 498#endif
458 { "pr", 0x01, INSTR_E }, 499 { "pr", 0x01, INSTR_E },
459 { "upt", 0x02, INSTR_E }, 500 { "upt", 0x02, INSTR_E },
@@ -519,6 +560,8 @@ static struct insn opcode_b2[] = {
519 { "cutfu", 0xa7, INSTR_RRF_M0RR }, 560 { "cutfu", 0xa7, INSTR_RRF_M0RR },
520 { "stfle", 0xb0, INSTR_S_RD }, 561 { "stfle", 0xb0, INSTR_S_RD },
521 { "lpswe", 0xb2, INSTR_S_RD }, 562 { "lpswe", 0xb2, INSTR_S_RD },
563 { "srnmt", 0xb9, INSTR_S_RD },
564 { "lfas", 0xbd, INSTR_S_RD },
522#endif 565#endif
523 { "stidp", 0x02, INSTR_S_RD }, 566 { "stidp", 0x02, INSTR_S_RD },
524 { "sck", 0x04, INSTR_S_RD }, 567 { "sck", 0x04, INSTR_S_RD },
@@ -589,7 +632,6 @@ static struct insn opcode_b2[] = {
589 { "clst", 0x5d, INSTR_RRE_RR }, 632 { "clst", 0x5d, INSTR_RRE_RR },
590 { "srst", 0x5e, INSTR_RRE_RR }, 633 { "srst", 0x5e, INSTR_RRE_RR },
591 { "cmpsc", 0x63, INSTR_RRE_RR }, 634 { "cmpsc", 0x63, INSTR_RRE_RR },
592 { "cmpsc", 0x63, INSTR_RRE_RR },
593 { "siga", 0x74, INSTR_S_RD }, 635 { "siga", 0x74, INSTR_S_RD },
594 { "xsch", 0x76, INSTR_S_00 }, 636 { "xsch", 0x76, INSTR_S_00 },
595 { "rp", 0x77, INSTR_S_RD }, 637 { "rp", 0x77, INSTR_S_RD },
@@ -630,6 +672,57 @@ static struct insn opcode_b3[] = {
630 { "cger", 0xc8, INSTR_RRF_U0RF }, 672 { "cger", 0xc8, INSTR_RRF_U0RF },
631 { "cgdr", 0xc9, INSTR_RRF_U0RF }, 673 { "cgdr", 0xc9, INSTR_RRF_U0RF },
632 { "cgxr", 0xca, INSTR_RRF_U0RF }, 674 { "cgxr", 0xca, INSTR_RRF_U0RF },
675 { "lpdfr", 0x70, INSTR_RRE_FF },
676 { "lndfr", 0x71, INSTR_RRE_FF },
677 { "cpsdr", 0x72, INSTR_RRF_F0FF2 },
678 { "lcdfr", 0x73, INSTR_RRE_FF },
679 { "ldgr", 0xc1, INSTR_RRE_FR },
680 { "lgdr", 0xcd, INSTR_RRE_RF },
681 { "adtr", 0xd2, INSTR_RRR_F0FF },
682 { "axtr", 0xda, INSTR_RRR_F0FF },
683 { "cdtr", 0xe4, INSTR_RRE_FF },
684 { "cxtr", 0xec, INSTR_RRE_FF },
685 { "kdtr", 0xe0, INSTR_RRE_FF },
686 { "kxtr", 0xe8, INSTR_RRE_FF },
687 { "cedtr", 0xf4, INSTR_RRE_FF },
688 { "cextr", 0xfc, INSTR_RRE_FF },
689 { "cdgtr", 0xf1, INSTR_RRE_FR },
690 { "cxgtr", 0xf9, INSTR_RRE_FR },
691 { "cdstr", 0xf3, INSTR_RRE_FR },
692 { "cxstr", 0xfb, INSTR_RRE_FR },
693 { "cdutr", 0xf2, INSTR_RRE_FR },
694 { "cxutr", 0xfa, INSTR_RRE_FR },
695 { "cgdtr", 0xe1, INSTR_RRF_U0RF },
696 { "cgxtr", 0xe9, INSTR_RRF_U0RF },
697 { "csdtr", 0xe3, INSTR_RRE_RF },
698 { "csxtr", 0xeb, INSTR_RRE_RF },
699 { "cudtr", 0xe2, INSTR_RRE_RF },
700 { "cuxtr", 0xea, INSTR_RRE_RF },
701 { "ddtr", 0xd1, INSTR_RRR_F0FF },
702 { "dxtr", 0xd9, INSTR_RRR_F0FF },
703 { "eedtr", 0xe5, INSTR_RRE_RF },
704 { "eextr", 0xed, INSTR_RRE_RF },
705 { "esdtr", 0xe7, INSTR_RRE_RF },
706 { "esxtr", 0xef, INSTR_RRE_RF },
707 { "iedtr", 0xf6, INSTR_RRF_F0FR },
708 { "iextr", 0xfe, INSTR_RRF_F0FR },
709 { "ltdtr", 0xd6, INSTR_RRE_FF },
710 { "ltxtr", 0xde, INSTR_RRE_FF },
711 { "fidtr", 0xd7, INSTR_RRF_UUFF },
712 { "fixtr", 0xdf, INSTR_RRF_UUFF },
713 { "ldetr", 0xd4, INSTR_RRF_0UFF },
714 { "lxdtr", 0xdc, INSTR_RRF_0UFF },
715 { "ledtr", 0xd5, INSTR_RRF_UUFF },
716 { "ldxtr", 0xdd, INSTR_RRF_UUFF },
717 { "mdtr", 0xd0, INSTR_RRR_F0FF },
718 { "mxtr", 0xd8, INSTR_RRR_F0FF },
719 { "qadtr", 0xf5, INSTR_RRF_FUFF },
720 { "qaxtr", 0xfd, INSTR_RRF_FUFF },
721 { "rrdtr", 0xf7, INSTR_RRF_FFRU },
722 { "rrxtr", 0xff, INSTR_RRF_FFRU },
723 { "sfasr", 0x85, INSTR_RRE_R0 },
724 { "sdtr", 0xd3, INSTR_RRR_F0FF },
725 { "sxtr", 0xdb, INSTR_RRR_F0FF },
633#endif 726#endif
634 { "lpebr", 0x00, INSTR_RRE_FF }, 727 { "lpebr", 0x00, INSTR_RRE_FF },
635 { "lnebr", 0x01, INSTR_RRE_FF }, 728 { "lnebr", 0x01, INSTR_RRE_FF },
@@ -780,6 +873,14 @@ static struct insn opcode_b9[] = {
780 { "cu24", 0xb1, INSTR_RRF_M0RR }, 873 { "cu24", 0xb1, INSTR_RRF_M0RR },
781 { "cu41", 0xb2, INSTR_RRF_M0RR }, 874 { "cu41", 0xb2, INSTR_RRF_M0RR },
782 { "cu42", 0xb3, INSTR_RRF_M0RR }, 875 { "cu42", 0xb3, INSTR_RRF_M0RR },
876 { "crt", 0x72, INSTR_RRF_U0RR },
877 { "cgrt", 0x60, INSTR_RRF_U0RR },
878 { "clrt", 0x73, INSTR_RRF_U0RR },
879 { "clgrt", 0x61, INSTR_RRF_U0RR },
880 { "ptf", 0xa2, INSTR_RRE_R0 },
881 { "pfmf", 0xaf, INSTR_RRE_RR },
882 { "trte", 0xbf, INSTR_RRF_M0RR },
883 { "trtre", 0xbd, INSTR_RRF_M0RR },
783#endif 884#endif
784 { "kmac", 0x1e, INSTR_RRE_RR }, 885 { "kmac", 0x1e, INSTR_RRE_RR },
785 { "lrvr", 0x1f, INSTR_RRE_RR }, 886 { "lrvr", 0x1f, INSTR_RRE_RR },
@@ -835,6 +936,43 @@ static struct insn opcode_c2[] = {
835 { "cfi", 0x0d, INSTR_RIL_RI }, 936 { "cfi", 0x0d, INSTR_RIL_RI },
836 { "clgfi", 0x0e, INSTR_RIL_RU }, 937 { "clgfi", 0x0e, INSTR_RIL_RU },
837 { "clfi", 0x0f, INSTR_RIL_RU }, 938 { "clfi", 0x0f, INSTR_RIL_RU },
939 { "msfi", 0x01, INSTR_RIL_RI },
940 { "msgfi", 0x00, INSTR_RIL_RI },
941#endif
942 { "", 0, INSTR_INVALID }
943};
944
945static struct insn opcode_c4[] = {
946#ifdef CONFIG_64BIT
947 { "lrl", 0x0d, INSTR_RIL_RP },
948 { "lgrl", 0x08, INSTR_RIL_RP },
949 { "lgfrl", 0x0c, INSTR_RIL_RP },
950 { "lhrl", 0x05, INSTR_RIL_RP },
951 { "lghrl", 0x04, INSTR_RIL_RP },
952 { "llgfrl", 0x0e, INSTR_RIL_RP },
953 { "llhrl", 0x02, INSTR_RIL_RP },
954 { "llghrl", 0x06, INSTR_RIL_RP },
955 { "strl", 0x0f, INSTR_RIL_RP },
956 { "stgrl", 0x0b, INSTR_RIL_RP },
957 { "sthrl", 0x07, INSTR_RIL_RP },
958#endif
959 { "", 0, INSTR_INVALID }
960};
961
962static struct insn opcode_c6[] = {
963#ifdef CONFIG_64BIT
964 { "crl", 0x0d, INSTR_RIL_RP },
965 { "cgrl", 0x08, INSTR_RIL_RP },
966 { "cgfrl", 0x0c, INSTR_RIL_RP },
967 { "chrl", 0x05, INSTR_RIL_RP },
968 { "cghrl", 0x04, INSTR_RIL_RP },
969 { "clrl", 0x0f, INSTR_RIL_RP },
970 { "clgrl", 0x0a, INSTR_RIL_RP },
971 { "clgfrl", 0x0e, INSTR_RIL_RP },
972 { "clhrl", 0x07, INSTR_RIL_RP },
973 { "clghrl", 0x06, INSTR_RIL_RP },
974 { "pfdrl", 0x02, INSTR_RIL_UP },
975 { "exrl", 0x00, INSTR_RIL_RP },
838#endif 976#endif
839 { "", 0, INSTR_INVALID } 977 { "", 0, INSTR_INVALID }
840}; 978};
@@ -842,6 +980,8 @@ static struct insn opcode_c2[] = {
842static struct insn opcode_c8[] = { 980static struct insn opcode_c8[] = {
843#ifdef CONFIG_64BIT 981#ifdef CONFIG_64BIT
844 { "mvcos", 0x00, INSTR_SSF_RRDRD }, 982 { "mvcos", 0x00, INSTR_SSF_RRDRD },
983 { "ectg", 0x01, INSTR_SSF_RRDRD },
984 { "csst", 0x02, INSTR_SSF_RRDRD },
845#endif 985#endif
846 { "", 0, INSTR_INVALID } 986 { "", 0, INSTR_INVALID }
847}; 987};
@@ -917,6 +1057,12 @@ static struct insn opcode_e3[] = {
917 { "llgh", 0x91, INSTR_RXY_RRRD }, 1057 { "llgh", 0x91, INSTR_RXY_RRRD },
918 { "llc", 0x94, INSTR_RXY_RRRD }, 1058 { "llc", 0x94, INSTR_RXY_RRRD },
919 { "llh", 0x95, INSTR_RXY_RRRD }, 1059 { "llh", 0x95, INSTR_RXY_RRRD },
1060 { "cgh", 0x34, INSTR_RXY_RRRD },
1061 { "laey", 0x75, INSTR_RXY_RRRD },
1062 { "ltgf", 0x32, INSTR_RXY_RRRD },
1063 { "mfy", 0x5c, INSTR_RXY_RRRD },
1064 { "mhy", 0x7c, INSTR_RXY_RRRD },
1065 { "pfd", 0x36, INSTR_RXY_URRD },
920#endif 1066#endif
921 { "lrv", 0x1e, INSTR_RXY_RRRD }, 1067 { "lrv", 0x1e, INSTR_RXY_RRRD },
922 { "lrvh", 0x1f, INSTR_RXY_RRRD }, 1068 { "lrvh", 0x1f, INSTR_RXY_RRRD },
@@ -931,6 +1077,15 @@ static struct insn opcode_e3[] = {
931static struct insn opcode_e5[] = { 1077static struct insn opcode_e5[] = {
932#ifdef CONFIG_64BIT 1078#ifdef CONFIG_64BIT
933 { "strag", 0x02, INSTR_SSE_RDRD }, 1079 { "strag", 0x02, INSTR_SSE_RDRD },
1080 { "chhsi", 0x54, INSTR_SIL_RDI },
1081 { "chsi", 0x5c, INSTR_SIL_RDI },
1082 { "cghsi", 0x58, INSTR_SIL_RDI },
1083 { "clhhsi", 0x55, INSTR_SIL_RDU },
1084 { "clfhsi", 0x5d, INSTR_SIL_RDU },
1085 { "clghsi", 0x59, INSTR_SIL_RDU },
1086 { "mvhhi", 0x44, INSTR_SIL_RDI },
1087 { "mvhi", 0x4c, INSTR_SIL_RDI },
1088 { "mvghi", 0x48, INSTR_SIL_RDI },
934#endif 1089#endif
935 { "lasp", 0x00, INSTR_SSE_RDRD }, 1090 { "lasp", 0x00, INSTR_SSE_RDRD },
936 { "tprot", 0x01, INSTR_SSE_RDRD }, 1091 { "tprot", 0x01, INSTR_SSE_RDRD },
@@ -977,6 +1132,11 @@ static struct insn opcode_eb[] = {
977 { "lmy", 0x98, INSTR_RSY_RRRD }, 1132 { "lmy", 0x98, INSTR_RSY_RRRD },
978 { "lamy", 0x9a, INSTR_RSY_AARD }, 1133 { "lamy", 0x9a, INSTR_RSY_AARD },
979 { "stamy", 0x9b, INSTR_RSY_AARD }, 1134 { "stamy", 0x9b, INSTR_RSY_AARD },
1135 { "asi", 0x6a, INSTR_SIY_IRD },
1136 { "agsi", 0x7a, INSTR_SIY_IRD },
1137 { "alsi", 0x6e, INSTR_SIY_IRD },
1138 { "algsi", 0x7e, INSTR_SIY_IRD },
1139 { "ecag", 0x4c, INSTR_RSY_RRRD },
980#endif 1140#endif
981 { "rll", 0x1d, INSTR_RSY_RRRD }, 1141 { "rll", 0x1d, INSTR_RSY_RRRD },
982 { "mvclu", 0x8e, INSTR_RSY_RRRD }, 1142 { "mvclu", 0x8e, INSTR_RSY_RRRD },
@@ -988,6 +1148,30 @@ static struct insn opcode_ec[] = {
988#ifdef CONFIG_64BIT 1148#ifdef CONFIG_64BIT
989 { "brxhg", 0x44, INSTR_RIE_RRP }, 1149 { "brxhg", 0x44, INSTR_RIE_RRP },
990 { "brxlg", 0x45, INSTR_RIE_RRP }, 1150 { "brxlg", 0x45, INSTR_RIE_RRP },
1151 { "crb", 0xf6, INSTR_RRS_RRRDU },
1152 { "cgrb", 0xe4, INSTR_RRS_RRRDU },
1153 { "crj", 0x76, INSTR_RIE_RRPU },
1154 { "cgrj", 0x64, INSTR_RIE_RRPU },
1155 { "cib", 0xfe, INSTR_RIS_RURDI },
1156 { "cgib", 0xfc, INSTR_RIS_RURDI },
1157 { "cij", 0x7e, INSTR_RIE_RUPI },
1158 { "cgij", 0x7c, INSTR_RIE_RUPI },
1159 { "cit", 0x72, INSTR_RIE_R0IU },
1160 { "cgit", 0x70, INSTR_RIE_R0IU },
1161 { "clrb", 0xf7, INSTR_RRS_RRRDU },
1162 { "clgrb", 0xe5, INSTR_RRS_RRRDU },
1163 { "clrj", 0x77, INSTR_RIE_RRPU },
1164 { "clgrj", 0x65, INSTR_RIE_RRPU },
1165 { "clib", 0xff, INSTR_RIS_RURDU },
1166 { "clgib", 0xfd, INSTR_RIS_RURDU },
1167 { "clij", 0x7f, INSTR_RIE_RUPU },
1168 { "clgij", 0x7d, INSTR_RIE_RUPU },
1169 { "clfit", 0x73, INSTR_RIE_R0UU },
1170 { "clgit", 0x71, INSTR_RIE_R0UU },
1171 { "rnsbg", 0x54, INSTR_RIE_RRUUU },
1172 { "rxsbg", 0x57, INSTR_RIE_RRUUU },
1173 { "rosbg", 0x56, INSTR_RIE_RRUUU },
1174 { "risbg", 0x55, INSTR_RIE_RRUUU },
991#endif 1175#endif
992 { "", 0, INSTR_INVALID } 1176 { "", 0, INSTR_INVALID }
993}; 1177};
@@ -1004,6 +1188,16 @@ static struct insn opcode_ed[] = {
1004 { "ldy", 0x65, INSTR_RXY_FRRD }, 1188 { "ldy", 0x65, INSTR_RXY_FRRD },
1005 { "stey", 0x66, INSTR_RXY_FRRD }, 1189 { "stey", 0x66, INSTR_RXY_FRRD },
1006 { "stdy", 0x67, INSTR_RXY_FRRD }, 1190 { "stdy", 0x67, INSTR_RXY_FRRD },
1191 { "sldt", 0x40, INSTR_RXF_FRRDF },
1192 { "slxt", 0x48, INSTR_RXF_FRRDF },
1193 { "srdt", 0x41, INSTR_RXF_FRRDF },
1194 { "srxt", 0x49, INSTR_RXF_FRRDF },
1195 { "tdcet", 0x50, INSTR_RXE_FRRD },
1196 { "tdcdt", 0x54, INSTR_RXE_FRRD },
1197 { "tdcxt", 0x58, INSTR_RXE_FRRD },
1198 { "tdget", 0x51, INSTR_RXE_FRRD },
1199 { "tdgdt", 0x55, INSTR_RXE_FRRD },
1200 { "tdgxt", 0x59, INSTR_RXE_FRRD },
1007#endif 1201#endif
1008 { "ldeb", 0x04, INSTR_RXE_FRRD }, 1202 { "ldeb", 0x04, INSTR_RXE_FRRD },
1009 { "lxdb", 0x05, INSTR_RXE_FRRD }, 1203 { "lxdb", 0x05, INSTR_RXE_FRRD },
@@ -1037,6 +1231,7 @@ static struct insn opcode_ed[] = {
1037 { "mae", 0x2e, INSTR_RXF_FRRDF }, 1231 { "mae", 0x2e, INSTR_RXF_FRRDF },
1038 { "mse", 0x2f, INSTR_RXF_FRRDF }, 1232 { "mse", 0x2f, INSTR_RXF_FRRDF },
1039 { "sqe", 0x34, INSTR_RXE_FRRD }, 1233 { "sqe", 0x34, INSTR_RXE_FRRD },
1234 { "sqd", 0x35, INSTR_RXE_FRRD },
1040 { "mee", 0x37, INSTR_RXE_FRRD }, 1235 { "mee", 0x37, INSTR_RXE_FRRD },
1041 { "mad", 0x3e, INSTR_RXF_FRRDF }, 1236 { "mad", 0x3e, INSTR_RXF_FRRDF },
1042 { "msd", 0x3f, INSTR_RXF_FRRDF }, 1237 { "msd", 0x3f, INSTR_RXF_FRRDF },
@@ -1117,6 +1312,12 @@ static struct insn *find_insn(unsigned char *code)
1117 case 0xc2: 1312 case 0xc2:
1118 table = opcode_c2; 1313 table = opcode_c2;
1119 break; 1314 break;
1315 case 0xc4:
1316 table = opcode_c4;
1317 break;
1318 case 0xc6:
1319 table = opcode_c6;
1320 break;
1120 case 0xc8: 1321 case 0xc8:
1121 table = opcode_c8; 1322 table = opcode_c8;
1122 break; 1323 break;
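
The dis.c changes above all feed one decode scheme: the first opcode byte selects a per-prefix table (hence the new 0xc4 and 0xc6 cases in find_insn), each table ends in an INSTR_INVALID sentinel, and the leading 0xff/0x0f/0x00 value in a formats[] row reads as the mask applied to the opcode-extension byte before it is compared with an entry's opcode fragment. A trimmed, self-contained C sketch of that lookup under those assumptions; the types, enum values and table contents below are illustrative stand-ins, not the kernel's definitions.

/*
 * Two-level decode sketch: byte 0 picks a table, the format's mask picks
 * how much of byte 1 is the opcode extension.
 */
#include <stdio.h>

enum { FMT_INVALID, FMT_RIL_RP, FMT_RIL_UP };

struct insn_sketch { const char *name; unsigned char opfrag; int format; };

static const unsigned char format_mask[] = {
    [FMT_INVALID] = 0x00,
    [FMT_RIL_RP]  = 0x0f,   /* extension is the low nibble of byte 1 */
    [FMT_RIL_UP]  = 0x0f,
};

/* a few of the new 0xc6 entries from the hunk above */
static const struct insn_sketch opcode_c6_sketch[] = {
    { "exrl",  0x00, FMT_RIL_RP },
    { "pfdrl", 0x02, FMT_RIL_UP },
    { "crl",   0x0d, FMT_RIL_RP },
    { NULL, 0, FMT_INVALID }
};

static const struct insn_sketch *find_insn_sketch(const unsigned char *code)
{
    const struct insn_sketch *insn;

    if (code[0] != 0xc6)        /* the real code switches over many prefixes */
        return NULL;
    for (insn = opcode_c6_sketch; insn->name; insn++)
        if (insn->opfrag == (code[1] & format_mask[insn->format]))
            return insn;
    return NULL;
}

int main(void)
{
    unsigned char buf[6] = { 0xc6, 0x4d, 0, 0, 0, 0 };  /* crl %r4,... */
    const struct insn_sketch *insn = find_insn_sketch(buf);

    printf("%s\n", insn ? insn->name : "unknown");
    return 0;
}
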
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index e49e9e0c69fd..31d618a443af 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -214,10 +214,13 @@ static __initdata struct sysinfo_3_2_2 vmms __aligned(PAGE_SIZE);
214 214
215static noinline __init void detect_machine_type(void) 215static noinline __init void detect_machine_type(void)
216{ 216{
217 /* No VM information? Looks like LPAR */ 217 /* Check current-configuration-level */
218 if (stsi(&vmms, 3, 2, 2) == -ENOSYS) 218 if ((stsi(NULL, 0, 0, 0) >> 28) <= 2) {
219 S390_lowcore.machine_flags |= MACHINE_FLAG_LPAR;
219 return; 220 return;
220 if (!vmms.count) 221 }
222 /* Get virtual-machine cpu information. */
223 if (stsi(&vmms, 3, 2, 2) == -ENOSYS || !vmms.count)
221 return; 224 return;
222 225
223 /* Running under KVM? If not we assume z/VM */ 226 /* Running under KVM? If not we assume z/VM */
@@ -402,8 +405,19 @@ static void __init append_to_cmdline(size_t (*ipl_data)(char *, size_t))
402 405
403static void __init setup_boot_command_line(void) 406static void __init setup_boot_command_line(void)
404{ 407{
408 int i;
409
410 /* convert arch command line to ascii */
411 for (i = 0; i < ARCH_COMMAND_LINE_SIZE; i++)
412 if (COMMAND_LINE[i] & 0x80)
413 break;
414 if (i < ARCH_COMMAND_LINE_SIZE)
415 EBCASC(COMMAND_LINE, ARCH_COMMAND_LINE_SIZE);
416 COMMAND_LINE[ARCH_COMMAND_LINE_SIZE-1] = 0;
417
405 /* copy arch command line */ 418 /* copy arch command line */
406 strlcpy(boot_command_line, COMMAND_LINE, ARCH_COMMAND_LINE_SIZE); 419 strlcpy(boot_command_line, strstrip(COMMAND_LINE),
420 ARCH_COMMAND_LINE_SIZE);
407 421
408 /* append IPL PARM data to the boot command line */ 422 /* append IPL PARM data to the boot command line */
409 if (MACHINE_IS_VM) 423 if (MACHINE_IS_VM)
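
The setup_boot_command_line() hunk above rests on a simple heuristic: ASCII never sets the top bit of a byte, so if any byte of the stored command line has it set, the buffer is taken to be EBCDIC and converted in place, then terminated and stripped before being copied. A standalone sketch of the same check, with the kernel's EBCASC() conversion replaced by a stub:

#include <stddef.h>

/* Stand-in for the kernel's EBCASC() in-place EBCDIC->ASCII conversion. */
static void ebcasc_stub(char *buf, size_t len)
{
    (void)buf;
    (void)len;
}

/* Mirrors the loop added to setup_boot_command_line() above. */
static void normalize_cmdline(char *buf, size_t len)
{
    size_t i;

    /* ASCII bytes never have the top bit set; one that does means EBCDIC. */
    for (i = 0; i < len; i++)
        if ((unsigned char)buf[i] & 0x80)
            break;
    if (i < len)
        ebcasc_stub(buf, len);
    buf[len - 1] = 0;   /* force termination, as the hunk above does */
}

int main(void)
{
    char cmdline[32] = "root=/dev/ram0 ro";

    normalize_cmdline(cmdline, sizeof(cmdline));
    return 0;
}
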
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index e8ef21c51bbe..4348f9bc5393 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -13,7 +13,6 @@
13#include <linux/linkage.h> 13#include <linux/linkage.h>
14#include <linux/init.h> 14#include <linux/init.h>
15#include <asm/cache.h> 15#include <asm/cache.h>
16#include <asm/lowcore.h>
17#include <asm/errno.h> 16#include <asm/errno.h>
18#include <asm/ptrace.h> 17#include <asm/ptrace.h>
19#include <asm/thread_info.h> 18#include <asm/thread_info.h>
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index f33658f09dd7..29fd0f1e6ec4 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -9,11 +9,9 @@
9 * Heiko Carstens <heiko.carstens@de.ibm.com> 9 * Heiko Carstens <heiko.carstens@de.ibm.com>
10 */ 10 */
11 11
12#include <linux/sys.h>
13#include <linux/linkage.h> 12#include <linux/linkage.h>
14#include <linux/init.h> 13#include <linux/init.h>
15#include <asm/cache.h> 14#include <asm/cache.h>
16#include <asm/lowcore.h>
17#include <asm/errno.h> 15#include <asm/errno.h>
18#include <asm/ptrace.h> 16#include <asm/ptrace.h>
19#include <asm/thread_info.h> 17#include <asm/thread_info.h>
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 5a82bc68193e..314d8f09cf31 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -13,7 +13,7 @@
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/types.h> 14#include <linux/types.h>
15#include <trace/syscall.h> 15#include <trace/syscall.h>
16#include <asm/lowcore.h> 16#include <asm/asm-offsets.h>
17 17
18#ifdef CONFIG_DYNAMIC_FTRACE 18#ifdef CONFIG_DYNAMIC_FTRACE
19 19
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index c52b4f7742fa..ca4a62bd862f 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright IBM Corp. 1999,2009 2 * Copyright IBM Corp. 1999,2010
3 * 3 *
4 * Author(s): Hartmut Penner <hp@de.ibm.com> 4 * Author(s): Hartmut Penner <hp@de.ibm.com>
5 * Martin Schwidefsky <schwidefsky@de.ibm.com> 5 * Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -22,12 +22,9 @@
22 */ 22 */
23 23
24#include <linux/init.h> 24#include <linux/init.h>
25#include <asm/setup.h>
26#include <asm/lowcore.h>
27#include <asm/asm-offsets.h> 25#include <asm/asm-offsets.h>
28#include <asm/thread_info.h> 26#include <asm/thread_info.h>
29#include <asm/page.h> 27#include <asm/page.h>
30#include <asm/cpu.h>
31 28
32#ifdef CONFIG_64BIT 29#ifdef CONFIG_64BIT
33#define ARCH_OFFSET 4 30#define ARCH_OFFSET 4
@@ -288,19 +285,7 @@ iplstart:
288 bz .Lagain1 # skip dateset trailer 285 bz .Lagain1 # skip dateset trailer
289 la %r5,0(%r4,%r2) 286 la %r5,0(%r4,%r2)
290 lr %r3,%r2 287 lr %r3,%r2
291.Lidebc: 288 la %r3,COMMAND_LINE-PARMAREA(%r12) # load adr. of command line
292 tm 0(%r5),0x80 # high order bit set ?
293 bo .Ldocv # yes -> convert from EBCDIC
294 ahi %r5,-1
295 bct %r3,.Lidebc
296 b .Lnocv
297.Ldocv:
298 l %r3,.Lcvtab
299 tr 0(256,%r4),0(%r3) # convert parameters to ascii
300 tr 256(256,%r4),0(%r3)
301 tr 512(256,%r4),0(%r3)
302 tr 768(122,%r4),0(%r3)
303.Lnocv: la %r3,COMMAND_LINE-PARMAREA(%r12) # load adr. of command line
304 mvc 0(256,%r3),0(%r4) 289 mvc 0(256,%r3),0(%r4)
305 mvc 256(256,%r3),256(%r4) 290 mvc 256(256,%r3),256(%r4)
306 mvc 512(256,%r3),512(%r4) 291 mvc 512(256,%r3),512(%r4)
@@ -384,7 +369,6 @@ iplstart:
384.Linitrd:.long _end + 0x400000 # default address of initrd 369.Linitrd:.long _end + 0x400000 # default address of initrd
385.Lparm: .long PARMAREA 370.Lparm: .long PARMAREA
386.Lstartup: .long startup 371.Lstartup: .long startup
387.Lcvtab:.long _ebcasc # ebcdic to ascii table
388.Lreset:.byte 0xc3,0xc8,0xc1,0xd5,0xc7,0xc5,0x40,0xd9,0xc4,0xd9,0x40 372.Lreset:.byte 0xc3,0xc8,0xc1,0xd5,0xc7,0xc5,0x40,0xd9,0xc4,0xd9,0x40
389 .byte 0xc1,0xd3,0xd3,0x40,0xd2,0xc5,0xc5,0xd7,0x40,0xd5,0xd6 373 .byte 0xc1,0xd3,0xd3,0x40,0xd2,0xc5,0xc5,0xd7,0x40,0xd5,0xd6
390 .byte 0xc8,0xd6,0xd3,0xc4 # "change rdr all keep nohold" 374 .byte 0xc8,0xd6,0xd3,0xc4 # "change rdr all keep nohold"
@@ -417,13 +401,10 @@ start:
417.sk8x8: 401.sk8x8:
418 mvc 0(240,%r8),0(%r9) # copy iplparms into buffer 402 mvc 0(240,%r8),0(%r9) # copy iplparms into buffer
419.gotr: 403.gotr:
420 l %r10,.tbl # EBCDIC to ASCII table
421 tr 0(240,%r8),0(%r10)
422 slr %r0,%r0 404 slr %r0,%r0
423 st %r0,INITRD_SIZE+ARCH_OFFSET-PARMAREA(%r11) 405 st %r0,INITRD_SIZE+ARCH_OFFSET-PARMAREA(%r11)
424 st %r0,INITRD_START+ARCH_OFFSET-PARMAREA(%r11) 406 st %r0,INITRD_START+ARCH_OFFSET-PARMAREA(%r11)
425 j startup # continue with startup 407 j startup # continue with startup
426.tbl: .long _ebcasc # translate table
427.cmd: .long COMMAND_LINE # address of command line buffer 408.cmd: .long COMMAND_LINE # address of command line buffer
428.parm: .long PARMAREA 409.parm: .long PARMAREA
429.lowcase: 410.lowcase:
@@ -467,16 +448,15 @@ start:
467# or linload or SALIPL 448# or linload or SALIPL
468# 449#
469 .org 0x10000 450 .org 0x10000
470startup:basr %r13,0 # get base 451 .globl startup
452startup:
453 basr %r13,0 # get base
471.LPG0: 454.LPG0:
472 xc 0x200(256),0x200 # partially clear lowcore 455 xc 0x200(256),0x200 # partially clear lowcore
473 xc 0x300(256),0x300 456 xc 0x300(256),0x300
474 l %r1,5f-.LPG0(%r13) 457 stck __LC_LAST_UPDATE_CLOCK
475 stck 0(%r1) 458 spt 5f-.LPG0(%r13)
476 spt 6f-.LPG0(%r13) 459 mvc __LC_LAST_UPDATE_TIMER(8),5f-.LPG0(%r13)
477 mvc __LC_LAST_UPDATE_CLOCK(8),0(%r1)
478 mvc __LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13)
479 mvc __LC_EXIT_TIMER(8),5f-.LPG0(%r13)
480#ifndef CONFIG_MARCH_G5 460#ifndef CONFIG_MARCH_G5
481 # check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10} 461 # check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10}
482 xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST 462 xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST
@@ -494,7 +474,6 @@ startup:basr %r13,0 # get base
494 cl %r0,2f+12-.LPG0(%r13) 474 cl %r0,2f+12-.LPG0(%r13)
495 je 3f 475 je 3f
4961: l %r15,.Lstack-.LPG0(%r13) 4761: l %r15,.Lstack-.LPG0(%r13)
497 ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union+THREAD_SIZE
498 ahi %r15,-96 477 ahi %r15,-96
499 la %r2,.Lals_string-.LPG0(%r13) 478 la %r2,.Lals_string-.LPG0(%r13)
500 l %r3,.Lsclp_print-.LPG0(%r13) 479 l %r3,.Lsclp_print-.LPG0(%r13)
@@ -505,7 +484,7 @@ startup:basr %r13,0 # get base
505.Lsclp_print: 484.Lsclp_print:
506 .long _sclp_print_early 485 .long _sclp_print_early
507.Lstack: 486.Lstack:
508 .long init_thread_union 487 .long 0x8000 + (1<<(PAGE_SHIFT+THREAD_ORDER))
509 .align 16 488 .align 16
5102: .long 0x000a0000,0x8badcccc 4892: .long 0x000a0000,0x8badcccc
511#if defined(CONFIG_64BIT) 490#if defined(CONFIG_64BIT)
@@ -532,13 +511,22 @@ startup:basr %r13,0 # get base
5323: 5113:
533#endif 512#endif
534 513
514#ifdef CONFIG_64BIT
515 mvi __LC_AR_MODE_ID,1 # set esame flag
516 slr %r0,%r0 # set cpuid to zero
517 lhi %r1,2 # mode 2 = esame (dump)
518 sigp %r1,%r0,0x12 # switch to esame mode
519 sam64 # switch to 64 bit mode
520 jg startup_continue
521#else
522 mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0)
535 l %r13,4f-.LPG0(%r13) 523 l %r13,4f-.LPG0(%r13)
536 b 0(%r13) 524 b 0(%r13)
537 .align 4 525 .align 8
5384: .long startup_continue 5264: .long startup_continue
5395: .long sched_clock_base_cc 527#endif
540 .align 8 528 .align 8
5416: .long 0x7fffffff,0xffffffff 5295: .long 0x7fffffff,0xffffffff
542 530
543# 531#
544# params at 10400 (setup.h) 532# params at 10400 (setup.h)
@@ -552,8 +540,4 @@ startup:basr %r13,0 # get base
552 .byte "root=/dev/ram0 ro" 540 .byte "root=/dev/ram0 ro"
553 .byte 0 541 .byte 0
554 542
555#ifdef CONFIG_64BIT 543 .org 0x11000
556#include "head64.S"
557#else
558#include "head31.S"
559#endif
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S
index 602b508cd4c4..1bbcc499d455 100644
--- a/arch/s390/kernel/head31.S
+++ b/arch/s390/kernel/head31.S
@@ -1,7 +1,7 @@
1/* 1/*
2 * arch/s390/kernel/head31.S 2 * arch/s390/kernel/head31.S
3 * 3 *
4 * Copyright (C) IBM Corp. 2005,2006 4 * Copyright (C) IBM Corp. 2005,2010
5 * 5 *
6 * Author(s): Hartmut Penner <hp@de.ibm.com> 6 * Author(s): Hartmut Penner <hp@de.ibm.com>
7 * Martin Schwidefsky <schwidefsky@de.ibm.com> 7 * Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -10,13 +10,19 @@
10 * 10 *
11 */ 11 */
12 12
13 .org 0x11000 13#include <linux/init.h>
14#include <asm/asm-offsets.h>
15#include <asm/thread_info.h>
16#include <asm/page.h>
14 17
18__HEAD
19 .globl startup_continue
15startup_continue: 20startup_continue:
16 basr %r13,0 # get base 21 basr %r13,0 # get base
17.LPG1: 22.LPG1:
18 23
19 mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0) 24 l %r1,.Lbase_cc-.LPG1(%r13)
25 mvc 0(8,%r1),__LC_LAST_UPDATE_CLOCK
20 lctl %c0,%c15,.Lctl-.LPG1(%r13) # load control registers 26 lctl %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
21 l %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area 27 l %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area
22 # move IPL device to lowcore 28 # move IPL device to lowcore
@@ -69,10 +75,12 @@ startup_continue:
69.Lduald:.rept 8 75.Lduald:.rept 8
70 .long 0x80000000,0,0,0 # invalid access-list entries 76 .long 0x80000000,0,0,0 # invalid access-list entries
71 .endr 77 .endr
78.Lbase_cc:
79 .long sched_clock_base_cc
72 80
73 .org 0x12000
74 .globl _ehead 81 .globl _ehead
75_ehead: 82_ehead:
83
76#ifdef CONFIG_SHARED_KERNEL 84#ifdef CONFIG_SHARED_KERNEL
77 .org 0x100000 85 .org 0x100000
78#endif 86#endif
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index d984a2a380c3..39580e768658 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -1,7 +1,7 @@
1/* 1/*
2 * arch/s390/kernel/head64.S 2 * arch/s390/kernel/head64.S
3 * 3 *
4 * Copyright (C) IBM Corp. 1999,2006 4 * Copyright (C) IBM Corp. 1999,2010
5 * 5 *
6 * Author(s): Hartmut Penner <hp@de.ibm.com> 6 * Author(s): Hartmut Penner <hp@de.ibm.com>
7 * Martin Schwidefsky <schwidefsky@de.ibm.com> 7 * Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -10,80 +10,17 @@
10 * 10 *
11 */ 11 */
12 12
13 .org 0x11000 13#include <linux/init.h>
14#include <asm/asm-offsets.h>
15#include <asm/thread_info.h>
16#include <asm/page.h>
14 17
18__HEAD
19 .globl startup_continue
15startup_continue: 20startup_continue:
16 basr %r13,0 # get base 21 larl %r1,sched_clock_base_cc
17.LPG1: sll %r13,1 # remove high order bit 22 mvc 0(8,%r1),__LC_LAST_UPDATE_CLOCK
18 srl %r13,1 23 larl %r13,.LPG1 # get base
19
20#ifdef CONFIG_ZFCPDUMP
21
22 # check if we have been ipled using zfcp dump:
23
24 tm 0xb9,0x01 # test if subchannel is enabled
25 jno .nodump # subchannel disabled
26 l %r1,0xb8
27 la %r5,.Lipl_schib-.LPG1(%r13)
28 stsch 0(%r5) # get schib of subchannel
29 jne .nodump # schib not available
30 tm 5(%r5),0x01 # devno valid?
31 jno .nodump
32 tm 4(%r5),0x80 # qdio capable device?
33 jno .nodump
34 l %r2,20(%r0) # address of ipl parameter block
35 lhi %r3,0
36 ic %r3,0x148(%r2) # get opt field
37 chi %r3,0x20 # load with dump?
38 jne .nodump
39
40 # store all prefix registers in case of load with dump:
41
42 la %r7,0 # base register for 0 page
43 la %r8,0 # first cpu
44 l %r11,.Lpref_arr_ptr-.LPG1(%r13) # address of prefix array
45 ahi %r11,4 # skip boot cpu
46 lr %r12,%r11
47 ahi %r12,(CONFIG_NR_CPUS*4) # end of prefix array
48 stap .Lcurrent_cpu+2-.LPG1(%r13) # store current cpu addr
491:
50 cl %r8,.Lcurrent_cpu-.LPG1(%r13) # is ipl cpu ?
51 je 4f # if yes get next cpu
522:
53 lr %r9,%r7
54 sigp %r9,%r8,0x9 # stop & store status of cpu
55 brc 8,3f # accepted
56 brc 4,4f # status stored: next cpu
57 brc 2,2b # busy: try again
58 brc 1,4f # not op: next cpu
593:
60 mvc 0(4,%r11),264(%r7) # copy prefix register to prefix array
61 ahi %r11,4 # next element in prefix array
62 clr %r11,%r12
63 je 5f # no more space in prefix array
644:
65 ahi %r8,1 # next cpu (r8 += 1)
66 chi %r8,MAX_CPU_ADDRESS # is last possible cpu ?
67 jle 1b # jump if not last cpu
685:
69 lhi %r1,2 # mode 2 = esame (dump)
70 j 6f
71 .align 4
72.Lipl_schib:
73 .rept 13
74 .long 0
75 .endr
76.nodump:
77 lhi %r1,1 # mode 1 = esame (normal ipl)
786:
79#else
80 lhi %r1,1 # mode 1 = esame (normal ipl)
81#endif /* CONFIG_ZFCPDUMP */
82 mvi __LC_AR_MODE_ID,1 # set esame flag
83 slr %r0,%r0 # set cpuid to zero
84 sigp %r1,%r0,0x12 # switch to esame mode
85 sam64 # switch to 64 bit mode
86 llgfr %r13,%r13 # clear high-order half of base reg
87 lmh %r0,%r15,.Lzero64-.LPG1(%r13) # clear high-order half 24 lmh %r0,%r15,.Lzero64-.LPG1(%r13) # clear high-order half
88 lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers 25 lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
89 lg %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area 26 lg %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area
@@ -108,6 +45,7 @@ startup_continue:
108 lpswe .Lentry-.LPG1(13) # jump to _stext in primary-space, 45 lpswe .Lentry-.LPG1(13) # jump to _stext in primary-space,
109 # virtual and never return ... 46 # virtual and never return ...
110 .align 16 47 .align 16
48.LPG1:
111.Lentry:.quad 0x0000000180000000,_stext 49.Lentry:.quad 0x0000000180000000,_stext
112.Lctl: .quad 0x04350002 # cr0: various things 50.Lctl: .quad 0x04350002 # cr0: various things
113 .quad 0 # cr1: primary space segment table 51 .quad 0 # cr1: primary space segment table
@@ -130,12 +68,6 @@ startup_continue:
130.Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8 68.Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8
131.Lnop: .long 0x07000700 69.Lnop: .long 0x07000700
132.Lzero64:.fill 16,4,0x0 70.Lzero64:.fill 16,4,0x0
133#ifdef CONFIG_ZFCPDUMP
134.Lcurrent_cpu:
135 .long 0x0
136.Lpref_arr_ptr:
137 .long zfcpdump_prefix_array
138#endif /* CONFIG_ZFCPDUMP */
139.Lparmaddr: 71.Lparmaddr:
140 .quad PARMAREA 72 .quad PARMAREA
141 .align 64 73 .align 64
@@ -146,9 +78,9 @@ startup_continue:
146 .long 0x80000000,0,0,0 # invalid access-list entries 78 .long 0x80000000,0,0,0 # invalid access-list entries
147 .endr 79 .endr
148 80
149 .org 0x12000
150 .globl _ehead 81 .globl _ehead
151_ehead: 82_ehead:
83
152#ifdef CONFIG_SHARED_KERNEL 84#ifdef CONFIG_SHARED_KERNEL
153 .org 0x100000 85 .org 0x100000
154#endif 86#endif
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 4d73296fed74..7eedbbcb54aa 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -553,7 +553,7 @@ out:
553 return rc; 553 return rc;
554} 554}
555 555
556static void ipl_run(struct shutdown_trigger *trigger) 556static void __ipl_run(void *unused)
557{ 557{
558 diag308(DIAG308_IPL, NULL); 558 diag308(DIAG308_IPL, NULL);
559 if (MACHINE_IS_VM) 559 if (MACHINE_IS_VM)
@@ -562,6 +562,11 @@ static void ipl_run(struct shutdown_trigger *trigger)
562 reipl_ccw_dev(&ipl_info.data.ccw.dev_id); 562 reipl_ccw_dev(&ipl_info.data.ccw.dev_id);
563} 563}
564 564
565static void ipl_run(struct shutdown_trigger *trigger)
566{
567 smp_switch_to_ipl_cpu(__ipl_run, NULL);
568}
569
565static int __init ipl_init(void) 570static int __init ipl_init(void)
566{ 571{
567 int rc; 572 int rc;
@@ -1039,7 +1044,7 @@ static void get_ipl_string(char *dst, struct ipl_parameter_block *ipb,
1039 sprintf(dst + pos, " PARM %s", vmparm); 1044 sprintf(dst + pos, " PARM %s", vmparm);
1040} 1045}
1041 1046
1042static void reipl_run(struct shutdown_trigger *trigger) 1047static void __reipl_run(void *unused)
1043{ 1048{
1044 struct ccw_dev_id devid; 1049 struct ccw_dev_id devid;
1045 static char buf[128]; 1050 static char buf[128];
@@ -1087,6 +1092,11 @@ static void reipl_run(struct shutdown_trigger *trigger)
1087 disabled_wait((unsigned long) __builtin_return_address(0)); 1092 disabled_wait((unsigned long) __builtin_return_address(0));
1088} 1093}
1089 1094
1095static void reipl_run(struct shutdown_trigger *trigger)
1096{
1097 smp_switch_to_ipl_cpu(__reipl_run, NULL);
1098}
1099
1090static void reipl_block_ccw_init(struct ipl_parameter_block *ipb) 1100static void reipl_block_ccw_init(struct ipl_parameter_block *ipb)
1091{ 1101{
1092 ipb->hdr.len = IPL_PARM_BLK_CCW_LEN; 1102 ipb->hdr.len = IPL_PARM_BLK_CCW_LEN;
@@ -1369,20 +1379,18 @@ static struct kobj_attribute dump_type_attr =
1369 1379
1370static struct kset *dump_kset; 1380static struct kset *dump_kset;
1371 1381
1372static void dump_run(struct shutdown_trigger *trigger) 1382static void __dump_run(void *unused)
1373{ 1383{
1374 struct ccw_dev_id devid; 1384 struct ccw_dev_id devid;
1375 static char buf[100]; 1385 static char buf[100];
1376 1386
1377 switch (dump_method) { 1387 switch (dump_method) {
1378 case DUMP_METHOD_CCW_CIO: 1388 case DUMP_METHOD_CCW_CIO:
1379 smp_send_stop();
1380 devid.devno = dump_block_ccw->ipl_info.ccw.devno; 1389 devid.devno = dump_block_ccw->ipl_info.ccw.devno;
1381 devid.ssid = 0; 1390 devid.ssid = 0;
1382 reipl_ccw_dev(&devid); 1391 reipl_ccw_dev(&devid);
1383 break; 1392 break;
1384 case DUMP_METHOD_CCW_VM: 1393 case DUMP_METHOD_CCW_VM:
1385 smp_send_stop();
1386 sprintf(buf, "STORE STATUS"); 1394 sprintf(buf, "STORE STATUS");
1387 __cpcmd(buf, NULL, 0, NULL); 1395 __cpcmd(buf, NULL, 0, NULL);
1388 sprintf(buf, "IPL %X", dump_block_ccw->ipl_info.ccw.devno); 1396 sprintf(buf, "IPL %X", dump_block_ccw->ipl_info.ccw.devno);
@@ -1396,10 +1404,17 @@ static void dump_run(struct shutdown_trigger *trigger)
1396 diag308(DIAG308_SET, dump_block_fcp); 1404 diag308(DIAG308_SET, dump_block_fcp);
1397 diag308(DIAG308_DUMP, NULL); 1405 diag308(DIAG308_DUMP, NULL);
1398 break; 1406 break;
1399 case DUMP_METHOD_NONE: 1407 default:
1400 return; 1408 break;
1401 } 1409 }
1402 printk(KERN_EMERG "Dump failed!\n"); 1410}
1411
1412static void dump_run(struct shutdown_trigger *trigger)
1413{
1414 if (dump_method == DUMP_METHOD_NONE)
1415 return;
1416 smp_send_stop();
1417 smp_switch_to_ipl_cpu(__dump_run, NULL);
1403} 1418}
1404 1419
1405static int __init dump_ccw_init(void) 1420static int __init dump_ccw_init(void)
@@ -1577,7 +1592,7 @@ static void vmcmd_run(struct shutdown_trigger *trigger)
1577static int vmcmd_init(void) 1592static int vmcmd_init(void)
1578{ 1593{
1579 if (!MACHINE_IS_VM) 1594 if (!MACHINE_IS_VM)
1580 return -ENOTSUPP; 1595 return -EOPNOTSUPP;
1581 vmcmd_kset = kset_create_and_add("vmcmd", NULL, firmware_kobj); 1596 vmcmd_kset = kset_create_and_add("vmcmd", NULL, firmware_kobj);
1582 if (!vmcmd_kset) 1597 if (!vmcmd_kset)
1583 return -ENOMEM; 1598 return -ENOMEM;
@@ -1595,7 +1610,7 @@ static void stop_run(struct shutdown_trigger *trigger)
1595{ 1610{
1596 if (strcmp(trigger->name, ON_PANIC_STR) == 0) 1611 if (strcmp(trigger->name, ON_PANIC_STR) == 0)
1597 disabled_wait((unsigned long) __builtin_return_address(0)); 1612 disabled_wait((unsigned long) __builtin_return_address(0));
1598 while (signal_processor(smp_processor_id(), sigp_stop) == sigp_busy) 1613 while (sigp(smp_processor_id(), sigp_stop) == sigp_busy)
1599 cpu_relax(); 1614 cpu_relax();
1600 for (;;); 1615 for (;;);
1601} 1616}
@@ -1902,7 +1917,6 @@ void __init ipl_update_parameters(void)
1902void __init ipl_save_parameters(void) 1917void __init ipl_save_parameters(void)
1903{ 1918{
1904 struct cio_iplinfo iplinfo; 1919 struct cio_iplinfo iplinfo;
1905 unsigned int *ipl_ptr;
1906 void *src, *dst; 1920 void *src, *dst;
1907 1921
1908 if (cio_get_iplinfo(&iplinfo)) 1922 if (cio_get_iplinfo(&iplinfo))
@@ -1913,11 +1927,10 @@ void __init ipl_save_parameters(void)
1913 if (!iplinfo.is_qdio) 1927 if (!iplinfo.is_qdio)
1914 return; 1928 return;
1915 ipl_flags |= IPL_PARMBLOCK_VALID; 1929 ipl_flags |= IPL_PARMBLOCK_VALID;
1916 ipl_ptr = (unsigned int *)__LC_IPL_PARMBLOCK_PTR; 1930 src = (void *)(unsigned long)S390_lowcore.ipl_parmblock_ptr;
1917 src = (void *)(unsigned long)*ipl_ptr;
1918 dst = (void *)IPL_PARMBLOCK_ORIGIN; 1931 dst = (void *)IPL_PARMBLOCK_ORIGIN;
1919 memmove(dst, src, PAGE_SIZE); 1932 memmove(dst, src, PAGE_SIZE);
1920 *ipl_ptr = IPL_PARMBLOCK_ORIGIN; 1933 S390_lowcore.ipl_parmblock_ptr = IPL_PARMBLOCK_ORIGIN;
1921} 1934}
1922 1935
1923static LIST_HEAD(rcall); 1936static LIST_HEAD(rcall);
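
Every shutdown trigger touched above is reshaped the same way: the actual work moves into a __<action>_run(void *) callback, and the trigger handler only hands that callback to smp_switch_to_ipl_cpu(), which the smp.c hunk further down introduces. A minimal sketch of the pattern, with smp_switch_to_ipl_cpu() stubbed so the example runs in place:

#include <stdio.h>

/* Stub standing in for smp_switch_to_ipl_cpu(); the real one re-runs the
 * callback on the IPL cpu after stopping the current one. */
static void smp_switch_to_ipl_cpu_stub(void (*func)(void *), void *data)
{
    func(data);
}

static void __example_run(void *unused)
{
    (void)unused;
    /* the real callbacks do the diag308/reipl_ccw_dev work here */
    printf("running on the IPL cpu\n");
}

static void example_run(void)
{
    /* the trigger handler is reduced to this hand-off */
    smp_switch_to_ipl_cpu_stub(__example_run, NULL);
}

int main(void)
{
    example_run();
    return 0;
}
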
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 131d7ee8b416..a922d51df6bf 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -54,11 +54,11 @@ void machine_shutdown(void)
54{ 54{
55} 55}
56 56
57void machine_kexec(struct kimage *image) 57static void __machine_kexec(void *data)
58{ 58{
59 relocate_kernel_t data_mover; 59 relocate_kernel_t data_mover;
60 struct kimage *image = data;
60 61
61 smp_send_stop();
62 pfault_fini(); 62 pfault_fini();
63 s390_reset_system(); 63 s390_reset_system();
64 64
@@ -68,3 +68,9 @@ void machine_kexec(struct kimage *image)
68 (*data_mover)(&image->head, image->start); 68 (*data_mover)(&image->head, image->start);
69 for (;;); 69 for (;;);
70} 70}
71
72void machine_kexec(struct kimage *image)
73{
74 smp_send_stop();
75 smp_switch_to_ipl_cpu(__machine_kexec, image);
76}
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S
index 2f481cc3d1c9..cb899d9f8505 100644
--- a/arch/s390/kernel/reipl.S
+++ b/arch/s390/kernel/reipl.S
@@ -6,7 +6,7 @@
6 * Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com) 6 * Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com)
7 */ 7 */
8 8
9#include <asm/lowcore.h> 9#include <asm/asm-offsets.h>
10 10
11# 11#
12# do_reipl_asm 12# do_reipl_asm
diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S
index 774147824c3d..5e73dee63baa 100644
--- a/arch/s390/kernel/reipl64.S
+++ b/arch/s390/kernel/reipl64.S
@@ -4,7 +4,7 @@
4 * Denis Joseph Barrow, 4 * Denis Joseph Barrow,
5 */ 5 */
6 6
7#include <asm/lowcore.h> 7#include <asm/asm-offsets.h>
8 8
9# 9#
10# do_reipl_asm 10# do_reipl_asm
diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S
index e27ca63076d1..27af3bf3a009 100644
--- a/arch/s390/kernel/sclp.S
+++ b/arch/s390/kernel/sclp.S
@@ -9,8 +9,10 @@
9 */ 9 */
10 10
11LC_EXT_NEW_PSW = 0x58 # addr of ext int handler 11LC_EXT_NEW_PSW = 0x58 # addr of ext int handler
12LC_EXT_NEW_PSW_64 = 0x1b0 # addr of ext int handler 64 bit
12LC_EXT_INT_PARAM = 0x80 # addr of ext int parameter 13LC_EXT_INT_PARAM = 0x80 # addr of ext int parameter
13LC_EXT_INT_CODE = 0x86 # addr of ext int code 14LC_EXT_INT_CODE = 0x86 # addr of ext int code
15LC_AR_MODE_ID = 0xa3
14 16
15# 17#
16# Subroutine which waits synchronously until either an external interruption 18# Subroutine which waits synchronously until either an external interruption
@@ -30,8 +32,16 @@ _sclp_wait_int:
30.LbaseS1: 32.LbaseS1:
31 ahi %r15,-96 # create stack frame 33 ahi %r15,-96 # create stack frame
32 la %r8,LC_EXT_NEW_PSW # register int handler 34 la %r8,LC_EXT_NEW_PSW # register int handler
33 mvc .LoldpswS1-.LbaseS1(8,%r13),0(%r8) 35 la %r9,.LextpswS1-.LbaseS1(%r13)
34 mvc 0(8,%r8),.LextpswS1-.LbaseS1(%r13) 36#ifdef CONFIG_64BIT
37 tm LC_AR_MODE_ID,1
38 jno .Lesa1
39 la %r8,LC_EXT_NEW_PSW_64 # register int handler 64 bit
40 la %r9,.LextpswS1_64-.LbaseS1(%r13)
41.Lesa1:
42#endif
43 mvc .LoldpswS1-.LbaseS1(16,%r13),0(%r8)
44 mvc 0(16,%r8),0(%r9)
35 lhi %r6,0x0200 # cr mask for ext int (cr0.54) 45 lhi %r6,0x0200 # cr mask for ext int (cr0.54)
36 ltr %r2,%r2 46 ltr %r2,%r2
37 jz .LsetctS1 47 jz .LsetctS1
@@ -64,15 +74,19 @@ _sclp_wait_int:
64.LtimeoutS1: 74.LtimeoutS1:
65 lctl %c0,%c0,.LctlS1-.LbaseS1(%r13) # restore interrupt setting 75 lctl %c0,%c0,.LctlS1-.LbaseS1(%r13) # restore interrupt setting
66 # restore old handler 76 # restore old handler
67 mvc 0(8,%r8),.LoldpswS1-.LbaseS1(%r13) 77 mvc 0(16,%r8),.LoldpswS1-.LbaseS1(%r13)
68 lm %r6,%r15,120(%r15) # restore registers 78 lm %r6,%r15,120(%r15) # restore registers
69 br %r14 # return to caller 79 br %r14 # return to caller
70 80
71 .align 8 81 .align 8
72.LoldpswS1: 82.LoldpswS1:
73 .long 0, 0 # old ext int PSW 83 .long 0, 0, 0, 0 # old ext int PSW
74.LextpswS1: 84.LextpswS1:
75 .long 0x00080000, 0x80000000+.LwaitS1 # PSW to handle ext int 85 .long 0x00080000, 0x80000000+.LwaitS1 # PSW to handle ext int
86#ifdef CONFIG_64BIT
87.LextpswS1_64:
88 .quad 0x0000000180000000, .LwaitS1 # PSW to handle ext int, 64 bit
89#endif
76.LwaitpswS1: 90.LwaitpswS1:
77 .long 0x010a0000, 0x00000000+.LloopS1 # PSW to wait for ext int 91 .long 0x010a0000, 0x00000000+.LloopS1 # PSW to wait for ext int
78.LtimeS1: 92.LtimeS1:
@@ -250,6 +264,13 @@ _sclp_print:
250_sclp_print_early: 264_sclp_print_early:
251 stm %r6,%r15,24(%r15) # save registers 265 stm %r6,%r15,24(%r15) # save registers
252 ahi %r15,-96 # create stack frame 266 ahi %r15,-96 # create stack frame
267#ifdef CONFIG_64BIT
268 tm LC_AR_MODE_ID,1
269 jno .Lesa2
270 ahi %r15,-80
271 stmh %r6,%r15,96(%r15) # store upper register halves
272.Lesa2:
273#endif
253 lr %r10,%r2 # save string pointer 274 lr %r10,%r2 # save string pointer
254 lhi %r2,0 275 lhi %r2,0
255 bras %r14,_sclp_setup # enable console 276 bras %r14,_sclp_setup # enable console
@@ -262,6 +283,13 @@ _sclp_print_early:
262 lhi %r2,1 283 lhi %r2,1
263 bras %r14,_sclp_setup # disable console 284 bras %r14,_sclp_setup # disable console
264.LendS5: 285.LendS5:
286#ifdef CONFIG_64BIT
287 tm LC_AR_MODE_ID,1
288 jno .Lesa3
 289 lmh %r6,%r15,96(%r15) # restore upper register halves
290 ahi %r15,80
291.Lesa3:
292#endif
265 lm %r6,%r15,120(%r15) # restore registers 293 lm %r6,%r15,120(%r15) # restore registers
266 br %r14 294 br %r14
267 295
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 8d8957b38ab3..77a63ae419f0 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -396,15 +396,12 @@ static void __init
396setup_lowcore(void) 396setup_lowcore(void)
397{ 397{
398 struct _lowcore *lc; 398 struct _lowcore *lc;
399 int lc_pages;
400 399
401 /* 400 /*
402 * Setup lowcore for boot cpu 401 * Setup lowcore for boot cpu
403 */ 402 */
404 lc_pages = sizeof(void *) == 8 ? 2 : 1; 403 BUILD_BUG_ON(sizeof(struct _lowcore) != LC_PAGES * 4096);
405 lc = (struct _lowcore *) 404 lc = __alloc_bootmem(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0);
406 __alloc_bootmem(lc_pages * PAGE_SIZE, lc_pages * PAGE_SIZE, 0);
407 memset(lc, 0, lc_pages * PAGE_SIZE);
408 lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; 405 lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
409 lc->restart_psw.addr = 406 lc->restart_psw.addr =
410 PSW_ADDR_AMODE | (unsigned long) restart_int_handler; 407 PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
@@ -804,7 +801,7 @@ setup_arch(char **cmdline_p)
804 if (MACHINE_IS_VM) 801 if (MACHINE_IS_VM)
805 pr_info("Linux is running as a z/VM " 802 pr_info("Linux is running as a z/VM "
806 "guest operating system in 31-bit mode\n"); 803 "guest operating system in 31-bit mode\n");
807 else 804 else if (MACHINE_IS_LPAR)
808 pr_info("Linux is running natively in 31-bit mode\n"); 805 pr_info("Linux is running natively in 31-bit mode\n");
809 if (MACHINE_HAS_IEEE) 806 if (MACHINE_HAS_IEEE)
810 pr_info("The hardware system has IEEE compatible " 807 pr_info("The hardware system has IEEE compatible "
@@ -818,7 +815,7 @@ setup_arch(char **cmdline_p)
818 "guest operating system in 64-bit mode\n"); 815 "guest operating system in 64-bit mode\n");
819 else if (MACHINE_IS_KVM) 816 else if (MACHINE_IS_KVM)
820 pr_info("Linux is running under KVM in 64-bit mode\n"); 817 pr_info("Linux is running under KVM in 64-bit mode\n");
821 else 818 else if (MACHINE_IS_LPAR)
822 pr_info("Linux is running natively in 64-bit mode\n"); 819 pr_info("Linux is running natively in 64-bit mode\n");
823#endif /* CONFIG_64BIT */ 820#endif /* CONFIG_64BIT */
824 821
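
The setup_lowcore() hunk trades the old run-time page-count computation (2 pages on 64 bit, 1 on 31 bit) for a compile-time guarantee that struct _lowcore is exactly LC_PAGES pages, plus an aligned bootmem allocation of that size. Outside the kernel the same guarantee can be written with a C11 static assertion; the structure below is a hypothetical stand-in, not the real lowcore layout:

#include <assert.h>     /* C11 static_assert */

#define LC_PAGES  2     /* 2 pages on 64 bit, as the removed code computed */
#define PAGE_SIZE 4096UL

/* Hypothetical stand-in for struct _lowcore, padded to whole pages. */
struct lowcore_sketch {
    unsigned long restart_psw[2];
    unsigned char pad[LC_PAGES * PAGE_SIZE - 2 * sizeof(unsigned long)];
};

/* Same intent as the BUILD_BUG_ON() added above. */
static_assert(sizeof(struct lowcore_sketch) == LC_PAGES * PAGE_SIZE,
              "lowcore must be exactly LC_PAGES pages");

int main(void)
{
    return 0;
}
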
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 76a6fdd46c45..8b10127c00ad 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -36,6 +36,7 @@
36#include <linux/cpu.h> 36#include <linux/cpu.h>
37#include <linux/timex.h> 37#include <linux/timex.h>
38#include <linux/bootmem.h> 38#include <linux/bootmem.h>
39#include <asm/asm-offsets.h>
39#include <asm/ipl.h> 40#include <asm/ipl.h>
40#include <asm/setup.h> 41#include <asm/setup.h>
41#include <asm/sigp.h> 42#include <asm/sigp.h>
@@ -53,7 +54,7 @@
53#include "entry.h" 54#include "entry.h"
54 55
55/* logical cpu to cpu address */ 56/* logical cpu to cpu address */
56int __cpu_logical_map[NR_CPUS]; 57unsigned short __cpu_logical_map[NR_CPUS];
57 58
58static struct task_struct *current_set[NR_CPUS]; 59static struct task_struct *current_set[NR_CPUS];
59 60
@@ -72,13 +73,13 @@ static int cpu_management;
72 73
73static DEFINE_PER_CPU(struct cpu, cpu_devices); 74static DEFINE_PER_CPU(struct cpu, cpu_devices);
74 75
75static void smp_ext_bitcall(int, ec_bit_sig); 76static void smp_ext_bitcall(int, int);
76 77
77static int cpu_stopped(int cpu) 78static int raw_cpu_stopped(int cpu)
78{ 79{
79 __u32 status; 80 u32 status;
80 81
81 switch (signal_processor_ps(&status, 0, cpu, sigp_sense)) { 82 switch (raw_sigp_ps(&status, 0, cpu, sigp_sense)) {
82 case sigp_status_stored: 83 case sigp_status_stored:
83 /* Check for stopped and check stop state */ 84 /* Check for stopped and check stop state */
84 if (status & 0x50) 85 if (status & 0x50)
@@ -90,6 +91,44 @@ static int cpu_stopped(int cpu)
90 return 0; 91 return 0;
91} 92}
92 93
94static inline int cpu_stopped(int cpu)
95{
96 return raw_cpu_stopped(cpu_logical_map(cpu));
97}
98
99void smp_switch_to_ipl_cpu(void (*func)(void *), void *data)
100{
101 struct _lowcore *lc, *current_lc;
102 struct stack_frame *sf;
103 struct pt_regs *regs;
104 unsigned long sp;
105
106 if (smp_processor_id() == 0)
107 func(data);
108 __load_psw_mask(PSW_BASE_BITS | PSW_DEFAULT_KEY);
109 /* Disable lowcore protection */
110 __ctl_clear_bit(0, 28);
111 current_lc = lowcore_ptr[smp_processor_id()];
112 lc = lowcore_ptr[0];
113 if (!lc)
114 lc = current_lc;
115 lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
116 lc->restart_psw.addr = PSW_ADDR_AMODE | (unsigned long) smp_restart_cpu;
117 if (!cpu_online(0))
118 smp_switch_to_cpu(func, data, 0, stap(), __cpu_logical_map[0]);
119 while (sigp(0, sigp_stop_and_store_status) == sigp_busy)
120 cpu_relax();
121 sp = lc->panic_stack;
122 sp -= sizeof(struct pt_regs);
123 regs = (struct pt_regs *) sp;
124 memcpy(&regs->gprs, &current_lc->gpregs_save_area, sizeof(regs->gprs));
125 regs->psw = lc->psw_save_area;
126 sp -= STACK_FRAME_OVERHEAD;
127 sf = (struct stack_frame *) sp;
128 sf->back_chain = regs->gprs[15];
129 smp_switch_to_cpu(func, data, sp, stap(), __cpu_logical_map[0]);
130}
131
93void smp_send_stop(void) 132void smp_send_stop(void)
94{ 133{
95 int cpu, rc; 134 int cpu, rc;
@@ -103,7 +142,7 @@ void smp_send_stop(void)
103 if (cpu == smp_processor_id()) 142 if (cpu == smp_processor_id())
104 continue; 143 continue;
105 do { 144 do {
106 rc = signal_processor(cpu, sigp_stop); 145 rc = sigp(cpu, sigp_stop);
107 } while (rc == sigp_busy); 146 } while (rc == sigp_busy);
108 147
109 while (!cpu_stopped(cpu)) 148 while (!cpu_stopped(cpu))
@@ -139,13 +178,13 @@ static void do_ext_call_interrupt(__u16 code)
139 * Send an external call sigp to another cpu and return without waiting 178 * Send an external call sigp to another cpu and return without waiting
140 * for its completion. 179 * for its completion.
141 */ 180 */
142static void smp_ext_bitcall(int cpu, ec_bit_sig sig) 181static void smp_ext_bitcall(int cpu, int sig)
143{ 182{
144 /* 183 /*
145 * Set signaling bit in lowcore of target cpu and kick it 184 * Set signaling bit in lowcore of target cpu and kick it
146 */ 185 */
147 set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast); 186 set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
148 while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy) 187 while (sigp(cpu, sigp_emergency_signal) == sigp_busy)
149 udelay(10); 188 udelay(10);
150} 189}
151 190
@@ -239,24 +278,8 @@ void smp_ctl_clear_bit(int cr, int bit)
239} 278}
240EXPORT_SYMBOL(smp_ctl_clear_bit); 279EXPORT_SYMBOL(smp_ctl_clear_bit);
241 280
242/*
243 * In early ipl state a temp. logically cpu number is needed, so the sigp
244 * functions can be used to sense other cpus. Since NR_CPUS is >= 2 on
245 * CONFIG_SMP and the ipl cpu is logical cpu 0, it must be 1.
246 */
247#define CPU_INIT_NO 1
248
249#ifdef CONFIG_ZFCPDUMP 281#ifdef CONFIG_ZFCPDUMP
250 282
251/*
252 * zfcpdump_prefix_array holds prefix registers for the following scenario:
253 * 64 bit zfcpdump kernel and 31 bit kernel which is to be dumped. We have to
254 * save its prefix registers, since they get lost, when switching from 31 bit
255 * to 64 bit.
256 */
257unsigned int zfcpdump_prefix_array[NR_CPUS + 1] \
258 __attribute__((__section__(".data")));
259
260static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) 283static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
261{ 284{
262 if (ipl_info.type != IPL_TYPE_FCP_DUMP) 285 if (ipl_info.type != IPL_TYPE_FCP_DUMP)
@@ -266,21 +289,15 @@ static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
266 "the dump\n", cpu, NR_CPUS - 1); 289 "the dump\n", cpu, NR_CPUS - 1);
267 return; 290 return;
268 } 291 }
269 zfcpdump_save_areas[cpu] = kmalloc(sizeof(union save_area), GFP_KERNEL); 292 zfcpdump_save_areas[cpu] = kmalloc(sizeof(struct save_area), GFP_KERNEL);
270 __cpu_logical_map[CPU_INIT_NO] = (__u16) phy_cpu; 293 while (raw_sigp(phy_cpu, sigp_stop_and_store_status) == sigp_busy)
271 while (signal_processor(CPU_INIT_NO, sigp_stop_and_store_status) ==
272 sigp_busy)
273 cpu_relax(); 294 cpu_relax();
274 memcpy(zfcpdump_save_areas[cpu], 295 memcpy(zfcpdump_save_areas[cpu],
275 (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE, 296 (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE,
276 SAVE_AREA_SIZE); 297 sizeof(struct save_area));
277#ifdef CONFIG_64BIT
278 /* copy original prefix register */
279 zfcpdump_save_areas[cpu]->s390x.pref_reg = zfcpdump_prefix_array[cpu];
280#endif
281} 298}
282 299
283union save_area *zfcpdump_save_areas[NR_CPUS + 1]; 300struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
284EXPORT_SYMBOL_GPL(zfcpdump_save_areas); 301EXPORT_SYMBOL_GPL(zfcpdump_save_areas);
285 302
286#else 303#else
@@ -389,8 +406,7 @@ static void __init smp_detect_cpus(void)
389 for (cpu = 0; cpu <= MAX_CPU_ADDRESS; cpu++) { 406 for (cpu = 0; cpu <= MAX_CPU_ADDRESS; cpu++) {
390 if (cpu == boot_cpu_addr) 407 if (cpu == boot_cpu_addr)
391 continue; 408 continue;
392 __cpu_logical_map[CPU_INIT_NO] = cpu; 409 if (!raw_cpu_stopped(cpu))
393 if (!cpu_stopped(CPU_INIT_NO))
394 continue; 410 continue;
395 smp_get_save_area(c_cpus, cpu); 411 smp_get_save_area(c_cpus, cpu);
396 c_cpus++; 412 c_cpus++;
@@ -413,8 +429,7 @@ static void __init smp_detect_cpus(void)
413 cpu_addr = info->cpu[cpu].address; 429 cpu_addr = info->cpu[cpu].address;
414 if (cpu_addr == boot_cpu_addr) 430 if (cpu_addr == boot_cpu_addr)
415 continue; 431 continue;
416 __cpu_logical_map[CPU_INIT_NO] = cpu_addr; 432 if (!raw_cpu_stopped(cpu_addr)) {
417 if (!cpu_stopped(CPU_INIT_NO)) {
418 s_cpus++; 433 s_cpus++;
419 continue; 434 continue;
420 } 435 }
@@ -533,18 +548,18 @@ static void smp_free_lowcore(int cpu)
533/* Upping and downing of CPUs */ 548/* Upping and downing of CPUs */
534int __cpuinit __cpu_up(unsigned int cpu) 549int __cpuinit __cpu_up(unsigned int cpu)
535{ 550{
536 struct task_struct *idle;
537 struct _lowcore *cpu_lowcore; 551 struct _lowcore *cpu_lowcore;
552 struct task_struct *idle;
538 struct stack_frame *sf; 553 struct stack_frame *sf;
539 sigp_ccode ccode;
540 u32 lowcore; 554 u32 lowcore;
555 int ccode;
541 556
542 if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED) 557 if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED)
543 return -EIO; 558 return -EIO;
544 if (smp_alloc_lowcore(cpu)) 559 if (smp_alloc_lowcore(cpu))
545 return -ENOMEM; 560 return -ENOMEM;
546 do { 561 do {
547 ccode = signal_processor(cpu, sigp_initial_cpu_reset); 562 ccode = sigp(cpu, sigp_initial_cpu_reset);
548 if (ccode == sigp_busy) 563 if (ccode == sigp_busy)
549 udelay(10); 564 udelay(10);
550 if (ccode == sigp_not_operational) 565 if (ccode == sigp_not_operational)
@@ -552,7 +567,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
552 } while (ccode == sigp_busy); 567 } while (ccode == sigp_busy);
553 568
554 lowcore = (u32)(unsigned long)lowcore_ptr[cpu]; 569 lowcore = (u32)(unsigned long)lowcore_ptr[cpu];
555 while (signal_processor_p(lowcore, cpu, sigp_set_prefix) == sigp_busy) 570 while (sigp_p(lowcore, cpu, sigp_set_prefix) == sigp_busy)
556 udelay(10); 571 udelay(10);
557 572
558 idle = current_set[cpu]; 573 idle = current_set[cpu];
@@ -578,7 +593,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
578 cpu_lowcore->ftrace_func = S390_lowcore.ftrace_func; 593 cpu_lowcore->ftrace_func = S390_lowcore.ftrace_func;
579 eieio(); 594 eieio();
580 595
581 while (signal_processor(cpu, sigp_restart) == sigp_busy) 596 while (sigp(cpu, sigp_restart) == sigp_busy)
582 udelay(10); 597 udelay(10);
583 598
584 while (!cpu_online(cpu)) 599 while (!cpu_online(cpu))
@@ -640,7 +655,7 @@ void __cpu_die(unsigned int cpu)
640 /* Wait until target cpu is down */ 655 /* Wait until target cpu is down */
641 while (!cpu_stopped(cpu)) 656 while (!cpu_stopped(cpu))
642 cpu_relax(); 657 cpu_relax();
643 while (signal_processor_p(0, cpu, sigp_set_prefix) == sigp_busy) 658 while (sigp_p(0, cpu, sigp_set_prefix) == sigp_busy)
644 udelay(10); 659 udelay(10);
645 smp_free_lowcore(cpu); 660 smp_free_lowcore(cpu);
646 pr_info("Processor %d stopped\n", cpu); 661 pr_info("Processor %d stopped\n", cpu);
@@ -649,7 +664,7 @@ void __cpu_die(unsigned int cpu)
649void cpu_die(void) 664void cpu_die(void)
650{ 665{
651 idle_task_exit(); 666 idle_task_exit();
652 while (signal_processor(smp_processor_id(), sigp_stop) == sigp_busy) 667 while (sigp(smp_processor_id(), sigp_stop) == sigp_busy)
653 cpu_relax(); 668 cpu_relax();
654 for (;;); 669 for (;;);
655} 670}
@@ -765,7 +780,8 @@ static ssize_t cpu_configure_store(struct sys_device *dev,
765 get_online_cpus(); 780 get_online_cpus();
766 mutex_lock(&smp_cpu_state_mutex); 781 mutex_lock(&smp_cpu_state_mutex);
767 rc = -EBUSY; 782 rc = -EBUSY;
768 if (cpu_online(cpu)) 783 /* disallow configuration changes of online cpus and cpu 0 */
784 if (cpu_online(cpu) || cpu == 0)
769 goto out; 785 goto out;
770 rc = 0; 786 rc = 0;
771 switch (val) { 787 switch (val) {
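
The smp.c rework above splits cpu sensing into raw_cpu_stopped(), which takes a physical cpu address, and cpu_stopped(), which first translates a logical cpu number through the (now unsigned short) logical map; the temporary CPU_INIT_NO slot disappears because callers such as smp_detect_cpus() and smp_get_save_area() can sense the physical address directly. A sketch of that split with the sigp sense order stubbed out, reusing the status mask 0x50 (stopped or check-stop) from the code above:

#include <stdio.h>

enum { CC_ORDER_ACCEPTED = 0, CC_STATUS_STORED = 1, CC_BUSY = 2 };

static unsigned short cpu_logical_map_sketch[4] = { 0, 1, 2, 3 };

/* Stub for the sigp sense order; pretend every cpu reports "stopped". */
static int raw_sigp_sense_stub(unsigned int *status, unsigned short phys_addr)
{
    (void)phys_addr;
    *status = 0x40;                 /* stopped status bit */
    return CC_STATUS_STORED;
}

static int raw_cpu_stopped_sketch(unsigned short phys_addr)
{
    unsigned int status;

    if (raw_sigp_sense_stub(&status, phys_addr) != CC_STATUS_STORED)
        return 0;
    return (status & 0x50) != 0;    /* stopped or check-stop, as above */
}

static int cpu_stopped_sketch(int logical_cpu)
{
    return raw_cpu_stopped_sketch(cpu_logical_map_sketch[logical_cpu]);
}

int main(void)
{
    printf("cpu 1 stopped: %d\n", cpu_stopped_sketch(1));
    return 0;
}
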
diff --git a/arch/s390/kernel/switch_cpu.S b/arch/s390/kernel/switch_cpu.S
new file mode 100644
index 000000000000..469f11b574fa
--- /dev/null
+++ b/arch/s390/kernel/switch_cpu.S
@@ -0,0 +1,58 @@
1/*
2 * 31-bit switch cpu code
3 *
4 * Copyright IBM Corp. 2009
5 *
6 */
7
8#include <asm/asm-offsets.h>
9#include <asm/ptrace.h>
10
11# smp_switch_to_cpu switches to destination cpu and executes the passed function
12# Parameter: %r2 - function to call
13#            %r3 - function parameter
14#            %r4 - stack pointer
15#            %r5 - current cpu
16#            %r6 - destination cpu
17
18 .section .text
19 .align 4
20 .globl smp_switch_to_cpu
21smp_switch_to_cpu:
22 stm %r6,%r15,__SF_GPRS(%r15)
23 lr %r1,%r15
24 ahi %r15,-STACK_FRAME_OVERHEAD
25 st %r1,__SF_BACKCHAIN(%r15)
26 basr %r13,0
270: la %r1,.gprregs_addr-0b(%r13)
28 l %r1,0(%r1)
29 stm %r0,%r15,0(%r1)
301: sigp %r0,%r6,__SIGP_RESTART /* start destination CPU */
31 brc 2,1b /* busy, try again */
322: sigp %r0,%r5,__SIGP_STOP /* stop current CPU */
33 brc 2,2b /* busy, try again */
343: j 3b
35
36 .globl smp_restart_cpu
37smp_restart_cpu:
38 basr %r13,0
390: la %r1,.gprregs_addr-0b(%r13)
40 l %r1,0(%r1)
41 lm %r0,%r15,0(%r1)
421: sigp %r0,%r5,__SIGP_SENSE /* Wait for calling CPU */
43 brc 10,1b /* busy, accepted (status 0), running */
44 tmll %r0,0x40 /* Test if calling CPU is stopped */
45 jz 1b
46 ltr %r4,%r4 /* New stack ? */
47 jz 1f
48 lr %r15,%r4
491: basr %r14,%r2
50
51.gprregs_addr:
52 .long .gprregs
53
54 .section .data,"aw",@progbits
55.gprregs:
56 .rept 16
57 .long 0
58 .endr
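The header comment above describes a plain register calling convention, so the routine can be driven from C. Below is a minimal caller sketch, assuming a prototype along the lines of the one this series is expected to declare in the arch headers; resume_work(), switch_example(), the use of stap() for the current cpu address, and the destination address 0 are illustrative assumptions only, not part of the patch:

    extern void smp_switch_to_cpu(void (*func)(void *), void *data,
                                  unsigned long sp, int from_cpu, int to_cpu);

    static void resume_work(void *data)      /* reaches the target via %r2/%r3 */
    {
            /* continue on the destination cpu with the passed argument */
    }

    static void switch_example(void *arg, unsigned long new_stack)
    {
            /* %r4 = new stack, %r5 = current cpu address, %r6 = destination */
            smp_switch_to_cpu(resume_work, arg, new_stack, stap(), 0);
    }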
diff --git a/arch/s390/kernel/switch_cpu64.S b/arch/s390/kernel/switch_cpu64.S
new file mode 100644
index 000000000000..d94aacc898cb
--- /dev/null
+++ b/arch/s390/kernel/switch_cpu64.S
@@ -0,0 +1,51 @@
1/*
2 * 64-bit switch cpu code
3 *
4 * Copyright IBM Corp. 2009
5 *
6 */
7
8#include <asm/asm-offsets.h>
9#include <asm/ptrace.h>
10
11# smp_switch_to_cpu switches to destination cpu and executes the passed function
12# Parameter: %r2 - function to call
13#            %r3 - function parameter
14#            %r4 - stack pointer
15#            %r5 - current cpu
16#            %r6 - destination cpu
17
18 .section .text
19 .align 4
20 .globl smp_switch_to_cpu
21smp_switch_to_cpu:
22 stmg %r6,%r15,__SF_GPRS(%r15)
23 lgr %r1,%r15
24 aghi %r15,-STACK_FRAME_OVERHEAD
25 stg %r1,__SF_BACKCHAIN(%r15)
26 larl %r1,.gprregs
27 stmg %r0,%r15,0(%r1)
281: sigp %r0,%r6,__SIGP_RESTART /* start destination CPU */
29 brc 2,1b /* busy, try again */
302: sigp %r0,%r5,__SIGP_STOP /* stop current CPU */
31 brc 2,2b /* busy, try again */
323: j 3b
33
34 .globl smp_restart_cpu
35smp_restart_cpu:
36 larl %r1,.gprregs
37 lmg %r0,%r15,0(%r1)
381: sigp %r0,%r5,__SIGP_SENSE /* Wait for calling CPU */
39 brc 10,1b /* busy, accepted (status 0), running */
40 tmll %r0,0x40 /* Test if calling CPU is stopped */
41 jz 1b
42 ltgr %r4,%r4 /* New stack ? */
43 jz 1f
44 lgr %r15,%r4
451: basr %r14,%r2
46
47 .section .data,"aw",@progbits
48.gprregs:
49 .rept 16
50 .quad 0
51 .endr
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S
index 0c26cc1898ec..b354427e03b7 100644
--- a/arch/s390/kernel/swsusp_asm64.S
+++ b/arch/s390/kernel/swsusp_asm64.S
@@ -176,7 +176,7 @@ pgm_check_entry:
176 cgr %r1,%r2 176 cgr %r1,%r2
177 je restore_registers /* r1 = r2 -> nothing to do */ 177 je restore_registers /* r1 = r2 -> nothing to do */
178 larl %r4,.Lrestart_suspend_psw /* Set new restart PSW */ 178 larl %r4,.Lrestart_suspend_psw /* Set new restart PSW */
179 mvc __LC_RESTART_PSW(16,%r0),0(%r4) 179 mvc __LC_RST_NEW_PSW(16,%r0),0(%r4)
1803: 1803:
181 sigp %r9,%r1,__SIGP_INITIAL_CPU_RESET 181 sigp %r9,%r1,__SIGP_INITIAL_CPU_RESET
182 brc 8,4f /* accepted */ 182 brc 8,4f /* accepted */
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 65065ac48ed3..a8f93f1705ad 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -51,14 +51,6 @@
51#define USECS_PER_JIFFY ((unsigned long) 1000000/HZ) 51#define USECS_PER_JIFFY ((unsigned long) 1000000/HZ)
52#define CLK_TICKS_PER_JIFFY ((unsigned long) USECS_PER_JIFFY << 12) 52#define CLK_TICKS_PER_JIFFY ((unsigned long) USECS_PER_JIFFY << 12)
53 53
54/*
55 * Create a small time difference between the timer interrupts
56 * on the different cpus to avoid lock contention.
57 */
58#define CPU_DEVIATION (smp_processor_id() << 12)
59
60#define TICK_SIZE tick
61
62u64 sched_clock_base_cc = -1; /* Force to data section. */ 54u64 sched_clock_base_cc = -1; /* Force to data section. */
63EXPORT_SYMBOL_GPL(sched_clock_base_cc); 55EXPORT_SYMBOL_GPL(sched_clock_base_cc);
64 56
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 5f99e66c51c3..6bc9c197aa91 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -23,6 +23,7 @@
23#include <linux/security.h> 23#include <linux/security.h>
24#include <linux/bootmem.h> 24#include <linux/bootmem.h>
25#include <linux/compat.h> 25#include <linux/compat.h>
26#include <asm/asm-offsets.h>
26#include <asm/pgtable.h> 27#include <asm/pgtable.h>
27#include <asm/system.h> 28#include <asm/system.h>
28#include <asm/processor.h> 29#include <asm/processor.h>
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index 8300309698fa..9e4c84187cf5 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -39,7 +39,7 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
39 vcpu->run->s390_reset_flags = 0; 39 vcpu->run->s390_reset_flags = 0;
40 break; 40 break;
41 default: 41 default:
42 return -ENOTSUPP; 42 return -EOPNOTSUPP;
43 } 43 }
44 44
45 atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); 45 atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
@@ -62,6 +62,6 @@ int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
62 case 0x308: 62 case 0x308:
63 return __diag_ipl_functions(vcpu); 63 return __diag_ipl_functions(vcpu);
64 default: 64 default:
65 return -ENOTSUPP; 65 return -EOPNOTSUPP;
66 } 66 }
67} 67}
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index b40096494e46..3ddc30895e31 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -32,7 +32,7 @@ static int handle_lctlg(struct kvm_vcpu *vcpu)
32 32
33 vcpu->stat.instruction_lctlg++; 33 vcpu->stat.instruction_lctlg++;
34 if ((vcpu->arch.sie_block->ipb & 0xff) != 0x2f) 34 if ((vcpu->arch.sie_block->ipb & 0xff) != 0x2f)
35 return -ENOTSUPP; 35 return -EOPNOTSUPP;
36 36
37 useraddr = disp2; 37 useraddr = disp2;
38 if (base2) 38 if (base2)
@@ -138,7 +138,7 @@ static int handle_stop(struct kvm_vcpu *vcpu)
138 rc = __kvm_s390_vcpu_store_status(vcpu, 138 rc = __kvm_s390_vcpu_store_status(vcpu,
139 KVM_S390_STORE_STATUS_NOADDR); 139 KVM_S390_STORE_STATUS_NOADDR);
140 if (rc >= 0) 140 if (rc >= 0)
141 rc = -ENOTSUPP; 141 rc = -EOPNOTSUPP;
142 } 142 }
143 143
144 if (vcpu->arch.local_int.action_bits & ACTION_RELOADVCPU_ON_STOP) { 144 if (vcpu->arch.local_int.action_bits & ACTION_RELOADVCPU_ON_STOP) {
@@ -150,7 +150,7 @@ static int handle_stop(struct kvm_vcpu *vcpu)
150 if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) { 150 if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) {
151 vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP; 151 vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP;
152 VCPU_EVENT(vcpu, 3, "%s", "cpu stopped"); 152 VCPU_EVENT(vcpu, 3, "%s", "cpu stopped");
153 rc = -ENOTSUPP; 153 rc = -EOPNOTSUPP;
154 } 154 }
155 155
156 spin_unlock_bh(&vcpu->arch.local_int.lock); 156 spin_unlock_bh(&vcpu->arch.local_int.lock);
@@ -171,9 +171,9 @@ static int handle_validity(struct kvm_vcpu *vcpu)
171 2*PAGE_SIZE); 171 2*PAGE_SIZE);
172 if (rc) 172 if (rc)
173 /* user will receive sigsegv, exit to user */ 173 /* user will receive sigsegv, exit to user */
174 rc = -ENOTSUPP; 174 rc = -EOPNOTSUPP;
175 } else 175 } else
176 rc = -ENOTSUPP; 176 rc = -EOPNOTSUPP;
177 177
178 if (rc) 178 if (rc)
179 VCPU_EVENT(vcpu, 2, "unhandled validity intercept code %d", 179 VCPU_EVENT(vcpu, 2, "unhandled validity intercept code %d",
@@ -189,7 +189,7 @@ static int handle_instruction(struct kvm_vcpu *vcpu)
189 handler = instruction_handlers[vcpu->arch.sie_block->ipa >> 8]; 189 handler = instruction_handlers[vcpu->arch.sie_block->ipa >> 8];
190 if (handler) 190 if (handler)
191 return handler(vcpu); 191 return handler(vcpu);
192 return -ENOTSUPP; 192 return -EOPNOTSUPP;
193} 193}
194 194
195static int handle_prog(struct kvm_vcpu *vcpu) 195static int handle_prog(struct kvm_vcpu *vcpu)
@@ -206,7 +206,7 @@ static int handle_instruction_and_prog(struct kvm_vcpu *vcpu)
206 rc = handle_instruction(vcpu); 206 rc = handle_instruction(vcpu);
207 rc2 = handle_prog(vcpu); 207 rc2 = handle_prog(vcpu);
208 208
209 if (rc == -ENOTSUPP) 209 if (rc == -EOPNOTSUPP)
210 vcpu->arch.sie_block->icptcode = 0x04; 210 vcpu->arch.sie_block->icptcode = 0x04;
211 if (rc) 211 if (rc)
212 return rc; 212 return rc;
@@ -231,9 +231,9 @@ int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
231 u8 code = vcpu->arch.sie_block->icptcode; 231 u8 code = vcpu->arch.sie_block->icptcode;
232 232
233 if (code & 3 || (code >> 2) >= ARRAY_SIZE(intercept_funcs)) 233 if (code & 3 || (code >> 2) >= ARRAY_SIZE(intercept_funcs))
234 return -ENOTSUPP; 234 return -EOPNOTSUPP;
235 func = intercept_funcs[code >> 2]; 235 func = intercept_funcs[code >> 2];
236 if (func) 236 if (func)
237 return func(vcpu); 237 return func(vcpu);
238 return -ENOTSUPP; 238 return -EOPNOTSUPP;
239} 239}
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 43486c2408e1..834774d8d5f3 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -10,12 +10,12 @@
10 * Author(s): Carsten Otte <cotte@de.ibm.com> 10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 */ 11 */
12 12
13#include <asm/lowcore.h>
14#include <asm/uaccess.h>
15#include <linux/hrtimer.h>
16#include <linux/interrupt.h> 13#include <linux/interrupt.h>
17#include <linux/kvm_host.h> 14#include <linux/kvm_host.h>
15#include <linux/hrtimer.h>
18#include <linux/signal.h> 16#include <linux/signal.h>
17#include <asm/asm-offsets.h>
18#include <asm/uaccess.h>
19#include "kvm-s390.h" 19#include "kvm-s390.h"
20#include "gaccess.h" 20#include "gaccess.h"
21 21
@@ -187,8 +187,8 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
187 if (rc == -EFAULT) 187 if (rc == -EFAULT)
188 exception = 1; 188 exception = 1;
189 189
190 rc = put_guest_u64(vcpu, __LC_PFAULT_INTPARM, 190 rc = put_guest_u64(vcpu, __LC_EXT_PARAMS2,
191 inti->ext.ext_params2); 191 inti->ext.ext_params2);
192 if (rc == -EFAULT) 192 if (rc == -EFAULT)
193 exception = 1; 193 exception = 1;
194 break; 194 break;
@@ -342,7 +342,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
342 if (psw_interrupts_disabled(vcpu)) { 342 if (psw_interrupts_disabled(vcpu)) {
343 VCPU_EVENT(vcpu, 3, "%s", "disabled wait"); 343 VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
344 __unset_cpu_idle(vcpu); 344 __unset_cpu_idle(vcpu);
345 return -ENOTSUPP; /* disabled wait */ 345 return -EOPNOTSUPP; /* disabled wait */
346 } 346 }
347 347
348 if (psw_extint_disabled(vcpu) || 348 if (psw_extint_disabled(vcpu) ||
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index f8bcaefd7d34..3fa0a10e4668 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -23,6 +23,7 @@
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/timer.h> 25#include <linux/timer.h>
26#include <asm/asm-offsets.h>
26#include <asm/lowcore.h> 27#include <asm/lowcore.h>
27#include <asm/pgtable.h> 28#include <asm/pgtable.h>
28#include <asm/nmi.h> 29#include <asm/nmi.h>
@@ -543,7 +544,7 @@ rerun_vcpu:
543 rc = -EINTR; 544 rc = -EINTR;
544 } 545 }
545 546
546 if (rc == -ENOTSUPP) { 547 if (rc == -EOPNOTSUPP) {
547 /* intercept cannot be handled in-kernel, prepare kvm-run */ 548 /* intercept cannot be handled in-kernel, prepare kvm-run */
548 kvm_run->exit_reason = KVM_EXIT_S390_SIEIC; 549 kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
549 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode; 550 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
@@ -603,45 +604,45 @@ int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
603 } else 604 } else
604 prefix = 0; 605 prefix = 0;
605 606
606 if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, fp_regs), 607 if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
607 vcpu->arch.guest_fpregs.fprs, 128, prefix)) 608 vcpu->arch.guest_fpregs.fprs, 128, prefix))
608 return -EFAULT; 609 return -EFAULT;
609 610
610 if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, gp_regs), 611 if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
611 vcpu->arch.guest_gprs, 128, prefix)) 612 vcpu->arch.guest_gprs, 128, prefix))
612 return -EFAULT; 613 return -EFAULT;
613 614
614 if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, psw), 615 if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
615 &vcpu->arch.sie_block->gpsw, 16, prefix)) 616 &vcpu->arch.sie_block->gpsw, 16, prefix))
616 return -EFAULT; 617 return -EFAULT;
617 618
618 if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, pref_reg), 619 if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
619 &vcpu->arch.sie_block->prefix, 4, prefix)) 620 &vcpu->arch.sie_block->prefix, 4, prefix))
620 return -EFAULT; 621 return -EFAULT;
621 622
622 if (__guestcopy(vcpu, 623 if (__guestcopy(vcpu,
623 addr + offsetof(struct save_area_s390x, fp_ctrl_reg), 624 addr + offsetof(struct save_area, fp_ctrl_reg),
624 &vcpu->arch.guest_fpregs.fpc, 4, prefix)) 625 &vcpu->arch.guest_fpregs.fpc, 4, prefix))
625 return -EFAULT; 626 return -EFAULT;
626 627
627 if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, tod_reg), 628 if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
628 &vcpu->arch.sie_block->todpr, 4, prefix)) 629 &vcpu->arch.sie_block->todpr, 4, prefix))
629 return -EFAULT; 630 return -EFAULT;
630 631
631 if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, timer), 632 if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
632 &vcpu->arch.sie_block->cputm, 8, prefix)) 633 &vcpu->arch.sie_block->cputm, 8, prefix))
633 return -EFAULT; 634 return -EFAULT;
634 635
635 if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, clk_cmp), 636 if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
636 &vcpu->arch.sie_block->ckc, 8, prefix)) 637 &vcpu->arch.sie_block->ckc, 8, prefix))
637 return -EFAULT; 638 return -EFAULT;
638 639
639 if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, acc_regs), 640 if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
640 &vcpu->arch.guest_acrs, 64, prefix)) 641 &vcpu->arch.guest_acrs, 64, prefix))
641 return -EFAULT; 642 return -EFAULT;
642 643
643 if (__guestcopy(vcpu, 644 if (__guestcopy(vcpu,
644 addr + offsetof(struct save_area_s390x, ctrl_regs), 645 addr + offsetof(struct save_area, ctrl_regs),
645 &vcpu->arch.sie_block->gcr, 128, prefix)) 646 &vcpu->arch.sie_block->gcr, 128, prefix))
646 return -EFAULT; 647 return -EFAULT;
647 return 0; 648 return 0;
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index d426aac8095d..28c55677eb39 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -323,5 +323,5 @@ int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
323 else 323 else
324 return handler(vcpu); 324 return handler(vcpu);
325 } 325 }
326 return -ENOTSUPP; 326 return -EOPNOTSUPP;
327} 327}
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 15ee1111de58..241a48459b66 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -172,7 +172,7 @@ static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
172 rc = 0; /* order accepted */ 172 rc = 0; /* order accepted */
173 break; 173 break;
174 default: 174 default:
175 rc = -ENOTSUPP; 175 rc = -EOPNOTSUPP;
176 } 176 }
177 return rc; 177 return rc;
178} 178}
@@ -293,7 +293,7 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
293 vcpu->stat.instruction_sigp_restart++; 293 vcpu->stat.instruction_sigp_restart++;
294 /* user space must know about restart */ 294 /* user space must know about restart */
295 default: 295 default:
296 return -ENOTSUPP; 296 return -EOPNOTSUPP;
297 } 297 }
298 298
299 if (rc < 0) 299 if (rc < 0)
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index 97975ec7a274..cd54a1c352af 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -2,7 +2,7 @@
2# Makefile for s390-specific library files.. 2# Makefile for s390-specific library files..
3# 3#
4 4
5lib-y += delay.o string.o uaccess_std.o uaccess_pt.o 5lib-y += delay.o string.o uaccess_std.o uaccess_pt.o usercopy.o
6obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o 6obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o
7lib-$(CONFIG_64BIT) += uaccess_mvcos.o 7lib-$(CONFIG_64BIT) += uaccess_mvcos.o
8lib-$(CONFIG_SMP) += spinlock.o 8lib-$(CONFIG_SMP) += spinlock.o
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index cff327f109a8..91754ffb9203 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -43,16 +43,24 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
43{ 43{
44 int count = spin_retry; 44 int count = spin_retry;
45 unsigned int cpu = ~smp_processor_id(); 45 unsigned int cpu = ~smp_processor_id();
46 unsigned int owner;
46 47
47 while (1) { 48 while (1) {
48 if (count-- <= 0) { 49 owner = lp->owner_cpu;
49 unsigned int owner = lp->owner_cpu; 50 if (!owner || smp_vcpu_scheduled(~owner)) {
50 if (owner != 0) 51 for (count = spin_retry; count > 0; count--) {
51 _raw_yield_cpu(~owner); 52 if (arch_spin_is_locked(lp))
52 count = spin_retry; 53 continue;
54 if (_raw_compare_and_swap(&lp->owner_cpu, 0,
55 cpu) == 0)
56 return;
57 }
58 if (MACHINE_IS_LPAR)
59 continue;
53 } 60 }
54 if (arch_spin_is_locked(lp)) 61 owner = lp->owner_cpu;
55 continue; 62 if (owner)
63 _raw_yield_cpu(~owner);
56 if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) 64 if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
57 return; 65 return;
58 } 66 }
@@ -63,17 +71,27 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
63{ 71{
64 int count = spin_retry; 72 int count = spin_retry;
65 unsigned int cpu = ~smp_processor_id(); 73 unsigned int cpu = ~smp_processor_id();
74 unsigned int owner;
66 75
67 local_irq_restore(flags); 76 local_irq_restore(flags);
68 while (1) { 77 while (1) {
69 if (count-- <= 0) { 78 owner = lp->owner_cpu;
70 unsigned int owner = lp->owner_cpu; 79 if (!owner || smp_vcpu_scheduled(~owner)) {
71 if (owner != 0) 80 for (count = spin_retry; count > 0; count--) {
72 _raw_yield_cpu(~owner); 81 if (arch_spin_is_locked(lp))
73 count = spin_retry; 82 continue;
83 local_irq_disable();
84 if (_raw_compare_and_swap(&lp->owner_cpu, 0,
85 cpu) == 0)
86 return;
87 local_irq_restore(flags);
88 }
89 if (MACHINE_IS_LPAR)
90 continue;
74 } 91 }
75 if (arch_spin_is_locked(lp)) 92 owner = lp->owner_cpu;
76 continue; 93 if (owner)
94 _raw_yield_cpu(~owner);
77 local_irq_disable(); 95 local_irq_disable();
78 if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) 96 if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
79 return; 97 return;
@@ -100,8 +118,11 @@ EXPORT_SYMBOL(arch_spin_trylock_retry);
100void arch_spin_relax(arch_spinlock_t *lock) 118void arch_spin_relax(arch_spinlock_t *lock)
101{ 119{
102 unsigned int cpu = lock->owner_cpu; 120 unsigned int cpu = lock->owner_cpu;
103 if (cpu != 0) 121 if (cpu != 0) {
104 _raw_yield_cpu(~cpu); 122 if (MACHINE_IS_VM || MACHINE_IS_KVM ||
123 !smp_vcpu_scheduled(~cpu))
124 _raw_yield_cpu(~cpu);
125 }
105} 126}
106EXPORT_SYMBOL(arch_spin_relax); 127EXPORT_SYMBOL(arch_spin_relax);
107 128
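Read together, the two hunks above apply the same policy to both lock-wait paths: spin for spin_retry rounds while the lock owner's virtual cpu is scheduled by the hypervisor (or the lock looks free), never yield on LPAR, and fall back to a directed yield only when the owner is preempted. A condensed restatement using the helpers visible in the patch, not a drop-in replacement for either function:

    static void spin_wait_sketch(arch_spinlock_t *lp, unsigned int cpu)
    {
            unsigned int owner;
            int count;

            for (;;) {
                    owner = lp->owner_cpu;
                    if (!owner || smp_vcpu_scheduled(~owner)) {
                            /* owner is running (or lock free): plain busy wait */
                            for (count = spin_retry; count > 0; count--) {
                                    if (arch_spin_is_locked(lp))
                                            continue;
                                    if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
                                            return;
                            }
                            if (MACHINE_IS_LPAR)
                                    continue;       /* never yield on LPAR */
                    }
                    /* owner vcpu is not scheduled: yield to it, then retry */
                    owner = lp->owner_cpu;
                    if (owner)
                            _raw_yield_cpu(~owner);
                    if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
                            return;
            }
    }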
diff --git a/arch/s390/lib/usercopy.c b/arch/s390/lib/usercopy.c
new file mode 100644
index 000000000000..14b363fec8a2
--- /dev/null
+++ b/arch/s390/lib/usercopy.c
@@ -0,0 +1,8 @@
1#include <linux/module.h>
2#include <linux/bug.h>
3
4void copy_from_user_overflow(void)
5{
6 WARN(1, "Buffer overflow detected!\n");
7}
8EXPORT_SYMBOL(copy_from_user_overflow);
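copy_from_user_overflow() is the out-of-line handler that the uaccess wrappers branch to when a compile-time size check notices that the requested copy length exceeds the destination object. A sketch of the kind of wrapper that pairs with it follows; the exact code in asm/uaccess.h may differ in detail, and copy_from_user_sketch() is an illustrative name:

    static inline unsigned long
    copy_from_user_sketch(void *to, const void __user *from, unsigned long n)
    {
            unsigned int sz = __compiletime_object_size(to);

            if (unlikely(sz != -1 && sz < n)) {
                    copy_from_user_overflow();      /* emits the WARN() above */
                    return n;                       /* report nothing copied */
            }
            return __copy_from_user(to, from, n);
    }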
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 5c8457129603..6409fd57eb04 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -309,7 +309,7 @@ query_segment_type (struct dcss_segment *seg)
309 } 309 }
310#endif 310#endif
311 if (qout->segcnt > 6) { 311 if (qout->segcnt > 6) {
312 rc = -ENOTSUPP; 312 rc = -EOPNOTSUPP;
313 goto out_free; 313 goto out_free;
314 } 314 }
315 315
@@ -324,11 +324,11 @@ query_segment_type (struct dcss_segment *seg)
324 for (i=0; i<qout->segcnt; i++) { 324 for (i=0; i<qout->segcnt; i++) {
325 if (((qout->range[i].start & 0xff) != SEG_TYPE_EW) && 325 if (((qout->range[i].start & 0xff) != SEG_TYPE_EW) &&
326 ((qout->range[i].start & 0xff) != SEG_TYPE_EN)) { 326 ((qout->range[i].start & 0xff) != SEG_TYPE_EN)) {
327 rc = -ENOTSUPP; 327 rc = -EOPNOTSUPP;
328 goto out_free; 328 goto out_free;
329 } 329 }
330 if (start != qout->range[i].start >> PAGE_SHIFT) { 330 if (start != qout->range[i].start >> PAGE_SHIFT) {
331 rc = -ENOTSUPP; 331 rc = -EOPNOTSUPP;
332 goto out_free; 332 goto out_free;
333 } 333 }
334 start = (qout->range[i].end >> PAGE_SHIFT) + 1; 334 start = (qout->range[i].end >> PAGE_SHIFT) + 1;
@@ -357,7 +357,7 @@ query_segment_type (struct dcss_segment *seg)
357 * -ENOSYS : we are not running on VM 357 * -ENOSYS : we are not running on VM
358 * -EIO : could not perform query diagnose 358 * -EIO : could not perform query diagnose
359 * -ENOENT : no such segment 359 * -ENOENT : no such segment
360 * -ENOTSUPP: multi-part segment cannot be used with linux 360 * -EOPNOTSUPP: multi-part segment cannot be used with linux
361 * -ENOMEM : out of memory 361 * -ENOMEM : out of memory
362 * 0 .. 6 : type of segment as defined in include/asm-s390/extmem.h 362 * 0 .. 6 : type of segment as defined in include/asm-s390/extmem.h
363 */ 363 */
@@ -515,7 +515,7 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
515 * -ENOSYS : we are not running on VM 515 * -ENOSYS : we are not running on VM
516 * -EIO : could not perform query or load diagnose 516 * -EIO : could not perform query or load diagnose
517 * -ENOENT : no such segment 517 * -ENOENT : no such segment
518 * -ENOTSUPP: multi-part segment cannot be used with linux 518 * -EOPNOTSUPP: multi-part segment cannot be used with linux
519 * -ENOSPC : segment cannot be used (overlaps with storage) 519 * -ENOSPC : segment cannot be used (overlaps with storage)
520 * -EBUSY : segment can temporarily not be used (overlaps with dcss) 520 * -EBUSY : segment can temporarily not be used (overlaps with dcss)
521 * -ERANGE : segment cannot be used (exceeds kernel mapping range) 521 * -ERANGE : segment cannot be used (exceeds kernel mapping range)
@@ -742,7 +742,7 @@ void segment_warning(int rc, char *seg_name)
742 pr_err("Loading or querying DCSS %s resulted in a " 742 pr_err("Loading or querying DCSS %s resulted in a "
743 "hardware error\n", seg_name); 743 "hardware error\n", seg_name);
744 break; 744 break;
745 case -ENOTSUPP: 745 case -EOPNOTSUPP:
746 pr_err("DCSS %s has multiple page ranges and cannot be " 746 pr_err("DCSS %s has multiple page ranges and cannot be "
747 "loaded or queried\n", seg_name); 747 "loaded or queried\n", seg_name);
748 break; 748 break;
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index fc102e70d9c2..3040d7c78fe0 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -30,6 +30,7 @@
30#include <linux/kprobes.h> 30#include <linux/kprobes.h>
31#include <linux/uaccess.h> 31#include <linux/uaccess.h>
32#include <linux/hugetlb.h> 32#include <linux/hugetlb.h>
33#include <asm/asm-offsets.h>
33#include <asm/system.h> 34#include <asm/system.h>
34#include <asm/pgtable.h> 35#include <asm/pgtable.h>
35#include <asm/s390_ext.h> 36#include <asm/s390_ext.h>
@@ -59,15 +60,13 @@ static inline int notify_page_fault(struct pt_regs *regs)
59{ 60{
60 int ret = 0; 61 int ret = 0;
61 62
62#ifdef CONFIG_KPROBES
63 /* kprobe_running() needs smp_processor_id() */ 63 /* kprobe_running() needs smp_processor_id() */
64 if (!user_mode(regs)) { 64 if (kprobes_built_in() && !user_mode(regs)) {
65 preempt_disable(); 65 preempt_disable();
66 if (kprobe_running() && kprobe_fault_handler(regs, 14)) 66 if (kprobe_running() && kprobe_fault_handler(regs, 14))
67 ret = 1; 67 ret = 1;
68 preempt_enable(); 68 preempt_enable();
69 } 69 }
70#endif
71 return ret; 70 return ret;
72} 71}
73 72
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 765647952221..d5865e4024ce 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -143,33 +143,34 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
143} 143}
144#endif 144#endif
145 145
146void free_initmem(void) 146void free_init_pages(char *what, unsigned long begin, unsigned long end)
147{ 147{
148 unsigned long addr; 148 unsigned long addr = begin;
149 149
150 addr = (unsigned long)(&__init_begin); 150 if (begin >= end)
151 for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) { 151 return;
152 for (; addr < end; addr += PAGE_SIZE) {
152 ClearPageReserved(virt_to_page(addr)); 153 ClearPageReserved(virt_to_page(addr));
153 init_page_count(virt_to_page(addr)); 154 init_page_count(virt_to_page(addr));
154 memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); 155 memset((void *)(addr & PAGE_MASK), POISON_FREE_INITMEM,
156 PAGE_SIZE);
155 free_page(addr); 157 free_page(addr);
156 totalram_pages++; 158 totalram_pages++;
157 } 159 }
158 printk ("Freeing unused kernel memory: %ldk freed\n", 160 printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
159 ((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10); 161}
162
163void free_initmem(void)
164{
165 free_init_pages("unused kernel memory",
166 (unsigned long)&__init_begin,
167 (unsigned long)&__init_end);
160} 168}
161 169
162#ifdef CONFIG_BLK_DEV_INITRD 170#ifdef CONFIG_BLK_DEV_INITRD
163void free_initrd_mem(unsigned long start, unsigned long end) 171void free_initrd_mem(unsigned long start, unsigned long end)
164{ 172{
165 if (start < end) 173 free_init_pages("initrd memory", start, end);
166 printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
167 for (; start < end; start += PAGE_SIZE) {
168 ClearPageReserved(virt_to_page(start));
169 init_page_count(virt_to_page(start));
170 free_page(start);
171 totalram_pages++;
172 }
173} 174}
174#endif 175#endif
175 176
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 5905936c7c60..9ab1ae40565f 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -20,6 +20,7 @@
20#include <linux/buffer_head.h> 20#include <linux/buffer_head.h>
21#include <linux/hdreg.h> 21#include <linux/hdreg.h>
22#include <linux/async.h> 22#include <linux/async.h>
23#include <linux/mutex.h>
23 24
24#include <asm/ccwdev.h> 25#include <asm/ccwdev.h>
25#include <asm/ebcdic.h> 26#include <asm/ebcdic.h>
@@ -112,6 +113,7 @@ struct dasd_device *dasd_alloc_device(void)
112 INIT_WORK(&device->restore_device, do_restore_device); 113 INIT_WORK(&device->restore_device, do_restore_device);
113 device->state = DASD_STATE_NEW; 114 device->state = DASD_STATE_NEW;
114 device->target = DASD_STATE_NEW; 115 device->target = DASD_STATE_NEW;
116 mutex_init(&device->state_mutex);
115 117
116 return device; 118 return device;
117} 119}
@@ -321,8 +323,8 @@ static int dasd_state_ready_to_basic(struct dasd_device *device)
321 device->state = DASD_STATE_READY; 323 device->state = DASD_STATE_READY;
322 return rc; 324 return rc;
323 } 325 }
324 dasd_destroy_partitions(block);
325 dasd_flush_request_queue(block); 326 dasd_flush_request_queue(block);
327 dasd_destroy_partitions(block);
326 block->blocks = 0; 328 block->blocks = 0;
327 block->bp_block = 0; 329 block->bp_block = 0;
328 block->s2b_shift = 0; 330 block->s2b_shift = 0;
@@ -484,10 +486,8 @@ static void dasd_change_state(struct dasd_device *device)
484 if (rc) 486 if (rc)
485 device->target = device->state; 487 device->target = device->state;
486 488
487 if (device->state == device->target) { 489 if (device->state == device->target)
488 wake_up(&dasd_init_waitq); 490 wake_up(&dasd_init_waitq);
489 dasd_put_device(device);
490 }
491 491
492 /* let user-space know that the device status changed */ 492 /* let user-space know that the device status changed */
493 kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE); 493 kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);
@@ -502,7 +502,9 @@ static void dasd_change_state(struct dasd_device *device)
502static void do_kick_device(struct work_struct *work) 502static void do_kick_device(struct work_struct *work)
503{ 503{
504 struct dasd_device *device = container_of(work, struct dasd_device, kick_work); 504 struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
505 mutex_lock(&device->state_mutex);
505 dasd_change_state(device); 506 dasd_change_state(device);
507 mutex_unlock(&device->state_mutex);
506 dasd_schedule_device_bh(device); 508 dasd_schedule_device_bh(device);
507 dasd_put_device(device); 509 dasd_put_device(device);
508} 510}
@@ -539,18 +541,19 @@ void dasd_restore_device(struct dasd_device *device)
539void dasd_set_target_state(struct dasd_device *device, int target) 541void dasd_set_target_state(struct dasd_device *device, int target)
540{ 542{
541 dasd_get_device(device); 543 dasd_get_device(device);
544 mutex_lock(&device->state_mutex);
542 /* If we are in probeonly mode stop at DASD_STATE_READY. */ 545 /* If we are in probeonly mode stop at DASD_STATE_READY. */
543 if (dasd_probeonly && target > DASD_STATE_READY) 546 if (dasd_probeonly && target > DASD_STATE_READY)
544 target = DASD_STATE_READY; 547 target = DASD_STATE_READY;
545 if (device->target != target) { 548 if (device->target != target) {
546 if (device->state == target) { 549 if (device->state == target)
547 wake_up(&dasd_init_waitq); 550 wake_up(&dasd_init_waitq);
548 dasd_put_device(device);
549 }
550 device->target = target; 551 device->target = target;
551 } 552 }
552 if (device->state != device->target) 553 if (device->state != device->target)
553 dasd_change_state(device); 554 dasd_change_state(device);
555 mutex_unlock(&device->state_mutex);
556 dasd_put_device(device);
554} 557}
555 558
556/* 559/*
@@ -1000,12 +1003,20 @@ static void dasd_handle_killed_request(struct ccw_device *cdev,
1000 return; 1003 return;
1001 } 1004 }
1002 1005
1003 device = (struct dasd_device *) cqr->startdev; 1006 device = dasd_device_from_cdev_locked(cdev);
1004 if (device == NULL || 1007 if (IS_ERR(device)) {
1005 device != dasd_device_from_cdev_locked(cdev) || 1008 DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
1006 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { 1009 "unable to get device from cdev");
1010 return;
1011 }
1012
1013 if (!cqr->startdev ||
1014 device != cqr->startdev ||
1015 strncmp(cqr->startdev->discipline->ebcname,
1016 (char *) &cqr->magic, 4)) {
1007 DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s", 1017 DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
1008 "invalid device in request"); 1018 "invalid device in request");
1019 dasd_put_device(device);
1009 return; 1020 return;
1010 } 1021 }
1011 1022
@@ -1692,7 +1703,6 @@ int dasd_cancel_req(struct dasd_ccw_req *cqr)
1692 cqr, rc); 1703 cqr, rc);
1693 } else { 1704 } else {
1694 cqr->stopclk = get_clock(); 1705 cqr->stopclk = get_clock();
1695 rc = 1;
1696 } 1706 }
1697 break; 1707 break;
1698 default: /* already finished or clear pending - do nothing */ 1708 default: /* already finished or clear pending - do nothing */
@@ -2170,9 +2180,13 @@ static void dasd_flush_request_queue(struct dasd_block *block)
2170static int dasd_open(struct block_device *bdev, fmode_t mode) 2180static int dasd_open(struct block_device *bdev, fmode_t mode)
2171{ 2181{
2172 struct dasd_block *block = bdev->bd_disk->private_data; 2182 struct dasd_block *block = bdev->bd_disk->private_data;
2173 struct dasd_device *base = block->base; 2183 struct dasd_device *base;
2174 int rc; 2184 int rc;
2175 2185
2186 if (!block)
2187 return -ENODEV;
2188
2189 base = block->base;
2176 atomic_inc(&block->open_count); 2190 atomic_inc(&block->open_count);
2177 if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) { 2191 if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
2178 rc = -ENODEV; 2192 rc = -ENODEV;
@@ -2285,11 +2299,6 @@ static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
2285 if (ret) 2299 if (ret)
2286 pr_warning("%s: Setting the DASD online failed with rc=%d\n", 2300 pr_warning("%s: Setting the DASD online failed with rc=%d\n",
2287 dev_name(&cdev->dev), ret); 2301 dev_name(&cdev->dev), ret);
2288 else {
2289 struct dasd_device *device = dasd_device_from_cdev(cdev);
2290 wait_event(dasd_init_waitq, _wait_for_device(device));
2291 dasd_put_device(device);
2292 }
2293} 2302}
2294 2303
2295/* 2304/*
@@ -2424,6 +2433,9 @@ int dasd_generic_set_online(struct ccw_device *cdev,
2424 } else 2433 } else
2425 pr_debug("dasd_generic device %s found\n", 2434 pr_debug("dasd_generic device %s found\n",
2426 dev_name(&cdev->dev)); 2435 dev_name(&cdev->dev));
2436
2437 wait_event(dasd_init_waitq, _wait_for_device(device));
2438
2427 dasd_put_device(device); 2439 dasd_put_device(device);
2428 return rc; 2440 return rc;
2429} 2441}
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 4cac5b54f26a..d49766f3b940 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -874,12 +874,19 @@ dasd_discipline_show(struct device *dev, struct device_attribute *attr,
874 ssize_t len; 874 ssize_t len;
875 875
876 device = dasd_device_from_cdev(to_ccwdev(dev)); 876 device = dasd_device_from_cdev(to_ccwdev(dev));
877 if (!IS_ERR(device) && device->discipline) { 877 if (IS_ERR(device))
878 goto out;
879 else if (!device->discipline) {
880 dasd_put_device(device);
881 goto out;
882 } else {
878 len = snprintf(buf, PAGE_SIZE, "%s\n", 883 len = snprintf(buf, PAGE_SIZE, "%s\n",
879 device->discipline->name); 884 device->discipline->name);
880 dasd_put_device(device); 885 dasd_put_device(device);
881 } else 886 return len;
882 len = snprintf(buf, PAGE_SIZE, "none\n"); 887 }
888out:
889 len = snprintf(buf, PAGE_SIZE, "none\n");
883 return len; 890 return len;
884} 891}
885 892
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index d3198303b93c..94f92a1247f2 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -88,6 +88,7 @@ void dasd_gendisk_free(struct dasd_block *block)
88 if (block->gdp) { 88 if (block->gdp) {
89 del_gendisk(block->gdp); 89 del_gendisk(block->gdp);
90 block->gdp->queue = NULL; 90 block->gdp->queue = NULL;
91 block->gdp->private_data = NULL;
91 put_disk(block->gdp); 92 put_disk(block->gdp);
92 block->gdp = NULL; 93 block->gdp = NULL;
93 } 94 }
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index e4c2143dabf6..ed73ce550822 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -368,6 +368,7 @@ struct dasd_device {
368 368
369 /* Device state and target state. */ 369 /* Device state and target state. */
370 int state, target; 370 int state, target;
371 struct mutex state_mutex;
371 int stopped; /* device (ccw_device_start) was stopped */ 372 int stopped; /* device (ccw_device_start) was stopped */
372 373
373 /* reference count. */ 374 /* reference count. */
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index 71f95f54866f..f13a0bdd148c 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -165,51 +165,32 @@ static const struct file_operations dasd_devices_file_ops = {
165 .release = seq_release, 165 .release = seq_release,
166}; 166};
167 167
168static int
169dasd_calc_metrics(char *page, char **start, off_t off,
170 int count, int *eof, int len)
171{
172 len = (len > off) ? len - off : 0;
173 if (len > count)
174 len = count;
175 if (len < count)
176 *eof = 1;
177 *start = page + off;
178 return len;
179}
180
181#ifdef CONFIG_DASD_PROFILE 168#ifdef CONFIG_DASD_PROFILE
182static char * 169static void dasd_statistics_array(struct seq_file *m, unsigned int *array, int factor)
183dasd_statistics_array(char *str, unsigned int *array, int factor)
184{ 170{
185 int i; 171 int i;
186 172
187 for (i = 0; i < 32; i++) { 173 for (i = 0; i < 32; i++) {
188 str += sprintf(str, "%7d ", array[i] / factor); 174 seq_printf(m, "%7d ", array[i] / factor);
189 if (i == 15) 175 if (i == 15)
190 str += sprintf(str, "\n"); 176 seq_putc(m, '\n');
191 } 177 }
192 str += sprintf(str,"\n"); 178 seq_putc(m, '\n');
193 return str;
194} 179}
195#endif /* CONFIG_DASD_PROFILE */ 180#endif /* CONFIG_DASD_PROFILE */
196 181
197static int 182static int dasd_stats_proc_show(struct seq_file *m, void *v)
198dasd_statistics_read(char *page, char **start, off_t off,
199 int count, int *eof, void *data)
200{ 183{
201 unsigned long len;
202#ifdef CONFIG_DASD_PROFILE 184#ifdef CONFIG_DASD_PROFILE
203 struct dasd_profile_info_t *prof; 185 struct dasd_profile_info_t *prof;
204 char *str;
205 int factor; 186 int factor;
206 187
207 /* check for active profiling */ 188 /* check for active profiling */
208 if (dasd_profile_level == DASD_PROFILE_OFF) { 189 if (dasd_profile_level == DASD_PROFILE_OFF) {
209 len = sprintf(page, "Statistics are off - they might be " 190 seq_printf(m, "Statistics are off - they might be "
210 "switched on using 'echo set on > " 191 "switched on using 'echo set on > "
211 "/proc/dasd/statistics'\n"); 192 "/proc/dasd/statistics'\n");
212 return dasd_calc_metrics(page, start, off, count, eof, len); 193 return 0;
213 } 194 }
214 195
215 prof = &dasd_global_profile; 196 prof = &dasd_global_profile;
@@ -217,47 +198,49 @@ dasd_statistics_read(char *page, char **start, off_t off,
217 for (factor = 1; (prof->dasd_io_reqs / factor) > 9999999; 198 for (factor = 1; (prof->dasd_io_reqs / factor) > 9999999;
218 factor *= 10); 199 factor *= 10);
219 200
220 str = page; 201 seq_printf(m, "%d dasd I/O requests\n", prof->dasd_io_reqs);
221 str += sprintf(str, "%d dasd I/O requests\n", prof->dasd_io_reqs); 202 seq_printf(m, "with %u sectors(512B each)\n",
222 str += sprintf(str, "with %u sectors(512B each)\n",
223 prof->dasd_io_sects); 203 prof->dasd_io_sects);
224 str += sprintf(str, "Scale Factor is %d\n", factor); 204 seq_printf(m, "Scale Factor is %d\n", factor);
225 str += sprintf(str, 205 seq_printf(m,
226 " __<4 ___8 __16 __32 __64 _128 " 206 " __<4 ___8 __16 __32 __64 _128 "
227 " _256 _512 __1k __2k __4k __8k " 207 " _256 _512 __1k __2k __4k __8k "
228 " _16k _32k _64k 128k\n"); 208 " _16k _32k _64k 128k\n");
229 str += sprintf(str, 209 seq_printf(m,
230 " _256 _512 __1M __2M __4M __8M " 210 " _256 _512 __1M __2M __4M __8M "
231 " _16M _32M _64M 128M 256M 512M " 211 " _16M _32M _64M 128M 256M 512M "
232 " __1G __2G __4G " " _>4G\n"); 212 " __1G __2G __4G " " _>4G\n");
233 213
234 str += sprintf(str, "Histogram of sizes (512B secs)\n"); 214 seq_printf(m, "Histogram of sizes (512B secs)\n");
235 str = dasd_statistics_array(str, prof->dasd_io_secs, factor); 215 dasd_statistics_array(m, prof->dasd_io_secs, factor);
236 str += sprintf(str, "Histogram of I/O times (microseconds)\n"); 216 seq_printf(m, "Histogram of I/O times (microseconds)\n");
237 str = dasd_statistics_array(str, prof->dasd_io_times, factor); 217 dasd_statistics_array(m, prof->dasd_io_times, factor);
238 str += sprintf(str, "Histogram of I/O times per sector\n"); 218 seq_printf(m, "Histogram of I/O times per sector\n");
239 str = dasd_statistics_array(str, prof->dasd_io_timps, factor); 219 dasd_statistics_array(m, prof->dasd_io_timps, factor);
240 str += sprintf(str, "Histogram of I/O time till ssch\n"); 220 seq_printf(m, "Histogram of I/O time till ssch\n");
241 str = dasd_statistics_array(str, prof->dasd_io_time1, factor); 221 dasd_statistics_array(m, prof->dasd_io_time1, factor);
242 str += sprintf(str, "Histogram of I/O time between ssch and irq\n"); 222 seq_printf(m, "Histogram of I/O time between ssch and irq\n");
243 str = dasd_statistics_array(str, prof->dasd_io_time2, factor); 223 dasd_statistics_array(m, prof->dasd_io_time2, factor);
244 str += sprintf(str, "Histogram of I/O time between ssch " 224 seq_printf(m, "Histogram of I/O time between ssch "
245 "and irq per sector\n"); 225 "and irq per sector\n");
246 str = dasd_statistics_array(str, prof->dasd_io_time2ps, factor); 226 dasd_statistics_array(m, prof->dasd_io_time2ps, factor);
247 str += sprintf(str, "Histogram of I/O time between irq and end\n"); 227 seq_printf(m, "Histogram of I/O time between irq and end\n");
248 str = dasd_statistics_array(str, prof->dasd_io_time3, factor); 228 dasd_statistics_array(m, prof->dasd_io_time3, factor);
249 str += sprintf(str, "# of req in chanq at enqueuing (1..32) \n"); 229 seq_printf(m, "# of req in chanq at enqueuing (1..32) \n");
250 str = dasd_statistics_array(str, prof->dasd_io_nr_req, factor); 230 dasd_statistics_array(m, prof->dasd_io_nr_req, factor);
251 len = str - page;
252#else 231#else
253 len = sprintf(page, "Statistics are not activated in this kernel\n"); 232 seq_printf(m, "Statistics are not activated in this kernel\n");
254#endif 233#endif
255 return dasd_calc_metrics(page, start, off, count, eof, len); 234 return 0;
256} 235}
257 236
258static int 237static int dasd_stats_proc_open(struct inode *inode, struct file *file)
259dasd_statistics_write(struct file *file, const char __user *user_buf, 238{
260 unsigned long user_len, void *data) 239 return single_open(file, dasd_stats_proc_show, NULL);
240}
241
242static ssize_t dasd_stats_proc_write(struct file *file,
243 const char __user *user_buf, size_t user_len, loff_t *pos)
261{ 244{
262#ifdef CONFIG_DASD_PROFILE 245#ifdef CONFIG_DASD_PROFILE
263 char *buffer, *str; 246 char *buffer, *str;
@@ -308,6 +291,15 @@ out_error:
308#endif /* CONFIG_DASD_PROFILE */ 291#endif /* CONFIG_DASD_PROFILE */
309} 292}
310 293
294static const struct file_operations dasd_stats_proc_fops = {
295 .owner = THIS_MODULE,
296 .open = dasd_stats_proc_open,
297 .read = seq_read,
298 .llseek = seq_lseek,
299 .release = single_release,
300 .write = dasd_stats_proc_write,
301};
302
311/* 303/*
312 * Create dasd proc-fs entries. 304 * Create dasd proc-fs entries.
313 * In case creation failed, cleanup and return -ENOENT. 305 * In case creation failed, cleanup and return -ENOENT.
@@ -324,13 +316,12 @@ dasd_proc_init(void)
324 &dasd_devices_file_ops); 316 &dasd_devices_file_ops);
325 if (!dasd_devices_entry) 317 if (!dasd_devices_entry)
326 goto out_nodevices; 318 goto out_nodevices;
327 dasd_statistics_entry = create_proc_entry("statistics", 319 dasd_statistics_entry = proc_create("statistics",
328 S_IFREG | S_IRUGO | S_IWUSR, 320 S_IFREG | S_IRUGO | S_IWUSR,
329 dasd_proc_root_entry); 321 dasd_proc_root_entry,
322 &dasd_stats_proc_fops);
330 if (!dasd_statistics_entry) 323 if (!dasd_statistics_entry)
331 goto out_nostatistics; 324 goto out_nostatistics;
332 dasd_statistics_entry->read_proc = dasd_statistics_read;
333 dasd_statistics_entry->write_proc = dasd_statistics_write;
334 return 0; 325 return 0;
335 326
336 out_nostatistics: 327 out_nostatistics:
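The hunks above convert /proc/dasd/statistics from the old read_proc/write_proc callbacks to the seq_file single_open pattern. For reference, the generic shape of that pattern; the example_* names are illustrative and not part of the patch:

    #include <linux/module.h>
    #include <linux/proc_fs.h>
    #include <linux/seq_file.h>

    static int example_show(struct seq_file *m, void *v)
    {
            seq_printf(m, "hello from /proc/example\n");
            return 0;
    }

    static int example_open(struct inode *inode, struct file *file)
    {
            return single_open(file, example_show, NULL);
    }

    static const struct file_operations example_fops = {
            .owner   = THIS_MODULE,
            .open    = example_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = single_release,
    };

    /* registered from an __init function, e.g.: */
    /* proc_create("example", S_IRUGO, NULL, &example_fops); */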
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 82daa3c1dc9c..3438658b66b7 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -15,6 +15,7 @@
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/miscdevice.h> 16#include <linux/miscdevice.h>
17#include <linux/debugfs.h> 17#include <linux/debugfs.h>
18#include <asm/asm-offsets.h>
18#include <asm/ipl.h> 19#include <asm/ipl.h>
19#include <asm/sclp.h> 20#include <asm/sclp.h>
20#include <asm/setup.h> 21#include <asm/setup.h>
@@ -40,12 +41,12 @@ enum arch_id {
40/* dump system info */ 41/* dump system info */
41 42
42struct sys_info { 43struct sys_info {
43 enum arch_id arch; 44 enum arch_id arch;
44 unsigned long sa_base; 45 unsigned long sa_base;
45 u32 sa_size; 46 u32 sa_size;
46 int cpu_map[NR_CPUS]; 47 int cpu_map[NR_CPUS];
47 unsigned long mem_size; 48 unsigned long mem_size;
48 union save_area lc_mask; 49 struct save_area lc_mask;
49}; 50};
50 51
51struct ipib_info { 52struct ipib_info {
@@ -183,52 +184,9 @@ static int memcpy_real_user(void __user *dest, unsigned long src, size_t count)
183 return 0; 184 return 0;
184} 185}
185 186
186#ifdef __s390x__
187/*
188 * Convert s390x (64 bit) cpu info to s390 (32 bit) cpu info
189 */
190static void __init s390x_to_s390_regs(union save_area *out, union save_area *in,
191 int cpu)
192{
193 int i;
194
195 for (i = 0; i < 16; i++) {
196 out->s390.gp_regs[i] = in->s390x.gp_regs[i] & 0x00000000ffffffff;
197 out->s390.acc_regs[i] = in->s390x.acc_regs[i];
198 out->s390.ctrl_regs[i] =
199 in->s390x.ctrl_regs[i] & 0x00000000ffffffff;
200 }
201 /* locore for 31 bit has only space for fpregs 0,2,4,6 */
202 out->s390.fp_regs[0] = in->s390x.fp_regs[0];
203 out->s390.fp_regs[1] = in->s390x.fp_regs[2];
204 out->s390.fp_regs[2] = in->s390x.fp_regs[4];
205 out->s390.fp_regs[3] = in->s390x.fp_regs[6];
206 memcpy(&(out->s390.psw[0]), &(in->s390x.psw[0]), 4);
207 out->s390.psw[1] |= 0x8; /* set bit 12 */
208 memcpy(&(out->s390.psw[4]),&(in->s390x.psw[12]), 4);
209 out->s390.psw[4] |= 0x80; /* set (31bit) addressing bit */
210 out->s390.pref_reg = in->s390x.pref_reg;
211 out->s390.timer = in->s390x.timer;
212 out->s390.clk_cmp = in->s390x.clk_cmp;
213}
214
215static void __init s390x_to_s390_save_areas(void)
216{
217 int i = 1;
218 static union save_area tmp;
219
220 while (zfcpdump_save_areas[i]) {
221 s390x_to_s390_regs(&tmp, zfcpdump_save_areas[i], i);
222 memcpy(zfcpdump_save_areas[i], &tmp, sizeof(tmp));
223 i++;
224 }
225}
226
227#endif /* __s390x__ */
228
229static int __init init_cpu_info(enum arch_id arch) 187static int __init init_cpu_info(enum arch_id arch)
230{ 188{
231 union save_area *sa; 189 struct save_area *sa;
232 190
233 /* get info for boot cpu from lowcore, stored in the HSA */ 191 /* get info for boot cpu from lowcore, stored in the HSA */
234 192
@@ -241,20 +199,12 @@ static int __init init_cpu_info(enum arch_id arch)
241 return -EIO; 199 return -EIO;
242 } 200 }
243 zfcpdump_save_areas[0] = sa; 201 zfcpdump_save_areas[0] = sa;
244
245#ifdef __s390x__
246 /* convert s390x regs to s390, if we are dumping an s390 Linux */
247
248 if (arch == ARCH_S390)
249 s390x_to_s390_save_areas();
250#endif
251
252 return 0; 202 return 0;
253} 203}
254 204
255static DEFINE_MUTEX(zcore_mutex); 205static DEFINE_MUTEX(zcore_mutex);
256 206
257#define DUMP_VERSION 0x3 207#define DUMP_VERSION 0x5
258#define DUMP_MAGIC 0xa8190173618f23fdULL 208#define DUMP_MAGIC 0xa8190173618f23fdULL
259#define DUMP_ARCH_S390X 2 209#define DUMP_ARCH_S390X 2
260#define DUMP_ARCH_S390 1 210#define DUMP_ARCH_S390 1
@@ -279,7 +229,14 @@ struct zcore_header {
279 u32 volnr; 229 u32 volnr;
280 u32 build_arch; 230 u32 build_arch;
281 u64 rmem_size; 231 u64 rmem_size;
282 char pad2[4016]; 232 u8 mvdump;
233 u16 cpu_cnt;
234 u16 real_cpu_cnt;
235 u8 end_pad1[0x200-0x061];
236 u64 mvdump_sign;
237 u64 mvdump_zipl_time;
238 u8 end_pad2[0x800-0x210];
239 u32 lc_vec[512];
283} __attribute__((packed,__aligned__(16))); 240} __attribute__((packed,__aligned__(16)));
284 241
285static struct zcore_header zcore_header = { 242static struct zcore_header zcore_header = {
@@ -289,7 +246,7 @@ static struct zcore_header zcore_header = {
289 .dump_level = 0, 246 .dump_level = 0,
290 .page_size = PAGE_SIZE, 247 .page_size = PAGE_SIZE,
291 .mem_start = 0, 248 .mem_start = 0,
292#ifdef __s390x__ 249#ifdef CONFIG_64BIT
293 .build_arch = DUMP_ARCH_S390X, 250 .build_arch = DUMP_ARCH_S390X,
294#else 251#else
295 .build_arch = DUMP_ARCH_S390, 252 .build_arch = DUMP_ARCH_S390,
@@ -340,11 +297,7 @@ static int zcore_add_lc(char __user *buf, unsigned long start, size_t count)
340 unsigned long prefix; 297 unsigned long prefix;
341 unsigned long sa_off, len, buf_off; 298 unsigned long sa_off, len, buf_off;
342 299
343 if (sys_info.arch == ARCH_S390) 300 prefix = zfcpdump_save_areas[i]->pref_reg;
344 prefix = zfcpdump_save_areas[i]->s390.pref_reg;
345 else
346 prefix = zfcpdump_save_areas[i]->s390x.pref_reg;
347
348 sa_start = prefix + sys_info.sa_base; 301 sa_start = prefix + sys_info.sa_base;
349 sa_end = prefix + sys_info.sa_base + sys_info.sa_size; 302 sa_end = prefix + sys_info.sa_base + sys_info.sa_size;
350 303
@@ -561,34 +514,39 @@ static const struct file_operations zcore_reipl_fops = {
561 .release = zcore_reipl_release, 514 .release = zcore_reipl_release,
562}; 515};
563 516
517#ifdef CONFIG_32BIT
564 518
565static void __init set_s390_lc_mask(union save_area *map) 519static void __init set_lc_mask(struct save_area *map)
566{ 520{
567 memset(&map->s390.ext_save, 0xff, sizeof(map->s390.ext_save)); 521 memset(&map->ext_save, 0xff, sizeof(map->ext_save));
568 memset(&map->s390.timer, 0xff, sizeof(map->s390.timer)); 522 memset(&map->timer, 0xff, sizeof(map->timer));
569 memset(&map->s390.clk_cmp, 0xff, sizeof(map->s390.clk_cmp)); 523 memset(&map->clk_cmp, 0xff, sizeof(map->clk_cmp));
570 memset(&map->s390.psw, 0xff, sizeof(map->s390.psw)); 524 memset(&map->psw, 0xff, sizeof(map->psw));
571 memset(&map->s390.pref_reg, 0xff, sizeof(map->s390.pref_reg)); 525 memset(&map->pref_reg, 0xff, sizeof(map->pref_reg));
572 memset(&map->s390.acc_regs, 0xff, sizeof(map->s390.acc_regs)); 526 memset(&map->acc_regs, 0xff, sizeof(map->acc_regs));
573 memset(&map->s390.fp_regs, 0xff, sizeof(map->s390.fp_regs)); 527 memset(&map->fp_regs, 0xff, sizeof(map->fp_regs));
574 memset(&map->s390.gp_regs, 0xff, sizeof(map->s390.gp_regs)); 528 memset(&map->gp_regs, 0xff, sizeof(map->gp_regs));
575 memset(&map->s390.ctrl_regs, 0xff, sizeof(map->s390.ctrl_regs)); 529 memset(&map->ctrl_regs, 0xff, sizeof(map->ctrl_regs));
576} 530}
577 531
578static void __init set_s390x_lc_mask(union save_area *map) 532#else /* CONFIG_32BIT */
533
534static void __init set_lc_mask(struct save_area *map)
579{ 535{
580 memset(&map->s390x.fp_regs, 0xff, sizeof(map->s390x.fp_regs)); 536 memset(&map->fp_regs, 0xff, sizeof(map->fp_regs));
581 memset(&map->s390x.gp_regs, 0xff, sizeof(map->s390x.gp_regs)); 537 memset(&map->gp_regs, 0xff, sizeof(map->gp_regs));
582 memset(&map->s390x.psw, 0xff, sizeof(map->s390x.psw)); 538 memset(&map->psw, 0xff, sizeof(map->psw));
583 memset(&map->s390x.pref_reg, 0xff, sizeof(map->s390x.pref_reg)); 539 memset(&map->pref_reg, 0xff, sizeof(map->pref_reg));
584 memset(&map->s390x.fp_ctrl_reg, 0xff, sizeof(map->s390x.fp_ctrl_reg)); 540 memset(&map->fp_ctrl_reg, 0xff, sizeof(map->fp_ctrl_reg));
585 memset(&map->s390x.tod_reg, 0xff, sizeof(map->s390x.tod_reg)); 541 memset(&map->tod_reg, 0xff, sizeof(map->tod_reg));
586 memset(&map->s390x.timer, 0xff, sizeof(map->s390x.timer)); 542 memset(&map->timer, 0xff, sizeof(map->timer));
587 memset(&map->s390x.clk_cmp, 0xff, sizeof(map->s390x.clk_cmp)); 543 memset(&map->clk_cmp, 0xff, sizeof(map->clk_cmp));
588 memset(&map->s390x.acc_regs, 0xff, sizeof(map->s390x.acc_regs)); 544 memset(&map->acc_regs, 0xff, sizeof(map->acc_regs));
589 memset(&map->s390x.ctrl_regs, 0xff, sizeof(map->s390x.ctrl_regs)); 545 memset(&map->ctrl_regs, 0xff, sizeof(map->ctrl_regs));
590} 546}
591 547
548#endif /* CONFIG_32BIT */
549
592/* 550/*
593 * Initialize dump globals for a given architecture 551 * Initialize dump globals for a given architecture
594 */ 552 */
@@ -599,21 +557,18 @@ static int __init sys_info_init(enum arch_id arch)
599 switch (arch) { 557 switch (arch) {
600 case ARCH_S390X: 558 case ARCH_S390X:
601 pr_alert("DETECTED 'S390X (64 bit) OS'\n"); 559 pr_alert("DETECTED 'S390X (64 bit) OS'\n");
602 sys_info.sa_base = SAVE_AREA_BASE_S390X;
603 sys_info.sa_size = sizeof(struct save_area_s390x);
604 set_s390x_lc_mask(&sys_info.lc_mask);
605 break; 560 break;
606 case ARCH_S390: 561 case ARCH_S390:
607 pr_alert("DETECTED 'S390 (32 bit) OS'\n"); 562 pr_alert("DETECTED 'S390 (32 bit) OS'\n");
608 sys_info.sa_base = SAVE_AREA_BASE_S390;
609 sys_info.sa_size = sizeof(struct save_area_s390);
610 set_s390_lc_mask(&sys_info.lc_mask);
611 break; 563 break;
612 default: 564 default:
613 pr_alert("0x%x is an unknown architecture.\n",arch); 565 pr_alert("0x%x is an unknown architecture.\n",arch);
614 return -EINVAL; 566 return -EINVAL;
615 } 567 }
568 sys_info.sa_base = SAVE_AREA_BASE;
569 sys_info.sa_size = sizeof(struct save_area);
616 sys_info.arch = arch; 570 sys_info.arch = arch;
571 set_lc_mask(&sys_info.lc_mask);
617 rc = init_cpu_info(arch); 572 rc = init_cpu_info(arch);
618 if (rc) 573 if (rc)
619 return rc; 574 return rc;
@@ -660,8 +615,9 @@ static int __init get_mem_size(unsigned long *mem)
660 615
661static int __init zcore_header_init(int arch, struct zcore_header *hdr) 616static int __init zcore_header_init(int arch, struct zcore_header *hdr)
662{ 617{
663 int rc; 618 int rc, i;
664 unsigned long memory = 0; 619 unsigned long memory = 0;
620 u32 prefix;
665 621
666 if (arch == ARCH_S390X) 622 if (arch == ARCH_S390X)
667 hdr->arch_id = DUMP_ARCH_S390X; 623 hdr->arch_id = DUMP_ARCH_S390X;
@@ -676,6 +632,14 @@ static int __init zcore_header_init(int arch, struct zcore_header *hdr)
676 hdr->num_pages = memory / PAGE_SIZE; 632 hdr->num_pages = memory / PAGE_SIZE;
677 hdr->tod = get_clock(); 633 hdr->tod = get_clock();
678 get_cpu_id(&hdr->cpu_id); 634 get_cpu_id(&hdr->cpu_id);
635 for (i = 0; zfcpdump_save_areas[i]; i++) {
636 prefix = zfcpdump_save_areas[i]->pref_reg;
637 hdr->real_cpu_cnt++;
638 if (!prefix)
639 continue;
640 hdr->lc_vec[hdr->cpu_cnt] = prefix;
641 hdr->cpu_cnt++;
642 }
679 return 0; 643 return 0;
680} 644}
681 645
@@ -741,14 +705,21 @@ static int __init zcore_init(void)
741 if (rc) 705 if (rc)
742 goto fail; 706 goto fail;
743 707
744#ifndef __s390x__ 708#ifdef CONFIG_64BIT
709 if (arch == ARCH_S390) {
710 pr_alert("The 64-bit dump tool cannot be used for a "
711 "32-bit system\n");
712 rc = -EINVAL;
713 goto fail;
714 }
715#else /* CONFIG_64BIT */
745 if (arch == ARCH_S390X) { 716 if (arch == ARCH_S390X) {
746 pr_alert("The 32-bit dump tool cannot be used for a " 717 pr_alert("The 32-bit dump tool cannot be used for a "
747 "64-bit system\n"); 718 "64-bit system\n");
748 rc = -EINVAL; 719 rc = -EINVAL;
749 goto fail; 720 goto fail;
750 } 721 }
751#endif 722#endif /* CONFIG_64BIT */
752 723
753 rc = sys_info_init(arch); 724 rc = sys_info_init(arch);
754 if (rc) 725 if (rc)
diff --git a/drivers/s390/cio/ccwreq.c b/drivers/s390/cio/ccwreq.c
index 7a28a3029a3f..37df42af05ec 100644
--- a/drivers/s390/cio/ccwreq.c
+++ b/drivers/s390/cio/ccwreq.c
@@ -224,8 +224,8 @@ static void ccwreq_log_status(struct ccw_device *cdev, enum io_status status)
224 */ 224 */
225void ccw_request_handler(struct ccw_device *cdev) 225void ccw_request_handler(struct ccw_device *cdev)
226{ 226{
227 struct irb *irb = (struct irb *)&S390_lowcore.irb;
227 struct ccw_request *req = &cdev->private->req; 228 struct ccw_request *req = &cdev->private->req;
228 struct irb *irb = (struct irb *) __LC_IRB;
229 enum io_status status; 229 enum io_status status;
230 int rc = -EOPNOTSUPP; 230 int rc = -EOPNOTSUPP;
231 231
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 1ecd3e567648..4038f5b4f144 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -574,7 +574,7 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
574 secm_area->request.length = 0x0050; 574 secm_area->request.length = 0x0050;
575 secm_area->request.code = 0x0016; 575 secm_area->request.code = 0x0016;
576 576
577 secm_area->key = PAGE_DEFAULT_KEY; 577 secm_area->key = PAGE_DEFAULT_KEY >> 4;
578 secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1; 578 secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
579 secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2; 579 secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;
580 580
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index c84ac9443079..852612f5dba0 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -51,7 +51,7 @@ static void chsc_subchannel_irq(struct subchannel *sch)
51{ 51{
52 struct chsc_private *private = sch->private; 52 struct chsc_private *private = sch->private;
53 struct chsc_request *request = private->request; 53 struct chsc_request *request = private->request;
54 struct irb *irb = (struct irb *)__LC_IRB; 54 struct irb *irb = (struct irb *)&S390_lowcore.irb;
55 55
56 CHSC_LOG(4, "irb"); 56 CHSC_LOG(4, "irb");
57 CHSC_LOG_HEX(4, irb, sizeof(*irb)); 57 CHSC_LOG_HEX(4, irb, sizeof(*irb));
@@ -237,7 +237,7 @@ static int chsc_async(struct chsc_async_area *chsc_area,
237 int ret = -ENODEV; 237 int ret = -ENODEV;
238 char dbf[10]; 238 char dbf[10];
239 239
240 chsc_area->header.key = PAGE_DEFAULT_KEY; 240 chsc_area->header.key = PAGE_DEFAULT_KEY >> 4;
241 while ((sch = chsc_get_next_subchannel(sch))) { 241 while ((sch = chsc_get_next_subchannel(sch))) {
242 spin_lock(sch->lock); 242 spin_lock(sch->lock);
243 private = sch->private; 243 private = sch->private;
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 126f240715a4..f736cdcf08ad 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -625,8 +625,8 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
625 /* 625 /*
626 * Get interrupt information from lowcore 626 * Get interrupt information from lowcore
627 */ 627 */
628 tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID; 628 tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id;
629 irb = (struct irb *) __LC_IRB; 629 irb = (struct irb *)&S390_lowcore.irb;
630 do { 630 do {
631 kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++; 631 kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
632 /* 632 /*
@@ -661,7 +661,7 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
661 * We don't do this for VM because a tpi drops the cpu 661 * We don't do this for VM because a tpi drops the cpu
662 * out of the sie which costs more cycles than it saves. 662 * out of the sie which costs more cycles than it saves.
663 */ 663 */
664 } while (!MACHINE_IS_VM && tpi (NULL) != 0); 664 } while (MACHINE_IS_LPAR && tpi(NULL) != 0);
665 irq_exit(); 665 irq_exit();
666 set_irq_regs(old_regs); 666 set_irq_regs(old_regs);
667} 667}
@@ -682,10 +682,10 @@ static int cio_tpi(void)
682 struct irb *irb; 682 struct irb *irb;
683 int irq_context; 683 int irq_context;
684 684
685 tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID; 685 tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id;
686 if (tpi(NULL) != 1) 686 if (tpi(NULL) != 1)
687 return 0; 687 return 0;
688 irb = (struct irb *) __LC_IRB; 688 irb = (struct irb *)&S390_lowcore.irb;
689 /* Store interrupt response block to lowcore. */ 689 /* Store interrupt response block to lowcore. */
690 if (tsch(tpi_info->schid, irb) != 0) 690 if (tsch(tpi_info->schid, irb) != 0)
691 /* Not status pending or not operational. */ 691 /* Not status pending or not operational. */
@@ -885,7 +885,7 @@ __clear_io_subchannel_easy(struct subchannel_id schid)
885 struct tpi_info ti; 885 struct tpi_info ti;
886 886
887 if (tpi(&ti)) { 887 if (tpi(&ti)) {
888 tsch(ti.schid, (struct irb *)__LC_IRB); 888 tsch(ti.schid, (struct irb *)&S390_lowcore.irb);
889 if (schid_equal(&ti.schid, &schid)) 889 if (schid_equal(&ti.schid, &schid))
890 return 0; 890 return 0;
891 } 891 }
@@ -1083,7 +1083,7 @@ int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo)
1083 struct subchannel_id schid; 1083 struct subchannel_id schid;
1084 struct schib schib; 1084 struct schib schib;
1085 1085
1086 schid = *(struct subchannel_id *)__LC_SUBCHANNEL_ID; 1086 schid = *(struct subchannel_id *)&S390_lowcore.subchannel_id;
1087 if (!schid.one) 1087 if (!schid.one)
1088 return -ENODEV; 1088 return -ENODEV;
1089 if (stsch(schid, &schib)) 1089 if (stsch(schid, &schib))
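
[Editor's note] This file, like ccwreq.c and chsc_sch.c above and device_fsm.c below, drops the hard-coded __LC_* lowcore offsets in favour of typed accesses through S390_lowcore, so the field offset is owned by the structure definition rather than by a hand-maintained constant. A user-space analogy of the equivalence being relied on; the structure, field names and offset value are invented for the demo and are not the real lowcore layout:

    #include <stdio.h>
    #include <stddef.h>

    /* Invented stand-in for a memory-mapped control block. */
    struct demo_lowcore {
        unsigned int subchannel_id;
        unsigned int subchannel_nr;
        unsigned char irb[64];
    };

    /* Old style: a byte offset that has to be kept in sync by hand. */
    #define DEMO_LC_IRB 8

    int main(void)
    {
        struct demo_lowcore lc = { 0 };
        void *by_offset = (char *)&lc + DEMO_LC_IRB;   /* cast-and-add */
        void *by_field  = &lc.irb;                     /* typed access */

        printf("offsetof(irb) = %zu, DEMO_LC_IRB = %d, same address: %s\n",
               offsetof(struct demo_lowcore, irb), DEMO_LC_IRB,
               by_offset == by_field ? "yes" : "no");
        return 0;
    }
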
diff --git a/drivers/s390/cio/crw.c b/drivers/s390/cio/crw.c
index d157665d0e76..425f741a280c 100644
--- a/drivers/s390/cio/crw.c
+++ b/drivers/s390/cio/crw.c
@@ -8,15 +8,16 @@
8 * Heiko Carstens <heiko.carstens@de.ibm.com>, 8 * Heiko Carstens <heiko.carstens@de.ibm.com>,
9 */ 9 */
10 10
11#include <linux/semaphore.h>
12#include <linux/mutex.h> 11#include <linux/mutex.h>
13#include <linux/kthread.h> 12#include <linux/kthread.h>
14#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/wait.h>
15#include <asm/crw.h> 15#include <asm/crw.h>
16 16
17static struct semaphore crw_semaphore;
18static DEFINE_MUTEX(crw_handler_mutex); 17static DEFINE_MUTEX(crw_handler_mutex);
19static crw_handler_t crw_handlers[NR_RSCS]; 18static crw_handler_t crw_handlers[NR_RSCS];
19static atomic_t crw_nr_req = ATOMIC_INIT(0);
20static DECLARE_WAIT_QUEUE_HEAD(crw_handler_wait_q);
20 21
21/** 22/**
22 * crw_register_handler() - register a channel report word handler 23 * crw_register_handler() - register a channel report word handler
@@ -59,12 +60,14 @@ void crw_unregister_handler(int rsc)
59static int crw_collect_info(void *unused) 60static int crw_collect_info(void *unused)
60{ 61{
61 struct crw crw[2]; 62 struct crw crw[2];
62 int ccode; 63 int ccode, signal;
63 unsigned int chain; 64 unsigned int chain;
64 int ignore;
65 65
66repeat: 66repeat:
67 ignore = down_interruptible(&crw_semaphore); 67 signal = wait_event_interruptible(crw_handler_wait_q,
68 atomic_read(&crw_nr_req) > 0);
69 if (unlikely(signal))
70 atomic_inc(&crw_nr_req);
68 chain = 0; 71 chain = 0;
69 while (1) { 72 while (1) {
70 crw_handler_t handler; 73 crw_handler_t handler;
@@ -122,25 +125,23 @@ repeat:
122 /* chain is always 0 or 1 here. */ 125 /* chain is always 0 or 1 here. */
123 chain = crw[chain].chn ? chain + 1 : 0; 126 chain = crw[chain].chn ? chain + 1 : 0;
124 } 127 }
128 if (atomic_dec_and_test(&crw_nr_req))
129 wake_up(&crw_handler_wait_q);
125 goto repeat; 130 goto repeat;
126 return 0; 131 return 0;
127} 132}
128 133
129void crw_handle_channel_report(void) 134void crw_handle_channel_report(void)
130{ 135{
131 up(&crw_semaphore); 136 atomic_inc(&crw_nr_req);
137 wake_up(&crw_handler_wait_q);
132} 138}
133 139
134/* 140void crw_wait_for_channel_report(void)
135 * Separate initcall needed for semaphore initialization since
136 * crw_handle_channel_report might be called before crw_machine_check_init.
137 */
138static int __init crw_init_semaphore(void)
139{ 141{
140 init_MUTEX_LOCKED(&crw_semaphore); 142 crw_handle_channel_report();
141 return 0; 143 wait_event(crw_handler_wait_q, atomic_read(&crw_nr_req) == 0);
142} 144}
143pure_initcall(crw_init_semaphore);
144 145
145/* 146/*
146 * Machine checks for the channel subsystem must be enabled 147 * Machine checks for the channel subsystem must be enabled
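
[Editor's note] The crw.c rework above replaces the semaphore, which needed its own pure_initcall just to be initialized in the locked state, with an atomic request counter plus a wait queue. That combination also gives the new crw_wait_for_channel_report() a way to wait until every posted report has been consumed. A condensed sketch of the pattern, using the same kernel primitives as the hunk; the function names are illustrative and this is not a buildable module on its own:

    #include <linux/wait.h>
    #include <asm/atomic.h>

    static atomic_t nr_req = ATOMIC_INIT(0);
    static DECLARE_WAIT_QUEUE_HEAD(wait_q);

    /* Producer (machine-check context): post one request, kick the thread. */
    static void post_request(void)
    {
        atomic_inc(&nr_req);
        wake_up(&wait_q);
    }

    /* Consumer (kthread loop body): sleep until work is posted, then drain. */
    static void consume_one(void)
    {
        if (wait_event_interruptible(wait_q, atomic_read(&nr_req) > 0))
            atomic_inc(&nr_req);    /* interrupted: keep the count balanced */
        /* ... collect and dispatch the channel reports here ... */
        if (atomic_dec_and_test(&nr_req))
            wake_up(&wait_q);       /* wake waiters in wait_until_drained() */
    }

    /* Synchronous caller: inject a request and wait for the count to drop. */
    static void wait_until_drained(void)
    {
        post_request();
        wait_event(wait_q, atomic_read(&nr_req) == 0);
    }
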
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 7679aee6fa14..2769da54f2b9 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -18,6 +18,7 @@
18#include <linux/list.h> 18#include <linux/list.h>
19#include <linux/reboot.h> 19#include <linux/reboot.h>
20#include <linux/suspend.h> 20#include <linux/suspend.h>
21#include <linux/proc_fs.h>
21#include <asm/isc.h> 22#include <asm/isc.h>
22#include <asm/crw.h> 23#include <asm/crw.h>
23 24
@@ -232,7 +233,7 @@ void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
232 if (!get_device(&sch->dev)) 233 if (!get_device(&sch->dev))
233 return; 234 return;
234 sch->todo = todo; 235 sch->todo = todo;
235 if (!queue_work(slow_path_wq, &sch->todo_work)) { 236 if (!queue_work(cio_work_q, &sch->todo_work)) {
236 /* Already queued, release workqueue ref. */ 237 /* Already queued, release workqueue ref. */
237 put_device(&sch->dev); 238 put_device(&sch->dev);
238 } 239 }
@@ -543,7 +544,7 @@ static void css_slow_path_func(struct work_struct *unused)
543} 544}
544 545
545static DECLARE_WORK(slow_path_work, css_slow_path_func); 546static DECLARE_WORK(slow_path_work, css_slow_path_func);
546struct workqueue_struct *slow_path_wq; 547struct workqueue_struct *cio_work_q;
547 548
548void css_schedule_eval(struct subchannel_id schid) 549void css_schedule_eval(struct subchannel_id schid)
549{ 550{
@@ -552,7 +553,7 @@ void css_schedule_eval(struct subchannel_id schid)
552 spin_lock_irqsave(&slow_subchannel_lock, flags); 553 spin_lock_irqsave(&slow_subchannel_lock, flags);
553 idset_sch_add(slow_subchannel_set, schid); 554 idset_sch_add(slow_subchannel_set, schid);
554 atomic_set(&css_eval_scheduled, 1); 555 atomic_set(&css_eval_scheduled, 1);
555 queue_work(slow_path_wq, &slow_path_work); 556 queue_work(cio_work_q, &slow_path_work);
556 spin_unlock_irqrestore(&slow_subchannel_lock, flags); 557 spin_unlock_irqrestore(&slow_subchannel_lock, flags);
557} 558}
558 559
@@ -563,7 +564,7 @@ void css_schedule_eval_all(void)
563 spin_lock_irqsave(&slow_subchannel_lock, flags); 564 spin_lock_irqsave(&slow_subchannel_lock, flags);
564 idset_fill(slow_subchannel_set); 565 idset_fill(slow_subchannel_set);
565 atomic_set(&css_eval_scheduled, 1); 566 atomic_set(&css_eval_scheduled, 1);
566 queue_work(slow_path_wq, &slow_path_work); 567 queue_work(cio_work_q, &slow_path_work);
567 spin_unlock_irqrestore(&slow_subchannel_lock, flags); 568 spin_unlock_irqrestore(&slow_subchannel_lock, flags);
568} 569}
569 570
@@ -594,14 +595,14 @@ void css_schedule_eval_all_unreg(void)
594 spin_lock_irqsave(&slow_subchannel_lock, flags); 595 spin_lock_irqsave(&slow_subchannel_lock, flags);
595 idset_add_set(slow_subchannel_set, unreg_set); 596 idset_add_set(slow_subchannel_set, unreg_set);
596 atomic_set(&css_eval_scheduled, 1); 597 atomic_set(&css_eval_scheduled, 1);
597 queue_work(slow_path_wq, &slow_path_work); 598 queue_work(cio_work_q, &slow_path_work);
598 spin_unlock_irqrestore(&slow_subchannel_lock, flags); 599 spin_unlock_irqrestore(&slow_subchannel_lock, flags);
599 idset_free(unreg_set); 600 idset_free(unreg_set);
600} 601}
601 602
602void css_wait_for_slow_path(void) 603void css_wait_for_slow_path(void)
603{ 604{
604 flush_workqueue(slow_path_wq); 605 flush_workqueue(cio_work_q);
605} 606}
606 607
607/* Schedule reprobing of all unregistered subchannels. */ 608/* Schedule reprobing of all unregistered subchannels. */
@@ -992,12 +993,21 @@ static int __init channel_subsystem_init(void)
992 ret = css_bus_init(); 993 ret = css_bus_init();
993 if (ret) 994 if (ret)
994 return ret; 995 return ret;
995 996 cio_work_q = create_singlethread_workqueue("cio");
997 if (!cio_work_q) {
998 ret = -ENOMEM;
999 goto out_bus;
1000 }
996 ret = io_subchannel_init(); 1001 ret = io_subchannel_init();
997 if (ret) 1002 if (ret)
998 css_bus_cleanup(); 1003 goto out_wq;
999 1004
1000 return ret; 1005 return ret;
1006out_wq:
1007 destroy_workqueue(cio_work_q);
1008out_bus:
1009 css_bus_cleanup();
1010 return ret;
1001} 1011}
1002subsys_initcall(channel_subsystem_init); 1012subsys_initcall(channel_subsystem_init);
1003 1013
@@ -1006,10 +1016,25 @@ static int css_settle(struct device_driver *drv, void *unused)
1006 struct css_driver *cssdrv = to_cssdriver(drv); 1016 struct css_driver *cssdrv = to_cssdriver(drv);
1007 1017
1008 if (cssdrv->settle) 1018 if (cssdrv->settle)
1009 cssdrv->settle(); 1019 return cssdrv->settle();
1010 return 0; 1020 return 0;
1011} 1021}
1012 1022
1023int css_complete_work(void)
1024{
1025 int ret;
1026
1027 /* Wait for the evaluation of subchannels to finish. */
1028 ret = wait_event_interruptible(css_eval_wq,
1029 atomic_read(&css_eval_scheduled) == 0);
1030 if (ret)
1031 return -EINTR;
1032 flush_workqueue(cio_work_q);
1033 /* Wait for the subchannel type specific initialization to finish */
1034 return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
1035}
1036
1037
1013/* 1038/*
1014 * Wait for the initialization of devices to finish, to make sure we are 1039 * Wait for the initialization of devices to finish, to make sure we are
1015 * done with our setup if the search for the root device starts. 1040 * done with our setup if the search for the root device starts.
@@ -1018,13 +1043,41 @@ static int __init channel_subsystem_init_sync(void)
1018{ 1043{
1019 /* Start initial subchannel evaluation. */ 1044 /* Start initial subchannel evaluation. */
1020 css_schedule_eval_all(); 1045 css_schedule_eval_all();
1021 /* Wait for the evaluation of subchannels to finish. */ 1046 css_complete_work();
1022 wait_event(css_eval_wq, atomic_read(&css_eval_scheduled) == 0); 1047 return 0;
1023 /* Wait for the subchannel type specific initialization to finish */
1024 return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
1025} 1048}
1026subsys_initcall_sync(channel_subsystem_init_sync); 1049subsys_initcall_sync(channel_subsystem_init_sync);
1027 1050
1051#ifdef CONFIG_PROC_FS
1052static ssize_t cio_settle_write(struct file *file, const char __user *buf,
1053 size_t count, loff_t *ppos)
1054{
1055 int ret;
1056
1057 /* Handle pending CRW's. */
1058 crw_wait_for_channel_report();
1059 ret = css_complete_work();
1060
1061 return ret ? ret : count;
1062}
1063
1064static const struct file_operations cio_settle_proc_fops = {
1065 .write = cio_settle_write,
1066};
1067
1068static int __init cio_settle_init(void)
1069{
1070 struct proc_dir_entry *entry;
1071
1072 entry = proc_create("cio_settle", S_IWUSR, NULL,
1073 &cio_settle_proc_fops);
1074 if (!entry)
1075 return -ENOMEM;
1076 return 0;
1077}
1078device_initcall(cio_settle_init);
1079#endif /*CONFIG_PROC_FS*/
1080
1028int sch_is_pseudo_sch(struct subchannel *sch) 1081int sch_is_pseudo_sch(struct subchannel *sch)
1029{ 1082{
1030 return sch == to_css(sch->dev.parent)->pseudo_subchannel; 1083 return sch == to_css(sch->dev.parent)->pseudo_subchannel;
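
[Editor's note] Taken together, the new /proc/cio_settle file and css_complete_work() give user space a synchronous way to wait until pending channel reports have been handled and all scheduled subchannel evaluation has finished: the write blocks in crw_wait_for_channel_report() and css_complete_work() and only then returns the byte count. A minimal user-space sketch of how a tool might use it; the path follows from the proc_create() call above, the rest is illustrative:

    #include <stdio.h>
    #include <unistd.h>
    #include <fcntl.h>

    int main(void)
    {
        int fd = open("/proc/cio_settle", O_WRONLY);

        if (fd < 0) {
            perror("open /proc/cio_settle");
            return 1;
        }
        /* The written data is ignored; the call blocks until the common I/O
         * layer has settled and then reports the full count as written. */
        if (write(fd, "1", 1) != 1)
            perror("write /proc/cio_settle");
        close(fd);
        return 0;
    }
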
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index fe84b92cde60..7e37886de231 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -95,7 +95,7 @@ struct css_driver {
95 int (*freeze)(struct subchannel *); 95 int (*freeze)(struct subchannel *);
96 int (*thaw) (struct subchannel *); 96 int (*thaw) (struct subchannel *);
97 int (*restore)(struct subchannel *); 97 int (*restore)(struct subchannel *);
98 void (*settle)(void); 98 int (*settle)(void);
99 const char *name; 99 const char *name;
100}; 100};
101 101
@@ -146,12 +146,13 @@ extern struct channel_subsystem *channel_subsystems[];
146/* Helper functions to build lists for the slow path. */ 146/* Helper functions to build lists for the slow path. */
147void css_schedule_eval(struct subchannel_id schid); 147void css_schedule_eval(struct subchannel_id schid);
148void css_schedule_eval_all(void); 148void css_schedule_eval_all(void);
149int css_complete_work(void);
149 150
150int sch_is_pseudo_sch(struct subchannel *); 151int sch_is_pseudo_sch(struct subchannel *);
151struct schib; 152struct schib;
152int css_sch_is_valid(struct schib *); 153int css_sch_is_valid(struct schib *);
153 154
154extern struct workqueue_struct *slow_path_wq; 155extern struct workqueue_struct *cio_work_q;
155void css_wait_for_slow_path(void); 156void css_wait_for_slow_path(void);
156void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo); 157void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo);
157#endif 158#endif
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index a6c7d5426fb2..c6abb75c4615 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -136,7 +136,6 @@ static int io_subchannel_sch_event(struct subchannel *, int);
136static int io_subchannel_chp_event(struct subchannel *, struct chp_link *, 136static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
137 int); 137 int);
138static void recovery_func(unsigned long data); 138static void recovery_func(unsigned long data);
139struct workqueue_struct *ccw_device_work;
140wait_queue_head_t ccw_device_init_wq; 139wait_queue_head_t ccw_device_init_wq;
141atomic_t ccw_device_init_count; 140atomic_t ccw_device_init_count;
142 141
@@ -159,11 +158,16 @@ static int io_subchannel_prepare(struct subchannel *sch)
159 return 0; 158 return 0;
160} 159}
161 160
162static void io_subchannel_settle(void) 161static int io_subchannel_settle(void)
163{ 162{
164 wait_event(ccw_device_init_wq, 163 int ret;
165 atomic_read(&ccw_device_init_count) == 0); 164
166 flush_workqueue(ccw_device_work); 165 ret = wait_event_interruptible(ccw_device_init_wq,
166 atomic_read(&ccw_device_init_count) == 0);
167 if (ret)
168 return -EINTR;
169 flush_workqueue(cio_work_q);
170 return 0;
167} 171}
168 172
169static struct css_driver io_subchannel_driver = { 173static struct css_driver io_subchannel_driver = {
@@ -188,27 +192,13 @@ int __init io_subchannel_init(void)
188 atomic_set(&ccw_device_init_count, 0); 192 atomic_set(&ccw_device_init_count, 0);
189 setup_timer(&recovery_timer, recovery_func, 0); 193 setup_timer(&recovery_timer, recovery_func, 0);
190 194
191 ccw_device_work = create_singlethread_workqueue("cio"); 195 ret = bus_register(&ccw_bus_type);
192 if (!ccw_device_work) 196 if (ret)
193 return -ENOMEM; 197 return ret;
194 slow_path_wq = create_singlethread_workqueue("kslowcrw");
195 if (!slow_path_wq) {
196 ret = -ENOMEM;
197 goto out_err;
198 }
199 if ((ret = bus_register (&ccw_bus_type)))
200 goto out_err;
201
202 ret = css_driver_register(&io_subchannel_driver); 198 ret = css_driver_register(&io_subchannel_driver);
203 if (ret) 199 if (ret)
204 goto out_err; 200 bus_unregister(&ccw_bus_type);
205 201
206 return 0;
207out_err:
208 if (ccw_device_work)
209 destroy_workqueue(ccw_device_work);
210 if (slow_path_wq)
211 destroy_workqueue(slow_path_wq);
212 return ret; 202 return ret;
213} 203}
214 204
@@ -1348,7 +1338,7 @@ static enum io_sch_action sch_get_action(struct subchannel *sch)
1348 /* Not operational. */ 1338 /* Not operational. */
1349 if (!cdev) 1339 if (!cdev)
1350 return IO_SCH_UNREG; 1340 return IO_SCH_UNREG;
1351 if (!ccw_device_notify(cdev, CIO_GONE)) 1341 if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
1352 return IO_SCH_UNREG; 1342 return IO_SCH_UNREG;
1353 return IO_SCH_ORPH_UNREG; 1343 return IO_SCH_ORPH_UNREG;
1354 } 1344 }
@@ -1356,12 +1346,12 @@ static enum io_sch_action sch_get_action(struct subchannel *sch)
1356 if (!cdev) 1346 if (!cdev)
1357 return IO_SCH_ATTACH; 1347 return IO_SCH_ATTACH;
1358 if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) { 1348 if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
1359 if (!ccw_device_notify(cdev, CIO_GONE)) 1349 if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
1360 return IO_SCH_UNREG_ATTACH; 1350 return IO_SCH_UNREG_ATTACH;
1361 return IO_SCH_ORPH_ATTACH; 1351 return IO_SCH_ORPH_ATTACH;
1362 } 1352 }
1363 if ((sch->schib.pmcw.pam & sch->opm) == 0) { 1353 if ((sch->schib.pmcw.pam & sch->opm) == 0) {
1364 if (!ccw_device_notify(cdev, CIO_NO_PATH)) 1354 if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK)
1365 return IO_SCH_UNREG; 1355 return IO_SCH_UNREG;
1366 return IO_SCH_DISC; 1356 return IO_SCH_DISC;
1367 } 1357 }
@@ -1410,6 +1400,12 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
1410 rc = 0; 1400 rc = 0;
1411 goto out_unlock; 1401 goto out_unlock;
1412 case IO_SCH_VERIFY: 1402 case IO_SCH_VERIFY:
1403 if (cdev->private->flags.resuming == 1) {
1404 if (cio_enable_subchannel(sch, (u32)(addr_t)sch)) {
1405 ccw_device_set_notoper(cdev);
1406 break;
1407 }
1408 }
1413 /* Trigger path verification. */ 1409 /* Trigger path verification. */
1414 io_subchannel_verify(sch); 1410 io_subchannel_verify(sch);
1415 rc = 0; 1411 rc = 0;
@@ -1448,7 +1444,8 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
1448 break; 1444 break;
1449 case IO_SCH_UNREG_ATTACH: 1445 case IO_SCH_UNREG_ATTACH:
1450 /* Unregister ccw device. */ 1446 /* Unregister ccw device. */
1451 ccw_device_unregister(cdev); 1447 if (!cdev->private->flags.resuming)
1448 ccw_device_unregister(cdev);
1452 break; 1449 break;
1453 default: 1450 default:
1454 break; 1451 break;
@@ -1457,7 +1454,8 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
1457 switch (action) { 1454 switch (action) {
1458 case IO_SCH_ORPH_UNREG: 1455 case IO_SCH_ORPH_UNREG:
1459 case IO_SCH_UNREG: 1456 case IO_SCH_UNREG:
1460 css_sch_device_unregister(sch); 1457 if (!cdev || !cdev->private->flags.resuming)
1458 css_sch_device_unregister(sch);
1461 break; 1459 break;
1462 case IO_SCH_ORPH_ATTACH: 1460 case IO_SCH_ORPH_ATTACH:
1463 case IO_SCH_UNREG_ATTACH: 1461 case IO_SCH_UNREG_ATTACH:
@@ -1779,26 +1777,42 @@ static void __ccw_device_pm_restore(struct ccw_device *cdev)
1779{ 1777{
1780 struct subchannel *sch = to_subchannel(cdev->dev.parent); 1778 struct subchannel *sch = to_subchannel(cdev->dev.parent);
1781 1779
1782 if (cio_is_console(sch->schid)) 1780 spin_lock_irq(sch->lock);
1783 goto out; 1781 if (cio_is_console(sch->schid)) {
1782 cio_enable_subchannel(sch, (u32)(addr_t)sch);
1783 goto out_unlock;
1784 }
1784 /* 1785 /*
1785 * While we were sleeping, devices may have gone or become 1786 * While we were sleeping, devices may have gone or become
1786 * available again. Kick re-detection. 1787 * available again. Kick re-detection.
1787 */ 1788 */
1788 spin_lock_irq(sch->lock);
1789 cdev->private->flags.resuming = 1; 1789 cdev->private->flags.resuming = 1;
1790 css_schedule_eval(sch->schid);
1791 spin_unlock_irq(sch->lock);
1792 css_complete_work();
1793
1794 /* cdev may have been moved to a different subchannel. */
1795 sch = to_subchannel(cdev->dev.parent);
1796 spin_lock_irq(sch->lock);
1797 if (cdev->private->state != DEV_STATE_ONLINE &&
1798 cdev->private->state != DEV_STATE_OFFLINE)
1799 goto out_unlock;
1800
1790 ccw_device_recognition(cdev); 1801 ccw_device_recognition(cdev);
1791 spin_unlock_irq(sch->lock); 1802 spin_unlock_irq(sch->lock);
1792 wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) || 1803 wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) ||
1793 cdev->private->state == DEV_STATE_DISCONNECTED); 1804 cdev->private->state == DEV_STATE_DISCONNECTED);
1794out: 1805 spin_lock_irq(sch->lock);
1806
1807out_unlock:
1795 cdev->private->flags.resuming = 0; 1808 cdev->private->flags.resuming = 0;
1809 spin_unlock_irq(sch->lock);
1796} 1810}
1797 1811
1798static int resume_handle_boxed(struct ccw_device *cdev) 1812static int resume_handle_boxed(struct ccw_device *cdev)
1799{ 1813{
1800 cdev->private->state = DEV_STATE_BOXED; 1814 cdev->private->state = DEV_STATE_BOXED;
1801 if (ccw_device_notify(cdev, CIO_BOXED)) 1815 if (ccw_device_notify(cdev, CIO_BOXED) == NOTIFY_OK)
1802 return 0; 1816 return 0;
1803 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); 1817 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
1804 return -ENODEV; 1818 return -ENODEV;
@@ -1807,7 +1821,7 @@ static int resume_handle_boxed(struct ccw_device *cdev)
1807static int resume_handle_disc(struct ccw_device *cdev) 1821static int resume_handle_disc(struct ccw_device *cdev)
1808{ 1822{
1809 cdev->private->state = DEV_STATE_DISCONNECTED; 1823 cdev->private->state = DEV_STATE_DISCONNECTED;
1810 if (ccw_device_notify(cdev, CIO_GONE)) 1824 if (ccw_device_notify(cdev, CIO_GONE) == NOTIFY_OK)
1811 return 0; 1825 return 0;
1812 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); 1826 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
1813 return -ENODEV; 1827 return -ENODEV;
@@ -1816,40 +1830,31 @@ static int resume_handle_disc(struct ccw_device *cdev)
1816static int ccw_device_pm_restore(struct device *dev) 1830static int ccw_device_pm_restore(struct device *dev)
1817{ 1831{
1818 struct ccw_device *cdev = to_ccwdev(dev); 1832 struct ccw_device *cdev = to_ccwdev(dev);
1819 struct subchannel *sch = to_subchannel(cdev->dev.parent); 1833 struct subchannel *sch;
1820 int ret = 0, cm_enabled; 1834 int ret = 0;
1821 1835
1822 __ccw_device_pm_restore(cdev); 1836 __ccw_device_pm_restore(cdev);
1837 sch = to_subchannel(cdev->dev.parent);
1823 spin_lock_irq(sch->lock); 1838 spin_lock_irq(sch->lock);
1824 if (cio_is_console(sch->schid)) { 1839 if (cio_is_console(sch->schid))
1825 cio_enable_subchannel(sch, (u32)(addr_t)sch);
1826 spin_unlock_irq(sch->lock);
1827 goto out_restore; 1840 goto out_restore;
1828 } 1841
1829 cdev->private->flags.donotify = 0;
1830 /* check recognition results */ 1842 /* check recognition results */
1831 switch (cdev->private->state) { 1843 switch (cdev->private->state) {
1832 case DEV_STATE_OFFLINE: 1844 case DEV_STATE_OFFLINE:
1845 case DEV_STATE_ONLINE:
1846 cdev->private->flags.donotify = 0;
1833 break; 1847 break;
1834 case DEV_STATE_BOXED: 1848 case DEV_STATE_BOXED:
1835 ret = resume_handle_boxed(cdev); 1849 ret = resume_handle_boxed(cdev);
1836 spin_unlock_irq(sch->lock);
1837 if (ret) 1850 if (ret)
1838 goto out; 1851 goto out_unlock;
1839 goto out_restore; 1852 goto out_restore;
1840 case DEV_STATE_DISCONNECTED:
1841 goto out_disc_unlock;
1842 default: 1853 default:
1843 goto out_unreg_unlock; 1854 ret = resume_handle_disc(cdev);
1844 } 1855 if (ret)
1845 /* check if the device id has changed */ 1856 goto out_unlock;
1846 if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) { 1857 goto out_restore;
1847 CIO_MSG_EVENT(0, "resume: sch 0.%x.%04x: failed (devno "
1848 "changed from %04x to %04x)\n",
1849 sch->schid.ssid, sch->schid.sch_no,
1850 cdev->private->dev_id.devno,
1851 sch->schib.pmcw.dev);
1852 goto out_unreg_unlock;
1853 } 1858 }
1854 /* check if the device type has changed */ 1859 /* check if the device type has changed */
1855 if (!ccw_device_test_sense_data(cdev)) { 1860 if (!ccw_device_test_sense_data(cdev)) {
@@ -1858,24 +1863,30 @@ static int ccw_device_pm_restore(struct device *dev)
1858 ret = -ENODEV; 1863 ret = -ENODEV;
1859 goto out_unlock; 1864 goto out_unlock;
1860 } 1865 }
1861 if (!cdev->online) { 1866 if (!cdev->online)
1862 ret = 0;
1863 goto out_unlock; 1867 goto out_unlock;
1864 }
1865 ret = ccw_device_online(cdev);
1866 if (ret)
1867 goto out_disc_unlock;
1868 1868
1869 cm_enabled = cdev->private->cmb != NULL; 1869 if (ccw_device_online(cdev)) {
1870 ret = resume_handle_disc(cdev);
1871 if (ret)
1872 goto out_unlock;
1873 goto out_restore;
1874 }
1870 spin_unlock_irq(sch->lock); 1875 spin_unlock_irq(sch->lock);
1871
1872 wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); 1876 wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
1873 if (cdev->private->state != DEV_STATE_ONLINE) { 1877 spin_lock_irq(sch->lock);
1874 spin_lock_irq(sch->lock); 1878
1875 goto out_disc_unlock; 1879 if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_BAD) {
1880 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
1881 ret = -ENODEV;
1882 goto out_unlock;
1876 } 1883 }
1877 if (cm_enabled) { 1884
1885 /* reenable cmf, if needed */
1886 if (cdev->private->cmb) {
1887 spin_unlock_irq(sch->lock);
1878 ret = ccw_set_cmf(cdev, 1); 1888 ret = ccw_set_cmf(cdev, 1);
1889 spin_lock_irq(sch->lock);
1879 if (ret) { 1890 if (ret) {
1880 CIO_MSG_EVENT(2, "resume: cdev 0.%x.%04x: cmf failed " 1891 CIO_MSG_EVENT(2, "resume: cdev 0.%x.%04x: cmf failed "
1881 "(rc=%d)\n", cdev->private->dev_id.ssid, 1892 "(rc=%d)\n", cdev->private->dev_id.ssid,
@@ -1885,21 +1896,11 @@ static int ccw_device_pm_restore(struct device *dev)
1885 } 1896 }
1886 1897
1887out_restore: 1898out_restore:
1899 spin_unlock_irq(sch->lock);
1888 if (cdev->online && cdev->drv && cdev->drv->restore) 1900 if (cdev->online && cdev->drv && cdev->drv->restore)
1889 ret = cdev->drv->restore(cdev); 1901 ret = cdev->drv->restore(cdev);
1890out:
1891 return ret; 1902 return ret;
1892 1903
1893out_disc_unlock:
1894 ret = resume_handle_disc(cdev);
1895 spin_unlock_irq(sch->lock);
1896 if (ret)
1897 return ret;
1898 goto out_restore;
1899
1900out_unreg_unlock:
1901 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
1902 ret = -ENODEV;
1903out_unlock: 1904out_unlock:
1904 spin_unlock_irq(sch->lock); 1905 spin_unlock_irq(sch->lock);
1905 return ret; 1906 return ret;
@@ -2028,7 +2029,7 @@ void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo)
2028 /* Get workqueue ref. */ 2029 /* Get workqueue ref. */
2029 if (!get_device(&cdev->dev)) 2030 if (!get_device(&cdev->dev))
2030 return; 2031 return;
2031 if (!queue_work(slow_path_wq, &cdev->private->todo_work)) { 2032 if (!queue_work(cio_work_q, &cdev->private->todo_work)) {
2032 /* Already queued, release workqueue ref. */ 2033 /* Already queued, release workqueue ref. */
2033 put_device(&cdev->dev); 2034 put_device(&cdev->dev);
2034 } 2035 }
@@ -2041,5 +2042,4 @@ EXPORT_SYMBOL(ccw_driver_register);
2041EXPORT_SYMBOL(ccw_driver_unregister); 2042EXPORT_SYMBOL(ccw_driver_unregister);
2042EXPORT_SYMBOL(get_ccwdev_by_busid); 2043EXPORT_SYMBOL(get_ccwdev_by_busid);
2043EXPORT_SYMBOL(ccw_bus_type); 2044EXPORT_SYMBOL(ccw_bus_type);
2044EXPORT_SYMBOL(ccw_device_work);
2045EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id); 2045EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id);
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index bcfe13e42638..379de2d1ec49 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -4,7 +4,7 @@
4#include <asm/ccwdev.h> 4#include <asm/ccwdev.h>
5#include <asm/atomic.h> 5#include <asm/atomic.h>
6#include <linux/wait.h> 6#include <linux/wait.h>
7 7#include <linux/notifier.h>
8#include "io_sch.h" 8#include "io_sch.h"
9 9
10/* 10/*
@@ -71,7 +71,6 @@ dev_fsm_final_state(struct ccw_device *cdev)
71 cdev->private->state == DEV_STATE_BOXED); 71 cdev->private->state == DEV_STATE_BOXED);
72} 72}
73 73
74extern struct workqueue_struct *ccw_device_work;
75extern wait_queue_head_t ccw_device_init_wq; 74extern wait_queue_head_t ccw_device_init_wq;
76extern atomic_t ccw_device_init_count; 75extern atomic_t ccw_device_init_count;
77int __init io_subchannel_init(void); 76int __init io_subchannel_init(void);
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index ae760658a131..c56ab94612f9 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -313,21 +313,43 @@ ccw_device_sense_id_done(struct ccw_device *cdev, int err)
313 } 313 }
314} 314}
315 315
316/**
317 * ccw_device_notify() - inform the device's driver about an event
 318 * @cdev: device for which an event occurred
319 * @event: event that occurred
320 *
321 * Returns:
322 * -%EINVAL if the device is offline or has no driver.
323 * -%EOPNOTSUPP if the device's driver has no notifier registered.
324 * %NOTIFY_OK if the driver wants to keep the device.
325 * %NOTIFY_BAD if the driver doesn't want to keep the device.
326 */
316int ccw_device_notify(struct ccw_device *cdev, int event) 327int ccw_device_notify(struct ccw_device *cdev, int event)
317{ 328{
329 int ret = -EINVAL;
330
318 if (!cdev->drv) 331 if (!cdev->drv)
319 return 0; 332 goto out;
320 if (!cdev->online) 333 if (!cdev->online)
321 return 0; 334 goto out;
322 CIO_MSG_EVENT(2, "notify called for 0.%x.%04x, event=%d\n", 335 CIO_MSG_EVENT(2, "notify called for 0.%x.%04x, event=%d\n",
323 cdev->private->dev_id.ssid, cdev->private->dev_id.devno, 336 cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
324 event); 337 event);
325 return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0; 338 if (!cdev->drv->notify) {
339 ret = -EOPNOTSUPP;
340 goto out;
341 }
342 if (cdev->drv->notify(cdev, event))
343 ret = NOTIFY_OK;
344 else
345 ret = NOTIFY_BAD;
346out:
347 return ret;
326} 348}
327 349
328static void ccw_device_oper_notify(struct ccw_device *cdev) 350static void ccw_device_oper_notify(struct ccw_device *cdev)
329{ 351{
330 if (ccw_device_notify(cdev, CIO_OPER)) { 352 if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_OK) {
331 /* Reenable channel measurements, if needed. */ 353 /* Reenable channel measurements, if needed. */
332 ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF); 354 ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF);
333 return; 355 return;
@@ -361,14 +383,15 @@ ccw_device_done(struct ccw_device *cdev, int state)
361 case DEV_STATE_BOXED: 383 case DEV_STATE_BOXED:
362 CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n", 384 CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n",
363 cdev->private->dev_id.devno, sch->schid.sch_no); 385 cdev->private->dev_id.devno, sch->schid.sch_no);
364 if (cdev->online && !ccw_device_notify(cdev, CIO_BOXED)) 386 if (cdev->online &&
387 ccw_device_notify(cdev, CIO_BOXED) != NOTIFY_OK)
365 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); 388 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
366 cdev->private->flags.donotify = 0; 389 cdev->private->flags.donotify = 0;
367 break; 390 break;
368 case DEV_STATE_NOT_OPER: 391 case DEV_STATE_NOT_OPER:
369 CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n", 392 CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n",
370 cdev->private->dev_id.devno, sch->schid.sch_no); 393 cdev->private->dev_id.devno, sch->schid.sch_no);
371 if (!ccw_device_notify(cdev, CIO_GONE)) 394 if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
372 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); 395 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
373 else 396 else
374 ccw_device_set_disconnected(cdev); 397 ccw_device_set_disconnected(cdev);
@@ -378,7 +401,7 @@ ccw_device_done(struct ccw_device *cdev, int state)
378 CIO_MSG_EVENT(0, "Disconnected device %04x on subchannel " 401 CIO_MSG_EVENT(0, "Disconnected device %04x on subchannel "
379 "%04x\n", cdev->private->dev_id.devno, 402 "%04x\n", cdev->private->dev_id.devno,
380 sch->schid.sch_no); 403 sch->schid.sch_no);
381 if (!ccw_device_notify(cdev, CIO_NO_PATH)) 404 if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK)
382 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); 405 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
383 else 406 else
384 ccw_device_set_disconnected(cdev); 407 ccw_device_set_disconnected(cdev);
@@ -586,7 +609,7 @@ ccw_device_offline(struct ccw_device *cdev)
586static void ccw_device_generic_notoper(struct ccw_device *cdev, 609static void ccw_device_generic_notoper(struct ccw_device *cdev,
587 enum dev_event dev_event) 610 enum dev_event dev_event)
588{ 611{
589 if (!ccw_device_notify(cdev, CIO_GONE)) 612 if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
590 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); 613 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
591 else 614 else
592 ccw_device_set_disconnected(cdev); 615 ccw_device_set_disconnected(cdev);
@@ -667,7 +690,7 @@ ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
667 struct irb *irb; 690 struct irb *irb;
668 int is_cmd; 691 int is_cmd;
669 692
670 irb = (struct irb *) __LC_IRB; 693 irb = (struct irb *)&S390_lowcore.irb;
671 is_cmd = !scsw_is_tm(&irb->scsw); 694 is_cmd = !scsw_is_tm(&irb->scsw);
672 /* Check for unsolicited interrupt. */ 695 /* Check for unsolicited interrupt. */
673 if (!scsw_is_solicited(&irb->scsw)) { 696 if (!scsw_is_solicited(&irb->scsw)) {
@@ -732,7 +755,7 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
732{ 755{
733 struct irb *irb; 756 struct irb *irb;
734 757
735 irb = (struct irb *) __LC_IRB; 758 irb = (struct irb *)&S390_lowcore.irb;
736 /* Check for unsolicited interrupt. */ 759 /* Check for unsolicited interrupt. */
737 if (scsw_stctl(&irb->scsw) == 760 if (scsw_stctl(&irb->scsw) ==
738 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { 761 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
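
[Editor's note] With the return convention documented in the new kernel-doc above, callers can distinguish "the driver rejected the device" (NOTIFY_BAD) from "nobody was able to handle the event" (-EINVAL, -EOPNOTSUPP), which is what the == NOTIFY_OK and != NOTIFY_OK checks throughout this patch rely on. A sketch of a caller that spells the cases out; the surrounding function is hypothetical, the callees are the ones used in this file:

    /* Hypothetical caller inside drivers/s390/cio; not part of the patch. */
    static void example_handle_gone(struct ccw_device *cdev)
    {
        switch (ccw_device_notify(cdev, CIO_GONE)) {
        case NOTIFY_OK:
            /* The driver wants to keep the device: mark it disconnected. */
            ccw_device_set_disconnected(cdev);
            break;
        case NOTIFY_BAD:        /* driver gave the device up */
        case -EINVAL:           /* device offline or no driver bound */
        case -EOPNOTSUPP:       /* driver has no notify callback */
        default:
            ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
            break;
        }
    }
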
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 44f2f6a97f33..48aa0647432b 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -208,18 +208,27 @@ struct qdio_dev_perf_stat {
208 unsigned int eqbs_partial; 208 unsigned int eqbs_partial;
209 unsigned int sqbs; 209 unsigned int sqbs;
210 unsigned int sqbs_partial; 210 unsigned int sqbs_partial;
211} ____cacheline_aligned;
212
213struct qdio_queue_perf_stat {
214 /*
215 * Sorted into order-2 buckets: 1, 2-3, 4-7, ... 64-127, 128.
216 * Since max. 127 SBALs are scanned reuse entry for 128 as queue full
217 * aka 127 SBALs found.
218 */
219 unsigned int nr_sbals[8];
220 unsigned int nr_sbal_error;
221 unsigned int nr_sbal_nop;
222 unsigned int nr_sbal_total;
211}; 223};
212 224
213struct qdio_input_q { 225struct qdio_input_q {
214 /* input buffer acknowledgement flag */ 226 /* input buffer acknowledgement flag */
215 int polling; 227 int polling;
216
217 /* first ACK'ed buffer */ 228 /* first ACK'ed buffer */
218 int ack_start; 229 int ack_start;
219
220 /* how much sbals are acknowledged with qebsm */ 230 /* how much sbals are acknowledged with qebsm */
221 int ack_count; 231 int ack_count;
222
223 /* last time of noticing incoming data */ 232 /* last time of noticing incoming data */
224 u64 timestamp; 233 u64 timestamp;
225}; 234};
@@ -227,40 +236,27 @@ struct qdio_input_q {
227struct qdio_output_q { 236struct qdio_output_q {
228 /* PCIs are enabled for the queue */ 237 /* PCIs are enabled for the queue */
229 int pci_out_enabled; 238 int pci_out_enabled;
230
231 /* IQDIO: output multiple buffers (enhanced SIGA) */ 239 /* IQDIO: output multiple buffers (enhanced SIGA) */
232 int use_enh_siga; 240 int use_enh_siga;
233
234 /* timer to check for more outbound work */ 241 /* timer to check for more outbound work */
235 struct timer_list timer; 242 struct timer_list timer;
236}; 243};
237 244
245/*
 246 * Note on cache alignment: slsb and write-mostly data are grouped first;
 247 * sbal[] is read-only, starts on a new cacheline, then read-mostly fields.
248 */
238struct qdio_q { 249struct qdio_q {
239 struct slsb slsb; 250 struct slsb slsb;
251
240 union { 252 union {
241 struct qdio_input_q in; 253 struct qdio_input_q in;
242 struct qdio_output_q out; 254 struct qdio_output_q out;
243 } u; 255 } u;
244 256
245 /* queue number */
246 int nr;
247
248 /* bitmask of queue number */
249 int mask;
250
251 /* input or output queue */
252 int is_input_q;
253
254 /* list of thinint input queues */
255 struct list_head entry;
256
257 /* upper-layer program handler */
258 qdio_handler_t (*handler);
259
260 /* 257 /*
261 * inbound: next buffer the program should check for 258 * inbound: next buffer the program should check for
262 * outbound: next buffer to check for having been processed 259 * outbound: next buffer to check if adapter processed it
263 * by the card
264 */ 260 */
265 int first_to_check; 261 int first_to_check;
266 262
@@ -273,16 +269,32 @@ struct qdio_q {
273 /* number of buffers in use by the adapter */ 269 /* number of buffers in use by the adapter */
274 atomic_t nr_buf_used; 270 atomic_t nr_buf_used;
275 271
276 struct qdio_irq *irq_ptr;
277 struct dentry *debugfs_q;
278 struct tasklet_struct tasklet;
279
280 /* error condition during a data transfer */ 272 /* error condition during a data transfer */
281 unsigned int qdio_error; 273 unsigned int qdio_error;
282 274
283 struct sl *sl; 275 struct tasklet_struct tasklet;
284 struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q]; 276 struct qdio_queue_perf_stat q_stats;
277
278 struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q] ____cacheline_aligned;
279
280 /* queue number */
281 int nr;
282
283 /* bitmask of queue number */
284 int mask;
285
286 /* input or output queue */
287 int is_input_q;
288
289 /* list of thinint input queues */
290 struct list_head entry;
285 291
292 /* upper-layer program handler */
293 qdio_handler_t (*handler);
294
295 struct dentry *debugfs_q;
296 struct qdio_irq *irq_ptr;
297 struct sl *sl;
286 /* 298 /*
287 * Warning: Leave this member at the end so it won't be cleared in 299 * Warning: Leave this member at the end so it won't be cleared in
288 * qdio_fill_qs. A page is allocated under this pointer and used for 300 * qdio_fill_qs. A page is allocated under this pointer and used for
@@ -317,12 +329,8 @@ struct qdio_irq {
317 struct qdio_ssqd_desc ssqd_desc; 329 struct qdio_ssqd_desc ssqd_desc;
318 void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *); 330 void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *);
319 331
320 struct qdio_dev_perf_stat perf_stat;
321 int perf_stat_enabled; 332 int perf_stat_enabled;
322 /* 333
323 * Warning: Leave these members together at the end so they won't be
324 * cleared in qdio_setup_irq.
325 */
326 struct qdr *qdr; 334 struct qdr *qdr;
327 unsigned long chsc_page; 335 unsigned long chsc_page;
328 336
@@ -331,6 +339,7 @@ struct qdio_irq {
331 339
332 debug_info_t *debug_area; 340 debug_info_t *debug_area;
333 struct mutex setup_mutex; 341 struct mutex setup_mutex;
342 struct qdio_dev_perf_stat perf_stat;
334}; 343};
335 344
336/* helper functions */ 345/* helper functions */
@@ -341,9 +350,20 @@ struct qdio_irq {
341 (irq->qib.qfmt == QDIO_IQDIO_QFMT || \ 350 (irq->qib.qfmt == QDIO_IQDIO_QFMT || \
342 css_general_characteristics.aif_osa) 351 css_general_characteristics.aif_osa)
343 352
344#define qperf(qdev,attr) qdev->perf_stat.attr 353#define qperf(__qdev, __attr) ((__qdev)->perf_stat.(__attr))
345#define qperf_inc(q,attr) if (q->irq_ptr->perf_stat_enabled) \ 354
346 q->irq_ptr->perf_stat.attr++ 355#define qperf_inc(__q, __attr) \
356({ \
357 struct qdio_irq *qdev = (__q)->irq_ptr; \
358 if (qdev->perf_stat_enabled) \
359 (qdev->perf_stat.__attr)++; \
360})
361
362static inline void account_sbals_error(struct qdio_q *q, int count)
363{
364 q->q_stats.nr_sbal_error += count;
365 q->q_stats.nr_sbal_total += count;
366}
347 367
348/* the highest iqdio queue is used for multicast */ 368/* the highest iqdio queue is used for multicast */
349static inline int multicast_outbound(struct qdio_q *q) 369static inline int multicast_outbound(struct qdio_q *q)
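
[Editor's note] The new struct qdio_queue_perf_stat above collapses the number of SBALs found per scan into order-2 buckets: index 0 for 1, index 1 for 2-3, up to index 6 for 64 and more, with index 7 reserved for the queue-full case of all 127 scannable SBALs (account_sbals() in qdio_main.c further down does the counting). A small user-space check of that mapping; the 127 mask value is assumed from QDIO_MAX_BUFFERS_PER_Q being 128:

    #include <stdio.h>

    #define MAX_BUFFERS_MASK 127    /* assumed: QDIO_MAX_BUFFERS_PER_Q - 1 */

    /* Same bucketing as account_sbals(): floor(log2(count)), with the
     * queue-full count pinned to the last slot. */
    static int bucket(int count)
    {
        int pos = 0;

        if (count == MAX_BUFFERS_MASK)
            return 7;
        while (count >>= 1)
            pos++;
        return pos;
    }

    int main(void)
    {
        int samples[] = { 1, 3, 4, 20, 64, 126, 127 };
        unsigned int i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
            printf("count %3d -> bucket %d\n", samples[i], bucket(samples[i]));
        return 0;
    }
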
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index f49761ff9a00..c94eb2a0fa2e 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -60,7 +60,7 @@ static int qstat_show(struct seq_file *m, void *v)
60 seq_printf(m, "ftc: %d last_move: %d\n", q->first_to_check, q->last_move); 60 seq_printf(m, "ftc: %d last_move: %d\n", q->first_to_check, q->last_move);
61 seq_printf(m, "polling: %d ack start: %d ack count: %d\n", 61 seq_printf(m, "polling: %d ack start: %d ack count: %d\n",
62 q->u.in.polling, q->u.in.ack_start, q->u.in.ack_count); 62 q->u.in.polling, q->u.in.ack_start, q->u.in.ack_count);
63 seq_printf(m, "slsb buffer states:\n"); 63 seq_printf(m, "SBAL states:\n");
64 seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n"); 64 seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n");
65 65
66 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) { 66 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
@@ -97,6 +97,20 @@ static int qstat_show(struct seq_file *m, void *v)
97 } 97 }
98 seq_printf(m, "\n"); 98 seq_printf(m, "\n");
99 seq_printf(m, "|64 |72 |80 |88 |96 |104 |112 | 127|\n"); 99 seq_printf(m, "|64 |72 |80 |88 |96 |104 |112 | 127|\n");
100
101 seq_printf(m, "\nSBAL statistics:");
102 if (!q->irq_ptr->perf_stat_enabled) {
103 seq_printf(m, " disabled\n");
104 return 0;
105 }
106
107 seq_printf(m, "\n1 2.. 4.. 8.. "
108 "16.. 32.. 64.. 127\n");
109 for (i = 0; i < ARRAY_SIZE(q->q_stats.nr_sbals); i++)
110 seq_printf(m, "%-10u ", q->q_stats.nr_sbals[i]);
111 seq_printf(m, "\nError NOP Total\n%-10u %-10u %-10u\n\n",
112 q->q_stats.nr_sbal_error, q->q_stats.nr_sbal_nop,
113 q->q_stats.nr_sbal_total);
100 return 0; 114 return 0;
101} 115}
102 116
@@ -181,9 +195,10 @@ static ssize_t qperf_seq_write(struct file *file, const char __user *ubuf,
181{ 195{
182 struct seq_file *seq = file->private_data; 196 struct seq_file *seq = file->private_data;
183 struct qdio_irq *irq_ptr = seq->private; 197 struct qdio_irq *irq_ptr = seq->private;
198 struct qdio_q *q;
184 unsigned long val; 199 unsigned long val;
185 char buf[8]; 200 char buf[8];
186 int ret; 201 int ret, i;
187 202
188 if (!irq_ptr) 203 if (!irq_ptr)
189 return 0; 204 return 0;
@@ -201,6 +216,10 @@ static ssize_t qperf_seq_write(struct file *file, const char __user *ubuf,
201 case 0: 216 case 0:
202 irq_ptr->perf_stat_enabled = 0; 217 irq_ptr->perf_stat_enabled = 0;
203 memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat)); 218 memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat));
219 for_each_input_queue(irq_ptr, q, i)
220 memset(&q->q_stats, 0, sizeof(q->q_stats));
221 for_each_output_queue(irq_ptr, q, i)
222 memset(&q->q_stats, 0, sizeof(q->q_stats));
204 break; 223 break;
205 case 1: 224 case 1:
206 irq_ptr->perf_stat_enabled = 1; 225 irq_ptr->perf_stat_enabled = 1;
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 62b654af9237..232ef047ba34 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -392,6 +392,20 @@ static inline void qdio_stop_polling(struct qdio_q *q)
392 set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT); 392 set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
393} 393}
394 394
395static inline void account_sbals(struct qdio_q *q, int count)
396{
397 int pos = 0;
398
399 q->q_stats.nr_sbal_total += count;
400 if (count == QDIO_MAX_BUFFERS_MASK) {
401 q->q_stats.nr_sbals[7]++;
402 return;
403 }
404 while (count >>= 1)
405 pos++;
406 q->q_stats.nr_sbals[pos]++;
407}
408
395static void announce_buffer_error(struct qdio_q *q, int count) 409static void announce_buffer_error(struct qdio_q *q, int count)
396{ 410{
397 q->qdio_error |= QDIO_ERROR_SLSB_STATE; 411 q->qdio_error |= QDIO_ERROR_SLSB_STATE;
@@ -487,16 +501,22 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
487 q->first_to_check = add_buf(q->first_to_check, count); 501 q->first_to_check = add_buf(q->first_to_check, count);
488 if (atomic_sub(count, &q->nr_buf_used) == 0) 502 if (atomic_sub(count, &q->nr_buf_used) == 0)
489 qperf_inc(q, inbound_queue_full); 503 qperf_inc(q, inbound_queue_full);
504 if (q->irq_ptr->perf_stat_enabled)
505 account_sbals(q, count);
490 break; 506 break;
491 case SLSB_P_INPUT_ERROR: 507 case SLSB_P_INPUT_ERROR:
492 announce_buffer_error(q, count); 508 announce_buffer_error(q, count);
493 /* process the buffer, the upper layer will take care of it */ 509 /* process the buffer, the upper layer will take care of it */
494 q->first_to_check = add_buf(q->first_to_check, count); 510 q->first_to_check = add_buf(q->first_to_check, count);
495 atomic_sub(count, &q->nr_buf_used); 511 atomic_sub(count, &q->nr_buf_used);
512 if (q->irq_ptr->perf_stat_enabled)
513 account_sbals_error(q, count);
496 break; 514 break;
497 case SLSB_CU_INPUT_EMPTY: 515 case SLSB_CU_INPUT_EMPTY:
498 case SLSB_P_INPUT_NOT_INIT: 516 case SLSB_P_INPUT_NOT_INIT:
499 case SLSB_P_INPUT_ACK: 517 case SLSB_P_INPUT_ACK:
518 if (q->irq_ptr->perf_stat_enabled)
519 q->q_stats.nr_sbal_nop++;
500 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop"); 520 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
501 break; 521 break;
502 default: 522 default:
@@ -514,7 +534,7 @@ static int qdio_inbound_q_moved(struct qdio_q *q)
514 534
515 if ((bufnr != q->last_move) || q->qdio_error) { 535 if ((bufnr != q->last_move) || q->qdio_error) {
516 q->last_move = bufnr; 536 q->last_move = bufnr;
517 if (!is_thinint_irq(q->irq_ptr) && !MACHINE_IS_VM) 537 if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
518 q->u.in.timestamp = get_usecs(); 538 q->u.in.timestamp = get_usecs();
519 return 1; 539 return 1;
520 } else 540 } else
@@ -643,15 +663,21 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
643 663
644 atomic_sub(count, &q->nr_buf_used); 664 atomic_sub(count, &q->nr_buf_used);
645 q->first_to_check = add_buf(q->first_to_check, count); 665 q->first_to_check = add_buf(q->first_to_check, count);
666 if (q->irq_ptr->perf_stat_enabled)
667 account_sbals(q, count);
646 break; 668 break;
647 case SLSB_P_OUTPUT_ERROR: 669 case SLSB_P_OUTPUT_ERROR:
648 announce_buffer_error(q, count); 670 announce_buffer_error(q, count);
649 /* process the buffer, the upper layer will take care of it */ 671 /* process the buffer, the upper layer will take care of it */
650 q->first_to_check = add_buf(q->first_to_check, count); 672 q->first_to_check = add_buf(q->first_to_check, count);
651 atomic_sub(count, &q->nr_buf_used); 673 atomic_sub(count, &q->nr_buf_used);
674 if (q->irq_ptr->perf_stat_enabled)
675 account_sbals_error(q, count);
652 break; 676 break;
653 case SLSB_CU_OUTPUT_PRIMED: 677 case SLSB_CU_OUTPUT_PRIMED:
654 /* the adapter has not fetched the output yet */ 678 /* the adapter has not fetched the output yet */
679 if (q->irq_ptr->perf_stat_enabled)
680 q->q_stats.nr_sbal_nop++;
655 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr); 681 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr);
656 break; 682 break;
657 case SLSB_P_OUTPUT_NOT_INIT: 683 case SLSB_P_OUTPUT_NOT_INIT:
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 8c2dea5fa2b4..7f4a75465140 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -333,10 +333,10 @@ static void __qdio_allocate_fill_qdr(struct qdio_irq *irq_ptr,
333 irq_ptr->qdr->qdf0[i + nr].slsba = 333 irq_ptr->qdr->qdf0[i + nr].slsba =
334 (unsigned long)&irq_ptr_qs[i]->slsb.val[0]; 334 (unsigned long)&irq_ptr_qs[i]->slsb.val[0];
335 335
336 irq_ptr->qdr->qdf0[i + nr].akey = PAGE_DEFAULT_KEY; 336 irq_ptr->qdr->qdf0[i + nr].akey = PAGE_DEFAULT_KEY >> 4;
337 irq_ptr->qdr->qdf0[i + nr].bkey = PAGE_DEFAULT_KEY; 337 irq_ptr->qdr->qdf0[i + nr].bkey = PAGE_DEFAULT_KEY >> 4;
338 irq_ptr->qdr->qdf0[i + nr].ckey = PAGE_DEFAULT_KEY; 338 irq_ptr->qdr->qdf0[i + nr].ckey = PAGE_DEFAULT_KEY >> 4;
339 irq_ptr->qdr->qdf0[i + nr].dkey = PAGE_DEFAULT_KEY; 339 irq_ptr->qdr->qdf0[i + nr].dkey = PAGE_DEFAULT_KEY >> 4;
340} 340}
341 341
342static void setup_qdr(struct qdio_irq *irq_ptr, 342static void setup_qdr(struct qdio_irq *irq_ptr,
@@ -350,7 +350,7 @@ static void setup_qdr(struct qdio_irq *irq_ptr,
350 irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */ 350 irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */
351 irq_ptr->qdr->oqdsz = sizeof(struct qdesfmt0) / 4; 351 irq_ptr->qdr->oqdsz = sizeof(struct qdesfmt0) / 4;
352 irq_ptr->qdr->qiba = (unsigned long)&irq_ptr->qib; 352 irq_ptr->qdr->qiba = (unsigned long)&irq_ptr->qib;
353 irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY; 353 irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY >> 4;
354 354
355 for (i = 0; i < qdio_init->no_input_qs; i++) 355 for (i = 0; i < qdio_init->no_input_qs; i++)
356 __qdio_allocate_fill_qdr(irq_ptr, irq_ptr->input_qs, i, 0); 356 __qdio_allocate_fill_qdr(irq_ptr, irq_ptr->input_qs, i, 0);
@@ -382,7 +382,15 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
382 struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data; 382 struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data;
383 int rc; 383 int rc;
384 384
385 memset(irq_ptr, 0, ((char *)&irq_ptr->qdr) - ((char *)irq_ptr)); 385 memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib));
386 memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag));
387 memset(&irq_ptr->ccw, 0, sizeof(irq_ptr->ccw));
388 memset(&irq_ptr->ssqd_desc, 0, sizeof(irq_ptr->ssqd_desc));
389 memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat));
390
391 irq_ptr->debugfs_dev = irq_ptr->debugfs_perf = NULL;
392 irq_ptr->sch_token = irq_ptr->state = irq_ptr->perf_stat_enabled = 0;
393
386 /* wipes qib.ac, required by ar7063 */ 394 /* wipes qib.ac, required by ar7063 */
387 memset(irq_ptr->qdr, 0, sizeof(struct qdr)); 395 memset(irq_ptr->qdr, 0, sizeof(struct qdr));
388 396
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 091d904d3182..9942c1031b25 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -198,8 +198,8 @@ static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
198 .code = 0x0021, 198 .code = 0x0021,
199 }; 199 };
200 scssc_area->operation_code = 0; 200 scssc_area->operation_code = 0;
201 scssc_area->ks = PAGE_DEFAULT_KEY; 201 scssc_area->ks = PAGE_DEFAULT_KEY >> 4;
202 scssc_area->kc = PAGE_DEFAULT_KEY; 202 scssc_area->kc = PAGE_DEFAULT_KEY >> 4;
203 scssc_area->isc = QDIO_AIRQ_ISC; 203 scssc_area->isc = QDIO_AIRQ_ISC;
204 scssc_area->schid = irq_ptr->schid; 204 scssc_area->schid = irq_ptr->schid;
205 205
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index c68be24e27d9..ba50fe02e572 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -33,6 +33,7 @@
33#include <linux/miscdevice.h> 33#include <linux/miscdevice.h>
34#include <linux/fs.h> 34#include <linux/fs.h>
35#include <linux/proc_fs.h> 35#include <linux/proc_fs.h>
36#include <linux/seq_file.h>
36#include <linux/compat.h> 37#include <linux/compat.h>
37#include <linux/smp_lock.h> 38#include <linux/smp_lock.h>
38#include <asm/atomic.h> 39#include <asm/atomic.h>
@@ -912,126 +913,105 @@ static struct miscdevice zcrypt_misc_device = {
912 */ 913 */
913static struct proc_dir_entry *zcrypt_entry; 914static struct proc_dir_entry *zcrypt_entry;
914 915
915static int sprintcl(unsigned char *outaddr, unsigned char *addr, 916static void sprintcl(struct seq_file *m, unsigned char *addr, unsigned int len)
916 unsigned int len)
917{ 917{
918 int hl, i; 918 int i;
919 919
920 hl = 0;
921 for (i = 0; i < len; i++) 920 for (i = 0; i < len; i++)
922 hl += sprintf(outaddr+hl, "%01x", (unsigned int) addr[i]); 921 seq_printf(m, "%01x", (unsigned int) addr[i]);
923 hl += sprintf(outaddr+hl, " "); 922 seq_putc(m, ' ');
924 return hl;
925} 923}
926 924
927static int sprintrw(unsigned char *outaddr, unsigned char *addr, 925static void sprintrw(struct seq_file *m, unsigned char *addr, unsigned int len)
928 unsigned int len)
929{ 926{
930 int hl, inl, c, cx; 927 int inl, c, cx;
931 928
932 hl = sprintf(outaddr, " "); 929 seq_printf(m, " ");
933 inl = 0; 930 inl = 0;
934 for (c = 0; c < (len / 16); c++) { 931 for (c = 0; c < (len / 16); c++) {
935 hl += sprintcl(outaddr+hl, addr+inl, 16); 932 sprintcl(m, addr+inl, 16);
936 inl += 16; 933 inl += 16;
937 } 934 }
938 cx = len%16; 935 cx = len%16;
939 if (cx) { 936 if (cx) {
940 hl += sprintcl(outaddr+hl, addr+inl, cx); 937 sprintcl(m, addr+inl, cx);
941 inl += cx; 938 inl += cx;
942 } 939 }
943 hl += sprintf(outaddr+hl, "\n"); 940 seq_putc(m, '\n');
944 return hl;
945} 941}
946 942
947static int sprinthx(unsigned char *title, unsigned char *outaddr, 943static void sprinthx(unsigned char *title, struct seq_file *m,
948 unsigned char *addr, unsigned int len) 944 unsigned char *addr, unsigned int len)
949{ 945{
950 int hl, inl, r, rx; 946 int inl, r, rx;
951 947
952 hl = sprintf(outaddr, "\n%s\n", title); 948 seq_printf(m, "\n%s\n", title);
953 inl = 0; 949 inl = 0;
954 for (r = 0; r < (len / 64); r++) { 950 for (r = 0; r < (len / 64); r++) {
-		hl += sprintrw(outaddr+hl, addr+inl, 64);
+		sprintrw(m, addr+inl, 64);
 		inl += 64;
 	}
 	rx = len % 64;
 	if (rx) {
-		hl += sprintrw(outaddr+hl, addr+inl, rx);
+		sprintrw(m, addr+inl, rx);
 		inl += rx;
 	}
-	hl += sprintf(outaddr+hl, "\n");
-	return hl;
+	seq_putc(m, '\n');
 }
 
-static int sprinthx4(unsigned char *title, unsigned char *outaddr,
+static void sprinthx4(unsigned char *title, struct seq_file *m,
 		      unsigned int *array, unsigned int len)
 {
-	int hl, r;
+	int r;
 
-	hl = sprintf(outaddr, "\n%s\n", title);
+	seq_printf(m, "\n%s\n", title);
 	for (r = 0; r < len; r++) {
 		if ((r % 8) == 0)
-			hl += sprintf(outaddr+hl, " ");
-		hl += sprintf(outaddr+hl, "%08X ", array[r]);
+			seq_printf(m, " ");
+		seq_printf(m, "%08X ", array[r]);
 		if ((r % 8) == 7)
-			hl += sprintf(outaddr+hl, "\n");
+			seq_putc(m, '\n');
 	}
-	hl += sprintf(outaddr+hl, "\n");
-	return hl;
+	seq_putc(m, '\n');
 }
 
-static int zcrypt_status_read(char *resp_buff, char **start, off_t offset,
-			      int count, int *eof, void *data)
+static int zcrypt_proc_show(struct seq_file *m, void *v)
 {
-	unsigned char *workarea;
-	int len;
-
-	len = 0;
-
-	/* resp_buff is a page. Use the right half for a work area */
-	workarea = resp_buff + 2000;
-	len += sprintf(resp_buff + len, "\nzcrypt version: %d.%d.%d\n",
-		       ZCRYPT_VERSION, ZCRYPT_RELEASE, ZCRYPT_VARIANT);
-	len += sprintf(resp_buff + len, "Cryptographic domain: %d\n",
-		       ap_domain_index);
-	len += sprintf(resp_buff + len, "Total device count: %d\n",
-		       zcrypt_device_count);
-	len += sprintf(resp_buff + len, "PCICA count: %d\n",
-		       zcrypt_count_type(ZCRYPT_PCICA));
-	len += sprintf(resp_buff + len, "PCICC count: %d\n",
-		       zcrypt_count_type(ZCRYPT_PCICC));
-	len += sprintf(resp_buff + len, "PCIXCC MCL2 count: %d\n",
-		       zcrypt_count_type(ZCRYPT_PCIXCC_MCL2));
-	len += sprintf(resp_buff + len, "PCIXCC MCL3 count: %d\n",
-		       zcrypt_count_type(ZCRYPT_PCIXCC_MCL3));
-	len += sprintf(resp_buff + len, "CEX2C count: %d\n",
-		       zcrypt_count_type(ZCRYPT_CEX2C));
-	len += sprintf(resp_buff + len, "CEX2A count: %d\n",
-		       zcrypt_count_type(ZCRYPT_CEX2A));
-	len += sprintf(resp_buff + len, "CEX3C count: %d\n",
-		       zcrypt_count_type(ZCRYPT_CEX3C));
-	len += sprintf(resp_buff + len, "CEX3A count: %d\n",
-		       zcrypt_count_type(ZCRYPT_CEX3A));
-	len += sprintf(resp_buff + len, "requestq count: %d\n",
-		       zcrypt_requestq_count());
-	len += sprintf(resp_buff + len, "pendingq count: %d\n",
-		       zcrypt_pendingq_count());
-	len += sprintf(resp_buff + len, "Total open handles: %d\n\n",
-		       atomic_read(&zcrypt_open_count));
+	char workarea[sizeof(int) * AP_DEVICES];
+
+	seq_printf(m, "\nzcrypt version: %d.%d.%d\n",
+		   ZCRYPT_VERSION, ZCRYPT_RELEASE, ZCRYPT_VARIANT);
+	seq_printf(m, "Cryptographic domain: %d\n", ap_domain_index);
+	seq_printf(m, "Total device count: %d\n", zcrypt_device_count);
+	seq_printf(m, "PCICA count: %d\n", zcrypt_count_type(ZCRYPT_PCICA));
+	seq_printf(m, "PCICC count: %d\n", zcrypt_count_type(ZCRYPT_PCICC));
+	seq_printf(m, "PCIXCC MCL2 count: %d\n",
+		   zcrypt_count_type(ZCRYPT_PCIXCC_MCL2));
+	seq_printf(m, "PCIXCC MCL3 count: %d\n",
+		   zcrypt_count_type(ZCRYPT_PCIXCC_MCL3));
+	seq_printf(m, "CEX2C count: %d\n", zcrypt_count_type(ZCRYPT_CEX2C));
+	seq_printf(m, "CEX2A count: %d\n", zcrypt_count_type(ZCRYPT_CEX2A));
+	seq_printf(m, "CEX3C count: %d\n", zcrypt_count_type(ZCRYPT_CEX3C));
+	seq_printf(m, "CEX3A count: %d\n", zcrypt_count_type(ZCRYPT_CEX3A));
+	seq_printf(m, "requestq count: %d\n", zcrypt_requestq_count());
+	seq_printf(m, "pendingq count: %d\n", zcrypt_pendingq_count());
+	seq_printf(m, "Total open handles: %d\n\n",
+		   atomic_read(&zcrypt_open_count));
 	zcrypt_status_mask(workarea);
-	len += sprinthx("Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) "
+	sprinthx("Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) "
 		"4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A 7=CEX3C 8=CEX3A",
-		resp_buff+len, workarea, AP_DEVICES);
+		m, workarea, AP_DEVICES);
 	zcrypt_qdepth_mask(workarea);
-	len += sprinthx("Waiting work element counts",
-		resp_buff+len, workarea, AP_DEVICES);
+	sprinthx("Waiting work element counts", m, workarea, AP_DEVICES);
 	zcrypt_perdev_reqcnt((int *) workarea);
-	len += sprinthx4("Per-device successfully completed request counts",
-		resp_buff+len,(unsigned int *) workarea, AP_DEVICES);
-	*eof = 1;
-	memset((void *) workarea, 0x00, AP_DEVICES * sizeof(unsigned int));
-	return len;
+	sprinthx4("Per-device successfully completed request counts",
+		  m, (unsigned int *) workarea, AP_DEVICES);
+	return 0;
+}
+
+static int zcrypt_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, zcrypt_proc_show, NULL);
 }
 
 static void zcrypt_disable_card(int index)
@@ -1061,11 +1041,11 @@ static void zcrypt_enable_card(int index)
 	spin_unlock_bh(&zcrypt_device_lock);
 }
 
-static int zcrypt_status_write(struct file *file, const char __user *buffer,
-			       unsigned long count, void *data)
+static ssize_t zcrypt_proc_write(struct file *file, const char __user *buffer,
+				 size_t count, loff_t *pos)
 {
 	unsigned char *lbuf, *ptr;
-	unsigned long local_count;
+	size_t local_count;
 	int j;
 
 	if (count <= 0)
@@ -1115,6 +1095,15 @@ out:
 	return count;
 }
 
+static const struct file_operations zcrypt_proc_fops = {
+	.owner = THIS_MODULE,
+	.open = zcrypt_proc_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.write = zcrypt_proc_write,
+};
+
 static int zcrypt_rng_device_count;
 static u32 *zcrypt_rng_buffer;
 static int zcrypt_rng_buffer_index;
@@ -1197,14 +1186,11 @@ int __init zcrypt_api_init(void)
 		goto out;
 
 	/* Set up the proc file system */
-	zcrypt_entry = create_proc_entry("driver/z90crypt", 0644, NULL);
+	zcrypt_entry = proc_create("driver/z90crypt", 0644, NULL, &zcrypt_proc_fops);
 	if (!zcrypt_entry) {
 		rc = -ENOMEM;
 		goto out_misc;
 	}
-	zcrypt_entry->data = NULL;
-	zcrypt_entry->read_proc = zcrypt_status_read;
-	zcrypt_entry->write_proc = zcrypt_status_write;
 
 	return 0;
 
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index 2930fc763ac5..b2fc4fd63f7f 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -340,11 +340,11 @@ static void kvm_extint_handler(u16 code)
 		return;
 
 	/* The LSB might be overloaded, we have to mask it */
-	vq = (struct virtqueue *) ((*(long *) __LC_PFAULT_INTPARM) & ~1UL);
+	vq = (struct virtqueue *)(S390_lowcore.ext_params2 & ~1UL);
 
 	/* We use the LSB of extparam, to decide, if this interrupt is a config
 	 * change or a "standard" interrupt */
-	config_changed = (*(int *) __LC_EXT_PARAMS & 1);
+	config_changed = S390_lowcore.ext_params & 1;
 
 	if (config_changed) {
 		struct virtio_driver *drv;
diff --git a/include/linux/elf.h b/include/linux/elf.h
index 0cc4d55151b7..39ad4b230a4a 100644
--- a/include/linux/elf.h
+++ b/include/linux/elf.h
@@ -362,6 +362,11 @@ typedef struct elf64_shdr {
 #define NT_386_TLS	0x200		/* i386 TLS slots (struct user_desc) */
 #define NT_386_IOPERM	0x201		/* x86 io permission bitmap (1=deny) */
 #define NT_S390_HIGH_GPRS	0x300	/* s390 upper register halves */
+#define NT_S390_TIMER	0x301		/* s390 timer register */
+#define NT_S390_TODCMP	0x302		/* s390 TOD clock comparator register */
+#define NT_S390_TODPREG	0x303		/* s390 TOD programmable register */
+#define NT_S390_CTRS	0x304		/* s390 control registers */
+#define NT_S390_PREFIX	0x305		/* s390 prefix register */
 
 
 /* Note header in a PT_NOTE section */