author     Linus Torvalds <torvalds@linux-foundation.org>  2018-04-09 12:04:10 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-04-09 12:04:10 -0400
commit     becdce1c66b21ce1c0452e16127182ef692f47ba (patch)
tree       a37f26fbbc43fad56b12881f6d57dc4a0fdb8d98
parent     f8cf2f16a7c95acce497bfafa90e7c6d8397d653 (diff)
parent     92fa7a13c845c91f6a8177250474bbcab7fcf45e (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Martin Schwidefsky:

 - Improvements for the spectre defense:
    * The spectre related code is consolidated to a single file,
      nospec-branch.c
    * Automatic enable/disable for the spectre v2 defenses (expoline
      vs. nobp)
    * Syslog messages for spectre v2 are added
    * Enable CONFIG_GENERIC_CPU_VULNERABILITIES and define the
      attribute functions for spectre v1 and v2

 - Add helper macros for assembler alternatives and use them to shorten
   the code in entry.S.

 - Add support for persistent configuration data via the SCLP Store
   Data interface. The H/W interface requires a page table that uses
   4K pages only, the code to set up such an address space is added as
   well.

 - Enable virtio GPU emulation in QEMU. To do this the depends
   statements for a few common Kconfig options are modified.

 - Add support for format-3 channel path descriptors and add a binary
   sysfs interface to export the associated utility strings.

 - Add a sysfs attribute to control the IFCC handling in case of
   constant channel errors.

 - The vfio-ccw changes from Cornelia.

 - Bug fixes and cleanups.

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (40 commits)
  s390/kvm: improve stack frame constants in entry.S
  s390/lpp: use assembler alternatives for the LPP instruction
  s390/entry.S: use assembler alternatives
  s390: add assembler macros for CPU alternatives
  s390: add sysfs attributes for spectre
  s390: report spectre mitigation via syslog
  s390: add automatic detection of the spectre defense
  s390: move nobp parameter functions to nospec-branch.c
  s390/cio: add util_string sysfs attribute
  s390/chsc: query utility strings via fmt3 channel path descriptor
  s390/cio: rename struct channel_path_desc
  s390/cio: fix unbind of io_subchannel_driver
  s390/qdio: split up CCQ handling for EQBS / SQBS
  s390/qdio: don't retry EQBS after CCQ 96
  s390/qdio: restrict buffer merging to eligible devices
  s390/qdio: don't merge ERROR output buffers
  s390/qdio: simplify math in get_*_buffer_frontier()
  s390/decompressor: trim uncompressed image head during the build
  s390/crypto: Fix kernel crash on aes_s390 module remove.
  s390/defkeymap: fix global init to zero
  ...
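With CONFIG_GENERIC_CPU_VULNERABILITIES now selected, the spectre v1/v2 state
reported by the new attribute functions becomes readable from user space. A
minimal sketch, assuming the standard sysfs paths that option provides (the
helper below is hypothetical, not part of this series):

/* Sketch: print the spectre mitigation state exposed via the
 * GENERIC_CPU_VULNERABILITIES sysfs attributes (assumed standard paths). */
#include <stdio.h>

static void show(const char *path)
{
	char line[128];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return;
	}
	if (fgets(line, sizeof(line), f))
		printf("%s: %s", path, line);
	fclose(f);
}

int main(void)
{
	show("/sys/devices/system/cpu/vulnerabilities/spectre_v1");
	show("/sys/devices/system/cpu/vulnerabilities/spectre_v2");
	return 0;
}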
-rw-r--r--  Documentation/s390/vfio-ccw.txt  79
-rw-r--r--  arch/s390/Kconfig  3
-rw-r--r--  arch/s390/Makefile  8
-rw-r--r--  arch/s390/boot/compressed/Makefile  16
-rw-r--r--  arch/s390/boot/compressed/head.S  6
-rw-r--r--  arch/s390/boot/compressed/misc.c  10
-rw-r--r--  arch/s390/boot/compressed/vmlinux.lds.S  1
-rw-r--r--  arch/s390/crypto/aes_s390.c  5
-rw-r--r--  arch/s390/include/asm/alternative-asm.h  108
-rw-r--r--  arch/s390/include/asm/ccwdev.h  2
-rw-r--r--  arch/s390/include/asm/chpid.h  2
-rw-r--r--  arch/s390/include/asm/cio.h  2
-rw-r--r--  arch/s390/include/asm/cpu_mf.h  4
-rw-r--r--  arch/s390/include/asm/css_chars.h  6
-rw-r--r--  arch/s390/include/asm/nospec-branch.h  6
-rw-r--r--  arch/s390/include/asm/pgalloc.h  3
-rw-r--r--  arch/s390/include/asm/scsw.h  4
-rw-r--r--  arch/s390/include/asm/setup.h  2
-rw-r--r--  arch/s390/include/uapi/asm/dasd.h  38
-rw-r--r--  arch/s390/kernel/Makefile  4
-rw-r--r--  arch/s390/kernel/alternative.c  24
-rw-r--r--  arch/s390/kernel/asm-offsets.c  1
-rw-r--r--  arch/s390/kernel/early.c  4
-rw-r--r--  arch/s390/kernel/entry.S  96
-rw-r--r--  arch/s390/kernel/module.c  11
-rw-r--r--  arch/s390/kernel/nospec-branch.c  120
-rw-r--r--  arch/s390/kernel/setup.c  22
-rw-r--r--  arch/s390/kernel/suspend.c  4
-rw-r--r--  arch/s390/mm/dump_pagetables.c  4
-rw-r--r--  arch/s390/mm/pgalloc.c  293
-rw-r--r--  drivers/s390/block/dasd.c  9
-rw-r--r--  drivers/s390/block/dasd_3990_erp.c  17
-rw-r--r--  drivers/s390/block/dasd_devmap.c  43
-rw-r--r--  drivers/s390/block/dasd_eckd.c  27
-rw-r--r--  drivers/s390/char/Makefile  2
-rw-r--r--  drivers/s390/char/defkeymap.c  66
-rw-r--r--  drivers/s390/char/keyboard.c  32
-rw-r--r--  drivers/s390/char/keyboard.h  11
-rw-r--r--  drivers/s390/char/sclp.c  58
-rw-r--r--  drivers/s390/char/sclp.h  61
-rw-r--r--  drivers/s390/char/sclp_early.c  2
-rw-r--r--  drivers/s390/char/sclp_early_core.c  38
-rw-r--r--  drivers/s390/char/sclp_sd.c  569
-rw-r--r--  drivers/s390/char/sclp_tty.c  5
-rw-r--r--  drivers/s390/cio/chp.c  34
-rw-r--r--  drivers/s390/cio/chp.h  5
-rw-r--r--  drivers/s390/cio/chsc.c  59
-rw-r--r--  drivers/s390/cio/chsc.h  11
-rw-r--r--  drivers/s390/cio/device.c  16
-rw-r--r--  drivers/s390/cio/device_ops.c  4
-rw-r--r--  drivers/s390/cio/qdio_main.c  131
-rw-r--r--  drivers/s390/cio/vfio_ccw_fsm.c  5
-rw-r--r--  drivers/s390/net/qeth_core_main.c  2
-rw-r--r--  drivers/tty/Kconfig  2
-rw-r--r--  drivers/video/Kconfig  5
-rw-r--r--  drivers/video/console/Kconfig  6
56 files changed, 1633 insertions, 475 deletions
diff --git a/Documentation/s390/vfio-ccw.txt b/Documentation/s390/vfio-ccw.txt
index 90b3dfead81b..2be11ad864ff 100644
--- a/Documentation/s390/vfio-ccw.txt
+++ b/Documentation/s390/vfio-ccw.txt
@@ -28,7 +28,7 @@ every detail. More information/reference could be found here:
28 https://en.wikipedia.org/wiki/Channel_I/O 28 https://en.wikipedia.org/wiki/Channel_I/O
29- s390 architecture: 29- s390 architecture:
30 s390 Principles of Operation manual (IBM Form. No. SA22-7832) 30 s390 Principles of Operation manual (IBM Form. No. SA22-7832)
31- The existing Qemu code which implements a simple emulated channel 31- The existing QEMU code which implements a simple emulated channel
32 subsystem could also be a good reference. It makes it easier to follow 32 subsystem could also be a good reference. It makes it easier to follow
33 the flow. 33 the flow.
34 qemu/hw/s390x/css.c 34 qemu/hw/s390x/css.c
@@ -39,22 +39,22 @@ For vfio mediated device framework:
39Motivation of vfio-ccw 39Motivation of vfio-ccw
40---------------------- 40----------------------
41 41
42Currently, a guest virtualized via qemu/kvm on s390 only sees 42Typically, a guest virtualized via QEMU/KVM on s390 only sees
43paravirtualized virtio devices via the "Virtio Over Channel I/O 43paravirtualized virtio devices via the "Virtio Over Channel I/O
44(virtio-ccw)" transport. This makes virtio devices discoverable via 44(virtio-ccw)" transport. This makes virtio devices discoverable via
45standard operating system algorithms for handling channel devices. 45standard operating system algorithms for handling channel devices.
46 46
47However this is not enough. On s390 for the majority of devices, which 47However this is not enough. On s390 for the majority of devices, which
48use the standard Channel I/O based mechanism, we also need to provide 48use the standard Channel I/O based mechanism, we also need to provide
49the functionality of passing through them to a Qemu virtual machine. 49the functionality of passing through them to a QEMU virtual machine.
50This includes devices that don't have a virtio counterpart (e.g. tape 50This includes devices that don't have a virtio counterpart (e.g. tape
51drives) or that have specific characteristics which guests want to 51drives) or that have specific characteristics which guests want to
52exploit. 52exploit.
53 53
54For passing a device to a guest, we want to use the same interface as 54For passing a device to a guest, we want to use the same interface as
55everybody else, namely vfio. Thus, we would like to introduce vfio 55everybody else, namely vfio. We implement this vfio support for channel
56support for channel devices. And we would like to name this new vfio 56devices via the vfio mediated device framework and the subchannel device
57device "vfio-ccw". 57driver "vfio_ccw".
58 58
59Access patterns of CCW devices 59Access patterns of CCW devices
60------------------------------ 60------------------------------
@@ -99,7 +99,7 @@ As mentioned above, we realize vfio-ccw with a mdev implementation.
99Channel I/O does not have IOMMU hardware support, so the physical 99Channel I/O does not have IOMMU hardware support, so the physical
100vfio-ccw device does not have an IOMMU level translation or isolation. 100vfio-ccw device does not have an IOMMU level translation or isolation.
101 101
102Sub-channel I/O instructions are all privileged instructions, When 102Subchannel I/O instructions are all privileged instructions. When
103handling the I/O instruction interception, vfio-ccw has the software 103handling the I/O instruction interception, vfio-ccw has the software
104policing and translation how the channel program is programmed before 104policing and translation how the channel program is programmed before
105it gets sent to hardware. 105it gets sent to hardware.
@@ -121,7 +121,7 @@ devices:
121- The vfio_mdev driver for the mediated vfio ccw device. 121- The vfio_mdev driver for the mediated vfio ccw device.
122 This is provided by the mdev framework. It is a vfio device driver for 122 This is provided by the mdev framework. It is a vfio device driver for
123 the mdev that created by vfio_ccw. 123 the mdev that created by vfio_ccw.
124 It realize a group of vfio device driver callbacks, adds itself to a 124 It realizes a group of vfio device driver callbacks, adds itself to a
125 vfio group, and registers itself to the mdev framework as a mdev 125 vfio group, and registers itself to the mdev framework as a mdev
126 driver. 126 driver.
127 It uses a vfio iommu backend that uses the existing map and unmap 127 It uses a vfio iommu backend that uses the existing map and unmap
@@ -178,7 +178,7 @@ vfio-ccw I/O region
178 178
179An I/O region is used to accept channel program request from user 179An I/O region is used to accept channel program request from user
180space and store I/O interrupt result for user space to retrieve. The 180space and store I/O interrupt result for user space to retrieve. The
181defination of the region is: 181definition of the region is:
182 182
183struct ccw_io_region { 183struct ccw_io_region {
184#define ORB_AREA_SIZE 12 184#define ORB_AREA_SIZE 12
@@ -198,30 +198,23 @@ irb_area stores the I/O result.
198 198
199ret_code stores a return code for each access of the region. 199ret_code stores a return code for each access of the region.
200 200
201vfio-ccw patches overview 201vfio-ccw operation details
202------------------------- 202--------------------------
203 203
204For now, our patches are rebased on the latest mdev implementation. 204vfio-ccw follows what vfio-pci did on the s390 platform and uses
205vfio-ccw follows what vfio-pci did on the s390 paltform and uses 205vfio-iommu-type1 as the vfio iommu backend.
206vfio-iommu-type1 as the vfio iommu backend. It's a good start to launch
207the code review for vfio-ccw. Note that the implementation is far from
208complete yet; but we'd like to get feedback for the general
209architecture.
210 206
211* CCW translation APIs 207* CCW translation APIs
212- Description: 208 A group of APIs (start with 'cp_') to do CCW translation. The CCWs
213 These introduce a group of APIs (start with 'cp_') to do CCW 209 passed in by a user space program are organized with their guest
214 translation. The CCWs passed in by a user space program are 210 physical memory addresses. These APIs will copy the CCWs into kernel
215 organized with their guest physical memory addresses. These APIs 211 space, and assemble a runnable kernel channel program by updating the
216 will copy the CCWs into the kernel space, and assemble a runnable 212 guest physical addresses with their corresponding host physical addresses.
217 kernel channel program by updating the guest physical addresses with 213 Note that we have to use IDALs even for direct-access CCWs, as the
218 their corresponding host physical addresses. 214 referenced memory can be located anywhere, including above 2G.
219- Patches:
220 vfio: ccw: introduce channel program interfaces
221 215
222* vfio_ccw device driver 216* vfio_ccw device driver
223- Description: 217 This driver utilizes the CCW translation APIs and introduces
224 The following patches utilizes the CCW translation APIs and introduce
225 vfio_ccw, which is the driver for the I/O subchannel devices you want 218 vfio_ccw, which is the driver for the I/O subchannel devices you want
226 to pass through. 219 to pass through.
227 vfio_ccw implements the following vfio ioctls: 220 vfio_ccw implements the following vfio ioctls:
@@ -236,20 +229,14 @@ architecture.
236 This also provides the SET_IRQ ioctl to setup an event notifier to 229 This also provides the SET_IRQ ioctl to setup an event notifier to
237 notify the user space program the I/O completion in an asynchronous 230 notify the user space program the I/O completion in an asynchronous
238 way. 231 way.
239- Patches: 232
240 vfio: ccw: basic implementation for vfio_ccw driver 233The use of vfio-ccw is not limited to QEMU, while QEMU is definitely a
241 vfio: ccw: introduce ccw_io_region
242 vfio: ccw: realize VFIO_DEVICE_GET_REGION_INFO ioctl
243 vfio: ccw: realize VFIO_DEVICE_RESET ioctl
244 vfio: ccw: realize VFIO_DEVICE_G(S)ET_IRQ_INFO ioctls
245
246The user of vfio-ccw is not limited to Qemu, while Qemu is definitely a
247good example to get understand how these patches work. Here is a little 234good example to get understand how these patches work. Here is a little
248bit more detail how an I/O request triggered by the Qemu guest will be 235bit more detail how an I/O request triggered by the QEMU guest will be
249handled (without error handling). 236handled (without error handling).
250 237
251Explanation: 238Explanation:
252Q1-Q7: Qemu side process. 239Q1-Q7: QEMU side process.
253K1-K5: Kernel side process. 240K1-K5: Kernel side process.
254 241
255Q1. Get I/O region info during initialization. 242Q1. Get I/O region info during initialization.
@@ -263,7 +250,7 @@ Q4. Write the guest channel program and ORB to the I/O region.
263 K2. Translate the guest channel program to a host kernel space 250 K2. Translate the guest channel program to a host kernel space
264 channel program, which becomes runnable for a real device. 251 channel program, which becomes runnable for a real device.
265 K3. With the necessary information contained in the orb passed in 252 K3. With the necessary information contained in the orb passed in
266 by Qemu, issue the ccwchain to the device. 253 by QEMU, issue the ccwchain to the device.
267 K4. Return the ssch CC code. 254 K4. Return the ssch CC code.
268Q5. Return the CC code to the guest. 255Q5. Return the CC code to the guest.
269 256
@@ -271,7 +258,7 @@ Q5. Return the CC code to the guest.
271 258
272 K5. Interrupt handler gets the I/O result and write the result to 259 K5. Interrupt handler gets the I/O result and write the result to
273 the I/O region. 260 the I/O region.
274 K6. Signal Qemu to retrieve the result. 261 K6. Signal QEMU to retrieve the result.
275Q6. Get the signal and event handler reads out the result from the I/O 262Q6. Get the signal and event handler reads out the result from the I/O
276 region. 263 region.
277Q7. Update the irb for the guest. 264Q7. Update the irb for the guest.
@@ -289,10 +276,20 @@ More information for DASD and ECKD could be found here:
289https://en.wikipedia.org/wiki/Direct-access_storage_device 276https://en.wikipedia.org/wiki/Direct-access_storage_device
290https://en.wikipedia.org/wiki/Count_key_data 277https://en.wikipedia.org/wiki/Count_key_data
291 278
292Together with the corresponding work in Qemu, we can bring the passed 279Together with the corresponding work in QEMU, we can bring the passed
293through DASD/ECKD device online in a guest now and use it as a block 280through DASD/ECKD device online in a guest now and use it as a block
294device. 281device.
295 282
283While the current code allows the guest to start channel programs via
284START SUBCHANNEL, support for HALT SUBCHANNEL or CLEAR SUBCHANNEL is
285not yet implemented.
286
287vfio-ccw supports classic (command mode) channel I/O only. Transport
288mode (HPF) is not supported.
289
290QDIO subchannels are currently not supported. Classic devices other than
291DASD/ECKD might work, but have not been tested.
292
296Reference 293Reference
297--------- 294---------
2981. ESA/s390 Principles of Operation manual (IBM Form. No. SA22-7832) 2951. ESA/s390 Principles of Operation manual (IBM Form. No. SA22-7832)
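To make the Q1-Q7 flow documented above concrete, here is a minimal user-space
sketch of steps Q2-Q6 against the vfio-ccw I/O region. It is illustrative
only: the helper name run_channel_program is made up, and the region index
constant, the <linux/vfio_ccw.h> header and the orb_area/ret_code field names
reflect the vfio-ccw uapi as I understand it rather than anything shown in
this diff.

/* Hypothetical sketch: locate the vfio-ccw I/O region, hand it an ORB that
 * points at a guest channel program, and read back the return code. */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/vfio.h>
#include <linux/vfio_ccw.h>	/* struct ccw_io_region (assumed header) */

static int run_channel_program(int device_fd, const void *orb, size_t orb_len)
{
	struct vfio_region_info info = {
		.argsz = sizeof(info),
		.index = VFIO_CCW_CONFIG_REGION_INDEX,	/* assumed index name */
	};
	struct ccw_io_region io;

	/* Q1: ask the kernel where the I/O region lives (offset and size). */
	if (ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, &info) < 0)
		return -1;

	/* Q4/K1-K4: place the ORB (which references the channel program)
	 * into orb_area and write the region; the kernel translates the
	 * program and issues ssch on our behalf. */
	memset(&io, 0, sizeof(io));
	memcpy(io.orb_area, orb, orb_len);
	if (pwrite(device_fd, &io, sizeof(io), info.offset) != (ssize_t) sizeof(io))
		return -1;

	/* Q6: read the region back to pick up ret_code (and later the IRB). */
	if (pread(device_fd, &io, sizeof(io), info.offset) != (ssize_t) sizeof(io))
		return -1;
	return io.ret_code;
}

In the real flow the I/O completion (K5/K6) is signalled through the event
notifier registered with the SET_IRQ ioctl mentioned above; the final pread
here merely stands in for that asynchronous step.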
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index eaee7087886f..32a0d5b958bf 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -120,6 +120,7 @@ config S390
120 select GENERIC_CLOCKEVENTS 120 select GENERIC_CLOCKEVENTS
121 select GENERIC_CPU_AUTOPROBE 121 select GENERIC_CPU_AUTOPROBE
122 select GENERIC_CPU_DEVICES if !SMP 122 select GENERIC_CPU_DEVICES if !SMP
123 select GENERIC_CPU_VULNERABILITIES
123 select GENERIC_FIND_FIRST_BIT 124 select GENERIC_FIND_FIRST_BIT
124 select GENERIC_SMP_IDLE_THREAD 125 select GENERIC_SMP_IDLE_THREAD
125 select GENERIC_TIME_VSYSCALL 126 select GENERIC_TIME_VSYSCALL
@@ -576,7 +577,7 @@ choice
576config EXPOLINE_OFF 577config EXPOLINE_OFF
577 bool "spectre_v2=off" 578 bool "spectre_v2=off"
578 579
579config EXPOLINE_MEDIUM 580config EXPOLINE_AUTO
580 bool "spectre_v2=auto" 581 bool "spectre_v2=auto"
581 582
582config EXPOLINE_FULL 583config EXPOLINE_FULL
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 2ced3239cb84..c79936d02f7b 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -47,9 +47,6 @@ cflags-$(CONFIG_MARCH_Z14_TUNE) += -mtune=z14
47 47
48cflags-y += -Wa,-I$(srctree)/arch/$(ARCH)/include 48cflags-y += -Wa,-I$(srctree)/arch/$(ARCH)/include
49 49
50#KBUILD_IMAGE is necessary for make rpm
51KBUILD_IMAGE :=arch/s390/boot/image
52
53# 50#
54# Prevent tail-call optimizations, to get clearer backtraces: 51# Prevent tail-call optimizations, to get clearer backtraces:
55# 52#
@@ -84,7 +81,7 @@ ifdef CONFIG_EXPOLINE
84 CC_FLAGS_EXPOLINE += -mfunction-return=thunk 81 CC_FLAGS_EXPOLINE += -mfunction-return=thunk
85 CC_FLAGS_EXPOLINE += -mindirect-branch-table 82 CC_FLAGS_EXPOLINE += -mindirect-branch-table
86 export CC_FLAGS_EXPOLINE 83 export CC_FLAGS_EXPOLINE
87 cflags-y += $(CC_FLAGS_EXPOLINE) 84 cflags-y += $(CC_FLAGS_EXPOLINE) -DCC_USING_EXPOLINE
88 endif 85 endif
89endif 86endif
90 87
@@ -126,6 +123,9 @@ tools := arch/s390/tools
126 123
127all: image bzImage 124all: image bzImage
128 125
126#KBUILD_IMAGE is necessary for packaging targets like rpm-pkg, deb-pkg...
127KBUILD_IMAGE := $(boot)/bzImage
128
129install: vmlinux 129install: vmlinux
130 $(Q)$(MAKE) $(build)=$(boot) $@ 130 $(Q)$(MAKE) $(build)=$(boot) $@
131 131
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index 26d6a94f40f6..5766f7b9b271 100644
--- a/arch/s390/boot/compressed/Makefile
+++ b/arch/s390/boot/compressed/Makefile
@@ -29,11 +29,16 @@ LDFLAGS_vmlinux := --oformat $(LD_BFD) -e startup -T
29$(obj)/vmlinux: $(obj)/vmlinux.lds $(OBJECTS) 29$(obj)/vmlinux: $(obj)/vmlinux.lds $(OBJECTS)
30 $(call if_changed,ld) 30 $(call if_changed,ld)
31 31
32sed-sizes := -e 's/^\([0-9a-fA-F]*\) . \(__bss_start\|_end\)$$/\#define SZ\2 0x\1/p' 32TRIM_HEAD_SIZE := 0x11000
33 33
34quiet_cmd_sizes = GEN $@ 34sed-sizes := -e 's/^\([0-9a-fA-F]*\) . \(__bss_start\|_end\)$$/\#define SZ\2 (0x\1 - $(TRIM_HEAD_SIZE))/p'
35
36quiet_cmd_sizes = GEN $@
35 cmd_sizes = $(NM) $< | sed -n $(sed-sizes) > $@ 37 cmd_sizes = $(NM) $< | sed -n $(sed-sizes) > $@
36 38
39quiet_cmd_trim_head = TRIM $@
40 cmd_trim_head = tail -c +$$(($(TRIM_HEAD_SIZE) + 1)) $< > $@
41
37$(obj)/sizes.h: vmlinux 42$(obj)/sizes.h: vmlinux
38 $(call if_changed,sizes) 43 $(call if_changed,sizes)
39 44
@@ -43,10 +48,13 @@ $(obj)/head.o: $(obj)/sizes.h
43CFLAGS_misc.o += -I$(objtree)/$(obj) 48CFLAGS_misc.o += -I$(objtree)/$(obj)
44$(obj)/misc.o: $(obj)/sizes.h 49$(obj)/misc.o: $(obj)/sizes.h
45 50
46OBJCOPYFLAGS_vmlinux.bin := -R .comment -S 51OBJCOPYFLAGS_vmlinux.bin.full := -R .comment -S
47$(obj)/vmlinux.bin: vmlinux 52$(obj)/vmlinux.bin.full: vmlinux
48 $(call if_changed,objcopy) 53 $(call if_changed,objcopy)
49 54
55$(obj)/vmlinux.bin: $(obj)/vmlinux.bin.full
56 $(call if_changed,trim_head)
57
50vmlinux.bin.all-y := $(obj)/vmlinux.bin 58vmlinux.bin.all-y := $(obj)/vmlinux.bin
51 59
52suffix-$(CONFIG_KERNEL_GZIP) := gz 60suffix-$(CONFIG_KERNEL_GZIP) := gz
diff --git a/arch/s390/boot/compressed/head.S b/arch/s390/boot/compressed/head.S
index 231d1491d431..9f94eca0f467 100644
--- a/arch/s390/boot/compressed/head.S
+++ b/arch/s390/boot/compressed/head.S
@@ -23,12 +23,10 @@ ENTRY(startup_continue)
23 aghi %r15,-160 23 aghi %r15,-160
24 brasl %r14,decompress_kernel 24 brasl %r14,decompress_kernel
25 # Set up registers for memory mover. We move the decompressed image to 25 # Set up registers for memory mover. We move the decompressed image to
26 # 0x11000, starting at offset 0x11000 in the decompressed image so 26 # 0x11000, where startup_continue of the decompressed image is supposed
27 # that code living at 0x11000 in the image will end up at 0x11000 in 27 # to be.
28 # memory.
29 lgr %r4,%r2 28 lgr %r4,%r2
30 lg %r2,.Loffset-.LPG1(%r13) 29 lg %r2,.Loffset-.LPG1(%r13)
31 la %r4,0(%r2,%r4)
32 lg %r3,.Lmvsize-.LPG1(%r13) 30 lg %r3,.Lmvsize-.LPG1(%r13)
33 lgr %r5,%r3 31 lgr %r5,%r3
34 # Move the memory mover someplace safe so it doesn't overwrite itself. 32 # Move the memory mover someplace safe so it doesn't overwrite itself.
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
index cecf38b9ec82..63838a17e56a 100644
--- a/arch/s390/boot/compressed/misc.c
+++ b/arch/s390/boot/compressed/misc.c
@@ -27,8 +27,8 @@
27/* Symbols defined by linker scripts */ 27/* Symbols defined by linker scripts */
28extern char input_data[]; 28extern char input_data[];
29extern int input_len; 29extern int input_len;
30extern char _text, _end; 30extern char _end[];
31extern char _bss, _ebss; 31extern char _bss[], _ebss[];
32 32
33static void error(char *m); 33static void error(char *m);
34 34
@@ -144,7 +144,7 @@ unsigned long decompress_kernel(void)
144{ 144{
145 void *output, *kernel_end; 145 void *output, *kernel_end;
146 146
147 output = (void *) ALIGN((unsigned long) &_end + HEAP_SIZE, PAGE_SIZE); 147 output = (void *) ALIGN((unsigned long) _end + HEAP_SIZE, PAGE_SIZE);
148 kernel_end = output + SZ__bss_start; 148 kernel_end = output + SZ__bss_start;
149 check_ipl_parmblock((void *) 0, (unsigned long) kernel_end); 149 check_ipl_parmblock((void *) 0, (unsigned long) kernel_end);
150 150
@@ -166,8 +166,8 @@ unsigned long decompress_kernel(void)
166 * Clear bss section. free_mem_ptr and free_mem_end_ptr need to be 166 * Clear bss section. free_mem_ptr and free_mem_end_ptr need to be
167 * initialized afterwards since they reside in bss. 167 * initialized afterwards since they reside in bss.
168 */ 168 */
169 memset(&_bss, 0, &_ebss - &_bss); 169 memset(_bss, 0, _ebss - _bss);
170 free_mem_ptr = (unsigned long) &_end; 170 free_mem_ptr = (unsigned long) _end;
171 free_mem_end_ptr = free_mem_ptr + HEAP_SIZE; 171 free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
172 172
173 __decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error); 173 __decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error);
diff --git a/arch/s390/boot/compressed/vmlinux.lds.S b/arch/s390/boot/compressed/vmlinux.lds.S
index 8150132b144f..d43c2db12d30 100644
--- a/arch/s390/boot/compressed/vmlinux.lds.S
+++ b/arch/s390/boot/compressed/vmlinux.lds.S
@@ -52,6 +52,7 @@ SECTIONS
52 /* Sections to be discarded */ 52 /* Sections to be discarded */
53 /DISCARD/ : { 53 /DISCARD/ : {
54 *(.eh_frame) 54 *(.eh_frame)
55 *(__ex_table)
55 *(*__ksymtab*) 56 *(*__ksymtab*)
56 } 57 }
57} 58}
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index d60798737d86..fa9b7dd1a513 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -1047,6 +1047,7 @@ static struct aead_alg gcm_aes_aead = {
1047 1047
1048static struct crypto_alg *aes_s390_algs_ptr[5]; 1048static struct crypto_alg *aes_s390_algs_ptr[5];
1049static int aes_s390_algs_num; 1049static int aes_s390_algs_num;
1050static struct aead_alg *aes_s390_aead_alg;
1050 1051
1051static int aes_s390_register_alg(struct crypto_alg *alg) 1052static int aes_s390_register_alg(struct crypto_alg *alg)
1052{ 1053{
@@ -1065,7 +1066,8 @@ static void aes_s390_fini(void)
1065 if (ctrblk) 1066 if (ctrblk)
1066 free_page((unsigned long) ctrblk); 1067 free_page((unsigned long) ctrblk);
1067 1068
1068 crypto_unregister_aead(&gcm_aes_aead); 1069 if (aes_s390_aead_alg)
1070 crypto_unregister_aead(aes_s390_aead_alg);
1069} 1071}
1070 1072
1071static int __init aes_s390_init(void) 1073static int __init aes_s390_init(void)
@@ -1123,6 +1125,7 @@ static int __init aes_s390_init(void)
1123 ret = crypto_register_aead(&gcm_aes_aead); 1125 ret = crypto_register_aead(&gcm_aes_aead);
1124 if (ret) 1126 if (ret)
1125 goto out_err; 1127 goto out_err;
1128 aes_s390_aead_alg = &gcm_aes_aead;
1126 } 1129 }
1127 1130
1128 return 0; 1131 return 0;
diff --git a/arch/s390/include/asm/alternative-asm.h b/arch/s390/include/asm/alternative-asm.h
new file mode 100644
index 000000000000..955d620db23e
--- /dev/null
+++ b/arch/s390/include/asm/alternative-asm.h
@@ -0,0 +1,108 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _ASM_S390_ALTERNATIVE_ASM_H
3#define _ASM_S390_ALTERNATIVE_ASM_H
4
5#ifdef __ASSEMBLY__
6
7/*
8 * Check the length of an instruction sequence. The length may not be larger
9 * than 254 bytes and it has to be divisible by 2.
10 */
11.macro alt_len_check start,end
12 .if ( \end - \start ) > 254
13 .error "cpu alternatives does not support instructions blocks > 254 bytes\n"
14 .endif
15 .if ( \end - \start ) % 2
16 .error "cpu alternatives instructions length is odd\n"
17 .endif
18.endm
19
20/*
21 * Issue one struct alt_instr descriptor entry (need to put it into
22 * the section .altinstructions, see below). This entry contains
23 * enough information for the alternatives patching code to patch an
24 * instruction. See apply_alternatives().
25 */
26.macro alt_entry orig_start, orig_end, alt_start, alt_end, feature
27 .long \orig_start - .
28 .long \alt_start - .
29 .word \feature
30 .byte \orig_end - \orig_start
31 .byte \alt_end - \alt_start
32.endm
33
34/*
35 * Fill up @bytes with nops. The macro emits 6-byte nop instructions
36 * for the bulk of the area, possibly followed by a 4-byte and/or
37 * a 2-byte nop if the size of the area is not divisible by 6.
38 */
39.macro alt_pad_fill bytes
40 .fill ( \bytes ) / 6, 6, 0xc0040000
41 .fill ( \bytes ) % 6 / 4, 4, 0x47000000
42 .fill ( \bytes ) % 6 % 4 / 2, 2, 0x0700
43.endm
44
45/*
46 * Fill up @bytes with nops. If the number of bytes is larger
47 * than 6, emit a jg instruction to branch over all nops, then
48 * fill an area of size (@bytes - 6) with nop instructions.
49 */
50.macro alt_pad bytes
51 .if ( \bytes > 0 )
52 .if ( \bytes > 6 )
53 jg . + \bytes
54 alt_pad_fill \bytes - 6
55 .else
56 alt_pad_fill \bytes
57 .endif
58 .endif
59.endm
60
61/*
62 * Define an alternative between two instructions. If @feature is
63 * present, early code in apply_alternatives() replaces @oldinstr with
64 * @newinstr. ".skip" directive takes care of proper instruction padding
65 * in case @newinstr is longer than @oldinstr.
66 */
67.macro ALTERNATIVE oldinstr, newinstr, feature
68 .pushsection .altinstr_replacement,"ax"
69770: \newinstr
70771: .popsection
71772: \oldinstr
72773: alt_len_check 770b, 771b
73 alt_len_check 772b, 773b
74 alt_pad ( ( 771b - 770b ) - ( 773b - 772b ) )
75774: .pushsection .altinstructions,"a"
76 alt_entry 772b, 774b, 770b, 771b, \feature
77 .popsection
78.endm
79
80/*
81 * Define an alternative between two instructions. If @feature is
82 * present, early code in apply_alternatives() replaces @oldinstr with
83 * @newinstr. ".skip" directive takes care of proper instruction padding
84 * in case @newinstr is longer than @oldinstr.
85 */
86.macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2
87 .pushsection .altinstr_replacement,"ax"
88770: \newinstr1
89771: \newinstr2
90772: .popsection
91773: \oldinstr
92774: alt_len_check 770b, 771b
93 alt_len_check 771b, 772b
94 alt_len_check 773b, 774b
95 .if ( 771b - 770b > 772b - 771b )
96 alt_pad ( ( 771b - 770b ) - ( 774b - 773b ) )
97 .else
98 alt_pad ( ( 772b - 771b ) - ( 774b - 773b ) )
99 .endif
100775: .pushsection .altinstructions,"a"
101 alt_entry 773b, 775b, 770b, 771b,\feature1
102 alt_entry 773b, 775b, 771b, 772b,\feature2
103 .popsection
104.endm
105
106#endif /* __ASSEMBLY__ */
107
108#endif /* _ASM_S390_ALTERNATIVE_ASM_H */
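The five fields emitted by alt_entry correspond one-to-one to the descriptor
that apply_alternatives() walks at boot. For reference, a C-side sketch of
that layout; it mirrors struct alt_instr from
arch/s390/include/asm/alternative.h, but treat the exact field names here as
assumptions.

/* One .altinstructions entry as emitted by alt_entry: two self-relative
 * offsets, the facility bit, and the two lengths checked by alt_len_check.
 * Kernel types (s32/u16/u8, __packed) come from <linux/types.h> and
 * <linux/compiler_attributes.h>. */
struct alt_instr {
	s32 instr_offset;	/* .long \orig_start - .         */
	s32 repl_offset;	/* .long \alt_start - .          */
	u16 facility;		/* .word \feature                */
	u8  instrlen;		/* .byte \orig_end - \orig_start */
	u8  replacementlen;	/* .byte \alt_end - \alt_start   */
} __packed;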
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index 633f8da86137..20bce136b2e5 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -230,5 +230,5 @@ int ccw_device_siosl(struct ccw_device *);
230 230
231extern void ccw_device_get_schid(struct ccw_device *, struct subchannel_id *); 231extern void ccw_device_get_schid(struct ccw_device *, struct subchannel_id *);
232 232
233struct channel_path_desc *ccw_device_get_chp_desc(struct ccw_device *, int); 233struct channel_path_desc_fmt0 *ccw_device_get_chp_desc(struct ccw_device *, int);
234#endif /* _S390_CCWDEV_H_ */ 234#endif /* _S390_CCWDEV_H_ */
diff --git a/arch/s390/include/asm/chpid.h b/arch/s390/include/asm/chpid.h
index 4773f747915c..20e0d22f29e9 100644
--- a/arch/s390/include/asm/chpid.h
+++ b/arch/s390/include/asm/chpid.h
@@ -9,7 +9,7 @@
9#include <uapi/asm/chpid.h> 9#include <uapi/asm/chpid.h>
10#include <asm/cio.h> 10#include <asm/cio.h>
11 11
12struct channel_path_desc { 12struct channel_path_desc_fmt0 {
13 u8 flags; 13 u8 flags;
14 u8 lsn; 14 u8 lsn;
15 u8 desc; 15 u8 desc;
diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h
index dc84a0171bb3..847a04262b9c 100644
--- a/arch/s390/include/asm/cio.h
+++ b/arch/s390/include/asm/cio.h
@@ -227,7 +227,7 @@ struct esw_eadm {
227 * a field is valid; a field not being valid is always passed as %0. 227 * a field is valid; a field not being valid is always passed as %0.
228 * If a unit check occurred, @ecw may contain sense data; this is retrieved 228 * If a unit check occurred, @ecw may contain sense data; this is retrieved
229 * by the common I/O layer itself if the device doesn't support concurrent 229 * by the common I/O layer itself if the device doesn't support concurrent
230 * sense (so that the device driver never needs to perform basic sene itself). 230 * sense (so that the device driver never needs to perform basic sense itself).
231 * For unsolicited interrupts, the irb is passed as-is (expect for sense data, 231 * For unsolicited interrupts, the irb is passed as-is (expect for sense data,
232 * if applicable). 232 * if applicable).
233 */ 233 */
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
index dd08db491b89..f58d17e9dd65 100644
--- a/arch/s390/include/asm/cpu_mf.h
+++ b/arch/s390/include/asm/cpu_mf.h
@@ -29,12 +29,12 @@
29/* CPU measurement facility support */ 29/* CPU measurement facility support */
30static inline int cpum_cf_avail(void) 30static inline int cpum_cf_avail(void)
31{ 31{
32 return MACHINE_HAS_LPP && test_facility(67); 32 return test_facility(40) && test_facility(67);
33} 33}
34 34
35static inline int cpum_sf_avail(void) 35static inline int cpum_sf_avail(void)
36{ 36{
37 return MACHINE_HAS_LPP && test_facility(68); 37 return test_facility(40) && test_facility(68);
38} 38}
39 39
40 40
diff --git a/arch/s390/include/asm/css_chars.h b/arch/s390/include/asm/css_chars.h
index fb56fa3283a2..0563fd3e8458 100644
--- a/arch/s390/include/asm/css_chars.h
+++ b/arch/s390/include/asm/css_chars.h
@@ -32,8 +32,10 @@ struct css_general_char {
32 u32 fcx : 1; /* bit 88 */ 32 u32 fcx : 1; /* bit 88 */
33 u32 : 19; 33 u32 : 19;
34 u32 alt_ssi : 1; /* bit 108 */ 34 u32 alt_ssi : 1; /* bit 108 */
35 u32:1; 35 u32 : 1;
36 u32 narf:1; /* bit 110 */ 36 u32 narf : 1; /* bit 110 */
37 u32 : 12;
38 u32 util_str : 1;/* bit 123 */
37} __packed; 39} __packed;
38 40
39extern struct css_general_char css_general_characteristics; 41extern struct css_general_char css_general_characteristics;
diff --git a/arch/s390/include/asm/nospec-branch.h b/arch/s390/include/asm/nospec-branch.h
index 7df48e5cf36f..35bf28fe4c64 100644
--- a/arch/s390/include/asm/nospec-branch.h
+++ b/arch/s390/include/asm/nospec-branch.h
@@ -6,12 +6,10 @@
6 6
7#include <linux/types.h> 7#include <linux/types.h>
8 8
9extern int nospec_call_disable; 9extern int nospec_disable;
10extern int nospec_return_disable;
11 10
12void nospec_init_branches(void); 11void nospec_init_branches(void);
13void nospec_call_revert(s32 *start, s32 *end); 12void nospec_revert(s32 *start, s32 *end);
14void nospec_return_revert(s32 *start, s32 *end);
15 13
16#endif /* __ASSEMBLY__ */ 14#endif /* __ASSEMBLY__ */
17 15
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index c7b4333d1de0..f0f9bcf94c03 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -151,4 +151,7 @@ void vmem_map_init(void);
151void *vmem_crst_alloc(unsigned long val); 151void *vmem_crst_alloc(unsigned long val);
152pte_t *vmem_pte_alloc(void); 152pte_t *vmem_pte_alloc(void);
153 153
154unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages);
155void base_asce_free(unsigned long asce);
156
154#endif /* _S390_PGALLOC_H */ 157#endif /* _S390_PGALLOC_H */
diff --git a/arch/s390/include/asm/scsw.h b/arch/s390/include/asm/scsw.h
index 79b7ffa91832..c00f7b031628 100644
--- a/arch/s390/include/asm/scsw.h
+++ b/arch/s390/include/asm/scsw.h
@@ -390,10 +390,10 @@ static inline int scsw_cmd_is_valid_key(union scsw *scsw)
390} 390}
391 391
392/** 392/**
393 * scsw_cmd_is_valid_sctl - check fctl field validity 393 * scsw_cmd_is_valid_sctl - check sctl field validity
394 * @scsw: pointer to scsw 394 * @scsw: pointer to scsw
395 * 395 *
396 * Return non-zero if the fctl field of the specified command mode scsw is 396 * Return non-zero if the sctl field of the specified command mode scsw is
397 * valid, zero otherwise. 397 * valid, zero otherwise.
398 */ 398 */
399static inline int scsw_cmd_is_valid_sctl(union scsw *scsw) 399static inline int scsw_cmd_is_valid_sctl(union scsw *scsw)
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 2eb0c8a7b664..124154fdfc97 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -25,7 +25,6 @@
25#define MACHINE_FLAG_DIAG44 _BITUL(6) 25#define MACHINE_FLAG_DIAG44 _BITUL(6)
26#define MACHINE_FLAG_EDAT1 _BITUL(7) 26#define MACHINE_FLAG_EDAT1 _BITUL(7)
27#define MACHINE_FLAG_EDAT2 _BITUL(8) 27#define MACHINE_FLAG_EDAT2 _BITUL(8)
28#define MACHINE_FLAG_LPP _BITUL(9)
29#define MACHINE_FLAG_TOPOLOGY _BITUL(10) 28#define MACHINE_FLAG_TOPOLOGY _BITUL(10)
30#define MACHINE_FLAG_TE _BITUL(11) 29#define MACHINE_FLAG_TE _BITUL(11)
31#define MACHINE_FLAG_TLB_LC _BITUL(12) 30#define MACHINE_FLAG_TLB_LC _BITUL(12)
@@ -66,7 +65,6 @@ extern void detect_memory_memblock(void);
66#define MACHINE_HAS_DIAG44 (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG44) 65#define MACHINE_HAS_DIAG44 (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG44)
67#define MACHINE_HAS_EDAT1 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT1) 66#define MACHINE_HAS_EDAT1 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT1)
68#define MACHINE_HAS_EDAT2 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT2) 67#define MACHINE_HAS_EDAT2 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT2)
69#define MACHINE_HAS_LPP (S390_lowcore.machine_flags & MACHINE_FLAG_LPP)
70#define MACHINE_HAS_TOPOLOGY (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY) 68#define MACHINE_HAS_TOPOLOGY (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY)
71#define MACHINE_HAS_TE (S390_lowcore.machine_flags & MACHINE_FLAG_TE) 69#define MACHINE_HAS_TE (S390_lowcore.machine_flags & MACHINE_FLAG_TE)
72#define MACHINE_HAS_TLB_LC (S390_lowcore.machine_flags & MACHINE_FLAG_TLB_LC) 70#define MACHINE_HAS_TLB_LC (S390_lowcore.machine_flags & MACHINE_FLAG_TLB_LC)
diff --git a/arch/s390/include/uapi/asm/dasd.h b/arch/s390/include/uapi/asm/dasd.h
index 451c601406b6..832be5c2584f 100644
--- a/arch/s390/include/uapi/asm/dasd.h
+++ b/arch/s390/include/uapi/asm/dasd.h
@@ -68,25 +68,27 @@ typedef struct dasd_information2_t {
68#define DASD_FORMAT_CDL 2 68#define DASD_FORMAT_CDL 2
69/* 69/*
70 * values to be used for dasd_information_t.features 70 * values to be used for dasd_information_t.features
71 * 0x00: default features 71 * 0x100: default features
72 * 0x01: readonly (ro) 72 * 0x001: readonly (ro)
73 * 0x02: use diag discipline (diag) 73 * 0x002: use diag discipline (diag)
74 * 0x04: set the device initially online (internal use only) 74 * 0x004: set the device initially online (internal use only)
75 * 0x08: enable ERP related logging 75 * 0x008: enable ERP related logging
76 * 0x10: allow I/O to fail on lost paths 76 * 0x010: allow I/O to fail on lost paths
77 * 0x20: allow I/O to fail when a lock was stolen 77 * 0x020: allow I/O to fail when a lock was stolen
78 * 0x40: give access to raw eckd data 78 * 0x040: give access to raw eckd data
79 * 0x80: enable discard support 79 * 0x080: enable discard support
80 * 0x100: enable autodisable for IFCC errors (default)
80 */ 81 */
81#define DASD_FEATURE_DEFAULT 0x00 82#define DASD_FEATURE_READONLY 0x001
82#define DASD_FEATURE_READONLY 0x01 83#define DASD_FEATURE_USEDIAG 0x002
83#define DASD_FEATURE_USEDIAG 0x02 84#define DASD_FEATURE_INITIAL_ONLINE 0x004
84#define DASD_FEATURE_INITIAL_ONLINE 0x04 85#define DASD_FEATURE_ERPLOG 0x008
85#define DASD_FEATURE_ERPLOG 0x08 86#define DASD_FEATURE_FAILFAST 0x010
86#define DASD_FEATURE_FAILFAST 0x10 87#define DASD_FEATURE_FAILONSLCK 0x020
87#define DASD_FEATURE_FAILONSLCK 0x20 88#define DASD_FEATURE_USERAW 0x040
88#define DASD_FEATURE_USERAW 0x40 89#define DASD_FEATURE_DISCARD 0x080
89#define DASD_FEATURE_DISCARD 0x80 90#define DASD_FEATURE_PATH_AUTODISABLE 0x100
91#define DASD_FEATURE_DEFAULT DASD_FEATURE_PATH_AUTODISABLE
90 92
91#define DASD_PARTN_BITS 2 93#define DASD_PARTN_BITS 2
92 94
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 7f27e3da9709..b06a6f79c1ec 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -61,11 +61,11 @@ obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o als.o
61obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o 61obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
62obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o 62obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o
63obj-y += entry.o reipl.o relocate_kernel.o kdebugfs.o alternative.o 63obj-y += entry.o reipl.o relocate_kernel.o kdebugfs.o alternative.o
64obj-y += nospec-branch.o
64 65
65extra-y += head.o head64.o vmlinux.lds 66extra-y += head.o head64.o vmlinux.lds
66 67
67obj-$(CONFIG_EXPOLINE) += nospec-branch.o 68CFLAGS_REMOVE_nospec-branch.o += $(CC_FLAGS_EXPOLINE)
68CFLAGS_REMOVE_expoline.o += $(CC_FLAGS_EXPOLINE)
69 69
70obj-$(CONFIG_MODULES) += module.o 70obj-$(CONFIG_MODULES) += module.o
71obj-$(CONFIG_SMP) += smp.o 71obj-$(CONFIG_SMP) += smp.o
diff --git a/arch/s390/kernel/alternative.c b/arch/s390/kernel/alternative.c
index 22476135f738..8e1f2aee85ef 100644
--- a/arch/s390/kernel/alternative.c
+++ b/arch/s390/kernel/alternative.c
@@ -2,6 +2,7 @@
2#include <linux/module.h> 2#include <linux/module.h>
3#include <asm/alternative.h> 3#include <asm/alternative.h>
4#include <asm/facility.h> 4#include <asm/facility.h>
5#include <asm/nospec-branch.h>
5 6
6#define MAX_PATCH_LEN (255 - 1) 7#define MAX_PATCH_LEN (255 - 1)
7 8
@@ -15,29 +16,6 @@ static int __init disable_alternative_instructions(char *str)
15 16
16early_param("noaltinstr", disable_alternative_instructions); 17early_param("noaltinstr", disable_alternative_instructions);
17 18
18static int __init nobp_setup_early(char *str)
19{
20 bool enabled;
21 int rc;
22
23 rc = kstrtobool(str, &enabled);
24 if (rc)
25 return rc;
26 if (enabled && test_facility(82))
27 __set_facility(82, S390_lowcore.alt_stfle_fac_list);
28 else
29 __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
30 return 0;
31}
32early_param("nobp", nobp_setup_early);
33
34static int __init nospec_setup_early(char *str)
35{
36 __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
37 return 0;
38}
39early_param("nospec", nospec_setup_early);
40
41struct brcl_insn { 19struct brcl_insn {
42 u16 opc; 20 u16 opc;
43 s32 disp; 21 s32 disp;
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 587b195b588d..cfe2c45c5180 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -63,6 +63,7 @@ int main(void)
63 OFFSET(__SF_SIE_CONTROL, stack_frame, empty1[0]); 63 OFFSET(__SF_SIE_CONTROL, stack_frame, empty1[0]);
64 OFFSET(__SF_SIE_SAVEAREA, stack_frame, empty1[1]); 64 OFFSET(__SF_SIE_SAVEAREA, stack_frame, empty1[1]);
65 OFFSET(__SF_SIE_REASON, stack_frame, empty1[2]); 65 OFFSET(__SF_SIE_REASON, stack_frame, empty1[2]);
66 OFFSET(__SF_SIE_FLAGS, stack_frame, empty1[3]);
66 BLANK(); 67 BLANK();
67 /* timeval/timezone offsets for use by vdso */ 68 /* timeval/timezone offsets for use by vdso */
68 OFFSET(__VDSO_UPD_COUNT, vdso_data, tb_update_count); 69 OFFSET(__VDSO_UPD_COUNT, vdso_data, tb_update_count);
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index ac707a9f729e..b00b515baa53 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -67,7 +67,7 @@ static noinline __init void init_kernel_storage_key(void)
67#if PAGE_DEFAULT_KEY 67#if PAGE_DEFAULT_KEY
68 unsigned long end_pfn, init_pfn; 68 unsigned long end_pfn, init_pfn;
69 69
70 end_pfn = PFN_UP(__pa(&_end)); 70 end_pfn = PFN_UP(__pa(_end));
71 71
72 for (init_pfn = 0 ; init_pfn < end_pfn; init_pfn++) 72 for (init_pfn = 0 ; init_pfn < end_pfn; init_pfn++)
73 page_set_storage_key(init_pfn << PAGE_SHIFT, 73 page_set_storage_key(init_pfn << PAGE_SHIFT,
@@ -242,8 +242,6 @@ static __init void detect_machine_facilities(void)
242 S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT2; 242 S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT2;
243 if (test_facility(3)) 243 if (test_facility(3))
244 S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE; 244 S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE;
245 if (test_facility(40))
246 S390_lowcore.machine_flags |= MACHINE_FLAG_LPP;
247 if (test_facility(50) && test_facility(73)) { 245 if (test_facility(50) && test_facility(73)) {
248 S390_lowcore.machine_flags |= MACHINE_FLAG_TE; 246 S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
249 __ctl_set_bit(0, 55); 247 __ctl_set_bit(0, 55);
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index a5621ea6d123..3f22f139a041 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -11,6 +11,7 @@
11 11
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/linkage.h> 13#include <linux/linkage.h>
14#include <asm/alternative-asm.h>
14#include <asm/processor.h> 15#include <asm/processor.h>
15#include <asm/cache.h> 16#include <asm/cache.h>
16#include <asm/ctl_reg.h> 17#include <asm/ctl_reg.h>
@@ -57,6 +58,8 @@ _CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE_PRIMARY | \
57 _CIF_ASCE_SECONDARY | _CIF_FPU) 58 _CIF_ASCE_SECONDARY | _CIF_FPU)
58_PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART) 59_PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)
59 60
61_LPP_OFFSET = __LC_LPP
62
60#define BASED(name) name-cleanup_critical(%r13) 63#define BASED(name) name-cleanup_critical(%r13)
61 64
62 .macro TRACE_IRQS_ON 65 .macro TRACE_IRQS_ON
@@ -162,65 +165,22 @@ _PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)
162 .endm 165 .endm
163 166
164 .macro BPOFF 167 .macro BPOFF
165 .pushsection .altinstr_replacement, "ax" 168 ALTERNATIVE "", ".long 0xb2e8c000", 82
166660: .long 0xb2e8c000
167 .popsection
168661: .long 0x47000000
169 .pushsection .altinstructions, "a"
170 .long 661b - .
171 .long 660b - .
172 .word 82
173 .byte 4
174 .byte 4
175 .popsection
176 .endm 169 .endm
177 170
178 .macro BPON 171 .macro BPON
179 .pushsection .altinstr_replacement, "ax" 172 ALTERNATIVE "", ".long 0xb2e8d000", 82
180662: .long 0xb2e8d000
181 .popsection
182663: .long 0x47000000
183 .pushsection .altinstructions, "a"
184 .long 663b - .
185 .long 662b - .
186 .word 82
187 .byte 4
188 .byte 4
189 .popsection
190 .endm 173 .endm
191 174
192 .macro BPENTER tif_ptr,tif_mask 175 .macro BPENTER tif_ptr,tif_mask
193 .pushsection .altinstr_replacement, "ax" 176 ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .long 0xb2e8d000", \
194662: .word 0xc004, 0x0000, 0x0000 # 6 byte nop 177 "", 82
195 .word 0xc004, 0x0000, 0x0000 # 6 byte nop
196 .popsection
197664: TSTMSK \tif_ptr,\tif_mask
198 jz . + 8
199 .long 0xb2e8d000
200 .pushsection .altinstructions, "a"
201 .long 664b - .
202 .long 662b - .
203 .word 82
204 .byte 12
205 .byte 12
206 .popsection
207 .endm 178 .endm
208 179
209 .macro BPEXIT tif_ptr,tif_mask 180 .macro BPEXIT tif_ptr,tif_mask
210 TSTMSK \tif_ptr,\tif_mask 181 TSTMSK \tif_ptr,\tif_mask
211 .pushsection .altinstr_replacement, "ax" 182 ALTERNATIVE "jz .+8; .long 0xb2e8c000", \
212662: jnz . + 8 183 "jnz .+8; .long 0xb2e8d000", 82
213 .long 0xb2e8d000
214 .popsection
215664: jz . + 8
216 .long 0xb2e8c000
217 .pushsection .altinstructions, "a"
218 .long 664b - .
219 .long 662b - .
220 .word 82
221 .byte 8
222 .byte 8
223 .popsection
224 .endm 184 .endm
225 185
226#ifdef CONFIG_EXPOLINE 186#ifdef CONFIG_EXPOLINE
@@ -323,10 +283,8 @@ ENTRY(__switch_to)
323 aghi %r3,__TASK_pid 283 aghi %r3,__TASK_pid
324 mvc __LC_CURRENT_PID(4,%r0),0(%r3) # store pid of next 284 mvc __LC_CURRENT_PID(4,%r0),0(%r3) # store pid of next
325 lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task 285 lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
326 TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_LPP 286 ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
327 jz 0f 287 BR_R1USE_R14
328 .insn s,0xb2800000,__LC_LPP # set program parameter
3290: BR_R1USE_R14
330 288
331.L__critical_start: 289.L__critical_start:
332 290
@@ -339,10 +297,10 @@ ENTRY(__switch_to)
339ENTRY(sie64a) 297ENTRY(sie64a)
340 stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers 298 stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers
341 lg %r12,__LC_CURRENT 299 lg %r12,__LC_CURRENT
342 stg %r2,__SF_EMPTY(%r15) # save control block pointer 300 stg %r2,__SF_SIE_CONTROL(%r15) # save control block pointer
343 stg %r3,__SF_EMPTY+8(%r15) # save guest register save area 301 stg %r3,__SF_SIE_SAVEAREA(%r15) # save guest register save area
344 xc __SF_EMPTY+16(8,%r15),__SF_EMPTY+16(%r15) # reason code = 0 302 xc __SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0
345 mvc __SF_EMPTY+24(8,%r15),__TI_flags(%r12) # copy thread flags 303 mvc __SF_SIE_FLAGS(8,%r15),__TI_flags(%r12) # copy thread flags
346 TSTMSK __LC_CPU_FLAGS,_CIF_FPU # load guest fp/vx registers ? 304 TSTMSK __LC_CPU_FLAGS,_CIF_FPU # load guest fp/vx registers ?
347 jno .Lsie_load_guest_gprs 305 jno .Lsie_load_guest_gprs
348 brasl %r14,load_fpu_regs # load guest fp/vx regs 306 brasl %r14,load_fpu_regs # load guest fp/vx regs
@@ -353,18 +311,18 @@ ENTRY(sie64a)
353 jz .Lsie_gmap 311 jz .Lsie_gmap
354 lctlg %c1,%c1,__GMAP_ASCE(%r14) # load primary asce 312 lctlg %c1,%c1,__GMAP_ASCE(%r14) # load primary asce
355.Lsie_gmap: 313.Lsie_gmap:
356 lg %r14,__SF_EMPTY(%r15) # get control block pointer 314 lg %r14,__SF_SIE_CONTROL(%r15) # get control block pointer
357 oi __SIE_PROG0C+3(%r14),1 # we are going into SIE now 315 oi __SIE_PROG0C+3(%r14),1 # we are going into SIE now
358 tm __SIE_PROG20+3(%r14),3 # last exit... 316 tm __SIE_PROG20+3(%r14),3 # last exit...
359 jnz .Lsie_skip 317 jnz .Lsie_skip
360 TSTMSK __LC_CPU_FLAGS,_CIF_FPU 318 TSTMSK __LC_CPU_FLAGS,_CIF_FPU
361 jo .Lsie_skip # exit if fp/vx regs changed 319 jo .Lsie_skip # exit if fp/vx regs changed
362 BPEXIT __SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST) 320 BPEXIT __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
363.Lsie_entry: 321.Lsie_entry:
364 sie 0(%r14) 322 sie 0(%r14)
365.Lsie_exit: 323.Lsie_exit:
366 BPOFF 324 BPOFF
367 BPENTER __SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST) 325 BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
368.Lsie_skip: 326.Lsie_skip:
369 ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE 327 ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
370 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce 328 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
@@ -383,7 +341,7 @@ ENTRY(sie64a)
383 nopr 7 341 nopr 7
384 .globl sie_exit 342 .globl sie_exit
385sie_exit: 343sie_exit:
386 lg %r14,__SF_EMPTY+8(%r15) # load guest register save area 344 lg %r14,__SF_SIE_SAVEAREA(%r15) # load guest register save area
387 stmg %r0,%r13,0(%r14) # save guest gprs 0-13 345 stmg %r0,%r13,0(%r14) # save guest gprs 0-13
388 xgr %r0,%r0 # clear guest registers to 346 xgr %r0,%r0 # clear guest registers to
389 xgr %r1,%r1 # prevent speculative use 347 xgr %r1,%r1 # prevent speculative use
@@ -392,11 +350,11 @@ sie_exit:
392 xgr %r4,%r4 350 xgr %r4,%r4
393 xgr %r5,%r5 351 xgr %r5,%r5
394 lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers 352 lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers
395 lg %r2,__SF_EMPTY+16(%r15) # return exit reason code 353 lg %r2,__SF_SIE_REASON(%r15) # return exit reason code
396 BR_R1USE_R14 354 BR_R1USE_R14
397.Lsie_fault: 355.Lsie_fault:
398 lghi %r14,-EFAULT 356 lghi %r14,-EFAULT
399 stg %r14,__SF_EMPTY+16(%r15) # set exit reason code 357 stg %r14,__SF_SIE_REASON(%r15) # set exit reason code
400 j sie_exit 358 j sie_exit
401 359
402 EX_TABLE(.Lrewind_pad6,.Lsie_fault) 360 EX_TABLE(.Lrewind_pad6,.Lsie_fault)
@@ -685,7 +643,7 @@ ENTRY(pgm_check_handler)
685 slg %r14,BASED(.Lsie_critical_start) 643 slg %r14,BASED(.Lsie_critical_start)
686 clg %r14,BASED(.Lsie_critical_length) 644 clg %r14,BASED(.Lsie_critical_length)
687 jhe 0f 645 jhe 0f
688 lg %r14,__SF_EMPTY(%r15) # get control block pointer 646 lg %r14,__SF_SIE_CONTROL(%r15) # get control block pointer
689 ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE 647 ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
690 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce 648 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
691 larl %r9,sie_exit # skip forward to sie_exit 649 larl %r9,sie_exit # skip forward to sie_exit
@@ -1285,10 +1243,8 @@ ENTRY(mcck_int_handler)
1285# PSW restart interrupt handler 1243# PSW restart interrupt handler
1286# 1244#
1287ENTRY(restart_int_handler) 1245ENTRY(restart_int_handler)
1288 TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_LPP 1246 ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
1289 jz 0f 1247 stg %r15,__LC_SAVE_AREA_RESTART
1290 .insn s,0xb2800000,__LC_LPP
12910: stg %r15,__LC_SAVE_AREA_RESTART
1292 lg %r15,__LC_RESTART_STACK 1248 lg %r15,__LC_RESTART_STACK
1293 aghi %r15,-__PT_SIZE # create pt_regs on stack 1249 aghi %r15,-__PT_SIZE # create pt_regs on stack
1294 xc 0(__PT_SIZE,%r15),0(%r15) 1250 xc 0(__PT_SIZE,%r15),0(%r15)
@@ -1397,8 +1353,8 @@ cleanup_critical:
1397 clg %r9,BASED(.Lsie_crit_mcck_length) 1353 clg %r9,BASED(.Lsie_crit_mcck_length)
1398 jh 1f 1354 jh 1f
1399 oi __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST 1355 oi __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
14001: BPENTER __SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST) 13561: BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
1401 lg %r9,__SF_EMPTY(%r15) # get control block pointer 1357 lg %r9,__SF_SIE_CONTROL(%r15) # get control block pointer
1402 ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE 1358 ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE
1403 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce 1359 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
1404 larl %r9,sie_exit # skip forward to sie_exit 1360 larl %r9,sie_exit # skip forward to sie_exit
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 1fc6d1ff92d3..5a83be955c70 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -159,7 +159,7 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
159 me->core_layout.size += me->arch.got_size; 159 me->core_layout.size += me->arch.got_size;
160 me->arch.plt_offset = me->core_layout.size; 160 me->arch.plt_offset = me->core_layout.size;
161 if (me->arch.plt_size) { 161 if (me->arch.plt_size) {
162 if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_call_disable) 162 if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable)
163 me->arch.plt_size += PLT_ENTRY_SIZE; 163 me->arch.plt_size += PLT_ENTRY_SIZE;
164 me->core_layout.size += me->arch.plt_size; 164 me->core_layout.size += me->arch.plt_size;
165 } 165 }
@@ -318,8 +318,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
318 info->plt_offset; 318 info->plt_offset;
319 ip[0] = 0x0d10e310; /* basr 1,0 */ 319 ip[0] = 0x0d10e310; /* basr 1,0 */
320 ip[1] = 0x100a0004; /* lg 1,10(1) */ 320 ip[1] = 0x100a0004; /* lg 1,10(1) */
321 if (IS_ENABLED(CONFIG_EXPOLINE) && 321 if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable) {
322 !nospec_call_disable) {
323 unsigned int *ij; 322 unsigned int *ij;
324 ij = me->core_layout.base + 323 ij = me->core_layout.base +
325 me->arch.plt_offset + 324 me->arch.plt_offset +
@@ -440,7 +439,7 @@ int module_finalize(const Elf_Ehdr *hdr,
440 void *aseg; 439 void *aseg;
441 440
442 if (IS_ENABLED(CONFIG_EXPOLINE) && 441 if (IS_ENABLED(CONFIG_EXPOLINE) &&
443 !nospec_call_disable && me->arch.plt_size) { 442 !nospec_disable && me->arch.plt_size) {
444 unsigned int *ij; 443 unsigned int *ij;
445 444
446 ij = me->core_layout.base + me->arch.plt_offset + 445 ij = me->core_layout.base + me->arch.plt_offset +
@@ -467,11 +466,11 @@ int module_finalize(const Elf_Ehdr *hdr,
467 466
468 if (IS_ENABLED(CONFIG_EXPOLINE) && 467 if (IS_ENABLED(CONFIG_EXPOLINE) &&
469 (!strcmp(".nospec_call_table", secname))) 468 (!strcmp(".nospec_call_table", secname)))
470 nospec_call_revert(aseg, aseg + s->sh_size); 469 nospec_revert(aseg, aseg + s->sh_size);
471 470
472 if (IS_ENABLED(CONFIG_EXPOLINE) && 471 if (IS_ENABLED(CONFIG_EXPOLINE) &&
473 (!strcmp(".nospec_return_table", secname))) 472 (!strcmp(".nospec_return_table", secname)))
474 nospec_return_revert(aseg, aseg + s->sh_size); 473 nospec_revert(aseg, aseg + s->sh_size);
475 } 474 }
476 475
477 jump_label_apply_nops(me); 476 jump_label_apply_nops(me);
diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c
index 9aff72d3abda..14867ec5f726 100644
--- a/arch/s390/kernel/nospec-branch.c
+++ b/arch/s390/kernel/nospec-branch.c
@@ -1,32 +1,108 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2#include <linux/module.h> 2#include <linux/module.h>
3#include <linux/device.h>
3#include <asm/nospec-branch.h> 4#include <asm/nospec-branch.h>
4 5
5int nospec_call_disable = IS_ENABLED(CONFIG_EXPOLINE_OFF); 6static int __init nobp_setup_early(char *str)
6int nospec_return_disable = !IS_ENABLED(CONFIG_EXPOLINE_FULL); 7{
8 bool enabled;
9 int rc;
10
11 rc = kstrtobool(str, &enabled);
12 if (rc)
13 return rc;
14 if (enabled && test_facility(82)) {
15 /*
16 * The user explicitely requested nobp=1, enable it and
17 * disable the expoline support.
18 */
19 __set_facility(82, S390_lowcore.alt_stfle_fac_list);
20 if (IS_ENABLED(CONFIG_EXPOLINE))
21 nospec_disable = 1;
22 } else {
23 __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
24 }
25 return 0;
26}
27early_param("nobp", nobp_setup_early);
28
29static int __init nospec_setup_early(char *str)
30{
31 __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
32 return 0;
33}
34early_param("nospec", nospec_setup_early);
35
36static int __init nospec_report(void)
37{
38 if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
39 pr_info("Spectre V2 mitigation: execute trampolines.\n");
40 if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
41 pr_info("Spectre V2 mitigation: limited branch prediction.\n");
42 return 0;
43}
44arch_initcall(nospec_report);
45
46#ifdef CONFIG_SYSFS
47ssize_t cpu_show_spectre_v1(struct device *dev,
48 struct device_attribute *attr, char *buf)
49{
50 return sprintf(buf, "Mitigation: __user pointer sanitization\n");
51}
52
53ssize_t cpu_show_spectre_v2(struct device *dev,
54 struct device_attribute *attr, char *buf)
55{
56 if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
57 return sprintf(buf, "Mitigation: execute trampolines\n");
58 if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
59 return sprintf(buf, "Mitigation: limited branch prediction.\n");
60 return sprintf(buf, "Vulnerable\n");
61}
62#endif
63
64#ifdef CONFIG_EXPOLINE
65
66int nospec_disable = IS_ENABLED(CONFIG_EXPOLINE_OFF);
7 67
8static int __init nospectre_v2_setup_early(char *str) 68static int __init nospectre_v2_setup_early(char *str)
9{ 69{
10 nospec_call_disable = 1; 70 nospec_disable = 1;
11 nospec_return_disable = 1;
12 return 0; 71 return 0;
13} 72}
14early_param("nospectre_v2", nospectre_v2_setup_early); 73early_param("nospectre_v2", nospectre_v2_setup_early);
15 74
75static int __init spectre_v2_auto_early(void)
76{
77 if (IS_ENABLED(CC_USING_EXPOLINE)) {
78 /*
79 * The kernel has been compiled with expolines.
80 * Keep expolines enabled and disable nobp.
81 */
82 nospec_disable = 0;
83 __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
84 }
85 /*
 86 * If the kernel has not been compiled with expolines, the
 87 * nobp setting decides what is done; this depends on the
 88 * CONFIG_KERNEL_NOBP option and the nobp/nospec parameters.
89 */
90 return 0;
91}
92#ifdef CONFIG_EXPOLINE_AUTO
93early_initcall(spectre_v2_auto_early);
94#endif
95
16static int __init spectre_v2_setup_early(char *str) 96static int __init spectre_v2_setup_early(char *str)
17{ 97{
18 if (str && !strncmp(str, "on", 2)) { 98 if (str && !strncmp(str, "on", 2)) {
19 nospec_call_disable = 0; 99 nospec_disable = 0;
20 nospec_return_disable = 0; 100 __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
21 }
22 if (str && !strncmp(str, "off", 3)) {
23 nospec_call_disable = 1;
24 nospec_return_disable = 1;
25 }
26 if (str && !strncmp(str, "auto", 4)) {
27 nospec_call_disable = 0;
28 nospec_return_disable = 1;
29 } 101 }
102 if (str && !strncmp(str, "off", 3))
103 nospec_disable = 1;
104 if (str && !strncmp(str, "auto", 4))
105 spectre_v2_auto_early();
30 return 0; 106 return 0;
31} 107}
32early_param("spectre_v2", spectre_v2_setup_early); 108early_param("spectre_v2", spectre_v2_setup_early);
@@ -79,15 +155,9 @@ static void __init_or_module __nospec_revert(s32 *start, s32 *end)
79 } 155 }
80} 156}
81 157
82void __init_or_module nospec_call_revert(s32 *start, s32 *end) 158void __init_or_module nospec_revert(s32 *start, s32 *end)
83{
84 if (nospec_call_disable)
85 __nospec_revert(start, end);
86}
87
88void __init_or_module nospec_return_revert(s32 *start, s32 *end)
89{ 159{
90 if (nospec_return_disable) 160 if (nospec_disable)
91 __nospec_revert(start, end); 161 __nospec_revert(start, end);
92} 162}
93 163
@@ -95,6 +165,8 @@ extern s32 __nospec_call_start[], __nospec_call_end[];
95extern s32 __nospec_return_start[], __nospec_return_end[]; 165extern s32 __nospec_return_start[], __nospec_return_end[];
96void __init nospec_init_branches(void) 166void __init nospec_init_branches(void)
97{ 167{
98 nospec_call_revert(__nospec_call_start, __nospec_call_end); 168 nospec_revert(__nospec_call_start, __nospec_call_end);
99 nospec_return_revert(__nospec_return_start, __nospec_return_end); 169 nospec_revert(__nospec_return_start, __nospec_return_end);
100} 170}
171
172#endif /* CONFIG_EXPOLINE */
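The spectre_v1/v2 attribute functions above feed the generic CPU vulnerabilities interface that CONFIG_GENERIC_CPU_VULNERABILITIES enables, so the selected mitigation can be read back from sysfs. A minimal userspace sketch (not part of the patch), assuming the standard location of the vulnerabilities directory:

/*
 * Minimal sketch, not part of the patch: read the Spectre v2 state that
 * cpu_show_spectre_v2() above provides. Assumes the standard sysfs
 * location of the generic CPU vulnerabilities directory.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spectre_v2", "r");
	char line[128];

	if (!f) {
		perror("spectre_v2");
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* e.g. "Mitigation: execute trampolines" */
	fclose(f);
	return 0;
}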
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index a6a91f01a17a..7b58a712f818 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -221,6 +221,8 @@ static void __init conmode_default(void)
221 SET_CONSOLE_SCLP; 221 SET_CONSOLE_SCLP;
222#endif 222#endif
223 } 223 }
224 if (IS_ENABLED(CONFIG_VT) && IS_ENABLED(CONFIG_DUMMY_CONSOLE))
225 conswitchp = &dummy_con;
224} 226}
225 227
226#ifdef CONFIG_CRASH_DUMP 228#ifdef CONFIG_CRASH_DUMP
@@ -413,12 +415,12 @@ static void __init setup_resources(void)
413 struct memblock_region *reg; 415 struct memblock_region *reg;
414 int j; 416 int j;
415 417
416 code_resource.start = (unsigned long) &_text; 418 code_resource.start = (unsigned long) _text;
417 code_resource.end = (unsigned long) &_etext - 1; 419 code_resource.end = (unsigned long) _etext - 1;
418 data_resource.start = (unsigned long) &_etext; 420 data_resource.start = (unsigned long) _etext;
419 data_resource.end = (unsigned long) &_edata - 1; 421 data_resource.end = (unsigned long) _edata - 1;
420 bss_resource.start = (unsigned long) &__bss_start; 422 bss_resource.start = (unsigned long) __bss_start;
421 bss_resource.end = (unsigned long) &__bss_stop - 1; 423 bss_resource.end = (unsigned long) __bss_stop - 1;
422 424
423 for_each_memblock(memory, reg) { 425 for_each_memblock(memory, reg) {
424 res = memblock_virt_alloc(sizeof(*res), 8); 426 res = memblock_virt_alloc(sizeof(*res), 8);
@@ -667,7 +669,7 @@ static void __init check_initrd(void)
667 */ 669 */
668static void __init reserve_kernel(void) 670static void __init reserve_kernel(void)
669{ 671{
670 unsigned long start_pfn = PFN_UP(__pa(&_end)); 672 unsigned long start_pfn = PFN_UP(__pa(_end));
671 673
672#ifdef CONFIG_DMA_API_DEBUG 674#ifdef CONFIG_DMA_API_DEBUG
673 /* 675 /*
@@ -888,9 +890,9 @@ void __init setup_arch(char **cmdline_p)
888 890
889 /* Is init_mm really needed? */ 891 /* Is init_mm really needed? */
890 init_mm.start_code = PAGE_OFFSET; 892 init_mm.start_code = PAGE_OFFSET;
891 init_mm.end_code = (unsigned long) &_etext; 893 init_mm.end_code = (unsigned long) _etext;
892 init_mm.end_data = (unsigned long) &_edata; 894 init_mm.end_data = (unsigned long) _edata;
893 init_mm.brk = (unsigned long) &_end; 895 init_mm.brk = (unsigned long) _end;
894 896
895 parse_early_param(); 897 parse_early_param();
896#ifdef CONFIG_CRASH_DUMP 898#ifdef CONFIG_CRASH_DUMP
diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c
index ce329c876d8c..75b7b307946e 100644
--- a/arch/s390/kernel/suspend.c
+++ b/arch/s390/kernel/suspend.c
@@ -153,8 +153,8 @@ int pfn_is_nosave(unsigned long pfn)
153{ 153{
154 unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin)); 154 unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin));
155 unsigned long nosave_end_pfn = PFN_DOWN(__pa(&__nosave_end)); 155 unsigned long nosave_end_pfn = PFN_DOWN(__pa(&__nosave_end));
156 unsigned long end_rodata_pfn = PFN_DOWN(__pa(&__end_rodata)) - 1; 156 unsigned long end_rodata_pfn = PFN_DOWN(__pa(__end_rodata)) - 1;
157 unsigned long stext_pfn = PFN_DOWN(__pa(&_stext)); 157 unsigned long stext_pfn = PFN_DOWN(__pa(_stext));
158 158
159 /* Always save lowcore pages (LC protection might be enabled). */ 159 /* Always save lowcore pages (LC protection might be enabled). */
160 if (pfn <= LC_PAGES) 160 if (pfn <= LC_PAGES)
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index 507f23ba2034..7cdea2ec51e9 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -24,8 +24,8 @@ enum address_markers_idx {
24 24
25static struct addr_marker address_markers[] = { 25static struct addr_marker address_markers[] = {
26 [IDENTITY_NR] = {0, "Identity Mapping"}, 26 [IDENTITY_NR] = {0, "Identity Mapping"},
27 [KERNEL_START_NR] = {(unsigned long)&_stext, "Kernel Image Start"}, 27 [KERNEL_START_NR] = {(unsigned long)_stext, "Kernel Image Start"},
28 [KERNEL_END_NR] = {(unsigned long)&_end, "Kernel Image End"}, 28 [KERNEL_END_NR] = {(unsigned long)_end, "Kernel Image End"},
29 [VMEMMAP_NR] = {0, "vmemmap Area"}, 29 [VMEMMAP_NR] = {0, "vmemmap Area"},
30 [VMALLOC_NR] = {0, "vmalloc Area"}, 30 [VMALLOC_NR] = {0, "vmalloc Area"},
31 [MODULES_NR] = {0, "Modules Area"}, 31 [MODULES_NR] = {0, "Modules Area"},
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index cb364153c43c..562f72955956 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -6,8 +6,9 @@
6 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> 6 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
7 */ 7 */
8 8
9#include <linux/mm.h>
10#include <linux/sysctl.h> 9#include <linux/sysctl.h>
10#include <linux/slab.h>
11#include <linux/mm.h>
11#include <asm/mmu_context.h> 12#include <asm/mmu_context.h>
12#include <asm/pgalloc.h> 13#include <asm/pgalloc.h>
13#include <asm/gmap.h> 14#include <asm/gmap.h>
@@ -366,3 +367,293 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
366 if ((*batch)->nr == MAX_TABLE_BATCH) 367 if ((*batch)->nr == MAX_TABLE_BATCH)
367 tlb_flush_mmu(tlb); 368 tlb_flush_mmu(tlb);
368} 369}
370
371/*
372 * Base infrastructure required to generate basic asces, region, segment,
373 * and page tables that do not make use of enhanced features like EDAT1.
374 */
375
376static struct kmem_cache *base_pgt_cache;
377
378static unsigned long base_pgt_alloc(void)
379{
380 u64 *table;
381
382 table = kmem_cache_alloc(base_pgt_cache, GFP_KERNEL);
383 if (table)
384 memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
385 return (unsigned long) table;
386}
387
388static void base_pgt_free(unsigned long table)
389{
390 kmem_cache_free(base_pgt_cache, (void *) table);
391}
392
393static unsigned long base_crst_alloc(unsigned long val)
394{
395 unsigned long table;
396
397 table = __get_free_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
398 if (table)
399 crst_table_init((unsigned long *)table, val);
400 return table;
401}
402
403static void base_crst_free(unsigned long table)
404{
405 free_pages(table, CRST_ALLOC_ORDER);
406}
407
408#define BASE_ADDR_END_FUNC(NAME, SIZE) \
409static inline unsigned long base_##NAME##_addr_end(unsigned long addr, \
410 unsigned long end) \
411{ \
412 unsigned long next = (addr + (SIZE)) & ~((SIZE) - 1); \
413 \
414 return (next - 1) < (end - 1) ? next : end; \
415}
416
417BASE_ADDR_END_FUNC(page, _PAGE_SIZE)
418BASE_ADDR_END_FUNC(segment, _SEGMENT_SIZE)
419BASE_ADDR_END_FUNC(region3, _REGION3_SIZE)
420BASE_ADDR_END_FUNC(region2, _REGION2_SIZE)
421BASE_ADDR_END_FUNC(region1, _REGION1_SIZE)
422
423static inline unsigned long base_lra(unsigned long address)
424{
425 unsigned long real;
426
427 asm volatile(
428 " lra %0,0(%1)\n"
429 : "=d" (real) : "a" (address) : "cc");
430 return real;
431}
432
433static int base_page_walk(unsigned long origin, unsigned long addr,
434 unsigned long end, int alloc)
435{
436 unsigned long *pte, next;
437
438 if (!alloc)
439 return 0;
440 pte = (unsigned long *) origin;
441 pte += (addr & _PAGE_INDEX) >> _PAGE_SHIFT;
442 do {
443 next = base_page_addr_end(addr, end);
444 *pte = base_lra(addr);
445 } while (pte++, addr = next, addr < end);
446 return 0;
447}
448
449static int base_segment_walk(unsigned long origin, unsigned long addr,
450 unsigned long end, int alloc)
451{
452 unsigned long *ste, next, table;
453 int rc;
454
455 ste = (unsigned long *) origin;
456 ste += (addr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
457 do {
458 next = base_segment_addr_end(addr, end);
459 if (*ste & _SEGMENT_ENTRY_INVALID) {
460 if (!alloc)
461 continue;
462 table = base_pgt_alloc();
463 if (!table)
464 return -ENOMEM;
465 *ste = table | _SEGMENT_ENTRY;
466 }
467 table = *ste & _SEGMENT_ENTRY_ORIGIN;
468 rc = base_page_walk(table, addr, next, alloc);
469 if (rc)
470 return rc;
471 if (!alloc)
472 base_pgt_free(table);
473 cond_resched();
474 } while (ste++, addr = next, addr < end);
475 return 0;
476}
477
478static int base_region3_walk(unsigned long origin, unsigned long addr,
479 unsigned long end, int alloc)
480{
481 unsigned long *rtte, next, table;
482 int rc;
483
484 rtte = (unsigned long *) origin;
485 rtte += (addr & _REGION3_INDEX) >> _REGION3_SHIFT;
486 do {
487 next = base_region3_addr_end(addr, end);
488 if (*rtte & _REGION_ENTRY_INVALID) {
489 if (!alloc)
490 continue;
491 table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
492 if (!table)
493 return -ENOMEM;
494 *rtte = table | _REGION3_ENTRY;
495 }
496 table = *rtte & _REGION_ENTRY_ORIGIN;
497 rc = base_segment_walk(table, addr, next, alloc);
498 if (rc)
499 return rc;
500 if (!alloc)
501 base_crst_free(table);
502 } while (rtte++, addr = next, addr < end);
503 return 0;
504}
505
506static int base_region2_walk(unsigned long origin, unsigned long addr,
507 unsigned long end, int alloc)
508{
509 unsigned long *rste, next, table;
510 int rc;
511
512 rste = (unsigned long *) origin;
513 rste += (addr & _REGION2_INDEX) >> _REGION2_SHIFT;
514 do {
515 next = base_region2_addr_end(addr, end);
516 if (*rste & _REGION_ENTRY_INVALID) {
517 if (!alloc)
518 continue;
519 table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
520 if (!table)
521 return -ENOMEM;
522 *rste = table | _REGION2_ENTRY;
523 }
524 table = *rste & _REGION_ENTRY_ORIGIN;
525 rc = base_region3_walk(table, addr, next, alloc);
526 if (rc)
527 return rc;
528 if (!alloc)
529 base_crst_free(table);
530 } while (rste++, addr = next, addr < end);
531 return 0;
532}
533
534static int base_region1_walk(unsigned long origin, unsigned long addr,
535 unsigned long end, int alloc)
536{
537 unsigned long *rfte, next, table;
538 int rc;
539
540 rfte = (unsigned long *) origin;
541 rfte += (addr & _REGION1_INDEX) >> _REGION1_SHIFT;
542 do {
543 next = base_region1_addr_end(addr, end);
544 if (*rfte & _REGION_ENTRY_INVALID) {
545 if (!alloc)
546 continue;
547 table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
548 if (!table)
549 return -ENOMEM;
550 *rfte = table | _REGION1_ENTRY;
551 }
552 table = *rfte & _REGION_ENTRY_ORIGIN;
553 rc = base_region2_walk(table, addr, next, alloc);
554 if (rc)
555 return rc;
556 if (!alloc)
557 base_crst_free(table);
558 } while (rfte++, addr = next, addr < end);
559 return 0;
560}
561
562/**
563 * base_asce_free - free asce and tables returned from base_asce_alloc()
564 * @asce: asce to be freed
565 *
566 * Frees all region, segment, and page tables that were allocated with a
567 * corresponding base_asce_alloc() call.
568 */
569void base_asce_free(unsigned long asce)
570{
571 unsigned long table = asce & _ASCE_ORIGIN;
572
573 if (!asce)
574 return;
575 switch (asce & _ASCE_TYPE_MASK) {
576 case _ASCE_TYPE_SEGMENT:
577 base_segment_walk(table, 0, _REGION3_SIZE, 0);
578 break;
579 case _ASCE_TYPE_REGION3:
580 base_region3_walk(table, 0, _REGION2_SIZE, 0);
581 break;
582 case _ASCE_TYPE_REGION2:
583 base_region2_walk(table, 0, _REGION1_SIZE, 0);
584 break;
585 case _ASCE_TYPE_REGION1:
586 base_region1_walk(table, 0, -_PAGE_SIZE, 0);
587 break;
588 }
589 base_crst_free(table);
590}
591
592static int base_pgt_cache_init(void)
593{
594 static DEFINE_MUTEX(base_pgt_cache_mutex);
595 unsigned long sz = _PAGE_TABLE_SIZE;
596
597 if (base_pgt_cache)
598 return 0;
599 mutex_lock(&base_pgt_cache_mutex);
600 if (!base_pgt_cache)
601 base_pgt_cache = kmem_cache_create("base_pgt", sz, sz, 0, NULL);
602 mutex_unlock(&base_pgt_cache_mutex);
603 return base_pgt_cache ? 0 : -ENOMEM;
604}
605
606/**
607 * base_asce_alloc - create kernel mapping without enhanced DAT features
608 * @addr: virtual start address of kernel mapping
609 * @num_pages: number of consecutive pages
610 *
611 * Generate an asce, including all required region, segment and page tables,
 612 * that can be used to access the virtual kernel mapping. In contrast to a
 613 * regular kernel asce, the returned asce does not make use of enhanced DAT
 614 * features such as large pages. This is required for some I/O functions
 615 * that pass an asce, e.g. some service call requests.
616 *
617 * Note: the returned asce may NEVER be attached to any cpu. It may only be
 618 * used for I/O requests. TLB entries that would result from attaching
 619 * the asce to a cpu are never cleared.
620 */
621unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages)
622{
623 unsigned long asce, table, end;
624 int rc;
625
626 if (base_pgt_cache_init())
627 return 0;
628 end = addr + num_pages * PAGE_SIZE;
629 if (end <= _REGION3_SIZE) {
630 table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
631 if (!table)
632 return 0;
633 rc = base_segment_walk(table, addr, end, 1);
634 asce = table | _ASCE_TYPE_SEGMENT | _ASCE_TABLE_LENGTH;
635 } else if (end <= _REGION2_SIZE) {
636 table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
637 if (!table)
638 return 0;
639 rc = base_region3_walk(table, addr, end, 1);
640 asce = table | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
641 } else if (end <= _REGION1_SIZE) {
642 table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
643 if (!table)
644 return 0;
645 rc = base_region2_walk(table, addr, end, 1);
646 asce = table | _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
647 } else {
648 table = base_crst_alloc(_REGION1_ENTRY_EMPTY);
649 if (!table)
650 return 0;
651 rc = base_region1_walk(table, addr, end, 1);
652 asce = table | _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH;
653 }
654 if (rc) {
655 base_asce_free(asce);
656 asce = 0;
657 }
658 return asce;
659}
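base_asce_alloc() and base_asce_free() give callers a 4K-page-only address space for a buffer that is passed to the hardware or firmware by asce, which is how the SCLP Store Data code further down uses them. A hedged kernel-side sketch of that calling pattern; my_fw_request() is a hypothetical consumer, not a real interface:

/*
 * Hedged sketch, not from the patch: wrap a vmalloc buffer with an asce
 * built from 4K pages only and hand it to a firmware interface.
 * my_fw_request() is hypothetical; error handling is abbreviated.
 */
#include <linux/vmalloc.h>
#include <asm/pgalloc.h>

int my_fw_request(unsigned long asce, void *buf);	/* hypothetical */

static int map_buffer_for_firmware(unsigned long num_pages)
{
	void *buf = vzalloc(num_pages * PAGE_SIZE);
	unsigned long asce;
	int rc;

	if (!buf)
		return -ENOMEM;
	asce = base_asce_alloc((unsigned long) buf, num_pages);
	if (!asce) {
		vfree(buf);
		return -ENOMEM;
	}
	rc = my_fw_request(asce, buf);	/* hypothetical firmware request */
	base_asce_free(asce);		/* the asce was never attached to a CPU */
	vfree(buf);
	return rc;
}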
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index b5692a284bd8..04143c08bd6e 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -3918,8 +3918,13 @@ static int dasd_generic_requeue_all_requests(struct dasd_device *device)
3918 cqr = refers; 3918 cqr = refers;
3919 } 3919 }
3920 3920
3921 if (cqr->block) 3921 /*
3922 list_del_init(&cqr->blocklist); 3922 * _dasd_requeue_request already checked for a valid
 3923 * block device, no need to check again.
 3924 * All ERP requests (cqr->refers) have a cqr->block
 3925 * pointer copied from the original cqr.
3926 */
3927 list_del_init(&cqr->blocklist);
3923 cqr->block->base->discipline->free_cp( 3928 cqr->block->base->discipline->free_cp(
3924 cqr, (struct request *) cqr->callback_data); 3929 cqr, (struct request *) cqr->callback_data);
3925 } 3930 }
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index ee14d8e45c97..ee73b0607e47 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -2214,15 +2214,28 @@ static void dasd_3990_erp_disable_path(struct dasd_device *device, __u8 lpum)
2214{ 2214{
2215 int pos = pathmask_to_pos(lpum); 2215 int pos = pathmask_to_pos(lpum);
2216 2216
2217 if (!(device->features & DASD_FEATURE_PATH_AUTODISABLE)) {
2218 dev_err(&device->cdev->dev,
2219 "Path %x.%02x (pathmask %02x) is operational despite excessive IFCCs\n",
2220 device->path[pos].cssid, device->path[pos].chpid, lpum);
2221 goto out;
2222 }
2223
2217 /* no remaining path, cannot disable */ 2224 /* no remaining path, cannot disable */
2218 if (!(dasd_path_get_opm(device) & ~lpum)) 2225 if (!(dasd_path_get_opm(device) & ~lpum)) {
2219 return; 2226 dev_err(&device->cdev->dev,
2227 "Last path %x.%02x (pathmask %02x) is operational despite excessive IFCCs\n",
2228 device->path[pos].cssid, device->path[pos].chpid, lpum);
2229 goto out;
2230 }
2220 2231
2221 dev_err(&device->cdev->dev, 2232 dev_err(&device->cdev->dev,
2222 "Path %x.%02x (pathmask %02x) is disabled - IFCC threshold exceeded\n", 2233 "Path %x.%02x (pathmask %02x) is disabled - IFCC threshold exceeded\n",
2223 device->path[pos].cssid, device->path[pos].chpid, lpum); 2234 device->path[pos].cssid, device->path[pos].chpid, lpum);
2224 dasd_path_remove_opm(device, lpum); 2235 dasd_path_remove_opm(device, lpum);
2225 dasd_path_add_ifccpm(device, lpum); 2236 dasd_path_add_ifccpm(device, lpum);
2237
2238out:
2226 device->path[pos].errorclk = 0; 2239 device->path[pos].errorclk = 0;
2227 atomic_set(&device->path[pos].error_count, 0); 2240 atomic_set(&device->path[pos].error_count, 0);
2228} 2241}
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index e7cd28ff1984..b9ebb565ee2c 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -1550,9 +1550,49 @@ dasd_path_threshold_store(struct device *dev, struct device_attribute *attr,
1550 dasd_put_device(device); 1550 dasd_put_device(device);
1551 return count; 1551 return count;
1552} 1552}
1553
1554static DEVICE_ATTR(path_threshold, 0644, dasd_path_threshold_show, 1553static DEVICE_ATTR(path_threshold, 0644, dasd_path_threshold_show,
1555 dasd_path_threshold_store); 1554 dasd_path_threshold_store);
1555
1556/*
1557 * configure if path is disabled after IFCC/CCC error threshold is
1558 * exceeded
1559 */
1560static ssize_t
1561dasd_path_autodisable_show(struct device *dev,
1562 struct device_attribute *attr, char *buf)
1563{
1564 struct dasd_devmap *devmap;
1565 int flag;
1566
1567 devmap = dasd_find_busid(dev_name(dev));
1568 if (!IS_ERR(devmap))
1569 flag = (devmap->features & DASD_FEATURE_PATH_AUTODISABLE) != 0;
1570 else
1571 flag = (DASD_FEATURE_DEFAULT &
1572 DASD_FEATURE_PATH_AUTODISABLE) != 0;
1573 return snprintf(buf, PAGE_SIZE, flag ? "1\n" : "0\n");
1574}
1575
1576static ssize_t
1577dasd_path_autodisable_store(struct device *dev,
1578 struct device_attribute *attr,
1579 const char *buf, size_t count)
1580{
1581 unsigned int val;
1582 int rc;
1583
1584 if (kstrtouint(buf, 0, &val) || val > 1)
1585 return -EINVAL;
1586
1587 rc = dasd_set_feature(to_ccwdev(dev),
1588 DASD_FEATURE_PATH_AUTODISABLE, val);
1589
1590 return rc ? : count;
1591}
1592
1593static DEVICE_ATTR(path_autodisable, 0644,
1594 dasd_path_autodisable_show,
1595 dasd_path_autodisable_store);
1556/* 1596/*
1557 * interval for IFCC/CCC checks 1597 * interval for IFCC/CCC checks
1558 * meaning time with no IFCC/CCC error before the error counter 1598 * meaning time with no IFCC/CCC error before the error counter
@@ -1623,6 +1663,7 @@ static struct attribute * dasd_attrs[] = {
1623 &dev_attr_host_access_count.attr, 1663 &dev_attr_host_access_count.attr,
1624 &dev_attr_path_masks.attr, 1664 &dev_attr_path_masks.attr,
1625 &dev_attr_path_threshold.attr, 1665 &dev_attr_path_threshold.attr,
1666 &dev_attr_path_autodisable.attr,
1626 &dev_attr_path_interval.attr, 1667 &dev_attr_path_interval.attr,
1627 &dev_attr_path_reset.attr, 1668 &dev_attr_path_reset.attr,
1628 &dev_attr_hpf.attr, 1669 &dev_attr_hpf.attr,
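The new path_autodisable attribute sits next to path_threshold in the dasd_attrs group, so it appears on the DASD's ccw device and accepts 0 or 1 via kstrtouint(). A hedged userspace sketch for switching it on; the bus id and the /sys/bus/ccw/devices/<bus id>/ location are assumptions for illustration:

/*
 * Hedged sketch, not part of the patch: enable automatic disabling of
 * paths with excessive IFCC errors for one DASD. The sysfs path and the
 * bus id 0.0.4711 are assumed here purely for illustration.
 */
#include <stdio.h>

int main(void)
{
	const char *attr = "/sys/bus/ccw/devices/0.0.4711/path_autodisable";
	FILE *f = fopen(attr, "w");

	if (!f) {
		perror(attr);
		return 1;
	}
	fputs("1\n", f);	/* the store function accepts 0 or 1 */
	return fclose(f) ? 1 : 0;
}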
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 29397a9dba68..be208e7adcb4 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -214,24 +214,25 @@ static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head)
214 geo->head |= head; 214 geo->head |= head;
215} 215}
216 216
217static int check_XRC(struct ccw1 *ccw, struct DE_eckd_data *data, 217static int set_timestamp(struct ccw1 *ccw, struct DE_eckd_data *data,
218 struct dasd_device *device) 218 struct dasd_device *device)
219{ 219{
220 struct dasd_eckd_private *private = device->private; 220 struct dasd_eckd_private *private = device->private;
221 int rc; 221 int rc;
222 222
223 if (!private->rdc_data.facilities.XRC_supported) 223 rc = get_phys_clock(&data->ep_sys_time);
224 /*
225 * Ignore return code if XRC is not supported or
226 * sync clock is switched off
227 */
228 if ((rc && !private->rdc_data.facilities.XRC_supported) ||
229 rc == -EOPNOTSUPP || rc == -EACCES)
224 return 0; 230 return 0;
225 231
226 /* switch on System Time Stamp - needed for XRC Support */ 232 /* switch on System Time Stamp - needed for XRC Support */
227 data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid' */ 233 data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid' */
228 data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */ 234 data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */
229 235
230 rc = get_phys_clock(&data->ep_sys_time);
231 /* Ignore return code if sync clock is switched off. */
232 if (rc == -EOPNOTSUPP || rc == -EACCES)
233 rc = 0;
234
235 if (ccw) { 236 if (ccw) {
236 ccw->count = sizeof(struct DE_eckd_data); 237 ccw->count = sizeof(struct DE_eckd_data);
237 ccw->flags |= CCW_FLAG_SLI; 238 ccw->flags |= CCW_FLAG_SLI;
@@ -286,12 +287,12 @@ define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
286 case DASD_ECKD_CCW_WRITE_KD_MT: 287 case DASD_ECKD_CCW_WRITE_KD_MT:
287 data->mask.perm = 0x02; 288 data->mask.perm = 0x02;
288 data->attributes.operation = private->attrib.operation; 289 data->attributes.operation = private->attrib.operation;
289 rc = check_XRC(ccw, data, device); 290 rc = set_timestamp(ccw, data, device);
290 break; 291 break;
291 case DASD_ECKD_CCW_WRITE_CKD: 292 case DASD_ECKD_CCW_WRITE_CKD:
292 case DASD_ECKD_CCW_WRITE_CKD_MT: 293 case DASD_ECKD_CCW_WRITE_CKD_MT:
293 data->attributes.operation = DASD_BYPASS_CACHE; 294 data->attributes.operation = DASD_BYPASS_CACHE;
294 rc = check_XRC(ccw, data, device); 295 rc = set_timestamp(ccw, data, device);
295 break; 296 break;
296 case DASD_ECKD_CCW_ERASE: 297 case DASD_ECKD_CCW_ERASE:
297 case DASD_ECKD_CCW_WRITE_HOME_ADDRESS: 298 case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
@@ -299,7 +300,7 @@ define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
299 data->mask.perm = 0x3; 300 data->mask.perm = 0x3;
300 data->mask.auth = 0x1; 301 data->mask.auth = 0x1;
301 data->attributes.operation = DASD_BYPASS_CACHE; 302 data->attributes.operation = DASD_BYPASS_CACHE;
302 rc = check_XRC(ccw, data, device); 303 rc = set_timestamp(ccw, data, device);
303 break; 304 break;
304 case DASD_ECKD_CCW_WRITE_FULL_TRACK: 305 case DASD_ECKD_CCW_WRITE_FULL_TRACK:
305 data->mask.perm = 0x03; 306 data->mask.perm = 0x03;
@@ -310,7 +311,7 @@ define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
310 data->mask.perm = 0x02; 311 data->mask.perm = 0x02;
311 data->attributes.operation = private->attrib.operation; 312 data->attributes.operation = private->attrib.operation;
312 data->blk_size = blksize; 313 data->blk_size = blksize;
313 rc = check_XRC(ccw, data, device); 314 rc = set_timestamp(ccw, data, device);
314 break; 315 break;
315 default: 316 default:
316 dev_err(&device->cdev->dev, 317 dev_err(&device->cdev->dev,
@@ -993,7 +994,7 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
993 struct dasd_eckd_private *private, path_private; 994 struct dasd_eckd_private *private, path_private;
994 struct dasd_uid *uid; 995 struct dasd_uid *uid;
995 char print_path_uid[60], print_device_uid[60]; 996 char print_path_uid[60], print_device_uid[60];
996 struct channel_path_desc *chp_desc; 997 struct channel_path_desc_fmt0 *chp_desc;
997 struct subchannel_id sch_id; 998 struct subchannel_id sch_id;
998 999
999 private = device->private; 1000 private = device->private;
@@ -3440,7 +3441,7 @@ static int prepare_itcw(struct itcw *itcw,
3440 dedata->mask.perm = 0x02; 3441 dedata->mask.perm = 0x02;
3441 dedata->attributes.operation = basepriv->attrib.operation; 3442 dedata->attributes.operation = basepriv->attrib.operation;
3442 dedata->blk_size = blksize; 3443 dedata->blk_size = blksize;
3443 rc = check_XRC(NULL, dedata, basedev); 3444 rc = set_timestamp(NULL, dedata, basedev);
3444 dedata->ga_extended |= 0x42; 3445 dedata->ga_extended |= 0x42;
3445 lredata->operation.orientation = 0x0; 3446 lredata->operation.orientation = 0x0;
3446 lredata->operation.operation = 0x3F; 3447 lredata->operation.operation = 0x3F;
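The renamed set_timestamp() now queries the physical clock first and only then decides whether a failure matters: the error is swallowed when XRC is not supported, or when the sync clock is switched off (-EOPNOTSUPP/-EACCES), and propagated otherwise. A standalone sketch of just that decision, separate from the kernel function and its ga_extended side effects:

/*
 * Standalone sketch of the error policy introduced above: ignore a
 * get_phys_clock() failure when XRC is not supported or when the sync
 * clock is switched off; propagate it otherwise.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int timestamp_rc(int clock_rc, bool xrc_supported)
{
	if ((clock_rc && !xrc_supported) ||
	    clock_rc == -EOPNOTSUPP || clock_rc == -EACCES)
		return 0;		/* define extent continues without error */
	return clock_rc;		/* 0 on success, or a real failure */
}

int main(void)
{
	printf("%d\n", timestamp_rc(-EOPNOTSUPP, true));	/* 0: sync clock off */
	printf("%d\n", timestamp_rc(-EIO, false));		/* 0: XRC unsupported */
	printf("%d\n", timestamp_rc(-EIO, true));		/* -EIO: real error */
	return 0;
}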
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index a2b33a22c82a..d049e2d74484 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -23,7 +23,7 @@ CFLAGS_REMOVE_sclp_early_core.o += $(CC_FLAGS_EXPOLINE)
23 23
24obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \ 24obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
25 sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \ 25 sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \
26 sclp_early.o sclp_early_core.o 26 sclp_early.o sclp_early_core.o sclp_sd.o
27 27
28obj-$(CONFIG_TN3270) += raw3270.o 28obj-$(CONFIG_TN3270) += raw3270.o
29obj-$(CONFIG_TN3270_CONSOLE) += con3270.o 29obj-$(CONFIG_TN3270_CONSOLE) += con3270.o
diff --git a/drivers/s390/char/defkeymap.c b/drivers/s390/char/defkeymap.c
index 98a5c459a1bf..60845d467a1b 100644
--- a/drivers/s390/char/defkeymap.c
+++ b/drivers/s390/char/defkeymap.c
@@ -9,7 +9,9 @@
9#include <linux/kbd_kern.h> 9#include <linux/kbd_kern.h>
10#include <linux/kbd_diacr.h> 10#include <linux/kbd_diacr.h>
11 11
12u_short plain_map[NR_KEYS] = { 12#include "keyboard.h"
13
14u_short ebc_plain_map[NR_KEYS] = {
13 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 15 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
14 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 16 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
15 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 17 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
@@ -85,12 +87,12 @@ static u_short shift_ctrl_map[NR_KEYS] = {
85 0xf20a, 0xf108, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 87 0xf20a, 0xf108, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
86}; 88};
87 89
88ushort *key_maps[MAX_NR_KEYMAPS] = { 90ushort *ebc_key_maps[MAX_NR_KEYMAPS] = {
89 plain_map, shift_map, NULL, NULL, 91 ebc_plain_map, shift_map, NULL, NULL,
90 ctrl_map, shift_ctrl_map, NULL, 92 ctrl_map, shift_ctrl_map, NULL,
91}; 93};
92 94
93unsigned int keymap_count = 4; 95unsigned int ebc_keymap_count = 4;
94 96
95 97
96/* 98/*
@@ -99,7 +101,7 @@ unsigned int keymap_count = 4;
99 * the default and allocate dynamically in chunks of 512 bytes. 101 * the default and allocate dynamically in chunks of 512 bytes.
100 */ 102 */
101 103
102char func_buf[] = { 104char ebc_func_buf[] = {
103 '\033', '[', '[', 'A', 0, 105 '\033', '[', '[', 'A', 0,
104 '\033', '[', '[', 'B', 0, 106 '\033', '[', '[', 'B', 0,
105 '\033', '[', '[', 'C', 0, 107 '\033', '[', '[', 'C', 0,
@@ -123,37 +125,37 @@ char func_buf[] = {
123}; 125};
124 126
125 127
126char *funcbufptr = func_buf; 128char *ebc_funcbufptr = ebc_func_buf;
127int funcbufsize = sizeof(func_buf); 129int ebc_funcbufsize = sizeof(ebc_func_buf);
128int funcbufleft = 0; /* space left */ 130int ebc_funcbufleft; /* space left */
129 131
130char *func_table[MAX_NR_FUNC] = { 132char *ebc_func_table[MAX_NR_FUNC] = {
131 func_buf + 0, 133 ebc_func_buf + 0,
132 func_buf + 5, 134 ebc_func_buf + 5,
133 func_buf + 10, 135 ebc_func_buf + 10,
134 func_buf + 15, 136 ebc_func_buf + 15,
135 func_buf + 20, 137 ebc_func_buf + 20,
136 func_buf + 25, 138 ebc_func_buf + 25,
137 func_buf + 31, 139 ebc_func_buf + 31,
138 func_buf + 37, 140 ebc_func_buf + 37,
139 func_buf + 43, 141 ebc_func_buf + 43,
140 func_buf + 49, 142 ebc_func_buf + 49,
141 func_buf + 55, 143 ebc_func_buf + 55,
142 func_buf + 61, 144 ebc_func_buf + 61,
143 func_buf + 67, 145 ebc_func_buf + 67,
144 func_buf + 73, 146 ebc_func_buf + 73,
145 func_buf + 79, 147 ebc_func_buf + 79,
146 func_buf + 85, 148 ebc_func_buf + 85,
147 func_buf + 91, 149 ebc_func_buf + 91,
148 func_buf + 97, 150 ebc_func_buf + 97,
149 func_buf + 103, 151 ebc_func_buf + 103,
150 func_buf + 109, 152 ebc_func_buf + 109,
151 NULL, 153 NULL,
152}; 154};
153 155
154struct kbdiacruc accent_table[MAX_DIACR] = { 156struct kbdiacruc ebc_accent_table[MAX_DIACR] = {
155 {'^', 'c', 0003}, {'^', 'd', 0004}, 157 {'^', 'c', 0003}, {'^', 'd', 0004},
156 {'^', 'z', 0032}, {'^', 0012, 0000}, 158 {'^', 'z', 0032}, {'^', 0012, 0000},
157}; 159};
158 160
159unsigned int accent_table_size = 4; 161unsigned int ebc_accent_table_size = 4;
diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c
index 5b505fdaedec..db1fbf9b00b5 100644
--- a/drivers/s390/char/keyboard.c
+++ b/drivers/s390/char/keyboard.c
@@ -54,24 +54,24 @@ kbd_alloc(void) {
54 kbd = kzalloc(sizeof(struct kbd_data), GFP_KERNEL); 54 kbd = kzalloc(sizeof(struct kbd_data), GFP_KERNEL);
55 if (!kbd) 55 if (!kbd)
56 goto out; 56 goto out;
57 kbd->key_maps = kzalloc(sizeof(key_maps), GFP_KERNEL); 57 kbd->key_maps = kzalloc(sizeof(ebc_key_maps), GFP_KERNEL);
58 if (!kbd->key_maps) 58 if (!kbd->key_maps)
59 goto out_kbd; 59 goto out_kbd;
60 for (i = 0; i < ARRAY_SIZE(key_maps); i++) { 60 for (i = 0; i < ARRAY_SIZE(ebc_key_maps); i++) {
61 if (key_maps[i]) { 61 if (ebc_key_maps[i]) {
62 kbd->key_maps[i] = kmemdup(key_maps[i], 62 kbd->key_maps[i] = kmemdup(ebc_key_maps[i],
63 sizeof(u_short) * NR_KEYS, 63 sizeof(u_short) * NR_KEYS,
64 GFP_KERNEL); 64 GFP_KERNEL);
65 if (!kbd->key_maps[i]) 65 if (!kbd->key_maps[i])
66 goto out_maps; 66 goto out_maps;
67 } 67 }
68 } 68 }
69 kbd->func_table = kzalloc(sizeof(func_table), GFP_KERNEL); 69 kbd->func_table = kzalloc(sizeof(ebc_func_table), GFP_KERNEL);
70 if (!kbd->func_table) 70 if (!kbd->func_table)
71 goto out_maps; 71 goto out_maps;
72 for (i = 0; i < ARRAY_SIZE(func_table); i++) { 72 for (i = 0; i < ARRAY_SIZE(ebc_func_table); i++) {
73 if (func_table[i]) { 73 if (ebc_func_table[i]) {
74 kbd->func_table[i] = kstrdup(func_table[i], 74 kbd->func_table[i] = kstrdup(ebc_func_table[i],
75 GFP_KERNEL); 75 GFP_KERNEL);
76 if (!kbd->func_table[i]) 76 if (!kbd->func_table[i])
77 goto out_func; 77 goto out_func;
@@ -81,22 +81,22 @@ kbd_alloc(void) {
81 kzalloc(sizeof(fn_handler_fn *) * NR_FN_HANDLER, GFP_KERNEL); 81 kzalloc(sizeof(fn_handler_fn *) * NR_FN_HANDLER, GFP_KERNEL);
82 if (!kbd->fn_handler) 82 if (!kbd->fn_handler)
83 goto out_func; 83 goto out_func;
84 kbd->accent_table = kmemdup(accent_table, 84 kbd->accent_table = kmemdup(ebc_accent_table,
85 sizeof(struct kbdiacruc) * MAX_DIACR, 85 sizeof(struct kbdiacruc) * MAX_DIACR,
86 GFP_KERNEL); 86 GFP_KERNEL);
87 if (!kbd->accent_table) 87 if (!kbd->accent_table)
88 goto out_fn_handler; 88 goto out_fn_handler;
89 kbd->accent_table_size = accent_table_size; 89 kbd->accent_table_size = ebc_accent_table_size;
90 return kbd; 90 return kbd;
91 91
92out_fn_handler: 92out_fn_handler:
93 kfree(kbd->fn_handler); 93 kfree(kbd->fn_handler);
94out_func: 94out_func:
95 for (i = 0; i < ARRAY_SIZE(func_table); i++) 95 for (i = 0; i < ARRAY_SIZE(ebc_func_table); i++)
96 kfree(kbd->func_table[i]); 96 kfree(kbd->func_table[i]);
97 kfree(kbd->func_table); 97 kfree(kbd->func_table);
98out_maps: 98out_maps:
99 for (i = 0; i < ARRAY_SIZE(key_maps); i++) 99 for (i = 0; i < ARRAY_SIZE(ebc_key_maps); i++)
100 kfree(kbd->key_maps[i]); 100 kfree(kbd->key_maps[i]);
101 kfree(kbd->key_maps); 101 kfree(kbd->key_maps);
102out_kbd: 102out_kbd:
@@ -112,10 +112,10 @@ kbd_free(struct kbd_data *kbd)
112 112
113 kfree(kbd->accent_table); 113 kfree(kbd->accent_table);
114 kfree(kbd->fn_handler); 114 kfree(kbd->fn_handler);
115 for (i = 0; i < ARRAY_SIZE(func_table); i++) 115 for (i = 0; i < ARRAY_SIZE(ebc_func_table); i++)
116 kfree(kbd->func_table[i]); 116 kfree(kbd->func_table[i]);
117 kfree(kbd->func_table); 117 kfree(kbd->func_table);
118 for (i = 0; i < ARRAY_SIZE(key_maps); i++) 118 for (i = 0; i < ARRAY_SIZE(ebc_key_maps); i++)
119 kfree(kbd->key_maps[i]); 119 kfree(kbd->key_maps[i]);
120 kfree(kbd->key_maps); 120 kfree(kbd->key_maps);
121 kfree(kbd); 121 kfree(kbd);
@@ -131,7 +131,7 @@ kbd_ascebc(struct kbd_data *kbd, unsigned char *ascebc)
131 int i, j, k; 131 int i, j, k;
132 132
133 memset(ascebc, 0x40, 256); 133 memset(ascebc, 0x40, 256);
134 for (i = 0; i < ARRAY_SIZE(key_maps); i++) { 134 for (i = 0; i < ARRAY_SIZE(ebc_key_maps); i++) {
135 keymap = kbd->key_maps[i]; 135 keymap = kbd->key_maps[i];
136 if (!keymap) 136 if (!keymap)
137 continue; 137 continue;
@@ -158,7 +158,7 @@ kbd_ebcasc(struct kbd_data *kbd, unsigned char *ebcasc)
158 int i, j, k; 158 int i, j, k;
159 159
160 memset(ebcasc, ' ', 256); 160 memset(ebcasc, ' ', 256);
161 for (i = 0; i < ARRAY_SIZE(key_maps); i++) { 161 for (i = 0; i < ARRAY_SIZE(ebc_key_maps); i++) {
162 keymap = kbd->key_maps[i]; 162 keymap = kbd->key_maps[i];
163 if (!keymap) 163 if (!keymap)
164 continue; 164 continue;
diff --git a/drivers/s390/char/keyboard.h b/drivers/s390/char/keyboard.h
index a074d9711628..c467589c7f45 100644
--- a/drivers/s390/char/keyboard.h
+++ b/drivers/s390/char/keyboard.h
@@ -14,6 +14,17 @@
14 14
15struct kbd_data; 15struct kbd_data;
16 16
17extern int ebc_funcbufsize, ebc_funcbufleft;
18extern char *ebc_func_table[MAX_NR_FUNC];
19extern char ebc_func_buf[];
20extern char *ebc_funcbufptr;
21extern unsigned int ebc_keymap_count;
22
23extern struct kbdiacruc ebc_accent_table[];
24extern unsigned int ebc_accent_table_size;
25extern unsigned short *ebc_key_maps[MAX_NR_KEYMAPS];
26extern unsigned short ebc_plain_map[NR_KEYS];
27
17typedef void (fn_handler_fn)(struct kbd_data *); 28typedef void (fn_handler_fn)(struct kbd_data *);
18 29
19/* 30/*
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index e4e2df7a478e..e9aa71cdfc44 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -417,7 +417,7 @@ sclp_dispatch_evbufs(struct sccb_header *sccb)
417 reg = NULL; 417 reg = NULL;
418 list_for_each(l, &sclp_reg_list) { 418 list_for_each(l, &sclp_reg_list) {
419 reg = list_entry(l, struct sclp_register, list); 419 reg = list_entry(l, struct sclp_register, list);
420 if (reg->receive_mask & (1 << (32 - evbuf->type))) 420 if (reg->receive_mask & SCLP_EVTYP_MASK(evbuf->type))
421 break; 421 break;
422 else 422 else
423 reg = NULL; 423 reg = NULL;
@@ -618,9 +618,12 @@ struct sclp_statechangebuf {
618 u16 _zeros : 12; 618 u16 _zeros : 12;
619 u16 mask_length; 619 u16 mask_length;
620 u64 sclp_active_facility_mask; 620 u64 sclp_active_facility_mask;
621 sccb_mask_t sclp_receive_mask; 621 u8 masks[2 * 1021 + 4]; /* variable length */
622 sccb_mask_t sclp_send_mask; 622 /*
623 u32 read_data_function_mask; 623 * u8 sclp_receive_mask[mask_length];
624 * u8 sclp_send_mask[mask_length];
625 * u32 read_data_function_mask;
626 */
624} __attribute__((packed)); 627} __attribute__((packed));
625 628
626 629
@@ -631,14 +634,14 @@ sclp_state_change_cb(struct evbuf_header *evbuf)
631 unsigned long flags; 634 unsigned long flags;
632 struct sclp_statechangebuf *scbuf; 635 struct sclp_statechangebuf *scbuf;
633 636
637 BUILD_BUG_ON(sizeof(struct sclp_statechangebuf) > PAGE_SIZE);
638
634 scbuf = (struct sclp_statechangebuf *) evbuf; 639 scbuf = (struct sclp_statechangebuf *) evbuf;
635 if (scbuf->mask_length != sizeof(sccb_mask_t))
636 return;
637 spin_lock_irqsave(&sclp_lock, flags); 640 spin_lock_irqsave(&sclp_lock, flags);
638 if (scbuf->validity_sclp_receive_mask) 641 if (scbuf->validity_sclp_receive_mask)
639 sclp_receive_mask = scbuf->sclp_receive_mask; 642 sclp_receive_mask = sccb_get_recv_mask(scbuf);
640 if (scbuf->validity_sclp_send_mask) 643 if (scbuf->validity_sclp_send_mask)
641 sclp_send_mask = scbuf->sclp_send_mask; 644 sclp_send_mask = sccb_get_send_mask(scbuf);
642 spin_unlock_irqrestore(&sclp_lock, flags); 645 spin_unlock_irqrestore(&sclp_lock, flags);
643 if (scbuf->validity_sclp_active_facility_mask) 646 if (scbuf->validity_sclp_active_facility_mask)
644 sclp.facilities = scbuf->sclp_active_facility_mask; 647 sclp.facilities = scbuf->sclp_active_facility_mask;
@@ -748,7 +751,7 @@ EXPORT_SYMBOL(sclp_remove_processed);
748 751
749/* Prepare init mask request. Called while sclp_lock is locked. */ 752/* Prepare init mask request. Called while sclp_lock is locked. */
750static inline void 753static inline void
751__sclp_make_init_req(u32 receive_mask, u32 send_mask) 754__sclp_make_init_req(sccb_mask_t receive_mask, sccb_mask_t send_mask)
752{ 755{
753 struct init_sccb *sccb; 756 struct init_sccb *sccb;
754 757
@@ -761,12 +764,15 @@ __sclp_make_init_req(u32 receive_mask, u32 send_mask)
761 sclp_init_req.callback = NULL; 764 sclp_init_req.callback = NULL;
762 sclp_init_req.callback_data = NULL; 765 sclp_init_req.callback_data = NULL;
763 sclp_init_req.sccb = sccb; 766 sclp_init_req.sccb = sccb;
764 sccb->header.length = sizeof(struct init_sccb); 767 sccb->header.length = sizeof(*sccb);
765 sccb->mask_length = sizeof(sccb_mask_t); 768 if (sclp_mask_compat_mode)
766 sccb->receive_mask = receive_mask; 769 sccb->mask_length = SCLP_MASK_SIZE_COMPAT;
767 sccb->send_mask = send_mask; 770 else
768 sccb->sclp_receive_mask = 0; 771 sccb->mask_length = sizeof(sccb_mask_t);
769 sccb->sclp_send_mask = 0; 772 sccb_set_recv_mask(sccb, receive_mask);
773 sccb_set_send_mask(sccb, send_mask);
774 sccb_set_sclp_recv_mask(sccb, 0);
775 sccb_set_sclp_send_mask(sccb, 0);
770} 776}
771 777
772/* Start init mask request. If calculate is non-zero, calculate the mask as 778/* Start init mask request. If calculate is non-zero, calculate the mask as
@@ -822,8 +828,8 @@ sclp_init_mask(int calculate)
822 sccb->header.response_code == 0x20) { 828 sccb->header.response_code == 0x20) {
823 /* Successful request */ 829 /* Successful request */
824 if (calculate) { 830 if (calculate) {
825 sclp_receive_mask = sccb->sclp_receive_mask; 831 sclp_receive_mask = sccb_get_sclp_recv_mask(sccb);
826 sclp_send_mask = sccb->sclp_send_mask; 832 sclp_send_mask = sccb_get_sclp_send_mask(sccb);
827 } else { 833 } else {
828 sclp_receive_mask = 0; 834 sclp_receive_mask = 0;
829 sclp_send_mask = 0; 835 sclp_send_mask = 0;
@@ -974,12 +980,18 @@ sclp_check_interface(void)
974 irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL); 980 irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
975 spin_lock_irqsave(&sclp_lock, flags); 981 spin_lock_irqsave(&sclp_lock, flags);
976 del_timer(&sclp_request_timer); 982 del_timer(&sclp_request_timer);
977 if (sclp_init_req.status == SCLP_REQ_DONE && 983 rc = -EBUSY;
978 sccb->header.response_code == 0x20) { 984 if (sclp_init_req.status == SCLP_REQ_DONE) {
979 rc = 0; 985 if (sccb->header.response_code == 0x20) {
980 break; 986 rc = 0;
981 } else 987 break;
982 rc = -EBUSY; 988 } else if (sccb->header.response_code == 0x74f0) {
989 if (!sclp_mask_compat_mode) {
990 sclp_mask_compat_mode = true;
991 retry = 0;
992 }
993 }
994 }
983 } 995 }
984 unregister_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler); 996 unregister_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
985 spin_unlock_irqrestore(&sclp_lock, flags); 997 spin_unlock_irqrestore(&sclp_lock, flags);
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index f41f6e2ca063..1fe4918088e7 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -18,7 +18,7 @@
18#define MAX_KMEM_PAGES (sizeof(unsigned long) << 3) 18#define MAX_KMEM_PAGES (sizeof(unsigned long) << 3)
19#define SCLP_CONSOLE_PAGES 6 19#define SCLP_CONSOLE_PAGES 6
20 20
21#define SCLP_EVTYP_MASK(T) (1U << (32 - (T))) 21#define SCLP_EVTYP_MASK(T) (1UL << (sizeof(sccb_mask_t) * BITS_PER_BYTE - (T)))
22 22
23#define EVTYP_OPCMD 0x01 23#define EVTYP_OPCMD 0x01
24#define EVTYP_MSG 0x02 24#define EVTYP_MSG 0x02
@@ -28,6 +28,7 @@
28#define EVTYP_PMSGCMD 0x09 28#define EVTYP_PMSGCMD 0x09
29#define EVTYP_ASYNC 0x0A 29#define EVTYP_ASYNC 0x0A
30#define EVTYP_CTLPROGIDENT 0x0B 30#define EVTYP_CTLPROGIDENT 0x0B
31#define EVTYP_STORE_DATA 0x0C
31#define EVTYP_ERRNOTIFY 0x18 32#define EVTYP_ERRNOTIFY 0x18
32#define EVTYP_VT220MSG 0x1A 33#define EVTYP_VT220MSG 0x1A
33#define EVTYP_SDIAS 0x1C 34#define EVTYP_SDIAS 0x1C
@@ -42,6 +43,7 @@
42#define EVTYP_PMSGCMD_MASK SCLP_EVTYP_MASK(EVTYP_PMSGCMD) 43#define EVTYP_PMSGCMD_MASK SCLP_EVTYP_MASK(EVTYP_PMSGCMD)
43#define EVTYP_ASYNC_MASK SCLP_EVTYP_MASK(EVTYP_ASYNC) 44#define EVTYP_ASYNC_MASK SCLP_EVTYP_MASK(EVTYP_ASYNC)
44#define EVTYP_CTLPROGIDENT_MASK SCLP_EVTYP_MASK(EVTYP_CTLPROGIDENT) 45#define EVTYP_CTLPROGIDENT_MASK SCLP_EVTYP_MASK(EVTYP_CTLPROGIDENT)
46#define EVTYP_STORE_DATA_MASK SCLP_EVTYP_MASK(EVTYP_STORE_DATA)
45#define EVTYP_ERRNOTIFY_MASK SCLP_EVTYP_MASK(EVTYP_ERRNOTIFY) 47#define EVTYP_ERRNOTIFY_MASK SCLP_EVTYP_MASK(EVTYP_ERRNOTIFY)
46#define EVTYP_VT220MSG_MASK SCLP_EVTYP_MASK(EVTYP_VT220MSG) 48#define EVTYP_VT220MSG_MASK SCLP_EVTYP_MASK(EVTYP_VT220MSG)
47#define EVTYP_SDIAS_MASK SCLP_EVTYP_MASK(EVTYP_SDIAS) 49#define EVTYP_SDIAS_MASK SCLP_EVTYP_MASK(EVTYP_SDIAS)
@@ -85,7 +87,7 @@ enum sclp_pm_event {
85#define SCLP_PANIC_PRIO 1 87#define SCLP_PANIC_PRIO 1
86#define SCLP_PANIC_PRIO_CLIENT 0 88#define SCLP_PANIC_PRIO_CLIENT 0
87 89
88typedef u32 sccb_mask_t; /* ATTENTION: assumes 32bit mask !!! */ 90typedef u64 sccb_mask_t;
89 91
90struct sccb_header { 92struct sccb_header {
91 u16 length; 93 u16 length;
@@ -98,12 +100,53 @@ struct init_sccb {
98 struct sccb_header header; 100 struct sccb_header header;
99 u16 _reserved; 101 u16 _reserved;
100 u16 mask_length; 102 u16 mask_length;
101 sccb_mask_t receive_mask; 103 u8 masks[4 * 1021]; /* variable length */
102 sccb_mask_t send_mask; 104 /*
103 sccb_mask_t sclp_receive_mask; 105 * u8 receive_mask[mask_length];
104 sccb_mask_t sclp_send_mask; 106 * u8 send_mask[mask_length];
107 * u8 sclp_receive_mask[mask_length];
108 * u8 sclp_send_mask[mask_length];
109 */
105} __attribute__((packed)); 110} __attribute__((packed));
106 111
112#define SCLP_MASK_SIZE_COMPAT 4
113
114static inline sccb_mask_t sccb_get_mask(u8 *masks, size_t len, int i)
115{
116 sccb_mask_t res = 0;
117
118 memcpy(&res, masks + i * len, min(sizeof(res), len));
119 return res;
120}
121
122static inline void sccb_set_mask(u8 *masks, size_t len, int i, sccb_mask_t val)
123{
124 memset(masks + i * len, 0, len);
125 memcpy(masks + i * len, &val, min(sizeof(val), len));
126}
127
128#define sccb_get_generic_mask(sccb, i) \
129({ \
130 __typeof__(sccb) __sccb = sccb; \
131 \
132 sccb_get_mask(__sccb->masks, __sccb->mask_length, i); \
133})
134#define sccb_get_recv_mask(sccb) sccb_get_generic_mask(sccb, 0)
135#define sccb_get_send_mask(sccb) sccb_get_generic_mask(sccb, 1)
136#define sccb_get_sclp_recv_mask(sccb) sccb_get_generic_mask(sccb, 2)
137#define sccb_get_sclp_send_mask(sccb) sccb_get_generic_mask(sccb, 3)
138
139#define sccb_set_generic_mask(sccb, i, val) \
140({ \
141 __typeof__(sccb) __sccb = sccb; \
142 \
143 sccb_set_mask(__sccb->masks, __sccb->mask_length, i, val); \
144})
145#define sccb_set_recv_mask(sccb, val) sccb_set_generic_mask(sccb, 0, val)
146#define sccb_set_send_mask(sccb, val) sccb_set_generic_mask(sccb, 1, val)
147#define sccb_set_sclp_recv_mask(sccb, val) sccb_set_generic_mask(sccb, 2, val)
148#define sccb_set_sclp_send_mask(sccb, val) sccb_set_generic_mask(sccb, 3, val)
149
107struct read_cpu_info_sccb { 150struct read_cpu_info_sccb {
108 struct sccb_header header; 151 struct sccb_header header;
109 u16 nr_configured; 152 u16 nr_configured;
@@ -221,15 +264,17 @@ extern int sclp_init_state;
221extern int sclp_console_pages; 264extern int sclp_console_pages;
222extern int sclp_console_drop; 265extern int sclp_console_drop;
223extern unsigned long sclp_console_full; 266extern unsigned long sclp_console_full;
267extern bool sclp_mask_compat_mode;
224 268
225extern char sclp_early_sccb[PAGE_SIZE]; 269extern char sclp_early_sccb[PAGE_SIZE];
226 270
227void sclp_early_wait_irq(void); 271void sclp_early_wait_irq(void);
228int sclp_early_cmd(sclp_cmdw_t cmd, void *sccb); 272int sclp_early_cmd(sclp_cmdw_t cmd, void *sccb);
229unsigned int sclp_early_con_check_linemode(struct init_sccb *sccb); 273unsigned int sclp_early_con_check_linemode(struct init_sccb *sccb);
274unsigned int sclp_early_con_check_vt220(struct init_sccb *sccb);
230int sclp_early_set_event_mask(struct init_sccb *sccb, 275int sclp_early_set_event_mask(struct init_sccb *sccb,
231 unsigned long receive_mask, 276 sccb_mask_t receive_mask,
232 unsigned long send_mask); 277 sccb_mask_t send_mask);
233 278
234/* useful inlines */ 279/* useful inlines */
235 280
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index 6b1891539c84..9a74abb9224d 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -249,7 +249,7 @@ static void __init sclp_early_console_detect(struct init_sccb *sccb)
249 if (sccb->header.response_code != 0x20) 249 if (sccb->header.response_code != 0x20)
250 return; 250 return;
251 251
252 if (sccb->sclp_send_mask & EVTYP_VT220MSG_MASK) 252 if (sclp_early_con_check_vt220(sccb))
253 sclp.has_vt220 = 1; 253 sclp.has_vt220 = 1;
254 254
255 if (sclp_early_con_check_linemode(sccb)) 255 if (sclp_early_con_check_linemode(sccb))
diff --git a/drivers/s390/char/sclp_early_core.c b/drivers/s390/char/sclp_early_core.c
index 17b0c67f3e8d..5f8d9ea69ebd 100644
--- a/drivers/s390/char/sclp_early_core.c
+++ b/drivers/s390/char/sclp_early_core.c
@@ -14,6 +14,11 @@
14 14
15char sclp_early_sccb[PAGE_SIZE] __aligned(PAGE_SIZE) __section(.data); 15char sclp_early_sccb[PAGE_SIZE] __aligned(PAGE_SIZE) __section(.data);
16int sclp_init_state __section(.data) = sclp_init_state_uninitialized; 16int sclp_init_state __section(.data) = sclp_init_state_uninitialized;
17/*
18 * Used to keep track of the size of the event masks. Qemu until version 2.11
 19 * only supports a mask length of 4 bytes and needs a workaround.
20 */
21bool sclp_mask_compat_mode;
17 22
18void sclp_early_wait_irq(void) 23void sclp_early_wait_irq(void)
19{ 24{
@@ -142,16 +147,24 @@ static void sclp_early_print_vt220(const char *str, unsigned int len)
142} 147}
143 148
144int sclp_early_set_event_mask(struct init_sccb *sccb, 149int sclp_early_set_event_mask(struct init_sccb *sccb,
145 unsigned long receive_mask, 150 sccb_mask_t receive_mask,
146 unsigned long send_mask) 151 sccb_mask_t send_mask)
147{ 152{
153retry:
148 memset(sccb, 0, sizeof(*sccb)); 154 memset(sccb, 0, sizeof(*sccb));
149 sccb->header.length = sizeof(*sccb); 155 sccb->header.length = sizeof(*sccb);
150 sccb->mask_length = sizeof(sccb_mask_t); 156 if (sclp_mask_compat_mode)
151 sccb->receive_mask = receive_mask; 157 sccb->mask_length = SCLP_MASK_SIZE_COMPAT;
152 sccb->send_mask = send_mask; 158 else
159 sccb->mask_length = sizeof(sccb_mask_t);
160 sccb_set_recv_mask(sccb, receive_mask);
161 sccb_set_send_mask(sccb, send_mask);
153 if (sclp_early_cmd(SCLP_CMDW_WRITE_EVENT_MASK, sccb)) 162 if (sclp_early_cmd(SCLP_CMDW_WRITE_EVENT_MASK, sccb))
154 return -EIO; 163 return -EIO;
164 if ((sccb->header.response_code == 0x74f0) && !sclp_mask_compat_mode) {
165 sclp_mask_compat_mode = true;
166 goto retry;
167 }
155 if (sccb->header.response_code != 0x20) 168 if (sccb->header.response_code != 0x20)
156 return -EIO; 169 return -EIO;
157 return 0; 170 return 0;
@@ -159,19 +172,28 @@ int sclp_early_set_event_mask(struct init_sccb *sccb,
159 172
160unsigned int sclp_early_con_check_linemode(struct init_sccb *sccb) 173unsigned int sclp_early_con_check_linemode(struct init_sccb *sccb)
161{ 174{
162 if (!(sccb->sclp_send_mask & EVTYP_OPCMD_MASK)) 175 if (!(sccb_get_sclp_send_mask(sccb) & EVTYP_OPCMD_MASK))
163 return 0; 176 return 0;
164 if (!(sccb->sclp_receive_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK))) 177 if (!(sccb_get_sclp_recv_mask(sccb) & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK)))
165 return 0; 178 return 0;
166 return 1; 179 return 1;
167} 180}
168 181
182unsigned int sclp_early_con_check_vt220(struct init_sccb *sccb)
183{
184 if (sccb_get_sclp_send_mask(sccb) & EVTYP_VT220MSG_MASK)
185 return 1;
186 return 0;
187}
188
169static int sclp_early_setup(int disable, int *have_linemode, int *have_vt220) 189static int sclp_early_setup(int disable, int *have_linemode, int *have_vt220)
170{ 190{
171 unsigned long receive_mask, send_mask; 191 unsigned long receive_mask, send_mask;
172 struct init_sccb *sccb; 192 struct init_sccb *sccb;
173 int rc; 193 int rc;
174 194
195 BUILD_BUG_ON(sizeof(struct init_sccb) > PAGE_SIZE);
196
175 *have_linemode = *have_vt220 = 0; 197 *have_linemode = *have_vt220 = 0;
176 sccb = (struct init_sccb *) &sclp_early_sccb; 198 sccb = (struct init_sccb *) &sclp_early_sccb;
177 receive_mask = disable ? 0 : EVTYP_OPCMD_MASK; 199 receive_mask = disable ? 0 : EVTYP_OPCMD_MASK;
@@ -180,7 +202,7 @@ static int sclp_early_setup(int disable, int *have_linemode, int *have_vt220)
180 if (rc) 202 if (rc)
181 return rc; 203 return rc;
182 *have_linemode = sclp_early_con_check_linemode(sccb); 204 *have_linemode = sclp_early_con_check_linemode(sccb);
183 *have_vt220 = sccb->send_mask & EVTYP_VT220MSG_MASK; 205 *have_vt220 = !!(sccb_get_send_mask(sccb) & EVTYP_VT220MSG_MASK);
184 return rc; 206 return rc;
185} 207}
186 208
diff --git a/drivers/s390/char/sclp_sd.c b/drivers/s390/char/sclp_sd.c
new file mode 100644
index 000000000000..99f41db5123b
--- /dev/null
+++ b/drivers/s390/char/sclp_sd.c
@@ -0,0 +1,569 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * SCLP Store Data support and sysfs interface
4 *
5 * Copyright IBM Corp. 2017
6 */
7
8#define KMSG_COMPONENT "sclp_sd"
9#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
10
11#include <linux/completion.h>
12#include <linux/kobject.h>
13#include <linux/list.h>
14#include <linux/printk.h>
15#include <linux/slab.h>
16#include <linux/vmalloc.h>
17#include <linux/async.h>
18#include <linux/export.h>
19#include <linux/mutex.h>
20
21#include <asm/pgalloc.h>
22
23#include "sclp.h"
24
25#define SD_EQ_STORE_DATA 0
26#define SD_EQ_HALT 1
27#define SD_EQ_SIZE 2
28
29#define SD_DI_CONFIG 3
30
31struct sclp_sd_evbuf {
32 struct evbuf_header hdr;
33 u8 eq;
34 u8 di;
35 u8 rflags;
36 u64 :56;
37 u32 id;
38 u16 :16;
39 u8 fmt;
40 u8 status;
41 u64 sat;
42 u64 sa;
43 u32 esize;
44 u32 dsize;
45} __packed;
46
47struct sclp_sd_sccb {
48 struct sccb_header hdr;
49 struct sclp_sd_evbuf evbuf;
50} __packed __aligned(PAGE_SIZE);
51
52/**
53 * struct sclp_sd_data - Result of a Store Data request
54 * @esize_bytes: Resulting esize in bytes
55 * @dsize_bytes: Resulting dsize in bytes
56 * @data: Pointer to data - must be released using vfree()
57 */
58struct sclp_sd_data {
59 size_t esize_bytes;
60 size_t dsize_bytes;
61 void *data;
62};
63
64/**
65 * struct sclp_sd_listener - Listener for asynchronous Store Data response
66 * @list: For enqueueing this struct
67 * @id: Event ID of response to listen for
68 * @completion: Can be used to wait for response
69 * @evbuf: Contains the resulting Store Data response after completion
70 */
71struct sclp_sd_listener {
72 struct list_head list;
73 u32 id;
74 struct completion completion;
75 struct sclp_sd_evbuf evbuf;
76};
77
78/**
79 * struct sclp_sd_file - Sysfs representation of a Store Data entity
80 * @kobj: Kobject
81 * @data_attr: Attribute for accessing data contents
82 * @data_mutex: Mutex to serialize access and updates to @data
83 * @data: Data associated with this entity
84 * @di: DI value associated with this entity
85 */
86struct sclp_sd_file {
87 struct kobject kobj;
88 struct bin_attribute data_attr;
89 struct mutex data_mutex;
90 struct sclp_sd_data data;
91 u8 di;
92};
93#define to_sd_file(x) container_of(x, struct sclp_sd_file, kobj)
94
static struct kset *sclp_sd_kset;
static struct sclp_sd_file *config_file;

static LIST_HEAD(sclp_sd_queue);
static DEFINE_SPINLOCK(sclp_sd_queue_lock);

/**
 * sclp_sd_listener_add() - Add listener for Store Data responses
 * @listener: Listener to add
 */
static void sclp_sd_listener_add(struct sclp_sd_listener *listener)
{
	spin_lock_irq(&sclp_sd_queue_lock);
	list_add_tail(&listener->list, &sclp_sd_queue);
	spin_unlock_irq(&sclp_sd_queue_lock);
}

/**
 * sclp_sd_listener_remove() - Remove listener for Store Data responses
 * @listener: Listener to remove
 */
static void sclp_sd_listener_remove(struct sclp_sd_listener *listener)
{
	spin_lock_irq(&sclp_sd_queue_lock);
	list_del(&listener->list);
	spin_unlock_irq(&sclp_sd_queue_lock);
}

/**
 * sclp_sd_listener_init() - Initialize a Store Data response listener
 * @listener: Listener to initialize
 * @id: Event ID to listen for
 *
 * Initialize a listener for asynchronous Store Data responses. This listener
 * can afterwards be used to wait for a specific response and to retrieve
 * the associated response data.
 */
static void sclp_sd_listener_init(struct sclp_sd_listener *listener, u32 id)
{
	memset(listener, 0, sizeof(*listener));
	listener->id = id;
	init_completion(&listener->completion);
}

/**
 * sclp_sd_receiver() - Receiver for Store Data events
 * @evbuf_hdr: Header of received events
 *
 * Process Store Data events and complete listeners with matching event IDs.
 */
static void sclp_sd_receiver(struct evbuf_header *evbuf_hdr)
{
	struct sclp_sd_evbuf *evbuf = (struct sclp_sd_evbuf *) evbuf_hdr;
	struct sclp_sd_listener *listener;
	int found = 0;

	pr_debug("received event (id=0x%08x)\n", evbuf->id);
	spin_lock(&sclp_sd_queue_lock);
	list_for_each_entry(listener, &sclp_sd_queue, list) {
		if (listener->id != evbuf->id)
			continue;

		listener->evbuf = *evbuf;
		complete(&listener->completion);
		found = 1;
		break;
	}
	spin_unlock(&sclp_sd_queue_lock);

	if (!found)
		pr_debug("unsolicited event (id=0x%08x)\n", evbuf->id);
}

static struct sclp_register sclp_sd_register = {
	.send_mask = EVTYP_STORE_DATA_MASK,
	.receive_mask = EVTYP_STORE_DATA_MASK,
	.receiver_fn = sclp_sd_receiver,
};

/**
 * sclp_sd_sync() - Perform Store Data request synchronously
 * @page: Address of work page - must be below 2GB
 * @eq: Input EQ value
 * @di: Input DI value
 * @sat: Input SAT value
 * @sa: Input SA value used to specify the address of the target buffer
 * @dsize_ptr: Optional pointer to input and output DSIZE value
 * @esize_ptr: Optional pointer to output ESIZE value
 *
 * Perform Store Data request with specified parameters and wait for
 * completion.
 *
 * Return %0 on success and store resulting DSIZE and ESIZE values in
 * @dsize_ptr and @esize_ptr (if provided). Return non-zero on error.
 */
static int sclp_sd_sync(unsigned long page, u8 eq, u8 di, u64 sat, u64 sa,
			u32 *dsize_ptr, u32 *esize_ptr)
{
	struct sclp_sd_sccb *sccb = (void *) page;
	struct sclp_sd_listener listener;
	struct sclp_sd_evbuf *evbuf;
	int rc;

	sclp_sd_listener_init(&listener, (u32) (addr_t) sccb);
	sclp_sd_listener_add(&listener);

	/* Prepare SCCB */
	memset(sccb, 0, PAGE_SIZE);
	sccb->hdr.length = sizeof(sccb->hdr) + sizeof(sccb->evbuf);
	evbuf = &sccb->evbuf;
	evbuf->hdr.length = sizeof(*evbuf);
	evbuf->hdr.type = EVTYP_STORE_DATA;
	evbuf->eq = eq;
	evbuf->di = di;
	evbuf->id = listener.id;
	evbuf->fmt = 1;
	evbuf->sat = sat;
	evbuf->sa = sa;
	if (dsize_ptr)
		evbuf->dsize = *dsize_ptr;

	/* Perform command */
	pr_debug("request (eq=%d, di=%d, id=0x%08x)\n", eq, di, listener.id);
	rc = sclp_sync_request(SCLP_CMDW_WRITE_EVENT_DATA, sccb);
	pr_debug("request done (rc=%d)\n", rc);
	if (rc)
		goto out;

	/* Evaluate response */
	if (sccb->hdr.response_code == 0x73f0) {
		pr_debug("event not supported\n");
		rc = -EIO;
		goto out_remove;
	}
	if (sccb->hdr.response_code != 0x0020 || !(evbuf->hdr.flags & 0x80)) {
		rc = -EIO;
		goto out;
	}
	if (!(evbuf->rflags & 0x80)) {
		rc = wait_for_completion_interruptible(&listener.completion);
		if (rc)
			goto out;
		evbuf = &listener.evbuf;
	}
	switch (evbuf->status) {
	case 0:
		if (dsize_ptr)
			*dsize_ptr = evbuf->dsize;
		if (esize_ptr)
			*esize_ptr = evbuf->esize;
		pr_debug("success (dsize=%u, esize=%u)\n", evbuf->dsize,
			 evbuf->esize);
		break;
	case 3:
		rc = -ENOENT;
		break;
	default:
		rc = -EIO;
		break;

	}

out:
	if (rc && rc != -ENOENT) {
		/* Provide some information about what went wrong */
		pr_warn("Store Data request failed (eq=%d, di=%d, "
			"response=0x%04x, flags=0x%02x, status=%d, rc=%d)\n",
			eq, di, sccb->hdr.response_code, evbuf->hdr.flags,
			evbuf->status, rc);
	}

out_remove:
	sclp_sd_listener_remove(&listener);

	return rc;
}

/**
 * sclp_sd_store_data() - Obtain data for specified Store Data entity
 * @result: Resulting data
 * @di: DI value associated with this entity
 *
 * Perform a series of Store Data requests to obtain the size and contents of
 * the specified Store Data entity.
 *
 * Return:
 *   %0:       Success - result is stored in @result. @result->data must be
 *             released using vfree() after use.
 *   %-ENOENT: No data available for this entity
 *   %<0:      Other error
 */
static int sclp_sd_store_data(struct sclp_sd_data *result, u8 di)
{
	u32 dsize = 0, esize = 0;
	unsigned long page, asce = 0;
	void *data = NULL;
	int rc;

	page = __get_free_page(GFP_KERNEL | GFP_DMA);
	if (!page)
		return -ENOMEM;

	/* Get size */
	rc = sclp_sd_sync(page, SD_EQ_SIZE, di, 0, 0, &dsize, &esize);
	if (rc)
		goto out;
	if (dsize == 0)
		goto out_result;

	/* Allocate memory */
	data = vzalloc((size_t) dsize * PAGE_SIZE);
	if (!data) {
		rc = -ENOMEM;
		goto out;
	}

	/* Get translation table for buffer */
	asce = base_asce_alloc((unsigned long) data, dsize);
	if (!asce) {
		vfree(data);
		rc = -ENOMEM;
		goto out;
	}

	/* Get data */
	rc = sclp_sd_sync(page, SD_EQ_STORE_DATA, di, asce, (u64) data, &dsize,
			  &esize);
	if (rc) {
		/* Cancel running request if interrupted */
		if (rc == -ERESTARTSYS)
			sclp_sd_sync(page, SD_EQ_HALT, di, 0, 0, NULL, NULL);
		vfree(data);
		goto out;
	}

out_result:
	result->esize_bytes = (size_t) esize * PAGE_SIZE;
	result->dsize_bytes = (size_t) dsize * PAGE_SIZE;
	result->data = data;

out:
	base_asce_free(asce);
	free_page(page);

	return rc;
}

/**
 * sclp_sd_data_reset() - Reset Store Data result buffer
 * @data: Data buffer to reset
 *
 * Reset @data to initial state and release associated memory.
 */
static void sclp_sd_data_reset(struct sclp_sd_data *data)
{
	vfree(data->data);
	data->data = NULL;
	data->dsize_bytes = 0;
	data->esize_bytes = 0;
}

/**
 * sclp_sd_file_release() - Release function for sclp_sd_file object
 * @kobj: Kobject embedded in sclp_sd_file object
 */
static void sclp_sd_file_release(struct kobject *kobj)
{
	struct sclp_sd_file *sd_file = to_sd_file(kobj);

	sclp_sd_data_reset(&sd_file->data);
	kfree(sd_file);
}

/**
 * sclp_sd_file_update() - Update contents of sclp_sd_file object
 * @sd_file: Object to update
 *
 * Obtain the current version of data associated with the Store Data entity
 * @sd_file.
 *
 * On success, return %0 and generate a KOBJ_CHANGE event to indicate that the
 * data may have changed. Return non-zero otherwise.
 */
static int sclp_sd_file_update(struct sclp_sd_file *sd_file)
{
	const char *name = kobject_name(&sd_file->kobj);
	struct sclp_sd_data data;
	int rc;

	rc = sclp_sd_store_data(&data, sd_file->di);
	if (rc) {
		if (rc == -ENOENT) {
			pr_info("No data is available for the %s data entity\n",
				name);
		}
		return rc;
	}

	mutex_lock(&sd_file->data_mutex);
	sclp_sd_data_reset(&sd_file->data);
	sd_file->data = data;
	mutex_unlock(&sd_file->data_mutex);

	pr_info("A %zu-byte %s data entity was retrieved\n", data.dsize_bytes,
		name);
	kobject_uevent(&sd_file->kobj, KOBJ_CHANGE);

	return 0;
}

/**
 * sclp_sd_file_update_async() - Wrapper for asynchronous update call
 * @data: Object to update
 * @cookie: Unused
 */
static void sclp_sd_file_update_async(void *data, async_cookie_t cookie)
{
	struct sclp_sd_file *sd_file = data;

	sclp_sd_file_update(sd_file);
}

/**
 * reload_store() - Store function for "reload" sysfs attribute
 * @kobj: Kobject of sclp_sd_file object
 *
 * Initiate a reload of the data associated with an sclp_sd_file object.
 */
static ssize_t reload_store(struct kobject *kobj, struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	struct sclp_sd_file *sd_file = to_sd_file(kobj);

	sclp_sd_file_update(sd_file);

	return count;
}

static struct kobj_attribute reload_attr = __ATTR_WO(reload);

static struct attribute *sclp_sd_file_default_attrs[] = {
	&reload_attr.attr,
	NULL,
};

static struct kobj_type sclp_sd_file_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
	.release = sclp_sd_file_release,
	.default_attrs = sclp_sd_file_default_attrs,
};

/**
 * data_read() - Read function for "data" sysfs attribute
 * @kobj: Kobject of sclp_sd_file object
 * @buffer: Target buffer
 * @off: Requested file offset
 * @size: Requested number of bytes
 *
 * Store the requested portion of the Store Data entity contents into the
 * specified buffer. Return the number of bytes stored on success, or %0
 * on EOF.
 */
static ssize_t data_read(struct file *file, struct kobject *kobj,
			 struct bin_attribute *attr, char *buffer,
			 loff_t off, size_t size)
{
	struct sclp_sd_file *sd_file = to_sd_file(kobj);
	size_t data_size;
	char *data;

	mutex_lock(&sd_file->data_mutex);

	data = sd_file->data.data;
	data_size = sd_file->data.dsize_bytes;
	if (!data || off >= data_size) {
		size = 0;
	} else {
		if (off + size > data_size)
			size = data_size - off;
		memcpy(buffer, data + off, size);
	}

	mutex_unlock(&sd_file->data_mutex);

	return size;
}

/**
 * sclp_sd_file_create() - Add a sysfs file representing a Store Data entity
 * @name: Name of file
 * @di: DI value associated with this entity
 *
 * Create a sysfs directory with the given @name located under
 *
 *   /sys/firmware/sclp_sd/
 *
 * The files in this directory can be used to access the contents of the Store
 * Data entity associated with @di.
 *
 * Return pointer to resulting sclp_sd_file object on success, %NULL otherwise.
 * The object must be freed by calling kobject_put() on the embedded kobject
 * pointer after use.
 */
static __init struct sclp_sd_file *sclp_sd_file_create(const char *name, u8 di)
{
	struct sclp_sd_file *sd_file;
	int rc;

	sd_file = kzalloc(sizeof(*sd_file), GFP_KERNEL);
	if (!sd_file)
		return NULL;
	sd_file->di = di;
	mutex_init(&sd_file->data_mutex);

	/* Create kobject located under /sys/firmware/sclp_sd/ */
	sd_file->kobj.kset = sclp_sd_kset;
	rc = kobject_init_and_add(&sd_file->kobj, &sclp_sd_file_ktype, NULL,
				  "%s", name);
	if (rc) {
		kobject_put(&sd_file->kobj);
		return NULL;
	}

	sysfs_bin_attr_init(&sd_file->data_attr);
	sd_file->data_attr.attr.name = "data";
	sd_file->data_attr.attr.mode = 0444;
	sd_file->data_attr.read = data_read;

	rc = sysfs_create_bin_file(&sd_file->kobj, &sd_file->data_attr);
	if (rc) {
		kobject_put(&sd_file->kobj);
		return NULL;
	}

	/*
	 * For completeness only - users interested in entity data should
	 * listen for KOBJ_CHANGE instead.
	 */
	kobject_uevent(&sd_file->kobj, KOBJ_ADD);

	/* Don't let a slow Store Data request delay further initialization */
	async_schedule(sclp_sd_file_update_async, sd_file);

	return sd_file;
}

/**
 * sclp_sd_init() - Initialize sclp_sd support and register sysfs files
 */
static __init int sclp_sd_init(void)
{
	int rc;

	rc = sclp_register(&sclp_sd_register);
	if (rc)
		return rc;

	/* Create kset named "sclp_sd" located under /sys/firmware/ */
	rc = -ENOMEM;
	sclp_sd_kset = kset_create_and_add("sclp_sd", NULL, firmware_kobj);
	if (!sclp_sd_kset)
		goto err_kset;

	rc = -EINVAL;
	config_file = sclp_sd_file_create("config", SD_DI_CONFIG);
	if (!config_file)
		goto err_config;

	return 0;

err_config:
	kset_unregister(sclp_sd_kset);
err_kset:
	sclp_unregister(&sclp_sd_register);

	return rc;
}
device_initcall(sclp_sd_init);
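
[Editor's note - not part of the patch.] The code above exposes each Store Data
entity as a directory under /sys/firmware/sclp_sd/ with a binary "data" file and
a write-only "reload" attribute, and signals updates via KOBJ_CHANGE uevents.
A minimal userspace sketch that exercises this interface could look as follows
(paths follow the kset/kobject names used in sclp_sd_init() and
sclp_sd_file_create(); error handling is kept to a minimum):

    /* Sketch only: trigger a reload of the "config" entity, then dump it. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[4096];
            ssize_t n;
            int fd;

            /* reload_store() performs the update synchronously, so the
             * subsequent read sees the refreshed data (if any is available). */
            fd = open("/sys/firmware/sclp_sd/config/reload", O_WRONLY);
            if (fd >= 0) {
                    write(fd, "1", 1);
                    close(fd);
            }

            fd = open("/sys/firmware/sclp_sd/config/data", O_RDONLY);
            if (fd < 0) {
                    perror("open");
                    return EXIT_FAILURE;
            }
            while ((n = read(fd, buf, sizeof(buf))) > 0)
                    fwrite(buf, 1, n, stdout);
            close(fd);
            return EXIT_SUCCESS;
    }

As the in-code comment notes, consumers that care about changes to the entity
data should listen for the KOBJ_CHANGE uevent rather than poll the files.
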
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
index 9f7b87d6d434..5aff8b684eb2 100644
--- a/drivers/s390/char/sclp_tty.c
+++ b/drivers/s390/char/sclp_tty.c
@@ -502,7 +502,10 @@ sclp_tty_init(void)
 	int i;
 	int rc;
 
-	if (!CONSOLE_IS_SCLP)
+	/* z/VM multiplexes the line mode output on the 32xx screen */
+	if (MACHINE_IS_VM && !CONSOLE_IS_SCLP)
+		return 0;
+	if (!sclp.has_linemode)
 		return 0;
 	driver = alloc_tty_driver(1);
 	if (!driver)
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index f95b452b8bbc..afbdee74147d 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -384,6 +384,28 @@ static ssize_t chp_chid_external_show(struct device *dev,
 }
 static DEVICE_ATTR(chid_external, 0444, chp_chid_external_show, NULL);
 
+static ssize_t util_string_read(struct file *filp, struct kobject *kobj,
+				struct bin_attribute *attr, char *buf,
+				loff_t off, size_t count)
+{
+	struct channel_path *chp = to_channelpath(kobj_to_dev(kobj));
+	ssize_t rc;
+
+	mutex_lock(&chp->lock);
+	rc = memory_read_from_buffer(buf, count, &off, chp->desc_fmt3.util_str,
+				     sizeof(chp->desc_fmt3.util_str));
+	mutex_unlock(&chp->lock);
+
+	return rc;
+}
+static BIN_ATTR_RO(util_string,
+		   sizeof(((struct channel_path_desc_fmt3 *)0)->util_str));
+
+static struct bin_attribute *chp_bin_attrs[] = {
+	&bin_attr_util_string,
+	NULL,
+};
+
 static struct attribute *chp_attrs[] = {
 	&dev_attr_status.attr,
 	&dev_attr_configure.attr,
@@ -396,6 +418,7 @@ static struct attribute *chp_attrs[] = {
 };
 static struct attribute_group chp_attr_group = {
 	.attrs = chp_attrs,
+	.bin_attrs = chp_bin_attrs,
 };
 static const struct attribute_group *chp_attr_groups[] = {
 	&chp_attr_group,
@@ -422,7 +445,7 @@ int chp_update_desc(struct channel_path *chp)
 {
 	int rc;
 
-	rc = chsc_determine_base_channel_path_desc(chp->chpid, &chp->desc);
+	rc = chsc_determine_fmt0_channel_path_desc(chp->chpid, &chp->desc);
 	if (rc)
 		return rc;
 
@@ -431,6 +454,7 @@ int chp_update_desc(struct channel_path *chp)
 	 * hypervisors implement the required chsc commands.
 	 */
 	chsc_determine_fmt1_channel_path_desc(chp->chpid, &chp->desc_fmt1);
+	chsc_determine_fmt3_channel_path_desc(chp->chpid, &chp->desc_fmt3);
 	chsc_get_channel_measurement_chars(chp);
 
 	return 0;
@@ -506,20 +530,20 @@ out:
  * On success return a newly allocated copy of the channel-path description
  * data associated with the given channel-path ID. Return %NULL on error.
  */
-struct channel_path_desc *chp_get_chp_desc(struct chp_id chpid)
+struct channel_path_desc_fmt0 *chp_get_chp_desc(struct chp_id chpid)
 {
 	struct channel_path *chp;
-	struct channel_path_desc *desc;
+	struct channel_path_desc_fmt0 *desc;
 
 	chp = chpid_to_chp(chpid);
 	if (!chp)
 		return NULL;
-	desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
+	desc = kmalloc(sizeof(*desc), GFP_KERNEL);
 	if (!desc)
 		return NULL;
 
 	mutex_lock(&chp->lock);
-	memcpy(desc, &chp->desc, sizeof(struct channel_path_desc));
+	memcpy(desc, &chp->desc, sizeof(*desc));
 	mutex_unlock(&chp->lock);
 	return desc;
 }
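
[Editor's note - not part of the patch.] With the bin_attrs hookup above, the
new util_string file appears alongside the existing channel-path attributes.
A small sketch for reading it from userspace might look like this; the sysfs
location (/sys/devices/css0/chp0.<id>/util_string) is an assumption based on
how channel-path devices are commonly named, so adjust the path as needed:

    /* Sketch only: hex-dump the utility string of one channel path. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
            const char *path = argc > 1 ? argv[1] :
                    "/sys/devices/css0/chp0.00/util_string";
            unsigned char buf[64]; /* matches util_str[64] in the fmt3 descriptor */
            ssize_t n, i;
            int fd;

            fd = open(path, O_RDONLY);
            if (fd < 0) {
                    perror(path);
                    return 1;
            }
            n = read(fd, buf, sizeof(buf));
            close(fd);

            /* Raw descriptor bytes; print them as a hex dump. */
            for (i = 0; i < n; i++)
                    printf("%02x%c", buf[i], (i % 16 == 15) ? '\n' : ' ');
            if (n % 16)
                    putchar('\n');
            return 0;
    }
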
diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h
index 7e80323cd261..20259f3fbf45 100644
--- a/drivers/s390/cio/chp.h
+++ b/drivers/s390/cio/chp.h
@@ -44,8 +44,9 @@ struct channel_path {
 	struct chp_id chpid;
 	struct mutex lock; /* Serialize access to below members. */
 	int state;
-	struct channel_path_desc desc;
+	struct channel_path_desc_fmt0 desc;
 	struct channel_path_desc_fmt1 desc_fmt1;
+	struct channel_path_desc_fmt3 desc_fmt3;
 	/* Channel-measurement related stuff: */
 	int cmg;
 	int shared;
@@ -61,7 +62,7 @@ static inline struct channel_path *chpid_to_chp(struct chp_id chpid)
 int chp_get_status(struct chp_id chpid);
 u8 chp_get_sch_opm(struct subchannel *sch);
 int chp_is_registered(struct chp_id chpid);
-struct channel_path_desc *chp_get_chp_desc(struct chp_id chpid);
+struct channel_path_desc_fmt0 *chp_get_chp_desc(struct chp_id chpid);
 void chp_remove_cmg_attr(struct channel_path *chp);
 int chp_add_cmg_attr(struct channel_path *chp);
 int chp_update_desc(struct channel_path *chp);
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index c08fc5a8df0c..6652a49a49b1 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -915,6 +915,8 @@ int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
 		return -EINVAL;
 	if ((rfmt == 2) && !css_general_characteristics.cib)
 		return -EINVAL;
+	if ((rfmt == 3) && !css_general_characteristics.util_str)
+		return -EINVAL;
 
 	memset(page, 0, PAGE_SIZE);
 	scpd_area = page;
@@ -940,43 +942,30 @@ int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
 }
 EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);
 
-int chsc_determine_base_channel_path_desc(struct chp_id chpid,
-					  struct channel_path_desc *desc)
-{
-	struct chsc_scpd *scpd_area;
-	unsigned long flags;
-	int ret;
-
-	spin_lock_irqsave(&chsc_page_lock, flags);
-	scpd_area = chsc_page;
-	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, scpd_area);
-	if (ret)
-		goto out;
-
-	memcpy(desc, scpd_area->data, sizeof(*desc));
-out:
-	spin_unlock_irqrestore(&chsc_page_lock, flags);
-	return ret;
+#define chsc_det_chp_desc(FMT, c)					\
+int chsc_determine_fmt##FMT##_channel_path_desc(			\
+	struct chp_id chpid, struct channel_path_desc_fmt##FMT *desc)	\
+{									\
+	struct chsc_scpd *scpd_area;					\
+	unsigned long flags;						\
+	int ret;							\
+									\
+	spin_lock_irqsave(&chsc_page_lock, flags);			\
+	scpd_area = chsc_page;						\
+	ret = chsc_determine_channel_path_desc(chpid, 0, FMT, c, 0,	\
+					       scpd_area);		\
+	if (ret)							\
+		goto out;						\
+									\
+	memcpy(desc, scpd_area->data, sizeof(*desc));			\
+out:									\
+	spin_unlock_irqrestore(&chsc_page_lock, flags);			\
+	return ret;							\
 }
 
-int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
-					  struct channel_path_desc_fmt1 *desc)
-{
-	struct chsc_scpd *scpd_area;
-	unsigned long flags;
-	int ret;
-
-	spin_lock_irqsave(&chsc_page_lock, flags);
-	scpd_area = chsc_page;
-	ret = chsc_determine_channel_path_desc(chpid, 0, 1, 1, 0, scpd_area);
-	if (ret)
-		goto out;
-
-	memcpy(desc, scpd_area->data, sizeof(*desc));
-out:
-	spin_unlock_irqrestore(&chsc_page_lock, flags);
-	return ret;
-}
+chsc_det_chp_desc(0, 0)
+chsc_det_chp_desc(1, 1)
+chsc_det_chp_desc(3, 0)
 
 static void
 chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
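
[Editor's note - not part of the patch.] For readers unfamiliar with the macro
style used above: each chsc_det_chp_desc() instantiation generates one fetch
function per descriptor format. Expanding chsc_det_chp_desc(3, 0) by hand
yields roughly the following (illustration only, derived from the macro body):

    /* Hand-expansion of chsc_det_chp_desc(3, 0) -- illustration only. */
    int chsc_determine_fmt3_channel_path_desc(struct chp_id chpid,
                                              struct channel_path_desc_fmt3 *desc)
    {
            struct chsc_scpd *scpd_area;
            unsigned long flags;
            int ret;

            spin_lock_irqsave(&chsc_page_lock, flags);
            scpd_area = chsc_page;
            /* fmt=0, rfmt=3, c=0, m=0: request a format-3 response block */
            ret = chsc_determine_channel_path_desc(chpid, 0, 3, 0, 0, scpd_area);
            if (ret)
                    goto out;

            memcpy(desc, scpd_area->data, sizeof(*desc));
    out:
            spin_unlock_irqrestore(&chsc_page_lock, flags);
            return ret;
    }
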
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index dda5953534b7..5c9f0dd33f4e 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -40,6 +40,11 @@ struct channel_path_desc_fmt1 {
 	u32 zeros[2];
 } __attribute__ ((packed));
 
+struct channel_path_desc_fmt3 {
+	struct channel_path_desc_fmt1 fmt1_desc;
+	u8 util_str[64];
+};
+
 struct channel_path;
 
 struct css_chsc_char {
@@ -147,10 +152,12 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable);
 int chsc_chp_vary(struct chp_id chpid, int on);
 int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
 				     int c, int m, void *page);
-int chsc_determine_base_channel_path_desc(struct chp_id chpid,
-					  struct channel_path_desc *desc);
+int chsc_determine_fmt0_channel_path_desc(struct chp_id chpid,
+					  struct channel_path_desc_fmt0 *desc);
 int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
 					  struct channel_path_desc_fmt1 *desc);
+int chsc_determine_fmt3_channel_path_desc(struct chp_id chpid,
+					  struct channel_path_desc_fmt3 *desc);
 void chsc_chp_online(struct chp_id chpid);
 void chsc_chp_offline(struct chp_id chpid);
 int chsc_get_channel_measurement_chars(struct channel_path *chp);
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index f50ea035aa9b..1540229a37bb 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1073,8 +1073,7 @@ out_schedule:
 	return 0;
 }
 
-static int
-io_subchannel_remove (struct subchannel *sch)
+static int io_subchannel_remove(struct subchannel *sch)
 {
 	struct io_subchannel_private *io_priv = to_io_private(sch);
 	struct ccw_device *cdev;
@@ -1082,14 +1081,12 @@ io_subchannel_remove (struct subchannel *sch)
 	cdev = sch_get_cdev(sch);
 	if (!cdev)
 		goto out_free;
-	io_subchannel_quiesce(sch);
-	/* Set ccw device to not operational and drop reference. */
-	spin_lock_irq(cdev->ccwlock);
+
+	ccw_device_unregister(cdev);
+	spin_lock_irq(sch->lock);
 	sch_set_cdev(sch, NULL);
 	set_io_private(sch, NULL);
-	cdev->private->state = DEV_STATE_NOT_OPER;
-	spin_unlock_irq(cdev->ccwlock);
-	ccw_device_unregister(cdev);
+	spin_unlock_irq(sch->lock);
 out_free:
 	kfree(io_priv);
 	sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
@@ -1721,6 +1718,7 @@ static int ccw_device_remove(struct device *dev)
 {
 	struct ccw_device *cdev = to_ccwdev(dev);
 	struct ccw_driver *cdrv = cdev->drv;
+	struct subchannel *sch;
 	int ret;
 
 	if (cdrv->remove)
@@ -1746,7 +1744,9 @@ static int ccw_device_remove(struct device *dev)
 	ccw_device_set_timeout(cdev, 0);
 	cdev->drv = NULL;
 	cdev->private->int_class = IRQIO_CIO;
+	sch = to_subchannel(cdev->dev.parent);
 	spin_unlock_irq(cdev->ccwlock);
+	io_subchannel_quiesce(sch);
 	__disable_cmf(cdev);
 
 	return 0;
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 75ce12a24dc2..aecfebb74157 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -460,8 +460,8 @@ __u8 ccw_device_get_path_mask(struct ccw_device *cdev)
  * On success return a newly allocated copy of the channel-path description
  * data associated with the given channel path. Return %NULL on error.
  */
-struct channel_path_desc *ccw_device_get_chp_desc(struct ccw_device *cdev,
+struct channel_path_desc_fmt0 *ccw_device_get_chp_desc(struct ccw_device *cdev,
 						  int chp_idx)
 {
 	struct subchannel *sch;
 	struct chp_id chpid;
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index d5b02de02a3a..a337281337a7 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -98,22 +98,6 @@ static inline int do_siga_output(unsigned long schid, unsigned long mask,
 	return cc;
 }
 
-static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
-{
-	/* all done or next buffer state different */
-	if (ccq == 0 || ccq == 32)
-		return 0;
-	/* no buffer processed */
-	if (ccq == 97)
-		return 1;
-	/* not all buffers processed */
-	if (ccq == 96)
-		return 2;
-	/* notify devices immediately */
-	DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
-	return -EIO;
-}
-
 /**
  * qdio_do_eqbs - extract buffer states for QEBSM
  * @q: queue to manipulate
@@ -128,7 +112,7 @@ static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
 static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
 			int start, int count, int auto_ack)
 {
-	int rc, tmp_count = count, tmp_start = start, nr = q->nr, retried = 0;
+	int tmp_count = count, tmp_start = start, nr = q->nr;
 	unsigned int ccq = 0;
 
 	qperf_inc(q, eqbs);
@@ -138,34 +122,30 @@ static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
 again:
 	ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
 		      auto_ack);
-	rc = qdio_check_ccq(q, ccq);
-	if (!rc)
-		return count - tmp_count;
 
-	if (rc == 1) {
-		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
-		goto again;
-	}
-
-	if (rc == 2) {
+	switch (ccq) {
+	case 0:
+	case 32:
+		/* all done, or next buffer state different */
+		return count - tmp_count;
+	case 96:
+		/* not all buffers processed */
 		qperf_inc(q, eqbs_partial);
 		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x",
 			tmp_count);
-		/*
-		 * Retry once, if that fails bail out and process the
-		 * extracted buffers before trying again.
-		 */
-		if (!retried++)
-			goto again;
-		else
-			return count - tmp_count;
+		return count - tmp_count;
+	case 97:
+		/* no buffer processed */
+		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
+		goto again;
+	default:
+		DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
+		DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
+		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
+		q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE, q->nr,
+			   q->first_to_kick, count, q->irq_ptr->int_parm);
+		return 0;
 	}
-
-	DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
-	DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
-	q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE,
-		   q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
-	return 0;
 }
 
 /**
@@ -185,7 +165,6 @@ static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
 	unsigned int ccq = 0;
 	int tmp_count = count, tmp_start = start;
 	int nr = q->nr;
-	int rc;
 
 	if (!count)
 		return 0;
@@ -195,26 +174,32 @@ static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
 		nr += q->irq_ptr->nr_input_qs;
 again:
 	ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
-	rc = qdio_check_ccq(q, ccq);
-	if (!rc) {
+
+	switch (ccq) {
+	case 0:
+	case 32:
+		/* all done, or active buffer adapter-owned */
 		WARN_ON_ONCE(tmp_count);
 		return count - tmp_count;
-	}
-
-	if (rc == 1 || rc == 2) {
+	case 96:
+		/* not all buffers processed */
 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
 		qperf_inc(q, sqbs_partial);
 		goto again;
+	default:
+		DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
+		DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
+		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
+		q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE, q->nr,
+			   q->first_to_kick, count, q->irq_ptr->int_parm);
+		return 0;
 	}
-
-	DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
-	DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
-	q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE,
-		   q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
-	return 0;
 }
 
-/* returns number of examined buffers and their common state in *state */
+/*
+ * Returns number of examined buffers and their common state in *state.
+ * Requested number of buffers-to-examine must be > 0.
+ */
 static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
 				 unsigned char *state, unsigned int count,
 				 int auto_ack, int merge_pending)
@@ -225,17 +210,23 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
 	if (is_qebsm(q))
 		return qdio_do_eqbs(q, state, bufnr, count, auto_ack);
 
-	for (i = 0; i < count; i++) {
-		if (!__state) {
-			__state = q->slsb.val[bufnr];
-			if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
-				__state = SLSB_P_OUTPUT_EMPTY;
-		} else if (merge_pending) {
-			if ((q->slsb.val[bufnr] & __state) != __state)
-				break;
-		} else if (q->slsb.val[bufnr] != __state)
-			break;
+	/* get initial state: */
+	__state = q->slsb.val[bufnr];
+	if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
+		__state = SLSB_P_OUTPUT_EMPTY;
+
+	for (i = 1; i < count; i++) {
 		bufnr = next_buf(bufnr);
+
+		/* merge PENDING into EMPTY: */
+		if (merge_pending &&
+		    q->slsb.val[bufnr] == SLSB_P_OUTPUT_PENDING &&
+		    __state == SLSB_P_OUTPUT_EMPTY)
+			continue;
+
+		/* stop if next state differs from initial state: */
+		if (q->slsb.val[bufnr] != __state)
+			break;
 	}
 	*state = __state;
 	return i;
@@ -502,8 +493,8 @@ static inline void inbound_primed(struct qdio_q *q, int count)
 
 static int get_inbound_buffer_frontier(struct qdio_q *q)
 {
-	int count, stop;
 	unsigned char state = 0;
+	int count;
 
 	q->timestamp = get_tod_clock_fast();
 
@@ -512,9 +503,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
 	 * would return 0.
 	 */
 	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
-	stop = add_buf(q->first_to_check, count);
-
-	if (q->first_to_check == stop)
+	if (!count)
 		goto out;
 
 	/*
@@ -734,8 +723,8 @@ void qdio_inbound_processing(unsigned long data)
 
 static int get_outbound_buffer_frontier(struct qdio_q *q)
 {
-	int count, stop;
 	unsigned char state = 0;
+	int count;
 
 	q->timestamp = get_tod_clock_fast();
 
@@ -751,11 +740,11 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
 	 * would return 0.
 	 */
 	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
-	stop = add_buf(q->first_to_check, count);
-	if (q->first_to_check == stop)
+	if (!count)
 		goto out;
 
-	count = get_buf_states(q, q->first_to_check, &state, count, 0, 1);
+	count = get_buf_states(q, q->first_to_check, &state, count, 0,
+			       q->u.out.use_cq);
 	if (!count)
 		goto out;
 
diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c
index c30420c517b1..ff6963ad6e39 100644
--- a/drivers/s390/cio/vfio_ccw_fsm.c
+++ b/drivers/s390/cio/vfio_ccw_fsm.c
@@ -124,6 +124,11 @@ static void fsm_io_request(struct vfio_ccw_private *private,
 	if (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) {
 		orb = (union orb *)io_region->orb_area;
 
+		/* Don't try to build a cp if transport mode is specified. */
+		if (orb->tm.b) {
+			io_region->ret_code = -EOPNOTSUPP;
+			goto err_out;
+		}
 		io_region->ret_code = cp_init(&private->cp, mdev_dev(mdev),
 					      orb);
 		if (io_region->ret_code)
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 19203340f879..04fefa5bb08d 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -1369,7 +1369,7 @@ static void qeth_set_multiple_write_queues(struct qeth_card *card)
 static void qeth_update_from_chp_desc(struct qeth_card *card)
 {
 	struct ccw_device *ccwdev;
-	struct channel_path_desc *chp_dsc;
+	struct channel_path_desc_fmt0 *chp_dsc;
 
 	QETH_DBF_TEXT(SETUP, 2, "chp_desc");
 
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index e5041c605fd0..0840d27381ea 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -11,7 +11,7 @@ if TTY
 
 config VT
 	bool "Virtual terminal" if EXPERT
-	depends on !S390 && !UML
+	depends on !UML
 	select INPUT
 	default y
 	---help---
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 3c20af999893..4f950c686055 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -3,7 +3,8 @@
 #
 
 menu "Graphics support"
-	depends on HAS_IOMEM
+
+if HAS_IOMEM
 
 config HAVE_FB_ATMEL
 	bool
@@ -36,6 +37,8 @@ config VIDEOMODE_HELPERS
 config HDMI
 	bool
 
+endif # HAS_IOMEM
+
 if VT
 	source "drivers/video/console/Kconfig"
 endif
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index 198574b7dbef..4110ba7d7ca9 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -8,7 +8,7 @@ config VGA_CONSOLE
 	bool "VGA text console" if EXPERT || !X86
 	depends on !4xx && !PPC_8xx && !SPARC && !M68K && !PARISC && !SUPERH && \
 		(!ARM || ARCH_FOOTBRIDGE || ARCH_INTEGRATOR || ARCH_NETWINDER) && \
-		!ARM64 && !ARC && !MICROBLAZE && !OPENRISC && !NDS32
+		!ARM64 && !ARC && !MICROBLAZE && !OPENRISC && !NDS32 && !S390
 	default y
 	help
 	  Saying Y here will allow you to use Linux in text mode through a
@@ -84,7 +84,7 @@ config MDA_CONSOLE
 
 config SGI_NEWPORT_CONSOLE
 	tristate "SGI Newport Console support"
-	depends on SGI_IP22
+	depends on SGI_IP22 && HAS_IOMEM
 	select FONT_SUPPORT
 	help
 	  Say Y here if you want the console on the Newport aka XL graphics
@@ -152,7 +152,7 @@ config FRAMEBUFFER_CONSOLE_ROTATION
 
 config STI_CONSOLE
 	bool "STI text console"
-	depends on PARISC
+	depends on PARISC && HAS_IOMEM
 	select FONT_SUPPORT
 	default y
 	help