author		Linus Torvalds <torvalds@linux-foundation.org>	2016-12-13 19:33:33 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-12-13 19:33:33 -0500
commit		2ec4584eb89b8933d1ee307f2fc9c42e745847d7 (patch)
tree		9e30e5b308f9d782b466e4298b7a0004b648b09d
parent		aa3ecf388adc90bde90776bba71a7f2d278fc4e3 (diff)
parent		c19805f870c1fa87c69819eb1e18d9c5fc398f58 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Martin Schwidefsky:
 "The main bulk of the s390 patches for the 4.10 merge window:

   - Add support for the contiguous memory allocator.

   - The recovery for I/O errors in the dasd device driver is improved,
     the driver will now remove channel paths that are not working
     properly.

   - Additional fields are added to /proc/sysinfo, the extended
     partition name and the partition UUID.

   - New naming for PCI devices with system defined UIDs.

   - The last few remaining alloc_bootmem calls are converted to
     memblock.

   - The thread_info structure is stripped down and moved to the
     task_struct. The only field left in thread_info is the flags field.

   - Rework of the arch topology code to fix a fake numa issue.

   - Refactoring of the atomic primitives and add a new preempt_count
     implementation.

   - Clocksource steering for the STP sync check offsets.

   - The s390 specific headers are changed to make them usable with
     CLANG.

   - Bug fixes and cleanup"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (70 commits)
  s390/cpumf: Use configuration level indication for sampling data
  s390: provide memmove implementation
  s390: cleanup arch/s390/kernel Makefile
  s390: fix initrd corruptions with gcov/kcov instrumented kernels
  s390: exclude early C code from gcov profiling
  s390/dasd: channel path aware error recovery
  s390/dasd: extend dasd path handling
  s390: remove unused labels from entry.S
  s390/vmlogrdr: fix IUCV buffer allocation
  s390/crypto: unlock on error in prng_tdes_read()
  s390/sysinfo: show partition extended name and UUID if available
  s390/numa: pin all possible cpus to nodes early
  s390/numa: establish cpu to node mapping early
  s390/topology: use cpu_topology array instead of per cpu variable
  s390/smp: initialize cpu_present_mask in setup_arch
  s390/topology: always use s390 specific sched_domain_topology_level
  s390/smp: use smp_get_base_cpu() helper function
  s390/numa: always use logical cpu and core ids
  s390: Remove VLAIS in ptff() and clear_table()
  s390: fix machine check panic stack switch
  ...
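A minimal usage sketch of the atomic primitives rework mentioned above (illustrative only; the example wrappers are hypothetical, but the __atomic_* helpers are the ones introduced by the new arch/s390/include/asm/atomic_ops.h in this merge):

	#include <asm/atomic_ops.h>

	/* Relaxed form: one interlocked-access instruction on z196+,
	 * a compare-and-swap loop on older machines. */
	static inline void example_count(int *counter)
	{
		__atomic_add(1, counter);
	}

	/* Fully ordered form: the _barrier variant returns the old value
	 * and adds a serialization point ("bcr 14,0" on z196+). */
	static inline int example_count_return(int *counter)
	{
		return __atomic_add_barrier(1, counter) + 1;
	}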
-rw-r--r--  MAINTAINERS | 2
-rw-r--r--  arch/s390/Kconfig | 2
-rw-r--r--  arch/s390/boot/compressed/head.S | 2
-rw-r--r--  arch/s390/configs/default_defconfig | 6
-rw-r--r--  arch/s390/configs/gcov_defconfig | 4
-rw-r--r--  arch/s390/configs/performance_defconfig | 4
-rw-r--r--  arch/s390/crypto/prng.c | 6
-rw-r--r--  arch/s390/hypfs/inode.c | 24
-rw-r--r--  arch/s390/include/asm/Kbuild | 4
-rw-r--r--  arch/s390/include/asm/asm-offsets.h | 1
-rw-r--r--  arch/s390/include/asm/atomic.h | 207
-rw-r--r--  arch/s390/include/asm/atomic_ops.h | 130
-rw-r--r--  arch/s390/include/asm/bitops.h | 62
-rw-r--r--  arch/s390/include/asm/cpu_mf.h | 3
-rw-r--r--  arch/s390/include/asm/elf.h | 6
-rw-r--r--  arch/s390/include/asm/facilities_src.h | 82
-rw-r--r--  arch/s390/include/asm/ipl.h | 2
-rw-r--r--  arch/s390/include/asm/lowcore.h | 5
-rw-r--r--  arch/s390/include/asm/pci_clp.h | 5
-rw-r--r--  arch/s390/include/asm/pgalloc.h | 22
-rw-r--r--  arch/s390/include/asm/preempt.h | 137
-rw-r--r--  arch/s390/include/asm/processor.h | 6
-rw-r--r--  arch/s390/include/asm/sclp.h | 10
-rw-r--r--  arch/s390/include/asm/scsw.h | 6
-rw-r--r--  arch/s390/include/asm/smp.h | 8
-rw-r--r--  arch/s390/include/asm/string.h | 3
-rw-r--r--  arch/s390/include/asm/sysinfo.h | 7
-rw-r--r--  arch/s390/include/asm/thread_info.h | 24
-rw-r--r--  arch/s390/include/asm/timex.h | 41
-rw-r--r--  arch/s390/include/asm/topology.h | 28
-rw-r--r--  arch/s390/include/asm/uaccess.h | 6
-rw-r--r--  arch/s390/include/asm/vdso.h | 2
-rw-r--r--  arch/s390/include/uapi/asm/Kbuild | 5
-rw-r--r--  arch/s390/kernel/Makefile | 65
-rw-r--r--  arch/s390/kernel/asm-offsets.c | 17
-rw-r--r--  arch/s390/kernel/compat_signal.c | 4
-rw-r--r--  arch/s390/kernel/early.c | 50
-rw-r--r--  arch/s390/kernel/entry.S | 51
-rw-r--r--  arch/s390/kernel/head.S | 2
-rw-r--r--  arch/s390/kernel/head64.S | 7
-rw-r--r--  arch/s390/kernel/ipl.c | 7
-rw-r--r--  arch/s390/kernel/irq.c | 2
-rw-r--r--  arch/s390/kernel/lgr.c | 5
-rw-r--r--  arch/s390/kernel/perf_cpum_sf.c | 53
-rw-r--r--  arch/s390/kernel/process.c | 6
-rw-r--r--  arch/s390/kernel/ptrace.c | 14
-rw-r--r--  arch/s390/kernel/setup.c | 22
-rw-r--r--  arch/s390/kernel/signal.c | 14
-rw-r--r--  arch/s390/kernel/smp.c | 32
-rw-r--r--  arch/s390/kernel/swsusp.S | 2
-rw-r--r--  arch/s390/kernel/sysinfo.c | 33
-rw-r--r--  arch/s390/kernel/time.c | 191
-rw-r--r--  arch/s390/kernel/topology.c | 53
-rw-r--r--  arch/s390/kernel/vdso32/clock_gettime.S | 23
-rw-r--r--  arch/s390/kernel/vdso32/gettimeofday.S | 23
-rw-r--r--  arch/s390/kernel/vdso64/clock_gettime.S | 11
-rw-r--r--  arch/s390/kernel/vdso64/gettimeofday.S | 11
-rw-r--r--  arch/s390/kernel/vtime.c | 26
-rw-r--r--  arch/s390/lib/mem.S | 39
-rw-r--r--  arch/s390/mm/fault.c | 1
-rw-r--r--  arch/s390/mm/vmem.c | 9
-rw-r--r--  arch/s390/numa/mode_emu.c | 38
-rw-r--r--  arch/s390/numa/toptree.c | 16
-rw-r--r--  arch/s390/pci/pci.c | 8
-rw-r--r--  arch/s390/pci/pci_clp.c | 3
-rw-r--r--  arch/s390/pci/pci_debug.c | 2
-rw-r--r--  arch/s390/pci/pci_dma.c | 36
-rw-r--r--  arch/s390/tools/Makefile | 2
-rw-r--r--  arch/s390/tools/gen_facilities.c | 76
-rw-r--r--  drivers/pci/hotplug/s390_pci_hpc.c | 7
-rw-r--r--  drivers/s390/block/dasd.c | 301
-rw-r--r--  drivers/s390/block/dasd_3990_erp.c | 52
-rw-r--r--  drivers/s390/block/dasd_devmap.c | 326
-rw-r--r--  drivers/s390/block/dasd_eckd.c | 323
-rw-r--r--  drivers/s390/block/dasd_eckd.h | 5
-rw-r--r--  drivers/s390/block/dasd_eer.c | 29
-rw-r--r--  drivers/s390/block/dasd_erp.c | 2
-rw-r--r--  drivers/s390/block/dasd_fba.c | 2
-rw-r--r--  drivers/s390/block/dasd_int.h | 449
-rw-r--r--  drivers/s390/char/con3215.c | 12
-rw-r--r--  drivers/s390/char/sclp.h | 23
-rw-r--r--  drivers/s390/char/sclp_cmd.c | 25
-rw-r--r--  drivers/s390/char/sclp_early.c | 31
-rw-r--r--  drivers/s390/char/sclp_quiesce.c | 4
-rw-r--r--  drivers/s390/char/sclp_tty.c | 3
-rw-r--r--  drivers/s390/char/vmlogrdr.c | 2
-rw-r--r--  drivers/s390/char/zcore.c | 22
-rw-r--r--  drivers/s390/cio/cmf.c | 10
-rw-r--r--  drivers/s390/cio/css.c | 6
-rw-r--r--  drivers/s390/cio/device.c | 6
-rw-r--r--  drivers/s390/cio/device.h | 1
-rw-r--r--  drivers/s390/cio/device_fsm.c | 6
-rw-r--r--  drivers/s390/cio/device_ops.c | 5
-rw-r--r--  drivers/s390/crypto/ap_bus.c | 12
-rw-r--r--  drivers/s390/virtio/virtio_ccw.c | 25
95 files changed, 2265 insertions(+), 1240 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 3d3ed12b2c29..1174508ee597 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -10557,7 +10557,7 @@ F: arch/s390/pci/
 F:	drivers/pci/hotplug/s390_pci_hpc.c
 
 S390 ZCRYPT DRIVER
-M:	Ingo Tuchscherer <ingo.tuchscherer@de.ibm.com>
+M:	Harald Freudenberger <freude@de.ibm.com>
 L:	linux-s390@vger.kernel.org
 W:	http://www.ibm.com/developerworks/linux/linux390/
 S:	Supported
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 028f97be5bae..c6722112527d 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -136,6 +136,7 @@ config S390
 	select HAVE_CMPXCHG_LOCAL
 	select HAVE_DEBUG_KMEMLEAK
 	select HAVE_DMA_API_DEBUG
+	select HAVE_DMA_CONTIGUOUS
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_DYNAMIC_FTRACE_WITH_REGS
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS
@@ -169,6 +170,7 @@ config S390
 	select OLD_SIGSUSPEND3
 	select SPARSE_IRQ
 	select SYSCTL_EXCEPTION_TRACE
+	select THREAD_INFO_IN_TASK
 	select TTY
 	select VIRT_CPU_ACCOUNTING
 	select ARCH_HAS_SCALED_CPUTIME
diff --git a/arch/s390/boot/compressed/head.S b/arch/s390/boot/compressed/head.S
index 28c4f96a2d9c..11f6254c561e 100644
--- a/arch/s390/boot/compressed/head.S
+++ b/arch/s390/boot/compressed/head.S
@@ -46,7 +46,7 @@ mover_end:
 
 	.align	8
 .Lstack:
-	.quad	0x8000 + (1<<(PAGE_SHIFT+THREAD_ORDER))
+	.quad	0x8000 + (1<<(PAGE_SHIFT+THREAD_SIZE_ORDER))
 .Loffset:
 	.quad	0x11000
 .Lmvsize:
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
index 45968686f918..e659daffe368 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/default_defconfig
@@ -66,6 +66,8 @@ CONFIG_TRANSPARENT_HUGEPAGE=y
 CONFIG_CLEANCACHE=y
 CONFIG_FRONTSWAP=y
 CONFIG_CMA=y
+CONFIG_CMA_DEBUG=y
+CONFIG_CMA_DEBUGFS=y
 CONFIG_MEM_SOFT_DIRTY=y
 CONFIG_ZPOOL=m
 CONFIG_ZBUD=m
@@ -366,6 +368,8 @@ CONFIG_BPF_JIT=y
 CONFIG_NET_PKTGEN=m
 CONFIG_NET_TCPPROBE=m
 CONFIG_DEVTMPFS=y
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_CONNECTOR=y
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -438,7 +442,6 @@ CONFIG_TUN=m
 CONFIG_VETH=m
 CONFIG_VIRTIO_NET=m
 CONFIG_NLMON=m
-CONFIG_VHOST_NET=m
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_VENDOR_CHELSIO is not set
 # CONFIG_NET_VENDOR_INTEL is not set
@@ -693,3 +696,4 @@ CONFIG_CMM=m
 CONFIG_APPLDATA_BASE=y
 CONFIG_KVM=m
 CONFIG_KVM_S390_UCONTROL=y
+CONFIG_VHOST_NET=m
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
index 1dd05e345c4d..95ceac50bc65 100644
--- a/arch/s390/configs/gcov_defconfig
+++ b/arch/s390/configs/gcov_defconfig
@@ -362,6 +362,8 @@ CONFIG_BPF_JIT=y
 CONFIG_NET_PKTGEN=m
 CONFIG_NET_TCPPROBE=m
 CONFIG_DEVTMPFS=y
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_CONNECTOR=y
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -434,7 +436,6 @@ CONFIG_TUN=m
 CONFIG_VETH=m
 CONFIG_VIRTIO_NET=m
 CONFIG_NLMON=m
-CONFIG_VHOST_NET=m
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_VENDOR_CHELSIO is not set
 # CONFIG_NET_VENDOR_INTEL is not set
@@ -633,3 +634,4 @@ CONFIG_CMM=m
 CONFIG_APPLDATA_BASE=y
 CONFIG_KVM=m
 CONFIG_KVM_S390_UCONTROL=y
+CONFIG_VHOST_NET=m
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index 29d1178666f0..bc7b176f5795 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -362,6 +362,8 @@ CONFIG_BPF_JIT=y
 CONFIG_NET_PKTGEN=m
 CONFIG_NET_TCPPROBE=m
 CONFIG_DEVTMPFS=y
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_CONNECTOR=y
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -434,7 +436,6 @@ CONFIG_TUN=m
 CONFIG_VETH=m
 CONFIG_VIRTIO_NET=m
 CONFIG_NLMON=m
-CONFIG_VHOST_NET=m
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_VENDOR_CHELSIO is not set
 # CONFIG_NET_VENDOR_INTEL is not set
@@ -632,3 +633,4 @@ CONFIG_CMM=m
 CONFIG_APPLDATA_BASE=y
 CONFIG_KVM=m
 CONFIG_KVM_S390_UCONTROL=y
+CONFIG_VHOST_NET=m
diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
index 9cc050f9536c..1113389d0a39 100644
--- a/arch/s390/crypto/prng.c
+++ b/arch/s390/crypto/prng.c
@@ -507,8 +507,10 @@ static ssize_t prng_tdes_read(struct file *file, char __user *ubuf,
 		prng_data->prngws.byte_counter += n;
 		prng_data->prngws.reseed_counter += n;
 
-		if (copy_to_user(ubuf, prng_data->buf, chunk))
-			return -EFAULT;
+		if (copy_to_user(ubuf, prng_data->buf, chunk)) {
+			ret = -EFAULT;
+			break;
+		}
 
 		nbytes -= chunk;
 		ret += chunk;
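The prng.c hunk above turns an early return into a break so the function leaves its read loop through the common exit path; per the commit title ("unlock on error in prng_tdes_read()"), the point is that a lock is still held there. A sketch of the resulting pattern (simplified; the lock and buffer parameters are illustrative, not the driver's actual fields):

	static ssize_t example_read(struct mutex *lock, const u8 *buf,
				    char __user *ubuf, size_t nbytes)
	{
		ssize_t ret = 0;
		size_t chunk = 64;	/* illustrative chunk size */

		mutex_lock(lock);
		while (nbytes) {
			if (chunk > nbytes)
				chunk = nbytes;
			if (copy_to_user(ubuf, buf, chunk)) {
				ret = -EFAULT;
				break;	/* do not return with the lock held */
			}
			ubuf += chunk;
			nbytes -= chunk;
			ret += chunk;
		}
		mutex_unlock(lock);	/* runs on the error path too */
		return ret;
	}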
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 09bccb224d03..cf8a2d92467f 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -3,6 +3,7 @@
  *
  * Copyright IBM Corp. 2006, 2008
  * Author(s): Michael Holzheu <holzheu@de.ibm.com>
+ * License: GPL
  */
 
 #define KMSG_COMPONENT "hypfs"
@@ -18,7 +19,8 @@
 #include <linux/time.h>
 #include <linux/parser.h>
 #include <linux/sysfs.h>
-#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kobject.h>
 #include <linux/seq_file.h>
 #include <linux/mount.h>
 #include <linux/uio.h>
@@ -443,7 +445,6 @@ static struct file_system_type hypfs_type = {
 	.mount		= hypfs_mount,
 	.kill_sb	= hypfs_kill_super
 };
-MODULE_ALIAS_FS("s390_hypfs");
 
 static const struct super_operations hypfs_s_ops = {
 	.statfs		= simple_statfs,
@@ -497,21 +498,4 @@ fail_dbfs_exit:
 	pr_err("Initialization of hypfs failed with rc=%i\n", rc);
 	return rc;
 }
-
-static void __exit hypfs_exit(void)
-{
-	unregister_filesystem(&hypfs_type);
-	sysfs_remove_mount_point(hypervisor_kobj, "s390");
-	hypfs_diag0c_exit();
-	hypfs_sprp_exit();
-	hypfs_vm_exit();
-	hypfs_diag_exit();
-	hypfs_dbfs_exit();
-}
-
-module_init(hypfs_init)
-module_exit(hypfs_exit)
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Michael Holzheu <holzheu@de.ibm.com>");
-MODULE_DESCRIPTION("s390 Hypervisor Filesystem");
+device_initcall(hypfs_init)
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index 20f196b82a6e..8aea32fe8bd2 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -1,6 +1,6 @@
-
-
+generic-y += asm-offsets.h
 generic-y += clkdev.h
+generic-y += dma-contiguous.h
 generic-y += export.h
 generic-y += irq_work.h
 generic-y += mcs_spinlock.h
diff --git a/arch/s390/include/asm/asm-offsets.h b/arch/s390/include/asm/asm-offsets.h
deleted file mode 100644
index d370ee36a182..000000000000
--- a/arch/s390/include/asm/asm-offsets.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <generated/asm-offsets.h>
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index d28cc2f5b7b2..f7f69dfd2db2 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -1,13 +1,8 @@
 /*
- * Copyright IBM Corp. 1999, 2009
+ * Copyright IBM Corp. 1999, 2016
  * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
  *	       Denis Joseph Barrow,
- *	       Arnd Bergmann <arndb@de.ibm.com>,
- *
- * Atomic operations that C can't guarantee us.
- * Useful for resource counting etc.
- * s390 uses 'Compare And Swap' for atomicity in SMP environment.
- *
+ *	       Arnd Bergmann,
  */
 
 #ifndef __ARCH_S390_ATOMIC__
@@ -15,62 +10,12 @@
 
 #include <linux/compiler.h>
 #include <linux/types.h>
+#include <asm/atomic_ops.h>
 #include <asm/barrier.h>
 #include <asm/cmpxchg.h>
 
 #define ATOMIC_INIT(i)  { (i) }
 
-#define __ATOMIC_NO_BARRIER	"\n"
-
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
-
-#define __ATOMIC_OR	"lao"
-#define __ATOMIC_AND	"lan"
-#define __ATOMIC_ADD	"laa"
-#define __ATOMIC_XOR	"lax"
-#define __ATOMIC_BARRIER "bcr	14,0\n"
-
-#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier)	\
-({								\
-	int old_val;						\
-								\
-	typecheck(atomic_t *, ptr);				\
-	asm volatile(						\
-		op_string "	%0,%2,%1\n"			\
-		__barrier					\
-		: "=d" (old_val), "+Q" ((ptr)->counter)		\
-		: "d" (op_val)					\
-		: "cc", "memory");				\
-	old_val;						\
-})
-
-#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
-#define __ATOMIC_OR	"or"
-#define __ATOMIC_AND	"nr"
-#define __ATOMIC_ADD	"ar"
-#define __ATOMIC_XOR	"xr"
-#define __ATOMIC_BARRIER "\n"
-
-#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier)	\
-({								\
-	int old_val, new_val;					\
-								\
-	typecheck(atomic_t *, ptr);				\
-	asm volatile(						\
-		"	l	%0,%2\n"			\
-		"0:	lr	%1,%0\n"			\
-		op_string "	%1,%3\n"			\
-		"	cs	%0,%1,%2\n"			\
-		"	jl	0b"				\
-		: "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
-		: "d" (op_val)					\
-		: "cc", "memory");				\
-	old_val;						\
-})
-
-#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
 static inline int atomic_read(const atomic_t *v)
 {
 	int c;
@@ -90,27 +35,23 @@ static inline void atomic_set(atomic_t *v, int i)
 
 static inline int atomic_add_return(int i, atomic_t *v)
 {
-	return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER) + i;
+	return __atomic_add_barrier(i, &v->counter) + i;
 }
 
 static inline int atomic_fetch_add(int i, atomic_t *v)
 {
-	return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER);
+	return __atomic_add_barrier(i, &v->counter);
 }
 
 static inline void atomic_add(int i, atomic_t *v)
 {
 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
 	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
-		asm volatile(
-			"asi	%0,%1\n"
-			: "+Q" (v->counter)
-			: "i" (i)
-			: "cc", "memory");
+		__atomic_add_const(i, &v->counter);
 		return;
 	}
 #endif
-	__ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_NO_BARRIER);
+	__atomic_add(i, &v->counter);
 }
 
 #define atomic_add_negative(_i, _v)	(atomic_add_return(_i, _v) < 0)
@@ -125,19 +66,19 @@ static inline void atomic_add(int i, atomic_t *v)
 #define atomic_dec_return(_v)		atomic_sub_return(1, _v)
 #define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)
 
-#define ATOMIC_OPS(op, OP)						\
+#define ATOMIC_OPS(op)							\
 static inline void atomic_##op(int i, atomic_t *v)			\
 {									\
-	__ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_NO_BARRIER);	\
+	__atomic_##op(i, &v->counter);					\
 }									\
 static inline int atomic_fetch_##op(int i, atomic_t *v)		\
 {									\
-	return __ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_BARRIER);	\
+	return __atomic_##op##_barrier(i, &v->counter);			\
 }
 
-ATOMIC_OPS(and, AND)
-ATOMIC_OPS(or, OR)
-ATOMIC_OPS(xor, XOR)
+ATOMIC_OPS(and)
+ATOMIC_OPS(or)
+ATOMIC_OPS(xor)
 
 #undef ATOMIC_OPS
 
@@ -145,12 +86,7 @@ ATOMIC_OPS(xor, XOR)
 
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
-	asm volatile(
-		"	cs	%0,%2,%1"
-		: "+d" (old), "+Q" (v->counter)
-		: "d" (new)
-		: "cc", "memory");
-	return old;
+	return __atomic_cmpxchg(&v->counter, old, new);
 }
 
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
@@ -168,65 +104,11 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 	return c;
 }
 
-
-#undef __ATOMIC_LOOP
-
 #define ATOMIC64_INIT(i)  { (i) }
 
-#define __ATOMIC64_NO_BARRIER	"\n"
-
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
-
-#define __ATOMIC64_OR	"laog"
-#define __ATOMIC64_AND	"lang"
-#define __ATOMIC64_ADD	"laag"
-#define __ATOMIC64_XOR	"laxg"
-#define __ATOMIC64_BARRIER "bcr	14,0\n"
-
-#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier)		\
-({									\
-	long long old_val;						\
-									\
-	typecheck(atomic64_t *, ptr);					\
-	asm volatile(							\
-		op_string "	%0,%2,%1\n"				\
-		__barrier						\
-		: "=d" (old_val), "+Q" ((ptr)->counter)			\
-		: "d" (op_val)						\
-		: "cc", "memory");					\
-	old_val;							\
-})
-
-#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
-#define __ATOMIC64_OR	"ogr"
-#define __ATOMIC64_AND	"ngr"
-#define __ATOMIC64_ADD	"agr"
-#define __ATOMIC64_XOR	"xgr"
-#define __ATOMIC64_BARRIER "\n"
-
-#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier)		\
-({									\
-	long long old_val, new_val;					\
-									\
-	typecheck(atomic64_t *, ptr);					\
-	asm volatile(							\
-		"	lg	%0,%2\n"				\
-		"0:	lgr	%1,%0\n"				\
-		op_string "	%1,%3\n"				\
-		"	csg	%0,%1,%2\n"				\
-		"	jl	0b"					\
-		: "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
-		: "d" (op_val)						\
-		: "cc", "memory");					\
-	old_val;							\
-})
-
-#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
-static inline long long atomic64_read(const atomic64_t *v)
+static inline long atomic64_read(const atomic64_t *v)
 {
-	long long c;
+	long c;
 
 	asm volatile(
 		"	lg	%0,%1\n"
@@ -234,71 +116,60 @@ static inline long long atomic64_read(const atomic64_t *v)
 	return c;
 }
 
-static inline void atomic64_set(atomic64_t *v, long long i)
+static inline void atomic64_set(atomic64_t *v, long i)
 {
 	asm volatile(
 		"	stg	%1,%0\n"
 		: "=Q" (v->counter) : "d" (i));
 }
 
-static inline long long atomic64_add_return(long long i, atomic64_t *v)
+static inline long atomic64_add_return(long i, atomic64_t *v)
 {
-	return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER) + i;
+	return __atomic64_add_barrier(i, &v->counter) + i;
 }
 
-static inline long long atomic64_fetch_add(long long i, atomic64_t *v)
+static inline long atomic64_fetch_add(long i, atomic64_t *v)
 {
-	return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER);
+	return __atomic64_add_barrier(i, &v->counter);
 }
 
-static inline void atomic64_add(long long i, atomic64_t *v)
+static inline void atomic64_add(long i, atomic64_t *v)
 {
 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
 	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
-		asm volatile(
-			"agsi	%0,%1\n"
-			: "+Q" (v->counter)
-			: "i" (i)
-			: "cc", "memory");
+		__atomic64_add_const(i, &v->counter);
 		return;
 	}
 #endif
-	__ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_NO_BARRIER);
+	__atomic64_add(i, &v->counter);
 }
 
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
 
-static inline long long atomic64_cmpxchg(atomic64_t *v,
-					     long long old, long long new)
+static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
 {
-	asm volatile(
-		"	csg	%0,%2,%1"
-		: "+d" (old), "+Q" (v->counter)
-		: "d" (new)
-		: "cc", "memory");
-	return old;
+	return __atomic64_cmpxchg(&v->counter, old, new);
 }
 
-#define ATOMIC64_OPS(op, OP)						\
+#define ATOMIC64_OPS(op)						\
 static inline void atomic64_##op(long i, atomic64_t *v)		\
 {									\
-	__ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_NO_BARRIER);	\
+	__atomic64_##op(i, &v->counter);				\
 }									\
 static inline long atomic64_fetch_##op(long i, atomic64_t *v)		\
 {									\
-	return __ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_BARRIER); \
+	return __atomic64_##op##_barrier(i, &v->counter);		\
 }
 
-ATOMIC64_OPS(and, AND)
-ATOMIC64_OPS(or, OR)
-ATOMIC64_OPS(xor, XOR)
+ATOMIC64_OPS(and)
+ATOMIC64_OPS(or)
+ATOMIC64_OPS(xor)
 
 #undef ATOMIC64_OPS
-#undef __ATOMIC64_LOOP
 
-static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
+static inline int atomic64_add_unless(atomic64_t *v, long i, long u)
 {
-	long long c, old;
+	long c, old;
 
 	c = atomic64_read(v);
 	for (;;) {
@@ -312,9 +183,9 @@ static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
 	return c != u;
 }
 
-static inline long long atomic64_dec_if_positive(atomic64_t *v)
+static inline long atomic64_dec_if_positive(atomic64_t *v)
 {
-	long long c, old, dec;
+	long c, old, dec;
 
 	c = atomic64_read(v);
 	for (;;) {
@@ -333,9 +204,9 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
 #define atomic64_inc(_v)		atomic64_add(1, _v)
 #define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
 #define atomic64_inc_and_test(_v)	(atomic64_add_return(1, _v) == 0)
-#define atomic64_sub_return(_i, _v)	atomic64_add_return(-(long long)(_i), _v)
-#define atomic64_fetch_sub(_i, _v)	atomic64_fetch_add(-(long long)(_i), _v)
-#define atomic64_sub(_i, _v)		atomic64_add(-(long long)(_i), _v)
+#define atomic64_sub_return(_i, _v)	atomic64_add_return(-(long)(_i), _v)
+#define atomic64_fetch_sub(_i, _v)	atomic64_fetch_add(-(long)(_i), _v)
+#define atomic64_sub(_i, _v)		atomic64_add(-(long)(_i), _v)
 #define atomic64_sub_and_test(_i, _v)	(atomic64_sub_return(_i, _v) == 0)
 #define atomic64_dec(_v)		atomic64_sub(1, _v)
 #define atomic64_dec_return(_v)		atomic64_sub_return(1, _v)
diff --git a/arch/s390/include/asm/atomic_ops.h b/arch/s390/include/asm/atomic_ops.h
new file mode 100644
index 000000000000..ac9e2b939d04
--- /dev/null
+++ b/arch/s390/include/asm/atomic_ops.h
@@ -0,0 +1,130 @@
+/*
+ * Low level function for atomic operations
+ *
+ * Copyright IBM Corp. 1999, 2016
+ */
+
+#ifndef __ARCH_S390_ATOMIC_OPS__
+#define __ARCH_S390_ATOMIC_OPS__
+
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define __ATOMIC_OP(op_name, op_type, op_string, op_barrier)		\
+static inline op_type op_name(op_type val, op_type *ptr)		\
+{									\
+	op_type old;							\
+									\
+	asm volatile(							\
+		op_string "	%[old],%[val],%[ptr]\n"			\
+		op_barrier						\
+		: [old] "=d" (old), [ptr] "+Q" (*ptr)			\
+		: [val] "d" (val) : "cc", "memory");			\
+	return old;							\
+}									\
+
+#define __ATOMIC_OPS(op_name, op_type, op_string)			\
+	__ATOMIC_OP(op_name, op_type, op_string, "\n")			\
+	__ATOMIC_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n")
+
+__ATOMIC_OPS(__atomic_add, int, "laa")
+__ATOMIC_OPS(__atomic_and, int, "lan")
+__ATOMIC_OPS(__atomic_or,  int, "lao")
+__ATOMIC_OPS(__atomic_xor, int, "lax")
+
+__ATOMIC_OPS(__atomic64_add, long, "laag")
+__ATOMIC_OPS(__atomic64_and, long, "lang")
+__ATOMIC_OPS(__atomic64_or,  long, "laog")
+__ATOMIC_OPS(__atomic64_xor, long, "laxg")
+
+#undef __ATOMIC_OPS
+#undef __ATOMIC_OP
+
+static inline void __atomic_add_const(int val, int *ptr)
+{
+	asm volatile(
+		"	asi	%[ptr],%[val]\n"
+		: [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc");
+}
+
+static inline void __atomic64_add_const(long val, long *ptr)
+{
+	asm volatile(
+		"	agsi	%[ptr],%[val]\n"
+		: [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc");
+}
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+#define __ATOMIC_OP(op_name, op_string)					\
+static inline int op_name(int val, int *ptr)				\
+{									\
+	int old, new;							\
+									\
+	asm volatile(							\
+		"0:	lr	%[new],%[old]\n"			\
+		op_string "	%[new],%[val]\n"			\
+		"	cs	%[old],%[new],%[ptr]\n"			\
+		"	jl	0b"					\
+		: [old] "=d" (old), [new] "=&d" (new), [ptr] "+Q" (*ptr)\
+		: [val] "d" (val), "0" (*ptr) : "cc", "memory");	\
+	return old;							\
+}
+
+#define __ATOMIC_OPS(op_name, op_string)				\
+	__ATOMIC_OP(op_name, op_string)					\
+	__ATOMIC_OP(op_name##_barrier, op_string)
+
+__ATOMIC_OPS(__atomic_add, "ar")
+__ATOMIC_OPS(__atomic_and, "nr")
+__ATOMIC_OPS(__atomic_or,  "or")
+__ATOMIC_OPS(__atomic_xor, "xr")
+
+#undef __ATOMIC_OPS
+
+#define __ATOMIC64_OP(op_name, op_string)				\
+static inline long op_name(long val, long *ptr)			\
+{									\
+	long old, new;							\
+									\
+	asm volatile(							\
+		"0:	lgr	%[new],%[old]\n"			\
+		op_string "	%[new],%[val]\n"			\
+		"	csg	%[old],%[new],%[ptr]\n"			\
+		"	jl	0b"					\
+		: [old] "=d" (old), [new] "=&d" (new), [ptr] "+Q" (*ptr)\
+		: [val] "d" (val), "0" (*ptr) : "cc", "memory");	\
+	return old;							\
+}
+
+#define __ATOMIC64_OPS(op_name, op_string)				\
+	__ATOMIC64_OP(op_name, op_string)				\
+	__ATOMIC64_OP(op_name##_barrier, op_string)
+
+__ATOMIC64_OPS(__atomic64_add, "agr")
+__ATOMIC64_OPS(__atomic64_and, "ngr")
+__ATOMIC64_OPS(__atomic64_or, "ogr")
+__ATOMIC64_OPS(__atomic64_xor, "xgr")
+
+#undef __ATOMIC64_OPS
+
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+static inline int __atomic_cmpxchg(int *ptr, int old, int new)
+{
+	asm volatile(
+		"	cs	%[old],%[new],%[ptr]"
+		: [old] "+d" (old), [ptr] "+Q" (*ptr)
+		: [new] "d" (new) : "cc", "memory");
+	return old;
+}
+
+static inline long __atomic64_cmpxchg(long *ptr, long old, long new)
+{
+	asm volatile(
+		"	csg	%[old],%[new],%[ptr]"
+		: [old] "+d" (old), [ptr] "+Q" (*ptr)
+		: [new] "d" (new) : "cc", "memory");
+	return old;
+}
+
+#endif /* __ARCH_S390_ATOMIC_OPS__ */
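The new header gives every operation a relaxed and a _barrier flavor; atomic.h above and bitops.h below are rebuilt on top of it. An illustrative caller (the wrapper function is hypothetical, the helper names are the ones defined above):

	/* Test-and-set style use: the fully ordered variant returns the
	 * previous value, so the caller can inspect the old bit. */
	static inline int example_test_and_set(long *word, long mask)
	{
		return (__atomic64_or_barrier(mask, word) & mask) != 0;
	}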
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 8043f10da6b5..d92047da5ccb 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -42,57 +42,9 @@
 
 #include <linux/typecheck.h>
 #include <linux/compiler.h>
+#include <asm/atomic_ops.h>
 #include <asm/barrier.h>
 
-#define __BITOPS_NO_BARRIER	"\n"
-
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
-
-#define __BITOPS_OR		"laog"
-#define __BITOPS_AND		"lang"
-#define __BITOPS_XOR		"laxg"
-#define __BITOPS_BARRIER	"bcr	14,0\n"
-
-#define __BITOPS_LOOP(__addr, __val, __op_string, __barrier)	\
-({								\
-	unsigned long __old;					\
-								\
-	typecheck(unsigned long *, (__addr));			\
-	asm volatile(						\
-		__op_string "	%0,%2,%1\n"			\
-		__barrier					\
-		: "=d" (__old),	"+Q" (*(__addr))		\
-		: "d" (__val)					\
-		: "cc", "memory");				\
-	__old;							\
-})
-
-#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
-#define __BITOPS_OR		"ogr"
-#define __BITOPS_AND		"ngr"
-#define __BITOPS_XOR		"xgr"
-#define __BITOPS_BARRIER	"\n"
-
-#define __BITOPS_LOOP(__addr, __val, __op_string, __barrier)	\
-({								\
-	unsigned long __old, __new;				\
-								\
-	typecheck(unsigned long *, (__addr));			\
-	asm volatile(						\
-		"	lg	%0,%2\n"			\
-		"0:	lgr	%1,%0\n"			\
-		__op_string "	%1,%3\n"			\
-		"	csg	%0,%1,%2\n"			\
-		"	jl	0b"				\
-		: "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\
-		: "d" (__val)					\
-		: "cc", "memory");				\
-	__old;							\
-})
-
-#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
 #define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)
 
 static inline unsigned long *
@@ -128,7 +80,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *ptr)
 	}
 #endif
 	mask = 1UL << (nr & (BITS_PER_LONG - 1));
-	__BITOPS_LOOP(addr, mask, __BITOPS_OR, __BITOPS_NO_BARRIER);
+	__atomic64_or(mask, addr);
 }
 
 static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
@@ -149,7 +101,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
 	}
 #endif
 	mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
-	__BITOPS_LOOP(addr, mask, __BITOPS_AND, __BITOPS_NO_BARRIER);
+	__atomic64_and(mask, addr);
 }
 
 static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
@@ -170,7 +122,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
 	}
 #endif
 	mask = 1UL << (nr & (BITS_PER_LONG - 1));
-	__BITOPS_LOOP(addr, mask, __BITOPS_XOR, __BITOPS_NO_BARRIER);
+	__atomic64_xor(mask, addr);
 }
 
 static inline int
@@ -180,7 +132,7 @@ test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
 	unsigned long old, mask;
 
 	mask = 1UL << (nr & (BITS_PER_LONG - 1));
-	old = __BITOPS_LOOP(addr, mask, __BITOPS_OR, __BITOPS_BARRIER);
+	old = __atomic64_or_barrier(mask, addr);
 	return (old & mask) != 0;
 }
 
@@ -191,7 +143,7 @@ test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
 	unsigned long old, mask;
 
 	mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
-	old = __BITOPS_LOOP(addr, mask, __BITOPS_AND, __BITOPS_BARRIER);
+	old = __atomic64_and_barrier(mask, addr);
 	return (old & ~mask) != 0;
 }
 
@@ -202,7 +154,7 @@ test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
 	unsigned long old, mask;
 
 	mask = 1UL << (nr & (BITS_PER_LONG - 1));
-	old = __BITOPS_LOOP(addr, mask, __BITOPS_XOR, __BITOPS_BARRIER);
+	old = __atomic64_xor_barrier(mask, addr);
 	return (old & mask) != 0;
 }
 
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
index 03516476127b..b69d8bc231a5 100644
--- a/arch/s390/include/asm/cpu_mf.h
+++ b/arch/s390/include/asm/cpu_mf.h
@@ -104,7 +104,8 @@ struct hws_basic_entry {
 	unsigned int P:1;		/* 28 PSW Problem state */
 	unsigned int AS:2;		/* 29-30 PSW address-space control */
 	unsigned int I:1;		/* 31 entry valid or invalid */
-	unsigned int:16;
+	unsigned int CL:2;		/* 32-33 Configuration Level */
+	unsigned int:14;
 	unsigned int prim_asn:16;	/* primary ASN */
 	unsigned long long ia;		/* Instruction Address */
 	unsigned long long gpp;		/* Guest Program Parameter */
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 1736c7d3c94c..f4381e1fb19e 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -193,7 +193,7 @@ extern char elf_platform[];
 do {								\
 	set_personality(PER_LINUX |				\
 		(current->personality & (~PER_MASK)));		\
-	current_thread_info()->sys_call_table =			\
+	current->thread.sys_call_table =			\
 		(unsigned long) &sys_call_table;		\
 } while (0)
 #else /* CONFIG_COMPAT */
@@ -204,11 +204,11 @@ do { \
 	(current->personality & ~PER_MASK));			\
 	if ((ex).e_ident[EI_CLASS] == ELFCLASS32) {		\
 		set_thread_flag(TIF_31BIT);			\
-		current_thread_info()->sys_call_table =		\
+		current->thread.sys_call_table =		\
 			(unsigned long) &sys_call_table_emu;	\
 	} else {						\
 		clear_thread_flag(TIF_31BIT);			\
-		current_thread_info()->sys_call_table =		\
+		current->thread.sys_call_table =		\
 			(unsigned long) &sys_call_table;	\
 	}							\
 } while (0)
diff --git a/arch/s390/include/asm/facilities_src.h b/arch/s390/include/asm/facilities_src.h
deleted file mode 100644
index 3b758f66e48b..000000000000
--- a/arch/s390/include/asm/facilities_src.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright IBM Corp. 2015
- */
-
-#ifndef S390_GEN_FACILITIES_C
-#error "This file can only be included by gen_facilities.c"
-#endif
-
-#include <linux/kconfig.h>
-
-struct facility_def {
-	char *name;
-	int *bits;
-};
-
-static struct facility_def facility_defs[] = {
-	{
-		/*
-		 * FACILITIES_ALS contains the list of facilities that are
-		 * required to run a kernel that is compiled e.g. with
-		 * -march=<machine>.
-		 */
-		.name = "FACILITIES_ALS",
-		.bits = (int[]){
-#ifdef CONFIG_HAVE_MARCH_Z900_FEATURES
-			0,  /* N3 instructions */
-			1,  /* z/Arch mode installed */
-#endif
-#ifdef CONFIG_HAVE_MARCH_Z990_FEATURES
-			18, /* long displacement facility */
-#endif
-#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
-			7,  /* stfle */
-			17, /* message security assist */
-			21, /* extended-immediate facility */
-			25, /* store clock fast */
-#endif
-#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
-			27, /* mvcos */
-			32, /* compare and swap and store */
-			33, /* compare and swap and store 2 */
-			34, /* general extension facility */
-			35, /* execute extensions */
-#endif
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
-			45, /* fast-BCR, etc. */
-#endif
-#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
-			49, /* misc-instruction-extensions */
-			52, /* interlocked facility 2 */
-#endif
-#ifdef CONFIG_HAVE_MARCH_Z13_FEATURES
-			53, /* load-and-zero-rightmost-byte, etc. */
-#endif
-			-1 /* END */
-		}
-	},
-	{
-		.name = "FACILITIES_KVM",
-		.bits = (int[]){
-			0,  /* N3 instructions */
-			1,  /* z/Arch mode installed */
-			2,  /* z/Arch mode active */
-			3,  /* DAT-enhancement */
-			4,  /* idte segment table */
-			5,  /* idte region table */
-			6,  /* ASN-and-LX reuse */
-			7,  /* stfle */
-			8,  /* enhanced-DAT 1 */
-			9,  /* sense-running-status */
-			10, /* conditional sske */
-			13, /* ipte-range */
-			14, /* nonquiescing key-setting */
-			73, /* transactional execution */
-			75, /* access-exception-fetch/store indication */
-			76, /* msa extension 3 */
-			77, /* msa extension 4 */
-			78, /* enhanced-DAT 2 */
-			-1 /* END */
-		}
-	},
-};
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h
index 4da22b2f0521..edb5161df7e2 100644
--- a/arch/s390/include/asm/ipl.h
+++ b/arch/s390/include/asm/ipl.h
@@ -97,7 +97,7 @@ void __init save_area_add_vxrs(struct save_area *, __vector128 *vxrs);
 extern void do_reipl(void);
 extern void do_halt(void);
 extern void do_poff(void);
-extern void ipl_save_parameters(void);
+extern void ipl_verify_parameters(void);
 extern void ipl_update_parameters(void);
 extern size_t append_ipl_vmparm(char *, size_t);
 extern size_t append_ipl_scpdata(char *, size_t);
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 7b93b78f423c..9bfad2ad6312 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -95,7 +95,7 @@ struct lowcore {
 
 	/* Current process. */
 	__u64	current_task;			/* 0x0310 */
-	__u64	thread_info;			/* 0x0318 */
+	__u8	pad_0x318[0x320-0x318];		/* 0x0318 */
 	__u64	kernel_stack;			/* 0x0320 */
 
 	/* Interrupt, panic and restart stack. */
@@ -126,7 +126,8 @@ struct lowcore {
 	__u64	percpu_offset;			/* 0x0378 */
 	__u64	vdso_per_cpu_data;		/* 0x0380 */
 	__u64	machine_flags;			/* 0x0388 */
-	__u8	pad_0x0390[0x0398-0x0390];	/* 0x0390 */
+	__u32	preempt_count;			/* 0x0390 */
+	__u8	pad_0x0394[0x0398-0x0394];	/* 0x0394 */
 	__u64	gmap;				/* 0x0398 */
 	__u32	spinlock_lockval;		/* 0x03a0 */
 	__u32	fpu_flags;			/* 0x03a4 */
diff --git a/arch/s390/include/asm/pci_clp.h b/arch/s390/include/asm/pci_clp.h
index e75c64cbcf08..c232ef9711f5 100644
--- a/arch/s390/include/asm/pci_clp.h
+++ b/arch/s390/include/asm/pci_clp.h
@@ -46,6 +46,8 @@ struct clp_fh_list_entry {
 #define CLP_UTIL_STR_LEN	64
 #define CLP_PFIP_NR_SEGMENTS	4
 
+extern bool zpci_unique_uid;
+
 /* List PCI functions request */
 struct clp_req_list_pci {
 	struct clp_req_hdr hdr;
@@ -59,7 +61,8 @@ struct clp_rsp_list_pci {
 	u64 resume_token;
 	u32 reserved2;
 	u16 max_fn;
-	u8 reserved3;
+	u8			: 7;
+	u8 uid_checking		: 1;
 	u8 entry_size;
 	struct clp_fh_list_entry fh_list[CLP_FH_LIST_NR_ENTRIES];
 } __packed;
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index f4eb9843eed4..166f703dad7c 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -27,17 +27,17 @@ extern int page_table_allocate_pgste;
 
 static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
 {
-	typedef struct { char _[n]; } addrtype;
-
-	*s = val;
-	n = (n / 256) - 1;
-	asm volatile(
-		"	mvc	8(248,%0),0(%0)\n"
-		"0:	mvc	256(256,%0),0(%0)\n"
-		"	la	%0,256(%0)\n"
-		"	brct	%1,0b\n"
-		: "+a" (s), "+d" (n), "=m" (*(addrtype *) s)
-		: "m" (*(addrtype *) s));
+	struct addrtype { char _[256]; };
+	int i;
+
+	for (i = 0; i < n; i += 256) {
+		*s = val;
+		asm volatile(
+			"mvc	8(248,%[s]),0(%[s])\n"
+			: "+m" (*(struct addrtype *) s)
+			: [s] "a" (s));
+		s += 256 / sizeof(long);
+	}
 }
 
 static inline void crst_table_init(unsigned long *crst, unsigned long entry)
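The clear_table() rewrite above removes a variable-length array in a structure (the old "char _[n]" typedef, a VLAIS, which is a GCC extension clang rejects); this belongs to the "make the s390 headers usable with CLANG" work from the pull message. Functionally it still fills n bytes with val. A plain-C equivalent, as a sketch without the mvc propagation trick:

	static inline void example_clear_table(unsigned long *s,
					       unsigned long val, size_t n)
	{
		size_t i;

		for (i = 0; i < n / sizeof(unsigned long); i++)
			s[i] = val;
	}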
diff --git a/arch/s390/include/asm/preempt.h b/arch/s390/include/asm/preempt.h
new file mode 100644
index 000000000000..b0776b2c8dcf
--- /dev/null
+++ b/arch/s390/include/asm/preempt.h
@@ -0,0 +1,137 @@
+#ifndef __ASM_PREEMPT_H
+#define __ASM_PREEMPT_H
+
+#include <asm/current.h>
+#include <linux/thread_info.h>
+#include <asm/atomic_ops.h>
+
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define PREEMPT_ENABLED	(0 + PREEMPT_NEED_RESCHED)
+
+static inline int preempt_count(void)
+{
+	return READ_ONCE(S390_lowcore.preempt_count) & ~PREEMPT_NEED_RESCHED;
+}
+
+static inline void preempt_count_set(int pc)
+{
+	int old, new;
+
+	do {
+		old = READ_ONCE(S390_lowcore.preempt_count);
+		new = (old & PREEMPT_NEED_RESCHED) |
+			(pc & ~PREEMPT_NEED_RESCHED);
+	} while (__atomic_cmpxchg(&S390_lowcore.preempt_count,
+				  old, new) != old);
+}
+
+#define init_task_preempt_count(p)	do { } while (0)
+
+#define init_idle_preempt_count(p, cpu)	do { \
+	S390_lowcore.preempt_count = PREEMPT_ENABLED; \
+} while (0)
+
+static inline void set_preempt_need_resched(void)
+{
+	__atomic_and(~PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
+}
+
+static inline void clear_preempt_need_resched(void)
+{
+	__atomic_or(PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
+}
+
+static inline bool test_preempt_need_resched(void)
+{
+	return !(READ_ONCE(S390_lowcore.preempt_count) & PREEMPT_NEED_RESCHED);
+}
+
+static inline void __preempt_count_add(int val)
+{
+	if (__builtin_constant_p(val) && (val >= -128) && (val <= 127))
+		__atomic_add_const(val, &S390_lowcore.preempt_count);
+	else
+		__atomic_add(val, &S390_lowcore.preempt_count);
+}
+
+static inline void __preempt_count_sub(int val)
+{
+	__preempt_count_add(-val);
+}
+
+static inline bool __preempt_count_dec_and_test(void)
+{
+	return __atomic_add(-1, &S390_lowcore.preempt_count) == 1;
+}
+
+static inline bool should_resched(int preempt_offset)
+{
+	return unlikely(READ_ONCE(S390_lowcore.preempt_count) ==
+			preempt_offset);
+}
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+#define PREEMPT_ENABLED	(0)
+
+static inline int preempt_count(void)
+{
+	return READ_ONCE(S390_lowcore.preempt_count);
+}
+
+static inline void preempt_count_set(int pc)
+{
+	S390_lowcore.preempt_count = pc;
+}
+
+#define init_task_preempt_count(p)	do { } while (0)
+
+#define init_idle_preempt_count(p, cpu)	do { \
+	S390_lowcore.preempt_count = PREEMPT_ENABLED; \
+} while (0)
+
+static inline void set_preempt_need_resched(void)
+{
+}
+
+static inline void clear_preempt_need_resched(void)
+{
+}
+
+static inline bool test_preempt_need_resched(void)
+{
+	return false;
+}
+
+static inline void __preempt_count_add(int val)
+{
+	S390_lowcore.preempt_count += val;
+}
+
+static inline void __preempt_count_sub(int val)
+{
+	S390_lowcore.preempt_count -= val;
+}
+
+static inline bool __preempt_count_dec_and_test(void)
+{
+	return !--S390_lowcore.preempt_count && tif_need_resched();
+}
+
+static inline bool should_resched(int preempt_offset)
+{
+	return unlikely(preempt_count() == preempt_offset &&
+			tif_need_resched());
+}
+
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+#ifdef CONFIG_PREEMPT
+extern asmlinkage void preempt_schedule(void);
+#define __preempt_schedule() preempt_schedule()
+extern asmlinkage void preempt_schedule_notrace(void);
+#define __preempt_schedule_notrace() preempt_schedule_notrace()
+#endif /* CONFIG_PREEMPT */
+
+#endif /* __ASM_PREEMPT_H */
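On z196 and newer the preempt count lives in lowcore and PREEMPT_NEED_RESCHED is folded in as an inverted bit: set_preempt_need_resched() clears it, so "preemption enabled and reschedule pending" reads as a count of exactly zero and can be tested with a single load (see should_resched() above). An illustrative check (the helper is hypothetical):

	static inline bool example_resched_now(void)
	{
		/* 0 == preemption enabled + need-resched bit cleared */
		return READ_ONCE(S390_lowcore.preempt_count) == 0;
	}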
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 9d3a21aedc97..6bca916a5ba0 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -110,14 +110,20 @@ typedef struct {
 struct thread_struct {
 	unsigned int  acrs[NUM_ACRS];
 	unsigned long ksp;			/* kernel stack pointer */
+	unsigned long user_timer;		/* task cputime in user space */
+	unsigned long system_timer;		/* task cputime in kernel space */
+	unsigned long sys_call_table;		/* system call table address */
 	mm_segment_t mm_segment;
 	unsigned long gmap_addr;		/* address of last gmap fault. */
 	unsigned int gmap_write_flag;		/* gmap fault write indication */
 	unsigned int gmap_int_code;		/* int code of last gmap fault */
 	unsigned int gmap_pfault;		/* signal of a pending guest pfault */
+	/* Per-thread information related to debugging */
 	struct per_regs per_user;		/* User specified PER registers */
 	struct per_event per_event;		/* Cause of the last PER trap */
 	unsigned long per_flags;		/* Flags to control debug behavior */
+	unsigned int system_call;		/* system call number in signal */
+	unsigned long last_break;		/* last breaking-event-address. */
 	/* pfault_wait is used to block the process on a pfault event */
 	unsigned long pfault_wait;
 	struct list_head list;
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index 2ad9c204b1a2..8db92a5b3bf1 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -101,7 +101,8 @@ struct zpci_report_error_header {
 	u8 data[0];	/* Subsequent Data passed verbatim to SCLP ET 24 */
 } __packed;
 
-int sclp_get_core_info(struct sclp_core_info *info);
+int _sclp_get_core_info_early(struct sclp_core_info *info);
+int _sclp_get_core_info(struct sclp_core_info *info);
 int sclp_core_configure(u8 core);
 int sclp_core_deconfigure(u8 core);
 int sclp_sdias_blk_count(void);
@@ -119,4 +120,11 @@ void sclp_early_detect(void);
 void _sclp_print_early(const char *);
 void sclp_ocf_cpc_name_copy(char *dst);
 
+static inline int sclp_get_core_info(struct sclp_core_info *info, int early)
+{
+	if (early)
+		return _sclp_get_core_info_early(info);
+	return _sclp_get_core_info(info);
+}
+
 #endif /* _ASM_S390_SCLP_H */
diff --git a/arch/s390/include/asm/scsw.h b/arch/s390/include/asm/scsw.h
index 4af99cdaddf5..17a7904f001a 100644
--- a/arch/s390/include/asm/scsw.h
+++ b/arch/s390/include/asm/scsw.h
@@ -96,7 +96,8 @@ struct tm_scsw {
 	u32 dstat:8;
 	u32 cstat:8;
 	u32 fcxs:8;
-	u32 schxs:8;
+	u32 ifob:1;
+	u32 sesq:7;
 } __attribute__ ((packed));
 
 /**
@@ -177,6 +178,9 @@ union scsw {
 #define SCHN_STAT_INTF_CTRL_CHK	 0x02
 #define SCHN_STAT_CHAIN_CHECK		 0x01
 
+#define SCSW_SESQ_DEV_NOFCX	3
+#define SCSW_SESQ_PATH_NOFCX	4
+
 /*
  * architectured values for first sense byte
  */
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index 0cc383b9be7f..3deb134587b7 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -36,6 +36,7 @@ extern void smp_yield_cpu(int cpu);
 extern void smp_cpu_set_polarization(int cpu, int val);
 extern int smp_cpu_get_polarization(int cpu);
 extern void smp_fill_possible_mask(void);
+extern void smp_detect_cpus(void);
 
 #else /* CONFIG_SMP */
 
@@ -56,6 +57,7 @@ static inline int smp_store_status(int cpu) { return 0; }
 static inline int smp_vcpu_scheduled(int cpu) { return 1; }
 static inline void smp_yield_cpu(int cpu) { }
 static inline void smp_fill_possible_mask(void) { }
+static inline void smp_detect_cpus(void) { }
 
 #endif /* CONFIG_SMP */
 
@@ -69,6 +71,12 @@ static inline void smp_stop_cpu(void)
 	}
 }
 
+/* Return thread 0 CPU number as base CPU */
+static inline int smp_get_base_cpu(int cpu)
+{
+	return cpu - (cpu % (smp_cpu_mtid + 1));
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 extern int smp_rescan_cpus(void);
 extern void __noreturn cpu_die(void);
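smp_get_base_cpu() relies on the logical CPUs of one core being numbered consecutively under SMT: smp_cpu_mtid is the highest thread id, so rounding down to a multiple of (smp_cpu_mtid + 1) yields thread 0 of the core. For example (values illustrative):

	/* with smp_cpu_mtid == 1, i.e. two threads per core: */
	int base = smp_get_base_cpu(5);	/* -> 4, thread 0 of cpu 5's core */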
diff --git a/arch/s390/include/asm/string.h b/arch/s390/include/asm/string.h
index 8662f5c8e17f..15a3c005c274 100644
--- a/arch/s390/include/asm/string.h
+++ b/arch/s390/include/asm/string.h
@@ -14,6 +14,7 @@
 #define __HAVE_ARCH_MEMCHR	/* inline & arch function */
 #define __HAVE_ARCH_MEMCMP	/* arch function */
 #define __HAVE_ARCH_MEMCPY	/* gcc builtin & arch function */
+#define __HAVE_ARCH_MEMMOVE	/* gcc builtin & arch function */
 #define __HAVE_ARCH_MEMSCAN	/* inline & arch function */
 #define __HAVE_ARCH_MEMSET	/* gcc builtin & arch function */
 #define __HAVE_ARCH_STRCAT	/* inline & arch function */
@@ -32,6 +33,7 @@
 extern int memcmp(const void *, const void *, size_t);
 extern void *memcpy(void *, const void *, size_t);
 extern void *memset(void *, int, size_t);
+extern void *memmove(void *, const void *, size_t);
 extern int strcmp(const char *,const char *);
 extern size_t strlcat(char *, const char *, size_t);
 extern size_t strlcpy(char *, const char *, size_t);
@@ -40,7 +42,6 @@ extern char *strncpy(char *, const char *, size_t);
 extern char *strncpy(char *, const char *, size_t);
 extern char *strrchr(const char *, int);
 extern char *strstr(const char *, const char *);
 
-#undef __HAVE_ARCH_MEMMOVE
 #undef __HAVE_ARCH_STRCHR
 #undef __HAVE_ARCH_STRNCHR
 #undef __HAVE_ARCH_STRNCMP
diff --git a/arch/s390/include/asm/sysinfo.h b/arch/s390/include/asm/sysinfo.h
index 2728114d5484..229326c942c7 100644
--- a/arch/s390/include/asm/sysinfo.h
+++ b/arch/s390/include/asm/sysinfo.h
@@ -107,6 +107,11 @@ struct sysinfo_2_2_2 {
 	char reserved_3[5];
 	unsigned short cpus_dedicated;
 	unsigned short cpus_shared;
+	char reserved_4[3];
+	unsigned char vsne;
+	uuid_be uuid;
+	char reserved_5[160];
+	char ext_name[256];
 };
 
 #define LPAR_CHAR_DEDICATED	(1 << 7)
@@ -127,7 +132,7 @@ struct sysinfo_3_2_2 {
127 unsigned int caf; 132 unsigned int caf;
128 char cpi[16]; 133 char cpi[16];
129 char reserved_1[3]; 134 char reserved_1[3];
130 char ext_name_encoding; 135 unsigned char evmne;
131 unsigned int reserved_2; 136 unsigned int reserved_2;
132 uuid_be uuid; 137 uuid_be uuid;
133 } vm[8]; 138 } vm[8];
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index f15c0398c363..a5b54a445eb8 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -12,10 +12,10 @@
 /*
  * Size of kernel stack for each process
  */
-#define THREAD_ORDER 2
+#define THREAD_SIZE_ORDER 2
 #define ASYNC_ORDER  2
 
-#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
+#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
 #define ASYNC_SIZE  (PAGE_SIZE << ASYNC_ORDER)
 
 #ifndef __ASSEMBLY__
@@ -30,15 +30,7 @@
  * - if the contents of this structure are changed, the assembly constants must also be changed
  */
 struct thread_info {
-	struct task_struct *task;	/* main task structure */
 	unsigned long flags;		/* low level flags */
-	unsigned long sys_call_table;	/* System call table address */
-	unsigned int cpu;		/* current CPU */
-	int preempt_count;		/* 0 => preemptable, <0 => BUG */
-	unsigned int system_call;
-	__u64 user_timer;
-	__u64 system_timer;
-	unsigned long last_break;	/* last breaking-event-address. */
 };
 
 /*
@@ -46,26 +38,14 @@ struct thread_info {
  */
 #define INIT_THREAD_INFO(tsk)		\
 {					\
-	.task		= &tsk,		\
 	.flags		= 0,		\
-	.cpu		= 0,		\
-	.preempt_count	= INIT_PREEMPT_COUNT,	\
 }
 
-#define init_thread_info	(init_thread_union.thread_info)
 #define init_stack		(init_thread_union.stack)
 
-/* how to get the thread information struct from C */
-static inline struct thread_info *current_thread_info(void)
-{
-	return (struct thread_info *) S390_lowcore.thread_info;
-}
-
 void arch_release_task_struct(struct task_struct *tsk);
 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
 
-#define THREAD_SIZE_ORDER THREAD_ORDER
-
 #endif
 
 /*
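
Note: after this hunk, thread_info carries nothing but flags; everything else migrated to thread_struct or the lowcore, and thread_info itself now lives at offset 0 of task_struct (the generic THREAD_INFO_IN_TASK layout, as I read this series), which is why the arch-private current_thread_info() and the init_thread_info define can go away. A minimal layout sketch with stand-in types (illustration only; real task_struct fields elided):

#include <stddef.h>
#include <assert.h>

struct thread_info {
	unsigned long flags;		/* low level flags */
};

struct task_struct {
	struct thread_info thread_info;	/* must stay the first member */
	void *stack;			/* kernel stack of the task */
};

int main(void)
{
	/* the entry code treats a task pointer as a thread_info pointer;
	 * that is only sound while thread_info sits at offset 0 */
	assert(offsetof(struct task_struct, thread_info) == 0);
	return 0;
}
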
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 0bb08f341c09..de8298800722 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -52,11 +52,9 @@ static inline void store_clock_comparator(__u64 *time)
 
 void clock_comparator_work(void);
 
-void __init ptff_init(void);
+void __init time_early_init(void);
 
 extern unsigned char ptff_function_mask[16];
-extern unsigned long lpar_offset;
-extern unsigned long initial_leap_seconds;
 
 /* Function codes for the ptff instruction. */
 #define PTFF_QAF	0x00	/* query available functions */
@@ -100,21 +98,28 @@ struct ptff_qui {
 	unsigned int pad_0x5c[41];
 } __packed;
 
-static inline int ptff(void *ptff_block, size_t len, unsigned int func)
-{
-	typedef struct { char _[len]; } addrtype;
-	register unsigned int reg0 asm("0") = func;
-	register unsigned long reg1 asm("1") = (unsigned long) ptff_block;
-	int rc;
-
-	asm volatile(
-		"	.word 0x0104\n"
-		"	ipm %0\n"
-		"	srl %0,28\n"
-		: "=d" (rc), "+m" (*(addrtype *) ptff_block)
-		: "d" (reg0), "d" (reg1) : "cc");
-	return rc;
-}
+/*
+ * ptff - Perform timing facility function
+ * @ptff_block:	Pointer to ptff parameter block
+ * @len:	Length of parameter block
+ * @func:	Function code
+ * Returns:	Condition code (0 on success)
+ */
+#define ptff(ptff_block, len, func)					\
+({									\
+	struct addrtype { char _[len]; };				\
+	register unsigned int reg0 asm("0") = func;			\
+	register unsigned long reg1 asm("1") = (unsigned long) (ptff_block);\
+	int rc;								\
+									\
+	asm volatile(							\
+		"	.word 0x0104\n"					\
+		"	ipm %0\n"					\
+		"	srl %0,28\n"					\
+		: "=d" (rc), "+m" (*(struct addrtype *) reg1)		\
+		: "d" (reg0), "d" (reg1) : "cc");			\
+	rc;								\
+})
 
 static inline unsigned long long local_tick_disable(void)
 {
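
Note: the rewrite above trades an inline function for a macro because the function's local typedef of struct { char _[len]; } was a variable length array in a struct (VLAIS), a GNU extension that clang rejects. In the macro, len expands to a compile-time constant at every call site (a sizeof of the parameter block), so struct addrtype has a fixed size and the "+m" operand still describes exactly which bytes the instruction may read and write. A stand-alone sketch of the same pattern, with an empty asm standing in for the real opcode (assumption-labeled, not kernel code):

#define touch_block(block, len)					\
({								\
	/* fixed-size type: len must be compile-time constant */\
	struct addrtype { char _[len]; };			\
	/* "+m" marks [block, block + len) read and written */	\
	asm volatile("" : "+m" (*(struct addrtype *) (block)));	\
})

int main(void)
{
	char buf[32];

	touch_block(buf, sizeof(buf));
	return 0;
}
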
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index f15f5571ca2b..fa1bfce10370 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -22,21 +22,22 @@ struct cpu_topology_s390 {
 	cpumask_t drawer_mask;
 };
 
-DECLARE_PER_CPU(struct cpu_topology_s390, cpu_topology);
-
-#define topology_physical_package_id(cpu) (per_cpu(cpu_topology, cpu).socket_id)
-#define topology_thread_id(cpu) (per_cpu(cpu_topology, cpu).thread_id)
-#define topology_sibling_cpumask(cpu) \
-	(&per_cpu(cpu_topology, cpu).thread_mask)
-#define topology_core_id(cpu) (per_cpu(cpu_topology, cpu).core_id)
-#define topology_core_cpumask(cpu) (&per_cpu(cpu_topology, cpu).core_mask)
-#define topology_book_id(cpu) (per_cpu(cpu_topology, cpu).book_id)
-#define topology_book_cpumask(cpu) (&per_cpu(cpu_topology, cpu).book_mask)
-#define topology_drawer_id(cpu) (per_cpu(cpu_topology, cpu).drawer_id)
-#define topology_drawer_cpumask(cpu) (&per_cpu(cpu_topology, cpu).drawer_mask)
+extern struct cpu_topology_s390 cpu_topology[NR_CPUS];
+extern cpumask_t cpus_with_topology;
+
+#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)
+#define topology_thread_id(cpu) (cpu_topology[cpu].thread_id)
+#define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_mask)
+#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
+#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_mask)
+#define topology_book_id(cpu) (cpu_topology[cpu].book_id)
+#define topology_book_cpumask(cpu) (&cpu_topology[cpu].book_mask)
+#define topology_drawer_id(cpu) (cpu_topology[cpu].drawer_id)
+#define topology_drawer_cpumask(cpu) (&cpu_topology[cpu].drawer_mask)
 
 #define mc_capable() 1
 
+void topology_init_early(void);
 int topology_cpu_init(struct cpu *);
 int topology_set_cpu_management(int fc);
 void topology_schedule_update(void);
@@ -46,6 +47,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu);
 
 #else /* CONFIG_SCHED_TOPOLOGY */
 
+static inline void topology_init_early(void) { }
 static inline void topology_schedule_update(void) { }
 static inline int topology_cpu_init(struct cpu *cpu) { return 0; }
 static inline void topology_expect_change(void) { }
@@ -65,7 +67,7 @@ static inline void topology_expect_change(void) { }
 #define cpu_to_node cpu_to_node
 static inline int cpu_to_node(int cpu)
 {
-	return per_cpu(cpu_topology, cpu).node_id;
+	return cpu_topology[cpu].node_id;
 }
 
 /* Returns a pointer to the cpumask of CPUs on node 'node'. */
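
Note: replacing the per-cpu variable with a flat NR_CPUS array keeps every accessor macro source-compatible while letting the early boot path (topology_init_early(), called from setup_arch() in a later hunk) fill the table before the per-cpu machinery matters; that is my reading of the hunk, not a quoted rationale. Reduced sketch of the storage change (one field, illustrative values):

struct cpu_topology_s390 { int core_id; };	/* other fields elided */

#define NR_CPUS 8				/* assumed value */
struct cpu_topology_s390 cpu_topology[NR_CPUS];

/* the accessor keeps its name; only the storage behind it moved */
#define topology_core_id(cpu) (cpu_topology[cpu].core_id)

int main(void)
{
	cpu_topology[3].core_id = 1;
	return topology_core_id(3) == 1 ? 0 : 1;
}
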
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 52d7c8709279..f82b04e85a21 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -37,14 +37,14 @@
 #define get_ds()	(KERNEL_DS)
 #define get_fs()	(current->thread.mm_segment)
 
 #define set_fs(x) \
-({ \
+{ \
 	unsigned long __pto; \
 	current->thread.mm_segment = (x); \
 	__pto = current->thread.mm_segment.ar4 ? \
 		S390_lowcore.user_asce : S390_lowcore.kernel_asce; \
 	__ctl_load(__pto, 7, 7); \
-})
+}
 
 #define segment_eq(a,b) ((a).ar4 == (b).ar4)
 
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
index d0a2dbf2433d..88bdc477a843 100644
--- a/arch/s390/include/asm/vdso.h
+++ b/arch/s390/include/asm/vdso.h
@@ -33,6 +33,8 @@ struct vdso_data {
 	__u32 ectg_available;		/* ECTG instruction present	0x58 */
 	__u32 tk_mult;			/* Mult. used for xtime_nsec	0x5c */
 	__u32 tk_shift;			/* Shift used for xtime_nsec	0x60 */
+	__u32 ts_dir;			/* TOD steering direction	0x64 */
+	__u64 ts_end;			/* TOD steering end		0x68 */
 };
 
 struct vdso_per_cpu_data {
diff --git a/arch/s390/include/uapi/asm/Kbuild b/arch/s390/include/uapi/asm/Kbuild
index cc44b09c25fc..bf736e764cb4 100644
--- a/arch/s390/include/uapi/asm/Kbuild
+++ b/arch/s390/include/uapi/asm/Kbuild
@@ -12,6 +12,7 @@ header-y += dasd.h
 header-y += debug.h
 header-y += errno.h
 header-y += fcntl.h
+header-y += hypfs.h
 header-y += ioctl.h
 header-y += ioctls.h
 header-y += ipcbuf.h
@@ -29,16 +30,16 @@ header-y += ptrace.h
 header-y += qeth.h
 header-y += resource.h
 header-y += schid.h
+header-y += sclp_ctl.h
 header-y += sembuf.h
 header-y += setup.h
 header-y += shmbuf.h
+header-y += sie.h
 header-y += sigcontext.h
 header-y += siginfo.h
 header-y += signal.h
 header-y += socket.h
 header-y += sockios.h
-header-y += sclp_ctl.h
-header-y += sie.h
 header-y += stat.h
 header-y += statfs.h
 header-y += swab.h
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 1f0fe98f6db9..36b5101c8606 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -2,20 +2,47 @@
 # Makefile for the linux kernel.
 #
 
-KCOV_INSTRUMENT_early.o := n
-KCOV_INSTRUMENT_sclp.o := n
-KCOV_INSTRUMENT_als.o := n
-
 ifdef CONFIG_FUNCTION_TRACER
-# Don't trace early setup code and tracing code
-CFLAGS_REMOVE_early.o = $(CC_FLAGS_FTRACE)
+
+# Do not trace tracer code
 CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
+
+# Do not trace early setup code
+CFLAGS_REMOVE_als.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_early.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_sclp.o = $(CC_FLAGS_FTRACE)
+
+endif
+
+GCOV_PROFILE_als.o := n
+GCOV_PROFILE_early.o := n
+GCOV_PROFILE_sclp.o := n
+
+KCOV_INSTRUMENT_als.o := n
+KCOV_INSTRUMENT_early.o := n
+KCOV_INSTRUMENT_sclp.o := n
+
+UBSAN_SANITIZE_als.o := n
+UBSAN_SANITIZE_early.o := n
+UBSAN_SANITIZE_sclp.o := n
+
+#
+# Use -march=z900 for sclp.c and als.c to be able to print an error
+# message if the kernel is started on a machine which is too old
+#
+ifneq ($(CC_FLAGS_MARCH),-march=z900)
+CFLAGS_REMOVE_als.o += $(CC_FLAGS_MARCH)
+CFLAGS_als.o += -march=z900
+CFLAGS_REMOVE_sclp.o += $(CC_FLAGS_MARCH)
+CFLAGS_sclp.o += -march=z900
+AFLAGS_REMOVE_head.o += $(CC_FLAGS_MARCH)
+AFLAGS_head.o += -march=z900
 endif
 
 #
 # Passing null pointers is ok for smp code, since we access the lowcore here.
 #
 CFLAGS_smp.o := -Wno-nonnull
 
 #
 # Disable tailcall optimizations for stack / callchain walking functions
@@ -30,27 +57,7 @@ CFLAGS_dumpstack.o += -fno-optimize-sibling-calls
 #
 CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
 
 CFLAGS_sysinfo.o += -w
-
-#
-# Use -march=z900 for sclp.c and als.c to be able to print an error
-# message if the kernel is started on a machine which is too old
-#
-CFLAGS_REMOVE_sclp.o = $(CC_FLAGS_FTRACE)
-CFLAGS_REMOVE_als.o = $(CC_FLAGS_FTRACE)
-ifneq ($(CC_FLAGS_MARCH),-march=z900)
-CFLAGS_REMOVE_sclp.o += $(CC_FLAGS_MARCH)
-CFLAGS_sclp.o += -march=z900
-CFLAGS_REMOVE_als.o += $(CC_FLAGS_MARCH)
-CFLAGS_als.o += -march=z900
-AFLAGS_REMOVE_head.o += $(CC_FLAGS_MARCH)
-AFLAGS_head.o += -march=z900
-endif
-GCOV_PROFILE_sclp.o := n
-GCOV_PROFILE_als.o := n
-UBSAN_SANITIZE_als.o := n
-UBSAN_SANITIZE_early.o := n
-UBSAN_SANITIZE_sclp.o := n
 
 obj-y	:= traps.o time.o process.o base.o early.o setup.o idle.o vtime.o
 obj-y	+= processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index f3df9e0a5dec..c4b3570ded5b 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -25,12 +25,14 @@
 int main(void)
 {
 	/* task struct offsets */
-	OFFSET(__TASK_thread_info, task_struct, stack);
+	OFFSET(__TASK_stack, task_struct, stack);
 	OFFSET(__TASK_thread, task_struct, thread);
 	OFFSET(__TASK_pid, task_struct, pid);
 	BLANK();
 	/* thread struct offsets */
 	OFFSET(__THREAD_ksp, thread_struct, ksp);
+	OFFSET(__THREAD_sysc_table, thread_struct, sys_call_table);
+	OFFSET(__THREAD_last_break, thread_struct, last_break);
 	OFFSET(__THREAD_FPU_fpc, thread_struct, fpu.fpc);
 	OFFSET(__THREAD_FPU_regs, thread_struct, fpu.regs);
 	OFFSET(__THREAD_per_cause, thread_struct, per_event.cause);
@@ -39,14 +41,7 @@ int main(void)
 	OFFSET(__THREAD_trap_tdb, thread_struct, trap_tdb);
 	BLANK();
 	/* thread info offsets */
-	OFFSET(__TI_task, thread_info, task);
-	OFFSET(__TI_flags, thread_info, flags);
-	OFFSET(__TI_sysc_table, thread_info, sys_call_table);
-	OFFSET(__TI_cpu, thread_info, cpu);
-	OFFSET(__TI_precount, thread_info, preempt_count);
-	OFFSET(__TI_user_timer, thread_info, user_timer);
-	OFFSET(__TI_system_timer, thread_info, system_timer);
-	OFFSET(__TI_last_break, thread_info, last_break);
+	OFFSET(__TI_flags, task_struct, thread_info.flags);
 	BLANK();
 	/* pt_regs offsets */
 	OFFSET(__PT_ARGS, pt_regs, args);
@@ -79,6 +74,8 @@ int main(void)
 	OFFSET(__VDSO_ECTG_OK, vdso_data, ectg_available);
 	OFFSET(__VDSO_TK_MULT, vdso_data, tk_mult);
 	OFFSET(__VDSO_TK_SHIFT, vdso_data, tk_shift);
+	OFFSET(__VDSO_TS_DIR, vdso_data, ts_dir);
+	OFFSET(__VDSO_TS_END, vdso_data, ts_end);
 	OFFSET(__VDSO_ECTG_BASE, vdso_per_cpu_data, ectg_timer_base);
 	OFFSET(__VDSO_ECTG_USER, vdso_per_cpu_data, ectg_user_time);
 	OFFSET(__VDSO_CPU_NR, vdso_per_cpu_data, cpu_nr);
@@ -159,7 +156,6 @@ int main(void)
 	OFFSET(__LC_INT_CLOCK, lowcore, int_clock);
 	OFFSET(__LC_MCCK_CLOCK, lowcore, mcck_clock);
 	OFFSET(__LC_CURRENT, lowcore, current_task);
-	OFFSET(__LC_THREAD_INFO, lowcore, thread_info);
 	OFFSET(__LC_KERNEL_STACK, lowcore, kernel_stack);
 	OFFSET(__LC_ASYNC_STACK, lowcore, async_stack);
 	OFFSET(__LC_PANIC_STACK, lowcore, panic_stack);
@@ -173,6 +169,7 @@ int main(void)
 	OFFSET(__LC_PERCPU_OFFSET, lowcore, percpu_offset);
 	OFFSET(__LC_VDSO_PER_CPU, lowcore, vdso_per_cpu_data);
 	OFFSET(__LC_MACHINE_FLAGS, lowcore, machine_flags);
+	OFFSET(__LC_PREEMPT_COUNT, lowcore, preempt_count);
 	OFFSET(__LC_GMAP, lowcore, gmap);
 	OFFSET(__LC_PASTE, lowcore, paste);
 	/* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */
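
Note: the OFFSET()/BLANK() entries above are not ordinary defines. asm-offsets.c is compiled to assembly and post-processed into a generated header that entry.S includes, which is how __TI_flags can switch from a thread_info offset to a task_struct offset without touching most assembly call sites. Hedged sketch of the mechanism, modeled on include/linux/kbuild.h from memory (the exact marker text emitted may differ):

#include <stddef.h>

/* the compiler folds offsetof() to a constant and embeds it in the
 * generated .s file; a kbuild sed script then turns each marker line
 * into a #define in the generated asm-offsets.h */
#define DEFINE(sym, val) \
	asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))
#define OFFSET(sym, str, mem) \
	DEFINE(sym, offsetof(struct str, mem))

struct thread_info { unsigned long flags; };
struct task_struct {
	struct thread_info thread_info;	/* first member */
	void *stack;
};

int main(void)
{
	OFFSET(__TI_flags, task_struct, thread_info.flags);	/* -> 0 */
	OFFSET(__TASK_stack, task_struct, stack);
	return 0;
}
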
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 4af60374eba0..6f2a6ab13cb5 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -446,7 +446,7 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set,
 		/* set extra registers only for synchronous signals */
 		regs->gprs[4] = regs->int_code & 127;
 		regs->gprs[5] = regs->int_parm_long;
-		regs->gprs[6] = task_thread_info(current)->last_break;
+		regs->gprs[6] = current->thread.last_break;
 	}
 
 	return 0;
@@ -523,7 +523,7 @@ static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set,
 	regs->gprs[2] = ksig->sig;
 	regs->gprs[3] = (__force __u64) &frame->info;
 	regs->gprs[4] = (__force __u64) &frame->uc;
-	regs->gprs[5] = task_thread_info(current)->last_break;
+	regs->gprs[5] = current->thread.last_break;
 	return 0;
 }
 
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 2374c5b46bbc..d038c8cea6cb 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -293,6 +293,7 @@ static noinline __init void setup_lowcore_early(void)
 	psw.addr = (unsigned long) s390_base_pgm_handler;
 	S390_lowcore.program_new_psw = psw;
 	s390_base_pgm_handler_fn = early_pgm_check_handler;
+	S390_lowcore.preempt_count = INIT_PREEMPT_COUNT;
 }
 
 static noinline __init void setup_facility_list(void)
@@ -391,7 +392,49 @@ static int __init cad_init(void)
 }
 early_initcall(cad_init);
 
-static __init void rescue_initrd(void)
+static __init void memmove_early(void *dst, const void *src, size_t n)
+{
+	unsigned long addr;
+	long incr;
+	psw_t old;
+
+	if (!n)
+		return;
+	incr = 1;
+	if (dst > src) {
+		incr = -incr;
+		dst += n - 1;
+		src += n - 1;
+	}
+	old = S390_lowcore.program_new_psw;
+	S390_lowcore.program_new_psw.mask = __extract_psw();
+	asm volatile(
+		"	larl	%[addr],1f\n"
+		"	stg	%[addr],%[psw_pgm_addr]\n"
+		"0:	mvc	0(1,%[dst]),0(%[src])\n"
+		"	agr	%[dst],%[incr]\n"
+		"	agr	%[src],%[incr]\n"
+		"	brctg	%[n],0b\n"
+		"1:\n"
+		: [addr] "=&d" (addr),
+		  [psw_pgm_addr] "=&Q" (S390_lowcore.program_new_psw.addr),
+		  [dst] "+&a" (dst), [src] "+&a" (src), [n] "+d" (n)
+		: [incr] "d" (incr)
+		: "cc", "memory");
+	S390_lowcore.program_new_psw = old;
+}
+
+static __init noinline void ipl_save_parameters(void)
+{
+	void *src, *dst;
+
+	src = (void *)(unsigned long) S390_lowcore.ipl_parmblock_ptr;
+	dst = (void *) IPL_PARMBLOCK_ORIGIN;
+	memmove_early(dst, src, PAGE_SIZE);
+	S390_lowcore.ipl_parmblock_ptr = IPL_PARMBLOCK_ORIGIN;
+}
+
+static __init noinline void rescue_initrd(void)
 {
 #ifdef CONFIG_BLK_DEV_INITRD
 	unsigned long min_initrd_addr = (unsigned long) _end + (4UL << 20);
@@ -405,7 +448,7 @@ static __init void rescue_initrd(void)
 		return;
 	if (INITRD_START >= min_initrd_addr)
 		return;
-	memmove((void *) min_initrd_addr, (void *) INITRD_START, INITRD_SIZE);
+	memmove_early((void *) min_initrd_addr, (void *) INITRD_START, INITRD_SIZE);
 	INITRD_START = min_initrd_addr;
 #endif
 }
@@ -467,7 +510,8 @@ void __init startup_init(void)
 	ipl_save_parameters();
 	rescue_initrd();
 	clear_bss_section();
-	ptff_init();
+	ipl_verify_parameters();
+	time_early_init();
 	init_kernel_storage_key();
 	lockdep_off();
 	setup_lowcore_early();
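
Note: memmove_early() gives the boot path an overlap-safe copy that avoids the regular memmove, which can carry gcov/kcov instrumentation at this early stage; it also points the program-check new PSW past the loop so a fault ends the copy rather than the boot, as far as I read the asm. The direction rule it implements is the classic one; a plain C restatement, byte-wise and without the fault handling (illustration only):

#include <stdio.h>

static void *memmove_sketch(void *dst, const void *src, size_t n)
{
	unsigned char *d = dst;
	const unsigned char *s = src;

	if (d <= s) {			/* dst below src: copy forward */
		while (n--)
			*d++ = *s++;
	} else {			/* dst above src: copy backward */
		d += n;
		s += n;
		while (n--)
			*--d = *--s;
	}
	return dst;
}

int main(void)
{
	char buf[] = "abcdef";

	memmove_sketch(buf + 2, buf, 4);	/* overlapping regions */
	printf("%s\n", buf);			/* prints "ababcd" */
	return 0;
}
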
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 49a30737adde..97298c58b2be 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -42,7 +42,7 @@ __PT_R13 = __PT_GPRS + 104
 __PT_R14 =	__PT_GPRS + 112
 __PT_R15 =	__PT_GPRS + 120
 
-STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
+STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER
 STACK_SIZE  = 1 << STACK_SHIFT
 STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
 
@@ -123,8 +123,14 @@ _PIF_WORK = (_PIF_PER_TRAP)
 
 	.macro LAST_BREAK scratch
 	srag	\scratch,%r10,23
+#ifdef CONFIG_HAVE_MARCH_Z990_FEATURES
 	jz	.+10
-	stg	%r10,__TI_last_break(%r12)
+	stg	%r10,__TASK_thread+__THREAD_last_break(%r12)
+#else
+	jz	.+14
+	lghi	\scratch,__TASK_thread
+	stg	%r10,__THREAD_last_break(\scratch,%r12)
+#endif
 	.endm
 
 	.macro REENABLE_IRQS
@@ -186,14 +192,13 @@ ENTRY(__switch_to)
 	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
 	lgr	%r1,%r2
 	aghi	%r1,__TASK_thread		# thread_struct of prev task
-	lg	%r5,__TASK_thread_info(%r3)	# get thread_info of next
+	lg	%r5,__TASK_stack(%r3)		# start of kernel stack of next
 	stg	%r15,__THREAD_ksp(%r1)		# store kernel stack of prev
 	lgr	%r1,%r3
 	aghi	%r1,__TASK_thread		# thread_struct of next task
 	lgr	%r15,%r5
 	aghi	%r15,STACK_INIT			# end of kernel stack of next
 	stg	%r3,__LC_CURRENT		# store task struct of next
-	stg	%r5,__LC_THREAD_INFO		# store thread info of next
 	stg	%r15,__LC_KERNEL_STACK		# store end of kernel stack
 	lg	%r15,__THREAD_ksp(%r1)		# load kernel stack of next
 	/* c4 is used in guest detection: arch/s390/kernel/perf_cpum_sf.c */
@@ -274,7 +279,7 @@ ENTRY(system_call)
 .Lsysc_stmg:
 	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
 	lg	%r10,__LC_LAST_BREAK
-	lg	%r12,__LC_THREAD_INFO
+	lg	%r12,__LC_CURRENT
 	lghi	%r14,_PIF_SYSCALL
 .Lsysc_per:
 	lg	%r15,__LC_KERNEL_STACK
@@ -288,7 +293,13 @@ ENTRY(system_call)
 	mvc	__PT_INT_CODE(4,%r11),__LC_SVC_ILC
 	stg	%r14,__PT_FLAGS(%r11)
 .Lsysc_do_svc:
-	lg	%r10,__TI_sysc_table(%r12)	# address of system call table
+	# load address of system call table
+#ifdef CONFIG_HAVE_MARCH_Z990_FEATURES
+	lg	%r10,__TASK_thread+__THREAD_sysc_table(%r12)
+#else
+	lghi	%r13,__TASK_thread
+	lg	%r10,__THREAD_sysc_table(%r13,%r12)
+#endif
 	llgh	%r8,__PT_INT_CODE+2(%r11)
 	slag	%r8,%r8,2			# shift and test for svc 0
 	jnz	.Lsysc_nr_ok
@@ -389,7 +400,6 @@ ENTRY(system_call)
 	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL
 	jno	.Lsysc_return
 	lmg	%r2,%r7,__PT_R2(%r11)	# load svc arguments
-	lg	%r10,__TI_sysc_table(%r12)	# address of system call table
 	lghi	%r8,0			# svc 0 returns -ENOSYS
 	llgh	%r1,__PT_INT_CODE+2(%r11)	# load new svc number
 	cghi	%r1,NR_syscalls
@@ -457,7 +467,7 @@ ENTRY(system_call)
 #
 ENTRY(ret_from_fork)
 	la	%r11,STACK_FRAME_OVERHEAD(%r15)
-	lg	%r12,__LC_THREAD_INFO
+	lg	%r12,__LC_CURRENT
 	brasl	%r14,schedule_tail
 	TRACE_IRQS_ON
 	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
@@ -478,7 +488,7 @@ ENTRY(pgm_check_handler)
 	stpt	__LC_SYNC_ENTER_TIMER
 	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
 	lg	%r10,__LC_LAST_BREAK
-	lg	%r12,__LC_THREAD_INFO
+	lg	%r12,__LC_CURRENT
 	larl	%r13,cleanup_critical
 	lmg	%r8,%r9,__LC_PGM_OLD_PSW
 	tmhh	%r8,0x0001		# test problem state bit
@@ -501,7 +511,7 @@ ENTRY(pgm_check_handler)
 2:	LAST_BREAK %r14
 	UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
 	lg	%r15,__LC_KERNEL_STACK
-	lg	%r14,__TI_task(%r12)
+	lgr	%r14,%r12
 	aghi	%r14,__TASK_thread	# pointer to thread_struct
 	lghi	%r13,__LC_PGM_TDB
 	tm	__LC_PGM_ILC+2,0x02	# check for transaction abort
@@ -567,7 +577,7 @@ ENTRY(io_int_handler)
 	stpt	__LC_ASYNC_ENTER_TIMER
 	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
 	lg	%r10,__LC_LAST_BREAK
-	lg	%r12,__LC_THREAD_INFO
+	lg	%r12,__LC_CURRENT
 	larl	%r13,cleanup_critical
 	lmg	%r8,%r9,__LC_IO_OLD_PSW
 	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
@@ -626,7 +636,7 @@ ENTRY(io_int_handler)
 	jo	.Lio_work_user		# yes -> do resched & signal
 #ifdef CONFIG_PREEMPT
 	# check for preemptive scheduling
-	icm	%r0,15,__TI_precount(%r12)
+	icm	%r0,15,__LC_PREEMPT_COUNT
 	jnz	.Lio_restore		# preemption is disabled
 	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
 	jno	.Lio_restore
@@ -741,7 +751,7 @@ ENTRY(ext_int_handler)
 	stpt	__LC_ASYNC_ENTER_TIMER
 	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
 	lg	%r10,__LC_LAST_BREAK
-	lg	%r12,__LC_THREAD_INFO
+	lg	%r12,__LC_CURRENT
 	larl	%r13,cleanup_critical
 	lmg	%r8,%r9,__LC_EXT_OLD_PSW
 	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
@@ -798,13 +808,10 @@ ENTRY(save_fpu_regs)
 	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
 	bor	%r14
 	stfpc	__THREAD_FPU_fpc(%r2)
-.Lsave_fpu_regs_fpc_end:
 	lg	%r3,__THREAD_FPU_regs(%r2)
 	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
 	jz	.Lsave_fpu_regs_fp	  # no -> store FP regs
-.Lsave_fpu_regs_vx_low:
 	VSTM	%v0,%v15,0,%r3		  # vstm 0,15,0(3)
-.Lsave_fpu_regs_vx_high:
 	VSTM	%v16,%v31,256,%r3	  # vstm 16,31,256(3)
 	j	.Lsave_fpu_regs_done	  # -> set CIF_FPU flag
 .Lsave_fpu_regs_fp:
@@ -851,9 +858,7 @@ load_fpu_regs:
 	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
 	lg	%r4,__THREAD_FPU_regs(%r4)	# %r4 <- reg save area
 	jz	.Lload_fpu_regs_fp		# -> no VX, load FP regs
-.Lload_fpu_regs_vx:
 	VLM	%v0,%v15,0,%r4
-.Lload_fpu_regs_vx_high:
 	VLM	%v16,%v31,256,%r4
 	j	.Lload_fpu_regs_done
 .Lload_fpu_regs_fp:
@@ -889,7 +894,7 @@ ENTRY(mcck_int_handler)
 	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# revalidate cpu timer
 	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
 	lg	%r10,__LC_LAST_BREAK
-	lg	%r12,__LC_THREAD_INFO
+	lg	%r12,__LC_CURRENT
 	larl	%r13,cleanup_critical
 	lmg	%r8,%r9,__LC_MCK_OLD_PSW
 	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
@@ -948,7 +953,7 @@ ENTRY(mcck_int_handler)
 
 .Lmcck_panic:
 	lg	%r15,__LC_PANIC_STACK
-	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+	la	%r11,STACK_FRAME_OVERHEAD(%r15)
 	j	.Lmcck_skip
 
 #
@@ -1085,7 +1090,7 @@ cleanup_critical:
 	jhe	0f
 	# set up saved registers r10 and r12
 	stg	%r10,16(%r11)		# r10 last break
-	stg	%r12,32(%r11)		# r12 thread-info pointer
+	stg	%r12,32(%r11)		# r12 task struct pointer
 0:	# check if the user time update has been done
 	clg	%r9,BASED(.Lcleanup_system_call_insn+24)
 	jh	0f
@@ -1106,7 +1111,9 @@ cleanup_critical:
 	lg	%r9,16(%r11)
 	srag	%r9,%r9,23
 	jz	0f
-	mvc	__TI_last_break(8,%r12),16(%r11)
+	lgr	%r9,%r12
+	aghi	%r9,__TASK_thread
+	mvc	__THREAD_last_break(8,%r9),16(%r11)
 0:	# set up saved register r11
 	lg	%r15,__LC_KERNEL_STACK
 	la	%r9,STACK_FRAME_OVERHEAD(%r15)
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 4431905f8cfa..0b5ebf8a3d30 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -315,7 +315,7 @@ ENTRY(startup_kdump)
 	jg	startup_continue
 
 .Lstack:
-	.long	0x8000 + (1<<(PAGE_SHIFT+THREAD_ORDER))
+	.long	0x8000 + (1<<(PAGE_SHIFT+THREAD_SIZE_ORDER))
 	.align	8
 6:	.long	0x7fffffff,0xffffffff
 
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 03c2b469c472..482d3526e32b 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -32,11 +32,10 @@ ENTRY(startup_continue)
 #
 # Setup stack
 #
-	larl	%r15,init_thread_union
-	stg	%r15,__LC_THREAD_INFO	# cache thread info in lowcore
-	lg	%r14,__TI_task(%r15)	# cache current in lowcore
+	larl	%r14,init_task
 	stg	%r14,__LC_CURRENT
-	aghi	%r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE
+	larl	%r15,init_thread_union
+	aghi	%r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER) # init_task_union + THREAD_SIZE
 	stg	%r15,__LC_KERNEL_STACK	# set end of kernel stack
 	aghi	%r15,-160
 #
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 295bfb7124bc..ff3364a067ff 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -1991,10 +1991,9 @@ void __init ipl_update_parameters(void)
 	diag308_set_works = 1;
 }
 
-void __init ipl_save_parameters(void)
+void __init ipl_verify_parameters(void)
 {
 	struct cio_iplinfo iplinfo;
-	void *src, *dst;
 
 	if (cio_get_iplinfo(&iplinfo))
 		return;
@@ -2005,10 +2004,6 @@ void __init ipl_save_parameters(void)
 	if (!iplinfo.is_qdio)
 		return;
 	ipl_flags |= IPL_PARMBLOCK_VALID;
-	src = (void *)(unsigned long)S390_lowcore.ipl_parmblock_ptr;
-	dst = (void *)IPL_PARMBLOCK_ORIGIN;
-	memmove(dst, src, PAGE_SIZE);
-	S390_lowcore.ipl_parmblock_ptr = IPL_PARMBLOCK_ORIGIN;
 }
 
 static LIST_HEAD(rcall);
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 285d6561076d..ef60f4177331 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -168,7 +168,7 @@ void do_softirq_own_stack(void)
 	old = current_stack_pointer();
 	/* Check against async. stack address range. */
 	new = S390_lowcore.async_stack;
-	if (((new - old) >> (PAGE_SHIFT + THREAD_ORDER)) != 0) {
+	if (((new - old) >> (PAGE_SHIFT + THREAD_SIZE_ORDER)) != 0) {
 		/* Need to switch to the async. stack. */
 		new -= STACK_FRAME_OVERHEAD;
 		((struct stack_frame *) new)->back_chain = old;
diff --git a/arch/s390/kernel/lgr.c b/arch/s390/kernel/lgr.c
index 6ea6d69339b5..ae7dff110054 100644
--- a/arch/s390/kernel/lgr.c
+++ b/arch/s390/kernel/lgr.c
@@ -5,7 +5,8 @@
  * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
  */
 
-#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/export.h>
 #include <linux/timer.h>
 #include <linux/slab.h>
 #include <asm/facility.h>
@@ -183,4 +184,4 @@ static int __init lgr_init(void)
 	lgr_timer_set();
 	return 0;
 }
-module_init(lgr_init);
+device_initcall(lgr_init);
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index fcc634c1479a..763dec18edcd 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -995,39 +995,36 @@ static int perf_push_sample(struct perf_event *event, struct sf_raw_sample *sfr)
 	regs.int_parm = CPU_MF_INT_SF_PRA;
 	sde_regs = (struct perf_sf_sde_regs *) &regs.int_parm_long;
 
-	regs.psw.addr = sfr->basic.ia;
-	if (sfr->basic.T)
-		regs.psw.mask |= PSW_MASK_DAT;
-	if (sfr->basic.W)
-		regs.psw.mask |= PSW_MASK_WAIT;
-	if (sfr->basic.P)
-		regs.psw.mask |= PSW_MASK_PSTATE;
-	switch (sfr->basic.AS) {
-	case 0x0:
-		regs.psw.mask |= PSW_ASC_PRIMARY;
-		break;
-	case 0x1:
-		regs.psw.mask |= PSW_ASC_ACCREG;
-		break;
-	case 0x2:
-		regs.psw.mask |= PSW_ASC_SECONDARY;
-		break;
-	case 0x3:
-		regs.psw.mask |= PSW_ASC_HOME;
-		break;
-	}
+	psw_bits(regs.psw).ia = sfr->basic.ia;
+	psw_bits(regs.psw).t = sfr->basic.T;
+	psw_bits(regs.psw).w = sfr->basic.W;
+	psw_bits(regs.psw).p = sfr->basic.P;
+	psw_bits(regs.psw).as = sfr->basic.AS;
 
 	/*
-	 * A non-zero guest program parameter indicates a guest
-	 * sample.
-	 * Note that some early samples or samples from guests without
+	 * Use the hardware provided configuration level to decide if the
+	 * sample belongs to a guest or host. If that is not available,
+	 * fall back to the following heuristics:
+	 * A non-zero guest program parameter always indicates a guest
+	 * sample. Some early samples or samples from guests without
 	 * lpp usage would be misaccounted to the host. We use the asn
-	 * value as a heuristic to detect most of these guest samples.
-	 * If the value differs from the host hpp value, we assume
-	 * it to be a KVM guest.
+	 * value as an addon heuristic to detect most of these guest samples.
+	 * If the value differs from the host hpp value, we assume to be a
+	 * KVM guest.
 	 */
-	if (sfr->basic.gpp || sfr->basic.prim_asn != (u16) sfr->basic.hpp)
+	switch (sfr->basic.CL) {
+	case 1: /* logical partition */
+		sde_regs->in_guest = 0;
+		break;
+	case 2: /* virtual machine */
 		sde_regs->in_guest = 1;
+		break;
+	default: /* old machine, use heuristics */
+		if (sfr->basic.gpp ||
+		    sfr->basic.prim_asn != (u16)sfr->basic.hpp)
+			sde_regs->in_guest = 1;
+		break;
+	}
 
 	overflow = 0;
 	if (perf_exclude_event(event, &regs, sde_regs))
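
Note: the new logic above makes the sampled configuration level (CL) authoritative: 1 means the sample was taken at LPAR level (host), 2 at virtual machine level (guest), and only machines that do not report CL fall back to the old gpp/asn heuristic. Restated as a stand-alone predicate with field names taken from the diff (illustration, not the kernel function):

#include <stdio.h>

static int sample_is_guest(unsigned int cl, unsigned long gpp,
			   unsigned short prim_asn, unsigned long hpp)
{
	switch (cl) {
	case 1:				/* logical partition: host sample */
		return 0;
	case 2:				/* virtual machine: guest sample */
		return 1;
	default:			/* no CL: fall back to heuristics */
		return gpp != 0 || prim_asn != (unsigned short) hpp;
	}
}

int main(void)
{
	printf("%d %d %d\n",
	       sample_is_guest(1, 1, 5, 5),	/* 0: CL overrides gpp */
	       sample_is_guest(2, 0, 5, 5),	/* 1 */
	       sample_is_guest(0, 0, 5, 6));	/* 1: asn mismatch */
	return 0;
}
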
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index bba4fa74b321..400d14f0b9f5 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -103,7 +103,6 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
 		unsigned long arg, struct task_struct *p)
 {
-	struct thread_info *ti;
 	struct fake_frame
 	{
 		struct stack_frame sf;
@@ -121,9 +120,8 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
 	memset(&p->thread.per_event, 0, sizeof(p->thread.per_event));
 	clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
 	/* Initialize per thread user and system timer values */
-	ti = task_thread_info(p);
-	ti->user_timer = 0;
-	ti->system_timer = 0;
+	p->thread.user_timer = 0;
+	p->thread.system_timer = 0;
 
 	frame->sf.back_chain = 0;
 	/* new return point is ret_from_fork */
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 9336e824e2db..b81ab8882e2e 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -461,7 +461,7 @@ long arch_ptrace(struct task_struct *child, long request,
 		}
 		return 0;
 	case PTRACE_GET_LAST_BREAK:
-		put_user(task_thread_info(child)->last_break,
+		put_user(child->thread.last_break,
 			 (unsigned long __user *) data);
 		return 0;
 	case PTRACE_ENABLE_TE:
@@ -811,7 +811,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 		}
 		return 0;
 	case PTRACE_GET_LAST_BREAK:
-		put_user(task_thread_info(child)->last_break,
+		put_user(child->thread.last_break,
 			 (unsigned int __user *) data);
 		return 0;
 	}
@@ -997,10 +997,10 @@ static int s390_last_break_get(struct task_struct *target,
 	if (count > 0) {
 		if (kbuf) {
 			unsigned long *k = kbuf;
-			*k = task_thread_info(target)->last_break;
+			*k = target->thread.last_break;
 		} else {
 			unsigned long __user *u = ubuf;
-			if (__put_user(task_thread_info(target)->last_break, u))
+			if (__put_user(target->thread.last_break, u))
 				return -EFAULT;
 		}
 	}
@@ -1113,7 +1113,7 @@ static int s390_system_call_get(struct task_struct *target,
 				unsigned int pos, unsigned int count,
 				void *kbuf, void __user *ubuf)
 {
-	unsigned int *data = &task_thread_info(target)->system_call;
+	unsigned int *data = &target->thread.system_call;
 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 				   data, 0, sizeof(unsigned int));
 }
@@ -1123,7 +1123,7 @@ static int s390_system_call_set(struct task_struct *target,
 				unsigned int pos, unsigned int count,
 				const void *kbuf, const void __user *ubuf)
 {
-	unsigned int *data = &task_thread_info(target)->system_call;
+	unsigned int *data = &target->thread.system_call;
 	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 				  data, 0, sizeof(unsigned int));
 }
@@ -1327,7 +1327,7 @@ static int s390_compat_last_break_get(struct task_struct *target,
 	compat_ulong_t last_break;
 
 	if (count > 0) {
-		last_break = task_thread_info(target)->last_break;
+		last_break = target->thread.last_break;
 		if (kbuf) {
 			unsigned long *k = kbuf;
 			*k = last_break;
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 7f7ba5f23f13..adfac9f0a89f 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -35,6 +35,7 @@
 #include <linux/root_dev.h>
 #include <linux/console.h>
 #include <linux/kernel_stat.h>
+#include <linux/dma-contiguous.h>
 #include <linux/device.h>
 #include <linux/notifier.h>
 #include <linux/pfn.h>
@@ -303,7 +304,7 @@ static void __init setup_lowcore(void)
 	 * Setup lowcore for boot cpu
 	 */
 	BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * 4096);
-	lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0);
+	lc = memblock_virt_alloc_low(sizeof(*lc), sizeof(*lc));
 	lc->restart_psw.mask = PSW_KERNEL_BITS;
 	lc->restart_psw.addr = (unsigned long) restart_int_handler;
 	lc->external_new_psw.mask = PSW_KERNEL_BITS |
@@ -324,15 +325,15 @@ static void __init setup_lowcore(void)
 	lc->kernel_stack = ((unsigned long) &init_thread_union)
 		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
 	lc->async_stack = (unsigned long)
-		__alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0)
+		memblock_virt_alloc(ASYNC_SIZE, ASYNC_SIZE)
 		+ ASYNC_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
 	lc->panic_stack = (unsigned long)
-		__alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0)
+		memblock_virt_alloc(PAGE_SIZE, PAGE_SIZE)
 		+ PAGE_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
-	lc->current_task = (unsigned long) init_thread_union.thread_info.task;
-	lc->thread_info = (unsigned long) &init_thread_union;
+	lc->current_task = (unsigned long)&init_task;
 	lc->lpp = LPP_MAGIC;
 	lc->machine_flags = S390_lowcore.machine_flags;
+	lc->preempt_count = S390_lowcore.preempt_count;
 	lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
 	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
 	       MAX_FACILITY_BIT/8);
@@ -349,7 +350,7 @@ static void __init setup_lowcore(void)
 	lc->last_update_timer = S390_lowcore.last_update_timer;
 	lc->last_update_clock = S390_lowcore.last_update_clock;
 
-	restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0);
+	restart_stack = memblock_virt_alloc(ASYNC_SIZE, ASYNC_SIZE);
 	restart_stack += ASYNC_SIZE;
 
 	/*
@@ -412,7 +413,7 @@ static void __init setup_resources(void)
 	bss_resource.end = (unsigned long) &__bss_stop - 1;
 
 	for_each_memblock(memory, reg) {
-		res = alloc_bootmem_low(sizeof(*res));
+		res = memblock_virt_alloc(sizeof(*res), 8);
 		res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
 
 		res->name = "System RAM";
@@ -426,7 +427,7 @@ static void __init setup_resources(void)
 			    std_res->start > res->end)
 				continue;
 			if (std_res->end > res->end) {
-				sub_res = alloc_bootmem_low(sizeof(*sub_res));
+				sub_res = memblock_virt_alloc(sizeof(*sub_res), 8);
 				*sub_res = *std_res;
 				sub_res->end = res->end;
 				std_res->start = res->end + 1;
@@ -445,7 +446,7 @@ static void __init setup_resources(void)
 	 * part of the System RAM resource.
 	 */
 	if (crashk_res.end) {
-		memblock_add(crashk_res.start, resource_size(&crashk_res));
+		memblock_add_node(crashk_res.start, resource_size(&crashk_res), 0);
 		memblock_reserve(crashk_res.start, resource_size(&crashk_res));
 		insert_resource(&iomem_resource, &crashk_res);
 	}
@@ -903,6 +904,7 @@ void __init setup_arch(char **cmdline_p)
 
 	setup_memory_end();
 	setup_memory();
+	dma_contiguous_reserve(memory_end);
 
 	check_initrd();
 	reserve_crashkernel();
@@ -921,6 +923,8 @@ void __init setup_arch(char **cmdline_p)
 	cpu_detect_mhz_feature();
 	cpu_init();
 	numa_setup();
+	smp_detect_cpus();
+	topology_init_early();
 
 	/*
 	 * Create kernel page tables and switch to virtual addressing.
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index d82562cf0a0e..9f241d1efeda 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -359,7 +359,7 @@ static int setup_frame(int sig, struct k_sigaction *ka,
 		/* set extra registers only for synchronous signals */
 		regs->gprs[4] = regs->int_code & 127;
 		regs->gprs[5] = regs->int_parm_long;
-		regs->gprs[6] = task_thread_info(current)->last_break;
+		regs->gprs[6] = current->thread.last_break;
 	}
 	return 0;
 }
@@ -430,7 +430,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
 	regs->gprs[2] = ksig->sig;
 	regs->gprs[3] = (unsigned long) &frame->info;
 	regs->gprs[4] = (unsigned long) &frame->uc;
-	regs->gprs[5] = task_thread_info(current)->last_break;
+	regs->gprs[5] = current->thread.last_break;
 	return 0;
 }
 
@@ -467,13 +467,13 @@ void do_signal(struct pt_regs *regs)
 	 * the debugger may change all our registers, including the system
 	 * call information.
 	 */
-	current_thread_info()->system_call =
+	current->thread.system_call =
 		test_pt_regs_flag(regs, PIF_SYSCALL) ? regs->int_code : 0;
 
 	if (get_signal(&ksig)) {
 		/* Whee!  Actually deliver the signal.  */
-		if (current_thread_info()->system_call) {
-			regs->int_code = current_thread_info()->system_call;
+		if (current->thread.system_call) {
+			regs->int_code = current->thread.system_call;
 			/* Check for system call restarting. */
 			switch (regs->gprs[2]) {
 			case -ERESTART_RESTARTBLOCK:
@@ -506,8 +506,8 @@ void do_signal(struct pt_regs *regs)
 
 	/* No handlers present - check for system call restart */
 	clear_pt_regs_flag(regs, PIF_SYSCALL);
-	if (current_thread_info()->system_call) {
-		regs->int_code = current_thread_info()->system_call;
+	if (current->thread.system_call) {
+		regs->int_code = current->thread.system_call;
 		switch (regs->gprs[2]) {
 		case -ERESTART_RESTARTBLOCK:
 			/* Restart with sys_restart_syscall */
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index df4a508ff35c..e49f61aadaf9 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -19,6 +19,7 @@
19#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 19#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
20 20
21#include <linux/workqueue.h> 21#include <linux/workqueue.h>
22#include <linux/bootmem.h>
22#include <linux/module.h> 23#include <linux/module.h>
23#include <linux/init.h> 24#include <linux/init.h>
24#include <linux/mm.h> 25#include <linux/mm.h>
@@ -259,16 +260,14 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
259static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk) 260static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
260{ 261{
261 struct lowcore *lc = pcpu->lowcore; 262 struct lowcore *lc = pcpu->lowcore;
262 struct thread_info *ti = task_thread_info(tsk);
263 263
264 lc->kernel_stack = (unsigned long) task_stack_page(tsk) 264 lc->kernel_stack = (unsigned long) task_stack_page(tsk)
265 + THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs); 265 + THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
266 lc->thread_info = (unsigned long) task_thread_info(tsk);
267 lc->current_task = (unsigned long) tsk; 266 lc->current_task = (unsigned long) tsk;
268 lc->lpp = LPP_MAGIC; 267 lc->lpp = LPP_MAGIC;
269 lc->current_pid = tsk->pid; 268 lc->current_pid = tsk->pid;
270 lc->user_timer = ti->user_timer; 269 lc->user_timer = tsk->thread.user_timer;
271 lc->system_timer = ti->system_timer; 270 lc->system_timer = tsk->thread.system_timer;
272 lc->steal_timer = 0; 271 lc->steal_timer = 0;
273} 272}
274 273
@@ -662,14 +661,12 @@ int smp_cpu_get_polarization(int cpu)
662 return pcpu_devices[cpu].polarization; 661 return pcpu_devices[cpu].polarization;
663} 662}
664 663
665static struct sclp_core_info *smp_get_core_info(void) 664static void __ref smp_get_core_info(struct sclp_core_info *info, int early)
666{ 665{
667 static int use_sigp_detection; 666 static int use_sigp_detection;
668 struct sclp_core_info *info;
669 int address; 667 int address;
670 668
671 info = kzalloc(sizeof(*info), GFP_KERNEL); 669 if (use_sigp_detection || sclp_get_core_info(info, early)) {
672 if (info && (use_sigp_detection || sclp_get_core_info(info))) {
673 use_sigp_detection = 1; 670 use_sigp_detection = 1;
674 for (address = 0; 671 for (address = 0;
675 address < (SCLP_MAX_CORES << smp_cpu_mt_shift); 672 address < (SCLP_MAX_CORES << smp_cpu_mt_shift);
@@ -683,7 +680,6 @@ static struct sclp_core_info *smp_get_core_info(void)
683 } 680 }
684 info->combined = info->configured; 681 info->combined = info->configured;
685 } 682 }
686 return info;
687} 683}
688 684
689static int smp_add_present_cpu(int cpu); 685static int smp_add_present_cpu(int cpu);
@@ -724,17 +720,15 @@ static int __smp_rescan_cpus(struct sclp_core_info *info, int sysfs_add)
724 return nr; 720 return nr;
725} 721}
726 722
727static void __init smp_detect_cpus(void) 723void __init smp_detect_cpus(void)
728{ 724{
729 unsigned int cpu, mtid, c_cpus, s_cpus; 725 unsigned int cpu, mtid, c_cpus, s_cpus;
730 struct sclp_core_info *info; 726 struct sclp_core_info *info;
731 u16 address; 727 u16 address;
732 728
733 /* Get CPU information */ 729 /* Get CPU information */
734 info = smp_get_core_info(); 730 info = memblock_virt_alloc(sizeof(*info), 8);
735 if (!info) 731 smp_get_core_info(info, 1);
736 panic("smp_detect_cpus failed to allocate memory\n");
737
738 /* Find boot CPU type */ 732 /* Find boot CPU type */
739 if (sclp.has_core_type) { 733 if (sclp.has_core_type) {
740 address = stap(); 734 address = stap();
@@ -770,7 +764,7 @@ static void __init smp_detect_cpus(void)
770 get_online_cpus(); 764 get_online_cpus();
771 __smp_rescan_cpus(info, 0); 765 __smp_rescan_cpus(info, 0);
772 put_online_cpus(); 766 put_online_cpus();
773 kfree(info); 767 memblock_free_early((unsigned long)info, sizeof(*info));
774} 768}
775 769
776/* 770/*
@@ -807,7 +801,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
807 pcpu = pcpu_devices + cpu; 801 pcpu = pcpu_devices + cpu;
808 if (pcpu->state != CPU_STATE_CONFIGURED) 802 if (pcpu->state != CPU_STATE_CONFIGURED)
809 return -EIO; 803 return -EIO;
810 base = cpu - (cpu % (smp_cpu_mtid + 1)); 804 base = smp_get_base_cpu(cpu);
811 for (i = 0; i <= smp_cpu_mtid; i++) { 805 for (i = 0; i <= smp_cpu_mtid; i++) {
812 if (base + i < nr_cpu_ids) 806 if (base + i < nr_cpu_ids)
813 if (cpu_online(base + i)) 807 if (cpu_online(base + i))
@@ -907,7 +901,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
907 /* request the 0x1202 external call external interrupt */ 901 /* request the 0x1202 external call external interrupt */
908 if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt)) 902 if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
909 panic("Couldn't request external interrupt 0x1202"); 903 panic("Couldn't request external interrupt 0x1202");
910 smp_detect_cpus();
911} 904}
912 905
913void __init smp_prepare_boot_cpu(void) 906void __init smp_prepare_boot_cpu(void)
@@ -973,7 +966,7 @@ static ssize_t cpu_configure_store(struct device *dev,
973 rc = -EBUSY; 966 rc = -EBUSY;
974 /* disallow configuration changes of online cpus and cpu 0 */ 967 /* disallow configuration changes of online cpus and cpu 0 */
975 cpu = dev->id; 968 cpu = dev->id;
976 cpu -= cpu % (smp_cpu_mtid + 1); 969 cpu = smp_get_base_cpu(cpu);
977 if (cpu == 0) 970 if (cpu == 0)
978 goto out; 971 goto out;
979 for (i = 0; i <= smp_cpu_mtid; i++) 972 for (i = 0; i <= smp_cpu_mtid; i++)
@@ -1106,9 +1099,10 @@ int __ref smp_rescan_cpus(void)
1106 struct sclp_core_info *info; 1099 struct sclp_core_info *info;
1107 int nr; 1100 int nr;
1108 1101
1109 info = smp_get_core_info(); 1102 info = kzalloc(sizeof(*info), GFP_KERNEL);
1110 if (!info) 1103 if (!info)
1111 return -ENOMEM; 1104 return -ENOMEM;
1105 smp_get_core_info(info, 0);
1112 get_online_cpus(); 1106 get_online_cpus();
1113 mutex_lock(&smp_cpu_state_mutex); 1107 mutex_lock(&smp_cpu_state_mutex);
1114 nr = __smp_rescan_cpus(info, 1); 1108 nr = __smp_rescan_cpus(info, 1);
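
Two independent changes meet in smp.c: smp_detect_cpus() is made externally
callable and switched to memblock_virt_alloc(), since its call site moves out
of smp_prepare_cpus() to a point in early boot where the slab allocator is not
up yet, and the repeated base-CPU arithmetic is funneled through
smp_get_base_cpu(). A sketch of that helper, using the formula the old code
open-coded in __cpu_up() and cpu_configure_store(); treating the real helper
as exactly this one-liner is an assumption:

    /*
     * With SMT, the threads of one core get consecutive logical CPU
     * numbers, so rounding down to a multiple of (smp_cpu_mtid + 1)
     * yields the first thread of the core that owns "cpu".
     */
    static inline int smp_get_base_cpu(int cpu)
    {
            return cpu - (cpu % (smp_cpu_mtid + 1));
    }
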
diff --git a/arch/s390/kernel/swsusp.S b/arch/s390/kernel/swsusp.S
index 2d6b6e81f812..1ff21f05d7dd 100644
--- a/arch/s390/kernel/swsusp.S
+++ b/arch/s390/kernel/swsusp.S
@@ -194,7 +194,7 @@ pgm_check_entry:
194 194
195 /* Suspend CPU not available -> panic */ 195 /* Suspend CPU not available -> panic */
196 larl %r15,init_thread_union 196 larl %r15,init_thread_union
197 ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) 197 ahi %r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER)
198 larl %r2,.Lpanic_string 198 larl %r2,.Lpanic_string
199 larl %r3,_sclp_print_early 199 larl %r3,_sclp_print_early
200 lghi %r1,0 200 lghi %r1,0
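
The THREAD_ORDER to THREAD_SIZE_ORDER rename follows from the thread_info
rework; the instruction still computes the initial stack pointer for the panic
path. A sketch of what the adjusted ahi produces, in C terms:

    /* %r15 = (unsigned long) &init_thread_union
     *        + (1UL << (PAGE_SHIFT + THREAD_SIZE_ORDER));
     * i.e. the top of the init stack, since stacks grow downwards.
     */
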
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
index bfda6aa40280..24021c1e3ecb 100644
--- a/arch/s390/kernel/sysinfo.c
+++ b/arch/s390/kernel/sysinfo.c
@@ -56,6 +56,20 @@ int stsi(void *sysinfo, int fc, int sel1, int sel2)
56} 56}
57EXPORT_SYMBOL(stsi); 57EXPORT_SYMBOL(stsi);
58 58
59static bool convert_ext_name(unsigned char encoding, char *name, size_t len)
60{
61 switch (encoding) {
62 case 1: /* EBCDIC */
63 EBCASC(name, len);
64 break;
65 case 2: /* UTF-8 */
66 break;
67 default:
68 return false;
69 }
70 return true;
71}
72
59static void stsi_1_1_1(struct seq_file *m, struct sysinfo_1_1_1 *info) 73static void stsi_1_1_1(struct seq_file *m, struct sysinfo_1_1_1 *info)
60{ 74{
61 int i; 75 int i;
@@ -207,24 +221,19 @@ static void stsi_2_2_2(struct seq_file *m, struct sysinfo_2_2_2 *info)
207 seq_printf(m, "LPAR CPUs S-MTID: %d\n", info->mt_stid); 221 seq_printf(m, "LPAR CPUs S-MTID: %d\n", info->mt_stid);
208 seq_printf(m, "LPAR CPUs PS-MTID: %d\n", info->mt_psmtid); 222 seq_printf(m, "LPAR CPUs PS-MTID: %d\n", info->mt_psmtid);
209 } 223 }
224 if (convert_ext_name(info->vsne, info->ext_name, sizeof(info->ext_name))) {
225 seq_printf(m, "LPAR Extended Name: %-.256s\n", info->ext_name);
226 seq_printf(m, "LPAR UUID: %pUb\n", &info->uuid);
227 }
210} 228}
211 229
212static void print_ext_name(struct seq_file *m, int lvl, 230static void print_ext_name(struct seq_file *m, int lvl,
213 struct sysinfo_3_2_2 *info) 231 struct sysinfo_3_2_2 *info)
214{ 232{
215 if (info->vm[lvl].ext_name_encoding == 0) 233 size_t len = sizeof(info->ext_names[lvl]);
216 return; 234
217 if (info->ext_names[lvl][0] == 0) 235 if (!convert_ext_name(info->vm[lvl].evmne, info->ext_names[lvl], len))
218 return;
219 switch (info->vm[lvl].ext_name_encoding) {
220 case 1: /* EBCDIC */
221 EBCASC(info->ext_names[lvl], sizeof(info->ext_names[lvl]));
222 break;
223 case 2: /* UTF-8 */
224 break;
225 default:
226 return; 236 return;
227 }
228 seq_printf(m, "VM%02d Extended Name: %-.256s\n", lvl, 237 seq_printf(m, "VM%02d Extended Name: %-.256s\n", lvl,
229 info->ext_names[lvl]); 238 info->ext_names[lvl]);
230} 239}
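
convert_ext_name() centralizes the encoding handling that print_ext_name()
used to open-code, and stsi_2_2_2() reuses it for the new LPAR extended name
and UUID output. The contract, as a sketch (the enum names are made up, the
values come from the switch above):

    enum {
            EXT_NAME_ENC_EBCDIC = 1,  /* converted in place via EBCASC() */
            EXT_NAME_ENC_UTF8   = 2,  /* already printable, passed through */
    };
    /* any other encoding: convert_ext_name() returns false, nothing printed */
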
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 0bfcc492987e..867d0a057046 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -59,19 +59,27 @@ ATOMIC_NOTIFIER_HEAD(s390_epoch_delta_notifier);
59EXPORT_SYMBOL(s390_epoch_delta_notifier); 59EXPORT_SYMBOL(s390_epoch_delta_notifier);
60 60
61unsigned char ptff_function_mask[16]; 61unsigned char ptff_function_mask[16];
62unsigned long lpar_offset; 62
63unsigned long initial_leap_seconds; 63static unsigned long long lpar_offset;
64static unsigned long long initial_leap_seconds;
65static unsigned long long tod_steering_end;
66static long long tod_steering_delta;
64 67
65/* 68/*
66 * Get time offsets with PTFF 69 * Get time offsets with PTFF
67 */ 70 */
68void __init ptff_init(void) 71void __init time_early_init(void)
69{ 72{
70 struct ptff_qto qto; 73 struct ptff_qto qto;
71 struct ptff_qui qui; 74 struct ptff_qui qui;
72 75
76 /* Initialize TOD steering parameters */
77 tod_steering_end = sched_clock_base_cc;
78 vdso_data->ts_end = tod_steering_end;
79
73 if (!test_facility(28)) 80 if (!test_facility(28))
74 return; 81 return;
82
75 ptff(&ptff_function_mask, sizeof(ptff_function_mask), PTFF_QAF); 83 ptff(&ptff_function_mask, sizeof(ptff_function_mask), PTFF_QAF);
76 84
77 /* get LPAR offset */ 85 /* get LPAR offset */
@@ -80,7 +88,7 @@ void __init ptff_init(void)
80 88
81 /* get initial leap seconds */ 89 /* get initial leap seconds */
82 if (ptff_query(PTFF_QUI) && ptff(&qui, sizeof(qui), PTFF_QUI) == 0) 90 if (ptff_query(PTFF_QUI) && ptff(&qui, sizeof(qui), PTFF_QUI) == 0)
83 initial_leap_seconds = (unsigned long) 91 initial_leap_seconds = (unsigned long long)
84 ((long) qui.old_leap * 4096000000L); 92 ((long) qui.old_leap * 4096000000L);
85} 93}
86 94
@@ -123,18 +131,6 @@ void clock_comparator_work(void)
123 cd->event_handler(cd); 131 cd->event_handler(cd);
124} 132}
125 133
126/*
127 * Fixup the clock comparator.
128 */
129static void fixup_clock_comparator(unsigned long long delta)
130{
131 /* If nobody is waiting there's nothing to fix. */
132 if (S390_lowcore.clock_comparator == -1ULL)
133 return;
134 S390_lowcore.clock_comparator += delta;
135 set_clock_comparator(S390_lowcore.clock_comparator);
136}
137
138static int s390_next_event(unsigned long delta, 134static int s390_next_event(unsigned long delta,
139 struct clock_event_device *evt) 135 struct clock_event_device *evt)
140{ 136{
@@ -215,7 +211,21 @@ void read_boot_clock64(struct timespec64 *ts)
215 211
216static cycle_t read_tod_clock(struct clocksource *cs) 212static cycle_t read_tod_clock(struct clocksource *cs)
217{ 213{
218 return get_tod_clock(); 214 unsigned long long now, adj;
215
216 preempt_disable(); /* protect from changes to steering parameters */
217 now = get_tod_clock();
218 adj = tod_steering_end - now;
219 if (unlikely((s64) adj >= 0))
220 /*
221 * manually steer by 1 cycle every 2^16 cycles. This
222 * corresponds to shifting the tod delta by 15. 1s is
223 * therefore steered in ~9h. The adjust will decrease
224 * over time, until it finally reaches 0.
225 */
226 now += (tod_steering_delta < 0) ? (adj >> 15) : -(adj >> 15);
227 preempt_enable();
228 return now;
219} 229}
220 230
221static struct clocksource clocksource_tod = { 231static struct clocksource clocksource_tod = {
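
Worked numbers behind the steering comment in read_tod_clock() above (TOD
units: bit 51 of the TOD clock is 1 microsecond, so 1 s = 4096000000 units):

    /*
     *   steering window = |delta| << 15 units  (set in clock_sync_global below)
     *   correction      = (end - now) >> 15    (decays linearly from |delta| to 0)
     *   slew rate       = 1 unit per 2^15 units of elapsed time
     *
     * One second of offset is therefore absorbed in 2^15 s = 32768 s,
     * about 9.1 hours, which is the "~9h" in the comment. The "every
     * 2^16 cycles" wording looks like a slip for 2^15: the shift and
     * the 9h figure agree with each other.
     */
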
@@ -384,6 +394,55 @@ static inline int check_sync_clock(void)
384 return rc; 394 return rc;
385} 395}
386 396
397/*
398 * Apply clock delta to the global data structures.
399 * This is called once on the CPU that performed the clock sync.
400 */
401static void clock_sync_global(unsigned long long delta)
402{
403 unsigned long now, adj;
404 struct ptff_qto qto;
405
406 /* Fixup the monotonic sched clock. */
407 sched_clock_base_cc += delta;
408 /* Adjust TOD steering parameters. */
409 vdso_data->tb_update_count++;
410 now = get_tod_clock();
411 adj = tod_steering_end - now;
412 if (unlikely((s64) adj >= 0))
413 /* Calculate how much of the old adjustment is left. */
414 tod_steering_delta = (tod_steering_delta < 0) ?
415 -(adj >> 15) : (adj >> 15);
416 tod_steering_delta += delta;
417 if ((abs(tod_steering_delta) >> 48) != 0)
418 panic("TOD clock sync offset %lli is too large to drift\n",
419 tod_steering_delta);
420 tod_steering_end = now + (abs(tod_steering_delta) << 15);
421 vdso_data->ts_dir = (tod_steering_delta < 0) ? 0 : 1;
422 vdso_data->ts_end = tod_steering_end;
423 vdso_data->tb_update_count++;
424 /* Update LPAR offset. */
425 if (ptff_query(PTFF_QTO) && ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
426 lpar_offset = qto.tod_epoch_difference;
427 /* Call the TOD clock change notifier. */
428 atomic_notifier_call_chain(&s390_epoch_delta_notifier, 0, &delta);
429}
430
431/*
432 * Apply clock delta to the per-CPU data structures of this CPU.
433 * This is called for each online CPU after the call to clock_sync_global.
434 */
435static void clock_sync_local(unsigned long long delta)
436{
437 /* Add the delta to the clock comparator. */
438 if (S390_lowcore.clock_comparator != -1ULL) {
439 S390_lowcore.clock_comparator += delta;
440 set_clock_comparator(S390_lowcore.clock_comparator);
441 }
442 /* Adjust the last_update_clock time-stamp. */
443 S390_lowcore.last_update_clock += delta;
444}
445
387/* Single threaded workqueue used for stp sync events */ 446/* Single threaded workqueue used for stp sync events */
388static struct workqueue_struct *time_sync_wq; 447static struct workqueue_struct *time_sync_wq;
389 448
@@ -397,31 +456,9 @@ static void __init time_init_wq(void)
397struct clock_sync_data { 456struct clock_sync_data {
398 atomic_t cpus; 457 atomic_t cpus;
399 int in_sync; 458 int in_sync;
400 unsigned long long fixup_cc; 459 unsigned long long clock_delta;
401}; 460};
402 461
403static void clock_sync_cpu(struct clock_sync_data *sync)
404{
405 atomic_dec(&sync->cpus);
406 enable_sync_clock();
407 while (sync->in_sync == 0) {
408 __udelay(1);
409 /*
410 * A different cpu changes *in_sync. Therefore use
411 * barrier() to force memory access.
412 */
413 barrier();
414 }
415 if (sync->in_sync != 1)
416 /* Didn't work. Clear per-cpu in sync bit again. */
417 disable_sync_clock(NULL);
418 /*
419 * This round of TOD syncing is done. Set the clock comparator
420 * to the next tick and let the processor continue.
421 */
422 fixup_clock_comparator(sync->fixup_cc);
423}
424
425/* 462/*
426 * Server Time Protocol (STP) code. 463 * Server Time Protocol (STP) code.
427 */ 464 */
@@ -523,54 +560,46 @@ void stp_queue_work(void)
523 560
524static int stp_sync_clock(void *data) 561static int stp_sync_clock(void *data)
525{ 562{
526 static int first; 563 struct clock_sync_data *sync = data;
527 unsigned long long clock_delta; 564 unsigned long long clock_delta;
528 struct clock_sync_data *stp_sync; 565 static int first;
529 struct ptff_qto qto;
530 int rc; 566 int rc;
531 567
532 stp_sync = data;
533
534 if (xchg(&first, 1) == 1) {
535 /* Slave */
536 clock_sync_cpu(stp_sync);
537 return 0;
538 }
539
540 /* Wait until all other cpus entered the sync function. */
541 while (atomic_read(&stp_sync->cpus) != 0)
542 cpu_relax();
543
544 enable_sync_clock(); 568 enable_sync_clock();
545 569 if (xchg(&first, 1) == 0) {
546 rc = 0; 570 /* Wait until all other cpus entered the sync function. */
547 if (stp_info.todoff[0] || stp_info.todoff[1] || 571 while (atomic_read(&sync->cpus) != 0)
548 stp_info.todoff[2] || stp_info.todoff[3] || 572 cpu_relax();
549 stp_info.tmd != 2) { 573 rc = 0;
550 rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0, &clock_delta); 574 if (stp_info.todoff[0] || stp_info.todoff[1] ||
551 if (rc == 0) { 575 stp_info.todoff[2] || stp_info.todoff[3] ||
552 /* fixup the monotonic sched clock */ 576 stp_info.tmd != 2) {
553 sched_clock_base_cc += clock_delta; 577 rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0,
554 if (ptff_query(PTFF_QTO) && 578 &clock_delta);
555 ptff(&qto, sizeof(qto), PTFF_QTO) == 0) 579 if (rc == 0) {
556 /* Update LPAR offset */ 580 sync->clock_delta = clock_delta;
557 lpar_offset = qto.tod_epoch_difference; 581 clock_sync_global(clock_delta);
558 atomic_notifier_call_chain(&s390_epoch_delta_notifier, 582 rc = chsc_sstpi(stp_page, &stp_info,
559 0, &clock_delta); 583 sizeof(struct stp_sstpi));
560 stp_sync->fixup_cc = clock_delta; 584 if (rc == 0 && stp_info.tmd != 2)
561 fixup_clock_comparator(clock_delta); 585 rc = -EAGAIN;
562 rc = chsc_sstpi(stp_page, &stp_info, 586 }
563 sizeof(struct stp_sstpi));
564 if (rc == 0 && stp_info.tmd != 2)
565 rc = -EAGAIN;
566 } 587 }
588 sync->in_sync = rc ? -EAGAIN : 1;
589 xchg(&first, 0);
590 } else {
591 /* Slave */
592 atomic_dec(&sync->cpus);
593 /* Wait for in_sync to be set. */
594 while (READ_ONCE(sync->in_sync) == 0)
595 __udelay(1);
567 } 596 }
568 if (rc) { 597 if (sync->in_sync != 1)
598 /* Didn't work. Clear per-cpu in sync bit again. */
569 disable_sync_clock(NULL); 599 disable_sync_clock(NULL);
570 stp_sync->in_sync = -EAGAIN; 600 /* Apply clock delta to per-CPU fields of this CPU. */
571 } else 601 clock_sync_local(sync->clock_delta);
572 stp_sync->in_sync = 1; 602
573 xchg(&first, 0);
574 return 0; 603 return 0;
575} 604}
576 605
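
stp_sync_clock() is restructured so that master and slave share one exit path:
the master publishes the measured delta and its verdict in the shared
clock_sync_data, slaves spin on in_sync with READ_ONCE(), and every CPU ends
by applying the delta to its own lowcore via clock_sync_local(). A skeleton of
the rendezvous, with the chsc calls and error handling omitted:

    static int stp_sync_sketch(struct clock_sync_data *sync)
    {
            static int first;
            unsigned long long clock_delta = 0;     /* from chsc_sstpc(), omitted */

            enable_sync_clock();
            if (xchg(&first, 1) == 0) {
                    /* master: wait for the slaves to check in, then sync */
                    while (atomic_read(&sync->cpus) != 0)
                            cpu_relax();
                    sync->clock_delta = clock_delta;  /* publish the delta */
                    clock_sync_global(clock_delta);   /* global fixups, once */
                    sync->in_sync = 1;                /* publish the verdict */
                    xchg(&first, 0);
            } else {
                    /* slave: check in, then wait for the verdict */
                    atomic_dec(&sync->cpus);
                    while (READ_ONCE(sync->in_sync) == 0)
                            __udelay(1);
            }
            if (sync->in_sync != 1)
                    disable_sync_clock(NULL);
            clock_sync_local(sync->clock_delta);      /* per-CPU fixups, all CPUs */
            return 0;
    }
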
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index e959c02e0cac..93dcbae1e98d 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -7,6 +7,7 @@
7#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 7#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
8 8
9#include <linux/workqueue.h> 9#include <linux/workqueue.h>
10#include <linux/bootmem.h>
10#include <linux/cpuset.h> 11#include <linux/cpuset.h>
11#include <linux/device.h> 12#include <linux/device.h>
12#include <linux/export.h> 13#include <linux/export.h>
@@ -41,15 +42,17 @@ static bool topology_enabled = true;
41static DECLARE_WORK(topology_work, topology_work_fn); 42static DECLARE_WORK(topology_work, topology_work_fn);
42 43
43/* 44/*
44 * Socket/Book linked lists and per_cpu(cpu_topology) updates are 45 * Socket/Book linked lists and cpu_topology updates are
45 * protected by "sched_domains_mutex". 46 * protected by "sched_domains_mutex".
46 */ 47 */
47static struct mask_info socket_info; 48static struct mask_info socket_info;
48static struct mask_info book_info; 49static struct mask_info book_info;
49static struct mask_info drawer_info; 50static struct mask_info drawer_info;
50 51
51DEFINE_PER_CPU(struct cpu_topology_s390, cpu_topology); 52struct cpu_topology_s390 cpu_topology[NR_CPUS];
52EXPORT_PER_CPU_SYMBOL_GPL(cpu_topology); 53EXPORT_SYMBOL_GPL(cpu_topology);
54
55cpumask_t cpus_with_topology;
53 56
54static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu) 57static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
55{ 58{
@@ -97,7 +100,7 @@ static void add_cpus_to_mask(struct topology_core *tl_core,
97 if (lcpu < 0) 100 if (lcpu < 0)
98 continue; 101 continue;
99 for (i = 0; i <= smp_cpu_mtid; i++) { 102 for (i = 0; i <= smp_cpu_mtid; i++) {
100 topo = &per_cpu(cpu_topology, lcpu + i); 103 topo = &cpu_topology[lcpu + i];
101 topo->drawer_id = drawer->id; 104 topo->drawer_id = drawer->id;
102 topo->book_id = book->id; 105 topo->book_id = book->id;
103 topo->socket_id = socket->id; 106 topo->socket_id = socket->id;
@@ -106,6 +109,7 @@ static void add_cpus_to_mask(struct topology_core *tl_core,
106 cpumask_set_cpu(lcpu + i, &drawer->mask); 109 cpumask_set_cpu(lcpu + i, &drawer->mask);
107 cpumask_set_cpu(lcpu + i, &book->mask); 110 cpumask_set_cpu(lcpu + i, &book->mask);
108 cpumask_set_cpu(lcpu + i, &socket->mask); 111 cpumask_set_cpu(lcpu + i, &socket->mask);
112 cpumask_set_cpu(lcpu + i, &cpus_with_topology);
109 smp_cpu_set_polarization(lcpu + i, tl_core->pp); 113 smp_cpu_set_polarization(lcpu + i, tl_core->pp);
110 } 114 }
111 } 115 }
@@ -220,7 +224,7 @@ static void update_cpu_masks(void)
220 int cpu; 224 int cpu;
221 225
222 for_each_possible_cpu(cpu) { 226 for_each_possible_cpu(cpu) {
223 topo = &per_cpu(cpu_topology, cpu); 227 topo = &cpu_topology[cpu];
224 topo->thread_mask = cpu_thread_map(cpu); 228 topo->thread_mask = cpu_thread_map(cpu);
225 topo->core_mask = cpu_group_map(&socket_info, cpu); 229 topo->core_mask = cpu_group_map(&socket_info, cpu);
226 topo->book_mask = cpu_group_map(&book_info, cpu); 230 topo->book_mask = cpu_group_map(&book_info, cpu);
@@ -231,6 +235,8 @@ static void update_cpu_masks(void)
231 topo->socket_id = cpu; 235 topo->socket_id = cpu;
232 topo->book_id = cpu; 236 topo->book_id = cpu;
233 topo->drawer_id = cpu; 237 topo->drawer_id = cpu;
238 if (cpu_present(cpu))
239 cpumask_set_cpu(cpu, &cpus_with_topology);
234 } 240 }
235 } 241 }
236 numa_update_cpu_topology(); 242 numa_update_cpu_topology();
@@ -241,12 +247,12 @@ void store_topology(struct sysinfo_15_1_x *info)
241 stsi(info, 15, 1, min(topology_max_mnest, 4)); 247 stsi(info, 15, 1, min(topology_max_mnest, 4));
242} 248}
243 249
244int arch_update_cpu_topology(void) 250static int __arch_update_cpu_topology(void)
245{ 251{
246 struct sysinfo_15_1_x *info = tl_info; 252 struct sysinfo_15_1_x *info = tl_info;
247 struct device *dev; 253 int rc = 0;
248 int cpu, rc = 0;
249 254
255 cpumask_clear(&cpus_with_topology);
250 if (MACHINE_HAS_TOPOLOGY) { 256 if (MACHINE_HAS_TOPOLOGY) {
251 rc = 1; 257 rc = 1;
252 store_topology(info); 258 store_topology(info);
@@ -255,6 +261,15 @@ int arch_update_cpu_topology(void)
255 update_cpu_masks(); 261 update_cpu_masks();
256 if (!MACHINE_HAS_TOPOLOGY) 262 if (!MACHINE_HAS_TOPOLOGY)
257 topology_update_polarization_simple(); 263 topology_update_polarization_simple();
264 return rc;
265}
266
267int arch_update_cpu_topology(void)
268{
269 struct device *dev;
270 int cpu, rc;
271
272 rc = __arch_update_cpu_topology();
258 for_each_online_cpu(cpu) { 273 for_each_online_cpu(cpu) {
259 dev = get_cpu_device(cpu); 274 dev = get_cpu_device(cpu);
260 kobject_uevent(&dev->kobj, KOBJ_CHANGE); 275 kobject_uevent(&dev->kobj, KOBJ_CHANGE);
@@ -394,23 +409,23 @@ int topology_cpu_init(struct cpu *cpu)
394 409
395static const struct cpumask *cpu_thread_mask(int cpu) 410static const struct cpumask *cpu_thread_mask(int cpu)
396{ 411{
397 return &per_cpu(cpu_topology, cpu).thread_mask; 412 return &cpu_topology[cpu].thread_mask;
398} 413}
399 414
400 415
401const struct cpumask *cpu_coregroup_mask(int cpu) 416const struct cpumask *cpu_coregroup_mask(int cpu)
402{ 417{
403 return &per_cpu(cpu_topology, cpu).core_mask; 418 return &cpu_topology[cpu].core_mask;
404} 419}
405 420
406static const struct cpumask *cpu_book_mask(int cpu) 421static const struct cpumask *cpu_book_mask(int cpu)
407{ 422{
408 return &per_cpu(cpu_topology, cpu).book_mask; 423 return &cpu_topology[cpu].book_mask;
409} 424}
410 425
411static const struct cpumask *cpu_drawer_mask(int cpu) 426static const struct cpumask *cpu_drawer_mask(int cpu)
412{ 427{
413 return &per_cpu(cpu_topology, cpu).drawer_mask; 428 return &cpu_topology[cpu].drawer_mask;
414} 429}
415 430
416static int __init early_parse_topology(char *p) 431static int __init early_parse_topology(char *p)
@@ -438,19 +453,20 @@ static void __init alloc_masks(struct sysinfo_15_1_x *info,
438 nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i]; 453 nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
439 nr_masks = max(nr_masks, 1); 454 nr_masks = max(nr_masks, 1);
440 for (i = 0; i < nr_masks; i++) { 455 for (i = 0; i < nr_masks; i++) {
441 mask->next = kzalloc(sizeof(*mask->next), GFP_KERNEL); 456 mask->next = memblock_virt_alloc(sizeof(*mask->next), 8);
442 mask = mask->next; 457 mask = mask->next;
443 } 458 }
444} 459}
445 460
446static int __init s390_topology_init(void) 461void __init topology_init_early(void)
447{ 462{
448 struct sysinfo_15_1_x *info; 463 struct sysinfo_15_1_x *info;
449 int i; 464 int i;
450 465
466 set_sched_topology(s390_topology);
451 if (!MACHINE_HAS_TOPOLOGY) 467 if (!MACHINE_HAS_TOPOLOGY)
452 return 0; 468 goto out;
453 tl_info = (struct sysinfo_15_1_x *)__get_free_page(GFP_KERNEL); 469 tl_info = memblock_virt_alloc(sizeof(*tl_info), PAGE_SIZE);
454 info = tl_info; 470 info = tl_info;
455 store_topology(info); 471 store_topology(info);
456 pr_info("The CPU configuration topology of the machine is:"); 472 pr_info("The CPU configuration topology of the machine is:");
@@ -460,10 +476,9 @@ static int __init s390_topology_init(void)
460 alloc_masks(info, &socket_info, 1); 476 alloc_masks(info, &socket_info, 1);
461 alloc_masks(info, &book_info, 2); 477 alloc_masks(info, &book_info, 2);
462 alloc_masks(info, &drawer_info, 3); 478 alloc_masks(info, &drawer_info, 3);
463 set_sched_topology(s390_topology); 479out:
464 return 0; 480 __arch_update_cpu_topology();
465} 481}
466early_initcall(s390_topology_init);
467 482
468static int __init topology_init(void) 483static int __init topology_init(void)
469{ 484{
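
cpu_topology becomes a plain NR_CPUS array instead of a per-cpu variable,
which is what lets topology_init_early() fill it before the per-cpu areas
exist; all accesses shrink to plain indexing. A sketch (the debug helper is
hypothetical):

    static void show_topo_sketch(int cpu)
    {
            struct cpu_topology_s390 *topo = &cpu_topology[cpu];

            pr_debug("cpu %d: socket %d book %d drawer %d\n", cpu,
                     topo->socket_id, topo->book_id, topo->drawer_id);
    }
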
diff --git a/arch/s390/kernel/vdso32/clock_gettime.S b/arch/s390/kernel/vdso32/clock_gettime.S
index 5eec9afbb5b5..a5769b83d90e 100644
--- a/arch/s390/kernel/vdso32/clock_gettime.S
+++ b/arch/s390/kernel/vdso32/clock_gettime.S
@@ -99,8 +99,27 @@ __kernel_clock_gettime:
99 tml %r4,0x0001 /* pending update ? loop */ 99 tml %r4,0x0001 /* pending update ? loop */
100 jnz 11b 100 jnz 11b
101 stcke 0(%r15) /* Store TOD clock */ 101 stcke 0(%r15) /* Store TOD clock */
102 lm %r0,%r1,1(%r15) 102 lm %r0,%r1,__VDSO_TS_END(%r5) /* TOD steering end time */
103 s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ 103 s %r0,1(%r15) /* no - ts_steering_end */
104 sl %r1,5(%r15)
105 brc 3,22f
106 ahi %r0,-1
10722: ltr %r0,%r0 /* past end of steering? */
108 jm 24f
109 srdl %r0,15 /* 1 per 2^16 */
110 tm __VDSO_TS_DIR+3(%r5),0x01 /* steering direction? */
111 jz 23f
112 lcr %r0,%r0 /* negative TOD offset */
113 lcr %r1,%r1
114 je 23f
115 ahi %r0,-1
11623: a %r0,1(%r15) /* add TOD timestamp */
117 al %r1,5(%r15)
118 brc 12,25f
119 ahi %r0,1
120 j 25f
12124: lm %r0,%r1,1(%r15) /* load TOD timestamp */
12225: s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
104 sl %r1,__VDSO_XTIME_STAMP+4(%r5) 123 sl %r1,__VDSO_XTIME_STAMP+4(%r5)
105 brc 3,12f 124 brc 3,12f
106 ahi %r0,-1 125 ahi %r0,-1
diff --git a/arch/s390/kernel/vdso32/gettimeofday.S b/arch/s390/kernel/vdso32/gettimeofday.S
index 719de6186b20..63b86dceb0bf 100644
--- a/arch/s390/kernel/vdso32/gettimeofday.S
+++ b/arch/s390/kernel/vdso32/gettimeofday.S
@@ -31,8 +31,27 @@ __kernel_gettimeofday:
31 tml %r4,0x0001 /* pending update ? loop */ 31 tml %r4,0x0001 /* pending update ? loop */
32 jnz 1b 32 jnz 1b
33 stcke 0(%r15) /* Store TOD clock */ 33 stcke 0(%r15) /* Store TOD clock */
34 lm %r0,%r1,1(%r15) 34 lm %r0,%r1,__VDSO_TS_END(%r5) /* TOD steering end time */
35 s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ 35 s %r0,1(%r15)
36 sl %r1,5(%r15)
37 brc 3,14f
38 ahi %r0,-1
3914: ltr %r0,%r0 /* past end of steering? */
40 jm 16f
41 srdl %r0,15 /* 1 per 2^16 */
42 tm __VDSO_TS_DIR+3(%r5),0x01 /* steering direction? */
43 jz 15f
44 lcr %r0,%r0 /* negative TOD offset */
45 lcr %r1,%r1
46 je 15f
47 ahi %r0,-1
4815: a %r0,1(%r15) /* add TOD timestamp */
49 al %r1,5(%r15)
50 brc 12,17f
51 ahi %r0,1
52 j 17f
5316: lm %r0,%r1,1(%r15) /* load TOD timestamp */
5417: s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
36 sl %r1,__VDSO_XTIME_STAMP+4(%r5) 55 sl %r1,__VDSO_XTIME_STAMP+4(%r5)
37 brc 3,3f 56 brc 3,3f
38 ahi %r0,-1 57 ahi %r0,-1
diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S
index 61541fb93dc6..9c3b12626dba 100644
--- a/arch/s390/kernel/vdso64/clock_gettime.S
+++ b/arch/s390/kernel/vdso64/clock_gettime.S
@@ -83,8 +83,17 @@ __kernel_clock_gettime:
83 tmll %r4,0x0001 /* pending update ? loop */ 83 tmll %r4,0x0001 /* pending update ? loop */
84 jnz 5b 84 jnz 5b
85 stcke 0(%r15) /* Store TOD clock */ 85 stcke 0(%r15) /* Store TOD clock */
86 lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
87 lg %r1,1(%r15) 86 lg %r1,1(%r15)
87 lg %r0,__VDSO_TS_END(%r5) /* TOD steering end time */
88 slgr %r0,%r1 /* now - ts_steering_end */
89 ltgr %r0,%r0 /* past end of steering ? */
90 jm 17f
91 srlg %r0,%r0,15 /* 1 per 2^16 */
92 tm __VDSO_TS_DIR+3(%r5),0x01 /* steering direction? */
93 jz 18f
94 lcgr %r0,%r0 /* negative TOD offset */
9518: algr %r1,%r0 /* add steering offset */
9617: lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
88 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ 97 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
89 msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */ 98 msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
90 alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */ 99 alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
diff --git a/arch/s390/kernel/vdso64/gettimeofday.S b/arch/s390/kernel/vdso64/gettimeofday.S
index 6ce46707663c..b02e62f3bc12 100644
--- a/arch/s390/kernel/vdso64/gettimeofday.S
+++ b/arch/s390/kernel/vdso64/gettimeofday.S
@@ -31,7 +31,16 @@ __kernel_gettimeofday:
31 jnz 0b 31 jnz 0b
32 stcke 0(%r15) /* Store TOD clock */ 32 stcke 0(%r15) /* Store TOD clock */
33 lg %r1,1(%r15) 33 lg %r1,1(%r15)
34 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ 34 lg %r0,__VDSO_TS_END(%r5) /* TOD steering end time */
35 slgr %r0,%r1 /* now - ts_steering_end */
36 ltgr %r0,%r0 /* past end of steering ? */
37 jm 6f
38 srlg %r0,%r0,15 /* 1 per 2^16 */
39 tm __VDSO_TS_DIR+3(%r5),0x01 /* steering direction? */
40 jz 7f
41 lcgr %r0,%r0 /* negative TOD offset */
427: algr %r1,%r0 /* add steering offset */
436: sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
35 msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */ 44 msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
36 alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */ 45 alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
37 lg %r0,__VDSO_XTIME_SEC(%r5) /* tk->xtime_sec */ 46 lg %r0,__VDSO_XTIME_SEC(%r5) /* tk->xtime_sec */
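
All four vdso routines above fold the same steering correction into the
freshly stored TOD value before the usual cycle_last arithmetic. In C the
computation reads roughly as follows; ts_end and ts_dir are the vdso_data
fields the assembler reaches via __VDSO_TS_END and __VDSO_TS_DIR, and the
function itself is a sketch:

    static u64 tod_steered_sketch(u64 now, u64 ts_end, int ts_dir)
    {
            u64 adj = ts_end - now;

            if ((s64) adj < 0)      /* steering window already over */
                    return now;
            adj >>= 15;             /* remaining correction, decays to 0 */
            return ts_dir ? now - adj : now + adj;
    }

The 31-bit variants need the extra lcr/ahi gymnastics only because the 64-bit
value lives in a register pair.
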
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 1bd5dde2d5a9..6b246aadf311 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -96,7 +96,6 @@ static void update_mt_scaling(void)
96 */ 96 */
97static int do_account_vtime(struct task_struct *tsk, int hardirq_offset) 97static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
98{ 98{
99 struct thread_info *ti = task_thread_info(tsk);
100 u64 timer, clock, user, system, steal; 99 u64 timer, clock, user, system, steal;
101 u64 user_scaled, system_scaled; 100 u64 user_scaled, system_scaled;
102 101
@@ -119,13 +118,13 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
119 time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies))) 118 time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
120 update_mt_scaling(); 119 update_mt_scaling();
121 120
122 user = S390_lowcore.user_timer - ti->user_timer; 121 user = S390_lowcore.user_timer - tsk->thread.user_timer;
123 S390_lowcore.steal_timer -= user; 122 S390_lowcore.steal_timer -= user;
124 ti->user_timer = S390_lowcore.user_timer; 123 tsk->thread.user_timer = S390_lowcore.user_timer;
125 124
126 system = S390_lowcore.system_timer - ti->system_timer; 125 system = S390_lowcore.system_timer - tsk->thread.system_timer;
127 S390_lowcore.steal_timer -= system; 126 S390_lowcore.steal_timer -= system;
128 ti->system_timer = S390_lowcore.system_timer; 127 tsk->thread.system_timer = S390_lowcore.system_timer;
129 128
130 user_scaled = user; 129 user_scaled = user;
131 system_scaled = system; 130 system_scaled = system;
@@ -153,15 +152,11 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
153 152
154void vtime_task_switch(struct task_struct *prev) 153void vtime_task_switch(struct task_struct *prev)
155{ 154{
156 struct thread_info *ti;
157
158 do_account_vtime(prev, 0); 155 do_account_vtime(prev, 0);
159 ti = task_thread_info(prev); 156 prev->thread.user_timer = S390_lowcore.user_timer;
160 ti->user_timer = S390_lowcore.user_timer; 157 prev->thread.system_timer = S390_lowcore.system_timer;
161 ti->system_timer = S390_lowcore.system_timer; 158 S390_lowcore.user_timer = current->thread.user_timer;
162 ti = task_thread_info(current); 159 S390_lowcore.system_timer = current->thread.system_timer;
163 S390_lowcore.user_timer = ti->user_timer;
164 S390_lowcore.system_timer = ti->system_timer;
165} 160}
166 161
167/* 162/*
@@ -181,7 +176,6 @@ void vtime_account_user(struct task_struct *tsk)
181 */ 176 */
182void vtime_account_irq_enter(struct task_struct *tsk) 177void vtime_account_irq_enter(struct task_struct *tsk)
183{ 178{
184 struct thread_info *ti = task_thread_info(tsk);
185 u64 timer, system, system_scaled; 179 u64 timer, system, system_scaled;
186 180
187 timer = S390_lowcore.last_update_timer; 181 timer = S390_lowcore.last_update_timer;
@@ -193,9 +187,9 @@ void vtime_account_irq_enter(struct task_struct *tsk)
193 time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies))) 187 time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
194 update_mt_scaling(); 188 update_mt_scaling();
195 189
196 system = S390_lowcore.system_timer - ti->system_timer; 190 system = S390_lowcore.system_timer - tsk->thread.system_timer;
197 S390_lowcore.steal_timer -= system; 191 S390_lowcore.steal_timer -= system;
198 ti->system_timer = S390_lowcore.system_timer; 192 tsk->thread.system_timer = S390_lowcore.system_timer;
199 system_scaled = system; 193 system_scaled = system;
200 /* Do MT utilization scaling */ 194 /* Do MT utilization scaling */
201 if (smp_cpu_mtid) { 195 if (smp_cpu_mtid) {
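
The vtime conversion leaves the accounting scheme itself untouched: the
lowcore timers are free-running totals, the per-task values are snapshots, and
the charge is the difference since the last snapshot; only the snapshot's home
moves from thread_info to thread_struct. The step as a sketch:

    static u64 charge_user_time_sketch(struct task_struct *tsk)
    {
            u64 user = S390_lowcore.user_timer - tsk->thread.user_timer;

            tsk->thread.user_timer = S390_lowcore.user_timer; /* new snapshot */
            return user;    /* amount to account to the task */
    }
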
diff --git a/arch/s390/lib/mem.S b/arch/s390/lib/mem.S
index be9fa65bfac4..7422a706f310 100644
--- a/arch/s390/lib/mem.S
+++ b/arch/s390/lib/mem.S
@@ -8,6 +8,45 @@
8#include <asm/export.h> 8#include <asm/export.h>
9 9
10/* 10/*
11 * void *memmove(void *dest, const void *src, size_t n)
12 */
13ENTRY(memmove)
14 ltgr %r4,%r4
15 lgr %r1,%r2
16 bzr %r14
17 clgr %r2,%r3
18 jnh .Lmemmove_forward
19 la %r5,0(%r4,%r3)
20 clgr %r2,%r5
21 jl .Lmemmove_reverse
22.Lmemmove_forward:
23 aghi %r4,-1
24 srlg %r0,%r4,8
25 ltgr %r0,%r0
26 jz .Lmemmove_rest
27.Lmemmove_loop:
28 mvc 0(256,%r1),0(%r3)
29 la %r1,256(%r1)
30 la %r3,256(%r3)
31 brctg %r0,.Lmemmove_loop
32.Lmemmove_rest:
33 larl %r5,.Lmemmove_mvc
34 ex %r4,0(%r5)
35 br %r14
36.Lmemmove_reverse:
37 aghi %r4,-1
38.Lmemmove_reverse_loop:
39 ic %r0,0(%r4,%r3)
40 stc %r0,0(%r4,%r1)
41 brctg %r4,.Lmemmove_reverse_loop
42 ic %r0,0(%r4,%r3)
43 stc %r0,0(%r4,%r1)
44 br %r14
45.Lmemmove_mvc:
46 mvc 0(1,%r1),0(%r3)
47EXPORT_SYMBOL(memmove)
48
49/*
11 * memset implementation 50 * memset implementation
12 * 51 *
13 * This code corresponds to the C construct below. We do distinguish 52 * This code corresponds to the C construct below. We do distinguish
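
The new memmove only copies backwards when it must. A C rendering of the
assembler above (mvc copies forward in chunks of up to 256 bytes, ic/stc copy
single bytes in reverse; this sketch ignores the chunking):

    void *memmove_sketch(void *dest, const void *src, size_t n)
    {
            unsigned char *d = dest;
            const unsigned char *s = src;

            if (!n)
                    return dest;
            if (d <= s || d >= s + n) {
                    while (n--)             /* the mvc path */
                            *d++ = *s++;
            } else {
                    while (n--)             /* the ic/stc path, reverse */
                            d[n] = s[n];
            }
            return dest;
    }

Forward copying is safe when dest is at or below src, or when the areas do not
overlap at all; only a copy with dest inside (src, src + n) has to run
backwards.
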
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 661d9fe63c43..d1faae5cdd12 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -733,6 +733,7 @@ block:
733 * return to userspace schedule() to block. */ 733 * return to userspace schedule() to block. */
734 __set_current_state(TASK_UNINTERRUPTIBLE); 734 __set_current_state(TASK_UNINTERRUPTIBLE);
735 set_tsk_need_resched(tsk); 735 set_tsk_need_resched(tsk);
736 set_preempt_need_resched();
736 } 737 }
737 } 738 }
738out: 739out:
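
Assuming the folded preempt_count scheme this series introduces (need_resched
mirrored as a bit in the per-CPU preempt count), setting TIF_NEED_RESCHED on
the task alone is no longer noticed by preempt_enable() on this CPU, hence the
extra call:

    __set_current_state(TASK_UNINTERRUPTIBLE);
    set_tsk_need_resched(tsk);      /* flag on the task */
    set_preempt_need_resched();     /* fold into preempt_count so the next
                                       preempt_enable() reschedules promptly */
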
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 1848292766ef..45becc8a44ec 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -34,7 +34,7 @@ static void __ref *vmem_alloc_pages(unsigned int order)
34 34
35 if (slab_is_available()) 35 if (slab_is_available())
36 return (void *)__get_free_pages(GFP_KERNEL, order); 36 return (void *)__get_free_pages(GFP_KERNEL, order);
37 return alloc_bootmem_align(size, size); 37 return (void *) memblock_alloc(size, size);
38} 38}
39 39
40static inline pud_t *vmem_pud_alloc(void) 40static inline pud_t *vmem_pud_alloc(void)
@@ -61,17 +61,16 @@ pmd_t *vmem_pmd_alloc(void)
61 61
62pte_t __ref *vmem_pte_alloc(void) 62pte_t __ref *vmem_pte_alloc(void)
63{ 63{
64 unsigned long size = PTRS_PER_PTE * sizeof(pte_t);
64 pte_t *pte; 65 pte_t *pte;
65 66
66 if (slab_is_available()) 67 if (slab_is_available())
67 pte = (pte_t *) page_table_alloc(&init_mm); 68 pte = (pte_t *) page_table_alloc(&init_mm);
68 else 69 else
69 pte = alloc_bootmem_align(PTRS_PER_PTE * sizeof(pte_t), 70 pte = (pte_t *) memblock_alloc(size, size);
70 PTRS_PER_PTE * sizeof(pte_t));
71 if (!pte) 71 if (!pte)
72 return NULL; 72 return NULL;
73 clear_table((unsigned long *) pte, _PAGE_INVALID, 73 clear_table((unsigned long *) pte, _PAGE_INVALID, size);
74 PTRS_PER_PTE * sizeof(pte_t));
75 return pte; 74 return pte;
76} 75}
77 76
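
vmem_pte_alloc() now names its size once and asks memblock for a naturally
aligned block; with the usual s390 values (an assumption here) that is a 2 KB
page table:

    unsigned long size = PTRS_PER_PTE * sizeof(pte_t);  /* 256 * 8 = 2 KB */
    pte_t *pte = (pte_t *) memblock_alloc(size, size);  /* size-aligned */
    clear_table((unsigned long *) pte, _PAGE_INVALID, size);
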
diff --git a/arch/s390/numa/mode_emu.c b/arch/s390/numa/mode_emu.c
index 37e0bb835516..cfd08384f0ab 100644
--- a/arch/s390/numa/mode_emu.c
+++ b/arch/s390/numa/mode_emu.c
@@ -21,6 +21,7 @@
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/cpumask.h> 22#include <linux/cpumask.h>
23#include <linux/memblock.h> 23#include <linux/memblock.h>
24#include <linux/bootmem.h>
24#include <linux/node.h> 25#include <linux/node.h>
25#include <linux/memory.h> 26#include <linux/memory.h>
26#include <linux/slab.h> 27#include <linux/slab.h>
@@ -307,13 +308,11 @@ fail:
307/* 308/*
308 * Allocate and initialize core to node mapping 309 * Allocate and initialize core to node mapping
309 */ 310 */
310static void create_core_to_node_map(void) 311static void __ref create_core_to_node_map(void)
311{ 312{
312 int i; 313 int i;
313 314
314 emu_cores = kzalloc(sizeof(*emu_cores), GFP_KERNEL); 315 emu_cores = memblock_virt_alloc(sizeof(*emu_cores), 8);
315 if (emu_cores == NULL)
316 panic("Could not allocate cores to node memory");
317 for (i = 0; i < ARRAY_SIZE(emu_cores->to_node_id); i++) 316 for (i = 0; i < ARRAY_SIZE(emu_cores->to_node_id); i++)
318 emu_cores->to_node_id[i] = NODE_ID_FREE; 317 emu_cores->to_node_id[i] = NODE_ID_FREE;
319} 318}
@@ -354,13 +353,13 @@ static struct toptree *toptree_from_topology(void)
354 353
355 phys = toptree_new(TOPTREE_ID_PHYS, 1); 354 phys = toptree_new(TOPTREE_ID_PHYS, 1);
356 355
357 for_each_online_cpu(cpu) { 356 for_each_cpu(cpu, &cpus_with_topology) {
358 top = &per_cpu(cpu_topology, cpu); 357 top = &cpu_topology[cpu];
359 node = toptree_get_child(phys, 0); 358 node = toptree_get_child(phys, 0);
360 drawer = toptree_get_child(node, top->drawer_id); 359 drawer = toptree_get_child(node, top->drawer_id);
361 book = toptree_get_child(drawer, top->book_id); 360 book = toptree_get_child(drawer, top->book_id);
362 mc = toptree_get_child(book, top->socket_id); 361 mc = toptree_get_child(book, top->socket_id);
363 core = toptree_get_child(mc, top->core_id); 362 core = toptree_get_child(mc, smp_get_base_cpu(cpu));
364 if (!drawer || !book || !mc || !core) 363 if (!drawer || !book || !mc || !core)
365 panic("NUMA emulation could not allocate memory"); 364 panic("NUMA emulation could not allocate memory");
366 cpumask_set_cpu(cpu, &core->mask); 365 cpumask_set_cpu(cpu, &core->mask);
@@ -378,7 +377,7 @@ static void topology_add_core(struct toptree *core)
378 int cpu; 377 int cpu;
379 378
380 for_each_cpu(cpu, &core->mask) { 379 for_each_cpu(cpu, &core->mask) {
381 top = &per_cpu(cpu_topology, cpu); 380 top = &cpu_topology[cpu];
382 cpumask_copy(&top->thread_mask, &core->mask); 381 cpumask_copy(&top->thread_mask, &core->mask);
383 cpumask_copy(&top->core_mask, &core_mc(core)->mask); 382 cpumask_copy(&top->core_mask, &core_mc(core)->mask);
384 cpumask_copy(&top->book_mask, &core_book(core)->mask); 383 cpumask_copy(&top->book_mask, &core_book(core)->mask);
@@ -425,6 +424,27 @@ static void print_node_to_core_map(void)
425 } 424 }
426} 425}
427 426
427static void pin_all_possible_cpus(void)
428{
429 int core_id, node_id, cpu;
430 static int initialized;
431
432 if (initialized)
433 return;
434 print_node_to_core_map();
435 node_id = 0;
436 for_each_possible_cpu(cpu) {
437 core_id = smp_get_base_cpu(cpu);
438 if (emu_cores->to_node_id[core_id] != NODE_ID_FREE)
439 continue;
440 pin_core_to_node(core_id, node_id);
441 cpu_topology[cpu].node_id = node_id;
442 node_id = (node_id + 1) % emu_nodes;
443 }
444 print_node_to_core_map();
445 initialized = 1;
446}
447
428/* 448/*
429 * Transfer physical topology into a NUMA topology and modify CPU masks 449 * Transfer physical topology into a NUMA topology and modify CPU masks
430 * according to the NUMA topology. 450 * according to the NUMA topology.
@@ -442,7 +462,7 @@ static void emu_update_cpu_topology(void)
442 toptree_free(phys); 462 toptree_free(phys);
443 toptree_to_topology(numa); 463 toptree_to_topology(numa);
444 toptree_free(numa); 464 toptree_free(numa);
445 print_node_to_core_map(); 465 pin_all_possible_cpus();
446} 466}
447 467
448/* 468/*
diff --git a/arch/s390/numa/toptree.c b/arch/s390/numa/toptree.c
index 902d350d859a..26f622b1cd11 100644
--- a/arch/s390/numa/toptree.c
+++ b/arch/s390/numa/toptree.c
@@ -7,6 +7,7 @@
7 */ 7 */
8 8
9#include <linux/kernel.h> 9#include <linux/kernel.h>
10#include <linux/bootmem.h>
10#include <linux/cpumask.h> 11#include <linux/cpumask.h>
11#include <linux/list.h> 12#include <linux/list.h>
12#include <linux/list_sort.h> 13#include <linux/list_sort.h>
@@ -25,10 +26,14 @@
25 * RETURNS: 26 * RETURNS:
26 * Pointer to the new tree node or NULL on error 27 * Pointer to the new tree node or NULL on error
27 */ 28 */
28struct toptree *toptree_alloc(int level, int id) 29struct toptree __ref *toptree_alloc(int level, int id)
29{ 30{
30 struct toptree *res = kzalloc(sizeof(struct toptree), GFP_KERNEL); 31 struct toptree *res;
31 32
33 if (slab_is_available())
34 res = kzalloc(sizeof(*res), GFP_KERNEL);
35 else
36 res = memblock_virt_alloc(sizeof(*res), 8);
32 if (!res) 37 if (!res)
33 return res; 38 return res;
34 39
@@ -65,7 +70,7 @@ static void toptree_remove(struct toptree *cand)
65 * cleanly using toptree_remove. Possible children are freed 70 * cleanly using toptree_remove. Possible children are freed
66 * recursively. In the end @cand itself is freed. 71 * recursively. In the end @cand itself is freed.
67 */ 72 */
68void toptree_free(struct toptree *cand) 73void __ref toptree_free(struct toptree *cand)
69{ 74{
70 struct toptree *child, *tmp; 75 struct toptree *child, *tmp;
71 76
@@ -73,7 +78,10 @@ void toptree_free(struct toptree *cand)
73 toptree_remove(cand); 78 toptree_remove(cand);
74 toptree_for_each_child_safe(child, tmp, cand) 79 toptree_for_each_child_safe(child, tmp, cand)
75 toptree_free(child); 80 toptree_free(child);
76 kfree(cand); 81 if (slab_is_available())
82 kfree(cand);
83 else
84 memblock_free_early((unsigned long)cand, sizeof(*cand));
77} 85}
78 86
79/** 87/**
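
toptree_alloc() and toptree_free() now pick their allocator by boot phase, the
same pattern vmem.c uses above; the __ref annotations tell modpost that the
references into memblock code are intentional. The pattern, extracted as a
sketch (the helper name is made up):

    static void * __ref alloc_early_or_late(size_t size)
    {
            if (slab_is_available())
                    return kzalloc(size, GFP_KERNEL);
            return memblock_virt_alloc(size, 8);  /* pre-slab, 8-byte aligned */
    }
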
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 15ffc19c8c0c..64e1734bebb7 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -722,6 +722,11 @@ struct dev_pm_ops pcibios_pm_ops = {
722 722
723static int zpci_alloc_domain(struct zpci_dev *zdev) 723static int zpci_alloc_domain(struct zpci_dev *zdev)
724{ 724{
725 if (zpci_unique_uid) {
726 zdev->domain = (u16) zdev->uid;
727 return 0;
728 }
729
725 spin_lock(&zpci_domain_lock); 730 spin_lock(&zpci_domain_lock);
726 zdev->domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES); 731 zdev->domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
727 if (zdev->domain == ZPCI_NR_DEVICES) { 732 if (zdev->domain == ZPCI_NR_DEVICES) {
@@ -735,6 +740,9 @@ static int zpci_alloc_domain(struct zpci_dev *zdev)
735 740
736static void zpci_free_domain(struct zpci_dev *zdev) 741static void zpci_free_domain(struct zpci_dev *zdev)
737{ 742{
743 if (zpci_unique_uid)
744 return;
745
738 spin_lock(&zpci_domain_lock); 746 spin_lock(&zpci_domain_lock);
739 clear_bit(zdev->domain, zpci_domain); 747 clear_bit(zdev->domain, zpci_domain);
740 spin_unlock(&zpci_domain_lock); 748 spin_unlock(&zpci_domain_lock);
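
With unique UIDs reported by CLP (see pci_clp.c below), the PCI domain number
simply becomes the firmware-defined UID instead of the first free bit in the
zpci_domain bitmap, so a function keeps its name across unplug and replug. A
device with UID 0x3 would then appear as something like 0003:00:00.0; the
exact form is an assumption about the usual zPCI domain:bus:dev.fn layout.

    if (zpci_unique_uid) {
            zdev->domain = (u16) zdev->uid; /* stable, firmware-defined */
            return 0;
    }
    /* otherwise: dynamic domain from the zpci_domain bitmap, as before */
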
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index 1a4512c8544a..e3ef63b36b5a 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -22,6 +22,8 @@
22#include <asm/clp.h> 22#include <asm/clp.h>
23#include <uapi/asm/clp.h> 23#include <uapi/asm/clp.h>
24 24
25bool zpci_unique_uid;
26
25static inline void zpci_err_clp(unsigned int rsp, int rc) 27static inline void zpci_err_clp(unsigned int rsp, int rc)
26{ 28{
27 struct { 29 struct {
@@ -315,6 +317,7 @@ static int clp_list_pci(struct clp_req_rsp_list_pci *rrb,
315 goto out; 317 goto out;
316 } 318 }
317 319
320 zpci_unique_uid = rrb->response.uid_checking;
318 WARN_ON_ONCE(rrb->response.entry_size != 321 WARN_ON_ONCE(rrb->response.entry_size !=
319 sizeof(struct clp_fh_list_entry)); 322 sizeof(struct clp_fh_list_entry));
320 323
diff --git a/arch/s390/pci/pci_debug.c b/arch/s390/pci/pci_debug.c
index 38993b156924..c2f786f0ea06 100644
--- a/arch/s390/pci/pci_debug.c
+++ b/arch/s390/pci/pci_debug.c
@@ -69,7 +69,7 @@ static void pci_sw_counter_show(struct seq_file *m)
69 int i; 69 int i;
70 70
71 for (i = 0; i < ARRAY_SIZE(pci_sw_names); i++, counter++) 71 for (i = 0; i < ARRAY_SIZE(pci_sw_names); i++, counter++)
72 seq_printf(m, "%26s:\t%llu\n", pci_sw_names[i], 72 seq_printf(m, "%26s:\t%lu\n", pci_sw_names[i],
73 atomic64_read(counter)); 73 atomic64_read(counter));
74} 74}
75 75
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 6b2f72f523b9..1d7a9c71944a 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -181,14 +181,17 @@ static int __dma_purge_tlb(struct zpci_dev *zdev, dma_addr_t dma_addr,
181 /* 181 /*
182 * With zdev->tlb_refresh == 0, rpcit is not required to establish new 182 * With zdev->tlb_refresh == 0, rpcit is not required to establish new
183 * translations when previously invalid translation-table entries are 183 * translations when previously invalid translation-table entries are
184 * validated. With lazy unmap, it also is skipped for previously valid 184 * validated. With lazy unmap, rpcit is skipped for previously valid
185 * entries, but a global rpcit is then required before any address can 185 * entries, but a global rpcit is then required before any address can
186 * be re-used, i.e. after each iommu bitmap wrap-around. 186 * be re-used, i.e. after each iommu bitmap wrap-around.
187 */ 187 */
188 if (!zdev->tlb_refresh && 188 if ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID) {
189 (!s390_iommu_strict || 189 if (!zdev->tlb_refresh)
190 ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID))) 190 return 0;
191 return 0; 191 } else {
192 if (!s390_iommu_strict)
193 return 0;
194 }
192 195
193 return zpci_refresh_trans((u64) zdev->fh << 32, dma_addr, 196 return zpci_refresh_trans((u64) zdev->fh << 32, dma_addr,
194 PAGE_ALIGN(size)); 197 PAGE_ALIGN(size));
@@ -257,7 +260,7 @@ static dma_addr_t dma_alloc_address(struct device *dev, int size)
257 spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags); 260 spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
258 offset = __dma_alloc_iommu(dev, zdev->next_bit, size); 261 offset = __dma_alloc_iommu(dev, zdev->next_bit, size);
259 if (offset == -1) { 262 if (offset == -1) {
260 if (!zdev->tlb_refresh && !s390_iommu_strict) { 263 if (!s390_iommu_strict) {
261 /* global flush before DMA addresses are reused */ 264 /* global flush before DMA addresses are reused */
262 if (zpci_refresh_global(zdev)) 265 if (zpci_refresh_global(zdev))
263 goto out_error; 266 goto out_error;
@@ -292,7 +295,7 @@ static void dma_free_address(struct device *dev, dma_addr_t dma_addr, int size)
292 if (!zdev->iommu_bitmap) 295 if (!zdev->iommu_bitmap)
293 goto out; 296 goto out;
294 297
295 if (zdev->tlb_refresh || s390_iommu_strict) 298 if (s390_iommu_strict)
296 bitmap_clear(zdev->iommu_bitmap, offset, size); 299 bitmap_clear(zdev->iommu_bitmap, offset, size);
297 else 300 else
298 bitmap_set(zdev->lazy_bitmap, offset, size); 301 bitmap_set(zdev->lazy_bitmap, offset, size);
@@ -388,8 +391,6 @@ static void *s390_dma_alloc(struct device *dev, size_t size,
388 return NULL; 391 return NULL;
389 392
390 pa = page_to_phys(page); 393 pa = page_to_phys(page);
391 memset((void *) pa, 0, size);
392
393 map = s390_dma_map_pages(dev, page, 0, size, DMA_BIDIRECTIONAL, 0); 394 map = s390_dma_map_pages(dev, page, 0, size, DMA_BIDIRECTIONAL, 0);
394 if (dma_mapping_error(dev, map)) { 395 if (dma_mapping_error(dev, map)) {
395 free_pages(pa, get_order(size)); 396 free_pages(pa, get_order(size));
@@ -419,6 +420,7 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
419 size_t size, dma_addr_t *handle, 420 size_t size, dma_addr_t *handle,
420 enum dma_data_direction dir) 421 enum dma_data_direction dir)
421{ 422{
423 unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
422 struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); 424 struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
423 dma_addr_t dma_addr_base, dma_addr; 425 dma_addr_t dma_addr_base, dma_addr;
424 int flags = ZPCI_PTE_VALID; 426 int flags = ZPCI_PTE_VALID;
@@ -426,8 +428,7 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
426 unsigned long pa = 0; 428 unsigned long pa = 0;
427 int ret; 429 int ret;
428 430
429 size = PAGE_ALIGN(size); 431 dma_addr_base = dma_alloc_address(dev, nr_pages);
430 dma_addr_base = dma_alloc_address(dev, size >> PAGE_SHIFT);
431 if (dma_addr_base == DMA_ERROR_CODE) 432 if (dma_addr_base == DMA_ERROR_CODE)
432 return -ENOMEM; 433 return -ENOMEM;
433 434
@@ -436,26 +437,27 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
436 flags |= ZPCI_TABLE_PROTECTED; 437 flags |= ZPCI_TABLE_PROTECTED;
437 438
438 for (s = sg; dma_addr < dma_addr_base + size; s = sg_next(s)) { 439 for (s = sg; dma_addr < dma_addr_base + size; s = sg_next(s)) {
439 pa = page_to_phys(sg_page(s)) + s->offset; 440 pa = page_to_phys(sg_page(s));
440 ret = __dma_update_trans(zdev, pa, dma_addr, s->length, flags); 441 ret = __dma_update_trans(zdev, pa, dma_addr,
442 s->offset + s->length, flags);
441 if (ret) 443 if (ret)
442 goto unmap; 444 goto unmap;
443 445
444 dma_addr += s->length; 446 dma_addr += s->offset + s->length;
445 } 447 }
446 ret = __dma_purge_tlb(zdev, dma_addr_base, size, flags); 448 ret = __dma_purge_tlb(zdev, dma_addr_base, size, flags);
447 if (ret) 449 if (ret)
448 goto unmap; 450 goto unmap;
449 451
450 *handle = dma_addr_base; 452 *handle = dma_addr_base;
451 atomic64_add(size >> PAGE_SHIFT, &zdev->mapped_pages); 453 atomic64_add(nr_pages, &zdev->mapped_pages);
452 454
453 return ret; 455 return ret;
454 456
455unmap: 457unmap:
456 dma_update_trans(zdev, 0, dma_addr_base, dma_addr - dma_addr_base, 458 dma_update_trans(zdev, 0, dma_addr_base, dma_addr - dma_addr_base,
457 ZPCI_PTE_INVALID); 459 ZPCI_PTE_INVALID);
458 dma_free_address(dev, dma_addr_base, size >> PAGE_SHIFT); 460 dma_free_address(dev, dma_addr_base, nr_pages);
459 zpci_err("map error:\n"); 461 zpci_err("map error:\n");
460 zpci_err_dma(ret, pa); 462 zpci_err_dma(ret, pa);
461 return ret; 463 return ret;
@@ -564,7 +566,7 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
564 rc = -ENOMEM; 566 rc = -ENOMEM;
565 goto free_dma_table; 567 goto free_dma_table;
566 } 568 }
567 if (!zdev->tlb_refresh && !s390_iommu_strict) { 569 if (!s390_iommu_strict) {
568 zdev->lazy_bitmap = vzalloc(zdev->iommu_pages / 8); 570 zdev->lazy_bitmap = vzalloc(zdev->iommu_pages / 8);
569 if (!zdev->lazy_bitmap) { 571 if (!zdev->lazy_bitmap) {
570 rc = -ENOMEM; 572 rc = -ENOMEM;
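
The rewritten __dma_purge_tlb() separates the two directions of a translation
update, and the lazy-unmap bookkeeping is now keyed off s390_iommu_strict
alone. The resulting decision table, as a sketch:

    /*
     *   PTE transition     tlb_refresh == 0     tlb_refresh == 1
     *   invalid -> valid   skip rpcit           rpcit
     *   valid -> invalid   skip if !strict      skip if !strict
     *
     * In the lazy (!strict) case the flush is deferred to one global
     * rpcit per iommu bitmap wrap-around, issued in dma_alloc_address().
     */

__s390_dma_map_sg() additionally maps each element from its page start and
advances the DMA cursor by s->offset + s->length, so a non-zero offset in a
scatterlist element no longer skews the page-aligned mapping.
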
diff --git a/arch/s390/tools/Makefile b/arch/s390/tools/Makefile
index 6d9814c9df2b..4b5e1e499527 100644
--- a/arch/s390/tools/Makefile
+++ b/arch/s390/tools/Makefile
@@ -9,7 +9,5 @@ define filechk_facilities.h
9 $(obj)/gen_facilities 9 $(obj)/gen_facilities
10endef 10endef
11 11
12$(obj)/gen_facilities.o: $(srctree)/arch/s390/tools/gen_facilities.c
13
14include/generated/facilities.h: $(obj)/gen_facilities FORCE 12include/generated/facilities.h: $(obj)/gen_facilities FORCE
15 $(call filechk,facilities.h) 13 $(call filechk,facilities.h)
diff --git a/arch/s390/tools/gen_facilities.c b/arch/s390/tools/gen_facilities.c
index fe4e6c910dd7..8cc53b1e6d03 100644
--- a/arch/s390/tools/gen_facilities.c
+++ b/arch/s390/tools/gen_facilities.c
@@ -7,13 +7,83 @@
7 * 7 *
8 */ 8 */
9 9
10#define S390_GEN_FACILITIES_C
11
12#include <strings.h> 10#include <strings.h>
13#include <string.h> 11#include <string.h>
14#include <stdlib.h> 12#include <stdlib.h>
15#include <stdio.h> 13#include <stdio.h>
16#include <asm/facilities_src.h> 14
15struct facility_def {
16 char *name;
17 int *bits;
18};
19
20static struct facility_def facility_defs[] = {
21 {
22 /*
23 * FACILITIES_ALS contains the list of facilities that are
24 * required to run a kernel that is compiled e.g. with
25 * -march=<machine>.
26 */
27 .name = "FACILITIES_ALS",
28 .bits = (int[]){
29#ifdef CONFIG_HAVE_MARCH_Z900_FEATURES
30 0, /* N3 instructions */
31 1, /* z/Arch mode installed */
32#endif
33#ifdef CONFIG_HAVE_MARCH_Z990_FEATURES
34 18, /* long displacement facility */
35#endif
36#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
37 7, /* stfle */
38 17, /* message security assist */
39 21, /* extended-immediate facility */
40 25, /* store clock fast */
41#endif
42#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
43 27, /* mvcos */
44 32, /* compare and swap and store */
45 33, /* compare and swap and store 2 */
46 34, /* general extension facility */
47 35, /* execute extensions */
48#endif
49#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
50 45, /* fast-BCR, etc. */
51#endif
52#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
53 49, /* misc-instruction-extensions */
54 52, /* interlocked facility 2 */
55#endif
56#ifdef CONFIG_HAVE_MARCH_Z13_FEATURES
57 53, /* load-and-zero-rightmost-byte, etc. */
58#endif
59 -1 /* END */
60 }
61 },
62 {
63 .name = "FACILITIES_KVM",
64 .bits = (int[]){
65 0, /* N3 instructions */
66 1, /* z/Arch mode installed */
67 2, /* z/Arch mode active */
68 3, /* DAT-enhancement */
69 4, /* idte segment table */
70 5, /* idte region table */
71 6, /* ASN-and-LX reuse */
72 7, /* stfle */
73 8, /* enhanced-DAT 1 */
74 9, /* sense-running-status */
75 10, /* conditional sske */
76 13, /* ipte-range */
77 14, /* nonquiescing key-setting */
78 73, /* transactional execution */
79 75, /* access-exception-fetch/store indication */
80 76, /* msa extension 3 */
81 77, /* msa extension 4 */
82 78, /* enhanced-DAT 2 */
83 -1 /* END */
84 }
85 },
86};
17 87
18static void print_facility_list(struct facility_def *def) 88static void print_facility_list(struct facility_def *def)
19{ 89{
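
Inlining the tables makes the generator self-contained and drops the
S390_GEN_FACILITIES_C guard that facilities_src.h needed. print_facility_list()
then boils each table down to stfle-ordered 64-bit constants; a sketch of that
folding, where facility bit 0 is the most significant bit of word 0 (the
helper name and signature are assumptions):

    static void fold_bits_sketch(const int *bits, unsigned long long *words)
    {
            int i;          /* words[] must be zeroed by the caller */

            for (i = 0; bits[i] != -1; i++)
                    words[bits[i] / 64] |= 1ULL << (63 - bits[i] % 64);
    }
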
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
index 50b8b7d54416..530d0e49f2ed 100644
--- a/drivers/pci/hotplug/s390_pci_hpc.c
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
@@ -5,12 +5,13 @@
5 * 5 *
6 * Author(s): 6 * Author(s):
7 * Jan Glauber <jang@linux.vnet.ibm.com> 7 * Jan Glauber <jang@linux.vnet.ibm.com>
8 *
9 * License: GPL
8 */ 10 */
9 11
10#define KMSG_COMPONENT "zpci" 12#define KMSG_COMPONENT "zpci"
11#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 13#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
12 14
13#include <linux/module.h>
14#include <linux/kernel.h> 15#include <linux/kernel.h>
15#include <linux/slab.h> 16#include <linux/slab.h>
16#include <linux/pci.h> 17#include <linux/pci.h>
@@ -21,10 +22,6 @@
21#define SLOT_NAME_SIZE 10 22#define SLOT_NAME_SIZE 10
22static LIST_HEAD(s390_hotplug_slot_list); 23static LIST_HEAD(s390_hotplug_slot_list);
23 24
24MODULE_AUTHOR("Jan Glauber <jang@linux.vnet.ibm.com");
25MODULE_DESCRIPTION("Hot Plug PCI Controller for System z");
26MODULE_LICENSE("GPL");
27
28static int zpci_fn_configured(enum zpci_state state) 25static int zpci_fn_configured(enum zpci_state state)
29{ 26{
30 return state == ZPCI_FN_STATE_CONFIGURED || 27 return state == ZPCI_FN_STATE_CONFIGURED ||
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 1de089019268..0e3fdfdbd098 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -69,6 +69,7 @@ static void dasd_block_tasklet(struct dasd_block *);
69static void do_kick_device(struct work_struct *); 69static void do_kick_device(struct work_struct *);
70static void do_restore_device(struct work_struct *); 70static void do_restore_device(struct work_struct *);
71static void do_reload_device(struct work_struct *); 71static void do_reload_device(struct work_struct *);
72static void do_requeue_requests(struct work_struct *);
72static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *); 73static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
73static void dasd_device_timeout(unsigned long); 74static void dasd_device_timeout(unsigned long);
74static void dasd_block_timeout(unsigned long); 75static void dasd_block_timeout(unsigned long);
@@ -125,6 +126,7 @@ struct dasd_device *dasd_alloc_device(void)
125 INIT_WORK(&device->kick_work, do_kick_device); 126 INIT_WORK(&device->kick_work, do_kick_device);
126 INIT_WORK(&device->restore_device, do_restore_device); 127 INIT_WORK(&device->restore_device, do_restore_device);
127 INIT_WORK(&device->reload_device, do_reload_device); 128 INIT_WORK(&device->reload_device, do_reload_device);
129 INIT_WORK(&device->requeue_requests, do_requeue_requests);
128 device->state = DASD_STATE_NEW; 130 device->state = DASD_STATE_NEW;
129 device->target = DASD_STATE_NEW; 131 device->target = DASD_STATE_NEW;
130 mutex_init(&device->state_mutex); 132 mutex_init(&device->state_mutex);
@@ -1448,9 +1450,9 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
1448 cqr->starttime = jiffies; 1450 cqr->starttime = jiffies;
1449 cqr->retries--; 1451 cqr->retries--;
1450 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { 1452 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
1451 cqr->lpm &= device->path_data.opm; 1453 cqr->lpm &= dasd_path_get_opm(device);
1452 if (!cqr->lpm) 1454 if (!cqr->lpm)
1453 cqr->lpm = device->path_data.opm; 1455 cqr->lpm = dasd_path_get_opm(device);
1454 } 1456 }
1455 if (cqr->cpmode == 1) { 1457 if (cqr->cpmode == 1) {
1456 rc = ccw_device_tm_start(device->cdev, cqr->cpaddr, 1458 rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
@@ -1483,8 +1485,8 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
1483 DBF_DEV_EVENT(DBF_WARNING, device, 1485 DBF_DEV_EVENT(DBF_WARNING, device,
1484 "start_IO: selected paths gone (%x)", 1486 "start_IO: selected paths gone (%x)",
1485 cqr->lpm); 1487 cqr->lpm);
1486 } else if (cqr->lpm != device->path_data.opm) { 1488 } else if (cqr->lpm != dasd_path_get_opm(device)) {
1487 cqr->lpm = device->path_data.opm; 1489 cqr->lpm = dasd_path_get_opm(device);
1488 DBF_DEV_EVENT(DBF_DEBUG, device, "%s", 1490 DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
1489 "start_IO: selected paths gone," 1491 "start_IO: selected paths gone,"
1490 " retry on all paths"); 1492 " retry on all paths");
@@ -1493,11 +1495,10 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
1493 "start_IO: all paths in opm gone," 1495 "start_IO: all paths in opm gone,"
1494 " do path verification"); 1496 " do path verification");
1495 dasd_generic_last_path_gone(device); 1497 dasd_generic_last_path_gone(device);
1496 device->path_data.opm = 0; 1498 dasd_path_no_path(device);
1497 device->path_data.ppm = 0; 1499 dasd_path_set_tbvpm(device,
1498 device->path_data.npm = 0; 1500 ccw_device_get_path_mask(
1499 device->path_data.tbvpm = 1501 device->cdev));
1500 ccw_device_get_path_mask(device->cdev);
1501 } 1502 }
1502 break; 1503 break;
1503 case -ENODEV: 1504 case -ENODEV:
@@ -1623,6 +1624,13 @@ void dasd_generic_handle_state_change(struct dasd_device *device)
 }
 EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
 
+static int dasd_check_hpf_error(struct irb *irb)
+{
+	return (scsw_tm_is_valid_schxs(&irb->scsw) &&
+		(irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX ||
+		 irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX));
+}
+
 /*
  * Interrupt handler for "normal" ssch-io based dasd devices.
  */
@@ -1642,7 +1650,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 		switch (PTR_ERR(irb)) {
 		case -EIO:
 			if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) {
-				device = (struct dasd_device *) cqr->startdev;
+				device = cqr->startdev;
 				cqr->status = DASD_CQR_CLEARED;
 				dasd_device_clear_timer(device);
 				wake_up(&dasd_flush_wq);
@@ -1749,19 +1757,26 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 					 struct dasd_ccw_req, devlist);
 		}
 	} else {  /* error */
+		/* check for HPF error
+		 * call discipline function to requeue all requests
+		 * and disable HPF accordingly
+		 */
+		if (cqr->cpmode && dasd_check_hpf_error(irb) &&
+		    device->discipline->handle_hpf_error)
+			device->discipline->handle_hpf_error(device, irb);
 		/*
 		 * If we don't want complex ERP for this request, then just
 		 * reset this and retry it in the fastpath
 		 */
 		if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
 		    cqr->retries > 0) {
-			if (cqr->lpm == device->path_data.opm)
+			if (cqr->lpm == dasd_path_get_opm(device))
 				DBF_DEV_EVENT(DBF_DEBUG, device,
 					      "default ERP in fastpath "
 					      "(%i retries left)",
 					      cqr->retries);
 			if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
-				cqr->lpm = device->path_data.opm;
+				cqr->lpm = dasd_path_get_opm(device);
 			cqr->status = DASD_CQR_QUEUED;
 			next = cqr;
 		} else
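
The hunk above wires zHPF (High Performance FICON) failures into the interrupt handler: when a transport-mode request (cqr->cpmode set) fails with one of the NOFCX sense qualifiers recognized by dasd_check_hpf_error(), the owning discipline gets a chance to requeue all I/O and switch zHPF off. The ECKD implementation of handle_hpf_error is not part of this excerpt; a minimal sketch of what such a callback has to do — the helper name dasd_eckd_disable_hpf_device is an assumption here, only dasd_schedule_requeue() is actually introduced by this patch:

	static void dasd_eckd_handle_hpf_error(struct dasd_device *device,
					       struct irb *irb)
	{
		/* stop building transport-mode (TCW) channel programs */
		dasd_eckd_disable_hpf_device(device);	/* assumed helper */
		/* push active requests back so they restart in command mode */
		dasd_schedule_requeue(device);
	}
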
@@ -2002,17 +2017,18 @@ static void __dasd_device_check_path_events(struct dasd_device *device)
 {
 	int rc;
 
-	if (device->path_data.tbvpm) {
-		if (device->stopped & ~(DASD_STOPPED_DC_WAIT |
-					DASD_UNRESUMED_PM))
-			return;
-		rc = device->discipline->verify_path(
-			device, device->path_data.tbvpm);
-		if (rc)
-			dasd_device_set_timer(device, 50);
-		else
-			device->path_data.tbvpm = 0;
-	}
+	if (!dasd_path_get_tbvpm(device))
+		return;
+
+	if (device->stopped &
+	    ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM))
+		return;
+	rc = device->discipline->verify_path(device,
+					     dasd_path_get_tbvpm(device));
+	if (rc)
+		dasd_device_set_timer(device, 50);
+	else
+		dasd_path_clear_all_verify(device);
 };
 
 /*
@@ -2924,10 +2940,10 @@ static int _dasd_requeue_request(struct dasd_ccw_req *cqr)
 
 	if (!block)
 		return -EINVAL;
-	spin_lock_irqsave(&block->queue_lock, flags);
+	spin_lock_irqsave(&block->request_queue_lock, flags);
 	req = (struct request *) cqr->callback_data;
 	blk_requeue_request(block->request_queue, req);
-	spin_unlock_irqrestore(&block->queue_lock, flags);
+	spin_unlock_irqrestore(&block->request_queue_lock, flags);
 
 	return 0;
 }
@@ -3121,6 +3137,7 @@ static int dasd_alloc_queue(struct dasd_block *block)
  */
 static void dasd_setup_queue(struct dasd_block *block)
 {
+	struct request_queue *q = block->request_queue;
 	int max;
 
 	if (block->base->features & DASD_FEATURE_USERAW) {
@@ -3135,17 +3152,16 @@ static void dasd_setup_queue(struct dasd_block *block)
 	} else {
 		max = block->base->discipline->max_blocks << block->s2b_shift;
 	}
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, block->request_queue);
-	block->request_queue->limits.max_dev_sectors = max;
-	blk_queue_logical_block_size(block->request_queue,
-				     block->bp_block);
-	blk_queue_max_hw_sectors(block->request_queue, max);
-	blk_queue_max_segments(block->request_queue, -1L);
+	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+	q->limits.max_dev_sectors = max;
+	blk_queue_logical_block_size(q, block->bp_block);
+	blk_queue_max_hw_sectors(q, max);
+	blk_queue_max_segments(q, USHRT_MAX);
 	/* with page sized segments we can translate each segement into
 	 * one idaw/tidaw
 	 */
-	blk_queue_max_segment_size(block->request_queue, PAGE_SIZE);
-	blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1);
+	blk_queue_max_segment_size(q, PAGE_SIZE);
+	blk_queue_segment_boundary(q, PAGE_SIZE - 1);
 }
 
 /*
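
Two details in dasd_setup_queue() are worth noting: the local q variable merely shortens the repeated block->request_queue dereferences, and the -1L previously passed to blk_queue_max_segments() only worked by accident — the limit is stored in an unsigned short, so the constant was silently truncated to 0xffff. USHRT_MAX expresses the same effective limit explicitly.
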
@@ -3517,11 +3533,15 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
 	struct dasd_device *device;
 	struct dasd_block *block;
 	int max_count, open_count, rc;
+	unsigned long flags;
 
 	rc = 0;
-	device = dasd_device_from_cdev(cdev);
-	if (IS_ERR(device))
+	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+	device = dasd_device_from_cdev_locked(cdev);
+	if (IS_ERR(device)) {
+		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
 		return PTR_ERR(device);
+	}
 
 	/*
 	 * We must make sure that this device is currently not in use.
@@ -3540,8 +3560,7 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
 			pr_warn("%s: The DASD cannot be set offline while it is in use\n",
 				dev_name(&cdev->dev));
 			clear_bit(DASD_FLAG_OFFLINE, &device->flags);
-			dasd_put_device(device);
-			return -EBUSY;
+			goto out_busy;
 		}
 	}
 
@@ -3551,19 +3570,19 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
 		 * could only be called by normal offline so safe_offline flag
 		 * needs to be removed to run normal offline and kill all I/O
 		 */
-		if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+		if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags))
 			/* Already doing normal offline processing */
-			dasd_put_device(device);
-			return -EBUSY;
-		} else
+			goto out_busy;
+		else
 			clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags);
-
-	} else
-		if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+	} else {
+		if (test_bit(DASD_FLAG_OFFLINE, &device->flags))
 			/* Already doing offline processing */
-			dasd_put_device(device);
-			return -EBUSY;
-		}
+			goto out_busy;
+	}
+
+	set_bit(DASD_FLAG_OFFLINE, &device->flags);
+	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
 
 	/*
 	 * if safe_offline called set safe_offline_running flag and
@@ -3591,7 +3610,6 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
 			goto interrupted;
 	}
 
-	set_bit(DASD_FLAG_OFFLINE, &device->flags);
 	dasd_set_target_state(device, DASD_STATE_NEW);
 	/* dasd_delete_device destroys the device reference. */
 	block = device->block;
@@ -3610,7 +3628,14 @@ interrupted:
 	clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags);
 	clear_bit(DASD_FLAG_OFFLINE, &device->flags);
 	dasd_put_device(device);
+
 	return rc;
+
+out_busy:
+	dasd_put_device(device);
+	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+
+	return -EBUSY;
 }
 EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
 
@@ -3675,14 +3700,12 @@ int dasd_generic_notify(struct ccw_device *cdev, int event)
 	case CIO_GONE:
 	case CIO_BOXED:
 	case CIO_NO_PATH:
-		device->path_data.opm = 0;
-		device->path_data.ppm = 0;
-		device->path_data.npm = 0;
+		dasd_path_no_path(device);
 		ret = dasd_generic_last_path_gone(device);
 		break;
 	case CIO_OPER:
 		ret = 1;
-		if (device->path_data.opm)
+		if (dasd_path_get_opm(device))
 			ret = dasd_generic_path_operational(device);
 		break;
 	}
@@ -3693,48 +3716,32 @@ EXPORT_SYMBOL_GPL(dasd_generic_notify);
 
 void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
 {
-	int chp;
-	__u8 oldopm, eventlpm;
 	struct dasd_device *device;
+	int chp, oldopm, hpfpm, ifccpm;
 
 	device = dasd_device_from_cdev_locked(cdev);
 	if (IS_ERR(device))
 		return;
+
+	oldopm = dasd_path_get_opm(device);
 	for (chp = 0; chp < 8; chp++) {
-		eventlpm = 0x80 >> chp;
 		if (path_event[chp] & PE_PATH_GONE) {
-			oldopm = device->path_data.opm;
-			device->path_data.opm &= ~eventlpm;
-			device->path_data.ppm &= ~eventlpm;
-			device->path_data.npm &= ~eventlpm;
-			if (oldopm && !device->path_data.opm) {
-				dev_warn(&device->cdev->dev,
-					 "No verified channel paths remain "
-					 "for the device\n");
-				DBF_DEV_EVENT(DBF_WARNING, device,
-					      "%s", "last verified path gone");
-				dasd_eer_write(device, NULL, DASD_EER_NOPATH);
-				dasd_device_set_stop_bits(device,
-							  DASD_STOPPED_DC_WAIT);
-			}
+			dasd_path_notoper(device, chp);
 		}
 		if (path_event[chp] & PE_PATH_AVAILABLE) {
-			device->path_data.opm &= ~eventlpm;
-			device->path_data.ppm &= ~eventlpm;
-			device->path_data.npm &= ~eventlpm;
-			device->path_data.tbvpm |= eventlpm;
+			dasd_path_available(device, chp);
 			dasd_schedule_device_bh(device);
 		}
 		if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) {
-			if (!(device->path_data.opm & eventlpm) &&
-			    !(device->path_data.tbvpm & eventlpm)) {
+			if (!dasd_path_is_operational(device, chp) &&
+			    !dasd_path_need_verify(device, chp)) {
 				/*
 				 * we can not establish a pathgroup on an
 				 * unavailable path, so trigger a path
 				 * verification first
 				 */
-				device->path_data.tbvpm |= eventlpm;
+				dasd_path_available(device, chp);
 				dasd_schedule_device_bh(device);
 			}
 			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 				      "Pathgroup re-established\n");
@@ -3742,45 +3749,65 @@ void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
 				device->discipline->kick_validate(device);
 		}
 	}
+	hpfpm = dasd_path_get_hpfpm(device);
+	ifccpm = dasd_path_get_ifccpm(device);
+	if (!dasd_path_get_opm(device) && hpfpm) {
+		/*
+		 * device has no operational paths but at least one path is
+		 * disabled due to HPF errors
+		 * disable HPF at all and use the path(s) again
+		 */
+		if (device->discipline->disable_hpf)
+			device->discipline->disable_hpf(device);
+		dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
+		dasd_path_set_tbvpm(device, hpfpm);
+		dasd_schedule_device_bh(device);
+		dasd_schedule_requeue(device);
+	} else if (!dasd_path_get_opm(device) && ifccpm) {
+		/*
+		 * device has no operational paths but at least one path is
+		 * disabled due to IFCC errors
+		 * trigger path verification on paths with IFCC errors
+		 */
+		dasd_path_set_tbvpm(device, ifccpm);
+		dasd_schedule_device_bh(device);
+	}
+	if (oldopm && !dasd_path_get_opm(device) && !hpfpm && !ifccpm) {
+		dev_warn(&device->cdev->dev,
+			 "No verified channel paths remain for the device\n");
+		DBF_DEV_EVENT(DBF_WARNING, device,
+			      "%s", "last verified path gone");
+		dasd_eer_write(device, NULL, DASD_EER_NOPATH);
+		dasd_device_set_stop_bits(device,
+					  DASD_STOPPED_DC_WAIT);
+	}
 	dasd_put_device(device);
 }
 EXPORT_SYMBOL_GPL(dasd_generic_path_event);
 
 int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm)
 {
-	if (!device->path_data.opm && lpm) {
-		device->path_data.opm = lpm;
+	if (!dasd_path_get_opm(device) && lpm) {
+		dasd_path_set_opm(device, lpm);
 		dasd_generic_path_operational(device);
 	} else
-		device->path_data.opm |= lpm;
+		dasd_path_add_opm(device, lpm);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(dasd_generic_verify_path);
 
-
-int dasd_generic_pm_freeze(struct ccw_device *cdev)
+/*
+ * clear active requests and requeue them to block layer if possible
+ */
+static int dasd_generic_requeue_all_requests(struct dasd_device *device)
 {
-	struct dasd_device *device = dasd_device_from_cdev(cdev);
-	struct list_head freeze_queue;
+	struct list_head requeue_queue;
 	struct dasd_ccw_req *cqr, *n;
 	struct dasd_ccw_req *refers;
 	int rc;
 
-	if (IS_ERR(device))
-		return PTR_ERR(device);
-
-	/* mark device as suspended */
-	set_bit(DASD_FLAG_SUSPENDED, &device->flags);
-
-	if (device->discipline->freeze)
-		rc = device->discipline->freeze(device);
-
-	/* disallow new I/O */
-	dasd_device_set_stop_bits(device, DASD_STOPPED_PM);
-
-	/* clear active requests and requeue them to block layer if possible */
-	INIT_LIST_HEAD(&freeze_queue);
-	spin_lock_irq(get_ccwdev_lock(cdev));
+	INIT_LIST_HEAD(&requeue_queue);
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
 	rc = 0;
 	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
 		/* Check status and move request to flush_queue */
@@ -3791,25 +3818,22 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev)
 				dev_err(&device->cdev->dev,
 					"Unable to terminate request %p "
 					"on suspend\n", cqr);
-				spin_unlock_irq(get_ccwdev_lock(cdev));
+				spin_unlock_irq(get_ccwdev_lock(device->cdev));
 				dasd_put_device(device);
 				return rc;
 			}
 		}
-		list_move_tail(&cqr->devlist, &freeze_queue);
+		list_move_tail(&cqr->devlist, &requeue_queue);
 	}
-	spin_unlock_irq(get_ccwdev_lock(cdev));
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
 
-	list_for_each_entry_safe(cqr, n, &freeze_queue, devlist) {
+	list_for_each_entry_safe(cqr, n, &requeue_queue, devlist) {
 		wait_event(dasd_flush_wq,
 			   (cqr->status != DASD_CQR_CLEAR_PENDING));
-		if (cqr->status == DASD_CQR_CLEARED)
-			cqr->status = DASD_CQR_QUEUED;
 
-		/* requeue requests to blocklayer will only work for
-		   block device requests */
-		if (_dasd_requeue_request(cqr))
-			continue;
+		/* mark sleepon requests as ended */
+		if (cqr->callback_data == DASD_SLEEPON_START_TAG)
+			cqr->callback_data = DASD_SLEEPON_END_TAG;
 
 		/* remove requests from device and block queue */
 		list_del_init(&cqr->devlist);
@@ -3821,6 +3845,14 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev)
 			dasd_free_erp_request(cqr, cqr->memdev);
 			cqr = refers;
 		}
+
+		/*
+		 * requeue requests to blocklayer will only work
+		 * for block device requests
+		 */
+		if (_dasd_requeue_request(cqr))
+			continue;
+
 		if (cqr->block)
 			list_del_init(&cqr->blocklist);
 		cqr->block->base->discipline->free_cp(
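
The _dasd_requeue_request() call is not merely moved: in the old freeze code a request was handed back to the block layer before its ERP chain was unwound, while here each request is first walked back through its refers chain to the original cqr and only then requeued, so an ERP clone is never requeued by mistake.
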
@@ -3831,15 +3863,56 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev)
 	 * if requests remain then they are internal request
 	 * and go back to the device queue
 	 */
-	if (!list_empty(&freeze_queue)) {
+	if (!list_empty(&requeue_queue)) {
 		/* move freeze_queue to start of the ccw_queue */
-		spin_lock_irq(get_ccwdev_lock(cdev));
-		list_splice_tail(&freeze_queue, &device->ccw_queue);
-		spin_unlock_irq(get_ccwdev_lock(cdev));
+		spin_lock_irq(get_ccwdev_lock(device->cdev));
+		list_splice_tail(&requeue_queue, &device->ccw_queue);
+		spin_unlock_irq(get_ccwdev_lock(device->cdev));
 	}
-	dasd_put_device(device);
+	/* wake up generic waitqueue for eventually ended sleepon requests */
+	wake_up(&generic_waitq);
 	return rc;
 }
+
+static void do_requeue_requests(struct work_struct *work)
+{
+	struct dasd_device *device = container_of(work, struct dasd_device,
+						  requeue_requests);
+	dasd_generic_requeue_all_requests(device);
+	dasd_device_remove_stop_bits(device, DASD_STOPPED_NOT_ACC);
+	if (device->block)
+		dasd_schedule_block_bh(device->block);
+	dasd_put_device(device);
+}
+
+void dasd_schedule_requeue(struct dasd_device *device)
+{
+	dasd_get_device(device);
+	/* queue call to dasd_reload_device to the kernel event daemon. */
+	if (!schedule_work(&device->requeue_requests))
+		dasd_put_device(device);
+}
+EXPORT_SYMBOL(dasd_schedule_requeue);
+
+int dasd_generic_pm_freeze(struct ccw_device *cdev)
+{
+	struct dasd_device *device = dasd_device_from_cdev(cdev);
+	int rc;
+
+	if (IS_ERR(device))
+		return PTR_ERR(device);
+
+	/* mark device as suspended */
+	set_bit(DASD_FLAG_SUSPENDED, &device->flags);
+
+	if (device->discipline->freeze)
+		rc = device->discipline->freeze(device);
+
+	/* disallow new I/O */
+	dasd_device_set_stop_bits(device, DASD_STOPPED_PM);
+
+	return dasd_generic_requeue_all_requests(device);
+}
 EXPORT_SYMBOL_GPL(dasd_generic_pm_freeze);
 
 int dasd_generic_restore_device(struct ccw_device *cdev)
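
From here on the series converts every open-coded device->path_data.*pm mask to a dasd_path_*() accessor. The accessor definitions (in dasd_int.h) are outside this excerpt; conceptually each of the eight channel paths now carries its own flag word, and the accessors fold those per-path flags back into the legacy 8-bit masks. A rough sketch of the idiom, with an assumed flag name, not the verbatim implementation:

	#define DASD_PATH_OPERATIONAL	1	/* assumed bit name */

	static inline int dasd_path_is_operational(struct dasd_device *device,
						   int chp)
	{
		return test_bit(DASD_PATH_OPERATIONAL,
				&device->path[chp].flags);
	}

	static inline __u8 dasd_path_get_opm(struct dasd_device *device)
	{
		__u8 opm = 0;
		int chp;

		/* fold per-path flags back into the legacy 8-bit mask */
		for (chp = 0; chp < 8; chp++)
			if (dasd_path_is_operational(device, chp))
				opm |= 0x80 >> chp;
		return opm;
	}
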
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 8305ab688d57..95f7645e3c37 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -152,7 +152,7 @@ dasd_3990_erp_alternate_path(struct dasd_ccw_req * erp)
 	opm = ccw_device_get_path_mask(device->cdev);
 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 	if (erp->lpm == 0)
-		erp->lpm = device->path_data.opm &
+		erp->lpm = dasd_path_get_opm(device) &
 			~(erp->irb.esw.esw0.sublog.lpum);
 	else
 		erp->lpm &= ~(erp->irb.esw.esw0.sublog.lpum);
@@ -273,7 +273,7 @@ static struct dasd_ccw_req *dasd_3990_erp_action_1(struct dasd_ccw_req *erp)
 	    !test_bit(DASD_CQR_VERIFY_PATH, &erp->flags)) {
 		erp->status = DASD_CQR_FILLED;
 		erp->retries = 10;
-		erp->lpm = erp->startdev->path_data.opm;
+		erp->lpm = dasd_path_get_opm(erp->startdev);
 		erp->function = dasd_3990_erp_action_1_sec;
 	}
 	return erp;
@@ -1926,7 +1926,7 @@ dasd_3990_erp_compound_path(struct dasd_ccw_req * erp, char *sense)
 		    !test_bit(DASD_CQR_VERIFY_PATH, &erp->flags)) {
 			/* reset the lpm and the status to be able to
 			 * try further actions. */
-			erp->lpm = erp->startdev->path_data.opm;
+			erp->lpm = dasd_path_get_opm(erp->startdev);
 			erp->status = DASD_CQR_NEED_ERP;
 		}
 	}
@@ -2208,6 +2208,51 @@ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense)
 
 } /* end dasd_3990_erp_inspect_32 */
 
+static void dasd_3990_erp_disable_path(struct dasd_device *device, __u8 lpum)
+{
+	int pos = pathmask_to_pos(lpum);
+
+	/* no remaining path, cannot disable */
+	if (!(dasd_path_get_opm(device) & ~lpum))
+		return;
+
+	dev_err(&device->cdev->dev,
+		"Path %x.%02x (pathmask %02x) is disabled - IFCC threshold exceeded\n",
+		device->path[pos].cssid, device->path[pos].chpid, lpum);
+	dasd_path_remove_opm(device, lpum);
+	dasd_path_add_ifccpm(device, lpum);
+	device->path[pos].errorclk = 0;
+	atomic_set(&device->path[pos].error_count, 0);
+}
+
+static void dasd_3990_erp_account_error(struct dasd_ccw_req *erp)
+{
+	struct dasd_device *device = erp->startdev;
+	__u8 lpum = erp->refers->irb.esw.esw1.lpum;
+	int pos = pathmask_to_pos(lpum);
+	unsigned long long clk;
+
+	if (!device->path_thrhld)
+		return;
+
+	clk = get_tod_clock();
+	/*
+	 * check if the last error is longer ago than the timeout,
+	 * if so reset error state
+	 */
+	if ((tod_to_ns(clk - device->path[pos].errorclk) / NSEC_PER_SEC)
+	    >= device->path_interval) {
+		atomic_set(&device->path[pos].error_count, 0);
+		device->path[pos].errorclk = 0;
+	}
+	atomic_inc(&device->path[pos].error_count);
+	device->path[pos].errorclk = clk;
+	/* threshold exceeded disable path if possible */
+	if (atomic_read(&device->path[pos].error_count) >=
+	    device->path_thrhld)
+		dasd_3990_erp_disable_path(device, lpum);
+}
+
 /*
  *****************************************************************************
  * main ERP control functions (24 and 32 byte sense)
@@ -2237,6 +2282,7 @@ dasd_3990_erp_control_check(struct dasd_ccw_req *erp)
 			       | SCHN_STAT_CHN_CTRL_CHK)) {
 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 			      "channel or interface control check");
+		dasd_3990_erp_account_error(erp);
 		erp = dasd_3990_erp_action_4(erp, NULL);
 	}
 	return erp;
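
dasd_3990_erp_account_error() implements a decaying error counter: each channel or interface control check increments a per-path count, but if the previous error on that path lies more than path_interval seconds in the past the count is reset first, so only sustained bursts of IFCCs reach the threshold. With, say, path_thrhld = 256 and path_interval = 300 (illustrative values; the actual defaults come from DASD_ECKD_PATH_THRHLD and DASD_ECKD_PATH_INTERVAL, set further down in this diff), a path leaves the operational mask only after 256 errors with no 300-second quiet gap in between — and never if it is the last operational path.
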
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 15a1a70cace9..84ca314c87e3 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -725,27 +725,15 @@ static ssize_t dasd_ff_show(struct device *dev, struct device_attribute *attr,
 static ssize_t dasd_ff_store(struct device *dev, struct device_attribute *attr,
 	      const char *buf, size_t count)
 {
-	struct dasd_devmap *devmap;
-	int val;
-	char *endp;
-
-	devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
-	if (IS_ERR(devmap))
-		return PTR_ERR(devmap);
+	unsigned int val;
+	int rc;
 
-	val = simple_strtoul(buf, &endp, 0);
-	if (((endp + 1) < (buf + count)) || (val > 1))
+	if (kstrtouint(buf, 0, &val) || val > 1)
 		return -EINVAL;
 
-	spin_lock(&dasd_devmap_lock);
-	if (val)
-		devmap->features |= DASD_FEATURE_FAILFAST;
-	else
-		devmap->features &= ~DASD_FEATURE_FAILFAST;
-	if (devmap->device)
-		devmap->device->features = devmap->features;
-	spin_unlock(&dasd_devmap_lock);
-	return count;
+	rc = dasd_set_feature(to_ccwdev(dev), DASD_FEATURE_FAILFAST, val);
+
+	return rc ? : count;
 }
 
 static DEVICE_ATTR(failfast, 0644, dasd_ff_show, dasd_ff_store);
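
The store routines in this file all move to the same shape: kstrtouint() replaces the hand-rolled simple_strtoul()/endp check (which could accept input with trailing garbage when the terminator happened to land in the right position), and the devmap locking disappears behind dasd_set_feature(). The recurring pattern, sketched generically — FEATURE_FLAG stands in for DASD_FEATURE_FAILFAST and friends:

	static ssize_t feature_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
	{
		unsigned int val;
		int rc;

		/* fails on trailing garbage and on overflow */
		if (kstrtouint(buf, 0, &val) || val > 1)
			return -EINVAL;

		rc = dasd_set_feature(to_ccwdev(dev), FEATURE_FLAG, val);

		return rc ? : count;
	}
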
@@ -771,32 +759,41 @@ static ssize_t
 dasd_ro_store(struct device *dev, struct device_attribute *attr,
 	      const char *buf, size_t count)
 {
-	struct dasd_devmap *devmap;
+	struct ccw_device *cdev = to_ccwdev(dev);
 	struct dasd_device *device;
-	int val;
-	char *endp;
-
-	devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
-	if (IS_ERR(devmap))
-		return PTR_ERR(devmap);
+	unsigned long flags;
+	unsigned int val;
+	int rc;
 
-	val = simple_strtoul(buf, &endp, 0);
-	if (((endp + 1) < (buf + count)) || (val > 1))
+	if (kstrtouint(buf, 0, &val) || val > 1)
 		return -EINVAL;
 
-	spin_lock(&dasd_devmap_lock);
-	if (val)
-		devmap->features |= DASD_FEATURE_READONLY;
-	else
-		devmap->features &= ~DASD_FEATURE_READONLY;
-	device = devmap->device;
-	if (device) {
-		device->features = devmap->features;
-		val = val || test_bit(DASD_FLAG_DEVICE_RO, &device->flags);
+	rc = dasd_set_feature(cdev, DASD_FEATURE_READONLY, val);
+	if (rc)
+		return rc;
+
+	device = dasd_device_from_cdev(cdev);
+	if (IS_ERR(device))
+		return PTR_ERR(device);
+
+	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+	val = val || test_bit(DASD_FLAG_DEVICE_RO, &device->flags);
+
+	if (!device->block || !device->block->gdp ||
+	    test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+		goto out;
 	}
-	spin_unlock(&dasd_devmap_lock);
-	if (device && device->block && device->block->gdp)
-		set_disk_ro(device->block->gdp, val);
+	/* Increase open_count to avoid losing the block device */
+	atomic_inc(&device->block->open_count);
+	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+
+	set_disk_ro(device->block->gdp, val);
+	atomic_dec(&device->block->open_count);
+
+out:
+	dasd_put_device(device);
+
 	return count;
 }
 
@@ -823,27 +820,15 @@ static ssize_t
 dasd_erplog_store(struct device *dev, struct device_attribute *attr,
 		  const char *buf, size_t count)
 {
-	struct dasd_devmap *devmap;
-	int val;
-	char *endp;
-
-	devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
-	if (IS_ERR(devmap))
-		return PTR_ERR(devmap);
+	unsigned int val;
+	int rc;
 
-	val = simple_strtoul(buf, &endp, 0);
-	if (((endp + 1) < (buf + count)) || (val > 1))
+	if (kstrtouint(buf, 0, &val) || val > 1)
 		return -EINVAL;
 
-	spin_lock(&dasd_devmap_lock);
-	if (val)
-		devmap->features |= DASD_FEATURE_ERPLOG;
-	else
-		devmap->features &= ~DASD_FEATURE_ERPLOG;
-	if (devmap->device)
-		devmap->device->features = devmap->features;
-	spin_unlock(&dasd_devmap_lock);
-	return count;
+	rc = dasd_set_feature(to_ccwdev(dev), DASD_FEATURE_ERPLOG, val);
+
+	return rc ? : count;
 }
 
 static DEVICE_ATTR(erplog, 0644, dasd_erplog_show, dasd_erplog_store);
@@ -871,16 +856,14 @@ dasd_use_diag_store(struct device *dev, struct device_attribute *attr,
 		    const char *buf, size_t count)
 {
 	struct dasd_devmap *devmap;
+	unsigned int val;
 	ssize_t rc;
-	int val;
-	char *endp;
 
 	devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
 	if (IS_ERR(devmap))
 		return PTR_ERR(devmap);
 
-	val = simple_strtoul(buf, &endp, 0);
-	if (((endp + 1) < (buf + count)) || (val > 1))
+	if (kstrtouint(buf, 0, &val) || val > 1)
 		return -EINVAL;
 
 	spin_lock(&dasd_devmap_lock);
@@ -994,10 +977,12 @@ dasd_access_show(struct device *dev, struct device_attribute *attr,
 	if (IS_ERR(device))
 		return PTR_ERR(device);
 
-	if (device->discipline->host_access_count)
-		count = device->discipline->host_access_count(device);
-	else
+	if (!device->discipline)
+		count = -ENODEV;
+	else if (!device->discipline->host_access_count)
 		count = -EOPNOTSUPP;
+	else
+		count = device->discipline->host_access_count(device);
 
 	dasd_put_device(device);
 	if (count < 0)
@@ -1197,27 +1182,25 @@ static ssize_t
 dasd_eer_store(struct device *dev, struct device_attribute *attr,
 	       const char *buf, size_t count)
 {
-	struct dasd_devmap *devmap;
-	int val, rc;
-	char *endp;
+	struct dasd_device *device;
+	unsigned int val;
+	int rc = 0;
 
-	devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
-	if (IS_ERR(devmap))
-		return PTR_ERR(devmap);
-	if (!devmap->device)
-		return -ENODEV;
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		return PTR_ERR(device);
 
-	val = simple_strtoul(buf, &endp, 0);
-	if (((endp + 1) < (buf + count)) || (val > 1))
+	if (kstrtouint(buf, 0, &val) || val > 1)
 		return -EINVAL;
 
-	if (val) {
-		rc = dasd_eer_enable(devmap->device);
-		if (rc)
-			return rc;
-	} else
-		dasd_eer_disable(devmap->device);
-	return count;
+	if (val)
+		rc = dasd_eer_enable(device);
+	else
+		dasd_eer_disable(device);
+
+	dasd_put_device(device);
+
+	return rc ? : count;
 }
 
 static DEVICE_ATTR(eer_enabled, 0644, dasd_eer_show, dasd_eer_store);
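
The return rc ? : count; form used here and above is the GNU conditional with omitted middle operand: it yields rc when rc is non-zero (the error path) and count otherwise, which is exactly the contract a sysfs store method needs.
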
@@ -1360,6 +1343,50 @@ dasd_timeout_store(struct device *dev, struct device_attribute *attr,
 static DEVICE_ATTR(timeout, 0644,
 		   dasd_timeout_show, dasd_timeout_store);
 
+
+static ssize_t
+dasd_path_reset_store(struct device *dev, struct device_attribute *attr,
+		      const char *buf, size_t count)
+{
+	struct dasd_device *device;
+	unsigned int val;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		return -ENODEV;
+
+	if ((kstrtouint(buf, 16, &val) != 0) || val > 0xff)
+		val = 0;
+
+	if (device->discipline && device->discipline->reset_path)
+		device->discipline->reset_path(device, (__u8) val);
+
+	dasd_put_device(device);
+	return count;
+}
+
+static DEVICE_ATTR(path_reset, 0200, NULL, dasd_path_reset_store);
+
+static ssize_t dasd_hpf_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct dasd_device *device;
+	int hpf;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		return -ENODEV;
+	if (!device->discipline || !device->discipline->hpf_enabled) {
+		dasd_put_device(device);
+		return snprintf(buf, PAGE_SIZE, "%d\n", dasd_nofcx);
+	}
+	hpf = device->discipline->hpf_enabled(device);
+	dasd_put_device(device);
+	return snprintf(buf, PAGE_SIZE, "%d\n", hpf);
+}
+
+static DEVICE_ATTR(hpf, 0444, dasd_hpf_show, NULL);
+
 static ssize_t dasd_reservation_policy_show(struct device *dev,
 					    struct device_attribute *attr,
 					    char *buf)
@@ -1385,27 +1412,17 @@ static ssize_t dasd_reservation_policy_store(struct device *dev,
 					     struct device_attribute *attr,
 					     const char *buf, size_t count)
 {
-	struct dasd_devmap *devmap;
+	struct ccw_device *cdev = to_ccwdev(dev);
 	int rc;
 
-	devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
-	if (IS_ERR(devmap))
-		return PTR_ERR(devmap);
-	rc = 0;
-	spin_lock(&dasd_devmap_lock);
 	if (sysfs_streq("ignore", buf))
-		devmap->features &= ~DASD_FEATURE_FAILONSLCK;
+		rc = dasd_set_feature(cdev, DASD_FEATURE_FAILONSLCK, 0);
 	else if (sysfs_streq("fail", buf))
-		devmap->features |= DASD_FEATURE_FAILONSLCK;
+		rc = dasd_set_feature(cdev, DASD_FEATURE_FAILONSLCK, 1);
 	else
 		rc = -EINVAL;
-	if (devmap->device)
-		devmap->device->features = devmap->features;
-	spin_unlock(&dasd_devmap_lock);
-	if (rc)
-		return rc;
-	else
-		return count;
+
+	return rc ? : count;
 }
 
 static DEVICE_ATTR(reservation_policy, 0644,
@@ -1461,25 +1478,120 @@ static ssize_t dasd_pm_show(struct device *dev,
 			    struct device_attribute *attr, char *buf)
 {
 	struct dasd_device *device;
-	u8 opm, nppm, cablepm, cuirpm, hpfpm;
+	u8 opm, nppm, cablepm, cuirpm, hpfpm, ifccpm;
 
 	device = dasd_device_from_cdev(to_ccwdev(dev));
 	if (IS_ERR(device))
 		return sprintf(buf, "0\n");
 
-	opm = device->path_data.opm;
-	nppm = device->path_data.npm;
-	cablepm = device->path_data.cablepm;
-	cuirpm = device->path_data.cuirpm;
-	hpfpm = device->path_data.hpfpm;
+	opm = dasd_path_get_opm(device);
+	nppm = dasd_path_get_nppm(device);
+	cablepm = dasd_path_get_cablepm(device);
+	cuirpm = dasd_path_get_cuirpm(device);
+	hpfpm = dasd_path_get_hpfpm(device);
+	ifccpm = dasd_path_get_ifccpm(device);
 	dasd_put_device(device);
 
-	return sprintf(buf, "%02x %02x %02x %02x %02x\n", opm, nppm,
-		       cablepm, cuirpm, hpfpm);
+	return sprintf(buf, "%02x %02x %02x %02x %02x %02x\n", opm, nppm,
+		       cablepm, cuirpm, hpfpm, ifccpm);
 }
 
 static DEVICE_ATTR(path_masks, 0444, dasd_pm_show, NULL);
 
+/*
+ * threshold value for IFCC/CCC errors
+ */
+static ssize_t
+dasd_path_threshold_show(struct device *dev,
+			 struct device_attribute *attr, char *buf)
+{
+	struct dasd_device *device;
+	int len;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		return -ENODEV;
+	len = snprintf(buf, PAGE_SIZE, "%lu\n", device->path_thrhld);
+	dasd_put_device(device);
+	return len;
+}
+
+static ssize_t
+dasd_path_threshold_store(struct device *dev, struct device_attribute *attr,
+			  const char *buf, size_t count)
+{
+	struct dasd_device *device;
+	unsigned long flags;
+	unsigned long val;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		return -ENODEV;
+
+	if ((kstrtoul(buf, 10, &val) != 0) ||
+	    (val > DASD_THRHLD_MAX) || val == 0) {
+		dasd_put_device(device);
+		return -EINVAL;
+	}
+	spin_lock_irqsave(get_ccwdev_lock(to_ccwdev(dev)), flags);
+	if (val)
+		device->path_thrhld = val;
+	spin_unlock_irqrestore(get_ccwdev_lock(to_ccwdev(dev)), flags);
+	dasd_put_device(device);
+	return count;
+}
+
+static DEVICE_ATTR(path_threshold, 0644, dasd_path_threshold_show,
+		   dasd_path_threshold_store);
+/*
+ * interval for IFCC/CCC checks
+ * meaning time with no IFCC/CCC error before the error counter
+ * gets reset
+ */
+static ssize_t
+dasd_path_interval_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct dasd_device *device;
+	int len;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		return -ENODEV;
+	len = snprintf(buf, PAGE_SIZE, "%lu\n", device->path_interval);
+	dasd_put_device(device);
+	return len;
+}
+
+static ssize_t
+dasd_path_interval_store(struct device *dev, struct device_attribute *attr,
+			 const char *buf, size_t count)
+{
+	struct dasd_device *device;
+	unsigned long flags;
+	unsigned long val;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		return -ENODEV;
+
+	if ((kstrtoul(buf, 10, &val) != 0) ||
+	    (val > DASD_INTERVAL_MAX) || val == 0) {
+		dasd_put_device(device);
+		return -EINVAL;
+	}
+	spin_lock_irqsave(get_ccwdev_lock(to_ccwdev(dev)), flags);
+	if (val)
+		device->path_interval = val;
+	spin_unlock_irqrestore(get_ccwdev_lock(to_ccwdev(dev)), flags);
+	dasd_put_device(device);
+	return count;
+}
+
+static DEVICE_ATTR(path_interval, 0644, dasd_path_interval_show,
+		   dasd_path_interval_store);
+
+
 static struct attribute * dasd_attrs[] = {
 	&dev_attr_readonly.attr,
 	&dev_attr_discipline.attr,
@@ -1500,6 +1612,10 @@ static struct attribute * dasd_attrs[] = {
 	&dev_attr_safe_offline.attr,
 	&dev_attr_host_access_count.attr,
 	&dev_attr_path_masks.attr,
+	&dev_attr_path_threshold.attr,
+	&dev_attr_path_interval.attr,
+	&dev_attr_path_reset.attr,
+	&dev_attr_hpf.attr,
 	NULL,
 };
 
@@ -1531,7 +1647,7 @@ dasd_set_feature(struct ccw_device *cdev, int feature, int flag)
 {
 	struct dasd_devmap *devmap;
 
-	devmap = dasd_find_busid(dev_name(&cdev->dev));
+	devmap = dasd_devmap_from_cdev(cdev);
 	if (IS_ERR(devmap))
 		return PTR_ERR(devmap);
 
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index a7a88476e215..67bf50c9946f 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1042,8 +1042,11 @@ static void dasd_eckd_clear_conf_data(struct dasd_device *device)
 	private->conf_data = NULL;
 	private->conf_len = 0;
 	for (i = 0; i < 8; i++) {
-		kfree(private->path_conf_data[i]);
-		private->path_conf_data[i] = NULL;
+		kfree(device->path[i].conf_data);
+		device->path[i].conf_data = NULL;
+		device->path[i].cssid = 0;
+		device->path[i].ssid = 0;
+		device->path[i].chpid = 0;
 	}
 }
 
@@ -1055,13 +1058,14 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
 	int rc, path_err, pos;
 	__u8 lpm, opm;
 	struct dasd_eckd_private *private, path_private;
-	struct dasd_path *path_data;
 	struct dasd_uid *uid;
 	char print_path_uid[60], print_device_uid[60];
+	struct channel_path_desc *chp_desc;
+	struct subchannel_id sch_id;
 
 	private = device->private;
-	path_data = &device->path_data;
 	opm = ccw_device_get_path_mask(device->cdev);
+	ccw_device_get_schid(device->cdev, &sch_id);
 	conf_data_saved = 0;
 	path_err = 0;
 	/* get configuration data per operational path */
@@ -1081,7 +1085,7 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
1081 "No configuration data " 1085 "No configuration data "
1082 "retrieved"); 1086 "retrieved");
1083 /* no further analysis possible */ 1087 /* no further analysis possible */
1084 path_data->opm |= lpm; 1088 dasd_path_add_opm(device, opm);
1085 continue; /* no error */ 1089 continue; /* no error */
1086 } 1090 }
1087 /* save first valid configuration data */ 1091 /* save first valid configuration data */
@@ -1098,8 +1102,13 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
 		}
 		pos = pathmask_to_pos(lpm);
 		/* store per path conf_data */
-		private->path_conf_data[pos] =
-			(struct dasd_conf_data *) conf_data;
+		device->path[pos].conf_data = conf_data;
+		device->path[pos].cssid = sch_id.cssid;
+		device->path[pos].ssid = sch_id.ssid;
+		chp_desc = ccw_device_get_chp_desc(device->cdev, pos);
+		if (chp_desc)
+			device->path[pos].chpid = chp_desc->chpid;
+		kfree(chp_desc);
 		/*
 		 * build device UID that other path data
 		 * can be compared to it
@@ -1154,42 +1163,66 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
1154 "device %s instead of %s\n", lpm, 1163 "device %s instead of %s\n", lpm,
1155 print_path_uid, print_device_uid); 1164 print_path_uid, print_device_uid);
1156 path_err = -EINVAL; 1165 path_err = -EINVAL;
1157 path_data->cablepm |= lpm; 1166 dasd_path_add_cablepm(device, lpm);
1158 continue; 1167 continue;
1159 } 1168 }
1160 pos = pathmask_to_pos(lpm); 1169 pos = pathmask_to_pos(lpm);
1161 /* store per path conf_data */ 1170 /* store per path conf_data */
1162 private->path_conf_data[pos] = 1171 device->path[pos].conf_data = conf_data;
1163 (struct dasd_conf_data *) conf_data; 1172 device->path[pos].cssid = sch_id.cssid;
1173 device->path[pos].ssid = sch_id.ssid;
1174 chp_desc = ccw_device_get_chp_desc(device->cdev, pos);
1175 if (chp_desc)
1176 device->path[pos].chpid = chp_desc->chpid;
1177 kfree(chp_desc);
1164 path_private.conf_data = NULL; 1178 path_private.conf_data = NULL;
1165 path_private.conf_len = 0; 1179 path_private.conf_len = 0;
1166 } 1180 }
1167 switch (dasd_eckd_path_access(conf_data, conf_len)) { 1181 switch (dasd_eckd_path_access(conf_data, conf_len)) {
1168 case 0x02: 1182 case 0x02:
1169 path_data->npm |= lpm; 1183 dasd_path_add_nppm(device, lpm);
1170 break; 1184 break;
1171 case 0x03: 1185 case 0x03:
1172 path_data->ppm |= lpm; 1186 dasd_path_add_ppm(device, lpm);
1173 break; 1187 break;
1174 } 1188 }
1175 if (!path_data->opm) { 1189 if (!dasd_path_get_opm(device)) {
1176 path_data->opm = lpm; 1190 dasd_path_set_opm(device, lpm);
1177 dasd_generic_path_operational(device); 1191 dasd_generic_path_operational(device);
1178 } else { 1192 } else {
1179 path_data->opm |= lpm; 1193 dasd_path_add_opm(device, lpm);
1180 } 1194 }
1181 /*
1182 * if the path is used
1183 * it should not be in one of the negative lists
1184 */
1185 path_data->cablepm &= ~lpm;
1186 path_data->hpfpm &= ~lpm;
1187 path_data->cuirpm &= ~lpm;
1188 } 1195 }
1189 1196
1190 return path_err; 1197 return path_err;
1191} 1198}
1192 1199
1200static u32 get_fcx_max_data(struct dasd_device *device)
1201{
1202 struct dasd_eckd_private *private = device->private;
1203 int fcx_in_css, fcx_in_gneq, fcx_in_features;
1204 int tpm, mdc;
1205
1206 if (dasd_nofcx)
1207 return 0;
1208 /* is transport mode supported? */
1209 fcx_in_css = css_general_characteristics.fcx;
1210 fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
1211 fcx_in_features = private->features.feature[40] & 0x80;
1212 tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
1213
1214 if (!tpm)
1215 return 0;
1216
1217 mdc = ccw_device_get_mdc(device->cdev, 0);
1218 if (mdc < 0) {
1219 dev_warn(&device->cdev->dev, "Detecting the maximum supported data size for zHPF requests failed\n");
1220 return 0;
1221 } else {
1222 return (u32)mdc * FCX_MAX_DATA_FACTOR;
1223 }
1224}
1225
1193static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm) 1226static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
1194{ 1227{
1195 struct dasd_eckd_private *private = device->private; 1228 struct dasd_eckd_private *private = device->private;
@@ -1222,8 +1255,7 @@ static int rebuild_device_uid(struct dasd_device *device,
 				struct path_verification_work_data *data)
 {
 	struct dasd_eckd_private *private = device->private;
-	struct dasd_path *path_data = &device->path_data;
-	__u8 lpm, opm = path_data->opm;
+	__u8 lpm, opm = dasd_path_get_opm(device);
 	int rc = -ENODEV;
 
 	for (lpm = 0x80; lpm; lpm >>= 1) {
@@ -1356,7 +1388,7 @@ static void do_path_verification_work(struct work_struct *work)
 	 * in other case the device UID may have changed and
 	 * the first working path UID will be used as device UID
 	 */
-	if (device->path_data.opm &&
+	if (dasd_path_get_opm(device) &&
 	    dasd_eckd_compare_path_uid(device, &path_private)) {
 		/*
 		 * the comparison was not successful
@@ -1406,23 +1438,17 @@ static void do_path_verification_work(struct work_struct *work)
 	 * situation in dasd_start_IO.
 	 */
 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
-	if (!device->path_data.opm && opm) {
-		device->path_data.opm = opm;
-		device->path_data.cablepm &= ~opm;
-		device->path_data.cuirpm &= ~opm;
-		device->path_data.hpfpm &= ~opm;
+	if (!dasd_path_get_opm(device) && opm) {
+		dasd_path_set_opm(device, opm);
 		dasd_generic_path_operational(device);
 	} else {
-		device->path_data.opm |= opm;
-		device->path_data.cablepm &= ~opm;
-		device->path_data.cuirpm &= ~opm;
-		device->path_data.hpfpm &= ~opm;
+		dasd_path_add_opm(device, opm);
 	}
-	device->path_data.npm |= npm;
-	device->path_data.ppm |= ppm;
-	device->path_data.tbvpm |= epm;
-	device->path_data.cablepm |= cablepm;
-	device->path_data.hpfpm |= hpfpm;
+	dasd_path_add_nppm(device, npm);
+	dasd_path_add_ppm(device, ppm);
+	dasd_path_add_tbvpm(device, epm);
+	dasd_path_add_cablepm(device, cablepm);
+	dasd_path_add_nohpfpm(device, hpfpm);
 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 	}
 	clear_bit(DASD_FLAG_PATH_VERIFY, &device->flags);
@@ -1456,6 +1482,19 @@ static int dasd_eckd_verify_path(struct dasd_device *device, __u8 lpm)
 	return 0;
 }
 
+static void dasd_eckd_reset_path(struct dasd_device *device, __u8 pm)
+{
+	struct dasd_eckd_private *private = device->private;
+	unsigned long flags;
+
+	if (!private->fcx_max_data)
+		private->fcx_max_data = get_fcx_max_data(device);
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	dasd_path_set_tbvpm(device, pm ? : dasd_path_get_notoperpm(device));
+	dasd_schedule_device_bh(device);
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+}
+
 static int dasd_eckd_read_features(struct dasd_device *device)
 {
 	struct dasd_eckd_private *private = device->private;
@@ -1652,32 +1691,6 @@ static void dasd_eckd_kick_validate_server(struct dasd_device *device)
 	dasd_put_device(device);
 }
 
-static u32 get_fcx_max_data(struct dasd_device *device)
-{
-	struct dasd_eckd_private *private = device->private;
-	int fcx_in_css, fcx_in_gneq, fcx_in_features;
-	int tpm, mdc;
-
-	if (dasd_nofcx)
-		return 0;
-	/* is transport mode supported? */
-	fcx_in_css = css_general_characteristics.fcx;
-	fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
-	fcx_in_features = private->features.feature[40] & 0x80;
-	tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
-
-	if (!tpm)
-		return 0;
-
-	mdc = ccw_device_get_mdc(device->cdev, 0);
-	if (mdc < 0) {
-		dev_warn(&device->cdev->dev, "Detecting the maximum supported"
-			 " data size for zHPF requests failed\n");
-		return 0;
-	} else
-		return (u32)mdc * FCX_MAX_DATA_FACTOR;
-}
-
 /*
  * Check device characteristics.
  * If the device is accessible using ECKD discipline, the device is enabled.
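
get_fcx_max_data() is removed here after being reinserted essentially verbatim earlier in the file (the user-visible warning string is merely un-split), so that it is already in scope for its new additional caller dasd_eckd_reset_path(), which re-reads the zHPF data-size limit when previously disabled paths are brought back, without needing a forward declaration.
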
@@ -1729,10 +1742,11 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
 	if (rc)
 		goto out_err1;
 
-	/* set default timeout */
+	/* set some default values */
 	device->default_expires = DASD_EXPIRES;
-	/* set default retry count */
 	device->default_retries = DASD_RETRIES;
+	device->path_thrhld = DASD_ECKD_PATH_THRHLD;
+	device->path_interval = DASD_ECKD_PATH_INTERVAL;
 
 	if (private->gneq) {
 		value = 1;
@@ -1839,13 +1853,16 @@ static void dasd_eckd_uncheck_device(struct dasd_device *device)
 	private->gneq = NULL;
 	private->conf_len = 0;
 	for (i = 0; i < 8; i++) {
-		kfree(private->path_conf_data[i]);
-		if ((__u8 *)private->path_conf_data[i] ==
+		kfree(device->path[i].conf_data);
+		if ((__u8 *)device->path[i].conf_data ==
 		    private->conf_data) {
 			private->conf_data = NULL;
 			private->conf_len = 0;
 		}
-		private->path_conf_data[i] = NULL;
+		device->path[i].conf_data = NULL;
+		device->path[i].cssid = 0;
+		device->path[i].ssid = 0;
+		device->path[i].chpid = 0;
 	}
 	kfree(private->conf_data);
 	private->conf_data = NULL;
@@ -2966,7 +2983,7 @@ static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
 	if (cqr->block && (cqr->startdev != cqr->block->base)) {
 		dasd_eckd_reset_ccw_to_base_io(cqr);
 		cqr->startdev = cqr->block->base;
-		cqr->lpm = cqr->block->base->path_data.opm;
+		cqr->lpm = dasd_path_get_opm(cqr->block->base);
 	}
 };
 
@@ -3251,7 +3268,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
 	cqr->memdev = startdev;
 	cqr->block = block;
 	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
-	cqr->lpm = startdev->path_data.ppm;
+	cqr->lpm = dasd_path_get_ppm(startdev);
 	cqr->retries = startdev->default_retries;
 	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
@@ -3426,7 +3443,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
 	cqr->memdev = startdev;
 	cqr->block = block;
 	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
-	cqr->lpm = startdev->path_data.ppm;
+	cqr->lpm = dasd_path_get_ppm(startdev);
 	cqr->retries = startdev->default_retries;
 	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
@@ -3735,7 +3752,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
 	cqr->memdev = startdev;
 	cqr->block = block;
 	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
-	cqr->lpm = startdev->path_data.ppm;
+	cqr->lpm = dasd_path_get_ppm(startdev);
 	cqr->retries = startdev->default_retries;
 	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
@@ -3962,7 +3979,7 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
 	cqr->memdev = startdev;
 	cqr->block = block;
 	cqr->expires = startdev->default_expires * HZ;
-	cqr->lpm = startdev->path_data.ppm;
+	cqr->lpm = dasd_path_get_ppm(startdev);
 	cqr->retries = startdev->default_retries;
 	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
@@ -4783,7 +4800,8 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
4783 req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw), 4800 req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
4784 scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw), 4801 scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
4785 scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw), 4802 scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
4786 irb->scsw.tm.fcxs, irb->scsw.tm.schxs, 4803 irb->scsw.tm.fcxs,
4804 (irb->scsw.tm.ifob << 7) | irb->scsw.tm.sesq,
4787 req ? req->intrc : 0); 4805 req ? req->intrc : 0);
4788 len += sprintf(page + len, PRINTK_HEADER 4806 len += sprintf(page + len, PRINTK_HEADER
4789 " device %s: Failing TCW: %p\n", 4807 " device %s: Failing TCW: %p\n",
@@ -5306,11 +5324,10 @@ static int dasd_hosts_print(struct dasd_device *device, struct seq_file *m)
5306 */ 5324 */
5307static int 5325static int
5308dasd_eckd_psf_cuir_response(struct dasd_device *device, int response, 5326dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
5309 __u32 message_id, 5327 __u32 message_id, __u8 lpum)
5310 struct channel_path_desc *desc,
5311 struct subchannel_id sch_id)
5312{ 5328{
5313 struct dasd_psf_cuir_response *psf_cuir; 5329 struct dasd_psf_cuir_response *psf_cuir;
5330 int pos = pathmask_to_pos(lpum);
5314 struct dasd_ccw_req *cqr; 5331 struct dasd_ccw_req *cqr;
5315 struct ccw1 *ccw; 5332 struct ccw1 *ccw;
5316 int rc; 5333 int rc;
@@ -5328,11 +5345,10 @@ dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
5328 psf_cuir = (struct dasd_psf_cuir_response *)cqr->data; 5345 psf_cuir = (struct dasd_psf_cuir_response *)cqr->data;
5329 psf_cuir->order = PSF_ORDER_CUIR_RESPONSE; 5346 psf_cuir->order = PSF_ORDER_CUIR_RESPONSE;
5330 psf_cuir->cc = response; 5347 psf_cuir->cc = response;
5331 if (desc) 5348 psf_cuir->chpid = device->path[pos].chpid;
5332 psf_cuir->chpid = desc->chpid;
5333 psf_cuir->message_id = message_id; 5349 psf_cuir->message_id = message_id;
5334 psf_cuir->cssid = sch_id.cssid; 5350 psf_cuir->cssid = device->path[pos].cssid;
5335 psf_cuir->ssid = sch_id.ssid; 5351 psf_cuir->ssid = device->path[pos].ssid;
5336 ccw = cqr->cpaddr; 5352 ccw = cqr->cpaddr;
5337 ccw->cmd_code = DASD_ECKD_CCW_PSF; 5353 ccw->cmd_code = DASD_ECKD_CCW_PSF;
5338 ccw->cda = (__u32)(addr_t)psf_cuir; 5354 ccw->cda = (__u32)(addr_t)psf_cuir;
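pathmask_to_pos() maps the single-bit logical path mask (lpum) delivered with the message to an index into the new per-path array, with bit 0x80 corresponding to position 0. The helper, defined in the s390 <asm/cio.h> header, behaves equivalently to:

    static inline u8 pathmask_to_pos(u8 mask)
    {
        return 8 - ffs(mask);   /* 0x80 -> 0, 0x40 -> 1, ..., 0x01 -> 7 */
    }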
@@ -5363,20 +5379,19 @@ static struct dasd_conf_data *dasd_eckd_get_ref_conf(struct dasd_device *device,
5363 __u8 lpum, 5379 __u8 lpum,
5364 struct dasd_cuir_message *cuir) 5380 struct dasd_cuir_message *cuir)
5365{ 5381{
5366 struct dasd_eckd_private *private = device->private;
5367 struct dasd_conf_data *conf_data; 5382 struct dasd_conf_data *conf_data;
5368 int path, pos; 5383 int path, pos;
5369 5384
5370 if (cuir->record_selector == 0) 5385 if (cuir->record_selector == 0)
5371 goto out; 5386 goto out;
5372 for (path = 0x80, pos = 0; path; path >>= 1, pos++) { 5387 for (path = 0x80, pos = 0; path; path >>= 1, pos++) {
5373 conf_data = private->path_conf_data[pos]; 5388 conf_data = device->path[pos].conf_data;
5374 if (conf_data->gneq.record_selector == 5389 if (conf_data->gneq.record_selector ==
5375 cuir->record_selector) 5390 cuir->record_selector)
5376 return conf_data; 5391 return conf_data;
5377 } 5392 }
5378out: 5393out:
5379 return private->path_conf_data[pathmask_to_pos(lpum)]; 5394 return device->path[pathmask_to_pos(lpum)].conf_data;
5380} 5395}
5381 5396
5382/* 5397/*
@@ -5391,7 +5406,6 @@ out:
5391static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum, 5406static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum,
5392 struct dasd_cuir_message *cuir) 5407 struct dasd_cuir_message *cuir)
5393{ 5408{
5394 struct dasd_eckd_private *private = device->private;
5395 struct dasd_conf_data *ref_conf_data; 5409 struct dasd_conf_data *ref_conf_data;
5396 unsigned long bitmask = 0, mask = 0; 5410 unsigned long bitmask = 0, mask = 0;
5397 struct dasd_conf_data *conf_data; 5411 struct dasd_conf_data *conf_data;
@@ -5417,11 +5431,10 @@ static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum,
5417 mask |= cuir->neq_map[1] << 8; 5431 mask |= cuir->neq_map[1] << 8;
5418 mask |= cuir->neq_map[0] << 16; 5432 mask |= cuir->neq_map[0] << 16;
5419 5433
5420 for (path = 0x80; path; path >>= 1) { 5434 for (path = 0; path < 8; path++) {
5421 /* initialise data per path */ 5435 /* initialise data per path */
5422 bitmask = mask; 5436 bitmask = mask;
5423 pos = pathmask_to_pos(path); 5437 conf_data = device->path[path].conf_data;
5424 conf_data = private->path_conf_data[pos];
5425 pos = 8 - ffs(cuir->ned_map); 5438 pos = 8 - ffs(cuir->ned_map);
5426 ned = (char *) &conf_data->neds[pos]; 5439 ned = (char *) &conf_data->neds[pos];
5427 /* compare reference ned and per path ned */ 5440 /* compare reference ned and per path ned */
@@ -5442,33 +5455,29 @@ static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum,
5442 continue; 5455 continue;
5443 /* device and path match the reference values, 5456 /* device and path match the reference values,
5444 add path to CUIR scope */ 5457 add path to CUIR scope */
5445 tbcpm |= path; 5458 tbcpm |= 0x80 >> path;
5446 } 5459 }
5447 return tbcpm; 5460 return tbcpm;
5448} 5461}
5449 5462
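The comparison mask built near the top of dasd_eckd_cuir_scope() spans three NEQ map bytes (neq_map[2] is OR-ed in immediately before the lines shown). Written as one expression:

    unsigned long mask = cuir->neq_map[2] |
                         (cuir->neq_map[1] << 8) |
                         (cuir->neq_map[0] << 16);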
5450static void dasd_eckd_cuir_notify_user(struct dasd_device *device, 5463static void dasd_eckd_cuir_notify_user(struct dasd_device *device,
5451 unsigned long paths, 5464 unsigned long paths, int action)
5452 struct subchannel_id sch_id, int action)
5453{ 5465{
5454 struct channel_path_desc *desc;
5455 int pos; 5466 int pos;
5456 5467
5457 while (paths) { 5468 while (paths) {
5458 /* get position of bit in mask */ 5469 /* get position of bit in mask */
5459 pos = ffs(paths) - 1; 5470 pos = 8 - ffs(paths);
5460 /* get channel path descriptor from this position */ 5471 /* get channel path descriptor from this position */
5461 desc = ccw_device_get_chp_desc(device->cdev, 7 - pos);
5462 if (action == CUIR_QUIESCE) 5472 if (action == CUIR_QUIESCE)
5463 pr_warn("Service on the storage server caused path " 5473 pr_warn("Service on the storage server caused path %x.%02x to go offline",
5464 "%x.%02x to go offline", sch_id.cssid, 5474 device->path[pos].cssid,
5465 desc ? desc->chpid : 0); 5475 device->path[pos].chpid);
5466 else if (action == CUIR_RESUME) 5476 else if (action == CUIR_RESUME)
5467 pr_info("Path %x.%02x is back online after service " 5477 pr_info("Path %x.%02x is back online after service on the storage server",
5468 "on the storage server", sch_id.cssid, 5478 device->path[pos].cssid,
5469 desc ? desc->chpid : 0); 5479 device->path[pos].chpid);
5470 kfree(desc); 5480 clear_bit(7 - pos, &paths);
5471 clear_bit(pos, &paths);
5472 } 5481 }
5473} 5482}
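Note the bit arithmetic in the reworked dasd_eckd_cuir_notify_user(): ffs() counts from the least significant bit, while path position 0 lives in bit 0x80, hence pos = 8 - ffs(paths) and clear_bit(7 - pos, &paths). A worked example on an 8-bit mask:

    unsigned long paths = 0xa0;   /* paths at positions 0 and 2 */
    int pos = 8 - ffs(paths);     /* ffs(0xa0) = 6, so pos = 2   */
    clear_bit(7 - pos, &paths);   /* clears 0x20, 0x80 remains   */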
5474 5483
@@ -5479,16 +5488,16 @@ static int dasd_eckd_cuir_remove_path(struct dasd_device *device, __u8 lpum,
5479 5488
5480 tbcpm = dasd_eckd_cuir_scope(device, lpum, cuir); 5489 tbcpm = dasd_eckd_cuir_scope(device, lpum, cuir);
5481 /* nothing to do if path is not in use */ 5490 /* nothing to do if path is not in use */
5482 if (!(device->path_data.opm & tbcpm)) 5491 if (!(dasd_path_get_opm(device) & tbcpm))
5483 return 0; 5492 return 0;
5484 if (!(device->path_data.opm & ~tbcpm)) { 5493 if (!(dasd_path_get_opm(device) & ~tbcpm)) {
5485 /* no path would be left if the CUIR action is taken, 5494 /* no path would be left if the CUIR action is taken,
5486 return error */ 5495 return error */
5487 return -EINVAL; 5496 return -EINVAL;
5488 } 5497 }
5489 /* remove device from operational path mask */ 5498 /* remove device from operational path mask */
5490 device->path_data.opm &= ~tbcpm; 5499 dasd_path_remove_opm(device, tbcpm);
5491 device->path_data.cuirpm |= tbcpm; 5500 dasd_path_add_cuirpm(device, tbcpm);
5492 return tbcpm; 5501 return tbcpm;
5493} 5502}
5494 5503
@@ -5501,7 +5510,6 @@ static int dasd_eckd_cuir_remove_path(struct dasd_device *device, __u8 lpum,
5501 * notify the already set offline devices again 5510 * notify the already set offline devices again
5502 */ 5511 */
5503static int dasd_eckd_cuir_quiesce(struct dasd_device *device, __u8 lpum, 5512static int dasd_eckd_cuir_quiesce(struct dasd_device *device, __u8 lpum,
5504 struct subchannel_id sch_id,
5505 struct dasd_cuir_message *cuir) 5513 struct dasd_cuir_message *cuir)
5506{ 5514{
5507 struct dasd_eckd_private *private = device->private; 5515 struct dasd_eckd_private *private = device->private;
@@ -5556,14 +5564,13 @@ static int dasd_eckd_cuir_quiesce(struct dasd_device *device, __u8 lpum,
5556 } 5564 }
5557 } 5565 }
5558 /* notify user about all paths affected by CUIR action */ 5566 /* notify user about all paths affected by CUIR action */
5559 dasd_eckd_cuir_notify_user(device, paths, sch_id, CUIR_QUIESCE); 5567 dasd_eckd_cuir_notify_user(device, paths, CUIR_QUIESCE);
5560 return 0; 5568 return 0;
5561out_err: 5569out_err:
5562 return tbcpm; 5570 return tbcpm;
5563} 5571}
5564 5572
5565static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum, 5573static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
5566 struct subchannel_id sch_id,
5567 struct dasd_cuir_message *cuir) 5574 struct dasd_cuir_message *cuir)
5568{ 5575{
5569 struct dasd_eckd_private *private = device->private; 5576 struct dasd_eckd_private *private = device->private;
@@ -5581,8 +5588,8 @@ static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
5581 alias_list) { 5588 alias_list) {
5582 tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir); 5589 tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
5583 paths |= tbcpm; 5590 paths |= tbcpm;
5584 if (!(dev->path_data.opm & tbcpm)) { 5591 if (!(dasd_path_get_opm(dev) & tbcpm)) {
5585 dev->path_data.tbvpm |= tbcpm; 5592 dasd_path_add_tbvpm(dev, tbcpm);
5586 dasd_schedule_device_bh(dev); 5593 dasd_schedule_device_bh(dev);
5587 } 5594 }
5588 } 5595 }
@@ -5591,8 +5598,8 @@ static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
5591 alias_list) { 5598 alias_list) {
5592 tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir); 5599 tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
5593 paths |= tbcpm; 5600 paths |= tbcpm;
5594 if (!(dev->path_data.opm & tbcpm)) { 5601 if (!(dasd_path_get_opm(dev) & tbcpm)) {
5595 dev->path_data.tbvpm |= tbcpm; 5602 dasd_path_add_tbvpm(dev, tbcpm);
5596 dasd_schedule_device_bh(dev); 5603 dasd_schedule_device_bh(dev);
5597 } 5604 }
5598 } 5605 }
@@ -5605,8 +5612,8 @@ static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
5605 alias_list) { 5612 alias_list) {
5606 tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir); 5613 tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
5607 paths |= tbcpm; 5614 paths |= tbcpm;
5608 if (!(dev->path_data.opm & tbcpm)) { 5615 if (!(dasd_path_get_opm(dev) & tbcpm)) {
5609 dev->path_data.tbvpm |= tbcpm; 5616 dasd_path_add_tbvpm(dev, tbcpm);
5610 dasd_schedule_device_bh(dev); 5617 dasd_schedule_device_bh(dev);
5611 } 5618 }
5612 } 5619 }
@@ -5615,14 +5622,14 @@ static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
5615 alias_list) { 5622 alias_list) {
5616 tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir); 5623 tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
5617 paths |= tbcpm; 5624 paths |= tbcpm;
5618 if (!(dev->path_data.opm & tbcpm)) { 5625 if (!(dasd_path_get_opm(dev) & tbcpm)) {
5619 dev->path_data.tbvpm |= tbcpm; 5626 dasd_path_add_tbvpm(dev, tbcpm);
5620 dasd_schedule_device_bh(dev); 5627 dasd_schedule_device_bh(dev);
5621 } 5628 }
5622 } 5629 }
5623 } 5630 }
5624 /* notify user about all paths affected by CUIR action */ 5631 /* notify user about all paths affected by CUIR action */
5625 dasd_eckd_cuir_notify_user(device, paths, sch_id, CUIR_RESUME); 5632 dasd_eckd_cuir_notify_user(device, paths, CUIR_RESUME);
5626 return 0; 5633 return 0;
5627} 5634}
5628 5635
@@ -5630,38 +5637,31 @@ static void dasd_eckd_handle_cuir(struct dasd_device *device, void *messages,
5630 __u8 lpum) 5637 __u8 lpum)
5631{ 5638{
5632 struct dasd_cuir_message *cuir = messages; 5639 struct dasd_cuir_message *cuir = messages;
5633 struct channel_path_desc *desc; 5640 int response;
5634 struct subchannel_id sch_id;
5635 int pos, response;
5636 5641
5637 DBF_DEV_EVENT(DBF_WARNING, device, 5642 DBF_DEV_EVENT(DBF_WARNING, device,
5638 "CUIR request: %016llx %016llx %016llx %08x", 5643 "CUIR request: %016llx %016llx %016llx %08x",
5639 ((u64 *)cuir)[0], ((u64 *)cuir)[1], ((u64 *)cuir)[2], 5644 ((u64 *)cuir)[0], ((u64 *)cuir)[1], ((u64 *)cuir)[2],
5640 ((u32 *)cuir)[3]); 5645 ((u32 *)cuir)[3]);
5641 ccw_device_get_schid(device->cdev, &sch_id);
5642 pos = pathmask_to_pos(lpum);
5643 desc = ccw_device_get_chp_desc(device->cdev, pos);
5644 5646
5645 if (cuir->code == CUIR_QUIESCE) { 5647 if (cuir->code == CUIR_QUIESCE) {
5646 /* quiesce */ 5648 /* quiesce */
5647 if (dasd_eckd_cuir_quiesce(device, lpum, sch_id, cuir)) 5649 if (dasd_eckd_cuir_quiesce(device, lpum, cuir))
5648 response = PSF_CUIR_LAST_PATH; 5650 response = PSF_CUIR_LAST_PATH;
5649 else 5651 else
5650 response = PSF_CUIR_COMPLETED; 5652 response = PSF_CUIR_COMPLETED;
5651 } else if (cuir->code == CUIR_RESUME) { 5653 } else if (cuir->code == CUIR_RESUME) {
5652 /* resume */ 5654 /* resume */
5653 dasd_eckd_cuir_resume(device, lpum, sch_id, cuir); 5655 dasd_eckd_cuir_resume(device, lpum, cuir);
5654 response = PSF_CUIR_COMPLETED; 5656 response = PSF_CUIR_COMPLETED;
5655 } else 5657 } else
5656 response = PSF_CUIR_NOT_SUPPORTED; 5658 response = PSF_CUIR_NOT_SUPPORTED;
5657 5659
5658 dasd_eckd_psf_cuir_response(device, response, 5660 dasd_eckd_psf_cuir_response(device, response,
5659 cuir->message_id, desc, sch_id); 5661 cuir->message_id, lpum);
5660 DBF_DEV_EVENT(DBF_WARNING, device, 5662 DBF_DEV_EVENT(DBF_WARNING, device,
5661 "CUIR response: %d on message ID %08x", response, 5663 "CUIR response: %d on message ID %08x", response,
5662 cuir->message_id); 5664 cuir->message_id);
5663 /* free descriptor copy */
5664 kfree(desc);
5665 /* to make sure there is no attention left, schedule work again */ 5665 /* to make sure there is no attention left, schedule work again */
5666 device->discipline->check_attention(device, lpum); 5666 device->discipline->check_attention(device, lpum);
5667} 5667}
@@ -5708,6 +5708,63 @@ static int dasd_eckd_check_attention(struct dasd_device *device, __u8 lpum)
5708 return 0; 5708 return 0;
5709} 5709}
5710 5710
5711static int dasd_eckd_disable_hpf_path(struct dasd_device *device, __u8 lpum)
5712{
5713 if (~lpum & dasd_path_get_opm(device)) {
5714 dasd_path_add_nohpfpm(device, lpum);
5715 dasd_path_remove_opm(device, lpum);
5716 dev_err(&device->cdev->dev,
5717 "Channel path %02X lost HPF functionality and is disabled\n",
5718 lpum);
5719 return 1;
5720 }
5721 return 0;
5722}
5723
5724static void dasd_eckd_disable_hpf_device(struct dasd_device *device)
5725{
5726 struct dasd_eckd_private *private = device->private;
5727
5728 dev_err(&device->cdev->dev,
5729 "High Performance FICON disabled\n");
5730 private->fcx_max_data = 0;
5731}
5732
5733static int dasd_eckd_hpf_enabled(struct dasd_device *device)
5734{
5735 struct dasd_eckd_private *private = device->private;
5736
5737 return private->fcx_max_data ? 1 : 0;
5738}
5739
5740static void dasd_eckd_handle_hpf_error(struct dasd_device *device,
5741 struct irb *irb)
5742{
5743 struct dasd_eckd_private *private = device->private;
5744
5745 if (!private->fcx_max_data) {
5746 /* sanity check: the error makes no sense on a device without HPF */
5747 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
5748 "Trying to disable HPF for a non-HPF device");
5749 return;
5750 }
5751 if (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX) {
5752 dasd_eckd_disable_hpf_device(device);
5753 } else if (irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX) {
5754 if (dasd_eckd_disable_hpf_path(device, irb->esw.esw1.lpum))
5755 return;
5756 dasd_eckd_disable_hpf_device(device);
5757 dasd_path_set_tbvpm(device,
5758 dasd_path_get_hpfpm(device));
5759 }
5760 /*
5761 * prevent that any new I/O ist started on the device and schedule a
5762 * requeue of existing requests
5763 */
5764 dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
5765 dasd_schedule_requeue(device);
5766}
5767
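The four functions above reach the DASD core through the discipline operations added to dasd_int.h further down. A hedged sketch of the expected dispatch (the actual call site is outside this patch hunk):

    /* sketch, not the verbatim core code */
    if (device->discipline->handle_hpf_error)
        device->discipline->handle_hpf_error(device, irb);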
5711static struct ccw_driver dasd_eckd_driver = { 5768static struct ccw_driver dasd_eckd_driver = {
5712 .driver = { 5769 .driver = {
5713 .name = "dasd-eckd", 5770 .name = "dasd-eckd",
@@ -5776,6 +5833,10 @@ static struct dasd_discipline dasd_eckd_discipline = {
5776 .check_attention = dasd_eckd_check_attention, 5833 .check_attention = dasd_eckd_check_attention,
5777 .host_access_count = dasd_eckd_host_access_count, 5834 .host_access_count = dasd_eckd_host_access_count,
5778 .hosts_print = dasd_hosts_print, 5835 .hosts_print = dasd_hosts_print,
5836 .handle_hpf_error = dasd_eckd_handle_hpf_error,
5837 .disable_hpf = dasd_eckd_disable_hpf_device,
5838 .hpf_enabled = dasd_eckd_hpf_enabled,
5839 .reset_path = dasd_eckd_reset_path,
5779}; 5840};
5780 5841
5781static int __init 5842static int __init
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index 59803626ea36..e2a710c250a5 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -94,6 +94,8 @@
94#define FCX_MAX_DATA_FACTOR 65536 94#define FCX_MAX_DATA_FACTOR 65536
95#define DASD_ECKD_RCD_DATA_SIZE 256 95#define DASD_ECKD_RCD_DATA_SIZE 256
96 96
97#define DASD_ECKD_PATH_THRHLD 256
98#define DASD_ECKD_PATH_INTERVAL 300
97 99
98/***************************************************************************** 100/*****************************************************************************
99 * SECTION: Type Definitions 101 * SECTION: Type Definitions
@@ -535,8 +537,7 @@ struct dasd_eckd_private {
535 struct dasd_eckd_characteristics rdc_data; 537 struct dasd_eckd_characteristics rdc_data;
536 u8 *conf_data; 538 u8 *conf_data;
537 int conf_len; 539 int conf_len;
538 /* per path configuration data */ 540
539 struct dasd_conf_data *path_conf_data[8];
540 /* pointers to specific parts in the conf_data */ 541 /* pointers to specific parts in the conf_data */
541 struct dasd_ned *ned; 542 struct dasd_ned *ned;
542 struct dasd_sneq *sneq; 543 struct dasd_sneq *sneq;
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index 21ef63cf0960..6c5d671304b4 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -454,20 +454,30 @@ static void dasd_eer_snss_cb(struct dasd_ccw_req *cqr, void *data)
454 */ 454 */
455int dasd_eer_enable(struct dasd_device *device) 455int dasd_eer_enable(struct dasd_device *device)
456{ 456{
457 struct dasd_ccw_req *cqr; 457 struct dasd_ccw_req *cqr = NULL;
458 unsigned long flags; 458 unsigned long flags;
459 struct ccw1 *ccw; 459 struct ccw1 *ccw;
460 int rc = 0;
460 461
462 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
461 if (device->eer_cqr) 463 if (device->eer_cqr)
462 return 0; 464 goto out;
465 else if (!device->discipline ||
466 strcmp(device->discipline->name, "ECKD"))
467 rc = -EMEDIUMTYPE;
468 else if (test_bit(DASD_FLAG_OFFLINE, &device->flags))
469 rc = -EBUSY;
463 470
464 if (!device->discipline || strcmp(device->discipline->name, "ECKD")) 471 if (rc)
465 return -EPERM; /* FIXME: -EMEDIUMTYPE ? */ 472 goto out;
466 473
467 cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */, 474 cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */,
468 SNSS_DATA_SIZE, device); 475 SNSS_DATA_SIZE, device);
469 if (IS_ERR(cqr)) 476 if (IS_ERR(cqr)) {
470 return -ENOMEM; 477 rc = -ENOMEM;
478 cqr = NULL;
479 goto out;
480 }
471 481
472 cqr->startdev = device; 482 cqr->startdev = device;
473 cqr->retries = 255; 483 cqr->retries = 255;
@@ -485,15 +495,18 @@ int dasd_eer_enable(struct dasd_device *device)
485 cqr->status = DASD_CQR_FILLED; 495 cqr->status = DASD_CQR_FILLED;
486 cqr->callback = dasd_eer_snss_cb; 496 cqr->callback = dasd_eer_snss_cb;
487 497
488 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
489 if (!device->eer_cqr) { 498 if (!device->eer_cqr) {
490 device->eer_cqr = cqr; 499 device->eer_cqr = cqr;
491 cqr = NULL; 500 cqr = NULL;
492 } 501 }
502
503out:
493 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 504 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
505
494 if (cqr) 506 if (cqr)
495 dasd_kfree_request(cqr, device); 507 dasd_kfree_request(cqr, device);
496 return 0; 508
509 return rc;
497} 510}
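The rework moves every check, including the device->eer_cqr test, under the ccwdev lock, so a concurrent enable or a racing set-offline can no longer slip in between check and publish; an unused request is freed only after the lock is dropped. The pattern in miniature, with illustrative names:

    spin_lock_irqsave(lock, flags);
    if (!shared) {          /* publish only if nobody beat us to it */
        shared = new;
        new = NULL;
    }
    spin_unlock_irqrestore(lock, flags);
    kfree(new);             /* frees nothing when the object was published */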
498 511
499/* 512/*
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
index d138d0116734..113c1c1fa1af 100644
--- a/drivers/s390/block/dasd_erp.c
+++ b/drivers/s390/block/dasd_erp.c
@@ -96,7 +96,7 @@ dasd_default_erp_action(struct dasd_ccw_req *cqr)
96 "default ERP called (%i retries left)", 96 "default ERP called (%i retries left)",
97 cqr->retries); 97 cqr->retries);
98 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) 98 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
99 cqr->lpm = device->path_data.opm; 99 cqr->lpm = dasd_path_get_opm(device);
100 cqr->status = DASD_CQR_FILLED; 100 cqr->status = DASD_CQR_FILLED;
101 } else { 101 } else {
102 pr_err("%s: default ERP has run out of retries and failed\n", 102 pr_err("%s: default ERP has run out of retries and failed\n",
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index d7b5b550364b..462cab5d4302 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -168,7 +168,7 @@ dasd_fba_check_characteristics(struct dasd_device *device)
168 168
169 device->default_expires = DASD_EXPIRES; 169 device->default_expires = DASD_EXPIRES;
170 device->default_retries = FBA_DEFAULT_RETRIES; 170 device->default_retries = FBA_DEFAULT_RETRIES;
171 device->path_data.opm = LPM_ANYPATH; 171 dasd_path_set_opm(device, LPM_ANYPATH);
172 172
173 readonly = dasd_device_is_ro(device); 173 readonly = dasd_device_is_ro(device);
174 if (readonly) 174 if (readonly)
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 87ff6cef872f..24be210c10e5 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -55,6 +55,7 @@
55#include <asm/debug.h> 55#include <asm/debug.h>
56#include <asm/dasd.h> 56#include <asm/dasd.h>
57#include <asm/idals.h> 57#include <asm/idals.h>
58#include <linux/bitops.h>
58 59
59/* DASD discipline magic */ 60/* DASD discipline magic */
60#define DASD_ECKD_MAGIC 0xC5C3D2C4 61#define DASD_ECKD_MAGIC 0xC5C3D2C4
@@ -377,6 +378,10 @@ struct dasd_discipline {
377 int (*check_attention)(struct dasd_device *, __u8); 378 int (*check_attention)(struct dasd_device *, __u8);
378 int (*host_access_count)(struct dasd_device *); 379 int (*host_access_count)(struct dasd_device *);
379 int (*hosts_print)(struct dasd_device *, struct seq_file *); 380 int (*hosts_print)(struct dasd_device *, struct seq_file *);
381 void (*handle_hpf_error)(struct dasd_device *, struct irb *);
382 void (*disable_hpf)(struct dasd_device *);
383 int (*hpf_enabled)(struct dasd_device *);
384 void (*reset_path)(struct dasd_device *, __u8);
380}; 385};
381 386
382extern struct dasd_discipline *dasd_diag_discipline_pointer; 387extern struct dasd_discipline *dasd_diag_discipline_pointer;
@@ -397,17 +402,31 @@ extern struct dasd_discipline *dasd_diag_discipline_pointer;
397#define DASD_EER_STATECHANGE 3 402#define DASD_EER_STATECHANGE 3
398#define DASD_EER_PPRCSUSPEND 4 403#define DASD_EER_PPRCSUSPEND 4
399 404
405/* DASD path handling */
406
407#define DASD_PATH_OPERATIONAL 1
408#define DASD_PATH_TBV 2
409#define DASD_PATH_PP 3
410#define DASD_PATH_NPP 4
411#define DASD_PATH_MISCABLED 5
412#define DASD_PATH_NOHPF 6
413#define DASD_PATH_CUIR 7
414#define DASD_PATH_IFCC 8
415
416#define DASD_THRHLD_MAX 4294967295U
417#define DASD_INTERVAL_MAX 4294967295U
418
400struct dasd_path { 419struct dasd_path {
401 __u8 opm; 420 unsigned long flags;
402 __u8 tbvpm; 421 u8 cssid;
403 __u8 ppm; 422 u8 ssid;
404 __u8 npm; 423 u8 chpid;
405 /* paths that are not used because of a special condition */ 424 struct dasd_conf_data *conf_data;
406 __u8 cablepm; /* miss-cabled */ 425 atomic_t error_count;
407 __u8 hpfpm; /* the HPF requirements of the other paths are not met */ 426 unsigned long long errorclk;
408 __u8 cuirpm; /* CUIR varied offline */
409}; 427};
410 428
429
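struct dasd_path is no longer a bundle of eight-bit masks hanging off the device but a per-path record; the old masks are reconstructed on demand by the inline helpers added at the end of this header. Illustrative only, using names from this hunk and the accessors below:

    static void dump_path_state(struct dasd_device *device)
    {
        int chp;

        for (chp = 0; chp < 8; chp++)
            pr_info("chp %d: %soperational, chpid %x.%02x\n", chp,
                    dasd_path_is_operational(device, chp) ? "" : "not ",
                    device->path[chp].cssid, device->path[chp].chpid);
    }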
411struct dasd_profile_info { 430struct dasd_profile_info {
412 /* legacy part of profile data, as in dasd_profile_info_t */ 431 /* legacy part of profile data, as in dasd_profile_info_t */
413 unsigned int dasd_io_reqs; /* number of requests processed */ 432 unsigned int dasd_io_reqs; /* number of requests processed */
@@ -458,7 +477,8 @@ struct dasd_device {
458 struct dasd_discipline *discipline; 477 struct dasd_discipline *discipline;
459 struct dasd_discipline *base_discipline; 478 struct dasd_discipline *base_discipline;
460 void *private; 479 void *private;
461 struct dasd_path path_data; 480 struct dasd_path path[8];
481 __u8 opm;
462 482
463 /* Device state and target state. */ 483 /* Device state and target state. */
464 int state, target; 484 int state, target;
@@ -483,6 +503,7 @@ struct dasd_device {
483 struct work_struct reload_device; 503 struct work_struct reload_device;
484 struct work_struct kick_validate; 504 struct work_struct kick_validate;
485 struct work_struct suc_work; 505 struct work_struct suc_work;
506 struct work_struct requeue_requests;
486 struct timer_list timer; 507 struct timer_list timer;
487 508
488 debug_info_t *debug_area; 509 debug_info_t *debug_area;
@@ -498,6 +519,9 @@ struct dasd_device {
498 519
499 unsigned long blk_timeout; 520 unsigned long blk_timeout;
500 521
522 unsigned long path_thrhld;
523 unsigned long path_interval;
524
501 struct dentry *debugfs_dentry; 525 struct dentry *debugfs_dentry;
502 struct dentry *hosts_dentry; 526 struct dentry *hosts_dentry;
503 struct dasd_profile profile; 527 struct dasd_profile profile;
@@ -707,6 +731,7 @@ void dasd_set_target_state(struct dasd_device *, int);
707void dasd_kick_device(struct dasd_device *); 731void dasd_kick_device(struct dasd_device *);
708void dasd_restore_device(struct dasd_device *); 732void dasd_restore_device(struct dasd_device *);
709void dasd_reload_device(struct dasd_device *); 733void dasd_reload_device(struct dasd_device *);
734void dasd_schedule_requeue(struct dasd_device *);
710 735
711void dasd_add_request_head(struct dasd_ccw_req *); 736void dasd_add_request_head(struct dasd_ccw_req *);
712void dasd_add_request_tail(struct dasd_ccw_req *); 737void dasd_add_request_tail(struct dasd_ccw_req *);
@@ -835,4 +860,410 @@ static inline int dasd_eer_enabled(struct dasd_device *device)
835#define dasd_eer_enabled(d) (0) 860#define dasd_eer_enabled(d) (0)
836#endif /* CONFIG_DASD_ERR */ 861#endif /* CONFIG_DASD_ERR */
837 862
863
864/* DASD path handling functions */
865
866/*
867 * helper functions to modify bit masks for a given channel path for a device
868 */
869static inline int dasd_path_is_operational(struct dasd_device *device, int chp)
870{
871 return test_bit(DASD_PATH_OPERATIONAL, &device->path[chp].flags);
872}
873
874static inline int dasd_path_need_verify(struct dasd_device *device, int chp)
875{
876 return test_bit(DASD_PATH_TBV, &device->path[chp].flags);
877}
878
879static inline void dasd_path_verify(struct dasd_device *device, int chp)
880{
881 __set_bit(DASD_PATH_TBV, &device->path[chp].flags);
882}
883
884static inline void dasd_path_clear_verify(struct dasd_device *device, int chp)
885{
886 __clear_bit(DASD_PATH_TBV, &device->path[chp].flags);
887}
888
889static inline void dasd_path_clear_all_verify(struct dasd_device *device)
890{
891 int chp;
892
893 for (chp = 0; chp < 8; chp++)
894 dasd_path_clear_verify(device, chp);
895}
896
897static inline void dasd_path_operational(struct dasd_device *device, int chp)
898{
899 __set_bit(DASD_PATH_OPERATIONAL, &device->path[chp].flags);
900 device->opm |= (0x80 >> chp);
901}
902
903static inline void dasd_path_nonpreferred(struct dasd_device *device, int chp)
904{
905 __set_bit(DASD_PATH_NPP, &device->path[chp].flags);
906}
907
908static inline int dasd_path_is_nonpreferred(struct dasd_device *device, int chp)
909{
910 return test_bit(DASD_PATH_NPP, &device->path[chp].flags);
911}
912
913static inline void dasd_path_clear_nonpreferred(struct dasd_device *device,
914 int chp)
915{
916 __clear_bit(DASD_PATH_NPP, &device->path[chp].flags);
917}
918
919static inline void dasd_path_preferred(struct dasd_device *device, int chp)
920{
921 __set_bit(DASD_PATH_PP, &device->path[chp].flags);
922}
923
924static inline int dasd_path_is_preferred(struct dasd_device *device, int chp)
925{
926 return test_bit(DASD_PATH_PP, &device->path[chp].flags);
927}
928
929static inline void dasd_path_clear_preferred(struct dasd_device *device,
930 int chp)
931{
932 __clear_bit(DASD_PATH_PP, &device->path[chp].flags);
933}
934
935static inline void dasd_path_clear_oper(struct dasd_device *device, int chp)
936{
937 __clear_bit(DASD_PATH_OPERATIONAL, &device->path[chp].flags);
938 device->opm &= ~(0x80 >> chp);
939}
940
941static inline void dasd_path_clear_cable(struct dasd_device *device, int chp)
942{
943 __clear_bit(DASD_PATH_MISCABLED, &device->path[chp].flags);
944}
945
946static inline void dasd_path_cuir(struct dasd_device *device, int chp)
947{
948 __set_bit(DASD_PATH_CUIR, &device->path[chp].flags);
949}
950
951static inline int dasd_path_is_cuir(struct dasd_device *device, int chp)
952{
953 return test_bit(DASD_PATH_CUIR, &device->path[chp].flags);
954}
955
956static inline void dasd_path_clear_cuir(struct dasd_device *device, int chp)
957{
958 __clear_bit(DASD_PATH_CUIR, &device->path[chp].flags);
959}
960
961static inline void dasd_path_ifcc(struct dasd_device *device, int chp)
962{
963 set_bit(DASD_PATH_IFCC, &device->path[chp].flags);
964}
965
966static inline int dasd_path_is_ifcc(struct dasd_device *device, int chp)
967{
968 return test_bit(DASD_PATH_IFCC, &device->path[chp].flags);
969}
970
971static inline void dasd_path_clear_ifcc(struct dasd_device *device, int chp)
972{
973 clear_bit(DASD_PATH_IFCC, &device->path[chp].flags);
974}
975
976static inline void dasd_path_clear_nohpf(struct dasd_device *device, int chp)
977{
978 __clear_bit(DASD_PATH_NOHPF, &device->path[chp].flags);
979}
980
981static inline void dasd_path_miscabled(struct dasd_device *device, int chp)
982{
983 __set_bit(DASD_PATH_MISCABLED, &device->path[chp].flags);
984}
985
986static inline int dasd_path_is_miscabled(struct dasd_device *device, int chp)
987{
988 return test_bit(DASD_PATH_MISCABLED, &device->path[chp].flags);
989}
990
991static inline void dasd_path_nohpf(struct dasd_device *device, int chp)
992{
993 __set_bit(DASD_PATH_NOHPF, &device->path[chp].flags);
994}
995
996static inline int dasd_path_is_nohpf(struct dasd_device *device, int chp)
997{
998 return test_bit(DASD_PATH_NOHPF, &device->path[chp].flags);
999}
1000
1001/*
1002 * get functions for path masks
1003 * will return a path mask for the given device
1004 */
1005
1006static inline __u8 dasd_path_get_opm(struct dasd_device *device)
1007{
1008 return device->opm;
1009}
1010
1011static inline __u8 dasd_path_get_tbvpm(struct dasd_device *device)
1012{
1013 int chp;
1014 __u8 tbvpm = 0x00;
1015
1016 for (chp = 0; chp < 8; chp++)
1017 if (dasd_path_need_verify(device, chp))
1018 tbvpm |= 0x80 >> chp;
1019 return tbvpm;
1020}
1021
1022static inline __u8 dasd_path_get_nppm(struct dasd_device *device)
1023{
1024 int chp;
1025 __u8 npm = 0x00;
1026
1027 for (chp = 0; chp < 8; chp++) {
1028 if (dasd_path_is_nonpreferred(device, chp))
1029 npm |= 0x80 >> chp;
1030 }
1031 return npm;
1032}
1033
1034static inline __u8 dasd_path_get_ppm(struct dasd_device *device)
1035{
1036 int chp;
1037 __u8 ppm = 0x00;
1038
1039 for (chp = 0; chp < 8; chp++)
1040 if (dasd_path_is_preferred(device, chp))
1041 ppm |= 0x80 >> chp;
1042 return ppm;
1043}
1044
1045static inline __u8 dasd_path_get_cablepm(struct dasd_device *device)
1046{
1047 int chp;
1048 __u8 cablepm = 0x00;
1049
1050 for (chp = 0; chp < 8; chp++)
1051 if (dasd_path_is_miscabled(device, chp))
1052 cablepm |= 0x80 >> chp;
1053 return cablepm;
1054}
1055
1056static inline __u8 dasd_path_get_cuirpm(struct dasd_device *device)
1057{
1058 int chp;
1059 __u8 cuirpm = 0x00;
1060
1061 for (chp = 0; chp < 8; chp++)
1062 if (dasd_path_is_cuir(device, chp))
1063 cuirpm |= 0x80 >> chp;
1064 return cuirpm;
1065}
1066
1067static inline __u8 dasd_path_get_ifccpm(struct dasd_device *device)
1068{
1069 int chp;
1070 __u8 ifccpm = 0x00;
1071
1072 for (chp = 0; chp < 8; chp++)
1073 if (dasd_path_is_ifcc(device, chp))
1074 ifccpm |= 0x80 >> chp;
1075 return ifccpm;
1076}
1077
1078static inline __u8 dasd_path_get_hpfpm(struct dasd_device *device)
1079{
1080 int chp;
1081 __u8 hpfpm = 0x00;
1082
1083 for (chp = 0; chp < 8; chp++)
1084 if (dasd_path_is_nohpf(device, chp))
1085 hpfpm |= 0x80 >> chp;
1086 return hpfpm;
1087}
1088
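All of the get functions rely on one convention: channel path position 0 is the most significant bit of the 8-bit mask. Two one-liners capture the mapping (illustrative, not part of the patch):

    static inline u8 chp_to_mask(int chp)  { return 0x80 >> chp; }
    static inline int mask_to_chp(u8 mask) { return 8 - ffs(mask); }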
1089/*
1090 * add functions for path masks
1091 * the existing path mask will be extended by the given path mask
1092 */
1093static inline void dasd_path_add_tbvpm(struct dasd_device *device, __u8 pm)
1094{
1095 int chp;
1096
1097 for (chp = 0; chp < 8; chp++)
1098 if (pm & (0x80 >> chp))
1099 dasd_path_verify(device, chp);
1100}
1101
1102static inline __u8 dasd_path_get_notoperpm(struct dasd_device *device)
1103{
1104 int chp;
1105 __u8 nopm = 0x00;
1106
1107 for (chp = 0; chp < 8; chp++)
1108 if (dasd_path_is_nohpf(device, chp) ||
1109 dasd_path_is_ifcc(device, chp) ||
1110 dasd_path_is_cuir(device, chp) ||
1111 dasd_path_is_miscabled(device, chp))
1112 nopm |= 0x80 >> chp;
1113 return nopm;
1114}
1115
1116static inline void dasd_path_add_opm(struct dasd_device *device, __u8 pm)
1117{
1118 int chp;
1119
1120 for (chp = 0; chp < 8; chp++)
1121 if (pm & (0x80 >> chp)) {
1122 dasd_path_operational(device, chp);
1123 /*
1124 * if the path is used,
1125 * it should not be in one of the negative lists
1126 */
1127 dasd_path_clear_nohpf(device, chp);
1128 dasd_path_clear_cuir(device, chp);
1129 dasd_path_clear_cable(device, chp);
1130 dasd_path_clear_ifcc(device, chp);
1131 }
1132}
1133
1134static inline void dasd_path_add_cablepm(struct dasd_device *device, __u8 pm)
1135{
1136 int chp;
1137
1138 for (chp = 0; chp < 8; chp++)
1139 if (pm & (0x80 >> chp))
1140 dasd_path_miscabled(device, chp);
1141}
1142
1143static inline void dasd_path_add_cuirpm(struct dasd_device *device, __u8 pm)
1144{
1145 int chp;
1146
1147 for (chp = 0; chp < 8; chp++)
1148 if (pm & (0x80 >> chp))
1149 dasd_path_cuir(device, chp);
1150}
1151
1152static inline void dasd_path_add_ifccpm(struct dasd_device *device, __u8 pm)
1153{
1154 int chp;
1155
1156 for (chp = 0; chp < 8; chp++)
1157 if (pm & (0x80 >> chp))
1158 dasd_path_ifcc(device, chp);
1159}
1160
1161static inline void dasd_path_add_nppm(struct dasd_device *device, __u8 pm)
1162{
1163 int chp;
1164
1165 for (chp = 0; chp < 8; chp++)
1166 if (pm & (0x80 >> chp))
1167 dasd_path_nonpreferred(device, chp);
1168}
1169
1170static inline void dasd_path_add_nohpfpm(struct dasd_device *device, __u8 pm)
1171{
1172 int chp;
1173
1174 for (chp = 0; chp < 8; chp++)
1175 if (pm & (0x80 >> chp))
1176 dasd_path_nohpf(device, chp);
1177}
1178
1179static inline void dasd_path_add_ppm(struct dasd_device *device, __u8 pm)
1180{
1181 int chp;
1182
1183 for (chp = 0; chp < 8; chp++)
1184 if (pm & (0x80 >> chp))
1185 dasd_path_preferred(device, chp);
1186}
1187
1188/*
1189 * set functions for path masks
1190 * the existing path mask will be replaced by the given path mask
1191 */
1192static inline void dasd_path_set_tbvpm(struct dasd_device *device, __u8 pm)
1193{
1194 int chp;
1195
1196 for (chp = 0; chp < 8; chp++)
1197 if (pm & (0x80 >> chp))
1198 dasd_path_verify(device, chp);
1199 else
1200 dasd_path_clear_verify(device, chp);
1201}
1202
1203static inline void dasd_path_set_opm(struct dasd_device *device, __u8 pm)
1204{
1205 int chp;
1206
1207 for (chp = 0; chp < 8; chp++) {
1208 dasd_path_clear_oper(device, chp);
1209 if (pm & (0x80 >> chp)) {
1210 dasd_path_operational(device, chp);
1211 /*
1212 * if the path is used,
1213 * it should not be in one of the negative lists
1214 */
1215 dasd_path_clear_nohpf(device, chp);
1216 dasd_path_clear_cuir(device, chp);
1217 dasd_path_clear_cable(device, chp);
1218 dasd_path_clear_ifcc(device, chp);
1219 }
1220 }
1221}
1222
1223/*
1224 * remove functions for path masks
1225 * the bits in the given path mask will be cleared from the existing path mask
1226 */
1227static inline void dasd_path_remove_opm(struct dasd_device *device, __u8 pm)
1228{
1229 int chp;
1230
1231 for (chp = 0; chp < 8; chp++) {
1232 if (pm & (0x80 >> chp))
1233 dasd_path_clear_oper(device, chp);
1234 }
1235}
1236
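The three families differ only in how they combine with the current state: add extends it, set replaces it, remove clears bits from it. A usage sketch with illustrative mask values:

    dasd_path_add_cuirpm(device, 0x80);  /* additionally mark path 0 CUIR quiesced */
    dasd_path_set_tbvpm(device, 0xc0);   /* exactly paths 0 and 1 need verification */
    dasd_path_remove_opm(device, 0x80);  /* path 0 is no longer operational */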
1237/*
1238 * add the newly available path to the to-be-verified path mask and remove
1239 * it from normal operation until it is verified
1240 */
1241static inline void dasd_path_available(struct dasd_device *device, int chp)
1242{
1243 dasd_path_clear_oper(device, chp);
1244 dasd_path_verify(device, chp);
1245}
1246
1247static inline void dasd_path_notoper(struct dasd_device *device, int chp)
1248{
1249 dasd_path_clear_oper(device, chp);
1250 dasd_path_clear_preferred(device, chp);
1251 dasd_path_clear_nonpreferred(device, chp);
1252}
1253
1254/*
1255 * remove all paths from normal operation
1256 */
1257static inline void dasd_path_no_path(struct dasd_device *device)
1258{
1259 int chp;
1260
1261 for (chp = 0; chp < 8; chp++)
1262 dasd_path_notoper(device, chp);
1263
1264 dasd_path_clear_all_verify(device);
1265}
1266
1267/* end - path handling */
1268
838#endif /* DASD_H */ 1269#endif /* DASD_H */
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 931d10e86837..1b8d825623bd 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -9,7 +9,6 @@
9 * Dan Morrison, IBM Corporation <dmorriso@cse.buffalo.edu> 9 * Dan Morrison, IBM Corporation <dmorriso@cse.buffalo.edu>
10 */ 10 */
11 11
12#include <linux/module.h>
13#include <linux/types.h> 12#include <linux/types.h>
14#include <linux/kdev_t.h> 13#include <linux/kdev_t.h>
15#include <linux/tty.h> 14#include <linux/tty.h>
@@ -1215,13 +1214,4 @@ static int __init tty3215_init(void)
1215 tty3215_driver = driver; 1214 tty3215_driver = driver;
1216 return 0; 1215 return 0;
1217} 1216}
1218 1217device_initcall(tty3215_init);
1219static void __exit tty3215_exit(void)
1220{
1221 tty_unregister_driver(tty3215_driver);
1222 put_tty_driver(tty3215_driver);
1223 ccw_driver_unregister(&raw3215_ccw_driver);
1224}
1225
1226module_init(tty3215_init);
1227module_exit(tty3215_exit);
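con3215 can no longer be built as a module, so the exit path is dead code and module_init() gives way to device_initcall(); the same conversion recurs in several hunks below. The pattern reduced to its skeleton (illustrative):

    static int __init mydrv_init(void)
    {
        return 0;               /* driver registration goes here */
    }
    device_initcall(mydrv_init);    /* built-in code needs no __exit path */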
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index 7a10c56334bb..e1fc7eb043d6 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -59,6 +59,7 @@
59 59
60typedef unsigned int sclp_cmdw_t; 60typedef unsigned int sclp_cmdw_t;
61 61
62#define SCLP_CMDW_READ_CPU_INFO 0x00010001
62#define SCLP_CMDW_READ_EVENT_DATA 0x00770005 63#define SCLP_CMDW_READ_EVENT_DATA 0x00770005
63#define SCLP_CMDW_WRITE_EVENT_DATA 0x00760005 64#define SCLP_CMDW_WRITE_EVENT_DATA 0x00760005
64#define SCLP_CMDW_WRITE_EVENT_MASK 0x00780005 65#define SCLP_CMDW_WRITE_EVENT_MASK 0x00780005
@@ -102,6 +103,28 @@ struct init_sccb {
102 sccb_mask_t sclp_send_mask; 103 sccb_mask_t sclp_send_mask;
103} __attribute__((packed)); 104} __attribute__((packed));
104 105
106struct read_cpu_info_sccb {
107 struct sccb_header header;
108 u16 nr_configured;
109 u16 offset_configured;
110 u16 nr_standby;
111 u16 offset_standby;
112 u8 reserved[4096 - 16];
113} __attribute__((packed, aligned(PAGE_SIZE)));
114
115static inline void sclp_fill_core_info(struct sclp_core_info *info,
116 struct read_cpu_info_sccb *sccb)
117{
118 char *page = (char *) sccb;
119
120 memset(info, 0, sizeof(*info));
121 info->configured = sccb->nr_configured;
122 info->standby = sccb->nr_standby;
123 info->combined = sccb->nr_configured + sccb->nr_standby;
124 memcpy(&info->core, page + sccb->offset_configured,
125 info->combined * sizeof(struct sclp_core_entry));
126}
127
105#define SCLP_HAS_CHP_INFO (sclp.facilities & 0x8000000000000000ULL) 128#define SCLP_HAS_CHP_INFO (sclp.facilities & 0x8000000000000000ULL)
106#define SCLP_HAS_CHP_RECONFIG (sclp.facilities & 0x2000000000000000ULL) 129#define SCLP_HAS_CHP_RECONFIG (sclp.facilities & 0x2000000000000000ULL)
107#define SCLP_HAS_CPU_INFO (sclp.facilities & 0x0800000000000000ULL) 130#define SCLP_HAS_CPU_INFO (sclp.facilities & 0x0800000000000000ULL)
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index e3fc7539116b..b9c5522b8a68 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -80,33 +80,10 @@ out:
80 * CPU configuration related functions. 80 * CPU configuration related functions.
81 */ 81 */
82 82
83#define SCLP_CMDW_READ_CPU_INFO 0x00010001
84#define SCLP_CMDW_CONFIGURE_CPU 0x00110001 83#define SCLP_CMDW_CONFIGURE_CPU 0x00110001
85#define SCLP_CMDW_DECONFIGURE_CPU 0x00100001 84#define SCLP_CMDW_DECONFIGURE_CPU 0x00100001
86 85
87struct read_cpu_info_sccb { 86int _sclp_get_core_info(struct sclp_core_info *info)
88 struct sccb_header header;
89 u16 nr_configured;
90 u16 offset_configured;
91 u16 nr_standby;
92 u16 offset_standby;
93 u8 reserved[4096 - 16];
94} __attribute__((packed, aligned(PAGE_SIZE)));
95
96static void sclp_fill_core_info(struct sclp_core_info *info,
97 struct read_cpu_info_sccb *sccb)
98{
99 char *page = (char *) sccb;
100
101 memset(info, 0, sizeof(*info));
102 info->configured = sccb->nr_configured;
103 info->standby = sccb->nr_standby;
104 info->combined = sccb->nr_configured + sccb->nr_standby;
105 memcpy(&info->core, page + sccb->offset_configured,
106 info->combined * sizeof(struct sclp_core_entry));
107}
108
109int sclp_get_core_info(struct sclp_core_info *info)
110{ 87{
111 int rc; 88 int rc;
112 struct read_cpu_info_sccb *sccb; 89 struct read_cpu_info_sccb *sccb;
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index c71df0c7dedc..f8e46c22e641 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -221,6 +221,36 @@ static int __init sclp_set_event_mask(struct init_sccb *sccb,
221 return sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_MASK, sccb); 221 return sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_MASK, sccb);
222} 222}
223 223
224static struct sclp_core_info sclp_core_info_early __initdata;
225static int sclp_core_info_early_valid __initdata;
226
227static void __init sclp_init_core_info_early(struct read_cpu_info_sccb *sccb)
228{
229 int rc;
230
231 if (!SCLP_HAS_CPU_INFO)
232 return;
233 memset(sccb, 0, sizeof(*sccb));
234 sccb->header.length = sizeof(*sccb);
235 do {
236 rc = sclp_cmd_sync_early(SCLP_CMDW_READ_CPU_INFO, sccb);
237 } while (rc == -EBUSY);
238 if (rc)
239 return;
240 if (sccb->header.response_code != 0x0010)
241 return;
242 sclp_fill_core_info(&sclp_core_info_early, sccb);
243 sclp_core_info_early_valid = 1;
244}
245
246int __init _sclp_get_core_info_early(struct sclp_core_info *info)
247{
248 if (!sclp_core_info_early_valid)
249 return -EIO;
250 *info = sclp_core_info_early;
251 return 0;
252}
253
224static long __init sclp_hsa_size_init(struct sdias_sccb *sccb) 254static long __init sclp_hsa_size_init(struct sdias_sccb *sccb)
225{ 255{
226 sccb_init_eq_size(sccb); 256 sccb_init_eq_size(sccb);
@@ -293,6 +323,7 @@ void __init sclp_early_detect(void)
293 void *sccb = &sccb_early; 323 void *sccb = &sccb_early;
294 324
295 sclp_facilities_detect(sccb); 325 sclp_facilities_detect(sccb);
326 sclp_init_core_info_early(sccb);
296 sclp_hsa_size_detect(sccb); 327 sclp_hsa_size_detect(sccb);
297 328
298 /* Turn off SCLP event notifications. Also save remote masks in the 329 /* Turn off SCLP event notifications. Also save remote masks in the
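The early copy exists because the CPU configuration is needed before the regular SCLP machinery is usable; later callers can fall back to a live request. A hedged sketch of a wrapper combining the two variants (the real caller sits in the smp/setup code, not in this hunk):

    static inline int sclp_get_core_info(struct sclp_core_info *info, int early)
    {
        if (early)
            return _sclp_get_core_info_early(info);
        return _sclp_get_core_info(info);
    }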
diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c
index 475e470d9768..e4958511168a 100644
--- a/drivers/s390/char/sclp_quiesce.c
+++ b/drivers/s390/char/sclp_quiesce.c
@@ -6,7 +6,6 @@
6 * Peter Oberparleiter <peter.oberparleiter@de.ibm.com> 6 * Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
7 */ 7 */
8 8
9#include <linux/module.h>
10#include <linux/types.h> 9#include <linux/types.h>
11#include <linux/cpumask.h> 10#include <linux/cpumask.h>
12#include <linux/smp.h> 11#include <linux/smp.h>
@@ -80,5 +79,4 @@ static int __init sclp_quiesce_init(void)
80{ 79{
81 return sclp_register(&sclp_quiesce_event); 80 return sclp_register(&sclp_quiesce_event);
82} 81}
83 82device_initcall(sclp_quiesce_init);
84module_init(sclp_quiesce_init);
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
index 3c6e174e19b6..9259017a1295 100644
--- a/drivers/s390/char/sclp_tty.c
+++ b/drivers/s390/char/sclp_tty.c
@@ -7,7 +7,6 @@
7 * Martin Schwidefsky <schwidefsky@de.ibm.com> 7 * Martin Schwidefsky <schwidefsky@de.ibm.com>
8 */ 8 */
9 9
10#include <linux/module.h>
11#include <linux/kmod.h> 10#include <linux/kmod.h>
12#include <linux/tty.h> 11#include <linux/tty.h>
13#include <linux/tty_driver.h> 12#include <linux/tty_driver.h>
@@ -573,4 +572,4 @@ sclp_tty_init(void)
573 sclp_tty_driver = driver; 572 sclp_tty_driver = driver;
574 return 0; 573 return 0;
575} 574}
576module_init(sclp_tty_init); 575device_initcall(sclp_tty_init);
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index e883063c7258..3167e8581994 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -870,7 +870,7 @@ static int __init vmlogrdr_init(void)
870 goto cleanup; 870 goto cleanup;
871 871
872 for (i=0; i < MAXMINOR; ++i ) { 872 for (i=0; i < MAXMINOR; ++i ) {
873 sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL); 873 sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
874 if (!sys_ser[i].buffer) { 874 if (!sys_ser[i].buffer) {
875 rc = -ENOMEM; 875 rc = -ENOMEM;
876 break; 876 break;
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 16992e2a40ad..f771e5e9e26b 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -7,6 +7,7 @@
7 * 7 *
8 * Copyright IBM Corp. 2003, 2008 8 * Copyright IBM Corp. 2003, 2008
9 * Author(s): Michael Holzheu 9 * Author(s): Michael Holzheu
10 * License: GPL
10 */ 11 */
11 12
12#define KMSG_COMPONENT "zdump" 13#define KMSG_COMPONENT "zdump"
@@ -16,7 +17,6 @@
16#include <linux/slab.h> 17#include <linux/slab.h>
17#include <linux/miscdevice.h> 18#include <linux/miscdevice.h>
18#include <linux/debugfs.h> 19#include <linux/debugfs.h>
19#include <linux/module.h>
20#include <linux/memblock.h> 20#include <linux/memblock.h>
21 21
22#include <asm/asm-offsets.h> 22#include <asm/asm-offsets.h>
@@ -320,7 +320,7 @@ static int __init zcore_init(void)
320 goto fail; 320 goto fail;
321 } 321 }
322 322
323 pr_alert("DETECTED 'S390X (64 bit) OS'\n"); 323 pr_alert("The dump process started for a 64-bit operating system\n");
324 rc = init_cpu_info(); 324 rc = init_cpu_info();
325 if (rc) 325 if (rc)
326 goto fail; 326 goto fail;
@@ -364,22 +364,4 @@ fail:
364 diag308(DIAG308_REL_HSA, NULL); 364 diag308(DIAG308_REL_HSA, NULL);
365 return rc; 365 return rc;
366} 366}
367
368static void __exit zcore_exit(void)
369{
370 debug_unregister(zcore_dbf);
371 sclp_sdias_exit();
372 free_page((unsigned long) ipl_block);
373 debugfs_remove(zcore_hsa_file);
374 debugfs_remove(zcore_reipl_file);
375 debugfs_remove(zcore_memmap_file);
376 debugfs_remove(zcore_dir);
377 diag308(DIAG308_REL_HSA, NULL);
378}
379
380MODULE_AUTHOR("Copyright IBM Corp. 2003,2008");
381MODULE_DESCRIPTION("zcore module for zfcpdump support");
382MODULE_LICENSE("GPL");
383
384subsys_initcall(zcore_init); 367subsys_initcall(zcore_init);
385module_exit(zcore_exit);
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 268aa23afa01..6b6386e9a500 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -30,7 +30,7 @@
30#include <linux/device.h> 30#include <linux/device.h>
31#include <linux/init.h> 31#include <linux/init.h>
32#include <linux/list.h> 32#include <linux/list.h>
33#include <linux/module.h> 33#include <linux/export.h>
34#include <linux/moduleparam.h> 34#include <linux/moduleparam.h>
35#include <linux/slab.h> 35#include <linux/slab.h>
36#include <linux/timex.h> /* get_tod_clock() */ 36#include <linux/timex.h> /* get_tod_clock() */
@@ -1389,13 +1389,7 @@ static int __init init_cmf(void)
1389 "%s (mode %s)\n", format_string, detect_string); 1389 "%s (mode %s)\n", format_string, detect_string);
1390 return 0; 1390 return 0;
1391} 1391}
1392module_init(init_cmf); 1392device_initcall(init_cmf);
1393
1394
1395MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
1396MODULE_LICENSE("GPL");
1397MODULE_DESCRIPTION("channel measurement facility base driver\n"
1398 "Copyright IBM Corp. 2003\n");
1399 1393
1400EXPORT_SYMBOL_GPL(enable_cmf); 1394EXPORT_SYMBOL_GPL(enable_cmf);
1401EXPORT_SYMBOL_GPL(disable_cmf); 1395EXPORT_SYMBOL_GPL(disable_cmf);
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 3d2b20ee613f..bc099b61394d 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -5,12 +5,14 @@
5 * 5 *
6 * Author(s): Arnd Bergmann (arndb@de.ibm.com) 6 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
7 * Cornelia Huck (cornelia.huck@de.ibm.com) 7 * Cornelia Huck (cornelia.huck@de.ibm.com)
8 *
9 * License: GPL
8 */ 10 */
9 11
10#define KMSG_COMPONENT "cio" 12#define KMSG_COMPONENT "cio"
11#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 13#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
12 14
13#include <linux/module.h> 15#include <linux/export.h>
14#include <linux/init.h> 16#include <linux/init.h>
15#include <linux/device.h> 17#include <linux/device.h>
16#include <linux/slab.h> 18#include <linux/slab.h>
@@ -1285,5 +1287,3 @@ void css_driver_unregister(struct css_driver *cdrv)
1285 driver_unregister(&cdrv->drv); 1287 driver_unregister(&cdrv->drv);
1286} 1288}
1287EXPORT_SYMBOL_GPL(css_driver_unregister); 1289EXPORT_SYMBOL_GPL(css_driver_unregister);
1288
1289MODULE_LICENSE("GPL");
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 6a58bc8f46e2..79823ee9c100 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -5,12 +5,14 @@
5 * Author(s): Arnd Bergmann (arndb@de.ibm.com) 5 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
6 * Cornelia Huck (cornelia.huck@de.ibm.com) 6 * Cornelia Huck (cornelia.huck@de.ibm.com)
7 * Martin Schwidefsky (schwidefsky@de.ibm.com) 7 * Martin Schwidefsky (schwidefsky@de.ibm.com)
8 *
9 * License: GPL
8 */ 10 */
9 11
10#define KMSG_COMPONENT "cio" 12#define KMSG_COMPONENT "cio"
11#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 13#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
12 14
13#include <linux/module.h> 15#include <linux/export.h>
14#include <linux/init.h> 16#include <linux/init.h>
15#include <linux/spinlock.h> 17#include <linux/spinlock.h>
16#include <linux/errno.h> 18#include <linux/errno.h>
@@ -145,7 +147,6 @@ static struct css_device_id io_subchannel_ids[] = {
145 { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, }, 147 { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
146 { /* end of list */ }, 148 { /* end of list */ },
147}; 149};
148MODULE_DEVICE_TABLE(css, io_subchannel_ids);
149 150
150static int io_subchannel_prepare(struct subchannel *sch) 151static int io_subchannel_prepare(struct subchannel *sch)
151{ 152{
@@ -2150,7 +2151,6 @@ int ccw_device_siosl(struct ccw_device *cdev)
2150} 2151}
2151EXPORT_SYMBOL_GPL(ccw_device_siosl); 2152EXPORT_SYMBOL_GPL(ccw_device_siosl);
2152 2153
2153MODULE_LICENSE("GPL");
2154EXPORT_SYMBOL(ccw_device_set_online); 2154EXPORT_SYMBOL(ccw_device_set_online);
2155EXPORT_SYMBOL(ccw_device_set_offline); 2155EXPORT_SYMBOL(ccw_device_set_offline);
2156EXPORT_SYMBOL(ccw_driver_register); 2156EXPORT_SYMBOL(ccw_driver_register);
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 065b1be98e2c..ec497af99dd8 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -13,7 +13,6 @@
13 */ 13 */
14enum dev_state { 14enum dev_state {
15 DEV_STATE_NOT_OPER, 15 DEV_STATE_NOT_OPER,
16 DEV_STATE_SENSE_PGID,
17 DEV_STATE_SENSE_ID, 16 DEV_STATE_SENSE_ID,
18 DEV_STATE_OFFLINE, 17 DEV_STATE_OFFLINE,
19 DEV_STATE_VERIFY, 18 DEV_STATE_VERIFY,
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 8327d47e08b6..9afb5ce13007 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -1058,12 +1058,6 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
1058 [DEV_EVENT_TIMEOUT] = ccw_device_nop, 1058 [DEV_EVENT_TIMEOUT] = ccw_device_nop,
1059 [DEV_EVENT_VERIFY] = ccw_device_nop, 1059 [DEV_EVENT_VERIFY] = ccw_device_nop,
1060 }, 1060 },
1061 [DEV_STATE_SENSE_PGID] = {
1062 [DEV_EVENT_NOTOPER] = ccw_device_request_event,
1063 [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
1064 [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
1065 [DEV_EVENT_VERIFY] = ccw_device_nop,
1066 },
1067 [DEV_STATE_SENSE_ID] = { 1061 [DEV_STATE_SENSE_ID] = {
1068 [DEV_EVENT_NOTOPER] = ccw_device_request_event, 1062 [DEV_EVENT_NOTOPER] = ccw_device_request_event,
1069 [DEV_EVENT_INTERRUPT] = ccw_device_request_event, 1063 [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 877d9f601e63..cf8c4ac6323a 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -3,8 +3,10 @@
3 * 3 *
4 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) 4 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
5 * Cornelia Huck (cornelia.huck@de.ibm.com) 5 * Cornelia Huck (cornelia.huck@de.ibm.com)
6 *
7 * License: GPL
6 */ 8 */
7#include <linux/module.h> 9#include <linux/export.h>
8#include <linux/init.h> 10#include <linux/init.h>
9#include <linux/errno.h> 11#include <linux/errno.h>
10#include <linux/slab.h> 12#include <linux/slab.h>
@@ -676,7 +678,6 @@ void ccw_device_get_schid(struct ccw_device *cdev, struct subchannel_id *schid)
676} 678}
677EXPORT_SYMBOL_GPL(ccw_device_get_schid); 679EXPORT_SYMBOL_GPL(ccw_device_get_schid);
678 680
679MODULE_LICENSE("GPL");
680EXPORT_SYMBOL(ccw_device_set_options_mask); 681EXPORT_SYMBOL(ccw_device_set_options_mask);
681EXPORT_SYMBOL(ccw_device_set_options); 682EXPORT_SYMBOL(ccw_device_set_options);
682EXPORT_SYMBOL(ccw_device_clear_options); 683EXPORT_SYMBOL(ccw_device_clear_options);
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index ed92fb09fc8e..f407b4f9d0ba 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -1273,7 +1273,7 @@ static int ap_uevent (struct device *dev, struct kobj_uevent_env *env)
1273 return retval; 1273 return retval;
1274} 1274}
1275 1275
1276static int ap_dev_suspend(struct device *dev, pm_message_t state) 1276static int ap_dev_suspend(struct device *dev)
1277{ 1277{
1278 struct ap_device *ap_dev = to_ap_dev(dev); 1278 struct ap_device *ap_dev = to_ap_dev(dev);
1279 1279
@@ -1287,11 +1287,6 @@ static int ap_dev_suspend(struct device *dev, pm_message_t state)
1287 return 0; 1287 return 0;
1288} 1288}
1289 1289
1290static int ap_dev_resume(struct device *dev)
1291{
1292 return 0;
1293}
1294
1295static void ap_bus_suspend(void) 1290static void ap_bus_suspend(void)
1296{ 1291{
1297 ap_suspend_flag = 1; 1292 ap_suspend_flag = 1;
@@ -1356,12 +1351,13 @@ static struct notifier_block ap_power_notifier = {
1356 .notifier_call = ap_power_event, 1351 .notifier_call = ap_power_event,
1357}; 1352};
1358 1353
1354static SIMPLE_DEV_PM_OPS(ap_bus_pm_ops, ap_dev_suspend, NULL);
1355
1359static struct bus_type ap_bus_type = { 1356static struct bus_type ap_bus_type = {
1360 .name = "ap", 1357 .name = "ap",
1361 .match = &ap_bus_match, 1358 .match = &ap_bus_match,
1362 .uevent = &ap_uevent, 1359 .uevent = &ap_uevent,
1363 .suspend = ap_dev_suspend, 1360 .pm = &ap_bus_pm_ops,
1364 .resume = ap_dev_resume,
1365}; 1361};
1366 1362
1367void ap_device_init_reply(struct ap_device *ap_dev, 1363void ap_device_init_reply(struct ap_device *ap_dev,
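SIMPLE_DEV_PM_OPS() replaces the legacy bus-level suspend/resume pair; a NULL callback is treated as a no-op by the PM core, which is why the empty ap_dev_resume() could simply be deleted. Roughly what the macro provides when CONFIG_PM_SLEEP is enabled (simplified):

    static const struct dev_pm_ops ap_bus_pm_ops = {
        .suspend  = ap_dev_suspend,
        .freeze   = ap_dev_suspend,
        .poweroff = ap_dev_suspend,
        /* .resume/.thaw/.restore stay NULL and act as no-ops */
    };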
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index 8688ad4c825f..639ed4e6afd1 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -24,7 +24,7 @@
24#include <linux/wait.h> 24#include <linux/wait.h>
25#include <linux/list.h> 25#include <linux/list.h>
26#include <linux/bitops.h> 26#include <linux/bitops.h>
27#include <linux/module.h> 27#include <linux/moduleparam.h>
28#include <linux/io.h> 28#include <linux/io.h>
29#include <linux/kvm_para.h> 29#include <linux/kvm_para.h>
30#include <linux/notifier.h> 30#include <linux/notifier.h>
@@ -235,16 +235,6 @@ static struct airq_info *new_airq_info(void)
235 return info; 235 return info;
236} 236}
237 237
238static void destroy_airq_info(struct airq_info *info)
239{
240 if (!info)
241 return;
242
243 unregister_adapter_interrupt(&info->airq);
244 airq_iv_release(info->aiv);
245 kfree(info);
246}
247
248static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs, 238static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs,
249 u64 *first, void **airq_info) 239 u64 *first, void **airq_info)
250{ 240{
@@ -1294,7 +1284,6 @@ static struct ccw_device_id virtio_ids[] = {
1294 { CCW_DEVICE(0x3832, 0) }, 1284 { CCW_DEVICE(0x3832, 0) },
1295 {}, 1285 {},
1296}; 1286};
1297MODULE_DEVICE_TABLE(ccw, virtio_ids);
1298 1287
1299static struct ccw_driver virtio_ccw_driver = { 1288static struct ccw_driver virtio_ccw_driver = {
1300 .driver = { 1289 .driver = {
@@ -1406,14 +1395,4 @@ static int __init virtio_ccw_init(void)
1406 no_auto_parse(); 1395 no_auto_parse();
1407 return ccw_driver_register(&virtio_ccw_driver); 1396 return ccw_driver_register(&virtio_ccw_driver);
1408} 1397}
1409module_init(virtio_ccw_init); 1398device_initcall(virtio_ccw_init);
1410
1411static void __exit virtio_ccw_exit(void)
1412{
1413 int i;
1414
1415 ccw_driver_unregister(&virtio_ccw_driver);
1416 for (i = 0; i < MAX_AIRQ_AREAS; i++)
1417 destroy_airq_info(airq_areas[i]);
1418}
1419module_exit(virtio_ccw_exit);