author    Linus Torvalds <torvalds@linux-foundation.org>    2015-02-11 20:42:32 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>    2015-02-11 20:42:32 -0500
commit    b3d6524ff7956c5a898d51a18eaecb62a60a2b84 (patch)
tree      cc049e7ec9edd9f5a76f286e04d8db9a1caa516a /arch/s390
parent    07f80d41cf24b7e6e76cd97d420167932c9a7f82 (diff)
parent    6a039eab53c01a58bfff95c78fc800ca7de27c77 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Martin Schwidefsky:

 - The remaining patches for the z13 machine support: kernel build
   option for z13, the cache synonym avoidance, SMT support,
   compare-and-delay for spinloops and the CEX5S crypto adapter.

 - The ftrace support for function tracing with the gcc hotpatch
   option. This touches common code Makefiles, Steven is ok with the
   changes.

 - The hypfs file system gets an extension to access diagnose 0x0c data
   in user space for performance analysis for Linux running under z/VM.

 - The iucv hvc console gets wildcard support for the user id
   filtering.

 - The cacheinfo code is converted to use the generic infrastructure.

 - Cleanup and bug fixes.

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (42 commits)
  s390/process: free vx save area when releasing tasks
  s390/hypfs: Eliminate hypfs interval
  s390/hypfs: Add diagnose 0c support
  s390/cacheinfo: don't use smp_processor_id() in preemptible context
  s390/zcrypt: fixed domain scanning problem (again)
  s390/smp: increase maximum value of NR_CPUS to 512
  s390/jump label: use different nop instruction
  s390/jump label: add sanity checks
  s390/mm: correct missing space when reporting user process faults
  s390/dasd: cleanup profiling
  s390/dasd: add locking for global_profile access
  s390/ftrace: hotpatch support for function tracing
  ftrace: let notrace function attribute disable hotpatching if necessary
  ftrace: allow architectures to specify ftrace compile options
  s390: reintroduce diag 44 calls for cpu_relax()
  s390/zcrypt: Add support for new crypto express (CEX5S) adapter.
  s390/zcrypt: Number of supported ap domains is not retrievable.
  s390/spinlock: add compare-and-delay to lock wait loops
  s390/tape: remove redundant if statement
  s390/hvc_iucv: add simple wildcard matches to the iucv allow filter
  ...
Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/Kconfig | 42
-rw-r--r--  arch/s390/Makefile | 12
-rw-r--r--  arch/s390/boot/compressed/misc.c | 3
-rw-r--r--  arch/s390/configs/default_defconfig | 2
-rw-r--r--  arch/s390/configs/gcov_defconfig | 1
-rw-r--r--  arch/s390/configs/performance_defconfig | 1
-rw-r--r--  arch/s390/configs/zfcpdump_defconfig | 1
-rw-r--r--  arch/s390/crypto/aes_s390.c | 4
-rw-r--r--  arch/s390/defconfig | 7
-rw-r--r--  arch/s390/hypfs/Makefile | 1
-rw-r--r--  arch/s390/hypfs/hypfs.h | 7
-rw-r--r--  arch/s390/hypfs/hypfs_dbfs.c | 49
-rw-r--r--  arch/s390/hypfs/hypfs_diag0c.c | 139
-rw-r--r--  arch/s390/hypfs/inode.c | 9
-rw-r--r--  arch/s390/include/asm/cpu_mf.h | 14
-rw-r--r--  arch/s390/include/asm/elf.h | 8
-rw-r--r--  arch/s390/include/asm/ftrace.h | 15
-rw-r--r--  arch/s390/include/asm/jump_label.h | 7
-rw-r--r--  arch/s390/include/asm/pgtable.h | 4
-rw-r--r--  arch/s390/include/asm/processor.h | 5
-rw-r--r--  arch/s390/include/asm/reset.h | 3
-rw-r--r--  arch/s390/include/asm/sclp.h | 7
-rw-r--r--  arch/s390/include/asm/setup.h | 3
-rw-r--r--  arch/s390/include/asm/sigp.h | 1
-rw-r--r--  arch/s390/include/asm/smp.h | 4
-rw-r--r--  arch/s390/include/asm/sysinfo.h | 20
-rw-r--r--  arch/s390/include/asm/topology.h | 4
-rw-r--r--  arch/s390/include/uapi/asm/hypfs.h | 35
-rw-r--r--  arch/s390/kernel/Makefile | 4
-rw-r--r--  arch/s390/kernel/base.S | 3
-rw-r--r--  arch/s390/kernel/cache.c | 391
-rw-r--r--  arch/s390/kernel/dis.c | 9
-rw-r--r--  arch/s390/kernel/early.c | 18
-rw-r--r--  arch/s390/kernel/entry.h | 4
-rw-r--r--  arch/s390/kernel/ftrace.c | 108
-rw-r--r--  arch/s390/kernel/head.S | 4
-rw-r--r--  arch/s390/kernel/ipl.c | 11
-rw-r--r--  arch/s390/kernel/jump_label.c | 63
-rw-r--r--  arch/s390/kernel/kprobes.c | 3
-rw-r--r--  arch/s390/kernel/machine_kexec.c | 19
-rw-r--r--  arch/s390/kernel/mcount.S | 2
-rw-r--r--  arch/s390/kernel/process.c | 18
-rw-r--r--  arch/s390/kernel/processor.c | 10
-rw-r--r--  arch/s390/kernel/sclp.S | 3
-rw-r--r--  arch/s390/kernel/setup.c | 3
-rw-r--r--  arch/s390/kernel/smp.c | 261
-rw-r--r--  arch/s390/kernel/sysinfo.c | 8
-rw-r--r--  arch/s390/kernel/topology.c | 63
-rw-r--r--  arch/s390/kernel/vtime.c | 58
-rw-r--r--  arch/s390/lib/spinlock.c | 52
-rw-r--r--  arch/s390/mm/fault.c | 4
-rw-r--r--  arch/s390/mm/init.c | 9
-rw-r--r--  arch/s390/mm/mmap.c | 142
-rw-r--r--  arch/s390/mm/pgtable.c | 6
-rw-r--r--  arch/s390/pci/pci_mmio.c | 4
55 files changed, 1120 insertions, 568 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 68b68d755fdf..373cd5badf1c 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -66,6 +66,7 @@ config S390
 	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
 	select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
 	select ARCH_HAS_GCOV_PROFILE_ALL
+	select ARCH_HAS_SG_CHAIN
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select ARCH_INLINE_READ_LOCK
 	select ARCH_INLINE_READ_LOCK_BH
@@ -116,7 +117,6 @@ config S390
 	select HAVE_BPF_JIT if 64BIT && PACK_STACK
 	select HAVE_CMPXCHG_DOUBLE
 	select HAVE_CMPXCHG_LOCAL
-	select HAVE_C_RECORDMCOUNT
 	select HAVE_DEBUG_KMEMLEAK
 	select HAVE_DYNAMIC_FTRACE if 64BIT
 	select HAVE_DYNAMIC_FTRACE_WITH_REGS if 64BIT
@@ -151,7 +151,6 @@ config S390
 	select TTY
 	select VIRT_CPU_ACCOUNTING
 	select VIRT_TO_BUS
-	select ARCH_HAS_SG_CHAIN

 config SCHED_OMIT_FRAME_POINTER
 	def_bool y
@@ -185,6 +184,10 @@ config HAVE_MARCH_ZEC12_FEATURES
 	def_bool n
 	select HAVE_MARCH_Z196_FEATURES

+config HAVE_MARCH_Z13_FEATURES
+	def_bool n
+	select HAVE_MARCH_ZEC12_FEATURES
+
 choice
 	prompt "Processor type"
 	default MARCH_G5
@@ -244,6 +247,14 @@ config MARCH_ZEC12
 	  2827 series). The kernel will be slightly faster but will not work on
 	  older machines.

+config MARCH_Z13
+	bool "IBM z13"
+	select HAVE_MARCH_Z13_FEATURES if 64BIT
+	help
+	  Select this to enable optimizations for IBM z13 (2964 series).
+	  The kernel will be slightly faster but will not work on older
+	  machines.
+
 endchoice

 config MARCH_G5_TUNE
@@ -267,6 +278,9 @@ config MARCH_Z196_TUNE
 config MARCH_ZEC12_TUNE
 	def_bool TUNE_ZEC12 || MARCH_ZEC12 && TUNE_DEFAULT

+config MARCH_Z13_TUNE
+	def_bool TUNE_Z13 || MARCH_Z13 && TUNE_DEFAULT
+
 choice
 	prompt "Tune code generation"
 	default TUNE_DEFAULT
@@ -305,6 +319,9 @@ config TUNE_Z196
 config TUNE_ZEC12
 	bool "IBM zBC12 and zEC12"

+config TUNE_Z13
+	bool "IBM z13"
+
 endchoice

 config 64BIT
@@ -356,14 +373,14 @@ config SMP
 	  Even if you don't know what to do here, say Y.

 config NR_CPUS
-	int "Maximum number of CPUs (2-256)"
-	range 2 256
+	int "Maximum number of CPUs (2-512)"
+	range 2 512
 	depends on SMP
 	default "32" if !64BIT
 	default "64" if 64BIT
 	help
 	  This allows you to specify the maximum number of CPUs which this
-	  kernel will support. The maximum supported value is 256 and the
+	  kernel will support. The maximum supported value is 512 and the
 	  minimum value which makes sense is 2.

 	  This is purely to save memory - each supported CPU adds
@@ -378,17 +395,26 @@ config HOTPLUG_CPU
 	  can be controlled through /sys/devices/system/cpu/cpu#.
 	  Say N if you want to disable CPU hotplug.

+config SCHED_SMT
+	def_bool n
+
 config SCHED_MC
 	def_bool n

 config SCHED_BOOK
+	def_bool n
+
+config SCHED_TOPOLOGY
 	def_bool y
-	prompt "Book scheduler support"
+	prompt "Topology scheduler support"
 	depends on SMP
+	select SCHED_SMT
 	select SCHED_MC
+	select SCHED_BOOK
 	help
-	  Book scheduler support improves the CPU scheduler's decision making
-	  when dealing with machines that have several books.
+	  Topology scheduler support improves the CPU scheduler's decision
+	  making when dealing with machines that have multi-threading,
+	  multiple cores or multiple books.

 source kernel/Kconfig.preempt

diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 878e67973151..acb6859c6a95 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -42,6 +42,7 @@ mflags-$(CONFIG_MARCH_Z9_109) := -march=z9-109
 mflags-$(CONFIG_MARCH_Z10)   := -march=z10
 mflags-$(CONFIG_MARCH_Z196)  := -march=z196
 mflags-$(CONFIG_MARCH_ZEC12) := -march=zEC12
+mflags-$(CONFIG_MARCH_Z13)   := -march=z13

 aflags-y += $(mflags-y)
 cflags-y += $(mflags-y)
@@ -53,6 +54,7 @@ cflags-$(CONFIG_MARCH_Z9_109_TUNE) += -mtune=z9-109
 cflags-$(CONFIG_MARCH_Z10_TUNE)    += -mtune=z10
 cflags-$(CONFIG_MARCH_Z196_TUNE)   += -mtune=z196
 cflags-$(CONFIG_MARCH_ZEC12_TUNE)  += -mtune=zEC12
+cflags-$(CONFIG_MARCH_Z13_TUNE)    += -mtune=z13

 #KBUILD_IMAGE is necessary for make rpm
 KBUILD_IMAGE	:= arch/s390/boot/image
@@ -85,6 +87,16 @@ ifeq ($(call cc-option-yn,-mwarn-dynamicstack),y)
 cflags-$(CONFIG_WARN_DYNAMIC_STACK) += -mwarn-dynamicstack
 endif

+ifdef CONFIG_FUNCTION_TRACER
+# make use of hotpatch feature if the compiler supports it
+cc_hotpatch	:= -mhotpatch=0,3
+ifeq ($(call cc-option-yn,$(cc_hotpatch)),y)
+CC_FLAGS_FTRACE := $(cc_hotpatch)
+KBUILD_AFLAGS	+= -DCC_USING_HOTPATCH
+KBUILD_CFLAGS	+= -DCC_USING_HOTPATCH
+endif
+endif
+
 KBUILD_CFLAGS	+= -mbackchain -msoft-float $(cflags-y)
 KBUILD_CFLAGS	+= -pipe -fno-strength-reduce -Wno-sign-compare
 KBUILD_AFLAGS	+= $(aflags-y)
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
index 57cbaff1f397..42506b371b74 100644
--- a/arch/s390/boot/compressed/misc.c
+++ b/arch/s390/boot/compressed/misc.c
@@ -8,6 +8,7 @@

 #include <asm/uaccess.h>
 #include <asm/page.h>
+#include <asm/sclp.h>
 #include <asm/ipl.h>
 #include "sizes.h"

@@ -63,8 +64,6 @@ static unsigned long free_mem_end_ptr;
 #include "../../../../lib/decompress_unxz.c"
 #endif

-extern _sclp_print_early(const char *);
-
 static int puts(const char *s)
 {
 	_sclp_print_early(s);
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
index 9432d0f202ef..64707750c780 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/default_defconfig
@@ -555,7 +555,6 @@ CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
 CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
 CONFIG_SLUB_DEBUG_ON=y
 CONFIG_SLUB_STATS=y
-CONFIG_DEBUG_KMEMLEAK=y
 CONFIG_DEBUG_STACK_USAGE=y
 CONFIG_DEBUG_VM=y
 CONFIG_DEBUG_VM_RB=y
@@ -563,6 +562,7 @@ CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
 CONFIG_DEBUG_PER_CPU_MAPS=y
 CONFIG_DEBUG_SHIRQ=y
 CONFIG_DETECT_HUNG_TASK=y
+CONFIG_PANIC_ON_OOPS=y
 CONFIG_TIMER_STATS=y
 CONFIG_DEBUG_RT_MUTEXES=y
 CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
index 219dca6ea926..5c3097272cd8 100644
--- a/arch/s390/configs/gcov_defconfig
+++ b/arch/s390/configs/gcov_defconfig
@@ -540,6 +540,7 @@ CONFIG_UNUSED_SYMBOLS=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
+CONFIG_PANIC_ON_OOPS=y
 CONFIG_TIMER_STATS=y
 CONFIG_RCU_TORTURE_TEST=m
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index 822c2f2e0c25..bda70f1ffd2c 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -537,6 +537,7 @@ CONFIG_FRAME_WARN=1024
 CONFIG_UNUSED_SYMBOLS=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
+CONFIG_PANIC_ON_OOPS=y
 CONFIG_TIMER_STATS=y
 CONFIG_RCU_TORTURE_TEST=m
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index 9d63051ebec4..1b0184a0f7f2 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -71,6 +71,7 @@ CONFIG_PRINTK_TIME=y
 CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_FS=y
 CONFIG_DEBUG_KERNEL=y
+CONFIG_PANIC_ON_OOPS=y
 # CONFIG_SCHED_DEBUG is not set
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
 # CONFIG_FTRACE is not set
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 1f272b24fc0b..5566ce80abdb 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -134,7 +134,7 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,

 static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
-	const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

 	if (unlikely(need_fallback(sctx->key_len))) {
 		crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
@@ -159,7 +159,7 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)

 static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
-	const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

 	if (unlikely(need_fallback(sctx->key_len))) {
 		crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 785c5f24d6f9..83ef702d2403 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -14,7 +14,6 @@ CONFIG_IKCONFIG_PROC=y
 CONFIG_CGROUPS=y
 CONFIG_CPUSETS=y
 CONFIG_CGROUP_CPUACCT=y
-CONFIG_RESOURCE_COUNTERS=y
 CONFIG_MEMCG=y
 CONFIG_MEMCG_SWAP=y
 CONFIG_CGROUP_SCHED=y
@@ -22,12 +21,8 @@ CONFIG_RT_GROUP_SCHED=y
 CONFIG_BLK_CGROUP=y
 CONFIG_NAMESPACES=y
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_RD_BZIP2=y
-CONFIG_RD_LZMA=y
-CONFIG_RD_XZ=y
-CONFIG_RD_LZO=y
-CONFIG_RD_LZ4=y
 CONFIG_EXPERT=y
+CONFIG_BPF_SYSCALL=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=y
diff --git a/arch/s390/hypfs/Makefile b/arch/s390/hypfs/Makefile
index 06f8d95a16cd..2ee25ba252d6 100644
--- a/arch/s390/hypfs/Makefile
+++ b/arch/s390/hypfs/Makefile
@@ -5,3 +5,4 @@
 obj-$(CONFIG_S390_HYPFS_FS) += s390_hypfs.o

 s390_hypfs-objs := inode.o hypfs_diag.o hypfs_vm.o hypfs_dbfs.o hypfs_sprp.o
+s390_hypfs-objs += hypfs_diag0c.o
diff --git a/arch/s390/hypfs/hypfs.h b/arch/s390/hypfs/hypfs.h
index b34b5ab90a31..eecde500ed49 100644
--- a/arch/s390/hypfs/hypfs.h
+++ b/arch/s390/hypfs/hypfs.h
@@ -37,6 +37,10 @@ extern int hypfs_vm_init(void);
 extern void hypfs_vm_exit(void);
 extern int hypfs_vm_create_files(struct dentry *root);

+/* VM diagnose 0c */
+int hypfs_diag0c_init(void);
+void hypfs_diag0c_exit(void);
+
 /* Set Partition-Resource Parameter */
 int hypfs_sprp_init(void);
 void hypfs_sprp_exit(void);
@@ -49,7 +53,6 @@ struct hypfs_dbfs_data {
 	void	*buf_free_ptr;
 	size_t	size;
 	struct hypfs_dbfs_file *dbfs_file;
-	struct kref kref;
 };

 struct hypfs_dbfs_file {
@@ -61,8 +64,6 @@ struct hypfs_dbfs_file {
 			    unsigned long);

 	/* Private data for hypfs_dbfs.c */
-	struct hypfs_dbfs_data *data;
-	struct delayed_work data_free_work;
 	struct mutex lock;
 	struct dentry *dentry;
 };
diff --git a/arch/s390/hypfs/hypfs_dbfs.c b/arch/s390/hypfs/hypfs_dbfs.c
index 47fe1055c714..752f6df3e697 100644
--- a/arch/s390/hypfs/hypfs_dbfs.c
+++ b/arch/s390/hypfs/hypfs_dbfs.c
@@ -17,33 +17,16 @@ static struct hypfs_dbfs_data *hypfs_dbfs_data_alloc(struct hypfs_dbfs_file *f)
 	data = kmalloc(sizeof(*data), GFP_KERNEL);
 	if (!data)
 		return NULL;
-	kref_init(&data->kref);
 	data->dbfs_file = f;
 	return data;
 }

-static void hypfs_dbfs_data_free(struct kref *kref)
+static void hypfs_dbfs_data_free(struct hypfs_dbfs_data *data)
 {
-	struct hypfs_dbfs_data *data;
-
-	data = container_of(kref, struct hypfs_dbfs_data, kref);
 	data->dbfs_file->data_free(data->buf_free_ptr);
 	kfree(data);
 }

-static void data_free_delayed(struct work_struct *work)
-{
-	struct hypfs_dbfs_data *data;
-	struct hypfs_dbfs_file *df;
-
-	df = container_of(work, struct hypfs_dbfs_file, data_free_work.work);
-	mutex_lock(&df->lock);
-	data = df->data;
-	df->data = NULL;
-	mutex_unlock(&df->lock);
-	kref_put(&data->kref, hypfs_dbfs_data_free);
-}
-
 static ssize_t dbfs_read(struct file *file, char __user *buf,
 			 size_t size, loff_t *ppos)
 {
@@ -56,28 +39,21 @@ static ssize_t dbfs_read(struct file *file, char __user *buf,

 	df = file_inode(file)->i_private;
 	mutex_lock(&df->lock);
-	if (!df->data) {
-		data = hypfs_dbfs_data_alloc(df);
-		if (!data) {
-			mutex_unlock(&df->lock);
-			return -ENOMEM;
-		}
-		rc = df->data_create(&data->buf, &data->buf_free_ptr,
-				     &data->size);
-		if (rc) {
-			mutex_unlock(&df->lock);
-			kfree(data);
-			return rc;
-		}
-		df->data = data;
-		schedule_delayed_work(&df->data_free_work, HZ);
+	data = hypfs_dbfs_data_alloc(df);
+	if (!data) {
+		mutex_unlock(&df->lock);
+		return -ENOMEM;
+	}
+	rc = df->data_create(&data->buf, &data->buf_free_ptr, &data->size);
+	if (rc) {
+		mutex_unlock(&df->lock);
+		kfree(data);
+		return rc;
 	}
-	data = df->data;
-	kref_get(&data->kref);
 	mutex_unlock(&df->lock);

 	rc = simple_read_from_buffer(buf, size, ppos, data->buf, data->size);
-	kref_put(&data->kref, hypfs_dbfs_data_free);
+	hypfs_dbfs_data_free(data);
 	return rc;
 }

@@ -108,7 +84,6 @@ int hypfs_dbfs_create_file(struct hypfs_dbfs_file *df)
 	if (IS_ERR(df->dentry))
 		return PTR_ERR(df->dentry);
 	mutex_init(&df->lock);
-	INIT_DELAYED_WORK(&df->data_free_work, data_free_delayed);
 	return 0;
 }

diff --git a/arch/s390/hypfs/hypfs_diag0c.c b/arch/s390/hypfs/hypfs_diag0c.c
new file mode 100644
index 000000000000..d4c0d3717543
--- /dev/null
+++ b/arch/s390/hypfs/hypfs_diag0c.c
@@ -0,0 +1,139 @@
+/*
+ * Hypervisor filesystem for Linux on s390
+ *
+ * Diag 0C implementation
+ *
+ * Copyright IBM Corp. 2014
+ */
+
+#include <linux/slab.h>
+#include <linux/cpu.h>
+#include <asm/hypfs.h>
+#include "hypfs.h"
+
+#define DBFS_D0C_HDR_VERSION 0
+
+/*
+ * Execute diagnose 0c in 31 bit mode
+ */
+static void diag0c(struct hypfs_diag0c_entry *entry)
+{
+	asm volatile (
+#ifdef CONFIG_64BIT
+		"	sam31\n"
+		"	diag	%0,%0,0x0c\n"
+		"	sam64\n"
+#else
+		"	diag	%0,%0,0x0c\n"
+#endif
+		: /* no output register */
+		: "a" (entry)
+		: "memory");
+}
+
+/*
+ * Get hypfs_diag0c_entry from CPU vector and store diag0c data
+ */
+static void diag0c_fn(void *data)
+{
+	diag0c(((void **) data)[smp_processor_id()]);
+}
+
+/*
+ * Allocate buffer and store diag 0c data
+ */
+static void *diag0c_store(unsigned int *count)
+{
+	struct hypfs_diag0c_data *diag0c_data;
+	unsigned int cpu_count, cpu, i;
+	void **cpu_vec;
+
+	get_online_cpus();
+	cpu_count = num_online_cpus();
+	cpu_vec = kmalloc(sizeof(*cpu_vec) * num_possible_cpus(), GFP_KERNEL);
+	if (!cpu_vec)
+		goto fail_put_online_cpus;
+	/* Note: Diag 0c needs 8 byte alignment and real storage */
+	diag0c_data = kzalloc(sizeof(struct hypfs_diag0c_hdr) +
+			      cpu_count * sizeof(struct hypfs_diag0c_entry),
+			      GFP_KERNEL | GFP_DMA);
+	if (!diag0c_data)
+		goto fail_kfree_cpu_vec;
+	i = 0;
+	/* Fill CPU vector for each online CPU */
+	for_each_online_cpu(cpu) {
+		diag0c_data->entry[i].cpu = cpu;
+		cpu_vec[cpu] = &diag0c_data->entry[i++];
+	}
+	/* Collect data all CPUs */
+	on_each_cpu(diag0c_fn, cpu_vec, 1);
+	*count = cpu_count;
+	kfree(cpu_vec);
+	put_online_cpus();
+	return diag0c_data;
+
+fail_kfree_cpu_vec:
+	kfree(cpu_vec);
+fail_put_online_cpus:
+	put_online_cpus();
+	return ERR_PTR(-ENOMEM);
+}
+
+/*
+ * Hypfs DBFS callback: Free diag 0c data
+ */
+static void dbfs_diag0c_free(const void *data)
+{
+	kfree(data);
+}
+
+/*
+ * Hypfs DBFS callback: Create diag 0c data
+ */
+static int dbfs_diag0c_create(void **data, void **data_free_ptr, size_t *size)
+{
+	struct hypfs_diag0c_data *diag0c_data;
+	unsigned int count;
+
+	diag0c_data = diag0c_store(&count);
+	if (IS_ERR(diag0c_data))
+		return PTR_ERR(diag0c_data);
+	memset(&diag0c_data->hdr, 0, sizeof(diag0c_data->hdr));
+	get_tod_clock_ext(diag0c_data->hdr.tod_ext);
+	diag0c_data->hdr.len = count * sizeof(struct hypfs_diag0c_entry);
+	diag0c_data->hdr.version = DBFS_D0C_HDR_VERSION;
+	diag0c_data->hdr.count = count;
+	*data = diag0c_data;
+	*data_free_ptr = diag0c_data;
+	*size = diag0c_data->hdr.len + sizeof(struct hypfs_diag0c_hdr);
+	return 0;
+}
+
+/*
+ * Hypfs DBFS file structure
+ */
+static struct hypfs_dbfs_file dbfs_file_0c = {
+	.name		= "diag_0c",
+	.data_create	= dbfs_diag0c_create,
+	.data_free	= dbfs_diag0c_free,
+};
+
+/*
+ * Initialize diag 0c interface for z/VM
+ */
+int __init hypfs_diag0c_init(void)
+{
+	if (!MACHINE_IS_VM)
+		return 0;
+	return hypfs_dbfs_create_file(&dbfs_file_0c);
+}
+
+/*
+ * Shutdown diag 0c interface for z/VM
+ */
+void hypfs_diag0c_exit(void)
+{
+	if (!MACHINE_IS_VM)
+		return;
+	hypfs_dbfs_remove_file(&dbfs_file_0c);
+}
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index c952b981e4f2..4c8008dd938e 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -482,10 +482,14 @@ static int __init hypfs_init(void)
 		rc = -ENODATA;
 		goto fail_hypfs_vm_exit;
 	}
+	if (hypfs_diag0c_init()) {
+		rc = -ENODATA;
+		goto fail_hypfs_sprp_exit;
+	}
 	s390_kobj = kobject_create_and_add("s390", hypervisor_kobj);
 	if (!s390_kobj) {
 		rc = -ENOMEM;
-		goto fail_hypfs_sprp_exit;
+		goto fail_hypfs_diag0c_exit;
 	}
 	rc = register_filesystem(&hypfs_type);
 	if (rc)
@@ -494,6 +498,8 @@ static int __init hypfs_init(void)

 fail_filesystem:
 	kobject_put(s390_kobj);
+fail_hypfs_diag0c_exit:
+	hypfs_diag0c_exit();
 fail_hypfs_sprp_exit:
 	hypfs_sprp_exit();
 fail_hypfs_vm_exit:
@@ -510,6 +516,7 @@ static void __exit hypfs_exit(void)
 {
 	unregister_filesystem(&hypfs_type);
 	kobject_put(s390_kobj);
+	hypfs_diag0c_exit();
 	hypfs_sprp_exit();
 	hypfs_vm_exit();
 	hypfs_diag_exit();
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
index cb700d54bd83..5243a8679a1d 100644
--- a/arch/s390/include/asm/cpu_mf.h
+++ b/arch/s390/include/asm/cpu_mf.h
@@ -189,6 +189,20 @@ static inline int ecctr(u64 ctr, u64 *val)
 	return cc;
 }

+/* Store CPU counter multiple for the MT utilization counter set */
+static inline int stcctm5(u64 num, u64 *val)
+{
+	typedef struct { u64 _[num]; } addrtype;
+	int cc;
+
+	asm volatile (
+		"	.insn	rsy,0xeb0000000017,%2,5,%1\n"
+		"	ipm	%0\n"
+		"	srl	%0,28\n"
+		: "=d" (cc), "=Q" (*(addrtype *) val) : "d" (num) : "cc");
+	return cc;
+}
+
 /* Query sampling information */
 static inline int qsi(struct hws_qsi_info_block *info)
 {
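
A hedged usage sketch for the new stcctm5() helper (not part of this merge; the wrapper name, the choice of two counters, and the error convention are illustrative assumptions — stcctm5() itself only stores `num` doublewords from counter set 5 and returns the condition code):

static int mt_util_sample(u64 *dst)
{
	u64 val[2];

	/* cc != 0: counter set not active or not fully stored (assumption) */
	if (stcctm5(2, val))
		return -EOPNOTSUPP;
	dst[0] = val[0];
	dst[1] = val[1];
	return 0;
}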
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index f6e43d39e3d8..c9df40b5c0ac 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -163,8 +163,8 @@ extern unsigned int vdso_enabled;
    the loader.  We need to make sure that it is out of the way of the program
    that it will "exec", and that there is sufficient room for the brk. */

-extern unsigned long randomize_et_dyn(unsigned long base);
-#define ELF_ET_DYN_BASE		(randomize_et_dyn(STACK_TOP / 3 * 2))
+extern unsigned long randomize_et_dyn(void);
+#define ELF_ET_DYN_BASE		randomize_et_dyn()

 /* This yields a mask that user programs can use to figure out what
    instruction set this CPU supports. */
@@ -209,7 +209,9 @@ do {				\
 } while (0)
 #endif /* CONFIG_COMPAT */

-#define STACK_RND_MASK	0x7ffUL
+extern unsigned long mmap_rnd_mask;
+
+#define STACK_RND_MASK	(mmap_rnd_mask)

 #define ARCH_DLINFO	\
 do {			\
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index abb618f1ead2..836c56290499 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -3,8 +3,12 @@

 #define ARCH_SUPPORTS_FTRACE_OPS 1

+#ifdef CC_USING_HOTPATCH
+#define MCOUNT_INSN_SIZE	6
+#else
 #define MCOUNT_INSN_SIZE	24
 #define MCOUNT_RETURN_FIXUP	18
+#endif

 #ifndef __ASSEMBLY__

@@ -37,18 +41,29 @@ struct ftrace_insn {
 static inline void ftrace_generate_nop_insn(struct ftrace_insn *insn)
 {
 #ifdef CONFIG_FUNCTION_TRACER
+#ifdef CC_USING_HOTPATCH
+	/* brcl 0,0 */
+	insn->opc = 0xc004;
+	insn->disp = 0;
+#else
 	/* jg .+24 */
 	insn->opc = 0xc0f4;
 	insn->disp = MCOUNT_INSN_SIZE / 2;
 #endif
+#endif
 }

 static inline int is_ftrace_nop(struct ftrace_insn *insn)
 {
 #ifdef CONFIG_FUNCTION_TRACER
+#ifdef CC_USING_HOTPATCH
+	if (insn->disp == 0)
+		return 1;
+#else
 	if (insn->disp == MCOUNT_INSN_SIZE / 2)
 		return 1;
 #endif
+#endif
 	return 0;
 }

diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h
index 346b1c85ffb4..58642fd29c87 100644
--- a/arch/s390/include/asm/jump_label.h
+++ b/arch/s390/include/asm/jump_label.h
@@ -4,6 +4,7 @@
 #include <linux/types.h>

 #define JUMP_LABEL_NOP_SIZE 6
+#define JUMP_LABEL_NOP_OFFSET 2

 #ifdef CONFIG_64BIT
 #define ASM_PTR ".quad"
@@ -13,9 +14,13 @@
 #define ASM_ALIGN ".balign 4"
 #endif

+/*
+ * We use a brcl 0,2 instruction for jump labels at compile time so it
+ * can be easily distinguished from a hotpatch generated instruction.
+ */
 static __always_inline bool arch_static_branch(struct static_key *key)
 {
-	asm_volatile_goto("0:	brcl 0,0\n"
+	asm_volatile_goto("0:	brcl 0,"__stringify(JUMP_LABEL_NOP_OFFSET)"\n"
 		".pushsection __jump_table, \"aw\"\n"
 		ASM_ALIGN "\n"
 		ASM_PTR " 0b, %l[label], %0\n"
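
The ftrace and jump label changes above leave three distinct 6-byte instructions at patchable sites, distinguishable purely by the 32-bit displacement that follows the 16-bit opcode: the compiler's hotpatch nop brcl 0,0 (disp 0), the jump label nop brcl 0,2 (disp 2, per JUMP_LABEL_NOP_OFFSET), and the old ftrace nop jg .+24 (disp 12 halfwords). A small self-contained C sketch of that decoding (the struct layout mirrors struct ftrace_insn from asm/ftrace.h; everything else is illustrative):

#include <stdint.h>
#include <stdio.h>

struct insn {
	uint16_t opc;	/* 0xc004 = brcl 0,..., 0xc0f4 = jg (brcl 15,...) */
	int32_t disp;	/* displacement in halfwords */
} __attribute__((packed));

int main(void)
{
	struct insn hotpatch_nop = { 0xc004, 0 };	 /* brcl 0,0 */
	struct insn jump_label_nop = { 0xc004, 2 };	 /* brcl 0,2 */
	struct insn old_mcount_nop = { 0xc0f4, 24 / 2 }; /* jg .+24  */

	/* is_ftrace_nop() under CC_USING_HOTPATCH tests disp == 0 only,
	 * so a jump label site can never be mistaken for an ftrace nop. */
	printf("hotpatch=%d jump_label=%d old=%d\n",
	       hotpatch_nop.disp == 0, jump_label_nop.disp == 0,
	       old_mcount_nop.disp == 0);
	return 0;
}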
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index ffb1d8ce97ae..0441ec24ae87 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -1758,6 +1758,10 @@ extern int s390_enable_sie(void);
 extern int s390_enable_skey(void);
 extern void s390_reset_cmma(struct mm_struct *mm);

+/* s390 has a private copy of get unmapped area to deal with cache synonyms */
+#define HAVE_ARCH_UNMAPPED_AREA
+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
+
 /*
  * No page table caches to initialise
  */
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index bed05ea7ec27..e7cbbdcdee13 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -215,10 +215,7 @@ static inline unsigned short stap(void)
 /*
  * Give up the time slice of the virtual PU.
  */
-static inline void cpu_relax(void)
-{
-	barrier();
-}
+void cpu_relax(void);

 #define cpu_relax_lowlatency()  barrier()

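cpu_relax() becomes out of line here because, per the shortlog entry "s390: reintroduce diag 44 calls for cpu_relax()", it yields the virtual CPU to the hypervisor again. The new body lives in a .c file not shown in this section; a hedged sketch of what it plausibly looks like (the MACHINE_HAS_DIAG44 gate and the smp_cpu_mtid check are assumptions, not taken from this diff):

void cpu_relax(void)
{
	/* Assumption: give up the time slice via diagnose 0x44, but only
	 * on machines without SMT, where yielding the thread helps. */
	if (!smp_cpu_mtid && MACHINE_HAS_DIAG44)
		asm volatile("diag 0,0,0x44");
	barrier();
}
EXPORT_SYMBOL(cpu_relax);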
diff --git a/arch/s390/include/asm/reset.h b/arch/s390/include/asm/reset.h
index 804578587a7a..72786067b300 100644
--- a/arch/s390/include/asm/reset.h
+++ b/arch/s390/include/asm/reset.h
@@ -15,5 +15,6 @@ struct reset_call {

 extern void register_reset_call(struct reset_call *reset);
 extern void unregister_reset_call(struct reset_call *reset);
-extern void s390_reset_system(void (*func)(void *), void *data);
+extern void s390_reset_system(void (*fn_pre)(void),
+			      void (*fn_post)(void *), void *data);
 #endif /* _ASM_S390_RESET_H */
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index 1aba89b53cb9..edb453cfc2c6 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -27,7 +27,7 @@ struct sclp_ipl_info {
 };

 struct sclp_cpu_entry {
-	u8 address;
+	u8 core_id;
 	u8 reserved0[2];
 	u8 : 3;
 	u8 siif : 1;
@@ -51,6 +51,9 @@ int sclp_cpu_deconfigure(u8 cpu);
 unsigned long long sclp_get_rnmax(void);
 unsigned long long sclp_get_rzm(void);
 unsigned int sclp_get_max_cpu(void);
+unsigned int sclp_get_mtid(u8 cpu_type);
+unsigned int sclp_get_mtid_max(void);
+unsigned int sclp_get_mtid_prev(void);
 int sclp_sdias_blk_count(void);
 int sclp_sdias_copy(void *dest, int blk_num, int nr_blks);
 int sclp_chp_configure(struct chp_id chpid);
@@ -68,4 +71,6 @@ void sclp_early_detect(void);
 int sclp_has_siif(void);
 unsigned int sclp_get_ibc(void);

+long _sclp_print_early(const char *);
+
 #endif /* _ASM_S390_SCLP_H */
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 7736fdd72595..b8d1e54b4733 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -57,6 +57,7 @@ extern void detect_memory_memblock(void);
 #define MACHINE_FLAG_TE		(1UL << 15)
 #define MACHINE_FLAG_TLB_LC	(1UL << 17)
 #define MACHINE_FLAG_VX		(1UL << 18)
+#define MACHINE_FLAG_CAD	(1UL << 19)

 #define MACHINE_IS_VM		(S390_lowcore.machine_flags & MACHINE_FLAG_VM)
 #define MACHINE_IS_KVM		(S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
@@ -80,6 +81,7 @@ extern void detect_memory_memblock(void);
 #define MACHINE_HAS_TE		(0)
 #define MACHINE_HAS_TLB_LC	(0)
 #define MACHINE_HAS_VX		(0)
+#define MACHINE_HAS_CAD		(0)
 #else /* CONFIG_64BIT */
 #define MACHINE_HAS_IEEE	(1)
 #define MACHINE_HAS_CSP		(1)
@@ -93,6 +95,7 @@ extern void detect_memory_memblock(void);
 #define MACHINE_HAS_TE		(S390_lowcore.machine_flags & MACHINE_FLAG_TE)
 #define MACHINE_HAS_TLB_LC	(S390_lowcore.machine_flags & MACHINE_FLAG_TLB_LC)
 #define MACHINE_HAS_VX		(S390_lowcore.machine_flags & MACHINE_FLAG_VX)
+#define MACHINE_HAS_CAD		(S390_lowcore.machine_flags & MACHINE_FLAG_CAD)
 #endif /* CONFIG_64BIT */

 /*
diff --git a/arch/s390/include/asm/sigp.h b/arch/s390/include/asm/sigp.h
index fad4ae23ece0..ec60cf7fa0a2 100644
--- a/arch/s390/include/asm/sigp.h
+++ b/arch/s390/include/asm/sigp.h
@@ -16,6 +16,7 @@
 #define SIGP_SET_ARCHITECTURE	      18
 #define SIGP_COND_EMERGENCY_SIGNAL    19
 #define SIGP_SENSE_RUNNING	      21
+#define SIGP_SET_MULTI_THREADING      22
 #define SIGP_STORE_ADDITIONAL_STATUS  23

 /* SIGP condition codes */
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index 762d4f88af5a..b3bd0282dd98 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -16,6 +16,8 @@
 #define raw_smp_processor_id()	(S390_lowcore.cpu_nr)

 extern struct mutex smp_cpu_state_mutex;
+extern unsigned int smp_cpu_mt_shift;
+extern unsigned int smp_cpu_mtid;

 extern int __cpu_up(unsigned int cpu, struct task_struct *tidle);

@@ -35,6 +37,8 @@ extern void smp_fill_possible_mask(void);

 #else /* CONFIG_SMP */

+#define smp_cpu_mtid	0
+
 static inline void smp_call_ipl_cpu(void (*func)(void *), void *data)
 {
 	func(data);
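
The two new globals describe the SMT geometry negotiated via SIGP_SET_MULTI_THREADING: smp_cpu_mtid is the highest thread id within a core (0 with multithreading disabled, hence the !CONFIG_SMP fallback above), and smp_cpu_mt_shift converts a core id into the base CPU address of its first thread. A hedged sketch of the implied address arithmetic (the helper name is illustrative, not from this diff):

/* Compose a physical CPU address from core and thread id;
 * thread must be <= smp_cpu_mtid. */
static inline unsigned int pcpu_address(unsigned int core,
					unsigned int thread)
{
	return (core << smp_cpu_mt_shift) | thread;
}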
diff --git a/arch/s390/include/asm/sysinfo.h b/arch/s390/include/asm/sysinfo.h
index f92428e459f8..73f12d21af4d 100644
--- a/arch/s390/include/asm/sysinfo.h
+++ b/arch/s390/include/asm/sysinfo.h
@@ -90,7 +90,11 @@ struct sysinfo_2_2_2 {
 	unsigned short cpus_reserved;
 	char name[8];
 	unsigned int caf;
-	char reserved_2[16];
+	char reserved_2[8];
+	unsigned char mt_installed;
+	unsigned char mt_general;
+	unsigned char mt_psmtid;
+	char reserved_3[5];
 	unsigned short cpus_dedicated;
 	unsigned short cpus_shared;
 };
@@ -120,26 +124,28 @@ struct sysinfo_3_2_2 {

 extern int topology_max_mnest;

-#define TOPOLOGY_CPU_BITS	64
+#define TOPOLOGY_CORE_BITS	64
 #define TOPOLOGY_NR_MAG		6

-struct topology_cpu {
-	unsigned char reserved0[4];
+struct topology_core {
+	unsigned char nl;
+	unsigned char reserved0[3];
 	unsigned char :6;
 	unsigned char pp:2;
 	unsigned char reserved1;
 	unsigned short origin;
-	unsigned long mask[TOPOLOGY_CPU_BITS / BITS_PER_LONG];
+	unsigned long mask[TOPOLOGY_CORE_BITS / BITS_PER_LONG];
 };

 struct topology_container {
-	unsigned char reserved[7];
+	unsigned char nl;
+	unsigned char reserved[6];
 	unsigned char id;
 };

 union topology_entry {
 	unsigned char nl;
-	struct topology_cpu cpu;
+	struct topology_core cpu;
 	struct topology_container container;
 };

diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index 56af53093d24..c4fbb9527c5c 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -9,9 +9,11 @@ struct cpu;
 #ifdef CONFIG_SCHED_BOOK

 struct cpu_topology_s390 {
+	unsigned short thread_id;
 	unsigned short core_id;
 	unsigned short socket_id;
 	unsigned short book_id;
+	cpumask_t thread_mask;
 	cpumask_t core_mask;
 	cpumask_t book_mask;
 };
@@ -19,6 +21,8 @@ struct cpu_topology_s390 {
 extern struct cpu_topology_s390 cpu_topology[NR_CPUS];

 #define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)
+#define topology_thread_id(cpu)		  (cpu_topology[cpu].thread_id)
+#define topology_thread_cpumask(cpu)	  (&cpu_topology[cpu].thread_mask)
 #define topology_core_id(cpu)		  (cpu_topology[cpu].core_id)
 #define topology_core_cpumask(cpu)	  (&cpu_topology[cpu].core_mask)
 #define topology_book_id(cpu)		  (cpu_topology[cpu].book_id)
diff --git a/arch/s390/include/uapi/asm/hypfs.h b/arch/s390/include/uapi/asm/hypfs.h
index 37998b449531..b3fe12d8dd87 100644
--- a/arch/s390/include/uapi/asm/hypfs.h
+++ b/arch/s390/include/uapi/asm/hypfs.h
@@ -1,16 +1,19 @@
 /*
- * IOCTL interface for hypfs
+ * Structures for hypfs interface
  *
  * Copyright IBM Corp. 2013
  *
  * Author: Martin Schwidefsky <schwidefsky@de.ibm.com>
  */

-#ifndef _ASM_HYPFS_CTL_H
-#define _ASM_HYPFS_CTL_H
+#ifndef _ASM_HYPFS_H
+#define _ASM_HYPFS_H

 #include <linux/types.h>

+/*
+ * IOCTL for binary interface /sys/kernel/debug/diag_304
+ */
 struct hypfs_diag304 {
 	__u32	args[2];
 	__u64	data;
@@ -22,4 +25,30 @@ struct hypfs_diag304 {
 #define HYPFS_DIAG304 \
 	_IOWR(HYPFS_IOCTL_MAGIC, 0x20, struct hypfs_diag304)

+/*
+ * Structures for binary interface /sys/kernel/debug/diag_0c
+ */
+struct hypfs_diag0c_hdr {
+	__u64	len;		/* Length of diag0c buffer without header */
+	__u16	version;	/* Version of header */
+	char	reserved1[6];	/* Reserved */
+	char	tod_ext[16];	/* TOD clock for diag0c */
+	__u64	count;		/* Number of entries (CPUs) in diag0c array */
+	char	reserved2[24];	/* Reserved */
+};
+
+struct hypfs_diag0c_entry {
+	char	date[8];	/* MM/DD/YY in EBCDIC */
+	char	time[8];	/* HH:MM:SS in EBCDIC */
+	__u64	virtcpu;	/* Virtual time consumed by the virt CPU (us) */
+	__u64	totalproc;	/* Total of virtual and simulation time (us) */
+	__u32	cpu;		/* Linux logical CPU number */
+	__u32	reserved;	/* Align to 8 byte */
+};
+
+struct hypfs_diag0c_data {
+	struct hypfs_diag0c_hdr		hdr;	/* 64 byte header */
+	struct hypfs_diag0c_entry	entry[];	/* diag0c entry array */
+};
+
 #endif
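
These structures are all user space needs to parse the new binary interface. A minimal reader sketch (assumptions: the comment above only names the file diag_0c under debugfs; depending on where hypfs places its debugfs directory the path may include an s390_hypfs component, so the path below is illustrative):

#include <stdio.h>
#include <asm/hypfs.h>

int main(void)
{
	struct hypfs_diag0c_hdr hdr;
	struct hypfs_diag0c_entry e;
	FILE *f = fopen("/sys/kernel/debug/s390_hypfs/diag_0c", "r");
	__u64 i;

	if (!f || fread(&hdr, sizeof(hdr), 1, f) != 1)
		return 1;
	for (i = 0; i < hdr.count; i++) {
		if (fread(&e, sizeof(e), 1, f) != 1)
			break;
		printf("cpu %u: virtcpu %llu us, totalproc %llu us\n",
		       e.cpu, (unsigned long long) e.virtcpu,
		       (unsigned long long) e.totalproc);
	}
	fclose(f);
	return 0;
}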
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 204c43a4c245..31fab2676fe9 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -4,8 +4,8 @@

 ifdef CONFIG_FUNCTION_TRACER
 # Don't trace early setup code and tracing code
-CFLAGS_REMOVE_early.o = -pg
-CFLAGS_REMOVE_ftrace.o = -pg
+CFLAGS_REMOVE_early.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
 endif

 #
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
index 797a823a2275..f74a53d339b0 100644
--- a/arch/s390/kernel/base.S
+++ b/arch/s390/kernel/base.S
@@ -97,7 +97,8 @@ ENTRY(diag308_reset)
 	lg	%r4,0(%r4)	# Save PSW
 	sturg	%r4,%r3		# Use sturg, because of large pages
 	lghi	%r1,1
-	diag	%r1,%r1,0x308
+	lghi	%r0,0
+	diag	%r0,%r1,0x308
.Lrestart_part2:
 	lhi	%r0,0		# Load r0 with zero
 	lhi	%r1,2		# Use mode 2 = ESAME (dump)
diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c
index c0b03c28d157..632fa06ea162 100644
--- a/arch/s390/kernel/cache.c
+++ b/arch/s390/kernel/cache.c
@@ -5,37 +5,11 @@
5 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> 5 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
6 */ 6 */
7 7
8#include <linux/notifier.h>
9#include <linux/seq_file.h> 8#include <linux/seq_file.h>
10#include <linux/init.h>
11#include <linux/list.h>
12#include <linux/slab.h>
13#include <linux/cpu.h> 9#include <linux/cpu.h>
10#include <linux/cacheinfo.h>
14#include <asm/facility.h> 11#include <asm/facility.h>
15 12
16struct cache {
17 unsigned long size;
18 unsigned int line_size;
19 unsigned int associativity;
20 unsigned int nr_sets;
21 unsigned int level : 3;
22 unsigned int type : 2;
23 unsigned int private : 1;
24 struct list_head list;
25};
26
27struct cache_dir {
28 struct kobject *kobj;
29 struct cache_index_dir *index;
30};
31
32struct cache_index_dir {
33 struct kobject kobj;
34 int cpu;
35 struct cache *cache;
36 struct cache_index_dir *next;
37};
38
39enum { 13enum {
40 CACHE_SCOPE_NOTEXISTS, 14 CACHE_SCOPE_NOTEXISTS,
41 CACHE_SCOPE_PRIVATE, 15 CACHE_SCOPE_PRIVATE,
@@ -44,10 +18,10 @@ enum {
44}; 18};
45 19
46enum { 20enum {
47 CACHE_TYPE_SEPARATE, 21 CTYPE_SEPARATE,
48 CACHE_TYPE_DATA, 22 CTYPE_DATA,
49 CACHE_TYPE_INSTRUCTION, 23 CTYPE_INSTRUCTION,
50 CACHE_TYPE_UNIFIED, 24 CTYPE_UNIFIED,
51}; 25};
52 26
53enum { 27enum {
@@ -70,37 +44,60 @@ struct cache_info {
70}; 44};
71 45
72#define CACHE_MAX_LEVEL 8 46#define CACHE_MAX_LEVEL 8
73
74union cache_topology { 47union cache_topology {
75 struct cache_info ci[CACHE_MAX_LEVEL]; 48 struct cache_info ci[CACHE_MAX_LEVEL];
76 unsigned long long raw; 49 unsigned long long raw;
77}; 50};
78 51
79static const char * const cache_type_string[] = { 52static const char * const cache_type_string[] = {
80 "Data", 53 "",
81 "Instruction", 54 "Instruction",
55 "Data",
56 "",
82 "Unified", 57 "Unified",
83}; 58};
84 59
85static struct cache_dir *cache_dir_cpu[NR_CPUS]; 60static const enum cache_type cache_type_map[] = {
86static LIST_HEAD(cache_list); 61 [CTYPE_SEPARATE] = CACHE_TYPE_SEPARATE,
62 [CTYPE_DATA] = CACHE_TYPE_DATA,
63 [CTYPE_INSTRUCTION] = CACHE_TYPE_INST,
64 [CTYPE_UNIFIED] = CACHE_TYPE_UNIFIED,
65};
87 66
88void show_cacheinfo(struct seq_file *m) 67void show_cacheinfo(struct seq_file *m)
89{ 68{
90 struct cache *cache; 69 struct cpu_cacheinfo *this_cpu_ci;
91 int index = 0; 70 struct cacheinfo *cache;
71 int idx;
92 72
93 list_for_each_entry(cache, &cache_list, list) { 73 get_online_cpus();
94 seq_printf(m, "cache%-11d: ", index); 74 this_cpu_ci = get_cpu_cacheinfo(cpumask_any(cpu_online_mask));
75 for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) {
76 cache = this_cpu_ci->info_list + idx;
77 seq_printf(m, "cache%-11d: ", idx);
95 seq_printf(m, "level=%d ", cache->level); 78 seq_printf(m, "level=%d ", cache->level);
96 seq_printf(m, "type=%s ", cache_type_string[cache->type]); 79 seq_printf(m, "type=%s ", cache_type_string[cache->type]);
97 seq_printf(m, "scope=%s ", cache->private ? "Private" : "Shared"); 80 seq_printf(m, "scope=%s ",
98 seq_printf(m, "size=%luK ", cache->size >> 10); 81 cache->disable_sysfs ? "Shared" : "Private");
99 seq_printf(m, "line_size=%u ", cache->line_size); 82 seq_printf(m, "size=%dK ", cache->size >> 10);
100 seq_printf(m, "associativity=%d", cache->associativity); 83 seq_printf(m, "line_size=%u ", cache->coherency_line_size);
84 seq_printf(m, "associativity=%d", cache->ways_of_associativity);
101 seq_puts(m, "\n"); 85 seq_puts(m, "\n");
102 index++;
103 } 86 }
87 put_online_cpus();
88}
89
90static inline enum cache_type get_cache_type(struct cache_info *ci, int level)
91{
92 if (level >= CACHE_MAX_LEVEL)
93 return CACHE_TYPE_NOCACHE;
94
95 ci += level;
96
97 if (ci->scope != CACHE_SCOPE_SHARED && ci->scope != CACHE_SCOPE_PRIVATE)
98 return CACHE_TYPE_NOCACHE;
99
100 return cache_type_map[ci->type];
104} 101}
105 102
106static inline unsigned long ecag(int ai, int li, int ti) 103static inline unsigned long ecag(int ai, int li, int ti)
@@ -113,277 +110,79 @@ static inline unsigned long ecag(int ai, int li, int ti)
113 return val; 110 return val;
114} 111}
115 112
116static int __init cache_add(int level, int private, int type) 113static void ci_leaf_init(struct cacheinfo *this_leaf, int private,
114 enum cache_type type, unsigned int level)
117{ 115{
118 struct cache *cache; 116 int ti, num_sets;
119 int ti; 117 int cpu = smp_processor_id();
120 118
121 cache = kzalloc(sizeof(*cache), GFP_KERNEL); 119 if (type == CACHE_TYPE_INST)
122 if (!cache)
123 return -ENOMEM;
124 if (type == CACHE_TYPE_INSTRUCTION)
125 ti = CACHE_TI_INSTRUCTION; 120 ti = CACHE_TI_INSTRUCTION;
126 else 121 else
127 ti = CACHE_TI_UNIFIED; 122 ti = CACHE_TI_UNIFIED;
128 cache->size = ecag(EXTRACT_SIZE, level, ti);
129 cache->line_size = ecag(EXTRACT_LINE_SIZE, level, ti);
130 cache->associativity = ecag(EXTRACT_ASSOCIATIVITY, level, ti);
131 cache->nr_sets = cache->size / cache->associativity;
132 cache->nr_sets /= cache->line_size;
133 cache->private = private;
134 cache->level = level + 1;
135 cache->type = type - 1;
136 list_add_tail(&cache->list, &cache_list);
137 return 0;
138}
139
140static void __init cache_build_info(void)
141{
142 struct cache *cache, *next;
143 union cache_topology ct;
144 int level, private, rc;
145
146 ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
147 for (level = 0; level < CACHE_MAX_LEVEL; level++) {
148 switch (ct.ci[level].scope) {
149 case CACHE_SCOPE_SHARED:
150 private = 0;
151 break;
152 case CACHE_SCOPE_PRIVATE:
153 private = 1;
154 break;
155 default:
156 return;
157 }
158 if (ct.ci[level].type == CACHE_TYPE_SEPARATE) {
159 rc = cache_add(level, private, CACHE_TYPE_DATA);
160 rc |= cache_add(level, private, CACHE_TYPE_INSTRUCTION);
161 } else {
162 rc = cache_add(level, private, ct.ci[level].type);
163 }
164 if (rc)
165 goto error;
166 }
167 return;
168error:
169 list_for_each_entry_safe(cache, next, &cache_list, list) {
170 list_del(&cache->list);
171 kfree(cache);
172 }
173}
174
175static struct cache_dir *cache_create_cache_dir(int cpu)
176{
177 struct cache_dir *cache_dir;
178 struct kobject *kobj = NULL;
179 struct device *dev;
180
181 dev = get_cpu_device(cpu);
182 if (!dev)
183 goto out;
184 kobj = kobject_create_and_add("cache", &dev->kobj);
185 if (!kobj)
186 goto out;
187 cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
188 if (!cache_dir)
189 goto out;
190 cache_dir->kobj = kobj;
191 cache_dir_cpu[cpu] = cache_dir;
192 return cache_dir;
193out:
194 kobject_put(kobj);
195 return NULL;
196}
197
198static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *kobj)
199{
200 return container_of(kobj, struct cache_index_dir, kobj);
201}
202
203static void cache_index_release(struct kobject *kobj)
204{
205 struct cache_index_dir *index;
206
207 index = kobj_to_cache_index_dir(kobj);
208 kfree(index);
209}
210
211static ssize_t cache_index_show(struct kobject *kobj,
212 struct attribute *attr, char *buf)
213{
214 struct kobj_attribute *kobj_attr;
215
216 kobj_attr = container_of(attr, struct kobj_attribute, attr);
217 return kobj_attr->show(kobj, kobj_attr, buf);
218}
219
220#define DEFINE_CACHE_ATTR(_name, _format, _value) \
221static ssize_t cache_##_name##_show(struct kobject *kobj, \
222 struct kobj_attribute *attr, \
223 char *buf) \
224{ \
225 struct cache_index_dir *index; \
226 \
227 index = kobj_to_cache_index_dir(kobj); \
228 return sprintf(buf, _format, _value); \
229} \
230static struct kobj_attribute cache_##_name##_attr = \
231 __ATTR(_name, 0444, cache_##_name##_show, NULL);
232
233DEFINE_CACHE_ATTR(size, "%luK\n", index->cache->size >> 10);
234DEFINE_CACHE_ATTR(coherency_line_size, "%u\n", index->cache->line_size);
235DEFINE_CACHE_ATTR(number_of_sets, "%u\n", index->cache->nr_sets);
236DEFINE_CACHE_ATTR(ways_of_associativity, "%u\n", index->cache->associativity);
237DEFINE_CACHE_ATTR(type, "%s\n", cache_type_string[index->cache->type]);
238DEFINE_CACHE_ATTR(level, "%d\n", index->cache->level);
239
240static ssize_t shared_cpu_map_func(struct kobject *kobj, int type, char *buf)
241{
242 struct cache_index_dir *index;
243 int len;
244
245 index = kobj_to_cache_index_dir(kobj);
246 len = type ?
247 cpulist_scnprintf(buf, PAGE_SIZE - 2, cpumask_of(index->cpu)) :
248 cpumask_scnprintf(buf, PAGE_SIZE - 2, cpumask_of(index->cpu));
249 len += sprintf(&buf[len], "\n");
250 return len;
251}
252
253static ssize_t shared_cpu_map_show(struct kobject *kobj,
254 struct kobj_attribute *attr, char *buf)
255{
256 return shared_cpu_map_func(kobj, 0, buf);
257}
258static struct kobj_attribute cache_shared_cpu_map_attr =
259 __ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);
260
261static ssize_t shared_cpu_list_show(struct kobject *kobj,
262 struct kobj_attribute *attr, char *buf)
263{
264 return shared_cpu_map_func(kobj, 1, buf);
265}
266static struct kobj_attribute cache_shared_cpu_list_attr =
267 __ATTR(shared_cpu_list, 0444, shared_cpu_list_show, NULL);
268
269static struct attribute *cache_index_default_attrs[] = {
270 &cache_type_attr.attr,
271 &cache_size_attr.attr,
272 &cache_number_of_sets_attr.attr,
273 &cache_ways_of_associativity_attr.attr,
274 &cache_level_attr.attr,
275 &cache_coherency_line_size_attr.attr,
276 &cache_shared_cpu_map_attr.attr,
277 &cache_shared_cpu_list_attr.attr,
278 NULL,
279};
280
281static const struct sysfs_ops cache_index_ops = {
282 .show = cache_index_show,
283};
284
285static struct kobj_type cache_index_type = {
286 .sysfs_ops = &cache_index_ops,
287 .release = cache_index_release,
288 .default_attrs = cache_index_default_attrs,
289};
290
291static int cache_create_index_dir(struct cache_dir *cache_dir,
292 struct cache *cache, int index, int cpu)
293{
294 struct cache_index_dir *index_dir;
295 int rc;
296
297 index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
298 if (!index_dir)
299 return -ENOMEM;
300 index_dir->cache = cache;
301 index_dir->cpu = cpu;
302 rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
303 cache_dir->kobj, "index%d", index);
304 if (rc)
305 goto out;
306 index_dir->next = cache_dir->index;
307 cache_dir->index = index_dir;
308 return 0;
309out:
310 kfree(index_dir);
311 return rc;
312}
313
314static int cache_add_cpu(int cpu)
315{
316 struct cache_dir *cache_dir;
317 struct cache *cache;
318 int rc, index = 0;
319
320 if (list_empty(&cache_list))
321 return 0;
322 cache_dir = cache_create_cache_dir(cpu);
323 if (!cache_dir)
324 return -ENOMEM;
325 list_for_each_entry(cache, &cache_list, list) {
326 if (!cache->private)
327 break;
328 rc = cache_create_index_dir(cache_dir, cache, index, cpu);
329 if (rc)
330 return rc;
331 index++;
332 }
333 return 0;
334}
335
336static void cache_remove_cpu(int cpu)
337{
338 struct cache_index_dir *index, *next;
339 struct cache_dir *cache_dir;
340
341 cache_dir = cache_dir_cpu[cpu];
342 if (!cache_dir)
343 return;
344 index = cache_dir->index;
345 while (index) {
346 next = index->next;
347 kobject_put(&index->kobj);
348 index = next;
349 }
350 kobject_put(cache_dir->kobj);
351 kfree(cache_dir);
352 cache_dir_cpu[cpu] = NULL;
353}
354
355static int cache_hotplug(struct notifier_block *nfb, unsigned long action,
356 void *hcpu)
357{
358 int cpu = (long)hcpu;
359 int rc = 0;
360
361 switch (action & ~CPU_TASKS_FROZEN) {
362 case CPU_ONLINE:
363 rc = cache_add_cpu(cpu);
364 if (rc)
365 cache_remove_cpu(cpu);
366 break;
367 case CPU_DEAD:
368 cache_remove_cpu(cpu);
369 break;
370 }
371 return rc ? NOTIFY_BAD : NOTIFY_OK;
372}
373
374static int __init cache_init(void)
375{
376 int cpu;
377
378 if (!test_facility(34))
379 return 0;
380 cache_build_info();
381
382 cpu_notifier_register_begin();
383 for_each_online_cpu(cpu)
384 cache_add_cpu(cpu);
385 __hotcpu_notifier(cache_hotplug, 0);
386 cpu_notifier_register_done();
387 return 0;
388}
389device_initcall(cache_init);
123
124 this_leaf->level = level + 1;
125 this_leaf->type = type;
126 this_leaf->coherency_line_size = ecag(EXTRACT_LINE_SIZE, level, ti);
127 this_leaf->ways_of_associativity = ecag(EXTRACT_ASSOCIATIVITY,
128 level, ti);
129 this_leaf->size = ecag(EXTRACT_SIZE, level, ti);
130
131 num_sets = this_leaf->size / this_leaf->coherency_line_size;
132 num_sets /= this_leaf->ways_of_associativity;
133 this_leaf->number_of_sets = num_sets;
134 cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
135 if (!private)
136 this_leaf->disable_sysfs = true;
137}
138
139int init_cache_level(unsigned int cpu)
140{
141 struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
142 unsigned int level = 0, leaves = 0;
143 union cache_topology ct;
144 enum cache_type ctype;
145
146 if (!this_cpu_ci)
147 return -EINVAL;
148
149 ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
150 do {
151 ctype = get_cache_type(&ct.ci[0], level);
152 if (ctype == CACHE_TYPE_NOCACHE)
153 break;
154 /* Separate instruction and data caches */
155 leaves += (ctype == CACHE_TYPE_SEPARATE) ? 2 : 1;
156 } while (++level < CACHE_MAX_LEVEL);
157
158 this_cpu_ci->num_levels = level;
159 this_cpu_ci->num_leaves = leaves;
160
161 return 0;
162}
163
164int populate_cache_leaves(unsigned int cpu)
165{
166 unsigned int level, idx, pvt;
167 union cache_topology ct;
168 enum cache_type ctype;
169 struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
170 struct cacheinfo *this_leaf = this_cpu_ci->info_list;
171
172 ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
173 for (idx = 0, level = 0; level < this_cpu_ci->num_levels &&
174 idx < this_cpu_ci->num_leaves; idx++, level++) {
175 if (!this_leaf)
176 return -EINVAL;
177
178 pvt = (ct.ci[level].scope == CACHE_SCOPE_PRIVATE) ? 1 : 0;
179 ctype = get_cache_type(&ct.ci[0], level);
180 if (ctype == CACHE_TYPE_SEPARATE) {
181 ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_DATA, level);
182 ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_INST, level);
183 } else {
184 ci_leaf_init(this_leaf++, pvt, ctype, level);
185 }
186 }
187 return 0;
188}
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index f3762937dd82..533430307da8 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -137,7 +137,7 @@ enum {
137 INSTR_RSI_RRP, 137 INSTR_RSI_RRP,
138 INSTR_RSL_LRDFU, INSTR_RSL_R0RD, 138 INSTR_RSL_LRDFU, INSTR_RSL_R0RD,
139 INSTR_RSY_AARD, INSTR_RSY_CCRD, INSTR_RSY_RRRD, INSTR_RSY_RURD, 139 INSTR_RSY_AARD, INSTR_RSY_CCRD, INSTR_RSY_RRRD, INSTR_RSY_RURD,
140 INSTR_RSY_RDRM, 140 INSTR_RSY_RDRM, INSTR_RSY_RMRD,
141 INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD, INSTR_RS_RRRD, 141 INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD, INSTR_RS_RRRD,
142 INSTR_RS_RURD, 142 INSTR_RS_RURD,
143 INSTR_RXE_FRRD, INSTR_RXE_RRRD, INSTR_RXE_RRRDM, 143 INSTR_RXE_FRRD, INSTR_RXE_RRRD, INSTR_RXE_RRRDM,
@@ -226,7 +226,6 @@ static const struct s390_operand operands[] =
226 [U16_32] = { 16, 32, 0 }, 226 [U16_32] = { 16, 32, 0 },
227 [J16_16] = { 16, 16, OPERAND_PCREL }, 227 [J16_16] = { 16, 16, OPERAND_PCREL },
228 [J16_32] = { 16, 32, OPERAND_PCREL }, 228 [J16_32] = { 16, 32, OPERAND_PCREL },
229 [I16_32] = { 16, 32, OPERAND_SIGNED },
230 [I24_24] = { 24, 24, OPERAND_SIGNED }, 229 [I24_24] = { 24, 24, OPERAND_SIGNED },
231 [J32_16] = { 32, 16, OPERAND_PCREL }, 230 [J32_16] = { 32, 16, OPERAND_PCREL },
232 [I32_16] = { 32, 16, OPERAND_SIGNED }, 231 [I32_16] = { 32, 16, OPERAND_SIGNED },
@@ -308,6 +307,7 @@ static const unsigned char formats[][7] = {
308 [INSTR_RSY_AARD] = { 0xff, A_8,A_12,D20_20,B_16,0,0 }, 307 [INSTR_RSY_AARD] = { 0xff, A_8,A_12,D20_20,B_16,0,0 },
309 [INSTR_RSY_CCRD] = { 0xff, C_8,C_12,D20_20,B_16,0,0 }, 308 [INSTR_RSY_CCRD] = { 0xff, C_8,C_12,D20_20,B_16,0,0 },
310 [INSTR_RSY_RDRM] = { 0xff, R_8,D20_20,B_16,U4_12,0,0 }, 309 [INSTR_RSY_RDRM] = { 0xff, R_8,D20_20,B_16,U4_12,0,0 },
310 [INSTR_RSY_RMRD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 },
311 [INSTR_RSY_RRRD] = { 0xff, R_8,R_12,D20_20,B_16,0,0 }, 311 [INSTR_RSY_RRRD] = { 0xff, R_8,R_12,D20_20,B_16,0,0 },
312 [INSTR_RSY_RURD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 }, 312 [INSTR_RSY_RURD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 },
313 [INSTR_RS_AARD] = { 0xff, A_8,A_12,D_20,B_16,0,0 }, 313 [INSTR_RS_AARD] = { 0xff, A_8,A_12,D_20,B_16,0,0 },
@@ -451,7 +451,8 @@ enum {
451 LONG_INSN_VERLLV, 451 LONG_INSN_VERLLV,
452 LONG_INSN_VESRAV, 452 LONG_INSN_VESRAV,
453 LONG_INSN_VESRLV, 453 LONG_INSN_VESRLV,
454 LONG_INSN_VSBCBI 454 LONG_INSN_VSBCBI,
455 LONG_INSN_STCCTM
455}; 456};
456 457
457static char *long_insn_name[] = { 458static char *long_insn_name[] = {
@@ -531,6 +532,7 @@ static char *long_insn_name[] = {
531 [LONG_INSN_VESRAV] = "vesrav", 532 [LONG_INSN_VESRAV] = "vesrav",
532 [LONG_INSN_VESRLV] = "vesrlv", 533 [LONG_INSN_VESRLV] = "vesrlv",
533 [LONG_INSN_VSBCBI] = "vsbcbi", 534 [LONG_INSN_VSBCBI] = "vsbcbi",
535 [LONG_INSN_STCCTM] = "stcctm",
534}; 536};
535 537
536static struct s390_insn opcode[] = { 538static struct s390_insn opcode[] = {
@@ -1656,6 +1658,7 @@ static struct s390_insn opcode_eb[] = {
1656 { "lric", 0x60, INSTR_RSY_RDRM }, 1658 { "lric", 0x60, INSTR_RSY_RDRM },
1657 { "stric", 0x61, INSTR_RSY_RDRM }, 1659 { "stric", 0x61, INSTR_RSY_RDRM },
1658 { "mric", 0x62, INSTR_RSY_RDRM }, 1660 { "mric", 0x62, INSTR_RSY_RDRM },
1661 { { 0, LONG_INSN_STCCTM }, 0x17, INSTR_RSY_RMRD },
1659#endif 1662#endif
1660 { "rll", 0x1d, INSTR_RSY_RRRD }, 1663 { "rll", 0x1d, INSTR_RSY_RRRD },
1661 { "mvclu", 0x8e, INSTR_RSY_RRRD }, 1664 { "mvclu", 0x8e, INSTR_RSY_RRRD },
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 302ac1f7f8e7..70a329450901 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -393,9 +393,27 @@ static __init void detect_machine_facilities(void)
393 S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC; 393 S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
394 if (test_facility(129)) 394 if (test_facility(129))
395 S390_lowcore.machine_flags |= MACHINE_FLAG_VX; 395 S390_lowcore.machine_flags |= MACHINE_FLAG_VX;
396 if (test_facility(128))
397 S390_lowcore.machine_flags |= MACHINE_FLAG_CAD;
396#endif 398#endif
397} 399}
398 400
401static int __init nocad_setup(char *str)
402{
403 S390_lowcore.machine_flags &= ~MACHINE_FLAG_CAD;
404 return 0;
405}
406early_param("nocad", nocad_setup);
407
408static int __init cad_init(void)
409{
410 if (MACHINE_HAS_CAD)
411 /* Enable problem state CAD. */
412 __ctl_set_bit(2, 3);
413 return 0;
414}
415early_initcall(cad_init);
416
399static __init void rescue_initrd(void) 417static __init void rescue_initrd(void)
400{ 418{
401#ifdef CONFIG_BLK_DEV_INITRD 419#ifdef CONFIG_BLK_DEV_INITRD
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index 8e61393c8275..834df047d35f 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -71,9 +71,11 @@ struct s390_mmap_arg_struct;
71struct fadvise64_64_args; 71struct fadvise64_64_args;
72struct old_sigaction; 72struct old_sigaction;
73 73
74long sys_rt_sigreturn(void);
75long sys_sigreturn(void);
76
74long sys_s390_personality(unsigned int personality); 77long sys_s390_personality(unsigned int personality);
75long sys_s390_runtime_instr(int command, int signum); 78long sys_s390_runtime_instr(int command, int signum);
76
77long sys_s390_pci_mmio_write(unsigned long, const void __user *, size_t); 79long sys_s390_pci_mmio_write(unsigned long, const void __user *, size_t);
78long sys_s390_pci_mmio_read(unsigned long, void __user *, size_t); 80long sys_s390_pci_mmio_read(unsigned long, void __user *, size_t);
79#endif /* _ENTRY_H */ 81#endif /* _ENTRY_H */
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index b86bb8823f15..82c19899574f 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -46,6 +46,13 @@
46 * lg %r14,8(%r15) # offset 18 46 * lg %r14,8(%r15) # offset 18
47 * The jg instruction branches to offset 24 to skip as many instructions 47 * The jg instruction branches to offset 24 to skip as many instructions
48 * as possible. 48 * as possible.
49 * In case we use gcc's hotpatch feature the original and also the disabled
50 * function prologue contains only a single six byte instruction and looks
51 * like this:
52 * > brcl 0,0 # offset 0
53 * To enable ftrace the code gets patched like above and afterwards looks
54 * like this:
55 * > brasl %r0,ftrace_caller # offset 0
49 */ 56 */
50 57
51unsigned long ftrace_plt; 58unsigned long ftrace_plt;
@@ -59,62 +66,71 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
59int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, 66int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
60 unsigned long addr) 67 unsigned long addr)
61{ 68{
62 struct ftrace_insn insn; 69 struct ftrace_insn orig, new, old;
63 unsigned short op; 70
64 void *from, *to; 71 if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
65 size_t size;
66
67 ftrace_generate_nop_insn(&insn);
68 size = sizeof(insn);
69 from = &insn;
70 to = (void *) rec->ip;
71 if (probe_kernel_read(&op, (void *) rec->ip, sizeof(op)))
72 return -EFAULT; 72 return -EFAULT;
73 /* 73 if (addr == MCOUNT_ADDR) {
74 * If we find a breakpoint instruction, a kprobe has been placed 74 /* Initial code replacement */
75 * at the beginning of the function. We write the constant 75#ifdef CC_USING_HOTPATCH
76 * KPROBE_ON_FTRACE_NOP into the remaining four bytes of the original 76 /* We expect to see brcl 0,0 */
77 * instruction so that the kprobes handler can execute a nop, if it 77 ftrace_generate_nop_insn(&orig);
78 * reaches this breakpoint. 78#else
79 */ 79 /* We expect to see stg r14,8(r15) */
80 if (op == BREAKPOINT_INSTRUCTION) { 80 orig.opc = 0xe3e0;
81 size -= 2; 81 orig.disp = 0xf0080024;
82 from += 2; 82#endif
83 to += 2; 83 ftrace_generate_nop_insn(&new);
84 insn.disp = KPROBE_ON_FTRACE_NOP; 84 } else if (old.opc == BREAKPOINT_INSTRUCTION) {
85 /*
86 * If we find a breakpoint instruction, a kprobe has been
87 * placed at the beginning of the function. We write the
88 * constant KPROBE_ON_FTRACE_NOP into the remaining four
89 * bytes of the original instruction so that the kprobes
90 * handler can execute a nop, if it reaches this breakpoint.
91 */
92 new.opc = orig.opc = BREAKPOINT_INSTRUCTION;
93 orig.disp = KPROBE_ON_FTRACE_CALL;
94 new.disp = KPROBE_ON_FTRACE_NOP;
95 } else {
96 /* Replace ftrace call with a nop. */
97 ftrace_generate_call_insn(&orig, rec->ip);
98 ftrace_generate_nop_insn(&new);
85 } 99 }
86 if (probe_kernel_write(to, from, size)) 100 /* Verify that the to be replaced code matches what we expect. */
101 if (memcmp(&orig, &old, sizeof(old)))
102 return -EINVAL;
103 if (probe_kernel_write((void *) rec->ip, &new, sizeof(new)))
87 return -EPERM; 104 return -EPERM;
88 return 0; 105 return 0;
89} 106}
90 107
91int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) 108int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
92{ 109{
93 struct ftrace_insn insn; 110 struct ftrace_insn orig, new, old;
94 unsigned short op; 111
95 void *from, *to; 112 if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
96 size_t size;
97
98 ftrace_generate_call_insn(&insn, rec->ip);
99 size = sizeof(insn);
100 from = &insn;
101 to = (void *) rec->ip;
102 if (probe_kernel_read(&op, (void *) rec->ip, sizeof(op)))
103 return -EFAULT; 113 return -EFAULT;
104 /* 114 if (old.opc == BREAKPOINT_INSTRUCTION) {
105 * If we find a breakpoint instruction, a kprobe has been placed 115 /*
106 * at the beginning of the function. We write the constant 116 * If we find a breakpoint instruction, a kprobe has been
107 * KPROBE_ON_FTRACE_CALL into the remaining four bytes of the original 117 * placed at the beginning of the function. We write the
108 * instruction so that the kprobes handler can execute a brasl if it 118 * constant KPROBE_ON_FTRACE_CALL into the remaining four
109 * reaches this breakpoint. 119 * bytes of the original instruction so that the kprobes
110 */ 120 * handler can execute a brasl if it reaches this breakpoint.
111 if (op == BREAKPOINT_INSTRUCTION) { 121 */
112 size -= 2; 122 new.opc = orig.opc = BREAKPOINT_INSTRUCTION;
113 from += 2; 123 orig.disp = KPROBE_ON_FTRACE_NOP;
114 to += 2; 124 new.disp = KPROBE_ON_FTRACE_CALL;
115 insn.disp = KPROBE_ON_FTRACE_CALL; 125 } else {
126 /* Replace nop with an ftrace call. */
127 ftrace_generate_nop_insn(&orig);
128 ftrace_generate_call_insn(&new, rec->ip);
116 } 129 }
117 if (probe_kernel_write(to, from, size)) 130 /* Verify that the to be replaced code matches what we expect. */
131 if (memcmp(&orig, &old, sizeof(old)))
132 return -EINVAL;
133 if (probe_kernel_write((void *) rec->ip, &new, sizeof(new)))
118 return -EPERM; 134 return -EPERM;
119 return 0; 135 return 0;
120} 136}
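After this change ftrace_make_nop() and ftrace_make_call() share one read/verify/write shape. A condensed sketch of that idiom, with an invented helper name; the error handling mirrors the hunks above but is illustrative, not the kernel's API:

static int patch_text(void *ip, const struct ftrace_insn *expect,
		      const struct ftrace_insn *new)
{
	struct ftrace_insn old;

	if (probe_kernel_read(&old, ip, sizeof(old)))
		return -EFAULT;	/* instruction not readable */
	if (memcmp(expect, &old, sizeof(old)))
		return -EINVAL;	/* code is not what we expect; refuse to patch */
	if (probe_kernel_write(ip, new, sizeof(*new)))
		return -EPERM;	/* instruction not writable */
	return 0;
}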
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index d62eee11f0b5..132f4c9ade60 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -436,7 +436,9 @@ ENTRY(startup_kdump)
436# followed by the facility words. 436# followed by the facility words.
437 437
438#if defined(CONFIG_64BIT) 438#if defined(CONFIG_64BIT)
439#if defined(CONFIG_MARCH_ZEC12) 439#if defined(CONFIG_MARCH_Z13)
440 .long 3, 0xc100eff2, 0xf46ce800, 0x00400000
441#elif defined(CONFIG_MARCH_ZEC12)
440 .long 3, 0xc100eff2, 0xf46ce800, 0x00400000 442 .long 3, 0xc100eff2, 0xf46ce800, 0x00400000
441#elif defined(CONFIG_MARCH_Z196) 443#elif defined(CONFIG_MARCH_Z196)
442 .long 2, 0xc100eff2, 0xf46c0000 444 .long 2, 0xc100eff2, 0xf46c0000
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 39badb9ca0b3..5c8651f36509 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -2074,7 +2074,8 @@ static void do_reset_calls(void)
2074 2074
2075u32 dump_prefix_page; 2075u32 dump_prefix_page;
2076 2076
2077void s390_reset_system(void (*func)(void *), void *data) 2077void s390_reset_system(void (*fn_pre)(void),
2078 void (*fn_post)(void *), void *data)
2078{ 2079{
2079 struct _lowcore *lc; 2080 struct _lowcore *lc;
2080 2081
@@ -2112,7 +2113,11 @@ void s390_reset_system(void (*func)(void *), void *data)
2112 /* Store status at absolute zero */ 2113 /* Store status at absolute zero */
2113 store_status(); 2114 store_status();
2114 2115
2116 /* Call function before reset */
2117 if (fn_pre)
2118 fn_pre();
2115 do_reset_calls(); 2119 do_reset_calls();
2116 if (func) 2120 /* Call function after reset */
2117 func(data); 2121 if (fn_post)
2122 fn_post(data);
2118} 2123}
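The split into fn_pre and fn_post matters for kdump: anything that must capture pre-reset state has to run as fn_pre. A sketch of the kdump call site, taken from the machine_kexec.c hunk further down:

	s390_reset_system(setup_regs,		/* runs before the reset calls */
			  __do_machine_kdump,	/* runs after, jumps into kdump */
			  data);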
diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c
index b987ab2c1541..cb2d51e779df 100644
--- a/arch/s390/kernel/jump_label.c
+++ b/arch/s390/kernel/jump_label.c
@@ -22,31 +22,66 @@ struct insn_args {
22 enum jump_label_type type; 22 enum jump_label_type type;
23}; 23};
24 24
25static void jump_label_make_nop(struct jump_entry *entry, struct insn *insn)
26{
27 /* brcl 0,0 */
28 insn->opcode = 0xc004;
29 insn->offset = 0;
30}
31
32static void jump_label_make_branch(struct jump_entry *entry, struct insn *insn)
33{
34 /* brcl 15,offset */
35 insn->opcode = 0xc0f4;
36 insn->offset = (entry->target - entry->code) >> 1;
37}
38
39static void jump_label_bug(struct jump_entry *entry, struct insn *insn)
40{
41 unsigned char *ipc = (unsigned char *)entry->code;
42 unsigned char *ipe = (unsigned char *)insn;
43
44 pr_emerg("Jump label code mismatch at %pS [%p]\n", ipc, ipc);
45 pr_emerg("Found: %02x %02x %02x %02x %02x %02x\n",
46 ipc[0], ipc[1], ipc[2], ipc[3], ipc[4], ipc[5]);
47 pr_emerg("Expected: %02x %02x %02x %02x %02x %02x\n",
48 ipe[0], ipe[1], ipe[2], ipe[3], ipe[4], ipe[5]);
49 panic("Corrupted kernel text");
50}
51
52static struct insn orignop = {
53 .opcode = 0xc004,
54 .offset = JUMP_LABEL_NOP_OFFSET >> 1,
55};
56
25static void __jump_label_transform(struct jump_entry *entry, 57static void __jump_label_transform(struct jump_entry *entry,
26 enum jump_label_type type) 58 enum jump_label_type type,
59 int init)
27{ 60{
28 struct insn insn; 61 struct insn old, new;
29 int rc;
30 62
31 if (type == JUMP_LABEL_ENABLE) { 63 if (type == JUMP_LABEL_ENABLE) {
32 /* brcl 15,offset */ 64 jump_label_make_nop(entry, &old);
33 insn.opcode = 0xc0f4; 65 jump_label_make_branch(entry, &new);
34 insn.offset = (entry->target - entry->code) >> 1;
35 } else { 66 } else {
36 /* brcl 0,0 */ 67 jump_label_make_branch(entry, &old);
37 insn.opcode = 0xc004; 68 jump_label_make_nop(entry, &new);
38 insn.offset = 0;
39 } 69 }
40 70 if (init) {
41 rc = probe_kernel_write((void *)entry->code, &insn, JUMP_LABEL_NOP_SIZE); 71 if (memcmp((void *)entry->code, &orignop, sizeof(orignop)))
42 WARN_ON_ONCE(rc < 0); 72 jump_label_bug(entry, &old);
73 } else {
74 if (memcmp((void *)entry->code, &old, sizeof(old)))
75 jump_label_bug(entry, &old);
76 }
77 probe_kernel_write((void *)entry->code, &new, sizeof(new));
43} 78}
44 79
45static int __sm_arch_jump_label_transform(void *data) 80static int __sm_arch_jump_label_transform(void *data)
46{ 81{
47 struct insn_args *args = data; 82 struct insn_args *args = data;
48 83
49 __jump_label_transform(args->entry, args->type); 84 __jump_label_transform(args->entry, args->type, 0);
50 return 0; 85 return 0;
51} 86}
52 87
@@ -64,7 +99,7 @@ void arch_jump_label_transform(struct jump_entry *entry,
64void arch_jump_label_transform_static(struct jump_entry *entry, 99void arch_jump_label_transform_static(struct jump_entry *entry,
65 enum jump_label_type type) 100 enum jump_label_type type)
66{ 101{
67 __jump_label_transform(entry, type); 102 __jump_label_transform(entry, type, 1);
68} 103}
69 104
70#endif 105#endif
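The new sanity checks work because a brcl has a fixed six-byte layout. Assuming the encoding implied by the opcodes above (field names are illustrative), the struct view is:

struct insn {
	u16 opcode;	/* 0xc004 = brcl 0,... (a nop); 0xc0f4 = brcl 15,... */
	s32 offset;	/* PC-relative target in halfwords, hence the >> 1 */
} __packed;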
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 1e4c710dfb92..f516edc1fbe3 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -69,7 +69,8 @@ static void copy_instruction(struct kprobe *p)
69 /* 69 /*
70 * If kprobes patches the instruction that is morphed by 70 * If kprobes patches the instruction that is morphed by
71 * ftrace make sure that kprobes always sees the branch 71 * ftrace make sure that kprobes always sees the branch
72 * "jg .+24" that skips the mcount block 72 * "jg .+24" that skips the mcount block or the "brcl 0,0"
73 * in case of hotpatch.
73 */ 74 */
74 ftrace_generate_nop_insn((struct ftrace_insn *)p->ainsn.insn); 75 ftrace_generate_nop_insn((struct ftrace_insn *)p->ainsn.insn);
75 p->ainsn.is_ftrace_insn = 1; 76 p->ainsn.is_ftrace_insn = 1;
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 4685337fa7c6..fb0901ec4306 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -103,21 +103,18 @@ static int __init machine_kdump_pm_init(void)
103 return 0; 103 return 0;
104} 104}
105arch_initcall(machine_kdump_pm_init); 105arch_initcall(machine_kdump_pm_init);
106#endif
107 106
108/* 107/*
109 * Start kdump: We expect here that a store status has been done on our CPU 108 * Start kdump: We expect here that a store status has been done on our CPU
110 */ 109 */
111static void __do_machine_kdump(void *image) 110static void __do_machine_kdump(void *image)
112{ 111{
113#ifdef CONFIG_CRASH_DUMP
114 int (*start_kdump)(int) = (void *)((struct kimage *) image)->start; 112 int (*start_kdump)(int) = (void *)((struct kimage *) image)->start;
115 113
116 setup_regs();
117 __load_psw_mask(PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA); 114 __load_psw_mask(PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA);
118 start_kdump(1); 115 start_kdump(1);
119#endif
120} 116}
117#endif
121 118
122/* 119/*
123 * Check if kdump checksums are valid: We call purgatory with parameter "0" 120 * Check if kdump checksums are valid: We call purgatory with parameter "0"
@@ -249,18 +246,18 @@ static void __do_machine_kexec(void *data)
249 */ 246 */
250static void __machine_kexec(void *data) 247static void __machine_kexec(void *data)
251{ 248{
252 struct kimage *image = data;
253
254 __arch_local_irq_stosm(0x04); /* enable DAT */ 249 __arch_local_irq_stosm(0x04); /* enable DAT */
255 pfault_fini(); 250 pfault_fini();
256 tracing_off(); 251 tracing_off();
257 debug_locks_off(); 252 debug_locks_off();
258 if (image->type == KEXEC_TYPE_CRASH) { 253#ifdef CONFIG_CRASH_DUMP
254 if (((struct kimage *) data)->type == KEXEC_TYPE_CRASH) {
255
259 lgr_info_log(); 256 lgr_info_log();
260 s390_reset_system(__do_machine_kdump, data); 257 s390_reset_system(setup_regs, __do_machine_kdump, data);
261 } else { 258 } else
262 s390_reset_system(__do_machine_kexec, data); 259#endif
263 } 260 s390_reset_system(NULL, __do_machine_kexec, data);
264 disabled_wait((unsigned long) __builtin_return_address(0)); 261 disabled_wait((unsigned long) __builtin_return_address(0));
265} 262}
266 263
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index b6dfc5bfcb89..e499370fbccb 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -27,7 +27,9 @@ ENTRY(ftrace_caller)
27 .globl ftrace_regs_caller 27 .globl ftrace_regs_caller
28 .set ftrace_regs_caller,ftrace_caller 28 .set ftrace_regs_caller,ftrace_caller
29 lgr %r1,%r15 29 lgr %r1,%r15
30#ifndef CC_USING_HOTPATCH
30 aghi %r0,MCOUNT_RETURN_FIXUP 31 aghi %r0,MCOUNT_RETURN_FIXUP
32#endif
31 aghi %r15,-STACK_FRAME_SIZE 33 aghi %r15,-STACK_FRAME_SIZE
32 stg %r1,__SF_BACKCHAIN(%r15) 34 stg %r1,__SF_BACKCHAIN(%r15)
33 stg %r1,(STACK_PTREGS_GPRS+15*8)(%r15) 35 stg %r1,(STACK_PTREGS_GPRS+15*8)(%r15)
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index aa7a83948c7b..13fc0978ca7e 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -79,6 +79,14 @@ void release_thread(struct task_struct *dead_task)
79{ 79{
80} 80}
81 81
82#ifdef CONFIG_64BIT
83void arch_release_task_struct(struct task_struct *tsk)
84{
85 if (tsk->thread.vxrs)
86 kfree(tsk->thread.vxrs);
87}
88#endif
89
82int copy_thread(unsigned long clone_flags, unsigned long new_stackp, 90int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
83 unsigned long arg, struct task_struct *p) 91 unsigned long arg, struct task_struct *p)
84{ 92{
@@ -243,13 +251,3 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
243 ret = PAGE_ALIGN(mm->brk + brk_rnd()); 251 ret = PAGE_ALIGN(mm->brk + brk_rnd());
244 return (ret > mm->brk) ? ret : mm->brk; 252 return (ret > mm->brk) ? ret : mm->brk;
245} 253}
246
247unsigned long randomize_et_dyn(unsigned long base)
248{
249 unsigned long ret;
250
251 if (!(current->flags & PF_RANDOMIZE))
252 return base;
253 ret = PAGE_ALIGN(base + brk_rnd());
254 return (ret > base) ? ret : base;
255}
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index dbdd33ee0102..26108232fcaa 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -8,16 +8,24 @@
8 8
9#include <linux/kernel.h> 9#include <linux/kernel.h>
10#include <linux/init.h> 10#include <linux/init.h>
11#include <linux/smp.h>
12#include <linux/seq_file.h> 11#include <linux/seq_file.h>
13#include <linux/delay.h> 12#include <linux/delay.h>
14#include <linux/cpu.h> 13#include <linux/cpu.h>
15#include <asm/elf.h> 14#include <asm/elf.h>
16#include <asm/lowcore.h> 15#include <asm/lowcore.h>
17#include <asm/param.h> 16#include <asm/param.h>
17#include <asm/smp.h>
18 18
19static DEFINE_PER_CPU(struct cpuid, cpu_id); 19static DEFINE_PER_CPU(struct cpuid, cpu_id);
20 20
21void cpu_relax(void)
22{
23 if (!smp_cpu_mtid && MACHINE_HAS_DIAG44)
24 asm volatile("diag 0,0,0x44");
25 barrier();
26}
27EXPORT_SYMBOL(cpu_relax);
28
21/* 29/*
22 * cpu_init - initializes state that is per-CPU. 30 * cpu_init - initializes state that is per-CPU.
23 */ 31 */
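Diagnose 0x44 is a voluntary time-slice yield to the hypervisor, issued only when SMT is off (smp_cpu_mtid == 0). A typical caller, sketched; the flag and loop are illustrative, not from the patch:

	static int done;

	/* Spin politely until another CPU sets the flag. */
	while (!ACCESS_ONCE(done))
		cpu_relax();	/* may issue diag 0x44 and yield the timeslice */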
diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S
index a41f2c99dcc8..7e77e03378f3 100644
--- a/arch/s390/kernel/sclp.S
+++ b/arch/s390/kernel/sclp.S
@@ -294,7 +294,8 @@ ENTRY(_sclp_print_early)
294#ifdef CONFIG_64BIT 294#ifdef CONFIG_64BIT
295 tm LC_AR_MODE_ID,1 295 tm LC_AR_MODE_ID,1
296 jno .Lesa3 296 jno .Lesa3
297 lmh %r6,%r15,96(%r15) # store upper register halves 297 lgfr %r2,%r2 # sign extend return value
298 lmh %r6,%r15,96(%r15) # restore upper register halves
298 ahi %r15,80 299 ahi %r15,80
299.Lesa3: 300.Lesa3:
300#endif 301#endif
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 4e532c67832f..bfac77ada4f2 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -810,6 +810,9 @@ static void __init setup_hwcaps(void)
810 case 0x2828: 810 case 0x2828:
811 strcpy(elf_platform, "zEC12"); 811 strcpy(elf_platform, "zEC12");
812 break; 812 break;
813 case 0x2964:
814 strcpy(elf_platform, "z13");
815 break;
813 } 816 }
814} 817}
815 818
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 0b499f5cbe19..a668993ff577 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -71,9 +71,30 @@ struct pcpu {
71}; 71};
72 72
73static u8 boot_cpu_type; 73static u8 boot_cpu_type;
74static u16 boot_cpu_address;
75static struct pcpu pcpu_devices[NR_CPUS]; 74static struct pcpu pcpu_devices[NR_CPUS];
76 75
76unsigned int smp_cpu_mt_shift;
77EXPORT_SYMBOL(smp_cpu_mt_shift);
78
79unsigned int smp_cpu_mtid;
80EXPORT_SYMBOL(smp_cpu_mtid);
81
82static unsigned int smp_max_threads __initdata = -1U;
83
84static int __init early_nosmt(char *s)
85{
86 smp_max_threads = 1;
87 return 0;
88}
89early_param("nosmt", early_nosmt);
90
91static int __init early_smt(char *s)
92{
93 get_option(&s, &smp_max_threads);
94 return 0;
95}
96early_param("smt", early_smt);
97
77/* 98/*
78 * The smp_cpu_state_mutex must be held when changing the state or polarization 99 * The smp_cpu_state_mutex must be held when changing the state or polarization
 79 * member of a pcpu data structure within the pcpu_devices array. 100
@@ -132,7 +153,7 @@ static inline int pcpu_running(struct pcpu *pcpu)
132/* 153/*
133 * Find struct pcpu by cpu address. 154 * Find struct pcpu by cpu address.
134 */ 155 */
135static struct pcpu *pcpu_find_address(const struct cpumask *mask, int address) 156static struct pcpu *pcpu_find_address(const struct cpumask *mask, u16 address)
136{ 157{
137 int cpu; 158 int cpu;
138 159
@@ -299,6 +320,32 @@ static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
299} 320}
300 321
301/* 322/*
323 * Enable additional logical cpus for multi-threading.
324 */
325static int pcpu_set_smt(unsigned int mtid)
326{
327 register unsigned long reg1 asm ("1") = (unsigned long) mtid;
328 int cc;
329
330 if (smp_cpu_mtid == mtid)
331 return 0;
332 asm volatile(
333 " sigp %1,0,%2 # sigp set multi-threading\n"
334 " ipm %0\n"
335 " srl %0,28\n"
336 : "=d" (cc) : "d" (reg1), "K" (SIGP_SET_MULTI_THREADING)
337 : "cc");
338 if (cc == 0) {
339 smp_cpu_mtid = mtid;
340 smp_cpu_mt_shift = 0;
341 while (smp_cpu_mtid >= (1U << smp_cpu_mt_shift))
342 smp_cpu_mt_shift++;
343 pcpu_devices[0].address = stap();
344 }
345 return cc;
346}
347
348/*
302 * Call function on an online CPU. 349 * Call function on an online CPU.
303 */ 350 */
304void smp_call_online_cpu(void (*func)(void *), void *data) 351void smp_call_online_cpu(void (*func)(void *), void *data)
@@ -512,22 +559,17 @@ EXPORT_SYMBOL(smp_ctl_clear_bit);
512 559
513#ifdef CONFIG_CRASH_DUMP 560#ifdef CONFIG_CRASH_DUMP
514 561
515static void __init smp_get_save_area(int cpu, u16 address) 562static inline void __smp_store_cpu_state(int cpu, u16 address, int is_boot_cpu)
516{ 563{
517 void *lc = pcpu_devices[0].lowcore; 564 void *lc = pcpu_devices[0].lowcore;
518 struct save_area_ext *sa_ext; 565 struct save_area_ext *sa_ext;
519 unsigned long vx_sa; 566 unsigned long vx_sa;
520 567
521 if (is_kdump_kernel())
522 return;
523 if (!OLDMEM_BASE && (address == boot_cpu_address ||
524 ipl_info.type != IPL_TYPE_FCP_DUMP))
525 return;
526 sa_ext = dump_save_area_create(cpu); 568 sa_ext = dump_save_area_create(cpu);
527 if (!sa_ext) 569 if (!sa_ext)
528 panic("could not allocate memory for save area\n"); 570 panic("could not allocate memory for save area\n");
529 if (address == boot_cpu_address) { 571 if (is_boot_cpu) {
530 /* Copy the registers of the boot cpu. */ 572 /* Copy the registers of the boot CPU. */
531 copy_oldmem_page(1, (void *) &sa_ext->sa, sizeof(sa_ext->sa), 573 copy_oldmem_page(1, (void *) &sa_ext->sa, sizeof(sa_ext->sa),
532 SAVE_AREA_BASE - PAGE_SIZE, 0); 574 SAVE_AREA_BASE - PAGE_SIZE, 0);
533 if (MACHINE_HAS_VX) 575 if (MACHINE_HAS_VX)
@@ -548,6 +590,64 @@ static void __init smp_get_save_area(int cpu, u16 address)
548 free_page(vx_sa); 590 free_page(vx_sa);
549} 591}
550 592
593/*
594 * Collect CPU state of the previous, crashed system.
595 * There are four cases:
596 * 1) standard zfcp dump
597 * condition: OLDMEM_BASE == NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
598 * The state for all CPUs except the boot CPU needs to be collected
599 * with sigp stop-and-store-status. The boot CPU state is located in
600 * the absolute lowcore of the memory stored in the HSA. The zcore code
601 * will allocate the save area and copy the boot CPU state from the HSA.
602 * 2) stand-alone kdump for SCSI (zfcp dump with swapped memory)
603 * condition: OLDMEM_BASE != NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
604 * The state for all CPUs except the boot CPU needs to be collected
605 * with sigp stop-and-store-status. The firmware or the boot-loader
606 * stored the registers of the boot CPU in the absolute lowcore in the
607 * memory of the old system.
608 * 3) kdump and the old kernel did not store the CPU state,
609 * or stand-alone kdump for DASD
610 * condition: OLDMEM_BASE != NULL && !is_kdump_kernel()
611 * The state for all CPUs except the boot CPU needs to be collected
612 * with sigp stop-and-store-status. The kexec code or the boot-loader
613 * stored the registers of the boot CPU in the memory of the old system.
614 * 4) kdump and the old kernel stored the CPU state
615 * condition: OLDMEM_BASE != NULL && is_kdump_kernel()
616 * The state of all CPUs is stored in ELF sections in the memory of the
617 * old system. The ELF sections are picked up by the crash_dump code
618 * via elfcorehdr_addr.
619 */
620static void __init smp_store_cpu_states(struct sclp_cpu_info *info)
621{
622 unsigned int cpu, address, i, j;
623 int is_boot_cpu;
624
625 if (is_kdump_kernel())
626 /* Previous system stored the CPU states. Nothing to do. */
627 return;
628 if (!(OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP))
629 /* No previous system present, normal boot. */
630 return;
631 /* Set multi-threading state to the previous system. */
632 pcpu_set_smt(sclp_get_mtid_prev());
633 /* Collect CPU states. */
634 cpu = 0;
635 for (i = 0; i < info->configured; i++) {
636 /* Skip CPUs with different CPU type. */
637 if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type)
638 continue;
639 for (j = 0; j <= smp_cpu_mtid; j++, cpu++) {
640 address = (info->cpu[i].core_id << smp_cpu_mt_shift) + j;
641 is_boot_cpu = (address == pcpu_devices[0].address);
642 if (is_boot_cpu && !OLDMEM_BASE)
643 /* Skip boot CPU for standard zfcp dump. */
644 continue;
 645 /* Get state for this CPU. */
646 __smp_store_cpu_state(cpu, address, is_boot_cpu);
647 }
648 }
649}
650
551int smp_store_status(int cpu) 651int smp_store_status(int cpu)
552{ 652{
553 unsigned long vx_sa; 653 unsigned long vx_sa;
@@ -565,10 +665,6 @@ int smp_store_status(int cpu)
565 return 0; 665 return 0;
566} 666}
567 667
568#else /* CONFIG_CRASH_DUMP */
569
570static inline void smp_get_save_area(int cpu, u16 address) { }
571
572#endif /* CONFIG_CRASH_DUMP */ 668#endif /* CONFIG_CRASH_DUMP */
573 669
574void smp_cpu_set_polarization(int cpu, int val) 670void smp_cpu_set_polarization(int cpu, int val)
@@ -590,11 +686,13 @@ static struct sclp_cpu_info *smp_get_cpu_info(void)
590 info = kzalloc(sizeof(*info), GFP_KERNEL); 686 info = kzalloc(sizeof(*info), GFP_KERNEL);
591 if (info && (use_sigp_detection || sclp_get_cpu_info(info))) { 687 if (info && (use_sigp_detection || sclp_get_cpu_info(info))) {
592 use_sigp_detection = 1; 688 use_sigp_detection = 1;
593 for (address = 0; address <= MAX_CPU_ADDRESS; address++) { 689 for (address = 0; address <= MAX_CPU_ADDRESS;
690 address += (1U << smp_cpu_mt_shift)) {
594 if (__pcpu_sigp_relax(address, SIGP_SENSE, 0, NULL) == 691 if (__pcpu_sigp_relax(address, SIGP_SENSE, 0, NULL) ==
595 SIGP_CC_NOT_OPERATIONAL) 692 SIGP_CC_NOT_OPERATIONAL)
596 continue; 693 continue;
597 info->cpu[info->configured].address = address; 694 info->cpu[info->configured].core_id =
695 address >> smp_cpu_mt_shift;
598 info->configured++; 696 info->configured++;
599 } 697 }
600 info->combined = info->configured; 698 info->combined = info->configured;
@@ -608,7 +706,8 @@ static int __smp_rescan_cpus(struct sclp_cpu_info *info, int sysfs_add)
608{ 706{
609 struct pcpu *pcpu; 707 struct pcpu *pcpu;
610 cpumask_t avail; 708 cpumask_t avail;
611 int cpu, nr, i; 709 int cpu, nr, i, j;
710 u16 address;
612 711
613 nr = 0; 712 nr = 0;
614 cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask); 713 cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
@@ -616,51 +715,76 @@ static int __smp_rescan_cpus(struct sclp_cpu_info *info, int sysfs_add)
616 for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) { 715 for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
617 if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type) 716 if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type)
618 continue; 717 continue;
619 if (pcpu_find_address(cpu_present_mask, info->cpu[i].address)) 718 address = info->cpu[i].core_id << smp_cpu_mt_shift;
620 continue; 719 for (j = 0; j <= smp_cpu_mtid; j++) {
621 pcpu = pcpu_devices + cpu; 720 if (pcpu_find_address(cpu_present_mask, address + j))
622 pcpu->address = info->cpu[i].address; 721 continue;
623 pcpu->state = (i >= info->configured) ? 722 pcpu = pcpu_devices + cpu;
624 CPU_STATE_STANDBY : CPU_STATE_CONFIGURED; 723 pcpu->address = address + j;
625 smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); 724 pcpu->state =
626 set_cpu_present(cpu, true); 725 (cpu >= info->configured*(smp_cpu_mtid + 1)) ?
627 if (sysfs_add && smp_add_present_cpu(cpu) != 0) 726 CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
628 set_cpu_present(cpu, false); 727 smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
629 else 728 set_cpu_present(cpu, true);
630 nr++; 729 if (sysfs_add && smp_add_present_cpu(cpu) != 0)
631 cpu = cpumask_next(cpu, &avail); 730 set_cpu_present(cpu, false);
731 else
732 nr++;
733 cpu = cpumask_next(cpu, &avail);
734 if (cpu >= nr_cpu_ids)
735 break;
736 }
632 } 737 }
633 return nr; 738 return nr;
634} 739}
635 740
636static void __init smp_detect_cpus(void) 741static void __init smp_detect_cpus(void)
637{ 742{
638 unsigned int cpu, c_cpus, s_cpus; 743 unsigned int cpu, mtid, c_cpus, s_cpus;
639 struct sclp_cpu_info *info; 744 struct sclp_cpu_info *info;
745 u16 address;
640 746
747 /* Get CPU information */
641 info = smp_get_cpu_info(); 748 info = smp_get_cpu_info();
642 if (!info) 749 if (!info)
643 panic("smp_detect_cpus failed to allocate memory\n"); 750 panic("smp_detect_cpus failed to allocate memory\n");
751
752 /* Find boot CPU type */
644 if (info->has_cpu_type) { 753 if (info->has_cpu_type) {
645 for (cpu = 0; cpu < info->combined; cpu++) { 754 address = stap();
646 if (info->cpu[cpu].address != boot_cpu_address) 755 for (cpu = 0; cpu < info->combined; cpu++)
647 continue; 756 if (info->cpu[cpu].core_id == address) {
648 /* The boot cpu dictates the cpu type. */ 757 /* The boot cpu dictates the cpu type. */
649 boot_cpu_type = info->cpu[cpu].type; 758 boot_cpu_type = info->cpu[cpu].type;
650 break; 759 break;
651 } 760 }
761 if (cpu >= info->combined)
762 panic("Could not find boot CPU type");
652 } 763 }
764
765#ifdef CONFIG_CRASH_DUMP
766 /* Collect CPU state of previous system */
767 smp_store_cpu_states(info);
768#endif
769
770 /* Set multi-threading state for the current system */
771 mtid = sclp_get_mtid(boot_cpu_type);
772 mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1;
773 pcpu_set_smt(mtid);
774
775 /* Print number of CPUs */
653 c_cpus = s_cpus = 0; 776 c_cpus = s_cpus = 0;
654 for (cpu = 0; cpu < info->combined; cpu++) { 777 for (cpu = 0; cpu < info->combined; cpu++) {
655 if (info->has_cpu_type && info->cpu[cpu].type != boot_cpu_type) 778 if (info->has_cpu_type && info->cpu[cpu].type != boot_cpu_type)
656 continue; 779 continue;
657 if (cpu < info->configured) { 780 if (cpu < info->configured)
658 smp_get_save_area(c_cpus, info->cpu[cpu].address); 781 c_cpus += smp_cpu_mtid + 1;
659 c_cpus++; 782 else
660 } else 783 s_cpus += smp_cpu_mtid + 1;
661 s_cpus++;
662 } 784 }
663 pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus); 785 pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
786
787 /* Add CPUs present at boot */
664 get_online_cpus(); 788 get_online_cpus();
665 __smp_rescan_cpus(info, 0); 789 __smp_rescan_cpus(info, 0);
666 put_online_cpus(); 790 put_online_cpus();
@@ -696,12 +820,23 @@ static void smp_start_secondary(void *cpuvoid)
696int __cpu_up(unsigned int cpu, struct task_struct *tidle) 820int __cpu_up(unsigned int cpu, struct task_struct *tidle)
697{ 821{
698 struct pcpu *pcpu; 822 struct pcpu *pcpu;
699 int rc; 823 int base, i, rc;
700 824
701 pcpu = pcpu_devices + cpu; 825 pcpu = pcpu_devices + cpu;
702 if (pcpu->state != CPU_STATE_CONFIGURED) 826 if (pcpu->state != CPU_STATE_CONFIGURED)
703 return -EIO; 827 return -EIO;
704 if (pcpu_sigp_retry(pcpu, SIGP_INITIAL_CPU_RESET, 0) != 828 base = cpu - (cpu % (smp_cpu_mtid + 1));
829 for (i = 0; i <= smp_cpu_mtid; i++) {
830 if (base + i < nr_cpu_ids)
831 if (cpu_online(base + i))
832 break;
833 }
834 /*
835 * If this is the first CPU of the core to get online
836 * do an initial CPU reset.
837 */
838 if (i > smp_cpu_mtid &&
839 pcpu_sigp_retry(pcpu_devices + base, SIGP_INITIAL_CPU_RESET, 0) !=
705 SIGP_CC_ORDER_CODE_ACCEPTED) 840 SIGP_CC_ORDER_CODE_ACCEPTED)
706 return -EIO; 841 return -EIO;
707 842
@@ -774,7 +909,8 @@ void __init smp_fill_possible_mask(void)
774{ 909{
775 unsigned int possible, sclp, cpu; 910 unsigned int possible, sclp, cpu;
776 911
777 sclp = sclp_get_max_cpu() ?: nr_cpu_ids; 912 sclp = min(smp_max_threads, sclp_get_mtid_max() + 1);
913 sclp = sclp_get_max_cpu()*sclp ?: nr_cpu_ids;
778 possible = setup_possible_cpus ?: nr_cpu_ids; 914 possible = setup_possible_cpus ?: nr_cpu_ids;
779 possible = min(possible, sclp); 915 possible = min(possible, sclp);
780 for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++) 916 for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
@@ -796,9 +932,8 @@ void __init smp_prepare_boot_cpu(void)
796{ 932{
797 struct pcpu *pcpu = pcpu_devices; 933 struct pcpu *pcpu = pcpu_devices;
798 934
799 boot_cpu_address = stap();
800 pcpu->state = CPU_STATE_CONFIGURED; 935 pcpu->state = CPU_STATE_CONFIGURED;
801 pcpu->address = boot_cpu_address; 936 pcpu->address = stap();
802 pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix(); 937 pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix();
803 pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE 938 pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE
804 + STACK_FRAME_OVERHEAD + sizeof(struct pt_regs); 939 + STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
@@ -848,7 +983,7 @@ static ssize_t cpu_configure_store(struct device *dev,
848 const char *buf, size_t count) 983 const char *buf, size_t count)
849{ 984{
850 struct pcpu *pcpu; 985 struct pcpu *pcpu;
851 int cpu, val, rc; 986 int cpu, val, rc, i;
852 char delim; 987 char delim;
853 988
854 if (sscanf(buf, "%d %c", &val, &delim) != 1) 989 if (sscanf(buf, "%d %c", &val, &delim) != 1)
@@ -860,29 +995,43 @@ static ssize_t cpu_configure_store(struct device *dev,
860 rc = -EBUSY; 995 rc = -EBUSY;
861 /* disallow configuration changes of online cpus and cpu 0 */ 996 /* disallow configuration changes of online cpus and cpu 0 */
862 cpu = dev->id; 997 cpu = dev->id;
863 if (cpu_online(cpu) || cpu == 0) 998 cpu -= cpu % (smp_cpu_mtid + 1);
999 if (cpu == 0)
864 goto out; 1000 goto out;
1001 for (i = 0; i <= smp_cpu_mtid; i++)
1002 if (cpu_online(cpu + i))
1003 goto out;
865 pcpu = pcpu_devices + cpu; 1004 pcpu = pcpu_devices + cpu;
866 rc = 0; 1005 rc = 0;
867 switch (val) { 1006 switch (val) {
868 case 0: 1007 case 0:
869 if (pcpu->state != CPU_STATE_CONFIGURED) 1008 if (pcpu->state != CPU_STATE_CONFIGURED)
870 break; 1009 break;
871 rc = sclp_cpu_deconfigure(pcpu->address); 1010 rc = sclp_cpu_deconfigure(pcpu->address >> smp_cpu_mt_shift);
872 if (rc) 1011 if (rc)
873 break; 1012 break;
874 pcpu->state = CPU_STATE_STANDBY; 1013 for (i = 0; i <= smp_cpu_mtid; i++) {
875 smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); 1014 if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
1015 continue;
1016 pcpu[i].state = CPU_STATE_STANDBY;
1017 smp_cpu_set_polarization(cpu + i,
1018 POLARIZATION_UNKNOWN);
1019 }
876 topology_expect_change(); 1020 topology_expect_change();
877 break; 1021 break;
878 case 1: 1022 case 1:
879 if (pcpu->state != CPU_STATE_STANDBY) 1023 if (pcpu->state != CPU_STATE_STANDBY)
880 break; 1024 break;
881 rc = sclp_cpu_configure(pcpu->address); 1025 rc = sclp_cpu_configure(pcpu->address >> smp_cpu_mt_shift);
882 if (rc) 1026 if (rc)
883 break; 1027 break;
884 pcpu->state = CPU_STATE_CONFIGURED; 1028 for (i = 0; i <= smp_cpu_mtid; i++) {
885 smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); 1029 if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
1030 continue;
1031 pcpu[i].state = CPU_STATE_CONFIGURED;
1032 smp_cpu_set_polarization(cpu + i,
1033 POLARIZATION_UNKNOWN);
1034 }
886 topology_expect_change(); 1035 topology_expect_change();
887 break; 1036 break;
888 default: 1037 default:
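Throughout the SMT rework a CPU address is the core id shifted by smp_cpu_mt_shift plus a thread id, where smp_cpu_mt_shift is the smallest shift with smp_cpu_mtid + 1 <= (1 << shift). A hedged sketch of the mapping; the helper names are invented, the arithmetic matches __smp_rescan_cpus() and cpu_configure_store() above:

static inline u16 smt_address(u16 core_id, u16 thread)
{
	return (core_id << smp_cpu_mt_shift) + thread;
}

static inline u16 smt_core(u16 address)
{
	return address >> smp_cpu_mt_shift;
}

For example, with two threads per core (smp_cpu_mtid == 1, shift == 1), core 3 owns addresses 6 and 7.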
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
index 811f542b8ed4..85565f1ff474 100644
--- a/arch/s390/kernel/sysinfo.c
+++ b/arch/s390/kernel/sysinfo.c
@@ -194,6 +194,14 @@ static void stsi_2_2_2(struct seq_file *m, struct sysinfo_2_2_2 *info)
194 seq_printf(m, "LPAR CPUs Reserved: %d\n", info->cpus_reserved); 194 seq_printf(m, "LPAR CPUs Reserved: %d\n", info->cpus_reserved);
195 seq_printf(m, "LPAR CPUs Dedicated: %d\n", info->cpus_dedicated); 195 seq_printf(m, "LPAR CPUs Dedicated: %d\n", info->cpus_dedicated);
196 seq_printf(m, "LPAR CPUs Shared: %d\n", info->cpus_shared); 196 seq_printf(m, "LPAR CPUs Shared: %d\n", info->cpus_shared);
197 if (info->mt_installed & 0x80) {
198 seq_printf(m, "LPAR CPUs G-MTID: %d\n",
199 info->mt_general & 0x1f);
200 seq_printf(m, "LPAR CPUs S-MTID: %d\n",
201 info->mt_installed & 0x1f);
202 seq_printf(m, "LPAR CPUs PS-MTID: %d\n",
203 info->mt_psmtid & 0x1f);
204 }
197} 205}
198 206
199static void stsi_3_2_2(struct seq_file *m, struct sysinfo_3_2_2 *info) 207static void stsi_3_2_2(struct seq_file *m, struct sysinfo_3_2_2 *info)
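Assuming the masking above (bit 0x80 flags valid MT data, the low five bits carry the ids), a machine with two threads per core might add lines like these to /proc/sysinfo; the values are illustrative only:

	LPAR CPUs G-MTID: 1
	LPAR CPUs S-MTID: 1
	LPAR CPUs PS-MTID: 1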
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index b93bed76ea94..24ee33f1af24 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -59,32 +59,50 @@ static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
59 return mask; 59 return mask;
60} 60}
61 61
62static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu, 62static cpumask_t cpu_thread_map(unsigned int cpu)
63{
64 cpumask_t mask;
65 int i;
66
67 cpumask_copy(&mask, cpumask_of(cpu));
68 if (!topology_enabled || !MACHINE_HAS_TOPOLOGY)
69 return mask;
70 cpu -= cpu % (smp_cpu_mtid + 1);
71 for (i = 0; i <= smp_cpu_mtid; i++)
72 if (cpu_present(cpu + i))
73 cpumask_set_cpu(cpu + i, &mask);
74 return mask;
75}
76
77static struct mask_info *add_cpus_to_mask(struct topology_core *tl_core,
63 struct mask_info *book, 78 struct mask_info *book,
64 struct mask_info *socket, 79 struct mask_info *socket,
65 int one_socket_per_cpu) 80 int one_socket_per_cpu)
66{ 81{
67 unsigned int cpu; 82 unsigned int core;
68 83
69 for_each_set_bit(cpu, &tl_cpu->mask[0], TOPOLOGY_CPU_BITS) { 84 for_each_set_bit(core, &tl_core->mask[0], TOPOLOGY_CORE_BITS) {
70 unsigned int rcpu; 85 unsigned int rcore;
71 int lcpu; 86 int lcpu, i;
72 87
73 rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin; 88 rcore = TOPOLOGY_CORE_BITS - 1 - core + tl_core->origin;
74 lcpu = smp_find_processor_id(rcpu); 89 lcpu = smp_find_processor_id(rcore << smp_cpu_mt_shift);
75 if (lcpu < 0) 90 if (lcpu < 0)
76 continue; 91 continue;
77 cpumask_set_cpu(lcpu, &book->mask); 92 for (i = 0; i <= smp_cpu_mtid; i++) {
78 cpu_topology[lcpu].book_id = book->id; 93 cpu_topology[lcpu + i].book_id = book->id;
79 cpumask_set_cpu(lcpu, &socket->mask); 94 cpu_topology[lcpu + i].core_id = rcore;
80 cpu_topology[lcpu].core_id = rcpu; 95 cpu_topology[lcpu + i].thread_id = lcpu + i;
81 if (one_socket_per_cpu) { 96 cpumask_set_cpu(lcpu + i, &book->mask);
82 cpu_topology[lcpu].socket_id = rcpu; 97 cpumask_set_cpu(lcpu + i, &socket->mask);
83 socket = socket->next; 98 if (one_socket_per_cpu)
84 } else { 99 cpu_topology[lcpu + i].socket_id = rcore;
85 cpu_topology[lcpu].socket_id = socket->id; 100 else
101 cpu_topology[lcpu + i].socket_id = socket->id;
102 smp_cpu_set_polarization(lcpu + i, tl_core->pp);
86 } 103 }
87 smp_cpu_set_polarization(lcpu, tl_cpu->pp); 104 if (one_socket_per_cpu)
105 socket = socket->next;
88 } 106 }
89 return socket; 107 return socket;
90} 108}
@@ -108,7 +126,7 @@ static void clear_masks(void)
108static union topology_entry *next_tle(union topology_entry *tle) 126static union topology_entry *next_tle(union topology_entry *tle)
109{ 127{
110 if (!tle->nl) 128 if (!tle->nl)
111 return (union topology_entry *)((struct topology_cpu *)tle + 1); 129 return (union topology_entry *)((struct topology_core *)tle + 1);
112 return (union topology_entry *)((struct topology_container *)tle + 1); 130 return (union topology_entry *)((struct topology_container *)tle + 1);
113} 131}
114 132
@@ -231,9 +249,11 @@ static void update_cpu_masks(void)
231 249
232 spin_lock_irqsave(&topology_lock, flags); 250 spin_lock_irqsave(&topology_lock, flags);
233 for_each_possible_cpu(cpu) { 251 for_each_possible_cpu(cpu) {
252 cpu_topology[cpu].thread_mask = cpu_thread_map(cpu);
234 cpu_topology[cpu].core_mask = cpu_group_map(&socket_info, cpu); 253 cpu_topology[cpu].core_mask = cpu_group_map(&socket_info, cpu);
235 cpu_topology[cpu].book_mask = cpu_group_map(&book_info, cpu); 254 cpu_topology[cpu].book_mask = cpu_group_map(&book_info, cpu);
236 if (!MACHINE_HAS_TOPOLOGY) { 255 if (!MACHINE_HAS_TOPOLOGY) {
256 cpu_topology[cpu].thread_id = cpu;
237 cpu_topology[cpu].core_id = cpu; 257 cpu_topology[cpu].core_id = cpu;
238 cpu_topology[cpu].socket_id = cpu; 258 cpu_topology[cpu].socket_id = cpu;
239 cpu_topology[cpu].book_id = cpu; 259 cpu_topology[cpu].book_id = cpu;
@@ -445,6 +465,12 @@ int topology_cpu_init(struct cpu *cpu)
445 return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group); 465 return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
446} 466}
447 467
468const struct cpumask *cpu_thread_mask(int cpu)
469{
470 return &cpu_topology[cpu].thread_mask;
471}
472
473
448const struct cpumask *cpu_coregroup_mask(int cpu) 474const struct cpumask *cpu_coregroup_mask(int cpu)
449{ 475{
450 return &cpu_topology[cpu].core_mask; 476 return &cpu_topology[cpu].core_mask;
@@ -456,6 +482,7 @@ static const struct cpumask *cpu_book_mask(int cpu)
456} 482}
457 483
458static struct sched_domain_topology_level s390_topology[] = { 484static struct sched_domain_topology_level s390_topology[] = {
485 { cpu_thread_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
459 { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) }, 486 { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
460 { cpu_book_mask, SD_INIT_NAME(BOOK) }, 487 { cpu_book_mask, SD_INIT_NAME(BOOK) },
461 { cpu_cpu_mask, SD_INIT_NAME(DIE) }, 488 { cpu_cpu_mask, SD_INIT_NAME(DIE) },
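cpu_thread_map() relies on sibling threads being numbered contiguously from the first thread of the core. A one-line sketch of the rounding, with illustrative values:

	/* With smp_cpu_mtid == 1 (two threads per core), CPUs 4 and 5
	 * share a core; both round down to base 4.
	 */
	unsigned int base = cpu - cpu % (smp_cpu_mtid + 1);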
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index e34122e539a1..e53d3595a7c8 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -15,6 +15,8 @@
15#include <asm/cputime.h> 15#include <asm/cputime.h>
16#include <asm/vtimer.h> 16#include <asm/vtimer.h>
17#include <asm/vtime.h> 17#include <asm/vtime.h>
18#include <asm/cpu_mf.h>
19#include <asm/smp.h>
18 20
19static void virt_timer_expire(void); 21static void virt_timer_expire(void);
20 22
@@ -23,6 +25,10 @@ static DEFINE_SPINLOCK(virt_timer_lock);
23static atomic64_t virt_timer_current; 25static atomic64_t virt_timer_current;
24static atomic64_t virt_timer_elapsed; 26static atomic64_t virt_timer_elapsed;
25 27
28static DEFINE_PER_CPU(u64, mt_cycles[32]);
29static DEFINE_PER_CPU(u64, mt_scaling_mult) = { 1 };
30static DEFINE_PER_CPU(u64, mt_scaling_div) = { 1 };
31
26static inline u64 get_vtimer(void) 32static inline u64 get_vtimer(void)
27{ 33{
28 u64 timer; 34 u64 timer;
@@ -61,6 +67,8 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
61{ 67{
62 struct thread_info *ti = task_thread_info(tsk); 68 struct thread_info *ti = task_thread_info(tsk);
63 u64 timer, clock, user, system, steal; 69 u64 timer, clock, user, system, steal;
70 u64 user_scaled, system_scaled;
71 int i;
64 72
65 timer = S390_lowcore.last_update_timer; 73 timer = S390_lowcore.last_update_timer;
66 clock = S390_lowcore.last_update_clock; 74 clock = S390_lowcore.last_update_clock;
@@ -76,15 +84,49 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
76 S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer; 84 S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
77 S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock; 85 S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;
78 86
87 /* Do MT utilization calculation */
88 if (smp_cpu_mtid) {
89 u64 cycles_new[32], *cycles_old;
90 u64 delta, mult, div;
91
92 cycles_old = this_cpu_ptr(mt_cycles);
93 if (stcctm5(smp_cpu_mtid + 1, cycles_new) < 2) {
94 mult = div = 0;
95 for (i = 0; i <= smp_cpu_mtid; i++) {
96 delta = cycles_new[i] - cycles_old[i];
97 mult += delta;
98 div += (i + 1) * delta;
99 }
100 if (mult > 0) {
101 /* Update scaling factor */
102 __this_cpu_write(mt_scaling_mult, mult);
103 __this_cpu_write(mt_scaling_div, div);
104 memcpy(cycles_old, cycles_new,
105 sizeof(u64) * (smp_cpu_mtid + 1));
106 }
107 }
108 }
109
79 user = S390_lowcore.user_timer - ti->user_timer; 110 user = S390_lowcore.user_timer - ti->user_timer;
80 S390_lowcore.steal_timer -= user; 111 S390_lowcore.steal_timer -= user;
81 ti->user_timer = S390_lowcore.user_timer; 112 ti->user_timer = S390_lowcore.user_timer;
82 account_user_time(tsk, user, user);
83 113
84 system = S390_lowcore.system_timer - ti->system_timer; 114 system = S390_lowcore.system_timer - ti->system_timer;
85 S390_lowcore.steal_timer -= system; 115 S390_lowcore.steal_timer -= system;
86 ti->system_timer = S390_lowcore.system_timer; 116 ti->system_timer = S390_lowcore.system_timer;
87 account_system_time(tsk, hardirq_offset, system, system); 117
118 user_scaled = user;
119 system_scaled = system;
120 /* Do MT utilization scaling */
121 if (smp_cpu_mtid) {
122 u64 mult = __this_cpu_read(mt_scaling_mult);
123 u64 div = __this_cpu_read(mt_scaling_div);
124
125 user_scaled = (user_scaled * mult) / div;
126 system_scaled = (system_scaled * mult) / div;
127 }
128 account_user_time(tsk, user, user_scaled);
129 account_system_time(tsk, hardirq_offset, system, system_scaled);
88 130
89 steal = S390_lowcore.steal_timer; 131 steal = S390_lowcore.steal_timer;
90 if ((s64) steal > 0) { 132 if ((s64) steal > 0) {
@@ -126,7 +168,7 @@ void vtime_account_user(struct task_struct *tsk)
126void vtime_account_irq_enter(struct task_struct *tsk) 168void vtime_account_irq_enter(struct task_struct *tsk)
127{ 169{
128 struct thread_info *ti = task_thread_info(tsk); 170 struct thread_info *ti = task_thread_info(tsk);
129 u64 timer, system; 171 u64 timer, system, system_scaled;
130 172
131 timer = S390_lowcore.last_update_timer; 173 timer = S390_lowcore.last_update_timer;
132 S390_lowcore.last_update_timer = get_vtimer(); 174 S390_lowcore.last_update_timer = get_vtimer();
@@ -135,7 +177,15 @@ void vtime_account_irq_enter(struct task_struct *tsk)
135 system = S390_lowcore.system_timer - ti->system_timer; 177 system = S390_lowcore.system_timer - ti->system_timer;
136 S390_lowcore.steal_timer -= system; 178 S390_lowcore.steal_timer -= system;
137 ti->system_timer = S390_lowcore.system_timer; 179 ti->system_timer = S390_lowcore.system_timer;
138 account_system_time(tsk, 0, system, system); 180 system_scaled = system;
181 /* Do MT utilization scaling */
182 if (smp_cpu_mtid) {
183 u64 mult = __this_cpu_read(mt_scaling_mult);
184 u64 div = __this_cpu_read(mt_scaling_div);
185
186 system_scaled = (system_scaled * mult) / div;
187 }
188 account_system_time(tsk, 0, system, system_scaled);
139 189
140 virt_timer_forward(system); 190 virt_timer_forward(system);
141} 191}
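The scaling factor built in do_account_vtime() weights each thread's cycle delta by how many threads were active, so time spent sharing the core is discounted. A worked sketch with invented numbers:

	/* Thread 0 ran 600 cycles, thread 1 ran 400 since the last update. */
	u64 delta0 = 600, delta1 = 400;
	u64 mult = delta0 + delta1;		/* 1000 */
	u64 div  = 1 * delta0 + 2 * delta1;	/* 1400 */

	/* 7000 raw timer units are credited as 7000 * 1000 / 1400 = 5000. */
	u64 scaled = 7000 * mult / div;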
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 034a35a3e9c1..d6c9991f7797 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -12,7 +12,15 @@
12#include <linux/smp.h> 12#include <linux/smp.h>
13#include <asm/io.h> 13#include <asm/io.h>
14 14
15int spin_retry = 1000; 15int spin_retry = -1;
16
17static int __init spin_retry_init(void)
18{
19 if (spin_retry < 0)
20 spin_retry = MACHINE_HAS_CAD ? 10 : 1000;
21 return 0;
22}
23early_initcall(spin_retry_init);
16 24
17/** 25/**
18 * spin_retry= parameter 26 * spin_retry= parameter
@@ -24,6 +32,11 @@ static int __init spin_retry_setup(char *str)
24} 32}
25__setup("spin_retry=", spin_retry_setup); 33__setup("spin_retry=", spin_retry_setup);
26 34
35static inline void _raw_compare_and_delay(unsigned int *lock, unsigned int old)
36{
37 asm(".insn rsy,0xeb0000000022,%0,0,%1" : : "d" (old), "Q" (*lock));
38}
39
27void arch_spin_lock_wait(arch_spinlock_t *lp) 40void arch_spin_lock_wait(arch_spinlock_t *lp)
28{ 41{
29 unsigned int cpu = SPINLOCK_LOCKVAL; 42 unsigned int cpu = SPINLOCK_LOCKVAL;
@@ -46,6 +59,8 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
46 /* Loop for a while on the lock value. */ 59 /* Loop for a while on the lock value. */
47 count = spin_retry; 60 count = spin_retry;
48 do { 61 do {
62 if (MACHINE_HAS_CAD)
63 _raw_compare_and_delay(&lp->lock, owner);
49 owner = ACCESS_ONCE(lp->lock); 64 owner = ACCESS_ONCE(lp->lock);
50 } while (owner && count-- > 0); 65 } while (owner && count-- > 0);
51 if (!owner) 66 if (!owner)
@@ -84,6 +99,8 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
84 /* Loop for a while on the lock value. */ 99 /* Loop for a while on the lock value. */
85 count = spin_retry; 100 count = spin_retry;
86 do { 101 do {
102 if (MACHINE_HAS_CAD)
103 _raw_compare_and_delay(&lp->lock, owner);
87 owner = ACCESS_ONCE(lp->lock); 104 owner = ACCESS_ONCE(lp->lock);
88 } while (owner && count-- > 0); 105 } while (owner && count-- > 0);
89 if (!owner) 106 if (!owner)
@@ -100,11 +117,19 @@ EXPORT_SYMBOL(arch_spin_lock_wait_flags);
100 117
101int arch_spin_trylock_retry(arch_spinlock_t *lp) 118int arch_spin_trylock_retry(arch_spinlock_t *lp)
102{ 119{
120 unsigned int cpu = SPINLOCK_LOCKVAL;
121 unsigned int owner;
103 int count; 122 int count;
104 123
105 for (count = spin_retry; count > 0; count--) 124 for (count = spin_retry; count > 0; count--) {
106 if (arch_spin_trylock_once(lp)) 125 owner = ACCESS_ONCE(lp->lock);
107 return 1; 126 /* Try to get the lock if it is free. */
127 if (!owner) {
128 if (_raw_compare_and_swap(&lp->lock, 0, cpu))
129 return 1;
130 } else if (MACHINE_HAS_CAD)
131 _raw_compare_and_delay(&lp->lock, owner);
132 }
108 return 0; 133 return 0;
109} 134}
110EXPORT_SYMBOL(arch_spin_trylock_retry); 135EXPORT_SYMBOL(arch_spin_trylock_retry);
@@ -126,8 +151,11 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
 		}
 		old = ACCESS_ONCE(rw->lock);
 		owner = ACCESS_ONCE(rw->owner);
-		if ((int) old < 0)
+		if ((int) old < 0) {
+			if (MACHINE_HAS_CAD)
+				_raw_compare_and_delay(&rw->lock, old);
 			continue;
+		}
 		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
 			return;
 	}
@@ -141,8 +169,11 @@ int _raw_read_trylock_retry(arch_rwlock_t *rw)
 
 	while (count-- > 0) {
 		old = ACCESS_ONCE(rw->lock);
-		if ((int) old < 0)
+		if ((int) old < 0) {
+			if (MACHINE_HAS_CAD)
+				_raw_compare_and_delay(&rw->lock, old);
 			continue;
+		}
 		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
 			return 1;
 	}
@@ -173,6 +204,8 @@ void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
 		}
 		if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
 			break;
+		if (MACHINE_HAS_CAD)
+			_raw_compare_and_delay(&rw->lock, old);
 	}
 }
 EXPORT_SYMBOL(_raw_write_lock_wait);
@@ -201,6 +234,8 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
 		smp_rmb();
 		if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
 			break;
+		if (MACHINE_HAS_CAD)
+			_raw_compare_and_delay(&rw->lock, old);
 	}
 }
 EXPORT_SYMBOL(_raw_write_lock_wait);
@@ -214,8 +249,11 @@ int _raw_write_trylock_retry(arch_rwlock_t *rw)
 
 	while (count-- > 0) {
 		old = ACCESS_ONCE(rw->lock);
-		if (old)
+		if (old) {
+			if (MACHINE_HAS_CAD)
+				_raw_compare_and_delay(&rw->lock, old);
 			continue;
+		}
 		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
 			return 1;
 	}
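
Every wait loop in this file gains the same pattern: when the lock word is observed busy and the machine supports compare-and-delay (MACHINE_HAS_CAD), the CPU executes _raw_compare_and_delay() before re-reading the lock, telling the hardware it is spinning on that word. A minimal sketch of the resulting loop shape; cpu_pause() here is a hypothetical, portable stand-in for the s390-only ".insn rsy,0xeb0000000022" encoding, which delays only while the lock word still holds the value last observed:

    /* Sketch of the retry-loop shape introduced above; not the kernel code. */
    static inline void cpu_pause(void)
    {
        /* stand-in for compare-and-delay; real ports use pause/yield/CAD */
        __asm__ volatile("" ::: "memory");
    }

    static int trylock_retry(unsigned int *lock, unsigned int self, int retries)
    {
        unsigned int owner;
        int count;

        for (count = retries; count > 0; count--) {
            owner = *(volatile unsigned int *)lock;
            if (!owner) {
                /* lock looks free: one atomic attempt to claim it */
                if (__sync_bool_compare_and_swap(lock, 0, self))
                    return 1;
            } else {
                cpu_pause();    /* back off while the owner holds it */
            }
        }
        return 0;
    }

    int main(void)
    {
        unsigned int lock = 0;
        return trylock_retry(&lock, 1, 1000) ? 0 : 1;
    }

Note the matching init change at the top of the file: with CAD available, ten delayed probes replace a thousand busy reads, so spin_retry now defaults per machine at early-init time.
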
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 9065d5aa3932..3ff86533f7db 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -171,7 +171,7 @@ static void dump_pagetable(unsigned long asce, unsigned long address)
 	table = table + ((address >> 20) & 0x7ff);
 	if (bad_address(table))
 		goto bad;
-	pr_cont(KERN_CONT "S:%016lx ", *table);
+	pr_cont("S:%016lx ", *table);
 	if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
 		goto out;
 	table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
@@ -261,7 +261,7 @@ static inline void report_user_fault(struct pt_regs *regs, long signr)
 		return;
 	if (!printk_ratelimit())
 		return;
-	printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d",
+	printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ",
 	       regs->int_code & 0xffff, regs->int_code >> 17);
 	print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN);
 	printk(KERN_CONT "\n");
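
Two small printk fixes: pr_cont() already expands to printk(KERN_CONT ...), so the doubled marker in dump_pagetable() leaked the second marker's bytes into the log, and report_user_fault() needed a trailing space so the "in <vma>" text appended by print_vma_addr() does not run into the ilc value. A toy userspace model of the marker handling; toy_printk and its one-marker stripping are invented stand-ins, not the kernel implementation:

    #include <stdio.h>
    #include <string.h>

    #define KERN_CONT "\001c"    /* the kernel's continuation marker */

    /* toy printk: strip one leading marker, print the rest verbatim */
    static void toy_printk(const char *msg)
    {
        if (!strncmp(msg, KERN_CONT, 2))
            msg += 2;
        fputs(msg, stdout);
    }

    #define pr_cont(fmt) toy_printk(KERN_CONT fmt)

    int main(void)
    {
        pr_cont(KERN_CONT "S:0123 ");  /* extra marker leaks into output */
        pr_cont("S:0123 ");            /* fixed form: clean output */
        putchar('\n');
        return 0;
    }
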
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index c7235e01fd67..d35b15113b17 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -71,13 +71,16 @@ static void __init setup_zero_pages(void)
 		break;
 	case 0x2827:	/* zEC12 */
 	case 0x2828:	/* zEC12 */
-	default:
 		order = 5;
 		break;
+	case 0x2964:	/* z13 */
+	default:
+		order = 7;
+		break;
 	}
 	/* Limit number of empty zero pages for small memory sizes */
-	if (order > 2 && totalram_pages <= 16384)
-		order = 2;
+	while (order > 2 && (totalram_pages >> 10) < (1UL << order))
+		order--;
 
 	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
 	if (!empty_zero_page)
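
The zero-page change has two parts: z13 (type 0x2964) and newer machines get order 7, a block of 2^7 = 128 zero pages so readers spread over more cache colors, and the old hard cutoff (force order 2 at or below 16384 pages, i.e. 64 MiB) becomes proportional: the order is reduced until the block is at most roughly totalram_pages/1024 pages. A standalone rerun of the new limit; the 64 MiB memory size is a sample value:

    #include <stdio.h>

    int main(void)
    {
        unsigned long totalram_pages = 16384;  /* sample: 64 MiB of 4 KiB pages */
        unsigned int order = 7;                /* z13 default: 128 zero pages */

        /* shrink until 2^order <= totalram_pages / 1024 (or order == 2) */
        while (order > 2 && (totalram_pages >> 10) < (1UL << order))
            order--;

        /* for 64 MiB this settles at order 4: 16 zero pages, 64 KiB */
        printf("order %u -> %lu zero pages (%lu KiB)\n",
               order, 1UL << order, (1UL << order) * 4);
        return 0;
    }
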
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 9b436c21195e..d008f638b2cd 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -28,8 +28,12 @@
 #include <linux/module.h>
 #include <linux/random.h>
 #include <linux/compat.h>
+#include <linux/security.h>
 #include <asm/pgalloc.h>
 
+unsigned long mmap_rnd_mask;
+unsigned long mmap_align_mask;
+
 static unsigned long stack_maxrandom_size(void)
 {
 	if (!(current->flags & PF_RANDOMIZE))
@@ -60,8 +64,10 @@ static unsigned long mmap_rnd(void)
 {
 	if (!(current->flags & PF_RANDOMIZE))
 		return 0;
-	/* 8MB randomization for mmap_base */
-	return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
+	if (is_32bit_task())
+		return (get_random_int() & 0x7ff) << PAGE_SHIFT;
+	else
+		return (get_random_int() & mmap_rnd_mask) << PAGE_SHIFT;
 }
 
 static unsigned long mmap_base_legacy(void)
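
mmap_rnd() keeps the old 8 MB randomization window for 31-bit tasks and switches 64-bit tasks to the machine-dependent mmap_rnd_mask set up later in this file. On z13 the mask 0x3ff80 clears the low seven page bits, so the offset stays 512 KiB-aligned (preserving the cache-synonym alignment) while the window grows to 1 GiB. A quick check of those numbers, assuming s390's 4 KiB pages:

    #include <stdio.h>

    int main(void)
    {
        unsigned long page_shift = 12;       /* 4 KiB pages */
        unsigned long old_mask = 0x7ffUL;    /* pre-z13 randomization mask */
        unsigned long z13_mask = 0x3ff80UL;  /* z13 mmap_rnd_mask */

        /* window = mask + step, where step is the mask's lowest set bit */
        printf("old window: %lu MiB, step %lu KiB\n",
               ((old_mask + 1) << page_shift) >> 20,    /* 8 MiB */
               (1UL << page_shift) >> 10);              /* 4 KiB */
        printf("z13 window: %lu MiB, step %lu KiB\n",
               ((z13_mask + 0x80) << page_shift) >> 20, /* 1024 MiB */
               (0x80UL << page_shift) >> 10);           /* 512 KiB */
        return 0;
    }
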
@@ -81,6 +87,106 @@ static inline unsigned long mmap_base(void)
 	return STACK_TOP - stack_maxrandom_size() - mmap_rnd() - gap;
 }
 
+unsigned long
+arch_get_unmapped_area(struct file *filp, unsigned long addr,
+		       unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+	struct vm_unmapped_area_info info;
+	int do_color_align;
+
+	if (len > TASK_SIZE - mmap_min_addr)
+		return -ENOMEM;
+
+	if (flags & MAP_FIXED)
+		return addr;
+
+	if (addr) {
+		addr = PAGE_ALIGN(addr);
+		vma = find_vma(mm, addr);
+		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
+		    (!vma || addr + len <= vma->vm_start))
+			return addr;
+	}
+
+	do_color_align = 0;
+	if (filp || (flags & MAP_SHARED))
+		do_color_align = !is_32bit_task();
+
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = mm->mmap_base;
+	info.high_limit = TASK_SIZE;
+	info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
+	return vm_unmapped_area(&info);
+}
+
+unsigned long
+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+			       const unsigned long len, const unsigned long pgoff,
+			       const unsigned long flags)
+{
+	struct vm_area_struct *vma;
+	struct mm_struct *mm = current->mm;
+	unsigned long addr = addr0;
+	struct vm_unmapped_area_info info;
+	int do_color_align;
+
+	/* requested length too big for entire address space */
+	if (len > TASK_SIZE - mmap_min_addr)
+		return -ENOMEM;
+
+	if (flags & MAP_FIXED)
+		return addr;
+
+	/* requesting a specific address */
+	if (addr) {
+		addr = PAGE_ALIGN(addr);
+		vma = find_vma(mm, addr);
+		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
+		    (!vma || addr + len <= vma->vm_start))
+			return addr;
+	}
+
+	do_color_align = 0;
+	if (filp || (flags & MAP_SHARED))
+		do_color_align = !is_32bit_task();
+
+	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+	info.length = len;
+	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
+	info.high_limit = mm->mmap_base;
+	info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
+	addr = vm_unmapped_area(&info);
+
+	/*
+	 * A failed mmap() very likely causes application failure,
+	 * so fall back to the bottom-up function here. This scenario
+	 * can happen with large stack limits and large mmap()
+	 * allocations.
+	 */
+	if (addr & ~PAGE_MASK) {
+		VM_BUG_ON(addr != -ENOMEM);
+		info.flags = 0;
+		info.low_limit = TASK_UNMAPPED_BASE;
+		info.high_limit = TASK_SIZE;
+		addr = vm_unmapped_area(&info);
+	}
+
+	return addr;
+}
+
+unsigned long randomize_et_dyn(void)
+{
+	unsigned long base;
+
+	base = (STACK_TOP / 3 * 2) & (~mmap_align_mask << PAGE_SHIFT);
+	return base + mmap_rnd();
+}
+
 #ifndef CONFIG_64BIT
 
 /*
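
The two arch_get_unmapped_area helpers added above feed mmap_align_mask into vm_unmapped_area() via info.align_mask and info.align_offset, so colored shared mappings land at addresses congruent to the file offset within each 512 KiB window; the top-down variant falls back to a bottom-up search when the constrained search fails. A minimal sketch of what the align_mask/align_offset pair means; align_like_vm_unmapped_area is an invented helper, not the kernel's allocator:

    #include <stdio.h>

    /* round addr up until (addr & align_mask) == (align_offset & align_mask),
     * which is the contract vm_unmapped_area() enforces on its result */
    static unsigned long align_like_vm_unmapped_area(unsigned long addr,
                                                     unsigned long align_mask,
                                                     unsigned long align_offset)
    {
        addr += (align_offset - addr) & align_mask;
        return addr;
    }

    int main(void)
    {
        unsigned long page = 4096;
        unsigned long align_mask = 0x7fUL * page;   /* z13 coloring window */
        unsigned long pgoff = 3;                    /* file offset in pages */
        unsigned long addr;

        addr = align_like_vm_unmapped_area(0x20000000UL, align_mask, pgoff * page);
        /* prints color page 3: the mapping matches the file offset's color */
        printf("addr 0x%lx, color page %lu\n", addr, (addr & align_mask) / page);
        return 0;
    }
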
@@ -177,4 +283,36 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 	}
 }
 
+static int __init setup_mmap_rnd(void)
+{
+	struct cpuid cpu_id;
+
+	get_cpu_id(&cpu_id);
+	switch (cpu_id.machine) {
+	case 0x9672:
+	case 0x2064:
+	case 0x2066:
+	case 0x2084:
+	case 0x2086:
+	case 0x2094:
+	case 0x2096:
+	case 0x2097:
+	case 0x2098:
+	case 0x2817:
+	case 0x2818:
+	case 0x2827:
+	case 0x2828:
+		mmap_rnd_mask = 0x7ffUL;
+		mmap_align_mask = 0UL;
+		break;
+	case 0x2964:	/* z13 */
+	default:
+		mmap_rnd_mask = 0x3ff80UL;
+		mmap_align_mask = 0x7fUL;
+		break;
+	}
+	return 0;
+}
+early_initcall(setup_mmap_rnd);
+
 #endif
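
Design note: setup_mmap_rnd() lists every machine type back to the 9672 explicitly and lets z13 share the default branch, so an unknown (that is, future) machine automatically inherits the larger randomization range and the cache-synonym alignment rather than the legacy behavior. The setup_zero_pages() switch earlier in this series uses the same default-to-newest pattern.
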
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 3cf8cc03fff6..b2c1542f2ba2 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -527,7 +527,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
 		table += (gaddr >> 53) & 0x7ff;
 		if ((*table & _REGION_ENTRY_INVALID) &&
 		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
-				     gaddr & 0xffe0000000000000))
+				     gaddr & 0xffe0000000000000UL))
 			return -ENOMEM;
 		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 	}
@@ -535,7 +535,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
 		table += (gaddr >> 42) & 0x7ff;
 		if ((*table & _REGION_ENTRY_INVALID) &&
 		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
-				     gaddr & 0xfffffc0000000000))
+				     gaddr & 0xfffffc0000000000UL))
 			return -ENOMEM;
 		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 	}
@@ -543,7 +543,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
 		table += (gaddr >> 31) & 0x7ff;
 		if ((*table & _REGION_ENTRY_INVALID) &&
 		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
-				     gaddr & 0xffffffff80000000))
+				     gaddr & 0xffffffff80000000UL))
 			return -ENOMEM;
 		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 	}
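
The three gmap masks gain UL suffixes. An unsuffixed hex constant takes the first of int, unsigned int, long, unsigned long, long long, unsigned long long that can represent it, so on 64-bit s390 these values were already unsigned long; the suffix makes the type explicit and independent of the target's long width. A compile-time check of that rule, assuming an LP64 target:

    #include <stdio.h>

    int main(void)
    {
        /* On LP64 both expressions have type unsigned long; on an
         * ILP32 target the unsuffixed form would widen to
         * unsigned long long instead. */
        printf("no suffix: %s\n",
               _Generic(0xffe0000000000000,
                        unsigned long: "unsigned long",
                        unsigned long long: "unsigned long long",
                        default: "other"));
        printf("UL suffix: %s\n",
               _Generic(0xffe0000000000000UL,
                        unsigned long: "unsigned long",
                        unsigned long long: "unsigned long long",
                        default: "other"));
        return 0;
    }
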
diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c
index 62c5ea6d8682..8aa271b3d1ad 100644
--- a/arch/s390/pci/pci_mmio.c
+++ b/arch/s390/pci/pci_mmio.c
@@ -55,7 +55,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
 	ret = get_pfn(mmio_addr, VM_WRITE, &pfn);
 	if (ret)
 		goto out;
-	io_addr = (void *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK));
+	io_addr = (void __iomem *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK));
 
 	ret = -EFAULT;
 	if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE)
@@ -96,7 +96,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
 	ret = get_pfn(mmio_addr, VM_READ, &pfn);
 	if (ret)
 		goto out;
-	io_addr = (void __iomem *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK));
+	io_addr = (void __iomem *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK));
 
 	ret = -EFAULT;
 	if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE)
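
The casts gain __iomem, the sparse address-space annotation for MMIO pointers; to the compiler proper it expands to nothing, so this is an annotation-only change with identical generated code. A self-contained sketch of the annotation pattern, mimicking the kernel's definition; build_io_addr is an invented stand-in for the syscall's pointer setup:

    /* under sparse (__CHECKER__) __iomem tags a distinct address space;
     * for the real compiler it vanishes */
    #ifdef __CHECKER__
    #define __iomem __attribute__((noderef, address_space(2)))
    #else
    #define __iomem
    #endif

    static unsigned long build_io_addr(unsigned long pfn, unsigned long off)
    {
        void __iomem *io_addr;

        /* same shape as the hunk: pfn-based address plus page offset */
        io_addr = (void __iomem *)((pfn << 12) | (off & 0xfff));
        return (unsigned long)io_addr;
    }

    int main(void)
    {
        unsigned long a = build_io_addr(0x1234UL, 0x56UL);
        return a == ((0x1234UL << 12) | 0x56UL) ? 0 : 1;
    }
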