aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--arch/s390/Kbuild1
-rw-r--r--arch/s390/Kconfig418
-rw-r--r--arch/s390/boot/compressed/Makefile1
-rw-r--r--arch/s390/boot/compressed/misc.c45
-rw-r--r--arch/s390/defconfig14
-rw-r--r--arch/s390/include/asm/appldata.h2
-rw-r--r--arch/s390/include/asm/chsc.h28
-rw-r--r--arch/s390/include/asm/cio.h28
-rw-r--r--arch/s390/include/asm/cmpxchg.h61
-rw-r--r--arch/s390/include/asm/cpu_mf.h4
-rw-r--r--arch/s390/include/asm/css_chars.h39
-rw-r--r--arch/s390/include/asm/eadm.h124
-rw-r--r--arch/s390/include/asm/elf.h3
-rw-r--r--arch/s390/include/asm/etr.h8
-rw-r--r--arch/s390/include/asm/irq.h2
-rw-r--r--arch/s390/include/asm/isc.h1
-rw-r--r--arch/s390/include/asm/lowcore.h6
-rw-r--r--arch/s390/include/asm/mmu_context.h2
-rw-r--r--arch/s390/include/asm/percpu.h50
-rw-r--r--arch/s390/include/asm/processor.h59
-rw-r--r--arch/s390/include/asm/ptrace.h12
-rw-r--r--arch/s390/include/asm/runtime_instr.h98
-rw-r--r--arch/s390/include/asm/scsw.h38
-rw-r--r--arch/s390/include/asm/setup.h5
-rw-r--r--arch/s390/include/asm/smp.h4
-rw-r--r--arch/s390/include/asm/string.h8
-rw-r--r--arch/s390/include/asm/switch_to.h2
-rw-r--r--arch/s390/include/asm/sysinfo.h39
-rw-r--r--arch/s390/include/asm/topology.h20
-rw-r--r--arch/s390/include/asm/uaccess.h15
-rw-r--r--arch/s390/include/asm/unistd.h4
-rw-r--r--arch/s390/kernel/Makefile12
-rw-r--r--arch/s390/kernel/asm-offsets.c2
-rw-r--r--arch/s390/kernel/cache.c385
-rw-r--r--arch/s390/kernel/compat_wrapper.S13
-rw-r--r--arch/s390/kernel/crash.c14
-rw-r--r--arch/s390/kernel/crash_dump.c3
-rw-r--r--arch/s390/kernel/dis.c58
-rw-r--r--arch/s390/kernel/early.c38
-rw-r--r--arch/s390/kernel/entry64.S17
-rw-r--r--arch/s390/kernel/irq.c56
-rw-r--r--arch/s390/kernel/kprobes.c2
-rw-r--r--arch/s390/kernel/lgr.c29
-rw-r--r--arch/s390/kernel/machine_kexec.c9
-rw-r--r--arch/s390/kernel/process.c8
-rw-r--r--arch/s390/kernel/processor.c7
-rw-r--r--arch/s390/kernel/ptrace.c70
-rw-r--r--arch/s390/kernel/runtime_instr.c150
-rw-r--r--arch/s390/kernel/s390_ksyms.c2
-rw-r--r--arch/s390/kernel/setup.c54
-rw-r--r--arch/s390/kernel/smp.c46
-rw-r--r--arch/s390/kernel/syscalls.S2
-rw-r--r--arch/s390/kernel/sysinfo.c351
-rw-r--r--arch/s390/kernel/time.c4
-rw-r--r--arch/s390/kernel/topology.c27
-rw-r--r--arch/s390/kernel/traps.c41
-rw-r--r--arch/s390/kernel/vdso.c8
-rw-r--r--arch/s390/kernel/vtime.c3
-rw-r--r--arch/s390/kvm/Kconfig2
-rw-r--r--arch/s390/kvm/priv.c4
-rw-r--r--arch/s390/lib/Makefile3
-rw-r--r--arch/s390/lib/mem32.S92
-rw-r--r--arch/s390/lib/mem64.S88
-rw-r--r--arch/s390/lib/string.c56
-rw-r--r--arch/s390/mm/Makefile2
-rw-r--r--arch/s390/mm/extable.c81
-rw-r--r--arch/s390/mm/fault.c7
-rw-r--r--arch/s390/mm/gup.c37
-rw-r--r--arch/s390/mm/init.c4
-rw-r--r--arch/s390/mm/pgtable.c6
-rw-r--r--arch/s390/mm/vmem.c2
-rw-r--r--arch/s390/net/Makefile4
-rw-r--r--arch/s390/net/bpf_jit.S130
-rw-r--r--arch/s390/net/bpf_jit_comp.c776
-rw-r--r--block/partitions/ibm.c455
-rw-r--r--drivers/s390/block/Kconfig18
-rw-r--r--drivers/s390/block/Makefile6
-rw-r--r--drivers/s390/block/dasd_eckd.c21
-rw-r--r--drivers/s390/block/dasd_ioctl.c4
-rw-r--r--drivers/s390/block/scm_blk.c445
-rw-r--r--drivers/s390/block/scm_blk.h117
-rw-r--r--drivers/s390/block/scm_blk_cluster.c228
-rw-r--r--drivers/s390/block/scm_drv.c81
-rw-r--r--drivers/s390/char/con3270.c1
-rw-r--r--drivers/s390/char/monreader.c5
-rw-r--r--drivers/s390/char/sclp.c2
-rw-r--r--drivers/s390/char/sclp_rw.c2
-rw-r--r--drivers/s390/char/tape.h1
-rw-r--r--drivers/s390/char/tape_std.h4
-rw-r--r--drivers/s390/char/vmlogrdr.c2
-rw-r--r--drivers/s390/cio/Makefile2
-rw-r--r--drivers/s390/cio/chsc.c52
-rw-r--r--drivers/s390/cio/chsc.h43
-rw-r--r--drivers/s390/cio/cio.c2
-rw-r--r--drivers/s390/cio/css.c1
-rw-r--r--drivers/s390/cio/eadm_sch.c401
-rw-r--r--drivers/s390/cio/eadm_sch.h20
-rw-r--r--drivers/s390/cio/orb.h24
-rw-r--r--drivers/s390/cio/qdio_debug.h38
-rw-r--r--drivers/s390/cio/scm.c317
-rw-r--r--drivers/s390/crypto/Makefile3
-rw-r--r--drivers/s390/crypto/ap_bus.c209
-rw-r--r--drivers/s390/crypto/ap_bus.h35
-rw-r--r--drivers/s390/crypto/zcrypt_api.c187
-rw-r--r--drivers/s390/crypto/zcrypt_api.h19
-rw-r--r--drivers/s390/crypto/zcrypt_cex2a.c371
-rw-r--r--drivers/s390/crypto/zcrypt_cex4.c149
-rw-r--r--drivers/s390/crypto/zcrypt_cex4.h12
-rw-r--r--drivers/s390/crypto/zcrypt_debug.h59
-rw-r--r--drivers/s390/crypto/zcrypt_error.h13
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype50.c531
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype50.h39
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.c856
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.h169
-rw-r--r--drivers/s390/crypto/zcrypt_pcixcc.c781
-rw-r--r--drivers/s390/crypto/zcrypt_pcixcc.h3
-rw-r--r--drivers/s390/net/qeth_core_main.c15
-rw-r--r--include/linux/elf.h1
-rw-r--r--scripts/sortextable.c10
119 files changed, 7415 insertions, 2185 deletions
diff --git a/arch/s390/Kbuild b/arch/s390/Kbuild
index 9858476fa0fe..cc45d25487b0 100644
--- a/arch/s390/Kbuild
+++ b/arch/s390/Kbuild
@@ -5,3 +5,4 @@ obj-$(CONFIG_CRYPTO_HW) += crypto/
5obj-$(CONFIG_S390_HYPFS_FS) += hypfs/ 5obj-$(CONFIG_S390_HYPFS_FS) += hypfs/
6obj-$(CONFIG_APPLDATA_BASE) += appldata/ 6obj-$(CONFIG_APPLDATA_BASE) += appldata/
7obj-$(CONFIG_MATHEMU) += math-emu/ 7obj-$(CONFIG_MATHEMU) += math-emu/
8obj-y += net/
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index f5ab543396da..f9acddd9ace3 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -52,6 +52,12 @@ config PGSTE
52config ARCH_SUPPORTS_DEBUG_PAGEALLOC 52config ARCH_SUPPORTS_DEBUG_PAGEALLOC
53 def_bool y 53 def_bool y
54 54
55config KEXEC
56 def_bool y
57
58config AUDIT_ARCH
59 def_bool y
60
55config S390 61config S390
56 def_bool y 62 def_bool y
57 select USE_GENERIC_SMP_HELPERS if SMP 63 select USE_GENERIC_SMP_HELPERS if SMP
@@ -81,11 +87,13 @@ config S390
81 select HAVE_KERNEL_XZ 87 select HAVE_KERNEL_XZ
82 select HAVE_ARCH_MUTEX_CPU_RELAX 88 select HAVE_ARCH_MUTEX_CPU_RELAX
83 select HAVE_ARCH_JUMP_LABEL if !MARCH_G5 89 select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
90 select HAVE_BPF_JIT if 64BIT && PACK_STACK
84 select ARCH_SAVE_PAGE_KEYS if HIBERNATION 91 select ARCH_SAVE_PAGE_KEYS if HIBERNATION
85 select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE 92 select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
86 select HAVE_MEMBLOCK 93 select HAVE_MEMBLOCK
87 select HAVE_MEMBLOCK_NODE_MAP 94 select HAVE_MEMBLOCK_NODE_MAP
88 select HAVE_CMPXCHG_LOCAL 95 select HAVE_CMPXCHG_LOCAL
96 select HAVE_CMPXCHG_DOUBLE
89 select HAVE_VIRT_CPU_ACCOUNTING 97 select HAVE_VIRT_CPU_ACCOUNTING
90 select VIRT_CPU_ACCOUNTING 98 select VIRT_CPU_ACCOUNTING
91 select ARCH_DISCARD_MEMBLOCK 99 select ARCH_DISCARD_MEMBLOCK
@@ -132,9 +140,79 @@ source "init/Kconfig"
132 140
133source "kernel/Kconfig.freezer" 141source "kernel/Kconfig.freezer"
134 142
135menu "Base setup" 143menu "Processor type and features"
144
145config HAVE_MARCH_Z900_FEATURES
146 def_bool n
147
148config HAVE_MARCH_Z990_FEATURES
149 def_bool n
150 select HAVE_MARCH_Z900_FEATURES
151
152config HAVE_MARCH_Z9_109_FEATURES
153 def_bool n
154 select HAVE_MARCH_Z990_FEATURES
155
156config HAVE_MARCH_Z10_FEATURES
157 def_bool n
158 select HAVE_MARCH_Z9_109_FEATURES
159
160config HAVE_MARCH_Z196_FEATURES
161 def_bool n
162 select HAVE_MARCH_Z10_FEATURES
163
164choice
165 prompt "Processor type"
166 default MARCH_G5
167
168config MARCH_G5
169 bool "System/390 model G5 and G6"
170 depends on !64BIT
171 help
172 Select this to build a 31 bit kernel that works
173 on all ESA/390 and z/Architecture machines.
136 174
137comment "Processor type and features" 175config MARCH_Z900
176 bool "IBM zSeries model z800 and z900"
177 select HAVE_MARCH_Z900_FEATURES if 64BIT
178 help
179 Select this to enable optimizations for model z800/z900 (2064 and
180 2066 series). This will enable some optimizations that are not
181 available on older ESA/390 (31 Bit) only CPUs.
182
183config MARCH_Z990
184 bool "IBM zSeries model z890 and z990"
185 select HAVE_MARCH_Z990_FEATURES if 64BIT
186 help
187 Select this to enable optimizations for model z890/z990 (2084 and
188 2086 series). The kernel will be slightly faster but will not work
189 on older machines.
190
191config MARCH_Z9_109
192 bool "IBM System z9"
193 select HAVE_MARCH_Z9_109_FEATURES if 64BIT
194 help
195 Select this to enable optimizations for IBM System z9 (2094 and
196 2096 series). The kernel will be slightly faster but will not work
197 on older machines.
198
199config MARCH_Z10
200 bool "IBM System z10"
201 select HAVE_MARCH_Z10_FEATURES if 64BIT
202 help
203 Select this to enable optimizations for IBM System z10 (2097 and
204 2098 series). The kernel will be slightly faster but will not work
205 on older machines.
206
207config MARCH_Z196
208 bool "IBM zEnterprise 114 and 196"
209 select HAVE_MARCH_Z196_FEATURES if 64BIT
210 help
211 Select this to enable optimizations for IBM zEnterprise 114 and 196
212 (2818 and 2817 series). The kernel will be slightly faster but will
213 not work on older machines.
214
215endchoice
138 216
139config 64BIT 217config 64BIT
140 def_bool y 218 def_bool y
@@ -146,6 +224,24 @@ config 64BIT
146config 32BIT 224config 32BIT
147 def_bool y if !64BIT 225 def_bool y if !64BIT
148 226
227config COMPAT
228 def_bool y
229 prompt "Kernel support for 31 bit emulation"
230 depends on 64BIT
231 select COMPAT_BINFMT_ELF if BINFMT_ELF
232 select ARCH_WANT_OLD_COMPAT_IPC
233 help
234 Select this option if you want to enable your system kernel to
235 handle system-calls from ELF binaries for 31 bit ESA. This option
236 (and some other stuff like libraries and such) is needed for
237 executing 31 bit applications. It is safe to say "Y".
238
239config SYSVIPC_COMPAT
240 def_bool y if COMPAT && SYSVIPC
241
242config KEYS_COMPAT
243 def_bool y if COMPAT && KEYS
244
149config SMP 245config SMP
150 def_bool y 246 def_bool y
151 prompt "Symmetric multi-processing support" 247 prompt "Symmetric multi-processing support"
@@ -201,6 +297,8 @@ config SCHED_BOOK
201 Book scheduler support improves the CPU scheduler's decision making 297 Book scheduler support improves the CPU scheduler's decision making
202 when dealing with machines that have several books. 298 when dealing with machines that have several books.
203 299
300source kernel/Kconfig.preempt
301
204config MATHEMU 302config MATHEMU
205 def_bool y 303 def_bool y
206 prompt "IEEE FPU emulation" 304 prompt "IEEE FPU emulation"
@@ -210,100 +308,35 @@ config MATHEMU
210 on older ESA/390 machines. Say Y unless you know your machine doesn't 308 on older ESA/390 machines. Say Y unless you know your machine doesn't
211 need this. 309 need this.
212 310
213config COMPAT 311source kernel/Kconfig.hz
214 def_bool y
215 prompt "Kernel support for 31 bit emulation"
216 depends on 64BIT
217 select COMPAT_BINFMT_ELF if BINFMT_ELF
218 select ARCH_WANT_OLD_COMPAT_IPC
219 help
220 Select this option if you want to enable your system kernel to
221 handle system-calls from ELF binaries for 31 bit ESA. This option
222 (and some other stuff like libraries and such) is needed for
223 executing 31 bit applications. It is safe to say "Y".
224 312
225config SYSVIPC_COMPAT 313endmenu
226 def_bool y if COMPAT && SYSVIPC
227 314
228config KEYS_COMPAT 315menu "Memory setup"
229 def_bool y if COMPAT && KEYS
230 316
231config AUDIT_ARCH 317config ARCH_SPARSEMEM_ENABLE
232 def_bool y 318 def_bool y
319 select SPARSEMEM_VMEMMAP_ENABLE
320 select SPARSEMEM_VMEMMAP
321 select SPARSEMEM_STATIC if !64BIT
233 322
234config HAVE_MARCH_Z900_FEATURES 323config ARCH_SPARSEMEM_DEFAULT
235 def_bool n 324 def_bool y
236
237config HAVE_MARCH_Z990_FEATURES
238 def_bool n
239 select HAVE_MARCH_Z900_FEATURES
240
241config HAVE_MARCH_Z9_109_FEATURES
242 def_bool n
243 select HAVE_MARCH_Z990_FEATURES
244
245config HAVE_MARCH_Z10_FEATURES
246 def_bool n
247 select HAVE_MARCH_Z9_109_FEATURES
248
249config HAVE_MARCH_Z196_FEATURES
250 def_bool n
251 select HAVE_MARCH_Z10_FEATURES
252
253comment "Code generation options"
254
255choice
256 prompt "Processor type"
257 default MARCH_G5
258
259config MARCH_G5
260 bool "System/390 model G5 and G6"
261 depends on !64BIT
262 help
263 Select this to build a 31 bit kernel that works
264 on all ESA/390 and z/Architecture machines.
265
266config MARCH_Z900
267 bool "IBM zSeries model z800 and z900"
268 select HAVE_MARCH_Z900_FEATURES if 64BIT
269 help
270 Select this to enable optimizations for model z800/z900 (2064 and
271 2066 series). This will enable some optimizations that are not
272 available on older ESA/390 (31 Bit) only CPUs.
273 325
274config MARCH_Z990 326config ARCH_SELECT_MEMORY_MODEL
275 bool "IBM zSeries model z890 and z990" 327 def_bool y
276 select HAVE_MARCH_Z990_FEATURES if 64BIT
277 help
278 Select this to enable optimizations for model z890/z990 (2084 and
279 2086 series). The kernel will be slightly faster but will not work
280 on older machines.
281 328
282config MARCH_Z9_109 329config ARCH_ENABLE_MEMORY_HOTPLUG
283 bool "IBM System z9" 330 def_bool y if SPARSEMEM
284 select HAVE_MARCH_Z9_109_FEATURES if 64BIT
285 help
286 Select this to enable optimizations for IBM System z9 (2094 and
287 2096 series). The kernel will be slightly faster but will not work
288 on older machines.
289 331
290config MARCH_Z10 332config ARCH_ENABLE_MEMORY_HOTREMOVE
291 bool "IBM System z10" 333 def_bool y
292 select HAVE_MARCH_Z10_FEATURES if 64BIT
293 help
294 Select this to enable optimizations for IBM System z10 (2097 and
295 2098 series). The kernel will be slightly faster but will not work
296 on older machines.
297 334
298config MARCH_Z196 335config FORCE_MAX_ZONEORDER
299 bool "IBM zEnterprise 114 and 196" 336 int
300 select HAVE_MARCH_Z196_FEATURES if 64BIT 337 default "9"
301 help
302 Select this to enable optimizations for IBM zEnterprise 114 and 196
303 (2818 and 2817 series). The kernel will be slightly faster but will
304 not work on older machines.
305 338
306endchoice 339source "mm/Kconfig"
307 340
308config PACK_STACK 341config PACK_STACK
309 def_bool y 342 def_bool y
@@ -367,34 +400,9 @@ config WARN_DYNAMIC_STACK
367 400
368 Say N if you are unsure. 401 Say N if you are unsure.
369 402
370comment "Kernel preemption" 403endmenu
371
372source "kernel/Kconfig.preempt"
373
374config ARCH_SPARSEMEM_ENABLE
375 def_bool y
376 select SPARSEMEM_VMEMMAP_ENABLE
377 select SPARSEMEM_VMEMMAP
378 select SPARSEMEM_STATIC if !64BIT
379
380config ARCH_SPARSEMEM_DEFAULT
381 def_bool y
382
383config ARCH_SELECT_MEMORY_MODEL
384 def_bool y
385
386config ARCH_ENABLE_MEMORY_HOTPLUG
387 def_bool y if SPARSEMEM
388
389config ARCH_ENABLE_MEMORY_HOTREMOVE
390 def_bool y
391
392config ARCH_HIBERNATION_POSSIBLE
393 def_bool y if 64BIT
394
395source "mm/Kconfig"
396 404
397comment "I/O subsystem configuration" 405menu "I/O subsystem"
398 406
399config QDIO 407config QDIO
400 def_tristate y 408 def_tristate y
@@ -425,13 +433,102 @@ config CHSC_SCH
425 433
426 If unsure, say N. 434 If unsure, say N.
427 435
428comment "Misc" 436config SCM_BUS
437 def_bool y
438 depends on 64BIT
439 prompt "SCM bus driver"
440 help
441 Bus driver for Storage Class Memory.
442
443config EADM_SCH
444 def_tristate m
445 prompt "Support for EADM subchannels"
446 depends on SCM_BUS
447 help
448 This driver allows usage of EADM subchannels. EADM subchannels act
449 as a communication vehicle for SCM increments.
450
451 To compile this driver as a module, choose M here: the
452 module will be called eadm_sch.
453
454endmenu
455
456menu "Dump support"
457
458config CRASH_DUMP
459 bool "kernel crash dumps"
460 depends on 64BIT && SMP
461 select KEXEC
462 help
463 Generate crash dump after being started by kexec.
464 Crash dump kernels are loaded in the main kernel with kexec-tools
465 into a specially reserved region and then later executed after
466 a crash by kdump/kexec.
467 For more details see Documentation/kdump/kdump.txt
468
469config ZFCPDUMP
470 def_bool n
471 prompt "zfcpdump support"
472 select SMP
473 help
474 Select this option if you want to build an zfcpdump enabled kernel.
475 Refer to <file:Documentation/s390/zfcpdump.txt> for more details on this.
476
477endmenu
478
479menu "Executable file formats / Emulations"
429 480
430source "fs/Kconfig.binfmt" 481source "fs/Kconfig.binfmt"
431 482
432config FORCE_MAX_ZONEORDER 483config SECCOMP
433 int 484 def_bool y
434 default "9" 485 prompt "Enable seccomp to safely compute untrusted bytecode"
486 depends on PROC_FS
487 help
488 This kernel feature is useful for number crunching applications
489 that may need to compute untrusted bytecode during their
490 execution. By using pipes or other transports made available to
491 the process as file descriptors supporting the read/write
492 syscalls, it's possible to isolate those applications in
493 their own address space using seccomp. Once seccomp is
494 enabled via /proc/<pid>/seccomp, it cannot be disabled
495 and the task is only allowed to execute a few safe syscalls
496 defined by each seccomp mode.
497
498 If unsure, say Y.
499
500endmenu
501
502menu "Power Management"
503
504config ARCH_HIBERNATION_POSSIBLE
505 def_bool y if 64BIT
506
507source "kernel/power/Kconfig"
508
509endmenu
510
511source "net/Kconfig"
512
513config PCMCIA
514 def_bool n
515
516config CCW
517 def_bool y
518
519source "drivers/Kconfig"
520
521source "fs/Kconfig"
522
523source "arch/s390/Kconfig.debug"
524
525source "security/Kconfig"
526
527source "crypto/Kconfig"
528
529source "lib/Kconfig"
530
531menu "Virtualization"
435 532
436config PFAULT 533config PFAULT
437 def_bool y 534 def_bool y
@@ -447,8 +544,8 @@ config PFAULT
447 this option. 544 this option.
448 545
449config SHARED_KERNEL 546config SHARED_KERNEL
450 def_bool y 547 bool "VM shared kernel support"
451 prompt "VM shared kernel support" 548 depends on !JUMP_LABEL
452 help 549 help
453 Select this option, if you want to share the text segment of the 550 Select this option, if you want to share the text segment of the
454 Linux kernel between different VM guests. This reduces memory 551 Linux kernel between different VM guests. This reduces memory
@@ -543,8 +640,6 @@ config APPLDATA_NET_SUM
543 This can also be compiled as a module, which will be called 640 This can also be compiled as a module, which will be called
544 appldata_net_sum.o. 641 appldata_net_sum.o.
545 642
546source kernel/Kconfig.hz
547
548config S390_HYPFS_FS 643config S390_HYPFS_FS
549 def_bool y 644 def_bool y
550 prompt "s390 hypervisor file system support" 645 prompt "s390 hypervisor file system support"
@@ -553,90 +648,21 @@ config S390_HYPFS_FS
553 This is a virtual file system intended to provide accounting 648 This is a virtual file system intended to provide accounting
554 information in an s390 hypervisor environment. 649 information in an s390 hypervisor environment.
555 650
556config KEXEC 651source "arch/s390/kvm/Kconfig"
557 def_bool n
558 prompt "kexec system call"
559 help
560 kexec is a system call that implements the ability to shutdown your
561 current kernel, and to start another kernel. It is like a reboot
562 but is independent of hardware/microcode support.
563
564config CRASH_DUMP
565 bool "kernel crash dumps"
566 depends on 64BIT && SMP
567 select KEXEC
568 help
569 Generate crash dump after being started by kexec.
570 Crash dump kernels are loaded in the main kernel with kexec-tools
571 into a specially reserved region and then later executed after
572 a crash by kdump/kexec.
573 For more details see Documentation/kdump/kdump.txt
574
575config ZFCPDUMP
576 def_bool n
577 prompt "zfcpdump support"
578 select SMP
579 help
580 Select this option if you want to build an zfcpdump enabled kernel.
581 Refer to <file:Documentation/s390/zfcpdump.txt> for more details on this.
582 652
583config S390_GUEST 653config S390_GUEST
584 def_bool y 654 def_bool y
585 prompt "s390 guest support for KVM (EXPERIMENTAL)" 655 prompt "s390 support for virtio devices (EXPERIMENTAL)"
586 depends on 64BIT && EXPERIMENTAL 656 depends on 64BIT && EXPERIMENTAL
587 select VIRTUALIZATION 657 select VIRTUALIZATION
588 select VIRTIO 658 select VIRTIO
589 select VIRTIO_RING 659 select VIRTIO_RING
590 select VIRTIO_CONSOLE 660 select VIRTIO_CONSOLE
591 help 661 help
592 Select this option if you want to run the kernel as a guest under 662 Enabling this option adds support for virtio based paravirtual device
593 the KVM hypervisor. This will add detection for KVM as well as a 663 drivers on s390.
594 virtio transport. If KVM is detected, the virtio console will be
595 the default console.
596
597config SECCOMP
598 def_bool y
599 prompt "Enable seccomp to safely compute untrusted bytecode"
600 depends on PROC_FS
601 help
602 This kernel feature is useful for number crunching applications
603 that may need to compute untrusted bytecode during their
604 execution. By using pipes or other transports made available to
605 the process as file descriptors supporting the read/write
606 syscalls, it's possible to isolate those applications in
607 their own address space using seccomp. Once seccomp is
608 enabled via /proc/<pid>/seccomp, it cannot be disabled
609 and the task is only allowed to execute a few safe syscalls
610 defined by each seccomp mode.
611
612 If unsure, say Y.
613
614endmenu
615 664
616menu "Power Management" 665 Select this option if you want to run the kernel as a guest under
617 666 the KVM hypervisor.
618source "kernel/power/Kconfig"
619 667
620endmenu 668endmenu
621
622source "net/Kconfig"
623
624config PCMCIA
625 def_bool n
626
627config CCW
628 def_bool y
629
630source "drivers/Kconfig"
631
632source "fs/Kconfig"
633
634source "arch/s390/Kconfig.debug"
635
636source "security/Kconfig"
637
638source "crypto/Kconfig"
639
640source "lib/Kconfig"
641
642source "arch/s390/kvm/Kconfig"
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index 10e22c4ec4a7..3ad8f61c9985 100644
--- a/arch/s390/boot/compressed/Makefile
+++ b/arch/s390/boot/compressed/Makefile
@@ -11,6 +11,7 @@ targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 \
11 sizes.h head$(BITS).o 11 sizes.h head$(BITS).o
12 12
13KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 13KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
14KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
14KBUILD_CFLAGS += $(cflags-y) 15KBUILD_CFLAGS += $(cflags-y)
15KBUILD_CFLAGS += $(call cc-option,-mpacked-stack) 16KBUILD_CFLAGS += $(call cc-option,-mpacked-stack)
16KBUILD_CFLAGS += $(call cc-option,-ffreestanding) 17KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
index 465eca756feb..c4c6a1cf221b 100644
--- a/arch/s390/boot/compressed/misc.c
+++ b/arch/s390/boot/compressed/misc.c
@@ -71,34 +71,37 @@ void *memset(void *s, int c, size_t n)
71{ 71{
72 char *xs; 72 char *xs;
73 73
74 if (c == 0) 74 xs = s;
75 return __builtin_memset(s, 0, n); 75 while (n--)
76 76 *xs++ = c;
77 xs = (char *) s;
78 if (n > 0)
79 do {
80 *xs++ = c;
81 } while (--n > 0);
82 return s; 77 return s;
83} 78}
84 79
85void *memcpy(void *__dest, __const void *__src, size_t __n) 80void *memcpy(void *dest, const void *src, size_t n)
86{ 81{
87 return __builtin_memcpy(__dest, __src, __n); 82 const char *s = src;
83 char *d = dest;
84
85 while (n--)
86 *d++ = *s++;
87 return dest;
88} 88}
89 89
90void *memmove(void *__dest, __const void *__src, size_t __n) 90void *memmove(void *dest, const void *src, size_t n)
91{ 91{
92 char *d; 92 const char *s = src;
93 const char *s; 93 char *d = dest;
94 94
95 if (__dest <= __src) 95 if (d <= s) {
96 return __builtin_memcpy(__dest, __src, __n); 96 while (n--)
97 d = __dest + __n; 97 *d++ = *s++;
98 s = __src + __n; 98 } else {
99 while (__n--) 99 d += n;
100 *--d = *--s; 100 s += n;
101 return __dest; 101 while (n--)
102 *--d = *--s;
103 }
104 return dest;
102} 105}
103 106
104static void error(char *x) 107static void error(char *x)
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index f39cd710980b..b74400e3e035 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -16,8 +16,8 @@ CONFIG_CGROUPS=y
16CONFIG_CPUSETS=y 16CONFIG_CPUSETS=y
17CONFIG_CGROUP_CPUACCT=y 17CONFIG_CGROUP_CPUACCT=y
18CONFIG_RESOURCE_COUNTERS=y 18CONFIG_RESOURCE_COUNTERS=y
19CONFIG_CGROUP_MEMCG=y 19CONFIG_MEMCG=y
20CONFIG_CGROUP_MEM_RES_CTLR_SWAP=y 20CONFIG_MEMCG_SWAP=y
21CONFIG_CGROUP_SCHED=y 21CONFIG_CGROUP_SCHED=y
22CONFIG_RT_GROUP_SCHED=y 22CONFIG_RT_GROUP_SCHED=y
23CONFIG_BLK_CGROUP=y 23CONFIG_BLK_CGROUP=y
@@ -32,20 +32,19 @@ CONFIG_EXPERT=y
32CONFIG_PROFILING=y 32CONFIG_PROFILING=y
33CONFIG_OPROFILE=y 33CONFIG_OPROFILE=y
34CONFIG_KPROBES=y 34CONFIG_KPROBES=y
35CONFIG_JUMP_LABEL=y
35CONFIG_MODULES=y 36CONFIG_MODULES=y
36CONFIG_MODULE_UNLOAD=y 37CONFIG_MODULE_UNLOAD=y
37CONFIG_MODVERSIONS=y 38CONFIG_MODVERSIONS=y
38CONFIG_PARTITION_ADVANCED=y 39CONFIG_PARTITION_ADVANCED=y
39CONFIG_IBM_PARTITION=y 40CONFIG_IBM_PARTITION=y
40CONFIG_DEFAULT_DEADLINE=y 41CONFIG_DEFAULT_DEADLINE=y
41CONFIG_PREEMPT=y 42CONFIG_HZ_100=y
42CONFIG_MEMORY_HOTPLUG=y 43CONFIG_MEMORY_HOTPLUG=y
43CONFIG_MEMORY_HOTREMOVE=y 44CONFIG_MEMORY_HOTREMOVE=y
44CONFIG_KSM=y 45CONFIG_KSM=y
45CONFIG_BINFMT_MISC=m
46CONFIG_CMM=m
47CONFIG_HZ_100=y
48CONFIG_CRASH_DUMP=y 46CONFIG_CRASH_DUMP=y
47CONFIG_BINFMT_MISC=m
49CONFIG_HIBERNATION=y 48CONFIG_HIBERNATION=y
50CONFIG_PACKET=y 49CONFIG_PACKET=y
51CONFIG_UNIX=y 50CONFIG_UNIX=y
@@ -75,6 +74,7 @@ CONFIG_NET_CLS_RSVP=m
75CONFIG_NET_CLS_RSVP6=m 74CONFIG_NET_CLS_RSVP6=m
76CONFIG_NET_CLS_ACT=y 75CONFIG_NET_CLS_ACT=y
77CONFIG_NET_ACT_POLICE=y 76CONFIG_NET_ACT_POLICE=y
77CONFIG_BPF_JIT=y
78CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 78CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
79CONFIG_DEVTMPFS=y 79CONFIG_DEVTMPFS=y
80CONFIG_BLK_DEV_LOOP=m 80CONFIG_BLK_DEV_LOOP=m
@@ -121,7 +121,6 @@ CONFIG_DEBUG_NOTIFIERS=y
121CONFIG_RCU_TRACE=y 121CONFIG_RCU_TRACE=y
122CONFIG_KPROBES_SANITY_TEST=y 122CONFIG_KPROBES_SANITY_TEST=y
123CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y 123CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
124CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
125CONFIG_LATENCYTOP=y 124CONFIG_LATENCYTOP=y
126CONFIG_DEBUG_PAGEALLOC=y 125CONFIG_DEBUG_PAGEALLOC=y
127CONFIG_BLK_DEV_IO_TRACE=y 126CONFIG_BLK_DEV_IO_TRACE=y
@@ -173,3 +172,4 @@ CONFIG_CRYPTO_SHA512_S390=m
173CONFIG_CRYPTO_DES_S390=m 172CONFIG_CRYPTO_DES_S390=m
174CONFIG_CRYPTO_AES_S390=m 173CONFIG_CRYPTO_AES_S390=m
175CONFIG_CRC7=m 174CONFIG_CRC7=m
175CONFIG_CMM=m
diff --git a/arch/s390/include/asm/appldata.h b/arch/s390/include/asm/appldata.h
index f328294faeae..32a705987156 100644
--- a/arch/s390/include/asm/appldata.h
+++ b/arch/s390/include/asm/appldata.h
@@ -70,7 +70,7 @@ static inline int appldata_asm(struct appldata_product_id *id,
70 int ry; 70 int ry;
71 71
72 if (!MACHINE_IS_VM) 72 if (!MACHINE_IS_VM)
73 return -ENOSYS; 73 return -EOPNOTSUPP;
74 parm_list.diag = 0xdc; 74 parm_list.diag = 0xdc;
75 parm_list.function = fn; 75 parm_list.function = fn;
76 parm_list.parlist_length = sizeof(parm_list); 76 parm_list.parlist_length = sizeof(parm_list);
diff --git a/arch/s390/include/asm/chsc.h b/arch/s390/include/asm/chsc.h
index bf115b49f444..aea451fd182e 100644
--- a/arch/s390/include/asm/chsc.h
+++ b/arch/s390/include/asm/chsc.h
@@ -125,32 +125,4 @@ struct chsc_cpd_info {
125#define CHSC_INFO_CPD _IOWR(CHSC_IOCTL_MAGIC, 0x87, struct chsc_cpd_info) 125#define CHSC_INFO_CPD _IOWR(CHSC_IOCTL_MAGIC, 0x87, struct chsc_cpd_info)
126#define CHSC_INFO_DCAL _IOWR(CHSC_IOCTL_MAGIC, 0x88, struct chsc_dcal) 126#define CHSC_INFO_DCAL _IOWR(CHSC_IOCTL_MAGIC, 0x88, struct chsc_dcal)
127 127
128#ifdef __KERNEL__
129
130struct css_general_char {
131 u64 : 12;
132 u32 dynio : 1; /* bit 12 */
133 u32 : 28;
134 u32 aif : 1; /* bit 41 */
135 u32 : 3;
136 u32 mcss : 1; /* bit 45 */
137 u32 fcs : 1; /* bit 46 */
138 u32 : 1;
139 u32 ext_mb : 1; /* bit 48 */
140 u32 : 7;
141 u32 aif_tdd : 1; /* bit 56 */
142 u32 : 1;
143 u32 qebsm : 1; /* bit 58 */
144 u32 : 8;
145 u32 aif_osa : 1; /* bit 67 */
146 u32 : 14;
147 u32 cib : 1; /* bit 82 */
148 u32 : 5;
149 u32 fcx : 1; /* bit 88 */
150 u32 : 7;
151}__attribute__((packed));
152
153extern struct css_general_char css_general_characteristics;
154
155#endif /* __KERNEL__ */
156#endif 128#endif
diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h
index 77043aa44d67..55bde6035216 100644
--- a/arch/s390/include/asm/cio.h
+++ b/arch/s390/include/asm/cio.h
@@ -80,6 +80,18 @@ struct erw {
80} __attribute__ ((packed)); 80} __attribute__ ((packed));
81 81
82/** 82/**
83 * struct erw_eadm - EADM Subchannel extended report word
84 * @b: aob error
85 * @r: arsb error
86 */
87struct erw_eadm {
88 __u32 : 16;
89 __u32 b : 1;
90 __u32 r : 1;
91 __u32 : 14;
92} __packed;
93
94/**
83 * struct sublog - subchannel logout area 95 * struct sublog - subchannel logout area
84 * @res0: reserved 96 * @res0: reserved
85 * @esf: extended status flags 97 * @esf: extended status flags
@@ -170,9 +182,22 @@ struct esw3 {
170} __attribute__ ((packed)); 182} __attribute__ ((packed));
171 183
172/** 184/**
185 * struct esw_eadm - EADM Subchannel Extended Status Word (ESW)
186 * @sublog: subchannel logout
187 * @erw: extended report word
188 */
189struct esw_eadm {
190 __u32 sublog;
191 struct erw_eadm erw;
192 __u32 : 32;
193 __u32 : 32;
194 __u32 : 32;
195} __packed;
196
197/**
173 * struct irb - interruption response block 198 * struct irb - interruption response block
174 * @scsw: subchannel status word 199 * @scsw: subchannel status word
175 * @esw: extened status word, 4 formats 200 * @esw: extened status word
176 * @ecw: extended control word 201 * @ecw: extended control word
177 * 202 *
178 * The irb that is handed to the device driver when an interrupt occurs. For 203 * The irb that is handed to the device driver when an interrupt occurs. For
@@ -191,6 +216,7 @@ struct irb {
191 struct esw1 esw1; 216 struct esw1 esw1;
192 struct esw2 esw2; 217 struct esw2 esw2;
193 struct esw3 esw3; 218 struct esw3 esw3;
219 struct esw_eadm eadm;
194 } esw; 220 } esw;
195 __u8 ecw[32]; 221 __u8 ecw[32];
196} __attribute__ ((packed,aligned(4))); 222} __attribute__ ((packed,aligned(4)));
diff --git a/arch/s390/include/asm/cmpxchg.h b/arch/s390/include/asm/cmpxchg.h
index 8d798e962b63..0f636cbdf342 100644
--- a/arch/s390/include/asm/cmpxchg.h
+++ b/arch/s390/include/asm/cmpxchg.h
@@ -7,7 +7,9 @@
7#ifndef __ASM_CMPXCHG_H 7#ifndef __ASM_CMPXCHG_H
8#define __ASM_CMPXCHG_H 8#define __ASM_CMPXCHG_H
9 9
10#include <linux/mmdebug.h>
10#include <linux/types.h> 11#include <linux/types.h>
12#include <linux/bug.h>
11 13
12extern void __xchg_called_with_bad_pointer(void); 14extern void __xchg_called_with_bad_pointer(void);
13 15
@@ -203,6 +205,65 @@ static inline unsigned long long __cmpxchg64(void *ptr,
203}) 205})
204#endif /* CONFIG_64BIT */ 206#endif /* CONFIG_64BIT */
205 207
208#define __cmpxchg_double_op(p1, p2, o1, o2, n1, n2, insn) \
209({ \
210 register __typeof__(*(p1)) __old1 asm("2") = (o1); \
211 register __typeof__(*(p2)) __old2 asm("3") = (o2); \
212 register __typeof__(*(p1)) __new1 asm("4") = (n1); \
213 register __typeof__(*(p2)) __new2 asm("5") = (n2); \
214 int cc; \
215 asm volatile( \
216 insn " %[old],%[new],%[ptr]\n" \
217 " ipm %[cc]\n" \
218 " srl %[cc],28" \
219 : [cc] "=d" (cc), [old] "+d" (__old1), "+d" (__old2) \
220 : [new] "d" (__new1), "d" (__new2), \
221 [ptr] "Q" (*(p1)), "Q" (*(p2)) \
222 : "memory", "cc"); \
223 !cc; \
224})
225
226#define __cmpxchg_double_4(p1, p2, o1, o2, n1, n2) \
227 __cmpxchg_double_op(p1, p2, o1, o2, n1, n2, "cds")
228
229#define __cmpxchg_double_8(p1, p2, o1, o2, n1, n2) \
230 __cmpxchg_double_op(p1, p2, o1, o2, n1, n2, "cdsg")
231
232extern void __cmpxchg_double_called_with_bad_pointer(void);
233
234#define __cmpxchg_double(p1, p2, o1, o2, n1, n2) \
235({ \
236 int __ret; \
237 switch (sizeof(*(p1))) { \
238 case 4: \
239 __ret = __cmpxchg_double_4(p1, p2, o1, o2, n1, n2); \
240 break; \
241 case 8: \
242 __ret = __cmpxchg_double_8(p1, p2, o1, o2, n1, n2); \
243 break; \
244 default: \
245 __cmpxchg_double_called_with_bad_pointer(); \
246 } \
247 __ret; \
248})
249
250#define cmpxchg_double(p1, p2, o1, o2, n1, n2) \
251({ \
252 __typeof__(p1) __p1 = (p1); \
253 __typeof__(p2) __p2 = (p2); \
254 int __ret; \
255 BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long)); \
256 BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long)); \
257 VM_BUG_ON((unsigned long)((__p1) + 1) != (unsigned long)(__p2));\
258 if (sizeof(long) == 4) \
259 __ret = __cmpxchg_double_4(__p1, __p2, o1, o2, n1, n2); \
260 else \
261 __ret = __cmpxchg_double_8(__p1, __p2, o1, o2, n1, n2); \
262 __ret; \
263})
264
265#define system_has_cmpxchg_double() 1
266
206#include <asm-generic/cmpxchg-local.h> 267#include <asm-generic/cmpxchg-local.h>
207 268
208static inline unsigned long __cmpxchg_local(void *ptr, 269static inline unsigned long __cmpxchg_local(void *ptr,
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
index a3afecdae145..35f0020b7ba7 100644
--- a/arch/s390/include/asm/cpu_mf.h
+++ b/arch/s390/include/asm/cpu_mf.h
@@ -21,11 +21,15 @@
21#define CPU_MF_INT_SF_LSDA (1 << 22) /* loss of sample data alert */ 21#define CPU_MF_INT_SF_LSDA (1 << 22) /* loss of sample data alert */
22#define CPU_MF_INT_CF_CACA (1 << 7) /* counter auth. change alert */ 22#define CPU_MF_INT_CF_CACA (1 << 7) /* counter auth. change alert */
23#define CPU_MF_INT_CF_LCDA (1 << 6) /* loss of counter data alert */ 23#define CPU_MF_INT_CF_LCDA (1 << 6) /* loss of counter data alert */
24#define CPU_MF_INT_RI_HALTED (1 << 5) /* run-time instr. halted */
25#define CPU_MF_INT_RI_BUF_FULL (1 << 4) /* run-time instr. program
26 buffer full */
24 27
25#define CPU_MF_INT_CF_MASK (CPU_MF_INT_CF_CACA|CPU_MF_INT_CF_LCDA) 28#define CPU_MF_INT_CF_MASK (CPU_MF_INT_CF_CACA|CPU_MF_INT_CF_LCDA)
26#define CPU_MF_INT_SF_MASK (CPU_MF_INT_SF_IAE|CPU_MF_INT_SF_ISE| \ 29#define CPU_MF_INT_SF_MASK (CPU_MF_INT_SF_IAE|CPU_MF_INT_SF_ISE| \
27 CPU_MF_INT_SF_PRA|CPU_MF_INT_SF_SACA| \ 30 CPU_MF_INT_SF_PRA|CPU_MF_INT_SF_SACA| \
28 CPU_MF_INT_SF_LSDA) 31 CPU_MF_INT_SF_LSDA)
32#define CPU_MF_INT_RI_MASK (CPU_MF_INT_RI_HALTED|CPU_MF_INT_RI_BUF_FULL)
29 33
30/* CPU measurement facility support */ 34/* CPU measurement facility support */
31static inline int cpum_cf_avail(void) 35static inline int cpum_cf_avail(void)
diff --git a/arch/s390/include/asm/css_chars.h b/arch/s390/include/asm/css_chars.h
new file mode 100644
index 000000000000..a06ebc2623fb
--- /dev/null
+++ b/arch/s390/include/asm/css_chars.h
@@ -0,0 +1,39 @@
1#ifndef _ASM_CSS_CHARS_H
2#define _ASM_CSS_CHARS_H
3
4#include <linux/types.h>
5
6#ifdef __KERNEL__
7
8struct css_general_char {
9 u64 : 12;
10 u32 dynio : 1; /* bit 12 */
11 u32 : 4;
12 u32 eadm : 1; /* bit 17 */
13 u32 : 23;
14 u32 aif : 1; /* bit 41 */
15 u32 : 3;
16 u32 mcss : 1; /* bit 45 */
17 u32 fcs : 1; /* bit 46 */
18 u32 : 1;
19 u32 ext_mb : 1; /* bit 48 */
20 u32 : 7;
21 u32 aif_tdd : 1; /* bit 56 */
22 u32 : 1;
23 u32 qebsm : 1; /* bit 58 */
24 u32 : 8;
25 u32 aif_osa : 1; /* bit 67 */
26 u32 : 12;
27 u32 eadm_rf : 1; /* bit 80 */
28 u32 : 1;
29 u32 cib : 1; /* bit 82 */
30 u32 : 5;
31 u32 fcx : 1; /* bit 88 */
32 u32 : 19;
33 u32 alt_ssi : 1; /* bit 108 */
34} __packed;
35
36extern struct css_general_char css_general_characteristics;
37
38#endif /* __KERNEL__ */
39#endif
diff --git a/arch/s390/include/asm/eadm.h b/arch/s390/include/asm/eadm.h
new file mode 100644
index 000000000000..8d4847191ecc
--- /dev/null
+++ b/arch/s390/include/asm/eadm.h
@@ -0,0 +1,124 @@
1#ifndef _ASM_S390_EADM_H
2#define _ASM_S390_EADM_H
3
4#include <linux/types.h>
5#include <linux/device.h>
6
7struct arqb {
8 u64 data;
9 u16 fmt:4;
10 u16:12;
11 u16 cmd_code;
12 u16:16;
13 u16 msb_count;
14 u32 reserved[12];
15} __packed;
16
17#define ARQB_CMD_MOVE 1
18
19struct arsb {
20 u16 fmt:4;
21 u32:28;
22 u8 ef;
23 u8:8;
24 u8 ecbi;
25 u8:8;
26 u8 fvf;
27 u16:16;
28 u8 eqc;
29 u32:32;
30 u64 fail_msb;
31 u64 fail_aidaw;
32 u64 fail_ms;
33 u64 fail_scm;
34 u32 reserved[4];
35} __packed;
36
37struct msb {
38 u8 fmt:4;
39 u8 oc:4;
40 u8 flags;
41 u16:12;
42 u16 bs:4;
43 u32 blk_count;
44 u64 data_addr;
45 u64 scm_addr;
46 u64:64;
47} __packed;
48
49struct aidaw {
50 u8 flags;
51 u32 :24;
52 u32 :32;
53 u64 data_addr;
54} __packed;
55
56#define MSB_OC_CLEAR 0
57#define MSB_OC_READ 1
58#define MSB_OC_WRITE 2
59#define MSB_OC_RELEASE 3
60
61#define MSB_FLAG_BNM 0x80
62#define MSB_FLAG_IDA 0x40
63
64#define MSB_BS_4K 0
65#define MSB_BS_1M 1
66
67#define AOB_NR_MSB 124
68
69struct aob {
70 struct arqb request;
71 struct arsb response;
72 struct msb msb[AOB_NR_MSB];
73} __packed __aligned(PAGE_SIZE);
74
75struct aob_rq_header {
76 struct scm_device *scmdev;
77 char data[0];
78};
79
80struct scm_device {
81 u64 address;
82 u64 size;
83 unsigned int nr_max_block;
84 struct device dev;
85 struct {
86 unsigned int persistence:4;
87 unsigned int oper_state:4;
88 unsigned int data_state:4;
89 unsigned int rank:4;
90 unsigned int release:1;
91 unsigned int res_id:8;
92 } __packed attrs;
93};
94
95#define OP_STATE_GOOD 1
96#define OP_STATE_TEMP_ERR 2
97#define OP_STATE_PERM_ERR 3
98
99struct scm_driver {
100 struct device_driver drv;
101 int (*probe) (struct scm_device *scmdev);
102 int (*remove) (struct scm_device *scmdev);
103 void (*notify) (struct scm_device *scmdev);
104 void (*handler) (struct scm_device *scmdev, void *data, int error);
105};
106
107int scm_driver_register(struct scm_driver *scmdrv);
108void scm_driver_unregister(struct scm_driver *scmdrv);
109
110int scm_start_aob(struct aob *aob);
111void scm_irq_handler(struct aob *aob, int error);
112
113struct eadm_ops {
114 int (*eadm_start) (struct aob *aob);
115 struct module *owner;
116};
117
118int scm_get_ref(void);
119void scm_put_ref(void);
120
121void register_eadm_ops(struct eadm_ops *ops);
122void unregister_eadm_ops(struct eadm_ops *ops);
123
124#endif /* _ASM_S390_EADM_H */
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 9b94a160fe7f..178ff966a8ba 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -101,6 +101,7 @@
101#define HWCAP_S390_HPAGE 128 101#define HWCAP_S390_HPAGE 128
102#define HWCAP_S390_ETF3EH 256 102#define HWCAP_S390_ETF3EH 256
103#define HWCAP_S390_HIGH_GPRS 512 103#define HWCAP_S390_HIGH_GPRS 512
104#define HWCAP_S390_TE 1024
104 105
105/* 106/*
106 * These are used to set parameters in the core dumps. 107 * These are used to set parameters in the core dumps.
@@ -212,4 +213,6 @@ int arch_setup_additional_pages(struct linux_binprm *, int);
212extern unsigned long arch_randomize_brk(struct mm_struct *mm); 213extern unsigned long arch_randomize_brk(struct mm_struct *mm);
213#define arch_randomize_brk arch_randomize_brk 214#define arch_randomize_brk arch_randomize_brk
214 215
216void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
217
215#endif 218#endif
diff --git a/arch/s390/include/asm/etr.h b/arch/s390/include/asm/etr.h
index a24b03b9fb64..629b79a93165 100644
--- a/arch/s390/include/asm/etr.h
+++ b/arch/s390/include/asm/etr.h
@@ -140,7 +140,7 @@ struct etr_ptff_qto {
140/* Inline assembly helper functions */ 140/* Inline assembly helper functions */
141static inline int etr_setr(struct etr_eacr *ctrl) 141static inline int etr_setr(struct etr_eacr *ctrl)
142{ 142{
143 int rc = -ENOSYS; 143 int rc = -EOPNOTSUPP;
144 144
145 asm volatile( 145 asm volatile(
146 " .insn s,0xb2160000,%1\n" 146 " .insn s,0xb2160000,%1\n"
@@ -154,7 +154,7 @@ static inline int etr_setr(struct etr_eacr *ctrl)
154/* Stores a format 1 aib with 64 bytes */ 154/* Stores a format 1 aib with 64 bytes */
155static inline int etr_stetr(struct etr_aib *aib) 155static inline int etr_stetr(struct etr_aib *aib)
156{ 156{
157 int rc = -ENOSYS; 157 int rc = -EOPNOTSUPP;
158 158
159 asm volatile( 159 asm volatile(
160 " .insn s,0xb2170000,%1\n" 160 " .insn s,0xb2170000,%1\n"
@@ -169,7 +169,7 @@ static inline int etr_stetr(struct etr_aib *aib)
169static inline int etr_steai(struct etr_aib *aib, unsigned int func) 169static inline int etr_steai(struct etr_aib *aib, unsigned int func)
170{ 170{
171 register unsigned int reg0 asm("0") = func; 171 register unsigned int reg0 asm("0") = func;
172 int rc = -ENOSYS; 172 int rc = -EOPNOTSUPP;
173 173
174 asm volatile( 174 asm volatile(
175 " .insn s,0xb2b30000,%1\n" 175 " .insn s,0xb2b30000,%1\n"
@@ -190,7 +190,7 @@ static inline int etr_ptff(void *ptff_block, unsigned int func)
190{ 190{
191 register unsigned int reg0 asm("0") = func; 191 register unsigned int reg0 asm("0") = func;
192 register unsigned long reg1 asm("1") = (unsigned long) ptff_block; 192 register unsigned long reg1 asm("1") = (unsigned long) ptff_block;
193 int rc = -ENOSYS; 193 int rc = -EOPNOTSUPP;
194 194
195 asm volatile( 195 asm volatile(
196 " .word 0x0104\n" 196 " .word 0x0104\n"
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index 2b9d41899d21..6703dd986fd4 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -19,6 +19,7 @@ enum interruption_class {
19 EXTINT_IUC, 19 EXTINT_IUC,
20 EXTINT_CMS, 20 EXTINT_CMS,
21 EXTINT_CMC, 21 EXTINT_CMC,
22 EXTINT_CMR,
22 IOINT_CIO, 23 IOINT_CIO,
23 IOINT_QAI, 24 IOINT_QAI,
24 IOINT_DAS, 25 IOINT_DAS,
@@ -30,6 +31,7 @@ enum interruption_class {
30 IOINT_CLW, 31 IOINT_CLW,
31 IOINT_CTC, 32 IOINT_CTC,
32 IOINT_APB, 33 IOINT_APB,
34 IOINT_ADM,
33 IOINT_CSC, 35 IOINT_CSC,
34 NMI_NMI, 36 NMI_NMI,
35 NR_IRQS, 37 NR_IRQS,
diff --git a/arch/s390/include/asm/isc.h b/arch/s390/include/asm/isc.h
index 1420a1115948..5ae606456b0a 100644
--- a/arch/s390/include/asm/isc.h
+++ b/arch/s390/include/asm/isc.h
@@ -14,6 +14,7 @@
14/* Regular I/O interrupts. */ 14/* Regular I/O interrupts. */
15#define IO_SCH_ISC 3 /* regular I/O subchannels */ 15#define IO_SCH_ISC 3 /* regular I/O subchannels */
16#define CONSOLE_ISC 1 /* console I/O subchannel */ 16#define CONSOLE_ISC 1 /* console I/O subchannel */
17#define EADM_SCH_ISC 4 /* EADM subchannels */
17#define CHSC_SCH_ISC 7 /* CHSC subchannels */ 18#define CHSC_SCH_ISC 7 /* CHSC subchannels */
18/* Adapter interrupts. */ 19/* Adapter interrupts. */
19#define QDIO_AIRQ_ISC IO_SCH_ISC /* I/O subchannel in qdio mode */ 20#define QDIO_AIRQ_ISC IO_SCH_ISC /* I/O subchannel in qdio mode */
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index aab5555bbbda..bbf8141408cd 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -329,9 +329,13 @@ struct _lowcore {
329 __u8 pad_0x1338[0x1340-0x1338]; /* 0x1338 */ 329 __u8 pad_0x1338[0x1340-0x1338]; /* 0x1338 */
330 __u32 access_regs_save_area[16]; /* 0x1340 */ 330 __u32 access_regs_save_area[16]; /* 0x1340 */
331 __u64 cregs_save_area[16]; /* 0x1380 */ 331 __u64 cregs_save_area[16]; /* 0x1380 */
332 __u8 pad_0x1400[0x1800-0x1400]; /* 0x1400 */
333
334 /* Transaction abort diagnostic block */
335 __u8 pgm_tdb[256]; /* 0x1800 */
332 336
333 /* align to the top of the prefix area */ 337 /* align to the top of the prefix area */
334 __u8 pad_0x1400[0x2000-0x1400]; /* 0x1400 */ 338 __u8 pad_0x1900[0x2000-0x1900]; /* 0x1900 */
335} __packed; 339} __packed;
336 340
337#endif /* CONFIG_32BIT */ 341#endif /* CONFIG_32BIT */
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index b749c5733657..084e7755ed9b 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -57,7 +57,7 @@ static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
57 pgd_t *pgd = mm->pgd; 57 pgd_t *pgd = mm->pgd;
58 58
59 S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd); 59 S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
60 if (addressing_mode != HOME_SPACE_MODE) { 60 if (s390_user_mode != HOME_SPACE_MODE) {
61 /* Load primary space page table origin. */ 61 /* Load primary space page table origin. */
62 asm volatile(LCTL_OPCODE" 1,1,%0\n" 62 asm volatile(LCTL_OPCODE" 1,1,%0\n"
63 : : "m" (S390_lowcore.user_asce) ); 63 : : "m" (S390_lowcore.user_asce) );
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index 6537e72e0853..86fe0ee2cee5 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -20,7 +20,7 @@
20#endif 20#endif
21 21
22#define arch_this_cpu_to_op(pcp, val, op) \ 22#define arch_this_cpu_to_op(pcp, val, op) \
23do { \ 23({ \
24 typedef typeof(pcp) pcp_op_T__; \ 24 typedef typeof(pcp) pcp_op_T__; \
25 pcp_op_T__ old__, new__, prev__; \ 25 pcp_op_T__ old__, new__, prev__; \
26 pcp_op_T__ *ptr__; \ 26 pcp_op_T__ *ptr__; \
@@ -39,13 +39,19 @@ do { \
39 } \ 39 } \
40 } while (prev__ != old__); \ 40 } while (prev__ != old__); \
41 preempt_enable(); \ 41 preempt_enable(); \
42} while (0) 42 new__; \
43})
43 44
44#define this_cpu_add_1(pcp, val) arch_this_cpu_to_op(pcp, val, +) 45#define this_cpu_add_1(pcp, val) arch_this_cpu_to_op(pcp, val, +)
45#define this_cpu_add_2(pcp, val) arch_this_cpu_to_op(pcp, val, +) 46#define this_cpu_add_2(pcp, val) arch_this_cpu_to_op(pcp, val, +)
46#define this_cpu_add_4(pcp, val) arch_this_cpu_to_op(pcp, val, +) 47#define this_cpu_add_4(pcp, val) arch_this_cpu_to_op(pcp, val, +)
47#define this_cpu_add_8(pcp, val) arch_this_cpu_to_op(pcp, val, +) 48#define this_cpu_add_8(pcp, val) arch_this_cpu_to_op(pcp, val, +)
48 49
50#define this_cpu_add_return_1(pcp, val) arch_this_cpu_to_op(pcp, val, +)
51#define this_cpu_add_return_2(pcp, val) arch_this_cpu_to_op(pcp, val, +)
52#define this_cpu_add_return_4(pcp, val) arch_this_cpu_to_op(pcp, val, +)
53#define this_cpu_add_return_8(pcp, val) arch_this_cpu_to_op(pcp, val, +)
54
49#define this_cpu_and_1(pcp, val) arch_this_cpu_to_op(pcp, val, &) 55#define this_cpu_and_1(pcp, val) arch_this_cpu_to_op(pcp, val, &)
50#define this_cpu_and_2(pcp, val) arch_this_cpu_to_op(pcp, val, &) 56#define this_cpu_and_2(pcp, val) arch_this_cpu_to_op(pcp, val, &)
51#define this_cpu_and_4(pcp, val) arch_this_cpu_to_op(pcp, val, &) 57#define this_cpu_and_4(pcp, val) arch_this_cpu_to_op(pcp, val, &)
@@ -61,7 +67,7 @@ do { \
61#define this_cpu_xor_4(pcp, val) arch_this_cpu_to_op(pcp, val, ^) 67#define this_cpu_xor_4(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
62#define this_cpu_xor_8(pcp, val) arch_this_cpu_to_op(pcp, val, ^) 68#define this_cpu_xor_8(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
63 69
64#define arch_this_cpu_cmpxchg(pcp, oval, nval) \ 70#define arch_this_cpu_cmpxchg(pcp, oval, nval) \
65({ \ 71({ \
66 typedef typeof(pcp) pcp_op_T__; \ 72 typedef typeof(pcp) pcp_op_T__; \
67 pcp_op_T__ ret__; \ 73 pcp_op_T__ ret__; \
@@ -84,6 +90,44 @@ do { \
84#define this_cpu_cmpxchg_4(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval) 90#define this_cpu_cmpxchg_4(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval)
85#define this_cpu_cmpxchg_8(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval) 91#define this_cpu_cmpxchg_8(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval)
86 92
93#define arch_this_cpu_xchg(pcp, nval) \
94({ \
95 typeof(pcp) *ptr__; \
96 typeof(pcp) ret__; \
97 preempt_disable(); \
98 ptr__ = __this_cpu_ptr(&(pcp)); \
99 ret__ = xchg(ptr__, nval); \
100 preempt_enable(); \
101 ret__; \
102})
103
104#define this_cpu_xchg_1(pcp, nval) arch_this_cpu_xchg(pcp, nval)
105#define this_cpu_xchg_2(pcp, nval) arch_this_cpu_xchg(pcp, nval)
106#define this_cpu_xchg_4(pcp, nval) arch_this_cpu_xchg(pcp, nval)
107#ifdef CONFIG_64BIT
108#define this_cpu_xchg_8(pcp, nval) arch_this_cpu_xchg(pcp, nval)
109#endif
110
111#define arch_this_cpu_cmpxchg_double(pcp1, pcp2, o1, o2, n1, n2) \
112({ \
113 typeof(pcp1) o1__ = (o1), n1__ = (n1); \
114 typeof(pcp2) o2__ = (o2), n2__ = (n2); \
115 typeof(pcp1) *p1__; \
116 typeof(pcp2) *p2__; \
117 int ret__; \
118 preempt_disable(); \
119 p1__ = __this_cpu_ptr(&(pcp1)); \
120 p2__ = __this_cpu_ptr(&(pcp2)); \
121 ret__ = __cmpxchg_double(p1__, p2__, o1__, o2__, n1__, n2__); \
122 preempt_enable(); \
123 ret__; \
124})
125
126#define this_cpu_cmpxchg_double_4 arch_this_cpu_cmpxchg_double
127#ifdef CONFIG_64BIT
128#define this_cpu_cmpxchg_double_8 arch_this_cpu_cmpxchg_double
129#endif
130
87#include <asm-generic/percpu.h> 131#include <asm-generic/percpu.h>
88 132
89#endif /* __ARCH_S390_PERCPU__ */ 133#endif /* __ARCH_S390_PERCPU__ */
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 11e4e3236937..f3e0aabfc6bc 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -11,12 +11,15 @@
11#ifndef __ASM_S390_PROCESSOR_H 11#ifndef __ASM_S390_PROCESSOR_H
12#define __ASM_S390_PROCESSOR_H 12#define __ASM_S390_PROCESSOR_H
13 13
14#ifndef __ASSEMBLY__
15
14#include <linux/linkage.h> 16#include <linux/linkage.h>
15#include <linux/irqflags.h> 17#include <linux/irqflags.h>
16#include <asm/cpu.h> 18#include <asm/cpu.h>
17#include <asm/page.h> 19#include <asm/page.h>
18#include <asm/ptrace.h> 20#include <asm/ptrace.h>
19#include <asm/setup.h> 21#include <asm/setup.h>
22#include <asm/runtime_instr.h>
20 23
21/* 24/*
22 * Default implementation of macro that returns current 25 * Default implementation of macro that returns current
@@ -75,11 +78,20 @@ struct thread_struct {
75 unsigned long gmap_addr; /* address of last gmap fault. */ 78 unsigned long gmap_addr; /* address of last gmap fault. */
76 struct per_regs per_user; /* User specified PER registers */ 79 struct per_regs per_user; /* User specified PER registers */
77 struct per_event per_event; /* Cause of the last PER trap */ 80 struct per_event per_event; /* Cause of the last PER trap */
81 unsigned long per_flags; /* Flags to control debug behavior */
78 /* pfault_wait is used to block the process on a pfault event */ 82 /* pfault_wait is used to block the process on a pfault event */
79 unsigned long pfault_wait; 83 unsigned long pfault_wait;
80 struct list_head list; 84 struct list_head list;
85 /* cpu runtime instrumentation */
86 struct runtime_instr_cb *ri_cb;
87 int ri_signum;
88#ifdef CONFIG_64BIT
89 unsigned char trap_tdb[256]; /* Transaction abort diagnose block */
90#endif
81}; 91};
82 92
93#define PER_FLAG_NO_TE 1UL /* Flag to disable transactions. */
94
83typedef struct thread_struct thread_struct; 95typedef struct thread_struct thread_struct;
84 96
85/* 97/*
@@ -130,6 +142,12 @@ struct task_struct;
130struct mm_struct; 142struct mm_struct;
131struct seq_file; 143struct seq_file;
132 144
145#ifdef CONFIG_64BIT
146extern void show_cacheinfo(struct seq_file *m);
147#else
148static inline void show_cacheinfo(struct seq_file *m) { }
149#endif
150
133/* Free all resources held by a thread. */ 151/* Free all resources held by a thread. */
134extern void release_thread(struct task_struct *); 152extern void release_thread(struct task_struct *);
135extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); 153extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
@@ -140,6 +158,7 @@ extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
140extern unsigned long thread_saved_pc(struct task_struct *t); 158extern unsigned long thread_saved_pc(struct task_struct *t);
141 159
142extern void show_code(struct pt_regs *regs); 160extern void show_code(struct pt_regs *regs);
161extern void print_fn_code(unsigned char *code, unsigned long len);
143 162
144unsigned long get_wchan(struct task_struct *p); 163unsigned long get_wchan(struct task_struct *p);
145#define task_pt_regs(tsk) ((struct pt_regs *) \ 164#define task_pt_regs(tsk) ((struct pt_regs *) \
@@ -331,23 +350,6 @@ extern void (*s390_base_ext_handler_fn)(void);
331 350
332#define ARCH_LOW_ADDRESS_LIMIT 0x7fffffffUL 351#define ARCH_LOW_ADDRESS_LIMIT 0x7fffffffUL
333 352
334/*
335 * Helper macro for exception table entries
336 */
337#ifndef CONFIG_64BIT
338#define EX_TABLE(_fault,_target) \
339 ".section __ex_table,\"a\"\n" \
340 " .align 4\n" \
341 " .long " #_fault "," #_target "\n" \
342 ".previous\n"
343#else
344#define EX_TABLE(_fault,_target) \
345 ".section __ex_table,\"a\"\n" \
346 " .align 8\n" \
347 " .quad " #_fault "," #_target "\n" \
348 ".previous\n"
349#endif
350
351extern int memcpy_real(void *, void *, size_t); 353extern int memcpy_real(void *, void *, size_t);
352extern void memcpy_absolute(void *, void *, size_t); 354extern void memcpy_absolute(void *, void *, size_t);
353 355
@@ -358,4 +360,25 @@ extern void memcpy_absolute(void *, void *, size_t);
358 memcpy_absolute(&(dest), &__tmp, sizeof(__tmp)); \ 360 memcpy_absolute(&(dest), &__tmp, sizeof(__tmp)); \
359} 361}
360 362
361#endif /* __ASM_S390_PROCESSOR_H */ 363/*
364 * Helper macro for exception table entries
365 */
366#define EX_TABLE(_fault, _target) \
367 ".section __ex_table,\"a\"\n" \
368 ".align 4\n" \
369 ".long (" #_fault ") - .\n" \
370 ".long (" #_target ") - .\n" \
371 ".previous\n"
372
373#else /* __ASSEMBLY__ */
374
375#define EX_TABLE(_fault, _target) \
376 .section __ex_table,"a" ; \
377 .align 4 ; \
378 .long (_fault) - . ; \
379 .long (_target) - . ; \
380 .previous
381
382#endif /* __ASSEMBLY__ */
383
384#endif /* __ASM_S390_PROCESSOR_H */
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index d5f08ea566ed..ce20a53afe91 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -235,6 +235,7 @@ typedef struct
235#define PSW_MASK_ASC 0x0000C000UL 235#define PSW_MASK_ASC 0x0000C000UL
236#define PSW_MASK_CC 0x00003000UL 236#define PSW_MASK_CC 0x00003000UL
237#define PSW_MASK_PM 0x00000F00UL 237#define PSW_MASK_PM 0x00000F00UL
238#define PSW_MASK_RI 0x00000000UL
238#define PSW_MASK_EA 0x00000000UL 239#define PSW_MASK_EA 0x00000000UL
239#define PSW_MASK_BA 0x00000000UL 240#define PSW_MASK_BA 0x00000000UL
240 241
@@ -264,10 +265,11 @@ typedef struct
264#define PSW_MASK_ASC 0x0000C00000000000UL 265#define PSW_MASK_ASC 0x0000C00000000000UL
265#define PSW_MASK_CC 0x0000300000000000UL 266#define PSW_MASK_CC 0x0000300000000000UL
266#define PSW_MASK_PM 0x00000F0000000000UL 267#define PSW_MASK_PM 0x00000F0000000000UL
268#define PSW_MASK_RI 0x0000008000000000UL
267#define PSW_MASK_EA 0x0000000100000000UL 269#define PSW_MASK_EA 0x0000000100000000UL
268#define PSW_MASK_BA 0x0000000080000000UL 270#define PSW_MASK_BA 0x0000000080000000UL
269 271
270#define PSW_MASK_USER 0x00003F0180000000UL 272#define PSW_MASK_USER 0x00003F8180000000UL
271 273
272#define PSW_ADDR_AMODE 0x0000000000000000UL 274#define PSW_ADDR_AMODE 0x0000000000000000UL
273#define PSW_ADDR_INSN 0xFFFFFFFFFFFFFFFFUL 275#define PSW_ADDR_INSN 0xFFFFFFFFFFFFFFFFUL
@@ -359,17 +361,19 @@ struct per_struct_kernel {
359 unsigned char access_id; /* PER trap access identification */ 361 unsigned char access_id; /* PER trap access identification */
360}; 362};
361 363
362#define PER_EVENT_MASK 0xE9000000UL 364#define PER_EVENT_MASK 0xEB000000UL
363 365
364#define PER_EVENT_BRANCH 0x80000000UL 366#define PER_EVENT_BRANCH 0x80000000UL
365#define PER_EVENT_IFETCH 0x40000000UL 367#define PER_EVENT_IFETCH 0x40000000UL
366#define PER_EVENT_STORE 0x20000000UL 368#define PER_EVENT_STORE 0x20000000UL
367#define PER_EVENT_STORE_REAL 0x08000000UL 369#define PER_EVENT_STORE_REAL 0x08000000UL
370#define PER_EVENT_TRANSACTION_END 0x02000000UL
368#define PER_EVENT_NULLIFICATION 0x01000000UL 371#define PER_EVENT_NULLIFICATION 0x01000000UL
369 372
370#define PER_CONTROL_MASK 0x00a00000UL 373#define PER_CONTROL_MASK 0x00e00000UL
371 374
372#define PER_CONTROL_BRANCH_ADDRESS 0x00800000UL 375#define PER_CONTROL_BRANCH_ADDRESS 0x00800000UL
376#define PER_CONTROL_SUSPENSION 0x00400000UL
373#define PER_CONTROL_ALTERATION 0x00200000UL 377#define PER_CONTROL_ALTERATION 0x00200000UL
374 378
375#endif 379#endif
@@ -483,6 +487,8 @@ typedef struct
483#define PTRACE_GET_LAST_BREAK 0x5006 487#define PTRACE_GET_LAST_BREAK 0x5006
484#define PTRACE_PEEK_SYSTEM_CALL 0x5007 488#define PTRACE_PEEK_SYSTEM_CALL 0x5007
485#define PTRACE_POKE_SYSTEM_CALL 0x5008 489#define PTRACE_POKE_SYSTEM_CALL 0x5008
490#define PTRACE_ENABLE_TE 0x5009
491#define PTRACE_DISABLE_TE 0x5010
486 492
487/* 493/*
488 * PT_PROT definition is loosely based on hppa bsd definition in 494 * PT_PROT definition is loosely based on hppa bsd definition in
diff --git a/arch/s390/include/asm/runtime_instr.h b/arch/s390/include/asm/runtime_instr.h
new file mode 100644
index 000000000000..830da737ff85
--- /dev/null
+++ b/arch/s390/include/asm/runtime_instr.h
@@ -0,0 +1,98 @@
1#ifndef _RUNTIME_INSTR_H
2#define _RUNTIME_INSTR_H
3
4#define S390_RUNTIME_INSTR_START 0x1
5#define S390_RUNTIME_INSTR_STOP 0x2
6
7struct runtime_instr_cb {
8 __u64 buf_current;
9 __u64 buf_origin;
10 __u64 buf_limit;
11
12 __u32 valid : 1;
13 __u32 pstate : 1;
14 __u32 pstate_set_buf : 1;
15 __u32 home_space : 1;
16 __u32 altered : 1;
17 __u32 : 3;
18 __u32 pstate_sample : 1;
19 __u32 sstate_sample : 1;
20 __u32 pstate_collect : 1;
21 __u32 sstate_collect : 1;
22 __u32 : 1;
23 __u32 halted_int : 1;
24 __u32 int_requested : 1;
25 __u32 buffer_full_int : 1;
26 __u32 key : 4;
27 __u32 : 9;
28 __u32 rgs : 3;
29
30 __u32 mode : 4;
31 __u32 next : 1;
32 __u32 mae : 1;
33 __u32 : 2;
34 __u32 call_type_br : 1;
35 __u32 return_type_br : 1;
36 __u32 other_type_br : 1;
37 __u32 bc_other_type : 1;
38 __u32 emit : 1;
39 __u32 tx_abort : 1;
40 __u32 : 2;
41 __u32 bp_xn : 1;
42 __u32 bp_xt : 1;
43 __u32 bp_ti : 1;
44 __u32 bp_ni : 1;
45 __u32 suppr_y : 1;
46 __u32 suppr_z : 1;
47
48 __u32 dc_miss_extra : 1;
49 __u32 lat_lev_ignore : 1;
50 __u32 ic_lat_lev : 4;
51 __u32 dc_lat_lev : 4;
52
53 __u64 reserved1;
54 __u64 scaling_factor;
55 __u64 rsic;
56 __u64 reserved2;
57} __packed __aligned(8);
58
59extern struct runtime_instr_cb runtime_instr_empty_cb;
60
61static inline void load_runtime_instr_cb(struct runtime_instr_cb *cb)
62{
63 asm volatile(".insn rsy,0xeb0000000060,0,0,%0" /* LRIC */
64 : : "Q" (*cb));
65}
66
67static inline void store_runtime_instr_cb(struct runtime_instr_cb *cb)
68{
69 asm volatile(".insn rsy,0xeb0000000061,0,0,%0" /* STRIC */
70 : "=Q" (*cb) : : "cc");
71}
72
73static inline void save_ri_cb(struct runtime_instr_cb *cb_prev)
74{
75#ifdef CONFIG_64BIT
76 if (cb_prev)
77 store_runtime_instr_cb(cb_prev);
78#endif
79}
80
81static inline void restore_ri_cb(struct runtime_instr_cb *cb_next,
82 struct runtime_instr_cb *cb_prev)
83{
84#ifdef CONFIG_64BIT
85 if (cb_next)
86 load_runtime_instr_cb(cb_next);
87 else if (cb_prev)
88 load_runtime_instr_cb(&runtime_instr_empty_cb);
89#endif
90}
91
92#ifdef CONFIG_64BIT
93extern void exit_thread_runtime_instr(void);
94#else
95static inline void exit_thread_runtime_instr(void) { }
96#endif
97
98#endif /* _RUNTIME_INSTR_H */
diff --git a/arch/s390/include/asm/scsw.h b/arch/s390/include/asm/scsw.h
index 4071d00978cb..4af99cdaddf5 100644
--- a/arch/s390/include/asm/scsw.h
+++ b/arch/s390/include/asm/scsw.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Helper functions for scsw access. 2 * Helper functions for scsw access.
3 * 3 *
4 * Copyright IBM Corp. 2008, 2009 4 * Copyright IBM Corp. 2008, 2012
5 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> 5 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
6 */ 6 */
7 7
@@ -9,7 +9,7 @@
9#define _ASM_S390_SCSW_H_ 9#define _ASM_S390_SCSW_H_
10 10
11#include <linux/types.h> 11#include <linux/types.h>
12#include <asm/chsc.h> 12#include <asm/css_chars.h>
13#include <asm/cio.h> 13#include <asm/cio.h>
14 14
15/** 15/**
@@ -100,14 +100,46 @@ struct tm_scsw {
100} __attribute__ ((packed)); 100} __attribute__ ((packed));
101 101
102/** 102/**
103 * struct eadm_scsw - subchannel status word for eadm subchannels
104 * @key: subchannel key
105 * @eswf: esw format
106 * @cc: deferred condition code
107 * @ectl: extended control
108 * @fctl: function control
109 * @actl: activity control
110 * @stctl: status control
111 * @aob: AOB address
112 * @dstat: device status
113 * @cstat: subchannel status
114 */
115struct eadm_scsw {
116 u32 key:4;
117 u32:1;
118 u32 eswf:1;
119 u32 cc:2;
120 u32:6;
121 u32 ectl:1;
122 u32:2;
123 u32 fctl:3;
124 u32 actl:7;
125 u32 stctl:5;
126 u32 aob;
127 u32 dstat:8;
128 u32 cstat:8;
129 u32:16;
130} __packed;
131
132/**
103 * union scsw - subchannel status word 133 * union scsw - subchannel status word
104 * @cmd: command-mode SCSW 134 * @cmd: command-mode SCSW
105 * @tm: transport-mode SCSW 135 * @tm: transport-mode SCSW
136 * @eadm: eadm SCSW
106 */ 137 */
107union scsw { 138union scsw {
108 struct cmd_scsw cmd; 139 struct cmd_scsw cmd;
109 struct tm_scsw tm; 140 struct tm_scsw tm;
110} __attribute__ ((packed)); 141 struct eadm_scsw eadm;
142} __packed;
111 143
112#define SCSW_FCTL_CLEAR_FUNC 0x1 144#define SCSW_FCTL_CLEAR_FUNC 0x1
113#define SCSW_FCTL_HALT_FUNC 0x2 145#define SCSW_FCTL_HALT_FUNC 0x2
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index e6859d16ee2d..87b47ca954f1 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -60,7 +60,7 @@ void create_mem_hole(struct mem_chunk memory_chunk[], unsigned long addr,
60#define SECONDARY_SPACE_MODE 2 60#define SECONDARY_SPACE_MODE 2
61#define HOME_SPACE_MODE 3 61#define HOME_SPACE_MODE 3
62 62
63extern unsigned int addressing_mode; 63extern unsigned int s390_user_mode;
64 64
65/* 65/*
66 * Machine features detected in head.S 66 * Machine features detected in head.S
@@ -80,6 +80,7 @@ extern unsigned int addressing_mode;
80#define MACHINE_FLAG_LPAR (1UL << 12) 80#define MACHINE_FLAG_LPAR (1UL << 12)
81#define MACHINE_FLAG_SPP (1UL << 13) 81#define MACHINE_FLAG_SPP (1UL << 13)
82#define MACHINE_FLAG_TOPOLOGY (1UL << 14) 82#define MACHINE_FLAG_TOPOLOGY (1UL << 14)
83#define MACHINE_FLAG_TE (1UL << 15)
83 84
84#define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM) 85#define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM)
85#define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM) 86#define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
@@ -98,6 +99,7 @@ extern unsigned int addressing_mode;
98#define MACHINE_HAS_PFMF (0) 99#define MACHINE_HAS_PFMF (0)
99#define MACHINE_HAS_SPP (0) 100#define MACHINE_HAS_SPP (0)
100#define MACHINE_HAS_TOPOLOGY (0) 101#define MACHINE_HAS_TOPOLOGY (0)
102#define MACHINE_HAS_TE (0)
101#else /* CONFIG_64BIT */ 103#else /* CONFIG_64BIT */
102#define MACHINE_HAS_IEEE (1) 104#define MACHINE_HAS_IEEE (1)
103#define MACHINE_HAS_CSP (1) 105#define MACHINE_HAS_CSP (1)
@@ -109,6 +111,7 @@ extern unsigned int addressing_mode;
109#define MACHINE_HAS_PFMF (S390_lowcore.machine_flags & MACHINE_FLAG_PFMF) 111#define MACHINE_HAS_PFMF (S390_lowcore.machine_flags & MACHINE_FLAG_PFMF)
110#define MACHINE_HAS_SPP (S390_lowcore.machine_flags & MACHINE_FLAG_SPP) 112#define MACHINE_HAS_SPP (S390_lowcore.machine_flags & MACHINE_FLAG_SPP)
111#define MACHINE_HAS_TOPOLOGY (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY) 113#define MACHINE_HAS_TOPOLOGY (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY)
114#define MACHINE_HAS_TE (S390_lowcore.machine_flags & MACHINE_FLAG_TE)
112#endif /* CONFIG_64BIT */ 115#endif /* CONFIG_64BIT */
113 116
114#define ZFCPDUMP_HSA_SIZE (32UL<<20) 117#define ZFCPDUMP_HSA_SIZE (32UL<<20)
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index ce26ac3cb162..b64f15c3b4cc 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -30,6 +30,8 @@ extern int smp_vcpu_scheduled(int cpu);
30extern void smp_yield_cpu(int cpu); 30extern void smp_yield_cpu(int cpu);
31extern void smp_yield(void); 31extern void smp_yield(void);
32extern void smp_stop_cpu(void); 32extern void smp_stop_cpu(void);
33extern void smp_cpu_set_polarization(int cpu, int val);
34extern int smp_cpu_get_polarization(int cpu);
33 35
34#else /* CONFIG_SMP */ 36#else /* CONFIG_SMP */
35 37
@@ -43,7 +45,7 @@ static inline void smp_call_online_cpu(void (*func)(void *), void *data)
43 func(data); 45 func(data);
44} 46}
45 47
46static inline int smp_find_processor_id(int address) { return 0; } 48static inline int smp_find_processor_id(u16 address) { return 0; }
47static inline int smp_store_status(int cpu) { return 0; } 49static inline int smp_store_status(int cpu) { return 0; }
48static inline int smp_vcpu_scheduled(int cpu) { return 1; } 50static inline int smp_vcpu_scheduled(int cpu) { return 1; }
49static inline void smp_yield_cpu(int cpu) { } 51static inline void smp_yield_cpu(int cpu) { }
diff --git a/arch/s390/include/asm/string.h b/arch/s390/include/asm/string.h
index 1bd1352fa3b5..7e2dcd7c57ef 100644
--- a/arch/s390/include/asm/string.h
+++ b/arch/s390/include/asm/string.h
@@ -96,7 +96,6 @@ static inline char *strcat(char *dst, const char *src)
96 96
97static inline char *strcpy(char *dst, const char *src) 97static inline char *strcpy(char *dst, const char *src)
98{ 98{
99#if __GNUC__ < 4
100 register int r0 asm("0") = 0; 99 register int r0 asm("0") = 0;
101 char *ret = dst; 100 char *ret = dst;
102 101
@@ -106,14 +105,10 @@ static inline char *strcpy(char *dst, const char *src)
106 : "+&a" (dst), "+&a" (src) : "d" (r0) 105 : "+&a" (dst), "+&a" (src) : "d" (r0)
107 : "cc", "memory"); 106 : "cc", "memory");
108 return ret; 107 return ret;
109#else
110 return __builtin_strcpy(dst, src);
111#endif
112} 108}
113 109
114static inline size_t strlen(const char *s) 110static inline size_t strlen(const char *s)
115{ 111{
116#if __GNUC__ < 4
117 register unsigned long r0 asm("0") = 0; 112 register unsigned long r0 asm("0") = 0;
118 const char *tmp = s; 113 const char *tmp = s;
119 114
@@ -122,9 +117,6 @@ static inline size_t strlen(const char *s)
122 " jo 0b" 117 " jo 0b"
123 : "+d" (r0), "+a" (tmp) : : "cc"); 118 : "+d" (r0), "+a" (tmp) : : "cc");
124 return r0 - (unsigned long) s; 119 return r0 - (unsigned long) s;
125#else
126 return __builtin_strlen(s);
127#endif
128} 120}
129 121
130static inline size_t strnlen(const char * s, size_t n) 122static inline size_t strnlen(const char * s, size_t n)
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index 314cc9426fc4..f3a9e0f92704 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -80,10 +80,12 @@ static inline void restore_access_regs(unsigned int *acrs)
80 if (prev->mm) { \ 80 if (prev->mm) { \
81 save_fp_regs(&prev->thread.fp_regs); \ 81 save_fp_regs(&prev->thread.fp_regs); \
82 save_access_regs(&prev->thread.acrs[0]); \ 82 save_access_regs(&prev->thread.acrs[0]); \
83 save_ri_cb(prev->thread.ri_cb); \
83 } \ 84 } \
84 if (next->mm) { \ 85 if (next->mm) { \
85 restore_fp_regs(&next->thread.fp_regs); \ 86 restore_fp_regs(&next->thread.fp_regs); \
86 restore_access_regs(&next->thread.acrs[0]); \ 87 restore_access_regs(&next->thread.acrs[0]); \
88 restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \
87 update_per_regs(next); \ 89 update_per_regs(next); \
88 } \ 90 } \
89 prev = __switch_to(prev,next); \ 91 prev = __switch_to(prev,next); \
diff --git a/arch/s390/include/asm/sysinfo.h b/arch/s390/include/asm/sysinfo.h
index 282ee36f6162..f92428e459f8 100644
--- a/arch/s390/include/asm/sysinfo.h
+++ b/arch/s390/include/asm/sysinfo.h
@@ -17,7 +17,10 @@
17#include <asm/bitsperlong.h> 17#include <asm/bitsperlong.h>
18 18
19struct sysinfo_1_1_1 { 19struct sysinfo_1_1_1 {
20 unsigned short :16; 20 unsigned char p:1;
21 unsigned char :6;
22 unsigned char t:1;
23 unsigned char :8;
21 unsigned char ccr; 24 unsigned char ccr;
22 unsigned char cai; 25 unsigned char cai;
23 char reserved_0[28]; 26 char reserved_0[28];
@@ -30,9 +33,14 @@ struct sysinfo_1_1_1 {
30 char model[16]; 33 char model[16];
31 char model_perm_cap[16]; 34 char model_perm_cap[16];
32 char model_temp_cap[16]; 35 char model_temp_cap[16];
33 char model_cap_rating[4]; 36 unsigned int model_cap_rating;
34 char model_perm_cap_rating[4]; 37 unsigned int model_perm_cap_rating;
35 char model_temp_cap_rating[4]; 38 unsigned int model_temp_cap_rating;
39 unsigned char typepct[5];
40 unsigned char reserved_2[3];
41 unsigned int ncr;
42 unsigned int npr;
43 unsigned int ntr;
36}; 44};
37 45
38struct sysinfo_1_2_1 { 46struct sysinfo_1_2_1 {
@@ -47,8 +55,9 @@ struct sysinfo_1_2_2 {
47 char format; 55 char format;
48 char reserved_0[1]; 56 char reserved_0[1];
49 unsigned short acc_offset; 57 unsigned short acc_offset;
50 char reserved_1[24]; 58 char reserved_1[20];
51 unsigned int secondary_capability; 59 unsigned int nominal_cap;
60 unsigned int secondary_cap;
52 unsigned int capability; 61 unsigned int capability;
53 unsigned short cpus_total; 62 unsigned short cpus_total;
54 unsigned short cpus_configured; 63 unsigned short cpus_configured;
@@ -109,6 +118,8 @@ struct sysinfo_3_2_2 {
109 char reserved_544[3552]; 118 char reserved_544[3552];
110}; 119};
111 120
121extern int topology_max_mnest;
122
112#define TOPOLOGY_CPU_BITS 64 123#define TOPOLOGY_CPU_BITS 64
113#define TOPOLOGY_NR_MAG 6 124#define TOPOLOGY_NR_MAG 6
114 125
@@ -142,21 +153,7 @@ struct sysinfo_15_1_x {
142 union topology_entry tle[0]; 153 union topology_entry tle[0];
143}; 154};
144 155
145static inline int stsi(void *sysinfo, int fc, int sel1, int sel2) 156int stsi(void *sysinfo, int fc, int sel1, int sel2);
146{
147 register int r0 asm("0") = (fc << 28) | sel1;
148 register int r1 asm("1") = sel2;
149
150 asm volatile(
151 " stsi 0(%2)\n"
152 "0: jz 2f\n"
153 "1: lhi %0,%3\n"
154 "2:\n"
155 EX_TABLE(0b, 1b)
156 : "+d" (r0) : "d" (r1), "a" (sysinfo), "K" (-ENOSYS)
157 : "cc", "memory");
158 return r0;
159}
160 157
161/* 158/*
162 * Service level reporting interface. 159 * Service level reporting interface.
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index 0837de80c351..9ca305383760 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -2,8 +2,8 @@
2#define _ASM_S390_TOPOLOGY_H 2#define _ASM_S390_TOPOLOGY_H
3 3
4#include <linux/cpumask.h> 4#include <linux/cpumask.h>
5#include <asm/sysinfo.h>
6 5
6struct sysinfo_15_1_x;
7struct cpu; 7struct cpu;
8 8
9#ifdef CONFIG_SCHED_BOOK 9#ifdef CONFIG_SCHED_BOOK
@@ -51,24 +51,6 @@ static inline void topology_expect_change(void) { }
51#define POLARIZATION_VM (2) 51#define POLARIZATION_VM (2)
52#define POLARIZATION_VH (3) 52#define POLARIZATION_VH (3)
53 53
54extern int cpu_polarization[];
55
56static inline void cpu_set_polarization(int cpu, int val)
57{
58#ifdef CONFIG_SCHED_BOOK
59 cpu_polarization[cpu] = val;
60#endif
61}
62
63static inline int cpu_read_polarization(int cpu)
64{
65#ifdef CONFIG_SCHED_BOOK
66 return cpu_polarization[cpu];
67#else
68 return POLARIZATION_HRZ;
69#endif
70}
71
72#ifdef CONFIG_SCHED_BOOK 54#ifdef CONFIG_SCHED_BOOK
73void s390_init_cpu_topology(void); 55void s390_init_cpu_topology(void);
74#else 56#else
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index a8ab18b18b54..34268df959a3 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -76,9 +76,22 @@ static inline int __range_ok(unsigned long addr, unsigned long size)
76 76
77struct exception_table_entry 77struct exception_table_entry
78{ 78{
79 unsigned long insn, fixup; 79 int insn, fixup;
80}; 80};
81 81
82static inline unsigned long extable_insn(const struct exception_table_entry *x)
83{
84 return (unsigned long)&x->insn + x->insn;
85}
86
87static inline unsigned long extable_fixup(const struct exception_table_entry *x)
88{
89 return (unsigned long)&x->fixup + x->fixup;
90}
91
92#define ARCH_HAS_SORT_EXTABLE
93#define ARCH_HAS_SEARCH_EXTABLE
94
82struct uaccess_ops { 95struct uaccess_ops {
83 size_t (*copy_from_user)(size_t, const void __user *, void *); 96 size_t (*copy_from_user)(size_t, const void __user *, void *);
84 size_t (*copy_from_user_small)(size_t, const void __user *, void *); 97 size_t (*copy_from_user_small)(size_t, const void __user *, void *);
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index 6756e78f4808..4e64b5cd1558 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -277,7 +277,9 @@
277#define __NR_setns 339 277#define __NR_setns 339
278#define __NR_process_vm_readv 340 278#define __NR_process_vm_readv 340
279#define __NR_process_vm_writev 341 279#define __NR_process_vm_writev 341
280#define NR_syscalls 342 280#define __NR_s390_runtime_instr 342
281#define __NR_kcmp 343
282#define NR_syscalls 344
281 283
282/* 284/*
283 * There are some system calls that are not present on 64 bit, some 285 * There are some system calls that are not present on 64 bit, some
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 9733b3f0eb6d..4da52fe31743 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -23,10 +23,11 @@ CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
23obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o \ 23obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o \
24 processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o \ 24 processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o \
25 debug.o irq.o ipl.o dis.o diag.o mem_detect.o sclp.o vdso.o \ 25 debug.o irq.o ipl.o dis.o diag.o mem_detect.o sclp.o vdso.o \
26 sysinfo.o jump_label.o lgr.o os_info.o 26 sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o
27 27
28obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) 28obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o)
29obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) 29obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
30obj-y += $(if $(CONFIG_64BIT),relocate_kernel64.o,relocate_kernel.o)
30 31
31extra-y += head.o vmlinux.lds 32extra-y += head.o vmlinux.lds
32extra-y += $(if $(CONFIG_64BIT),head64.o,head31.o) 33extra-y += $(if $(CONFIG_64BIT),head64.o,head31.o)
@@ -48,12 +49,11 @@ obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
48obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o 49obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
49obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o 50obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
50obj-$(CONFIG_CRASH_DUMP) += crash_dump.o 51obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
51obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o
52 52
53# Kexec part 53ifdef CONFIG_64BIT
54S390_KEXEC_OBJS := machine_kexec.o crash.o 54obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o
55S390_KEXEC_OBJS += $(if $(CONFIG_64BIT),relocate_kernel64.o,relocate_kernel.o) 55obj-y += runtime_instr.o cache.o
56obj-$(CONFIG_KEXEC) += $(S390_KEXEC_OBJS) 56endif
57 57
58# vdso 58# vdso
59obj-$(CONFIG_64BIT) += vdso64/ 59obj-$(CONFIG_64BIT) += vdso64/
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 45ef1a7b08f9..fface87056eb 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -157,6 +157,8 @@ int main(void)
157 DEFINE(__LC_LAST_BREAK, offsetof(struct _lowcore, breaking_event_addr)); 157 DEFINE(__LC_LAST_BREAK, offsetof(struct _lowcore, breaking_event_addr));
158 DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data)); 158 DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
159 DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap)); 159 DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap));
160 DEFINE(__LC_PGM_TDB, offsetof(struct _lowcore, pgm_tdb));
161 DEFINE(__THREAD_trap_tdb, offsetof(struct task_struct, thread.trap_tdb));
160 DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce)); 162 DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce));
161#endif /* CONFIG_32BIT */ 163#endif /* CONFIG_32BIT */
162 return 0; 164 return 0;
diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c
new file mode 100644
index 000000000000..8df8d8a19c98
--- /dev/null
+++ b/arch/s390/kernel/cache.c
@@ -0,0 +1,385 @@
1/*
2 * Extract CPU cache information and expose them via sysfs.
3 *
4 * Copyright IBM Corp. 2012
5 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
6 */
7
8#include <linux/notifier.h>
9#include <linux/seq_file.h>
10#include <linux/init.h>
11#include <linux/list.h>
12#include <linux/slab.h>
13#include <linux/cpu.h>
14#include <asm/facility.h>
15
16struct cache {
17 unsigned long size;
18 unsigned int line_size;
19 unsigned int associativity;
20 unsigned int nr_sets;
21 unsigned int level : 3;
22 unsigned int type : 2;
23 unsigned int private : 1;
24 struct list_head list;
25};
26
27struct cache_dir {
28 struct kobject *kobj;
29 struct cache_index_dir *index;
30};
31
32struct cache_index_dir {
33 struct kobject kobj;
34 int cpu;
35 struct cache *cache;
36 struct cache_index_dir *next;
37};
38
39enum {
40 CACHE_SCOPE_NOTEXISTS,
41 CACHE_SCOPE_PRIVATE,
42 CACHE_SCOPE_SHARED,
43 CACHE_SCOPE_RESERVED,
44};
45
46enum {
47 CACHE_TYPE_SEPARATE,
48 CACHE_TYPE_DATA,
49 CACHE_TYPE_INSTRUCTION,
50 CACHE_TYPE_UNIFIED,
51};
52
53enum {
54 EXTRACT_TOPOLOGY,
55 EXTRACT_LINE_SIZE,
56 EXTRACT_SIZE,
57 EXTRACT_ASSOCIATIVITY,
58};
59
60enum {
61 CACHE_TI_UNIFIED = 0,
62 CACHE_TI_INSTRUCTION = 0,
63 CACHE_TI_DATA,
64};
65
66struct cache_info {
67 unsigned char : 4;
68 unsigned char scope : 2;
69 unsigned char type : 2;
70};
71
72#define CACHE_MAX_LEVEL 8
73
74union cache_topology {
75 struct cache_info ci[CACHE_MAX_LEVEL];
76 unsigned long long raw;
77};
78
79static const char * const cache_type_string[] = {
80 "Data",
81 "Instruction",
82 "Unified",
83};
84
85static struct cache_dir *cache_dir_cpu[NR_CPUS];
86static LIST_HEAD(cache_list);
87
88void show_cacheinfo(struct seq_file *m)
89{
90 struct cache *cache;
91 int index = 0;
92
93 list_for_each_entry(cache, &cache_list, list) {
94 seq_printf(m, "cache%-11d: ", index);
95 seq_printf(m, "level=%d ", cache->level);
96 seq_printf(m, "type=%s ", cache_type_string[cache->type]);
97 seq_printf(m, "scope=%s ", cache->private ? "Private" : "Shared");
98 seq_printf(m, "size=%luK ", cache->size >> 10);
99 seq_printf(m, "line_size=%u ", cache->line_size);
100 seq_printf(m, "associativity=%d", cache->associativity);
101 seq_puts(m, "\n");
102 index++;
103 }
104}
105
106static inline unsigned long ecag(int ai, int li, int ti)
107{
108 unsigned long cmd, val;
109
110 cmd = ai << 4 | li << 1 | ti;
111 asm volatile(".insn rsy,0xeb000000004c,%0,0,0(%1)" /* ecag */
112 : "=d" (val) : "a" (cmd));
113 return val;
114}
115
116static int __init cache_add(int level, int private, int type)
117{
118 struct cache *cache;
119 int ti;
120
121 cache = kzalloc(sizeof(*cache), GFP_KERNEL);
122 if (!cache)
123 return -ENOMEM;
124 ti = type == CACHE_TYPE_DATA ? CACHE_TI_DATA : CACHE_TI_UNIFIED;
125 cache->size = ecag(EXTRACT_SIZE, level, ti);
126 cache->line_size = ecag(EXTRACT_LINE_SIZE, level, ti);
127 cache->associativity = ecag(EXTRACT_ASSOCIATIVITY, level, ti);
128 cache->nr_sets = cache->size / cache->associativity;
129 cache->nr_sets /= cache->line_size;
130 cache->private = private;
131 cache->level = level + 1;
132 cache->type = type - 1;
133 list_add_tail(&cache->list, &cache_list);
134 return 0;
135}
136
137static void __init cache_build_info(void)
138{
139 struct cache *cache, *next;
140 union cache_topology ct;
141 int level, private, rc;
142
143 ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
144 for (level = 0; level < CACHE_MAX_LEVEL; level++) {
145 switch (ct.ci[level].scope) {
146 case CACHE_SCOPE_NOTEXISTS:
147 case CACHE_SCOPE_RESERVED:
148 return;
149 case CACHE_SCOPE_SHARED:
150 private = 0;
151 break;
152 case CACHE_SCOPE_PRIVATE:
153 private = 1;
154 break;
155 }
156 if (ct.ci[level].type == CACHE_TYPE_SEPARATE) {
157 rc = cache_add(level, private, CACHE_TYPE_DATA);
158 rc |= cache_add(level, private, CACHE_TYPE_INSTRUCTION);
159 } else {
160 rc = cache_add(level, private, ct.ci[level].type);
161 }
162 if (rc)
163 goto error;
164 }
165 return;
166error:
167 list_for_each_entry_safe(cache, next, &cache_list, list) {
168 list_del(&cache->list);
169 kfree(cache);
170 }
171}
172
173static struct cache_dir *__cpuinit cache_create_cache_dir(int cpu)
174{
175 struct cache_dir *cache_dir;
176 struct kobject *kobj = NULL;
177 struct device *dev;
178
179 dev = get_cpu_device(cpu);
180 if (!dev)
181 goto out;
182 kobj = kobject_create_and_add("cache", &dev->kobj);
183 if (!kobj)
184 goto out;
185 cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
186 if (!cache_dir)
187 goto out;
188 cache_dir->kobj = kobj;
189 cache_dir_cpu[cpu] = cache_dir;
190 return cache_dir;
191out:
192 kobject_put(kobj);
193 return NULL;
194}
195
196static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *kobj)
197{
198 return container_of(kobj, struct cache_index_dir, kobj);
199}
200
201static void cache_index_release(struct kobject *kobj)
202{
203 struct cache_index_dir *index;
204
205 index = kobj_to_cache_index_dir(kobj);
206 kfree(index);
207}
208
209static ssize_t cache_index_show(struct kobject *kobj,
210 struct attribute *attr, char *buf)
211{
212 struct kobj_attribute *kobj_attr;
213
214 kobj_attr = container_of(attr, struct kobj_attribute, attr);
215 return kobj_attr->show(kobj, kobj_attr, buf);
216}
217
218#define DEFINE_CACHE_ATTR(_name, _format, _value) \
219static ssize_t cache_##_name##_show(struct kobject *kobj, \
220 struct kobj_attribute *attr, \
221 char *buf) \
222{ \
223 struct cache_index_dir *index; \
224 \
225 index = kobj_to_cache_index_dir(kobj); \
226 return sprintf(buf, _format, _value); \
227} \
228static struct kobj_attribute cache_##_name##_attr = \
229 __ATTR(_name, 0444, cache_##_name##_show, NULL);
230
231DEFINE_CACHE_ATTR(size, "%luK\n", index->cache->size >> 10);
232DEFINE_CACHE_ATTR(coherency_line_size, "%u\n", index->cache->line_size);
233DEFINE_CACHE_ATTR(number_of_sets, "%u\n", index->cache->nr_sets);
234DEFINE_CACHE_ATTR(ways_of_associativity, "%u\n", index->cache->associativity);
235DEFINE_CACHE_ATTR(type, "%s\n", cache_type_string[index->cache->type]);
236DEFINE_CACHE_ATTR(level, "%d\n", index->cache->level);
237
238static ssize_t shared_cpu_map_func(struct kobject *kobj, int type, char *buf)
239{
240 struct cache_index_dir *index;
241 int len;
242
243 index = kobj_to_cache_index_dir(kobj);
244 len = type ?
245 cpulist_scnprintf(buf, PAGE_SIZE - 2, cpumask_of(index->cpu)) :
246 cpumask_scnprintf(buf, PAGE_SIZE - 2, cpumask_of(index->cpu));
247 len += sprintf(&buf[len], "\n");
248 return len;
249}
250
251static ssize_t shared_cpu_map_show(struct kobject *kobj,
252 struct kobj_attribute *attr, char *buf)
253{
254 return shared_cpu_map_func(kobj, 0, buf);
255}
256static struct kobj_attribute cache_shared_cpu_map_attr =
257 __ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);
258
259static ssize_t shared_cpu_list_show(struct kobject *kobj,
260 struct kobj_attribute *attr, char *buf)
261{
262 return shared_cpu_map_func(kobj, 1, buf);
263}
264static struct kobj_attribute cache_shared_cpu_list_attr =
265 __ATTR(shared_cpu_list, 0444, shared_cpu_list_show, NULL);
266
267static struct attribute *cache_index_default_attrs[] = {
268 &cache_type_attr.attr,
269 &cache_size_attr.attr,
270 &cache_number_of_sets_attr.attr,
271 &cache_ways_of_associativity_attr.attr,
272 &cache_level_attr.attr,
273 &cache_coherency_line_size_attr.attr,
274 &cache_shared_cpu_map_attr.attr,
275 &cache_shared_cpu_list_attr.attr,
276 NULL,
277};
278
279static const struct sysfs_ops cache_index_ops = {
280 .show = cache_index_show,
281};
282
283static struct kobj_type cache_index_type = {
284 .sysfs_ops = &cache_index_ops,
285 .release = cache_index_release,
286 .default_attrs = cache_index_default_attrs,
287};
288
289static int __cpuinit cache_create_index_dir(struct cache_dir *cache_dir,
290 struct cache *cache, int index,
291 int cpu)
292{
293 struct cache_index_dir *index_dir;
294 int rc;
295
296 index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
297 if (!index_dir)
298 return -ENOMEM;
299 index_dir->cache = cache;
300 index_dir->cpu = cpu;
301 rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
302 cache_dir->kobj, "index%d", index);
303 if (rc)
304 goto out;
305 index_dir->next = cache_dir->index;
306 cache_dir->index = index_dir;
307 return 0;
308out:
309 kfree(index_dir);
310 return rc;
311}
312
313static int __cpuinit cache_add_cpu(int cpu)
314{
315 struct cache_dir *cache_dir;
316 struct cache *cache;
317 int rc, index = 0;
318
319 if (list_empty(&cache_list))
320 return 0;
321 cache_dir = cache_create_cache_dir(cpu);
322 if (!cache_dir)
323 return -ENOMEM;
324 list_for_each_entry(cache, &cache_list, list) {
325 if (!cache->private)
326 break;
327 rc = cache_create_index_dir(cache_dir, cache, index, cpu);
328 if (rc)
329 return rc;
330 index++;
331 }
332 return 0;
333}
334
335static void __cpuinit cache_remove_cpu(int cpu)
336{
337 struct cache_index_dir *index, *next;
338 struct cache_dir *cache_dir;
339
340 cache_dir = cache_dir_cpu[cpu];
341 if (!cache_dir)
342 return;
343 index = cache_dir->index;
344 while (index) {
345 next = index->next;
346 kobject_put(&index->kobj);
347 index = next;
348 }
349 kobject_put(cache_dir->kobj);
350 kfree(cache_dir);
351 cache_dir_cpu[cpu] = NULL;
352}
353
354static int __cpuinit cache_hotplug(struct notifier_block *nfb,
355 unsigned long action, void *hcpu)
356{
357 int cpu = (long)hcpu;
358 int rc = 0;
359
360 switch (action & ~CPU_TASKS_FROZEN) {
361 case CPU_ONLINE:
362 rc = cache_add_cpu(cpu);
363 if (rc)
364 cache_remove_cpu(cpu);
365 break;
366 case CPU_DEAD:
367 cache_remove_cpu(cpu);
368 break;
369 }
370 return rc ? NOTIFY_BAD : NOTIFY_OK;
371}
372
373static int __init cache_init(void)
374{
375 int cpu;
376
377 if (!test_facility(34))
378 return 0;
379 cache_build_info();
380 for_each_online_cpu(cpu)
381 cache_add_cpu(cpu);
382 hotcpu_notifier(cache_hotplug, 0);
383 return 0;
384}
385device_initcall(cache_init);
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index 2d82cfcbce5b..3afba804fe97 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -1646,3 +1646,16 @@ ENTRY(compat_sys_process_vm_writev_wrapper)
1646 llgf %r0,164(%r15) # unsigned long 1646 llgf %r0,164(%r15) # unsigned long
1647 stg %r0,160(%r15) 1647 stg %r0,160(%r15)
1648 jg compat_sys_process_vm_writev 1648 jg compat_sys_process_vm_writev
1649
1650ENTRY(sys_s390_runtime_instr_wrapper)
1651 lgfr %r2,%r2 # int
1652 lgfr %r3,%r3 # int
1653 jg sys_s390_runtime_instr
1654
1655ENTRY(sys_kcmp_wrapper)
1656 lgfr %r2,%r2 # pid_t
1657 lgfr %r3,%r3 # pid_t
1658 lgfr %r4,%r4 # int
1659 llgfr %r5,%r5 # unsigned long
1660 llgfr %r6,%r6 # unsigned long
1661 jg sys_kcmp
diff --git a/arch/s390/kernel/crash.c b/arch/s390/kernel/crash.c
deleted file mode 100644
index 3819153de8bd..000000000000
--- a/arch/s390/kernel/crash.c
+++ /dev/null
@@ -1,14 +0,0 @@
1/*
2 * Copyright IBM Corp. 2005
3 *
4 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
5 *
6 */
7
8#include <linux/threads.h>
9#include <linux/kexec.h>
10#include <linux/reboot.h>
11
12void machine_crash_shutdown(struct pt_regs *regs)
13{
14}
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index cc1172b26873..fb8d8781a011 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -13,8 +13,9 @@
13#include <linux/slab.h> 13#include <linux/slab.h>
14#include <linux/bootmem.h> 14#include <linux/bootmem.h>
15#include <linux/elf.h> 15#include <linux/elf.h>
16#include <asm/ipl.h>
17#include <asm/os_info.h> 16#include <asm/os_info.h>
17#include <asm/elf.h>
18#include <asm/ipl.h>
18 19
19#define PTR_ADD(x, y) (((char *) (x)) + ((unsigned long) (y))) 20#define PTR_ADD(x, y) (((char *) (x)) + ((unsigned long) (y)))
20#define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y))) 21#define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y)))
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index 619c5d350726..cc84a24c023f 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -315,6 +315,11 @@ enum {
315 LONG_INSN_POPCNT, 315 LONG_INSN_POPCNT,
316 LONG_INSN_RISBHG, 316 LONG_INSN_RISBHG,
317 LONG_INSN_RISBLG, 317 LONG_INSN_RISBLG,
318 LONG_INSN_RINEXT,
319 LONG_INSN_RIEMIT,
320 LONG_INSN_TABORT,
321 LONG_INSN_TBEGIN,
322 LONG_INSN_TBEGINC,
318}; 323};
319 324
320static char *long_insn_name[] = { 325static char *long_insn_name[] = {
@@ -329,7 +334,12 @@ static char *long_insn_name[] = {
329 [LONG_INSN_LLGHRL] = "llghrl", 334 [LONG_INSN_LLGHRL] = "llghrl",
330 [LONG_INSN_POPCNT] = "popcnt", 335 [LONG_INSN_POPCNT] = "popcnt",
331 [LONG_INSN_RISBHG] = "risbhg", 336 [LONG_INSN_RISBHG] = "risbhg",
332 [LONG_INSN_RISBLG] = "risblk", 337 [LONG_INSN_RISBLG] = "risblg",
338 [LONG_INSN_RINEXT] = "rinext",
339 [LONG_INSN_RIEMIT] = "riemit",
340 [LONG_INSN_TABORT] = "tabort",
341 [LONG_INSN_TBEGIN] = "tbegin",
342 [LONG_INSN_TBEGINC] = "tbeginc",
333}; 343};
334 344
335static struct insn opcode[] = { 345static struct insn opcode[] = {
@@ -582,6 +592,17 @@ static struct insn opcode_a7[] = {
582 { "", 0, INSTR_INVALID } 592 { "", 0, INSTR_INVALID }
583}; 593};
584 594
595static struct insn opcode_aa[] = {
596#ifdef CONFIG_64BIT
597 { { 0, LONG_INSN_RINEXT }, 0x00, INSTR_RI_RI },
598 { "rion", 0x01, INSTR_RI_RI },
599 { "tric", 0x02, INSTR_RI_RI },
600 { "rioff", 0x03, INSTR_RI_RI },
601 { { 0, LONG_INSN_RIEMIT }, 0x04, INSTR_RI_RI },
602#endif
603 { "", 0, INSTR_INVALID }
604};
605
585static struct insn opcode_b2[] = { 606static struct insn opcode_b2[] = {
586#ifdef CONFIG_64BIT 607#ifdef CONFIG_64BIT
587 { "sske", 0x2b, INSTR_RRF_M0RR }, 608 { "sske", 0x2b, INSTR_RRF_M0RR },
@@ -594,6 +615,9 @@ static struct insn opcode_b2[] = {
594 { "lpswe", 0xb2, INSTR_S_RD }, 615 { "lpswe", 0xb2, INSTR_S_RD },
595 { "srnmt", 0xb9, INSTR_S_RD }, 616 { "srnmt", 0xb9, INSTR_S_RD },
596 { "lfas", 0xbd, INSTR_S_RD }, 617 { "lfas", 0xbd, INSTR_S_RD },
618 { "etndg", 0xec, INSTR_RRE_R0 },
619 { { 0, LONG_INSN_TABORT }, 0xfc, INSTR_S_RD },
620 { "tend", 0xf8, INSTR_S_RD },
597#endif 621#endif
598 { "stidp", 0x02, INSTR_S_RD }, 622 { "stidp", 0x02, INSTR_S_RD },
599 { "sck", 0x04, INSTR_S_RD }, 623 { "sck", 0x04, INSTR_S_RD },
@@ -1150,6 +1174,7 @@ static struct insn opcode_e3[] = {
1150 { "stfh", 0xcb, INSTR_RXY_RRRD }, 1174 { "stfh", 0xcb, INSTR_RXY_RRRD },
1151 { "chf", 0xcd, INSTR_RXY_RRRD }, 1175 { "chf", 0xcd, INSTR_RXY_RRRD },
1152 { "clhf", 0xcf, INSTR_RXY_RRRD }, 1176 { "clhf", 0xcf, INSTR_RXY_RRRD },
1177 { "ntstg", 0x25, INSTR_RXY_RRRD },
1153#endif 1178#endif
1154 { "lrv", 0x1e, INSTR_RXY_RRRD }, 1179 { "lrv", 0x1e, INSTR_RXY_RRRD },
1155 { "lrvh", 0x1f, INSTR_RXY_RRRD }, 1180 { "lrvh", 0x1f, INSTR_RXY_RRRD },
@@ -1173,6 +1198,8 @@ static struct insn opcode_e5[] = {
1173 { "mvhhi", 0x44, INSTR_SIL_RDI }, 1198 { "mvhhi", 0x44, INSTR_SIL_RDI },
1174 { "mvhi", 0x4c, INSTR_SIL_RDI }, 1199 { "mvhi", 0x4c, INSTR_SIL_RDI },
1175 { "mvghi", 0x48, INSTR_SIL_RDI }, 1200 { "mvghi", 0x48, INSTR_SIL_RDI },
1201 { { 0, LONG_INSN_TBEGIN }, 0x60, INSTR_SIL_RDU },
1202 { { 0, LONG_INSN_TBEGINC }, 0x61, INSTR_SIL_RDU },
1176#endif 1203#endif
1177 { "lasp", 0x00, INSTR_SSE_RDRD }, 1204 { "lasp", 0x00, INSTR_SSE_RDRD },
1178 { "tprot", 0x01, INSTR_SSE_RDRD }, 1205 { "tprot", 0x01, INSTR_SSE_RDRD },
@@ -1210,6 +1237,9 @@ static struct insn opcode_eb[] = {
1210 { "cliy", 0x55, INSTR_SIY_URD }, 1237 { "cliy", 0x55, INSTR_SIY_URD },
1211 { "oiy", 0x56, INSTR_SIY_URD }, 1238 { "oiy", 0x56, INSTR_SIY_URD },
1212 { "xiy", 0x57, INSTR_SIY_URD }, 1239 { "xiy", 0x57, INSTR_SIY_URD },
1240 { "lric", 0x60, INSTR_RSY_RDRM },
1241 { "stric", 0x61, INSTR_RSY_RDRM },
1242 { "mric", 0x62, INSTR_RSY_RDRM },
1213 { "icmh", 0x80, INSTR_RSE_RURD }, 1243 { "icmh", 0x80, INSTR_RSE_RURD },
1214 { "icmh", 0x80, INSTR_RSY_RURD }, 1244 { "icmh", 0x80, INSTR_RSY_RURD },
1215 { "icmy", 0x81, INSTR_RSY_RURD }, 1245 { "icmy", 0x81, INSTR_RSY_RURD },
@@ -1408,6 +1438,9 @@ static struct insn *find_insn(unsigned char *code)
1408 case 0xa7: 1438 case 0xa7:
1409 table = opcode_a7; 1439 table = opcode_a7;
1410 break; 1440 break;
1441 case 0xaa:
1442 table = opcode_aa;
1443 break;
1411 case 0xb2: 1444 case 0xb2:
1412 table = opcode_b2; 1445 table = opcode_b2;
1413 break; 1446 break;
@@ -1601,3 +1634,26 @@ void show_code(struct pt_regs *regs)
1601 } 1634 }
1602 printk("\n"); 1635 printk("\n");
1603} 1636}
1637
1638void print_fn_code(unsigned char *code, unsigned long len)
1639{
1640 char buffer[64], *ptr;
1641 int opsize, i;
1642
1643 while (len) {
1644 ptr = buffer;
1645 opsize = insn_length(*code);
1646 ptr += sprintf(ptr, "%p: ", code);
1647 for (i = 0; i < opsize; i++)
1648 ptr += sprintf(ptr, "%02x", code[i]);
1649 *ptr++ = '\t';
1650 if (i < 4)
1651 *ptr++ = '\t';
1652 ptr += print_insn(ptr, code, (unsigned long) code);
1653 *ptr++ = '\n';
1654 *ptr++ = 0;
1655 printk(buffer);
1656 code += opsize;
1657 len -= opsize;
1658 }
1659}
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 83c3271c442b..7f4717675c19 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -215,36 +215,54 @@ static noinline __init void init_kernel_storage_key(void)
215 PAGE_DEFAULT_KEY, 0); 215 PAGE_DEFAULT_KEY, 0);
216} 216}
217 217
218static __initdata struct sysinfo_3_2_2 vmms __aligned(PAGE_SIZE); 218static __initdata char sysinfo_page[PAGE_SIZE] __aligned(PAGE_SIZE);
219 219
220static noinline __init void detect_machine_type(void) 220static noinline __init void detect_machine_type(void)
221{ 221{
222 struct sysinfo_3_2_2 *vmms = (struct sysinfo_3_2_2 *)&sysinfo_page;
223
222 /* Check current-configuration-level */ 224 /* Check current-configuration-level */
223 if ((stsi(NULL, 0, 0, 0) >> 28) <= 2) { 225 if (stsi(NULL, 0, 0, 0) <= 2) {
224 S390_lowcore.machine_flags |= MACHINE_FLAG_LPAR; 226 S390_lowcore.machine_flags |= MACHINE_FLAG_LPAR;
225 return; 227 return;
226 } 228 }
227 /* Get virtual-machine cpu information. */ 229 /* Get virtual-machine cpu information. */
228 if (stsi(&vmms, 3, 2, 2) == -ENOSYS || !vmms.count) 230 if (stsi(vmms, 3, 2, 2) || !vmms->count)
229 return; 231 return;
230 232
231 /* Running under KVM? If not we assume z/VM */ 233 /* Running under KVM? If not we assume z/VM */
232 if (!memcmp(vmms.vm[0].cpi, "\xd2\xe5\xd4", 3)) 234 if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3))
233 S390_lowcore.machine_flags |= MACHINE_FLAG_KVM; 235 S390_lowcore.machine_flags |= MACHINE_FLAG_KVM;
234 else 236 else
235 S390_lowcore.machine_flags |= MACHINE_FLAG_VM; 237 S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
236} 238}
237 239
240static __init void setup_topology(void)
241{
242#ifdef CONFIG_64BIT
243 int max_mnest;
244
245 if (!test_facility(11))
246 return;
247 S390_lowcore.machine_flags |= MACHINE_FLAG_TOPOLOGY;
248 for (max_mnest = 6; max_mnest > 1; max_mnest--) {
249 if (stsi(&sysinfo_page, 15, 1, max_mnest) == 0)
250 break;
251 }
252 topology_max_mnest = max_mnest;
253#endif
254}
255
238static void early_pgm_check_handler(void) 256static void early_pgm_check_handler(void)
239{ 257{
240 unsigned long addr;
241 const struct exception_table_entry *fixup; 258 const struct exception_table_entry *fixup;
259 unsigned long addr;
242 260
243 addr = S390_lowcore.program_old_psw.addr; 261 addr = S390_lowcore.program_old_psw.addr;
244 fixup = search_exception_tables(addr & PSW_ADDR_INSN); 262 fixup = search_exception_tables(addr & PSW_ADDR_INSN);
245 if (!fixup) 263 if (!fixup)
246 disabled_wait(0); 264 disabled_wait(0);
247 S390_lowcore.program_old_psw.addr = fixup->fixup | PSW_ADDR_AMODE; 265 S390_lowcore.program_old_psw.addr = extable_fixup(fixup)|PSW_ADDR_AMODE;
248} 266}
249 267
250static noinline __init void setup_lowcore_early(void) 268static noinline __init void setup_lowcore_early(void)
@@ -267,12 +285,10 @@ static noinline __init void setup_facility_list(void)
267 285
268static noinline __init void setup_hpage(void) 286static noinline __init void setup_hpage(void)
269{ 287{
270#ifndef CONFIG_DEBUG_PAGEALLOC
271 if (!test_facility(2) || !test_facility(8)) 288 if (!test_facility(2) || !test_facility(8))
272 return; 289 return;
273 S390_lowcore.machine_flags |= MACHINE_FLAG_HPAGE; 290 S390_lowcore.machine_flags |= MACHINE_FLAG_HPAGE;
274 __ctl_set_bit(0, 23); 291 __ctl_set_bit(0, 23);
275#endif
276} 292}
277 293
278static __init void detect_mvpg(void) 294static __init void detect_mvpg(void)
@@ -366,12 +382,12 @@ static __init void detect_machine_facilities(void)
366 S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE; 382 S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE;
367 if (test_facility(8)) 383 if (test_facility(8))
368 S390_lowcore.machine_flags |= MACHINE_FLAG_PFMF; 384 S390_lowcore.machine_flags |= MACHINE_FLAG_PFMF;
369 if (test_facility(11))
370 S390_lowcore.machine_flags |= MACHINE_FLAG_TOPOLOGY;
371 if (test_facility(27)) 385 if (test_facility(27))
372 S390_lowcore.machine_flags |= MACHINE_FLAG_MVCOS; 386 S390_lowcore.machine_flags |= MACHINE_FLAG_MVCOS;
373 if (test_facility(40)) 387 if (test_facility(40))
374 S390_lowcore.machine_flags |= MACHINE_FLAG_SPP; 388 S390_lowcore.machine_flags |= MACHINE_FLAG_SPP;
389 if (test_facility(50) && test_facility(73))
390 S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
375#endif 391#endif
376} 392}
377 393
@@ -441,7 +457,6 @@ static void __init setup_boot_command_line(void)
441 append_to_cmdline(append_ipl_scpdata); 457 append_to_cmdline(append_ipl_scpdata);
442} 458}
443 459
444
445/* 460/*
446 * Save ipl parameters, clear bss memory, initialize storage keys 461 * Save ipl parameters, clear bss memory, initialize storage keys
447 * and create a kernel NSS at startup if the SAVESYS= parm is defined 462 * and create a kernel NSS at startup if the SAVESYS= parm is defined
@@ -468,6 +483,7 @@ void __init startup_init(void)
468 detect_diag44(); 483 detect_diag44();
469 detect_machine_facilities(); 484 detect_machine_facilities();
470 setup_hpage(); 485 setup_hpage();
486 setup_topology();
471 sclp_facilities_detect(); 487 sclp_facilities_detect();
472 detect_memory_layout(memory_chunk); 488 detect_memory_layout(memory_chunk);
473#ifdef CONFIG_DYNAMIC_FTRACE 489#ifdef CONFIG_DYNAMIC_FTRACE
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 349b7eeb348a..7549985402f7 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -10,6 +10,7 @@
10 10
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/linkage.h> 12#include <linux/linkage.h>
13#include <asm/processor.h>
13#include <asm/cache.h> 14#include <asm/cache.h>
14#include <asm/errno.h> 15#include <asm/errno.h>
15#include <asm/ptrace.h> 16#include <asm/ptrace.h>
@@ -412,6 +413,11 @@ ENTRY(pgm_check_handler)
4121: UPDATE_VTIME %r14,__LC_SYNC_ENTER_TIMER 4131: UPDATE_VTIME %r14,__LC_SYNC_ENTER_TIMER
413 LAST_BREAK %r14 414 LAST_BREAK %r14
414 lg %r15,__LC_KERNEL_STACK 415 lg %r15,__LC_KERNEL_STACK
416 lg %r14,__TI_task(%r12)
417 lghi %r13,__LC_PGM_TDB
418 tm __LC_PGM_ILC+2,0x02 # check for transaction abort
419 jz 2f
420 mvc __THREAD_trap_tdb(256,%r14),0(%r13)
4152: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 4212: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
416 la %r11,STACK_FRAME_OVERHEAD(%r15) 422 la %r11,STACK_FRAME_OVERHEAD(%r15)
417 stmg %r0,%r7,__PT_R0(%r11) 423 stmg %r0,%r7,__PT_R0(%r11)
@@ -422,13 +428,12 @@ ENTRY(pgm_check_handler)
422 stg %r10,__PT_ARGS(%r11) 428 stg %r10,__PT_ARGS(%r11)
423 tm __LC_PGM_ILC+3,0x80 # check for per exception 429 tm __LC_PGM_ILC+3,0x80 # check for per exception
424 jz 0f 430 jz 0f
425 lg %r1,__TI_task(%r12)
426 tmhh %r8,0x0001 # kernel per event ? 431 tmhh %r8,0x0001 # kernel per event ?
427 jz pgm_kprobe 432 jz pgm_kprobe
428 oi __TI_flags+7(%r12),_TIF_PER_TRAP 433 oi __TI_flags+7(%r12),_TIF_PER_TRAP
429 mvc __THREAD_per_address(8,%r1),__LC_PER_ADDRESS 434 mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS
430 mvc __THREAD_per_cause(2,%r1),__LC_PER_CAUSE 435 mvc __THREAD_per_cause(2,%r14),__LC_PER_CAUSE
431 mvc __THREAD_per_paid(1,%r1),__LC_PER_PAID 436 mvc __THREAD_per_paid(1,%r14),__LC_PER_PAID
4320: REENABLE_IRQS 4370: REENABLE_IRQS
433 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 438 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
434 larl %r1,pgm_check_table 439 larl %r1,pgm_check_table
@@ -1004,9 +1009,7 @@ sie_fault:
1004.Lhost_id: 1009.Lhost_id:
1005 .quad 0 1010 .quad 0
1006 1011
1007 .section __ex_table,"a" 1012 EX_TABLE(sie_loop,sie_fault)
1008 .quad sie_loop,sie_fault
1009 .previous
1010#endif 1013#endif
1011 1014
1012 .section .rodata, "a" 1015 .section .rodata, "a"
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index dd7630d8aab7..6cdc55b26d68 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -30,33 +30,35 @@ struct irq_class {
30}; 30};
31 31
32static const struct irq_class intrclass_names[] = { 32static const struct irq_class intrclass_names[] = {
33 {.name = "EXT" }, 33 [EXTERNAL_INTERRUPT] = {.name = "EXT"},
34 {.name = "I/O" }, 34 [IO_INTERRUPT] = {.name = "I/O"},
35 {.name = "CLK", .desc = "[EXT] Clock Comparator" }, 35 [EXTINT_CLK] = {.name = "CLK", .desc = "[EXT] Clock Comparator"},
36 {.name = "EXC", .desc = "[EXT] External Call" }, 36 [EXTINT_EXC] = {.name = "EXC", .desc = "[EXT] External Call"},
37 {.name = "EMS", .desc = "[EXT] Emergency Signal" }, 37 [EXTINT_EMS] = {.name = "EMS", .desc = "[EXT] Emergency Signal"},
38 {.name = "TMR", .desc = "[EXT] CPU Timer" }, 38 [EXTINT_TMR] = {.name = "TMR", .desc = "[EXT] CPU Timer"},
39 {.name = "TAL", .desc = "[EXT] Timing Alert" }, 39 [EXTINT_TLA] = {.name = "TAL", .desc = "[EXT] Timing Alert"},
40 {.name = "PFL", .desc = "[EXT] Pseudo Page Fault" }, 40 [EXTINT_PFL] = {.name = "PFL", .desc = "[EXT] Pseudo Page Fault"},
41 {.name = "DSD", .desc = "[EXT] DASD Diag" }, 41 [EXTINT_DSD] = {.name = "DSD", .desc = "[EXT] DASD Diag"},
42 {.name = "VRT", .desc = "[EXT] Virtio" }, 42 [EXTINT_VRT] = {.name = "VRT", .desc = "[EXT] Virtio"},
43 {.name = "SCP", .desc = "[EXT] Service Call" }, 43 [EXTINT_SCP] = {.name = "SCP", .desc = "[EXT] Service Call"},
44 {.name = "IUC", .desc = "[EXT] IUCV" }, 44 [EXTINT_IUC] = {.name = "IUC", .desc = "[EXT] IUCV"},
45 {.name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling" }, 45 [EXTINT_CMS] = {.name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling"},
46 {.name = "CMC", .desc = "[EXT] CPU-Measurement: Counter" }, 46 [EXTINT_CMC] = {.name = "CMC", .desc = "[EXT] CPU-Measurement: Counter"},
47 {.name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt" }, 47 [EXTINT_CMR] = {.name = "CMR", .desc = "[EXT] CPU-Measurement: RI"},
48 {.name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt" }, 48 [IOINT_CIO] = {.name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt"},
49 {.name = "DAS", .desc = "[I/O] DASD" }, 49 [IOINT_QAI] = {.name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt"},
50 {.name = "C15", .desc = "[I/O] 3215" }, 50 [IOINT_DAS] = {.name = "DAS", .desc = "[I/O] DASD"},
51 {.name = "C70", .desc = "[I/O] 3270" }, 51 [IOINT_C15] = {.name = "C15", .desc = "[I/O] 3215"},
52 {.name = "TAP", .desc = "[I/O] Tape" }, 52 [IOINT_C70] = {.name = "C70", .desc = "[I/O] 3270"},
53 {.name = "VMR", .desc = "[I/O] Unit Record Devices" }, 53 [IOINT_TAP] = {.name = "TAP", .desc = "[I/O] Tape"},
54 {.name = "LCS", .desc = "[I/O] LCS" }, 54 [IOINT_VMR] = {.name = "VMR", .desc = "[I/O] Unit Record Devices"},
55 {.name = "CLW", .desc = "[I/O] CLAW" }, 55 [IOINT_LCS] = {.name = "LCS", .desc = "[I/O] LCS"},
56 {.name = "CTC", .desc = "[I/O] CTC" }, 56 [IOINT_CLW] = {.name = "CLW", .desc = "[I/O] CLAW"},
57 {.name = "APB", .desc = "[I/O] AP Bus" }, 57 [IOINT_CTC] = {.name = "CTC", .desc = "[I/O] CTC"},
58 {.name = "CSC", .desc = "[I/O] CHSC Subchannel" }, 58 [IOINT_APB] = {.name = "APB", .desc = "[I/O] AP Bus"},
59 {.name = "NMI", .desc = "[NMI] Machine Check" }, 59 [IOINT_ADM] = {.name = "ADM", .desc = "[I/O] EADM Subchannel"},
60 [IOINT_CSC] = {.name = "CSC", .desc = "[I/O] CHSC Subchannel"},
61 [NMI_NMI] = {.name = "NMI", .desc = "[NMI] Machine Check"},
60}; 62};
61 63
62/* 64/*
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 8aa634f5944b..d1c7214e157c 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -547,7 +547,7 @@ static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr)
547 */ 547 */
548 entry = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN); 548 entry = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
549 if (entry) { 549 if (entry) {
550 regs->psw.addr = entry->fixup | PSW_ADDR_AMODE; 550 regs->psw.addr = extable_fixup(entry) | PSW_ADDR_AMODE;
551 return 1; 551 return 1;
552 } 552 }
553 553
diff --git a/arch/s390/kernel/lgr.c b/arch/s390/kernel/lgr.c
index eca94e74d19a..6ea6d69339b5 100644
--- a/arch/s390/kernel/lgr.c
+++ b/arch/s390/kernel/lgr.c
@@ -51,16 +51,6 @@ static struct lgr_info lgr_info_cur;
51static struct debug_info *lgr_dbf; 51static struct debug_info *lgr_dbf;
52 52
53/* 53/*
54 * Return number of valid stsi levels
55 */
56static inline int stsi_0(void)
57{
58 int rc = stsi(NULL, 0, 0, 0);
59
60 return rc == -ENOSYS ? rc : (((unsigned int) rc) >> 28);
61}
62
63/*
64 * Copy buffer and then convert it to ASCII 54 * Copy buffer and then convert it to ASCII
65 */ 55 */
66static void cpascii(char *dst, char *src, int size) 56static void cpascii(char *dst, char *src, int size)
@@ -76,7 +66,7 @@ static void lgr_stsi_1_1_1(struct lgr_info *lgr_info)
76{ 66{
77 struct sysinfo_1_1_1 *si = (void *) lgr_page; 67 struct sysinfo_1_1_1 *si = (void *) lgr_page;
78 68
79 if (stsi(si, 1, 1, 1) == -ENOSYS) 69 if (stsi(si, 1, 1, 1))
80 return; 70 return;
81 cpascii(lgr_info->manufacturer, si->manufacturer, 71 cpascii(lgr_info->manufacturer, si->manufacturer,
82 sizeof(si->manufacturer)); 72 sizeof(si->manufacturer));
@@ -93,7 +83,7 @@ static void lgr_stsi_2_2_2(struct lgr_info *lgr_info)
93{ 83{
94 struct sysinfo_2_2_2 *si = (void *) lgr_page; 84 struct sysinfo_2_2_2 *si = (void *) lgr_page;
95 85
96 if (stsi(si, 2, 2, 2) == -ENOSYS) 86 if (stsi(si, 2, 2, 2))
97 return; 87 return;
98 cpascii(lgr_info->name, si->name, sizeof(si->name)); 88 cpascii(lgr_info->name, si->name, sizeof(si->name));
99 memcpy(&lgr_info->lpar_number, &si->lpar_number, 89 memcpy(&lgr_info->lpar_number, &si->lpar_number,
@@ -108,7 +98,7 @@ static void lgr_stsi_3_2_2(struct lgr_info *lgr_info)
108 struct sysinfo_3_2_2 *si = (void *) lgr_page; 98 struct sysinfo_3_2_2 *si = (void *) lgr_page;
109 int i; 99 int i;
110 100
111 if (stsi(si, 3, 2, 2) == -ENOSYS) 101 if (stsi(si, 3, 2, 2))
112 return; 102 return;
113 for (i = 0; i < min_t(u8, si->count, VM_LEVEL_MAX); i++) { 103 for (i = 0; i < min_t(u8, si->count, VM_LEVEL_MAX); i++) {
114 cpascii(lgr_info->vm[i].name, si->vm[i].name, 104 cpascii(lgr_info->vm[i].name, si->vm[i].name,
@@ -124,16 +114,17 @@ static void lgr_stsi_3_2_2(struct lgr_info *lgr_info)
124 */ 114 */
125static void lgr_info_get(struct lgr_info *lgr_info) 115static void lgr_info_get(struct lgr_info *lgr_info)
126{ 116{
117 int level;
118
127 memset(lgr_info, 0, sizeof(*lgr_info)); 119 memset(lgr_info, 0, sizeof(*lgr_info));
128 stfle(lgr_info->stfle_fac_list, ARRAY_SIZE(lgr_info->stfle_fac_list)); 120 stfle(lgr_info->stfle_fac_list, ARRAY_SIZE(lgr_info->stfle_fac_list));
129 lgr_info->level = stsi_0(); 121 level = stsi(NULL, 0, 0, 0);
130 if (lgr_info->level == -ENOSYS) 122 lgr_info->level = level;
131 return; 123 if (level >= 1)
132 if (lgr_info->level >= 1)
133 lgr_stsi_1_1_1(lgr_info); 124 lgr_stsi_1_1_1(lgr_info);
134 if (lgr_info->level >= 2) 125 if (level >= 2)
135 lgr_stsi_2_2_2(lgr_info); 126 lgr_stsi_2_2_2(lgr_info);
136 if (lgr_info->level >= 3) 127 if (level >= 3)
137 lgr_stsi_3_2_2(lgr_info); 128 lgr_stsi_3_2_2(lgr_info);
138} 129}
139 130
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 493304bdf1c7..b3de27700016 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -21,6 +21,7 @@
21#include <asm/reset.h> 21#include <asm/reset.h>
22#include <asm/ipl.h> 22#include <asm/ipl.h>
23#include <asm/diag.h> 23#include <asm/diag.h>
24#include <asm/elf.h>
24#include <asm/asm-offsets.h> 25#include <asm/asm-offsets.h>
25#include <asm/os_info.h> 26#include <asm/os_info.h>
26 27
@@ -31,8 +32,6 @@ extern const unsigned long long relocate_kernel_len;
31 32
32#ifdef CONFIG_CRASH_DUMP 33#ifdef CONFIG_CRASH_DUMP
33 34
34void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
35
36/* 35/*
37 * Create ELF notes for one CPU 36 * Create ELF notes for one CPU
38 */ 37 */
@@ -159,7 +158,7 @@ int machine_kexec_prepare(struct kimage *image)
159 158
160 /* Can't replace kernel image since it is read-only. */ 159 /* Can't replace kernel image since it is read-only. */
161 if (ipl_flags & IPL_NSS_VALID) 160 if (ipl_flags & IPL_NSS_VALID)
162 return -ENOSYS; 161 return -EOPNOTSUPP;
163 162
164 if (image->type == KEXEC_TYPE_CRASH) 163 if (image->type == KEXEC_TYPE_CRASH)
165 return machine_kexec_prepare_kdump(); 164 return machine_kexec_prepare_kdump();
@@ -191,6 +190,10 @@ void machine_shutdown(void)
191{ 190{
192} 191}
193 192
193void machine_crash_shutdown(struct pt_regs *regs)
194{
195}
196
194/* 197/*
195 * Do normal kexec 198 * Do normal kexec
196 */ 199 */
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 733175373a4c..5024be27df44 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -26,10 +26,12 @@
26#include <asm/io.h> 26#include <asm/io.h>
27#include <asm/processor.h> 27#include <asm/processor.h>
28#include <asm/vtimer.h> 28#include <asm/vtimer.h>
29#include <asm/exec.h>
29#include <asm/irq.h> 30#include <asm/irq.h>
30#include <asm/nmi.h> 31#include <asm/nmi.h>
31#include <asm/smp.h> 32#include <asm/smp.h>
32#include <asm/switch_to.h> 33#include <asm/switch_to.h>
34#include <asm/runtime_instr.h>
33#include "entry.h" 35#include "entry.h"
34 36
35asmlinkage void ret_from_fork(void) asm ("ret_from_fork"); 37asmlinkage void ret_from_fork(void) asm ("ret_from_fork");
@@ -132,6 +134,7 @@ EXPORT_SYMBOL(kernel_thread);
132 */ 134 */
133void exit_thread(void) 135void exit_thread(void)
134{ 136{
137 exit_thread_runtime_instr();
135} 138}
136 139
137void flush_thread(void) 140void flush_thread(void)
@@ -170,6 +173,11 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
170 /* Save access registers to new thread structure. */ 173 /* Save access registers to new thread structure. */
171 save_access_regs(&p->thread.acrs[0]); 174 save_access_regs(&p->thread.acrs[0]);
172 175
176 /* Don't copy runtime instrumentation info */
177 p->thread.ri_cb = NULL;
178 p->thread.ri_signum = 0;
179 frame->childregs.psw.mask &= ~PSW_MASK_RI;
180
173#ifndef CONFIG_64BIT 181#ifndef CONFIG_64BIT
174 /* 182 /*
175 * save fprs to current->thread.fp_regs to merge them with 183 * save fprs to current->thread.fp_regs to merge them with
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 572d4c9cb33b..753c41d0ffd3 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -39,9 +39,9 @@ void __cpuinit cpu_init(void)
39 */ 39 */
40static int show_cpuinfo(struct seq_file *m, void *v) 40static int show_cpuinfo(struct seq_file *m, void *v)
41{ 41{
42 static const char *hwcap_str[10] = { 42 static const char *hwcap_str[] = {
43 "esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp", 43 "esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp",
44 "edat", "etf3eh", "highgprs" 44 "edat", "etf3eh", "highgprs", "te"
45 }; 45 };
46 unsigned long n = (unsigned long) v - 1; 46 unsigned long n = (unsigned long) v - 1;
47 int i; 47 int i;
@@ -54,10 +54,11 @@ static int show_cpuinfo(struct seq_file *m, void *v)
54 num_online_cpus(), loops_per_jiffy/(500000/HZ), 54 num_online_cpus(), loops_per_jiffy/(500000/HZ),
55 (loops_per_jiffy/(5000/HZ))%100); 55 (loops_per_jiffy/(5000/HZ))%100);
56 seq_puts(m, "features\t: "); 56 seq_puts(m, "features\t: ");
57 for (i = 0; i < 10; i++) 57 for (i = 0; i < ARRAY_SIZE(hwcap_str); i++)
58 if (hwcap_str[i] && (elf_hwcap & (1UL << i))) 58 if (hwcap_str[i] && (elf_hwcap & (1UL << i)))
59 seq_printf(m, "%s ", hwcap_str[i]); 59 seq_printf(m, "%s ", hwcap_str[i]);
60 seq_puts(m, "\n"); 60 seq_puts(m, "\n");
61 show_cacheinfo(m);
61 } 62 }
62 get_online_cpus(); 63 get_online_cpus();
63 if (cpu_online(n)) { 64 if (cpu_online(n)) {
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index e4be113fbac6..a314c57f4e94 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -42,6 +42,7 @@ enum s390_regset {
42 REGSET_GENERAL, 42 REGSET_GENERAL,
43 REGSET_FP, 43 REGSET_FP,
44 REGSET_LAST_BREAK, 44 REGSET_LAST_BREAK,
45 REGSET_TDB,
45 REGSET_SYSTEM_CALL, 46 REGSET_SYSTEM_CALL,
46 REGSET_GENERAL_EXTENDED, 47 REGSET_GENERAL_EXTENDED,
47}; 48};
@@ -52,6 +53,22 @@ void update_per_regs(struct task_struct *task)
52 struct thread_struct *thread = &task->thread; 53 struct thread_struct *thread = &task->thread;
53 struct per_regs old, new; 54 struct per_regs old, new;
54 55
56#ifdef CONFIG_64BIT
57 /* Take care of the enable/disable of transactional execution. */
58 if (MACHINE_HAS_TE) {
59 unsigned long cr0, cr0_new;
60
61 __ctl_store(cr0, 0, 0);
62 /* set or clear transaction execution bits 8 and 9. */
63 if (task->thread.per_flags & PER_FLAG_NO_TE)
64 cr0_new = cr0 & ~(3UL << 54);
65 else
66 cr0_new = cr0 | (3UL << 54);
67 /* Only load control register 0 if necessary. */
68 if (cr0 != cr0_new)
69 __ctl_load(cr0_new, 0, 0);
70 }
71#endif
55 /* Copy user specified PER registers */ 72 /* Copy user specified PER registers */
56 new.control = thread->per_user.control; 73 new.control = thread->per_user.control;
57 new.start = thread->per_user.start; 74 new.start = thread->per_user.start;
@@ -60,6 +77,10 @@ void update_per_regs(struct task_struct *task)
60 /* merge TIF_SINGLE_STEP into user specified PER registers. */ 77 /* merge TIF_SINGLE_STEP into user specified PER registers. */
61 if (test_tsk_thread_flag(task, TIF_SINGLE_STEP)) { 78 if (test_tsk_thread_flag(task, TIF_SINGLE_STEP)) {
62 new.control |= PER_EVENT_IFETCH; 79 new.control |= PER_EVENT_IFETCH;
80#ifdef CONFIG_64BIT
81 new.control |= PER_CONTROL_SUSPENSION;
82 new.control |= PER_EVENT_TRANSACTION_END;
83#endif
63 new.start = 0; 84 new.start = 0;
64 new.end = PSW_ADDR_INSN; 85 new.end = PSW_ADDR_INSN;
65 } 86 }
@@ -100,6 +121,7 @@ void ptrace_disable(struct task_struct *task)
100 memset(&task->thread.per_event, 0, sizeof(task->thread.per_event)); 121 memset(&task->thread.per_event, 0, sizeof(task->thread.per_event));
101 clear_tsk_thread_flag(task, TIF_SINGLE_STEP); 122 clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
102 clear_tsk_thread_flag(task, TIF_PER_TRAP); 123 clear_tsk_thread_flag(task, TIF_PER_TRAP);
124 task->thread.per_flags = 0;
103} 125}
104 126
105#ifndef CONFIG_64BIT 127#ifndef CONFIG_64BIT
@@ -416,6 +438,16 @@ long arch_ptrace(struct task_struct *child, long request,
416 put_user(task_thread_info(child)->last_break, 438 put_user(task_thread_info(child)->last_break,
417 (unsigned long __user *) data); 439 (unsigned long __user *) data);
418 return 0; 440 return 0;
441 case PTRACE_ENABLE_TE:
442 if (!MACHINE_HAS_TE)
443 return -EIO;
444 child->thread.per_flags &= ~PER_FLAG_NO_TE;
445 return 0;
446 case PTRACE_DISABLE_TE:
447 if (!MACHINE_HAS_TE)
448 return -EIO;
449 child->thread.per_flags |= PER_FLAG_NO_TE;
450 return 0;
419 default: 451 default:
420 /* Removing high order bit from addr (only for 31 bit). */ 452 /* Removing high order bit from addr (only for 31 bit). */
421 addr &= PSW_ADDR_INSN; 453 addr &= PSW_ADDR_INSN;
@@ -903,6 +935,28 @@ static int s390_last_break_set(struct task_struct *target,
903 return 0; 935 return 0;
904} 936}
905 937
938static int s390_tdb_get(struct task_struct *target,
939 const struct user_regset *regset,
940 unsigned int pos, unsigned int count,
941 void *kbuf, void __user *ubuf)
942{
943 struct pt_regs *regs = task_pt_regs(target);
944 unsigned char *data;
945
946 if (!(regs->int_code & 0x200))
947 return -ENODATA;
948 data = target->thread.trap_tdb;
949 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, data, 0, 256);
950}
951
952static int s390_tdb_set(struct task_struct *target,
953 const struct user_regset *regset,
954 unsigned int pos, unsigned int count,
955 const void *kbuf, const void __user *ubuf)
956{
957 return 0;
958}
959
906#endif 960#endif
907 961
908static int s390_system_call_get(struct task_struct *target, 962static int s390_system_call_get(struct task_struct *target,
@@ -951,6 +1005,14 @@ static const struct user_regset s390_regsets[] = {
951 .get = s390_last_break_get, 1005 .get = s390_last_break_get,
952 .set = s390_last_break_set, 1006 .set = s390_last_break_set,
953 }, 1007 },
1008 [REGSET_TDB] = {
1009 .core_note_type = NT_S390_TDB,
1010 .n = 1,
1011 .size = 256,
1012 .align = 1,
1013 .get = s390_tdb_get,
1014 .set = s390_tdb_set,
1015 },
954#endif 1016#endif
955 [REGSET_SYSTEM_CALL] = { 1017 [REGSET_SYSTEM_CALL] = {
956 .core_note_type = NT_S390_SYSTEM_CALL, 1018 .core_note_type = NT_S390_SYSTEM_CALL,
@@ -1148,6 +1210,14 @@ static const struct user_regset s390_compat_regsets[] = {
1148 .get = s390_compat_last_break_get, 1210 .get = s390_compat_last_break_get,
1149 .set = s390_compat_last_break_set, 1211 .set = s390_compat_last_break_set,
1150 }, 1212 },
1213 [REGSET_TDB] = {
1214 .core_note_type = NT_S390_TDB,
1215 .n = 1,
1216 .size = 256,
1217 .align = 1,
1218 .get = s390_tdb_get,
1219 .set = s390_tdb_set,
1220 },
1151 [REGSET_SYSTEM_CALL] = { 1221 [REGSET_SYSTEM_CALL] = {
1152 .core_note_type = NT_S390_SYSTEM_CALL, 1222 .core_note_type = NT_S390_SYSTEM_CALL,
1153 .n = 1, 1223 .n = 1,
diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c
new file mode 100644
index 000000000000..61066f6f71a5
--- /dev/null
+++ b/arch/s390/kernel/runtime_instr.c
@@ -0,0 +1,150 @@
1/*
2 * Copyright IBM Corp. 2012
3 * Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
4 */
5
6#include <linux/kernel.h>
7#include <linux/syscalls.h>
8#include <linux/signal.h>
9#include <linux/mm.h>
10#include <linux/slab.h>
11#include <linux/init.h>
12#include <linux/errno.h>
13#include <linux/kernel_stat.h>
14#include <asm/runtime_instr.h>
15#include <asm/cpu_mf.h>
16#include <asm/irq.h>
17
18/* empty control block to disable RI by loading it */
19struct runtime_instr_cb runtime_instr_empty_cb;
20
21static int runtime_instr_avail(void)
22{
23 return test_facility(64);
24}
25
26static void disable_runtime_instr(void)
27{
28 struct pt_regs *regs = task_pt_regs(current);
29
30 load_runtime_instr_cb(&runtime_instr_empty_cb);
31
32 /*
33 * Make sure the RI bit is deleted from the PSW. If the user did not
34 * switch off RI before the system call the process will get a
35 * specification exception otherwise.
36 */
37 regs->psw.mask &= ~PSW_MASK_RI;
38}
39
40static void init_runtime_instr_cb(struct runtime_instr_cb *cb)
41{
42 cb->buf_limit = 0xfff;
43 if (s390_user_mode == HOME_SPACE_MODE)
44 cb->home_space = 1;
45 cb->int_requested = 1;
46 cb->pstate = 1;
47 cb->pstate_set_buf = 1;
48 cb->pstate_sample = 1;
49 cb->pstate_collect = 1;
50 cb->key = PAGE_DEFAULT_KEY;
51 cb->valid = 1;
52}
53
54void exit_thread_runtime_instr(void)
55{
56 struct task_struct *task = current;
57
58 if (!task->thread.ri_cb)
59 return;
60 disable_runtime_instr();
61 kfree(task->thread.ri_cb);
62 task->thread.ri_signum = 0;
63 task->thread.ri_cb = NULL;
64}
65
66static void runtime_instr_int_handler(struct ext_code ext_code,
67 unsigned int param32, unsigned long param64)
68{
69 struct siginfo info;
70
71 if (!(param32 & CPU_MF_INT_RI_MASK))
72 return;
73
74 kstat_cpu(smp_processor_id()).irqs[EXTINT_CMR]++;
75
76 if (!current->thread.ri_cb)
77 return;
78 if (current->thread.ri_signum < SIGRTMIN ||
79 current->thread.ri_signum > SIGRTMAX) {
80 WARN_ON_ONCE(1);
81 return;
82 }
83
84 memset(&info, 0, sizeof(info));
85 info.si_signo = current->thread.ri_signum;
86 info.si_code = SI_QUEUE;
87 if (param32 & CPU_MF_INT_RI_BUF_FULL)
88 info.si_int = ENOBUFS;
89 else if (param32 & CPU_MF_INT_RI_HALTED)
90 info.si_int = ECANCELED;
91 else
92 return; /* unknown reason */
93
94 send_sig_info(current->thread.ri_signum, &info, current);
95}
96
97SYSCALL_DEFINE2(s390_runtime_instr, int, command, int, signum)
98{
99 struct runtime_instr_cb *cb;
100
101 if (!runtime_instr_avail())
102 return -EOPNOTSUPP;
103
104 if (command == S390_RUNTIME_INSTR_STOP) {
105 preempt_disable();
106 exit_thread_runtime_instr();
107 preempt_enable();
108 return 0;
109 }
110
111 if (command != S390_RUNTIME_INSTR_START ||
112 (signum < SIGRTMIN || signum > SIGRTMAX))
113 return -EINVAL;
114
115 if (!current->thread.ri_cb) {
116 cb = kzalloc(sizeof(*cb), GFP_KERNEL);
117 if (!cb)
118 return -ENOMEM;
119 } else {
120 cb = current->thread.ri_cb;
121 memset(cb, 0, sizeof(*cb));
122 }
123
124 init_runtime_instr_cb(cb);
125 current->thread.ri_signum = signum;
126
127 /* now load the control block to make it available */
128 preempt_disable();
129 current->thread.ri_cb = cb;
130 load_runtime_instr_cb(cb);
131 preempt_enable();
132 return 0;
133}
134
135static int __init runtime_instr_init(void)
136{
137 int rc;
138
139 if (!runtime_instr_avail())
140 return 0;
141
142 measurement_alert_subclass_register();
143 rc = register_external_interrupt(0x1407, runtime_instr_int_handler);
144 if (rc)
145 measurement_alert_subclass_unregister();
146 else
147 pr_info("Runtime instrumentation facility initialized\n");
148 return rc;
149}
150device_initcall(runtime_instr_init);
diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c
index 57b536649b00..9bdbcef1da9e 100644
--- a/arch/s390/kernel/s390_ksyms.c
+++ b/arch/s390/kernel/s390_ksyms.c
@@ -8,3 +8,5 @@ EXPORT_SYMBOL(_mcount);
8#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) 8#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
9EXPORT_SYMBOL(sie64a); 9EXPORT_SYMBOL(sie64a);
10#endif 10#endif
11EXPORT_SYMBOL(memcpy);
12EXPORT_SYMBOL(memset);
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 40b57693de38..afa9fdba200e 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -302,10 +302,10 @@ static int __init parse_vmalloc(char *arg)
302} 302}
303early_param("vmalloc", parse_vmalloc); 303early_param("vmalloc", parse_vmalloc);
304 304
305unsigned int addressing_mode = HOME_SPACE_MODE; 305unsigned int s390_user_mode = PRIMARY_SPACE_MODE;
306EXPORT_SYMBOL_GPL(addressing_mode); 306EXPORT_SYMBOL_GPL(s390_user_mode);
307 307
308static int set_amode_primary(void) 308static void __init set_user_mode_primary(void)
309{ 309{
310 psw_kernel_bits = (psw_kernel_bits & ~PSW_MASK_ASC) | PSW_ASC_HOME; 310 psw_kernel_bits = (psw_kernel_bits & ~PSW_MASK_ASC) | PSW_ASC_HOME;
311 psw_user_bits = (psw_user_bits & ~PSW_MASK_ASC) | PSW_ASC_PRIMARY; 311 psw_user_bits = (psw_user_bits & ~PSW_MASK_ASC) | PSW_ASC_PRIMARY;
@@ -313,48 +313,30 @@ static int set_amode_primary(void)
313 psw32_user_bits = 313 psw32_user_bits =
314 (psw32_user_bits & ~PSW32_MASK_ASC) | PSW32_ASC_PRIMARY; 314 (psw32_user_bits & ~PSW32_MASK_ASC) | PSW32_ASC_PRIMARY;
315#endif 315#endif
316 316 uaccess = MACHINE_HAS_MVCOS ? uaccess_mvcos_switch : uaccess_pt;
317 if (MACHINE_HAS_MVCOS) {
318 memcpy(&uaccess, &uaccess_mvcos_switch, sizeof(uaccess));
319 return 1;
320 } else {
321 memcpy(&uaccess, &uaccess_pt, sizeof(uaccess));
322 return 0;
323 }
324}
325
326/*
327 * Switch kernel/user addressing modes?
328 */
329static int __init early_parse_switch_amode(char *p)
330{
331 addressing_mode = PRIMARY_SPACE_MODE;
332 return 0;
333} 317}
334early_param("switch_amode", early_parse_switch_amode);
335 318
336static int __init early_parse_user_mode(char *p) 319static int __init early_parse_user_mode(char *p)
337{ 320{
338 if (p && strcmp(p, "primary") == 0) 321 if (p && strcmp(p, "primary") == 0)
339 addressing_mode = PRIMARY_SPACE_MODE; 322 s390_user_mode = PRIMARY_SPACE_MODE;
340 else if (!p || strcmp(p, "home") == 0) 323 else if (!p || strcmp(p, "home") == 0)
341 addressing_mode = HOME_SPACE_MODE; 324 s390_user_mode = HOME_SPACE_MODE;
342 else 325 else
343 return 1; 326 return 1;
344 return 0; 327 return 0;
345} 328}
346early_param("user_mode", early_parse_user_mode); 329early_param("user_mode", early_parse_user_mode);
347 330
348static void setup_addressing_mode(void) 331static void __init setup_addressing_mode(void)
349{ 332{
350 if (addressing_mode == PRIMARY_SPACE_MODE) { 333 if (s390_user_mode != PRIMARY_SPACE_MODE)
351 if (set_amode_primary()) 334 return;
352 pr_info("Address spaces switched, " 335 set_user_mode_primary();
353 "mvcos available\n"); 336 if (MACHINE_HAS_MVCOS)
354 else 337 pr_info("Address spaces switched, mvcos available\n");
355 pr_info("Address spaces switched, " 338 else
356 "mvcos not available\n"); 339 pr_info("Address spaces switched, mvcos not available\n");
357 }
358} 340}
359 341
360void *restart_stack __attribute__((__section__(".data"))); 342void *restart_stack __attribute__((__section__(".data")));
@@ -602,9 +584,7 @@ static void __init setup_memory_end(void)
602 584
603static void __init setup_vmcoreinfo(void) 585static void __init setup_vmcoreinfo(void)
604{ 586{
605#ifdef CONFIG_KEXEC
606 mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note()); 587 mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note());
607#endif
608} 588}
609 589
610#ifdef CONFIG_CRASH_DUMP 590#ifdef CONFIG_CRASH_DUMP
@@ -980,6 +960,12 @@ static void __init setup_hwcaps(void)
980 * HWCAP_S390_HIGH_GPRS is bit 9. 960 * HWCAP_S390_HIGH_GPRS is bit 9.
981 */ 961 */
982 elf_hwcap |= HWCAP_S390_HIGH_GPRS; 962 elf_hwcap |= HWCAP_S390_HIGH_GPRS;
963
964 /*
965 * Transactional execution support HWCAP_S390_TE is bit 10.
966 */
967 if (test_facility(50) && test_facility(73))
968 elf_hwcap |= HWCAP_S390_TE;
983#endif 969#endif
984 970
985 get_cpu_id(&cpu_id); 971 get_cpu_id(&cpu_id);
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 720fda1620f2..ea431e551c6b 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -66,7 +66,7 @@ struct pcpu {
66 unsigned long panic_stack; /* panic stack for the cpu */ 66 unsigned long panic_stack; /* panic stack for the cpu */
67 unsigned long ec_mask; /* bit mask for ec_xxx functions */ 67 unsigned long ec_mask; /* bit mask for ec_xxx functions */
68 int state; /* physical cpu state */ 68 int state; /* physical cpu state */
69 u32 status; /* last status received via sigp */ 69 int polarization; /* physical polarization */
70 u16 address; /* physical cpu address */ 70 u16 address; /* physical cpu address */
71}; 71};
72 72
@@ -74,6 +74,10 @@ static u8 boot_cpu_type;
74static u16 boot_cpu_address; 74static u16 boot_cpu_address;
75static struct pcpu pcpu_devices[NR_CPUS]; 75static struct pcpu pcpu_devices[NR_CPUS];
76 76
77/*
78 * The smp_cpu_state_mutex must be held when changing the state or polarization
79 * member of a pcpu data structure within the pcpu_devices arreay.
80 */
77DEFINE_MUTEX(smp_cpu_state_mutex); 81DEFINE_MUTEX(smp_cpu_state_mutex);
78 82
79/* 83/*
@@ -99,7 +103,7 @@ static inline int __pcpu_sigp_relax(u16 addr, u8 order, u32 parm, u32 *status)
99 int cc; 103 int cc;
100 104
101 while (1) { 105 while (1) {
102 cc = __pcpu_sigp(addr, order, parm, status); 106 cc = __pcpu_sigp(addr, order, parm, NULL);
103 if (cc != SIGP_CC_BUSY) 107 if (cc != SIGP_CC_BUSY)
104 return cc; 108 return cc;
105 cpu_relax(); 109 cpu_relax();
@@ -111,7 +115,7 @@ static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
111 int cc, retry; 115 int cc, retry;
112 116
113 for (retry = 0; ; retry++) { 117 for (retry = 0; ; retry++) {
114 cc = __pcpu_sigp(pcpu->address, order, parm, &pcpu->status); 118 cc = __pcpu_sigp(pcpu->address, order, parm, NULL);
115 if (cc != SIGP_CC_BUSY) 119 if (cc != SIGP_CC_BUSY)
116 break; 120 break;
117 if (retry >= 3) 121 if (retry >= 3)
@@ -122,16 +126,18 @@ static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
122 126
123static inline int pcpu_stopped(struct pcpu *pcpu) 127static inline int pcpu_stopped(struct pcpu *pcpu)
124{ 128{
129 u32 uninitialized_var(status);
130
125 if (__pcpu_sigp(pcpu->address, SIGP_SENSE, 131 if (__pcpu_sigp(pcpu->address, SIGP_SENSE,
126 0, &pcpu->status) != SIGP_CC_STATUS_STORED) 132 0, &status) != SIGP_CC_STATUS_STORED)
127 return 0; 133 return 0;
128 return !!(pcpu->status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED)); 134 return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
129} 135}
130 136
131static inline int pcpu_running(struct pcpu *pcpu) 137static inline int pcpu_running(struct pcpu *pcpu)
132{ 138{
133 if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING, 139 if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING,
134 0, &pcpu->status) != SIGP_CC_STATUS_STORED) 140 0, NULL) != SIGP_CC_STATUS_STORED)
135 return 1; 141 return 1;
136 /* Status stored condition code is equivalent to cpu not running. */ 142 /* Status stored condition code is equivalent to cpu not running. */
137 return 0; 143 return 0;
@@ -586,6 +592,16 @@ static inline void smp_get_save_area(int cpu, u16 address) { }
586 592
587#endif /* CONFIG_ZFCPDUMP || CONFIG_CRASH_DUMP */ 593#endif /* CONFIG_ZFCPDUMP || CONFIG_CRASH_DUMP */
588 594
595void smp_cpu_set_polarization(int cpu, int val)
596{
597 pcpu_devices[cpu].polarization = val;
598}
599
600int smp_cpu_get_polarization(int cpu)
601{
602 return pcpu_devices[cpu].polarization;
603}
604
589static struct sclp_cpu_info *smp_get_cpu_info(void) 605static struct sclp_cpu_info *smp_get_cpu_info(void)
590{ 606{
591 static int use_sigp_detection; 607 static int use_sigp_detection;
@@ -628,7 +644,7 @@ static int __devinit __smp_rescan_cpus(struct sclp_cpu_info *info,
628 pcpu->address = info->cpu[i].address; 644 pcpu->address = info->cpu[i].address;
629 pcpu->state = (cpu >= info->configured) ? 645 pcpu->state = (cpu >= info->configured) ?
630 CPU_STATE_STANDBY : CPU_STATE_CONFIGURED; 646 CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
631 cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); 647 smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
632 set_cpu_present(cpu, true); 648 set_cpu_present(cpu, true);
633 if (sysfs_add && smp_add_present_cpu(cpu) != 0) 649 if (sysfs_add && smp_add_present_cpu(cpu) != 0)
634 set_cpu_present(cpu, false); 650 set_cpu_present(cpu, false);
@@ -796,7 +812,7 @@ void __init smp_prepare_boot_cpu(void)
796 pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE; 812 pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE;
797 pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE; 813 pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE;
798 S390_lowcore.percpu_offset = __per_cpu_offset[0]; 814 S390_lowcore.percpu_offset = __per_cpu_offset[0];
799 cpu_set_polarization(0, POLARIZATION_UNKNOWN); 815 smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
800 set_cpu_present(0, true); 816 set_cpu_present(0, true);
801 set_cpu_online(0, true); 817 set_cpu_online(0, true);
802} 818}
@@ -862,7 +878,7 @@ static ssize_t cpu_configure_store(struct device *dev,
862 if (rc) 878 if (rc)
863 break; 879 break;
864 pcpu->state = CPU_STATE_STANDBY; 880 pcpu->state = CPU_STATE_STANDBY;
865 cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); 881 smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
866 topology_expect_change(); 882 topology_expect_change();
867 break; 883 break;
868 case 1: 884 case 1:
@@ -872,7 +888,7 @@ static ssize_t cpu_configure_store(struct device *dev,
872 if (rc) 888 if (rc)
873 break; 889 break;
874 pcpu->state = CPU_STATE_CONFIGURED; 890 pcpu->state = CPU_STATE_CONFIGURED;
875 cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); 891 smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
876 topology_expect_change(); 892 topology_expect_change();
877 break; 893 break;
878 default: 894 default:
@@ -959,23 +975,17 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self,
959 struct device *s = &c->dev; 975 struct device *s = &c->dev;
960 int err = 0; 976 int err = 0;
961 977
962 switch (action) { 978 switch (action & ~CPU_TASKS_FROZEN) {
963 case CPU_ONLINE: 979 case CPU_ONLINE:
964 case CPU_ONLINE_FROZEN:
965 err = sysfs_create_group(&s->kobj, &cpu_online_attr_group); 980 err = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
966 break; 981 break;
967 case CPU_DEAD: 982 case CPU_DEAD:
968 case CPU_DEAD_FROZEN:
969 sysfs_remove_group(&s->kobj, &cpu_online_attr_group); 983 sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
970 break; 984 break;
971 } 985 }
972 return notifier_from_errno(err); 986 return notifier_from_errno(err);
973} 987}
974 988
975static struct notifier_block __cpuinitdata smp_cpu_nb = {
976 .notifier_call = smp_cpu_notify,
977};
978
979static int __devinit smp_add_present_cpu(int cpu) 989static int __devinit smp_add_present_cpu(int cpu)
980{ 990{
981 struct cpu *c = &pcpu_devices[cpu].cpu; 991 struct cpu *c = &pcpu_devices[cpu].cpu;
@@ -1050,7 +1060,7 @@ static int __init s390_smp_init(void)
1050{ 1060{
1051 int cpu, rc; 1061 int cpu, rc;
1052 1062
1053 register_cpu_notifier(&smp_cpu_nb); 1063 hotcpu_notifier(smp_cpu_notify, 0);
1054#ifdef CONFIG_HOTPLUG_CPU 1064#ifdef CONFIG_HOTPLUG_CPU
1055 rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan); 1065 rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan);
1056 if (rc) 1066 if (rc)
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index bcab2f04ba58..48174850f3b0 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -350,3 +350,5 @@ SYSCALL(sys_syncfs,sys_syncfs,sys_syncfs_wrapper)
350SYSCALL(sys_setns,sys_setns,sys_setns_wrapper) 350SYSCALL(sys_setns,sys_setns,sys_setns_wrapper)
351SYSCALL(sys_process_vm_readv,sys_process_vm_readv,compat_sys_process_vm_readv_wrapper) /* 340 */ 351SYSCALL(sys_process_vm_readv,sys_process_vm_readv,compat_sys_process_vm_readv_wrapper) /* 340 */
352SYSCALL(sys_process_vm_writev,sys_process_vm_writev,compat_sys_process_vm_writev_wrapper) 352SYSCALL(sys_process_vm_writev,sys_process_vm_writev,compat_sys_process_vm_writev_wrapper)
353SYSCALL(sys_ni_syscall,sys_s390_runtime_instr,sys_s390_runtime_instr_wrapper)
354SYSCALL(sys_kcmp,sys_kcmp,sys_kcmp_wrapper)
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
index fa0eb238dac7..62f89d98e880 100644
--- a/arch/s390/kernel/sysinfo.c
+++ b/arch/s390/kernel/sysinfo.c
@@ -22,17 +22,41 @@
22#include <math-emu/soft-fp.h> 22#include <math-emu/soft-fp.h>
23#include <math-emu/single.h> 23#include <math-emu/single.h>
24 24
25static inline int stsi_0(void) 25int topology_max_mnest;
26
27/*
28 * stsi - store system information
29 *
30 * Returns the current configuration level if function code 0 was specified.
31 * Otherwise returns 0 on success or a negative value on error.
32 */
33int stsi(void *sysinfo, int fc, int sel1, int sel2)
26{ 34{
27 int rc = stsi(NULL, 0, 0, 0); 35 register int r0 asm("0") = (fc << 28) | sel1;
28 return rc == -ENOSYS ? rc : (((unsigned int) rc) >> 28); 36 register int r1 asm("1") = sel2;
37 int rc = 0;
38
39 asm volatile(
40 " stsi 0(%3)\n"
41 "0: jz 2f\n"
42 "1: lhi %1,%4\n"
43 "2:\n"
44 EX_TABLE(0b, 1b)
45 : "+d" (r0), "+d" (rc)
46 : "d" (r1), "a" (sysinfo), "K" (-EOPNOTSUPP)
47 : "cc", "memory");
48 if (rc)
49 return rc;
50 return fc ? 0 : ((unsigned int) r0) >> 28;
29} 51}
52EXPORT_SYMBOL(stsi);
30 53
31static int stsi_1_1_1(struct sysinfo_1_1_1 *info, char *page, int len) 54static void stsi_1_1_1(struct seq_file *m, struct sysinfo_1_1_1 *info)
32{ 55{
33 if (stsi(info, 1, 1, 1) == -ENOSYS) 56 int i;
34 return len;
35 57
58 if (stsi(info, 1, 1, 1))
59 return;
36 EBCASC(info->manufacturer, sizeof(info->manufacturer)); 60 EBCASC(info->manufacturer, sizeof(info->manufacturer));
37 EBCASC(info->type, sizeof(info->type)); 61 EBCASC(info->type, sizeof(info->type));
38 EBCASC(info->model, sizeof(info->model)); 62 EBCASC(info->model, sizeof(info->model));
@@ -41,242 +65,197 @@ static int stsi_1_1_1(struct sysinfo_1_1_1 *info, char *page, int len)
41 EBCASC(info->model_capacity, sizeof(info->model_capacity)); 65 EBCASC(info->model_capacity, sizeof(info->model_capacity));
42 EBCASC(info->model_perm_cap, sizeof(info->model_perm_cap)); 66 EBCASC(info->model_perm_cap, sizeof(info->model_perm_cap));
43 EBCASC(info->model_temp_cap, sizeof(info->model_temp_cap)); 67 EBCASC(info->model_temp_cap, sizeof(info->model_temp_cap));
44 len += sprintf(page + len, "Manufacturer: %-16.16s\n", 68 seq_printf(m, "Manufacturer: %-16.16s\n", info->manufacturer);
45 info->manufacturer); 69 seq_printf(m, "Type: %-4.4s\n", info->type);
46 len += sprintf(page + len, "Type: %-4.4s\n", 70 /*
47 info->type); 71 * Sigh: the model field has been renamed with System z9
72 * to model_capacity and a new model field has been added
73 * after the plant field. To avoid confusing older programs
74 * the "Model:" prints "model_capacity model" or just
75 * "model_capacity" if the model string is empty .
76 */
77 seq_printf(m, "Model: %-16.16s", info->model_capacity);
48 if (info->model[0] != '\0') 78 if (info->model[0] != '\0')
49 /* 79 seq_printf(m, " %-16.16s", info->model);
50 * Sigh: the model field has been renamed with System z9 80 seq_putc(m, '\n');
51 * to model_capacity and a new model field has been added 81 seq_printf(m, "Sequence Code: %-16.16s\n", info->sequence);
52 * after the plant field. To avoid confusing older programs 82 seq_printf(m, "Plant: %-4.4s\n", info->plant);
53 * the "Model:" prints "model_capacity model" or just 83 seq_printf(m, "Model Capacity: %-16.16s %08u\n",
54 * "model_capacity" if the model string is empty . 84 info->model_capacity, info->model_cap_rating);
55 */ 85 if (info->model_perm_cap_rating)
56 len += sprintf(page + len, 86 seq_printf(m, "Model Perm. Capacity: %-16.16s %08u\n",
57 "Model: %-16.16s %-16.16s\n", 87 info->model_perm_cap,
58 info->model_capacity, info->model); 88 info->model_perm_cap_rating);
59 else 89 if (info->model_temp_cap_rating)
60 len += sprintf(page + len, "Model: %-16.16s\n", 90 seq_printf(m, "Model Temp. Capacity: %-16.16s %08u\n",
61 info->model_capacity); 91 info->model_temp_cap,
62 len += sprintf(page + len, "Sequence Code: %-16.16s\n", 92 info->model_temp_cap_rating);
63 info->sequence); 93 if (info->ncr)
64 len += sprintf(page + len, "Plant: %-4.4s\n", 94 seq_printf(m, "Nominal Cap. Rating: %08u\n", info->ncr);
65 info->plant); 95 if (info->npr)
66 len += sprintf(page + len, "Model Capacity: %-16.16s %08u\n", 96 seq_printf(m, "Nominal Perm. Rating: %08u\n", info->npr);
67 info->model_capacity, *(u32 *) info->model_cap_rating); 97 if (info->ntr)
68 if (info->model_perm_cap[0] != '\0') 98 seq_printf(m, "Nominal Temp. Rating: %08u\n", info->ntr);
69 len += sprintf(page + len,
70 "Model Perm. Capacity: %-16.16s %08u\n",
71 info->model_perm_cap,
72 *(u32 *) info->model_perm_cap_rating);
73 if (info->model_temp_cap[0] != '\0')
74 len += sprintf(page + len,
75 "Model Temp. Capacity: %-16.16s %08u\n",
76 info->model_temp_cap,
77 *(u32 *) info->model_temp_cap_rating);
78 if (info->cai) { 99 if (info->cai) {
79 len += sprintf(page + len, 100 seq_printf(m, "Capacity Adj. Ind.: %d\n", info->cai);
80 "Capacity Adj. Ind.: %d\n", 101 seq_printf(m, "Capacity Ch. Reason: %d\n", info->ccr);
81 info->cai); 102 seq_printf(m, "Capacity Transient: %d\n", info->t);
82 len += sprintf(page + len, "Capacity Ch. Reason: %d\n", 103 }
83 info->ccr); 104 if (info->p) {
105 for (i = 1; i <= ARRAY_SIZE(info->typepct); i++) {
106 seq_printf(m, "Type %d Percentage: %d\n",
107 i, info->typepct[i - 1]);
108 }
84 } 109 }
85 return len;
86} 110}
87 111
88static int stsi_15_1_x(struct sysinfo_15_1_x *info, char *page, int len) 112static void stsi_15_1_x(struct seq_file *m, struct sysinfo_15_1_x *info)
89{ 113{
90 static int max_mnest; 114 static int max_mnest;
91 int i, rc; 115 int i, rc;
92 116
93 len += sprintf(page + len, "\n"); 117 seq_putc(m, '\n');
94 if (!MACHINE_HAS_TOPOLOGY) 118 if (!MACHINE_HAS_TOPOLOGY)
95 return len; 119 return;
96 if (max_mnest) { 120 if (stsi(info, 15, 1, topology_max_mnest))
97 stsi(info, 15, 1, max_mnest); 121 return;
98 } else { 122 seq_printf(m, "CPU Topology HW: ");
99 for (max_mnest = 6; max_mnest > 1; max_mnest--) {
100 rc = stsi(info, 15, 1, max_mnest);
101 if (rc != -ENOSYS)
102 break;
103 }
104 }
105 len += sprintf(page + len, "CPU Topology HW: ");
106 for (i = 0; i < TOPOLOGY_NR_MAG; i++) 123 for (i = 0; i < TOPOLOGY_NR_MAG; i++)
107 len += sprintf(page + len, " %d", info->mag[i]); 124 seq_printf(m, " %d", info->mag[i]);
108 len += sprintf(page + len, "\n"); 125 seq_putc(m, '\n');
109#ifdef CONFIG_SCHED_MC 126#ifdef CONFIG_SCHED_MC
110 store_topology(info); 127 store_topology(info);
111 len += sprintf(page + len, "CPU Topology SW: "); 128 seq_printf(m, "CPU Topology SW: ");
112 for (i = 0; i < TOPOLOGY_NR_MAG; i++) 129 for (i = 0; i < TOPOLOGY_NR_MAG; i++)
113 len += sprintf(page + len, " %d", info->mag[i]); 130 seq_printf(m, " %d", info->mag[i]);
114 len += sprintf(page + len, "\n"); 131 seq_putc(m, '\n');
115#endif 132#endif
116 return len;
117} 133}
118 134
119static int stsi_1_2_2(struct sysinfo_1_2_2 *info, char *page, int len) 135static void stsi_1_2_2(struct seq_file *m, struct sysinfo_1_2_2 *info)
120{ 136{
121 struct sysinfo_1_2_2_extension *ext; 137 struct sysinfo_1_2_2_extension *ext;
122 int i; 138 int i;
123 139
124 if (stsi(info, 1, 2, 2) == -ENOSYS) 140 if (stsi(info, 1, 2, 2))
125 return len; 141 return;
126 ext = (struct sysinfo_1_2_2_extension *) 142 ext = (struct sysinfo_1_2_2_extension *)
127 ((unsigned long) info + info->acc_offset); 143 ((unsigned long) info + info->acc_offset);
128 144 seq_printf(m, "CPUs Total: %d\n", info->cpus_total);
129 len += sprintf(page + len, "CPUs Total: %d\n", 145 seq_printf(m, "CPUs Configured: %d\n", info->cpus_configured);
130 info->cpus_total); 146 seq_printf(m, "CPUs Standby: %d\n", info->cpus_standby);
131 len += sprintf(page + len, "CPUs Configured: %d\n", 147 seq_printf(m, "CPUs Reserved: %d\n", info->cpus_reserved);
132 info->cpus_configured); 148 /*
133 len += sprintf(page + len, "CPUs Standby: %d\n", 149 * Sigh 2. According to the specification the alternate
134 info->cpus_standby); 150 * capability field is a 32 bit floating point number
135 len += sprintf(page + len, "CPUs Reserved: %d\n", 151 * if the higher order 8 bits are not zero. Printing
136 info->cpus_reserved); 152 * a floating point number in the kernel is a no-no,
137 153 * always print the number as 32 bit unsigned integer.
138 if (info->format == 1) { 154 * The user-space needs to know about the strange
139 /* 155 * encoding of the alternate cpu capability.
140 * Sigh 2. According to the specification the alternate 156 */
141 * capability field is a 32 bit floating point number 157 seq_printf(m, "Capability: %u", info->capability);
142 * if the higher order 8 bits are not zero. Printing 158 if (info->format == 1)
143 * a floating point number in the kernel is a no-no, 159 seq_printf(m, " %u", ext->alt_capability);
144 * always print the number as 32 bit unsigned integer. 160 seq_putc(m, '\n');
145 * The user-space needs to know about the strange 161 if (info->nominal_cap)
146 * encoding of the alternate cpu capability. 162 seq_printf(m, "Nominal Capability: %d\n", info->nominal_cap);
147 */ 163 if (info->secondary_cap)
148 len += sprintf(page + len, "Capability: %u %u\n", 164 seq_printf(m, "Secondary Capability: %d\n", info->secondary_cap);
149 info->capability, ext->alt_capability); 165 for (i = 2; i <= info->cpus_total; i++) {
150 for (i = 2; i <= info->cpus_total; i++) 166 seq_printf(m, "Adjustment %02d-way: %u",
151 len += sprintf(page + len, 167 i, info->adjustment[i-2]);
152 "Adjustment %02d-way: %u %u\n", 168 if (info->format == 1)
153 i, info->adjustment[i-2], 169 seq_printf(m, " %u", ext->alt_adjustment[i-2]);
154 ext->alt_adjustment[i-2]); 170 seq_putc(m, '\n');
155
156 } else {
157 len += sprintf(page + len, "Capability: %u\n",
158 info->capability);
159 for (i = 2; i <= info->cpus_total; i++)
160 len += sprintf(page + len,
161 "Adjustment %02d-way: %u\n",
162 i, info->adjustment[i-2]);
163 } 171 }
164
165 if (info->secondary_capability != 0)
166 len += sprintf(page + len, "Secondary Capability: %d\n",
167 info->secondary_capability);
168 return len;
169} 172}
170 173
171static int stsi_2_2_2(struct sysinfo_2_2_2 *info, char *page, int len) 174static void stsi_2_2_2(struct seq_file *m, struct sysinfo_2_2_2 *info)
172{ 175{
173 if (stsi(info, 2, 2, 2) == -ENOSYS) 176 if (stsi(info, 2, 2, 2))
174 return len; 177 return;
175
176 EBCASC(info->name, sizeof(info->name)); 178 EBCASC(info->name, sizeof(info->name));
177 179 seq_putc(m, '\n');
178 len += sprintf(page + len, "\n"); 180 seq_printf(m, "LPAR Number: %d\n", info->lpar_number);
179 len += sprintf(page + len, "LPAR Number: %d\n", 181 seq_printf(m, "LPAR Characteristics: ");
180 info->lpar_number);
181
182 len += sprintf(page + len, "LPAR Characteristics: ");
183 if (info->characteristics & LPAR_CHAR_DEDICATED) 182 if (info->characteristics & LPAR_CHAR_DEDICATED)
184 len += sprintf(page + len, "Dedicated "); 183 seq_printf(m, "Dedicated ");
185 if (info->characteristics & LPAR_CHAR_SHARED) 184 if (info->characteristics & LPAR_CHAR_SHARED)
186 len += sprintf(page + len, "Shared "); 185 seq_printf(m, "Shared ");
187 if (info->characteristics & LPAR_CHAR_LIMITED) 186 if (info->characteristics & LPAR_CHAR_LIMITED)
188 len += sprintf(page + len, "Limited "); 187 seq_printf(m, "Limited ");
189 len += sprintf(page + len, "\n"); 188 seq_putc(m, '\n');
190 189 seq_printf(m, "LPAR Name: %-8.8s\n", info->name);
191 len += sprintf(page + len, "LPAR Name: %-8.8s\n", 190 seq_printf(m, "LPAR Adjustment: %d\n", info->caf);
192 info->name); 191 seq_printf(m, "LPAR CPUs Total: %d\n", info->cpus_total);
193 192 seq_printf(m, "LPAR CPUs Configured: %d\n", info->cpus_configured);
194 len += sprintf(page + len, "LPAR Adjustment: %d\n", 193 seq_printf(m, "LPAR CPUs Standby: %d\n", info->cpus_standby);
195 info->caf); 194 seq_printf(m, "LPAR CPUs Reserved: %d\n", info->cpus_reserved);
196 195 seq_printf(m, "LPAR CPUs Dedicated: %d\n", info->cpus_dedicated);
197 len += sprintf(page + len, "LPAR CPUs Total: %d\n", 196 seq_printf(m, "LPAR CPUs Shared: %d\n", info->cpus_shared);
198 info->cpus_total);
199 len += sprintf(page + len, "LPAR CPUs Configured: %d\n",
200 info->cpus_configured);
201 len += sprintf(page + len, "LPAR CPUs Standby: %d\n",
202 info->cpus_standby);
203 len += sprintf(page + len, "LPAR CPUs Reserved: %d\n",
204 info->cpus_reserved);
205 len += sprintf(page + len, "LPAR CPUs Dedicated: %d\n",
206 info->cpus_dedicated);
207 len += sprintf(page + len, "LPAR CPUs Shared: %d\n",
208 info->cpus_shared);
209 return len;
210} 197}
211 198
212static int stsi_3_2_2(struct sysinfo_3_2_2 *info, char *page, int len) 199static void stsi_3_2_2(struct seq_file *m, struct sysinfo_3_2_2 *info)
213{ 200{
214 int i; 201 int i;
215 202
216 if (stsi(info, 3, 2, 2) == -ENOSYS) 203 if (stsi(info, 3, 2, 2))
217 return len; 204 return;
218 for (i = 0; i < info->count; i++) { 205 for (i = 0; i < info->count; i++) {
219 EBCASC(info->vm[i].name, sizeof(info->vm[i].name)); 206 EBCASC(info->vm[i].name, sizeof(info->vm[i].name));
220 EBCASC(info->vm[i].cpi, sizeof(info->vm[i].cpi)); 207 EBCASC(info->vm[i].cpi, sizeof(info->vm[i].cpi));
221 len += sprintf(page + len, "\n"); 208 seq_putc(m, '\n');
222 len += sprintf(page + len, "VM%02d Name: %-8.8s\n", 209 seq_printf(m, "VM%02d Name: %-8.8s\n", i, info->vm[i].name);
223 i, info->vm[i].name); 210 seq_printf(m, "VM%02d Control Program: %-16.16s\n", i, info->vm[i].cpi);
224 len += sprintf(page + len, "VM%02d Control Program: %-16.16s\n", 211 seq_printf(m, "VM%02d Adjustment: %d\n", i, info->vm[i].caf);
225 i, info->vm[i].cpi); 212 seq_printf(m, "VM%02d CPUs Total: %d\n", i, info->vm[i].cpus_total);
226 213 seq_printf(m, "VM%02d CPUs Configured: %d\n", i, info->vm[i].cpus_configured);
227 len += sprintf(page + len, "VM%02d Adjustment: %d\n", 214 seq_printf(m, "VM%02d CPUs Standby: %d\n", i, info->vm[i].cpus_standby);
228 i, info->vm[i].caf); 215 seq_printf(m, "VM%02d CPUs Reserved: %d\n", i, info->vm[i].cpus_reserved);
229
230 len += sprintf(page + len, "VM%02d CPUs Total: %d\n",
231 i, info->vm[i].cpus_total);
232 len += sprintf(page + len, "VM%02d CPUs Configured: %d\n",
233 i, info->vm[i].cpus_configured);
234 len += sprintf(page + len, "VM%02d CPUs Standby: %d\n",
235 i, info->vm[i].cpus_standby);
236 len += sprintf(page + len, "VM%02d CPUs Reserved: %d\n",
237 i, info->vm[i].cpus_reserved);
238 } 216 }
239 return len;
240} 217}
241 218
242static int proc_read_sysinfo(char *page, char **start, 219static int sysinfo_show(struct seq_file *m, void *v)
243 off_t off, int count,
244 int *eof, void *data)
245{ 220{
246 unsigned long info = get_zeroed_page(GFP_KERNEL); 221 void *info = (void *)get_zeroed_page(GFP_KERNEL);
247 int level, len; 222 int level;
248 223
249 if (!info) 224 if (!info)
250 return 0; 225 return 0;
251 226 level = stsi(NULL, 0, 0, 0);
252 len = 0;
253 level = stsi_0();
254 if (level >= 1) 227 if (level >= 1)
255 len = stsi_1_1_1((struct sysinfo_1_1_1 *) info, page, len); 228 stsi_1_1_1(m, info);
256
257 if (level >= 1) 229 if (level >= 1)
258 len = stsi_15_1_x((struct sysinfo_15_1_x *) info, page, len); 230 stsi_15_1_x(m, info);
259
260 if (level >= 1) 231 if (level >= 1)
261 len = stsi_1_2_2((struct sysinfo_1_2_2 *) info, page, len); 232 stsi_1_2_2(m, info);
262
263 if (level >= 2) 233 if (level >= 2)
264 len = stsi_2_2_2((struct sysinfo_2_2_2 *) info, page, len); 234 stsi_2_2_2(m, info);
265
266 if (level >= 3) 235 if (level >= 3)
267 len = stsi_3_2_2((struct sysinfo_3_2_2 *) info, page, len); 236 stsi_3_2_2(m, info);
237 free_page((unsigned long)info);
238 return 0;
239}
268 240
269 free_page(info); 241static int sysinfo_open(struct inode *inode, struct file *file)
270 return len; 242{
243 return single_open(file, sysinfo_show, NULL);
271} 244}
272 245
273static __init int create_proc_sysinfo(void) 246static const struct file_operations sysinfo_fops = {
247 .open = sysinfo_open,
248 .read = seq_read,
249 .llseek = seq_lseek,
250 .release = single_release,
251};
252
253static int __init sysinfo_create_proc(void)
274{ 254{
275 create_proc_read_entry("sysinfo", 0444, NULL, 255 proc_create("sysinfo", 0444, NULL, &sysinfo_fops);
276 proc_read_sysinfo, NULL);
277 return 0; 256 return 0;
278} 257}
279device_initcall(create_proc_sysinfo); 258device_initcall(sysinfo_create_proc);
280 259
281/* 260/*
282 * Service levels interface. 261 * Service levels interface.
@@ -407,7 +386,7 @@ void s390_adjust_jiffies(void)
407 if (!info) 386 if (!info)
408 return; 387 return;
409 388
410 if (stsi(info, 1, 2, 2) != -ENOSYS) { 389 if (stsi(info, 1, 2, 2) == 0) {
411 /* 390 /*
412 * Major sigh. The cpu capability encoding is "special". 391 * Major sigh. The cpu capability encoding is "special".
413 * If the first 9 bits of info->capability are 0 then it 392 * If the first 9 bits of info->capability are 0 then it
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index dcec960fc724..2db1011b8b19 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -329,7 +329,7 @@ static unsigned long clock_sync_flags;
329 * The synchronous get_clock function. It will write the current clock 329 * The synchronous get_clock function. It will write the current clock
330 * value to the clock pointer and return 0 if the clock is in sync with 330 * value to the clock pointer and return 0 if the clock is in sync with
331 * the external time source. If the clock mode is local it will return 331 * the external time source. If the clock mode is local it will return
332 * -ENOSYS and -EAGAIN if the clock is not in sync with the external 332 * -EOPNOTSUPP and -EAGAIN if the clock is not in sync with the external
333 * reference. 333 * reference.
334 */ 334 */
335int get_sync_clock(unsigned long long *clock) 335int get_sync_clock(unsigned long long *clock)
@@ -347,7 +347,7 @@ int get_sync_clock(unsigned long long *clock)
347 return 0; 347 return 0;
348 if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags) && 348 if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags) &&
349 !test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags)) 349 !test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
350 return -ENOSYS; 350 return -EOPNOTSUPP;
351 if (!test_bit(CLOCK_SYNC_ETR, &clock_sync_flags) && 351 if (!test_bit(CLOCK_SYNC_ETR, &clock_sync_flags) &&
352 !test_bit(CLOCK_SYNC_STP, &clock_sync_flags)) 352 !test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
353 return -EACCES; 353 return -EACCES;
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 05151e06c388..54d93f4b6818 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -17,6 +17,7 @@
17#include <linux/cpu.h> 17#include <linux/cpu.h>
18#include <linux/smp.h> 18#include <linux/smp.h>
19#include <linux/mm.h> 19#include <linux/mm.h>
20#include <asm/sysinfo.h>
20 21
21#define PTF_HORIZONTAL (0UL) 22#define PTF_HORIZONTAL (0UL)
22#define PTF_VERTICAL (1UL) 23#define PTF_VERTICAL (1UL)
@@ -44,9 +45,6 @@ static struct mask_info book_info;
44cpumask_t cpu_book_map[NR_CPUS]; 45cpumask_t cpu_book_map[NR_CPUS];
45unsigned char cpu_book_id[NR_CPUS]; 46unsigned char cpu_book_id[NR_CPUS];
46 47
47/* smp_cpu_state_mutex must be held when accessing this array */
48int cpu_polarization[NR_CPUS];
49
50static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu) 48static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
51{ 49{
52 cpumask_t mask; 50 cpumask_t mask;
@@ -75,10 +73,7 @@ static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
75{ 73{
76 unsigned int cpu; 74 unsigned int cpu;
77 75
78 for (cpu = find_first_bit(&tl_cpu->mask[0], TOPOLOGY_CPU_BITS); 76 for_each_set_bit(cpu, &tl_cpu->mask[0], TOPOLOGY_CPU_BITS) {
79 cpu < TOPOLOGY_CPU_BITS;
80 cpu = find_next_bit(&tl_cpu->mask[0], TOPOLOGY_CPU_BITS, cpu + 1))
81 {
82 unsigned int rcpu; 77 unsigned int rcpu;
83 int lcpu; 78 int lcpu;
84 79
@@ -94,7 +89,7 @@ static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
94 } else { 89 } else {
95 cpu_core_id[lcpu] = core->id; 90 cpu_core_id[lcpu] = core->id;
96 } 91 }
97 cpu_set_polarization(lcpu, tl_cpu->pp); 92 smp_cpu_set_polarization(lcpu, tl_cpu->pp);
98 } 93 }
99 } 94 }
100 return core; 95 return core;
@@ -201,7 +196,7 @@ static void topology_update_polarization_simple(void)
201 196
202 mutex_lock(&smp_cpu_state_mutex); 197 mutex_lock(&smp_cpu_state_mutex);
203 for_each_possible_cpu(cpu) 198 for_each_possible_cpu(cpu)
204 cpu_set_polarization(cpu, POLARIZATION_HRZ); 199 smp_cpu_set_polarization(cpu, POLARIZATION_HRZ);
205 mutex_unlock(&smp_cpu_state_mutex); 200 mutex_unlock(&smp_cpu_state_mutex);
206} 201}
207 202
@@ -231,7 +226,7 @@ int topology_set_cpu_management(int fc)
231 if (rc) 226 if (rc)
232 return -EBUSY; 227 return -EBUSY;
233 for_each_possible_cpu(cpu) 228 for_each_possible_cpu(cpu)
234 cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); 229 smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
235 return rc; 230 return rc;
236} 231}
237 232
@@ -250,12 +245,10 @@ static void update_cpu_core_map(void)
250 245
251void store_topology(struct sysinfo_15_1_x *info) 246void store_topology(struct sysinfo_15_1_x *info)
252{ 247{
253 int rc; 248 if (topology_max_mnest >= 3)
254 249 stsi(info, 15, 1, 3);
255 rc = stsi(info, 15, 1, 3); 250 else
256 if (rc != -ENOSYS) 251 stsi(info, 15, 1, 2);
257 return;
258 stsi(info, 15, 1, 2);
259} 252}
260 253
261int arch_update_cpu_topology(void) 254int arch_update_cpu_topology(void)
@@ -415,7 +408,7 @@ static ssize_t cpu_polarization_show(struct device *dev,
415 ssize_t count; 408 ssize_t count;
416 409
417 mutex_lock(&smp_cpu_state_mutex); 410 mutex_lock(&smp_cpu_state_mutex);
418 switch (cpu_read_polarization(cpu)) { 411 switch (smp_cpu_get_polarization(cpu)) {
419 case POLARIZATION_HRZ: 412 case POLARIZATION_HRZ:
420 count = sprintf(buf, "horizontal\n"); 413 count = sprintf(buf, "horizontal\n");
421 break; 414 break;
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 01775c04a90e..3d2b0fa37db0 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -57,6 +57,23 @@ static int kstack_depth_to_print = 12;
57static int kstack_depth_to_print = 20; 57static int kstack_depth_to_print = 20;
58#endif /* CONFIG_64BIT */ 58#endif /* CONFIG_64BIT */
59 59
60static inline void __user *get_trap_ip(struct pt_regs *regs)
61{
62#ifdef CONFIG_64BIT
63 unsigned long address;
64
65 if (regs->int_code & 0x200)
66 address = *(unsigned long *)(current->thread.trap_tdb + 24);
67 else
68 address = regs->psw.addr;
69 return (void __user *)
70 ((address - (regs->int_code >> 16)) & PSW_ADDR_INSN);
71#else
72 return (void __user *)
73 ((regs->psw.addr - (regs->int_code >> 16)) & PSW_ADDR_INSN);
74#endif
75}
76
60/* 77/*
61 * For show_trace we have tree different stack to consider: 78 * For show_trace we have tree different stack to consider:
62 * - the panic stack which is used if the kernel stack has overflown 79 * - the panic stack which is used if the kernel stack has overflown
@@ -214,7 +231,6 @@ void show_registers(struct pt_regs *regs)
214 231
215void show_regs(struct pt_regs *regs) 232void show_regs(struct pt_regs *regs)
216{ 233{
217 print_modules();
218 printk("CPU: %d %s %s %.*s\n", 234 printk("CPU: %d %s %s %.*s\n",
219 task_thread_info(current)->cpu, print_tainted(), 235 task_thread_info(current)->cpu, print_tainted(),
220 init_utsname()->release, 236 init_utsname()->release,
@@ -254,6 +270,7 @@ void die(struct pt_regs *regs, const char *str)
254#endif 270#endif
255 printk("\n"); 271 printk("\n");
256 notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV); 272 notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
273 print_modules();
257 show_regs(regs); 274 show_regs(regs);
258 bust_spinlocks(0); 275 bust_spinlocks(0);
259 add_taint(TAINT_DIE); 276 add_taint(TAINT_DIE);
@@ -285,12 +302,6 @@ int is_valid_bugaddr(unsigned long addr)
285 return 1; 302 return 1;
286} 303}
287 304
288static inline void __user *get_psw_address(struct pt_regs *regs)
289{
290 return (void __user *)
291 ((regs->psw.addr - (regs->int_code >> 16)) & PSW_ADDR_INSN);
292}
293
294static void __kprobes do_trap(struct pt_regs *regs, 305static void __kprobes do_trap(struct pt_regs *regs,
295 int si_signo, int si_code, char *str) 306 int si_signo, int si_code, char *str)
296{ 307{
@@ -304,14 +315,14 @@ static void __kprobes do_trap(struct pt_regs *regs,
304 info.si_signo = si_signo; 315 info.si_signo = si_signo;
305 info.si_errno = 0; 316 info.si_errno = 0;
306 info.si_code = si_code; 317 info.si_code = si_code;
307 info.si_addr = get_psw_address(regs); 318 info.si_addr = get_trap_ip(regs);
308 force_sig_info(si_signo, &info, current); 319 force_sig_info(si_signo, &info, current);
309 report_user_fault(regs, si_signo); 320 report_user_fault(regs, si_signo);
310 } else { 321 } else {
311 const struct exception_table_entry *fixup; 322 const struct exception_table_entry *fixup;
312 fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN); 323 fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
313 if (fixup) 324 if (fixup)
314 regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE; 325 regs->psw.addr = extable_fixup(fixup) | PSW_ADDR_AMODE;
315 else { 326 else {
316 enum bug_trap_type btt; 327 enum bug_trap_type btt;
317 328
@@ -381,6 +392,11 @@ DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN,
381DO_ERROR_INFO(translation_exception, SIGILL, ILL_ILLOPN, 392DO_ERROR_INFO(translation_exception, SIGILL, ILL_ILLOPN,
382 "translation exception") 393 "translation exception")
383 394
395#ifdef CONFIG_64BIT
396DO_ERROR_INFO(transaction_exception, SIGILL, ILL_ILLOPN,
397 "transaction constraint exception")
398#endif
399
384static inline void do_fp_trap(struct pt_regs *regs, int fpc) 400static inline void do_fp_trap(struct pt_regs *regs, int fpc)
385{ 401{
386 int si_code = 0; 402 int si_code = 0;
@@ -408,7 +424,7 @@ static void __kprobes illegal_op(struct pt_regs *regs)
408 __u16 __user *location; 424 __u16 __user *location;
409 int signal = 0; 425 int signal = 0;
410 426
411 location = get_psw_address(regs); 427 location = get_trap_ip(regs);
412 428
413 if (user_mode(regs)) { 429 if (user_mode(regs)) {
414 if (get_user(*((__u16 *) opcode), (__u16 __user *) location)) 430 if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
@@ -476,7 +492,7 @@ void specification_exception(struct pt_regs *regs)
476 __u16 __user *location = NULL; 492 __u16 __user *location = NULL;
477 int signal = 0; 493 int signal = 0;
478 494
479 location = (__u16 __user *) get_psw_address(regs); 495 location = (__u16 __user *) get_trap_ip(regs);
480 496
481 if (user_mode(regs)) { 497 if (user_mode(regs)) {
482 get_user(*((__u16 *) opcode), location); 498 get_user(*((__u16 *) opcode), location);
@@ -525,7 +541,7 @@ static void data_exception(struct pt_regs *regs)
525 __u16 __user *location; 541 __u16 __user *location;
526 int signal = 0; 542 int signal = 0;
527 543
528 location = get_psw_address(regs); 544 location = get_trap_ip(regs);
529 545
530 if (MACHINE_HAS_IEEE) 546 if (MACHINE_HAS_IEEE)
531 asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc)); 547 asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
@@ -641,6 +657,7 @@ void __init trap_init(void)
641 pgm_check_table[0x12] = &translation_exception; 657 pgm_check_table[0x12] = &translation_exception;
642 pgm_check_table[0x13] = &special_op_exception; 658 pgm_check_table[0x13] = &special_op_exception;
643#ifdef CONFIG_64BIT 659#ifdef CONFIG_64BIT
660 pgm_check_table[0x18] = &transaction_exception;
644 pgm_check_table[0x38] = &do_asce_exception; 661 pgm_check_table[0x38] = &do_asce_exception;
645 pgm_check_table[0x39] = &do_dat_exception; 662 pgm_check_table[0x39] = &do_dat_exception;
646 pgm_check_table[0x3A] = &do_dat_exception; 663 pgm_check_table[0x3A] = &do_dat_exception;
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 9a19ca367c17..d7776281cb60 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -85,7 +85,7 @@ struct vdso_data *vdso_data = &vdso_data_store.data;
85static void vdso_init_data(struct vdso_data *vd) 85static void vdso_init_data(struct vdso_data *vd)
86{ 86{
87 vd->ectg_available = 87 vd->ectg_available =
88 addressing_mode != HOME_SPACE_MODE && test_facility(31); 88 s390_user_mode != HOME_SPACE_MODE && test_facility(31);
89} 89}
90 90
91#ifdef CONFIG_64BIT 91#ifdef CONFIG_64BIT
@@ -102,7 +102,7 @@ int vdso_alloc_per_cpu(struct _lowcore *lowcore)
102 102
103 lowcore->vdso_per_cpu_data = __LC_PASTE; 103 lowcore->vdso_per_cpu_data = __LC_PASTE;
104 104
105 if (addressing_mode == HOME_SPACE_MODE || !vdso_enabled) 105 if (s390_user_mode == HOME_SPACE_MODE || !vdso_enabled)
106 return 0; 106 return 0;
107 107
108 segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER); 108 segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
@@ -147,7 +147,7 @@ void vdso_free_per_cpu(struct _lowcore *lowcore)
147 unsigned long segment_table, page_table, page_frame; 147 unsigned long segment_table, page_table, page_frame;
148 u32 *psal, *aste; 148 u32 *psal, *aste;
149 149
150 if (addressing_mode == HOME_SPACE_MODE || !vdso_enabled) 150 if (s390_user_mode == HOME_SPACE_MODE || !vdso_enabled)
151 return; 151 return;
152 152
153 psal = (u32 *)(addr_t) lowcore->paste[4]; 153 psal = (u32 *)(addr_t) lowcore->paste[4];
@@ -165,7 +165,7 @@ static void vdso_init_cr5(void)
165{ 165{
166 unsigned long cr5; 166 unsigned long cr5;
167 167
168 if (addressing_mode == HOME_SPACE_MODE || !vdso_enabled) 168 if (s390_user_mode == HOME_SPACE_MODE || !vdso_enabled)
169 return; 169 return;
170 cr5 = offsetof(struct _lowcore, paste); 170 cr5 = offsetof(struct _lowcore, paste);
171 __ctl_load(cr5, 5, 5); 171 __ctl_load(cr5, 5, 5);
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index cb5093c26d16..790334427895 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -378,9 +378,8 @@ static int __cpuinit s390_nohz_notify(struct notifier_block *self,
378 long cpu = (long) hcpu; 378 long cpu = (long) hcpu;
379 379
380 idle = &per_cpu(s390_idle, cpu); 380 idle = &per_cpu(s390_idle, cpu);
381 switch (action) { 381 switch (action & ~CPU_TASKS_FROZEN) {
382 case CPU_DYING: 382 case CPU_DYING:
383 case CPU_DYING_FROZEN:
384 idle->nohz_delay = 0; 383 idle->nohz_delay = 0;
385 default: 384 default:
386 break; 385 break;
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
index 78eb9847008f..9b04a32e5695 100644
--- a/arch/s390/kvm/Kconfig
+++ b/arch/s390/kvm/Kconfig
@@ -5,7 +5,7 @@ source "virt/kvm/Kconfig"
5 5
6menuconfig VIRTUALIZATION 6menuconfig VIRTUALIZATION
7 def_bool y 7 def_bool y
8 prompt "Virtualization" 8 prompt "KVM"
9 ---help--- 9 ---help---
10 Say Y here to get to see options for using your Linux host to run other 10 Say Y here to get to see options for using your Linux host to run other
11 operating systems inside virtual machines (guests). 11 operating systems inside virtual machines (guests).
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 60da903d6f3e..310be61bead7 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -211,7 +211,7 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
211 spin_unlock(&fi->lock); 211 spin_unlock(&fi->lock);
212 212
213 /* deal with other level 3 hypervisors */ 213 /* deal with other level 3 hypervisors */
214 if (stsi(mem, 3, 2, 2) == -ENOSYS) 214 if (stsi(mem, 3, 2, 2))
215 mem->count = 0; 215 mem->count = 0;
216 if (mem->count < 8) 216 if (mem->count < 8)
217 mem->count++; 217 mem->count++;
@@ -259,7 +259,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
259 mem = get_zeroed_page(GFP_KERNEL); 259 mem = get_zeroed_page(GFP_KERNEL);
260 if (!mem) 260 if (!mem)
261 goto out_fail; 261 goto out_fail;
262 if (stsi((void *) mem, fc, sel1, sel2) == -ENOSYS) 262 if (stsi((void *) mem, fc, sel1, sel2))
263 goto out_mem; 263 goto out_mem;
264 break; 264 break;
265 case 3: 265 case 3:
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index 761ab8b56afc..6ab0d0b5cec8 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -4,6 +4,7 @@
4 4
5lib-y += delay.o string.o uaccess_std.o uaccess_pt.o 5lib-y += delay.o string.o uaccess_std.o uaccess_pt.o
6obj-y += usercopy.o 6obj-y += usercopy.o
7obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o 7obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o mem32.o
8obj-$(CONFIG_64BIT) += mem64.o
8lib-$(CONFIG_64BIT) += uaccess_mvcos.o 9lib-$(CONFIG_64BIT) += uaccess_mvcos.o
9lib-$(CONFIG_SMP) += spinlock.o 10lib-$(CONFIG_SMP) += spinlock.o
diff --git a/arch/s390/lib/mem32.S b/arch/s390/lib/mem32.S
new file mode 100644
index 000000000000..14ca9244b615
--- /dev/null
+++ b/arch/s390/lib/mem32.S
@@ -0,0 +1,92 @@
1/*
2 * String handling functions.
3 *
4 * Copyright IBM Corp. 2012
5 */
6
7#include <linux/linkage.h>
8
9/*
10 * memset implementation
11 *
12 * This code corresponds to the C construct below. We do distinguish
13 * between clearing (c == 0) and setting a memory array (c != 0) simply
14 * because nearly all memset invocations in the kernel clear memory and
15 * the xc instruction is preferred in such cases.
16 *
17 * void *memset(void *s, int c, size_t n)
18 * {
19 * if (likely(c == 0))
20 * return __builtin_memset(s, 0, n);
21 * return __builtin_memset(s, c, n);
22 * }
23 */
24ENTRY(memset)
25 basr %r5,%r0
26.Lmemset_base:
27 ltr %r4,%r4
28 bzr %r14
29 ltr %r3,%r3
30 jnz .Lmemset_fill
31 ahi %r4,-1
32 lr %r3,%r4
33 srl %r3,8
34 ltr %r3,%r3
35 lr %r1,%r2
36 je .Lmemset_clear_rest
37.Lmemset_clear_loop:
38 xc 0(256,%r1),0(%r1)
39 la %r1,256(%r1)
40 brct %r3,.Lmemset_clear_loop
41.Lmemset_clear_rest:
42 ex %r4,.Lmemset_xc-.Lmemset_base(%r5)
43 br %r14
44.Lmemset_fill:
45 stc %r3,0(%r2)
46 chi %r4,1
47 lr %r1,%r2
48 ber %r14
49 ahi %r4,-2
50 lr %r3,%r4
51 srl %r3,8
52 ltr %r3,%r3
53 je .Lmemset_fill_rest
54.Lmemset_fill_loop:
55 mvc 1(256,%r1),0(%r1)
56 la %r1,256(%r1)
57 brct %r3,.Lmemset_fill_loop
58.Lmemset_fill_rest:
59 ex %r4,.Lmemset_mvc-.Lmemset_base(%r5)
60 br %r14
61.Lmemset_xc:
62 xc 0(1,%r1),0(%r1)
63.Lmemset_mvc:
64 mvc 1(1,%r1),0(%r1)
65
66/*
67 * memcpy implementation
68 *
69 * void *memcpy(void *dest, const void *src, size_t n)
70 */
71ENTRY(memcpy)
72 basr %r5,%r0
73.Lmemcpy_base:
74 ltr %r4,%r4
75 bzr %r14
76 ahi %r4,-1
77 lr %r0,%r4
78 srl %r0,8
79 ltr %r0,%r0
80 lr %r1,%r2
81 jnz .Lmemcpy_loop
82.Lmemcpy_rest:
83 ex %r4,.Lmemcpy_mvc-.Lmemcpy_base(%r5)
84 br %r14
85.Lmemcpy_loop:
86 mvc 0(256,%r1),0(%r3)
87 la %r1,256(%r1)
88 la %r3,256(%r3)
89 brct %r0,.Lmemcpy_loop
90 j .Lmemcpy_rest
91.Lmemcpy_mvc:
92 mvc 0(1,%r1),0(%r3)
diff --git a/arch/s390/lib/mem64.S b/arch/s390/lib/mem64.S
new file mode 100644
index 000000000000..c6d553e85ab1
--- /dev/null
+++ b/arch/s390/lib/mem64.S
@@ -0,0 +1,88 @@
1/*
2 * String handling functions.
3 *
4 * Copyright IBM Corp. 2012
5 */
6
7#include <linux/linkage.h>
8
9/*
10 * memset implementation
11 *
12 * This code corresponds to the C construct below. We do distinguish
13 * between clearing (c == 0) and setting a memory array (c != 0) simply
14 * because nearly all memset invocations in the kernel clear memory and
15 * the xc instruction is preferred in such cases.
16 *
17 * void *memset(void *s, int c, size_t n)
18 * {
19 * if (likely(c == 0))
20 * return __builtin_memset(s, 0, n);
21 * return __builtin_memset(s, c, n);
22 * }
23 */
24ENTRY(memset)
25 ltgr %r4,%r4
26 bzr %r14
27 ltgr %r3,%r3
28 jnz .Lmemset_fill
29 aghi %r4,-1
30 srlg %r3,%r4,8
31 ltgr %r3,%r3
32 lgr %r1,%r2
33 jz .Lmemset_clear_rest
34.Lmemset_clear_loop:
35 xc 0(256,%r1),0(%r1)
36 la %r1,256(%r1)
37 brctg %r3,.Lmemset_clear_loop
38.Lmemset_clear_rest:
39 larl %r3,.Lmemset_xc
40 ex %r4,0(%r3)
41 br %r14
42.Lmemset_fill:
43 stc %r3,0(%r2)
44 cghi %r4,1
45 lgr %r1,%r2
46 ber %r14
47 aghi %r4,-2
48 srlg %r3,%r4,8
49 ltgr %r3,%r3
50 jz .Lmemset_fill_rest
51.Lmemset_fill_loop:
52 mvc 1(256,%r1),0(%r1)
53 la %r1,256(%r1)
54 brctg %r3,.Lmemset_fill_loop
55.Lmemset_fill_rest:
56 larl %r3,.Lmemset_mvc
57 ex %r4,0(%r3)
58 br %r14
59.Lmemset_xc:
60 xc 0(1,%r1),0(%r1)
61.Lmemset_mvc:
62 mvc 1(1,%r1),0(%r1)
63
64/*
65 * memcpy implementation
66 *
67 * void *memcpy(void *dest, const void *src, size_t n)
68 */
69ENTRY(memcpy)
70 ltgr %r4,%r4
71 bzr %r14
72 aghi %r4,-1
73 srlg %r5,%r4,8
74 ltgr %r5,%r5
75 lgr %r1,%r2
76 jnz .Lmemcpy_loop
77.Lmemcpy_rest:
78 larl %r5,.Lmemcpy_mvc
79 ex %r4,0(%r5)
80 br %r14
81.Lmemcpy_loop:
82 mvc 0(256,%r1),0(%r3)
83 la %r1,256(%r1)
84 la %r3,256(%r3)
85 brctg %r5,.Lmemcpy_loop
86 j .Lmemcpy_rest
87.Lmemcpy_mvc:
88 mvc 0(1,%r1),0(%r3)
diff --git a/arch/s390/lib/string.c b/arch/s390/lib/string.c
index 846ec64ab2c9..b647d5ff0ad9 100644
--- a/arch/s390/lib/string.c
+++ b/arch/s390/lib/string.c
@@ -43,11 +43,7 @@ static inline char *__strnend(const char *s, size_t n)
43 */ 43 */
44size_t strlen(const char *s) 44size_t strlen(const char *s)
45{ 45{
46#if __GNUC__ < 4
47 return __strend(s) - s; 46 return __strend(s) - s;
48#else
49 return __builtin_strlen(s);
50#endif
51} 47}
52EXPORT_SYMBOL(strlen); 48EXPORT_SYMBOL(strlen);
53 49
@@ -73,7 +69,6 @@ EXPORT_SYMBOL(strnlen);
73 */ 69 */
74char *strcpy(char *dest, const char *src) 70char *strcpy(char *dest, const char *src)
75{ 71{
76#if __GNUC__ < 4
77 register int r0 asm("0") = 0; 72 register int r0 asm("0") = 0;
78 char *ret = dest; 73 char *ret = dest;
79 74
@@ -82,9 +77,6 @@ char *strcpy(char *dest, const char *src)
82 : "+&a" (dest), "+&a" (src) : "d" (r0) 77 : "+&a" (dest), "+&a" (src) : "d" (r0)
83 : "cc", "memory" ); 78 : "cc", "memory" );
84 return ret; 79 return ret;
85#else
86 return __builtin_strcpy(dest, src);
87#endif
88} 80}
89EXPORT_SYMBOL(strcpy); 81EXPORT_SYMBOL(strcpy);
90 82
@@ -106,7 +98,7 @@ size_t strlcpy(char *dest, const char *src, size_t size)
106 if (size) { 98 if (size) {
107 size_t len = (ret >= size) ? size-1 : ret; 99 size_t len = (ret >= size) ? size-1 : ret;
108 dest[len] = '\0'; 100 dest[len] = '\0';
109 __builtin_memcpy(dest, src, len); 101 memcpy(dest, src, len);
110 } 102 }
111 return ret; 103 return ret;
112} 104}
@@ -124,8 +116,8 @@ EXPORT_SYMBOL(strlcpy);
124char *strncpy(char *dest, const char *src, size_t n) 116char *strncpy(char *dest, const char *src, size_t n)
125{ 117{
126 size_t len = __strnend(src, n) - src; 118 size_t len = __strnend(src, n) - src;
127 __builtin_memset(dest + len, 0, n - len); 119 memset(dest + len, 0, n - len);
128 __builtin_memcpy(dest, src, len); 120 memcpy(dest, src, len);
129 return dest; 121 return dest;
130} 122}
131EXPORT_SYMBOL(strncpy); 123EXPORT_SYMBOL(strncpy);
@@ -171,7 +163,7 @@ size_t strlcat(char *dest, const char *src, size_t n)
171 if (len >= n) 163 if (len >= n)
172 len = n - 1; 164 len = n - 1;
173 dest[len] = '\0'; 165 dest[len] = '\0';
174 __builtin_memcpy(dest, src, len); 166 memcpy(dest, src, len);
175 } 167 }
176 return res; 168 return res;
177} 169}
@@ -194,7 +186,7 @@ char *strncat(char *dest, const char *src, size_t n)
194 char *p = __strend(dest); 186 char *p = __strend(dest);
195 187
196 p[len] = '\0'; 188 p[len] = '\0';
197 __builtin_memcpy(p, src, len); 189 memcpy(p, src, len);
198 return dest; 190 return dest;
199} 191}
200EXPORT_SYMBOL(strncat); 192EXPORT_SYMBOL(strncat);
@@ -348,41 +340,3 @@ void *memscan(void *s, int c, size_t n)
348 return (void *) ret; 340 return (void *) ret;
349} 341}
350EXPORT_SYMBOL(memscan); 342EXPORT_SYMBOL(memscan);
351
352/**
353 * memcpy - Copy one area of memory to another
354 * @dest: Where to copy to
355 * @src: Where to copy from
356 * @n: The size of the area.
357 *
358 * returns a pointer to @dest
359 */
360void *memcpy(void *dest, const void *src, size_t n)
361{
362 return __builtin_memcpy(dest, src, n);
363}
364EXPORT_SYMBOL(memcpy);
365
366/**
367 * memset - Fill a region of memory with the given value
368 * @s: Pointer to the start of the area.
369 * @c: The byte to fill the area with
370 * @n: The size of the area.
371 *
372 * returns a pointer to @s
373 */
374void *memset(void *s, int c, size_t n)
375{
376 char *xs;
377
378 if (c == 0)
379 return __builtin_memset(s, 0, n);
380
381 xs = (char *) s;
382 if (n > 0)
383 do {
384 *xs++ = c;
385 } while (--n > 0);
386 return s;
387}
388EXPORT_SYMBOL(memset);
diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile
index d98fe9004a52..0f5536b0c1a1 100644
--- a/arch/s390/mm/Makefile
+++ b/arch/s390/mm/Makefile
@@ -3,7 +3,7 @@
3# 3#
4 4
5obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o maccess.o \ 5obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o maccess.o \
6 page-states.o gup.o 6 page-states.o gup.o extable.o
7obj-$(CONFIG_CMM) += cmm.o 7obj-$(CONFIG_CMM) += cmm.o
8obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o 8obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
9obj-$(CONFIG_DEBUG_SET_MODULE_RONX) += pageattr.o 9obj-$(CONFIG_DEBUG_SET_MODULE_RONX) += pageattr.o
diff --git a/arch/s390/mm/extable.c b/arch/s390/mm/extable.c
new file mode 100644
index 000000000000..4d1ee88864e8
--- /dev/null
+++ b/arch/s390/mm/extable.c
@@ -0,0 +1,81 @@
1#include <linux/module.h>
2#include <linux/sort.h>
3#include <asm/uaccess.h>
4
5/*
6 * Search one exception table for an entry corresponding to the
7 * given instruction address, and return the address of the entry,
8 * or NULL if none is found.
9 * We use a binary search, and thus we assume that the table is
10 * already sorted.
11 */
12const struct exception_table_entry *
13search_extable(const struct exception_table_entry *first,
14 const struct exception_table_entry *last,
15 unsigned long value)
16{
17 const struct exception_table_entry *mid;
18 unsigned long addr;
19
20 while (first <= last) {
21 mid = ((last - first) >> 1) + first;
22 addr = extable_insn(mid);
23 if (addr < value)
24 first = mid + 1;
25 else if (addr > value)
26 last = mid - 1;
27 else
28 return mid;
29 }
30 return NULL;
31}
32
33/*
34 * The exception table needs to be sorted so that the binary
35 * search that we use to find entries in it works properly.
36 * This is used both for the kernel exception table and for
37 * the exception tables of modules that get loaded.
38 *
39 */
40static int cmp_ex(const void *a, const void *b)
41{
42 const struct exception_table_entry *x = a, *y = b;
43
44 /* This compare is only valid after normalization. */
45 return x->insn - y->insn;
46}
47
48void sort_extable(struct exception_table_entry *start,
49 struct exception_table_entry *finish)
50{
51 struct exception_table_entry *p;
52 int i;
53
54 /* Normalize entries to being relative to the start of the section */
55 for (p = start, i = 0; p < finish; p++, i += 8)
56 p->insn += i;
57 sort(start, finish - start, sizeof(*start), cmp_ex, NULL);
58 /* Denormalize all entries */
59 for (p = start, i = 0; p < finish; p++, i += 8)
60 p->insn -= i;
61}
62
63#ifdef CONFIG_MODULES
64/*
65 * If the exception table is sorted, any referring to the module init
66 * will be at the beginning or the end.
67 */
68void trim_init_extable(struct module *m)
69{
70 /* Trim the beginning */
71 while (m->num_exentries &&
72 within_module_init(extable_insn(&m->extable[0]), m)) {
73 m->extable++;
74 m->num_exentries--;
75 }
76 /* Trim the end */
77 while (m->num_exentries &&
78 within_module_init(extable_insn(&m->extable[m->num_exentries-1]), m))
79 m->num_exentries--;
80}
81#endif /* CONFIG_MODULES */
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 6c013f544146..ac9122ca1152 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -111,7 +111,7 @@ static inline int user_space_fault(unsigned long trans_exc_code)
111 if (trans_exc_code == 2) 111 if (trans_exc_code == 2)
112 /* Access via secondary space, set_fs setting decides */ 112 /* Access via secondary space, set_fs setting decides */
113 return current->thread.mm_segment.ar4; 113 return current->thread.mm_segment.ar4;
114 if (addressing_mode == HOME_SPACE_MODE) 114 if (s390_user_mode == HOME_SPACE_MODE)
115 /* User space if the access has been done via home space. */ 115 /* User space if the access has been done via home space. */
116 return trans_exc_code == 3; 116 return trans_exc_code == 3;
117 /* 117 /*
@@ -163,7 +163,7 @@ static noinline void do_no_context(struct pt_regs *regs)
163 /* Are we prepared to handle this kernel fault? */ 163 /* Are we prepared to handle this kernel fault? */
164 fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN); 164 fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
165 if (fixup) { 165 if (fixup) {
166 regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE; 166 regs->psw.addr = extable_fixup(fixup) | PSW_ADDR_AMODE;
167 return; 167 return;
168 } 168 }
169 169
@@ -628,9 +628,8 @@ static int __cpuinit pfault_cpu_notify(struct notifier_block *self,
628 struct thread_struct *thread, *next; 628 struct thread_struct *thread, *next;
629 struct task_struct *tsk; 629 struct task_struct *tsk;
630 630
631 switch (action) { 631 switch (action & ~CPU_TASKS_FROZEN) {
632 case CPU_DEAD: 632 case CPU_DEAD:
633 case CPU_DEAD_FROZEN:
634 spin_lock_irq(&pfault_lock); 633 spin_lock_irq(&pfault_lock);
635 list_for_each_entry_safe(thread, next, &pfault_list, list) { 634 list_for_each_entry_safe(thread, next, &pfault_list, list) {
636 thread->pfault_wait = 0; 635 thread->pfault_wait = 0;
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index 65cb06e2af4e..eeaf8023851f 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -154,6 +154,43 @@ static inline int gup_pud_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
154 return 1; 154 return 1;
155} 155}
156 156
157/*
158 * Like get_user_pages_fast() except its IRQ-safe in that it won't fall
159 * back to the regular GUP.
160 */
161int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
162 struct page **pages)
163{
164 struct mm_struct *mm = current->mm;
165 unsigned long addr, len, end;
166 unsigned long next, flags;
167 pgd_t *pgdp, pgd;
168 int nr = 0;
169
170 start &= PAGE_MASK;
171 addr = start;
172 len = (unsigned long) nr_pages << PAGE_SHIFT;
173 end = start + len;
174 if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
175 (void __user *)start, len)))
176 return 0;
177
178 local_irq_save(flags);
179 pgdp = pgd_offset(mm, addr);
180 do {
181 pgd = *pgdp;
182 barrier();
183 next = pgd_addr_end(addr, end);
184 if (pgd_none(pgd))
185 break;
186 if (!gup_pud_range(pgdp, pgd, addr, next, write, pages, &nr))
187 break;
188 } while (pgdp++, addr = next, addr != end);
189 local_irq_restore(flags);
190
191 return nr;
192}
193
157/** 194/**
158 * get_user_pages_fast() - pin user pages in memory 195 * get_user_pages_fast() - pin user pages in memory
159 * @start: starting user address 196 * @start: starting user address
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 6adbc082618a..81e596c65dee 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -42,7 +42,7 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));
42unsigned long empty_zero_page, zero_page_mask; 42unsigned long empty_zero_page, zero_page_mask;
43EXPORT_SYMBOL(empty_zero_page); 43EXPORT_SYMBOL(empty_zero_page);
44 44
45static unsigned long setup_zero_pages(void) 45static unsigned long __init setup_zero_pages(void)
46{ 46{
47 struct cpuid cpu_id; 47 struct cpuid cpu_id;
48 unsigned int order; 48 unsigned int order;
@@ -212,7 +212,7 @@ void free_initmem(void)
212} 212}
213 213
214#ifdef CONFIG_BLK_DEV_INITRD 214#ifdef CONFIG_BLK_DEV_INITRD
215void free_initrd_mem(unsigned long start, unsigned long end) 215void __init free_initrd_mem(unsigned long start, unsigned long end)
216{ 216{
217 free_init_pages("initrd memory", start, end); 217 free_init_pages("initrd memory", start, end);
218} 218}
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 18df31d1f2c9..b402991e43d7 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -609,8 +609,8 @@ static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
609 */ 609 */
610unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr) 610unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
611{ 611{
612 struct page *page; 612 unsigned long *uninitialized_var(table);
613 unsigned long *table; 613 struct page *uninitialized_var(page);
614 unsigned int mask, bit; 614 unsigned int mask, bit;
615 615
616 if (mm_has_pgste(mm)) 616 if (mm_has_pgste(mm))
@@ -796,7 +796,7 @@ int s390_enable_sie(void)
796 struct mm_struct *mm, *old_mm; 796 struct mm_struct *mm, *old_mm;
797 797
798 /* Do we have switched amode? If no, we cannot do sie */ 798 /* Do we have switched amode? If no, we cannot do sie */
799 if (addressing_mode == HOME_SPACE_MODE) 799 if (s390_user_mode == HOME_SPACE_MODE)
800 return -EINVAL; 800 return -EINVAL;
801 801
802 /* Do we have pgstes? if yes, we are done */ 802 /* Do we have pgstes? if yes, we are done */
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 6f896e75ab49..c22abf900c9e 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -107,7 +107,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
107 pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0)); 107 pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
108 pm_dir = pmd_offset(pu_dir, address); 108 pm_dir = pmd_offset(pu_dir, address);
109 109
110#ifdef CONFIG_64BIT 110#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
111 if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) && 111 if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) &&
112 (address + HPAGE_SIZE <= start + size) && 112 (address + HPAGE_SIZE <= start + size) &&
113 (address >= HPAGE_SIZE)) { 113 (address >= HPAGE_SIZE)) {
diff --git a/arch/s390/net/Makefile b/arch/s390/net/Makefile
new file mode 100644
index 000000000000..90568c33ddb0
--- /dev/null
+++ b/arch/s390/net/Makefile
@@ -0,0 +1,4 @@
1#
2# Arch-specific network modules
3#
4obj-$(CONFIG_BPF_JIT) += bpf_jit.o bpf_jit_comp.o
diff --git a/arch/s390/net/bpf_jit.S b/arch/s390/net/bpf_jit.S
new file mode 100644
index 000000000000..7e45d13816c1
--- /dev/null
+++ b/arch/s390/net/bpf_jit.S
@@ -0,0 +1,130 @@
1/*
2 * BPF Jit compiler for s390, help functions.
3 *
4 * Copyright IBM Corp. 2012
5 *
6 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
7 */
8#include <linux/linkage.h>
9
10/*
11 * Calling convention:
12 * registers %r2, %r6-%r8, %r10-%r11, %r13, %r15 are call saved
13 * %r2: skb pointer
14 * %r3: offset parameter
15 * %r5: BPF A accumulator
16 * %r8: return address
17 * %r9: save register for skb pointer
18 * %r10: skb->data
19 * %r11: skb->len - skb->data_len (headlen)
20 * %r12: BPF X accumulator
21 *
22 * skb_copy_bits takes 4 parameters:
23 * %r2 = skb pointer
24 * %r3 = offset into skb data
25 * %r4 = length to copy
26 * %r5 = pointer to temp buffer
27 */
28#define SKBDATA %r8
29
30 /* A = *(u32 *) (skb->data+K+X) */
31ENTRY(sk_load_word_ind)
32 ar %r3,%r12 # offset += X
33 bmr %r8 # < 0 -> return with cc
34
35 /* A = *(u32 *) (skb->data+K) */
36ENTRY(sk_load_word)
37 llgfr %r1,%r3 # extend offset
38 ahi %r3,4 # offset + 4
39 clr %r11,%r3 # hlen <= offset + 4 ?
40 jl sk_load_word_slow
41 l %r5,0(%r1,%r10) # get word from skb
42 xr %r1,%r1 # set cc to zero
43 br %r8
44
45sk_load_word_slow:
46 lgr %r9,%r2 # save %r2
47 lhi %r4,4 # 4 bytes
48 la %r5,160(%r15) # pointer to temp buffer
49 brasl %r14,skb_copy_bits # get data from skb
50 l %r5,160(%r15) # load result from temp buffer
51 ltgr %r2,%r2 # set cc to (%r2 != 0)
52 lgr %r2,%r9 # restore %r2
53 br %r8
54
55 /* A = *(u16 *) (skb->data+K+X) */
56ENTRY(sk_load_half_ind)
57 ar %r3,%r12 # offset += X
58 bmr %r8 # < 0 -> return with cc
59
60 /* A = *(u16 *) (skb->data+K) */
61ENTRY(sk_load_half)
62 llgfr %r1,%r3 # extend offset
63 ahi %r3,2 # offset + 2
64 clr %r11,%r3 # hlen <= offset + 2 ?
65 jl sk_load_half_slow
66 llgh %r5,0(%r1,%r10) # get half from skb
67 xr %r1,%r1 # set cc to zero
68 br %r8
69
70sk_load_half_slow:
71 lgr %r9,%r2 # save %r2
72 lhi %r4,2 # 2 bytes
73 la %r5,162(%r15) # pointer to temp buffer
74 brasl %r14,skb_copy_bits # get data from skb
75 xc 160(2,%r15),160(%r15)
76 l %r5,160(%r15) # load result from temp buffer
77 ltgr %r2,%r2 # set cc to (%r2 != 0)
78 lgr %r2,%r9 # restore %r2
79 br %r8
80
81 /* A = *(u8 *) (skb->data+K+X) */
82ENTRY(sk_load_byte_ind)
83 ar %r3,%r12 # offset += X
84 bmr %r8 # < 0 -> return with cc
85
86 /* A = *(u8 *) (skb->data+K) */
87ENTRY(sk_load_byte)
88 llgfr %r1,%r3 # extend offset
89 clr %r11,%r3 # hlen < offset ?
90 jle sk_load_byte_slow
91 lhi %r5,0
92 ic %r5,0(%r1,%r10) # get byte from skb
93 xr %r1,%r1 # set cc to zero
94 br %r8
95
96sk_load_byte_slow:
97 lgr %r9,%r2 # save %r2
98 lhi %r4,1 # 1 bytes
99 la %r5,163(%r15) # pointer to temp buffer
100 brasl %r14,skb_copy_bits # get data from skb
101 xc 160(3,%r15),160(%r15)
102 l %r5,160(%r15) # load result from temp buffer
103 ltgr %r2,%r2 # set cc to (%r2 != 0)
104 lgr %r2,%r9 # restore %r2
105 br %r8
106
107 /* A = (*(u8 *)(skb->data+K) & 0xf) << 2 */
108ENTRY(sk_load_byte_msh)
109 llgfr %r1,%r3 # extend offset
110 clr %r11,%r3 # hlen < offset ?
111 jle sk_load_byte_slow
112 lhi %r12,0
113 ic %r12,0(%r1,%r10) # get byte from skb
114 nill %r12,0x0f
115 sll %r12,2
116 xr %r1,%r1 # set cc to zero
117 br %r8
118
119sk_load_byte_msh_slow:
120 lgr %r9,%r2 # save %r2
121 lhi %r4,2 # 2 bytes
122 la %r5,162(%r15) # pointer to temp buffer
123 brasl %r14,skb_copy_bits # get data from skb
124 xc 160(3,%r15),160(%r15)
125 l %r12,160(%r15) # load result from temp buffer
126 nill %r12,0x0f
127 sll %r12,2
128 ltgr %r2,%r2 # set cc to (%r2 != 0)
129 lgr %r2,%r9 # restore %r2
130 br %r8
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
new file mode 100644
index 000000000000..9b355b406afa
--- /dev/null
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -0,0 +1,776 @@
1/*
2 * BPF Jit compiler for s390.
3 *
4 * Copyright IBM Corp. 2012
5 *
6 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
7 */
8#include <linux/moduleloader.h>
9#include <linux/netdevice.h>
10#include <linux/filter.h>
11#include <asm/cacheflush.h>
12#include <asm/processor.h>
13#include <asm/facility.h>
14
15/*
16 * Conventions:
17 * %r2 = skb pointer
18 * %r3 = offset parameter
19 * %r4 = scratch register / length parameter
20 * %r5 = BPF A accumulator
21 * %r8 = return address
22 * %r9 = save register for skb pointer
23 * %r10 = skb->data
24 * %r11 = skb->len - skb->data_len (headlen)
25 * %r12 = BPF X accumulator
26 * %r13 = literal pool pointer
27 * 0(%r15) - 63(%r15) scratch memory array with BPF_MEMWORDS
28 */
29int bpf_jit_enable __read_mostly;
30
31/*
32 * assembly code in arch/s390/net/bpf_jit.S
33 */
34extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[];
35extern u8 sk_load_word_ind[], sk_load_half_ind[], sk_load_byte_ind[];
36
37struct bpf_jit {
38 unsigned int seen;
39 u8 *start;
40 u8 *prg;
41 u8 *mid;
42 u8 *lit;
43 u8 *end;
44 u8 *base_ip;
45 u8 *ret0_ip;
46 u8 *exit_ip;
47 unsigned int off_load_word;
48 unsigned int off_load_half;
49 unsigned int off_load_byte;
50 unsigned int off_load_bmsh;
51 unsigned int off_load_iword;
52 unsigned int off_load_ihalf;
53 unsigned int off_load_ibyte;
54};
55
#define BPF_SIZE_MAX	4096	/* Max size for program (code + literals) */

#define SEEN_DATAREF	1	/* might call external helpers */
#define SEEN_XREG	2	/* %r12 (BPF X register) is used */
#define SEEN_MEM	4	/* use mem[] for temporary storage */
#define SEEN_RET0	8	/* ret0_ip points to a valid return 0 */
#define SEEN_LITERAL	16	/* code uses literals */
#define SEEN_LOAD_WORD	32	/* code uses sk_load_word */
#define SEEN_LOAD_HALF	64	/* code uses sk_load_half */
#define SEEN_LOAD_BYTE	128	/* code uses sk_load_byte */
#define SEEN_LOAD_BMSH	256	/* code uses sk_load_byte_msh */
#define SEEN_LOAD_IWORD	512	/* code uses sk_load_word_ind */
#define SEEN_LOAD_IHALF	1024	/* code uses sk_load_half_ind */
#define SEEN_LOAD_IBYTE	2048	/* code uses sk_load_byte_ind */
70
/*
 * Instruction emitters.  Each stores the instruction only while it
 * still fits into the code area (bounded by jit->mid) but always
 * advances jit->prg, so the sizing passes just count bytes.
 */
#define EMIT2(op)				\
({						\
	if (jit->prg + 2 <= jit->mid)		\
		*(u16 *) jit->prg = op;		\
	jit->prg += 2;				\
})

#define EMIT4(op)				\
({						\
	if (jit->prg + 4 <= jit->mid)		\
		*(u32 *) jit->prg = op;		\
	jit->prg += 4;				\
})

/* 4-byte instruction with a 12-bit displacement in the low bits */
#define EMIT4_DISP(op, disp)			\
({						\
	unsigned int __disp = (disp) & 0xfff;	\
	EMIT4(op | __disp);			\
})

/* 4-byte instruction with a 16-bit immediate in the low bits */
#define EMIT4_IMM(op, imm)			\
({						\
	unsigned int __imm = (imm) & 0xffff;	\
	EMIT4(op | __imm);			\
})

/* 4-byte relative branch; offset is encoded in halfwords */
#define EMIT4_PCREL(op, pcrel)			\
({						\
	long __pcrel = ((pcrel) >> 1) & 0xffff;	\
	EMIT4(op | __pcrel);			\
})

#define EMIT6(op1, op2)				\
({						\
	if (jit->prg + 6 <= jit->mid) {		\
		*(u32 *) jit->prg = op1;	\
		*(u16 *) (jit->prg + 4) = op2;	\
	}					\
	jit->prg += 6;				\
})

#define EMIT6_DISP(op1, op2, disp)		\
({						\
	unsigned int __disp = (disp) & 0xfff;	\
	EMIT6(op1 | __disp, op2);		\
})

/* 6-byte instruction with a 32-bit immediate split across op1/op2 */
#define EMIT6_IMM(op, imm)			\
({						\
	unsigned int __imm = (imm);		\
	EMIT6(op | (__imm >> 16), __imm & 0xffff);	\
})

/*
 * Add a 4-byte constant to the literal pool; evaluates to its offset
 * relative to base_ip (%r13).
 */
#define EMIT_CONST(val)				\
({						\
	unsigned int ret;			\
	ret = (unsigned int) (jit->lit - jit->base_ip);	\
	jit->seen |= SEEN_LITERAL;		\
	if (jit->lit + 4 <= jit->end)		\
		*(u32 *) jit->lit = val;	\
	jit->lit += 4;				\
	ret;					\
})

/*
 * Add a helper-function address to the literal pool, but only when the
 * corresponding SEEN_* bit is set; always evaluates to the offset the
 * address would occupy relative to base_ip (%r13).
 */
#define EMIT_FN_CONST(bit, fn)			\
({						\
	unsigned int ret;			\
	ret = (unsigned int) (jit->lit - jit->base_ip);	\
	if (jit->seen & bit) {			\
		jit->seen |= SEEN_LITERAL;	\
		if (jit->lit + 8 <= jit->end)	\
			*(void **) jit->lit = fn;	\
		jit->lit += 8;			\
	}					\
	ret;					\
})
147
148static void bpf_jit_prologue(struct bpf_jit *jit)
149{
150 /* Save registers and create stack frame if necessary */
151 if (jit->seen & SEEN_DATAREF) {
152 /* stmg %r8,%r15,88(%r15) */
153 EMIT6(0xeb8ff058, 0x0024);
154 /* lgr %r14,%r15 */
155 EMIT4(0xb90400ef);
156 /* ahi %r15,<offset> */
157 EMIT4_IMM(0xa7fa0000, (jit->seen & SEEN_MEM) ? -112 : -80);
158 /* stg %r14,152(%r15) */
159 EMIT6(0xe3e0f098, 0x0024);
160 } else if ((jit->seen & SEEN_XREG) && (jit->seen & SEEN_LITERAL))
161 /* stmg %r12,%r13,120(%r15) */
162 EMIT6(0xebcdf078, 0x0024);
163 else if (jit->seen & SEEN_XREG)
164 /* stg %r12,120(%r15) */
165 EMIT6(0xe3c0f078, 0x0024);
166 else if (jit->seen & SEEN_LITERAL)
167 /* stg %r13,128(%r15) */
168 EMIT6(0xe3d0f080, 0x0024);
169
170 /* Setup literal pool */
171 if (jit->seen & SEEN_LITERAL) {
172 /* basr %r13,0 */
173 EMIT2(0x0dd0);
174 jit->base_ip = jit->prg;
175 }
176 jit->off_load_word = EMIT_FN_CONST(SEEN_LOAD_WORD, sk_load_word);
177 jit->off_load_half = EMIT_FN_CONST(SEEN_LOAD_HALF, sk_load_half);
178 jit->off_load_byte = EMIT_FN_CONST(SEEN_LOAD_BYTE, sk_load_byte);
179 jit->off_load_bmsh = EMIT_FN_CONST(SEEN_LOAD_BMSH, sk_load_byte_msh);
180 jit->off_load_iword = EMIT_FN_CONST(SEEN_LOAD_IWORD, sk_load_word_ind);
181 jit->off_load_ihalf = EMIT_FN_CONST(SEEN_LOAD_IHALF, sk_load_half_ind);
182 jit->off_load_ibyte = EMIT_FN_CONST(SEEN_LOAD_IBYTE, sk_load_byte_ind);
183
184 /* Filter needs to access skb data */
185 if (jit->seen & SEEN_DATAREF) {
186 /* l %r11,<len>(%r2) */
187 EMIT4_DISP(0x58b02000, offsetof(struct sk_buff, len));
188 /* s %r11,<data_len>(%r2) */
189 EMIT4_DISP(0x5bb02000, offsetof(struct sk_buff, data_len));
190 /* lg %r10,<data>(%r2) */
191 EMIT6_DISP(0xe3a02000, 0x0004,
192 offsetof(struct sk_buff, data));
193 }
194}
195
196static void bpf_jit_epilogue(struct bpf_jit *jit)
197{
198 /* Return 0 */
199 if (jit->seen & SEEN_RET0) {
200 jit->ret0_ip = jit->prg;
201 /* lghi %r2,0 */
202 EMIT4(0xa7290000);
203 }
204 jit->exit_ip = jit->prg;
205 /* Restore registers */
206 if (jit->seen & SEEN_DATAREF)
207 /* lmg %r8,%r15,<offset>(%r15) */
208 EMIT6_DISP(0xeb8ff000, 0x0004,
209 (jit->seen & SEEN_MEM) ? 200 : 168);
210 else if ((jit->seen & SEEN_XREG) && (jit->seen & SEEN_LITERAL))
211 /* lmg %r12,%r13,120(%r15) */
212 EMIT6(0xebcdf078, 0x0004);
213 else if (jit->seen & SEEN_XREG)
214 /* lg %r12,120(%r15) */
215 EMIT6(0xe3c0f078, 0x0004);
216 else if (jit->seen & SEEN_LITERAL)
217 /* lg %r13,128(%r15) */
218 EMIT6(0xe3d0f080, 0x0004);
219 /* br %r14 */
220 EMIT2(0x07fe);
221}
222
223/*
224 * make sure we dont leak kernel information to user
225 */
226static void bpf_jit_noleaks(struct bpf_jit *jit, struct sock_filter *filter)
227{
228 /* Clear temporary memory if (seen & SEEN_MEM) */
229 if (jit->seen & SEEN_MEM)
230 /* xc 0(64,%r15),0(%r15) */
231 EMIT6(0xd73ff000, 0xf000);
232 /* Clear X if (seen & SEEN_XREG) */
233 if (jit->seen & SEEN_XREG)
234 /* lhi %r12,0 */
235 EMIT4(0xa7c80000);
236 /* Clear A if the first register does not set it. */
237 switch (filter[0].code) {
238 case BPF_S_LD_W_ABS:
239 case BPF_S_LD_H_ABS:
240 case BPF_S_LD_B_ABS:
241 case BPF_S_LD_W_LEN:
242 case BPF_S_LD_W_IND:
243 case BPF_S_LD_H_IND:
244 case BPF_S_LD_B_IND:
245 case BPF_S_LDX_B_MSH:
246 case BPF_S_LD_IMM:
247 case BPF_S_LD_MEM:
248 case BPF_S_MISC_TXA:
249 case BPF_S_ANC_PROTOCOL:
250 case BPF_S_ANC_PKTTYPE:
251 case BPF_S_ANC_IFINDEX:
252 case BPF_S_ANC_MARK:
253 case BPF_S_ANC_QUEUE:
254 case BPF_S_ANC_HATYPE:
255 case BPF_S_ANC_RXHASH:
256 case BPF_S_ANC_CPU:
257 case BPF_S_RET_K:
258 /* first instruction sets A register */
259 break;
260 default: /* A = 0 */
261 /* lhi %r5,0 */
262 EMIT4(0xa7580000);
263 }
264}
265
266static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
267 unsigned int *addrs, int i, int last)
268{
269 unsigned int K;
270 int offset;
271 unsigned int mask;
272
273 K = filter->k;
274 switch (filter->code) {
275 case BPF_S_ALU_ADD_X: /* A += X */
276 jit->seen |= SEEN_XREG;
277 /* ar %r5,%r12 */
278 EMIT2(0x1a5c);
279 break;
280 case BPF_S_ALU_ADD_K: /* A += K */
281 if (!K)
282 break;
283 if (K <= 16383)
284 /* ahi %r5,<K> */
285 EMIT4_IMM(0xa75a0000, K);
286 else if (test_facility(21))
287 /* alfi %r5,<K> */
288 EMIT6_IMM(0xc25b0000, K);
289 else
290 /* a %r5,<d(K)>(%r13) */
291 EMIT4_DISP(0x5a50d000, EMIT_CONST(K));
292 break;
293 case BPF_S_ALU_SUB_X: /* A -= X */
294 jit->seen |= SEEN_XREG;
295 /* sr %r5,%r12 */
296 EMIT2(0x1b5c);
297 break;
298 case BPF_S_ALU_SUB_K: /* A -= K */
299 if (!K)
300 break;
301 if (K <= 16384)
302 /* ahi %r5,-K */
303 EMIT4_IMM(0xa75a0000, -K);
304 else if (test_facility(21))
305 /* alfi %r5,-K */
306 EMIT6_IMM(0xc25b0000, -K);
307 else
308 /* s %r5,<d(K)>(%r13) */
309 EMIT4_DISP(0x5b50d000, EMIT_CONST(K));
310 break;
311 case BPF_S_ALU_MUL_X: /* A *= X */
312 jit->seen |= SEEN_XREG;
313 /* msr %r5,%r12 */
314 EMIT4(0xb252005c);
315 break;
316 case BPF_S_ALU_MUL_K: /* A *= K */
317 if (K <= 16383)
318 /* mhi %r5,K */
319 EMIT4_IMM(0xa75c0000, K);
320 else if (test_facility(34))
321 /* msfi %r5,<K> */
322 EMIT6_IMM(0xc2510000, K);
323 else
324 /* ms %r5,<d(K)>(%r13) */
325 EMIT4_DISP(0x7150d000, EMIT_CONST(K));
326 break;
327 case BPF_S_ALU_DIV_X: /* A /= X */
328 jit->seen |= SEEN_XREG | SEEN_RET0;
329 /* ltr %r12,%r12 */
330 EMIT2(0x12cc);
331 /* jz <ret0> */
332 EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg));
333 /* lhi %r4,0 */
334 EMIT4(0xa7480000);
335 /* dr %r4,%r12 */
336 EMIT2(0x1d4c);
337 break;
338 case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K) */
339 /* m %r4,<d(K)>(%r13) */
340 EMIT4_DISP(0x5c40d000, EMIT_CONST(K));
341 /* lr %r5,%r4 */
342 EMIT2(0x1854);
343 break;
344 case BPF_S_ALU_AND_X: /* A &= X */
345 jit->seen |= SEEN_XREG;
346 /* nr %r5,%r12 */
347 EMIT2(0x145c);
348 break;
349 case BPF_S_ALU_AND_K: /* A &= K */
350 if (test_facility(21))
351 /* nilf %r5,<K> */
352 EMIT6_IMM(0xc05b0000, K);
353 else
354 /* n %r5,<d(K)>(%r13) */
355 EMIT4_DISP(0x5450d000, EMIT_CONST(K));
356 break;
357 case BPF_S_ALU_OR_X: /* A |= X */
358 jit->seen |= SEEN_XREG;
359 /* or %r5,%r12 */
360 EMIT2(0x165c);
361 break;
362 case BPF_S_ALU_OR_K: /* A |= K */
363 if (test_facility(21))
364 /* oilf %r5,<K> */
365 EMIT6_IMM(0xc05d0000, K);
366 else
367 /* o %r5,<d(K)>(%r13) */
368 EMIT4_DISP(0x5650d000, EMIT_CONST(K));
369 break;
370 case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */
371 jit->seen |= SEEN_XREG;
372 /* xr %r5,%r12 */
373 EMIT2(0x175c);
374 break;
375 case BPF_S_ALU_LSH_X: /* A <<= X; */
376 jit->seen |= SEEN_XREG;
377 /* sll %r5,0(%r12) */
378 EMIT4(0x8950c000);
379 break;
380 case BPF_S_ALU_LSH_K: /* A <<= K */
381 if (K == 0)
382 break;
383 /* sll %r5,K */
384 EMIT4_DISP(0x89500000, K);
385 break;
386 case BPF_S_ALU_RSH_X: /* A >>= X; */
387 jit->seen |= SEEN_XREG;
388 /* srl %r5,0(%r12) */
389 EMIT4(0x8850c000);
390 break;
391 case BPF_S_ALU_RSH_K: /* A >>= K; */
392 if (K == 0)
393 break;
394 /* srl %r5,K */
395 EMIT4_DISP(0x88500000, K);
396 break;
397 case BPF_S_ALU_NEG: /* A = -A */
398 /* lnr %r5,%r5 */
399 EMIT2(0x1155);
400 break;
401 case BPF_S_JMP_JA: /* ip += K */
402 offset = addrs[i + K] + jit->start - jit->prg;
403 EMIT4_PCREL(0xa7f40000, offset);
404 break;
405 case BPF_S_JMP_JGT_K: /* ip += (A > K) ? jt : jf */
406 mask = 0x200000; /* jh */
407 goto kbranch;
408 case BPF_S_JMP_JGE_K: /* ip += (A >= K) ? jt : jf */
409 mask = 0xa00000; /* jhe */
410 goto kbranch;
411 case BPF_S_JMP_JEQ_K: /* ip += (A == K) ? jt : jf */
412 mask = 0x800000; /* je */
413kbranch: /* Emit compare if the branch targets are different */
414 if (filter->jt != filter->jf) {
415 if (K <= 16383)
416 /* chi %r5,<K> */
417 EMIT4_IMM(0xa75e0000, K);
418 else if (test_facility(21))
419 /* clfi %r5,<K> */
420 EMIT6_IMM(0xc25f0000, K);
421 else
422 /* c %r5,<d(K)>(%r13) */
423 EMIT4_DISP(0x5950d000, EMIT_CONST(K));
424 }
425branch: if (filter->jt == filter->jf) {
426 if (filter->jt == 0)
427 break;
428 /* j <jt> */
429 offset = addrs[i + filter->jt] + jit->start - jit->prg;
430 EMIT4_PCREL(0xa7f40000, offset);
431 break;
432 }
433 if (filter->jt != 0) {
434 /* brc <mask>,<jt> */
435 offset = addrs[i + filter->jt] + jit->start - jit->prg;
436 EMIT4_PCREL(0xa7040000 | mask, offset);
437 }
438 if (filter->jf != 0) {
439 /* brc <mask^15>,<jf> */
440 offset = addrs[i + filter->jf] + jit->start - jit->prg;
441 EMIT4_PCREL(0xa7040000 | (mask ^ 0xf00000), offset);
442 }
443 break;
444 case BPF_S_JMP_JSET_K: /* ip += (A & K) ? jt : jf */
445 mask = 0x700000; /* jnz */
446 /* Emit test if the branch targets are different */
447 if (filter->jt != filter->jf) {
448 if (K > 65535) {
449 /* lr %r4,%r5 */
450 EMIT2(0x1845);
451 /* n %r4,<d(K)>(%r13) */
452 EMIT4_DISP(0x5440d000, EMIT_CONST(K));
453 } else
454 /* tmll %r5,K */
455 EMIT4_IMM(0xa7510000, K);
456 }
457 goto branch;
458 case BPF_S_JMP_JGT_X: /* ip += (A > X) ? jt : jf */
459 mask = 0x200000; /* jh */
460 goto xbranch;
461 case BPF_S_JMP_JGE_X: /* ip += (A >= X) ? jt : jf */
462 mask = 0xa00000; /* jhe */
463 goto xbranch;
464 case BPF_S_JMP_JEQ_X: /* ip += (A == X) ? jt : jf */
465 mask = 0x800000; /* je */
466xbranch: /* Emit compare if the branch targets are different */
467 if (filter->jt != filter->jf) {
468 jit->seen |= SEEN_XREG;
469 /* cr %r5,%r12 */
470 EMIT2(0x195c);
471 }
472 goto branch;
473 case BPF_S_JMP_JSET_X: /* ip += (A & X) ? jt : jf */
474 mask = 0x700000; /* jnz */
475 /* Emit test if the branch targets are different */
476 if (filter->jt != filter->jf) {
477 jit->seen |= SEEN_XREG;
478 /* lr %r4,%r5 */
479 EMIT2(0x1845);
480 /* nr %r4,%r12 */
481 EMIT2(0x144c);
482 }
483 goto branch;
484 case BPF_S_LD_W_ABS: /* A = *(u32 *) (skb->data+K) */
485 jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_WORD;
486 offset = jit->off_load_word;
487 goto load_abs;
488 case BPF_S_LD_H_ABS: /* A = *(u16 *) (skb->data+K) */
489 jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_HALF;
490 offset = jit->off_load_half;
491 goto load_abs;
492 case BPF_S_LD_B_ABS: /* A = *(u8 *) (skb->data+K) */
493 jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_BYTE;
494 offset = jit->off_load_byte;
495load_abs: if ((int) K < 0)
496 goto out;
497call_fn: /* lg %r1,<d(function)>(%r13) */
498 EMIT6_DISP(0xe310d000, 0x0004, offset);
499 /* l %r3,<d(K)>(%r13) */
500 EMIT4_DISP(0x5830d000, EMIT_CONST(K));
501 /* basr %r8,%r1 */
502 EMIT2(0x0d81);
503 /* jnz <ret0> */
504 EMIT4_PCREL(0xa7740000, (jit->ret0_ip - jit->prg));
505 break;
506 case BPF_S_LD_W_IND: /* A = *(u32 *) (skb->data+K+X) */
507 jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IWORD;
508 offset = jit->off_load_iword;
509 goto call_fn;
510 case BPF_S_LD_H_IND: /* A = *(u16 *) (skb->data+K+X) */
511 jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IHALF;
512 offset = jit->off_load_ihalf;
513 goto call_fn;
514 case BPF_S_LD_B_IND: /* A = *(u8 *) (skb->data+K+X) */
515 jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IBYTE;
516 offset = jit->off_load_ibyte;
517 goto call_fn;
518 case BPF_S_LDX_B_MSH:
519 /* X = (*(u8 *)(skb->data+K) & 0xf) << 2 */
520 jit->seen |= SEEN_RET0;
521 if ((int) K < 0) {
522 /* j <ret0> */
523 EMIT4_PCREL(0xa7f40000, (jit->ret0_ip - jit->prg));
524 break;
525 }
526 jit->seen |= SEEN_DATAREF | SEEN_LOAD_BMSH;
527 offset = jit->off_load_bmsh;
528 goto call_fn;
529 case BPF_S_LD_W_LEN: /* A = skb->len; */
530 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
531 /* l %r5,<d(len)>(%r2) */
532 EMIT4_DISP(0x58502000, offsetof(struct sk_buff, len));
533 break;
534 case BPF_S_LDX_W_LEN: /* X = skb->len; */
535 jit->seen |= SEEN_XREG;
536 /* l %r12,<d(len)>(%r2) */
537 EMIT4_DISP(0x58c02000, offsetof(struct sk_buff, len));
538 break;
539 case BPF_S_LD_IMM: /* A = K */
540 if (K <= 16383)
541 /* lhi %r5,K */
542 EMIT4_IMM(0xa7580000, K);
543 else if (test_facility(21))
544 /* llilf %r5,<K> */
545 EMIT6_IMM(0xc05f0000, K);
546 else
547 /* l %r5,<d(K)>(%r13) */
548 EMIT4_DISP(0x5850d000, EMIT_CONST(K));
549 break;
550 case BPF_S_LDX_IMM: /* X = K */
551 jit->seen |= SEEN_XREG;
552 if (K <= 16383)
553 /* lhi %r12,<K> */
554 EMIT4_IMM(0xa7c80000, K);
555 else if (test_facility(21))
556 /* llilf %r12,<K> */
557 EMIT6_IMM(0xc0cf0000, K);
558 else
559 /* l %r12,<d(K)>(%r13) */
560 EMIT4_DISP(0x58c0d000, EMIT_CONST(K));
561 break;
562 case BPF_S_LD_MEM: /* A = mem[K] */
563 jit->seen |= SEEN_MEM;
564 /* l %r5,<K>(%r15) */
565 EMIT4_DISP(0x5850f000,
566 (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
567 break;
568 case BPF_S_LDX_MEM: /* X = mem[K] */
569 jit->seen |= SEEN_XREG | SEEN_MEM;
570 /* l %r12,<K>(%r15) */
571 EMIT4_DISP(0x58c0f000,
572 (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
573 break;
574 case BPF_S_MISC_TAX: /* X = A */
575 jit->seen |= SEEN_XREG;
576 /* lr %r12,%r5 */
577 EMIT2(0x18c5);
578 break;
579 case BPF_S_MISC_TXA: /* A = X */
580 jit->seen |= SEEN_XREG;
581 /* lr %r5,%r12 */
582 EMIT2(0x185c);
583 break;
584 case BPF_S_RET_K:
585 if (K == 0) {
586 jit->seen |= SEEN_RET0;
587 if (last)
588 break;
589 /* j <ret0> */
590 EMIT4_PCREL(0xa7f40000, jit->ret0_ip - jit->prg);
591 } else {
592 if (K <= 16383)
593 /* lghi %r2,K */
594 EMIT4_IMM(0xa7290000, K);
595 else
596 /* llgf %r2,<K>(%r13) */
597 EMIT6_DISP(0xe320d000, 0x0016, EMIT_CONST(K));
598 /* j <exit> */
599 if (last && !(jit->seen & SEEN_RET0))
600 break;
601 EMIT4_PCREL(0xa7f40000, jit->exit_ip - jit->prg);
602 }
603 break;
604 case BPF_S_RET_A:
605 /* llgfr %r2,%r5 */
606 EMIT4(0xb9160025);
607 /* j <exit> */
608 EMIT4_PCREL(0xa7f40000, jit->exit_ip - jit->prg);
609 break;
610 case BPF_S_ST: /* mem[K] = A */
611 jit->seen |= SEEN_MEM;
612 /* st %r5,<K>(%r15) */
613 EMIT4_DISP(0x5050f000,
614 (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
615 break;
616 case BPF_S_STX: /* mem[K] = X : mov %ebx,off8(%rbp) */
617 jit->seen |= SEEN_XREG | SEEN_MEM;
618 /* st %r12,<K>(%r15) */
619 EMIT4_DISP(0x50c0f000,
620 (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
621 break;
622 case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
623 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
624 /* lhi %r5,0 */
625 EMIT4(0xa7580000);
626 /* icm %r5,3,<d(protocol)>(%r2) */
627 EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, protocol));
628 break;
629 case BPF_S_ANC_IFINDEX: /* if (!skb->dev) return 0;
630 * A = skb->dev->ifindex */
631 BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
632 jit->seen |= SEEN_RET0;
633 /* lg %r1,<d(dev)>(%r2) */
634 EMIT6_DISP(0xe3102000, 0x0004, offsetof(struct sk_buff, dev));
635 /* ltgr %r1,%r1 */
636 EMIT4(0xb9020011);
637 /* jz <ret0> */
638 EMIT4_PCREL(0xa7840000, jit->ret0_ip - jit->prg);
639 /* l %r5,<d(ifindex)>(%r1) */
640 EMIT4_DISP(0x58501000, offsetof(struct net_device, ifindex));
641 break;
642 case BPF_S_ANC_MARK: /* A = skb->mark */
643 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
644 /* l %r5,<d(mark)>(%r2) */
645 EMIT4_DISP(0x58502000, offsetof(struct sk_buff, mark));
646 break;
647 case BPF_S_ANC_QUEUE: /* A = skb->queue_mapping */
648 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
649 /* lhi %r5,0 */
650 EMIT4(0xa7580000);
651 /* icm %r5,3,<d(queue_mapping)>(%r2) */
652 EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, queue_mapping));
653 break;
654 case BPF_S_ANC_HATYPE: /* if (!skb->dev) return 0;
655 * A = skb->dev->type */
656 BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
657 jit->seen |= SEEN_RET0;
658 /* lg %r1,<d(dev)>(%r2) */
659 EMIT6_DISP(0xe3102000, 0x0004, offsetof(struct sk_buff, dev));
660 /* ltgr %r1,%r1 */
661 EMIT4(0xb9020011);
662 /* jz <ret0> */
663 EMIT4_PCREL(0xa7840000, jit->ret0_ip - jit->prg);
664 /* lhi %r5,0 */
665 EMIT4(0xa7580000);
666 /* icm %r5,3,<d(type)>(%r1) */
667 EMIT4_DISP(0xbf531000, offsetof(struct net_device, type));
668 break;
669 case BPF_S_ANC_RXHASH: /* A = skb->rxhash */
670 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4);
671 /* l %r5,<d(rxhash)>(%r2) */
672 EMIT4_DISP(0x58502000, offsetof(struct sk_buff, rxhash));
673 break;
674 case BPF_S_ANC_CPU: /* A = smp_processor_id() */
675#ifdef CONFIG_SMP
676 /* l %r5,<d(cpu_nr)> */
677 EMIT4_DISP(0x58500000, offsetof(struct _lowcore, cpu_nr));
678#else
679 /* lhi %r5,0 */
680 EMIT4(0xa7580000);
681#endif
682 break;
683 default: /* too complex, give up */
684 goto out;
685 }
686 addrs[i] = jit->prg - jit->start;
687 return 0;
688out:
689 return -1;
690}
691
692void bpf_jit_compile(struct sk_filter *fp)
693{
694 unsigned long size, prg_len, lit_len;
695 struct bpf_jit jit, cjit;
696 unsigned int *addrs;
697 int pass, i;
698
699 if (!bpf_jit_enable)
700 return;
701 addrs = kmalloc(fp->len * sizeof(*addrs), GFP_KERNEL);
702 if (addrs == NULL)
703 return;
704 memset(addrs, 0, fp->len * sizeof(*addrs));
705 memset(&jit, 0, sizeof(cjit));
706 memset(&cjit, 0, sizeof(cjit));
707
708 for (pass = 0; pass < 10; pass++) {
709 jit.prg = jit.start;
710 jit.lit = jit.mid;
711
712 bpf_jit_prologue(&jit);
713 bpf_jit_noleaks(&jit, fp->insns);
714 for (i = 0; i < fp->len; i++) {
715 if (bpf_jit_insn(&jit, fp->insns + i, addrs, i,
716 i == fp->len - 1))
717 goto out;
718 }
719 bpf_jit_epilogue(&jit);
720 if (jit.start) {
721 WARN_ON(jit.prg > cjit.prg || jit.lit > cjit.lit);
722 if (memcmp(&jit, &cjit, sizeof(jit)) == 0)
723 break;
724 } else if (jit.prg == cjit.prg && jit.lit == cjit.lit) {
725 prg_len = jit.prg - jit.start;
726 lit_len = jit.lit - jit.mid;
727 size = max_t(unsigned long, prg_len + lit_len,
728 sizeof(struct work_struct));
729 if (size >= BPF_SIZE_MAX)
730 goto out;
731 jit.start = module_alloc(size);
732 if (!jit.start)
733 goto out;
734 jit.prg = jit.mid = jit.start + prg_len;
735 jit.lit = jit.end = jit.start + prg_len + lit_len;
736 jit.base_ip += (unsigned long) jit.start;
737 jit.exit_ip += (unsigned long) jit.start;
738 jit.ret0_ip += (unsigned long) jit.start;
739 }
740 cjit = jit;
741 }
742 if (bpf_jit_enable > 1) {
743 pr_err("flen=%d proglen=%lu pass=%d image=%p\n",
744 fp->len, jit.end - jit.start, pass, jit.start);
745 if (jit.start) {
746 printk(KERN_ERR "JIT code:\n");
747 print_fn_code(jit.start, jit.mid - jit.start);
748 print_hex_dump(KERN_ERR, "JIT literals:\n",
749 DUMP_PREFIX_ADDRESS, 16, 1,
750 jit.mid, jit.end - jit.mid, false);
751 }
752 }
753 if (jit.start)
754 fp->bpf_func = (void *) jit.start;
755out:
756 kfree(addrs);
757}
758
759static void jit_free_defer(struct work_struct *arg)
760{
761 module_free(NULL, arg);
762}
763
764/* run from softirq, we must use a work_struct to call
765 * module_free() from process context
766 */
767void bpf_jit_free(struct sk_filter *fp)
768{
769 struct work_struct *work;
770
771 if (fp->bpf_func == sk_run_filter)
772 return;
773 work = (struct work_struct *)fp->bpf_func;
774 INIT_WORK(work, jit_free_defer);
775 schedule_work(work);
776}
diff --git a/block/partitions/ibm.c b/block/partitions/ibm.c
index 1104acac780b..47a61474e795 100644
--- a/block/partitions/ibm.c
+++ b/block/partitions/ibm.c
@@ -1,9 +1,8 @@
1/* 1/*
2 * File...........: linux/fs/partitions/ibm.c
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> 2 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Volker Sameske <sameske@de.ibm.com> 3 * Volker Sameske <sameske@de.ibm.com>
5 * Bugreports.to..: <Linux390@de.ibm.com> 4 * Bugreports.to..: <Linux390@de.ibm.com>
6 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000 5 * Copyright IBM Corp. 1999, 2012
7 */ 6 */
8 7
9#include <linux/buffer_head.h> 8#include <linux/buffer_head.h>
@@ -17,17 +16,23 @@
17#include "check.h" 16#include "check.h"
18#include "ibm.h" 17#include "ibm.h"
19 18
19
20union label_t {
21 struct vtoc_volume_label_cdl vol;
22 struct vtoc_volume_label_ldl lnx;
23 struct vtoc_cms_label cms;
24};
25
20/* 26/*
21 * compute the block number from a 27 * compute the block number from a
22 * cyl-cyl-head-head structure 28 * cyl-cyl-head-head structure
23 */ 29 */
24static sector_t 30static sector_t cchh2blk(struct vtoc_cchh *ptr, struct hd_geometry *geo)
25cchh2blk (struct vtoc_cchh *ptr, struct hd_geometry *geo) { 31{
26
27 sector_t cyl; 32 sector_t cyl;
28 __u16 head; 33 __u16 head;
29 34
30 /*decode cylinder and heads for large volumes */ 35 /* decode cylinder and heads for large volumes */
31 cyl = ptr->hh & 0xFFF0; 36 cyl = ptr->hh & 0xFFF0;
32 cyl <<= 12; 37 cyl <<= 12;
33 cyl |= ptr->cc; 38 cyl |= ptr->cc;
@@ -40,13 +45,12 @@ cchh2blk (struct vtoc_cchh *ptr, struct hd_geometry *geo) {
40 * compute the block number from a 45 * compute the block number from a
41 * cyl-cyl-head-head-block structure 46 * cyl-cyl-head-head-block structure
42 */ 47 */
43static sector_t 48static sector_t cchhb2blk(struct vtoc_cchhb *ptr, struct hd_geometry *geo)
44cchhb2blk (struct vtoc_cchhb *ptr, struct hd_geometry *geo) { 49{
45
46 sector_t cyl; 50 sector_t cyl;
47 __u16 head; 51 __u16 head;
48 52
49 /*decode cylinder and heads for large volumes */ 53 /* decode cylinder and heads for large volumes */
50 cyl = ptr->hh & 0xFFF0; 54 cyl = ptr->hh & 0xFFF0;
51 cyl <<= 12; 55 cyl <<= 12;
52 cyl |= ptr->cc; 56 cyl |= ptr->cc;
@@ -56,26 +60,243 @@ cchhb2blk (struct vtoc_cchhb *ptr, struct hd_geometry *geo) {
56 ptr->b; 60 ptr->b;
57} 61}
58 62
63static int find_label(struct parsed_partitions *state,
64 dasd_information2_t *info,
65 struct hd_geometry *geo,
66 int blocksize,
67 sector_t *labelsect,
68 char name[],
69 char type[],
70 union label_t *label)
71{
72 Sector sect;
73 unsigned char *data;
74 sector_t testsect[3];
75 unsigned char temp[5];
76 int found = 0;
77 int i, testcount;
78
79 /* There a three places where we may find a valid label:
80 * - on an ECKD disk it's block 2
81 * - on an FBA disk it's block 1
82 * - on an CMS formatted FBA disk it is sector 1, even if the block size
83 * is larger than 512 bytes (possible if the DIAG discipline is used)
84 * If we have a valid info structure, then we know exactly which case we
85 * have, otherwise we just search through all possebilities.
86 */
87 if (info) {
88 if ((info->cu_type == 0x6310 && info->dev_type == 0x9336) ||
89 (info->cu_type == 0x3880 && info->dev_type == 0x3370))
90 testsect[0] = info->label_block;
91 else
92 testsect[0] = info->label_block * (blocksize >> 9);
93 testcount = 1;
94 } else {
95 testsect[0] = 1;
96 testsect[1] = (blocksize >> 9);
97 testsect[2] = 2 * (blocksize >> 9);
98 testcount = 3;
99 }
100 for (i = 0; i < testcount; ++i) {
101 data = read_part_sector(state, testsect[i], &sect);
102 if (data == NULL)
103 continue;
104 memcpy(label, data, sizeof(*label));
105 memcpy(temp, data, 4);
106 temp[4] = 0;
107 EBCASC(temp, 4);
108 put_dev_sector(sect);
109 if (!strcmp(temp, "VOL1") ||
110 !strcmp(temp, "LNX1") ||
111 !strcmp(temp, "CMS1")) {
112 if (!strcmp(temp, "VOL1")) {
113 strncpy(type, label->vol.vollbl, 4);
114 strncpy(name, label->vol.volid, 6);
115 } else {
116 strncpy(type, label->lnx.vollbl, 4);
117 strncpy(name, label->lnx.volid, 6);
118 }
119 EBCASC(type, 4);
120 EBCASC(name, 6);
121 *labelsect = testsect[i];
122 found = 1;
123 break;
124 }
125 }
126 if (!found)
127 memset(label, 0, sizeof(*label));
128
129 return found;
130}
131
132static int find_vol1_partitions(struct parsed_partitions *state,
133 struct hd_geometry *geo,
134 int blocksize,
135 char name[],
136 union label_t *label)
137{
138 sector_t blk;
139 int counter;
140 char tmp[64];
141 Sector sect;
142 unsigned char *data;
143 loff_t offset, size;
144 struct vtoc_format1_label f1;
145 int secperblk;
146
147 snprintf(tmp, sizeof(tmp), "VOL1/%8s:", name);
148 strlcat(state->pp_buf, tmp, PAGE_SIZE);
149 /*
150 * get start of VTOC from the disk label and then search for format1
151 * and format8 labels
152 */
153 secperblk = blocksize >> 9;
154 blk = cchhb2blk(&label->vol.vtoc, geo) + 1;
155 counter = 0;
156 data = read_part_sector(state, blk * secperblk, &sect);
157 while (data != NULL) {
158 memcpy(&f1, data, sizeof(struct vtoc_format1_label));
159 put_dev_sector(sect);
160 /* skip FMT4 / FMT5 / FMT7 labels */
161 if (f1.DS1FMTID == _ascebc['4']
162 || f1.DS1FMTID == _ascebc['5']
163 || f1.DS1FMTID == _ascebc['7']
164 || f1.DS1FMTID == _ascebc['9']) {
165 blk++;
166 data = read_part_sector(state, blk * secperblk, &sect);
167 continue;
168 }
169 /* only FMT1 and 8 labels valid at this point */
170 if (f1.DS1FMTID != _ascebc['1'] &&
171 f1.DS1FMTID != _ascebc['8'])
172 break;
173 /* OK, we got valid partition data */
174 offset = cchh2blk(&f1.DS1EXT1.llimit, geo);
175 size = cchh2blk(&f1.DS1EXT1.ulimit, geo) -
176 offset + geo->sectors;
177 offset *= secperblk;
178 size *= secperblk;
179 if (counter >= state->limit)
180 break;
181 put_partition(state, counter + 1, offset, size);
182 counter++;
183 blk++;
184 data = read_part_sector(state, blk * secperblk, &sect);
185 }
186 strlcat(state->pp_buf, "\n", PAGE_SIZE);
187
188 if (!data)
189 return -1;
190
191 return 1;
192}
193
194static int find_lnx1_partitions(struct parsed_partitions *state,
195 struct hd_geometry *geo,
196 int blocksize,
197 char name[],
198 union label_t *label,
199 sector_t labelsect,
200 loff_t i_size,
201 dasd_information2_t *info)
202{
203 loff_t offset, geo_size, size;
204 char tmp[64];
205 int secperblk;
206
207 snprintf(tmp, sizeof(tmp), "LNX1/%8s:", name);
208 strlcat(state->pp_buf, tmp, PAGE_SIZE);
209 secperblk = blocksize >> 9;
210 if (label->lnx.ldl_version == 0xf2) {
211 size = label->lnx.formatted_blocks * secperblk;
212 } else {
213 /*
214 * Formated w/o large volume support. If the sanity check
215 * 'size based on geo == size based on i_size' is true, then
216 * we can safely assume that we know the formatted size of
217 * the disk, otherwise we need additional information
218 * that we can only get from a real DASD device.
219 */
220 geo_size = geo->cylinders * geo->heads
221 * geo->sectors * secperblk;
222 size = i_size >> 9;
223 if (size != geo_size) {
224 if (!info) {
225 strlcat(state->pp_buf, "\n", PAGE_SIZE);
226 return 1;
227 }
228 if (!strcmp(info->type, "ECKD"))
229 if (geo_size < size)
230 size = geo_size;
231 /* else keep size based on i_size */
232 }
233 }
234 /* first and only partition starts in the first block after the label */
235 offset = labelsect + secperblk;
236 put_partition(state, 1, offset, size - offset);
237 strlcat(state->pp_buf, "\n", PAGE_SIZE);
238 return 1;
239}
240
241static int find_cms1_partitions(struct parsed_partitions *state,
242 struct hd_geometry *geo,
243 int blocksize,
244 char name[],
245 union label_t *label,
246 sector_t labelsect)
247{
248 loff_t offset, size;
249 char tmp[64];
250 int secperblk;
251
252 /*
253 * VM style CMS1 labeled disk
254 */
255 blocksize = label->cms.block_size;
256 secperblk = blocksize >> 9;
257 if (label->cms.disk_offset != 0) {
258 snprintf(tmp, sizeof(tmp), "CMS1/%8s(MDSK):", name);
259 strlcat(state->pp_buf, tmp, PAGE_SIZE);
260 /* disk is reserved minidisk */
261 offset = label->cms.disk_offset * secperblk;
262 size = (label->cms.block_count - 1) * secperblk;
263 } else {
264 snprintf(tmp, sizeof(tmp), "CMS1/%8s:", name);
265 strlcat(state->pp_buf, tmp, PAGE_SIZE);
266 /*
267 * Special case for FBA devices:
268 * If an FBA device is CMS formatted with blocksize > 512 byte
269 * and the DIAG discipline is used, then the CMS label is found
270 * in sector 1 instead of block 1. However, the partition is
271 * still supposed to start in block 2.
272 */
273 if (labelsect == 1)
274 offset = 2 * secperblk;
275 else
276 offset = labelsect + secperblk;
277 size = label->cms.block_count * secperblk;
278 }
279
280 put_partition(state, 1, offset, size-offset);
281 strlcat(state->pp_buf, "\n", PAGE_SIZE);
282 return 1;
283}
284
285
59/* 286/*
287 * This is the main function, called by check.c
60 */ 288 */
61int ibm_partition(struct parsed_partitions *state) 289int ibm_partition(struct parsed_partitions *state)
62{ 290{
63 struct block_device *bdev = state->bdev; 291 struct block_device *bdev = state->bdev;
64 int blocksize, res; 292 int blocksize, res;
65 loff_t i_size, offset, size, fmt_size; 293 loff_t i_size, offset, size;
66 dasd_information2_t *info; 294 dasd_information2_t *info;
67 struct hd_geometry *geo; 295 struct hd_geometry *geo;
68 char type[5] = {0,}; 296 char type[5] = {0,};
69 char name[7] = {0,}; 297 char name[7] = {0,};
70 union label_t {
71 struct vtoc_volume_label_cdl vol;
72 struct vtoc_volume_label_ldl lnx;
73 struct vtoc_cms_label cms;
74 } *label;
75 unsigned char *data;
76 Sector sect;
77 sector_t labelsect; 298 sector_t labelsect;
78 char tmp[64]; 299 union label_t *label;
79 300
80 res = 0; 301 res = 0;
81 blocksize = bdev_logical_block_size(bdev); 302 blocksize = bdev_logical_block_size(bdev);
@@ -84,7 +305,6 @@ int ibm_partition(struct parsed_partitions *state)
84 i_size = i_size_read(bdev->bd_inode); 305 i_size = i_size_read(bdev->bd_inode);
85 if (i_size == 0) 306 if (i_size == 0)
86 goto out_exit; 307 goto out_exit;
87
88 info = kmalloc(sizeof(dasd_information2_t), GFP_KERNEL); 308 info = kmalloc(sizeof(dasd_information2_t), GFP_KERNEL);
89 if (info == NULL) 309 if (info == NULL)
90 goto out_exit; 310 goto out_exit;
@@ -94,176 +314,45 @@ int ibm_partition(struct parsed_partitions *state)
94 label = kmalloc(sizeof(union label_t), GFP_KERNEL); 314 label = kmalloc(sizeof(union label_t), GFP_KERNEL);
95 if (label == NULL) 315 if (label == NULL)
96 goto out_nolab; 316 goto out_nolab;
97 317 if (ioctl_by_bdev(bdev, HDIO_GETGEO, (unsigned long)geo) != 0)
98 if (ioctl_by_bdev(bdev, BIODASDINFO2, (unsigned long)info) != 0 ||
99 ioctl_by_bdev(bdev, HDIO_GETGEO, (unsigned long)geo) != 0)
100 goto out_freeall; 318 goto out_freeall;
101 319 if (ioctl_by_bdev(bdev, BIODASDINFO2, (unsigned long)info) != 0) {
102 /* 320 kfree(info);
103 * Special case for FBA disks: label sector does not depend on 321 info = NULL;
104 * blocksize.
105 */
106 if ((info->cu_type == 0x6310 && info->dev_type == 0x9336) ||
107 (info->cu_type == 0x3880 && info->dev_type == 0x3370))
108 labelsect = info->label_block;
109 else
110 labelsect = info->label_block * (blocksize >> 9);
111
112 /*
113 * Get volume label, extract name and type.
114 */
115 data = read_part_sector(state, labelsect, &sect);
116 if (data == NULL)
117 goto out_readerr;
118
119 memcpy(label, data, sizeof(union label_t));
120 put_dev_sector(sect);
121
122 if ((!info->FBA_layout) && (!strcmp(info->type, "ECKD"))) {
123 strncpy(type, label->vol.vollbl, 4);
124 strncpy(name, label->vol.volid, 6);
125 } else {
126 strncpy(type, label->lnx.vollbl, 4);
127 strncpy(name, label->lnx.volid, 6);
128 } 322 }
129 EBCASC(type, 4);
130 EBCASC(name, 6);
131
132 res = 1;
133 323
134 /* 324 if (find_label(state, info, geo, blocksize, &labelsect, name, type,
135 * Three different formats: LDL, CDL and unformated disk 325 label)) {
136 * 326 if (!strncmp(type, "VOL1", 4)) {
137 * identified by info->format 327 res = find_vol1_partitions(state, geo, blocksize, name,
138 * 328 label);
139 * unformated disks we do not have to care about 329 } else if (!strncmp(type, "LNX1", 4)) {
140 */ 330 res = find_lnx1_partitions(state, geo, blocksize, name,
141 if (info->format == DASD_FORMAT_LDL) { 331 label, labelsect, i_size,
142 if (strncmp(type, "CMS1", 4) == 0) { 332 info);
143 /* 333 } else if (!strncmp(type, "CMS1", 4)) {
144 * VM style CMS1 labeled disk 334 res = find_cms1_partitions(state, geo, blocksize, name,
145 */ 335 label, labelsect);
146 blocksize = label->cms.block_size;
147 if (label->cms.disk_offset != 0) {
148 snprintf(tmp, sizeof(tmp), "CMS1/%8s(MDSK):", name);
149 strlcat(state->pp_buf, tmp, PAGE_SIZE);
150 /* disk is reserved minidisk */
151 offset = label->cms.disk_offset;
152 size = (label->cms.block_count - 1)
153 * (blocksize >> 9);
154 } else {
155 snprintf(tmp, sizeof(tmp), "CMS1/%8s:", name);
156 strlcat(state->pp_buf, tmp, PAGE_SIZE);
157 offset = (info->label_block + 1);
158 size = label->cms.block_count
159 * (blocksize >> 9);
160 }
161 put_partition(state, 1, offset*(blocksize >> 9),
162 size-offset*(blocksize >> 9));
163 } else {
164 if (strncmp(type, "LNX1", 4) == 0) {
165 snprintf(tmp, sizeof(tmp), "LNX1/%8s:", name);
166 strlcat(state->pp_buf, tmp, PAGE_SIZE);
167 if (label->lnx.ldl_version == 0xf2) {
168 fmt_size = label->lnx.formatted_blocks
169 * (blocksize >> 9);
170 } else if (!strcmp(info->type, "ECKD")) {
171 /* formated w/o large volume support */
172 fmt_size = geo->cylinders * geo->heads
173 * geo->sectors * (blocksize >> 9);
174 } else {
175 /* old label and no usable disk geometry
176 * (e.g. DIAG) */
177 fmt_size = i_size >> 9;
178 }
179 size = i_size >> 9;
180 if (fmt_size < size)
181 size = fmt_size;
182 offset = (info->label_block + 1);
183 } else {
184 /* unlabeled disk */
185 strlcat(state->pp_buf, "(nonl)", PAGE_SIZE);
186 size = i_size >> 9;
187 offset = (info->label_block + 1);
188 }
189 put_partition(state, 1, offset*(blocksize >> 9),
190 size-offset*(blocksize >> 9));
191 } 336 }
192 } else if (info->format == DASD_FORMAT_CDL) { 337 } else if (info) {
193 /*
194 * New style CDL formatted disk
195 */
196 sector_t blk;
197 int counter;
198
199 /* 338 /*
200 * check if VOL1 label is available 339 * ugly but needed for backward compatibility:
201 * if not, something is wrong, skipping partition detection 340 * If the block device is a DASD (i.e. BIODASDINFO2 works),
341 * then we claim it in any case, even though it has no valid
342 * label. If it has the LDL format, then we simply define a
343 * partition as if it had an LNX1 label.
202 */ 344 */
203 if (strncmp(type, "VOL1", 4) == 0) { 345 res = 1;
204 snprintf(tmp, sizeof(tmp), "VOL1/%8s:", name); 346 if (info->format == DASD_FORMAT_LDL) {
205 strlcat(state->pp_buf, tmp, PAGE_SIZE); 347 strlcat(state->pp_buf, "(nonl)", PAGE_SIZE);
206 /* 348 size = i_size >> 9;
207 * get block number and read then go through format1 349 offset = (info->label_block + 1) * (blocksize >> 9);
208 * labels 350 put_partition(state, 1, offset, size-offset);
209 */ 351 strlcat(state->pp_buf, "\n", PAGE_SIZE);
210 blk = cchhb2blk(&label->vol.vtoc, geo) + 1; 352 }
211 counter = 0; 353 } else
212 data = read_part_sector(state, blk * (blocksize/512), 354 res = 0;
213 &sect);
214 while (data != NULL) {
215 struct vtoc_format1_label f1;
216
217 memcpy(&f1, data,
218 sizeof(struct vtoc_format1_label));
219 put_dev_sector(sect);
220
221 /* skip FMT4 / FMT5 / FMT7 labels */
222 if (f1.DS1FMTID == _ascebc['4']
223 || f1.DS1FMTID == _ascebc['5']
224 || f1.DS1FMTID == _ascebc['7']
225 || f1.DS1FMTID == _ascebc['9']) {
226 blk++;
227 data = read_part_sector(state,
228 blk * (blocksize/512), &sect);
229 continue;
230 }
231
232 /* only FMT1 and 8 labels valid at this point */
233 if (f1.DS1FMTID != _ascebc['1'] &&
234 f1.DS1FMTID != _ascebc['8'])
235 break;
236
237 /* OK, we got valid partition data */
238 offset = cchh2blk(&f1.DS1EXT1.llimit, geo);
239 size = cchh2blk(&f1.DS1EXT1.ulimit, geo) -
240 offset + geo->sectors;
241 if (counter >= state->limit)
242 break;
243 put_partition(state, counter + 1,
244 offset * (blocksize >> 9),
245 size * (blocksize >> 9));
246 counter++;
247 blk++;
248 data = read_part_sector(state,
249 blk * (blocksize/512), &sect);
250 }
251
252 if (!data)
253 /* Are we not supposed to report this ? */
254 goto out_readerr;
255 } else
256 printk(KERN_INFO "Expected Label VOL1 not "
257 "found, treating as CDL formated Disk");
258
259 }
260
261 strlcat(state->pp_buf, "\n", PAGE_SIZE);
262 goto out_freeall;
263
264 355
265out_readerr:
266 res = -1;
267out_freeall: 356out_freeall:
268 kfree(label); 357 kfree(label);
269out_nolab: 358out_nolab:
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
index 8e477bb1f3f6..4a3b62326183 100644
--- a/drivers/s390/block/Kconfig
+++ b/drivers/s390/block/Kconfig
@@ -70,3 +70,21 @@ config DASD_EER
70 This driver provides a character device interface to the 70 This driver provides a character device interface to the
71 DASD extended error reporting. This is only needed if you want to 71 DASD extended error reporting. This is only needed if you want to
72 use applications written for the EER facility. 72 use applications written for the EER facility.
73
74config SCM_BLOCK
75 def_tristate m
76 prompt "Support for Storage Class Memory"
77 depends on S390 && BLOCK && EADM_SCH && SCM_BUS
78 help
79 Block device driver for Storage Class Memory (SCM). This driver
80 provides a block device interface for each available SCM increment.
81
82 To compile this driver as a module, choose M here: the
83 module will be called scm_block.
84
85config SCM_BLOCK_CLUSTER_WRITE
86 def_bool y
87 prompt "SCM force cluster writes"
88 depends on SCM_BLOCK
89 help
90 Force writes to Storage Class Memory (SCM) to be in done in clusters.
diff --git a/drivers/s390/block/Makefile b/drivers/s390/block/Makefile
index 0a89e080b389..c2f4e673e031 100644
--- a/drivers/s390/block/Makefile
+++ b/drivers/s390/block/Makefile
@@ -17,3 +17,9 @@ obj-$(CONFIG_DASD_ECKD) += dasd_eckd_mod.o
17obj-$(CONFIG_DASD_FBA) += dasd_fba_mod.o 17obj-$(CONFIG_DASD_FBA) += dasd_fba_mod.o
18obj-$(CONFIG_BLK_DEV_XPRAM) += xpram.o 18obj-$(CONFIG_BLK_DEV_XPRAM) += xpram.o
19obj-$(CONFIG_DCSSBLK) += dcssblk.o 19obj-$(CONFIG_DCSSBLK) += dcssblk.o
20
21scm_block-objs := scm_drv.o scm_blk.o
22ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
23scm_block-objs += scm_blk_cluster.o
24endif
25obj-$(CONFIG_SCM_BLOCK) += scm_block.o
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index c48c72abbefc..108332b44d98 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -20,6 +20,7 @@
20#include <linux/compat.h> 20#include <linux/compat.h>
21#include <linux/init.h> 21#include <linux/init.h>
22 22
23#include <asm/css_chars.h>
23#include <asm/debug.h> 24#include <asm/debug.h>
24#include <asm/idals.h> 25#include <asm/idals.h>
25#include <asm/ebcdic.h> 26#include <asm/ebcdic.h>
@@ -31,8 +32,6 @@
31 32
32#include "dasd_int.h" 33#include "dasd_int.h"
33#include "dasd_eckd.h" 34#include "dasd_eckd.h"
34#include "../cio/chsc.h"
35
36 35
37#ifdef PRINTK_HEADER 36#ifdef PRINTK_HEADER
38#undef PRINTK_HEADER 37#undef PRINTK_HEADER
@@ -140,6 +139,10 @@ dasd_eckd_set_online(struct ccw_device *cdev)
140static const int sizes_trk0[] = { 28, 148, 84 }; 139static const int sizes_trk0[] = { 28, 148, 84 };
141#define LABEL_SIZE 140 140#define LABEL_SIZE 140
142 141
142/* head and record addresses of count_area read in analysis ccw */
143static const int count_area_head[] = { 0, 0, 0, 0, 2 };
144static const int count_area_rec[] = { 1, 2, 3, 4, 1 };
145
143static inline unsigned int 146static inline unsigned int
144round_up_multiple(unsigned int no, unsigned int mult) 147round_up_multiple(unsigned int no, unsigned int mult)
145{ 148{
@@ -212,7 +215,7 @@ check_XRC (struct ccw1 *de_ccw,
212 215
213 rc = get_sync_clock(&data->ep_sys_time); 216 rc = get_sync_clock(&data->ep_sys_time);
214 /* Ignore return code if sync clock is switched off. */ 217 /* Ignore return code if sync clock is switched off. */
215 if (rc == -ENOSYS || rc == -EACCES) 218 if (rc == -EOPNOTSUPP || rc == -EACCES)
216 rc = 0; 219 rc = 0;
217 220
218 de_ccw->count = sizeof(struct DE_eckd_data); 221 de_ccw->count = sizeof(struct DE_eckd_data);
@@ -323,7 +326,7 @@ static int check_XRC_on_prefix(struct PFX_eckd_data *pfxdata,
323 326
324 rc = get_sync_clock(&pfxdata->define_extent.ep_sys_time); 327 rc = get_sync_clock(&pfxdata->define_extent.ep_sys_time);
325 /* Ignore return code if sync clock is switched off. */ 328 /* Ignore return code if sync clock is switched off. */
326 if (rc == -ENOSYS || rc == -EACCES) 329 if (rc == -EOPNOTSUPP || rc == -EACCES)
327 rc = 0; 330 rc = 0;
328 return rc; 331 return rc;
329} 332}
@@ -1940,7 +1943,10 @@ static int dasd_eckd_end_analysis(struct dasd_block *block)
1940 count_area = NULL; 1943 count_area = NULL;
1941 for (i = 0; i < 3; i++) { 1944 for (i = 0; i < 3; i++) {
1942 if (private->count_area[i].kl != 4 || 1945 if (private->count_area[i].kl != 4 ||
1943 private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4) { 1946 private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4 ||
1947 private->count_area[i].cyl != 0 ||
1948 private->count_area[i].head != count_area_head[i] ||
1949 private->count_area[i].record != count_area_rec[i]) {
1944 private->uses_cdl = 0; 1950 private->uses_cdl = 0;
1945 break; 1951 break;
1946 } 1952 }
@@ -1952,7 +1958,10 @@ static int dasd_eckd_end_analysis(struct dasd_block *block)
1952 for (i = 0; i < 5; i++) { 1958 for (i = 0; i < 5; i++) {
1953 if ((private->count_area[i].kl != 0) || 1959 if ((private->count_area[i].kl != 0) ||
1954 (private->count_area[i].dl != 1960 (private->count_area[i].dl !=
1955 private->count_area[0].dl)) 1961 private->count_area[0].dl) ||
1962 private->count_area[i].cyl != 0 ||
1963 private->count_area[i].head != count_area_head[i] ||
1964 private->count_area[i].record != count_area_rec[i])
1956 break; 1965 break;
1957 } 1966 }
1958 if (i == 5) 1967 if (i == 5)
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 654c6921a6d4..8252f37d04ed 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -292,12 +292,12 @@ out:
292#else 292#else
293static int dasd_ioctl_reset_profile(struct dasd_block *block) 293static int dasd_ioctl_reset_profile(struct dasd_block *block)
294{ 294{
295 return -ENOSYS; 295 return -ENOTTY;
296} 296}
297 297
298static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp) 298static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp)
299{ 299{
300 return -ENOSYS; 300 return -ENOTTY;
301} 301}
302#endif 302#endif
303 303
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
new file mode 100644
index 000000000000..9978ad4433cb
--- /dev/null
+++ b/drivers/s390/block/scm_blk.c
@@ -0,0 +1,445 @@
1/*
2 * Block driver for s390 storage class memory.
3 *
4 * Copyright IBM Corp. 2012
5 * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
6 */
7
8#define KMSG_COMPONENT "scm_block"
9#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
10
11#include <linux/interrupt.h>
12#include <linux/spinlock.h>
13#include <linux/module.h>
14#include <linux/blkdev.h>
15#include <linux/genhd.h>
16#include <linux/slab.h>
17#include <linux/list.h>
18#include <asm/eadm.h>
19#include "scm_blk.h"
20
21debug_info_t *scm_debug;
22static int scm_major;
23static DEFINE_SPINLOCK(list_lock);
24static LIST_HEAD(inactive_requests);
25static unsigned int nr_requests = 64;
26static atomic_t nr_devices = ATOMIC_INIT(0);
27module_param(nr_requests, uint, S_IRUGO);
28MODULE_PARM_DESC(nr_requests, "Number of parallel requests.");
29
30MODULE_DESCRIPTION("Block driver for s390 storage class memory.");
31MODULE_LICENSE("GPL");
32MODULE_ALIAS("scm:scmdev*");
33
34static void __scm_free_rq(struct scm_request *scmrq)
35{
36 struct aob_rq_header *aobrq = to_aobrq(scmrq);
37
38 free_page((unsigned long) scmrq->aob);
39 free_page((unsigned long) scmrq->aidaw);
40 __scm_free_rq_cluster(scmrq);
41 kfree(aobrq);
42}
43
44static void scm_free_rqs(void)
45{
46 struct list_head *iter, *safe;
47 struct scm_request *scmrq;
48
49 spin_lock_irq(&list_lock);
50 list_for_each_safe(iter, safe, &inactive_requests) {
51 scmrq = list_entry(iter, struct scm_request, list);
52 list_del(&scmrq->list);
53 __scm_free_rq(scmrq);
54 }
55 spin_unlock_irq(&list_lock);
56}
57
58static int __scm_alloc_rq(void)
59{
60 struct aob_rq_header *aobrq;
61 struct scm_request *scmrq;
62
63 aobrq = kzalloc(sizeof(*aobrq) + sizeof(*scmrq), GFP_KERNEL);
64 if (!aobrq)
65 return -ENOMEM;
66
67 scmrq = (void *) aobrq->data;
68 scmrq->aidaw = (void *) get_zeroed_page(GFP_DMA);
69 scmrq->aob = (void *) get_zeroed_page(GFP_DMA);
70 if (!scmrq->aob || !scmrq->aidaw) {
71 __scm_free_rq(scmrq);
72 return -ENOMEM;
73 }
74
75 if (__scm_alloc_rq_cluster(scmrq)) {
76 __scm_free_rq(scmrq);
77 return -ENOMEM;
78 }
79
80 INIT_LIST_HEAD(&scmrq->list);
81 spin_lock_irq(&list_lock);
82 list_add(&scmrq->list, &inactive_requests);
83 spin_unlock_irq(&list_lock);
84
85 return 0;
86}
87
/* Preallocate nrqs pooled requests; stops at the first failure. */
static int scm_alloc_rqs(unsigned int nrqs)
{
	int rc = 0;

	for (; nrqs && !rc; nrqs--)
		rc = __scm_alloc_rq();

	return rc;
}
97
98static struct scm_request *scm_request_fetch(void)
99{
100 struct scm_request *scmrq = NULL;
101
102 spin_lock(&list_lock);
103 if (list_empty(&inactive_requests))
104 goto out;
105 scmrq = list_first_entry(&inactive_requests, struct scm_request, list);
106 list_del(&scmrq->list);
107out:
108 spin_unlock(&list_lock);
109 return scmrq;
110}
111
112static void scm_request_done(struct scm_request *scmrq)
113{
114 unsigned long flags;
115
116 spin_lock_irqsave(&list_lock, flags);
117 list_add(&scmrq->list, &inactive_requests);
118 spin_unlock_irqrestore(&list_lock, flags);
119}
120
/* Block device open: take a reference on the eadm subchannel driver. */
static int scm_open(struct block_device *blkdev, fmode_t mode)
{
	return scm_get_ref();
}

/* Block device release: drop the reference taken in scm_open(). */
static int scm_release(struct gendisk *gendisk, fmode_t mode)
{
	scm_put_ref();
	return 0;
}

static const struct block_device_operations scm_blk_devops = {
	.owner = THIS_MODULE,
	.open = scm_open,
	.release = scm_release,
};
137
/*
 * Fill the AOB's first msb for a plain (non-cluster) transfer.  Block
 * size is 4K (MSB_BS_4K): segment lengths are converted to 4K blocks
 * with >> 12, and the SCM address is derived from the request's
 * 512-byte sector position (<< 9).  The data pages are described
 * indirectly through the aidaw list.
 */
static void scm_request_prepare(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	struct scm_device *scmdev = bdev->gendisk->private_data;
	struct aidaw *aidaw = scmrq->aidaw;
	struct msb *msb = &scmrq->aob->msb[0];
	struct req_iterator iter;
	struct bio_vec *bv;

	msb->bs = MSB_BS_4K;
	scmrq->aob->request.msb_count = 1;
	msb->scm_addr = scmdev->address +
		((u64) blk_rq_pos(scmrq->request) << 9);
	msb->oc = (rq_data_dir(scmrq->request) == READ) ?
		MSB_OC_READ : MSB_OC_WRITE;
	msb->flags |= MSB_FLAG_IDA;
	msb->data_addr = (u64) aidaw;

	rq_for_each_segment(bv, scmrq->request, iter) {
		/* Segments are expected to start on a page boundary. */
		WARN_ON(bv->bv_offset);
		msb->blk_count += bv->bv_len >> 12;
		aidaw->data_addr = (u64) page_address(bv->bv_page);
		aidaw++;
	}
}

/*
 * (Re)initialize a pooled scm_request for a new struct request: clear
 * the AOB and aidaw page, link request and device, and reset the retry
 * budget and error state.
 */
static inline void scm_request_init(struct scm_blk_dev *bdev,
				    struct scm_request *scmrq,
				    struct request *req)
{
	struct aob_rq_header *aobrq = to_aobrq(scmrq);
	struct aob *aob = scmrq->aob;

	memset(aob, 0, sizeof(*aob));
	memset(scmrq->aidaw, 0, PAGE_SIZE);
	aobrq->scmdev = bdev->scmdev;
	aob->request.cmd_code = ARQB_CMD_MOVE;
	aob->request.data = (u64) aobrq;
	scmrq->request = req;
	scmrq->bdev = bdev;
	scmrq->retries = 4;
	scmrq->error = 0;
	scm_request_cluster_init(scmrq);
}
182
183static void scm_ensure_queue_restart(struct scm_blk_dev *bdev)
184{
185 if (atomic_read(&bdev->queued_reqs)) {
186 /* Queue restart is triggered by the next interrupt. */
187 return;
188 }
189 blk_delay_queue(bdev->rq, SCM_QUEUE_DELAY);
190}
191
/*
 * Put a request back on the block queue after a transient failure:
 * drop the cluster reservation, hand the struct request back to the
 * block layer and return the scm_request to the inactive pool.
 * Callers hold bdev->rq_lock (request function / tasklet retry path).
 */
void scm_request_requeue(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;

	scm_release_cluster(scmrq);
	blk_requeue_request(bdev->rq, scmrq->request);
	scm_request_done(scmrq);
	scm_ensure_queue_restart(bdev);
}

/*
 * Complete a request towards the block layer (scmrq->error is the
 * result) and return the scm_request to the inactive pool.
 */
void scm_request_finish(struct scm_request *scmrq)
{
	scm_release_cluster(scmrq);
	blk_end_request_all(scmrq->request, scmrq->error);
	scm_request_done(scmrq);
}
208
209static void scm_blk_request(struct request_queue *rq)
210{
211 struct scm_device *scmdev = rq->queuedata;
212 struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
213 struct scm_request *scmrq;
214 struct request *req;
215 int ret;
216
217 while ((req = blk_peek_request(rq))) {
218 if (req->cmd_type != REQ_TYPE_FS)
219 continue;
220
221 scmrq = scm_request_fetch();
222 if (!scmrq) {
223 SCM_LOG(5, "no request");
224 scm_ensure_queue_restart(bdev);
225 return;
226 }
227 scm_request_init(bdev, scmrq, req);
228 if (!scm_reserve_cluster(scmrq)) {
229 SCM_LOG(5, "cluster busy");
230 scm_request_done(scmrq);
231 return;
232 }
233 if (scm_need_cluster_request(scmrq)) {
234 blk_start_request(req);
235 scm_initiate_cluster_request(scmrq);
236 return;
237 }
238 scm_request_prepare(scmrq);
239 blk_start_request(req);
240
241 ret = scm_start_aob(scmrq->aob);
242 if (ret) {
243 SCM_LOG(5, "no subchannel");
244 scm_request_requeue(scmrq);
245 return;
246 }
247 atomic_inc(&bdev->queued_reqs);
248 }
249}
250
/*
 * Log a failed request to the s390 debug feature; once the retry
 * budget is exhausted, also report the final error on the console.
 */
static void __scmrq_log_error(struct scm_request *scmrq)
{
	struct aob *aob = scmrq->aob;

	if (scmrq->error == -ETIMEDOUT)
		SCM_LOG(1, "Request timeout");
	else {
		SCM_LOG(1, "Request error");
		/* Dump the device's response block for analysis. */
		SCM_LOG_HEX(1, &aob->response, sizeof(aob->response));
	}
	if (scmrq->retries)
		SCM_LOG(1, "Retry request");
	else
		pr_err("An I/O operation to SCM failed with rc=%d\n",
		       scmrq->error);
}

/*
 * Completion handler, registered as scm_driver.handler; `data` is the
 * scm_request passed along with the AOB.  Only records the result and
 * queues the request for the tasklet — NOTE(review): the plain
 * spin_lock and tasklet deferral suggest interrupt context; confirm
 * against the eadm subchannel driver.
 */
void scm_blk_irq(struct scm_device *scmdev, void *data, int error)
{
	struct scm_request *scmrq = data;
	struct scm_blk_dev *bdev = scmrq->bdev;

	scmrq->error = error;
	if (error)
		__scmrq_log_error(scmrq);

	spin_lock(&bdev->lock);
	list_add_tail(&scmrq->list, &bdev->finished_requests);
	spin_unlock(&bdev->lock);
	tasklet_hi_schedule(&bdev->tasklet);
}
282
/*
 * Tasklet: post-interrupt completion handling.  Drains
 * bdev->finished_requests and, per request, either retries it,
 * advances its cluster write cycle, or completes it.  bdev->lock is
 * dropped while one request is processed and re-taken before the next
 * list access.
 */
static void scm_blk_tasklet(struct scm_blk_dev *bdev)
{
	struct scm_request *scmrq;
	unsigned long flags;

	spin_lock_irqsave(&bdev->lock, flags);
	while (!list_empty(&bdev->finished_requests)) {
		scmrq = list_first_entry(&bdev->finished_requests,
					 struct scm_request, list);
		list_del(&scmrq->list);
		spin_unlock_irqrestore(&bdev->lock, flags);

		if (scmrq->error && scmrq->retries-- > 0) {
			/* Restart the AOB; if that fails, requeue the
			 * request under the queue lock instead. */
			if (scm_start_aob(scmrq->aob)) {
				spin_lock_irqsave(&bdev->rq_lock, flags);
				scm_request_requeue(scmrq);
				spin_unlock_irqrestore(&bdev->rq_lock, flags);
			}
			/* Request restarted or requeued, handle next. */
			spin_lock_irqsave(&bdev->lock, flags);
			continue;
		}

		if (scm_test_cluster_request(scmrq)) {
			/* Mid read-modify-write cycle: let the cluster
			 * state machine decide what happens next. */
			scm_cluster_request_irq(scmrq);
			spin_lock_irqsave(&bdev->lock, flags);
			continue;
		}

		scm_request_finish(scmrq);
		atomic_dec(&bdev->queued_reqs);
		spin_lock_irqsave(&bdev->lock, flags);
	}
	spin_unlock_irqrestore(&bdev->lock, flags);
	/* Look out for more requests. */
	blk_run_queue(bdev->rq);
}
320
/*
 * Create and register the block device for one SCM increment: request
 * queue, queue limits, gendisk, "scm[a[a]]..scm[z]z" naming and
 * capacity.  Returns 0 on success or a negative errno; on failure the
 * device count is rolled back.
 */
int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
{
	struct request_queue *rq;
	int len, ret = -ENOMEM;
	unsigned int devindex, nr_max_blk;

	devindex = atomic_inc_return(&nr_devices) - 1;
	/* scma..scmz + scmaa..scmzz */
	if (devindex > 701) {
		ret = -ENODEV;
		goto out;
	}

	bdev->scmdev = scmdev;
	spin_lock_init(&bdev->rq_lock);
	spin_lock_init(&bdev->lock);
	INIT_LIST_HEAD(&bdev->finished_requests);
	atomic_set(&bdev->queued_reqs, 0);
	tasklet_init(&bdev->tasklet,
		     (void (*)(unsigned long)) scm_blk_tasklet,
		     (unsigned long) bdev);

	rq = blk_init_queue(scm_blk_request, &bdev->rq_lock);
	if (!rq)
		goto out;

	bdev->rq = rq;
	/* One aidaw per 4K block; the aidaw list must fit in one page. */
	nr_max_blk = min(scmdev->nr_max_block,
			 (unsigned int) (PAGE_SIZE / sizeof(struct aidaw)));

	blk_queue_logical_block_size(rq, 1 << 12);
	blk_queue_max_hw_sectors(rq, nr_max_blk << 3); /* 8 * 512 = blk_size */
	blk_queue_max_segments(rq, nr_max_blk);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, rq);
	scm_blk_dev_cluster_setup(bdev);

	bdev->gendisk = alloc_disk(SCM_NR_PARTS);
	if (!bdev->gendisk)
		goto out_queue;

	rq->queuedata = scmdev;
	bdev->gendisk->driverfs_dev = &scmdev->dev;
	bdev->gendisk->private_data = scmdev;
	bdev->gendisk->fops = &scm_blk_devops;
	bdev->gendisk->queue = rq;
	bdev->gendisk->major = scm_major;
	bdev->gendisk->first_minor = devindex * SCM_NR_PARTS;

	/* Base-26 disk name: "scm" + optional prefix letter + letter. */
	len = snprintf(bdev->gendisk->disk_name, DISK_NAME_LEN, "scm");
	if (devindex > 25) {
		len += snprintf(bdev->gendisk->disk_name + len,
				DISK_NAME_LEN - len, "%c",
				'a' + (devindex / 26) - 1);
		devindex = devindex % 26;
	}
	snprintf(bdev->gendisk->disk_name + len, DISK_NAME_LEN - len, "%c",
		 'a' + devindex);

	/* 512 byte sectors */
	set_capacity(bdev->gendisk, scmdev->size >> 9);
	add_disk(bdev->gendisk);
	return 0;

out_queue:
	blk_cleanup_queue(rq);
out:
	atomic_dec(&nr_devices);
	return ret;
}
390
391void scm_blk_dev_cleanup(struct scm_blk_dev *bdev)
392{
393 tasklet_kill(&bdev->tasklet);
394 del_gendisk(bdev->gendisk);
395 blk_cleanup_queue(bdev->gendisk->queue);
396 put_disk(bdev->gendisk);
397}
398
399static int __init scm_blk_init(void)
400{
401 int ret = -EINVAL;
402
403 if (!scm_cluster_size_valid())
404 goto out;
405
406 ret = register_blkdev(0, "scm");
407 if (ret < 0)
408 goto out;
409
410 scm_major = ret;
411 if (scm_alloc_rqs(nr_requests))
412 goto out_unreg;
413
414 scm_debug = debug_register("scm_log", 16, 1, 16);
415 if (!scm_debug)
416 goto out_free;
417
418 debug_register_view(scm_debug, &debug_hex_ascii_view);
419 debug_set_level(scm_debug, 2);
420
421 ret = scm_drv_init();
422 if (ret)
423 goto out_dbf;
424
425 return ret;
426
427out_dbf:
428 debug_unregister(scm_debug);
429out_free:
430 scm_free_rqs();
431out_unreg:
432 unregister_blkdev(scm_major, "scm");
433out:
434 return ret;
435}
436module_init(scm_blk_init);
437
/* Module exit: tear down in reverse order of scm_blk_init(). */
static void __exit scm_blk_cleanup(void)
{
	scm_drv_cleanup();
	debug_unregister(scm_debug);
	scm_free_rqs();
	unregister_blkdev(scm_major, "scm");
}
module_exit(scm_blk_cleanup);
diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h
new file mode 100644
index 000000000000..7ac6bad919ef
--- /dev/null
+++ b/drivers/s390/block/scm_blk.h
@@ -0,0 +1,117 @@
#ifndef SCM_BLK_H
#define SCM_BLK_H

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/list.h>

#include <asm/debug.h>
#include <asm/eadm.h>

#define SCM_NR_PARTS 8		/* minor numbers reserved per disk */
#define SCM_QUEUE_DELAY 5	/* queue restart delay (blk_delay_queue msecs) */

/* Per SCM increment block device state. */
struct scm_blk_dev {
	struct tasklet_struct tasklet;
	struct request_queue *rq;
	struct gendisk *gendisk;
	struct scm_device *scmdev;
	spinlock_t rq_lock;	/* guard the request queue */
	spinlock_t lock;	/* guard the rest of the blockdev */
	atomic_t queued_reqs;	/* AOBs currently in flight */
	struct list_head finished_requests; /* completed, awaiting tasklet */
#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
	struct list_head cluster_list;	/* requests holding a cluster */
#endif
};

/* One pooled I/O request; embedded in an aob_rq_header's data area. */
struct scm_request {
	struct scm_blk_dev *bdev;
	struct request *request;	/* the block layer request served */
	struct aidaw *aidaw;		/* one page of indirect data words */
	struct aob *aob;		/* one page, the device command block */
	struct list_head list;
	u8 retries;			/* remaining retry budget */
	int error;
#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
	struct {
		enum {CLUSTER_NONE, CLUSTER_READ, CLUSTER_WRITE} state;
		struct list_head list;	/* entry in bdev->cluster_list */
		void **buf;		/* bounce pages for read-modify-write */
	} cluster;
#endif
};

/* Recover the enclosing aob_rq_header from an embedded scm_request. */
#define to_aobrq(rq) container_of((void *) rq, struct aob_rq_header, data)

int scm_blk_dev_setup(struct scm_blk_dev *, struct scm_device *);
void scm_blk_dev_cleanup(struct scm_blk_dev *);
void scm_blk_irq(struct scm_device *, void *, int);

void scm_request_finish(struct scm_request *);
void scm_request_requeue(struct scm_request *);

int scm_drv_init(void);
void scm_drv_cleanup(void);

#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
void __scm_free_rq_cluster(struct scm_request *);
int __scm_alloc_rq_cluster(struct scm_request *);
void scm_request_cluster_init(struct scm_request *);
bool scm_reserve_cluster(struct scm_request *);
void scm_release_cluster(struct scm_request *);
void scm_blk_dev_cluster_setup(struct scm_blk_dev *);
bool scm_need_cluster_request(struct scm_request *);
void scm_initiate_cluster_request(struct scm_request *);
void scm_cluster_request_irq(struct scm_request *);
bool scm_test_cluster_request(struct scm_request *);
bool scm_cluster_size_valid(void);
#else
/* No-op stubs so callers need no #ifdefs when clustering is disabled. */
#define __scm_free_rq_cluster(scmrq) {}
#define __scm_alloc_rq_cluster(scmrq) 0
#define scm_request_cluster_init(scmrq) {}
#define scm_reserve_cluster(scmrq) true
#define scm_release_cluster(scmrq) {}
#define scm_blk_dev_cluster_setup(bdev) {}
#define scm_need_cluster_request(scmrq) false
#define scm_initiate_cluster_request(scmrq) {}
#define scm_cluster_request_irq(scmrq) {}
#define scm_test_cluster_request(scmrq) false
#define scm_cluster_size_valid() true
#endif

extern debug_info_t *scm_debug;

#define SCM_LOG(imp, txt) do {					\
		debug_text_event(scm_debug, imp, txt);		\
	} while (0)

/* Hex-dump `data` to the debug feature in buffer-sized chunks. */
static inline void SCM_LOG_HEX(int level, void *data, int length)
{
	if (level > scm_debug->level)
		return;
	while (length > 0) {
		debug_event(scm_debug, level, data, length);
		length -= scm_debug->buf_size;
		data += scm_debug->buf_size;
	}
}

/* Log a compact address/state/rank snapshot of an SCM increment. */
static inline void SCM_LOG_STATE(int level, struct scm_device *scmdev)
{
	struct {
		u64 address;
		u8 oper_state;
		u8 rank;
	} __packed data = {
		.address = scmdev->address,
		.oper_state = scmdev->attrs.oper_state,
		.rank = scmdev->attrs.rank,
	};

	SCM_LOG_HEX(level, &data, sizeof(data));
}

#endif /* SCM_BLK_H */
diff --git a/drivers/s390/block/scm_blk_cluster.c b/drivers/s390/block/scm_blk_cluster.c
new file mode 100644
index 000000000000..f4bb61b0cea1
--- /dev/null
+++ b/drivers/s390/block/scm_blk_cluster.c
@@ -0,0 +1,228 @@
1/*
2 * Block driver for s390 storage class memory.
3 *
4 * Copyright IBM Corp. 2012
5 * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
6 */
7
8#include <linux/spinlock.h>
9#include <linux/module.h>
10#include <linux/blkdev.h>
11#include <linux/genhd.h>
12#include <linux/slab.h>
13#include <linux/list.h>
14#include <asm/eadm.h>
15#include "scm_blk.h"
16
17static unsigned int write_cluster_size = 64;
18module_param(write_cluster_size, uint, S_IRUGO);
19MODULE_PARM_DESC(write_cluster_size,
20 "Number of pages used for contiguous writes.");
21
22#define CLUSTER_SIZE (write_cluster_size * PAGE_SIZE)
23
24void __scm_free_rq_cluster(struct scm_request *scmrq)
25{
26 int i;
27
28 if (!scmrq->cluster.buf)
29 return;
30
31 for (i = 0; i < 2 * write_cluster_size; i++)
32 free_page((unsigned long) scmrq->cluster.buf[i]);
33
34 kfree(scmrq->cluster.buf);
35}
36
/*
 * Allocate the cluster bounce buffers for one request: an array of
 * 2 * write_cluster_size page pointers (a write may straddle two
 * clusters).  On partial failure the remaining slots stay NULL and the
 * caller is expected to clean up via __scm_free_rq_cluster().
 */
int __scm_alloc_rq_cluster(struct scm_request *scmrq)
{
	int i;

	scmrq->cluster.buf = kzalloc(sizeof(void *) * 2 * write_cluster_size,
				 GFP_KERNEL);
	if (!scmrq->cluster.buf)
		return -ENOMEM;

	for (i = 0; i < 2 * write_cluster_size; i++) {
		scmrq->cluster.buf[i] = (void *) get_zeroed_page(GFP_DMA);
		if (!scmrq->cluster.buf[i])
			return -ENOMEM;
	}
	INIT_LIST_HEAD(&scmrq->cluster.list);
	return 0;
}

/* Fresh requests start outside any read-modify-write cycle. */
void scm_request_cluster_init(struct scm_request *scmrq)
{
	scmrq->cluster.state = CLUSTER_NONE;
}
59
60static bool clusters_intersect(struct scm_request *A, struct scm_request *B)
61{
62 unsigned long firstA, lastA, firstB, lastB;
63
64 firstA = ((u64) blk_rq_pos(A->request) << 9) / CLUSTER_SIZE;
65 lastA = (((u64) blk_rq_pos(A->request) << 9) +
66 blk_rq_bytes(A->request) - 1) / CLUSTER_SIZE;
67
68 firstB = ((u64) blk_rq_pos(B->request) << 9) / CLUSTER_SIZE;
69 lastB = (((u64) blk_rq_pos(B->request) << 9) +
70 blk_rq_bytes(B->request) - 1) / CLUSTER_SIZE;
71
72 return (firstB <= lastA && firstA <= lastB);
73}
74
/*
 * Try to claim the write clusters this request touches.  Fails (false)
 * if an already-reserved request intersects those clusters and at
 * least one of the two is a write; concurrent reads may overlap.  With
 * write_cluster_size == 0, clustering is disabled and every
 * reservation succeeds.
 * NOTE(review): plain spin_lock on bdev->lock, which scm_blk_irq also
 * takes — presumably safe because this runs from the request function
 * with interrupts disabled; confirm.
 */
bool scm_reserve_cluster(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	struct scm_request *iter;

	if (write_cluster_size == 0)
		return true;

	spin_lock(&bdev->lock);
	list_for_each_entry(iter, &bdev->cluster_list, cluster.list) {
		if (clusters_intersect(scmrq, iter) &&
		    (rq_data_dir(scmrq->request) == WRITE ||
		     rq_data_dir(iter->request) == WRITE)) {
			spin_unlock(&bdev->lock);
			return false;
		}
	}
	list_add(&scmrq->cluster.list, &bdev->cluster_list);
	spin_unlock(&bdev->lock);

	return true;
}
97
/* Drop the cluster reservation taken in scm_reserve_cluster(). */
void scm_release_cluster(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	unsigned long flags;

	if (write_cluster_size == 0)
		return;

	spin_lock_irqsave(&bdev->lock, flags);
	list_del(&scmrq->cluster.list);
	spin_unlock_irqrestore(&bdev->lock, flags);
}

/*
 * Per-device cluster setup: empty reservation list and advertise the
 * cluster size as the optimal I/O size to the block layer.
 */
void scm_blk_dev_cluster_setup(struct scm_blk_dev *bdev)
{
	INIT_LIST_HEAD(&bdev->cluster_list);
	blk_queue_io_opt(bdev->rq, CLUSTER_SIZE);
}
116
/*
 * Build the msb for the current phase of a read-modify-write cycle.
 *
 * CLUSTER_READ: read the whole cluster (or the two clusters the write
 * straddles) into the bounce pages.
 * CLUSTER_WRITE: write the same range back, but with the request's own
 * pages substituted for the bounce pages in the middle — bounce pages
 * before the write offset, then the request's segments, then bounce
 * pages for the tail.
 */
static void scm_prepare_cluster_request(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	struct scm_device *scmdev = bdev->gendisk->private_data;
	struct request *req = scmrq->request;
	struct aidaw *aidaw = scmrq->aidaw;
	struct msb *msb = &scmrq->aob->msb[0];
	struct req_iterator iter;
	struct bio_vec *bv;
	int i = 0;
	u64 addr;

	switch (scmrq->cluster.state) {
	case CLUSTER_NONE:
		scmrq->cluster.state = CLUSTER_READ;
		/* fall through */
	case CLUSTER_READ:
		scmrq->aob->request.msb_count = 1;
		msb->bs = MSB_BS_4K;
		msb->oc = MSB_OC_READ;
		msb->flags = MSB_FLAG_IDA;
		msb->data_addr = (u64) aidaw;
		msb->blk_count = write_cluster_size;

		addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
		msb->scm_addr = round_down(addr, CLUSTER_SIZE);

		/* A write crossing a cluster boundary needs both clusters. */
		if (msb->scm_addr !=
		    round_down(addr + (u64) blk_rq_bytes(req) - 1,
			       CLUSTER_SIZE))
			msb->blk_count = 2 * write_cluster_size;

		for (i = 0; i < msb->blk_count; i++) {
			aidaw->data_addr = (u64) scmrq->cluster.buf[i];
			aidaw++;
		}

		break;
	case CLUSTER_WRITE:
		/* Reuse scm_addr/blk_count set up by the read phase. */
		msb->oc = MSB_OC_WRITE;

		/* Bounce pages up to the request's start address ... */
		for (addr = msb->scm_addr;
		     addr < scmdev->address + ((u64) blk_rq_pos(req) << 9);
		     addr += PAGE_SIZE) {
			aidaw->data_addr = (u64) scmrq->cluster.buf[i];
			aidaw++;
			i++;
		}
		/* ... then the request's own data pages ... */
		rq_for_each_segment(bv, req, iter) {
			aidaw->data_addr = (u64) page_address(bv->bv_page);
			aidaw++;
			i++;
		}
		/* ... then bounce pages for the remainder. */
		for (; i < msb->blk_count; i++) {
			aidaw->data_addr = (u64) scmrq->cluster.buf[i];
			aidaw++;
		}
		break;
	}
}
177
178bool scm_need_cluster_request(struct scm_request *scmrq)
179{
180 if (rq_data_dir(scmrq->request) == READ)
181 return false;
182
183 return blk_rq_bytes(scmrq->request) < CLUSTER_SIZE;
184}
185
/* Called with queue lock held. */
void scm_initiate_cluster_request(struct scm_request *scmrq)
{
	/* Build the msb for the current cluster phase and start it;
	 * requeue the request if no subchannel is available. */
	scm_prepare_cluster_request(scmrq);
	if (scm_start_aob(scmrq->aob))
		scm_request_requeue(scmrq);
}

/* True while a request is inside a read-modify-write cluster cycle. */
bool scm_test_cluster_request(struct scm_request *scmrq)
{
	return scmrq->cluster.state != CLUSTER_NONE;
}
198
/*
 * Advance a request's read-modify-write cycle after its AOB completed
 * (called from the tasklet).  A successful read phase triggers the
 * write-back; a failed read or a finished write completes the request.
 */
void scm_cluster_request_irq(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	unsigned long flags;

	switch (scmrq->cluster.state) {
	case CLUSTER_NONE:
		/* Callers check scm_test_cluster_request() first. */
		BUG();
		break;
	case CLUSTER_READ:
		if (scmrq->error) {
			scm_request_finish(scmrq);
			break;
		}
		scmrq->cluster.state = CLUSTER_WRITE;
		/* scm_initiate_cluster_request() expects the queue lock. */
		spin_lock_irqsave(&bdev->rq_lock, flags);
		scm_initiate_cluster_request(scmrq);
		spin_unlock_irqrestore(&bdev->rq_lock, flags);
		break;
	case CLUSTER_WRITE:
		scm_request_finish(scmrq);
		break;
	}
}
223
224bool scm_cluster_size_valid(void)
225{
226 return write_cluster_size == 0 || write_cluster_size == 32 ||
227 write_cluster_size == 64 || write_cluster_size == 128;
228}
diff --git a/drivers/s390/block/scm_drv.c b/drivers/s390/block/scm_drv.c
new file mode 100644
index 000000000000..9fa0a908607b
--- /dev/null
+++ b/drivers/s390/block/scm_drv.c
@@ -0,0 +1,81 @@
1/*
2 * Device driver for s390 storage class memory.
3 *
4 * Copyright IBM Corp. 2012
5 * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
6 */
7
8#define KMSG_COMPONENT "scm_block"
9#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
10
11#include <linux/module.h>
12#include <linux/slab.h>
13#include <asm/eadm.h>
14#include "scm_blk.h"
15
16static void notify(struct scm_device *scmdev)
17{
18 pr_info("%lu: The capabilities of the SCM increment changed\n",
19 (unsigned long) scmdev->address);
20 SCM_LOG(2, "State changed");
21 SCM_LOG_STATE(2, scmdev);
22}
23
24static int scm_probe(struct scm_device *scmdev)
25{
26 struct scm_blk_dev *bdev;
27 int ret;
28
29 SCM_LOG(2, "probe");
30 SCM_LOG_STATE(2, scmdev);
31
32 if (scmdev->attrs.oper_state != OP_STATE_GOOD)
33 return -EINVAL;
34
35 bdev = kzalloc(sizeof(*bdev), GFP_KERNEL);
36 if (!bdev)
37 return -ENOMEM;
38
39 dev_set_drvdata(&scmdev->dev, bdev);
40 ret = scm_blk_dev_setup(bdev, scmdev);
41 if (ret) {
42 dev_set_drvdata(&scmdev->dev, NULL);
43 kfree(bdev);
44 goto out;
45 }
46
47out:
48 return ret;
49}
50
51static int scm_remove(struct scm_device *scmdev)
52{
53 struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
54
55 scm_blk_dev_cleanup(bdev);
56 dev_set_drvdata(&scmdev->dev, NULL);
57 kfree(bdev);
58
59 return 0;
60}
61
62static struct scm_driver scm_drv = {
63 .drv = {
64 .name = "scm_block",
65 .owner = THIS_MODULE,
66 },
67 .notify = notify,
68 .probe = scm_probe,
69 .remove = scm_remove,
70 .handler = scm_blk_irq,
71};
72
73int __init scm_drv_init(void)
74{
75 return scm_driver_register(&scm_drv);
76}
77
78void scm_drv_cleanup(void)
79{
80 scm_driver_unregister(&scm_drv);
81}
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index bb07577e8fd4..699fd3e363df 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -35,7 +35,6 @@ static struct raw3270_fn con3270_fn;
35 */ 35 */
36struct con3270 { 36struct con3270 {
37 struct raw3270_view view; 37 struct raw3270_view view;
38 spinlock_t lock;
39 struct list_head freemem; /* list of free memory for strings. */ 38 struct list_head freemem; /* list of free memory for strings. */
40 39
41 /* Output stuff. */ 40 /* Output stuff. */
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index 5b8b8592d311..f4ff515db251 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -571,8 +571,11 @@ static int __init mon_init(void)
571 if (rc) 571 if (rc)
572 goto out_iucv; 572 goto out_iucv;
573 monreader_device = kzalloc(sizeof(struct device), GFP_KERNEL); 573 monreader_device = kzalloc(sizeof(struct device), GFP_KERNEL);
574 if (!monreader_device) 574 if (!monreader_device) {
575 rc = -ENOMEM;
575 goto out_driver; 576 goto out_driver;
577 }
578
576 dev_set_name(monreader_device, "monreader-dev"); 579 dev_set_name(monreader_device, "monreader-dev");
577 monreader_device->bus = &iucv_bus; 580 monreader_device->bus = &iucv_bus;
578 monreader_device->parent = iucv_root; 581 monreader_device->parent = iucv_root;
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 3fcc000efc53..4fa21f7e2308 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -334,7 +334,7 @@ sclp_dispatch_evbufs(struct sccb_header *sccb)
334 reg->receiver_fn(evbuf); 334 reg->receiver_fn(evbuf);
335 spin_lock_irqsave(&sclp_lock, flags); 335 spin_lock_irqsave(&sclp_lock, flags);
336 } else if (reg == NULL) 336 } else if (reg == NULL)
337 rc = -ENOSYS; 337 rc = -EOPNOTSUPP;
338 } 338 }
339 spin_unlock_irqrestore(&sclp_lock, flags); 339 spin_unlock_irqrestore(&sclp_lock, flags);
340 return rc; 340 return rc;
diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c
index 4be63be73445..3b13d58fe87b 100644
--- a/drivers/s390/char/sclp_rw.c
+++ b/drivers/s390/char/sclp_rw.c
@@ -463,7 +463,7 @@ sclp_emit_buffer(struct sclp_buffer *buffer,
463 /* Use write priority message */ 463 /* Use write priority message */
464 sccb->msg_buf.header.type = EVTYP_PMSGCMD; 464 sccb->msg_buf.header.type = EVTYP_PMSGCMD;
465 else 465 else
466 return -ENOSYS; 466 return -EOPNOTSUPP;
467 buffer->request.command = SCLP_CMDW_WRITE_EVENT_DATA; 467 buffer->request.command = SCLP_CMDW_WRITE_EVENT_DATA;
468 buffer->request.status = SCLP_REQ_FILLED; 468 buffer->request.status = SCLP_REQ_FILLED;
469 buffer->request.callback = sclp_writedata_callback; 469 buffer->request.callback = sclp_writedata_callback;
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
index c06be6cc2fc3..ea664dd4f56d 100644
--- a/drivers/s390/char/tape.h
+++ b/drivers/s390/char/tape.h
@@ -15,7 +15,6 @@
15#include <asm/ccwdev.h> 15#include <asm/ccwdev.h>
16#include <asm/debug.h> 16#include <asm/debug.h>
17#include <asm/idals.h> 17#include <asm/idals.h>
18#include <linux/blkdev.h>
19#include <linux/kernel.h> 18#include <linux/kernel.h>
20#include <linux/module.h> 19#include <linux/module.h>
21#include <linux/mtio.h> 20#include <linux/mtio.h>
diff --git a/drivers/s390/char/tape_std.h b/drivers/s390/char/tape_std.h
index c5816ad9ed7d..8c760c036832 100644
--- a/drivers/s390/char/tape_std.h
+++ b/drivers/s390/char/tape_std.h
@@ -100,11 +100,7 @@ struct tape_request *tape_std_read_block(struct tape_device *, size_t);
100void tape_std_read_backward(struct tape_device *device, 100void tape_std_read_backward(struct tape_device *device,
101 struct tape_request *request); 101 struct tape_request *request);
102struct tape_request *tape_std_write_block(struct tape_device *, size_t); 102struct tape_request *tape_std_write_block(struct tape_device *, size_t);
103struct tape_request *tape_std_bread(struct tape_device *, struct request *);
104void tape_std_free_bread(struct tape_request *);
105void tape_std_check_locate(struct tape_device *, struct tape_request *); 103void tape_std_check_locate(struct tape_device *, struct tape_request *);
106struct tape_request *tape_std_bwrite(struct request *,
107 struct tape_device *, int);
108 104
109/* Some non-mtop commands. */ 105/* Some non-mtop commands. */
110int tape_std_assign(struct tape_device *); 106int tape_std_assign(struct tape_device *);
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index c131bc40f962..9b3a24e8d3a0 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -321,7 +321,7 @@ static int vmlogrdr_open (struct inode *inode, struct file *filp)
321 * only allow for blocking reads to be open 321 * only allow for blocking reads to be open
322 */ 322 */
323 if (filp->f_flags & O_NONBLOCK) 323 if (filp->f_flags & O_NONBLOCK)
324 return -ENOSYS; 324 return -EOPNOTSUPP;
325 325
326 /* Besure this device hasn't already been opened */ 326 /* Besure this device hasn't already been opened */
327 spin_lock_bh(&logptr->priv_lock); 327 spin_lock_bh(&logptr->priv_lock);
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index e1b700a19648..8c4a386e97f6 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -8,6 +8,8 @@ ccw_device-objs += device.o device_fsm.o device_ops.o
8ccw_device-objs += device_id.o device_pgid.o device_status.o 8ccw_device-objs += device_id.o device_pgid.o device_status.o
9obj-y += ccw_device.o cmf.o 9obj-y += ccw_device.o cmf.o
10obj-$(CONFIG_CHSC_SCH) += chsc_sch.o 10obj-$(CONFIG_CHSC_SCH) += chsc_sch.o
11obj-$(CONFIG_EADM_SCH) += eadm_sch.o
12obj-$(CONFIG_SCM_BUS) += scm.o
11obj-$(CONFIG_CCWGROUP) += ccwgroup.o 13obj-$(CONFIG_CCWGROUP) += ccwgroup.o
12 14
13qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_setup.o 15qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_setup.o
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index cfe0c087fe5c..4d51a7c4eb8b 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -52,6 +52,11 @@ int chsc_error_from_response(int response)
52 return -EINVAL; 52 return -EINVAL;
53 case 0x0004: 53 case 0x0004:
54 return -EOPNOTSUPP; 54 return -EOPNOTSUPP;
55 case 0x000b:
56 return -EBUSY;
57 case 0x0100:
58 case 0x0102:
59 return -ENOMEM;
55 default: 60 default:
56 return -EIO; 61 return -EIO;
57 } 62 }
@@ -393,6 +398,20 @@ static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
393 } 398 }
394} 399}
395 400
401static void chsc_process_sei_scm_change(struct chsc_sei_area *sei_area)
402{
403 int ret;
404
405 CIO_CRW_EVENT(4, "chsc: scm change notification\n");
406 if (sei_area->rs != 7)
407 return;
408
409 ret = scm_update_information();
410 if (ret)
411 CIO_CRW_EVENT(0, "chsc: updating change notification"
412 " failed (rc=%d).\n", ret);
413}
414
396static void chsc_process_sei(struct chsc_sei_area *sei_area) 415static void chsc_process_sei(struct chsc_sei_area *sei_area)
397{ 416{
398 /* Check if we might have lost some information. */ 417 /* Check if we might have lost some information. */
@@ -414,6 +433,9 @@ static void chsc_process_sei(struct chsc_sei_area *sei_area)
414 case 8: /* channel-path-configuration notification */ 433 case 8: /* channel-path-configuration notification */
415 chsc_process_sei_chp_config(sei_area); 434 chsc_process_sei_chp_config(sei_area);
416 break; 435 break;
436 case 12: /* scm change notification */
437 chsc_process_sei_scm_change(sei_area);
438 break;
417 default: /* other stuff */ 439 default: /* other stuff */
418 CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n", 440 CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
419 sei_area->cc); 441 sei_area->cc);
@@ -1047,3 +1069,33 @@ out:
1047 return rc; 1069 return rc;
1048} 1070}
1049EXPORT_SYMBOL_GPL(chsc_siosl); 1071EXPORT_SYMBOL_GPL(chsc_siosl);
1072
1073/**
1074 * chsc_scm_info() - store SCM information (SSI)
1075 * @scm_area: request and response block for SSI
1076 * @token: continuation token
1077 *
1078 * Returns 0 on success.
1079 */
1080int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token)
1081{
1082 int ccode, ret;
1083
1084 memset(scm_area, 0, sizeof(*scm_area));
1085 scm_area->request.length = 0x0020;
1086 scm_area->request.code = 0x004C;
1087 scm_area->reqtok = token;
1088
1089 ccode = chsc(scm_area);
1090 if (ccode > 0) {
1091 ret = (ccode == 3) ? -ENODEV : -EBUSY;
1092 goto out;
1093 }
1094 ret = chsc_error_from_response(scm_area->response.code);
1095 if (ret != 0)
1096 CIO_MSG_EVENT(2, "chsc: scm info failed (rc=%04x)\n",
1097 scm_area->response.code);
1098out:
1099 return ret;
1100}
1101EXPORT_SYMBOL_GPL(chsc_scm_info);
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 3f15b2aaeaea..662dab4b93e6 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -3,6 +3,7 @@
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5#include <linux/device.h> 5#include <linux/device.h>
6#include <asm/css_chars.h>
6#include <asm/chpid.h> 7#include <asm/chpid.h>
7#include <asm/chsc.h> 8#include <asm/chsc.h>
8#include <asm/schid.h> 9#include <asm/schid.h>
@@ -118,4 +119,46 @@ int chsc_error_from_response(int response);
118 119
119int chsc_siosl(struct subchannel_id schid); 120int chsc_siosl(struct subchannel_id schid);
120 121
122/* Functions and definitions to query storage-class memory. */
123struct sale {
124 u64 sa;
125 u32 p:4;
126 u32 op_state:4;
127 u32 data_state:4;
128 u32 rank:4;
129 u32 r:1;
130 u32:7;
131 u32 rid:8;
132 u32:32;
133} __packed;
134
135struct chsc_scm_info {
136 struct chsc_header request;
137 u32:32;
138 u64 reqtok;
139 u32 reserved1[4];
140 struct chsc_header response;
141 u64:56;
142 u8 rq;
143 u32 mbc;
144 u64 msa;
145 u16 is;
146 u16 mmc;
147 u32 mci;
148 u64 nr_scm_ini;
149 u64 nr_scm_unini;
150 u32 reserved2[10];
151 u64 restok;
152 struct sale scmal[248];
153} __packed;
154
155int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token);
156
157#ifdef CONFIG_SCM_BUS
158int scm_update_information(void);
159#else /* CONFIG_SCM_BUS */
160#define scm_update_information() 0
161#endif /* CONFIG_SCM_BUS */
162
163
121#endif 164#endif
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 33d1ef703593..8e927b9f285f 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -1029,7 +1029,7 @@ extern void do_reipl_asm(__u32 schid);
1029/* Make sure all subchannels are quiet before we re-ipl an lpar. */ 1029/* Make sure all subchannels are quiet before we re-ipl an lpar. */
1030void reipl_ccw_dev(struct ccw_dev_id *devid) 1030void reipl_ccw_dev(struct ccw_dev_id *devid)
1031{ 1031{
1032 struct subchannel_id schid; 1032 struct subchannel_id uninitialized_var(schid);
1033 1033
1034 s390_reset_system(NULL, NULL); 1034 s390_reset_system(NULL, NULL);
1035 if (reipl_find_schid(devid, &schid) != 0) 1035 if (reipl_find_schid(devid, &schid) != 0)
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 21908e67bf67..b4d572f65f07 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -445,6 +445,7 @@ void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
445 put_device(&sch->dev); 445 put_device(&sch->dev);
446 } 446 }
447} 447}
448EXPORT_SYMBOL_GPL(css_sched_sch_todo);
448 449
449static void css_sch_todo(struct work_struct *work) 450static void css_sch_todo(struct work_struct *work)
450{ 451{
diff --git a/drivers/s390/cio/eadm_sch.c b/drivers/s390/cio/eadm_sch.c
new file mode 100644
index 000000000000..6c9673400464
--- /dev/null
+++ b/drivers/s390/cio/eadm_sch.c
@@ -0,0 +1,401 @@
1/*
2 * Driver for s390 eadm subchannels
3 *
4 * Copyright IBM Corp. 2012
5 * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
6 */
7
8#include <linux/kernel_stat.h>
9#include <linux/workqueue.h>
10#include <linux/spinlock.h>
11#include <linux/device.h>
12#include <linux/module.h>
13#include <linux/timer.h>
14#include <linux/slab.h>
15#include <linux/list.h>
16
17#include <asm/css_chars.h>
18#include <asm/debug.h>
19#include <asm/isc.h>
20#include <asm/cio.h>
21#include <asm/scsw.h>
22#include <asm/eadm.h>
23
24#include "eadm_sch.h"
25#include "ioasm.h"
26#include "cio.h"
27#include "css.h"
28#include "orb.h"
29
30MODULE_DESCRIPTION("driver for s390 eadm subchannels");
31MODULE_LICENSE("GPL");
32
33#define EADM_TIMEOUT (5 * HZ)
34static DEFINE_SPINLOCK(list_lock);
35static LIST_HEAD(eadm_list);
36
37static debug_info_t *eadm_debug;
38
39#define EADM_LOG(imp, txt) do { \
40 debug_text_event(eadm_debug, imp, txt); \
41 } while (0)
42
43static void EADM_LOG_HEX(int level, void *data, int length)
44{
45 if (level > eadm_debug->level)
46 return;
47 while (length > 0) {
48 debug_event(eadm_debug, level, data, length);
49 length -= eadm_debug->buf_size;
50 data += eadm_debug->buf_size;
51 }
52}
53
54static void orb_init(union orb *orb)
55{
56 memset(orb, 0, sizeof(union orb));
57 orb->eadm.compat1 = 1;
58 orb->eadm.compat2 = 1;
59 orb->eadm.fmt = 1;
60 orb->eadm.x = 1;
61}
62
63static int eadm_subchannel_start(struct subchannel *sch, struct aob *aob)
64{
65 union orb *orb = &get_eadm_private(sch)->orb;
66 int cc;
67
68 orb_init(orb);
69 orb->eadm.aob = (u32)__pa(aob);
70 orb->eadm.intparm = (u32)(addr_t)sch;
71 orb->eadm.key = PAGE_DEFAULT_KEY >> 4;
72
73 EADM_LOG(6, "start");
74 EADM_LOG_HEX(6, &sch->schid, sizeof(sch->schid));
75
76 cc = ssch(sch->schid, orb);
77 switch (cc) {
78 case 0:
79 sch->schib.scsw.eadm.actl |= SCSW_ACTL_START_PEND;
80 break;
81 case 1: /* status pending */
82 case 2: /* busy */
83 return -EBUSY;
84 case 3: /* not operational */
85 return -ENODEV;
86 }
87 return 0;
88}
89
90static int eadm_subchannel_clear(struct subchannel *sch)
91{
92 int cc;
93
94 cc = csch(sch->schid);
95 if (cc)
96 return -ENODEV;
97
98 sch->schib.scsw.eadm.actl |= SCSW_ACTL_CLEAR_PEND;
99 return 0;
100}
101
102static void eadm_subchannel_timeout(unsigned long data)
103{
104 struct subchannel *sch = (struct subchannel *) data;
105
106 spin_lock_irq(sch->lock);
107 EADM_LOG(1, "timeout");
108 EADM_LOG_HEX(1, &sch->schid, sizeof(sch->schid));
109 if (eadm_subchannel_clear(sch))
110 EADM_LOG(0, "clear failed");
111 spin_unlock_irq(sch->lock);
112}
113
114static void eadm_subchannel_set_timeout(struct subchannel *sch, int expires)
115{
116 struct eadm_private *private = get_eadm_private(sch);
117
118 if (expires == 0) {
119 del_timer(&private->timer);
120 return;
121 }
122 if (timer_pending(&private->timer)) {
123 if (mod_timer(&private->timer, jiffies + expires))
124 return;
125 }
126 private->timer.function = eadm_subchannel_timeout;
127 private->timer.data = (unsigned long) sch;
128 private->timer.expires = jiffies + expires;
129 add_timer(&private->timer);
130}
131
132static void eadm_subchannel_irq(struct subchannel *sch)
133{
134 struct eadm_private *private = get_eadm_private(sch);
135 struct eadm_scsw *scsw = &sch->schib.scsw.eadm;
136 struct irb *irb = (struct irb *)&S390_lowcore.irb;
137 int error = 0;
138
139 EADM_LOG(6, "irq");
140 EADM_LOG_HEX(6, irb, sizeof(*irb));
141
142 kstat_cpu(smp_processor_id()).irqs[IOINT_ADM]++;
143
144 if ((scsw->stctl & (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))
145 && scsw->eswf == 1 && irb->esw.eadm.erw.r)
146 error = -EIO;
147
148 if (scsw->fctl & SCSW_FCTL_CLEAR_FUNC)
149 error = -ETIMEDOUT;
150
151 eadm_subchannel_set_timeout(sch, 0);
152
153 if (private->state != EADM_BUSY) {
154 EADM_LOG(1, "irq unsol");
155 EADM_LOG_HEX(1, irb, sizeof(*irb));
156 private->state = EADM_NOT_OPER;
157 css_sched_sch_todo(sch, SCH_TODO_EVAL);
158 return;
159 }
160 scm_irq_handler((struct aob *)(unsigned long)scsw->aob, error);
161 private->state = EADM_IDLE;
162}
163
164static struct subchannel *eadm_get_idle_sch(void)
165{
166 struct eadm_private *private;
167 struct subchannel *sch;
168 unsigned long flags;
169
170 spin_lock_irqsave(&list_lock, flags);
171 list_for_each_entry(private, &eadm_list, head) {
172 sch = private->sch;
173 spin_lock(sch->lock);
174 if (private->state == EADM_IDLE) {
175 private->state = EADM_BUSY;
176 list_move_tail(&private->head, &eadm_list);
177 spin_unlock(sch->lock);
178 spin_unlock_irqrestore(&list_lock, flags);
179
180 return sch;
181 }
182 spin_unlock(sch->lock);
183 }
184 spin_unlock_irqrestore(&list_lock, flags);
185
186 return NULL;
187}
188
189static int eadm_start_aob(struct aob *aob)
190{
191 struct eadm_private *private;
192 struct subchannel *sch;
193 unsigned long flags;
194 int ret;
195
196 sch = eadm_get_idle_sch();
197 if (!sch)
198 return -EBUSY;
199
200 spin_lock_irqsave(sch->lock, flags);
201 eadm_subchannel_set_timeout(sch, EADM_TIMEOUT);
202 ret = eadm_subchannel_start(sch, aob);
203 if (!ret)
204 goto out_unlock;
205
206 /* Handle start subchannel failure. */
207 eadm_subchannel_set_timeout(sch, 0);
208 private = get_eadm_private(sch);
209 private->state = EADM_NOT_OPER;
210 css_sched_sch_todo(sch, SCH_TODO_EVAL);
211
212out_unlock:
213 spin_unlock_irqrestore(sch->lock, flags);
214
215 return ret;
216}
217
218static int eadm_subchannel_probe(struct subchannel *sch)
219{
220 struct eadm_private *private;
221 int ret;
222
223 private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
224 if (!private)
225 return -ENOMEM;
226
227 INIT_LIST_HEAD(&private->head);
228 init_timer(&private->timer);
229
230 spin_lock_irq(sch->lock);
231 set_eadm_private(sch, private);
232 private->state = EADM_IDLE;
233 private->sch = sch;
234 sch->isc = EADM_SCH_ISC;
235 ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
236 if (ret) {
237 set_eadm_private(sch, NULL);
238 spin_unlock_irq(sch->lock);
239 kfree(private);
240 goto out;
241 }
242 spin_unlock_irq(sch->lock);
243
244 spin_lock_irq(&list_lock);
245 list_add(&private->head, &eadm_list);
246 spin_unlock_irq(&list_lock);
247
248 if (dev_get_uevent_suppress(&sch->dev)) {
249 dev_set_uevent_suppress(&sch->dev, 0);
250 kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
251 }
252out:
253 return ret;
254}
255
256static void eadm_quiesce(struct subchannel *sch)
257{
258 int ret;
259
260 do {
261 spin_lock_irq(sch->lock);
262 ret = cio_disable_subchannel(sch);
263 spin_unlock_irq(sch->lock);
264 } while (ret == -EBUSY);
265}
266
267static int eadm_subchannel_remove(struct subchannel *sch)
268{
269 struct eadm_private *private = get_eadm_private(sch);
270
271 spin_lock_irq(&list_lock);
272 list_del(&private->head);
273 spin_unlock_irq(&list_lock);
274
275 eadm_quiesce(sch);
276
277 spin_lock_irq(sch->lock);
278 set_eadm_private(sch, NULL);
279 spin_unlock_irq(sch->lock);
280
281 kfree(private);
282
283 return 0;
284}
285
286static void eadm_subchannel_shutdown(struct subchannel *sch)
287{
288 eadm_quiesce(sch);
289}
290
291static int eadm_subchannel_freeze(struct subchannel *sch)
292{
293 return cio_disable_subchannel(sch);
294}
295
296static int eadm_subchannel_restore(struct subchannel *sch)
297{
298 return cio_enable_subchannel(sch, (u32)(unsigned long)sch);
299}
300
301/**
302 * eadm_subchannel_sch_event - process subchannel event
303 * @sch: subchannel
304 * @process: non-zero if function is called in process context
305 *
306 * An unspecified event occurred for this subchannel. Adjust data according
307 * to the current operational state of the subchannel. Return zero when the
308 * event has been handled sufficiently or -EAGAIN when this function should
309 * be called again in process context.
310 */
311static int eadm_subchannel_sch_event(struct subchannel *sch, int process)
312{
313 struct eadm_private *private;
314 unsigned long flags;
315 int ret = 0;
316
317 spin_lock_irqsave(sch->lock, flags);
318 if (!device_is_registered(&sch->dev))
319 goto out_unlock;
320
321 if (work_pending(&sch->todo_work))
322 goto out_unlock;
323
324 if (cio_update_schib(sch)) {
325 css_sched_sch_todo(sch, SCH_TODO_UNREG);
326 goto out_unlock;
327 }
328 private = get_eadm_private(sch);
329 if (private->state == EADM_NOT_OPER)
330 private->state = EADM_IDLE;
331
332out_unlock:
333 spin_unlock_irqrestore(sch->lock, flags);
334
335 return ret;
336}
337
338static struct css_device_id eadm_subchannel_ids[] = {
339 { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_ADM, },
340 { /* end of list */ },
341};
342MODULE_DEVICE_TABLE(css, eadm_subchannel_ids);
343
344static struct css_driver eadm_subchannel_driver = {
345 .drv = {
346 .name = "eadm_subchannel",
347 .owner = THIS_MODULE,
348 },
349 .subchannel_type = eadm_subchannel_ids,
350 .irq = eadm_subchannel_irq,
351 .probe = eadm_subchannel_probe,
352 .remove = eadm_subchannel_remove,
353 .shutdown = eadm_subchannel_shutdown,
354 .sch_event = eadm_subchannel_sch_event,
355 .freeze = eadm_subchannel_freeze,
356 .thaw = eadm_subchannel_restore,
357 .restore = eadm_subchannel_restore,
358};
359
360static struct eadm_ops eadm_ops = {
361 .eadm_start = eadm_start_aob,
362 .owner = THIS_MODULE,
363};
364
365static int __init eadm_sch_init(void)
366{
367 int ret;
368
369 if (!css_general_characteristics.eadm)
370 return -ENXIO;
371
372 eadm_debug = debug_register("eadm_log", 16, 1, 16);
373 if (!eadm_debug)
374 return -ENOMEM;
375
376 debug_register_view(eadm_debug, &debug_hex_ascii_view);
377 debug_set_level(eadm_debug, 2);
378
379 isc_register(EADM_SCH_ISC);
380 ret = css_driver_register(&eadm_subchannel_driver);
381 if (ret)
382 goto cleanup;
383
384 register_eadm_ops(&eadm_ops);
385 return ret;
386
387cleanup:
388 isc_unregister(EADM_SCH_ISC);
389 debug_unregister(eadm_debug);
390 return ret;
391}
392
393static void __exit eadm_sch_exit(void)
394{
395 unregister_eadm_ops(&eadm_ops);
396 css_driver_unregister(&eadm_subchannel_driver);
397 isc_unregister(EADM_SCH_ISC);
398 debug_unregister(eadm_debug);
399}
400module_init(eadm_sch_init);
401module_exit(eadm_sch_exit);
diff --git a/drivers/s390/cio/eadm_sch.h b/drivers/s390/cio/eadm_sch.h
new file mode 100644
index 000000000000..2779be093982
--- /dev/null
+++ b/drivers/s390/cio/eadm_sch.h
@@ -0,0 +1,20 @@
1#ifndef EADM_SCH_H
2#define EADM_SCH_H
3
4#include <linux/device.h>
5#include <linux/timer.h>
6#include <linux/list.h>
7#include "orb.h"
8
9struct eadm_private {
10 union orb orb;
11 enum {EADM_IDLE, EADM_BUSY, EADM_NOT_OPER} state;
12 struct timer_list timer;
13 struct list_head head;
14 struct subchannel *sch;
15} __aligned(8);
16
17#define get_eadm_private(n) ((struct eadm_private *)dev_get_drvdata(&n->dev))
18#define set_eadm_private(n, p) (dev_set_drvdata(&n->dev, p))
19
20#endif
diff --git a/drivers/s390/cio/orb.h b/drivers/s390/cio/orb.h
index 45a9865c2b36..7a640530e7f5 100644
--- a/drivers/s390/cio/orb.h
+++ b/drivers/s390/cio/orb.h
@@ -59,9 +59,33 @@ struct tm_orb {
59 u32:32; 59 u32:32;
60} __packed __aligned(4); 60} __packed __aligned(4);
61 61
62/*
63 * eadm operation request block
64 */
65struct eadm_orb {
66 u32 intparm;
67 u32 key:4;
68 u32:4;
69 u32 compat1:1;
70 u32 compat2:1;
71 u32:21;
72 u32 x:1;
73 u32 aob;
74 u32 css_prio:8;
75 u32:8;
76 u32 scm_prio:8;
77 u32:8;
78 u32:29;
79 u32 fmt:3;
80 u32:32;
81 u32:32;
82 u32:32;
83} __packed __aligned(4);
84
62union orb { 85union orb {
63 struct cmd_orb cmd; 86 struct cmd_orb cmd;
64 struct tm_orb tm; 87 struct tm_orb tm;
88 struct eadm_orb eadm;
65} __packed __aligned(4); 89} __packed __aligned(4);
66 90
67#endif /* S390_ORB_H */ 91#endif /* S390_ORB_H */
diff --git a/drivers/s390/cio/qdio_debug.h b/drivers/s390/cio/qdio_debug.h
index e1f646800ddb..7f8b973da298 100644
--- a/drivers/s390/cio/qdio_debug.h
+++ b/drivers/s390/cio/qdio_debug.h
@@ -37,10 +37,14 @@ static inline int qdio_dbf_passes(debug_info_t *dbf_grp, int level)
37 debug_text_event(qdio_dbf_setup, DBF_ERR, debug_buffer); \ 37 debug_text_event(qdio_dbf_setup, DBF_ERR, debug_buffer); \
38 } while (0) 38 } while (0)
39 39
40#define DBF_HEX(addr, len) \ 40static inline void DBF_HEX(void *addr, int len)
41 do { \ 41{
42 debug_event(qdio_dbf_setup, DBF_ERR, (void*)(addr), len); \ 42 while (len > 0) {
43 } while (0) 43 debug_event(qdio_dbf_setup, DBF_ERR, addr, len);
44 len -= qdio_dbf_setup->buf_size;
45 addr += qdio_dbf_setup->buf_size;
46 }
47}
44 48
45#define DBF_ERROR(text...) \ 49#define DBF_ERROR(text...) \
46 do { \ 50 do { \
@@ -49,11 +53,14 @@ static inline int qdio_dbf_passes(debug_info_t *dbf_grp, int level)
49 debug_text_event(qdio_dbf_error, DBF_ERR, debug_buffer); \ 53 debug_text_event(qdio_dbf_error, DBF_ERR, debug_buffer); \
50 } while (0) 54 } while (0)
51 55
52#define DBF_ERROR_HEX(addr, len) \ 56static inline void DBF_ERROR_HEX(void *addr, int len)
53 do { \ 57{
54 debug_event(qdio_dbf_error, DBF_ERR, (void*)(addr), len); \ 58 while (len > 0) {
55 } while (0) 59 debug_event(qdio_dbf_error, DBF_ERR, addr, len);
56 60 len -= qdio_dbf_error->buf_size;
61 addr += qdio_dbf_error->buf_size;
62 }
63}
57 64
58#define DBF_DEV_EVENT(level, device, text...) \ 65#define DBF_DEV_EVENT(level, device, text...) \
59 do { \ 66 do { \
@@ -64,10 +71,15 @@ static inline int qdio_dbf_passes(debug_info_t *dbf_grp, int level)
64 } \ 71 } \
65 } while (0) 72 } while (0)
66 73
67#define DBF_DEV_HEX(level, device, addr, len) \ 74static inline void DBF_DEV_HEX(struct qdio_irq *dev, void *addr,
68 do { \ 75 int len, int level)
69 debug_event(device->debug_area, level, (void*)(addr), len); \ 76{
70 } while (0) 77 while (len > 0) {
78 debug_event(dev->debug_area, level, addr, len);
79 len -= dev->debug_area->buf_size;
80 addr += dev->debug_area->buf_size;
81 }
82}
71 83
72void qdio_allocate_dbf(struct qdio_initialize *init_data, 84void qdio_allocate_dbf(struct qdio_initialize *init_data,
73 struct qdio_irq *irq_ptr); 85 struct qdio_irq *irq_ptr);
diff --git a/drivers/s390/cio/scm.c b/drivers/s390/cio/scm.c
new file mode 100644
index 000000000000..bcf20f3aa51b
--- /dev/null
+++ b/drivers/s390/cio/scm.c
@@ -0,0 +1,317 @@
1/*
2 * Recognize and maintain s390 storage class memory.
3 *
4 * Copyright IBM Corp. 2012
5 * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
6 */
7
8#include <linux/device.h>
9#include <linux/module.h>
10#include <linux/mutex.h>
11#include <linux/slab.h>
12#include <linux/init.h>
13#include <linux/err.h>
14#include <asm/eadm.h>
15#include "chsc.h"
16
17static struct device *scm_root;
18static struct eadm_ops *eadm_ops;
19static DEFINE_MUTEX(eadm_ops_mutex);
20
21#define to_scm_dev(n) container_of(n, struct scm_device, dev)
22#define to_scm_drv(d) container_of(d, struct scm_driver, drv)
23
24static int scmdev_probe(struct device *dev)
25{
26 struct scm_device *scmdev = to_scm_dev(dev);
27 struct scm_driver *scmdrv = to_scm_drv(dev->driver);
28
29 return scmdrv->probe ? scmdrv->probe(scmdev) : -ENODEV;
30}
31
32static int scmdev_remove(struct device *dev)
33{
34 struct scm_device *scmdev = to_scm_dev(dev);
35 struct scm_driver *scmdrv = to_scm_drv(dev->driver);
36
37 return scmdrv->remove ? scmdrv->remove(scmdev) : -ENODEV;
38}
39
40static int scmdev_uevent(struct device *dev, struct kobj_uevent_env *env)
41{
42 return add_uevent_var(env, "MODALIAS=scm:scmdev");
43}
44
45static struct bus_type scm_bus_type = {
46 .name = "scm",
47 .probe = scmdev_probe,
48 .remove = scmdev_remove,
49 .uevent = scmdev_uevent,
50};
51
52/**
53 * scm_driver_register() - register a scm driver
54 * @scmdrv: driver to be registered
55 */
56int scm_driver_register(struct scm_driver *scmdrv)
57{
58 struct device_driver *drv = &scmdrv->drv;
59
60 drv->bus = &scm_bus_type;
61
62 return driver_register(drv);
63}
64EXPORT_SYMBOL_GPL(scm_driver_register);
65
66/**
67 * scm_driver_unregister() - deregister a scm driver
68 * @scmdrv: driver to be deregistered
69 */
70void scm_driver_unregister(struct scm_driver *scmdrv)
71{
72 driver_unregister(&scmdrv->drv);
73}
74EXPORT_SYMBOL_GPL(scm_driver_unregister);
75
76int scm_get_ref(void)
77{
78 int ret = 0;
79
80 mutex_lock(&eadm_ops_mutex);
81 if (!eadm_ops || !try_module_get(eadm_ops->owner))
82 ret = -ENOENT;
83 mutex_unlock(&eadm_ops_mutex);
84
85 return ret;
86}
87EXPORT_SYMBOL_GPL(scm_get_ref);
88
89void scm_put_ref(void)
90{
91 mutex_lock(&eadm_ops_mutex);
92 module_put(eadm_ops->owner);
93 mutex_unlock(&eadm_ops_mutex);
94}
95EXPORT_SYMBOL_GPL(scm_put_ref);
96
97void register_eadm_ops(struct eadm_ops *ops)
98{
99 mutex_lock(&eadm_ops_mutex);
100 eadm_ops = ops;
101 mutex_unlock(&eadm_ops_mutex);
102}
103EXPORT_SYMBOL_GPL(register_eadm_ops);
104
105void unregister_eadm_ops(struct eadm_ops *ops)
106{
107 mutex_lock(&eadm_ops_mutex);
108 eadm_ops = NULL;
109 mutex_unlock(&eadm_ops_mutex);
110}
111EXPORT_SYMBOL_GPL(unregister_eadm_ops);
112
113int scm_start_aob(struct aob *aob)
114{
115 return eadm_ops->eadm_start(aob);
116}
117EXPORT_SYMBOL_GPL(scm_start_aob);
118
119void scm_irq_handler(struct aob *aob, int error)
120{
121 struct aob_rq_header *aobrq = (void *) aob->request.data;
122 struct scm_device *scmdev = aobrq->scmdev;
123 struct scm_driver *scmdrv = to_scm_drv(scmdev->dev.driver);
124
125 scmdrv->handler(scmdev, aobrq->data, error);
126}
127EXPORT_SYMBOL_GPL(scm_irq_handler);
128
129#define scm_attr(name) \
130static ssize_t show_##name(struct device *dev, \
131 struct device_attribute *attr, char *buf) \
132{ \
133 struct scm_device *scmdev = to_scm_dev(dev); \
134 int ret; \
135 \
136 device_lock(dev); \
137 ret = sprintf(buf, "%u\n", scmdev->attrs.name); \
138 device_unlock(dev); \
139 \
140 return ret; \
141} \
142static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL);
143
144scm_attr(persistence);
145scm_attr(oper_state);
146scm_attr(data_state);
147scm_attr(rank);
148scm_attr(release);
149scm_attr(res_id);
150
151static struct attribute *scmdev_attrs[] = {
152 &dev_attr_persistence.attr,
153 &dev_attr_oper_state.attr,
154 &dev_attr_data_state.attr,
155 &dev_attr_rank.attr,
156 &dev_attr_release.attr,
157 &dev_attr_res_id.attr,
158 NULL,
159};
160
161static struct attribute_group scmdev_attr_group = {
162 .attrs = scmdev_attrs,
163};
164
165static const struct attribute_group *scmdev_attr_groups[] = {
166 &scmdev_attr_group,
167 NULL,
168};
169
170static void scmdev_release(struct device *dev)
171{
172 struct scm_device *scmdev = to_scm_dev(dev);
173
174 kfree(scmdev);
175}
176
177static void scmdev_setup(struct scm_device *scmdev, struct sale *sale,
178 unsigned int size, unsigned int max_blk_count)
179{
180 dev_set_name(&scmdev->dev, "%016llx", (unsigned long long) sale->sa);
181 scmdev->nr_max_block = max_blk_count;
182 scmdev->address = sale->sa;
183 scmdev->size = 1UL << size;
184 scmdev->attrs.rank = sale->rank;
185 scmdev->attrs.persistence = sale->p;
186 scmdev->attrs.oper_state = sale->op_state;
187 scmdev->attrs.data_state = sale->data_state;
188 scmdev->attrs.rank = sale->rank;
189 scmdev->attrs.release = sale->r;
190 scmdev->attrs.res_id = sale->rid;
191 scmdev->dev.parent = scm_root;
192 scmdev->dev.bus = &scm_bus_type;
193 scmdev->dev.release = scmdev_release;
194 scmdev->dev.groups = scmdev_attr_groups;
195}
196
197/*
198 * Check for state-changes, notify the driver and userspace.
199 */
200static void scmdev_update(struct scm_device *scmdev, struct sale *sale)
201{
202 struct scm_driver *scmdrv;
203 bool changed;
204
205 device_lock(&scmdev->dev);
206 changed = scmdev->attrs.rank != sale->rank ||
207 scmdev->attrs.oper_state != sale->op_state;
208 scmdev->attrs.rank = sale->rank;
209 scmdev->attrs.oper_state = sale->op_state;
210 if (!scmdev->dev.driver)
211 goto out;
212 scmdrv = to_scm_drv(scmdev->dev.driver);
213 if (changed && scmdrv->notify)
214 scmdrv->notify(scmdev);
215out:
216 device_unlock(&scmdev->dev);
217 if (changed)
218 kobject_uevent(&scmdev->dev.kobj, KOBJ_CHANGE);
219}
220
221static int check_address(struct device *dev, void *data)
222{
223 struct scm_device *scmdev = to_scm_dev(dev);
224 struct sale *sale = data;
225
226 return scmdev->address == sale->sa;
227}
228
229static struct scm_device *scmdev_find(struct sale *sale)
230{
231 struct device *dev;
232
233 dev = bus_find_device(&scm_bus_type, NULL, sale, check_address);
234
235 return dev ? to_scm_dev(dev) : NULL;
236}
237
238static int scm_add(struct chsc_scm_info *scm_info, size_t num)
239{
240 struct sale *sale, *scmal = scm_info->scmal;
241 struct scm_device *scmdev;
242 int ret;
243
244 for (sale = scmal; sale < scmal + num; sale++) {
245 scmdev = scmdev_find(sale);
246 if (scmdev) {
247 scmdev_update(scmdev, sale);
248 /* Release reference from scm_find(). */
249 put_device(&scmdev->dev);
250 continue;
251 }
252 scmdev = kzalloc(sizeof(*scmdev), GFP_KERNEL);
253 if (!scmdev)
254 return -ENODEV;
255 scmdev_setup(scmdev, sale, scm_info->is, scm_info->mbc);
256 ret = device_register(&scmdev->dev);
257 if (ret) {
258 /* Release reference from device_initialize(). */
259 put_device(&scmdev->dev);
260 return ret;
261 }
262 }
263
264 return 0;
265}
266
267int scm_update_information(void)
268{
269 struct chsc_scm_info *scm_info;
270 u64 token = 0;
271 size_t num;
272 int ret;
273
274 scm_info = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
275 if (!scm_info)
276 return -ENOMEM;
277
278 do {
279 ret = chsc_scm_info(scm_info, token);
280 if (ret)
281 break;
282
283 num = (scm_info->response.length -
284 (offsetof(struct chsc_scm_info, scmal) -
285 offsetof(struct chsc_scm_info, response))
286 ) / sizeof(struct sale);
287
288 ret = scm_add(scm_info, num);
289 if (ret)
290 break;
291
292 token = scm_info->restok;
293 } while (token);
294
295 free_page((unsigned long)scm_info);
296
297 return ret;
298}
299
300static int __init scm_init(void)
301{
302 int ret;
303
304 ret = bus_register(&scm_bus_type);
305 if (ret)
306 return ret;
307
308 scm_root = root_device_register("scm");
309 if (IS_ERR(scm_root)) {
310 bus_unregister(&scm_bus_type);
311 return PTR_ERR(scm_root);
312 }
313
314 scm_update_information();
315 return 0;
316}
317subsys_initcall_sync(scm_init);
diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile
index af3c7f16ea88..771faf7094d6 100644
--- a/drivers/s390/crypto/Makefile
+++ b/drivers/s390/crypto/Makefile
@@ -4,4 +4,5 @@
4 4
5ap-objs := ap_bus.o 5ap-objs := ap_bus.o
6obj-$(CONFIG_ZCRYPT) += ap.o zcrypt_api.o zcrypt_pcicc.o zcrypt_pcixcc.o 6obj-$(CONFIG_ZCRYPT) += ap.o zcrypt_api.o zcrypt_pcicc.o zcrypt_pcixcc.o
7obj-$(CONFIG_ZCRYPT) += zcrypt_pcica.o zcrypt_cex2a.o 7obj-$(CONFIG_ZCRYPT) += zcrypt_pcica.o zcrypt_cex2a.o zcrypt_cex4.o
8obj-$(CONFIG_ZCRYPT) += zcrypt_msgtype6.o zcrypt_msgtype50.o
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index ae258a4b4e5e..7b865a7300e6 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright IBM Corp. 2006 2 * Copyright IBM Corp. 2006, 2012
3 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> 3 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
4 * Martin Schwidefsky <schwidefsky@de.ibm.com> 4 * Martin Schwidefsky <schwidefsky@de.ibm.com>
5 * Ralph Wuerthner <rwuerthn@de.ibm.com> 5 * Ralph Wuerthner <rwuerthn@de.ibm.com>
@@ -62,13 +62,14 @@ static void ap_interrupt_handler(void *unused1, void *unused2);
62static void ap_reset(struct ap_device *ap_dev); 62static void ap_reset(struct ap_device *ap_dev);
63static void ap_config_timeout(unsigned long ptr); 63static void ap_config_timeout(unsigned long ptr);
64static int ap_select_domain(void); 64static int ap_select_domain(void);
65static void ap_query_configuration(void);
65 66
66/* 67/*
67 * Module description. 68 * Module description.
68 */ 69 */
69MODULE_AUTHOR("IBM Corporation"); 70MODULE_AUTHOR("IBM Corporation");
70MODULE_DESCRIPTION("Adjunct Processor Bus driver, " 71MODULE_DESCRIPTION("Adjunct Processor Bus driver, " \
71 "Copyright IBM Corp. 2006"); 72 "Copyright IBM Corp. 2006, 2012");
72MODULE_LICENSE("GPL"); 73MODULE_LICENSE("GPL");
73 74
74/* 75/*
@@ -84,6 +85,7 @@ module_param_named(poll_thread, ap_thread_flag, int, 0000);
84MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off)."); 85MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off).");
85 86
86static struct device *ap_root_device = NULL; 87static struct device *ap_root_device = NULL;
88static struct ap_config_info *ap_configuration;
87static DEFINE_SPINLOCK(ap_device_list_lock); 89static DEFINE_SPINLOCK(ap_device_list_lock);
88static LIST_HEAD(ap_device_list); 90static LIST_HEAD(ap_device_list);
89 91
@@ -158,6 +160,19 @@ static int ap_interrupts_available(void)
158} 160}
159 161
160/** 162/**
163 * ap_configuration_available(): Test if AP configuration
164 * information is available.
165 *
166 * Returns 1 if AP configuration information is available.
167 */
168#ifdef CONFIG_64BIT
169static int ap_configuration_available(void)
170{
171 return test_facility(2) && test_facility(12);
172}
173#endif
174
175/**
161 * ap_test_queue(): Test adjunct processor queue. 176 * ap_test_queue(): Test adjunct processor queue.
162 * @qid: The AP queue number 177 * @qid: The AP queue number
163 * @queue_depth: Pointer to queue depth value 178 * @queue_depth: Pointer to queue depth value
@@ -242,6 +257,26 @@ __ap_query_functions(ap_qid_t qid, unsigned int *functions)
242} 257}
243#endif 258#endif
244 259
260#ifdef CONFIG_64BIT
261static inline int __ap_query_configuration(struct ap_config_info *config)
262{
263 register unsigned long reg0 asm ("0") = 0x04000000UL;
264 register unsigned long reg1 asm ("1") = -EINVAL;
265 register unsigned char *reg2 asm ("2") = (unsigned char *)config;
266
267 asm volatile(
268 ".long 0xb2af0000\n" /* PQAP(QCI) */
269 "0: la %1,0\n"
270 "1:\n"
271 EX_TABLE(0b, 1b)
272 : "+d" (reg0), "+d" (reg1), "+d" (reg2)
273 :
274 : "cc");
275
276 return reg1;
277}
278#endif
279
245/** 280/**
246 * ap_query_functions(): Query supported functions. 281 * ap_query_functions(): Query supported functions.
247 * @qid: The AP queue number 282 * @qid: The AP queue number
@@ -292,25 +327,6 @@ static int ap_query_functions(ap_qid_t qid, unsigned int *functions)
292} 327}
293 328
294/** 329/**
295 * ap_4096_commands_availablen(): Check for availability of 4096 bit RSA
296 * support.
297 * @qid: The AP queue number
298 *
299 * Returns 1 if 4096 bit RSA keys are support fo the AP, returns 0 if not.
300 */
301int ap_4096_commands_available(ap_qid_t qid)
302{
303 unsigned int functions;
304
305 if (ap_query_functions(qid, &functions))
306 return 0;
307
308 return test_ap_facility(functions, 1) &&
309 test_ap_facility(functions, 2);
310}
311EXPORT_SYMBOL(ap_4096_commands_available);
312
313/**
314 * ap_queue_enable_interruption(): Enable interruption on an AP. 330 * ap_queue_enable_interruption(): Enable interruption on an AP.
315 * @qid: The AP queue number 331 * @qid: The AP queue number
316 * @ind: the notification indicator byte 332 * @ind: the notification indicator byte
@@ -657,6 +673,34 @@ static ssize_t ap_request_count_show(struct device *dev,
657 673
658static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL); 674static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);
659 675
676static ssize_t ap_requestq_count_show(struct device *dev,
677 struct device_attribute *attr, char *buf)
678{
679 struct ap_device *ap_dev = to_ap_dev(dev);
680 int rc;
681
682 spin_lock_bh(&ap_dev->lock);
683 rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->requestq_count);
684 spin_unlock_bh(&ap_dev->lock);
685 return rc;
686}
687
688static DEVICE_ATTR(requestq_count, 0444, ap_requestq_count_show, NULL);
689
690static ssize_t ap_pendingq_count_show(struct device *dev,
691 struct device_attribute *attr, char *buf)
692{
693 struct ap_device *ap_dev = to_ap_dev(dev);
694 int rc;
695
696 spin_lock_bh(&ap_dev->lock);
697 rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->pendingq_count);
698 spin_unlock_bh(&ap_dev->lock);
699 return rc;
700}
701
702static DEVICE_ATTR(pendingq_count, 0444, ap_pendingq_count_show, NULL);
703
660static ssize_t ap_modalias_show(struct device *dev, 704static ssize_t ap_modalias_show(struct device *dev,
661 struct device_attribute *attr, char *buf) 705 struct device_attribute *attr, char *buf)
662{ 706{
@@ -665,11 +709,23 @@ static ssize_t ap_modalias_show(struct device *dev,
665 709
666static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL); 710static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL);
667 711
712static ssize_t ap_functions_show(struct device *dev,
713 struct device_attribute *attr, char *buf)
714{
715 struct ap_device *ap_dev = to_ap_dev(dev);
716 return snprintf(buf, PAGE_SIZE, "0x%08X\n", ap_dev->functions);
717}
718
719static DEVICE_ATTR(ap_functions, 0444, ap_functions_show, NULL);
720
668static struct attribute *ap_dev_attrs[] = { 721static struct attribute *ap_dev_attrs[] = {
669 &dev_attr_hwtype.attr, 722 &dev_attr_hwtype.attr,
670 &dev_attr_depth.attr, 723 &dev_attr_depth.attr,
671 &dev_attr_request_count.attr, 724 &dev_attr_request_count.attr,
725 &dev_attr_requestq_count.attr,
726 &dev_attr_pendingq_count.attr,
672 &dev_attr_modalias.attr, 727 &dev_attr_modalias.attr,
728 &dev_attr_ap_functions.attr,
673 NULL 729 NULL
674}; 730};
675static struct attribute_group ap_dev_attr_group = { 731static struct attribute_group ap_dev_attr_group = {
@@ -772,6 +828,7 @@ static int ap_bus_resume(struct device *dev)
772 ap_suspend_flag = 0; 828 ap_suspend_flag = 0;
773 if (!ap_interrupts_available()) 829 if (!ap_interrupts_available())
774 ap_interrupt_indicator = NULL; 830 ap_interrupt_indicator = NULL;
831 ap_query_configuration();
775 if (!user_set_domain) { 832 if (!user_set_domain) {
776 ap_domain_index = -1; 833 ap_domain_index = -1;
777 ap_select_domain(); 834 ap_select_domain();
@@ -895,6 +952,20 @@ void ap_driver_unregister(struct ap_driver *ap_drv)
895} 952}
896EXPORT_SYMBOL(ap_driver_unregister); 953EXPORT_SYMBOL(ap_driver_unregister);
897 954
955void ap_bus_force_rescan(void)
956{
957 /* Delete the AP bus rescan timer. */
958 del_timer(&ap_config_timer);
959
960 /* processing a synchonuous bus rescan */
961 ap_scan_bus(NULL);
962
963 /* Setup the AP bus rescan timer again. */
964 ap_config_timer.expires = jiffies + ap_config_time * HZ;
965 add_timer(&ap_config_timer);
966}
967EXPORT_SYMBOL(ap_bus_force_rescan);
968
898/* 969/*
899 * AP bus attributes. 970 * AP bus attributes.
900 */ 971 */
@@ -997,6 +1068,65 @@ static struct bus_attribute *const ap_bus_attrs[] = {
997 NULL, 1068 NULL,
998}; 1069};
999 1070
1071static inline int ap_test_config(unsigned int *field, unsigned int nr)
1072{
1073 if (nr > 0xFFu)
1074 return 0;
1075 return ap_test_bit((field + (nr >> 5)), (nr & 0x1f));
1076}
1077
1078/*
1079 * ap_test_config_card_id(): Test, whether an AP card ID is configured.
1080 * @id AP card ID
1081 *
1082 * Returns 0 if the card is not configured
1083 * 1 if the card is configured or
1084 * if the configuration information is not available
1085 */
1086static inline int ap_test_config_card_id(unsigned int id)
1087{
1088 if (!ap_configuration)
1089 return 1;
1090 return ap_test_config(ap_configuration->apm, id);
1091}
1092
1093/*
1094 * ap_test_config_domain(): Test, whether an AP usage domain is configured.
1095 * @domain AP usage domain ID
1096 *
1097 * Returns 0 if the usage domain is not configured
1098 * 1 if the usage domain is configured or
1099 * if the configuration information is not available
1100 */
1101static inline int ap_test_config_domain(unsigned int domain)
1102{
1103 if (!ap_configuration)
1104 return 1;
1105 return ap_test_config(ap_configuration->aqm, domain);
1106}
1107
1108/**
1109 * ap_query_configuration(): Query AP configuration information.
1110 *
1111 * Query information of installed cards and configured domains from AP.
1112 */
1113static void ap_query_configuration(void)
1114{
1115#ifdef CONFIG_64BIT
1116 if (ap_configuration_available()) {
1117 if (!ap_configuration)
1118 ap_configuration =
1119 kzalloc(sizeof(struct ap_config_info),
1120 GFP_KERNEL);
1121 if (ap_configuration)
1122 __ap_query_configuration(ap_configuration);
1123 } else
1124 ap_configuration = NULL;
1125#else
1126 ap_configuration = NULL;
1127#endif
1128}
1129
1000/** 1130/**
1001 * ap_select_domain(): Select an AP domain. 1131 * ap_select_domain(): Select an AP domain.
1002 * 1132 *
@@ -1005,6 +1135,7 @@ static struct bus_attribute *const ap_bus_attrs[] = {
1005static int ap_select_domain(void) 1135static int ap_select_domain(void)
1006{ 1136{
1007 int queue_depth, device_type, count, max_count, best_domain; 1137 int queue_depth, device_type, count, max_count, best_domain;
1138 ap_qid_t qid;
1008 int rc, i, j; 1139 int rc, i, j;
1009 1140
1010 /* 1141 /*
@@ -1018,9 +1149,13 @@ static int ap_select_domain(void)
1018 best_domain = -1; 1149 best_domain = -1;
1019 max_count = 0; 1150 max_count = 0;
1020 for (i = 0; i < AP_DOMAINS; i++) { 1151 for (i = 0; i < AP_DOMAINS; i++) {
1152 if (!ap_test_config_domain(i))
1153 continue;
1021 count = 0; 1154 count = 0;
1022 for (j = 0; j < AP_DEVICES; j++) { 1155 for (j = 0; j < AP_DEVICES; j++) {
1023 ap_qid_t qid = AP_MKQID(j, i); 1156 if (!ap_test_config_card_id(j))
1157 continue;
1158 qid = AP_MKQID(j, i);
1024 rc = ap_query_queue(qid, &queue_depth, &device_type); 1159 rc = ap_query_queue(qid, &queue_depth, &device_type);
1025 if (rc) 1160 if (rc)
1026 continue; 1161 continue;
@@ -1169,6 +1304,7 @@ static void ap_scan_bus(struct work_struct *unused)
1169 unsigned int device_functions; 1304 unsigned int device_functions;
1170 int rc, i; 1305 int rc, i;
1171 1306
1307 ap_query_configuration();
1172 if (ap_select_domain() != 0) 1308 if (ap_select_domain() != 0)
1173 return; 1309 return;
1174 for (i = 0; i < AP_DEVICES; i++) { 1310 for (i = 0; i < AP_DEVICES; i++) {
@@ -1176,7 +1312,10 @@ static void ap_scan_bus(struct work_struct *unused)
1176 dev = bus_find_device(&ap_bus_type, NULL, 1312 dev = bus_find_device(&ap_bus_type, NULL,
1177 (void *)(unsigned long)qid, 1313 (void *)(unsigned long)qid,
1178 __ap_scan_bus); 1314 __ap_scan_bus);
1179 rc = ap_query_queue(qid, &queue_depth, &device_type); 1315 if (ap_test_config_card_id(i))
1316 rc = ap_query_queue(qid, &queue_depth, &device_type);
1317 else
1318 rc = -ENODEV;
1180 if (dev) { 1319 if (dev) {
1181 if (rc == -EBUSY) { 1320 if (rc == -EBUSY) {
1182 set_current_state(TASK_UNINTERRUPTIBLE); 1321 set_current_state(TASK_UNINTERRUPTIBLE);
@@ -1217,29 +1356,22 @@ static void ap_scan_bus(struct work_struct *unused)
1217 (unsigned long) ap_dev); 1356 (unsigned long) ap_dev);
1218 switch (device_type) { 1357 switch (device_type) {
1219 case 0: 1358 case 0:
1359 /* device type probing for old cards */
1220 if (ap_probe_device_type(ap_dev)) { 1360 if (ap_probe_device_type(ap_dev)) {
1221 kfree(ap_dev); 1361 kfree(ap_dev);
1222 continue; 1362 continue;
1223 } 1363 }
1224 break; 1364 break;
1225 case 10:
1226 if (ap_query_functions(qid, &device_functions)) {
1227 kfree(ap_dev);
1228 continue;
1229 }
1230 if (test_ap_facility(device_functions, 3))
1231 ap_dev->device_type = AP_DEVICE_TYPE_CEX3C;
1232 else if (test_ap_facility(device_functions, 4))
1233 ap_dev->device_type = AP_DEVICE_TYPE_CEX3A;
1234 else {
1235 kfree(ap_dev);
1236 continue;
1237 }
1238 break;
1239 default: 1365 default:
1240 ap_dev->device_type = device_type; 1366 ap_dev->device_type = device_type;
1241 } 1367 }
1242 1368
1369 rc = ap_query_functions(qid, &device_functions);
1370 if (!rc)
1371 ap_dev->functions = device_functions;
1372 else
1373 ap_dev->functions = 0u;
1374
1243 ap_dev->device.bus = &ap_bus_type; 1375 ap_dev->device.bus = &ap_bus_type;
1244 ap_dev->device.parent = ap_root_device; 1376 ap_dev->device.parent = ap_root_device;
1245 if (dev_set_name(&ap_dev->device, "card%02x", 1377 if (dev_set_name(&ap_dev->device, "card%02x",
@@ -1785,6 +1917,7 @@ int __init ap_module_init(void)
1785 goto out_root; 1917 goto out_root;
1786 } 1918 }
1787 1919
1920 ap_query_configuration();
1788 if (ap_select_domain() == 0) 1921 if (ap_select_domain() == 0)
1789 ap_scan_bus(NULL); 1922 ap_scan_bus(NULL);
1790 1923
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 52d61995af88..685f6cc022f9 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright IBM Corp. 2006 2 * Copyright IBM Corp. 2006, 2012
3 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> 3 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
4 * Martin Schwidefsky <schwidefsky@de.ibm.com> 4 * Martin Schwidefsky <schwidefsky@de.ibm.com>
5 * Ralph Wuerthner <rwuerthn@de.ibm.com> 5 * Ralph Wuerthner <rwuerthn@de.ibm.com>
@@ -83,13 +83,12 @@ int ap_queue_status_invalid_test(struct ap_queue_status *status)
83 return !(memcmp(status, &invalid, sizeof(struct ap_queue_status))); 83 return !(memcmp(status, &invalid, sizeof(struct ap_queue_status)));
84} 84}
85 85
86#define MAX_AP_FACILITY 31 86#define AP_MAX_BITS 31
87 87static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
88static inline int test_ap_facility(unsigned int function, unsigned int nr)
89{ 88{
90 if (nr > MAX_AP_FACILITY) 89 if (nr > AP_MAX_BITS)
91 return 0; 90 return 0;
92 return function & (unsigned int)(0x80000000 >> nr); 91 return (*ptr & (0x80000000u >> nr)) != 0;
93} 92}
94 93
95#define AP_RESPONSE_NORMAL 0x00 94#define AP_RESPONSE_NORMAL 0x00
@@ -117,6 +116,15 @@ static inline int test_ap_facility(unsigned int function, unsigned int nr)
117#define AP_DEVICE_TYPE_CEX2C 7 116#define AP_DEVICE_TYPE_CEX2C 7
118#define AP_DEVICE_TYPE_CEX3A 8 117#define AP_DEVICE_TYPE_CEX3A 8
119#define AP_DEVICE_TYPE_CEX3C 9 118#define AP_DEVICE_TYPE_CEX3C 9
119#define AP_DEVICE_TYPE_CEX4 10
120
121/*
122 * Known function facilities
123 */
124#define AP_FUNC_MEX4K 1
125#define AP_FUNC_CRT4K 2
126#define AP_FUNC_COPRO 3
127#define AP_FUNC_ACCEL 4
120 128
121/* 129/*
122 * AP reset flag states 130 * AP reset flag states
@@ -151,6 +159,7 @@ struct ap_device {
151 ap_qid_t qid; /* AP queue id. */ 159 ap_qid_t qid; /* AP queue id. */
152 int queue_depth; /* AP queue depth.*/ 160 int queue_depth; /* AP queue depth.*/
153 int device_type; /* AP device type. */ 161 int device_type; /* AP device type. */
162 unsigned int functions; /* AP device function bitfield. */
154 int unregistered; /* marks AP device as unregistered */ 163 int unregistered; /* marks AP device as unregistered */
155 struct timer_list timeout; /* Timer for request timeouts. */ 164 struct timer_list timeout; /* Timer for request timeouts. */
156 int reset; /* Reset required after req. timeout. */ 165 int reset; /* Reset required after req. timeout. */
@@ -183,6 +192,17 @@ struct ap_message {
183 struct ap_message *); 192 struct ap_message *);
184}; 193};
185 194
195struct ap_config_info {
196 unsigned int special_command:1;
197 unsigned int ap_extended:1;
198 unsigned char reserved1:6;
199 unsigned char reserved2[15];
200 unsigned int apm[8]; /* AP ID mask */
201 unsigned int aqm[8]; /* AP queue mask */
202 unsigned int adm[8]; /* AP domain mask */
203 unsigned char reserved4[16];
204} __packed;
205
186#define AP_DEVICE(dt) \ 206#define AP_DEVICE(dt) \
187 .dev_type=(dt), \ 207 .dev_type=(dt), \
188 .match_flags=AP_DEVICE_ID_MATCH_DEVICE_TYPE, 208 .match_flags=AP_DEVICE_ID_MATCH_DEVICE_TYPE,
@@ -211,10 +231,9 @@ int ap_recv(ap_qid_t, unsigned long long *, void *, size_t);
211void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg); 231void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg);
212void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg); 232void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg);
213void ap_flush_queue(struct ap_device *ap_dev); 233void ap_flush_queue(struct ap_device *ap_dev);
234void ap_bus_force_rescan(void);
214 235
215int ap_module_init(void); 236int ap_module_init(void);
216void ap_module_exit(void); 237void ap_module_exit(void);
217 238
218int ap_4096_commands_available(ap_qid_t qid);
219
220#endif /* _AP_BUS_H_ */ 239#endif /* _AP_BUS_H_ */
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 2f94132246a1..31cfaa556072 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * zcrypt 2.1.0 2 * zcrypt 2.1.0
3 * 3 *
4 * Copyright IBM Corp. 2001, 2006 4 * Copyright IBM Corp. 2001, 2012
5 * Author(s): Robert Burroughs 5 * Author(s): Robert Burroughs
6 * Eric Rossman (edrossma@us.ibm.com) 6 * Eric Rossman (edrossma@us.ibm.com)
7 * Cornelia Huck <cornelia.huck@de.ibm.com> 7 * Cornelia Huck <cornelia.huck@de.ibm.com>
@@ -9,6 +9,7 @@
9 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) 9 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
10 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> 10 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
11 * Ralph Wuerthner <rwuerthn@de.ibm.com> 11 * Ralph Wuerthner <rwuerthn@de.ibm.com>
12 * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
12 * 13 *
13 * This program is free software; you can redistribute it and/or modify 14 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by 15 * it under the terms of the GNU General Public License as published by
@@ -37,25 +38,39 @@
37#include <linux/atomic.h> 38#include <linux/atomic.h>
38#include <asm/uaccess.h> 39#include <asm/uaccess.h>
39#include <linux/hw_random.h> 40#include <linux/hw_random.h>
41#include <linux/debugfs.h>
42#include <asm/debug.h>
40 43
44#include "zcrypt_debug.h"
41#include "zcrypt_api.h" 45#include "zcrypt_api.h"
42 46
43/* 47/*
44 * Module description. 48 * Module description.
45 */ 49 */
46MODULE_AUTHOR("IBM Corporation"); 50MODULE_AUTHOR("IBM Corporation");
47MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " 51MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \
48 "Copyright IBM Corp. 2001, 2006"); 52 "Copyright IBM Corp. 2001, 2012");
49MODULE_LICENSE("GPL"); 53MODULE_LICENSE("GPL");
50 54
51static DEFINE_SPINLOCK(zcrypt_device_lock); 55static DEFINE_SPINLOCK(zcrypt_device_lock);
52static LIST_HEAD(zcrypt_device_list); 56static LIST_HEAD(zcrypt_device_list);
53static int zcrypt_device_count = 0; 57static int zcrypt_device_count = 0;
54static atomic_t zcrypt_open_count = ATOMIC_INIT(0); 58static atomic_t zcrypt_open_count = ATOMIC_INIT(0);
59static atomic_t zcrypt_rescan_count = ATOMIC_INIT(0);
60
61atomic_t zcrypt_rescan_req = ATOMIC_INIT(0);
62EXPORT_SYMBOL(zcrypt_rescan_req);
55 63
56static int zcrypt_rng_device_add(void); 64static int zcrypt_rng_device_add(void);
57static void zcrypt_rng_device_remove(void); 65static void zcrypt_rng_device_remove(void);
58 66
67static DEFINE_SPINLOCK(zcrypt_ops_list_lock);
68static LIST_HEAD(zcrypt_ops_list);
69
70static debug_info_t *zcrypt_dbf_common;
71static debug_info_t *zcrypt_dbf_devices;
72static struct dentry *debugfs_root;
73
59/* 74/*
60 * Device attributes common for all crypto devices. 75 * Device attributes common for all crypto devices.
61 */ 76 */
@@ -85,6 +100,8 @@ static ssize_t zcrypt_online_store(struct device *dev,
85 if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1) 100 if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1)
86 return -EINVAL; 101 return -EINVAL;
87 zdev->online = online; 102 zdev->online = online;
103 ZCRYPT_DBF_DEV(DBF_INFO, zdev, "dev%04xo%dman", zdev->ap_dev->qid,
104 zdev->online);
88 if (!online) 105 if (!online)
89 ap_flush_queue(zdev->ap_dev); 106 ap_flush_queue(zdev->ap_dev);
90 return count; 107 return count;
@@ -103,6 +120,24 @@ static struct attribute_group zcrypt_device_attr_group = {
103}; 120};
104 121
105/** 122/**
123 * Process a rescan of the transport layer.
124 *
125 * Returns 1, if the rescan has been processed, otherwise 0.
126 */
127static inline int zcrypt_process_rescan(void)
128{
129 if (atomic_read(&zcrypt_rescan_req)) {
130 atomic_set(&zcrypt_rescan_req, 0);
131 atomic_inc(&zcrypt_rescan_count);
132 ap_bus_force_rescan();
133 ZCRYPT_DBF_COMMON(DBF_INFO, "rescan%07d",
134 atomic_inc_return(&zcrypt_rescan_count));
135 return 1;
136 }
137 return 0;
138}
139
140/**
106 * __zcrypt_increase_preference(): Increase preference of a crypto device. 141 * __zcrypt_increase_preference(): Increase preference of a crypto device.
107 * @zdev: Pointer the crypto device 142 * @zdev: Pointer the crypto device
108 * 143 *
@@ -190,6 +225,7 @@ struct zcrypt_device *zcrypt_device_alloc(size_t max_response_size)
190 zdev->reply.length = max_response_size; 225 zdev->reply.length = max_response_size;
191 spin_lock_init(&zdev->lock); 226 spin_lock_init(&zdev->lock);
192 INIT_LIST_HEAD(&zdev->list); 227 INIT_LIST_HEAD(&zdev->list);
228 zdev->dbf_area = zcrypt_dbf_devices;
193 return zdev; 229 return zdev;
194 230
195out_free: 231out_free:
@@ -215,6 +251,8 @@ int zcrypt_device_register(struct zcrypt_device *zdev)
215{ 251{
216 int rc; 252 int rc;
217 253
254 if (!zdev->ops)
255 return -ENODEV;
218 rc = sysfs_create_group(&zdev->ap_dev->device.kobj, 256 rc = sysfs_create_group(&zdev->ap_dev->device.kobj,
219 &zcrypt_device_attr_group); 257 &zcrypt_device_attr_group);
220 if (rc) 258 if (rc)
@@ -223,6 +261,8 @@ int zcrypt_device_register(struct zcrypt_device *zdev)
223 kref_init(&zdev->refcount); 261 kref_init(&zdev->refcount);
224 spin_lock_bh(&zcrypt_device_lock); 262 spin_lock_bh(&zcrypt_device_lock);
225 zdev->online = 1; /* New devices are online by default. */ 263 zdev->online = 1; /* New devices are online by default. */
264 ZCRYPT_DBF_DEV(DBF_INFO, zdev, "dev%04xo%dreg", zdev->ap_dev->qid,
265 zdev->online);
226 list_add_tail(&zdev->list, &zcrypt_device_list); 266 list_add_tail(&zdev->list, &zcrypt_device_list);
227 __zcrypt_increase_preference(zdev); 267 __zcrypt_increase_preference(zdev);
228 zcrypt_device_count++; 268 zcrypt_device_count++;
@@ -269,6 +309,67 @@ void zcrypt_device_unregister(struct zcrypt_device *zdev)
269} 309}
270EXPORT_SYMBOL(zcrypt_device_unregister); 310EXPORT_SYMBOL(zcrypt_device_unregister);
271 311
312void zcrypt_msgtype_register(struct zcrypt_ops *zops)
313{
314 if (zops->owner) {
315 spin_lock_bh(&zcrypt_ops_list_lock);
316 list_add_tail(&zops->list, &zcrypt_ops_list);
317 spin_unlock_bh(&zcrypt_ops_list_lock);
318 }
319}
320EXPORT_SYMBOL(zcrypt_msgtype_register);
321
322void zcrypt_msgtype_unregister(struct zcrypt_ops *zops)
323{
324 spin_lock_bh(&zcrypt_ops_list_lock);
325 list_del_init(&zops->list);
326 spin_unlock_bh(&zcrypt_ops_list_lock);
327}
328EXPORT_SYMBOL(zcrypt_msgtype_unregister);
329
330static inline
331struct zcrypt_ops *__ops_lookup(unsigned char *name, int variant)
332{
333 struct zcrypt_ops *zops;
334 int found = 0;
335
336 spin_lock_bh(&zcrypt_ops_list_lock);
337 list_for_each_entry(zops, &zcrypt_ops_list, list) {
338 if ((zops->variant == variant) &&
339 (!strncmp(zops->owner->name, name, MODULE_NAME_LEN))) {
340 found = 1;
341 break;
342 }
343 }
344 spin_unlock_bh(&zcrypt_ops_list_lock);
345
346 if (!found)
347 return NULL;
348 return zops;
349}
350
351struct zcrypt_ops *zcrypt_msgtype_request(unsigned char *name, int variant)
352{
353 struct zcrypt_ops *zops = NULL;
354
355 zops = __ops_lookup(name, variant);
356 if (!zops) {
357 request_module(name);
358 zops = __ops_lookup(name, variant);
359 }
360 if ((!zops) || (!try_module_get(zops->owner)))
361 return NULL;
362 return zops;
363}
364EXPORT_SYMBOL(zcrypt_msgtype_request);
365
366void zcrypt_msgtype_release(struct zcrypt_ops *zops)
367{
368 if (zops)
369 module_put(zops->owner);
370}
371EXPORT_SYMBOL(zcrypt_msgtype_release);
372
272/** 373/**
273 * zcrypt_read (): Not supported beyond zcrypt 1.3.1. 374 * zcrypt_read (): Not supported beyond zcrypt 1.3.1.
274 * 375 *
@@ -640,6 +741,11 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
640 do { 741 do {
641 rc = zcrypt_rsa_modexpo(&mex); 742 rc = zcrypt_rsa_modexpo(&mex);
642 } while (rc == -EAGAIN); 743 } while (rc == -EAGAIN);
744 /* on failure: retry once again after a requested rescan */
745 if ((rc == -ENODEV) && (zcrypt_process_rescan()))
746 do {
747 rc = zcrypt_rsa_modexpo(&mex);
748 } while (rc == -EAGAIN);
643 if (rc) 749 if (rc)
644 return rc; 750 return rc;
645 return put_user(mex.outputdatalength, &umex->outputdatalength); 751 return put_user(mex.outputdatalength, &umex->outputdatalength);
@@ -652,6 +758,11 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
652 do { 758 do {
653 rc = zcrypt_rsa_crt(&crt); 759 rc = zcrypt_rsa_crt(&crt);
654 } while (rc == -EAGAIN); 760 } while (rc == -EAGAIN);
761 /* on failure: retry once again after a requested rescan */
762 if ((rc == -ENODEV) && (zcrypt_process_rescan()))
763 do {
764 rc = zcrypt_rsa_crt(&crt);
765 } while (rc == -EAGAIN);
655 if (rc) 766 if (rc)
656 return rc; 767 return rc;
657 return put_user(crt.outputdatalength, &ucrt->outputdatalength); 768 return put_user(crt.outputdatalength, &ucrt->outputdatalength);
@@ -664,6 +775,11 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
664 do { 775 do {
665 rc = zcrypt_send_cprb(&xcRB); 776 rc = zcrypt_send_cprb(&xcRB);
666 } while (rc == -EAGAIN); 777 } while (rc == -EAGAIN);
778 /* on failure: retry once again after a requested rescan */
779 if ((rc == -ENODEV) && (zcrypt_process_rescan()))
780 do {
781 rc = zcrypt_send_cprb(&xcRB);
782 } while (rc == -EAGAIN);
667 if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB))) 783 if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB)))
668 return -EFAULT; 784 return -EFAULT;
669 return rc; 785 return rc;
@@ -770,10 +886,15 @@ static long trans_modexpo32(struct file *filp, unsigned int cmd,
770 do { 886 do {
771 rc = zcrypt_rsa_modexpo(&mex64); 887 rc = zcrypt_rsa_modexpo(&mex64);
772 } while (rc == -EAGAIN); 888 } while (rc == -EAGAIN);
773 if (!rc) 889 /* on failure: retry once again after a requested rescan */
774 rc = put_user(mex64.outputdatalength, 890 if ((rc == -ENODEV) && (zcrypt_process_rescan()))
775 &umex32->outputdatalength); 891 do {
776 return rc; 892 rc = zcrypt_rsa_modexpo(&mex64);
893 } while (rc == -EAGAIN);
894 if (rc)
895 return rc;
896 return put_user(mex64.outputdatalength,
897 &umex32->outputdatalength);
777} 898}
778 899
779struct compat_ica_rsa_modexpo_crt { 900struct compat_ica_rsa_modexpo_crt {
@@ -810,10 +931,15 @@ static long trans_modexpo_crt32(struct file *filp, unsigned int cmd,
810 do { 931 do {
811 rc = zcrypt_rsa_crt(&crt64); 932 rc = zcrypt_rsa_crt(&crt64);
812 } while (rc == -EAGAIN); 933 } while (rc == -EAGAIN);
813 if (!rc) 934 /* on failure: retry once again after a requested rescan */
814 rc = put_user(crt64.outputdatalength, 935 if ((rc == -ENODEV) && (zcrypt_process_rescan()))
815 &ucrt32->outputdatalength); 936 do {
816 return rc; 937 rc = zcrypt_rsa_crt(&crt64);
938 } while (rc == -EAGAIN);
939 if (rc)
940 return rc;
941 return put_user(crt64.outputdatalength,
942 &ucrt32->outputdatalength);
817} 943}
818 944
819struct compat_ica_xcRB { 945struct compat_ica_xcRB {
@@ -869,6 +995,11 @@ static long trans_xcRB32(struct file *filp, unsigned int cmd,
869 do { 995 do {
870 rc = zcrypt_send_cprb(&xcRB64); 996 rc = zcrypt_send_cprb(&xcRB64);
871 } while (rc == -EAGAIN); 997 } while (rc == -EAGAIN);
998 /* on failure: retry once again after a requested rescan */
999 if ((rc == -ENODEV) && (zcrypt_process_rescan()))
1000 do {
1001 rc = zcrypt_send_cprb(&xcRB64);
1002 } while (rc == -EAGAIN);
872 xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length; 1003 xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length;
873 xcRB32.reply_data_length = xcRB64.reply_data_length; 1004 xcRB32.reply_data_length = xcRB64.reply_data_length;
874 xcRB32.status = xcRB64.status; 1005 xcRB32.status = xcRB64.status;
@@ -1126,6 +1257,9 @@ static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
1126 */ 1257 */
1127 if (zcrypt_rng_buffer_index == 0) { 1258 if (zcrypt_rng_buffer_index == 0) {
1128 rc = zcrypt_rng((char *) zcrypt_rng_buffer); 1259 rc = zcrypt_rng((char *) zcrypt_rng_buffer);
1260 /* on failure: retry once again after a requested rescan */
1261 if ((rc == -ENODEV) && (zcrypt_process_rescan()))
1262 rc = zcrypt_rng((char *) zcrypt_rng_buffer);
1129 if (rc < 0) 1263 if (rc < 0)
1130 return -EIO; 1264 return -EIO;
1131 zcrypt_rng_buffer_index = rc / sizeof *data; 1265 zcrypt_rng_buffer_index = rc / sizeof *data;
@@ -1178,6 +1312,30 @@ static void zcrypt_rng_device_remove(void)
1178 mutex_unlock(&zcrypt_rng_mutex); 1312 mutex_unlock(&zcrypt_rng_mutex);
1179} 1313}
1180 1314
1315int __init zcrypt_debug_init(void)
1316{
1317 debugfs_root = debugfs_create_dir("zcrypt", NULL);
1318
1319 zcrypt_dbf_common = debug_register("zcrypt_common", 1, 1, 16);
1320 debug_register_view(zcrypt_dbf_common, &debug_hex_ascii_view);
1321 debug_set_level(zcrypt_dbf_common, DBF_ERR);
1322
1323 zcrypt_dbf_devices = debug_register("zcrypt_devices", 1, 1, 16);
1324 debug_register_view(zcrypt_dbf_devices, &debug_hex_ascii_view);
1325 debug_set_level(zcrypt_dbf_devices, DBF_ERR);
1326
1327 return 0;
1328}
1329
1330void zcrypt_debug_exit(void)
1331{
1332 debugfs_remove(debugfs_root);
1333 if (zcrypt_dbf_common)
1334 debug_unregister(zcrypt_dbf_common);
1335 if (zcrypt_dbf_devices)
1336 debug_unregister(zcrypt_dbf_devices);
1337}
1338
1181/** 1339/**
1182 * zcrypt_api_init(): Module initialization. 1340 * zcrypt_api_init(): Module initialization.
1183 * 1341 *
@@ -1187,6 +1345,12 @@ int __init zcrypt_api_init(void)
1187{ 1345{
1188 int rc; 1346 int rc;
1189 1347
1348 rc = zcrypt_debug_init();
1349 if (rc)
1350 goto out;
1351
1352 atomic_set(&zcrypt_rescan_req, 0);
1353
1190 /* Register the request sprayer. */ 1354 /* Register the request sprayer. */
1191 rc = misc_register(&zcrypt_misc_device); 1355 rc = misc_register(&zcrypt_misc_device);
1192 if (rc < 0) 1356 if (rc < 0)
@@ -1216,6 +1380,7 @@ void zcrypt_api_exit(void)
1216{ 1380{
1217 remove_proc_entry("driver/z90crypt", NULL); 1381 remove_proc_entry("driver/z90crypt", NULL);
1218 misc_deregister(&zcrypt_misc_device); 1382 misc_deregister(&zcrypt_misc_device);
1383 zcrypt_debug_exit();
1219} 1384}
1220 1385
1221module_init(zcrypt_api_init); 1386module_init(zcrypt_api_init);
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index 7a32c4bc8ef9..89632919c993 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * zcrypt 2.1.0 2 * zcrypt 2.1.0
3 * 3 *
4 * Copyright IBM Corp. 2001, 2006 4 * Copyright IBM Corp. 2001, 2012
5 * Author(s): Robert Burroughs 5 * Author(s): Robert Burroughs
6 * Eric Rossman (edrossma@us.ibm.com) 6 * Eric Rossman (edrossma@us.ibm.com)
7 * Cornelia Huck <cornelia.huck@de.ibm.com> 7 * Cornelia Huck <cornelia.huck@de.ibm.com>
@@ -9,6 +9,7 @@
9 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) 9 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
10 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> 10 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
11 * Ralph Wuerthner <rwuerthn@de.ibm.com> 11 * Ralph Wuerthner <rwuerthn@de.ibm.com>
12 * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
12 * 13 *
13 * This program is free software; you can redistribute it and/or modify 14 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by 15 * it under the terms of the GNU General Public License as published by
@@ -28,8 +29,10 @@
28#ifndef _ZCRYPT_API_H_ 29#ifndef _ZCRYPT_API_H_
29#define _ZCRYPT_API_H_ 30#define _ZCRYPT_API_H_
30 31
31#include "ap_bus.h" 32#include <linux/atomic.h>
33#include <asm/debug.h>
32#include <asm/zcrypt.h> 34#include <asm/zcrypt.h>
35#include "ap_bus.h"
33 36
34/* deprecated status calls */ 37/* deprecated status calls */
35#define ICAZ90STATUS _IOR(ZCRYPT_IOCTL_MAGIC, 0x10, struct ica_z90_status) 38#define ICAZ90STATUS _IOR(ZCRYPT_IOCTL_MAGIC, 0x10, struct ica_z90_status)
@@ -87,6 +90,9 @@ struct zcrypt_ops {
87 struct ica_rsa_modexpo_crt *); 90 struct ica_rsa_modexpo_crt *);
88 long (*send_cprb)(struct zcrypt_device *, struct ica_xcRB *); 91 long (*send_cprb)(struct zcrypt_device *, struct ica_xcRB *);
89 long (*rng)(struct zcrypt_device *, char *); 92 long (*rng)(struct zcrypt_device *, char *);
93 struct list_head list; /* zcrypt ops list. */
94 struct module *owner;
95 int variant;
90}; 96};
91 97
92struct zcrypt_device { 98struct zcrypt_device {
@@ -108,14 +114,23 @@ struct zcrypt_device {
108 114
109 struct ap_message reply; /* Per-device reply structure. */ 115 struct ap_message reply; /* Per-device reply structure. */
110 int max_exp_bit_length; 116 int max_exp_bit_length;
117
118 debug_info_t *dbf_area; /* debugging */
111}; 119};
112 120
121/* transport layer rescanning */
122extern atomic_t zcrypt_rescan_req;
123
113struct zcrypt_device *zcrypt_device_alloc(size_t); 124struct zcrypt_device *zcrypt_device_alloc(size_t);
114void zcrypt_device_free(struct zcrypt_device *); 125void zcrypt_device_free(struct zcrypt_device *);
115void zcrypt_device_get(struct zcrypt_device *); 126void zcrypt_device_get(struct zcrypt_device *);
116int zcrypt_device_put(struct zcrypt_device *); 127int zcrypt_device_put(struct zcrypt_device *);
117int zcrypt_device_register(struct zcrypt_device *); 128int zcrypt_device_register(struct zcrypt_device *);
118void zcrypt_device_unregister(struct zcrypt_device *); 129void zcrypt_device_unregister(struct zcrypt_device *);
130void zcrypt_msgtype_register(struct zcrypt_ops *);
131void zcrypt_msgtype_unregister(struct zcrypt_ops *);
132struct zcrypt_ops *zcrypt_msgtype_request(unsigned char *, int);
133void zcrypt_msgtype_release(struct zcrypt_ops *);
119int zcrypt_api_init(void); 134int zcrypt_api_init(void);
120void zcrypt_api_exit(void); 135void zcrypt_api_exit(void);
121 136
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index 744c668f586c..1e849d6e1dfe 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -1,13 +1,14 @@
1/* 1/*
2 * zcrypt 2.1.0 2 * zcrypt 2.1.0
3 * 3 *
4 * Copyright IBM Corp. 2001, 2006 4 * Copyright IBM Corp. 2001, 2012
5 * Author(s): Robert Burroughs 5 * Author(s): Robert Burroughs
6 * Eric Rossman (edrossma@us.ibm.com) 6 * Eric Rossman (edrossma@us.ibm.com)
7 * 7 *
8 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) 8 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
9 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> 9 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
10 * Ralph Wuerthner <rwuerthn@de.ibm.com> 10 * Ralph Wuerthner <rwuerthn@de.ibm.com>
11 * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
11 * 12 *
12 * This program is free software; you can redistribute it and/or modify 13 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by 14 * it under the terms of the GNU General Public License as published by
@@ -35,6 +36,7 @@
35#include "zcrypt_api.h" 36#include "zcrypt_api.h"
36#include "zcrypt_error.h" 37#include "zcrypt_error.h"
37#include "zcrypt_cex2a.h" 38#include "zcrypt_cex2a.h"
39#include "zcrypt_msgtype50.h"
38 40
39#define CEX2A_MIN_MOD_SIZE 1 /* 8 bits */ 41#define CEX2A_MIN_MOD_SIZE 1 /* 8 bits */
40#define CEX2A_MAX_MOD_SIZE 256 /* 2048 bits */ 42#define CEX2A_MAX_MOD_SIZE 256 /* 2048 bits */
@@ -63,14 +65,12 @@ static struct ap_device_id zcrypt_cex2a_ids[] = {
63 65
64MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_ids); 66MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_ids);
65MODULE_AUTHOR("IBM Corporation"); 67MODULE_AUTHOR("IBM Corporation");
66MODULE_DESCRIPTION("CEX2A Cryptographic Coprocessor device driver, " 68MODULE_DESCRIPTION("CEX2A Cryptographic Coprocessor device driver, " \
67 "Copyright IBM Corp. 2001, 2006"); 69 "Copyright IBM Corp. 2001, 2012");
68MODULE_LICENSE("GPL"); 70MODULE_LICENSE("GPL");
69 71
70static int zcrypt_cex2a_probe(struct ap_device *ap_dev); 72static int zcrypt_cex2a_probe(struct ap_device *ap_dev);
71static void zcrypt_cex2a_remove(struct ap_device *ap_dev); 73static void zcrypt_cex2a_remove(struct ap_device *ap_dev);
72static void zcrypt_cex2a_receive(struct ap_device *, struct ap_message *,
73 struct ap_message *);
74 74
75static struct ap_driver zcrypt_cex2a_driver = { 75static struct ap_driver zcrypt_cex2a_driver = {
76 .probe = zcrypt_cex2a_probe, 76 .probe = zcrypt_cex2a_probe,
@@ -80,344 +80,6 @@ static struct ap_driver zcrypt_cex2a_driver = {
80}; 80};
81 81
82/** 82/**
83 * Convert a ICAMEX message to a type50 MEX message.
84 *
85 * @zdev: crypto device pointer
86 * @zreq: crypto request pointer
87 * @mex: pointer to user input data
88 *
89 * Returns 0 on success or -EFAULT.
90 */
91static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_device *zdev,
92 struct ap_message *ap_msg,
93 struct ica_rsa_modexpo *mex)
94{
95 unsigned char *mod, *exp, *inp;
96 int mod_len;
97
98 mod_len = mex->inputdatalength;
99
100 if (mod_len <= 128) {
101 struct type50_meb1_msg *meb1 = ap_msg->message;
102 memset(meb1, 0, sizeof(*meb1));
103 ap_msg->length = sizeof(*meb1);
104 meb1->header.msg_type_code = TYPE50_TYPE_CODE;
105 meb1->header.msg_len = sizeof(*meb1);
106 meb1->keyblock_type = TYPE50_MEB1_FMT;
107 mod = meb1->modulus + sizeof(meb1->modulus) - mod_len;
108 exp = meb1->exponent + sizeof(meb1->exponent) - mod_len;
109 inp = meb1->message + sizeof(meb1->message) - mod_len;
110 } else if (mod_len <= 256) {
111 struct type50_meb2_msg *meb2 = ap_msg->message;
112 memset(meb2, 0, sizeof(*meb2));
113 ap_msg->length = sizeof(*meb2);
114 meb2->header.msg_type_code = TYPE50_TYPE_CODE;
115 meb2->header.msg_len = sizeof(*meb2);
116 meb2->keyblock_type = TYPE50_MEB2_FMT;
117 mod = meb2->modulus + sizeof(meb2->modulus) - mod_len;
118 exp = meb2->exponent + sizeof(meb2->exponent) - mod_len;
119 inp = meb2->message + sizeof(meb2->message) - mod_len;
120 } else {
121 /* mod_len > 256 = 4096 bit RSA Key */
122 struct type50_meb3_msg *meb3 = ap_msg->message;
123 memset(meb3, 0, sizeof(*meb3));
124 ap_msg->length = sizeof(*meb3);
125 meb3->header.msg_type_code = TYPE50_TYPE_CODE;
126 meb3->header.msg_len = sizeof(*meb3);
127 meb3->keyblock_type = TYPE50_MEB3_FMT;
128 mod = meb3->modulus + sizeof(meb3->modulus) - mod_len;
129 exp = meb3->exponent + sizeof(meb3->exponent) - mod_len;
130 inp = meb3->message + sizeof(meb3->message) - mod_len;
131 }
132
133 if (copy_from_user(mod, mex->n_modulus, mod_len) ||
134 copy_from_user(exp, mex->b_key, mod_len) ||
135 copy_from_user(inp, mex->inputdata, mod_len))
136 return -EFAULT;
137 return 0;
138}
139
140/**
141 * Convert a ICACRT message to a type50 CRT message.
142 *
143 * @zdev: crypto device pointer
144 * @zreq: crypto request pointer
145 * @crt: pointer to user input data
146 *
147 * Returns 0 on success or -EFAULT.
148 */
149static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev,
150 struct ap_message *ap_msg,
151 struct ica_rsa_modexpo_crt *crt)
152{
153 int mod_len, short_len, long_len, long_offset, limit;
154 unsigned char *p, *q, *dp, *dq, *u, *inp;
155
156 mod_len = crt->inputdatalength;
157 short_len = mod_len / 2;
158 long_len = mod_len / 2 + 8;
159
160 /*
161 * CEX2A cannot handle p, dp, or U > 128 bytes.
162 * If we have one of these, we need to do extra checking.
163 * For CEX3A the limit is 256 bytes.
164 */
165 if (zdev->max_mod_size == CEX3A_MAX_MOD_SIZE)
166 limit = 256;
167 else
168 limit = 128;
169
170 if (long_len > limit) {
171 /*
172 * zcrypt_rsa_crt already checked for the leading
173 * zeroes of np_prime, bp_key and u_mult_inc.
174 */
175 long_offset = long_len - limit;
176 long_len = limit;
177 } else
178 long_offset = 0;
179
180 /*
181 * Instead of doing extra work for p, dp, U > 64 bytes, we'll just use
182 * the larger message structure.
183 */
184 if (long_len <= 64) {
185 struct type50_crb1_msg *crb1 = ap_msg->message;
186 memset(crb1, 0, sizeof(*crb1));
187 ap_msg->length = sizeof(*crb1);
188 crb1->header.msg_type_code = TYPE50_TYPE_CODE;
189 crb1->header.msg_len = sizeof(*crb1);
190 crb1->keyblock_type = TYPE50_CRB1_FMT;
191 p = crb1->p + sizeof(crb1->p) - long_len;
192 q = crb1->q + sizeof(crb1->q) - short_len;
193 dp = crb1->dp + sizeof(crb1->dp) - long_len;
194 dq = crb1->dq + sizeof(crb1->dq) - short_len;
195 u = crb1->u + sizeof(crb1->u) - long_len;
196 inp = crb1->message + sizeof(crb1->message) - mod_len;
197 } else if (long_len <= 128) {
198 struct type50_crb2_msg *crb2 = ap_msg->message;
199 memset(crb2, 0, sizeof(*crb2));
200 ap_msg->length = sizeof(*crb2);
201 crb2->header.msg_type_code = TYPE50_TYPE_CODE;
202 crb2->header.msg_len = sizeof(*crb2);
203 crb2->keyblock_type = TYPE50_CRB2_FMT;
204 p = crb2->p + sizeof(crb2->p) - long_len;
205 q = crb2->q + sizeof(crb2->q) - short_len;
206 dp = crb2->dp + sizeof(crb2->dp) - long_len;
207 dq = crb2->dq + sizeof(crb2->dq) - short_len;
208 u = crb2->u + sizeof(crb2->u) - long_len;
209 inp = crb2->message + sizeof(crb2->message) - mod_len;
210 } else {
211 /* long_len >= 256 */
212 struct type50_crb3_msg *crb3 = ap_msg->message;
213 memset(crb3, 0, sizeof(*crb3));
214 ap_msg->length = sizeof(*crb3);
215 crb3->header.msg_type_code = TYPE50_TYPE_CODE;
216 crb3->header.msg_len = sizeof(*crb3);
217 crb3->keyblock_type = TYPE50_CRB3_FMT;
218 p = crb3->p + sizeof(crb3->p) - long_len;
219 q = crb3->q + sizeof(crb3->q) - short_len;
220 dp = crb3->dp + sizeof(crb3->dp) - long_len;
221 dq = crb3->dq + sizeof(crb3->dq) - short_len;
222 u = crb3->u + sizeof(crb3->u) - long_len;
223 inp = crb3->message + sizeof(crb3->message) - mod_len;
224 }
225
226 if (copy_from_user(p, crt->np_prime + long_offset, long_len) ||
227 copy_from_user(q, crt->nq_prime, short_len) ||
228 copy_from_user(dp, crt->bp_key + long_offset, long_len) ||
229 copy_from_user(dq, crt->bq_key, short_len) ||
230 copy_from_user(u, crt->u_mult_inv + long_offset, long_len) ||
231 copy_from_user(inp, crt->inputdata, mod_len))
232 return -EFAULT;
233
234 return 0;
235}
236
237/**
238 * Copy results from a type 80 reply message back to user space.
239 *
240 * @zdev: crypto device pointer
241 * @reply: reply AP message.
242 * @data: pointer to user output data
243 * @length: size of user output data
244 *
245 * Returns 0 on success or -EFAULT.
246 */
247static int convert_type80(struct zcrypt_device *zdev,
248 struct ap_message *reply,
249 char __user *outputdata,
250 unsigned int outputdatalength)
251{
252 struct type80_hdr *t80h = reply->message;
253 unsigned char *data;
254
255 if (t80h->len < sizeof(*t80h) + outputdatalength) {
256 /* The result is too short, the CEX2A card may not do that.. */
257 zdev->online = 0;
258 return -EAGAIN; /* repeat the request on a different device. */
259 }
260 if (zdev->user_space_type == ZCRYPT_CEX2A)
261 BUG_ON(t80h->len > CEX2A_MAX_RESPONSE_SIZE);
262 else
263 BUG_ON(t80h->len > CEX3A_MAX_RESPONSE_SIZE);
264 data = reply->message + t80h->len - outputdatalength;
265 if (copy_to_user(outputdata, data, outputdatalength))
266 return -EFAULT;
267 return 0;
268}
269
270static int convert_response(struct zcrypt_device *zdev,
271 struct ap_message *reply,
272 char __user *outputdata,
273 unsigned int outputdatalength)
274{
275 /* Response type byte is the second byte in the response. */
276 switch (((unsigned char *) reply->message)[1]) {
277 case TYPE82_RSP_CODE:
278 case TYPE88_RSP_CODE:
279 return convert_error(zdev, reply);
280 case TYPE80_RSP_CODE:
281 return convert_type80(zdev, reply,
282 outputdata, outputdatalength);
283 default: /* Unknown response type, this should NEVER EVER happen */
284 zdev->online = 0;
285 return -EAGAIN; /* repeat the request on a different device. */
286 }
287}
288
289/**
290 * This function is called from the AP bus code after a crypto request
291 * "msg" has finished with the reply message "reply".
292 * It is called from tasklet context.
293 * @ap_dev: pointer to the AP device
294 * @msg: pointer to the AP message
295 * @reply: pointer to the AP reply message
296 */
297static void zcrypt_cex2a_receive(struct ap_device *ap_dev,
298 struct ap_message *msg,
299 struct ap_message *reply)
300{
301 static struct error_hdr error_reply = {
302 .type = TYPE82_RSP_CODE,
303 .reply_code = REP82_ERROR_MACHINE_FAILURE,
304 };
305 struct type80_hdr *t80h;
306 int length;
307
308 /* Copy the reply message to the request message buffer. */
309 if (IS_ERR(reply)) {
310 memcpy(msg->message, &error_reply, sizeof(error_reply));
311 goto out;
312 }
313 t80h = reply->message;
314 if (t80h->type == TYPE80_RSP_CODE) {
315 if (ap_dev->device_type == AP_DEVICE_TYPE_CEX2A)
316 length = min(CEX2A_MAX_RESPONSE_SIZE, (int) t80h->len);
317 else
318 length = min(CEX3A_MAX_RESPONSE_SIZE, (int) t80h->len);
319 memcpy(msg->message, reply->message, length);
320 } else
321 memcpy(msg->message, reply->message, sizeof error_reply);
322out:
323 complete((struct completion *) msg->private);
324}
325
326static atomic_t zcrypt_step = ATOMIC_INIT(0);
327
328/**
329 * The request distributor calls this function if it picked the CEX2A
330 * device to handle a modexpo request.
331 * @zdev: pointer to zcrypt_device structure that identifies the
332 * CEX2A device to the request distributor
333 * @mex: pointer to the modexpo request buffer
334 */
335static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev,
336 struct ica_rsa_modexpo *mex)
337{
338 struct ap_message ap_msg;
339 struct completion work;
340 int rc;
341
342 ap_init_message(&ap_msg);
343 if (zdev->user_space_type == ZCRYPT_CEX2A)
344 ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL);
345 else
346 ap_msg.message = kmalloc(CEX3A_MAX_MESSAGE_SIZE, GFP_KERNEL);
347 if (!ap_msg.message)
348 return -ENOMEM;
349 ap_msg.receive = zcrypt_cex2a_receive;
350 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
351 atomic_inc_return(&zcrypt_step);
352 ap_msg.private = &work;
353 rc = ICAMEX_msg_to_type50MEX_msg(zdev, &ap_msg, mex);
354 if (rc)
355 goto out_free;
356 init_completion(&work);
357 ap_queue_message(zdev->ap_dev, &ap_msg);
358 rc = wait_for_completion_interruptible(&work);
359 if (rc == 0)
360 rc = convert_response(zdev, &ap_msg, mex->outputdata,
361 mex->outputdatalength);
362 else
363 /* Signal pending. */
364 ap_cancel_message(zdev->ap_dev, &ap_msg);
365out_free:
366 kfree(ap_msg.message);
367 return rc;
368}
369
370/**
371 * The request distributor calls this function if it picked the CEX2A
372 * device to handle a modexpo_crt request.
373 * @zdev: pointer to zcrypt_device structure that identifies the
374 * CEX2A device to the request distributor
375 * @crt: pointer to the modexpoc_crt request buffer
376 */
377static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev,
378 struct ica_rsa_modexpo_crt *crt)
379{
380 struct ap_message ap_msg;
381 struct completion work;
382 int rc;
383
384 ap_init_message(&ap_msg);
385 if (zdev->user_space_type == ZCRYPT_CEX2A)
386 ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL);
387 else
388 ap_msg.message = kmalloc(CEX3A_MAX_MESSAGE_SIZE, GFP_KERNEL);
389 if (!ap_msg.message)
390 return -ENOMEM;
391 ap_msg.receive = zcrypt_cex2a_receive;
392 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
393 atomic_inc_return(&zcrypt_step);
394 ap_msg.private = &work;
395 rc = ICACRT_msg_to_type50CRT_msg(zdev, &ap_msg, crt);
396 if (rc)
397 goto out_free;
398 init_completion(&work);
399 ap_queue_message(zdev->ap_dev, &ap_msg);
400 rc = wait_for_completion_interruptible(&work);
401 if (rc == 0)
402 rc = convert_response(zdev, &ap_msg, crt->outputdata,
403 crt->outputdatalength);
404 else
405 /* Signal pending. */
406 ap_cancel_message(zdev->ap_dev, &ap_msg);
407out_free:
408 kfree(ap_msg.message);
409 return rc;
410}
411
412/**
413 * The crypto operations for a CEX2A card.
414 */
415static struct zcrypt_ops zcrypt_cex2a_ops = {
416 .rsa_modexpo = zcrypt_cex2a_modexpo,
417 .rsa_modexpo_crt = zcrypt_cex2a_modexpo_crt,
418};
419
420/**
421 * Probe function for CEX2A cards. It always accepts the AP device 83 * Probe function for CEX2A cards. It always accepts the AP device
422 * since the bus_match already checked the hardware type. 84 * since the bus_match already checked the hardware type.
423 * @ap_dev: pointer to the AP device. 85 * @ap_dev: pointer to the AP device.
@@ -449,7 +111,8 @@ static int zcrypt_cex2a_probe(struct ap_device *ap_dev)
449 zdev->min_mod_size = CEX2A_MIN_MOD_SIZE; 111 zdev->min_mod_size = CEX2A_MIN_MOD_SIZE;
450 zdev->max_mod_size = CEX2A_MAX_MOD_SIZE; 112 zdev->max_mod_size = CEX2A_MAX_MOD_SIZE;
451 zdev->max_exp_bit_length = CEX2A_MAX_MOD_SIZE; 113 zdev->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
452 if (ap_4096_commands_available(ap_dev->qid)) { 114 if (ap_test_bit(&ap_dev->functions, AP_FUNC_MEX4K) &&
115 ap_test_bit(&ap_dev->functions, AP_FUNC_CRT4K)) {
453 zdev->max_mod_size = CEX3A_MAX_MOD_SIZE; 116 zdev->max_mod_size = CEX3A_MAX_MOD_SIZE;
454 zdev->max_exp_bit_length = CEX3A_MAX_MOD_SIZE; 117 zdev->max_exp_bit_length = CEX3A_MAX_MOD_SIZE;
455 } 118 }
@@ -457,16 +120,18 @@ static int zcrypt_cex2a_probe(struct ap_device *ap_dev)
457 zdev->speed_rating = CEX3A_SPEED_RATING; 120 zdev->speed_rating = CEX3A_SPEED_RATING;
458 break; 121 break;
459 } 122 }
460 if (zdev != NULL) { 123 if (!zdev)
461 zdev->ap_dev = ap_dev; 124 return -ENODEV;
462 zdev->ops = &zcrypt_cex2a_ops; 125 zdev->ops = zcrypt_msgtype_request(MSGTYPE50_NAME,
463 zdev->online = 1; 126 MSGTYPE50_VARIANT_DEFAULT);
464 ap_dev->reply = &zdev->reply; 127 zdev->ap_dev = ap_dev;
465 ap_dev->private = zdev; 128 zdev->online = 1;
466 rc = zcrypt_device_register(zdev); 129 ap_dev->reply = &zdev->reply;
467 } 130 ap_dev->private = zdev;
131 rc = zcrypt_device_register(zdev);
468 if (rc) { 132 if (rc) {
469 ap_dev->private = NULL; 133 ap_dev->private = NULL;
134 zcrypt_msgtype_release(zdev->ops);
470 zcrypt_device_free(zdev); 135 zcrypt_device_free(zdev);
471 } 136 }
472 return rc; 137 return rc;
@@ -479,8 +144,10 @@ static int zcrypt_cex2a_probe(struct ap_device *ap_dev)
479static void zcrypt_cex2a_remove(struct ap_device *ap_dev) 144static void zcrypt_cex2a_remove(struct ap_device *ap_dev)
480{ 145{
481 struct zcrypt_device *zdev = ap_dev->private; 146 struct zcrypt_device *zdev = ap_dev->private;
147 struct zcrypt_ops *zops = zdev->ops;
482 148
483 zcrypt_device_unregister(zdev); 149 zcrypt_device_unregister(zdev);
150 zcrypt_msgtype_release(zops);
484} 151}
485 152
486int __init zcrypt_cex2a_init(void) 153int __init zcrypt_cex2a_init(void)
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
new file mode 100644
index 000000000000..ce1226398ac9
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -0,0 +1,149 @@
1/*
2 * Copyright IBM Corp. 2012
3 * Author(s): Holger Dengler <hd@linux.vnet.ibm.com>
4 */
5
6#include <linux/module.h>
7#include <linux/slab.h>
8#include <linux/init.h>
9#include <linux/err.h>
10#include <linux/atomic.h>
11#include <linux/uaccess.h>
12
13#include "ap_bus.h"
14#include "zcrypt_api.h"
15#include "zcrypt_msgtype6.h"
16#include "zcrypt_msgtype50.h"
17#include "zcrypt_error.h"
18#include "zcrypt_cex4.h"
19
20#define CEX4A_MIN_MOD_SIZE 1 /* 8 bits */
21#define CEX4A_MAX_MOD_SIZE_2K 256 /* 2048 bits */
22#define CEX4A_MAX_MOD_SIZE_4K 512 /* 4096 bits */
23
24#define CEX4C_MIN_MOD_SIZE 16 /* 256 bits */
25#define CEX4C_MAX_MOD_SIZE 512 /* 4096 bits */
26
27#define CEX4A_SPEED_RATING 900 /* TODO new card, new speed rating */
28#define CEX4C_SPEED_RATING 6500 /* TODO new card, new speed rating */
29
30#define CEX4A_MAX_MESSAGE_SIZE MSGTYPE50_CRB3_MAX_MSG_SIZE
31#define CEX4C_MAX_MESSAGE_SIZE MSGTYPE06_MAX_MSG_SIZE
32
33#define CEX4_CLEANUP_TIME (15*HZ)
34
35static struct ap_device_id zcrypt_cex4_ids[] = {
36 { AP_DEVICE(AP_DEVICE_TYPE_CEX4) },
37 { /* end of list */ },
38};
39
40MODULE_DEVICE_TABLE(ap, zcrypt_cex4_ids);
41MODULE_AUTHOR("IBM Corporation");
42MODULE_DESCRIPTION("CEX4 Cryptographic Card device driver, " \
43 "Copyright IBM Corp. 2012");
44MODULE_LICENSE("GPL");
45
46static int zcrypt_cex4_probe(struct ap_device *ap_dev);
47static void zcrypt_cex4_remove(struct ap_device *ap_dev);
48
49static struct ap_driver zcrypt_cex4_driver = {
50 .probe = zcrypt_cex4_probe,
51 .remove = zcrypt_cex4_remove,
52 .ids = zcrypt_cex4_ids,
53 .request_timeout = CEX4_CLEANUP_TIME,
54};
55
56/**
57 * Probe function for CEX4 cards. It always accepts the AP device
58 * since the bus_match already checked the hardware type.
59 * @ap_dev: pointer to the AP device.
60 */
61static int zcrypt_cex4_probe(struct ap_device *ap_dev)
62{
63 struct zcrypt_device *zdev = NULL;
64 int rc = 0;
65
66 switch (ap_dev->device_type) {
67 case AP_DEVICE_TYPE_CEX4:
68 if (ap_test_bit(&ap_dev->functions, AP_FUNC_ACCEL)) {
69 zdev = zcrypt_device_alloc(CEX4A_MAX_MESSAGE_SIZE);
70 if (!zdev)
71 return -ENOMEM;
72 zdev->type_string = "CEX4A";
73 zdev->user_space_type = ZCRYPT_CEX3A;
74 zdev->min_mod_size = CEX4A_MIN_MOD_SIZE;
75 if (ap_test_bit(&ap_dev->functions, AP_FUNC_MEX4K) &&
76 ap_test_bit(&ap_dev->functions, AP_FUNC_CRT4K)) {
77 zdev->max_mod_size =
78 CEX4A_MAX_MOD_SIZE_4K;
79 zdev->max_exp_bit_length =
80 CEX4A_MAX_MOD_SIZE_4K;
81 } else {
82 zdev->max_mod_size =
83 CEX4A_MAX_MOD_SIZE_2K;
84 zdev->max_exp_bit_length =
85 CEX4A_MAX_MOD_SIZE_2K;
86 }
87 zdev->short_crt = 1;
88 zdev->speed_rating = CEX4A_SPEED_RATING;
89 zdev->ops = zcrypt_msgtype_request(MSGTYPE50_NAME,
90 MSGTYPE50_VARIANT_DEFAULT);
91 } else if (ap_test_bit(&ap_dev->functions, AP_FUNC_COPRO)) {
92 zdev = zcrypt_device_alloc(CEX4C_MAX_MESSAGE_SIZE);
93 if (!zdev)
94 return -ENOMEM;
95 zdev->type_string = "CEX4C";
96 zdev->user_space_type = ZCRYPT_CEX3C;
97 zdev->min_mod_size = CEX4C_MIN_MOD_SIZE;
98 zdev->max_mod_size = CEX4C_MAX_MOD_SIZE;
99 zdev->max_exp_bit_length = CEX4C_MAX_MOD_SIZE;
100 zdev->short_crt = 0;
101 zdev->speed_rating = CEX4C_SPEED_RATING;
102 zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME,
103 MSGTYPE06_VARIANT_DEFAULT);
104 }
105 break;
106 }
107 if (!zdev)
108 return -ENODEV;
109 zdev->ap_dev = ap_dev;
110 zdev->online = 1;
111 ap_dev->reply = &zdev->reply;
112 ap_dev->private = zdev;
113 rc = zcrypt_device_register(zdev);
114 if (rc) {
115 zcrypt_msgtype_release(zdev->ops);
116 ap_dev->private = NULL;
117 zcrypt_device_free(zdev);
118 }
119 return rc;
120}
121
122/**
123 * This is called to remove the extended CEX4 driver information
124 * if an AP device is removed.
125 */
126static void zcrypt_cex4_remove(struct ap_device *ap_dev)
127{
128 struct zcrypt_device *zdev = ap_dev->private;
129 struct zcrypt_ops *zops;
130
131 if (zdev) {
132 zops = zdev->ops;
133 zcrypt_device_unregister(zdev);
134 zcrypt_msgtype_release(zops);
135 }
136}
137
138int __init zcrypt_cex4_init(void)
139{
140 return ap_driver_register(&zcrypt_cex4_driver, THIS_MODULE, "cex4");
141}
142
143void __exit zcrypt_cex4_exit(void)
144{
145 ap_driver_unregister(&zcrypt_cex4_driver);
146}
147
148module_init(zcrypt_cex4_init);
149module_exit(zcrypt_cex4_exit);
diff --git a/drivers/s390/crypto/zcrypt_cex4.h b/drivers/s390/crypto/zcrypt_cex4.h
new file mode 100644
index 000000000000..719571375ccc
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_cex4.h
@@ -0,0 +1,12 @@
1/*
2 * Copyright IBM Corp. 2012
3 * Author(s): Holger Dengler <hd@linux.vnet.ibm.com>
4 */
5
6#ifndef _ZCRYPT_CEX4_H_
7#define _ZCRYPT_CEX4_H_
8
9int zcrypt_cex4_init(void);
10void zcrypt_cex4_exit(void);
11
12#endif /* _ZCRYPT_CEX4_H_ */
diff --git a/drivers/s390/crypto/zcrypt_debug.h b/drivers/s390/crypto/zcrypt_debug.h
new file mode 100644
index 000000000000..841ea72e4a4e
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_debug.h
@@ -0,0 +1,59 @@
1/*
2 * Copyright IBM Corp. 2012
3 * Author(s): Holger Dengler (hd@linux.vnet.ibm.com)
4 */
5#ifndef ZCRYPT_DEBUG_H
6#define ZCRYPT_DEBUG_H
7
8#include <asm/debug.h>
9#include "zcrypt_api.h"
10
11/* that gives us 15 characters in the text event views */
12#define ZCRYPT_DBF_LEN 16
13
14/* sort out low debug levels early to avoid wasted sprints */
15static inline int zcrypt_dbf_passes(debug_info_t *dbf_grp, int level)
16{
17 return (level <= dbf_grp->level);
18}
19
20#define DBF_ERR 3 /* error conditions */
21#define DBF_WARN 4 /* warning conditions */
22#define DBF_INFO 6 /* informational */
23
24#define RC2WARN(rc) ((rc) ? DBF_WARN : DBF_INFO)
25
26#define ZCRYPT_DBF_COMMON(level, text...) \
27 do { \
28 if (zcrypt_dbf_passes(zcrypt_dbf_common, level)) { \
29 char debug_buffer[ZCRYPT_DBF_LEN]; \
30 snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \
31 debug_text_event(zcrypt_dbf_common, level, \
32 debug_buffer); \
33 } \
34 } while (0)
35
36#define ZCRYPT_DBF_DEVICES(level, text...) \
37 do { \
38 if (zcrypt_dbf_passes(zcrypt_dbf_devices, level)) { \
39 char debug_buffer[ZCRYPT_DBF_LEN]; \
40 snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \
41 debug_text_event(zcrypt_dbf_devices, level, \
42 debug_buffer); \
43 } \
44 } while (0)
45
46#define ZCRYPT_DBF_DEV(level, device, text...) \
47 do { \
48 if (zcrypt_dbf_passes(device->dbf_area, level)) { \
49 char debug_buffer[ZCRYPT_DBF_LEN]; \
50 snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \
51 debug_text_event(device->dbf_area, level, \
52 debug_buffer); \
53 } \
54 } while (0)
55
56int zcrypt_debug_init(void);
57void zcrypt_debug_exit(void);
58
59#endif /* ZCRYPT_DEBUG_H */
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
index 0965e2626d18..0079b6617211 100644
--- a/drivers/s390/crypto/zcrypt_error.h
+++ b/drivers/s390/crypto/zcrypt_error.h
@@ -26,6 +26,8 @@
26#ifndef _ZCRYPT_ERROR_H_ 26#ifndef _ZCRYPT_ERROR_H_
27#define _ZCRYPT_ERROR_H_ 27#define _ZCRYPT_ERROR_H_
28 28
29#include <linux/atomic.h>
30#include "zcrypt_debug.h"
29#include "zcrypt_api.h" 31#include "zcrypt_api.h"
30 32
31/** 33/**
@@ -108,16 +110,27 @@ static inline int convert_error(struct zcrypt_device *zdev,
108 * and then repeat the request. 110 * and then repeat the request.
109 */ 111 */
110 WARN_ON(1); 112 WARN_ON(1);
113 atomic_set(&zcrypt_rescan_req, 1);
111 zdev->online = 0; 114 zdev->online = 0;
115 ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
116 zdev->ap_dev->qid,
117 zdev->online, ehdr->reply_code);
112 return -EAGAIN; 118 return -EAGAIN;
113 case REP82_ERROR_TRANSPORT_FAIL: 119 case REP82_ERROR_TRANSPORT_FAIL:
114 case REP82_ERROR_MACHINE_FAILURE: 120 case REP82_ERROR_MACHINE_FAILURE:
115 // REP88_ERROR_MODULE_FAILURE // '10' CEX2A 121 // REP88_ERROR_MODULE_FAILURE // '10' CEX2A
116 /* If a card fails disable it and repeat the request. */ 122 /* If a card fails disable it and repeat the request. */
123 atomic_set(&zcrypt_rescan_req, 1);
117 zdev->online = 0; 124 zdev->online = 0;
125 ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
126 zdev->ap_dev->qid,
127 zdev->online, ehdr->reply_code);
118 return -EAGAIN; 128 return -EAGAIN;
119 default: 129 default:
120 zdev->online = 0; 130 zdev->online = 0;
131 ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
132 zdev->ap_dev->qid,
133 zdev->online, ehdr->reply_code);
121 return -EAGAIN; /* repeat the request on a different device. */ 134 return -EAGAIN; /* repeat the request on a different device. */
122 } 135 }
123} 136}
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c
new file mode 100644
index 000000000000..035b6dc31b71
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_msgtype50.c
@@ -0,0 +1,531 @@
1/*
2 * zcrypt 2.1.0
3 *
4 * Copyright IBM Corp. 2001, 2012
5 * Author(s): Robert Burroughs
6 * Eric Rossman (edrossma@us.ibm.com)
7 *
8 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
9 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
10 * Ralph Wuerthner <rwuerthn@de.ibm.com>
11 * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2, or (at your option)
16 * any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 */
27
28#include <linux/module.h>
29#include <linux/slab.h>
30#include <linux/init.h>
31#include <linux/err.h>
32#include <linux/atomic.h>
33#include <linux/uaccess.h>
34
35#include "ap_bus.h"
36#include "zcrypt_api.h"
37#include "zcrypt_error.h"
38#include "zcrypt_msgtype50.h"
39
40#define CEX3A_MAX_MOD_SIZE 512 /* 4096 bits */
41
42#define CEX2A_MAX_RESPONSE_SIZE 0x110 /* max outputdatalength + type80_hdr */
43
44#define CEX3A_MAX_RESPONSE_SIZE 0x210 /* 512 bit modulus
45 * (max outputdatalength) +
46 * type80_hdr*/
47
48MODULE_AUTHOR("IBM Corporation");
49MODULE_DESCRIPTION("Cryptographic Accelerator (message type 50), " \
50 "Copyright IBM Corp. 2001, 2012");
51MODULE_LICENSE("GPL");
52
53static void zcrypt_cex2a_receive(struct ap_device *, struct ap_message *,
54 struct ap_message *);
55
56/**
57 * The type 50 message family is associated with a CEX2A card.
58 *
59 * The four members of the family are described below.
60 *
61 * Note that all unsigned char arrays are right-justified and left-padded
62 * with zeroes.
63 *
64 * Note that all reserved fields must be zeroes.
65 */
66struct type50_hdr {
67 unsigned char reserved1;
68 unsigned char msg_type_code; /* 0x50 */
69 unsigned short msg_len;
70 unsigned char reserved2;
71 unsigned char ignored;
72 unsigned short reserved3;
73} __packed;
74
75#define TYPE50_TYPE_CODE 0x50
76
77#define TYPE50_MEB1_FMT 0x0001
78#define TYPE50_MEB2_FMT 0x0002
79#define TYPE50_MEB3_FMT 0x0003
80#define TYPE50_CRB1_FMT 0x0011
81#define TYPE50_CRB2_FMT 0x0012
82#define TYPE50_CRB3_FMT 0x0013
83
84/* Mod-Exp, with a small modulus */
85struct type50_meb1_msg {
86 struct type50_hdr header;
87 unsigned short keyblock_type; /* 0x0001 */
88 unsigned char reserved[6];
89 unsigned char exponent[128];
90 unsigned char modulus[128];
91 unsigned char message[128];
92} __packed;
93
94/* Mod-Exp, with a large modulus */
95struct type50_meb2_msg {
96 struct type50_hdr header;
97 unsigned short keyblock_type; /* 0x0002 */
98 unsigned char reserved[6];
99 unsigned char exponent[256];
100 unsigned char modulus[256];
101 unsigned char message[256];
102} __packed;
103
104/* Mod-Exp, with a larger modulus */
105struct type50_meb3_msg {
106 struct type50_hdr header;
107 unsigned short keyblock_type; /* 0x0003 */
108 unsigned char reserved[6];
109 unsigned char exponent[512];
110 unsigned char modulus[512];
111 unsigned char message[512];
112} __packed;
113
114/* CRT, with a small modulus */
115struct type50_crb1_msg {
116 struct type50_hdr header;
117 unsigned short keyblock_type; /* 0x0011 */
118 unsigned char reserved[6];
119 unsigned char p[64];
120 unsigned char q[64];
121 unsigned char dp[64];
122 unsigned char dq[64];
123 unsigned char u[64];
124 unsigned char message[128];
125} __packed;
126
127/* CRT, with a large modulus */
128struct type50_crb2_msg {
129 struct type50_hdr header;
130 unsigned short keyblock_type; /* 0x0012 */
131 unsigned char reserved[6];
132 unsigned char p[128];
133 unsigned char q[128];
134 unsigned char dp[128];
135 unsigned char dq[128];
136 unsigned char u[128];
137 unsigned char message[256];
138} __packed;
139
140/* CRT, with a larger modulus */
141struct type50_crb3_msg {
142 struct type50_hdr header;
143 unsigned short keyblock_type; /* 0x0013 */
144 unsigned char reserved[6];
145 unsigned char p[256];
146 unsigned char q[256];
147 unsigned char dp[256];
148 unsigned char dq[256];
149 unsigned char u[256];
150 unsigned char message[512];
151} __packed;
152
153/**
154 * The type 80 response family is associated with a CEX2A card.
155 *
156 * Note that all unsigned char arrays are right-justified and left-padded
157 * with zeroes.
158 *
159 * Note that all reserved fields must be zeroes.
160 */
161
162#define TYPE80_RSP_CODE 0x80
163
164struct type80_hdr {
165 unsigned char reserved1;
166 unsigned char type; /* 0x80 */
167 unsigned short len;
168 unsigned char code; /* 0x00 */
169 unsigned char reserved2[3];
170 unsigned char reserved3[8];
171} __packed;
172
173/**
174 * Convert a ICAMEX message to a type50 MEX message.
175 *
176 * @zdev: crypto device pointer
177 * @zreq: crypto request pointer
178 * @mex: pointer to user input data
179 *
180 * Returns 0 on success or -EFAULT.
181 */
182static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_device *zdev,
183 struct ap_message *ap_msg,
184 struct ica_rsa_modexpo *mex)
185{
186 unsigned char *mod, *exp, *inp;
187 int mod_len;
188
189 mod_len = mex->inputdatalength;
190
191 if (mod_len <= 128) {
192 struct type50_meb1_msg *meb1 = ap_msg->message;
193 memset(meb1, 0, sizeof(*meb1));
194 ap_msg->length = sizeof(*meb1);
195 meb1->header.msg_type_code = TYPE50_TYPE_CODE;
196 meb1->header.msg_len = sizeof(*meb1);
197 meb1->keyblock_type = TYPE50_MEB1_FMT;
198 mod = meb1->modulus + sizeof(meb1->modulus) - mod_len;
199 exp = meb1->exponent + sizeof(meb1->exponent) - mod_len;
200 inp = meb1->message + sizeof(meb1->message) - mod_len;
201 } else if (mod_len <= 256) {
202 struct type50_meb2_msg *meb2 = ap_msg->message;
203 memset(meb2, 0, sizeof(*meb2));
204 ap_msg->length = sizeof(*meb2);
205 meb2->header.msg_type_code = TYPE50_TYPE_CODE;
206 meb2->header.msg_len = sizeof(*meb2);
207 meb2->keyblock_type = TYPE50_MEB2_FMT;
208 mod = meb2->modulus + sizeof(meb2->modulus) - mod_len;
209 exp = meb2->exponent + sizeof(meb2->exponent) - mod_len;
210 inp = meb2->message + sizeof(meb2->message) - mod_len;
211 } else {
212 /* mod_len > 256 = 4096 bit RSA Key */
213 struct type50_meb3_msg *meb3 = ap_msg->message;
214 memset(meb3, 0, sizeof(*meb3));
215 ap_msg->length = sizeof(*meb3);
216 meb3->header.msg_type_code = TYPE50_TYPE_CODE;
217 meb3->header.msg_len = sizeof(*meb3);
218 meb3->keyblock_type = TYPE50_MEB3_FMT;
219 mod = meb3->modulus + sizeof(meb3->modulus) - mod_len;
220 exp = meb3->exponent + sizeof(meb3->exponent) - mod_len;
221 inp = meb3->message + sizeof(meb3->message) - mod_len;
222 }
223
224 if (copy_from_user(mod, mex->n_modulus, mod_len) ||
225 copy_from_user(exp, mex->b_key, mod_len) ||
226 copy_from_user(inp, mex->inputdata, mod_len))
227 return -EFAULT;
228 return 0;
229}
230
231/**
232 * Convert a ICACRT message to a type50 CRT message.
233 *
234 * @zdev: crypto device pointer
235 * @zreq: crypto request pointer
236 * @crt: pointer to user input data
237 *
238 * Returns 0 on success or -EFAULT.
239 */
240static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev,
241 struct ap_message *ap_msg,
242 struct ica_rsa_modexpo_crt *crt)
243{
244 int mod_len, short_len, long_len, long_offset, limit;
245 unsigned char *p, *q, *dp, *dq, *u, *inp;
246
247 mod_len = crt->inputdatalength;
248 short_len = mod_len / 2;
249 long_len = mod_len / 2 + 8;
250
251 /*
252 * CEX2A cannot handle p, dp, or U > 128 bytes.
253 * If we have one of these, we need to do extra checking.
254 * For CEX3A the limit is 256 bytes.
255 */
256 if (zdev->max_mod_size == CEX3A_MAX_MOD_SIZE)
257 limit = 256;
258 else
259 limit = 128;
260
261 if (long_len > limit) {
262 /*
263 * zcrypt_rsa_crt already checked for the leading
264 * zeroes of np_prime, bp_key and u_mult_inc.
265 */
266 long_offset = long_len - limit;
267 long_len = limit;
268 } else
269 long_offset = 0;
270
271 /*
272 * Instead of doing extra work for p, dp, U > 64 bytes, we'll just use
273 * the larger message structure.
274 */
275 if (long_len <= 64) {
276 struct type50_crb1_msg *crb1 = ap_msg->message;
277 memset(crb1, 0, sizeof(*crb1));
278 ap_msg->length = sizeof(*crb1);
279 crb1->header.msg_type_code = TYPE50_TYPE_CODE;
280 crb1->header.msg_len = sizeof(*crb1);
281 crb1->keyblock_type = TYPE50_CRB1_FMT;
282 p = crb1->p + sizeof(crb1->p) - long_len;
283 q = crb1->q + sizeof(crb1->q) - short_len;
284 dp = crb1->dp + sizeof(crb1->dp) - long_len;
285 dq = crb1->dq + sizeof(crb1->dq) - short_len;
286 u = crb1->u + sizeof(crb1->u) - long_len;
287 inp = crb1->message + sizeof(crb1->message) - mod_len;
288 } else if (long_len <= 128) {
289 struct type50_crb2_msg *crb2 = ap_msg->message;
290 memset(crb2, 0, sizeof(*crb2));
291 ap_msg->length = sizeof(*crb2);
292 crb2->header.msg_type_code = TYPE50_TYPE_CODE;
293 crb2->header.msg_len = sizeof(*crb2);
294 crb2->keyblock_type = TYPE50_CRB2_FMT;
295 p = crb2->p + sizeof(crb2->p) - long_len;
296 q = crb2->q + sizeof(crb2->q) - short_len;
297 dp = crb2->dp + sizeof(crb2->dp) - long_len;
298 dq = crb2->dq + sizeof(crb2->dq) - short_len;
299 u = crb2->u + sizeof(crb2->u) - long_len;
300 inp = crb2->message + sizeof(crb2->message) - mod_len;
301 } else {
302 /* long_len >= 256 */
303 struct type50_crb3_msg *crb3 = ap_msg->message;
304 memset(crb3, 0, sizeof(*crb3));
305 ap_msg->length = sizeof(*crb3);
306 crb3->header.msg_type_code = TYPE50_TYPE_CODE;
307 crb3->header.msg_len = sizeof(*crb3);
308 crb3->keyblock_type = TYPE50_CRB3_FMT;
309 p = crb3->p + sizeof(crb3->p) - long_len;
310 q = crb3->q + sizeof(crb3->q) - short_len;
311 dp = crb3->dp + sizeof(crb3->dp) - long_len;
312 dq = crb3->dq + sizeof(crb3->dq) - short_len;
313 u = crb3->u + sizeof(crb3->u) - long_len;
314 inp = crb3->message + sizeof(crb3->message) - mod_len;
315 }
316
317 if (copy_from_user(p, crt->np_prime + long_offset, long_len) ||
318 copy_from_user(q, crt->nq_prime, short_len) ||
319 copy_from_user(dp, crt->bp_key + long_offset, long_len) ||
320 copy_from_user(dq, crt->bq_key, short_len) ||
321 copy_from_user(u, crt->u_mult_inv + long_offset, long_len) ||
322 copy_from_user(inp, crt->inputdata, mod_len))
323 return -EFAULT;
324
325 return 0;
326}
327
328/**
329 * Copy results from a type 80 reply message back to user space.
330 *
331 * @zdev: crypto device pointer
332 * @reply: reply AP message.
333 * @data: pointer to user output data
334 * @length: size of user output data
335 *
336 * Returns 0 on success or -EFAULT.
337 */
338static int convert_type80(struct zcrypt_device *zdev,
339 struct ap_message *reply,
340 char __user *outputdata,
341 unsigned int outputdatalength)
342{
343 struct type80_hdr *t80h = reply->message;
344 unsigned char *data;
345
346 if (t80h->len < sizeof(*t80h) + outputdatalength) {
347 /* The result is too short, the CEX2A card may not do that.. */
348 zdev->online = 0;
349 return -EAGAIN; /* repeat the request on a different device. */
350 }
351 if (zdev->user_space_type == ZCRYPT_CEX2A)
352 BUG_ON(t80h->len > CEX2A_MAX_RESPONSE_SIZE);
353 else
354 BUG_ON(t80h->len > CEX3A_MAX_RESPONSE_SIZE);
355 data = reply->message + t80h->len - outputdatalength;
356 if (copy_to_user(outputdata, data, outputdatalength))
357 return -EFAULT;
358 return 0;
359}
360
361static int convert_response(struct zcrypt_device *zdev,
362 struct ap_message *reply,
363 char __user *outputdata,
364 unsigned int outputdatalength)
365{
366 /* Response type byte is the second byte in the response. */
367 switch (((unsigned char *) reply->message)[1]) {
368 case TYPE82_RSP_CODE:
369 case TYPE88_RSP_CODE:
370 return convert_error(zdev, reply);
371 case TYPE80_RSP_CODE:
372 return convert_type80(zdev, reply,
373 outputdata, outputdatalength);
374 default: /* Unknown response type, this should NEVER EVER happen */
375 zdev->online = 0;
376 return -EAGAIN; /* repeat the request on a different device. */
377 }
378}
379
380/**
381 * This function is called from the AP bus code after a crypto request
382 * "msg" has finished with the reply message "reply".
383 * It is called from tasklet context.
384 * @ap_dev: pointer to the AP device
385 * @msg: pointer to the AP message
386 * @reply: pointer to the AP reply message
387 */
388static void zcrypt_cex2a_receive(struct ap_device *ap_dev,
389 struct ap_message *msg,
390 struct ap_message *reply)
391{
392 static struct error_hdr error_reply = {
393 .type = TYPE82_RSP_CODE,
394 .reply_code = REP82_ERROR_MACHINE_FAILURE,
395 };
396 struct type80_hdr *t80h;
397 int length;
398
399 /* Copy the reply message to the request message buffer. */
400 if (IS_ERR(reply)) {
401 memcpy(msg->message, &error_reply, sizeof(error_reply));
402 goto out;
403 }
404 t80h = reply->message;
405 if (t80h->type == TYPE80_RSP_CODE) {
406 if (ap_dev->device_type == AP_DEVICE_TYPE_CEX2A)
407 length = min_t(int,
408 CEX2A_MAX_RESPONSE_SIZE, t80h->len);
409 else
410 length = min_t(int,
411 CEX3A_MAX_RESPONSE_SIZE, t80h->len);
412 memcpy(msg->message, reply->message, length);
413 } else
414 memcpy(msg->message, reply->message, sizeof(error_reply));
415out:
416 complete((struct completion *) msg->private);
417}
418
419static atomic_t zcrypt_step = ATOMIC_INIT(0);
420
421/**
422 * The request distributor calls this function if it picked the CEX2A
423 * device to handle a modexpo request.
424 * @zdev: pointer to zcrypt_device structure that identifies the
425 * CEX2A device to the request distributor
426 * @mex: pointer to the modexpo request buffer
427 */
428static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev,
429 struct ica_rsa_modexpo *mex)
430{
431 struct ap_message ap_msg;
432 struct completion work;
433 int rc;
434
435 ap_init_message(&ap_msg);
436 if (zdev->user_space_type == ZCRYPT_CEX2A)
437 ap_msg.message = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE,
438 GFP_KERNEL);
439 else
440 ap_msg.message = kmalloc(MSGTYPE50_CRB3_MAX_MSG_SIZE,
441 GFP_KERNEL);
442 if (!ap_msg.message)
443 return -ENOMEM;
444 ap_msg.receive = zcrypt_cex2a_receive;
445 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
446 atomic_inc_return(&zcrypt_step);
447 ap_msg.private = &work;
448 rc = ICAMEX_msg_to_type50MEX_msg(zdev, &ap_msg, mex);
449 if (rc)
450 goto out_free;
451 init_completion(&work);
452 ap_queue_message(zdev->ap_dev, &ap_msg);
453 rc = wait_for_completion_interruptible(&work);
454 if (rc == 0)
455 rc = convert_response(zdev, &ap_msg, mex->outputdata,
456 mex->outputdatalength);
457 else
458 /* Signal pending. */
459 ap_cancel_message(zdev->ap_dev, &ap_msg);
460out_free:
461 kfree(ap_msg.message);
462 return rc;
463}
464
465/**
466 * The request distributor calls this function if it picked the CEX2A
467 * device to handle a modexpo_crt request.
468 * @zdev: pointer to zcrypt_device structure that identifies the
469 * CEX2A device to the request distributor
470 * @crt: pointer to the modexpoc_crt request buffer
471 */
472static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev,
473 struct ica_rsa_modexpo_crt *crt)
474{
475 struct ap_message ap_msg;
476 struct completion work;
477 int rc;
478
479 ap_init_message(&ap_msg);
480 if (zdev->user_space_type == ZCRYPT_CEX2A)
481 ap_msg.message = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE,
482 GFP_KERNEL);
483 else
484 ap_msg.message = kmalloc(MSGTYPE50_CRB3_MAX_MSG_SIZE,
485 GFP_KERNEL);
486 if (!ap_msg.message)
487 return -ENOMEM;
488 ap_msg.receive = zcrypt_cex2a_receive;
489 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
490 atomic_inc_return(&zcrypt_step);
491 ap_msg.private = &work;
492 rc = ICACRT_msg_to_type50CRT_msg(zdev, &ap_msg, crt);
493 if (rc)
494 goto out_free;
495 init_completion(&work);
496 ap_queue_message(zdev->ap_dev, &ap_msg);
497 rc = wait_for_completion_interruptible(&work);
498 if (rc == 0)
499 rc = convert_response(zdev, &ap_msg, crt->outputdata,
500 crt->outputdatalength);
501 else
502 /* Signal pending. */
503 ap_cancel_message(zdev->ap_dev, &ap_msg);
504out_free:
505 kfree(ap_msg.message);
506 return rc;
507}
508
509/**
510 * The crypto operations for message type 50.
511 */
512static struct zcrypt_ops zcrypt_msgtype50_ops = {
513 .rsa_modexpo = zcrypt_cex2a_modexpo,
514 .rsa_modexpo_crt = zcrypt_cex2a_modexpo_crt,
515 .owner = THIS_MODULE,
516 .variant = MSGTYPE50_VARIANT_DEFAULT,
517};
518
519int __init zcrypt_msgtype50_init(void)
520{
521 zcrypt_msgtype_register(&zcrypt_msgtype50_ops);
522 return 0;
523}
524
525void __exit zcrypt_msgtype50_exit(void)
526{
527 zcrypt_msgtype_unregister(&zcrypt_msgtype50_ops);
528}
529
530module_init(zcrypt_msgtype50_init);
531module_exit(zcrypt_msgtype50_exit);
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.h b/drivers/s390/crypto/zcrypt_msgtype50.h
new file mode 100644
index 000000000000..e56dc72c7733
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_msgtype50.h
@@ -0,0 +1,39 @@
1/*
2 * zcrypt 2.1.0
3 *
4 * Copyright IBM Corp. 2001, 2012
5 * Author(s): Robert Burroughs
6 * Eric Rossman (edrossma@us.ibm.com)
7 *
8 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
9 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
10 * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 */
26
27#ifndef _ZCRYPT_MSGTYPE50_H_
28#define _ZCRYPT_MSGTYPE50_H_
29
30#define MSGTYPE50_NAME "zcrypt_msgtype50"
31#define MSGTYPE50_VARIANT_DEFAULT 0
32
33#define MSGTYPE50_CRB2_MAX_MSG_SIZE 0x390 /*sizeof(struct type50_crb2_msg)*/
34#define MSGTYPE50_CRB3_MAX_MSG_SIZE 0x710 /*sizeof(struct type50_crb3_msg)*/
35
36int zcrypt_msgtype50_init(void);
37void zcrypt_msgtype50_exit(void);
38
39#endif /* _ZCRYPT_MSGTYPE50_H_ */
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
new file mode 100644
index 000000000000..7d97fa5a26d0
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -0,0 +1,856 @@
1/*
2 * zcrypt 2.1.0
3 *
4 * Copyright IBM Corp. 2001, 2012
5 * Author(s): Robert Burroughs
6 * Eric Rossman (edrossma@us.ibm.com)
7 *
8 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
9 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
10 * Ralph Wuerthner <rwuerthn@de.ibm.com>
11 * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2, or (at your option)
16 * any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 */
27
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/err.h>
31#include <linux/delay.h>
32#include <linux/slab.h>
33#include <linux/atomic.h>
34#include <linux/uaccess.h>
35
36#include "ap_bus.h"
37#include "zcrypt_api.h"
38#include "zcrypt_error.h"
39#include "zcrypt_msgtype6.h"
40#include "zcrypt_cca_key.h"
41
42#define PCIXCC_MIN_MOD_SIZE_OLD 64 /* 512 bits */
43#define PCIXCC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply */
44
45#define CEIL4(x) ((((x)+3)/4)*4)
46
47struct response_type {
48 struct completion work;
49 int type;
50};
51#define PCIXCC_RESPONSE_TYPE_ICA 0
52#define PCIXCC_RESPONSE_TYPE_XCRB 1
53
54MODULE_AUTHOR("IBM Corporation");
55MODULE_DESCRIPTION("Cryptographic Coprocessor (message type 6), " \
56 "Copyright IBM Corp. 2001, 2012");
57MODULE_LICENSE("GPL");
58
59static void zcrypt_msgtype6_receive(struct ap_device *, struct ap_message *,
60 struct ap_message *);
61
62/**
63 * CPRB
64 * Note that all shorts, ints and longs are little-endian.
65 * All pointer fields are 32-bits long, and mean nothing
66 *
67 * A request CPRB is followed by a request_parameter_block.
68 *
69 * The request (or reply) parameter block is organized thus:
70 * function code
71 * VUD block
72 * key block
73 */
74struct CPRB {
75 unsigned short cprb_len; /* CPRB length */
76 unsigned char cprb_ver_id; /* CPRB version id. */
77 unsigned char pad_000; /* Alignment pad byte. */
78 unsigned char srpi_rtcode[4]; /* SRPI return code LELONG */
79 unsigned char srpi_verb; /* SRPI verb type */
80 unsigned char flags; /* flags */
81 unsigned char func_id[2]; /* function id */
82 unsigned char checkpoint_flag; /* */
83 unsigned char resv2; /* reserved */
84 unsigned short req_parml; /* request parameter buffer */
85 /* length 16-bit little endian */
86 unsigned char req_parmp[4]; /* request parameter buffer *
87 * pointer (means nothing: the *
88 * parameter buffer follows *
89 * the CPRB). */
90 unsigned char req_datal[4]; /* request data buffer */
91 /* length ULELONG */
92 unsigned char req_datap[4]; /* request data buffer */
93 /* pointer */
94 unsigned short rpl_parml; /* reply parameter buffer */
95 /* length 16-bit little endian */
96 unsigned char pad_001[2]; /* Alignment pad bytes. ULESHORT */
97 unsigned char rpl_parmp[4]; /* reply parameter buffer *
98 * pointer (means nothing: the *
99 * parameter buffer follows *
100 * the CPRB). */
101 unsigned char rpl_datal[4]; /* reply data buffer len ULELONG */
102 unsigned char rpl_datap[4]; /* reply data buffer */
103 /* pointer */
104 unsigned short ccp_rscode; /* server reason code ULESHORT */
105 unsigned short ccp_rtcode; /* server return code ULESHORT */
106 unsigned char repd_parml[2]; /* replied parameter len ULESHORT*/
107 unsigned char mac_data_len[2]; /* Mac Data Length ULESHORT */
108 unsigned char repd_datal[4]; /* replied data length ULELONG */
109 unsigned char req_pc[2]; /* PC identifier */
110 unsigned char res_origin[8]; /* resource origin */
111 unsigned char mac_value[8]; /* Mac Value */
112 unsigned char logon_id[8]; /* Logon Identifier */
113 unsigned char usage_domain[2]; /* cdx */
114 unsigned char resv3[18]; /* reserved for requestor */
115 unsigned short svr_namel; /* server name length ULESHORT */
116 unsigned char svr_name[8]; /* server name */
117} __packed;
118
119struct function_and_rules_block {
120 unsigned char function_code[2];
121 unsigned short ulen;
122 unsigned char only_rule[8];
123} __packed;
124
125/**
126 * The following is used to initialize the CPRBX passed to the PCIXCC/CEX2C
127 * card in a type6 message. The 3 fields that must be filled in at execution
128 * time are req_parml, rpl_parml and usage_domain.
129 * Everything about this interface is ascii/big-endian, since the
130 * device does *not* have 'Intel inside'.
131 *
132 * The CPRBX is followed immediately by the parm block.
133 * The parm block contains:
134 * - function code ('PD' 0x5044 or 'PK' 0x504B)
135 * - rule block (one of:)
136 * + 0x000A 'PKCS-1.2' (MCL2 'PD')
137 * + 0x000A 'ZERO-PAD' (MCL2 'PK')
138 * + 0x000A 'ZERO-PAD' (MCL3 'PD' or CEX2C 'PD')
139 * + 0x000A 'MRP ' (MCL3 'PK' or CEX2C 'PK')
140 * - VUD block
141 */
142static struct CPRBX static_cprbx = {
143 .cprb_len = 0x00DC,
144 .cprb_ver_id = 0x02,
145 .func_id = {0x54, 0x32},
146};
147
148/**
149 * Convert a ICAMEX message to a type6 MEX message.
150 *
151 * @zdev: crypto device pointer
152 * @ap_msg: pointer to AP message
153 * @mex: pointer to user input data
154 *
155 * Returns 0 on success or -EFAULT.
156 */
157static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_device *zdev,
158 struct ap_message *ap_msg,
159 struct ica_rsa_modexpo *mex)
160{
161 static struct type6_hdr static_type6_hdrX = {
162 .type = 0x06,
163 .offset1 = 0x00000058,
164 .agent_id = {'C', 'A',},
165 .function_code = {'P', 'K'},
166 };
167 static struct function_and_rules_block static_pke_fnr = {
168 .function_code = {'P', 'K'},
169 .ulen = 10,
170 .only_rule = {'M', 'R', 'P', ' ', ' ', ' ', ' ', ' '}
171 };
172 static struct function_and_rules_block static_pke_fnr_MCL2 = {
173 .function_code = {'P', 'K'},
174 .ulen = 10,
175 .only_rule = {'Z', 'E', 'R', 'O', '-', 'P', 'A', 'D'}
176 };
177 struct {
178 struct type6_hdr hdr;
179 struct CPRBX cprbx;
180 struct function_and_rules_block fr;
181 unsigned short length;
182 char text[0];
183 } __packed * msg = ap_msg->message;
184 int size;
185
186 /* VUD.ciphertext */
187 msg->length = mex->inputdatalength + 2;
188 if (copy_from_user(msg->text, mex->inputdata, mex->inputdatalength))
189 return -EFAULT;
190
191 /* Set up key which is located after the variable length text. */
192 size = zcrypt_type6_mex_key_en(mex, msg->text+mex->inputdatalength, 1);
193 if (size < 0)
194 return size;
195 size += sizeof(*msg) + mex->inputdatalength;
196
197 /* message header, cprbx and f&r */
198 msg->hdr = static_type6_hdrX;
199 msg->hdr.ToCardLen1 = size - sizeof(msg->hdr);
200 msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
201
202 msg->cprbx = static_cprbx;
203 msg->cprbx.domain = AP_QID_QUEUE(zdev->ap_dev->qid);
204 msg->cprbx.rpl_msgbl = msg->hdr.FromCardLen1;
205
206 msg->fr = (zdev->user_space_type == ZCRYPT_PCIXCC_MCL2) ?
207 static_pke_fnr_MCL2 : static_pke_fnr;
208
209 msg->cprbx.req_parml = size - sizeof(msg->hdr) - sizeof(msg->cprbx);
210
211 ap_msg->length = size;
212 return 0;
213}
214
215/**
216 * Convert a ICACRT message to a type6 CRT message.
217 *
218 * @zdev: crypto device pointer
219 * @ap_msg: pointer to AP message
220 * @crt: pointer to user input data
221 *
222 * Returns 0 on success or -EFAULT.
223 */
224static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_device *zdev,
225 struct ap_message *ap_msg,
226 struct ica_rsa_modexpo_crt *crt)
227{
228 static struct type6_hdr static_type6_hdrX = {
229 .type = 0x06,
230 .offset1 = 0x00000058,
231 .agent_id = {'C', 'A',},
232 .function_code = {'P', 'D'},
233 };
234 static struct function_and_rules_block static_pkd_fnr = {
235 .function_code = {'P', 'D'},
236 .ulen = 10,
237 .only_rule = {'Z', 'E', 'R', 'O', '-', 'P', 'A', 'D'}
238 };
239
240 static struct function_and_rules_block static_pkd_fnr_MCL2 = {
241 .function_code = {'P', 'D'},
242 .ulen = 10,
243 .only_rule = {'P', 'K', 'C', 'S', '-', '1', '.', '2'}
244 };
245 struct {
246 struct type6_hdr hdr;
247 struct CPRBX cprbx;
248 struct function_and_rules_block fr;
249 unsigned short length;
250 char text[0];
251 } __packed * msg = ap_msg->message;
252 int size;
253
254 /* VUD.ciphertext */
255 msg->length = crt->inputdatalength + 2;
256 if (copy_from_user(msg->text, crt->inputdata, crt->inputdatalength))
257 return -EFAULT;
258
259 /* Set up key which is located after the variable length text. */
260 size = zcrypt_type6_crt_key(crt, msg->text + crt->inputdatalength, 1);
261 if (size < 0)
262 return size;
263 size += sizeof(*msg) + crt->inputdatalength; /* total size of msg */
264
265 /* message header, cprbx and f&r */
266 msg->hdr = static_type6_hdrX;
267 msg->hdr.ToCardLen1 = size - sizeof(msg->hdr);
268 msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
269
270 msg->cprbx = static_cprbx;
271 msg->cprbx.domain = AP_QID_QUEUE(zdev->ap_dev->qid);
272 msg->cprbx.req_parml = msg->cprbx.rpl_msgbl =
273 size - sizeof(msg->hdr) - sizeof(msg->cprbx);
274
275 msg->fr = (zdev->user_space_type == ZCRYPT_PCIXCC_MCL2) ?
276 static_pkd_fnr_MCL2 : static_pkd_fnr;
277
278 ap_msg->length = size;
279 return 0;
280}
281
282/**
283 * Convert a XCRB message to a type6 CPRB message.
284 *
285 * @zdev: crypto device pointer
286 * @ap_msg: pointer to AP message
287 * @xcRB: pointer to user input data
288 *
289 * Returns 0 on success or -EFAULT, -EINVAL.
290 */
291struct type86_fmt2_msg {
292 struct type86_hdr hdr;
293 struct type86_fmt2_ext fmt2;
294} __packed;
295
296static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
297 struct ap_message *ap_msg,
298 struct ica_xcRB *xcRB)
299{
300 static struct type6_hdr static_type6_hdrX = {
301 .type = 0x06,
302 .offset1 = 0x00000058,
303 };
304 struct {
305 struct type6_hdr hdr;
306 struct CPRBX cprbx;
307 } __packed * msg = ap_msg->message;
308
309 int rcblen = CEIL4(xcRB->request_control_blk_length);
310 int replylen;
311 char *req_data = ap_msg->message + sizeof(struct type6_hdr) + rcblen;
312 char *function_code;
313
314 /* length checks */
315 ap_msg->length = sizeof(struct type6_hdr) +
316 CEIL4(xcRB->request_control_blk_length) +
317 xcRB->request_data_length;
318 if (ap_msg->length > MSGTYPE06_MAX_MSG_SIZE)
319 return -EINVAL;
320 replylen = sizeof(struct type86_fmt2_msg) +
321 CEIL4(xcRB->reply_control_blk_length) +
322 xcRB->reply_data_length;
323 if (replylen > MSGTYPE06_MAX_MSG_SIZE)
324 return -EINVAL;
325
326 /* prepare type6 header */
327 msg->hdr = static_type6_hdrX;
328 memcpy(msg->hdr.agent_id , &(xcRB->agent_ID), sizeof(xcRB->agent_ID));
329 msg->hdr.ToCardLen1 = xcRB->request_control_blk_length;
330 if (xcRB->request_data_length) {
331 msg->hdr.offset2 = msg->hdr.offset1 + rcblen;
332 msg->hdr.ToCardLen2 = xcRB->request_data_length;
333 }
334 msg->hdr.FromCardLen1 = xcRB->reply_control_blk_length;
335 msg->hdr.FromCardLen2 = xcRB->reply_data_length;
336
337 /* prepare CPRB */
338 if (copy_from_user(&(msg->cprbx), xcRB->request_control_blk_addr,
339 xcRB->request_control_blk_length))
340 return -EFAULT;
341 if (msg->cprbx.cprb_len + sizeof(msg->hdr.function_code) >
342 xcRB->request_control_blk_length)
343 return -EINVAL;
344 function_code = ((unsigned char *)&msg->cprbx) + msg->cprbx.cprb_len;
345 memcpy(msg->hdr.function_code, function_code,
346 sizeof(msg->hdr.function_code));
347
348 if (memcmp(function_code, "US", 2) == 0)
349 ap_msg->special = 1;
350 else
351 ap_msg->special = 0;
352
353 /* copy data block */
354 if (xcRB->request_data_length &&
355 copy_from_user(req_data, xcRB->request_data_address,
356 xcRB->request_data_length))
357 return -EFAULT;
358 return 0;
359}
360
361/**
362 * Copy results from a type 86 ICA reply message back to user space.
363 *
364 * @zdev: crypto device pointer
365 * @reply: reply AP message.
366 * @data: pointer to user output data
367 * @length: size of user output data
368 *
369 * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
370 */
371struct type86x_reply {
372 struct type86_hdr hdr;
373 struct type86_fmt2_ext fmt2;
374 struct CPRBX cprbx;
375 unsigned char pad[4]; /* 4 byte function code/rules block ? */
376 unsigned short length;
377 char text[0];
378} __packed;
379
/*
 * Copy an ICA result out of a type 86 reply into the caller's buffer,
 * re-adding padding stripped by the card where needed.
 * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
 */
static int convert_type86_ica(struct zcrypt_device *zdev,
			      struct ap_message *reply,
			      char __user *outputdata,
			      unsigned int outputdatalength)
{
	/*
	 * Fixed non-zero filler bytes used to 'restore' the padding the
	 * card removed; starts with a 0x00 0x02 prefix (looks like PKCS#1
	 * block-type-2 style padding -- see 'PKCS-1.2' rule note below).
	 */
	static unsigned char static_pad[] = {
		0x00, 0x02,
		0x1B, 0x7B, 0x5D, 0xB5, 0x75, 0x01, 0x3D, 0xFD,
		0x8D, 0xD1, 0xC7, 0x03, 0x2D, 0x09, 0x23, 0x57,
		0x89, 0x49, 0xB9, 0x3F, 0xBB, 0x99, 0x41, 0x5B,
		0x75, 0x21, 0x7B, 0x9D, 0x3B, 0x6B, 0x51, 0x39,
		0xBB, 0x0D, 0x35, 0xB9, 0x89, 0x0F, 0x93, 0xA5,
		0x0B, 0x47, 0xF1, 0xD3, 0xBB, 0xCB, 0xF1, 0x9D,
		0x23, 0x73, 0x71, 0xFF, 0xF3, 0xF5, 0x45, 0xFB,
		0x61, 0x29, 0x23, 0xFD, 0xF1, 0x29, 0x3F, 0x7F,
		0x17, 0xB7, 0x1B, 0xA9, 0x19, 0xBD, 0x57, 0xA9,
		0xD7, 0x95, 0xA3, 0xCB, 0xED, 0x1D, 0xDB, 0x45,
		0x7D, 0x11, 0xD1, 0x51, 0x1B, 0xED, 0x71, 0xE9,
		0xB1, 0xD1, 0xAB, 0xAB, 0x21, 0x2B, 0x1B, 0x9F,
		0x3B, 0x9F, 0xF7, 0xF7, 0xBD, 0x63, 0xEB, 0xAD,
		0xDF, 0xB3, 0x6F, 0x5B, 0xDB, 0x8D, 0xA9, 0x5D,
		0xE3, 0x7D, 0x77, 0x49, 0x47, 0xF5, 0xA7, 0xFD,
		0xAB, 0x2F, 0x27, 0x35, 0x77, 0xD3, 0x49, 0xC9,
		0x09, 0xEB, 0xB1, 0xF9, 0xBF, 0x4B, 0xCB, 0x2B,
		0xEB, 0xEB, 0x05, 0xFF, 0x7D, 0xC7, 0x91, 0x8B,
		0x09, 0x83, 0xB9, 0xB9, 0x69, 0x33, 0x39, 0x6B,
		0x79, 0x75, 0x19, 0xBF, 0xBB, 0x07, 0x1D, 0xBD,
		0x29, 0xBF, 0x39, 0x95, 0x93, 0x1D, 0x35, 0xC7,
		0xC9, 0x4D, 0xE5, 0x97, 0x0B, 0x43, 0x9B, 0xF1,
		0x16, 0x93, 0x03, 0x1F, 0xA5, 0xFB, 0xDB, 0xF3,
		0x27, 0x4F, 0x27, 0x61, 0x05, 0x1F, 0xB9, 0x23,
		0x2F, 0xC3, 0x81, 0xA9, 0x23, 0x71, 0x55, 0x55,
		0xEB, 0xED, 0x41, 0xE5, 0xF3, 0x11, 0xF1, 0x43,
		0x69, 0x03, 0xBD, 0x0B, 0x37, 0x0F, 0x51, 0x8F,
		0x0B, 0xB5, 0x89, 0x5B, 0x67, 0xA9, 0xD9, 0x4F,
		0x01, 0xF9, 0x21, 0x77, 0x37, 0x73, 0x79, 0xC5,
		0x7F, 0x51, 0xC1, 0xCF, 0x97, 0xA1, 0x75, 0xAD,
		0x35, 0x9D, 0xD3, 0xD3, 0xA7, 0x9D, 0x5D, 0x41,
		0x6F, 0x65, 0x1B, 0xCF, 0xA9, 0x87, 0x91, 0x09
	};
	struct type86x_reply *msg = reply->message;
	unsigned short service_rc, service_rs;
	unsigned int reply_len, pad_len;
	char *data;

	service_rc = msg->cprbx.ccp_rtcode;
	if (unlikely(service_rc != 0)) {
		service_rs = msg->cprbx.ccp_rscode;
		/* Known rc/rs combinations that indicate a bad request. */
		if (service_rc == 8 && service_rs == 66)
			return -EINVAL;
		if (service_rc == 8 && service_rs == 65)
			return -EINVAL;
		if (service_rc == 8 && service_rs == 770)
			return -EINVAL;
		if (service_rc == 8 && service_rs == 783) {
			/* Card rejected the modulus size: fall back to the
			 * old minimum and have the caller retry. */
			zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD;
			return -EAGAIN;
		}
		if (service_rc == 12 && service_rs == 769)
			return -EINVAL;
		if (service_rc == 8 && service_rs == 72)
			return -EINVAL;
		/* Anything else: take the device offline and retry elsewhere. */
		zdev->online = 0;
		return -EAGAIN;	/* repeat the request on a different device. */
	}
	data = msg->text;
	/* msg->length includes its own two bytes. */
	reply_len = msg->length - 2;
	if (reply_len > outputdatalength)
		return -EINVAL;
	/*
	 * For all encipher requests, the length of the ciphertext (reply_len)
	 * will always equal the modulus length. For MEX decipher requests
	 * the output needs to get padded. Minimum pad size is 10.
	 *
	 * Currently, the cases where padding will be added is for:
	 * - PCIXCC_MCL2 using a CRT form token (since PKD didn't support
	 *   ZERO-PAD and CRT is only supported for PKD requests)
	 * - PCICC, always
	 */
	pad_len = outputdatalength - reply_len;
	if (pad_len > 0) {
		if (pad_len < 10)
			return -EINVAL;
		/* 'restore' padding left in the PCICC/PCIXCC card. */
		/* Layout: pad_len-1 filler bytes, one 0x00 separator, data. */
		if (copy_to_user(outputdata, static_pad, pad_len - 1))
			return -EFAULT;
		if (put_user(0, outputdata + pad_len - 1))
			return -EFAULT;
	}
	/* Copy the crypto response to user space. */
	if (copy_to_user(outputdata + pad_len, data, reply_len))
		return -EFAULT;
	return 0;
}
474
475/**
476 * Copy results from a type 86 XCRB reply message back to user space.
477 *
478 * @zdev: crypto device pointer
479 * @reply: reply AP message.
480 * @xcRB: pointer to XCRB
481 *
482 * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
483 */
484static int convert_type86_xcrb(struct zcrypt_device *zdev,
485 struct ap_message *reply,
486 struct ica_xcRB *xcRB)
487{
488 struct type86_fmt2_msg *msg = reply->message;
489 char *data = reply->message;
490
491 /* Copy CPRB to user */
492 if (copy_to_user(xcRB->reply_control_blk_addr,
493 data + msg->fmt2.offset1, msg->fmt2.count1))
494 return -EFAULT;
495 xcRB->reply_control_blk_length = msg->fmt2.count1;
496
497 /* Copy data buffer to user */
498 if (msg->fmt2.count2)
499 if (copy_to_user(xcRB->reply_data_addr,
500 data + msg->fmt2.offset2, msg->fmt2.count2))
501 return -EFAULT;
502 xcRB->reply_data_length = msg->fmt2.count2;
503 return 0;
504}
505
506static int convert_type86_rng(struct zcrypt_device *zdev,
507 struct ap_message *reply,
508 char *buffer)
509{
510 struct {
511 struct type86_hdr hdr;
512 struct type86_fmt2_ext fmt2;
513 struct CPRBX cprbx;
514 } __packed * msg = reply->message;
515 char *data = reply->message;
516
517 if (msg->cprbx.ccp_rtcode != 0 || msg->cprbx.ccp_rscode != 0)
518 return -EINVAL;
519 memcpy(buffer, data + msg->fmt2.offset2, msg->fmt2.count2);
520 return msg->fmt2.count2;
521}
522
/*
 * Dispatch an ICA reply by its response type byte; converts errors or
 * delegates to convert_type86_ica() for good type 86 replies.
 * Returns 0 or a negative errno; -EAGAIN asks for a retry elsewhere.
 */
static int convert_response_ica(struct zcrypt_device *zdev,
				struct ap_message *reply,
				char __user *outputdata,
				unsigned int outputdatalength)
{
	struct type86x_reply *msg = reply->message;

	/* Response type byte is the second byte in the response. */
	switch (((unsigned char *) reply->message)[1]) {
	case TYPE82_RSP_CODE:
	case TYPE88_RSP_CODE:
		return convert_error(zdev, reply);
	case TYPE86_RSP_CODE:
		/* Reason code 0x14f with a large output: clamp the
		 * advertised exponent bit length to 17 and retry once
		 * (presumably 'exponent too big' on this card -- the
		 * exact firmware semantics are not visible here). */
		if (msg->cprbx.ccp_rtcode &&
		   (msg->cprbx.ccp_rscode == 0x14f) &&
		   (outputdatalength > 256)) {
			if (zdev->max_exp_bit_length <= 17) {
				zdev->max_exp_bit_length = 17;
				return -EAGAIN;
			} else
				return -EINVAL;
		}
		if (msg->hdr.reply_code)
			return convert_error(zdev, reply);
		if (msg->cprbx.cprb_ver_id == 0x02)
			return convert_type86_ica(zdev, reply,
						  outputdata, outputdatalength);
		/* Fall through, no break, incorrect cprb version is an unknown
		 * response */
	default: /* Unknown response type, this should NEVER EVER happen */
		zdev->online = 0;
		return -EAGAIN; /* repeat the request on a different device. */
	}
}
557
/*
 * Dispatch an XCRB reply by its response type byte; fills in xcRB->status
 * on failure and delegates to convert_type86_xcrb() for good replies.
 */
static int convert_response_xcrb(struct zcrypt_device *zdev,
				 struct ap_message *reply,
				 struct ica_xcRB *xcRB)
{
	struct type86x_reply *msg = reply->message;

	/* Response type byte is the second byte in the response. */
	switch (((unsigned char *) reply->message)[1]) {
	case TYPE82_RSP_CODE:
	case TYPE88_RSP_CODE:
		xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
		return convert_error(zdev, reply);
	case TYPE86_RSP_CODE:
		if (msg->hdr.reply_code) {
			/* Pass the card's final status word (apfs) back. */
			memcpy(&(xcRB->status), msg->fmt2.apfs, sizeof(u32));
			return convert_error(zdev, reply);
		}
		if (msg->cprbx.cprb_ver_id == 0x02)
			return convert_type86_xcrb(zdev, reply, xcRB);
		/* Fall through, no break, incorrect cprb version is an unknown
		 * response */
	default: /* Unknown response type, this should NEVER EVER happen */
		xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
		zdev->online = 0;
		return -EAGAIN; /* repeat the request on a different device. */
	}
}
585
/*
 * Dispatch an RNG reply by its header type; delegates to
 * convert_type86_rng() for good type 86 replies.
 */
static int convert_response_rng(struct zcrypt_device *zdev,
				 struct ap_message *reply,
				 char *data)
{
	struct type86x_reply *msg = reply->message;

	switch (msg->hdr.type) {
	case TYPE82_RSP_CODE:
	case TYPE88_RSP_CODE:
		return -EINVAL;
	case TYPE86_RSP_CODE:
		if (msg->hdr.reply_code)
			return -EINVAL;
		if (msg->cprbx.cprb_ver_id == 0x02)
			return convert_type86_rng(zdev, reply, data);
		/* Fall through, no break, incorrect cprb version is an unknown
		 * response */
	default: /* Unknown response type, this should NEVER EVER happen */
		zdev->online = 0;
		return -EAGAIN; /* repeat the request on a different device. */
	}
}
608
/**
 * This function is called from the AP bus code after a crypto request
 * "msg" has finished with the reply message "reply".
 * It is called from tasklet context.
 * @ap_dev: pointer to the AP device
 * @msg: pointer to the AP message
 * @reply: pointer to the AP reply message
 */
static void zcrypt_msgtype6_receive(struct ap_device *ap_dev,
				  struct ap_message *msg,
				  struct ap_message *reply)
{
	static struct error_hdr error_reply = {
		.type = TYPE82_RSP_CODE,
		.reply_code = REP82_ERROR_MACHINE_FAILURE,
	};
	struct response_type *resp_type =
		(struct response_type *) msg->private;
	struct type86x_reply *t86r;
	int length;

	/* Copy the reply message to the request message buffer. */
	if (IS_ERR(reply)) {
		/* AP bus signalled failure: fake a machine-failure reply. */
		memcpy(msg->message, &error_reply, sizeof(error_reply));
		goto out;
	}
	t86r = reply->message;
	if (t86r->hdr.type == TYPE86_RSP_CODE &&
	    t86r->cprbx.cprb_ver_id == 0x02) {
		switch (resp_type->type) {
		case PCIXCC_RESPONSE_TYPE_ICA:
			/* Fixed part plus variable text (length includes
			 * its own 2 bytes); clamp to the buffer size. */
			length = sizeof(struct type86x_reply)
				+ t86r->length - 2;
			length = min(PCIXCC_MAX_ICA_RESPONSE_SIZE, length);
			memcpy(msg->message, reply->message, length);
			break;
		case PCIXCC_RESPONSE_TYPE_XCRB:
			/* Everything up to the end of the data block. */
			length = t86r->fmt2.offset2 + t86r->fmt2.count2;
			length = min(MSGTYPE06_MAX_MSG_SIZE, length);
			memcpy(msg->message, reply->message, length);
			break;
		default:
			/* Unknown request type: report machine failure. */
			memcpy(msg->message, &error_reply,
			       sizeof(error_reply));
		}
	} else
		/* Error reply: only the small error header matters. */
		memcpy(msg->message, reply->message, sizeof(error_reply));
out:
	/* Wake the requester sleeping in zcrypt_msgtype6_*(). */
	complete(&(resp_type->work));
}
659
/* Monotonic counter combined with current->pid to build a unique
 * program-supplied message id (psmid) per request. */
static atomic_t zcrypt_step = ATOMIC_INIT(0);
661
662/**
663 * The request distributor calls this function if it picked the PCIXCC/CEX2C
664 * device to handle a modexpo request.
665 * @zdev: pointer to zcrypt_device structure that identifies the
666 * PCIXCC/CEX2C device to the request distributor
667 * @mex: pointer to the modexpo request buffer
668 */
669static long zcrypt_msgtype6_modexpo(struct zcrypt_device *zdev,
670 struct ica_rsa_modexpo *mex)
671{
672 struct ap_message ap_msg;
673 struct response_type resp_type = {
674 .type = PCIXCC_RESPONSE_TYPE_ICA,
675 };
676 int rc;
677
678 ap_init_message(&ap_msg);
679 ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
680 if (!ap_msg.message)
681 return -ENOMEM;
682 ap_msg.receive = zcrypt_msgtype6_receive;
683 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
684 atomic_inc_return(&zcrypt_step);
685 ap_msg.private = &resp_type;
686 rc = ICAMEX_msg_to_type6MEX_msgX(zdev, &ap_msg, mex);
687 if (rc)
688 goto out_free;
689 init_completion(&resp_type.work);
690 ap_queue_message(zdev->ap_dev, &ap_msg);
691 rc = wait_for_completion_interruptible(&resp_type.work);
692 if (rc == 0)
693 rc = convert_response_ica(zdev, &ap_msg, mex->outputdata,
694 mex->outputdatalength);
695 else
696 /* Signal pending. */
697 ap_cancel_message(zdev->ap_dev, &ap_msg);
698out_free:
699 free_page((unsigned long) ap_msg.message);
700 return rc;
701}
702
703/**
704 * The request distributor calls this function if it picked the PCIXCC/CEX2C
705 * device to handle a modexpo_crt request.
706 * @zdev: pointer to zcrypt_device structure that identifies the
707 * PCIXCC/CEX2C device to the request distributor
708 * @crt: pointer to the modexpoc_crt request buffer
709 */
710static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_device *zdev,
711 struct ica_rsa_modexpo_crt *crt)
712{
713 struct ap_message ap_msg;
714 struct response_type resp_type = {
715 .type = PCIXCC_RESPONSE_TYPE_ICA,
716 };
717 int rc;
718
719 ap_init_message(&ap_msg);
720 ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
721 if (!ap_msg.message)
722 return -ENOMEM;
723 ap_msg.receive = zcrypt_msgtype6_receive;
724 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
725 atomic_inc_return(&zcrypt_step);
726 ap_msg.private = &resp_type;
727 rc = ICACRT_msg_to_type6CRT_msgX(zdev, &ap_msg, crt);
728 if (rc)
729 goto out_free;
730 init_completion(&resp_type.work);
731 ap_queue_message(zdev->ap_dev, &ap_msg);
732 rc = wait_for_completion_interruptible(&resp_type.work);
733 if (rc == 0)
734 rc = convert_response_ica(zdev, &ap_msg, crt->outputdata,
735 crt->outputdatalength);
736 else
737 /* Signal pending. */
738 ap_cancel_message(zdev->ap_dev, &ap_msg);
739out_free:
740 free_page((unsigned long) ap_msg.message);
741 return rc;
742}
743
744/**
745 * The request distributor calls this function if it picked the PCIXCC/CEX2C
746 * device to handle a send_cprb request.
747 * @zdev: pointer to zcrypt_device structure that identifies the
748 * PCIXCC/CEX2C device to the request distributor
749 * @xcRB: pointer to the send_cprb request buffer
750 */
751static long zcrypt_msgtype6_send_cprb(struct zcrypt_device *zdev,
752 struct ica_xcRB *xcRB)
753{
754 struct ap_message ap_msg;
755 struct response_type resp_type = {
756 .type = PCIXCC_RESPONSE_TYPE_XCRB,
757 };
758 int rc;
759
760 ap_init_message(&ap_msg);
761 ap_msg.message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
762 if (!ap_msg.message)
763 return -ENOMEM;
764 ap_msg.receive = zcrypt_msgtype6_receive;
765 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
766 atomic_inc_return(&zcrypt_step);
767 ap_msg.private = &resp_type;
768 rc = XCRB_msg_to_type6CPRB_msgX(zdev, &ap_msg, xcRB);
769 if (rc)
770 goto out_free;
771 init_completion(&resp_type.work);
772 ap_queue_message(zdev->ap_dev, &ap_msg);
773 rc = wait_for_completion_interruptible(&resp_type.work);
774 if (rc == 0)
775 rc = convert_response_xcrb(zdev, &ap_msg, xcRB);
776 else
777 /* Signal pending. */
778 ap_cancel_message(zdev->ap_dev, &ap_msg);
779out_free:
780 kzfree(ap_msg.message);
781 return rc;
782}
783
784/**
785 * The request distributor calls this function if it picked the PCIXCC/CEX2C
786 * device to generate random data.
787 * @zdev: pointer to zcrypt_device structure that identifies the
788 * PCIXCC/CEX2C device to the request distributor
789 * @buffer: pointer to a memory page to return random data
790 */
791
792static long zcrypt_msgtype6_rng(struct zcrypt_device *zdev,
793 char *buffer)
794{
795 struct ap_message ap_msg;
796 struct response_type resp_type = {
797 .type = PCIXCC_RESPONSE_TYPE_XCRB,
798 };
799 int rc;
800
801 ap_init_message(&ap_msg);
802 ap_msg.message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
803 if (!ap_msg.message)
804 return -ENOMEM;
805 ap_msg.receive = zcrypt_msgtype6_receive;
806 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
807 atomic_inc_return(&zcrypt_step);
808 ap_msg.private = &resp_type;
809 rng_type6CPRB_msgX(zdev->ap_dev, &ap_msg, ZCRYPT_RNG_BUFFER_SIZE);
810 init_completion(&resp_type.work);
811 ap_queue_message(zdev->ap_dev, &ap_msg);
812 rc = wait_for_completion_interruptible(&resp_type.work);
813 if (rc == 0)
814 rc = convert_response_rng(zdev, &ap_msg, buffer);
815 else
816 /* Signal pending. */
817 ap_cancel_message(zdev->ap_dev, &ap_msg);
818 kfree(ap_msg.message);
819 return rc;
820}
821
/**
 * The crypto operations for a PCIXCC/CEX2C card.
 */
/* Variant without the random-number-generation callback. */
static struct zcrypt_ops zcrypt_msgtype6_norng_ops = {
	.owner = THIS_MODULE,
	.variant = MSGTYPE06_VARIANT_NORNG,
	.rsa_modexpo = zcrypt_msgtype6_modexpo,
	.rsa_modexpo_crt = zcrypt_msgtype6_modexpo_crt,
	.send_cprb = zcrypt_msgtype6_send_cprb,
};
832
/* Default variant: additionally provides the rng callback. */
static struct zcrypt_ops zcrypt_msgtype6_ops = {
	.owner = THIS_MODULE,
	.variant = MSGTYPE06_VARIANT_DEFAULT,
	.rsa_modexpo = zcrypt_msgtype6_modexpo,
	.rsa_modexpo_crt = zcrypt_msgtype6_modexpo_crt,
	.send_cprb = zcrypt_msgtype6_send_cprb,
	.rng = zcrypt_msgtype6_rng,
};
841
/* Register both msgtype6 variants with the zcrypt core. Always succeeds. */
int __init zcrypt_msgtype6_init(void)
{
	zcrypt_msgtype_register(&zcrypt_msgtype6_norng_ops);
	zcrypt_msgtype_register(&zcrypt_msgtype6_ops);
	return 0;
}
848
/* Unregister both msgtype6 variants on module removal. */
void __exit zcrypt_msgtype6_exit(void)
{
	zcrypt_msgtype_unregister(&zcrypt_msgtype6_norng_ops);
	zcrypt_msgtype_unregister(&zcrypt_msgtype6_ops);
}

module_init(zcrypt_msgtype6_init);
module_exit(zcrypt_msgtype6_exit);
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.h b/drivers/s390/crypto/zcrypt_msgtype6.h
new file mode 100644
index 000000000000..1e500d3c0735
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_msgtype6.h
@@ -0,0 +1,169 @@
1/*
2 * zcrypt 2.1.0
3 *
4 * Copyright IBM Corp. 2001, 2012
5 * Author(s): Robert Burroughs
6 * Eric Rossman (edrossma@us.ibm.com)
7 *
8 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
9 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
10 * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 */
26
27#ifndef _ZCRYPT_MSGTYPE6_H_
28#define _ZCRYPT_MSGTYPE6_H_
29
30#include <asm/zcrypt.h>
31
32#define MSGTYPE06_NAME "zcrypt_msgtype6"
33#define MSGTYPE06_VARIANT_DEFAULT 0
34#define MSGTYPE06_VARIANT_NORNG 1
35
36#define MSGTYPE06_MAX_MSG_SIZE (12*1024)
37
/**
 * The type 6 message family is associated with PCICC or PCIXCC cards.
 *
 * It contains a message header followed by a CPRB, both of which
 * are described below.
 *
 * Note that all reserved fields must be zeroes.
 *
 * The header is 0x58 (88) bytes, which is why offset1 (offset to the
 * CPRB) is always 0x00000058.
 */
struct type6_hdr {
	unsigned char reserved1;	/* 0x00				*/
	unsigned char type;		/* 0x06				*/
	unsigned char reserved2[2];	/* 0x0000			*/
	unsigned char right[4];		/* 0x00000000			*/
	unsigned char reserved3[2];	/* 0x0000			*/
	unsigned char reserved4[2];	/* 0x0000			*/
	unsigned char apfs[4];		/* 0x00000000			*/
	unsigned int  offset1;		/* 0x00000058 (offset to CPRB)	*/
	unsigned int  offset2;		/* 0x00000000			*/
	unsigned int  offset3;		/* 0x00000000			*/
	unsigned int  offset4;		/* 0x00000000			*/
	unsigned char agent_id[16];	/* PCICC:			*/
					/*    0x0100			*/
					/*    0x4343412d4150504c202020	*/
					/*    0x010101			*/
					/* PCIXCC:			*/
					/*    0x4341000000000000	*/
					/*    0x0000000000000000	*/
	unsigned char rqid[2];		/* rqid.  internal to 603	*/
	unsigned char reserved5[2];	/* 0x0000			*/
	unsigned char function_code[2];	/* for PKD, 0x5044 (ascii 'PD')	*/
	unsigned char reserved6[2];	/* 0x0000			*/
	unsigned int  ToCardLen1;	/* (request CPRB len + 3) & -4	*/
	unsigned int  ToCardLen2;	/* db len 0x00000000 for PKD	*/
	unsigned int  ToCardLen3;	/* 0x00000000			*/
	unsigned int  ToCardLen4;	/* 0x00000000			*/
	unsigned int  FromCardLen1;	/* response buffer length	*/
	unsigned int  FromCardLen2;	/* db len 0x00000000 for PKD	*/
	unsigned int  FromCardLen3;	/* 0x00000000			*/
	unsigned int  FromCardLen4;	/* 0x00000000			*/
} __packed;
78
/**
 * The type 86 message family is associated with PCICC and PCIXCC cards.
 *
 * It contains a message header followed by a CPRB. The CPRB is
 * the same as the request CPRB, which is described above.
 *
 * If format is 1, an error condition exists and no data beyond
 * the 8-byte message header is of interest.
 *
 * The non-error message is shown below.
 *
 * Note that all reserved fields must be zeroes.
 */
/* Fixed 8-byte reply header; 'type' is the second byte, which is why the
 * response dispatchers switch on reply byte [1]. */
struct type86_hdr {
	unsigned char reserved1;	/* 0x00				*/
	unsigned char type;		/* 0x86				*/
	unsigned char format;		/* 0x01 (error) or 0x02 (ok)	*/
	unsigned char reserved2;	/* 0x00				*/
	unsigned char reply_code;	/* reply code (see above)	*/
	unsigned char reserved3[3];	/* 0x000000			*/
} __packed;
100
101#define TYPE86_RSP_CODE 0x86
102#define TYPE86_FMT2 0x02
103
/* Format-2 extension of the type 86 header: offset/count pairs locating
 * the returned CPRB (1) and optional data block (2) within the reply. */
struct type86_fmt2_ext {
	unsigned char	  reserved[4];	/* 0x00000000			*/
	unsigned char	  apfs[4];	/* final status			*/
	unsigned int	  count1;	/* length of CPRB + parameters	*/
	unsigned int	  offset1;	/* offset to CPRB		*/
	unsigned int	  count2;	/* 0x00000000			*/
	unsigned int	  offset2;	/* db offset 0x00000000 for PKD	*/
	unsigned int	  count3;	/* 0x00000000			*/
	unsigned int	  offset3;	/* 0x00000000			*/
	unsigned int	  count4;	/* 0x00000000			*/
	unsigned int	  offset4;	/* 0x00000000			*/
} __packed;
116
117/**
118 * Prepare a type6 CPRB message for random number generation
119 *
120 * @ap_dev: AP device pointer
121 * @ap_msg: pointer to AP message
122 */
123static inline void rng_type6CPRB_msgX(struct ap_device *ap_dev,
124 struct ap_message *ap_msg,
125 unsigned random_number_length)
126{
127 struct {
128 struct type6_hdr hdr;
129 struct CPRBX cprbx;
130 char function_code[2];
131 short int rule_length;
132 char rule[8];
133 short int verb_length;
134 short int key_length;
135 } __packed * msg = ap_msg->message;
136 static struct type6_hdr static_type6_hdrX = {
137 .type = 0x06,
138 .offset1 = 0x00000058,
139 .agent_id = {'C', 'A'},
140 .function_code = {'R', 'L'},
141 .ToCardLen1 = sizeof(*msg) - sizeof(msg->hdr),
142 .FromCardLen1 = sizeof(*msg) - sizeof(msg->hdr),
143 };
144 static struct CPRBX local_cprbx = {
145 .cprb_len = 0x00dc,
146 .cprb_ver_id = 0x02,
147 .func_id = {0x54, 0x32},
148 .req_parml = sizeof(*msg) - sizeof(msg->hdr) -
149 sizeof(msg->cprbx),
150 .rpl_msgbl = sizeof(*msg) - sizeof(msg->hdr),
151 };
152
153 msg->hdr = static_type6_hdrX;
154 msg->hdr.FromCardLen2 = random_number_length,
155 msg->cprbx = local_cprbx;
156 msg->cprbx.rpl_datal = random_number_length,
157 msg->cprbx.domain = AP_QID_QUEUE(ap_dev->qid);
158 memcpy(msg->function_code, msg->hdr.function_code, 0x02);
159 msg->rule_length = 0x0a;
160 memcpy(msg->rule, "RANDOM ", 8);
161 msg->verb_length = 0x02;
162 msg->key_length = 0x02;
163 ap_msg->length = sizeof(*msg);
164}
165
166int zcrypt_msgtype6_init(void);
167void zcrypt_msgtype6_exit(void);
168
169#endif /* _ZCRYPT_MSGTYPE6_H_ */
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index ccb4f8b60c75..c7275e303a0d 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -1,13 +1,14 @@
1/* 1/*
2 * zcrypt 2.1.0 2 * zcrypt 2.1.0
3 * 3 *
4 * Copyright IBM Corp. 2001, 2006 4 * Copyright IBM Corp. 2001, 2012
5 * Author(s): Robert Burroughs 5 * Author(s): Robert Burroughs
6 * Eric Rossman (edrossma@us.ibm.com) 6 * Eric Rossman (edrossma@us.ibm.com)
7 * 7 *
8 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) 8 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
9 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> 9 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
10 * Ralph Wuerthner <rwuerthn@de.ibm.com> 10 * Ralph Wuerthner <rwuerthn@de.ibm.com>
11 * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
11 * 12 *
12 * This program is free software; you can redistribute it and/or modify 13 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by 14 * it under the terms of the GNU General Public License as published by
@@ -35,9 +36,10 @@
35#include "ap_bus.h" 36#include "ap_bus.h"
36#include "zcrypt_api.h" 37#include "zcrypt_api.h"
37#include "zcrypt_error.h" 38#include "zcrypt_error.h"
38#include "zcrypt_pcicc.h" 39#include "zcrypt_msgtype6.h"
39#include "zcrypt_pcixcc.h" 40#include "zcrypt_pcixcc.h"
40#include "zcrypt_cca_key.h" 41#include "zcrypt_cca_key.h"
42#include "zcrypt_msgtype6.h"
41 43
42#define PCIXCC_MIN_MOD_SIZE 16 /* 128 bits */ 44#define PCIXCC_MIN_MOD_SIZE 16 /* 128 bits */
43#define PCIXCC_MIN_MOD_SIZE_OLD 64 /* 512 bits */ 45#define PCIXCC_MIN_MOD_SIZE_OLD 64 /* 512 bits */
@@ -75,14 +77,12 @@ static struct ap_device_id zcrypt_pcixcc_ids[] = {
75 77
76MODULE_DEVICE_TABLE(ap, zcrypt_pcixcc_ids); 78MODULE_DEVICE_TABLE(ap, zcrypt_pcixcc_ids);
77MODULE_AUTHOR("IBM Corporation"); 79MODULE_AUTHOR("IBM Corporation");
78MODULE_DESCRIPTION("PCIXCC Cryptographic Coprocessor device driver, " 80MODULE_DESCRIPTION("PCIXCC Cryptographic Coprocessor device driver, " \
79 "Copyright IBM Corp. 2001, 2006"); 81 "Copyright IBM Corp. 2001, 2012");
80MODULE_LICENSE("GPL"); 82MODULE_LICENSE("GPL");
81 83
82static int zcrypt_pcixcc_probe(struct ap_device *ap_dev); 84static int zcrypt_pcixcc_probe(struct ap_device *ap_dev);
83static void zcrypt_pcixcc_remove(struct ap_device *ap_dev); 85static void zcrypt_pcixcc_remove(struct ap_device *ap_dev);
84static void zcrypt_pcixcc_receive(struct ap_device *, struct ap_message *,
85 struct ap_message *);
86 86
87static struct ap_driver zcrypt_pcixcc_driver = { 87static struct ap_driver zcrypt_pcixcc_driver = {
88 .probe = zcrypt_pcixcc_probe, 88 .probe = zcrypt_pcixcc_probe,
@@ -92,766 +92,6 @@ static struct ap_driver zcrypt_pcixcc_driver = {
92}; 92};
93 93
94/** 94/**
95 * The following is used to initialize the CPRBX passed to the PCIXCC/CEX2C
96 * card in a type6 message. The 3 fields that must be filled in at execution
97 * time are req_parml, rpl_parml and usage_domain.
98 * Everything about this interface is ascii/big-endian, since the
99 * device does *not* have 'Intel inside'.
100 *
101 * The CPRBX is followed immediately by the parm block.
102 * The parm block contains:
103 * - function code ('PD' 0x5044 or 'PK' 0x504B)
104 * - rule block (one of:)
105 * + 0x000A 'PKCS-1.2' (MCL2 'PD')
106 * + 0x000A 'ZERO-PAD' (MCL2 'PK')
107 * + 0x000A 'ZERO-PAD' (MCL3 'PD' or CEX2C 'PD')
108 * + 0x000A 'MRP ' (MCL3 'PK' or CEX2C 'PK')
109 * - VUD block
110 */
111static struct CPRBX static_cprbx = {
112 .cprb_len = 0x00DC,
113 .cprb_ver_id = 0x02,
114 .func_id = {0x54,0x32},
115};
116
117/**
118 * Convert a ICAMEX message to a type6 MEX message.
119 *
120 * @zdev: crypto device pointer
121 * @ap_msg: pointer to AP message
122 * @mex: pointer to user input data
123 *
124 * Returns 0 on success or -EFAULT.
125 */
126static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_device *zdev,
127 struct ap_message *ap_msg,
128 struct ica_rsa_modexpo *mex)
129{
130 static struct type6_hdr static_type6_hdrX = {
131 .type = 0x06,
132 .offset1 = 0x00000058,
133 .agent_id = {'C','A',},
134 .function_code = {'P','K'},
135 };
136 static struct function_and_rules_block static_pke_fnr = {
137 .function_code = {'P','K'},
138 .ulen = 10,
139 .only_rule = {'M','R','P',' ',' ',' ',' ',' '}
140 };
141 static struct function_and_rules_block static_pke_fnr_MCL2 = {
142 .function_code = {'P','K'},
143 .ulen = 10,
144 .only_rule = {'Z','E','R','O','-','P','A','D'}
145 };
146 struct {
147 struct type6_hdr hdr;
148 struct CPRBX cprbx;
149 struct function_and_rules_block fr;
150 unsigned short length;
151 char text[0];
152 } __attribute__((packed)) *msg = ap_msg->message;
153 int size;
154
155 /* VUD.ciphertext */
156 msg->length = mex->inputdatalength + 2;
157 if (copy_from_user(msg->text, mex->inputdata, mex->inputdatalength))
158 return -EFAULT;
159
160 /* Set up key which is located after the variable length text. */
161 size = zcrypt_type6_mex_key_en(mex, msg->text+mex->inputdatalength, 1);
162 if (size < 0)
163 return size;
164 size += sizeof(*msg) + mex->inputdatalength;
165
166 /* message header, cprbx and f&r */
167 msg->hdr = static_type6_hdrX;
168 msg->hdr.ToCardLen1 = size - sizeof(msg->hdr);
169 msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
170
171 msg->cprbx = static_cprbx;
172 msg->cprbx.domain = AP_QID_QUEUE(zdev->ap_dev->qid);
173 msg->cprbx.rpl_msgbl = msg->hdr.FromCardLen1;
174
175 msg->fr = (zdev->user_space_type == ZCRYPT_PCIXCC_MCL2) ?
176 static_pke_fnr_MCL2 : static_pke_fnr;
177
178 msg->cprbx.req_parml = size - sizeof(msg->hdr) - sizeof(msg->cprbx);
179
180 ap_msg->length = size;
181 return 0;
182}
183
184/**
185 * Convert a ICACRT message to a type6 CRT message.
186 *
187 * @zdev: crypto device pointer
188 * @ap_msg: pointer to AP message
189 * @crt: pointer to user input data
190 *
191 * Returns 0 on success or -EFAULT.
192 */
193static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_device *zdev,
194 struct ap_message *ap_msg,
195 struct ica_rsa_modexpo_crt *crt)
196{
197 static struct type6_hdr static_type6_hdrX = {
198 .type = 0x06,
199 .offset1 = 0x00000058,
200 .agent_id = {'C','A',},
201 .function_code = {'P','D'},
202 };
203 static struct function_and_rules_block static_pkd_fnr = {
204 .function_code = {'P','D'},
205 .ulen = 10,
206 .only_rule = {'Z','E','R','O','-','P','A','D'}
207 };
208
209 static struct function_and_rules_block static_pkd_fnr_MCL2 = {
210 .function_code = {'P','D'},
211 .ulen = 10,
212 .only_rule = {'P','K','C','S','-','1','.','2'}
213 };
214 struct {
215 struct type6_hdr hdr;
216 struct CPRBX cprbx;
217 struct function_and_rules_block fr;
218 unsigned short length;
219 char text[0];
220 } __attribute__((packed)) *msg = ap_msg->message;
221 int size;
222
223 /* VUD.ciphertext */
224 msg->length = crt->inputdatalength + 2;
225 if (copy_from_user(msg->text, crt->inputdata, crt->inputdatalength))
226 return -EFAULT;
227
228 /* Set up key which is located after the variable length text. */
229 size = zcrypt_type6_crt_key(crt, msg->text + crt->inputdatalength, 1);
230 if (size < 0)
231 return size;
232 size += sizeof(*msg) + crt->inputdatalength; /* total size of msg */
233
234 /* message header, cprbx and f&r */
235 msg->hdr = static_type6_hdrX;
236 msg->hdr.ToCardLen1 = size - sizeof(msg->hdr);
237 msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
238
239 msg->cprbx = static_cprbx;
240 msg->cprbx.domain = AP_QID_QUEUE(zdev->ap_dev->qid);
241 msg->cprbx.req_parml = msg->cprbx.rpl_msgbl =
242 size - sizeof(msg->hdr) - sizeof(msg->cprbx);
243
244 msg->fr = (zdev->user_space_type == ZCRYPT_PCIXCC_MCL2) ?
245 static_pkd_fnr_MCL2 : static_pkd_fnr;
246
247 ap_msg->length = size;
248 return 0;
249}
250
251/**
252 * Convert a XCRB message to a type6 CPRB message.
253 *
254 * @zdev: crypto device pointer
255 * @ap_msg: pointer to AP message
256 * @xcRB: pointer to user input data
257 *
258 * Returns 0 on success or -EFAULT, -EINVAL.
259 */
260struct type86_fmt2_msg {
261 struct type86_hdr hdr;
262 struct type86_fmt2_ext fmt2;
263} __attribute__((packed));
264
265static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
266 struct ap_message *ap_msg,
267 struct ica_xcRB *xcRB)
268{
269 static struct type6_hdr static_type6_hdrX = {
270 .type = 0x06,
271 .offset1 = 0x00000058,
272 };
273 struct {
274 struct type6_hdr hdr;
275 struct CPRBX cprbx;
276 } __attribute__((packed)) *msg = ap_msg->message;
277
278 int rcblen = CEIL4(xcRB->request_control_blk_length);
279 int replylen;
280 char *req_data = ap_msg->message + sizeof(struct type6_hdr) + rcblen;
281 char *function_code;
282
283 /* length checks */
284 ap_msg->length = sizeof(struct type6_hdr) +
285 CEIL4(xcRB->request_control_blk_length) +
286 xcRB->request_data_length;
287 if (ap_msg->length > PCIXCC_MAX_XCRB_MESSAGE_SIZE)
288 return -EINVAL;
289 replylen = sizeof(struct type86_fmt2_msg) +
290 CEIL4(xcRB->reply_control_blk_length) +
291 xcRB->reply_data_length;
292 if (replylen > PCIXCC_MAX_XCRB_MESSAGE_SIZE)
293 return -EINVAL;
294
295 /* prepare type6 header */
296 msg->hdr = static_type6_hdrX;
297 memcpy(msg->hdr.agent_id , &(xcRB->agent_ID), sizeof(xcRB->agent_ID));
298 msg->hdr.ToCardLen1 = xcRB->request_control_blk_length;
299 if (xcRB->request_data_length) {
300 msg->hdr.offset2 = msg->hdr.offset1 + rcblen;
301 msg->hdr.ToCardLen2 = xcRB->request_data_length;
302 }
303 msg->hdr.FromCardLen1 = xcRB->reply_control_blk_length;
304 msg->hdr.FromCardLen2 = xcRB->reply_data_length;
305
306 /* prepare CPRB */
307 if (copy_from_user(&(msg->cprbx), xcRB->request_control_blk_addr,
308 xcRB->request_control_blk_length))
309 return -EFAULT;
310 if (msg->cprbx.cprb_len + sizeof(msg->hdr.function_code) >
311 xcRB->request_control_blk_length)
312 return -EINVAL;
313 function_code = ((unsigned char *)&msg->cprbx) + msg->cprbx.cprb_len;
314 memcpy(msg->hdr.function_code, function_code, sizeof(msg->hdr.function_code));
315
316 if (memcmp(function_code, "US", 2) == 0)
317 ap_msg->special = 1;
318 else
319 ap_msg->special = 0;
320
321 /* copy data block */
322 if (xcRB->request_data_length &&
323 copy_from_user(req_data, xcRB->request_data_address,
324 xcRB->request_data_length))
325 return -EFAULT;
326 return 0;
327}
328
329/**
330 * Prepare a type6 CPRB message for random number generation
331 *
 * @ap_dev: AP device pointer
 * @ap_msg: pointer to AP message
 * @random_number_length: number of random bytes to request from the card
334 */
335static void rng_type6CPRB_msgX(struct ap_device *ap_dev,
336 struct ap_message *ap_msg,
337 unsigned random_number_length)
338{
339 struct {
340 struct type6_hdr hdr;
341 struct CPRBX cprbx;
342 char function_code[2];
343 short int rule_length;
344 char rule[8];
345 short int verb_length;
346 short int key_length;
347 } __attribute__((packed)) *msg = ap_msg->message;
348 static struct type6_hdr static_type6_hdrX = {
349 .type = 0x06,
350 .offset1 = 0x00000058,
351 .agent_id = {'C', 'A'},
352 .function_code = {'R', 'L'},
353 .ToCardLen1 = sizeof *msg - sizeof(msg->hdr),
354 .FromCardLen1 = sizeof *msg - sizeof(msg->hdr),
355 };
356 static struct CPRBX local_cprbx = {
357 .cprb_len = 0x00dc,
358 .cprb_ver_id = 0x02,
359 .func_id = {0x54, 0x32},
360 .req_parml = sizeof *msg - sizeof(msg->hdr) -
361 sizeof(msg->cprbx),
362 .rpl_msgbl = sizeof *msg - sizeof(msg->hdr),
363 };
364
365 msg->hdr = static_type6_hdrX;
366 msg->hdr.FromCardLen2 = random_number_length,
367 msg->cprbx = local_cprbx;
368 msg->cprbx.rpl_datal = random_number_length,
369 msg->cprbx.domain = AP_QID_QUEUE(ap_dev->qid);
370 memcpy(msg->function_code, msg->hdr.function_code, 0x02);
371 msg->rule_length = 0x0a;
372 memcpy(msg->rule, "RANDOM ", 8);
373 msg->verb_length = 0x02;
374 msg->key_length = 0x02;
375 ap_msg->length = sizeof *msg;
376}
377
378/**
379 * Copy results from a type 86 ICA reply message back to user space.
380 *
381 * @zdev: crypto device pointer
382 * @reply: reply AP message.
 * @outputdata: pointer to user output data
 * @outputdatalength: size of user output data
385 *
386 * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
387 */
/*
 * In-memory layout of a type 86 reply as consumed by the response
 * conversion routines: fixed headers and CPRBX, then a 16-bit length
 * field followed by the payload bytes.
 */
struct type86x_reply {
	struct type86_hdr hdr;
	struct type86_fmt2_ext fmt2;
	struct CPRBX cprbx;
	unsigned char pad[4];	/* 4 byte function code/rules block ? */
	unsigned short length;	/* payload length; apparently counts these two bytes too */
	char text[0];		/* payload starts here (GNU zero-length array) */
} __attribute__((packed));
396
/*
 * Copy results from a type 86 ICA reply message back to user space.
 * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN (repeat the request
 * on another device) in case of an error.
 */
static int convert_type86_ica(struct zcrypt_device *zdev,
			      struct ap_message *reply,
			      char __user *outputdata,
			      unsigned int outputdatalength)
{
	/*
	 * Canned padding pattern written in front of short replies.
	 * Starts with 0x00,0x02 followed by nonzero bytes - presumably a
	 * PKCS#1 block-type-2 style pad; TODO confirm against the spec.
	 */
	static unsigned char static_pad[] = {
		0x00,0x02,
		0x1B,0x7B,0x5D,0xB5,0x75,0x01,0x3D,0xFD,
		0x8D,0xD1,0xC7,0x03,0x2D,0x09,0x23,0x57,
		0x89,0x49,0xB9,0x3F,0xBB,0x99,0x41,0x5B,
		0x75,0x21,0x7B,0x9D,0x3B,0x6B,0x51,0x39,
		0xBB,0x0D,0x35,0xB9,0x89,0x0F,0x93,0xA5,
		0x0B,0x47,0xF1,0xD3,0xBB,0xCB,0xF1,0x9D,
		0x23,0x73,0x71,0xFF,0xF3,0xF5,0x45,0xFB,
		0x61,0x29,0x23,0xFD,0xF1,0x29,0x3F,0x7F,
		0x17,0xB7,0x1B,0xA9,0x19,0xBD,0x57,0xA9,
		0xD7,0x95,0xA3,0xCB,0xED,0x1D,0xDB,0x45,
		0x7D,0x11,0xD1,0x51,0x1B,0xED,0x71,0xE9,
		0xB1,0xD1,0xAB,0xAB,0x21,0x2B,0x1B,0x9F,
		0x3B,0x9F,0xF7,0xF7,0xBD,0x63,0xEB,0xAD,
		0xDF,0xB3,0x6F,0x5B,0xDB,0x8D,0xA9,0x5D,
		0xE3,0x7D,0x77,0x49,0x47,0xF5,0xA7,0xFD,
		0xAB,0x2F,0x27,0x35,0x77,0xD3,0x49,0xC9,
		0x09,0xEB,0xB1,0xF9,0xBF,0x4B,0xCB,0x2B,
		0xEB,0xEB,0x05,0xFF,0x7D,0xC7,0x91,0x8B,
		0x09,0x83,0xB9,0xB9,0x69,0x33,0x39,0x6B,
		0x79,0x75,0x19,0xBF,0xBB,0x07,0x1D,0xBD,
		0x29,0xBF,0x39,0x95,0x93,0x1D,0x35,0xC7,
		0xC9,0x4D,0xE5,0x97,0x0B,0x43,0x9B,0xF1,
		0x16,0x93,0x03,0x1F,0xA5,0xFB,0xDB,0xF3,
		0x27,0x4F,0x27,0x61,0x05,0x1F,0xB9,0x23,
		0x2F,0xC3,0x81,0xA9,0x23,0x71,0x55,0x55,
		0xEB,0xED,0x41,0xE5,0xF3,0x11,0xF1,0x43,
		0x69,0x03,0xBD,0x0B,0x37,0x0F,0x51,0x8F,
		0x0B,0xB5,0x89,0x5B,0x67,0xA9,0xD9,0x4F,
		0x01,0xF9,0x21,0x77,0x37,0x73,0x79,0xC5,
		0x7F,0x51,0xC1,0xCF,0x97,0xA1,0x75,0xAD,
		0x35,0x9D,0xD3,0xD3,0xA7,0x9D,0x5D,0x41,
		0x6F,0x65,0x1B,0xCF,0xA9,0x87,0x91,0x09
	};
	struct type86x_reply *msg = reply->message;
	unsigned short service_rc, service_rs;
	unsigned int reply_len, pad_len;
	char *data;

	/* Map the card's return/reason code pair to an errno. */
	service_rc = msg->cprbx.ccp_rtcode;
	if (unlikely(service_rc != 0)) {
		service_rs = msg->cprbx.ccp_rscode;
		/* Known "bad request" rc/rs combinations -> -EINVAL. */
		if (service_rc == 8 && service_rs == 66)
			return -EINVAL;
		if (service_rc == 8 && service_rs == 65)
			return -EINVAL;
		if (service_rc == 8 && service_rs == 770)
			return -EINVAL;
		if (service_rc == 8 && service_rs == 783) {
			/* Modulus not supported at this microcode level:
			 * fall back to the old minimum and retry elsewhere. */
			zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD;
			return -EAGAIN;
		}
		if (service_rc == 12 && service_rs == 769)
			return -EINVAL;
		if (service_rc == 8 && service_rs == 72)
			return -EINVAL;
		/* Anything else: take this device offline and retry. */
		zdev->online = 0;
		return -EAGAIN;	/* repeat the request on a different device. */
	}
	data = msg->text;
	/* The length field apparently counts its own two bytes. */
	reply_len = msg->length - 2;
	if (reply_len > outputdatalength)
		return -EINVAL;
	/*
	 * For all encipher requests, the length of the ciphertext (reply_len)
	 * will always equal the modulus length. For MEX decipher requests
	 * the output needs to get padded. Minimum pad size is 10.
	 *
	 * Currently, the cases where padding will be added is for:
	 * - PCIXCC_MCL2 using a CRT form token (since PKD didn't support
	 *   ZERO-PAD and CRT is only supported for PKD requests)
	 * - PCICC, always
	 */
	pad_len = outputdatalength - reply_len;
	if (pad_len > 0) {
		if (pad_len < 10)
			return -EINVAL;
		/* 'restore' padding left in the PCICC/PCIXCC card. */
		/* NOTE(review): assumes pad_len - 1 <= sizeof(static_pad)
		 * (258 bytes); verify against the callers' output size cap. */
		if (copy_to_user(outputdata, static_pad, pad_len - 1))
			return -EFAULT;
		/* 0x00 separator byte between the padding and the data. */
		if (put_user(0, outputdata + pad_len - 1))
			return -EFAULT;
	}
	/* Copy the crypto response to user space. */
	if (copy_to_user(outputdata + pad_len, data, reply_len))
		return -EFAULT;
	return 0;
}
491
492/**
493 * Copy results from a type 86 XCRB reply message back to user space.
494 *
495 * @zdev: crypto device pointer
496 * @reply: reply AP message.
497 * @xcRB: pointer to XCRB
498 *
499 * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
500 */
501static int convert_type86_xcrb(struct zcrypt_device *zdev,
502 struct ap_message *reply,
503 struct ica_xcRB *xcRB)
504{
505 struct type86_fmt2_msg *msg = reply->message;
506 char *data = reply->message;
507
508 /* Copy CPRB to user */
509 if (copy_to_user(xcRB->reply_control_blk_addr,
510 data + msg->fmt2.offset1, msg->fmt2.count1))
511 return -EFAULT;
512 xcRB->reply_control_blk_length = msg->fmt2.count1;
513
514 /* Copy data buffer to user */
515 if (msg->fmt2.count2)
516 if (copy_to_user(xcRB->reply_data_addr,
517 data + msg->fmt2.offset2, msg->fmt2.count2))
518 return -EFAULT;
519 xcRB->reply_data_length = msg->fmt2.count2;
520 return 0;
521}
522
523static int convert_type86_rng(struct zcrypt_device *zdev,
524 struct ap_message *reply,
525 char *buffer)
526{
527 struct {
528 struct type86_hdr hdr;
529 struct type86_fmt2_ext fmt2;
530 struct CPRBX cprbx;
531 } __attribute__((packed)) *msg = reply->message;
532 char *data = reply->message;
533
534 if (msg->cprbx.ccp_rtcode != 0 || msg->cprbx.ccp_rscode != 0)
535 return -EINVAL;
536 memcpy(buffer, data + msg->fmt2.offset2, msg->fmt2.count2);
537 return msg->fmt2.count2;
538}
539
540static int convert_response_ica(struct zcrypt_device *zdev,
541 struct ap_message *reply,
542 char __user *outputdata,
543 unsigned int outputdatalength)
544{
545 struct type86x_reply *msg = reply->message;
546
547 /* Response type byte is the second byte in the response. */
548 switch (((unsigned char *) reply->message)[1]) {
549 case TYPE82_RSP_CODE:
550 case TYPE88_RSP_CODE:
551 return convert_error(zdev, reply);
552 case TYPE86_RSP_CODE:
553 if (msg->cprbx.ccp_rtcode &&
554 (msg->cprbx.ccp_rscode == 0x14f) &&
555 (outputdatalength > 256)) {
556 if (zdev->max_exp_bit_length <= 17) {
557 zdev->max_exp_bit_length = 17;
558 return -EAGAIN;
559 } else
560 return -EINVAL;
561 }
562 if (msg->hdr.reply_code)
563 return convert_error(zdev, reply);
564 if (msg->cprbx.cprb_ver_id == 0x02)
565 return convert_type86_ica(zdev, reply,
566 outputdata, outputdatalength);
567 /* Fall through, no break, incorrect cprb version is an unknown
568 * response */
569 default: /* Unknown response type, this should NEVER EVER happen */
570 zdev->online = 0;
571 return -EAGAIN; /* repeat the request on a different device. */
572 }
573}
574
575static int convert_response_xcrb(struct zcrypt_device *zdev,
576 struct ap_message *reply,
577 struct ica_xcRB *xcRB)
578{
579 struct type86x_reply *msg = reply->message;
580
581 /* Response type byte is the second byte in the response. */
582 switch (((unsigned char *) reply->message)[1]) {
583 case TYPE82_RSP_CODE:
584 case TYPE88_RSP_CODE:
585 xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
586 return convert_error(zdev, reply);
587 case TYPE86_RSP_CODE:
588 if (msg->hdr.reply_code) {
589 memcpy(&(xcRB->status), msg->fmt2.apfs, sizeof(u32));
590 return convert_error(zdev, reply);
591 }
592 if (msg->cprbx.cprb_ver_id == 0x02)
593 return convert_type86_xcrb(zdev, reply, xcRB);
594 /* Fall through, no break, incorrect cprb version is an unknown
595 * response */
596 default: /* Unknown response type, this should NEVER EVER happen */
597 xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
598 zdev->online = 0;
599 return -EAGAIN; /* repeat the request on a different device. */
600 }
601}
602
603static int convert_response_rng(struct zcrypt_device *zdev,
604 struct ap_message *reply,
605 char *data)
606{
607 struct type86x_reply *msg = reply->message;
608
609 switch (msg->hdr.type) {
610 case TYPE82_RSP_CODE:
611 case TYPE88_RSP_CODE:
612 return -EINVAL;
613 case TYPE86_RSP_CODE:
614 if (msg->hdr.reply_code)
615 return -EINVAL;
616 if (msg->cprbx.cprb_ver_id == 0x02)
617 return convert_type86_rng(zdev, reply, data);
618 /* Fall through, no break, incorrect cprb version is an unknown
619 * response */
620 default: /* Unknown response type, this should NEVER EVER happen */
621 zdev->online = 0;
622 return -EAGAIN; /* repeat the request on a different device. */
623 }
624}
625
626/**
627 * This function is called from the AP bus code after a crypto request
628 * "msg" has finished with the reply message "reply".
629 * It is called from tasklet context.
630 * @ap_dev: pointer to the AP device
631 * @msg: pointer to the AP message
632 * @reply: pointer to the AP reply message
633 */
static void zcrypt_pcixcc_receive(struct ap_device *ap_dev,
				  struct ap_message *msg,
				  struct ap_message *reply)
{
	/* Canned error reply used when the AP bus reports a failure. */
	static struct error_hdr error_reply = {
		.type = TYPE82_RSP_CODE,
		.reply_code = REP82_ERROR_MACHINE_FAILURE,
	};
	struct response_type *resp_type =
		(struct response_type *) msg->private;
	struct type86x_reply *t86r;
	int length;

	/* Copy the reply message to the request message buffer. */
	if (IS_ERR(reply)) {
		/* AP bus error: fabricate a machine-failure reply. */
		memcpy(msg->message, &error_reply, sizeof(error_reply));
		goto out;
	}
	t86r = reply->message;
	if (t86r->hdr.type == TYPE86_RSP_CODE &&
	    t86r->cprbx.cprb_ver_id == 0x02) {
		switch (resp_type->type) {
		case PCIXCC_RESPONSE_TYPE_ICA:
			/* ICA reply: headers plus t86r->length payload
			 * (the length field counts its own two bytes),
			 * capped at the request buffer size. */
			length = sizeof(struct type86x_reply)
				+ t86r->length - 2;
			length = min(PCIXCC_MAX_ICA_RESPONSE_SIZE, length);
			memcpy(msg->message, reply->message, length);
			break;
		case PCIXCC_RESPONSE_TYPE_XCRB:
			/* XCRB reply: everything up to the end of the
			 * second data block, capped at the buffer size. */
			length = t86r->fmt2.offset2 + t86r->fmt2.count2;
			length = min(PCIXCC_MAX_XCRB_MESSAGE_SIZE, length);
			memcpy(msg->message, reply->message, length);
			break;
		default:
			/* Unknown request type: report machine failure. */
			memcpy(msg->message, &error_reply, sizeof error_reply);
		}
	} else
		/* Not a v2 type 86 reply: copy only the error header. */
		memcpy(msg->message, reply->message, sizeof error_reply);
out:
	/* Wake the task waiting in the zcrypt_pcixcc_* request function. */
	complete(&(resp_type->work));
}
675
/* Per-request sequence counter; mixed into the AP message PSMID so that
 * concurrent requests from the same pid get unique identifiers. */
static atomic_t zcrypt_step = ATOMIC_INIT(0);
677
678/**
679 * The request distributor calls this function if it picked the PCIXCC/CEX2C
680 * device to handle a modexpo request.
681 * @zdev: pointer to zcrypt_device structure that identifies the
682 * PCIXCC/CEX2C device to the request distributor
683 * @mex: pointer to the modexpo request buffer
684 */
685static long zcrypt_pcixcc_modexpo(struct zcrypt_device *zdev,
686 struct ica_rsa_modexpo *mex)
687{
688 struct ap_message ap_msg;
689 struct response_type resp_type = {
690 .type = PCIXCC_RESPONSE_TYPE_ICA,
691 };
692 int rc;
693
694 ap_init_message(&ap_msg);
695 ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
696 if (!ap_msg.message)
697 return -ENOMEM;
698 ap_msg.receive = zcrypt_pcixcc_receive;
699 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
700 atomic_inc_return(&zcrypt_step);
701 ap_msg.private = &resp_type;
702 rc = ICAMEX_msg_to_type6MEX_msgX(zdev, &ap_msg, mex);
703 if (rc)
704 goto out_free;
705 init_completion(&resp_type.work);
706 ap_queue_message(zdev->ap_dev, &ap_msg);
707 rc = wait_for_completion_interruptible(&resp_type.work);
708 if (rc == 0)
709 rc = convert_response_ica(zdev, &ap_msg, mex->outputdata,
710 mex->outputdatalength);
711 else
712 /* Signal pending. */
713 ap_cancel_message(zdev->ap_dev, &ap_msg);
714out_free:
715 free_page((unsigned long) ap_msg.message);
716 return rc;
717}
718
719/**
720 * The request distributor calls this function if it picked the PCIXCC/CEX2C
721 * device to handle a modexpo_crt request.
722 * @zdev: pointer to zcrypt_device structure that identifies the
723 * PCIXCC/CEX2C device to the request distributor
724 * @crt: pointer to the modexpoc_crt request buffer
725 */
726static long zcrypt_pcixcc_modexpo_crt(struct zcrypt_device *zdev,
727 struct ica_rsa_modexpo_crt *crt)
728{
729 struct ap_message ap_msg;
730 struct response_type resp_type = {
731 .type = PCIXCC_RESPONSE_TYPE_ICA,
732 };
733 int rc;
734
735 ap_init_message(&ap_msg);
736 ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
737 if (!ap_msg.message)
738 return -ENOMEM;
739 ap_msg.receive = zcrypt_pcixcc_receive;
740 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
741 atomic_inc_return(&zcrypt_step);
742 ap_msg.private = &resp_type;
743 rc = ICACRT_msg_to_type6CRT_msgX(zdev, &ap_msg, crt);
744 if (rc)
745 goto out_free;
746 init_completion(&resp_type.work);
747 ap_queue_message(zdev->ap_dev, &ap_msg);
748 rc = wait_for_completion_interruptible(&resp_type.work);
749 if (rc == 0)
750 rc = convert_response_ica(zdev, &ap_msg, crt->outputdata,
751 crt->outputdatalength);
752 else
753 /* Signal pending. */
754 ap_cancel_message(zdev->ap_dev, &ap_msg);
755out_free:
756 free_page((unsigned long) ap_msg.message);
757 return rc;
758}
759
760/**
761 * The request distributor calls this function if it picked the PCIXCC/CEX2C
762 * device to handle a send_cprb request.
763 * @zdev: pointer to zcrypt_device structure that identifies the
764 * PCIXCC/CEX2C device to the request distributor
765 * @xcRB: pointer to the send_cprb request buffer
766 */
767static long zcrypt_pcixcc_send_cprb(struct zcrypt_device *zdev,
768 struct ica_xcRB *xcRB)
769{
770 struct ap_message ap_msg;
771 struct response_type resp_type = {
772 .type = PCIXCC_RESPONSE_TYPE_XCRB,
773 };
774 int rc;
775
776 ap_init_message(&ap_msg);
777 ap_msg.message = kmalloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE, GFP_KERNEL);
778 if (!ap_msg.message)
779 return -ENOMEM;
780 ap_msg.receive = zcrypt_pcixcc_receive;
781 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
782 atomic_inc_return(&zcrypt_step);
783 ap_msg.private = &resp_type;
784 rc = XCRB_msg_to_type6CPRB_msgX(zdev, &ap_msg, xcRB);
785 if (rc)
786 goto out_free;
787 init_completion(&resp_type.work);
788 ap_queue_message(zdev->ap_dev, &ap_msg);
789 rc = wait_for_completion_interruptible(&resp_type.work);
790 if (rc == 0)
791 rc = convert_response_xcrb(zdev, &ap_msg, xcRB);
792 else
793 /* Signal pending. */
794 ap_cancel_message(zdev->ap_dev, &ap_msg);
795out_free:
796 kzfree(ap_msg.message);
797 return rc;
798}
799
800/**
801 * The request distributor calls this function if it picked the PCIXCC/CEX2C
802 * device to generate random data.
803 * @zdev: pointer to zcrypt_device structure that identifies the
804 * PCIXCC/CEX2C device to the request distributor
805 * @buffer: pointer to a memory page to return random data
806 */
807
808static long zcrypt_pcixcc_rng(struct zcrypt_device *zdev,
809 char *buffer)
810{
811 struct ap_message ap_msg;
812 struct response_type resp_type = {
813 .type = PCIXCC_RESPONSE_TYPE_XCRB,
814 };
815 int rc;
816
817 ap_init_message(&ap_msg);
818 ap_msg.message = kmalloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE, GFP_KERNEL);
819 if (!ap_msg.message)
820 return -ENOMEM;
821 ap_msg.receive = zcrypt_pcixcc_receive;
822 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
823 atomic_inc_return(&zcrypt_step);
824 ap_msg.private = &resp_type;
825 rng_type6CPRB_msgX(zdev->ap_dev, &ap_msg, ZCRYPT_RNG_BUFFER_SIZE);
826 init_completion(&resp_type.work);
827 ap_queue_message(zdev->ap_dev, &ap_msg);
828 rc = wait_for_completion_interruptible(&resp_type.work);
829 if (rc == 0)
830 rc = convert_response_rng(zdev, &ap_msg, buffer);
831 else
832 /* Signal pending. */
833 ap_cancel_message(zdev->ap_dev, &ap_msg);
834 kfree(ap_msg.message);
835 return rc;
836}
837
838/**
839 * The crypto operations for a PCIXCC/CEX2C card.
840 */
static struct zcrypt_ops zcrypt_pcixcc_ops = {
	.rsa_modexpo = zcrypt_pcixcc_modexpo,
	.rsa_modexpo_crt = zcrypt_pcixcc_modexpo_crt,
	.send_cprb = zcrypt_pcixcc_send_cprb,
	/* no .rng callback: used for cards without random number support */
};
846
/* Same as zcrypt_pcixcc_ops, plus the .rng callback for cards whose
 * microcode supports random number generation. */
static struct zcrypt_ops zcrypt_pcixcc_with_rng_ops = {
	.rsa_modexpo = zcrypt_pcixcc_modexpo,
	.rsa_modexpo_crt = zcrypt_pcixcc_modexpo_crt,
	.send_cprb = zcrypt_pcixcc_send_cprb,
	.rng = zcrypt_pcixcc_rng,
};
853
854/**
855 * Micro-code detection function. Its sends a message to a pcixcc card 95 * Micro-code detection function. Its sends a message to a pcixcc card
856 * to find out the microcode level. 96 * to find out the microcode level.
857 * @ap_dev: pointer to the AP device. 97 * @ap_dev: pointer to the AP device.
@@ -1083,9 +323,11 @@ static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
1083 return rc; 323 return rc;
1084 } 324 }
1085 if (rc) 325 if (rc)
1086 zdev->ops = &zcrypt_pcixcc_with_rng_ops; 326 zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME,
327 MSGTYPE06_VARIANT_DEFAULT);
1087 else 328 else
1088 zdev->ops = &zcrypt_pcixcc_ops; 329 zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME,
330 MSGTYPE06_VARIANT_NORNG);
1089 ap_dev->reply = &zdev->reply; 331 ap_dev->reply = &zdev->reply;
1090 ap_dev->private = zdev; 332 ap_dev->private = zdev;
1091 rc = zcrypt_device_register(zdev); 333 rc = zcrypt_device_register(zdev);
@@ -1095,6 +337,7 @@ static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
1095 337
1096 out_free: 338 out_free:
1097 ap_dev->private = NULL; 339 ap_dev->private = NULL;
340 zcrypt_msgtype_release(zdev->ops);
1098 zcrypt_device_free(zdev); 341 zcrypt_device_free(zdev);
1099 return rc; 342 return rc;
1100} 343}
@@ -1106,8 +349,10 @@ static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
1106static void zcrypt_pcixcc_remove(struct ap_device *ap_dev) 349static void zcrypt_pcixcc_remove(struct ap_device *ap_dev)
1107{ 350{
1108 struct zcrypt_device *zdev = ap_dev->private; 351 struct zcrypt_device *zdev = ap_dev->private;
352 struct zcrypt_ops *zops = zdev->ops;
1109 353
1110 zcrypt_device_unregister(zdev); 354 zcrypt_device_unregister(zdev);
355 zcrypt_msgtype_release(zops);
1111} 356}
1112 357
1113int __init zcrypt_pcixcc_init(void) 358int __init zcrypt_pcixcc_init(void)
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.h b/drivers/s390/crypto/zcrypt_pcixcc.h
index c7cdf599e46b..eacafc8962f2 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.h
+++ b/drivers/s390/crypto/zcrypt_pcixcc.h
@@ -1,12 +1,13 @@
1/* 1/*
2 * zcrypt 2.1.0 2 * zcrypt 2.1.0
3 * 3 *
4 * Copyright IBM Corp. 2001, 2006 4 * Copyright IBM Corp. 2001, 2012
5 * Author(s): Robert Burroughs 5 * Author(s): Robert Burroughs
6 * Eric Rossman (edrossma@us.ibm.com) 6 * Eric Rossman (edrossma@us.ibm.com)
7 * 7 *
8 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) 8 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
9 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> 9 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
10 * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
10 * 11 *
11 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by 13 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 7a8b09612c41..cf6da7fafe54 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -2993,7 +2993,7 @@ static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid)
2993 struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info; 2993 struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info;
2994 struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info; 2994 struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info;
2995 struct ccw_dev_id ccwid; 2995 struct ccw_dev_id ccwid;
2996 int level, rc; 2996 int level;
2997 2997
2998 tid->chpid = card->info.chpid; 2998 tid->chpid = card->info.chpid;
2999 ccw_device_get_id(CARD_RDEV(card), &ccwid); 2999 ccw_device_get_id(CARD_RDEV(card), &ccwid);
@@ -3001,17 +3001,10 @@ static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid)
3001 tid->devno = ccwid.devno; 3001 tid->devno = ccwid.devno;
3002 if (!info) 3002 if (!info)
3003 return; 3003 return;
3004 3004 level = stsi(NULL, 0, 0, 0);
3005 rc = stsi(NULL, 0, 0, 0); 3005 if ((level >= 2) && (stsi(info222, 2, 2, 2) == 0))
3006 if (rc == -ENOSYS)
3007 level = rc;
3008 else
3009 level = (((unsigned int) rc) >> 28);
3010
3011 if ((level >= 2) && (stsi(info222, 2, 2, 2) != -ENOSYS))
3012 tid->lparnr = info222->lpar_number; 3006 tid->lparnr = info222->lpar_number;
3013 3007 if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) {
3014 if ((level >= 3) && (stsi(info322, 3, 2, 2) != -ENOSYS)) {
3015 EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name)); 3008 EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name));
3016 memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname)); 3009 memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
3017 } 3010 }
diff --git a/include/linux/elf.h b/include/linux/elf.h
index 999b4f52e8e5..f930b1a390ab 100644
--- a/include/linux/elf.h
+++ b/include/linux/elf.h
@@ -387,6 +387,7 @@ typedef struct elf64_shdr {
387#define NT_S390_PREFIX 0x305 /* s390 prefix register */ 387#define NT_S390_PREFIX 0x305 /* s390 prefix register */
388#define NT_S390_LAST_BREAK 0x306 /* s390 breaking event address */ 388#define NT_S390_LAST_BREAK 0x306 /* s390 breaking event address */
389#define NT_S390_SYSTEM_CALL 0x307 /* s390 system call restart data */ 389#define NT_S390_SYSTEM_CALL 0x307 /* s390 system call restart data */
390#define NT_S390_TDB 0x308 /* s390 transaction diagnostic block */
390#define NT_ARM_VFP 0x400 /* ARM VFP/NEON registers */ 391#define NT_ARM_VFP 0x400 /* ARM VFP/NEON registers */
391 392
392 393
diff --git a/scripts/sortextable.c b/scripts/sortextable.c
index 6acf83449105..f19ddc47304c 100644
--- a/scripts/sortextable.c
+++ b/scripts/sortextable.c
@@ -161,7 +161,7 @@ typedef void (*table_sort_t)(char *, int);
161#define SORTEXTABLE_64 161#define SORTEXTABLE_64
162#include "sortextable.h" 162#include "sortextable.h"
163 163
164static int compare_x86_table(const void *a, const void *b) 164static int compare_relative_table(const void *a, const void *b)
165{ 165{
166 int32_t av = (int32_t)r(a); 166 int32_t av = (int32_t)r(a);
167 int32_t bv = (int32_t)r(b); 167 int32_t bv = (int32_t)r(b);
@@ -173,7 +173,7 @@ static int compare_x86_table(const void *a, const void *b)
173 return 0; 173 return 0;
174} 174}
175 175
176static void sort_x86_table(char *extab_image, int image_size) 176static void sort_relative_table(char *extab_image, int image_size)
177{ 177{
178 int i; 178 int i;
179 179
@@ -188,7 +188,7 @@ static void sort_x86_table(char *extab_image, int image_size)
188 i += 4; 188 i += 4;
189 } 189 }
190 190
191 qsort(extab_image, image_size / 8, 8, compare_x86_table); 191 qsort(extab_image, image_size / 8, 8, compare_relative_table);
192 192
193 /* Now denormalize. */ 193 /* Now denormalize. */
194 i = 0; 194 i = 0;
@@ -245,9 +245,9 @@ do_file(char const *const fname)
245 break; 245 break;
246 case EM_386: 246 case EM_386:
247 case EM_X86_64: 247 case EM_X86_64:
248 custom_sort = sort_x86_table;
249 break;
250 case EM_S390: 248 case EM_S390:
249 custom_sort = sort_relative_table;
250 break;
251 case EM_MIPS: 251 case EM_MIPS:
252 break; 252 break;
253 } /* end switch */ 253 } /* end switch */