 arch/s390/Kconfig | 130
 arch/s390/Kconfig.debug | 6
 arch/s390/defconfig | 152
 arch/s390/hypfs/Makefile | 2
 arch/s390/hypfs/hypfs.h | 33
 arch/s390/hypfs/hypfs_dbfs.c | 116
 arch/s390/hypfs/hypfs_diag.c | 82
 arch/s390/hypfs/hypfs_vm.c | 62
 arch/s390/hypfs/inode.c | 18
 arch/s390/include/asm/ccwdev.h | 2
 arch/s390/include/asm/cputime.h | 2
 arch/s390/include/asm/dasd.h | 3
 arch/s390/include/asm/ftrace.h | 11
 arch/s390/include/asm/hardirq.h | 16
 arch/s390/include/asm/irq.h | 34
 arch/s390/include/asm/kprobes.h | 20
 arch/s390/include/asm/processor.h | 4
 arch/s390/include/asm/ptrace.h | 52
 arch/s390/include/asm/qdio.h | 1
 arch/s390/include/asm/s390_ext.h | 29
 arch/s390/include/asm/smp.h | 3
 arch/s390/include/asm/system.h | 4
 arch/s390/include/asm/thread_info.h | 10
 arch/s390/include/asm/timex.h | 20
 arch/s390/kernel/asm-offsets.c | 14
 arch/s390/kernel/compat_ptrace.h | 53
 arch/s390/kernel/entry.S | 274
 arch/s390/kernel/entry.h | 2
 arch/s390/kernel/entry64.S | 73
 arch/s390/kernel/ftrace.c | 238
 arch/s390/kernel/irq.c | 41
 arch/s390/kernel/kprobes.c | 470
 arch/s390/kernel/mcount.S | 32
 arch/s390/kernel/mcount64.S | 29
 arch/s390/kernel/nmi.c | 3
 arch/s390/kernel/process.c | 21
 arch/s390/kernel/processor.c | 20
 arch/s390/kernel/ptrace.c | 306
 arch/s390/kernel/s390_ext.c | 125
 arch/s390/kernel/signal.c | 2
 arch/s390/kernel/smp.c | 47
 arch/s390/kernel/time.c | 6
 arch/s390/kernel/traps.c | 15
 arch/s390/kernel/vtime.c | 6
 arch/s390/kvm/Kconfig | 7
 arch/s390/lib/delay.c | 2
 arch/s390/mm/fault.c | 35
 drivers/s390/block/Kconfig | 24
 drivers/s390/block/dasd.c | 314
 drivers/s390/block/dasd_3990_erp.c | 16
 drivers/s390/block/dasd_devmap.c | 155
 drivers/s390/block/dasd_diag.c | 3
 drivers/s390/block/dasd_eckd.c | 664
 drivers/s390/block/dasd_eckd.h | 17
 drivers/s390/block/dasd_eer.c | 1
 drivers/s390/block/dasd_erp.c | 3
 drivers/s390/block/dasd_fba.c | 21
 drivers/s390/block/dasd_int.h | 35
 drivers/s390/char/Kconfig | 69
 drivers/s390/char/con3215.c | 2
 drivers/s390/char/raw3270.c | 2
 drivers/s390/char/sclp.c | 18
 drivers/s390/char/sclp_config.c | 1
 drivers/s390/char/tape_core.c | 2
 drivers/s390/char/vmur.c | 2
 drivers/s390/cio/ccwgroup.c | 78
 drivers/s390/cio/chsc.c | 19
 drivers/s390/cio/chsc.h | 18
 drivers/s390/cio/css.c | 8
 drivers/s390/cio/device_ops.c | 40
 drivers/s390/cio/itcw.c | 62
 drivers/s390/cio/qdio.h | 31
 drivers/s390/cio/qdio_debug.c | 1
 drivers/s390/cio/qdio_main.c | 177
 drivers/s390/cio/qdio_setup.c | 20
 drivers/s390/cio/qdio_thinint.c | 56
 drivers/s390/crypto/ap_bus.c | 67
 drivers/s390/crypto/ap_bus.h | 2
 drivers/s390/crypto/zcrypt_api.c | 12
 drivers/s390/crypto/zcrypt_api.h | 1
 drivers/s390/crypto/zcrypt_cex2a.c | 82
 drivers/s390/crypto/zcrypt_cex2a.h | 25
 drivers/s390/crypto/zcrypt_pcica.c | 1
 drivers/s390/crypto/zcrypt_pcicc.c | 1
 drivers/s390/crypto/zcrypt_pcixcc.c | 17
 drivers/s390/kvm/kvm_virtio.c | 3
 drivers/s390/net/Kconfig | 51
 drivers/s390/net/claw.c | 2
 drivers/s390/net/ctcm_main.c | 2
 drivers/s390/net/lcs.c | 2
 drivers/s390/net/qeth_core_main.c | 2
 drivers/s390/scsi/zfcp_qdio.c | 2
 net/iucv/iucv.c | 2
 tools/perf/arch/s390/Makefile | 4
 tools/perf/arch/s390/util/dwarf-regs.c | 22
 95 files changed, 2992 insertions(+), 1800 deletions(-)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 6c6d7b339aae..ff19efdf6fef 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -1,13 +1,8 @@
-config SCHED_MC
-	def_bool y
-	depends on SMP
-
 config MMU
 	def_bool y
 
 config ZONE_DMA
-	def_bool y
-	depends on 64BIT
+	def_bool y if 64BIT
 
 config LOCKDEP_SUPPORT
 	def_bool y
@@ -25,12 +20,10 @@ config RWSEM_XCHGADD_ALGORITHM
 	def_bool y
 
 config ARCH_HAS_ILOG2_U32
-	bool
-	default n
+	def_bool n
 
 config ARCH_HAS_ILOG2_U64
-	bool
-	default n
+	def_bool n
 
 config GENERIC_HWEIGHT
 	def_bool y
@@ -42,9 +35,7 @@ config GENERIC_CLOCKEVENTS
 	def_bool y
 
 config GENERIC_BUG
-	bool
-	depends on BUG
-	default y
+	def_bool y if BUG
 
 config GENERIC_BUG_RELATIVE_POINTERS
 	def_bool y
@@ -59,13 +50,10 @@ config ARCH_DMA_ADDR_T_64BIT
 	def_bool 64BIT
 
 config GENERIC_LOCKBREAK
-	bool
-	default y
-	depends on SMP && PREEMPT
+	def_bool y if SMP && PREEMPT
 
 config PGSTE
-	bool
-	default y if KVM
+	def_bool y if KVM
 
 config VIRT_CPU_ACCOUNTING
 	def_bool y
@@ -85,7 +73,6 @@ config S390
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_REGS_AND_STACK_ACCESS_API
-	select HAVE_DEFAULT_NO_SPIN_MUTEXES
 	select HAVE_OPROFILE
 	select HAVE_KPROBES
 	select HAVE_KRETPROBES
@@ -130,8 +117,7 @@ config S390
 	select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
 
 config SCHED_OMIT_FRAME_POINTER
-	bool
-	default y
+	def_bool y
 
 source "init/Kconfig"
 
@@ -144,20 +130,21 @@ comment "Processor type and features"
 source "kernel/time/Kconfig"
 
 config 64BIT
-	bool "64 bit kernel"
+	def_bool y
+	prompt "64 bit kernel"
 	help
 	  Select this option if you have an IBM z/Architecture machine
 	  and want to use the 64 bit addressing mode.
 
 config 32BIT
-	bool
-	default y if !64BIT
+	def_bool y if !64BIT
 
 config KTIME_SCALAR
 	def_bool 32BIT
 
 config SMP
-	bool "Symmetric multi-processing support"
+	def_bool y
+	prompt "Symmetric multi-processing support"
 	---help---
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, like most personal computers, say N. If
@@ -189,10 +176,10 @@ config NR_CPUS
 	  approximately sixteen kilobytes to the kernel image.
 
 config HOTPLUG_CPU
-	bool "Support for hot-pluggable CPUs"
+	def_bool y
+	prompt "Support for hot-pluggable CPUs"
 	depends on SMP
 	select HOTPLUG
-	default n
 	help
 	  Say Y here to be able to turn CPUs off and on. CPUs
 	  can be controlled through /sys/devices/system/cpu/cpu#.
@@ -208,14 +195,16 @@ config SCHED_MC
 	  increased overhead in some places.
 
 config SCHED_BOOK
-	bool "Book scheduler support"
+	def_bool y
+	prompt "Book scheduler support"
 	depends on SMP && SCHED_MC
 	help
 	  Book scheduler support improves the CPU scheduler's decision making
 	  when dealing with machines that have several books.
 
 config MATHEMU
-	bool "IEEE FPU emulation"
+	def_bool y
+	prompt "IEEE FPU emulation"
 	depends on MARCH_G5
 	help
 	  This option is required for IEEE compliant floating point arithmetic
@@ -223,7 +212,8 @@ config MATHEMU
 	  need this.
 
 config COMPAT
-	bool "Kernel support for 31 bit emulation"
+	def_bool y
+	prompt "Kernel support for 31 bit emulation"
 	depends on 64BIT
 	select COMPAT_BINFMT_ELF
 	help
@@ -233,16 +223,14 @@ config COMPAT
 	  executing 31 bit applications. It is safe to say "Y".
 
 config SYSVIPC_COMPAT
-	bool
-	depends on COMPAT && SYSVIPC
-	default y
+	def_bool y if COMPAT && SYSVIPC
 
 config AUDIT_ARCH
-	bool
-	default y
+	def_bool y
 
 config S390_EXEC_PROTECT
-	bool "Data execute protection"
+	def_bool y
+	prompt "Data execute protection"
 	help
 	  This option allows to enable a buffer overflow protection for user
 	  space programs and it also selects the addressing mode option above.
@@ -302,7 +290,8 @@ config MARCH_Z196
 endchoice
 
 config PACK_STACK
-	bool "Pack kernel stack"
+	def_bool y
+	prompt "Pack kernel stack"
 	help
 	  This option enables the compiler option -mkernel-backchain if it
 	  is available. If the option is available the compiler supports
@@ -315,7 +304,8 @@ config PACK_STACK
 	  Say Y if you are unsure.
 
 config SMALL_STACK
-	bool "Use 8kb for kernel stack instead of 16kb"
+	def_bool n
+	prompt "Use 8kb for kernel stack instead of 16kb"
 	depends on PACK_STACK && 64BIT && !LOCKDEP
 	help
 	  If you say Y here and the compiler supports the -mkernel-backchain
@@ -327,7 +317,8 @@ config SMALL_STACK
 	  Say N if you are unsure.
 
 config CHECK_STACK
-	bool "Detect kernel stack overflow"
+	def_bool y
+	prompt "Detect kernel stack overflow"
 	help
 	  This option enables the compiler option -mstack-guard and
 	  -mstack-size if they are available. If the compiler supports them
@@ -351,7 +342,8 @@ config STACK_GUARD
 	  512 for 64 bit.
 
 config WARN_STACK
-	bool "Emit compiler warnings for function with broken stack usage"
+	def_bool n
+	prompt "Emit compiler warnings for function with broken stack usage"
 	help
 	  This option enables the compiler options -mwarn-framesize and
 	  -mwarn-dynamicstack. If the compiler supports these options it
@@ -386,24 +378,24 @@ config ARCH_SPARSEMEM_DEFAULT
 	def_bool y
 
 config ARCH_SELECT_MEMORY_MODEL
 	def_bool y
 
 config ARCH_ENABLE_MEMORY_HOTPLUG
-	def_bool y
-	depends on SPARSEMEM
+	def_bool y if SPARSEMEM
 
 config ARCH_ENABLE_MEMORY_HOTREMOVE
 	def_bool y
 
 config ARCH_HIBERNATION_POSSIBLE
 	def_bool y if 64BIT
 
 source "mm/Kconfig"
 
 comment "I/O subsystem configuration"
 
 config QDIO
-	tristate "QDIO support"
+	def_tristate y
+	prompt "QDIO support"
 	---help---
 	  This driver provides the Queued Direct I/O base support for
 	  IBM System z.
@@ -414,7 +406,8 @@ config QDIO
 	  If unsure, say Y.
 
 config CHSC_SCH
-	tristate "Support for CHSC subchannels"
+	def_tristate y
+	prompt "Support for CHSC subchannels"
 	help
 	  This driver allows usage of CHSC subchannels. A CHSC subchannel
 	  is usually present on LPAR only.
@@ -432,7 +425,8 @@ config CHSC_SCH
 comment "Misc"
 
 config IPL
-	bool "Builtin IPL record support"
+	def_bool y
+	prompt "Builtin IPL record support"
 	help
 	  If you want to use the produced kernel to IPL directly from a
 	  device, you have to merge a bootsector specific to the device
@@ -464,7 +458,8 @@ config FORCE_MAX_ZONEORDER
 	default "9"
 
 config PFAULT
-	bool "Pseudo page fault support"
+	def_bool y
+	prompt "Pseudo page fault support"
 	help
 	  Select this option, if you want to use PFAULT pseudo page fault
 	  handling under VM. If running native or in LPAR, this option
@@ -476,7 +471,8 @@ config PFAULT
 	  this option.
 
 config SHARED_KERNEL
-	bool "VM shared kernel support"
+	def_bool y
+	prompt "VM shared kernel support"
 	help
 	  Select this option, if you want to share the text segment of the
 	  Linux kernel between different VM guests. This reduces memory
@@ -487,7 +483,8 @@ config SHARED_KERNEL
 	  doing and want to exploit this feature.
 
 config CMM
-	tristate "Cooperative memory management"
+	def_tristate n
+	prompt "Cooperative memory management"
 	help
 	  Select this option, if you want to enable the kernel interface
 	  to reduce the memory size of the system. This is accomplished
@@ -499,14 +496,16 @@ config CMM
 	  option.
 
 config CMM_IUCV
-	bool "IUCV special message interface to cooperative memory management"
+	def_bool y
+	prompt "IUCV special message interface to cooperative memory management"
 	depends on CMM && (SMSGIUCV=y || CMM=SMSGIUCV)
 	help
 	  Select this option to enable the special message interface to
 	  the cooperative memory management.
 
 config APPLDATA_BASE
-	bool "Linux - VM Monitor Stream, base infrastructure"
+	def_bool n
+	prompt "Linux - VM Monitor Stream, base infrastructure"
 	depends on PROC_FS
 	help
 	  This provides a kernel interface for creating and updating z/VM APPLDATA
@@ -521,7 +520,8 @@ config APPLDATA_BASE
 	  The /proc entries can also be read from, showing the current settings.
 
 config APPLDATA_MEM
-	tristate "Monitor memory management statistics"
+	def_tristate m
+	prompt "Monitor memory management statistics"
 	depends on APPLDATA_BASE && VM_EVENT_COUNTERS
 	help
 	  This provides memory management related data to the Linux - VM Monitor
@@ -537,7 +537,8 @@ config APPLDATA_MEM
 	  appldata_mem.o.
 
 config APPLDATA_OS
-	tristate "Monitor OS statistics"
+	def_tristate m
+	prompt "Monitor OS statistics"
 	depends on APPLDATA_BASE
 	help
 	  This provides OS related data to the Linux - VM Monitor Stream, like
@@ -551,7 +552,8 @@ config APPLDATA_OS
 	  appldata_os.o.
 
 config APPLDATA_NET_SUM
-	tristate "Monitor overall network statistics"
+	def_tristate m
+	prompt "Monitor overall network statistics"
 	depends on APPLDATA_BASE && NET
 	help
 	  This provides network related data to the Linux - VM Monitor Stream,
@@ -568,30 +570,32 @@ config APPLDATA_NET_SUM
 source kernel/Kconfig.hz
 
 config S390_HYPFS_FS
-	bool "s390 hypervisor file system support"
+	def_bool y
+	prompt "s390 hypervisor file system support"
 	select SYS_HYPERVISOR
-	default y
 	help
 	  This is a virtual file system intended to provide accounting
 	  information in an s390 hypervisor environment.
 
 config KEXEC
-	bool "kexec system call"
+	def_bool n
+	prompt "kexec system call"
 	help
 	  kexec is a system call that implements the ability to shutdown your
 	  current kernel, and to start another kernel. It is like a reboot
 	  but is independent of hardware/microcode support.
 
 config ZFCPDUMP
-	bool "zfcpdump support"
+	def_bool n
+	prompt "zfcpdump support"
 	select SMP
-	default n
 	help
 	  Select this option if you want to build an zfcpdump enabled kernel.
 	  Refer to <file:Documentation/s390/zfcpdump.txt> for more details on this.
 
 config S390_GUEST
-bool "s390 guest support for KVM (EXPERIMENTAL)"
+	def_bool y
+	prompt "s390 guest support for KVM (EXPERIMENTAL)"
 	depends on 64BIT && EXPERIMENTAL
 	select VIRTIO
 	select VIRTIO_RING
@@ -603,9 +607,9 @@ bool "s390 guest support for KVM (EXPERIMENTAL)"
 	  the default console.
 
 config SECCOMP
-	bool "Enable seccomp to safely compute untrusted bytecode"
+	def_bool y
+	prompt "Enable seccomp to safely compute untrusted bytecode"
 	depends on PROC_FS
-	default y
 	help
 	  This kernel feature is useful for number crunching applications
 	  that may need to compute untrusted bytecode during their
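Note: nearly every hunk in this Kconfig series applies the same idiom — the old three-line form (type line with prompt text, separate default, optional depends) collapses into a single def_bool/def_tristate line, with the user-visible text moved to its own prompt line. A minimal before/after sketch in Kconfig syntax; FOO is a placeholder, not an option from this patch:

	# before
	config FOO
		bool "Enable foo"
		default y

	# after
	config FOO
		def_bool y
		prompt "Enable foo"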
diff --git a/arch/s390/Kconfig.debug b/arch/s390/Kconfig.debug
index 05221b13ffb1..2b380df95606 100644
--- a/arch/s390/Kconfig.debug
+++ b/arch/s390/Kconfig.debug
@@ -1,8 +1,7 @@
 menu "Kernel hacking"
 
 config TRACE_IRQFLAGS_SUPPORT
-	bool
-	default y
+	def_bool y
 
 source "lib/Kconfig.debug"
 
@@ -19,7 +18,8 @@ config STRICT_DEVMEM
 	  If you are unsure, say Y.
 
 config DEBUG_STRICT_USER_COPY_CHECKS
-	bool "Strict user copy size checks"
+	def_bool n
+	prompt "Strict user copy size checks"
 	---help---
 	  Enabling this option turns a certain set of sanity checks for user
 	  copy operations into compile time warnings.
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index e40ac6ee6526..d79697157ac0 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -2,16 +2,12 @@ CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_AUDIT=y
+CONFIG_RCU_TRACE=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
-CONFIG_CGROUPS=y
-CONFIG_CGROUP_NS=y
-CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_UTS_NS=y
-CONFIG_IPC_NS=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-# CONFIG_COMPAT_BRK is not set
+CONFIG_PERF_EVENTS=y
 CONFIG_SLAB=y
 CONFIG_KPROBES=y
 CONFIG_MODULES=y
@@ -20,24 +16,12 @@ CONFIG_MODVERSIONS=y
 CONFIG_DEFAULT_DEADLINE=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
-CONFIG_64BIT=y
-CONFIG_SMP=y
-CONFIG_NR_CPUS=32
-CONFIG_COMPAT=y
-CONFIG_S390_EXEC_PROTECT=y
-CONFIG_PACK_STACK=y
-CONFIG_CHECK_STACK=y
 CONFIG_PREEMPT=y
 CONFIG_MEMORY_HOTPLUG=y
 CONFIG_MEMORY_HOTREMOVE=y
-CONFIG_QDIO=y
-CONFIG_CHSC_SCH=m
-CONFIG_IPL=y
 CONFIG_BINFMT_MISC=m
-CONFIG_PFAULT=y
 CONFIG_HZ_100=y
 CONFIG_KEXEC=y
-CONFIG_S390_GUEST=y
 CONFIG_PM=y
 CONFIG_HIBERNATION=y
 CONFIG_PACKET=y
@@ -46,16 +30,15 @@ CONFIG_NET_KEY=y
 CONFIG_AFIUCV=m
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
+# CONFIG_INET_LRO is not set
 CONFIG_IPV6=y
-CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
-CONFIG_NETFILTER_NETLINK_LOG=m
-CONFIG_NF_CONNTRACK=m
-# CONFIG_NF_CT_PROTO_SCTP is not set
+CONFIG_NET_SCTPPROBE=m
+CONFIG_L2TP=m
+CONFIG_L2TP_DEBUGFS=m
+CONFIG_VLAN_8021Q=y
 CONFIG_NET_SCHED=y
 CONFIG_NET_SCH_CBQ=m
 CONFIG_NET_SCH_PRIO=m
-CONFIG_NET_SCH_MULTIQ=y
 CONFIG_NET_SCH_RED=m
 CONFIG_NET_SCH_SFQ=m
 CONFIG_NET_SCH_TEQL=m
@@ -69,28 +52,14 @@ CONFIG_NET_CLS_U32=m
 CONFIG_CLS_U32_MARK=y
 CONFIG_NET_CLS_RSVP=m
 CONFIG_NET_CLS_RSVP6=m
-CONFIG_NET_CLS_FLOW=m
 CONFIG_NET_CLS_ACT=y
 CONFIG_NET_ACT_POLICE=y
-CONFIG_NET_ACT_NAT=m
-CONFIG_CAN=m
-CONFIG_CAN_RAW=m
-CONFIG_CAN_BCM=m
-CONFIG_CAN_VCAN=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 # CONFIG_FIRMWARE_IN_KERNEL is not set
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_XIP=y
-CONFIG_BLK_DEV_XPRAM=m
-CONFIG_DASD=y
-CONFIG_DASD_PROFILE=y
-CONFIG_DASD_ECKD=y
-CONFIG_DASD_FBA=y
-CONFIG_DASD_DIAG=y
-CONFIG_DASD_EER=y
-CONFIG_VIRTIO_BLK=m
+CONFIG_VIRTIO_BLK=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=y
@@ -102,101 +71,92 @@ CONFIG_SCSI_CONSTANTS=y
 CONFIG_SCSI_LOGGING=y
 CONFIG_SCSI_SCAN_ASYNC=y
 CONFIG_ZFCP=y
-CONFIG_SCSI_DH=m
-CONFIG_SCSI_DH_RDAC=m
-CONFIG_SCSI_DH_HP_SW=m
-CONFIG_SCSI_DH_EMC=m
-CONFIG_SCSI_DH_ALUA=m
-CONFIG_SCSI_OSD_INITIATOR=m
-CONFIG_SCSI_OSD_ULD=m
-CONFIG_MD=y
-CONFIG_BLK_DEV_MD=y
-CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_MULTIPATH=m
-CONFIG_BLK_DEV_DM=y
-CONFIG_DM_CRYPT=y
-CONFIG_DM_SNAPSHOT=y
-CONFIG_DM_MIRROR=y
-CONFIG_DM_ZERO=y
-CONFIG_DM_MULTIPATH=m
+CONFIG_ZFCP_DIF=y
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=m
 CONFIG_BONDING=m
 CONFIG_EQUALIZER=m
 CONFIG_TUN=m
-CONFIG_VETH=m
 CONFIG_NET_ETHERNET=y
-CONFIG_LCS=m
-CONFIG_CTCM=m
-CONFIG_QETH=y
-CONFIG_QETH_L2=y
-CONFIG_QETH_L3=y
-CONFIG_VIRTIO_NET=m
-CONFIG_HW_RANDOM_VIRTIO=m
+CONFIG_VIRTIO_NET=y
 CONFIG_RAW_DRIVER=m
-CONFIG_TN3270=y
-CONFIG_TN3270_TTY=y
-CONFIG_TN3270_FS=m
-CONFIG_TN3270_CONSOLE=y
-CONFIG_TN3215=y
-CONFIG_TN3215_CONSOLE=y
-CONFIG_SCLP_TTY=y
-CONFIG_SCLP_CONSOLE=y
-CONFIG_SCLP_VT220_TTY=y
-CONFIG_SCLP_VT220_CONSOLE=y
-CONFIG_SCLP_CPI=m
-CONFIG_SCLP_ASYNC=m
-CONFIG_S390_TAPE=m
-CONFIG_S390_TAPE_BLOCK=y
-CONFIG_S390_TAPE_34XX=m
-CONFIG_ACCESSIBILITY=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
 CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_TMPFS_POSIX_ACL=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_NFSD=y
-CONFIG_NFSD_V3=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
 CONFIG_DLM=m
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
-# CONFIG_SCHED_DEBUG is not set
-CONFIG_DEBUG_SPINLOCK=y
-CONFIG_DEBUG_MUTEXES=y
+CONFIG_TIMER_STATS=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_PROVE_RCU=y
+CONFIG_LOCK_STAT=y
+CONFIG_DEBUG_LOCKDEP=y
 CONFIG_DEBUG_SPINLOCK_SLEEP=y
+CONFIG_DEBUG_LIST=y
+CONFIG_DEBUG_NOTIFIERS=y
 # CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
+CONFIG_KPROBES_SANITY_TEST=y
+CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
+CONFIG_LATENCYTOP=y
 CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_SAMPLES=y
-CONFIG_CRYPTO_FIPS=y
+CONFIG_DEBUG_PAGEALLOC=y
+# CONFIG_FTRACE is not set
+# CONFIG_STRICT_DEVMEM is not set
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_AUTHENC=m
+CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_CBC=y
 CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_ECB=m
+CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_RMD256=m
 CONFIG_CRYPTO_RMD320=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_DES=m
 CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
 CONFIG_CRYPTO_SALSA20=m
 CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_DEFLATE=m
 CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_ZCRYPT=m
+CONFIG_CRYPTO_SHA1_S390=m
+CONFIG_CRYPTO_SHA256_S390=m
 CONFIG_CRYPTO_SHA512_S390=m
-CONFIG_CRC_T10DIF=y
-CONFIG_CRC32=m
+CONFIG_CRYPTO_DES_S390=m
+CONFIG_CRYPTO_AES_S390=m
 CONFIG_CRC7=m
-CONFIG_KVM=m
-CONFIG_VIRTIO_BALLOON=m
+CONFIG_VIRTIO_BALLOON=y
diff --git a/arch/s390/hypfs/Makefile b/arch/s390/hypfs/Makefile
index b08d2abf6178..2e671d5004ca 100644
--- a/arch/s390/hypfs/Makefile
+++ b/arch/s390/hypfs/Makefile
@@ -4,4 +4,4 @@
 
 obj-$(CONFIG_S390_HYPFS_FS) += s390_hypfs.o
 
-s390_hypfs-objs := inode.o hypfs_diag.o hypfs_vm.o
+s390_hypfs-objs := inode.o hypfs_diag.o hypfs_vm.o hypfs_dbfs.o
diff --git a/arch/s390/hypfs/hypfs.h b/arch/s390/hypfs/hypfs.h
index fa487d4cc08b..80c1526f2af3 100644
--- a/arch/s390/hypfs/hypfs.h
+++ b/arch/s390/hypfs/hypfs.h
@@ -12,6 +12,8 @@
 #include <linux/fs.h>
 #include <linux/types.h>
 #include <linux/debugfs.h>
+#include <linux/workqueue.h>
+#include <linux/kref.h>
 
 #define REG_FILE_MODE 0440
 #define UPDATE_FILE_MODE 0220
@@ -38,6 +40,33 @@ extern int hypfs_vm_init(void);
 extern void hypfs_vm_exit(void);
 extern int hypfs_vm_create_files(struct super_block *sb, struct dentry *root);
 
-/* Directory for debugfs files */
-extern struct dentry *hypfs_dbfs_dir;
+/* debugfs interface */
+struct hypfs_dbfs_file;
+
+struct hypfs_dbfs_data {
+	void	*buf;
+	void	*buf_free_ptr;
+	size_t	size;
+	struct hypfs_dbfs_file	*dbfs_file;
+	struct kref	kref;
+};
+
+struct hypfs_dbfs_file {
+	const char	*name;
+	int		(*data_create)(void **data, void **data_free_ptr,
+				       size_t *size);
+	void		(*data_free)(const void *buf_free_ptr);
+
+	/* Private data for hypfs_dbfs.c */
+	struct hypfs_dbfs_data	*data;
+	struct delayed_work	data_free_work;
+	struct mutex		lock;
+	struct dentry		*dentry;
+};
+
+extern int hypfs_dbfs_init(void);
+extern void hypfs_dbfs_exit(void);
+extern int hypfs_dbfs_create_file(struct hypfs_dbfs_file *df);
+extern void hypfs_dbfs_remove_file(struct hypfs_dbfs_file *df);
+
 #endif /* _HYPFS_H_ */
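Note: the struct above is the whole client contract for the new debugfs helper — a provider supplies name, data_create() and data_free(), and hypfs_dbfs.c owns the dentry, locking and buffer life cycle. A minimal sketch of a client; my_create()/my_file are hypothetical and not part of this patch:

	static int my_create(void **data, void **data_free_ptr, size_t *size)
	{
		char *buf = kstrdup("hello\n", GFP_KERNEL);	/* example payload */

		if (!buf)
			return -ENOMEM;
		*data = buf;		/* start of readable data */
		*data_free_ptr = buf;	/* what data_free() is called with */
		*size = strlen(buf);
		return 0;
	}

	static struct hypfs_dbfs_file my_file = {
		.name		= "my_file",
		.data_create	= my_create,
		.data_free	= kfree,
	};

	/* init path: rc = hypfs_dbfs_create_file(&my_file); */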
diff --git a/arch/s390/hypfs/hypfs_dbfs.c b/arch/s390/hypfs/hypfs_dbfs.c
new file mode 100644
index 000000000000..b478013b7fec
--- /dev/null
+++ b/arch/s390/hypfs/hypfs_dbfs.c
@@ -0,0 +1,116 @@
+/*
+ * Hypervisor filesystem for Linux on s390 - debugfs interface
+ *
+ * Copyright (C) IBM Corp. 2010
+ * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
+ */
+
+#include <linux/slab.h>
+#include "hypfs.h"
+
+static struct dentry *dbfs_dir;
+
+static struct hypfs_dbfs_data *hypfs_dbfs_data_alloc(struct hypfs_dbfs_file *f)
+{
+	struct hypfs_dbfs_data *data;
+
+	data = kmalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return NULL;
+	kref_init(&data->kref);
+	data->dbfs_file = f;
+	return data;
+}
+
+static void hypfs_dbfs_data_free(struct kref *kref)
+{
+	struct hypfs_dbfs_data *data;
+
+	data = container_of(kref, struct hypfs_dbfs_data, kref);
+	data->dbfs_file->data_free(data->buf_free_ptr);
+	kfree(data);
+}
+
+static void data_free_delayed(struct work_struct *work)
+{
+	struct hypfs_dbfs_data *data;
+	struct hypfs_dbfs_file *df;
+
+	df = container_of(work, struct hypfs_dbfs_file, data_free_work.work);
+	mutex_lock(&df->lock);
+	data = df->data;
+	df->data = NULL;
+	mutex_unlock(&df->lock);
+	kref_put(&data->kref, hypfs_dbfs_data_free);
+}
+
+static ssize_t dbfs_read(struct file *file, char __user *buf,
+			 size_t size, loff_t *ppos)
+{
+	struct hypfs_dbfs_data *data;
+	struct hypfs_dbfs_file *df;
+	ssize_t rc;
+
+	if (*ppos != 0)
+		return 0;
+
+	df = file->f_path.dentry->d_inode->i_private;
+	mutex_lock(&df->lock);
+	if (!df->data) {
+		data = hypfs_dbfs_data_alloc(df);
+		if (!data) {
+			mutex_unlock(&df->lock);
+			return -ENOMEM;
+		}
+		rc = df->data_create(&data->buf, &data->buf_free_ptr,
+				     &data->size);
+		if (rc) {
+			mutex_unlock(&df->lock);
+			kfree(data);
+			return rc;
+		}
+		df->data = data;
+		schedule_delayed_work(&df->data_free_work, HZ);
+	}
+	data = df->data;
+	kref_get(&data->kref);
+	mutex_unlock(&df->lock);
+
+	rc = simple_read_from_buffer(buf, size, ppos, data->buf, data->size);
+	kref_put(&data->kref, hypfs_dbfs_data_free);
+	return rc;
+}
+
+static const struct file_operations dbfs_ops = {
+	.read		= dbfs_read,
+	.llseek		= no_llseek,
+};
+
+int hypfs_dbfs_create_file(struct hypfs_dbfs_file *df)
+{
+	df->dentry = debugfs_create_file(df->name, 0400, dbfs_dir, df,
+					 &dbfs_ops);
+	if (IS_ERR(df->dentry))
+		return PTR_ERR(df->dentry);
+	mutex_init(&df->lock);
+	INIT_DELAYED_WORK(&df->data_free_work, data_free_delayed);
+	return 0;
+}
+
+void hypfs_dbfs_remove_file(struct hypfs_dbfs_file *df)
+{
+	debugfs_remove(df->dentry);
+}
+
+int hypfs_dbfs_init(void)
+{
+	dbfs_dir = debugfs_create_dir("s390_hypfs", NULL);
+	if (IS_ERR(dbfs_dir))
+		return PTR_ERR(dbfs_dir);
+	return 0;
+}
+
+void hypfs_dbfs_exit(void)
+{
+	debugfs_remove(dbfs_dir);
+}
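Note: dbfs_read() builds the buffer on the first access, then keeps it cached under a kref and schedules data_free_work one second out (HZ jiffies), so readers arriving in quick succession share a single expensive diagnose call; whoever drops the last reference frees the buffer. Note also that the driver returns EOF for any read at a non-zero offset, so user space must fetch the blob with one large read. A sketch, assuming debugfs is mounted at the usual /sys/kernel/debug:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		static char buf[1 << 20];	/* large enough for one-shot read */
		ssize_t n;
		int fd = open("/sys/kernel/debug/s390_hypfs/diag_2fc", O_RDONLY);

		if (fd < 0)
			return 1;
		n = read(fd, buf, sizeof(buf));	/* single read; offset > 0 returns EOF */
		if (n > 0)
			fwrite(buf, 1, n, stdout);	/* raw dbfs_d2fc records */
		close(fd);
		return 0;
	}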
diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
index cd4a81be9cf8..6023c6dc1fb7 100644
--- a/arch/s390/hypfs/hypfs_diag.c
+++ b/arch/s390/hypfs/hypfs_diag.c
@@ -555,81 +555,38 @@ struct dbfs_d204 {
 	char buf[];	/* d204 buffer */
 } __attribute__ ((packed));
 
-struct dbfs_d204_private {
-	struct dbfs_d204 *d204;	/* Aligned d204 data with header */
-	void *base;		/* Base pointer (needed for vfree) */
-};
-
-static int dbfs_d204_open(struct inode *inode, struct file *file)
+static int dbfs_d204_create(void **data, void **data_free_ptr, size_t *size)
 {
-	struct dbfs_d204_private *data;
 	struct dbfs_d204 *d204;
 	int rc, buf_size;
+	void *base;
 
-	data = kzalloc(sizeof(*data), GFP_KERNEL);
-	if (!data)
-		return -ENOMEM;
 	buf_size = PAGE_SIZE * (diag204_buf_pages + 1) + sizeof(d204->hdr);
-	data->base = vmalloc(buf_size);
-	if (!data->base) {
-		rc = -ENOMEM;
-		goto fail_kfree_data;
+	base = vmalloc(buf_size);
+	if (!base)
+		return -ENOMEM;
+	memset(base, 0, buf_size);
+	d204 = page_align_ptr(base + sizeof(d204->hdr)) - sizeof(d204->hdr);
+	rc = diag204_do_store(d204->buf, diag204_buf_pages);
+	if (rc) {
+		vfree(base);
+		return rc;
 	}
-	memset(data->base, 0, buf_size);
-	d204 = page_align_ptr(data->base + sizeof(d204->hdr))
-		- sizeof(d204->hdr);
-	rc = diag204_do_store(&d204->buf, diag204_buf_pages);
-	if (rc)
-		goto fail_vfree_base;
 	d204->hdr.version = DBFS_D204_HDR_VERSION;
 	d204->hdr.len = PAGE_SIZE * diag204_buf_pages;
 	d204->hdr.sc = diag204_store_sc;
-	data->d204 = d204;
-	file->private_data = data;
-	return nonseekable_open(inode, file);
-
-fail_vfree_base:
-	vfree(data->base);
-fail_kfree_data:
-	kfree(data);
-	return rc;
-}
-
-static int dbfs_d204_release(struct inode *inode, struct file *file)
-{
-	struct dbfs_d204_private *data = file->private_data;
-
-	vfree(data->base);
-	kfree(data);
+	*data = d204;
+	*data_free_ptr = base;
+	*size = d204->hdr.len + sizeof(struct dbfs_d204_hdr);
 	return 0;
 }
 
-static ssize_t dbfs_d204_read(struct file *file, char __user *buf,
-			      size_t size, loff_t *ppos)
-{
-	struct dbfs_d204_private *data = file->private_data;
-
-	return simple_read_from_buffer(buf, size, ppos, data->d204,
-				       data->d204->hdr.len +
-				       sizeof(data->d204->hdr));
-}
-
-static const struct file_operations dbfs_d204_ops = {
-	.open		= dbfs_d204_open,
-	.read		= dbfs_d204_read,
-	.release	= dbfs_d204_release,
-	.llseek		= no_llseek,
+static struct hypfs_dbfs_file dbfs_file_d204 = {
+	.name		= "diag_204",
+	.data_create	= dbfs_d204_create,
+	.data_free	= vfree,
 };
 
-static int hypfs_dbfs_init(void)
-{
-	dbfs_d204_file = debugfs_create_file("diag_204", 0400, hypfs_dbfs_dir,
-					     NULL, &dbfs_d204_ops);
-	if (IS_ERR(dbfs_d204_file))
-		return PTR_ERR(dbfs_d204_file);
-	return 0;
-}
-
 __init int hypfs_diag_init(void)
 {
 	int rc;
@@ -639,7 +596,7 @@ __init int hypfs_diag_init(void)
 		return -ENODATA;
 	}
 	if (diag204_info_type == INFO_EXT) {
-		rc = hypfs_dbfs_init();
+		rc = hypfs_dbfs_create_file(&dbfs_file_d204);
 		if (rc)
 			return rc;
 	}
@@ -660,6 +617,7 @@ void hypfs_diag_exit(void)
 	debugfs_remove(dbfs_d204_file);
 	diag224_delete_name_table();
 	diag204_free_buffer();
+	hypfs_dbfs_remove_file(&dbfs_file_d204);
 }
 
 /*
diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c
index 26cf177f6a3a..e54796002f61 100644
--- a/arch/s390/hypfs/hypfs_vm.c
+++ b/arch/s390/hypfs/hypfs_vm.c
@@ -20,8 +20,6 @@ static char local_guest[] = " ";
 static char all_guests[] = "* ";
 static char *guest_query;
 
-static struct dentry *dbfs_d2fc_file;
-
 struct diag2fc_data {
 	__u32 version;
 	__u32 flags;
@@ -104,7 +102,7 @@ static void *diag2fc_store(char *query, unsigned int *count, int offset)
 	return data;
 }
 
-static void diag2fc_free(void *data)
+static void diag2fc_free(const void *data)
 {
 	vfree(data);
 }
@@ -239,43 +237,29 @@ struct dbfs_d2fc {
 	char buf[];	/* d2fc buffer */
 } __attribute__ ((packed));
 
-static int dbfs_d2fc_open(struct inode *inode, struct file *file)
+static int dbfs_diag2fc_create(void **data, void **data_free_ptr, size_t *size)
 {
-	struct dbfs_d2fc *data;
+	struct dbfs_d2fc *d2fc;
 	unsigned int count;
 
-	data = diag2fc_store(guest_query, &count, sizeof(data->hdr));
-	if (IS_ERR(data))
-		return PTR_ERR(data);
-	get_clock_ext(data->hdr.tod_ext);
-	data->hdr.len = count * sizeof(struct diag2fc_data);
-	data->hdr.version = DBFS_D2FC_HDR_VERSION;
-	data->hdr.count = count;
-	memset(&data->hdr.reserved, 0, sizeof(data->hdr.reserved));
-	file->private_data = data;
-	return nonseekable_open(inode, file);
-}
-
-static int dbfs_d2fc_release(struct inode *inode, struct file *file)
-{
-	diag2fc_free(file->private_data);
+	d2fc = diag2fc_store(guest_query, &count, sizeof(d2fc->hdr));
+	if (IS_ERR(d2fc))
+		return PTR_ERR(d2fc);
+	get_clock_ext(d2fc->hdr.tod_ext);
+	d2fc->hdr.len = count * sizeof(struct diag2fc_data);
+	d2fc->hdr.version = DBFS_D2FC_HDR_VERSION;
+	d2fc->hdr.count = count;
+	memset(&d2fc->hdr.reserved, 0, sizeof(d2fc->hdr.reserved));
+	*data = d2fc;
+	*data_free_ptr = d2fc;
+	*size = d2fc->hdr.len + sizeof(struct dbfs_d2fc_hdr);
 	return 0;
 }
 
-static ssize_t dbfs_d2fc_read(struct file *file, char __user *buf,
-			      size_t size, loff_t *ppos)
-{
-	struct dbfs_d2fc *data = file->private_data;
-
-	return simple_read_from_buffer(buf, size, ppos, data, data->hdr.len +
-				       sizeof(struct dbfs_d2fc_hdr));
-}
-
-static const struct file_operations dbfs_d2fc_ops = {
-	.open		= dbfs_d2fc_open,
-	.read		= dbfs_d2fc_read,
-	.release	= dbfs_d2fc_release,
-	.llseek		= no_llseek,
+static struct hypfs_dbfs_file dbfs_file_2fc = {
+	.name		= "diag_2fc",
+	.data_create	= dbfs_diag2fc_create,
+	.data_free	= diag2fc_free,
 };
 
 int hypfs_vm_init(void)
@@ -288,18 +272,12 @@ int hypfs_vm_init(void)
 		guest_query = local_guest;
 	else
 		return -EACCES;
-
-	dbfs_d2fc_file = debugfs_create_file("diag_2fc", 0400, hypfs_dbfs_dir,
-					     NULL, &dbfs_d2fc_ops);
-	if (IS_ERR(dbfs_d2fc_file))
-		return PTR_ERR(dbfs_d2fc_file);
-
-	return 0;
+	return hypfs_dbfs_create_file(&dbfs_file_2fc);
 }
 
 void hypfs_vm_exit(void)
 {
 	if (!MACHINE_IS_VM)
 		return;
-	debugfs_remove(dbfs_d2fc_file);
+	hypfs_dbfs_remove_file(&dbfs_file_2fc);
 }
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 47cc446dab8f..6fe874fc5f8e 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -46,8 +46,6 @@ static const struct super_operations hypfs_s_ops;
 /* start of list of all dentries, which have to be deleted on update */
 static struct dentry *hypfs_last_dentry;
 
-struct dentry *hypfs_dbfs_dir;
-
 static void hypfs_update_update(struct super_block *sb)
 {
 	struct hypfs_sb_info *sb_info = sb->s_fs_info;
@@ -471,13 +469,12 @@ static int __init hypfs_init(void)
 {
 	int rc;
 
-	hypfs_dbfs_dir = debugfs_create_dir("s390_hypfs", NULL);
-	if (IS_ERR(hypfs_dbfs_dir))
-		return PTR_ERR(hypfs_dbfs_dir);
-
+	rc = hypfs_dbfs_init();
+	if (rc)
+		return rc;
 	if (hypfs_diag_init()) {
 		rc = -ENODATA;
-		goto fail_debugfs_remove;
+		goto fail_dbfs_exit;
 	}
 	if (hypfs_vm_init()) {
 		rc = -ENODATA;
@@ -499,9 +496,8 @@ fail_hypfs_vm_exit:
 	hypfs_vm_exit();
 fail_hypfs_diag_exit:
 	hypfs_diag_exit();
-fail_debugfs_remove:
-	debugfs_remove(hypfs_dbfs_dir);
-
+fail_dbfs_exit:
+	hypfs_dbfs_exit();
 	pr_err("Initialization of hypfs failed with rc=%i\n", rc);
 	return rc;
 }
@@ -510,7 +506,7 @@ static void __exit hypfs_exit(void)
 {
 	hypfs_diag_exit();
 	hypfs_vm_exit();
-	debugfs_remove(hypfs_dbfs_dir);
+	hypfs_dbfs_exit();
 	unregister_filesystem(&hypfs_type);
 	kobject_put(s390_kobj);
 }
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index e8501115eca8..ff6f62e0ec3e 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -204,6 +204,8 @@ int ccw_device_tm_start_timeout(struct ccw_device *, struct tcw *,
 				unsigned long, u8, int);
 int ccw_device_tm_intrg(struct ccw_device *cdev);
 
+int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask);
+
 extern int ccw_device_set_online(struct ccw_device *cdev);
 extern int ccw_device_set_offline(struct ccw_device *cdev);
 
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index 40e2ab0fa3f0..081434878296 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -202,7 +202,7 @@ static inline void s390_idle_check(struct pt_regs *regs, __u64 int_clock,
 
 static inline int s390_nohz_delay(int cpu)
 {
-	return per_cpu(s390_idle, cpu).nohz_delay != 0;
+	return __get_cpu_var(s390_idle).nohz_delay != 0;
 }
 
 #define arch_needs_cpu(cpu) s390_nohz_delay(cpu)
diff --git a/arch/s390/include/asm/dasd.h b/arch/s390/include/asm/dasd.h
index b604a9186f8e..0be28efe5b66 100644
--- a/arch/s390/include/asm/dasd.h
+++ b/arch/s390/include/asm/dasd.h
@@ -73,6 +73,7 @@ typedef struct dasd_information2_t {
  * 0x02: use diag discipline (diag)
  * 0x04: set the device initially online (internal use only)
  * 0x08: enable ERP related logging
+ * 0x20: give access to raw eckd data
  */
 #define DASD_FEATURE_DEFAULT	0x00
 #define DASD_FEATURE_READONLY	0x01
@@ -80,6 +81,8 @@ typedef struct dasd_information2_t {
 #define DASD_FEATURE_INITIAL_ONLINE	0x04
 #define DASD_FEATURE_ERPLOG	0x08
 #define DASD_FEATURE_FAILFAST	0x10
+#define DASD_FEATURE_FAILONSLCK	0x20
+#define DASD_FEATURE_USERAW	0x40
 
 #define DASD_PARTN_BITS 2
 
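Note: the feature constants are single bits, so a device's feature word is just the OR of what the devmap grants it. A hedged sketch of the kind of test a driver path would make — the int-typed features variable here is illustrative, standing in for the per-device feature word kept in the DASD structures:

	int features = DASD_FEATURE_READONLY | DASD_FEATURE_USERAW;

	if (features & DASD_FEATURE_USERAW) {
		/* raw-track (full ECKD) access was requested */
	}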
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index 96c14a9102b8..3c29be4836ed 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -4,20 +4,17 @@
 #ifndef __ASSEMBLY__
 
 extern void _mcount(void);
-extern unsigned long ftrace_dyn_func;
 
 struct dyn_arch_ftrace { };
 
 #define MCOUNT_ADDR ((long)_mcount)
 
 #ifdef CONFIG_64BIT
-#define MCOUNT_OFFSET_RET 18
-#define MCOUNT_INSN_SIZE  24
-#define MCOUNT_OFFSET	  14
-#else
-#define MCOUNT_OFFSET_RET 26
-#define MCOUNT_INSN_SIZE  30
+#define MCOUNT_INSN_SIZE  12
 #define MCOUNT_OFFSET	  8
+#else
+#define MCOUNT_INSN_SIZE  20
+#define MCOUNT_OFFSET	  4
 #endif
 
 static inline unsigned long ftrace_call_adjust(unsigned long addr)
diff --git a/arch/s390/include/asm/hardirq.h b/arch/s390/include/asm/hardirq.h
index 881d94590aeb..e4155d3eb2cb 100644
--- a/arch/s390/include/asm/hardirq.h
+++ b/arch/s390/include/asm/hardirq.h
@@ -21,20 +21,4 @@
 
 #define HARDIRQ_BITS	8
 
-void clock_comparator_work(void);
-
-static inline unsigned long long local_tick_disable(void)
-{
-	unsigned long long old;
-
-	old = S390_lowcore.clock_comparator;
-	S390_lowcore.clock_comparator = -1ULL;
-	return old;
-}
-
-static inline void local_tick_enable(unsigned long long comp)
-{
-	S390_lowcore.clock_comparator = comp;
-}
-
 #endif /* __ASM_HARDIRQ_H */
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index 7da991a858f8..db14a311f1d2 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -1,23 +1,33 @@
 #ifndef _ASM_IRQ_H
 #define _ASM_IRQ_H
 
-#ifdef __KERNEL__
 #include <linux/hardirq.h>
 
-/*
- * the definition of irqs has changed in 2.5.46:
- * NR_IRQS is no longer the number of i/o
- * interrupts (65536), but rather the number
- * of interrupt classes (2).
- * Only external and i/o interrupts make much sense here (CH).
- */
-
 enum interruption_class {
 	EXTERNAL_INTERRUPT,
 	IO_INTERRUPT,
-
+	EXTINT_CLK,
+	EXTINT_IPI,
+	EXTINT_TMR,
+	EXTINT_TLA,
+	EXTINT_PFL,
+	EXTINT_DSD,
+	EXTINT_VRT,
+	EXTINT_SCP,
+	EXTINT_IUC,
+	IOINT_QAI,
+	IOINT_QDI,
+	IOINT_DAS,
+	IOINT_C15,
+	IOINT_C70,
+	IOINT_TAP,
+	IOINT_VMR,
+	IOINT_LCS,
+	IOINT_CLW,
+	IOINT_CTC,
+	IOINT_APB,
+	NMI_NMI,
 	NR_IRQS,
 };
 
-#endif /* __KERNEL__ */
-#endif
+#endif /* _ASM_IRQ_H */
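Note: splitting the two coarse classes into per-source values gives every interrupt source its own slot in the per-cpu kstat table, so each handler can account itself. A sketch of the one-liner a handler would gain under this scheme, assuming the kstat_cpu() accessor of kernels of this vintage:

	kstat_cpu(smp_processor_id()).irqs[EXTINT_CLK]++;	/* clock comparator */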
diff --git a/arch/s390/include/asm/kprobes.h b/arch/s390/include/asm/kprobes.h
index 330f68caffe4..a231a9439c4b 100644
--- a/arch/s390/include/asm/kprobes.h
+++ b/arch/s390/include/asm/kprobes.h
@@ -31,7 +31,6 @@
 #include <linux/ptrace.h>
 #include <linux/percpu.h>
 
-#define  __ARCH_WANT_KPROBES_INSN_SLOT
 struct pt_regs;
 struct kprobe;
 
@@ -58,23 +57,12 @@ typedef u16 kprobe_opcode_t;
 /* Architecture specific copy of original instruction */
 struct arch_specific_insn {
 	/* copy of original instruction */
-	kprobe_opcode_t *insn;
-	int fixup;
-	int ilen;
-	int reg;
+	kprobe_opcode_t insn[MAX_INSN_SIZE];
 };
 
-struct ins_replace_args {
-	kprobe_opcode_t *ptr;
-	kprobe_opcode_t old;
-	kprobe_opcode_t new;
-};
 struct prev_kprobe {
 	struct kprobe *kp;
 	unsigned long status;
-	unsigned long saved_psw;
-	unsigned long kprobe_saved_imask;
-	unsigned long kprobe_saved_ctl[3];
 };
 
 /* per-cpu kprobe control block */
@@ -82,17 +70,13 @@ struct kprobe_ctlblk {
 	unsigned long kprobe_status;
 	unsigned long kprobe_saved_imask;
 	unsigned long kprobe_saved_ctl[3];
-	struct pt_regs jprobe_saved_regs;
-	unsigned long jprobe_saved_r14;
-	unsigned long jprobe_saved_r15;
 	struct prev_kprobe prev_kprobe;
+	struct pt_regs jprobe_saved_regs;
 	kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
 };
 
 void arch_remove_kprobe(struct kprobe *p);
 void kretprobe_trampoline(void);
-int is_prohibited_opcode(kprobe_opcode_t *instruction);
-void get_instruction_type(struct arch_specific_insn *ainsn);
 
 int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
 int kprobe_exceptions_notify(struct notifier_block *self,
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 8d6f87169577..bf3de04170a7 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -32,7 +32,6 @@ static inline void get_cpu_id(struct cpuid *ptr)
 }
 
 extern void s390_adjust_jiffies(void);
-extern void print_cpu_info(void);
 extern int get_cpu_capability(unsigned int *);
 
 /*
@@ -81,7 +80,8 @@ struct thread_struct {
 	mm_segment_t mm_segment;
 	unsigned long prot_addr;	/* address of protection-excep. */
 	unsigned int trap_no;
-	per_struct per_info;
+	struct per_regs per_user;	/* User specified PER registers */
+	struct per_event per_event;	/* Cause of the last PER trap */
 	/* pfault_wait is used to block the process on a pfault event */
 	unsigned long pfault_wait;
 };
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index d9d42b1e46fa..9ad628a8574a 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -331,10 +331,60 @@ struct pt_regs
 	unsigned short ilc;
 	unsigned short svcnr;
 };
+
+/*
+ * Program event recording (PER) register set.
+ */
+struct per_regs {
+	unsigned long control;		/* PER control bits */
+	unsigned long start;		/* PER starting address */
+	unsigned long end;		/* PER ending address */
+};
+
+/*
+ * PER event contains information about the cause of the last PER exception.
+ */
+struct per_event {
+	unsigned short cause;		/* PER code, ATMID and AI */
+	unsigned long address;		/* PER address */
+	unsigned char paid;		/* PER access identification */
+};
+
+/*
+ * Simplified per_info structure used to decode the ptrace user space ABI.
+ */
+struct per_struct_kernel {
+	unsigned long cr9;		/* PER control bits */
+	unsigned long cr10;		/* PER starting address */
+	unsigned long cr11;		/* PER ending address */
+	unsigned long bits;		/* Obsolete software bits */
+	unsigned long starting_addr;	/* User specified start address */
+	unsigned long ending_addr;	/* User specified end address */
+	unsigned short perc_atmid;	/* PER trap ATMID */
+	unsigned long address;		/* PER trap instruction address */
+	unsigned char access_id;	/* PER trap access identification */
+};
+
+#define PER_EVENT_MASK			0xE9000000UL
+
+#define PER_EVENT_BRANCH		0x80000000UL
+#define PER_EVENT_IFETCH		0x40000000UL
+#define PER_EVENT_STORE			0x20000000UL
+#define PER_EVENT_STORE_REAL		0x08000000UL
+#define PER_EVENT_NULLIFICATION		0x01000000UL
+
+#define PER_CONTROL_MASK		0x00a00000UL
+
+#define PER_CONTROL_BRANCH_ADDRESS	0x00800000UL
+#define PER_CONTROL_ALTERATION		0x00200000UL
+
 #endif
 
 /*
- * Now for the program event recording (trace) definitions.
+ * Now for the user space program event recording (trace) definitions.
+ * The following structures are used only for the ptrace interface, don't
+ * touch or even look at it if you don't want to modify the user-space
+ * ptrace interface. In particular stay away from it for in-kernel PER.
  */
 typedef struct
 {
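Note: per_event.cause holds the 16-bit PER code word, while the PER_EVENT_* constants are spelled as 32-bit control-register masks, so an in-kernel check has to shift the mask down before comparing. A sketch of such a test; the helper itself is illustrative, not a function from this patch:

	static inline int per_was_ifetch(struct per_event *ev)
	{
		/* the PER code occupies the upper half of the 32-bit mask space */
		return (ev->cause & (PER_EVENT_IFETCH >> 16)) != 0;
	}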
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index 46e96bc1f5a1..350e7ee5952d 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -361,6 +361,7 @@ struct qdio_initialize {
 	qdio_handler_t *input_handler;
 	qdio_handler_t *output_handler;
 	void (*queue_start_poll) (struct ccw_device *, int, unsigned long);
+	int scan_threshold;
 	unsigned long int_parm;
 	void **input_sbal_addr_array;
 	void **output_sbal_addr_array;
diff --git a/arch/s390/include/asm/s390_ext.h b/arch/s390/include/asm/s390_ext.h
index 1a9307e70842..080876d5f196 100644
--- a/arch/s390/include/asm/s390_ext.h
+++ b/arch/s390/include/asm/s390_ext.h
@@ -1,32 +1,17 @@
1#ifndef _S390_EXTINT_H
2#define _S390_EXTINT_H
3
4/* 1/*
5 * include/asm-s390/s390_ext.h 2 * Copyright IBM Corp. 1999,2010
6 * 3 * Author(s): Holger Smolinski <Holger.Smolinski@de.ibm.com>,
7 * S390 version 4 * Martin Schwidefsky <schwidefsky@de.ibm.com>,
8 * Copyright IBM Corp. 1999,2007
9 * Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com),
10 * Martin Schwidefsky (schwidefsky@de.ibm.com)
11 */ 5 */
12 6
7#ifndef _S390_EXTINT_H
8#define _S390_EXTINT_H
9
13#include <linux/types.h> 10#include <linux/types.h>
14 11
15typedef void (*ext_int_handler_t)(unsigned int, unsigned int, unsigned long); 12typedef void (*ext_int_handler_t)(unsigned int, unsigned int, unsigned long);
16 13
17typedef struct ext_int_info_t {
18 struct ext_int_info_t *next;
19 ext_int_handler_t handler;
20 __u16 code;
21} ext_int_info_t;
22
23extern ext_int_info_t *ext_int_hash[];
24
25int register_external_interrupt(__u16 code, ext_int_handler_t handler); 14int register_external_interrupt(__u16 code, ext_int_handler_t handler);
26int register_early_external_interrupt(__u16 code, ext_int_handler_t handler,
27 ext_int_info_t *info);
28int unregister_external_interrupt(__u16 code, ext_int_handler_t handler); 15int unregister_external_interrupt(__u16 code, ext_int_handler_t handler);
29int unregister_early_external_interrupt(__u16 code, ext_int_handler_t handler,
30 ext_int_info_t *info);
31 16
32#endif 17#endif /* _S390_EXTINT_H */
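
With ext_int_info_t, the exported hash and the register_early_* variants gone, the interface shrinks to a (code, handler) pair and the bookkeeping stays private to s390_ext.c. A hedged sketch of a caller under the trimmed interface; the interruption code 0x1004 (clock comparator) and the handler body are chosen only for illustration:

    #include <linux/init.h>
    #include <linux/errno.h>
    #include <asm/s390_ext.h>

    static void example_ext_handler(unsigned int ext_int_code,
                                    unsigned int param32, unsigned long param64)
    {
            /* runs on the external interrupt path; keep it short */
    }

    static int __init example_init(void)
    {
            /* the early/non-early distinction no longer exists */
            if (register_external_interrupt(0x1004, example_ext_handler) != 0)
                    return -EINVAL;
            return 0;
    }

    static void __exit example_exit(void)
    {
            unregister_external_interrupt(0x1004, example_ext_handler);
    }
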
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index edc03cb9cd79..045e009fc164 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -20,7 +20,6 @@ extern void machine_power_off_smp(void);
20 20
21extern int __cpu_disable (void); 21extern int __cpu_disable (void);
22extern void __cpu_die (unsigned int cpu); 22extern void __cpu_die (unsigned int cpu);
23extern void cpu_die (void) __attribute__ ((noreturn));
24extern int __cpu_up (unsigned int cpu); 23extern int __cpu_up (unsigned int cpu);
25 24
26extern struct mutex smp_cpu_state_mutex; 25extern struct mutex smp_cpu_state_mutex;
@@ -71,8 +70,10 @@ static inline void smp_switch_to_ipl_cpu(void (*func)(void *), void *data)
71 70
72#ifdef CONFIG_HOTPLUG_CPU 71#ifdef CONFIG_HOTPLUG_CPU
73extern int smp_rescan_cpus(void); 72extern int smp_rescan_cpus(void);
73extern void __noreturn cpu_die(void);
74#else 74#else
75static inline int smp_rescan_cpus(void) { return 0; } 75static inline int smp_rescan_cpus(void) { return 0; }
76static inline void cpu_die(void) { }
76#endif 77#endif
77 78
78#endif /* __ASM_SMP_H */ 79#endif /* __ASM_SMP_H */
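
Declaring cpu_die() __noreturn only when CONFIG_HOTPLUG_CPU is set, with an empty inline stub otherwise, keeps every caller unconditional. A minimal illustration of the pattern taken from the hunk above; only the caller is hypothetical:

    #ifdef CONFIG_HOTPLUG_CPU
    extern void __noreturn cpu_die(void);
    #else
    static inline void cpu_die(void) { }
    #endif

    /* Hypothetical caller: compiles either way; without hotplug support
     * the call is optimized away entirely. */
    static void example_play_dead(void)
    {
            cpu_die();
    }
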
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
index 3ad16dbf622e..6710b0eac165 100644
--- a/arch/s390/include/asm/system.h
+++ b/arch/s390/include/asm/system.h
@@ -20,6 +20,7 @@
20struct task_struct; 20struct task_struct;
21 21
22extern struct task_struct *__switch_to(void *, void *); 22extern struct task_struct *__switch_to(void *, void *);
23extern void update_per_regs(struct task_struct *task);
23 24
24static inline void save_fp_regs(s390_fp_regs *fpregs) 25static inline void save_fp_regs(s390_fp_regs *fpregs)
25{ 26{
@@ -93,6 +94,7 @@ static inline void restore_access_regs(unsigned int *acrs)
93 if (next->mm) { \ 94 if (next->mm) { \
94 restore_fp_regs(&next->thread.fp_regs); \ 95 restore_fp_regs(&next->thread.fp_regs); \
95 restore_access_regs(&next->thread.acrs[0]); \ 96 restore_access_regs(&next->thread.acrs[0]); \
97 update_per_regs(next); \
96 } \ 98 } \
97 prev = __switch_to(prev,next); \ 99 prev = __switch_to(prev,next); \
98} while (0) 100} while (0)
@@ -101,11 +103,9 @@ extern void account_vtime(struct task_struct *, struct task_struct *);
101extern void account_tick_vtime(struct task_struct *); 103extern void account_tick_vtime(struct task_struct *);
102 104
103#ifdef CONFIG_PFAULT 105#ifdef CONFIG_PFAULT
104extern void pfault_irq_init(void);
105extern int pfault_init(void); 106extern int pfault_init(void);
106extern void pfault_fini(void); 107extern void pfault_fini(void);
107#else /* CONFIG_PFAULT */ 108#else /* CONFIG_PFAULT */
108#define pfault_irq_init() do { } while (0)
109#define pfault_init() ({-1;}) 109#define pfault_init() ({-1;})
110#define pfault_fini() do { } while (0) 110#define pfault_fini() do { } while (0)
111#endif /* CONFIG_PFAULT */ 111#endif /* CONFIG_PFAULT */
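
Calling update_per_regs(next) from the switch_to macro replaces the old hand-coded sequence in __switch_to that compared and reloaded control registers 9-11 for PER on every task switch. A conceptual sketch of the core of such a helper, offered as a simplified reconstruction rather than the code this patch adds elsewhere; __ctl_store/__ctl_load are the existing s390 control-register accessors and struct per_regs is the type introduced in ptrace.h above:

    #include <linux/string.h>
    #include <asm/ptrace.h>		/* struct per_regs */
    #include <asm/system.h>		/* __ctl_store, __ctl_load */

    /* Sketch: cr9-cr11 hold the active PER control/start/end set. Reload
     * them only when the incoming task's set differs, since most task
     * switches do not change PER state and lctl is not cheap. */
    static void update_per_regs_sketch(const struct per_regs *new)
    {
            struct per_regs old;

            __ctl_store(old, 9, 11);
            if (memcmp(&old, new, sizeof(old)) != 0)
                    __ctl_load(*new, 9, 11);
    }
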
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 5baf0230b29b..ebc77091466f 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -74,7 +74,7 @@ struct thread_info {
74/* how to get the thread information struct from C */ 74/* how to get the thread information struct from C */
75static inline struct thread_info *current_thread_info(void) 75static inline struct thread_info *current_thread_info(void)
76{ 76{
77 return (struct thread_info *)(S390_lowcore.kernel_stack - THREAD_SIZE); 77 return (struct thread_info *) S390_lowcore.thread_info;
78} 78}
79 79
80#define THREAD_SIZE_ORDER THREAD_ORDER 80#define THREAD_SIZE_ORDER THREAD_ORDER
@@ -88,7 +88,7 @@ static inline struct thread_info *current_thread_info(void)
88#define TIF_SIGPENDING 2 /* signal pending */ 88#define TIF_SIGPENDING 2 /* signal pending */
89#define TIF_NEED_RESCHED 3 /* rescheduling necessary */ 89#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
90#define TIF_RESTART_SVC 4 /* restart svc with new svc number */ 90#define TIF_RESTART_SVC 4 /* restart svc with new svc number */
91#define TIF_SINGLE_STEP 6 /* deliver sigtrap on return to user */ 91#define TIF_PER_TRAP 6 /* deliver sigtrap on return to user */
92#define TIF_MCCK_PENDING 7 /* machine check handling is pending */ 92#define TIF_MCCK_PENDING 7 /* machine check handling is pending */
93#define TIF_SYSCALL_TRACE 8 /* syscall trace active */ 93#define TIF_SYSCALL_TRACE 8 /* syscall trace active */
94#define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */ 94#define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */
@@ -99,14 +99,15 @@ static inline struct thread_info *current_thread_info(void)
99#define TIF_31BIT 17 /* 32bit process */ 99#define TIF_31BIT 17 /* 32bit process */
100#define TIF_MEMDIE 18 /* is terminating due to OOM killer */ 100#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
101#define TIF_RESTORE_SIGMASK 19 /* restore signal mask in do_signal() */ 101#define TIF_RESTORE_SIGMASK 19 /* restore signal mask in do_signal() */
102#define TIF_FREEZE 20 /* thread is freezing for suspend */ 102#define TIF_SINGLE_STEP 20 /* This task is single stepped */
103#define TIF_FREEZE 21 /* thread is freezing for suspend */
103 104
104#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) 105#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
105#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) 106#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
106#define _TIF_SIGPENDING (1<<TIF_SIGPENDING) 107#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
107#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) 108#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
108#define _TIF_RESTART_SVC (1<<TIF_RESTART_SVC) 109#define _TIF_RESTART_SVC (1<<TIF_RESTART_SVC)
109#define _TIF_SINGLE_STEP (1<<TIF_SINGLE_STEP) 110#define _TIF_PER_TRAP (1<<TIF_PER_TRAP)
110#define _TIF_MCCK_PENDING (1<<TIF_MCCK_PENDING) 111#define _TIF_MCCK_PENDING (1<<TIF_MCCK_PENDING)
111#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) 112#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
112#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) 113#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
@@ -114,6 +115,7 @@ static inline struct thread_info *current_thread_info(void)
114#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) 115#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
115#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) 116#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
116#define _TIF_31BIT (1<<TIF_31BIT) 117#define _TIF_31BIT (1<<TIF_31BIT)
 118#define _TIF_SINGLE_STEP (1<<TIF_SINGLE_STEP)
117#define _TIF_FREEZE (1<<TIF_FREEZE) 119#define _TIF_FREEZE (1<<TIF_FREEZE)
118 120
119#endif /* __KERNEL__ */ 121#endif /* __KERNEL__ */
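
The renames make the two roles explicit: TIF_PER_TRAP (bit 6) is work for the exit-to-user path, while the new TIF_SINGLE_STEP (bit 20) merely records that the task is being single-stepped. A short sketch of driving such flags from C with the stock thread-flag helpers; the wrapper functions are hypothetical:

    #include <linux/sched.h>	/* set_tsk_thread_flag and friends */
    #include <asm/thread_info.h>	/* TIF_PER_TRAP */

    /* Hypothetical: the C equivalent of the entry-code sequence
     * "oi __TI_flags+3(%r12),_TIF_PER_TRAP". */
    static void example_request_per_trap(struct task_struct *task)
    {
            set_tsk_thread_flag(task, TIF_PER_TRAP);
    }

    static int example_per_trap_pending(struct task_struct *task)
    {
            return test_tsk_thread_flag(task, TIF_PER_TRAP);
    }
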
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 09d345a701dc..88829a40af6f 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -11,6 +11,8 @@
11#ifndef _ASM_S390_TIMEX_H 11#ifndef _ASM_S390_TIMEX_H
12#define _ASM_S390_TIMEX_H 12#define _ASM_S390_TIMEX_H
13 13
14#include <asm/lowcore.h>
15
14/* The value of the TOD clock for 1.1.1970. */ 16/* The value of the TOD clock for 1.1.1970. */
15#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL 17#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
16 18
@@ -49,6 +51,24 @@ static inline void store_clock_comparator(__u64 *time)
49 asm volatile("stckc %0" : "=Q" (*time)); 51 asm volatile("stckc %0" : "=Q" (*time));
50} 52}
51 53
54void clock_comparator_work(void);
55
56static inline unsigned long long local_tick_disable(void)
57{
58 unsigned long long old;
59
60 old = S390_lowcore.clock_comparator;
61 S390_lowcore.clock_comparator = -1ULL;
62 set_clock_comparator(S390_lowcore.clock_comparator);
63 return old;
64}
65
66static inline void local_tick_enable(unsigned long long comp)
67{
68 S390_lowcore.clock_comparator = comp;
69 set_clock_comparator(S390_lowcore.clock_comparator);
70}
71
52#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */ 72#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
53 73
54typedef unsigned long long cycles_t; 74typedef unsigned long long cycles_t;
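
local_tick_disable() parks the clock comparator at the all-ones value, which never matches the TOD clock, and hands back the previously programmed value; local_tick_enable() restores it. A hedged usage sketch; the function and the work done inside the quiet window are illustrative:

    #include <asm/timex.h>

    /* Sketch: suppress clock comparator interrupts on this CPU for the
     * duration of a section that must not be disturbed by timer ticks. */
    static void example_quiet_section(void)
    {
            unsigned long long old;

            old = local_tick_disable();
            /* ... tick-free work ... */
            local_tick_enable(old);
    }
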
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 33982e7ce04d..fe03c140002a 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -23,14 +23,16 @@ int main(void)
23{ 23{
24 DEFINE(__THREAD_info, offsetof(struct task_struct, stack)); 24 DEFINE(__THREAD_info, offsetof(struct task_struct, stack));
25 DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp)); 25 DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp));
26 DEFINE(__THREAD_per, offsetof(struct task_struct, thread.per_info));
27 DEFINE(__THREAD_mm_segment, offsetof(struct task_struct, thread.mm_segment)); 26 DEFINE(__THREAD_mm_segment, offsetof(struct task_struct, thread.mm_segment));
28 BLANK(); 27 BLANK();
29 DEFINE(__TASK_pid, offsetof(struct task_struct, pid)); 28 DEFINE(__TASK_pid, offsetof(struct task_struct, pid));
30 BLANK(); 29 BLANK();
31 DEFINE(__PER_atmid, offsetof(per_struct, lowcore.words.perc_atmid)); 30 DEFINE(__THREAD_per_cause,
32 DEFINE(__PER_address, offsetof(per_struct, lowcore.words.address)); 31 offsetof(struct task_struct, thread.per_event.cause));
33 DEFINE(__PER_access_id, offsetof(per_struct, lowcore.words.access_id)); 32 DEFINE(__THREAD_per_address,
33 offsetof(struct task_struct, thread.per_event.address));
34 DEFINE(__THREAD_per_paid,
35 offsetof(struct task_struct, thread.per_event.paid));
34 BLANK(); 36 BLANK();
35 DEFINE(__TI_task, offsetof(struct thread_info, task)); 37 DEFINE(__TI_task, offsetof(struct thread_info, task));
36 DEFINE(__TI_domain, offsetof(struct thread_info, exec_domain)); 38 DEFINE(__TI_domain, offsetof(struct thread_info, exec_domain));
@@ -85,9 +87,9 @@ int main(void)
85 DEFINE(__LC_PGM_ILC, offsetof(struct _lowcore, pgm_ilc)); 87 DEFINE(__LC_PGM_ILC, offsetof(struct _lowcore, pgm_ilc));
86 DEFINE(__LC_PGM_INT_CODE, offsetof(struct _lowcore, pgm_code)); 88 DEFINE(__LC_PGM_INT_CODE, offsetof(struct _lowcore, pgm_code));
87 DEFINE(__LC_TRANS_EXC_CODE, offsetof(struct _lowcore, trans_exc_code)); 89 DEFINE(__LC_TRANS_EXC_CODE, offsetof(struct _lowcore, trans_exc_code));
88 DEFINE(__LC_PER_ATMID, offsetof(struct _lowcore, per_perc_atmid)); 90 DEFINE(__LC_PER_CAUSE, offsetof(struct _lowcore, per_perc_atmid));
89 DEFINE(__LC_PER_ADDRESS, offsetof(struct _lowcore, per_address)); 91 DEFINE(__LC_PER_ADDRESS, offsetof(struct _lowcore, per_address));
90 DEFINE(__LC_PER_ACCESS_ID, offsetof(struct _lowcore, per_access_id)); 92 DEFINE(__LC_PER_PAID, offsetof(struct _lowcore, per_access_id));
91 DEFINE(__LC_AR_MODE_ID, offsetof(struct _lowcore, ar_access_id)); 93 DEFINE(__LC_AR_MODE_ID, offsetof(struct _lowcore, ar_access_id));
92 DEFINE(__LC_SUBCHANNEL_ID, offsetof(struct _lowcore, subchannel_id)); 94 DEFINE(__LC_SUBCHANNEL_ID, offsetof(struct _lowcore, subchannel_id));
93 DEFINE(__LC_SUBCHANNEL_NR, offsetof(struct _lowcore, subchannel_nr)); 95 DEFINE(__LC_SUBCHANNEL_NR, offsetof(struct _lowcore, subchannel_nr));
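
asm-offsets.c is the bridge that lets entry.S address C structure members: each DEFINE() emits a field's byte offset as an assembler constant during the build, which is why moving the PER fields into thread.per_event shows up here as the new __THREAD_per_* constants. A minimal sketch of the mechanism itself, with an illustrative structure and constant names:

    #include <linux/kbuild.h>	/* DEFINE() */
    #include <linux/stddef.h>	/* offsetof() */

    struct example_event {
            unsigned short cause;
            unsigned long address;
    };

    int main(void)
    {
            /* The build turns these into a generated header, so assembly
             * code can write e.g. "mvc __EXAMPLE_cause(2,%r1),...". */
            DEFINE(__EXAMPLE_cause, offsetof(struct example_event, cause));
            DEFINE(__EXAMPLE_address, offsetof(struct example_event, address));
            return 0;
    }
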
diff --git a/arch/s390/kernel/compat_ptrace.h b/arch/s390/kernel/compat_ptrace.h
index 3141025724f4..12b823833510 100644
--- a/arch/s390/kernel/compat_ptrace.h
+++ b/arch/s390/kernel/compat_ptrace.h
@@ -4,40 +4,19 @@
4#include <asm/ptrace.h> /* needed for NUM_CR_WORDS */ 4#include <asm/ptrace.h> /* needed for NUM_CR_WORDS */
5#include "compat_linux.h" /* needed for psw_compat_t */ 5#include "compat_linux.h" /* needed for psw_compat_t */
6 6
7typedef struct { 7struct compat_per_struct_kernel {
8 __u32 cr[NUM_CR_WORDS]; 8 __u32 cr9; /* PER control bits */
9} per_cr_words32; 9 __u32 cr10; /* PER starting address */
10 10 __u32 cr11; /* PER ending address */
11typedef struct { 11 __u32 bits; /* Obsolete software bits */
12 __u16 perc_atmid; /* 0x096 */ 12 __u32 starting_addr; /* User specified start address */
13 __u32 address; /* 0x098 */ 13 __u32 ending_addr; /* User specified end address */
14 __u8 access_id; /* 0x0a1 */ 14 __u16 perc_atmid; /* PER trap ATMID */
15} per_lowcore_words32; 15 __u32 address; /* PER trap instruction address */
16 16 __u8 access_id; /* PER trap access identification */
17typedef struct { 17};
18 union {
19 per_cr_words32 words;
20 } control_regs;
21 /*
22 * Use these flags instead of setting em_instruction_fetch
23 * directly they are used so that single stepping can be
24 * switched on & off while not affecting other tracing
25 */
26 unsigned single_step : 1;
27 unsigned instruction_fetch : 1;
28 unsigned : 30;
29 /*
30 * These addresses are copied into cr10 & cr11 if single
31 * stepping is switched off
32 */
33 __u32 starting_addr;
34 __u32 ending_addr;
35 union {
36 per_lowcore_words32 words;
37 } lowcore;
38} per_struct32;
39 18
40struct user_regs_struct32 19struct compat_user_regs_struct
41{ 20{
42 psw_compat_t psw; 21 psw_compat_t psw;
43 u32 gprs[NUM_GPRS]; 22 u32 gprs[NUM_GPRS];
@@ -50,14 +29,14 @@ struct user_regs_struct32
50 * itself as there is no "official" ptrace interface for hardware 29 * itself as there is no "official" ptrace interface for hardware
 51 * watchpoints. This is the way Intel does it. 30 * watchpoints. This is the way Intel does it.
52 */ 31 */
53 per_struct32 per_info; 32 struct compat_per_struct_kernel per_info;
54 u32 ieee_instruction_pointer; /* obsolete, always 0 */ 33 u32 ieee_instruction_pointer; /* obsolete, always 0 */
55}; 34};
56 35
57struct user32 { 36struct compat_user {
58 /* We start with the registers, to mimic the way that "memory" 37 /* We start with the registers, to mimic the way that "memory"
59 is returned from the ptrace(3,...) function. */ 38 is returned from the ptrace(3,...) function. */
60 struct user_regs_struct32 regs; /* Where the registers are actually stored */ 39 struct compat_user_regs_struct regs;
61 /* The rest of this junk is to help gdb figure out what goes where */ 40 /* The rest of this junk is to help gdb figure out what goes where */
62 u32 u_tsize; /* Text segment size (pages). */ 41 u32 u_tsize; /* Text segment size (pages). */
63 u32 u_dsize; /* Data segment size (pages). */ 42 u32 u_dsize; /* Data segment size (pages). */
@@ -79,6 +58,6 @@ typedef struct
79 __u32 len; 58 __u32 len;
80 __u32 kernel_addr; 59 __u32 kernel_addr;
81 __u32 process_addr; 60 __u32 process_addr;
82} ptrace_area_emu31; 61} compat_ptrace_area;
83 62
84#endif /* _PTRACE32_H */ 63#endif /* _PTRACE32_H */
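
The compat structures exist only to mirror the 31-bit user-space layout, which is why every member is a fixed-width __u8/__u16/__u32 rather than a natural C type. A hedged sketch of a compile-time guard for such a layout; BUILD_BUG_ON is the standard kernel assertion and the specific checks are illustrative:

    #include <linux/kernel.h>	/* BUILD_BUG_ON */
    #include "compat_ptrace.h"

    /* Hypothetical guard: a 31-bit task sees 4-byte longs, so every
     * address-sized member must stay exactly 4 bytes wide. */
    static inline void compat_layout_checks(void)
    {
            struct compat_per_struct_kernel p;

            BUILD_BUG_ON(sizeof(p.starting_addr) != 4);
            BUILD_BUG_ON(sizeof(p.ending_addr) != 4);
            BUILD_BUG_ON(sizeof(p.address) != 4);
    }
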
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 1ecc337fb679..648f64239a9d 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -9,7 +9,6 @@
9 * Heiko Carstens <heiko.carstens@de.ibm.com> 9 * Heiko Carstens <heiko.carstens@de.ibm.com>
10 */ 10 */
11 11
12#include <linux/sys.h>
13#include <linux/linkage.h> 12#include <linux/linkage.h>
14#include <linux/init.h> 13#include <linux/init.h>
15#include <asm/cache.h> 14#include <asm/cache.h>
@@ -49,7 +48,7 @@ SP_SVCNR = STACK_FRAME_OVERHEAD + __PT_SVCNR
49SP_SIZE = STACK_FRAME_OVERHEAD + __PT_SIZE 48SP_SIZE = STACK_FRAME_OVERHEAD + __PT_SIZE
50 49
51_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ 50_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
52 _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_SINGLE_STEP ) 51 _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_PER_TRAP )
53_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ 52_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
54 _TIF_MCCK_PENDING) 53 _TIF_MCCK_PENDING)
55_TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \ 54_TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
@@ -110,31 +109,36 @@ STACK_SIZE = 1 << STACK_SHIFT
1101: stm %r10,%r11,\lc_sum 1091: stm %r10,%r11,\lc_sum
111 .endm 110 .endm
112 111
113 .macro SAVE_ALL_BASE savearea 112 .macro SAVE_ALL_SVC psworg,savearea
114 stm %r12,%r15,\savearea 113 stm %r12,%r15,\savearea
115 l %r13,__LC_SVC_NEW_PSW+4 # load &system_call to %r13 114 l %r13,__LC_SVC_NEW_PSW+4 # load &system_call to %r13
115 l %r15,__LC_KERNEL_STACK # problem state -> load ksp
116 s %r15,BASED(.Lc_spsize) # make room for registers & psw
116 .endm 117 .endm
117 118
118 .macro SAVE_ALL_SVC psworg,savearea 119 .macro SAVE_ALL_BASE savearea
119 la %r12,\psworg 120 stm %r12,%r15,\savearea
120 l %r15,__LC_KERNEL_STACK # problem state -> load ksp 121 l %r13,__LC_SVC_NEW_PSW+4 # load &system_call to %r13
121 .endm 122 .endm
122 123
123 .macro SAVE_ALL_SYNC psworg,savearea 124 .macro SAVE_ALL_PGM psworg,savearea
124 la %r12,\psworg
125 tm \psworg+1,0x01 # test problem state bit 125 tm \psworg+1,0x01 # test problem state bit
126 bz BASED(2f) # skip stack setup save
127 l %r15,__LC_KERNEL_STACK # problem state -> load ksp
128#ifdef CONFIG_CHECK_STACK 126#ifdef CONFIG_CHECK_STACK
129 b BASED(3f) 127 bnz BASED(1f)
1302: tml %r15,STACK_SIZE - CONFIG_STACK_GUARD 128 tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
131 bz BASED(stack_overflow) 129 bnz BASED(2f)
1323: 130 la %r12,\psworg
131 b BASED(stack_overflow)
132#else
133 bz BASED(2f)
133#endif 134#endif
1342: 1351: l %r15,__LC_KERNEL_STACK # problem state -> load ksp
1362: s %r15,BASED(.Lc_spsize) # make room for registers & psw
135 .endm 137 .endm
136 138
137 .macro SAVE_ALL_ASYNC psworg,savearea 139 .macro SAVE_ALL_ASYNC psworg,savearea
140 stm %r12,%r15,\savearea
141 l %r13,__LC_SVC_NEW_PSW+4 # load &system_call to %r13
138 la %r12,\psworg 142 la %r12,\psworg
139 tm \psworg+1,0x01 # test problem state bit 143 tm \psworg+1,0x01 # test problem state bit
140 bnz BASED(1f) # from user -> load async stack 144 bnz BASED(1f) # from user -> load async stack
@@ -149,27 +153,23 @@ STACK_SIZE = 1 << STACK_SHIFT
1490: l %r14,__LC_ASYNC_STACK # are we already on the async stack ? 1530: l %r14,__LC_ASYNC_STACK # are we already on the async stack ?
150 slr %r14,%r15 154 slr %r14,%r15
151 sra %r14,STACK_SHIFT 155 sra %r14,STACK_SHIFT
152 be BASED(2f)
1531: l %r15,__LC_ASYNC_STACK
154#ifdef CONFIG_CHECK_STACK 156#ifdef CONFIG_CHECK_STACK
155 b BASED(3f) 157 bnz BASED(1f)
1562: tml %r15,STACK_SIZE - CONFIG_STACK_GUARD 158 tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
157 bz BASED(stack_overflow) 159 bnz BASED(2f)
1583: 160 b BASED(stack_overflow)
161#else
162 bz BASED(2f)
159#endif 163#endif
1602: 1641: l %r15,__LC_ASYNC_STACK
1652: s %r15,BASED(.Lc_spsize) # make room for registers & psw
161 .endm 166 .endm
162 167
163 .macro CREATE_STACK_FRAME psworg,savearea 168 .macro CREATE_STACK_FRAME savearea
164 s %r15,BASED(.Lc_spsize) # make room for registers & psw 169 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
165 mvc SP_PSW(8,%r15),0(%r12) # move user PSW to stack
166 st %r2,SP_ORIG_R2(%r15) # store original content of gpr 2 170 st %r2,SP_ORIG_R2(%r15) # store original content of gpr 2
167 icm %r12,12,__LC_SVC_ILC
168 stm %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack
169 st %r12,SP_ILC(%r15)
170 mvc SP_R12(16,%r15),\savearea # move %r12-%r15 to stack 171 mvc SP_R12(16,%r15),\savearea # move %r12-%r15 to stack
171 la %r12,0 172 stm %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack
172 st %r12,__SF_BACKCHAIN(%r15) # clear back chain
173 .endm 173 .endm
174 174
175 .macro RESTORE_ALL psworg,sync 175 .macro RESTORE_ALL psworg,sync
@@ -188,6 +188,8 @@ STACK_SIZE = 1 << STACK_SHIFT
188 ssm __SF_EMPTY(%r15) 188 ssm __SF_EMPTY(%r15)
189 .endm 189 .endm
190 190
191 .section .kprobes.text, "ax"
192
191/* 193/*
192 * Scheduler resume function, called by switch_to 194 * Scheduler resume function, called by switch_to
193 * gpr2 = (task_struct *) prev 195 * gpr2 = (task_struct *) prev
@@ -198,31 +200,21 @@ STACK_SIZE = 1 << STACK_SHIFT
198 .globl __switch_to 200 .globl __switch_to
199__switch_to: 201__switch_to:
200 basr %r1,0 202 basr %r1,0
201__switch_to_base: 2030: l %r4,__THREAD_info(%r2) # get thread_info of prev
202 tm __THREAD_per(%r3),0xe8 # new process is using per ? 204 l %r5,__THREAD_info(%r3) # get thread_info of next
203 bz __switch_to_noper-__switch_to_base(%r1) # if not we're fine
204 stctl %c9,%c11,__SF_EMPTY(%r15) # We are using per stuff
205 clc __THREAD_per(12,%r3),__SF_EMPTY(%r15)
206 be __switch_to_noper-__switch_to_base(%r1) # we got away w/o bashing TLB's
207 lctl %c9,%c11,__THREAD_per(%r3) # Nope we didn't
208__switch_to_noper:
209 l %r4,__THREAD_info(%r2) # get thread_info of prev
210 tm __TI_flags+3(%r4),_TIF_MCCK_PENDING # machine check pending? 205 tm __TI_flags+3(%r4),_TIF_MCCK_PENDING # machine check pending?
211 bz __switch_to_no_mcck-__switch_to_base(%r1) 206 bz 1f-0b(%r1)
212 ni __TI_flags+3(%r4),255-_TIF_MCCK_PENDING # clear flag in prev 207 ni __TI_flags+3(%r4),255-_TIF_MCCK_PENDING # clear flag in prev
213 l %r4,__THREAD_info(%r3) # get thread_info of next 208 oi __TI_flags+3(%r5),_TIF_MCCK_PENDING # set it in next
214 oi __TI_flags+3(%r4),_TIF_MCCK_PENDING # set it in next 2091: stm %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
215__switch_to_no_mcck: 210 st %r15,__THREAD_ksp(%r2) # store kernel stack of prev
216 stm %r6,%r15,__SF_GPRS(%r15)# store __switch_to registers of prev task 211 l %r15,__THREAD_ksp(%r3) # load kernel stack of next
217 st %r15,__THREAD_ksp(%r2) # store kernel stack to prev->tss.ksp 212 lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
218 l %r15,__THREAD_ksp(%r3) # load kernel stack from next->tss.ksp 213 lm %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
219 lm %r6,%r15,__SF_GPRS(%r15)# load __switch_to registers of next task 214 st %r3,__LC_CURRENT # store task struct of next
220 st %r3,__LC_CURRENT # __LC_CURRENT = current task struct 215 st %r5,__LC_THREAD_INFO # store thread info of next
221 lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4 216 ahi %r5,STACK_SIZE # end of kernel stack of next
222 l %r3,__THREAD_info(%r3) # load thread_info from task struct 217 st %r5,__LC_KERNEL_STACK # store end of kernel stack
223 st %r3,__LC_THREAD_INFO
224 ahi %r3,STACK_SIZE
225 st %r3,__LC_KERNEL_STACK # __LC_KERNEL_STACK = new kernel stack
226 br %r14 218 br %r14
227 219
228__critical_start: 220__critical_start:
@@ -235,10 +227,11 @@ __critical_start:
235system_call: 227system_call:
236 stpt __LC_SYNC_ENTER_TIMER 228 stpt __LC_SYNC_ENTER_TIMER
237sysc_saveall: 229sysc_saveall:
238 SAVE_ALL_BASE __LC_SAVE_AREA
239 SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA 230 SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
240 CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA 231 CREATE_STACK_FRAME __LC_SAVE_AREA
241 lh %r7,0x8a # get svc number from lowcore 232 mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW
233 mvc SP_ILC(4,%r15),__LC_SVC_ILC
234 l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
242sysc_vtime: 235sysc_vtime:
243 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER 236 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
244sysc_stime: 237sysc_stime:
@@ -246,20 +239,20 @@ sysc_stime:
246sysc_update: 239sysc_update:
247 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 240 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
248sysc_do_svc: 241sysc_do_svc:
249 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct 242 xr %r7,%r7
250 ltr %r7,%r7 # test for svc 0 243 icm %r7,3,SP_SVCNR(%r15) # load svc number and test for svc 0
251 bnz BASED(sysc_nr_ok) # svc number > 0 244 bnz BASED(sysc_nr_ok) # svc number > 0
252 # svc 0: system call number in %r1 245 # svc 0: system call number in %r1
253 cl %r1,BASED(.Lnr_syscalls) 246 cl %r1,BASED(.Lnr_syscalls)
254 bnl BASED(sysc_nr_ok) 247 bnl BASED(sysc_nr_ok)
248 sth %r1,SP_SVCNR(%r15)
255 lr %r7,%r1 # copy svc number to %r7 249 lr %r7,%r1 # copy svc number to %r7
256sysc_nr_ok: 250sysc_nr_ok:
257 sth %r7,SP_SVCNR(%r15)
258 sll %r7,2 # svc number *4 251 sll %r7,2 # svc number *4
259 l %r8,BASED(.Lsysc_table) 252 l %r10,BASED(.Lsysc_table)
260 tm __TI_flags+2(%r9),_TIF_SYSCALL 253 tm __TI_flags+2(%r12),_TIF_SYSCALL
261 mvc SP_ARGS(4,%r15),SP_R7(%r15) 254 mvc SP_ARGS(4,%r15),SP_R7(%r15)
262 l %r8,0(%r7,%r8) # get system call addr. 255 l %r8,0(%r7,%r10) # get system call addr.
263 bnz BASED(sysc_tracesys) 256 bnz BASED(sysc_tracesys)
264 basr %r14,%r8 # call sys_xxxx 257 basr %r14,%r8 # call sys_xxxx
265 st %r2,SP_R2(%r15) # store return value (change R2 on stack) 258 st %r2,SP_R2(%r15) # store return value (change R2 on stack)
@@ -267,7 +260,7 @@ sysc_nr_ok:
267sysc_return: 260sysc_return:
268 LOCKDEP_SYS_EXIT 261 LOCKDEP_SYS_EXIT
269sysc_tif: 262sysc_tif:
270 tm __TI_flags+3(%r9),_TIF_WORK_SVC 263 tm __TI_flags+3(%r12),_TIF_WORK_SVC
271 bnz BASED(sysc_work) # there is work to do (signals etc.) 264 bnz BASED(sysc_work) # there is work to do (signals etc.)
272sysc_restore: 265sysc_restore:
273 RESTORE_ALL __LC_RETURN_PSW,1 266 RESTORE_ALL __LC_RETURN_PSW,1
@@ -284,17 +277,17 @@ sysc_work:
284# One of the work bits is on. Find out which one. 277# One of the work bits is on. Find out which one.
285# 278#
286sysc_work_tif: 279sysc_work_tif:
287 tm __TI_flags+3(%r9),_TIF_MCCK_PENDING 280 tm __TI_flags+3(%r12),_TIF_MCCK_PENDING
288 bo BASED(sysc_mcck_pending) 281 bo BASED(sysc_mcck_pending)
289 tm __TI_flags+3(%r9),_TIF_NEED_RESCHED 282 tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
290 bo BASED(sysc_reschedule) 283 bo BASED(sysc_reschedule)
291 tm __TI_flags+3(%r9),_TIF_SIGPENDING 284 tm __TI_flags+3(%r12),_TIF_SIGPENDING
292 bo BASED(sysc_sigpending) 285 bo BASED(sysc_sigpending)
293 tm __TI_flags+3(%r9),_TIF_NOTIFY_RESUME 286 tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME
294 bo BASED(sysc_notify_resume) 287 bo BASED(sysc_notify_resume)
295 tm __TI_flags+3(%r9),_TIF_RESTART_SVC 288 tm __TI_flags+3(%r12),_TIF_RESTART_SVC
296 bo BASED(sysc_restart) 289 bo BASED(sysc_restart)
297 tm __TI_flags+3(%r9),_TIF_SINGLE_STEP 290 tm __TI_flags+3(%r12),_TIF_PER_TRAP
298 bo BASED(sysc_singlestep) 291 bo BASED(sysc_singlestep)
299 b BASED(sysc_return) # beware of critical section cleanup 292 b BASED(sysc_return) # beware of critical section cleanup
300 293
@@ -318,13 +311,13 @@ sysc_mcck_pending:
318# _TIF_SIGPENDING is set, call do_signal 311# _TIF_SIGPENDING is set, call do_signal
319# 312#
320sysc_sigpending: 313sysc_sigpending:
321 ni __TI_flags+3(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP 314 ni __TI_flags+3(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
322 la %r2,SP_PTREGS(%r15) # load pt_regs 315 la %r2,SP_PTREGS(%r15) # load pt_regs
323 l %r1,BASED(.Ldo_signal) 316 l %r1,BASED(.Ldo_signal)
324 basr %r14,%r1 # call do_signal 317 basr %r14,%r1 # call do_signal
325 tm __TI_flags+3(%r9),_TIF_RESTART_SVC 318 tm __TI_flags+3(%r12),_TIF_RESTART_SVC
326 bo BASED(sysc_restart) 319 bo BASED(sysc_restart)
327 tm __TI_flags+3(%r9),_TIF_SINGLE_STEP 320 tm __TI_flags+3(%r12),_TIF_PER_TRAP
328 bo BASED(sysc_singlestep) 321 bo BASED(sysc_singlestep)
329 b BASED(sysc_return) 322 b BASED(sysc_return)
330 323
@@ -342,23 +335,23 @@ sysc_notify_resume:
342# _TIF_RESTART_SVC is set, set up registers and restart svc 335# _TIF_RESTART_SVC is set, set up registers and restart svc
343# 336#
344sysc_restart: 337sysc_restart:
345 ni __TI_flags+3(%r9),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC 338 ni __TI_flags+3(%r12),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC
346 l %r7,SP_R2(%r15) # load new svc number 339 l %r7,SP_R2(%r15) # load new svc number
347 mvc SP_R2(4,%r15),SP_ORIG_R2(%r15) # restore first argument 340 mvc SP_R2(4,%r15),SP_ORIG_R2(%r15) # restore first argument
348 lm %r2,%r6,SP_R2(%r15) # load svc arguments 341 lm %r2,%r6,SP_R2(%r15) # load svc arguments
342 sth %r7,SP_SVCNR(%r15)
349 b BASED(sysc_nr_ok) # restart svc 343 b BASED(sysc_nr_ok) # restart svc
350 344
351# 345#
352# _TIF_SINGLE_STEP is set, call do_single_step 346# _TIF_PER_TRAP is set, call do_per_trap
353# 347#
354sysc_singlestep: 348sysc_singlestep:
355 ni __TI_flags+3(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP 349 ni __TI_flags+3(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
356 mvi SP_SVCNR(%r15),0xff # set trap indication to pgm check 350 xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number
357 mvi SP_SVCNR+1(%r15),0xff
358 la %r2,SP_PTREGS(%r15) # address of register-save area 351 la %r2,SP_PTREGS(%r15) # address of register-save area
359 l %r1,BASED(.Lhandle_per) # load adr. of per handler 352 l %r1,BASED(.Lhandle_per) # load adr. of per handler
360 la %r14,BASED(sysc_return) # load adr. of system return 353 la %r14,BASED(sysc_return) # load adr. of system return
361 br %r1 # branch to do_single_step 354 br %r1 # branch to do_per_trap
362 355
363# 356#
364# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before 357# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
@@ -368,15 +361,15 @@ sysc_tracesys:
368 l %r1,BASED(.Ltrace_entry) 361 l %r1,BASED(.Ltrace_entry)
369 la %r2,SP_PTREGS(%r15) # load pt_regs 362 la %r2,SP_PTREGS(%r15) # load pt_regs
370 la %r3,0 363 la %r3,0
371 srl %r7,2 364 xr %r0,%r0
372 st %r7,SP_R2(%r15) 365 icm %r0,3,SP_SVCNR(%r15)
366 st %r0,SP_R2(%r15)
373 basr %r14,%r1 367 basr %r14,%r1
374 cl %r2,BASED(.Lnr_syscalls) 368 cl %r2,BASED(.Lnr_syscalls)
375 bnl BASED(sysc_tracenogo) 369 bnl BASED(sysc_tracenogo)
376 l %r8,BASED(.Lsysc_table)
377 lr %r7,%r2 370 lr %r7,%r2
378 sll %r7,2 # svc number *4 371 sll %r7,2 # svc number *4
379 l %r8,0(%r7,%r8) 372 l %r8,0(%r7,%r10)
380sysc_tracego: 373sysc_tracego:
381 lm %r3,%r6,SP_R3(%r15) 374 lm %r3,%r6,SP_R3(%r15)
382 mvc SP_ARGS(4,%r15),SP_R7(%r15) 375 mvc SP_ARGS(4,%r15),SP_R7(%r15)
@@ -384,7 +377,7 @@ sysc_tracego:
384 basr %r14,%r8 # call sys_xxx 377 basr %r14,%r8 # call sys_xxx
385 st %r2,SP_R2(%r15) # store return value 378 st %r2,SP_R2(%r15) # store return value
386sysc_tracenogo: 379sysc_tracenogo:
387 tm __TI_flags+2(%r9),_TIF_SYSCALL 380 tm __TI_flags+2(%r12),_TIF_SYSCALL
388 bz BASED(sysc_return) 381 bz BASED(sysc_return)
389 l %r1,BASED(.Ltrace_exit) 382 l %r1,BASED(.Ltrace_exit)
390 la %r2,SP_PTREGS(%r15) # load pt_regs 383 la %r2,SP_PTREGS(%r15) # load pt_regs
@@ -397,7 +390,7 @@ sysc_tracenogo:
397 .globl ret_from_fork 390 .globl ret_from_fork
398ret_from_fork: 391ret_from_fork:
399 l %r13,__LC_SVC_NEW_PSW+4 392 l %r13,__LC_SVC_NEW_PSW+4
400 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct 393 l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
401 tm SP_PSW+1(%r15),0x01 # forking a kernel thread ? 394 tm SP_PSW+1(%r15),0x01 # forking a kernel thread ?
402 bo BASED(0f) 395 bo BASED(0f)
403 st %r15,SP_R15(%r15) # store stack pointer for new kthread 396 st %r15,SP_R15(%r15) # store stack pointer for new kthread
@@ -432,8 +425,8 @@ kernel_execve:
4320: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts 4250: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts
433 l %r15,__LC_KERNEL_STACK # load ksp 426 l %r15,__LC_KERNEL_STACK # load ksp
434 s %r15,BASED(.Lc_spsize) # make room for registers & psw 427 s %r15,BASED(.Lc_spsize) # make room for registers & psw
435 l %r9,__LC_THREAD_INFO
436 mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs 428 mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs
429 l %r12,__LC_THREAD_INFO
437 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) 430 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
438 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts 431 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
439 l %r1,BASED(.Lexecve_tail) 432 l %r1,BASED(.Lexecve_tail)
@@ -463,26 +456,27 @@ pgm_check_handler:
463 SAVE_ALL_BASE __LC_SAVE_AREA 456 SAVE_ALL_BASE __LC_SAVE_AREA
464 tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception 457 tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
465 bnz BASED(pgm_per) # got per exception -> special case 458 bnz BASED(pgm_per) # got per exception -> special case
466 SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA 459 SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA
467 CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA 460 CREATE_STACK_FRAME __LC_SAVE_AREA
461 xc SP_ILC(4,%r15),SP_ILC(%r15)
462 mvc SP_PSW(8,%r15),__LC_PGM_OLD_PSW
463 l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
468 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 464 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
469 bz BASED(pgm_no_vtime) 465 bz BASED(pgm_no_vtime)
470 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER 466 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
471 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 467 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
472 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 468 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
473pgm_no_vtime: 469pgm_no_vtime:
474 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
475 l %r3,__LC_PGM_ILC # load program interruption code 470 l %r3,__LC_PGM_ILC # load program interruption code
476 l %r4,__LC_TRANS_EXC_CODE 471 l %r4,__LC_TRANS_EXC_CODE
477 REENABLE_IRQS 472 REENABLE_IRQS
478 la %r8,0x7f 473 la %r8,0x7f
479 nr %r8,%r3 474 nr %r8,%r3
480pgm_do_call:
481 l %r7,BASED(.Ljump_table)
482 sll %r8,2 475 sll %r8,2
483 l %r7,0(%r8,%r7) # load address of handler routine 476 l %r1,BASED(.Ljump_table)
477 l %r1,0(%r8,%r1) # load address of handler routine
484 la %r2,SP_PTREGS(%r15) # address of register-save area 478 la %r2,SP_PTREGS(%r15) # address of register-save area
485 basr %r14,%r7 # branch to interrupt-handler 479 basr %r14,%r1 # branch to interrupt-handler
486pgm_exit: 480pgm_exit:
487 b BASED(sysc_return) 481 b BASED(sysc_return)
488 482
@@ -503,33 +497,34 @@ pgm_per:
503# Normal per exception 497# Normal per exception
504# 498#
505pgm_per_std: 499pgm_per_std:
506 SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA 500 SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA
507 CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA 501 CREATE_STACK_FRAME __LC_SAVE_AREA
502 mvc SP_PSW(8,%r15),__LC_PGM_OLD_PSW
503 l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
508 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 504 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
509 bz BASED(pgm_no_vtime2) 505 bz BASED(pgm_no_vtime2)
510 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER 506 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
511 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 507 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
512 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 508 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
513pgm_no_vtime2: 509pgm_no_vtime2:
514 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct 510 l %r1,__TI_task(%r12)
515 l %r1,__TI_task(%r9)
516 tm SP_PSW+1(%r15),0x01 # kernel per event ? 511 tm SP_PSW+1(%r15),0x01 # kernel per event ?
517 bz BASED(kernel_per) 512 bz BASED(kernel_per)
518 mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID 513 mvc __THREAD_per_cause(2,%r1),__LC_PER_CAUSE
519 mvc __THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS 514 mvc __THREAD_per_address(4,%r1),__LC_PER_ADDRESS
520 mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID 515 mvc __THREAD_per_paid(1,%r1),__LC_PER_PAID
521 oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP 516 oi __TI_flags+3(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
522 l %r3,__LC_PGM_ILC # load program interruption code 517 l %r3,__LC_PGM_ILC # load program interruption code
523 l %r4,__LC_TRANS_EXC_CODE 518 l %r4,__LC_TRANS_EXC_CODE
524 REENABLE_IRQS 519 REENABLE_IRQS
525 la %r8,0x7f 520 la %r8,0x7f
526 nr %r8,%r3 # clear per-event-bit and ilc 521 nr %r8,%r3 # clear per-event-bit and ilc
527 be BASED(pgm_exit2) # only per or per+check ? 522 be BASED(pgm_exit2) # only per or per+check ?
528 l %r7,BASED(.Ljump_table)
529 sll %r8,2 523 sll %r8,2
530 l %r7,0(%r8,%r7) # load address of handler routine 524 l %r1,BASED(.Ljump_table)
525 l %r1,0(%r8,%r1) # load address of handler routine
531 la %r2,SP_PTREGS(%r15) # address of register-save area 526 la %r2,SP_PTREGS(%r15) # address of register-save area
532 basr %r14,%r7 # branch to interrupt-handler 527 basr %r14,%r1 # branch to interrupt-handler
533pgm_exit2: 528pgm_exit2:
534 b BASED(sysc_return) 529 b BASED(sysc_return)
535 530
@@ -537,18 +532,19 @@ pgm_exit2:
537# it was a single stepped SVC that is causing all the trouble 532# it was a single stepped SVC that is causing all the trouble
538# 533#
539pgm_svcper: 534pgm_svcper:
540 SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA 535 SAVE_ALL_PGM __LC_SVC_OLD_PSW,__LC_SAVE_AREA
541 CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA 536 CREATE_STACK_FRAME __LC_SAVE_AREA
537 mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW
538 mvc SP_ILC(4,%r15),__LC_SVC_ILC
539 l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
542 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER 540 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
543 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 541 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
544 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 542 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
545 lh %r7,0x8a # get svc number from lowcore 543 l %r8,__TI_task(%r12)
546 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct 544 mvc __THREAD_per_cause(2,%r8),__LC_PER_CAUSE
547 l %r8,__TI_task(%r9) 545 mvc __THREAD_per_address(4,%r8),__LC_PER_ADDRESS
548 mvc __THREAD_per+__PER_atmid(2,%r8),__LC_PER_ATMID 546 mvc __THREAD_per_paid(1,%r8),__LC_PER_PAID
549 mvc __THREAD_per+__PER_address(4,%r8),__LC_PER_ADDRESS 547 oi __TI_flags+3(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
550 mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID
551 oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
552 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts 548 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
553 lm %r2,%r6,SP_R2(%r15) # load svc arguments 549 lm %r2,%r6,SP_R2(%r15) # load svc arguments
554 b BASED(sysc_do_svc) 550 b BASED(sysc_do_svc)
@@ -558,8 +554,7 @@ pgm_svcper:
558# 554#
559kernel_per: 555kernel_per:
560 REENABLE_IRQS 556 REENABLE_IRQS
561 mvi SP_SVCNR(%r15),0xff # set trap indication to pgm check 557 xc SP_SVCNR(2,%r15),SP_SVCNR(%r15)
562 mvi SP_SVCNR+1(%r15),0xff
563 la %r2,SP_PTREGS(%r15) # address of register-save area 558 la %r2,SP_PTREGS(%r15) # address of register-save area
564 l %r1,BASED(.Lhandle_per) # load adr. of per handler 559 l %r1,BASED(.Lhandle_per) # load adr. of per handler
 565 basr %r14,%r1 # branch to do_single_step 560 basr %r14,%r1 # branch to do_per_trap
@@ -573,9 +568,10 @@ kernel_per:
573io_int_handler: 568io_int_handler:
574 stck __LC_INT_CLOCK 569 stck __LC_INT_CLOCK
575 stpt __LC_ASYNC_ENTER_TIMER 570 stpt __LC_ASYNC_ENTER_TIMER
576 SAVE_ALL_BASE __LC_SAVE_AREA+16
577 SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16 571 SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
578 CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+16 572 CREATE_STACK_FRAME __LC_SAVE_AREA+16
573 mvc SP_PSW(8,%r15),0(%r12) # move user PSW to stack
574 l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
579 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 575 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
580 bz BASED(io_no_vtime) 576 bz BASED(io_no_vtime)
581 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER 577 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
@@ -583,7 +579,6 @@ io_int_handler:
583 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER 579 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
584io_no_vtime: 580io_no_vtime:
585 TRACE_IRQS_OFF 581 TRACE_IRQS_OFF
586 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
587 l %r1,BASED(.Ldo_IRQ) # load address of do_IRQ 582 l %r1,BASED(.Ldo_IRQ) # load address of do_IRQ
588 la %r2,SP_PTREGS(%r15) # address of register-save area 583 la %r2,SP_PTREGS(%r15) # address of register-save area
589 basr %r14,%r1 # branch to standard irq handler 584 basr %r14,%r1 # branch to standard irq handler
@@ -591,7 +586,7 @@ io_return:
591 LOCKDEP_SYS_EXIT 586 LOCKDEP_SYS_EXIT
592 TRACE_IRQS_ON 587 TRACE_IRQS_ON
593io_tif: 588io_tif:
594 tm __TI_flags+3(%r9),_TIF_WORK_INT 589 tm __TI_flags+3(%r12),_TIF_WORK_INT
595 bnz BASED(io_work) # there is work to do (signals etc.) 590 bnz BASED(io_work) # there is work to do (signals etc.)
596io_restore: 591io_restore:
597 RESTORE_ALL __LC_RETURN_PSW,0 592 RESTORE_ALL __LC_RETURN_PSW,0
@@ -609,9 +604,9 @@ io_work:
609 bo BASED(io_work_user) # yes -> do resched & signal 604 bo BASED(io_work_user) # yes -> do resched & signal
610#ifdef CONFIG_PREEMPT 605#ifdef CONFIG_PREEMPT
611 # check for preemptive scheduling 606 # check for preemptive scheduling
612 icm %r0,15,__TI_precount(%r9) 607 icm %r0,15,__TI_precount(%r12)
613 bnz BASED(io_restore) # preemption disabled 608 bnz BASED(io_restore) # preemption disabled
614 tm __TI_flags+3(%r9),_TIF_NEED_RESCHED 609 tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
615 bno BASED(io_restore) 610 bno BASED(io_restore)
616 # switch to kernel stack 611 # switch to kernel stack
617 l %r1,SP_R15(%r15) 612 l %r1,SP_R15(%r15)
@@ -645,13 +640,13 @@ io_work_user:
645# and _TIF_MCCK_PENDING 640# and _TIF_MCCK_PENDING
646# 641#
647io_work_tif: 642io_work_tif:
648 tm __TI_flags+3(%r9),_TIF_MCCK_PENDING 643 tm __TI_flags+3(%r12),_TIF_MCCK_PENDING
649 bo BASED(io_mcck_pending) 644 bo BASED(io_mcck_pending)
650 tm __TI_flags+3(%r9),_TIF_NEED_RESCHED 645 tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
651 bo BASED(io_reschedule) 646 bo BASED(io_reschedule)
652 tm __TI_flags+3(%r9),_TIF_SIGPENDING 647 tm __TI_flags+3(%r12),_TIF_SIGPENDING
653 bo BASED(io_sigpending) 648 bo BASED(io_sigpending)
654 tm __TI_flags+3(%r9),_TIF_NOTIFY_RESUME 649 tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME
655 bo BASED(io_notify_resume) 650 bo BASED(io_notify_resume)
656 b BASED(io_return) # beware of critical section cleanup 651 b BASED(io_return) # beware of critical section cleanup
657 652
@@ -711,16 +706,16 @@ io_notify_resume:
711ext_int_handler: 706ext_int_handler:
712 stck __LC_INT_CLOCK 707 stck __LC_INT_CLOCK
713 stpt __LC_ASYNC_ENTER_TIMER 708 stpt __LC_ASYNC_ENTER_TIMER
714 SAVE_ALL_BASE __LC_SAVE_AREA+16
715 SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16 709 SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
716 CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16 710 CREATE_STACK_FRAME __LC_SAVE_AREA+16
711 mvc SP_PSW(8,%r15),0(%r12) # move user PSW to stack
712 l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
717 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 713 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
718 bz BASED(ext_no_vtime) 714 bz BASED(ext_no_vtime)
719 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER 715 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
720 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 716 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
721 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER 717 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
722ext_no_vtime: 718ext_no_vtime:
723 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
724 TRACE_IRQS_OFF 719 TRACE_IRQS_OFF
725 la %r2,SP_PTREGS(%r15) # address of register-save area 720 la %r2,SP_PTREGS(%r15) # address of register-save area
726 l %r3,__LC_CPU_ADDRESS # get cpu address + interruption code 721 l %r3,__LC_CPU_ADDRESS # get cpu address + interruption code
@@ -775,7 +770,10 @@ mcck_int_main:
775 sra %r14,PAGE_SHIFT 770 sra %r14,PAGE_SHIFT
776 be BASED(0f) 771 be BASED(0f)
777 l %r15,__LC_PANIC_STACK # load panic stack 772 l %r15,__LC_PANIC_STACK # load panic stack
7780: CREATE_STACK_FRAME __LC_MCK_OLD_PSW,__LC_SAVE_AREA+32 7730: s %r15,BASED(.Lc_spsize) # make room for registers & psw
774 CREATE_STACK_FRAME __LC_SAVE_AREA+32
775 mvc SP_PSW(8,%r15),0(%r12)
776 l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
779 tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid? 777 tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid?
780 bno BASED(mcck_no_vtime) # no -> skip cleanup critical 778 bno BASED(mcck_no_vtime) # no -> skip cleanup critical
781 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 779 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
@@ -784,7 +782,6 @@ mcck_int_main:
784 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 782 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
785 mvc __LC_LAST_UPDATE_TIMER(8),__LC_MCCK_ENTER_TIMER 783 mvc __LC_LAST_UPDATE_TIMER(8),__LC_MCCK_ENTER_TIMER
786mcck_no_vtime: 784mcck_no_vtime:
787 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
788 la %r2,SP_PTREGS(%r15) # load pt_regs 785 la %r2,SP_PTREGS(%r15) # load pt_regs
789 l %r1,BASED(.Ls390_mcck) 786 l %r1,BASED(.Ls390_mcck)
790 basr %r14,%r1 # call machine check handler 787 basr %r14,%r1 # call machine check handler
@@ -796,7 +793,7 @@ mcck_no_vtime:
796 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain 793 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
797 lr %r15,%r1 794 lr %r15,%r1
798 stosm __SF_EMPTY(%r15),0x04 # turn dat on 795 stosm __SF_EMPTY(%r15),0x04 # turn dat on
799 tm __TI_flags+3(%r9),_TIF_MCCK_PENDING 796 tm __TI_flags+3(%r12),_TIF_MCCK_PENDING
800 bno BASED(mcck_return) 797 bno BASED(mcck_return)
801 TRACE_IRQS_OFF 798 TRACE_IRQS_OFF
802 l %r1,BASED(.Ls390_handle_mcck) 799 l %r1,BASED(.Ls390_handle_mcck)
@@ -861,6 +858,8 @@ restart_crash:
861restart_go: 858restart_go:
862#endif 859#endif
863 860
861 .section .kprobes.text, "ax"
862
864#ifdef CONFIG_CHECK_STACK 863#ifdef CONFIG_CHECK_STACK
865/* 864/*
866 * The synchronous or the asynchronous stack overflowed. We are dead. 865 * The synchronous or the asynchronous stack overflowed. We are dead.
@@ -943,12 +942,13 @@ cleanup_system_call:
943 bh BASED(0f) 942 bh BASED(0f)
944 mvc __LC_SAVE_AREA(16),0(%r12) 943 mvc __LC_SAVE_AREA(16),0(%r12)
9450: st %r13,4(%r12) 9440: st %r13,4(%r12)
946 st %r12,__LC_SAVE_AREA+48 # argh 945 l %r15,__LC_KERNEL_STACK # problem state -> load ksp
947 SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA 946 s %r15,BASED(.Lc_spsize) # make room for registers & psw
948 CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
949 l %r12,__LC_SAVE_AREA+48 # argh
950 st %r15,12(%r12) 947 st %r15,12(%r12)
951 lh %r7,0x8a 948 CREATE_STACK_FRAME __LC_SAVE_AREA
949 mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW
950 mvc SP_ILC(4,%r15),__LC_SVC_ILC
951 mvc 0(4,%r12),__LC_THREAD_INFO
952cleanup_vtime: 952cleanup_vtime:
953 clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+12) 953 clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+12)
954 bhe BASED(cleanup_stime) 954 bhe BASED(cleanup_stime)
@@ -1046,7 +1046,7 @@ cleanup_io_restore_insn:
1046.Ldo_signal: .long do_signal 1046.Ldo_signal: .long do_signal
1047.Ldo_notify_resume: 1047.Ldo_notify_resume:
1048 .long do_notify_resume 1048 .long do_notify_resume
1049.Lhandle_per: .long do_single_step 1049.Lhandle_per: .long do_per_trap
1050.Ldo_execve: .long do_execve 1050.Ldo_execve: .long do_execve
1051.Lexecve_tail: .long execve_tail 1051.Lexecve_tail: .long execve_tail
1052.Ljump_table: .long pgm_check_table 1052.Ljump_table: .long pgm_check_table
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index 95c1dfc4ef31..17a6f83a2d67 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -12,7 +12,7 @@ pgm_check_handler_t do_dat_exception;
12 12
13extern int sysctl_userprocess_debug; 13extern int sysctl_userprocess_debug;
14 14
15void do_single_step(struct pt_regs *regs); 15void do_per_trap(struct pt_regs *regs);
16void syscall_trace(struct pt_regs *regs, int entryexit); 16void syscall_trace(struct pt_regs *regs, int entryexit);
17void kernel_stack_overflow(struct pt_regs * regs); 17void kernel_stack_overflow(struct pt_regs * regs);
18void do_signal(struct pt_regs *regs); 18void do_signal(struct pt_regs *regs);
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 8f3e802174db..9d3603d6c511 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -51,7 +51,7 @@ STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
51STACK_SIZE = 1 << STACK_SHIFT 51STACK_SIZE = 1 << STACK_SHIFT
52 52
53_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ 53_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
54 _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_SINGLE_STEP ) 54 _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_PER_TRAP )
55_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ 55_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
56 _TIF_MCCK_PENDING) 56 _TIF_MCCK_PENDING)
57_TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \ 57_TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
@@ -197,6 +197,8 @@ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
197 ssm __SF_EMPTY(%r15) 197 ssm __SF_EMPTY(%r15)
198 .endm 198 .endm
199 199
200 .section .kprobes.text, "ax"
201
200/* 202/*
201 * Scheduler resume function, called by switch_to 203 * Scheduler resume function, called by switch_to
202 * gpr2 = (task_struct *) prev 204 * gpr2 = (task_struct *) prev
@@ -206,30 +208,21 @@ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
206 */ 208 */
207 .globl __switch_to 209 .globl __switch_to
208__switch_to: 210__switch_to:
209 tm __THREAD_per+4(%r3),0xe8 # is the new process using per ? 211 lg %r4,__THREAD_info(%r2) # get thread_info of prev
210 jz __switch_to_noper # if not we're fine 212 lg %r5,__THREAD_info(%r3) # get thread_info of next
211 stctg %c9,%c11,__SF_EMPTY(%r15)# We are using per stuff
212 clc __THREAD_per(24,%r3),__SF_EMPTY(%r15)
213 je __switch_to_noper # we got away without bashing TLB's
214 lctlg %c9,%c11,__THREAD_per(%r3) # Nope we didn't
215__switch_to_noper:
216 lg %r4,__THREAD_info(%r2) # get thread_info of prev
217 tm __TI_flags+7(%r4),_TIF_MCCK_PENDING # machine check pending? 213 tm __TI_flags+7(%r4),_TIF_MCCK_PENDING # machine check pending?
218 jz __switch_to_no_mcck 214 jz 0f
219 ni __TI_flags+7(%r4),255-_TIF_MCCK_PENDING # clear flag in prev 215 ni __TI_flags+7(%r4),255-_TIF_MCCK_PENDING # clear flag in prev
220 lg %r4,__THREAD_info(%r3) # get thread_info of next 216 oi __TI_flags+7(%r5),_TIF_MCCK_PENDING # set it in next
221 oi __TI_flags+7(%r4),_TIF_MCCK_PENDING # set it in next 2170: stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
222__switch_to_no_mcck: 218 stg %r15,__THREAD_ksp(%r2) # store kernel stack of prev
223 stmg %r6,%r15,__SF_GPRS(%r15)# store __switch_to registers of prev task 219 lg %r15,__THREAD_ksp(%r3) # load kernel stack of next
224 stg %r15,__THREAD_ksp(%r2) # store kernel stack to prev->tss.ksp 220 lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
225 lg %r15,__THREAD_ksp(%r3) # load kernel stack from next->tss.ksp 221 lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
226 lmg %r6,%r15,__SF_GPRS(%r15)# load __switch_to registers of next task 222 stg %r3,__LC_CURRENT # store task struct of next
227 stg %r3,__LC_CURRENT # __LC_CURRENT = current task struct 223 stg %r5,__LC_THREAD_INFO # store thread info of next
228 lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4 224 aghi %r5,STACK_SIZE # end of kernel stack of next
229 lg %r3,__THREAD_info(%r3) # load thread_info from task struct 225 stg %r5,__LC_KERNEL_STACK # store end of kernel stack
230 stg %r3,__LC_THREAD_INFO
231 aghi %r3,STACK_SIZE
232 stg %r3,__LC_KERNEL_STACK # __LC_KERNEL_STACK = new kernel stack
233 br %r14 226 br %r14
234 227
235__critical_start: 228__critical_start:
@@ -309,7 +302,7 @@ sysc_work_tif:
309 jo sysc_notify_resume 302 jo sysc_notify_resume
310 tm __TI_flags+7(%r12),_TIF_RESTART_SVC 303 tm __TI_flags+7(%r12),_TIF_RESTART_SVC
311 jo sysc_restart 304 jo sysc_restart
312 tm __TI_flags+7(%r12),_TIF_SINGLE_STEP 305 tm __TI_flags+7(%r12),_TIF_PER_TRAP
313 jo sysc_singlestep 306 jo sysc_singlestep
314 j sysc_return # beware of critical section cleanup 307 j sysc_return # beware of critical section cleanup
315 308
@@ -331,12 +324,12 @@ sysc_mcck_pending:
331# _TIF_SIGPENDING is set, call do_signal 324# _TIF_SIGPENDING is set, call do_signal
332# 325#
333sysc_sigpending: 326sysc_sigpending:
334 ni __TI_flags+7(%r12),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP 327 ni __TI_flags+7(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
335 la %r2,SP_PTREGS(%r15) # load pt_regs 328 la %r2,SP_PTREGS(%r15) # load pt_regs
336 brasl %r14,do_signal # call do_signal 329 brasl %r14,do_signal # call do_signal
337 tm __TI_flags+7(%r12),_TIF_RESTART_SVC 330 tm __TI_flags+7(%r12),_TIF_RESTART_SVC
338 jo sysc_restart 331 jo sysc_restart
339 tm __TI_flags+7(%r12),_TIF_SINGLE_STEP 332 tm __TI_flags+7(%r12),_TIF_PER_TRAP
340 jo sysc_singlestep 333 jo sysc_singlestep
341 j sysc_return 334 j sysc_return
342 335
@@ -361,14 +354,14 @@ sysc_restart:
361 j sysc_nr_ok # restart svc 354 j sysc_nr_ok # restart svc
362 355
363# 356#
364# _TIF_SINGLE_STEP is set, call do_single_step 357# _TIF_PER_TRAP is set, call do_per_trap
365# 358#
366sysc_singlestep: 359sysc_singlestep:
367 ni __TI_flags+7(%r12),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP 360 ni __TI_flags+7(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
368 xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number 361 xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number
369 la %r2,SP_PTREGS(%r15) # address of register-save area 362 la %r2,SP_PTREGS(%r15) # address of register-save area
370 larl %r14,sysc_return # load adr. of system return 363 larl %r14,sysc_return # load adr. of system return
371 jg do_single_step # branch to do_sigtrap 364 jg do_per_trap
372 365
373# 366#
374# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before 367# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
@@ -524,10 +517,10 @@ pgm_no_vtime2:
524 lg %r1,__TI_task(%r12) 517 lg %r1,__TI_task(%r12)
525 tm SP_PSW+1(%r15),0x01 # kernel per event ? 518 tm SP_PSW+1(%r15),0x01 # kernel per event ?
526 jz kernel_per 519 jz kernel_per
527 mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID 520 mvc __THREAD_per_cause(2,%r1),__LC_PER_CAUSE
528 mvc __THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS 521 mvc __THREAD_per_address(8,%r1),__LC_PER_ADDRESS
529 mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID 522 mvc __THREAD_per_paid(1,%r1),__LC_PER_PAID
530 oi __TI_flags+7(%r12),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP 523 oi __TI_flags+7(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
531 lgf %r3,__LC_PGM_ILC # load program interruption code 524 lgf %r3,__LC_PGM_ILC # load program interruption code
532 lg %r4,__LC_TRANS_EXC_CODE 525 lg %r4,__LC_TRANS_EXC_CODE
533 REENABLE_IRQS 526 REENABLE_IRQS
@@ -556,10 +549,10 @@ pgm_svcper:
556 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 549 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
557 LAST_BREAK 550 LAST_BREAK
558 lg %r8,__TI_task(%r12) 551 lg %r8,__TI_task(%r12)
559 mvc __THREAD_per+__PER_atmid(2,%r8),__LC_PER_ATMID 552 mvc __THREAD_per_cause(2,%r8),__LC_PER_CAUSE
560 mvc __THREAD_per+__PER_address(8,%r8),__LC_PER_ADDRESS 553 mvc __THREAD_per_address(8,%r8),__LC_PER_ADDRESS
561 mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID 554 mvc __THREAD_per_paid(1,%r8),__LC_PER_PAID
562 oi __TI_flags+7(%r12),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP 555 oi __TI_flags+7(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
563 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts 556 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
564 lmg %r2,%r6,SP_R2(%r15) # load svc arguments 557 lmg %r2,%r6,SP_R2(%r15) # load svc arguments
565 j sysc_do_svc 558 j sysc_do_svc
@@ -571,7 +564,7 @@ kernel_per:
571 REENABLE_IRQS 564 REENABLE_IRQS
572 xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number 565 xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number
573 la %r2,SP_PTREGS(%r15) # address of register-save area 566 la %r2,SP_PTREGS(%r15) # address of register-save area
574 brasl %r14,do_single_step 567 brasl %r14,do_per_trap
575 j pgm_exit 568 j pgm_exit
576 569
577/* 570/*
@@ -868,6 +861,8 @@ restart_crash:
868restart_go: 861restart_go:
869#endif 862#endif
870 863
864 .section .kprobes.text, "ax"
865
871#ifdef CONFIG_CHECK_STACK 866#ifdef CONFIG_CHECK_STACK
872/* 867/*
873 * The synchronous or the asynchronous stack overflowed. We are dead. 868 * The synchronous or the asynchronous stack overflowed. We are dead.
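The entry code above records the PER (program-event recording) state under its new names: __LC_PER_CAUSE, __LC_PER_ADDRESS and __LC_PER_PAID replace the old atmid/address/access_id triple, and TIF_PER_TRAP replaces TIF_SINGLE_STEP as the "report this event on the way back to user space" flag. A minimal C sketch of what the mvc/oi sequences do; the per_event field of struct thread_struct is taken from the process.c hunk below, but the member and lowcore field names here are assumptions read off the asm offsets, not the literal declarations:

        /* Sketch only: the real code is the mvc/oi sequences above. */
        struct per_event_sketch {
                unsigned short cause;   /* 2 bytes from __LC_PER_CAUSE */
                unsigned long address;  /* 8 bytes from __LC_PER_ADDRESS */
                unsigned char paid;     /* 1 byte from __LC_PER_PAID */
        };

        static void record_per_event(struct task_struct *tsk,
                                     const struct per_event_sketch *lowcore)
        {
                tsk->thread.per_event.cause = lowcore->cause;     /* assumed layout */
                tsk->thread.per_event.address = lowcore->address;
                tsk->thread.per_event.paid = lowcore->paid;
                /* The event itself is handled on the exit path, like a signal. */
                set_tsk_thread_flag(tsk, TIF_PER_TRAP);
        }
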
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 6a83d0581317..78bdf0e5dff7 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -4,7 +4,7 @@
4 * Copyright IBM Corp. 2009 4 * Copyright IBM Corp. 2009
5 * 5 *
6 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>, 6 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
7 * 7 * Martin Schwidefsky <schwidefsky@de.ibm.com>
8 */ 8 */
9 9
10#include <linux/hardirq.h> 10#include <linux/hardirq.h>
@@ -12,176 +12,144 @@
12#include <linux/ftrace.h> 12#include <linux/ftrace.h>
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/types.h> 14#include <linux/types.h>
15#include <linux/kprobes.h>
15#include <trace/syscall.h> 16#include <trace/syscall.h>
16#include <asm/asm-offsets.h> 17#include <asm/asm-offsets.h>
17 18
19#ifdef CONFIG_64BIT
20#define MCOUNT_OFFSET_RET 12
21#else
22#define MCOUNT_OFFSET_RET 22
23#endif
24
18#ifdef CONFIG_DYNAMIC_FTRACE 25#ifdef CONFIG_DYNAMIC_FTRACE
19 26
20void ftrace_disable_code(void); 27void ftrace_disable_code(void);
21void ftrace_disable_return(void); 28void ftrace_enable_insn(void);
22void ftrace_call_code(void);
23void ftrace_nop_code(void);
24
25#define FTRACE_INSN_SIZE 4
26 29
27#ifdef CONFIG_64BIT 30#ifdef CONFIG_64BIT
28 31/*
32 * The 64-bit mcount code looks like this:
33 * stg %r14,8(%r15) # offset 0
34 * > larl %r1,<&counter> # offset 6
35 * > brasl %r14,_mcount # offset 12
36 * lg %r14,8(%r15) # offset 18
37 * Total length is 24 bytes. The middle two instructions of the mcount
38 * block get overwritten by ftrace_make_nop / ftrace_make_call.
39 * The 64-bit enabled ftrace code block looks like this:
40 * stg %r14,8(%r15) # offset 0
41 * > lg %r1,__LC_FTRACE_FUNC # offset 6
42 * > lgr %r0,%r0 # offset 12
43 * > basr %r14,%r1 # offset 16
 44 * lg %r14,8(%r15) # offset 18
45 * The return points of the mcount/ftrace function have the same offset 18.
46 * The 64-bit disable ftrace code block looks like this:
47 * stg %r14,8(%r15) # offset 0
48 * > jg .+18 # offset 6
49 * > lgr %r0,%r0 # offset 12
50 * > basr %r14,%r1 # offset 16
 51 * lg %r14,8(%r15) # offset 18
52 * The jg instruction branches to offset 24 to skip as many instructions
53 * as possible.
54 */
29asm( 55asm(
30 " .align 4\n" 56 " .align 4\n"
31 "ftrace_disable_code:\n" 57 "ftrace_disable_code:\n"
32 " j 0f\n" 58 " jg 0f\n"
33 " .word 0x0024\n"
34 " lg %r1,"__stringify(__LC_FTRACE_FUNC)"\n"
35 " basr %r14,%r1\n"
36 "ftrace_disable_return:\n"
37 " lg %r14,8(15)\n"
38 " lgr %r0,%r0\n" 59 " lgr %r0,%r0\n"
39 "0:\n"); 60 " basr %r14,%r1\n"
40 61 "0:\n"
41asm(
42 " .align 4\n" 62 " .align 4\n"
43 "ftrace_nop_code:\n" 63 "ftrace_enable_insn:\n"
44 " j .+"__stringify(MCOUNT_INSN_SIZE)"\n"); 64 " lg %r1,"__stringify(__LC_FTRACE_FUNC)"\n");
45 65
46asm( 66#define FTRACE_INSN_SIZE 6
47 " .align 4\n"
48 "ftrace_call_code:\n"
49 " stg %r14,8(%r15)\n");
50 67
51#else /* CONFIG_64BIT */ 68#else /* CONFIG_64BIT */
52 69/*
70 * The 31-bit mcount code looks like this:
71 * st %r14,4(%r15) # offset 0
72 * > bras %r1,0f # offset 4
73 * > .long _mcount # offset 8
74 * > .long <&counter> # offset 12
75 * > 0: l %r14,0(%r1) # offset 16
76 * > l %r1,4(%r1) # offset 20
77 * basr %r14,%r14 # offset 24
78 * l %r14,4(%r15) # offset 26
79 * Total length is 30 bytes. The twenty bytes starting from offset 4
80 * to offset 24 get overwritten by ftrace_make_nop / ftrace_make_call.
81 * The 31-bit enabled ftrace code block looks like this:
82 * st %r14,4(%r15) # offset 0
83 * > l %r14,__LC_FTRACE_FUNC # offset 4
84 * > j 0f # offset 8
85 * > .fill 12,1,0x07 # offset 12
86 * 0: basr %r14,%r14 # offset 24
 87 * l %r14,4(%r15) # offset 26
88 * The return points of the mcount/ftrace function have the same offset 26.
89 * The 31-bit disabled ftrace code block looks like this:
90 * st %r14,4(%r15) # offset 0
91 * > j .+26 # offset 4
92 * > j 0f # offset 8
93 * > .fill 12,1,0x07 # offset 12
94 * 0: basr %r14,%r14 # offset 24
 95 * l %r14,4(%r15) # offset 26
96 * The j instruction branches to offset 30 to skip as many instructions
97 * as possible.
98 */
53asm( 99asm(
54 " .align 4\n" 100 " .align 4\n"
55 "ftrace_disable_code:\n" 101 "ftrace_disable_code:\n"
102 " j 1f\n"
56 " j 0f\n" 103 " j 0f\n"
57 " l %r1,"__stringify(__LC_FTRACE_FUNC)"\n" 104 " .fill 12,1,0x07\n"
58 " basr %r14,%r1\n" 105 "0: basr %r14,%r14\n"
59 "ftrace_disable_return:\n" 106 "1:\n"
60 " l %r14,4(%r15)\n"
61 " j 0f\n"
62 " bcr 0,%r7\n"
63 " bcr 0,%r7\n"
64 " bcr 0,%r7\n"
65 " bcr 0,%r7\n"
66 " bcr 0,%r7\n"
67 " bcr 0,%r7\n"
68 "0:\n");
69
70asm(
71 " .align 4\n" 107 " .align 4\n"
72 "ftrace_nop_code:\n" 108 "ftrace_enable_insn:\n"
73 " j .+"__stringify(MCOUNT_INSN_SIZE)"\n"); 109 " l %r14,"__stringify(__LC_FTRACE_FUNC)"\n");
74 110
75asm( 111#define FTRACE_INSN_SIZE 4
76 " .align 4\n"
77 "ftrace_call_code:\n"
78 " st %r14,4(%r15)\n");
79 112
80#endif /* CONFIG_64BIT */ 113#endif /* CONFIG_64BIT */
81 114
82static int ftrace_modify_code(unsigned long ip,
83 void *old_code, int old_size,
84 void *new_code, int new_size)
85{
86 unsigned char replaced[MCOUNT_INSN_SIZE];
87
88 /*
89 * Note: Due to modules code can disappear and change.
90 * We need to protect against faulting as well as code
91 * changing. We do this by using the probe_kernel_*
92 * functions.
93 * This however is just a simple sanity check.
94 */
95 if (probe_kernel_read(replaced, (void *)ip, old_size))
96 return -EFAULT;
97 if (memcmp(replaced, old_code, old_size) != 0)
98 return -EINVAL;
99 if (probe_kernel_write((void *)ip, new_code, new_size))
100 return -EPERM;
101 return 0;
102}
103
104static int ftrace_make_initial_nop(struct module *mod, struct dyn_ftrace *rec,
105 unsigned long addr)
106{
107 return ftrace_modify_code(rec->ip,
108 ftrace_call_code, FTRACE_INSN_SIZE,
109 ftrace_disable_code, MCOUNT_INSN_SIZE);
110}
111 115
112int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, 116int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
113 unsigned long addr) 117 unsigned long addr)
114{ 118{
115 if (addr == MCOUNT_ADDR) 119 if (probe_kernel_write((void *) rec->ip, ftrace_disable_code,
116 return ftrace_make_initial_nop(mod, rec, addr); 120 MCOUNT_INSN_SIZE))
117 return ftrace_modify_code(rec->ip, 121 return -EPERM;
118 ftrace_call_code, FTRACE_INSN_SIZE, 122 return 0;
119 ftrace_nop_code, FTRACE_INSN_SIZE);
120} 123}
121 124
122int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) 125int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
123{ 126{
124 return ftrace_modify_code(rec->ip, 127 if (probe_kernel_write((void *) rec->ip, ftrace_enable_insn,
125 ftrace_nop_code, FTRACE_INSN_SIZE, 128 FTRACE_INSN_SIZE))
126 ftrace_call_code, FTRACE_INSN_SIZE); 129 return -EPERM;
130 return 0;
127} 131}
128 132
129int ftrace_update_ftrace_func(ftrace_func_t func) 133int ftrace_update_ftrace_func(ftrace_func_t func)
130{ 134{
131 ftrace_dyn_func = (unsigned long)func;
132 return 0; 135 return 0;
133} 136}
134 137
135int __init ftrace_dyn_arch_init(void *data) 138int __init ftrace_dyn_arch_init(void *data)
136{ 139{
137 *(unsigned long *)data = 0; 140 *(unsigned long *) data = 0;
138 return 0; 141 return 0;
139} 142}
140 143
141#endif /* CONFIG_DYNAMIC_FTRACE */ 144#endif /* CONFIG_DYNAMIC_FTRACE */
142 145
143#ifdef CONFIG_FUNCTION_GRAPH_TRACER 146#ifdef CONFIG_FUNCTION_GRAPH_TRACER
144#ifdef CONFIG_DYNAMIC_FTRACE
145/*
146 * Patch the kernel code at ftrace_graph_caller location:
147 * The instruction there is branch relative on condition. The condition mask
148 * is either all ones (always branch aka disable ftrace_graph_caller) or all
149 * zeroes (nop aka enable ftrace_graph_caller).
150 * Instruction format for brc is a7m4xxxx where m is the condition mask.
151 */
152int ftrace_enable_ftrace_graph_caller(void)
153{
154 unsigned short opcode = 0xa704;
155
156 return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
157}
158
159int ftrace_disable_ftrace_graph_caller(void)
160{
161 unsigned short opcode = 0xa7f4;
162
163 return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
164}
165
166static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
167{
168 return addr - (ftrace_disable_return - ftrace_disable_code);
169}
170
171#else /* CONFIG_DYNAMIC_FTRACE */
172
173static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
174{
175 return addr - MCOUNT_OFFSET_RET;
176}
177
178#endif /* CONFIG_DYNAMIC_FTRACE */
179
180/* 147/*
181 * Hook the return address and push it in the stack of return addresses 148 * Hook the return address and push it in the stack of return addresses
182 * in current thread info. 149 * in current thread info.
183 */ 150 */
184unsigned long prepare_ftrace_return(unsigned long ip, unsigned long parent) 151unsigned long __kprobes prepare_ftrace_return(unsigned long parent,
152 unsigned long ip)
185{ 153{
186 struct ftrace_graph_ent trace; 154 struct ftrace_graph_ent trace;
187 155
@@ -189,14 +157,42 @@ unsigned long prepare_ftrace_return(unsigned long ip, unsigned long parent)
189 goto out; 157 goto out;
190 if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY) 158 if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
191 goto out; 159 goto out;
192 trace.func = ftrace_mcount_call_adjust(ip) & PSW_ADDR_INSN; 160 trace.func = (ip & PSW_ADDR_INSN) - MCOUNT_OFFSET_RET;
193 /* Only trace if the calling function expects to. */ 161 /* Only trace if the calling function expects to. */
194 if (!ftrace_graph_entry(&trace)) { 162 if (!ftrace_graph_entry(&trace)) {
195 current->curr_ret_stack--; 163 current->curr_ret_stack--;
196 goto out; 164 goto out;
197 } 165 }
198 parent = (unsigned long)return_to_handler; 166 parent = (unsigned long) return_to_handler;
199out: 167out:
200 return parent; 168 return parent;
201} 169}
170
171#ifdef CONFIG_DYNAMIC_FTRACE
172/*
173 * Patch the kernel code at ftrace_graph_caller location. The instruction
174 * there is branch relative and save to prepare_ftrace_return. To disable
175 * the call to prepare_ftrace_return we patch the bras offset to point
176 * directly after the instructions. To enable the call we calculate
177 * the original offset to prepare_ftrace_return and put it back.
178 */
179int ftrace_enable_ftrace_graph_caller(void)
180{
181 unsigned short offset;
182
183 offset = ((void *) prepare_ftrace_return -
184 (void *) ftrace_graph_caller) / 2;
185 return probe_kernel_write(ftrace_graph_caller + 2,
186 &offset, sizeof(offset));
187}
188
189int ftrace_disable_ftrace_graph_caller(void)
190{
191 static unsigned short offset = 0x0002;
192
193 return probe_kernel_write(ftrace_graph_caller + 2,
194 &offset, sizeof(offset));
195}
196
197#endif /* CONFIG_DYNAMIC_FTRACE */
202#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 198#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
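The enable/disable pair above works because bras is an RI-format instruction, 0xa7x5 followed by a signed 16-bit offset counted in halfwords relative to the instruction address. Writing 0x0002 turns the call into a branch to the immediately following instruction; writing back the real distance to prepare_ftrace_return re-enables it. A small, runnable illustration of the arithmetic (the addresses are invented for the example; the real code patches bytes 2-3 of ftrace_graph_caller with probe_kernel_write):

        #include <stdio.h>

        int main(void)
        {
                /* invented addresses, for illustration only */
                unsigned long graph_caller = 0x00100000; /* the bras insn */
                unsigned long prepare_ret  = 0x00100800; /* prepare_ftrace_return */

                /* offset is in halfwords and must fit a signed 16-bit field */
                unsigned short enable  = (prepare_ret - graph_caller) / 2;
                unsigned short disable = 0x0002; /* next insn: skip the call */

                printf("enable:  immediate 0x%04x\n", enable);
                printf("disable: immediate 0x%04x\n", disable);
                return 0;
        }
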
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 026a37a94fc9..ea5099c9709c 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -1,7 +1,5 @@
1/* 1/*
2 * arch/s390/kernel/irq.c 2 * Copyright IBM Corp. 2004,2010
3 *
4 * Copyright IBM Corp. 2004,2007
5 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), 3 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
6 * Thomas Spatzier (tspat@de.ibm.com) 4 * Thomas Spatzier (tspat@de.ibm.com)
7 * 5 *
@@ -17,12 +15,42 @@
17#include <linux/proc_fs.h> 15#include <linux/proc_fs.h>
18#include <linux/profile.h> 16#include <linux/profile.h>
19 17
18struct irq_class {
19 char *name;
20 char *desc;
21};
22
23static const struct irq_class intrclass_names[] = {
24 {.name = "EXT" },
25 {.name = "I/O" },
26 {.name = "CLK", .desc = "[EXT] Clock Comparator" },
27 {.name = "IPI", .desc = "[EXT] Signal Processor" },
28 {.name = "TMR", .desc = "[EXT] CPU Timer" },
29 {.name = "TAL", .desc = "[EXT] Timing Alert" },
30 {.name = "PFL", .desc = "[EXT] Pseudo Page Fault" },
31 {.name = "DSD", .desc = "[EXT] DASD Diag" },
32 {.name = "VRT", .desc = "[EXT] Virtio" },
33 {.name = "SCP", .desc = "[EXT] Service Call" },
34 {.name = "IUC", .desc = "[EXT] IUCV" },
35 {.name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt" },
36 {.name = "QDI", .desc = "[I/O] QDIO Interrupt" },
37 {.name = "DAS", .desc = "[I/O] DASD" },
38 {.name = "C15", .desc = "[I/O] 3215" },
39 {.name = "C70", .desc = "[I/O] 3270" },
40 {.name = "TAP", .desc = "[I/O] Tape" },
41 {.name = "VMR", .desc = "[I/O] Unit Record Devices" },
42 {.name = "LCS", .desc = "[I/O] LCS" },
43 {.name = "CLW", .desc = "[I/O] CLAW" },
44 {.name = "CTC", .desc = "[I/O] CTC" },
45 {.name = "APB", .desc = "[I/O] AP Bus" },
46 {.name = "NMI", .desc = "[NMI] Machine Check" },
47};
48
20/* 49/*
21 * show_interrupts is needed by /proc/interrupts. 50 * show_interrupts is needed by /proc/interrupts.
22 */ 51 */
23int show_interrupts(struct seq_file *p, void *v) 52int show_interrupts(struct seq_file *p, void *v)
24{ 53{
25 static const char *intrclass_names[] = { "EXT", "I/O", };
26 int i = *(loff_t *) v, j; 54 int i = *(loff_t *) v, j;
27 55
28 get_online_cpus(); 56 get_online_cpus();
@@ -34,15 +62,16 @@ int show_interrupts(struct seq_file *p, void *v)
34 } 62 }
35 63
36 if (i < NR_IRQS) { 64 if (i < NR_IRQS) {
37 seq_printf(p, "%s: ", intrclass_names[i]); 65 seq_printf(p, "%s: ", intrclass_names[i].name);
38#ifndef CONFIG_SMP 66#ifndef CONFIG_SMP
39 seq_printf(p, "%10u ", kstat_irqs(i)); 67 seq_printf(p, "%10u ", kstat_irqs(i));
40#else 68#else
41 for_each_online_cpu(j) 69 for_each_online_cpu(j)
42 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); 70 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
43#endif 71#endif
72 if (intrclass_names[i].desc)
73 seq_printf(p, " %s", intrclass_names[i].desc);
44 seq_putc(p, '\n'); 74 seq_putc(p, '\n');
45
46 } 75 }
47 put_online_cpus(); 76 put_online_cpus();
48 return 0; 77 return 0;
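Each row of the new table is fed by a per-CPU counter: the interrupt source bumps its slot, and show_interrupts() prints one line per slot with the description appended when present (the nmi.c hunk further down does exactly this for NMI_NMI). A sketch of the producing side, assuming the EXTINT_*/IOINT_*/NMI_* enumerators this series adds to asm/irq.h:

        /* Sketch: counting an external clock-comparator interrupt so it
         * shows up in the CLK row of /proc/interrupts. EXTINT_CLK is
         * assumed from the (not shown) asm/irq.h hunk of this series. */
        static inline void count_clock_comparator(void)
        {
                kstat_cpu(smp_processor_id()).irqs[EXTINT_CLK]++;
        }
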
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 2564793ec2b6..1d05d669107c 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -32,34 +32,14 @@
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/hardirq.h> 33#include <linux/hardirq.h>
34 34
35DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; 35DEFINE_PER_CPU(struct kprobe *, current_kprobe);
36DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); 36DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
37 37
38struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}}; 38struct kretprobe_blackpoint kretprobe_blacklist[] = { };
39 39
40int __kprobes arch_prepare_kprobe(struct kprobe *p) 40static int __kprobes is_prohibited_opcode(kprobe_opcode_t *insn)
41{
42 /* Make sure the probe isn't going on a difficult instruction */
43 if (is_prohibited_opcode((kprobe_opcode_t *) p->addr))
44 return -EINVAL;
45
46 if ((unsigned long)p->addr & 0x01)
47 return -EINVAL;
48
49 /* Use the get_insn_slot() facility for correctness */
50 if (!(p->ainsn.insn = get_insn_slot()))
51 return -ENOMEM;
52
53 memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
54
55 get_instruction_type(&p->ainsn);
56 p->opcode = *p->addr;
57 return 0;
58}
59
60int __kprobes is_prohibited_opcode(kprobe_opcode_t *instruction)
61{ 41{
62 switch (*(__u8 *) instruction) { 42 switch (insn[0] >> 8) {
63 case 0x0c: /* bassm */ 43 case 0x0c: /* bassm */
64 case 0x0b: /* bsm */ 44 case 0x0b: /* bsm */
65 case 0x83: /* diag */ 45 case 0x83: /* diag */
@@ -68,7 +48,7 @@ int __kprobes is_prohibited_opcode(kprobe_opcode_t *instruction)
68 case 0xad: /* stosm */ 48 case 0xad: /* stosm */
69 return -EINVAL; 49 return -EINVAL;
70 } 50 }
71 switch (*(__u16 *) instruction) { 51 switch (insn[0]) {
72 case 0x0101: /* pr */ 52 case 0x0101: /* pr */
73 case 0xb25a: /* bsa */ 53 case 0xb25a: /* bsa */
74 case 0xb240: /* bakr */ 54 case 0xb240: /* bakr */
@@ -81,93 +61,92 @@ int __kprobes is_prohibited_opcode(kprobe_opcode_t *instruction)
81 return 0; 61 return 0;
82} 62}
83 63
84void __kprobes get_instruction_type(struct arch_specific_insn *ainsn) 64static int __kprobes get_fixup_type(kprobe_opcode_t *insn)
85{ 65{
86 /* default fixup method */ 66 /* default fixup method */
87 ainsn->fixup = FIXUP_PSW_NORMAL; 67 int fixup = FIXUP_PSW_NORMAL;
88
89 /* save r1 operand */
90 ainsn->reg = (*ainsn->insn & 0xf0) >> 4;
91 68
92 /* save the instruction length (pop 5-5) in bytes */ 69 switch (insn[0] >> 8) {
93 switch (*(__u8 *) (ainsn->insn) >> 6) {
94 case 0:
95 ainsn->ilen = 2;
96 break;
97 case 1:
98 case 2:
99 ainsn->ilen = 4;
100 break;
101 case 3:
102 ainsn->ilen = 6;
103 break;
104 }
105
106 switch (*(__u8 *) ainsn->insn) {
107 case 0x05: /* balr */ 70 case 0x05: /* balr */
108 case 0x0d: /* basr */ 71 case 0x0d: /* basr */
109 ainsn->fixup = FIXUP_RETURN_REGISTER; 72 fixup = FIXUP_RETURN_REGISTER;
110 /* if r2 = 0, no branch will be taken */ 73 /* if r2 = 0, no branch will be taken */
111 if ((*ainsn->insn & 0x0f) == 0) 74 if ((insn[0] & 0x0f) == 0)
112 ainsn->fixup |= FIXUP_BRANCH_NOT_TAKEN; 75 fixup |= FIXUP_BRANCH_NOT_TAKEN;
113 break; 76 break;
114 case 0x06: /* bctr */ 77 case 0x06: /* bctr */
115 case 0x07: /* bcr */ 78 case 0x07: /* bcr */
116 ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN; 79 fixup = FIXUP_BRANCH_NOT_TAKEN;
117 break; 80 break;
118 case 0x45: /* bal */ 81 case 0x45: /* bal */
119 case 0x4d: /* bas */ 82 case 0x4d: /* bas */
120 ainsn->fixup = FIXUP_RETURN_REGISTER; 83 fixup = FIXUP_RETURN_REGISTER;
121 break; 84 break;
122 case 0x47: /* bc */ 85 case 0x47: /* bc */
123 case 0x46: /* bct */ 86 case 0x46: /* bct */
124 case 0x86: /* bxh */ 87 case 0x86: /* bxh */
125 case 0x87: /* bxle */ 88 case 0x87: /* bxle */
126 ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN; 89 fixup = FIXUP_BRANCH_NOT_TAKEN;
127 break; 90 break;
128 case 0x82: /* lpsw */ 91 case 0x82: /* lpsw */
129 ainsn->fixup = FIXUP_NOT_REQUIRED; 92 fixup = FIXUP_NOT_REQUIRED;
130 break; 93 break;
131 case 0xb2: /* lpswe */ 94 case 0xb2: /* lpswe */
132 if (*(((__u8 *) ainsn->insn) + 1) == 0xb2) { 95 if ((insn[0] & 0xff) == 0xb2)
133 ainsn->fixup = FIXUP_NOT_REQUIRED; 96 fixup = FIXUP_NOT_REQUIRED;
134 }
135 break; 97 break;
136 case 0xa7: /* bras */ 98 case 0xa7: /* bras */
137 if ((*ainsn->insn & 0x0f) == 0x05) { 99 if ((insn[0] & 0x0f) == 0x05)
138 ainsn->fixup |= FIXUP_RETURN_REGISTER; 100 fixup |= FIXUP_RETURN_REGISTER;
139 }
140 break; 101 break;
141 case 0xc0: 102 case 0xc0:
142 if ((*ainsn->insn & 0x0f) == 0x00 /* larl */ 103 if ((insn[0] & 0x0f) == 0x00 || /* larl */
143 || (*ainsn->insn & 0x0f) == 0x05) /* brasl */ 104 (insn[0] & 0x0f) == 0x05) /* brasl */
144 ainsn->fixup |= FIXUP_RETURN_REGISTER; 105 fixup |= FIXUP_RETURN_REGISTER;
145 break; 106 break;
146 case 0xeb: 107 case 0xeb:
147 if (*(((__u8 *) ainsn->insn) + 5 ) == 0x44 || /* bxhg */ 108 if ((insn[2] & 0xff) == 0x44 || /* bxhg */
148 *(((__u8 *) ainsn->insn) + 5) == 0x45) {/* bxleg */ 109 (insn[2] & 0xff) == 0x45) /* bxleg */
149 ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN; 110 fixup = FIXUP_BRANCH_NOT_TAKEN;
150 }
151 break; 111 break;
152 case 0xe3: /* bctg */ 112 case 0xe3: /* bctg */
153 if (*(((__u8 *) ainsn->insn) + 5) == 0x46) { 113 if ((insn[2] & 0xff) == 0x46)
154 ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN; 114 fixup = FIXUP_BRANCH_NOT_TAKEN;
155 }
156 break; 115 break;
157 } 116 }
117 return fixup;
118}
119
120int __kprobes arch_prepare_kprobe(struct kprobe *p)
121{
122 if ((unsigned long) p->addr & 0x01)
123 return -EINVAL;
124
125 /* Make sure the probe isn't going on a difficult instruction */
126 if (is_prohibited_opcode(p->addr))
127 return -EINVAL;
128
129 p->opcode = *p->addr;
130 memcpy(p->ainsn.insn, p->addr, ((p->opcode >> 14) + 3) & -2);
131
132 return 0;
158} 133}
159 134
135struct ins_replace_args {
136 kprobe_opcode_t *ptr;
137 kprobe_opcode_t opcode;
138};
139
160static int __kprobes swap_instruction(void *aref) 140static int __kprobes swap_instruction(void *aref)
161{ 141{
162 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 142 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
163 unsigned long status = kcb->kprobe_status; 143 unsigned long status = kcb->kprobe_status;
164 struct ins_replace_args *args = aref; 144 struct ins_replace_args *args = aref;
165 int rc;
166 145
167 kcb->kprobe_status = KPROBE_SWAP_INST; 146 kcb->kprobe_status = KPROBE_SWAP_INST;
168 rc = probe_kernel_write(args->ptr, &args->new, sizeof(args->new)); 147 probe_kernel_write(args->ptr, &args->opcode, sizeof(args->opcode));
169 kcb->kprobe_status = status; 148 kcb->kprobe_status = status;
170 return rc; 149 return 0;
171} 150}
172 151
173void __kprobes arch_arm_kprobe(struct kprobe *p) 152void __kprobes arch_arm_kprobe(struct kprobe *p)
@@ -175,8 +154,7 @@ void __kprobes arch_arm_kprobe(struct kprobe *p)
175 struct ins_replace_args args; 154 struct ins_replace_args args;
176 155
177 args.ptr = p->addr; 156 args.ptr = p->addr;
178 args.old = p->opcode; 157 args.opcode = BREAKPOINT_INSTRUCTION;
179 args.new = BREAKPOINT_INSTRUCTION;
180 stop_machine(swap_instruction, &args, NULL); 158 stop_machine(swap_instruction, &args, NULL);
181} 159}
182 160
@@ -185,64 +163,69 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
185 struct ins_replace_args args; 163 struct ins_replace_args args;
186 164
187 args.ptr = p->addr; 165 args.ptr = p->addr;
188 args.old = BREAKPOINT_INSTRUCTION; 166 args.opcode = p->opcode;
189 args.new = p->opcode;
190 stop_machine(swap_instruction, &args, NULL); 167 stop_machine(swap_instruction, &args, NULL);
191} 168}
192 169
193void __kprobes arch_remove_kprobe(struct kprobe *p) 170void __kprobes arch_remove_kprobe(struct kprobe *p)
194{ 171{
195 if (p->ainsn.insn) {
196 free_insn_slot(p->ainsn.insn, 0);
197 p->ainsn.insn = NULL;
198 }
199} 172}
200 173
201static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs) 174static void __kprobes enable_singlestep(struct kprobe_ctlblk *kcb,
175 struct pt_regs *regs,
176 unsigned long ip)
202{ 177{
203 per_cr_bits kprobe_per_regs[1]; 178 struct per_regs per_kprobe;
204 179
205 memset(kprobe_per_regs, 0, sizeof(per_cr_bits)); 180 /* Set up the PER control registers %cr9-%cr11 */
206 regs->psw.addr = (unsigned long)p->ainsn.insn | PSW_ADDR_AMODE; 181 per_kprobe.control = PER_EVENT_IFETCH;
182 per_kprobe.start = ip;
183 per_kprobe.end = ip;
207 184
208 /* Set up the per control reg info, will pass to lctl */ 185 /* Save control regs and psw mask */
209 kprobe_per_regs[0].em_instruction_fetch = 1; 186 __ctl_store(kcb->kprobe_saved_ctl, 9, 11);
210 kprobe_per_regs[0].starting_addr = (unsigned long)p->ainsn.insn; 187 kcb->kprobe_saved_imask = regs->psw.mask &
211 kprobe_per_regs[0].ending_addr = (unsigned long)p->ainsn.insn + 1; 188 (PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT);
212 189
213 /* Set the PER control regs, turns on single step for this address */ 190 /* Set PER control regs, turns on single step for the given address */
214 __ctl_load(kprobe_per_regs, 9, 11); 191 __ctl_load(per_kprobe, 9, 11);
215 regs->psw.mask |= PSW_MASK_PER; 192 regs->psw.mask |= PSW_MASK_PER;
216 regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT); 193 regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
194 regs->psw.addr = ip | PSW_ADDR_AMODE;
217} 195}
218 196
219static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) 197static void __kprobes disable_singlestep(struct kprobe_ctlblk *kcb,
198 struct pt_regs *regs,
199 unsigned long ip)
220{ 200{
221 kcb->prev_kprobe.kp = kprobe_running(); 201 /* Restore control regs and psw mask, set new psw address */
222 kcb->prev_kprobe.status = kcb->kprobe_status; 202 __ctl_load(kcb->kprobe_saved_ctl, 9, 11);
223 kcb->prev_kprobe.kprobe_saved_imask = kcb->kprobe_saved_imask; 203 regs->psw.mask &= ~PSW_MASK_PER;
224 memcpy(kcb->prev_kprobe.kprobe_saved_ctl, kcb->kprobe_saved_ctl, 204 regs->psw.mask |= kcb->kprobe_saved_imask;
225 sizeof(kcb->kprobe_saved_ctl)); 205 regs->psw.addr = ip | PSW_ADDR_AMODE;
226} 206}
227 207
228static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) 208/*
209 * Activate a kprobe by storing its pointer to current_kprobe. The
210 * previous kprobe is stored in kcb->prev_kprobe. A stack of up to
211 * two kprobes can be active, see KPROBE_REENTER.
212 */
213static void __kprobes push_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *p)
229{ 214{
230 __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp; 215 kcb->prev_kprobe.kp = __get_cpu_var(current_kprobe);
231 kcb->kprobe_status = kcb->prev_kprobe.status; 216 kcb->prev_kprobe.status = kcb->kprobe_status;
232 kcb->kprobe_saved_imask = kcb->prev_kprobe.kprobe_saved_imask; 217 __get_cpu_var(current_kprobe) = p;
233 memcpy(kcb->kprobe_saved_ctl, kcb->prev_kprobe.kprobe_saved_ctl,
234 sizeof(kcb->kprobe_saved_ctl));
235} 218}
236 219
237static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs, 220/*
238 struct kprobe_ctlblk *kcb) 221 * Deactivate a kprobe by backing up to the previous state. If the
222 * current state is KPROBE_REENTER prev_kprobe.kp will be non-NULL,
223 * for any other state prev_kprobe.kp will be NULL.
224 */
225static void __kprobes pop_kprobe(struct kprobe_ctlblk *kcb)
239{ 226{
240 __get_cpu_var(current_kprobe) = p; 227 __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
241 /* Save the interrupt and per flags */ 228 kcb->kprobe_status = kcb->prev_kprobe.status;
242 kcb->kprobe_saved_imask = regs->psw.mask &
243 (PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT);
244 /* Save the control regs that govern PER */
245 __ctl_store(kcb->kprobe_saved_ctl, 9, 11);
246} 229}
247 230
248void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, 231void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
@@ -251,79 +234,104 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
251 ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14]; 234 ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14];
252 235
253 /* Replace the return addr with trampoline addr */ 236 /* Replace the return addr with trampoline addr */
254 regs->gprs[14] = (unsigned long)&kretprobe_trampoline; 237 regs->gprs[14] = (unsigned long) &kretprobe_trampoline;
238}
239
240static void __kprobes kprobe_reenter_check(struct kprobe_ctlblk *kcb,
241 struct kprobe *p)
242{
243 switch (kcb->kprobe_status) {
244 case KPROBE_HIT_SSDONE:
245 case KPROBE_HIT_ACTIVE:
246 kprobes_inc_nmissed_count(p);
247 break;
248 case KPROBE_HIT_SS:
249 case KPROBE_REENTER:
250 default:
251 /*
252 * A kprobe on the code path to single step an instruction
253 * is a BUG. The code path resides in the .kprobes.text
254 * section and is executed with interrupts disabled.
255 */
256 printk(KERN_EMERG "Invalid kprobe detected at %p.\n", p->addr);
257 dump_kprobe(p);
258 BUG();
259 }
255} 260}
256 261
257static int __kprobes kprobe_handler(struct pt_regs *regs) 262static int __kprobes kprobe_handler(struct pt_regs *regs)
258{ 263{
259 struct kprobe *p;
260 int ret = 0;
261 unsigned long *addr = (unsigned long *)
262 ((regs->psw.addr & PSW_ADDR_INSN) - 2);
263 struct kprobe_ctlblk *kcb; 264 struct kprobe_ctlblk *kcb;
265 struct kprobe *p;
264 266
265 /* 267 /*
266 * We don't want to be preempted for the entire 268 * We want to disable preemption for the entire duration of kprobe
267 * duration of kprobe processing 269 * processing. That includes the calls to the pre/post handlers
270 * and single stepping the kprobe instruction.
268 */ 271 */
269 preempt_disable(); 272 preempt_disable();
270 kcb = get_kprobe_ctlblk(); 273 kcb = get_kprobe_ctlblk();
274 p = get_kprobe((void *)((regs->psw.addr & PSW_ADDR_INSN) - 2));
271 275
272 /* Check we're not actually recursing */ 276 if (p) {
273 if (kprobe_running()) { 277 if (kprobe_running()) {
274 p = get_kprobe(addr); 278 /*
275 if (p) { 279 * We have hit a kprobe while another is still
276 if (kcb->kprobe_status == KPROBE_HIT_SS && 280 * active. This can happen in the pre and post
277 *p->ainsn.insn == BREAKPOINT_INSTRUCTION) { 281 * handler. Single step the instruction of the
278 regs->psw.mask &= ~PSW_MASK_PER; 282 * new probe but do not call any handler function
279 regs->psw.mask |= kcb->kprobe_saved_imask; 283 * of this secondary kprobe.
280 goto no_kprobe; 284 * push_kprobe and pop_kprobe saves and restores
281 } 285 * the currently active kprobe.
282 /* We have reentered the kprobe_handler(), since
283 * another probe was hit while within the handler.
284 * We here save the original kprobes variables and
285 * just single step on the instruction of the new probe
286 * without calling any user handlers.
287 */ 286 */
288 save_previous_kprobe(kcb); 287 kprobe_reenter_check(kcb, p);
289 set_current_kprobe(p, regs, kcb); 288 push_kprobe(kcb, p);
290 kprobes_inc_nmissed_count(p);
291 prepare_singlestep(p, regs);
292 kcb->kprobe_status = KPROBE_REENTER; 289 kcb->kprobe_status = KPROBE_REENTER;
293 return 1;
294 } else { 290 } else {
295 p = __get_cpu_var(current_kprobe); 291 /*
296 if (p->break_handler && p->break_handler(p, regs)) { 292 * If we have no pre-handler or it returned 0, we
297 goto ss_probe; 293 * continue with single stepping. If we have a
298 } 294 * pre-handler and it returned non-zero, it prepped
295 * for calling the break_handler below on re-entry
296 * for jprobe processing, so get out doing nothing
297 * more here.
298 */
299 push_kprobe(kcb, p);
300 kcb->kprobe_status = KPROBE_HIT_ACTIVE;
301 if (p->pre_handler && p->pre_handler(p, regs))
302 return 1;
303 kcb->kprobe_status = KPROBE_HIT_SS;
299 } 304 }
300 goto no_kprobe; 305 enable_singlestep(kcb, regs, (unsigned long) p->ainsn.insn);
301 }
302
303 p = get_kprobe(addr);
304 if (!p)
305 /*
306 * No kprobe at this address. The fault has not been
307 * caused by a kprobe breakpoint. The race of breakpoint
308 * vs. kprobe remove does not exist because on s390 we
309 * use stop_machine to arm/disarm the breakpoints.
310 */
311 goto no_kprobe;
312
313 kcb->kprobe_status = KPROBE_HIT_ACTIVE;
314 set_current_kprobe(p, regs, kcb);
315 if (p->pre_handler && p->pre_handler(p, regs))
316 /* handler has already set things up, so skip ss setup */
317 return 1; 306 return 1;
318 307 } else if (kprobe_running()) {
319ss_probe: 308 p = __get_cpu_var(current_kprobe);
320 prepare_singlestep(p, regs); 309 if (p->break_handler && p->break_handler(p, regs)) {
321 kcb->kprobe_status = KPROBE_HIT_SS; 310 /*
322 return 1; 311 * Continuation after the jprobe completed and
323 312 * caused the jprobe_return trap. The jprobe
324no_kprobe: 313 * break_handler "returns" to the original
314 * function that still has the kprobe breakpoint
315 * installed. We continue with single stepping.
316 */
317 kcb->kprobe_status = KPROBE_HIT_SS;
318 enable_singlestep(kcb, regs,
319 (unsigned long) p->ainsn.insn);
320 return 1;
321 } /* else:
322 * No kprobe at this address and the current kprobe
323 * has no break handler (no jprobe!). The kernel just
324 * exploded, let the standard trap handler pick up the
325 * pieces.
326 */
327 } /* else:
328 * No kprobe at this address and no active kprobe. The trap has
329 * not been caused by a kprobe breakpoint. The race of breakpoint
 330 * vs. kprobe remove does not exist because on s390 we use
331 * stop_machine to arm/disarm the breakpoints.
332 */
325 preempt_enable_no_resched(); 333 preempt_enable_no_resched();
326 return ret; 334 return 0;
327} 335}
328 336
329/* 337/*
@@ -344,12 +352,12 @@ static void __used kretprobe_trampoline_holder(void)
344static int __kprobes trampoline_probe_handler(struct kprobe *p, 352static int __kprobes trampoline_probe_handler(struct kprobe *p,
345 struct pt_regs *regs) 353 struct pt_regs *regs)
346{ 354{
347 struct kretprobe_instance *ri = NULL; 355 struct kretprobe_instance *ri;
348 struct hlist_head *head, empty_rp; 356 struct hlist_head *head, empty_rp;
349 struct hlist_node *node, *tmp; 357 struct hlist_node *node, *tmp;
350 unsigned long flags, orig_ret_address = 0; 358 unsigned long flags, orig_ret_address;
351 unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline; 359 unsigned long trampoline_address;
352 kprobe_opcode_t *correct_ret_addr = NULL; 360 kprobe_opcode_t *correct_ret_addr;
353 361
354 INIT_HLIST_HEAD(&empty_rp); 362 INIT_HLIST_HEAD(&empty_rp);
355 kretprobe_hash_lock(current, &head, &flags); 363 kretprobe_hash_lock(current, &head, &flags);
@@ -367,12 +375,16 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
367 * real return address, and all the rest will point to 375 * real return address, and all the rest will point to
368 * kretprobe_trampoline 376 * kretprobe_trampoline
369 */ 377 */
378 ri = NULL;
379 orig_ret_address = 0;
380 correct_ret_addr = NULL;
381 trampoline_address = (unsigned long) &kretprobe_trampoline;
370 hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { 382 hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
371 if (ri->task != current) 383 if (ri->task != current)
372 /* another task is sharing our hash bucket */ 384 /* another task is sharing our hash bucket */
373 continue; 385 continue;
374 386
375 orig_ret_address = (unsigned long)ri->ret_addr; 387 orig_ret_address = (unsigned long) ri->ret_addr;
376 388
377 if (orig_ret_address != trampoline_address) 389 if (orig_ret_address != trampoline_address)
378 /* 390 /*
@@ -391,7 +403,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
391 /* another task is sharing our hash bucket */ 403 /* another task is sharing our hash bucket */
392 continue; 404 continue;
393 405
394 orig_ret_address = (unsigned long)ri->ret_addr; 406 orig_ret_address = (unsigned long) ri->ret_addr;
395 407
396 if (ri->rp && ri->rp->handler) { 408 if (ri->rp && ri->rp->handler) {
397 ri->ret_addr = correct_ret_addr; 409 ri->ret_addr = correct_ret_addr;
@@ -400,19 +412,18 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
400 412
401 recycle_rp_inst(ri, &empty_rp); 413 recycle_rp_inst(ri, &empty_rp);
402 414
403 if (orig_ret_address != trampoline_address) { 415 if (orig_ret_address != trampoline_address)
404 /* 416 /*
405 * This is the real return address. Any other 417 * This is the real return address. Any other
406 * instances associated with this task are for 418 * instances associated with this task are for
407 * other calls deeper on the call stack 419 * other calls deeper on the call stack
408 */ 420 */
409 break; 421 break;
410 }
411 } 422 }
412 423
413 regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE; 424 regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE;
414 425
415 reset_current_kprobe(); 426 pop_kprobe(get_kprobe_ctlblk());
416 kretprobe_hash_unlock(current, &flags); 427 kretprobe_hash_unlock(current, &flags);
417 preempt_enable_no_resched(); 428 preempt_enable_no_resched();
418 429
@@ -439,55 +450,42 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
439static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs) 450static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
440{ 451{
441 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 452 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
453 unsigned long ip = regs->psw.addr & PSW_ADDR_INSN;
454 int fixup = get_fixup_type(p->ainsn.insn);
442 455
443 regs->psw.addr &= PSW_ADDR_INSN; 456 if (fixup & FIXUP_PSW_NORMAL)
444 457 ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn;
445 if (p->ainsn.fixup & FIXUP_PSW_NORMAL)
446 regs->psw.addr = (unsigned long)p->addr +
447 ((unsigned long)regs->psw.addr -
448 (unsigned long)p->ainsn.insn);
449 458
450 if (p->ainsn.fixup & FIXUP_BRANCH_NOT_TAKEN) 459 if (fixup & FIXUP_BRANCH_NOT_TAKEN) {
451 if ((unsigned long)regs->psw.addr - 460 int ilen = ((p->ainsn.insn[0] >> 14) + 3) & -2;
452 (unsigned long)p->ainsn.insn == p->ainsn.ilen) 461 if (ip - (unsigned long) p->ainsn.insn == ilen)
453 regs->psw.addr = (unsigned long)p->addr + p->ainsn.ilen; 462 ip = (unsigned long) p->addr + ilen;
463 }
454 464
455 if (p->ainsn.fixup & FIXUP_RETURN_REGISTER) 465 if (fixup & FIXUP_RETURN_REGISTER) {
456 regs->gprs[p->ainsn.reg] = ((unsigned long)p->addr + 466 int reg = (p->ainsn.insn[0] & 0xf0) >> 4;
457 (regs->gprs[p->ainsn.reg] - 467 regs->gprs[reg] += (unsigned long) p->addr -
458 (unsigned long)p->ainsn.insn)) 468 (unsigned long) p->ainsn.insn;
459 | PSW_ADDR_AMODE; 469 }
460 470
461 regs->psw.addr |= PSW_ADDR_AMODE; 471 disable_singlestep(kcb, regs, ip);
462 /* turn off PER mode */
463 regs->psw.mask &= ~PSW_MASK_PER;
464 /* Restore the original per control regs */
465 __ctl_load(kcb->kprobe_saved_ctl, 9, 11);
466 regs->psw.mask |= kcb->kprobe_saved_imask;
467} 472}
468 473
469static int __kprobes post_kprobe_handler(struct pt_regs *regs) 474static int __kprobes post_kprobe_handler(struct pt_regs *regs)
470{ 475{
471 struct kprobe *cur = kprobe_running();
472 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 476 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
477 struct kprobe *p = kprobe_running();
473 478
474 if (!cur) 479 if (!p)
475 return 0; 480 return 0;
476 481
477 if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) { 482 if (kcb->kprobe_status != KPROBE_REENTER && p->post_handler) {
478 kcb->kprobe_status = KPROBE_HIT_SSDONE; 483 kcb->kprobe_status = KPROBE_HIT_SSDONE;
479 cur->post_handler(cur, regs, 0); 484 p->post_handler(p, regs, 0);
480 } 485 }
481 486
482 resume_execution(cur, regs); 487 resume_execution(p, regs);
483 488 pop_kprobe(kcb);
484 /*Restore back the original saved kprobes variables and continue. */
485 if (kcb->kprobe_status == KPROBE_REENTER) {
486 restore_previous_kprobe(kcb);
487 goto out;
488 }
489 reset_current_kprobe();
490out:
491 preempt_enable_no_resched(); 489 preempt_enable_no_resched();
492 490
493 /* 491 /*
@@ -495,17 +493,16 @@ out:
495 * will have PER set, in which case, continue the remaining processing 493 * will have PER set, in which case, continue the remaining processing
496 * of do_single_step, as if this is not a probe hit. 494 * of do_single_step, as if this is not a probe hit.
497 */ 495 */
498 if (regs->psw.mask & PSW_MASK_PER) { 496 if (regs->psw.mask & PSW_MASK_PER)
499 return 0; 497 return 0;
500 }
501 498
502 return 1; 499 return 1;
503} 500}
504 501
505static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr) 502static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr)
506{ 503{
507 struct kprobe *cur = kprobe_running();
508 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 504 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
505 struct kprobe *p = kprobe_running();
509 const struct exception_table_entry *entry; 506 const struct exception_table_entry *entry;
510 507
511 switch(kcb->kprobe_status) { 508 switch(kcb->kprobe_status) {
@@ -521,14 +518,8 @@ static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr)
521 * and allow the page fault handler to continue as a 518 * and allow the page fault handler to continue as a
522 * normal page fault. 519 * normal page fault.
523 */ 520 */
524 regs->psw.addr = (unsigned long)cur->addr | PSW_ADDR_AMODE; 521 disable_singlestep(kcb, regs, (unsigned long) p->addr);
525 regs->psw.mask &= ~PSW_MASK_PER; 522 pop_kprobe(kcb);
526 regs->psw.mask |= kcb->kprobe_saved_imask;
527 if (kcb->kprobe_status == KPROBE_REENTER)
528 restore_previous_kprobe(kcb);
529 else {
530 reset_current_kprobe();
531 }
532 preempt_enable_no_resched(); 523 preempt_enable_no_resched();
533 break; 524 break;
534 case KPROBE_HIT_ACTIVE: 525 case KPROBE_HIT_ACTIVE:
@@ -538,7 +529,7 @@ static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr)
 538 * we can also use npre/npostfault count for accounting 529 * we can also use npre/npostfault count for accounting
539 * these specific fault cases. 530 * these specific fault cases.
540 */ 531 */
541 kprobes_inc_nmissed_count(cur); 532 kprobes_inc_nmissed_count(p);
542 533
543 /* 534 /*
544 * We come here because instructions in the pre/post 535 * We come here because instructions in the pre/post
@@ -547,7 +538,7 @@ static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr)
547 * copy_from_user(), get_user() etc. Let the 538 * copy_from_user(), get_user() etc. Let the
548 * user-specified handler try to fix it first. 539 * user-specified handler try to fix it first.
549 */ 540 */
550 if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) 541 if (p->fault_handler && p->fault_handler(p, regs, trapnr))
551 return 1; 542 return 1;
552 543
553 /* 544 /*
@@ -589,7 +580,7 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
589int __kprobes kprobe_exceptions_notify(struct notifier_block *self, 580int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
590 unsigned long val, void *data) 581 unsigned long val, void *data)
591{ 582{
592 struct die_args *args = (struct die_args *)data; 583 struct die_args *args = (struct die_args *) data;
593 struct pt_regs *regs = args->regs; 584 struct pt_regs *regs = args->regs;
594 int ret = NOTIFY_DONE; 585 int ret = NOTIFY_DONE;
595 586
@@ -598,16 +589,16 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
598 589
599 switch (val) { 590 switch (val) {
600 case DIE_BPT: 591 case DIE_BPT:
601 if (kprobe_handler(args->regs)) 592 if (kprobe_handler(regs))
602 ret = NOTIFY_STOP; 593 ret = NOTIFY_STOP;
603 break; 594 break;
604 case DIE_SSTEP: 595 case DIE_SSTEP:
605 if (post_kprobe_handler(args->regs)) 596 if (post_kprobe_handler(regs))
606 ret = NOTIFY_STOP; 597 ret = NOTIFY_STOP;
607 break; 598 break;
608 case DIE_TRAP: 599 case DIE_TRAP:
609 if (!preemptible() && kprobe_running() && 600 if (!preemptible() && kprobe_running() &&
610 kprobe_trap_handler(args->regs, args->trapnr)) 601 kprobe_trap_handler(regs, args->trapnr))
611 ret = NOTIFY_STOP; 602 ret = NOTIFY_STOP;
612 break; 603 break;
613 default: 604 default:
@@ -623,23 +614,19 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
623int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) 614int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
624{ 615{
625 struct jprobe *jp = container_of(p, struct jprobe, kp); 616 struct jprobe *jp = container_of(p, struct jprobe, kp);
626 unsigned long addr;
627 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 617 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
618 unsigned long stack;
628 619
629 memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs)); 620 memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));
630 621
631 /* setup return addr to the jprobe handler routine */ 622 /* setup return addr to the jprobe handler routine */
632 regs->psw.addr = (unsigned long)(jp->entry) | PSW_ADDR_AMODE; 623 regs->psw.addr = (unsigned long) jp->entry | PSW_ADDR_AMODE;
633 regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT); 624 regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
634 625
635 /* r14 is the function return address */
636 kcb->jprobe_saved_r14 = (unsigned long)regs->gprs[14];
637 /* r15 is the stack pointer */ 626 /* r15 is the stack pointer */
638 kcb->jprobe_saved_r15 = (unsigned long)regs->gprs[15]; 627 stack = (unsigned long) regs->gprs[15];
639 addr = (unsigned long)kcb->jprobe_saved_r15;
640 628
641 memcpy(kcb->jprobes_stack, (kprobe_opcode_t *) addr, 629 memcpy(kcb->jprobes_stack, (void *) stack, MIN_STACK_SIZE(stack));
642 MIN_STACK_SIZE(addr));
643 return 1; 630 return 1;
644} 631}
645 632
@@ -656,30 +643,29 @@ void __kprobes jprobe_return_end(void)
656int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) 643int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
657{ 644{
658 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 645 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
659 unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_r15); 646 unsigned long stack;
647
648 stack = (unsigned long) kcb->jprobe_saved_regs.gprs[15];
660 649
661 /* Put the regs back */ 650 /* Put the regs back */
662 memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs)); 651 memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
663 /* put the stack back */ 652 /* put the stack back */
664 memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack, 653 memcpy((void *) stack, kcb->jprobes_stack, MIN_STACK_SIZE(stack));
665 MIN_STACK_SIZE(stack_addr));
666 preempt_enable_no_resched(); 654 preempt_enable_no_resched();
667 return 1; 655 return 1;
668} 656}
669 657
670static struct kprobe trampoline_p = { 658static struct kprobe trampoline = {
671 .addr = (kprobe_opcode_t *) & kretprobe_trampoline, 659 .addr = (kprobe_opcode_t *) &kretprobe_trampoline,
672 .pre_handler = trampoline_probe_handler 660 .pre_handler = trampoline_probe_handler
673}; 661};
674 662
675int __init arch_init_kprobes(void) 663int __init arch_init_kprobes(void)
676{ 664{
677 return register_kprobe(&trampoline_p); 665 return register_kprobe(&trampoline);
678} 666}
679 667
680int __kprobes arch_trampoline_kprobe(struct kprobe *p) 668int __kprobes arch_trampoline_kprobe(struct kprobe *p)
681{ 669{
682 if (p->addr == (kprobe_opcode_t *) & kretprobe_trampoline) 670 return p->addr == (kprobe_opcode_t *) &kretprobe_trampoline;
683 return 1;
684 return 0;
685} 671}
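arch_prepare_kprobe() above no longer stores a precomputed length; both it and resume_execution() rederive it from the first halfword of the instruction, whose two most significant bits encode the size (00 -> 2 bytes, 01/10 -> 4 bytes, 11 -> 6 bytes). The expression ((opcode >> 14) + 3) & -2 maps those four cases to 2, 4, 4 and 6. A standalone check of the arithmetic:

        #include <stdio.h>

        static int insn_length(unsigned short first_halfword)
        {
                return ((first_halfword >> 14) + 3) & -2;
        }

        int main(void)
        {
                /* bcr (0x07..) is 2 bytes, bc (0x47..) is 4, brcl (0xc0.4) is 6 */
                printf("%d %d %d\n", insn_length(0x0700),
                       insn_length(0x4700), insn_length(0xc004));
                return 0;   /* prints: 2 4 6 */
        }
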
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index dfe015d7398c..1e6a55795628 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -7,6 +7,8 @@
7 7
8#include <asm/asm-offsets.h> 8#include <asm/asm-offsets.h>
9 9
10 .section .kprobes.text, "ax"
11
10 .globl ftrace_stub 12 .globl ftrace_stub
11ftrace_stub: 13ftrace_stub:
12 br %r14 14 br %r14
@@ -16,22 +18,12 @@ _mcount:
16#ifdef CONFIG_DYNAMIC_FTRACE 18#ifdef CONFIG_DYNAMIC_FTRACE
17 br %r14 19 br %r14
18 20
19 .data
20 .globl ftrace_dyn_func
21ftrace_dyn_func:
22 .long ftrace_stub
23 .previous
24
25 .globl ftrace_caller 21 .globl ftrace_caller
26ftrace_caller: 22ftrace_caller:
27#endif 23#endif
28 stm %r2,%r5,16(%r15) 24 stm %r2,%r5,16(%r15)
29 bras %r1,2f 25 bras %r1,2f
30#ifdef CONFIG_DYNAMIC_FTRACE
310: .long ftrace_dyn_func
32#else
330: .long ftrace_trace_function 260: .long ftrace_trace_function
34#endif
351: .long function_trace_stop 271: .long function_trace_stop
362: l %r2,1b-0b(%r1) 282: l %r2,1b-0b(%r1)
37 icm %r2,0xf,0(%r2) 29 icm %r2,0xf,0(%r2)
@@ -47,21 +39,15 @@ ftrace_caller:
47 l %r14,0(%r14) 39 l %r14,0(%r14)
48 basr %r14,%r14 40 basr %r14,%r14
49#ifdef CONFIG_FUNCTION_GRAPH_TRACER 41#ifdef CONFIG_FUNCTION_GRAPH_TRACER
50#ifdef CONFIG_DYNAMIC_FTRACE 42 l %r2,100(%r15)
43 l %r3,152(%r15)
51 .globl ftrace_graph_caller 44 .globl ftrace_graph_caller
52ftrace_graph_caller: 45ftrace_graph_caller:
53 # This unconditional branch gets runtime patched. Change only if 46# The bras instruction gets runtime patched to call prepare_ftrace_return.
54 # you know what you are doing. See ftrace_enable_graph_caller(). 47# See ftrace_enable_ftrace_graph_caller. The patched instruction is:
55 j 1f 48# bras %r14,prepare_ftrace_return
56#endif 49 bras %r14,0f
57 bras %r1,0f 500: st %r2,100(%r15)
58 .long prepare_ftrace_return
590: l %r2,152(%r15)
60 l %r4,0(%r1)
61 l %r3,100(%r15)
62 basr %r14,%r4
63 st %r2,100(%r15)
641:
65#endif 51#endif
66 ahi %r15,96 52 ahi %r15,96
67 l %r14,56(%r15) 53 l %r14,56(%r15)
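The patched call site keeps a fixed contract with the C side: %r2 carries the saved return address, %r3 the instruction pointer, and whatever prepare_ftrace_return() hands back is stored over the saved return address, so an enabled graph tracer reroutes the traced function's return through return_to_handler. In C terms (a sketch of the contract, not code that exists in the tree):

        unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip);

        /* What ftrace_graph_caller does, expressed in C: */
        static void graph_caller_sketch(unsigned long *ret_addr_slot,
                                        unsigned long ip)
        {
                *ret_addr_slot = prepare_ftrace_return(*ret_addr_slot, ip);
        }
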
diff --git a/arch/s390/kernel/mcount64.S b/arch/s390/kernel/mcount64.S
index c37211c6092b..e73667286ac0 100644
--- a/arch/s390/kernel/mcount64.S
+++ b/arch/s390/kernel/mcount64.S
@@ -7,6 +7,8 @@
7 7
8#include <asm/asm-offsets.h> 8#include <asm/asm-offsets.h>
9 9
10 .section .kprobes.text, "ax"
11
10 .globl ftrace_stub 12 .globl ftrace_stub
11ftrace_stub: 13ftrace_stub:
12 br %r14 14 br %r14
@@ -16,12 +18,6 @@ _mcount:
16#ifdef CONFIG_DYNAMIC_FTRACE 18#ifdef CONFIG_DYNAMIC_FTRACE
17 br %r14 19 br %r14
18 20
19 .data
20 .globl ftrace_dyn_func
21ftrace_dyn_func:
22 .quad ftrace_stub
23 .previous
24
25 .globl ftrace_caller 21 .globl ftrace_caller
26ftrace_caller: 22ftrace_caller:
27#endif 23#endif
@@ -35,26 +31,19 @@ ftrace_caller:
35 stg %r1,__SF_BACKCHAIN(%r15) 31 stg %r1,__SF_BACKCHAIN(%r15)
36 lgr %r2,%r14 32 lgr %r2,%r14
37 lg %r3,168(%r15) 33 lg %r3,168(%r15)
38#ifdef CONFIG_DYNAMIC_FTRACE
39 larl %r14,ftrace_dyn_func
40#else
41 larl %r14,ftrace_trace_function 34 larl %r14,ftrace_trace_function
42#endif
43 lg %r14,0(%r14) 35 lg %r14,0(%r14)
44 basr %r14,%r14 36 basr %r14,%r14
45#ifdef CONFIG_FUNCTION_GRAPH_TRACER 37#ifdef CONFIG_FUNCTION_GRAPH_TRACER
46#ifdef CONFIG_DYNAMIC_FTRACE 38 lg %r2,168(%r15)
39 lg %r3,272(%r15)
47 .globl ftrace_graph_caller 40 .globl ftrace_graph_caller
48ftrace_graph_caller: 41ftrace_graph_caller:
49 # This unconditional branch gets runtime patched. Change only if 42# The bras instruction gets runtime patched to call prepare_ftrace_return.
50 # you know what you are doing. See ftrace_enable_graph_caller(). 43# See ftrace_enable_ftrace_graph_caller. The patched instruction is:
51 j 0f 44# bras %r14,prepare_ftrace_return
52#endif 45 bras %r14,0f
53 lg %r2,272(%r15) 460: stg %r2,168(%r15)
54 lg %r3,168(%r15)
55 brasl %r14,prepare_ftrace_return
56 stg %r2,168(%r15)
570:
58#endif 47#endif
59 aghi %r15,160 48 aghi %r15,160
60 lmg %r2,%r5,32(%r15) 49 lmg %r2,%r5,32(%r15)
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 1995c1712fc8..fab88431a06f 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -8,6 +8,7 @@
8 * Heiko Carstens <heiko.carstens@de.ibm.com>, 8 * Heiko Carstens <heiko.carstens@de.ibm.com>,
9 */ 9 */
10 10
11#include <linux/kernel_stat.h>
11#include <linux/init.h> 12#include <linux/init.h>
12#include <linux/errno.h> 13#include <linux/errno.h>
13#include <linux/hardirq.h> 14#include <linux/hardirq.h>
@@ -255,7 +256,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
255 nmi_enter(); 256 nmi_enter();
256 s390_idle_check(regs, S390_lowcore.mcck_clock, 257 s390_idle_check(regs, S390_lowcore.mcck_clock,
257 S390_lowcore.mcck_enter_timer); 258 S390_lowcore.mcck_enter_timer);
258 259 kstat_cpu(smp_processor_id()).irqs[NMI_NMI]++;
259 mci = (struct mci *) &S390_lowcore.mcck_interruption_code; 260 mci = (struct mci *) &S390_lowcore.mcck_interruption_code;
260 mcck = &__get_cpu_var(cpu_mcck); 261 mcck = &__get_cpu_var(cpu_mcck);
261 umode = user_mode(regs); 262 umode = user_mode(regs);
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index ec2e03b22ead..6ba42222b542 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -32,6 +32,7 @@
32#include <linux/kernel_stat.h> 32#include <linux/kernel_stat.h>
33#include <linux/syscalls.h> 33#include <linux/syscalls.h>
34#include <linux/compat.h> 34#include <linux/compat.h>
35#include <linux/kprobes.h>
35#include <asm/compat.h> 36#include <asm/compat.h>
36#include <asm/uaccess.h> 37#include <asm/uaccess.h>
37#include <asm/pgtable.h> 38#include <asm/pgtable.h>
@@ -41,6 +42,7 @@
41#include <asm/irq.h> 42#include <asm/irq.h>
42#include <asm/timer.h> 43#include <asm/timer.h>
43#include <asm/nmi.h> 44#include <asm/nmi.h>
45#include <asm/smp.h>
44#include "entry.h" 46#include "entry.h"
45 47
46asmlinkage void ret_from_fork(void) asm ("ret_from_fork"); 48asmlinkage void ret_from_fork(void) asm ("ret_from_fork");
@@ -75,13 +77,8 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
75 */ 77 */
76static void default_idle(void) 78static void default_idle(void)
77{ 79{
78 /* CPU is going idle. */ 80 if (cpu_is_offline(smp_processor_id()))
79#ifdef CONFIG_HOTPLUG_CPU
80 if (cpu_is_offline(smp_processor_id())) {
81 preempt_enable_no_resched();
82 cpu_die(); 81 cpu_die();
83 }
84#endif
85 local_irq_disable(); 82 local_irq_disable();
86 if (need_resched()) { 83 if (need_resched()) {
87 local_irq_enable(); 84 local_irq_enable();
@@ -116,15 +113,17 @@ void cpu_idle(void)
116 } 113 }
117} 114}
118 115
119extern void kernel_thread_starter(void); 116extern void __kprobes kernel_thread_starter(void);
120 117
121asm( 118asm(
122 ".align 4\n" 119 ".section .kprobes.text, \"ax\"\n"
120 ".global kernel_thread_starter\n"
123 "kernel_thread_starter:\n" 121 "kernel_thread_starter:\n"
124 " la 2,0(10)\n" 122 " la 2,0(10)\n"
125 " basr 14,9\n" 123 " basr 14,9\n"
126 " la 2,0\n" 124 " la 2,0\n"
127 " br 11\n"); 125 " br 11\n"
126 ".previous\n");
128 127
129int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) 128int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
130{ 129{
@@ -214,8 +213,10 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
214 /* start new process with ar4 pointing to the correct address space */ 213 /* start new process with ar4 pointing to the correct address space */
215 p->thread.mm_segment = get_fs(); 214 p->thread.mm_segment = get_fs();
216 /* Don't copy debug registers */ 215 /* Don't copy debug registers */
217 memset(&p->thread.per_info, 0, sizeof(p->thread.per_info)); 216 memset(&p->thread.per_user, 0, sizeof(p->thread.per_user));
217 memset(&p->thread.per_event, 0, sizeof(p->thread.per_event));
218 clear_tsk_thread_flag(p, TIF_SINGLE_STEP); 218 clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
219 clear_tsk_thread_flag(p, TIF_PER_TRAP);
219 /* Initialize per thread user and system timer values */ 220 /* Initialize per thread user and system timer values */
220 ti = task_thread_info(p); 221 ti = task_thread_info(p);
221 ti->user_timer = 0; 222 ti->user_timer = 0;
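kernel_thread_starter moves into .kprobes.text because the generic kprobes core refuses to place probes inside that section; code that can run while a probed instruction is being single-stepped must not itself be probed. A sketch of the check, modelled on the blacklist test in kernel/kprobes.c (the linker script provides the two boundary symbols; the helper name is illustrative):

        extern char __kprobes_text_start[], __kprobes_text_end[];

        /* Illustrative: roughly what the kprobes core checks at register time. */
        static int addr_is_probeable(unsigned long addr)
        {
                return addr < (unsigned long) __kprobes_text_start ||
                       addr >= (unsigned long) __kprobes_text_end;
        }
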
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 644548e615c6..311e9d712888 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -13,7 +13,7 @@
13#include <linux/smp.h> 13#include <linux/smp.h>
14#include <linux/seq_file.h> 14#include <linux/seq_file.h>
15#include <linux/delay.h> 15#include <linux/delay.h>
16 16#include <linux/cpu.h>
17#include <asm/elf.h> 17#include <asm/elf.h>
18#include <asm/lowcore.h> 18#include <asm/lowcore.h>
19#include <asm/param.h> 19#include <asm/param.h>
@@ -35,17 +35,6 @@ void __cpuinit cpu_init(void)
35} 35}
36 36
37/* 37/*
38 * print_cpu_info - print basic information about a cpu
39 */
40void __cpuinit print_cpu_info(void)
41{
42 struct cpuid *id = &per_cpu(cpu_id, smp_processor_id());
43
44 pr_info("Processor %d started, address %d, identification %06X\n",
45 S390_lowcore.cpu_nr, stap(), id->ident);
46}
47
48/*
49 * show_cpuinfo - Get information on one CPU for use by procfs. 38 * show_cpuinfo - Get information on one CPU for use by procfs.
50 */ 39 */
51static int show_cpuinfo(struct seq_file *m, void *v) 40static int show_cpuinfo(struct seq_file *m, void *v)
@@ -57,9 +46,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
57 unsigned long n = (unsigned long) v - 1; 46 unsigned long n = (unsigned long) v - 1;
58 int i; 47 int i;
59 48
60 s390_adjust_jiffies();
61 preempt_disable();
62 if (!n) { 49 if (!n) {
50 s390_adjust_jiffies();
63 seq_printf(m, "vendor_id : IBM/S390\n" 51 seq_printf(m, "vendor_id : IBM/S390\n"
64 "# processors : %i\n" 52 "# processors : %i\n"
65 "bogomips per cpu: %lu.%02lu\n", 53 "bogomips per cpu: %lu.%02lu\n",
@@ -71,7 +59,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
71 seq_printf(m, "%s ", hwcap_str[i]); 59 seq_printf(m, "%s ", hwcap_str[i]);
72 seq_puts(m, "\n"); 60 seq_puts(m, "\n");
73 } 61 }
74 62 get_online_cpus();
75 if (cpu_online(n)) { 63 if (cpu_online(n)) {
76 struct cpuid *id = &per_cpu(cpu_id, n); 64 struct cpuid *id = &per_cpu(cpu_id, n);
77 seq_printf(m, "processor %li: " 65 seq_printf(m, "processor %li: "
@@ -80,7 +68,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
80 "machine = %04X\n", 68 "machine = %04X\n",
81 n, id->version, id->ident, id->machine); 69 n, id->version, id->ident, id->machine);
82 } 70 }
83 preempt_enable(); 71 put_online_cpus();
84 return 0; 72 return 0;
85} 73}
86 74
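The locking change in show_cpuinfo() is the real fix here: preempt_disable() only pins the current CPU, while testing cpu_online(n) for an arbitrary n has to be serialized against CPU hotplug, which is what get_online_cpus()/put_online_cpus() provide. The usual pattern, as a sketch:

        static void walk_online_cpus_example(void)
        {
                int cpu;

                get_online_cpus();      /* block CPU hotplug, sleepable context */
                for_each_online_cpu(cpu)
                        pr_info("cpu %d is online\n", cpu);
                put_online_cpus();
        }
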
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 019bb714db49..ef86ad243986 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -1,25 +1,9 @@
 /*
- * arch/s390/kernel/ptrace.c
+ *  Ptrace user space interface.
  *
- *  S390 version
- *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
- *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
+ *    Copyright IBM Corp. 1999,2010
+ *    Author(s): Denis Joseph Barrow
  *               Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- *  Based on PowerPC version
- *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
- *
- *  Derived from "arch/m68k/kernel/ptrace.c"
- *  Copyright (C) 1994 by Hamish Macdonald
- *  Taken from linux/kernel/ptrace.c and modified for M680x0.
- *  linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
- *
- *  Modified by Cort Dougan (cort@cs.nmt.edu)
- *
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License.  See the file README.legal in the main directory of
- * this archive for more details.
  */
 
 #include <linux/kernel.h>
@@ -61,76 +45,58 @@ enum s390_regset {
 	REGSET_GENERAL_EXTENDED,
 };
 
-static void
-FixPerRegisters(struct task_struct *task)
+void update_per_regs(struct task_struct *task)
 {
-	struct pt_regs *regs;
-	per_struct *per_info;
-	per_cr_words cr_words;
-
-	regs = task_pt_regs(task);
-	per_info = (per_struct *) &task->thread.per_info;
-	per_info->control_regs.bits.em_instruction_fetch =
-		per_info->single_step | per_info->instruction_fetch;
-
-	if (per_info->single_step) {
-		per_info->control_regs.bits.starting_addr = 0;
-#ifdef CONFIG_COMPAT
-		if (is_compat_task())
-			per_info->control_regs.bits.ending_addr = 0x7fffffffUL;
-		else
-#endif
-			per_info->control_regs.bits.ending_addr = PSW_ADDR_INSN;
-	} else {
-		per_info->control_regs.bits.starting_addr =
-			per_info->starting_addr;
-		per_info->control_regs.bits.ending_addr =
-			per_info->ending_addr;
-	}
-	/*
-	 * if any of the control reg tracing bits are on
-	 * we switch on per in the psw
-	 */
-	if (per_info->control_regs.words.cr[0] & PER_EM_MASK)
-		regs->psw.mask |= PSW_MASK_PER;
-	else
+	static const struct per_regs per_single_step = {
+		.control = PER_EVENT_IFETCH,
+		.start = 0,
+		.end = PSW_ADDR_INSN,
+	};
+	struct pt_regs *regs = task_pt_regs(task);
+	struct thread_struct *thread = &task->thread;
+	const struct per_regs *new;
+	struct per_regs old;
+
+	/* TIF_SINGLE_STEP overrides the user specified PER registers. */
+	new = test_tsk_thread_flag(task, TIF_SINGLE_STEP) ?
+		&per_single_step : &thread->per_user;
+
+	/* Take care of the PER enablement bit in the PSW. */
+	if (!(new->control & PER_EVENT_MASK)) {
 		regs->psw.mask &= ~PSW_MASK_PER;
-
-	if (per_info->control_regs.bits.em_storage_alteration)
-		per_info->control_regs.bits.storage_alt_space_ctl = 1;
-	else
-		per_info->control_regs.bits.storage_alt_space_ctl = 0;
-
-	if (task == current) {
-		__ctl_store(cr_words, 9, 11);
-		if (memcmp(&cr_words, &per_info->control_regs.words,
-			   sizeof(cr_words)) != 0)
-			__ctl_load(per_info->control_regs.words, 9, 11);
+		return;
 	}
+	regs->psw.mask |= PSW_MASK_PER;
+	__ctl_store(old, 9, 11);
+	if (memcmp(new, &old, sizeof(struct per_regs)) != 0)
+		__ctl_load(*new, 9, 11);
 }
 
 void user_enable_single_step(struct task_struct *task)
 {
-	task->thread.per_info.single_step = 1;
-	FixPerRegisters(task);
+	set_tsk_thread_flag(task, TIF_SINGLE_STEP);
+	if (task == current)
+		update_per_regs(task);
 }
 
 void user_disable_single_step(struct task_struct *task)
 {
-	task->thread.per_info.single_step = 0;
-	FixPerRegisters(task);
+	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
+	if (task == current)
+		update_per_regs(task);
 }
 
 /*
  * Called by kernel/ptrace.c when detaching..
  *
- * Make sure single step bits etc are not set.
+ * Clear all debugging related fields.
  */
-void
-ptrace_disable(struct task_struct *child)
+void ptrace_disable(struct task_struct *task)
 {
-	/* make sure the single step bit is not set. */
-	user_disable_single_step(child);
+	memset(&task->thread.per_user, 0, sizeof(task->thread.per_user));
+	memset(&task->thread.per_event, 0, sizeof(task->thread.per_event));
+	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
+	clear_tsk_thread_flag(task, TIF_PER_TRAP);
 }
 
 #ifndef CONFIG_64BIT
@@ -139,6 +105,47 @@ ptrace_disable(struct task_struct *child)
139# define __ADDR_MASK 7 105# define __ADDR_MASK 7
140#endif 106#endif
141 107
108static inline unsigned long __peek_user_per(struct task_struct *child,
109 addr_t addr)
110{
111 struct per_struct_kernel *dummy = NULL;
112
113 if (addr == (addr_t) &dummy->cr9)
114 /* Control bits of the active per set. */
115 return test_thread_flag(TIF_SINGLE_STEP) ?
116 PER_EVENT_IFETCH : child->thread.per_user.control;
117 else if (addr == (addr_t) &dummy->cr10)
118 /* Start address of the active per set. */
119 return test_thread_flag(TIF_SINGLE_STEP) ?
120 0 : child->thread.per_user.start;
121 else if (addr == (addr_t) &dummy->cr11)
122 /* End address of the active per set. */
123 return test_thread_flag(TIF_SINGLE_STEP) ?
124 PSW_ADDR_INSN : child->thread.per_user.end;
125 else if (addr == (addr_t) &dummy->bits)
126 /* Single-step bit. */
127 return test_thread_flag(TIF_SINGLE_STEP) ?
128 (1UL << (BITS_PER_LONG - 1)) : 0;
129 else if (addr == (addr_t) &dummy->starting_addr)
130 /* Start address of the user specified per set. */
131 return child->thread.per_user.start;
132 else if (addr == (addr_t) &dummy->ending_addr)
133 /* End address of the user specified per set. */
134 return child->thread.per_user.end;
135 else if (addr == (addr_t) &dummy->perc_atmid)
136 /* PER code, ATMID and AI of the last PER trap */
137 return (unsigned long)
138 child->thread.per_event.cause << (BITS_PER_LONG - 16);
139 else if (addr == (addr_t) &dummy->address)
140 /* Address of the last PER trap */
141 return child->thread.per_event.address;
142 else if (addr == (addr_t) &dummy->access_id)
143 /* Access id of the last PER trap */
144 return (unsigned long)
145 child->thread.per_event.paid << (BITS_PER_LONG - 8);
146 return 0;
147}
148
142/* 149/*
143 * Read the word at offset addr from the user area of a process. The 150 * Read the word at offset addr from the user area of a process. The
144 * trouble here is that the information is littered over different 151 * trouble here is that the information is littered over different
@@ -204,10 +211,10 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)
 
 	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
 		/*
-		 * per_info is found in the thread structure
+		 * Handle access to the per_info structure.
 		 */
-		offset = addr - (addr_t) &dummy->regs.per_info;
-		tmp = *(addr_t *)((addr_t) &child->thread.per_info + offset);
+		addr -= (addr_t) &dummy->regs.per_info;
+		tmp = __peek_user_per(child, addr);
 
 	} else
 		tmp = 0;
@@ -237,6 +244,35 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data)
237 return put_user(tmp, (addr_t __user *) data); 244 return put_user(tmp, (addr_t __user *) data);
238} 245}
239 246
247static inline void __poke_user_per(struct task_struct *child,
248 addr_t addr, addr_t data)
249{
250 struct per_struct_kernel *dummy = NULL;
251
252 /*
253 * There are only three fields in the per_info struct that the
254 * debugger user can write to.
255 * 1) cr9: the debugger wants to set a new PER event mask
256 * 2) starting_addr: the debugger wants to set a new starting
257 * address to use with the PER event mask.
258 * 3) ending_addr: the debugger wants to set a new ending
259 * address to use with the PER event mask.
260 * The user specified PER event mask and the start and end
261 * addresses are used only if single stepping is not in effect.
262 * Writes to any other field in per_info are ignored.
263 */
264 if (addr == (addr_t) &dummy->cr9)
265 /* PER event mask of the user specified per set. */
266 child->thread.per_user.control =
267 data & (PER_EVENT_MASK | PER_CONTROL_MASK);
268 else if (addr == (addr_t) &dummy->starting_addr)
269 /* Starting address of the user specified per set. */
270 child->thread.per_user.start = data;
271 else if (addr == (addr_t) &dummy->ending_addr)
272 /* Ending address of the user specified per set. */
273 child->thread.per_user.end = data;
274}
275
240/* 276/*
241 * Write a word to the user area of a process at location addr. This 277 * Write a word to the user area of a process at location addr. This
242 * operation does have an additional problem compared to peek_user. 278 * operation does have an additional problem compared to peek_user.
@@ -311,19 +347,17 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
 
 	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
 		/*
-		 * per_info is found in the thread structure
+		 * Handle access to the per_info structure.
 		 */
-		offset = addr - (addr_t) &dummy->regs.per_info;
-		*(addr_t *)((addr_t) &child->thread.per_info + offset) = data;
+		addr -= (addr_t) &dummy->regs.per_info;
+		__poke_user_per(child, addr, data);
 
 	}
 
-	FixPerRegisters(child);
 	return 0;
 }
 
-static int
-poke_user(struct task_struct *child, addr_t addr, addr_t data)
+static int poke_user(struct task_struct *child, addr_t addr, addr_t data)
 {
 	addr_t mask;
 
@@ -410,12 +444,53 @@ long arch_ptrace(struct task_struct *child, long request,
410 */ 444 */
411 445
412/* 446/*
447 * Same as peek_user_per but for a 31 bit program.
448 */
449static inline __u32 __peek_user_per_compat(struct task_struct *child,
450 addr_t addr)
451{
452 struct compat_per_struct_kernel *dummy32 = NULL;
453
454 if (addr == (addr_t) &dummy32->cr9)
455 /* Control bits of the active per set. */
456 return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
457 PER_EVENT_IFETCH : child->thread.per_user.control;
458 else if (addr == (addr_t) &dummy32->cr10)
459 /* Start address of the active per set. */
460 return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
461 0 : child->thread.per_user.start;
462 else if (addr == (addr_t) &dummy32->cr11)
463 /* End address of the active per set. */
464 return test_thread_flag(TIF_SINGLE_STEP) ?
465 PSW32_ADDR_INSN : child->thread.per_user.end;
466 else if (addr == (addr_t) &dummy32->bits)
467 /* Single-step bit. */
468 return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
469 0x80000000 : 0;
470 else if (addr == (addr_t) &dummy32->starting_addr)
471 /* Start address of the user specified per set. */
472 return (__u32) child->thread.per_user.start;
473 else if (addr == (addr_t) &dummy32->ending_addr)
474 /* End address of the user specified per set. */
475 return (__u32) child->thread.per_user.end;
476 else if (addr == (addr_t) &dummy32->perc_atmid)
477 /* PER code, ATMID and AI of the last PER trap */
478 return (__u32) child->thread.per_event.cause << 16;
479 else if (addr == (addr_t) &dummy32->address)
480 /* Address of the last PER trap */
481 return (__u32) child->thread.per_event.address;
482 else if (addr == (addr_t) &dummy32->access_id)
483 /* Access id of the last PER trap */
484 return (__u32) child->thread.per_event.paid << 24;
485 return 0;
486}
487
488/*
  * Same as peek_user but for a 31 bit program.
  */
 static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
 {
-	struct user32 *dummy32 = NULL;
-	per_struct32 *dummy_per32 = NULL;
+	struct compat_user *dummy32 = NULL;
 	addr_t offset;
 	__u32 tmp;
 
@@ -465,19 +540,10 @@ static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
 
 	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
 		/*
-		 * per_info is found in the thread structure
+		 * Handle access to the per_info structure.
 		 */
-		offset = addr - (addr_t) &dummy32->regs.per_info;
-		/* This is magic. See per_struct and per_struct32. */
-		if ((offset >= (addr_t) &dummy_per32->control_regs &&
-		     offset < (addr_t) (&dummy_per32->control_regs + 1)) ||
-		    (offset >= (addr_t) &dummy_per32->starting_addr &&
-		     offset <= (addr_t) &dummy_per32->ending_addr) ||
-		    offset == (addr_t) &dummy_per32->lowcore.words.address)
-			offset = offset*2 + 4;
-		else
-			offset = offset*2;
-		tmp = *(__u32 *)((addr_t) &child->thread.per_info + offset);
+		addr -= (addr_t) &dummy32->regs.per_info;
+		tmp = __peek_user_per_compat(child, addr);
 
 	} else
 		tmp = 0;
@@ -498,13 +564,32 @@ static int peek_user_compat(struct task_struct *child,
498} 564}
499 565
500/* 566/*
567 * Same as poke_user_per but for a 31 bit program.
568 */
569static inline void __poke_user_per_compat(struct task_struct *child,
570 addr_t addr, __u32 data)
571{
572 struct compat_per_struct_kernel *dummy32 = NULL;
573
574 if (addr == (addr_t) &dummy32->cr9)
575 /* PER event mask of the user specified per set. */
576 child->thread.per_user.control =
577 data & (PER_EVENT_MASK | PER_CONTROL_MASK);
578 else if (addr == (addr_t) &dummy32->starting_addr)
579 /* Starting address of the user specified per set. */
580 child->thread.per_user.start = data;
581 else if (addr == (addr_t) &dummy32->ending_addr)
582 /* Ending address of the user specified per set. */
583 child->thread.per_user.end = data;
584}
585
586/*
  * Same as poke_user but for a 31 bit program.
  */
 static int __poke_user_compat(struct task_struct *child,
 			      addr_t addr, addr_t data)
 {
-	struct user32 *dummy32 = NULL;
-	per_struct32 *dummy_per32 = NULL;
+	struct compat_user *dummy32 = NULL;
 	__u32 tmp = (__u32) data;
 	addr_t offset;
 
@@ -561,37 +646,20 @@ static int __poke_user_compat(struct task_struct *child,
 
 	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
 		/*
-		 * per_info is found in the thread structure.
+		 * Handle access to the per_info structure.
 		 */
-		offset = addr - (addr_t) &dummy32->regs.per_info;
-		/*
-		 * This is magic. See per_struct and per_struct32.
-		 * By incident the offsets in per_struct are exactly
-		 * twice the offsets in per_struct32 for all fields.
-		 * The 8 byte fields need special handling though,
-		 * because the second half (bytes 4-7) is needed and
-		 * not the first half.
-		 */
-		if ((offset >= (addr_t) &dummy_per32->control_regs &&
-		     offset < (addr_t) (&dummy_per32->control_regs + 1)) ||
-		    (offset >= (addr_t) &dummy_per32->starting_addr &&
-		     offset <= (addr_t) &dummy_per32->ending_addr) ||
-		    offset == (addr_t) &dummy_per32->lowcore.words.address)
-			offset = offset*2 + 4;
-		else
-			offset = offset*2;
-		*(__u32 *)((addr_t) &child->thread.per_info + offset) = tmp;
-
+		addr -= (addr_t) &dummy32->regs.per_info;
+		__poke_user_per_compat(child, addr, data);
 	}
 
-	FixPerRegisters(child);
 	return 0;
 }
 
 static int poke_user_compat(struct task_struct *child,
 			    addr_t addr, addr_t data)
 {
-	if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user32) - 3)
+	if (!is_compat_task() || (addr & 3) ||
+	    addr > sizeof(struct compat_user) - 3)
 		return -EIO;
 
 	return __poke_user_compat(child, addr, data);
@@ -602,7 +670,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 {
 	unsigned long addr = caddr;
 	unsigned long data = cdata;
-	ptrace_area_emu31 parea;
+	compat_ptrace_area parea;
 	int copied, ret;
 
 	switch (request) {
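Editor's note: a pattern worth calling out in the new __peek_user_per()/__poke_user_per() helpers above — a NULL dummy pointer is "dereferenced" purely for its field addresses, so (addr_t) &dummy->cr9 is just the field's offset within the user area. A portable sketch of the same offset dispatch written with offsetof; the struct and field names below are invented for the demo, not taken from the patch:

#include <stdio.h>
#include <stddef.h>

struct per_area {
	unsigned long cr9, cr10, cr11;
	unsigned long starting_addr, ending_addr;
};

static unsigned long peek(const struct per_area *p, size_t off)
{
	if (off == offsetof(struct per_area, cr9))
		return p->cr9;
	if (off == offsetof(struct per_area, starting_addr))
		return p->starting_addr;
	return 0;	/* unknown offsets read as zero, as in the patch */
}

int main(void)
{
	struct per_area a = { .cr9 = 0x40, .starting_addr = 0x1000 };

	printf("%#lx\n", peek(&a, offsetof(struct per_area, cr9)));
	printf("%#lx\n", peek(&a, offsetof(struct per_area, starting_addr)));
	return 0;
}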
diff --git a/arch/s390/kernel/s390_ext.c b/arch/s390/kernel/s390_ext.c
index bd1db508e8af..185029919c4d 100644
--- a/arch/s390/kernel/s390_ext.c
+++ b/arch/s390/kernel/s390_ext.c
@@ -1,33 +1,36 @@
 /*
- * arch/s390/kernel/s390_ext.c
- *
- *  S390 version
- *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
- *    Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com),
- *               Martin Schwidefsky (schwidefsky@de.ibm.com)
+ * Copyright IBM Corp. 1999,2010
+ * Author(s): Holger Smolinski <Holger.Smolinski@de.ibm.com>,
+ *	      Martin Schwidefsky <schwidefsky@de.ibm.com>,
  */
 
+#include <linux/kernel_stat.h>
+#include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
-#include <linux/slab.h>
 #include <linux/ftrace.h>
 #include <linux/errno.h>
-#include <linux/kernel_stat.h>
-#include <linux/interrupt.h>
-#include <asm/cputime.h>
-#include <asm/lowcore.h>
+#include <linux/slab.h>
 #include <asm/s390_ext.h>
 #include <asm/irq_regs.h>
+#include <asm/cputime.h>
+#include <asm/lowcore.h>
 #include <asm/irq.h>
 #include "entry.h"
 
+struct ext_int_info {
+	struct ext_int_info *next;
+	ext_int_handler_t handler;
+	__u16 code;
+};
+
 /*
  * ext_int_hash[index] is the start of the list for all external interrupts
  * that hash to this index. With the current set of external interrupts
  * (0x1202 external call, 0x1004 cpu timer, 0x2401 hwc console, 0x4000
  * iucv and 0x2603 pfault) this is always the first element.
  */
-ext_int_info_t *ext_int_hash[256] = { NULL, };
+static struct ext_int_info *ext_int_hash[256];
 
 static inline int ext_hash(__u16 code)
 {
@@ -36,90 +39,53 @@ static inline int ext_hash(__u16 code)
 
 int register_external_interrupt(__u16 code, ext_int_handler_t handler)
 {
-	ext_int_info_t *p;
-	int index;
-
-	p = kmalloc(sizeof(ext_int_info_t), GFP_ATOMIC);
-	if (p == NULL)
-		return -ENOMEM;
-	p->code = code;
-	p->handler = handler;
-	index = ext_hash(code);
-	p->next = ext_int_hash[index];
-	ext_int_hash[index] = p;
-	return 0;
-}
-
-int register_early_external_interrupt(__u16 code, ext_int_handler_t handler,
-				      ext_int_info_t *p)
-{
+	struct ext_int_info *p;
 	int index;
 
-	if (p == NULL)
-		return -EINVAL;
+	p = kmalloc(sizeof(*p), GFP_ATOMIC);
+	if (!p)
+		return -ENOMEM;
 	p->code = code;
 	p->handler = handler;
 	index = ext_hash(code);
 	p->next = ext_int_hash[index];
 	ext_int_hash[index] = p;
 	return 0;
 }
+EXPORT_SYMBOL(register_external_interrupt);
 
 int unregister_external_interrupt(__u16 code, ext_int_handler_t handler)
 {
-	ext_int_info_t *p, *q;
-	int index;
-
-	index = ext_hash(code);
-	q = NULL;
-	p = ext_int_hash[index];
-	while (p != NULL) {
-		if (p->code == code && p->handler == handler)
-			break;
-		q = p;
-		p = p->next;
-	}
-	if (p == NULL)
-		return -ENOENT;
-	if (q != NULL)
-		q->next = p->next;
-	else
-		ext_int_hash[index] = p->next;
-	kfree(p);
-	return 0;
-}
-
-int unregister_early_external_interrupt(__u16 code, ext_int_handler_t handler,
-					ext_int_info_t *p)
-{
-	ext_int_info_t *q;
+	struct ext_int_info *p, *q;
 	int index;
 
-	if (p == NULL || p->code != code || p->handler != handler)
-		return -EINVAL;
 	index = ext_hash(code);
-	q = ext_int_hash[index];
-	if (p != q) {
-		while (q != NULL) {
-			if (q->next == p)
-				break;
-			q = q->next;
-		}
-		if (q == NULL)
-			return -ENOENT;
+	q = NULL;
+	p = ext_int_hash[index];
+	while (p) {
+		if (p->code == code && p->handler == handler)
+			break;
+		q = p;
+		p = p->next;
+	}
+	if (!p)
+		return -ENOENT;
+	if (q)
 		q->next = p->next;
-	} else
+	else
 		ext_int_hash[index] = p->next;
+	kfree(p);
 	return 0;
 }
+EXPORT_SYMBOL(unregister_external_interrupt);
 
 void __irq_entry do_extint(struct pt_regs *regs, unsigned int ext_int_code,
 			   unsigned int param32, unsigned long param64)
 {
 	struct pt_regs *old_regs;
 	unsigned short code;
-	ext_int_info_t *p;
+	struct ext_int_info *p;
 	int index;
 
 	code = (unsigned short) ext_int_code;
 	old_regs = set_irq_regs(regs);
@@ -132,7 +98,7 @@ void __irq_entry do_extint(struct pt_regs *regs, unsigned int ext_int_code,
 	kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++;
 	if (code != 0x1004)
 		__get_cpu_var(s390_idle).nohz_delay = 1;
 	index = ext_hash(code);
 	for (p = ext_int_hash[index]; p; p = p->next) {
 		if (likely(p->code == code))
 			p->handler(ext_int_code, param32, param64);
@@ -140,6 +106,3 @@ void __irq_entry do_extint(struct pt_regs *regs, unsigned int ext_int_code,
 	irq_exit();
 	set_irq_regs(old_regs);
 }
-
-EXPORT_SYMBOL(register_external_interrupt);
-EXPORT_SYMBOL(unregister_external_interrupt);
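Editor's note: the rework above keeps the registration scheme itself — handlers live in a 256-bucket hash of singly linked lists keyed on the interrupt code, and do_extint() walks the matching chain. A self-contained userspace sketch of that data structure; the hash fold below is illustrative, not copied from the kernel:

#include <stdio.h>
#include <stdlib.h>

typedef void (*handler_t)(unsigned short code);

struct ext_int_info {
	struct ext_int_info *next;
	handler_t handler;
	unsigned short code;
};

static struct ext_int_info *ext_int_hash[256];

static int ext_hash(unsigned short code)
{
	return (code + (code >> 8)) & 0xff;	/* illustrative fold */
}

static int register_ext(unsigned short code, handler_t handler)
{
	struct ext_int_info *p = malloc(sizeof(*p));

	if (!p)
		return -1;
	p->code = code;
	p->handler = handler;
	p->next = ext_int_hash[ext_hash(code)];
	ext_int_hash[ext_hash(code)] = p;	/* push onto the bucket's chain */
	return 0;
}

static void do_extint(unsigned short code)
{
	struct ext_int_info *p;

	for (p = ext_int_hash[ext_hash(code)]; p; p = p->next)
		if (p->code == code)
			p->handler(code);
}

static void timer_handler(unsigned short code)
{
	printf("handled external interrupt 0x%04x\n", code);
}

int main(void)
{
	register_ext(0x1004, timer_handler);
	do_extint(0x1004);
	return 0;
}

What the patch removes is only the "early" variant that let callers supply preallocated nodes; with kmalloc(GFP_ATOMIC) usable early enough, one allocation path suffices.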
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index ee7ac8b11782..abbb3c3c7aab 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -505,7 +505,7 @@ void do_signal(struct pt_regs *regs)
 		 * Let tracing know that we've done the handler setup.
 		 */
 		tracehook_signal_handler(signr, &info, &ka, regs,
-					 current->thread.per_info.single_step);
+					 test_thread_flag(TIF_SINGLE_STEP));
 	}
 	return;
 }
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 94cf510b8fe1..63a97db83f96 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -23,6 +23,7 @@
 #define KMSG_COMPONENT "cpu"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
+#include <linux/workqueue.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/mm.h>
@@ -161,6 +162,7 @@ static void do_ext_call_interrupt(unsigned int ext_int_code,
 {
 	unsigned long bits;
 
+	kstat_cpu(smp_processor_id()).irqs[EXTINT_IPI]++;
 	/*
 	 * handle bit signal external calls
 	 *
@@ -469,25 +471,25 @@ int __cpuinit start_secondary(void *cpuvoid)
 	ipi_call_unlock();
 	/* Switch on interrupts */
 	local_irq_enable();
-	/* Print info about this processor */
-	print_cpu_info();
 	/* cpu_idle will call schedule for us */
 	cpu_idle();
 	return 0;
 }
 
-static void __init smp_create_idle(unsigned int cpu)
+struct create_idle {
+	struct work_struct work;
+	struct task_struct *idle;
+	struct completion done;
+	int cpu;
+};
+
+static void __cpuinit smp_fork_idle(struct work_struct *work)
 {
-	struct task_struct *p;
+	struct create_idle *c_idle;
 
-	/*
-	 * don't care about the psw and regs settings since we'll never
-	 * reschedule the forked task.
-	 */
-	p = fork_idle(cpu);
-	if (IS_ERR(p))
-		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
-	current_set[cpu] = p;
+	c_idle = container_of(work, struct create_idle, work);
+	c_idle->idle = fork_idle(c_idle->cpu);
+	complete(&c_idle->done);
 }
 
 static int __cpuinit smp_alloc_lowcore(int cpu)
@@ -551,6 +553,7 @@ static void smp_free_lowcore(int cpu)
 int __cpuinit __cpu_up(unsigned int cpu)
 {
 	struct _lowcore *cpu_lowcore;
+	struct create_idle c_idle;
 	struct task_struct *idle;
 	struct stack_frame *sf;
 	u32 lowcore;
@@ -558,6 +561,19 @@ int __cpuinit __cpu_up(unsigned int cpu)
 
 	if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED)
 		return -EIO;
+	idle = current_set[cpu];
+	if (!idle) {
+		c_idle.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done);
+		INIT_WORK_ONSTACK(&c_idle.work, smp_fork_idle);
+		c_idle.cpu = cpu;
+		schedule_work(&c_idle.work);
+		wait_for_completion(&c_idle.done);
+		if (IS_ERR(c_idle.idle))
+			return PTR_ERR(c_idle.idle);
+		idle = c_idle.idle;
+		current_set[cpu] = c_idle.idle;
+	}
+	init_idle(idle, cpu);
 	if (smp_alloc_lowcore(cpu))
 		return -ENOMEM;
 	do {
@@ -572,7 +588,6 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	while (sigp_p(lowcore, cpu, sigp_set_prefix) == sigp_busy)
 		udelay(10);
 
-	idle = current_set[cpu];
 	cpu_lowcore = lowcore_ptr[cpu];
 	cpu_lowcore->kernel_stack = (unsigned long)
 		task_stack_page(idle) + THREAD_SIZE;
@@ -664,7 +679,6 @@ void __cpu_die(unsigned int cpu)
 		udelay(10);
 	smp_free_lowcore(cpu);
 	atomic_dec(&init_mm.context.attach_count);
-	pr_info("Processor %d stopped\n", cpu);
 }
 
 void cpu_die(void)
@@ -684,14 +698,12 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 #endif
 	unsigned long async_stack, panic_stack;
 	struct _lowcore *lowcore;
-	unsigned int cpu;
 
 	smp_detect_cpus();
 
 	/* request the 0x1201 emergency signal external interrupt */
 	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
 		panic("Couldn't request external interrupt 0x1201");
-	print_cpu_info();
 
 	/* Reallocate current lowcore, but keep its contents. */
 	lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
@@ -719,9 +731,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	if (vdso_alloc_per_cpu(smp_processor_id(), &S390_lowcore))
 		BUG();
 #endif
-	for_each_possible_cpu(cpu)
-		if (cpu != smp_processor_id())
-			smp_create_idle(cpu);
 }
 
 void __init smp_prepare_boot_cpu(void)
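Editor's note: the __cpu_up() change above defers fork_idle() to a workqueue and blocks on a completion, so idle tasks are created lazily in a context where forking is safe, instead of being preallocated for every possible CPU at boot. A rough userspace analogy of that hand-off-and-wait shape, with a pthread standing in for the work item (all names here are illustrative):

#include <pthread.h>
#include <stdio.h>

struct create_idle {
	pthread_t worker;	/* stands in for the work item */
	int cpu;
	int idle_pid;		/* stands in for struct task_struct * */
};

static void *smp_fork_idle(void *arg)
{
	struct create_idle *c_idle = arg;

	c_idle->idle_pid = 100 + c_idle->cpu;	/* pretend fork_idle(cpu) */
	return NULL;				/* thread exit == complete() */
}

int main(void)
{
	struct create_idle c_idle = { .cpu = 1 };

	pthread_create(&c_idle.worker, NULL, smp_fork_idle, &c_idle);
	pthread_join(&c_idle.worker, NULL);	/* wait_for_completion() */
	printf("idle task for cpu %d: pid %d\n", c_idle.cpu, c_idle.idle_pid);
	return 0;
}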
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index f754a6dc4f94..9e7b039458da 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -15,6 +15,7 @@
 #define KMSG_COMPONENT "time"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
+#include <linux/kernel_stat.h>
 #include <linux/errno.h>
 #include <linux/module.h>
 #include <linux/sched.h>
@@ -37,6 +38,7 @@
 #include <linux/clocksource.h>
 #include <linux/clockchips.h>
 #include <linux/gfp.h>
+#include <linux/kprobes.h>
 #include <asm/uaccess.h>
 #include <asm/delay.h>
 #include <asm/s390_ext.h>
@@ -60,7 +62,7 @@ static DEFINE_PER_CPU(struct clock_event_device, comparators);
 /*
  * Scheduler clock - returns current time in nanosec units.
  */
-unsigned long long notrace sched_clock(void)
+unsigned long long notrace __kprobes sched_clock(void)
 {
 	return (get_clock_monotonic() * 125) >> 9;
 }
@@ -159,6 +161,7 @@ static void clock_comparator_interrupt(unsigned int ext_int_code,
 					  unsigned int param32,
 					  unsigned long param64)
 {
+	kstat_cpu(smp_processor_id()).irqs[EXTINT_CLK]++;
 	if (S390_lowcore.clock_comparator == -1ULL)
 		set_clock_comparator(S390_lowcore.clock_comparator);
 }
@@ -169,6 +172,7 @@ static void stp_timing_alert(struct stp_irq_parm *);
 static void timing_alert_interrupt(unsigned int ext_int_code,
 				   unsigned int param32, unsigned long param64)
 {
+	kstat_cpu(smp_processor_id()).irqs[EXTINT_TLA]++;
 	if (param32 & 0x00c40000)
 		etr_timing_alert((struct etr_irq_parm *) &param32);
 	if (param32 & 0x00038000)
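Editor's note: the kstat_cpu(...).irqs[...]++ lines threaded through these handlers all follow one pattern — bump a per-CPU, per-class counter first, then do the real work, so /proc/interrupts can report per-source counts. A compressed single-CPU sketch of that bookkeeping; the enum values merely echo the EXTINT_* names used in the patch:

#include <stdio.h>

enum { EXTINT_CLK, EXTINT_TLA, EXTINT_TMR, EXTINT_PFL, NR_EXTINT };

static unsigned long irqs[NR_EXTINT];
static const char *const irq_name[NR_EXTINT] = { "CLK", "TLA", "TMR", "PFL" };

static void clock_comparator_interrupt(void)
{
	irqs[EXTINT_CLK]++;	/* account first, then handle the interrupt */
}

int main(void)
{
	int i;

	clock_comparator_interrupt();
	for (i = 0; i < NR_EXTINT; i++)
		printf("%s: %lu\n", irq_name[i], irqs[i]);
	return 0;
}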
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 70640822621a..5eb78dd584ce 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -365,12 +365,10 @@ static inline void __user *get_psw_address(struct pt_regs *regs,
 		((regs->psw.addr - (pgm_int_code >> 16)) & PSW_ADDR_INSN);
 }
 
-void __kprobes do_single_step(struct pt_regs *regs)
+void __kprobes do_per_trap(struct pt_regs *regs)
 {
-	if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0,
-		       SIGTRAP) == NOTIFY_STOP){
+	if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, SIGTRAP) == NOTIFY_STOP)
 		return;
-	}
 	if (tracehook_consider_fatal_signal(current, SIGTRAP))
 		force_sig(SIGTRAP, current);
 }
@@ -451,8 +449,8 @@ static inline void do_fp_trap(struct pt_regs *regs, void __user *location,
 			"floating point exception", regs, &si);
 }
 
-static void illegal_op(struct pt_regs *regs, long pgm_int_code,
-		       unsigned long trans_exc_code)
+static void __kprobes illegal_op(struct pt_regs *regs, long pgm_int_code,
+				 unsigned long trans_exc_code)
 {
 	siginfo_t info;
 	__u8 opcode[6];
@@ -688,7 +686,7 @@ static void space_switch_exception(struct pt_regs *regs, long pgm_int_code,
 	do_trap(pgm_int_code, SIGILL, "space switch event", regs, &info);
 }
 
-asmlinkage void kernel_stack_overflow(struct pt_regs * regs)
+asmlinkage void __kprobes kernel_stack_overflow(struct pt_regs * regs)
 {
 	bust_spinlocks(1);
 	printk("Kernel stack overflow.\n");
@@ -733,5 +731,6 @@ void __init trap_init(void)
 	pgm_check_table[0x15] = &operand_exception;
 	pgm_check_table[0x1C] = &space_switch_exception;
 	pgm_check_table[0x1D] = &hfp_sqrt_exception;
-	pfault_irq_init();
+	/* Enable machine checks early. */
+	local_mcck_enable();
 }
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 7eff9b7347c0..1ccdf4d8aa85 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -20,6 +20,7 @@
 #include <linux/rcupdate.h>
 #include <linux/posix-timers.h>
 #include <linux/cpu.h>
+#include <linux/kprobes.h>
 
 #include <asm/s390_ext.h>
 #include <asm/timer.h>
@@ -122,7 +123,7 @@ void account_system_vtime(struct task_struct *tsk)
 }
 EXPORT_SYMBOL_GPL(account_system_vtime);
 
-void vtime_start_cpu(__u64 int_clock, __u64 enter_timer)
+void __kprobes vtime_start_cpu(__u64 int_clock, __u64 enter_timer)
 {
 	struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
 	struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer);
@@ -162,7 +163,7 @@ void vtime_start_cpu(__u64 int_clock, __u64 enter_timer)
 	idle->sequence++;
 }
 
-void vtime_stop_cpu(void)
+void __kprobes vtime_stop_cpu(void)
 {
 	struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
 	struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer);
@@ -323,6 +324,7 @@ static void do_cpu_timer_interrupt(unsigned int ext_int_code,
 	struct list_head cb_list;	/* the callback queue */
 	__u64 elapsed, next;
 
+	kstat_cpu(smp_processor_id()).irqs[EXTINT_TMR]++;
 	INIT_LIST_HEAD(&cb_list);
 	vq = &__get_cpu_var(virt_cpu_timer);
 
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
index a7251580891c..f66a1bdbb61d 100644
--- a/arch/s390/kvm/Kconfig
+++ b/arch/s390/kvm/Kconfig
@@ -4,8 +4,8 @@
 source "virt/kvm/Kconfig"
 
 menuconfig VIRTUALIZATION
-	bool "Virtualization"
-	default y
+	def_bool y
+	prompt "Virtualization"
 	---help---
 	  Say Y here to get to see options for using your Linux host to run other
 	  operating systems inside virtual machines (guests).
@@ -16,7 +16,8 @@ menuconfig VIRTUALIZATION
 if VIRTUALIZATION
 
 config KVM
-	tristate "Kernel-based Virtual Machine (KVM) support"
+	def_tristate y
+	prompt "Kernel-based Virtual Machine (KVM) support"
 	depends on HAVE_KVM && EXPERIMENTAL
 	select PREEMPT_NOTIFIERS
 	select ANON_INODES
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index 7c37ec359ec2..0f53110e1d09 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -47,7 +47,6 @@ static void __udelay_disabled(unsigned long long usecs)
 	lockdep_on();
 	__ctl_load(cr0_saved, 0, 0);
 	local_tick_enable(clock_saved);
-	set_clock_comparator(S390_lowcore.clock_comparator);
 }
 
 static void __udelay_enabled(unsigned long long usecs)
@@ -70,7 +69,6 @@ static void __udelay_enabled(unsigned long long usecs)
 		if (clock_saved)
 			local_tick_enable(clock_saved);
 	} while (get_clock() < end);
-	set_clock_comparator(S390_lowcore.clock_comparator);
 }
 
 /*
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index fe5701e9efbf..2c57806c0858 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -10,6 +10,7 @@
  *  Copyright (C) 1995  Linus Torvalds
  */
 
+#include <linux/kernel_stat.h>
 #include <linux/perf_event.h>
 #include <linux/signal.h>
 #include <linux/sched.h>
@@ -234,13 +235,13 @@ static noinline int signal_return(struct pt_regs *regs, long int_code,
 	rc = __get_user(instruction, (u16 __user *) regs->psw.addr);
 
 	if (!rc && instruction == 0x0a77) {
-		clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
+		clear_tsk_thread_flag(current, TIF_PER_TRAP);
 		if (is_compat_task())
 			sys32_sigreturn();
 		else
 			sys_sigreturn();
 	} else if (!rc && instruction == 0x0aad) {
-		clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
+		clear_tsk_thread_flag(current, TIF_PER_TRAP);
 		if (is_compat_task())
 			sys32_rt_sigreturn();
 		else
@@ -378,7 +379,7 @@ static inline int do_exception(struct pt_regs *regs, int access,
 	 * The instruction that caused the program check will
 	 * be repeated. Don't signal single step via SIGTRAP.
 	 */
-	clear_tsk_thread_flag(tsk, TIF_SINGLE_STEP);
+	clear_tsk_thread_flag(tsk, TIF_PER_TRAP);
 	fault = 0;
 out_up:
 	up_read(&mm->mmap_sem);
@@ -480,8 +481,7 @@ int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
 /*
  * 'pfault' pseudo page faults routines.
  */
-static ext_int_info_t ext_int_pfault;
-static int pfault_disable = 0;
+static int pfault_disable;
 
 static int __init nopfault(char *str)
 {
@@ -543,6 +543,7 @@ static void pfault_interrupt(unsigned int ext_int_code,
 	struct task_struct *tsk;
 	__u16 subcode;
 
+	kstat_cpu(smp_processor_id()).irqs[EXTINT_PFL]++;
 	/*
 	 * Get the external interruption subcode & pfault
 	 * initial/completion signal bit. VM stores this
@@ -592,24 +593,28 @@ static void pfault_interrupt(unsigned int ext_int_code,
 	}
 }
 
-void __init pfault_irq_init(void)
+static int __init pfault_irq_init(void)
 {
-	if (!MACHINE_IS_VM)
-		return;
+	int rc;
 
+	if (!MACHINE_IS_VM)
+		return 0;
 	/*
 	 * Try to get pfault pseudo page faults going.
 	 */
-	if (register_early_external_interrupt(0x2603, pfault_interrupt,
-					      &ext_int_pfault) != 0)
-		panic("Couldn't request external interrupt 0x2603");
-
+	rc = register_external_interrupt(0x2603, pfault_interrupt);
+	if (rc) {
+		pfault_disable = 1;
+		return rc;
+	}
 	if (pfault_init() == 0)
-		return;
+		return 0;
 
 	/* Tough luck, no pfault. */
 	pfault_disable = 1;
-	unregister_early_external_interrupt(0x2603, pfault_interrupt,
-					    &ext_int_pfault);
+	unregister_external_interrupt(0x2603, pfault_interrupt);
+	return 0;
 }
+early_initcall(pfault_irq_init);
 
 #endif
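Editor's note: the pfault_irq_init() rework above turns a boot-time panic into a polite early_initcall — register the 0x2603 handler, probe the facility with pfault_init(), and back the registration out if the probe fails, leaving pfault_disable set. The control flow, compressed into a runnable sketch with stubbed helpers (return codes and stub behavior are illustrative):

#include <stdio.h>

static int pfault_disable;

static int register_ext(unsigned short code) { (void)code; return 0; }
static void unregister_ext(unsigned short code) { (void)code; }
static int pfault_init(void) { return -1; }	/* pretend: not under z/VM */

static int pfault_irq_init(void)
{
	int rc;

	rc = register_ext(0x2603);
	if (rc) {
		pfault_disable = 1;	/* could not even register */
		return rc;
	}
	if (pfault_init() == 0)
		return 0;		/* facility works, keep the handler */
	/* Tough luck, no pfault: undo the registration. */
	pfault_disable = 1;
	unregister_ext(0x2603);
	return 0;
}

int main(void)
{
	pfault_irq_init();
	printf("pfault_disable = %d\n", pfault_disable);
	return 0;
}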
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
index 07883197f474..8e477bb1f3f6 100644
--- a/drivers/s390/block/Kconfig
+++ b/drivers/s390/block/Kconfig
@@ -2,7 +2,8 @@ comment "S/390 block device drivers"
 	depends on S390 && BLOCK
 
 config BLK_DEV_XPRAM
-	tristate "XPRAM disk support"
+	def_tristate m
+	prompt "XPRAM disk support"
 	depends on S390 && BLOCK
 	help
 	  Select this option if you want to use your expanded storage on S/390
@@ -12,13 +13,15 @@ config BLK_DEV_XPRAM
 	  xpram. If unsure, say "N".
 
 config DCSSBLK
-	tristate "DCSSBLK support"
+	def_tristate m
+	prompt "DCSSBLK support"
 	depends on S390 && BLOCK
 	help
 	  Support for dcss block device
 
 config DASD
-	tristate "Support for DASD devices"
+	def_tristate y
+	prompt "Support for DASD devices"
 	depends on CCW && BLOCK
 	select IOSCHED_DEADLINE
 	help
@@ -27,28 +30,32 @@ config DASD
 	  natively on a single image or an LPAR.
 
 config DASD_PROFILE
-	bool "Profiling support for dasd devices"
+	def_bool y
+	prompt "Profiling support for dasd devices"
 	depends on DASD
 	help
 	  Enable this option if you want to see profiling information
 	  in /proc/dasd/statistics.
 
 config DASD_ECKD
-	tristate "Support for ECKD Disks"
+	def_tristate y
+	prompt "Support for ECKD Disks"
 	depends on DASD
 	help
 	  ECKD devices are the most commonly used devices. You should enable
 	  this option unless you are very sure to have no ECKD device.
 
 config DASD_FBA
-	tristate "Support for FBA Disks"
+	def_tristate y
+	prompt "Support for FBA Disks"
 	depends on DASD
 	help
 	  Select this option to be able to access FBA devices. It is safe to
 	  say "Y".
 
 config DASD_DIAG
-	tristate "Support for DIAG access to Disks"
+	def_tristate y
+	prompt "Support for DIAG access to Disks"
 	depends on DASD
 	help
 	  Select this option if you want to use Diagnose250 command to access
@@ -56,7 +63,8 @@ config DASD_DIAG
 	  say "N".
 
 config DASD_EER
-	bool "Extended error reporting (EER)"
+	def_bool y
+	prompt "Extended error reporting (EER)"
 	depends on DASD
 	help
 	  This driver provides a character device interface to the
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index fb613d70c2cb..794bfd962266 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -11,6 +11,7 @@
 #define KMSG_COMPONENT "dasd"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
+#include <linux/kernel_stat.h>
 #include <linux/kmod.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
@@ -368,6 +369,11 @@ dasd_state_ready_to_online(struct dasd_device * device)
 	device->state = DASD_STATE_ONLINE;
 	if (device->block) {
 		dasd_schedule_block_bh(device->block);
+		if ((device->features & DASD_FEATURE_USERAW)) {
+			disk = device->block->gdp;
+			kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
+			return 0;
+		}
 		disk = device->block->bdev->bd_disk;
 		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
 		while ((part = disk_part_iter_next(&piter)))
@@ -393,7 +399,7 @@ static int dasd_state_online_to_ready(struct dasd_device *device)
 		return rc;
 	}
 	device->state = DASD_STATE_READY;
-	if (device->block) {
+	if (device->block && !(device->features & DASD_FEATURE_USERAW)) {
 		disk = device->block->bdev->bd_disk;
 		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
 		while ((part = disk_part_iter_next(&piter)))
@@ -744,10 +750,6 @@ struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength,
 	char *data;
 	int size;
 
-	/* Sanity checks */
-	BUG_ON(datasize > PAGE_SIZE ||
-	     (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
-
 	size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
 	if (cplength > 0)
 		size += cplength * sizeof(struct ccw1);
@@ -853,7 +855,6 @@ int dasd_term_IO(struct dasd_ccw_req *cqr)
 	rc = ccw_device_clear(device->cdev, (long) cqr);
 	switch (rc) {
 	case 0:	/* termination successful */
-		cqr->retries--;
 		cqr->status = DASD_CQR_CLEAR_PENDING;
 		cqr->stopclk = get_clock();
 		cqr->starttime = 0;
@@ -905,6 +906,16 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
 		return rc;
 	}
 	device = (struct dasd_device *) cqr->startdev;
+	if (((cqr->block &&
+	      test_bit(DASD_FLAG_LOCK_STOLEN, &cqr->block->base->flags)) ||
+	     test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) &&
+	    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
+		DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO: return request %p "
+			      "because of stolen lock", cqr);
+		cqr->status = DASD_CQR_ERROR;
+		cqr->intrc = -EPERM;
+		return -EPERM;
+	}
 	if (cqr->retries < 0) {
 		/* internal error 14 - start_IO run out of retries */
 		sprintf(errorstring, "14 %p", cqr);
@@ -916,6 +927,11 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
 	cqr->startclk = get_clock();
 	cqr->starttime = jiffies;
 	cqr->retries--;
+	if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
+		cqr->lpm &= device->path_data.opm;
+		if (!cqr->lpm)
+			cqr->lpm = device->path_data.opm;
+	}
 	if (cqr->cpmode == 1) {
 		rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
 					 (long) cqr, cqr->lpm);
@@ -928,35 +944,53 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
 		cqr->status = DASD_CQR_IN_IO;
 		break;
 	case -EBUSY:
-		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 			      "start_IO: device busy, retry later");
 		break;
 	case -ETIMEDOUT:
-		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 			      "start_IO: request timeout, retry later");
 		break;
 	case -EACCES:
-		/* -EACCES indicates that the request used only a
-		 * subset of the available pathes and all these
-		 * pathes are gone.
-		 * Do a retry with all available pathes.
+		/* -EACCES indicates that the request used only a subset of the
+		 * available paths and all these paths are gone. If the lpm of
+		 * this request was only a subset of the opm (e.g. the ppm) then
+		 * we just do a retry with all available paths.
+		 * If we already use the full opm, something is amiss, and we
+		 * need a full path verification.
 		 */
-		cqr->lpm = LPM_ANYPATH;
-		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
-			      "start_IO: selected pathes gone,"
-			      " retry on all pathes");
+		if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
+			DBF_DEV_EVENT(DBF_WARNING, device,
+				      "start_IO: selected paths gone (%x)",
+				      cqr->lpm);
+		} else if (cqr->lpm != device->path_data.opm) {
+			cqr->lpm = device->path_data.opm;
+			DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+				      "start_IO: selected paths gone,"
+				      " retry on all paths");
+		} else {
+			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+				      "start_IO: all paths in opm gone,"
+				      " do path verification");
+			dasd_generic_last_path_gone(device);
+			device->path_data.opm = 0;
+			device->path_data.ppm = 0;
+			device->path_data.npm = 0;
+			device->path_data.tbvpm =
+				ccw_device_get_path_mask(device->cdev);
+		}
 		break;
 	case -ENODEV:
-		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 			      "start_IO: -ENODEV device gone, retry");
 		break;
 	case -EIO:
-		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 			      "start_IO: -EIO device gone, retry");
 		break;
 	case -EINVAL:
 		/* most likely caused in power management context */
-		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 			      "start_IO: -EINVAL device currently "
 			      "not accessible");
 		break;
@@ -1076,6 +1110,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 	unsigned long long now;
 	int expires;
 
+	kstat_cpu(smp_processor_id()).irqs[IOINT_DAS]++;
 	if (IS_ERR(irb)) {
 		switch (PTR_ERR(irb)) {
 		case -EIO:
@@ -1094,16 +1129,11 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 	}
 
 	now = get_clock();
-
-	/* check for unsolicited interrupts */
 	cqr = (struct dasd_ccw_req *) intparm;
-	if (!cqr || ((scsw_cc(&irb->scsw) == 1) &&
-		     (scsw_fctl(&irb->scsw) & SCSW_FCTL_START_FUNC) &&
-		     ((scsw_stctl(&irb->scsw) == SCSW_STCTL_STATUS_PEND) ||
-		      (scsw_stctl(&irb->scsw) == (SCSW_STCTL_STATUS_PEND |
-						  SCSW_STCTL_ALERT_STATUS))))) {
-		if (cqr && cqr->status == DASD_CQR_IN_IO)
-			cqr->status = DASD_CQR_QUEUED;
+	/* check for conditions that should be handled immediately */
+	if (!cqr ||
+	    !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
+	      scsw_cstat(&irb->scsw) == 0)) {
 		if (cqr)
 			memcpy(&cqr->irb, irb, sizeof(*irb));
 		device = dasd_device_from_cdev_locked(cdev);
@@ -1114,17 +1144,14 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 			dasd_put_device(device);
 			return;
 		}
-		device->discipline->dump_sense_dbf(device, irb,
-						   "unsolicited");
-		if ((device->features & DASD_FEATURE_ERPLOG))
-			device->discipline->dump_sense(device, cqr,
-						       irb);
-		dasd_device_clear_timer(device);
-		device->discipline->handle_unsolicited_interrupt(device,
-								 irb);
+		device->discipline->dump_sense_dbf(device, irb, "int");
+		if (device->features & DASD_FEATURE_ERPLOG)
+			device->discipline->dump_sense(device, cqr, irb);
+		device->discipline->check_for_device_change(device, cqr, irb);
 		dasd_put_device(device);
-		return;
 	}
+	if (!cqr)
+		return;
 
 	device = (struct dasd_device *) cqr->startdev;
 	if (!device ||
@@ -1164,25 +1191,19 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 					      struct dasd_ccw_req, devlist);
 		}
 	} else {  /* error */
-		memcpy(&cqr->irb, irb, sizeof(struct irb));
-		/* log sense for every failed I/O to s390 debugfeature */
-		dasd_log_sense_dbf(cqr, irb);
-		if (device->features & DASD_FEATURE_ERPLOG) {
-			dasd_log_sense(cqr, irb);
-		}
-
 		/*
 		 * If we don't want complex ERP for this request, then just
 		 * reset this and retry it in the fastpath
 		 */
 		if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
 		    cqr->retries > 0) {
-			if (cqr->lpm == LPM_ANYPATH)
+			if (cqr->lpm == device->path_data.opm)
 				DBF_DEV_EVENT(DBF_DEBUG, device,
 					      "default ERP in fastpath "
 					      "(%i retries left)",
 					      cqr->retries);
-			cqr->lpm = LPM_ANYPATH;
+			if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
+				cqr->lpm = device->path_data.opm;
 			cqr->status = DASD_CQR_QUEUED;
 			next = cqr;
 		} else
@@ -1210,13 +1231,13 @@ enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb)
 		goto out;
 	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
 	    device->state != device->target ||
-	    !device->discipline->handle_unsolicited_interrupt){
+	    !device->discipline->check_for_device_change){
 		dasd_put_device(device);
 		goto out;
 	}
-
-	dasd_device_clear_timer(device);
-	device->discipline->handle_unsolicited_interrupt(device, irb);
+	if (device->discipline->dump_sense_dbf)
+		device->discipline->dump_sense_dbf(device, irb, "uc");
+	device->discipline->check_for_device_change(device, NULL, irb);
 	dasd_put_device(device);
 out:
 	return UC_TODO_RETRY;
@@ -1366,8 +1387,14 @@ static void __dasd_device_start_head(struct dasd_device *device)
1366 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 1387 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
1367 if (cqr->status != DASD_CQR_QUEUED) 1388 if (cqr->status != DASD_CQR_QUEUED)
1368 return; 1389 return;
1369 /* when device is stopped, return request to previous layer */
1370 if (device->stopped) {
1390 /* when device is stopped, return request to previous layer
1391 * exception: only the disconnect or unresumed bits are set and the
1392 * cqr is a path verification request
1393 */
1394 if (device->stopped &&
1395 !(!(device->stopped & ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM))
1396 && test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))) {
1397 cqr->intrc = -EAGAIN;
1371 cqr->status = DASD_CQR_CLEARED; 1398 cqr->status = DASD_CQR_CLEARED;
1372 dasd_schedule_device_bh(device); 1399 dasd_schedule_device_bh(device);
1373 return; 1400 return;
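The reworked start_head test above is dense; read it as "a stopped device may still start a cqr, but only if the cqr is a path verification request and the only stop reasons are disconnection or an unresumed power-management state". A minimal standalone C sketch of that predicate, with illustrative mask values (the real DASD_STOPPED_* constants live in dasd_int.h and may differ):

	#include <stdbool.h>

	#define DASD_STOPPED_DC_WAIT 0x04	/* illustrative values only */
	#define DASD_UNRESUMED_PM    0x10

	/* may this cqr be started although device->stopped is set? */
	static bool may_start_stopped(unsigned int stopped, bool verify_path_cqr)
	{
		if (!stopped)
			return true;
		/* only disconnect/unresumed bits set, and it is a path cqr */
		return verify_path_cqr &&
			!(stopped & ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM));
	}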
@@ -1383,6 +1410,23 @@ static void __dasd_device_start_head(struct dasd_device *device)
1383 dasd_device_set_timer(device, 50); 1410 dasd_device_set_timer(device, 50);
1384} 1411}
1385 1412
1413static void __dasd_device_check_path_events(struct dasd_device *device)
1414{
1415 int rc;
1416
1417 if (device->path_data.tbvpm) {
1418 if (device->stopped & ~(DASD_STOPPED_DC_WAIT |
1419 DASD_UNRESUMED_PM))
1420 return;
1421 rc = device->discipline->verify_path(
1422 device, device->path_data.tbvpm);
1423 if (rc)
1424 dasd_device_set_timer(device, 50);
1425 else
1426 device->path_data.tbvpm = 0;
1427 }
1428};
1429
1386/* 1430/*
1387 * Go through all requests on the dasd_device request queue, 1431 * Go through all requests on the dasd_device request queue,
1388 * terminate them on the cdev if necessary, and return them to the 1432 * terminate them on the cdev if necessary, and return them to the
@@ -1457,6 +1501,7 @@ static void dasd_device_tasklet(struct dasd_device *device)
1457 __dasd_device_check_expire(device); 1501 __dasd_device_check_expire(device);
1458 /* find final requests on ccw queue */ 1502 /* find final requests on ccw queue */
1459 __dasd_device_process_ccw_queue(device, &final_queue); 1503 __dasd_device_process_ccw_queue(device, &final_queue);
1504 __dasd_device_check_path_events(device);
1460 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 1505 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1461 /* Now call the callback function of requests with final status */ 1506 /* Now call the callback function of requests with final status */
1462 __dasd_device_process_final_queue(device, &final_queue); 1507 __dasd_device_process_final_queue(device, &final_queue);
@@ -1613,7 +1658,12 @@ static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
1613 continue; 1658 continue;
1614 if (cqr->status != DASD_CQR_FILLED) /* could be failed */ 1659 if (cqr->status != DASD_CQR_FILLED) /* could be failed */
1615 continue; 1660 continue;
1616
1661 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
1662 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
1663 cqr->status = DASD_CQR_FAILED;
1664 cqr->intrc = -EPERM;
1665 continue;
1666 }
1617 /* Non-temporary stop condition will trigger fail fast */ 1667 /* Non-temporary stop condition will trigger fail fast */
1618 if (device->stopped & ~DASD_STOPPED_PENDING && 1668 if (device->stopped & ~DASD_STOPPED_PENDING &&
1619 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 1669 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
@@ -1621,7 +1671,6 @@ static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
1621 cqr->status = DASD_CQR_FAILED; 1671 cqr->status = DASD_CQR_FAILED;
1622 continue; 1672 continue;
1623 } 1673 }
1624
1625 /* Don't try to start requests if device is stopped */ 1674 /* Don't try to start requests if device is stopped */
1626 if (interruptible) { 1675 if (interruptible) {
1627 rc = wait_event_interruptible( 1676 rc = wait_event_interruptible(
@@ -1706,13 +1755,18 @@ int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
1706 int rc; 1755 int rc;
1707 1756
1708 device = cqr->startdev; 1757 device = cqr->startdev;
1758 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
1759 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
1760 cqr->status = DASD_CQR_FAILED;
1761 cqr->intrc = -EPERM;
1762 return -EIO;
1763 }
1709 spin_lock_irq(get_ccwdev_lock(device->cdev)); 1764 spin_lock_irq(get_ccwdev_lock(device->cdev));
1710 rc = _dasd_term_running_cqr(device); 1765 rc = _dasd_term_running_cqr(device);
1711 if (rc) { 1766 if (rc) {
1712 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 1767 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1713 return rc; 1768 return rc;
1714 } 1769 }
1715
1716 cqr->callback = dasd_wakeup_cb; 1770 cqr->callback = dasd_wakeup_cb;
1717 cqr->callback_data = DASD_SLEEPON_START_TAG; 1771 cqr->callback_data = DASD_SLEEPON_START_TAG;
1718 cqr->status = DASD_CQR_QUEUED; 1772 cqr->status = DASD_CQR_QUEUED;
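The same stolen-lock gate appears three times in this patch (_dasd_sleep_on, dasd_sleep_on_immediatly and __dasd_block_start_head). A sketch of the shared rule as a hypothetical helper; the flag names are the driver's, the function is not:

	#include <errno.h>
	#include <stdbool.h>

	/*
	 * Once another host has stolen the device reservation
	 * (DASD_FLAG_LOCK_STOLEN), only requests explicitly marked
	 * DASD_CQR_ALLOW_SLOCK - e.g. a steal-lock request itself -
	 * may still run; everything else is failed with -EPERM.
	 */
	static int check_stolen_lock(bool lock_stolen, bool allow_slock)
	{
		if (lock_stolen && !allow_slock)
			return -EPERM;	/* caller sets DASD_CQR_FAILED */
		return 0;
	}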
@@ -2016,6 +2070,13 @@ static void __dasd_block_start_head(struct dasd_block *block)
2016 list_for_each_entry(cqr, &block->ccw_queue, blocklist) { 2070 list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
2017 if (cqr->status != DASD_CQR_FILLED) 2071 if (cqr->status != DASD_CQR_FILLED)
2018 continue; 2072 continue;
2073 if (test_bit(DASD_FLAG_LOCK_STOLEN, &block->base->flags) &&
2074 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
2075 cqr->status = DASD_CQR_FAILED;
2076 cqr->intrc = -EPERM;
2077 dasd_schedule_block_bh(block);
2078 continue;
2079 }
2019 /* Non-temporary stop condition will trigger fail fast */ 2080 /* Non-temporary stop condition will trigger fail fast */
2020 if (block->base->stopped & ~DASD_STOPPED_PENDING && 2081 if (block->base->stopped & ~DASD_STOPPED_PENDING &&
2021 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2082 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
@@ -2201,8 +2262,20 @@ static void dasd_setup_queue(struct dasd_block *block)
2201{ 2262{
2202 int max; 2263 int max;
2203 2264
2204 blk_queue_logical_block_size(block->request_queue, block->bp_block);
2205 max = block->base->discipline->max_blocks << block->s2b_shift;
2265 if (block->base->features & DASD_FEATURE_USERAW) {
2266 /*
2267 * the max_blocks value for raw_track access is 256
2268 * it is higher than the native ECKD value because we
2269 * only need one ccw per track
2270 * so the max_hw_sectors are
2271 * 2048 x 512B = 1024kB = 16 tracks
2272 */
2273 max = 2048;
2274 } else {
2275 max = block->base->discipline->max_blocks << block->s2b_shift;
2276 }
2277 blk_queue_logical_block_size(block->request_queue,
2278 block->bp_block);
2206 blk_queue_max_hw_sectors(block->request_queue, max); 2279 blk_queue_max_hw_sectors(block->request_queue, max);
2207 blk_queue_max_segments(block->request_queue, -1L); 2280 blk_queue_max_segments(block->request_queue, -1L);
2208 /* with page sized segments we can translate each segment into 2281 /* with page sized segments we can translate each segment into
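The 2048-sector limit in the raw branch is easiest to verify by redoing the arithmetic from the comment; a small standalone check (plain C, illustrative only):

	#include <stdio.h>

	int main(void)
	{
		unsigned int max_hw_sectors = 2048;		/* 512-byte units */
		unsigned int bytes = max_hw_sectors * 512;	/* 1048576 = 1 MiB */
		unsigned int raw_track = 16 * 4096;		/* one track, 64 KiB */

		/* prints: 1048576 bytes per request = 16 raw tracks */
		printf("%u bytes per request = %u raw tracks\n",
		       bytes, bytes / raw_track);
		return 0;
	}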
@@ -2588,10 +2661,53 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
2588 return 0; 2661 return 0;
2589} 2662}
2590 2663
2664int dasd_generic_last_path_gone(struct dasd_device *device)
2665{
2666 struct dasd_ccw_req *cqr;
2667
2668 dev_warn(&device->cdev->dev, "No operational channel path is left "
2669 "for the device\n");
2670 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone");
2671 /* First of all call extended error reporting. */
2672 dasd_eer_write(device, NULL, DASD_EER_NOPATH);
2673
2674 if (device->state < DASD_STATE_BASIC)
2675 return 0;
2676 /* Device is active. We want to keep it. */
2677 list_for_each_entry(cqr, &device->ccw_queue, devlist)
2678 if ((cqr->status == DASD_CQR_IN_IO) ||
2679 (cqr->status == DASD_CQR_CLEAR_PENDING)) {
2680 cqr->status = DASD_CQR_QUEUED;
2681 cqr->retries++;
2682 }
2683 dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
2684 dasd_device_clear_timer(device);
2685 dasd_schedule_device_bh(device);
2686 return 1;
2687}
2688EXPORT_SYMBOL_GPL(dasd_generic_last_path_gone);
2689
2690int dasd_generic_path_operational(struct dasd_device *device)
2691{
2692 dev_info(&device->cdev->dev, "A channel path to the device has become "
2693 "operational\n");
2694 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational");
2695 dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
2696 if (device->stopped & DASD_UNRESUMED_PM) {
2697 dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM);
2698 dasd_restore_device(device);
2699 return 1;
2700 }
2701 dasd_schedule_device_bh(device);
2702 if (device->block)
2703 dasd_schedule_block_bh(device->block);
2704 return 1;
2705}
2706EXPORT_SYMBOL_GPL(dasd_generic_path_operational);
2707
2591int dasd_generic_notify(struct ccw_device *cdev, int event) 2708int dasd_generic_notify(struct ccw_device *cdev, int event)
2592{ 2709{
2593 struct dasd_device *device; 2710 struct dasd_device *device;
2594 struct dasd_ccw_req *cqr;
2595 int ret; 2711 int ret;
2596 2712
2597 device = dasd_device_from_cdev_locked(cdev); 2713 device = dasd_device_from_cdev_locked(cdev);
@@ -2602,41 +2718,64 @@ int dasd_generic_notify(struct ccw_device *cdev, int event)
2602 case CIO_GONE: 2718 case CIO_GONE:
2603 case CIO_BOXED: 2719 case CIO_BOXED:
2604 case CIO_NO_PATH: 2720 case CIO_NO_PATH:
2605 /* First of all call extended error reporting. */
2606 dasd_eer_write(device, NULL, DASD_EER_NOPATH);
2607
2608 if (device->state < DASD_STATE_BASIC)
2609 break;
2610 /* Device is active. We want to keep it. */
2611 list_for_each_entry(cqr, &device->ccw_queue, devlist)
2612 if (cqr->status == DASD_CQR_IN_IO) {
2613 cqr->status = DASD_CQR_QUEUED;
2614 cqr->retries++;
2615 }
2616 dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
2617 dasd_device_clear_timer(device);
2618 dasd_schedule_device_bh(device);
2619 ret = 1;
2721 device->path_data.opm = 0;
2722 device->path_data.ppm = 0;
2723 device->path_data.npm = 0;
2724 ret = dasd_generic_last_path_gone(device);
2620 break; 2725 break;
2621 case CIO_OPER: 2726 case CIO_OPER:
2622 /* FIXME: add a sanity check. */
2623 dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
2624 if (device->stopped & DASD_UNRESUMED_PM) {
2625 dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM);
2626 dasd_restore_device(device);
2627 ret = 1;
2628 break;
2629 }
2630 dasd_schedule_device_bh(device);
2631 if (device->block)
2632 dasd_schedule_block_bh(device->block);
2633 ret = 1; 2727 ret = 1;
2728 if (device->path_data.opm)
2729 ret = dasd_generic_path_operational(device);
2634 break; 2730 break;
2635 } 2731 }
2636 dasd_put_device(device); 2732 dasd_put_device(device);
2637 return ret; 2733 return ret;
2638} 2734}
2639 2735
2736void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
2737{
2738 int chp;
2739 __u8 oldopm, eventlpm;
2740 struct dasd_device *device;
2741
2742 device = dasd_device_from_cdev_locked(cdev);
2743 if (IS_ERR(device))
2744 return;
2745 for (chp = 0; chp < 8; chp++) {
2746 eventlpm = 0x80 >> chp;
2747 if (path_event[chp] & PE_PATH_GONE) {
2748 oldopm = device->path_data.opm;
2749 device->path_data.opm &= ~eventlpm;
2750 device->path_data.ppm &= ~eventlpm;
2751 device->path_data.npm &= ~eventlpm;
2752 if (oldopm && !device->path_data.opm)
2753 dasd_generic_last_path_gone(device);
2754 }
2755 if (path_event[chp] & PE_PATH_AVAILABLE) {
2756 device->path_data.opm &= ~eventlpm;
2757 device->path_data.ppm &= ~eventlpm;
2758 device->path_data.npm &= ~eventlpm;
2759 device->path_data.tbvpm |= eventlpm;
2760 dasd_schedule_device_bh(device);
2761 }
2762 }
2763 dasd_put_device(device);
2764}
2765EXPORT_SYMBOL_GPL(dasd_generic_path_event);
2766
2767int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm)
2768{
2769 if (!device->path_data.opm && lpm) {
2770 device->path_data.opm = lpm;
2771 dasd_generic_path_operational(device);
2772 } else
2773 device->path_data.opm |= lpm;
2774 return 0;
2775}
2776EXPORT_SYMBOL_GPL(dasd_generic_verify_path);
2777
2778
2640int dasd_generic_pm_freeze(struct ccw_device *cdev) 2779int dasd_generic_pm_freeze(struct ccw_device *cdev)
2641{ 2780{
2642 struct dasd_ccw_req *cqr, *n; 2781 struct dasd_ccw_req *cqr, *n;
@@ -2646,6 +2785,10 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev)
2646 2785
2647 if (IS_ERR(device)) 2786 if (IS_ERR(device))
2648 return PTR_ERR(device); 2787 return PTR_ERR(device);
2788
2789 if (device->discipline->freeze)
2790 rc = device->discipline->freeze(device);
2791
2649 /* disallow new I/O */ 2792 /* disallow new I/O */
2650 dasd_device_set_stop_bits(device, DASD_STOPPED_PM); 2793 dasd_device_set_stop_bits(device, DASD_STOPPED_PM);
2651 /* clear active requests */ 2794 /* clear active requests */
@@ -2682,9 +2825,6 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev)
2682 list_splice_tail(&freeze_queue, &device->ccw_queue); 2825 list_splice_tail(&freeze_queue, &device->ccw_queue);
2683 spin_unlock_irq(get_ccwdev_lock(cdev)); 2826 spin_unlock_irq(get_ccwdev_lock(cdev));
2684 2827
2685 if (device->discipline->freeze)
2686 rc = device->discipline->freeze(device);
2687
2688 dasd_put_device(device); 2828 dasd_put_device(device);
2689 return rc; 2829 return rc;
2690} 2830}
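All of the new path bookkeeping above uses the s390 logical path mask convention: an __u8 in which bit 7 (0x80) is channel path 0, so path n is addressed as 0x80 >> n. A standalone sketch of the loop shape dasd_generic_path_event() uses to fold per-path events into the opm/ppm/npm/tbvpm masks:

	#include <stdio.h>

	int main(void)
	{
		unsigned char opm = 0xC0;	/* example: paths 0 and 1 usable */
		int chp;

		for (chp = 0; chp < 8; chp++) {
			unsigned char lpm = 0x80 >> chp;

			if (opm & lpm)
				printf("chp %d operational (mask 0x%02x)\n",
				       chp, lpm);
		}
		return 0;
	}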
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 968c76cf7127..1654a24817be 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -152,9 +152,9 @@ dasd_3990_erp_alternate_path(struct dasd_ccw_req * erp)
152 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 152 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
153 opm = ccw_device_get_path_mask(device->cdev); 153 opm = ccw_device_get_path_mask(device->cdev);
154 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 154 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
155 //FIXME: start with get_opm ?
156 if (erp->lpm == 0) 155 if (erp->lpm == 0)
157 erp->lpm = LPM_ANYPATH & ~(erp->irb.esw.esw0.sublog.lpum);
156 erp->lpm = device->path_data.opm &
157 ~(erp->irb.esw.esw0.sublog.lpum);
158 else 158 else
159 erp->lpm &= ~(erp->irb.esw.esw0.sublog.lpum); 159 erp->lpm &= ~(erp->irb.esw.esw0.sublog.lpum);
160 160
@@ -270,10 +270,11 @@ static struct dasd_ccw_req *dasd_3990_erp_action_1(struct dasd_ccw_req *erp)
270{ 270{
271 erp->function = dasd_3990_erp_action_1; 271 erp->function = dasd_3990_erp_action_1;
272 dasd_3990_erp_alternate_path(erp); 272 dasd_3990_erp_alternate_path(erp);
273 if (erp->status == DASD_CQR_FAILED) {
273 if (erp->status == DASD_CQR_FAILED &&
274 !test_bit(DASD_CQR_VERIFY_PATH, &erp->flags)) {
274 erp->status = DASD_CQR_FILLED; 275 erp->status = DASD_CQR_FILLED;
275 erp->retries = 10; 276 erp->retries = 10;
276 erp->lpm = LPM_ANYPATH;
277 erp->lpm = erp->startdev->path_data.opm;
277 erp->function = dasd_3990_erp_action_1_sec; 278 erp->function = dasd_3990_erp_action_1_sec;
278 } 279 }
279 return erp; 280 return erp;
@@ -1907,15 +1908,14 @@ dasd_3990_erp_compound_retry(struct dasd_ccw_req * erp, char *sense)
1907static void 1908static void
1908dasd_3990_erp_compound_path(struct dasd_ccw_req * erp, char *sense) 1909dasd_3990_erp_compound_path(struct dasd_ccw_req * erp, char *sense)
1909{ 1910{
1910
1911 if (sense[25] & DASD_SENSE_BIT_3) { 1911 if (sense[25] & DASD_SENSE_BIT_3) {
1912 dasd_3990_erp_alternate_path(erp); 1912 dasd_3990_erp_alternate_path(erp);
1913 1913
1914 if (erp->status == DASD_CQR_FAILED) {
1914 if (erp->status == DASD_CQR_FAILED &&
1915 !test_bit(DASD_CQR_VERIFY_PATH, &erp->flags)) {
1915 /* reset the lpm and the status to be able to 1916 /* reset the lpm and the status to be able to
1916 * try further actions. */ 1917 * try further actions. */
1917
1918 erp->lpm = 0;
1918 erp->lpm = erp->startdev->path_data.opm;
1919 erp->status = DASD_CQR_NEED_ERP; 1919 erp->status = DASD_CQR_NEED_ERP;
1920 } 1920 }
1921 } 1921 }
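The ERP changes replace the static LPM_ANYPATH constant with the per-device operational path mask; the retry logic itself is unchanged: drop the path the request just failed on (the lpum from the sense data) from the set of candidates until no bit is left. A sketch of that selection; next_path_mask() is an illustrative name, not a driver function:

	static unsigned char next_path_mask(unsigned char lpm, unsigned char opm,
					    unsigned char failed_lpum)
	{
		if (lpm == 0)			/* first pass: start from all    */
			lpm = opm;		/* operational paths ...         */
		return lpm & ~failed_lpum;	/* ... minus the one that failed */
	}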
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 8d41f3ed38d7..cb6a67bc89ff 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -208,6 +208,8 @@ dasd_feature_list(char *str, char **endp)
208 features |= DASD_FEATURE_READONLY; 208 features |= DASD_FEATURE_READONLY;
209 else if (len == 4 && !strncmp(str, "diag", 4)) 209 else if (len == 4 && !strncmp(str, "diag", 4))
210 features |= DASD_FEATURE_USEDIAG; 210 features |= DASD_FEATURE_USEDIAG;
211 else if (len == 3 && !strncmp(str, "raw", 3))
212 features |= DASD_FEATURE_USERAW;
211 else if (len == 6 && !strncmp(str, "erplog", 6)) 213 else if (len == 6 && !strncmp(str, "erplog", 6))
212 features |= DASD_FEATURE_ERPLOG; 214 features |= DASD_FEATURE_ERPLOG;
213 else if (len == 8 && !strncmp(str, "failfast", 8)) 215 else if (len == 8 && !strncmp(str, "failfast", 8))
@@ -639,6 +641,7 @@ dasd_put_device_wake(struct dasd_device *device)
639{ 641{
640 wake_up(&dasd_delete_wq); 642 wake_up(&dasd_delete_wq);
641} 643}
644EXPORT_SYMBOL_GPL(dasd_put_device_wake);
642 645
643/* 646/*
644 * Return dasd_device structure associated with cdev. 647 * Return dasd_device structure associated with cdev.
@@ -856,7 +859,7 @@ dasd_use_diag_store(struct device *dev, struct device_attribute *attr,
856 spin_lock(&dasd_devmap_lock); 859 spin_lock(&dasd_devmap_lock);
857 /* Changing diag discipline flag is only allowed in offline state. */ 860 /* Changing diag discipline flag is only allowed in offline state. */
858 rc = count; 861 rc = count;
859 if (!devmap->device) {
862 if (!devmap->device && !(devmap->features & DASD_FEATURE_USERAW)) {
860 if (val) 863 if (val)
861 devmap->features |= DASD_FEATURE_USEDIAG; 864 devmap->features |= DASD_FEATURE_USEDIAG;
862 else 865 else
@@ -869,6 +872,56 @@ dasd_use_diag_store(struct device *dev, struct device_attribute *attr,
869 872
870static DEVICE_ATTR(use_diag, 0644, dasd_use_diag_show, dasd_use_diag_store); 873static DEVICE_ATTR(use_diag, 0644, dasd_use_diag_show, dasd_use_diag_store);
871 874
875/*
876 * use_raw controls whether the driver should give access to raw eckd data or
877 * operate in standard mode
878 */
879static ssize_t
880dasd_use_raw_show(struct device *dev, struct device_attribute *attr, char *buf)
881{
882 struct dasd_devmap *devmap;
883 int use_raw;
884
885 devmap = dasd_find_busid(dev_name(dev));
886 if (!IS_ERR(devmap))
887 use_raw = (devmap->features & DASD_FEATURE_USERAW) != 0;
888 else
889 use_raw = (DASD_FEATURE_DEFAULT & DASD_FEATURE_USERAW) != 0;
890 return sprintf(buf, use_raw ? "1\n" : "0\n");
891}
892
893static ssize_t
894dasd_use_raw_store(struct device *dev, struct device_attribute *attr,
895 const char *buf, size_t count)
896{
897 struct dasd_devmap *devmap;
898 ssize_t rc;
899 unsigned long val;
900
901 devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
902 if (IS_ERR(devmap))
903 return PTR_ERR(devmap);
904
905 if ((strict_strtoul(buf, 10, &val) != 0) || val > 1)
906 return -EINVAL;
907
908 spin_lock(&dasd_devmap_lock);
909 /* Changing the raw track access flag is only allowed in offline state. */
910 rc = count;
911 if (!devmap->device && !(devmap->features & DASD_FEATURE_USEDIAG)) {
912 if (val)
913 devmap->features |= DASD_FEATURE_USERAW;
914 else
915 devmap->features &= ~DASD_FEATURE_USERAW;
916 } else
917 rc = -EPERM;
918 spin_unlock(&dasd_devmap_lock);
919 return rc;
920}
921
922static DEVICE_ATTR(raw_track_access, 0644, dasd_use_raw_show,
923 dasd_use_raw_store);
924
872static ssize_t 925static ssize_t
873dasd_discipline_show(struct device *dev, struct device_attribute *attr, 926dasd_discipline_show(struct device *dev, struct device_attribute *attr,
874 char *buf) 927 char *buf)
@@ -1126,6 +1179,103 @@ dasd_expires_store(struct device *dev, struct device_attribute *attr,
1126 1179
1127static DEVICE_ATTR(expires, 0644, dasd_expires_show, dasd_expires_store); 1180static DEVICE_ATTR(expires, 0644, dasd_expires_show, dasd_expires_store);
1128 1181
1182static ssize_t dasd_reservation_policy_show(struct device *dev,
1183 struct device_attribute *attr,
1184 char *buf)
1185{
1186 struct dasd_devmap *devmap;
1187 int rc = 0;
1188
1189 devmap = dasd_find_busid(dev_name(dev));
1190 if (IS_ERR(devmap)) {
1191 rc = snprintf(buf, PAGE_SIZE, "ignore\n");
1192 } else {
1193 spin_lock(&dasd_devmap_lock);
1194 if (devmap->features & DASD_FEATURE_FAILONSLCK)
1195 rc = snprintf(buf, PAGE_SIZE, "fail\n");
1196 else
1197 rc = snprintf(buf, PAGE_SIZE, "ignore\n");
1198 spin_unlock(&dasd_devmap_lock);
1199 }
1200 return rc;
1201}
1202
1203static ssize_t dasd_reservation_policy_store(struct device *dev,
1204 struct device_attribute *attr,
1205 const char *buf, size_t count)
1206{
1207 struct dasd_devmap *devmap;
1208 int rc;
1209
1210 devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
1211 if (IS_ERR(devmap))
1212 return PTR_ERR(devmap);
1213 rc = 0;
1214 spin_lock(&dasd_devmap_lock);
1215 if (sysfs_streq("ignore", buf))
1216 devmap->features &= ~DASD_FEATURE_FAILONSLCK;
1217 else if (sysfs_streq("fail", buf))
1218 devmap->features |= DASD_FEATURE_FAILONSLCK;
1219 else
1220 rc = -EINVAL;
1221 if (devmap->device)
1222 devmap->device->features = devmap->features;
1223 spin_unlock(&dasd_devmap_lock);
1224 if (rc)
1225 return rc;
1226 else
1227 return count;
1228}
1229
1230static DEVICE_ATTR(reservation_policy, 0644,
1231 dasd_reservation_policy_show, dasd_reservation_policy_store);
1232
1233static ssize_t dasd_reservation_state_show(struct device *dev,
1234 struct device_attribute *attr,
1235 char *buf)
1236{
1237 struct dasd_device *device;
1238 int rc = 0;
1239
1240 device = dasd_device_from_cdev(to_ccwdev(dev));
1241 if (IS_ERR(device))
1242 return snprintf(buf, PAGE_SIZE, "none\n");
1243
1244 if (test_bit(DASD_FLAG_IS_RESERVED, &device->flags))
1245 rc = snprintf(buf, PAGE_SIZE, "reserved\n");
1246 else if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags))
1247 rc = snprintf(buf, PAGE_SIZE, "lost\n");
1248 else
1249 rc = snprintf(buf, PAGE_SIZE, "none\n");
1250 dasd_put_device(device);
1251 return rc;
1252}
1253
1254static ssize_t dasd_reservation_state_store(struct device *dev,
1255 struct device_attribute *attr,
1256 const char *buf, size_t count)
1257{
1258 struct dasd_device *device;
1259 int rc = 0;
1260
1261 device = dasd_device_from_cdev(to_ccwdev(dev));
1262 if (IS_ERR(device))
1263 return -ENODEV;
1264 if (sysfs_streq("reset", buf))
1265 clear_bit(DASD_FLAG_LOCK_STOLEN, &device->flags);
1266 else
1267 rc = -EINVAL;
1268 dasd_put_device(device);
1269
1270 if (rc)
1271 return rc;
1272 else
1273 return count;
1274}
1275
1276static DEVICE_ATTR(last_known_reservation_state, 0644,
1277 dasd_reservation_state_show, dasd_reservation_state_store);
1278
1129static struct attribute * dasd_attrs[] = { 1279static struct attribute * dasd_attrs[] = {
1130 &dev_attr_readonly.attr, 1280 &dev_attr_readonly.attr,
1131 &dev_attr_discipline.attr, 1281 &dev_attr_discipline.attr,
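Taken together, the two new attributes implement a small reservation state machine: DASD_FLAG_IS_RESERVED marks an owned reservation, DASD_FLAG_LOCK_STOLEN latches a lost one when the "fail" policy (DASD_FEATURE_FAILONSLCK) is active, and writing "reset" clears the latch so normal I/O may resume. A rough sketch of the transitions (the enum and helpers are illustrative, not driver code):

	enum res_state { RES_NONE, RES_RESERVED, RES_LOST };

	/* unit check with sense[7] == 0x3F: the reservation went away */
	static enum res_state reservation_lost(int failonslck)
	{
		return failonslck ? RES_LOST : RES_NONE;
	}

	/* "echo reset > .../last_known_reservation_state" */
	static enum res_state reservation_reset(void)
	{
		return RES_NONE;	/* clear_bit(DASD_FLAG_LOCK_STOLEN, ...) */
	}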
@@ -1134,10 +1284,13 @@ static struct attribute * dasd_attrs[] = {
1134 &dev_attr_vendor.attr, 1284 &dev_attr_vendor.attr,
1135 &dev_attr_uid.attr, 1285 &dev_attr_uid.attr,
1136 &dev_attr_use_diag.attr, 1286 &dev_attr_use_diag.attr,
1287 &dev_attr_raw_track_access.attr,
1137 &dev_attr_eer_enabled.attr, 1288 &dev_attr_eer_enabled.attr,
1138 &dev_attr_erplog.attr, 1289 &dev_attr_erplog.attr,
1139 &dev_attr_failfast.attr, 1290 &dev_attr_failfast.attr,
1140 &dev_attr_expires.attr, 1291 &dev_attr_expires.attr,
1292 &dev_attr_reservation_policy.attr,
1293 &dev_attr_last_known_reservation_state.attr,
1141 NULL, 1294 NULL,
1142}; 1295};
1143 1296
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 266b34b55403..29143eda9dd9 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -10,6 +10,7 @@
10 10
11#define KMSG_COMPONENT "dasd" 11#define KMSG_COMPONENT "dasd"
12 12
13#include <linux/kernel_stat.h>
13#include <linux/stddef.h> 14#include <linux/stddef.h>
14#include <linux/kernel.h> 15#include <linux/kernel.h>
15#include <linux/slab.h> 16#include <linux/slab.h>
@@ -238,6 +239,7 @@ static void dasd_ext_handler(unsigned int ext_int_code,
238 addr_t ip; 239 addr_t ip;
239 int rc; 240 int rc;
240 241
242 kstat_cpu(smp_processor_id()).irqs[EXTINT_DSD]++;
241 switch (ext_int_code >> 24) { 243 switch (ext_int_code >> 24) {
242 case DASD_DIAG_CODE_31BIT: 244 case DASD_DIAG_CODE_31BIT:
243 ip = (addr_t) param32; 245 ip = (addr_t) param32;
@@ -617,6 +619,7 @@ static struct dasd_discipline dasd_diag_discipline = {
617 .ebcname = "DIAG", 619 .ebcname = "DIAG",
618 .max_blocks = DIAG_MAX_BLOCKS, 620 .max_blocks = DIAG_MAX_BLOCKS,
619 .check_device = dasd_diag_check_device, 621 .check_device = dasd_diag_check_device,
622 .verify_path = dasd_generic_verify_path,
620 .fill_geometry = dasd_diag_fill_geometry, 623 .fill_geometry = dasd_diag_fill_geometry,
621 .start_IO = dasd_start_diag, 624 .start_IO = dasd_start_diag,
622 .term_IO = dasd_diag_term_IO, 625 .term_IO = dasd_diag_term_IO,
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index bf61274af3bb..318672d05563 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -54,6 +54,15 @@
54#define ECKD_F7(i) (i->factor7) 54#define ECKD_F7(i) (i->factor7)
55#define ECKD_F8(i) (i->factor8) 55#define ECKD_F8(i) (i->factor8)
56 56
57/*
58 * raw track access always map to 64k in memory
59 * so it maps to 16 blocks of 4k per track
60 */
61#define DASD_RAW_BLOCK_PER_TRACK 16
62#define DASD_RAW_BLOCKSIZE 4096
63/* 64k are 128 x 512 byte sectors */
64#define DASD_RAW_SECTORS_PER_TRACK 128
65
57MODULE_LICENSE("GPL"); 66MODULE_LICENSE("GPL");
58 67
59static struct dasd_discipline dasd_eckd_discipline; 68static struct dasd_discipline dasd_eckd_discipline;
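The three raw-track constants must agree on the same 64 KiB-per-track mapping; a one-file consistency check (standalone, illustrative):

	#include <assert.h>

	int main(void)
	{
		/* DASD_RAW_BLOCK_PER_TRACK * DASD_RAW_BLOCKSIZE == 64 KiB */
		assert(16 * 4096 == 64 * 1024);
		/* DASD_RAW_SECTORS_PER_TRACK * 512 == 64 KiB as well */
		assert(128 * 512 == 64 * 1024);
		return 0;
	}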
@@ -90,6 +99,18 @@ static struct {
90} *dasd_reserve_req; 99} *dasd_reserve_req;
91static DEFINE_MUTEX(dasd_reserve_mutex); 100static DEFINE_MUTEX(dasd_reserve_mutex);
92 101
102/* definitions for the path verification worker */
103struct path_verification_work_data {
104 struct work_struct worker;
105 struct dasd_device *device;
106 struct dasd_ccw_req cqr;
107 struct ccw1 ccw;
108 __u8 rcd_buffer[DASD_ECKD_RCD_DATA_SIZE];
109 int isglobal;
110 __u8 tbvpm;
111};
112static struct path_verification_work_data *path_verification_worker;
113static DEFINE_MUTEX(dasd_path_verification_mutex);
93 114
94/* initial attempt at a probe function. this can be simplified once 115/* initial attempt at a probe function. this can be simplified once
95 * the other detection code is gone */ 116 * the other detection code is gone */
@@ -373,6 +394,23 @@ static void fill_LRE_data(struct LRE_eckd_data *data, unsigned int trk,
373 data->length = reclen; 394 data->length = reclen;
374 data->operation.operation = 0x03; 395 data->operation.operation = 0x03;
375 break; 396 break;
397 case DASD_ECKD_CCW_WRITE_FULL_TRACK:
398 data->operation.orientation = 0x0;
399 data->operation.operation = 0x3F;
400 data->extended_operation = 0x11;
401 data->length = 0;
402 data->extended_parameter_length = 0x02;
403 if (data->count > 8) {
404 data->extended_parameter[0] = 0xFF;
405 data->extended_parameter[1] = 0xFF;
406 data->extended_parameter[1] <<= (16 - count);
407 } else {
408 data->extended_parameter[0] = 0xFF;
409 data->extended_parameter[0] <<= (8 - count);
410 data->extended_parameter[1] = 0x00;
411 }
412 data->sector = 0xFF;
413 break;
376 case DASD_ECKD_CCW_WRITE_TRACK_DATA: 414 case DASD_ECKD_CCW_WRITE_TRACK_DATA:
377 data->auxiliary.length_valid = 0x1; 415 data->auxiliary.length_valid = 0x1;
378 data->length = reclen; /* not tlf, as one might think */ 416 data->length = reclen; /* not tlf, as one might think */
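The WRITE_FULL_TRACK case builds a 16-bit, big-endian record bitmap in the two extended parameter bytes: one bit per record on the track, set for the first count records. A standalone recomputation of that mask (the function name is illustrative):

	#include <stdio.h>

	static void fill_record_mask(unsigned int count, unsigned char param[2])
	{
		if (count > 8) {			/* bits spill into byte 1 */
			param[0] = 0xFF;
			param[1] = (unsigned char)(0xFF << (16 - count));
		} else {				/* all bits fit in byte 0 */
			param[0] = (unsigned char)(0xFF << (8 - count));
			param[1] = 0x00;
		}
	}

	int main(void)
	{
		unsigned char p[2];

		fill_record_mask(12, p);
		printf("count=12 -> 0x%02X 0x%02X\n", p[0], p[1]);	/* 0xFF 0xF0 */
		return 0;
	}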
@@ -396,6 +434,12 @@ static void fill_LRE_data(struct LRE_eckd_data *data, unsigned int trk,
396 case DASD_ECKD_CCW_READ_COUNT: 434 case DASD_ECKD_CCW_READ_COUNT:
397 data->operation.operation = 0x06; 435 data->operation.operation = 0x06;
398 break; 436 break;
437 case DASD_ECKD_CCW_READ_TRACK:
438 data->operation.orientation = 0x1;
439 data->operation.operation = 0x0C;
440 data->extended_parameter_length = 0;
441 data->sector = 0xFF;
442 break;
399 case DASD_ECKD_CCW_READ_TRACK_DATA: 443 case DASD_ECKD_CCW_READ_TRACK_DATA:
400 data->auxiliary.length_valid = 0x1; 444 data->auxiliary.length_valid = 0x1;
401 data->length = tlf; 445 data->length = tlf;
@@ -439,10 +483,16 @@ static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
439 483
440 ccw->cmd_code = DASD_ECKD_CCW_PFX; 484 ccw->cmd_code = DASD_ECKD_CCW_PFX;
441 ccw->flags = 0; 485 ccw->flags = 0;
442 ccw->count = sizeof(*pfxdata);
443 ccw->cda = (__u32) __pa(pfxdata);
486 if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK) {
487 ccw->count = sizeof(*pfxdata) + 2;
488 ccw->cda = (__u32) __pa(pfxdata);
489 memset(pfxdata, 0, sizeof(*pfxdata) + 2);
490 } else {
491 ccw->count = sizeof(*pfxdata);
492 ccw->cda = (__u32) __pa(pfxdata);
493 memset(pfxdata, 0, sizeof(*pfxdata));
494 }
444 495
445 memset(pfxdata, 0, sizeof(*pfxdata));
446 /* prefix data */ 496 /* prefix data */
447 if (format > 1) { 497 if (format > 1) {
448 DBF_DEV_EVENT(DBF_ERR, basedev, 498 DBF_DEV_EVENT(DBF_ERR, basedev,
@@ -476,6 +526,7 @@ static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
476 dedata->mask.perm = 0x1; 526 dedata->mask.perm = 0x1;
477 dedata->attributes.operation = basepriv->attrib.operation; 527 dedata->attributes.operation = basepriv->attrib.operation;
478 break; 528 break;
529 case DASD_ECKD_CCW_READ_TRACK:
479 case DASD_ECKD_CCW_READ_TRACK_DATA: 530 case DASD_ECKD_CCW_READ_TRACK_DATA:
480 dedata->mask.perm = 0x1; 531 dedata->mask.perm = 0x1;
481 dedata->attributes.operation = basepriv->attrib.operation; 532 dedata->attributes.operation = basepriv->attrib.operation;
@@ -502,6 +553,11 @@ static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
502 dedata->attributes.operation = DASD_BYPASS_CACHE; 553 dedata->attributes.operation = DASD_BYPASS_CACHE;
503 rc = check_XRC_on_prefix(pfxdata, basedev); 554 rc = check_XRC_on_prefix(pfxdata, basedev);
504 break; 555 break;
556 case DASD_ECKD_CCW_WRITE_FULL_TRACK:
557 dedata->mask.perm = 0x03;
558 dedata->attributes.operation = basepriv->attrib.operation;
559 dedata->blk_size = 0;
560 break;
505 case DASD_ECKD_CCW_WRITE_TRACK_DATA: 561 case DASD_ECKD_CCW_WRITE_TRACK_DATA:
506 dedata->mask.perm = 0x02; 562 dedata->mask.perm = 0x02;
507 dedata->attributes.operation = basepriv->attrib.operation; 563 dedata->attributes.operation = basepriv->attrib.operation;
@@ -755,26 +811,27 @@ static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
755 return -EINVAL; 811 return -EINVAL;
756} 812}
757 813
758static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device,
759 void *rcd_buffer,
760 struct ciw *ciw, __u8 lpm)
761{
762 struct dasd_ccw_req *cqr;
763 struct ccw1 *ccw;
764
765 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */, ciw->count,
766 device);
767
768 if (IS_ERR(cqr)) {
769 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
770 "Could not allocate RCD request");
771 return cqr;
772 }
773
774 ccw = cqr->cpaddr;
775 ccw->cmd_code = ciw->cmd;
776 ccw->cda = (__u32)(addr_t)rcd_buffer;
777 ccw->count = ciw->count;
814static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
815 struct dasd_ccw_req *cqr,
816 __u8 *rcd_buffer,
817 __u8 lpm)
818{
819 struct ccw1 *ccw;
820 /*
821 * buffer has to start with EBCDIC "V1.0" to show
822 * support for virtual device SNEQ
823 */
824 rcd_buffer[0] = 0xE5;
825 rcd_buffer[1] = 0xF1;
826 rcd_buffer[2] = 0x4B;
827 rcd_buffer[3] = 0xF0;
828
829 ccw = cqr->cpaddr;
830 ccw->cmd_code = DASD_ECKD_CCW_RCD;
831 ccw->flags = 0;
832 ccw->cda = (__u32)(addr_t)rcd_buffer;
833 ccw->count = DASD_ECKD_RCD_DATA_SIZE;
834 cqr->magic = DASD_ECKD_MAGIC;
778 835
779 cqr->startdev = device; 836 cqr->startdev = device;
780 cqr->memdev = device; 837 cqr->memdev = device;
@@ -784,7 +841,30 @@ static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device,
784 cqr->retries = 256; 841 cqr->retries = 256;
785 cqr->buildclk = get_clock(); 842 cqr->buildclk = get_clock();
786 cqr->status = DASD_CQR_FILLED; 843 cqr->status = DASD_CQR_FILLED;
787 return cqr;
844 set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
845}
846
847static int dasd_eckd_read_conf_immediately(struct dasd_device *device,
848 struct dasd_ccw_req *cqr,
849 __u8 *rcd_buffer,
850 __u8 lpm)
851{
852 struct ciw *ciw;
853 int rc;
854 /*
855 * sanity check: scan for RCD command in extended SenseID data
856 * some devices do not support RCD
857 */
858 ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
859 if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD)
860 return -EOPNOTSUPP;
861
862 dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buffer, lpm);
863 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
864 set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
865 cqr->retries = 5;
866 rc = dasd_sleep_on_immediatly(cqr);
867 return rc;
788} 868}
789 869
790static int dasd_eckd_read_conf_lpm(struct dasd_device *device, 870static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
@@ -797,32 +877,29 @@ static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
797 struct dasd_ccw_req *cqr; 877 struct dasd_ccw_req *cqr;
798 878
799 /* 879 /*
800 * scan for RCD command in extended SenseID data
880 * sanity check: scan for RCD command in extended SenseID data
881 * some devices do not support RCD
801 */ 882 */
802 ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD); 883 ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
803 if (!ciw || ciw->cmd == 0) {
884 if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD) {
804 ret = -EOPNOTSUPP; 885 ret = -EOPNOTSUPP;
805 goto out_error; 886 goto out_error;
806 } 887 }
807 rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA);
888 rcd_buf = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL | GFP_DMA);
808 if (!rcd_buf) { 889 if (!rcd_buf) {
809 ret = -ENOMEM; 890 ret = -ENOMEM;
810 goto out_error; 891 goto out_error;
811 } 892 }
812
813 /*
814 * buffer has to start with EBCDIC "V1.0" to show
815 * support for virtual device SNEQ
816 */
817 rcd_buf[0] = 0xE5;
818 rcd_buf[1] = 0xF1;
819 rcd_buf[2] = 0x4B;
820 rcd_buf[3] = 0xF0;
821 cqr = dasd_eckd_build_rcd_lpm(device, rcd_buf, ciw, lpm);
893 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */,
894 0, /* use rcd_buf as data area */
895 device);
822 if (IS_ERR(cqr)) { 896 if (IS_ERR(cqr)) {
823 ret = PTR_ERR(cqr);
897 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
898 "Could not allocate RCD request");
899 ret = -ENOMEM;
824 goto out_error; 900 goto out_error;
825 } 901 }
902 dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buf, lpm);
826 ret = dasd_sleep_on(cqr); 903 ret = dasd_sleep_on(cqr);
827 /* 904 /*
828 * on success we update the user input parms 905 * on success we update the user input parms
@@ -831,7 +908,7 @@ static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
831 if (ret) 908 if (ret)
832 goto out_error; 909 goto out_error;
833 910
834 *rcd_buffer_size = ciw->count; 911 *rcd_buffer_size = DASD_ECKD_RCD_DATA_SIZE;
835 *rcd_buffer = rcd_buf; 912 *rcd_buffer = rcd_buf;
836 return 0; 913 return 0;
837out_error: 914out_error:
@@ -901,18 +978,18 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
901 void *conf_data; 978 void *conf_data;
902 int conf_len, conf_data_saved; 979 int conf_len, conf_data_saved;
903 int rc; 980 int rc;
904 __u8 lpm;
981 __u8 lpm, opm;
905 struct dasd_eckd_private *private; 982 struct dasd_eckd_private *private;
906 struct dasd_eckd_path *path_data;
983 struct dasd_path *path_data;
907 984
908 private = (struct dasd_eckd_private *) device->private; 985 private = (struct dasd_eckd_private *) device->private;
909 path_data = (struct dasd_eckd_path *) &private->path_data;
910 path_data->opm = ccw_device_get_path_mask(device->cdev);
986 path_data = &device->path_data;
987 opm = ccw_device_get_path_mask(device->cdev);
911 lpm = 0x80; 988 lpm = 0x80;
912 conf_data_saved = 0; 989 conf_data_saved = 0;
913 /* get configuration data per operational path */ 990 /* get configuration data per operational path */
914 for (lpm = 0x80; lpm; lpm>>= 1) { 991 for (lpm = 0x80; lpm; lpm>>= 1) {
915 if (lpm & path_data->opm){
992 if (lpm & opm) {
916 rc = dasd_eckd_read_conf_lpm(device, &conf_data, 993 rc = dasd_eckd_read_conf_lpm(device, &conf_data,
917 &conf_len, lpm); 994 &conf_len, lpm);
918 if (rc && rc != -EOPNOTSUPP) { /* -EOPNOTSUPP is ok */ 995 if (rc && rc != -EOPNOTSUPP) { /* -EOPNOTSUPP is ok */
@@ -925,6 +1002,8 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
925 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", 1002 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
926 "No configuration data " 1003 "No configuration data "
927 "retrieved"); 1004 "retrieved");
1005 /* no further analysis possible */
1006 path_data->opm |= lpm;
928 continue; /* no error */ 1007 continue; /* no error */
929 } 1008 }
930 /* save first valid configuration data */ 1009 /* save first valid configuration data */
@@ -948,6 +1027,7 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
948 path_data->ppm |= lpm; 1027 path_data->ppm |= lpm;
949 break; 1028 break;
950 } 1029 }
1030 path_data->opm |= lpm;
951 if (conf_data != private->conf_data) 1031 if (conf_data != private->conf_data)
952 kfree(conf_data); 1032 kfree(conf_data);
953 } 1033 }
@@ -955,6 +1035,140 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
955 return 0; 1035 return 0;
956} 1036}
957 1037
1038static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
1039{
1040 struct dasd_eckd_private *private;
1041 int mdc;
1042 u32 fcx_max_data;
1043
1044 private = (struct dasd_eckd_private *) device->private;
1045 if (private->fcx_max_data) {
1046 mdc = ccw_device_get_mdc(device->cdev, lpm);
1047 if ((mdc < 0)) {
1048 dev_warn(&device->cdev->dev,
1049 "Detecting the maximum data size for zHPF "
1050 "requests failed (rc=%d) for a new path %x\n",
1051 mdc, lpm);
1052 return mdc;
1053 }
1054 fcx_max_data = mdc * FCX_MAX_DATA_FACTOR;
1055 if (fcx_max_data < private->fcx_max_data) {
1056 dev_warn(&device->cdev->dev,
1057 "The maximum data size for zHPF requests %u "
1058 "on a new path %x is below the active maximum "
1059 "%u\n", fcx_max_data, lpm,
1060 private->fcx_max_data);
1061 return -EACCES;
1062 }
1063 }
1064 return 0;
1065}
1066
1067static void do_path_verification_work(struct work_struct *work)
1068{
1069 struct path_verification_work_data *data;
1070 struct dasd_device *device;
1071 __u8 lpm, opm, npm, ppm, epm;
1072 unsigned long flags;
1073 int rc;
1074
1075 data = container_of(work, struct path_verification_work_data, worker);
1076 device = data->device;
1077
1078 opm = 0;
1079 npm = 0;
1080 ppm = 0;
1081 epm = 0;
1082 for (lpm = 0x80; lpm; lpm >>= 1) {
1083 if (lpm & data->tbvpm) {
1084 memset(data->rcd_buffer, 0, sizeof(data->rcd_buffer));
1085 memset(&data->cqr, 0, sizeof(data->cqr));
1086 data->cqr.cpaddr = &data->ccw;
1087 rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
1088 data->rcd_buffer,
1089 lpm);
1090 if (!rc) {
1091 switch (dasd_eckd_path_access(data->rcd_buffer,
1092 DASD_ECKD_RCD_DATA_SIZE)) {
1093 case 0x02:
1094 npm |= lpm;
1095 break;
1096 case 0x03:
1097 ppm |= lpm;
1098 break;
1099 }
1100 opm |= lpm;
1101 } else if (rc == -EOPNOTSUPP) {
1102 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
1103 "path verification: No configuration "
1104 "data retrieved");
1105 opm |= lpm;
1106 } else if (rc == -EAGAIN) {
1107 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
1108 "path verification: device is stopped,"
1109 " try again later");
1110 epm |= lpm;
1111 } else {
1112 dev_warn(&device->cdev->dev,
1113 "Reading device feature codes failed "
1114 "(rc=%d) for new path %x\n", rc, lpm);
1115 continue;
1116 }
1117 if (verify_fcx_max_data(device, lpm)) {
1118 opm &= ~lpm;
1119 npm &= ~lpm;
1120 ppm &= ~lpm;
1121 }
1122 }
1123 }
1124 /*
1125 * There is a small chance that a path is lost again between
1126 * above path verification and the following modification of
1127 * the device opm mask. We could avoid that race here by using
1128 * yet another path mask, but we rather deal with this unlikely
1129 * situation in dasd_start_IO.
1130 */
1131 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1132 if (!device->path_data.opm && opm) {
1133 device->path_data.opm = opm;
1134 dasd_generic_path_operational(device);
1135 } else
1136 device->path_data.opm |= opm;
1137 device->path_data.npm |= npm;
1138 device->path_data.ppm |= ppm;
1139 device->path_data.tbvpm |= epm;
1140 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1141
1142 dasd_put_device(device);
1143 if (data->isglobal)
1144 mutex_unlock(&dasd_path_verification_mutex);
1145 else
1146 kfree(data);
1147}
1148
1149static int dasd_eckd_verify_path(struct dasd_device *device, __u8 lpm)
1150{
1151 struct path_verification_work_data *data;
1152
1153 data = kmalloc(sizeof(*data), GFP_ATOMIC | GFP_DMA);
1154 if (!data) {
1155 if (mutex_trylock(&dasd_path_verification_mutex)) {
1156 data = path_verification_worker;
1157 data->isglobal = 1;
1158 } else
1159 return -ENOMEM;
1160 } else {
1161 memset(data, 0, sizeof(*data));
1162 data->isglobal = 0;
1163 }
1164 INIT_WORK(&data->worker, do_path_verification_work);
1165 dasd_get_device(device);
1166 data->device = device;
1167 data->tbvpm = lpm;
1168 schedule_work(&data->worker);
1169 return 0;
1170}
1171
958static int dasd_eckd_read_features(struct dasd_device *device) 1172static int dasd_eckd_read_features(struct dasd_device *device)
959{ 1173{
960 struct dasd_psf_prssd_data *prssdp; 1174 struct dasd_psf_prssd_data *prssdp;
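dasd_eckd_verify_path() may be triggered from interrupt context, hence the GFP_ATOMIC allocation; when even that fails it falls back to the one statically allocated work item, serialized through mutex_trylock() so at most one caller owns it. The pattern in isolation (userspace sketch; the mutex is replaced by a flag for brevity):

	#include <stdlib.h>
	#include <string.h>

	struct work_item {
		int isglobal;
		/* ... payload ... */
	};

	static struct work_item global_item;
	static int global_busy;		/* stands in for mutex_trylock() */

	static struct work_item *get_work_item(void)
	{
		struct work_item *data = malloc(sizeof(*data));

		if (data) {
			memset(data, 0, sizeof(*data));
			return data;		/* isglobal == 0: freed later */
		}
		if (!global_busy) {		/* mutex_trylock() in the driver */
			global_busy = 1;
			global_item.isglobal = 1;
			return &global_item;	/* unlocked when work finishes */
		}
		return NULL;			/* caller reports -ENOMEM */
	}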
@@ -1105,6 +1319,37 @@ static void dasd_eckd_validate_server(struct dasd_device *device)
1105 "returned rc=%d", private->uid.ssid, rc); 1319 "returned rc=%d", private->uid.ssid, rc);
1106} 1320}
1107 1321
1322static u32 get_fcx_max_data(struct dasd_device *device)
1323{
1324#if defined(CONFIG_64BIT)
1325 int tpm, mdc;
1326 int fcx_in_css, fcx_in_gneq, fcx_in_features;
1327 struct dasd_eckd_private *private;
1328
1329 if (dasd_nofcx)
1330 return 0;
1331 /* is transport mode supported? */
1332 private = (struct dasd_eckd_private *) device->private;
1333 fcx_in_css = css_general_characteristics.fcx;
1334 fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
1335 fcx_in_features = private->features.feature[40] & 0x80;
1336 tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
1337
1338 if (!tpm)
1339 return 0;
1340
1341 mdc = ccw_device_get_mdc(device->cdev, 0);
1342 if (mdc < 0) {
1343 dev_warn(&device->cdev->dev, "Detecting the maximum supported"
1344 " data size for zHPF requests failed\n");
1345 return 0;
1346 } else
1347 return mdc * FCX_MAX_DATA_FACTOR;
1348#else
1349 return 0;
1350#endif
1351}
1352
1108/* 1353/*
1109 * Check device characteristics. 1354 * Check device characteristics.
1110 * If the device is accessible using ECKD discipline, the device is enabled. 1355 * If the device is accessible using ECKD discipline, the device is enabled.
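get_fcx_max_data() gates transport mode (zHPF) on three independent capability bits and then scales the per-subchannel maximum-data-count value reported by ccw_device_get_mdc(); the scale factor (FCX_MAX_DATA_FACTOR) is a driver constant. A sketch of the decision, with the factor passed in rather than assumed:

	static unsigned int fcx_max_data(int fcx_in_css, int fcx_in_gneq,
					 int fcx_in_features, int mdc,
					 unsigned int factor)
	{
		/* channel subsystem, control unit and device must all agree */
		if (!(fcx_in_css && fcx_in_gneq && fcx_in_features))
			return 0;		/* stay in command mode */
		if (mdc < 0)
			return 0;		/* measurement not available */
		return (unsigned int)mdc * factor;
	}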
@@ -1223,6 +1468,8 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
1223 else 1468 else
1224 private->real_cyl = private->rdc_data.no_cyl; 1469 private->real_cyl = private->rdc_data.no_cyl;
1225 1470
1471 private->fcx_max_data = get_fcx_max_data(device);
1472
1226 readonly = dasd_device_is_ro(device); 1473 readonly = dasd_device_is_ro(device);
1227 if (readonly) 1474 if (readonly)
1228 set_bit(DASD_FLAG_DEVICE_RO, &device->flags); 1475 set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
@@ -1404,6 +1651,13 @@ static int dasd_eckd_end_analysis(struct dasd_block *block)
1404 dasd_sfree_request(init_cqr, device); 1651 dasd_sfree_request(init_cqr, device);
1405 } 1652 }
1406 1653
1654 if (device->features & DASD_FEATURE_USERAW) {
1655 block->bp_block = DASD_RAW_BLOCKSIZE;
1656 blk_per_trk = DASD_RAW_BLOCK_PER_TRACK;
1657 block->s2b_shift = 3;
1658 goto raw;
1659 }
1660
1407 if (status == INIT_CQR_UNFORMATTED) { 1661 if (status == INIT_CQR_UNFORMATTED) {
1408 dev_warn(&device->cdev->dev, "The DASD is not formatted\n"); 1662 dev_warn(&device->cdev->dev, "The DASD is not formatted\n");
1409 return -EMEDIUMTYPE; 1663 return -EMEDIUMTYPE;
@@ -1441,6 +1695,7 @@ static int dasd_eckd_end_analysis(struct dasd_block *block)
1441 dev_warn(&device->cdev->dev, 1695 dev_warn(&device->cdev->dev,
1442 "Track 0 has no records following the VTOC\n"); 1696 "Track 0 has no records following the VTOC\n");
1443 } 1697 }
1698
1444 if (count_area != NULL && count_area->kl == 0) { 1699 if (count_area != NULL && count_area->kl == 0) {
1445 /* we found nothing violating our disk layout */ 1700 /* we found nothing violating our disk layout */
1446 if (dasd_check_blocksize(count_area->dl) == 0) 1701 if (dasd_check_blocksize(count_area->dl) == 0)
@@ -1456,6 +1711,8 @@ static int dasd_eckd_end_analysis(struct dasd_block *block)
1456 block->s2b_shift++; 1711 block->s2b_shift++;
1457 1712
1458 blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block); 1713 blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
1714
1715raw:
1459 block->blocks = (private->real_cyl * 1716 block->blocks = (private->real_cyl *
1460 private->rdc_data.trk_per_cyl * 1717 private->rdc_data.trk_per_cyl *
1461 blk_per_trk); 1718 blk_per_trk);
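In raw mode the capacity calculation collapses to cylinders x tracks x 16 blocks of 4 KiB. Worked numbers for a 3390-3 (3339 cylinders, 15 tracks per cylinder; the geometry is quoted only for illustration):

	#include <stdio.h>

	int main(void)
	{
		unsigned long long real_cyl = 3339, trk_per_cyl = 15, blk_per_trk = 16;
		unsigned long long blocks = real_cyl * trk_per_cyl * blk_per_trk;

		/* 801360 blocks -> about 3130 MiB */
		printf("%llu blocks of 4096 bytes = %llu MiB\n",
		       blocks, blocks * 4096 / (1024 * 1024));
		return 0;
	}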
@@ -1716,6 +1973,7 @@ static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
1716 if (cqr->block && (cqr->startdev != cqr->block->base)) { 1973 if (cqr->block && (cqr->startdev != cqr->block->base)) {
1717 dasd_eckd_reset_ccw_to_base_io(cqr); 1974 dasd_eckd_reset_ccw_to_base_io(cqr);
1718 cqr->startdev = cqr->block->base; 1975 cqr->startdev = cqr->block->base;
1976 cqr->lpm = cqr->block->base->path_data.opm;
1719 } 1977 }
1720}; 1978};
1721 1979
@@ -1744,9 +2002,9 @@ dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr)
1744 return dasd_default_erp_postaction; 2002 return dasd_default_erp_postaction;
1745} 2003}
1746 2004
1747
1748static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,
1749 struct irb *irb)
2005static void dasd_eckd_check_for_device_change(struct dasd_device *device,
2006 struct dasd_ccw_req *cqr,
2007 struct irb *irb)
1750{ 2008{
1751 char mask; 2009 char mask;
1752 char *sense = NULL; 2010 char *sense = NULL;
@@ -1770,40 +2028,41 @@ static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,
1770 /* schedule worker to reload device */ 2028 /* schedule worker to reload device */
1771 dasd_reload_device(device); 2029 dasd_reload_device(device);
1772 } 2030 }
1773
1774 dasd_generic_handle_state_change(device); 2031 dasd_generic_handle_state_change(device);
1775 return; 2032 return;
1776 } 2033 }
1777 2034
1778 /* summary unit check */
1779 sense = dasd_get_sense(irb); 2035 sense = dasd_get_sense(irb);
1780 if (sense && (sense[7] == 0x0D) &&
2036 if (!sense)
2037 return;
2038
2039 /* summary unit check */
2040 if ((sense[7] == 0x0D) &&
1781 (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) { 2041 (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) {
1782 dasd_alias_handle_summary_unit_check(device, irb); 2042 dasd_alias_handle_summary_unit_check(device, irb);
1783 return; 2043 return;
1784 } 2044 }
1785 2045
1786 /* service information message SIM */ 2046 /* service information message SIM */
1787 if (sense && !(sense[27] & DASD_SENSE_BIT_0) &&
2047 if (!cqr && !(sense[27] & DASD_SENSE_BIT_0) &&
1788 ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) { 2048 ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) {
1789 dasd_3990_erp_handle_sim(device, sense); 2049 dasd_3990_erp_handle_sim(device, sense);
1790 dasd_schedule_device_bh(device);
1791 return; 2050 return;
1792 } 2051 }
1793 2052
1794 if ((scsw_cc(&irb->scsw) == 1) && !sense &&
1795 (scsw_fctl(&irb->scsw) == SCSW_FCTL_START_FUNC) &&
1796 (scsw_actl(&irb->scsw) == SCSW_ACTL_START_PEND) &&
1797 (scsw_stctl(&irb->scsw) == SCSW_STCTL_STATUS_PEND)) {
1798 /* fake irb do nothing, they are handled elsewhere */
1799 dasd_schedule_device_bh(device);
1800 return;
2053 /* loss of device reservation is handled via base devices only
2054 * as alias devices may be used with several bases
2055 */
2056 if (device->block && (sense[7] == 0x3F) &&
2057 (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
2058 test_bit(DASD_FLAG_IS_RESERVED, &device->flags)) {
2059 if (device->features & DASD_FEATURE_FAILONSLCK)
2060 set_bit(DASD_FLAG_LOCK_STOLEN, &device->flags);
2061 clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);
2062 dev_err(&device->cdev->dev,
2063 "The device reservation was lost\n");
1801 } 2064 }
1802
2065}
1803 dasd_schedule_device_bh(device);
1804 return;
1805};
1806
1807 2066
1808static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single( 2067static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
1809 struct dasd_device *startdev, 2068 struct dasd_device *startdev,
@@ -1984,7 +2243,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
1984 cqr->memdev = startdev; 2243 cqr->memdev = startdev;
1985 cqr->block = block; 2244 cqr->block = block;
1986 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ 2245 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
1987 cqr->lpm = private->path_data.ppm;
2246 cqr->lpm = startdev->path_data.ppm;
1988 cqr->retries = 256; 2247 cqr->retries = 256;
1989 cqr->buildclk = get_clock(); 2248 cqr->buildclk = get_clock();
1990 cqr->status = DASD_CQR_FILLED; 2249 cqr->status = DASD_CQR_FILLED;
@@ -2161,7 +2420,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
2161 cqr->memdev = startdev; 2420 cqr->memdev = startdev;
2162 cqr->block = block; 2421 cqr->block = block;
2163 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ 2422 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
2164 cqr->lpm = private->path_data.ppm;
2423 cqr->lpm = startdev->path_data.ppm;
2165 cqr->retries = 256; 2424 cqr->retries = 256;
2166 cqr->buildclk = get_clock(); 2425 cqr->buildclk = get_clock();
2167 cqr->status = DASD_CQR_FILLED; 2426 cqr->status = DASD_CQR_FILLED;
@@ -2326,6 +2585,12 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
2326 struct tidaw *last_tidaw = NULL; 2585 struct tidaw *last_tidaw = NULL;
2327 int itcw_op; 2586 int itcw_op;
2328 size_t itcw_size; 2587 size_t itcw_size;
2588 u8 tidaw_flags;
2589 unsigned int seg_len, part_len, len_to_track_end;
2590 unsigned char new_track;
2591 sector_t recid, trkid;
2592 unsigned int offs;
2593 unsigned int count, count_to_trk_end;
2329 2594
2330 basedev = block->base; 2595 basedev = block->base;
2331 private = (struct dasd_eckd_private *) basedev->private; 2596 private = (struct dasd_eckd_private *) basedev->private;
@@ -2341,12 +2606,16 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
2341 /* trackbased I/O needs to address all memory via TIDAWs, 2606 /* trackbased I/O needs to address all memory via TIDAWs,
2342 * not just for 64 bit addresses. This allows us to map 2607 * not just for 64 bit addresses. This allows us to map
2343 * each segment directly to one tidaw. 2608 * each segment directly to one tidaw.
2609 * In the case of write requests, additional tidaws may
2610 * be needed when a segment crosses a track boundary.
2344 */ 2611 */
2345 trkcount = last_trk - first_trk + 1; 2612 trkcount = last_trk - first_trk + 1;
2346 ctidaw = 0; 2613 ctidaw = 0;
2347 rq_for_each_segment(bv, req, iter) { 2614 rq_for_each_segment(bv, req, iter) {
2348 ++ctidaw; 2615 ++ctidaw;
2349 } 2616 }
2617 if (rq_data_dir(req) == WRITE)
2618 ctidaw += (last_trk - first_trk);
2350 2619
2351 /* Allocate the ccw request. */ 2620 /* Allocate the ccw request. */
2352 itcw_size = itcw_calc_size(0, ctidaw, 0); 2621 itcw_size = itcw_calc_size(0, ctidaw, 0);
@@ -2354,15 +2623,6 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
2354 if (IS_ERR(cqr)) 2623 if (IS_ERR(cqr))
2355 return cqr; 2624 return cqr;
2356 2625
2357 cqr->cpmode = 1;
2358 cqr->startdev = startdev;
2359 cqr->memdev = startdev;
2360 cqr->block = block;
2361 cqr->expires = 100*HZ;
2362 cqr->buildclk = get_clock();
2363 cqr->status = DASD_CQR_FILLED;
2364 cqr->retries = 10;
2365
2366 /* transfer length factor: how many bytes to read from the last track */ 2626 /* transfer length factor: how many bytes to read from the last track */
2367 if (first_trk == last_trk) 2627 if (first_trk == last_trk)
2368 tlf = last_offs - first_offs + 1; 2628 tlf = last_offs - first_offs + 1;
@@ -2371,8 +2631,11 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
2371 tlf *= blksize; 2631 tlf *= blksize;
2372 2632
2373 itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0); 2633 itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0);
2634 if (IS_ERR(itcw)) {
2635 dasd_sfree_request(cqr, startdev);
2636 return ERR_PTR(-EINVAL);
2637 }
2374 cqr->cpaddr = itcw_get_tcw(itcw); 2638 cqr->cpaddr = itcw_get_tcw(itcw);
2375
2376 if (prepare_itcw(itcw, first_trk, last_trk, 2639 if (prepare_itcw(itcw, first_trk, last_trk,
2377 cmd, basedev, startdev, 2640 cmd, basedev, startdev,
2378 first_offs + 1, 2641 first_offs + 1,
@@ -2385,31 +2648,69 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
2385 dasd_sfree_request(cqr, startdev); 2648 dasd_sfree_request(cqr, startdev);
2386 return ERR_PTR(-EAGAIN); 2649 return ERR_PTR(-EAGAIN);
2387 } 2650 }
2388
2389 /* 2651 /*
2390 * A tidaw can address 4k of memory, but must not cross page boundaries 2652 * A tidaw can address 4k of memory, but must not cross page boundaries
2391 * We can let the block layer handle this by setting 2653 * We can let the block layer handle this by setting
2392 * blk_queue_segment_boundary to page boundaries and 2654 * blk_queue_segment_boundary to page boundaries and
2393 * blk_max_segment_size to page size when setting up the request queue. 2655 * blk_max_segment_size to page size when setting up the request queue.
2656 * For write requests, a TIDAW must not cross track boundaries, because
2657 * we have to set the CBC flag on the last tidaw for each track.
2394 */ 2658 */
2395 rq_for_each_segment(bv, req, iter) {
2396 dst = page_address(bv->bv_page) + bv->bv_offset;
2397 last_tidaw = itcw_add_tidaw(itcw, 0x00, dst, bv->bv_len);
2398 if (IS_ERR(last_tidaw))
2399 return (struct dasd_ccw_req *)last_tidaw;
2659 if (rq_data_dir(req) == WRITE) {
2660 new_track = 1;
2661 recid = first_rec;
2662 rq_for_each_segment(bv, req, iter) {
2663 dst = page_address(bv->bv_page) + bv->bv_offset;
2664 seg_len = bv->bv_len;
2665 while (seg_len) {
2666 if (new_track) {
2667 trkid = recid;
2668 offs = sector_div(trkid, blk_per_trk);
2669 count_to_trk_end = blk_per_trk - offs;
2670 count = min((last_rec - recid + 1),
2671 (sector_t)count_to_trk_end);
2672 len_to_track_end = count * blksize;
2673 recid += count;
2674 new_track = 0;
2675 }
2676 part_len = min(seg_len, len_to_track_end);
2677 seg_len -= part_len;
2678 len_to_track_end -= part_len;
2679 /* We need to end the tidaw at track end */
2680 if (!len_to_track_end) {
2681 new_track = 1;
2682 tidaw_flags = TIDAW_FLAGS_INSERT_CBC;
2683 } else
2684 tidaw_flags = 0;
2685 last_tidaw = itcw_add_tidaw(itcw, tidaw_flags,
2686 dst, part_len);
2687 if (IS_ERR(last_tidaw))
2688 return ERR_PTR(-EINVAL);
2689 dst += part_len;
2690 }
2691 }
2692 } else {
2693 rq_for_each_segment(bv, req, iter) {
2694 dst = page_address(bv->bv_page) + bv->bv_offset;
2695 last_tidaw = itcw_add_tidaw(itcw, 0x00,
2696 dst, bv->bv_len);
2697 if (IS_ERR(last_tidaw))
2698 return ERR_PTR(-EINVAL);
2699 }
2400 } 2700 }
2401
2402 last_tidaw->flags |= 0x80;
2701 last_tidaw->flags |= TIDAW_FLAGS_LAST;
2702 last_tidaw->flags &= ~TIDAW_FLAGS_INSERT_CBC;
2403 itcw_finalize(itcw); 2703 itcw_finalize(itcw);
2404 2704
2405 if (blk_noretry_request(req) || 2705 if (blk_noretry_request(req) ||
2406 block->base->features & DASD_FEATURE_FAILFAST) 2706 block->base->features & DASD_FEATURE_FAILFAST)
2407 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 2707 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
2708 cqr->cpmode = 1;
2408 cqr->startdev = startdev; 2709 cqr->startdev = startdev;
2409 cqr->memdev = startdev; 2710 cqr->memdev = startdev;
2410 cqr->block = block; 2711 cqr->block = block;
2411 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ 2712 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
2412 cqr->lpm = private->path_data.ppm;
2713 cqr->lpm = startdev->path_data.ppm;
2413 cqr->retries = 256; 2714 cqr->retries = 256;
2414 cqr->buildclk = get_clock(); 2715 cqr->buildclk = get_clock();
2415 cqr->status = DASD_CQR_FILLED; 2716 cqr->status = DASD_CQR_FILLED;
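The write branch above splits every bio segment at track boundaries so the CBC flag can be set on the last TIDAW of each track (the very last TIDAW of the request then gets TIDAW_FLAGS_LAST and its CBC bit cleared again). The splitting arithmetic in isolation, with illustrative sizes:

	#include <stdio.h>

	int main(void)
	{
		unsigned int blksize = 4096, blk_per_trk = 12;	/* illustrative */
		unsigned int offs = 8;		/* I/O starts at block 8 of a track */
		unsigned int seg_len = 20 * 4096;	/* one 80 KiB segment */
		unsigned int len_to_track_end = 0;

		while (seg_len) {
			unsigned int part_len;

			if (!len_to_track_end) {	/* entering a new track */
				len_to_track_end = (blk_per_trk - offs) * blksize;
				offs = 0;
			}
			part_len = seg_len < len_to_track_end ?
				   seg_len : len_to_track_end;
			seg_len -= part_len;
			len_to_track_end -= part_len;
			printf("tidaw: %5u bytes%s\n", part_len,
			       len_to_track_end ? "" : "  [insert CBC]");
		}
		return 0;
	}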
@@ -2420,11 +2721,9 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
2420 struct dasd_block *block, 2721 struct dasd_block *block,
2421 struct request *req) 2722 struct request *req)
2422{ 2723{
2423 int tpm, cmdrtd, cmdwtd;
2424 int use_prefix;
2425#if defined(CONFIG_64BIT)
2426 int fcx_in_css, fcx_in_gneq, fcx_in_features;
2427#endif
2724 int cmdrtd, cmdwtd;
2725 int use_prefix;
2726 int fcx_multitrack;
2428 struct dasd_eckd_private *private; 2727 struct dasd_eckd_private *private;
2429 struct dasd_device *basedev; 2728 struct dasd_device *basedev;
2430 sector_t first_rec, last_rec; 2729 sector_t first_rec, last_rec;
@@ -2432,6 +2731,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
2432 unsigned int first_offs, last_offs; 2731 unsigned int first_offs, last_offs;
2433 unsigned int blk_per_trk, blksize; 2732 unsigned int blk_per_trk, blksize;
2434 int cdlspecial; 2733 int cdlspecial;
2734 unsigned int data_size;
2435 struct dasd_ccw_req *cqr; 2735 struct dasd_ccw_req *cqr;
2436 2736
2437 basedev = block->base; 2737 basedev = block->base;
@@ -2450,15 +2750,11 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
2450 last_offs = sector_div(last_trk, blk_per_trk); 2750 last_offs = sector_div(last_trk, blk_per_trk);
2451 cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk); 2751 cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);
2452 2752
2453 /* is transport mode supported? */ 2753 fcx_multitrack = private->features.feature[40] & 0x20;
2454#if defined(CONFIG_64BIT) 2754 data_size = blk_rq_bytes(req);
 2455 fcx_in_css = css_general_characteristics.fcx; 2755 /* tpm write requests add CBC data on each track boundary */
2456 fcx_in_gneq = private->gneq->reserved2[7] & 0x04; 2756 if (rq_data_dir(req) == WRITE)
2457 fcx_in_features = private->features.feature[40] & 0x80; 2757 data_size += (last_trk - first_trk) * 4;
2458 tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
2459#else
2460 tpm = 0;
2461#endif
2462 2758
2463 /* is read track data and write track data in command mode supported? */ 2759 /* is read track data and write track data in command mode supported? */
2464 cmdrtd = private->features.feature[9] & 0x20; 2760 cmdrtd = private->features.feature[9] & 0x20;
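As a concrete reading of the write-size adjustment above: a write covering tracks 100 through 103 has last_trk - first_trk = 3, so 3 * 4 = 12 bytes of CBC data are added, and data_size = blk_rq_bytes(req) + 12 is the value the next hunk compares against fcx_max_data.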
@@ -2468,13 +2764,15 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
2468 cqr = NULL; 2764 cqr = NULL;
2469 if (cdlspecial || dasd_page_cache) { 2765 if (cdlspecial || dasd_page_cache) {
2470 /* do nothing, just fall through to the cmd mode single case */ 2766 /* do nothing, just fall through to the cmd mode single case */
2471 } else if (!dasd_nofcx && tpm && (first_trk == last_trk)) { 2767 } else if ((data_size <= private->fcx_max_data)
2768 && (fcx_multitrack || (first_trk == last_trk))) {
2472 cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req, 2769 cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req,
2473 first_rec, last_rec, 2770 first_rec, last_rec,
2474 first_trk, last_trk, 2771 first_trk, last_trk,
2475 first_offs, last_offs, 2772 first_offs, last_offs,
2476 blk_per_trk, blksize); 2773 blk_per_trk, blksize);
2477 if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN) 2774 if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
2775 (PTR_ERR(cqr) != -ENOMEM))
2478 cqr = NULL; 2776 cqr = NULL;
2479 } else if (use_prefix && 2777 } else if (use_prefix &&
2480 (((rq_data_dir(req) == READ) && cmdrtd) || 2778 (((rq_data_dir(req) == READ) && cmdrtd) ||
@@ -2484,7 +2782,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
2484 first_trk, last_trk, 2782 first_trk, last_trk,
2485 first_offs, last_offs, 2783 first_offs, last_offs,
2486 blk_per_trk, blksize); 2784 blk_per_trk, blksize);
2487 if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN) 2785 if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
2786 (PTR_ERR(cqr) != -ENOMEM))
2488 cqr = NULL; 2787 cqr = NULL;
2489 } 2788 }
2490 if (!cqr) 2789 if (!cqr)
@@ -2496,6 +2795,135 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
2496 return cqr; 2795 return cqr;
2497} 2796}
2498 2797
2798static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
2799 struct dasd_block *block,
2800 struct request *req)
2801{
2802 struct dasd_eckd_private *private;
2803 unsigned long *idaws;
2804 struct dasd_device *basedev;
2805 struct dasd_ccw_req *cqr;
2806 struct ccw1 *ccw;
2807 struct req_iterator iter;
2808 struct bio_vec *bv;
2809 char *dst;
2810 unsigned char cmd;
2811 unsigned int trkcount;
2812 unsigned int seg_len, len_to_track_end;
2813 unsigned int first_offs;
2814 unsigned int cidaw, cplength, datasize;
2815 sector_t first_trk, last_trk;
2816 unsigned int pfx_datasize;
2817
2818 /*
 2819 * raw track access needs to be a multiple of 64k and on a 64k boundary
2820 */
2821 if ((blk_rq_pos(req) % DASD_RAW_SECTORS_PER_TRACK) != 0) {
2822 cqr = ERR_PTR(-EINVAL);
2823 goto out;
2824 }
2825 if (((blk_rq_pos(req) + blk_rq_sectors(req)) %
2826 DASD_RAW_SECTORS_PER_TRACK) != 0) {
2827 cqr = ERR_PTR(-EINVAL);
2828 goto out;
2829 }
2830
2831 first_trk = blk_rq_pos(req) / DASD_RAW_SECTORS_PER_TRACK;
2832 last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) /
2833 DASD_RAW_SECTORS_PER_TRACK;
2834 trkcount = last_trk - first_trk + 1;
2835 first_offs = 0;
2836 basedev = block->base;
2837 private = (struct dasd_eckd_private *) basedev->private;
2838
2839 if (rq_data_dir(req) == READ)
2840 cmd = DASD_ECKD_CCW_READ_TRACK;
2841 else if (rq_data_dir(req) == WRITE)
2842 cmd = DASD_ECKD_CCW_WRITE_FULL_TRACK;
2843 else {
2844 cqr = ERR_PTR(-EINVAL);
2845 goto out;
2846 }
2847
2848 /*
2849 * Raw track based I/O needs IDAWs for each page,
2850 * and not just for 64 bit addresses.
2851 */
2852 cidaw = trkcount * DASD_RAW_BLOCK_PER_TRACK;
2853
2854 /* 1x prefix + one read/write ccw per track */
2855 cplength = 1 + trkcount;
2856
2857 /*
 2858 * struct PFX_eckd_data has up to 2 bytes as an extended parameter;
 2859 * this is needed for write full track and has to be accounted for
 2860 * separately.
 2861 * Add 8 instead of 2 to keep the 8 byte boundary.
2862 */
2863 pfx_datasize = sizeof(struct PFX_eckd_data) + 8;
2864
2865 datasize = pfx_datasize + cidaw * sizeof(unsigned long long);
2866
2867 /* Allocate the ccw request. */
2868 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
2869 datasize, startdev);
2870 if (IS_ERR(cqr))
2871 goto out;
2872 ccw = cqr->cpaddr;
2873
2874 if (prefix_LRE(ccw++, cqr->data, first_trk, last_trk, cmd,
2875 basedev, startdev, 1 /* format */, first_offs + 1,
2876 trkcount, 0, 0) == -EAGAIN) {
2877 /* Clock not in sync and XRC is enabled.
2878 * Try again later.
2879 */
2880 dasd_sfree_request(cqr, startdev);
2881 cqr = ERR_PTR(-EAGAIN);
2882 goto out;
2883 }
2884
2885 idaws = (unsigned long *)(cqr->data + pfx_datasize);
2886
2887 len_to_track_end = 0;
2888
2889 rq_for_each_segment(bv, req, iter) {
2890 dst = page_address(bv->bv_page) + bv->bv_offset;
2891 seg_len = bv->bv_len;
2892 if (!len_to_track_end) {
2893 ccw[-1].flags |= CCW_FLAG_CC;
2894 ccw->cmd_code = cmd;
2895 /* maximum 3390 track size */
2896 ccw->count = 57326;
 2897 /* 64k maps to one track */
2898 len_to_track_end = 65536;
2899 ccw->cda = (__u32)(addr_t)idaws;
2900 ccw->flags |= CCW_FLAG_IDA;
2901 ccw->flags |= CCW_FLAG_SLI;
2902 ccw++;
2903 }
2904 len_to_track_end -= seg_len;
2905 idaws = idal_create_words(idaws, dst, seg_len);
2906 }
2907
2908 if (blk_noretry_request(req) ||
2909 block->base->features & DASD_FEATURE_FAILFAST)
2910 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
2911 cqr->startdev = startdev;
2912 cqr->memdev = startdev;
2913 cqr->block = block;
2914 cqr->expires = startdev->default_expires * HZ;
2915 cqr->lpm = startdev->path_data.ppm;
2916 cqr->retries = 256;
2917 cqr->buildclk = get_clock();
2918 cqr->status = DASD_CQR_FILLED;
2919
2920 if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN)
2921 cqr = NULL;
2922out:
2923 return cqr;
2924}
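The alignment checks at the top of dasd_raw_build_cp() are easiest to see with concrete numbers. In this sketch DASD_RAW_SECTORS_PER_TRACK is assumed to be 128 (64 KiB per track / 512-byte sectors); the define itself is not part of this excerpt, so treat the value as illustrative.

	#include <stdio.h>

	#define DASD_RAW_SECTORS_PER_TRACK 128ULL /* assumed: 65536 / 512 */

	/* Start and end must both sit on a track (64 KiB) boundary. */
	static int raw_request_ok(unsigned long long pos, unsigned long long sectors)
	{
		if (pos % DASD_RAW_SECTORS_PER_TRACK)
			return 0;
		if ((pos + sectors) % DASD_RAW_SECTORS_PER_TRACK)
			return 0;
		return 1;
	}

	int main(void)
	{
		unsigned long long pos = 256, sectors = 384; /* tracks 2..4 */
		if (raw_request_ok(pos, sectors))
			printf("trkcount=%llu\n",
			       (pos + sectors - 1) / DASD_RAW_SECTORS_PER_TRACK
			       - pos / DASD_RAW_SECTORS_PER_TRACK + 1);
		return 0;
	}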
2925
2926
2499static int 2927static int
2500dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req) 2928dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
2501{ 2929{
@@ -2600,7 +3028,10 @@ static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
2600 3028
2601 spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags); 3029 spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags);
2602 private->count++; 3030 private->count++;
2603 cqr = dasd_eckd_build_cp(startdev, block, req); 3031 if ((base->features & DASD_FEATURE_USERAW))
3032 cqr = dasd_raw_build_cp(startdev, block, req);
3033 else
3034 cqr = dasd_eckd_build_cp(startdev, block, req);
2604 if (IS_ERR(cqr)) 3035 if (IS_ERR(cqr))
2605 private->count--; 3036 private->count--;
2606 spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags); 3037 spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags);
@@ -2688,6 +3119,8 @@ dasd_eckd_release(struct dasd_device *device)
2688 cqr->status = DASD_CQR_FILLED; 3119 cqr->status = DASD_CQR_FILLED;
2689 3120
2690 rc = dasd_sleep_on_immediatly(cqr); 3121 rc = dasd_sleep_on_immediatly(cqr);
3122 if (!rc)
3123 clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);
2691 3124
2692 if (useglobal) 3125 if (useglobal)
2693 mutex_unlock(&dasd_reserve_mutex); 3126 mutex_unlock(&dasd_reserve_mutex);
@@ -2741,6 +3174,8 @@ dasd_eckd_reserve(struct dasd_device *device)
2741 cqr->status = DASD_CQR_FILLED; 3174 cqr->status = DASD_CQR_FILLED;
2742 3175
2743 rc = dasd_sleep_on_immediatly(cqr); 3176 rc = dasd_sleep_on_immediatly(cqr);
3177 if (!rc)
3178 set_bit(DASD_FLAG_IS_RESERVED, &device->flags);
2744 3179
2745 if (useglobal) 3180 if (useglobal)
2746 mutex_unlock(&dasd_reserve_mutex); 3181 mutex_unlock(&dasd_reserve_mutex);
@@ -2793,6 +3228,8 @@ dasd_eckd_steal_lock(struct dasd_device *device)
2793 cqr->status = DASD_CQR_FILLED; 3228 cqr->status = DASD_CQR_FILLED;
2794 3229
2795 rc = dasd_sleep_on_immediatly(cqr); 3230 rc = dasd_sleep_on_immediatly(cqr);
3231 if (!rc)
3232 set_bit(DASD_FLAG_IS_RESERVED, &device->flags);
2796 3233
2797 if (useglobal) 3234 if (useglobal)
2798 mutex_unlock(&dasd_reserve_mutex); 3235 mutex_unlock(&dasd_reserve_mutex);
@@ -2845,6 +3282,7 @@ static int dasd_eckd_snid(struct dasd_device *device,
2845 cqr->memdev = device; 3282 cqr->memdev = device;
2846 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 3283 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
2847 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 3284 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
3285 set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
2848 cqr->retries = 5; 3286 cqr->retries = 5;
2849 cqr->expires = 10 * HZ; 3287 cqr->expires = 10 * HZ;
2850 cqr->buildclk = get_clock(); 3288 cqr->buildclk = get_clock();
@@ -3279,10 +3717,8 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
3279{ 3717{
3280 char *page; 3718 char *page;
3281 int len, sl, sct, residual; 3719 int len, sl, sct, residual;
3282
3283 struct tsb *tsb; 3720 struct tsb *tsb;
3284 u8 *sense; 3721 u8 *sense, *rcq;
3285
3286 3722
3287 page = (char *) get_zeroed_page(GFP_ATOMIC); 3723 page = (char *) get_zeroed_page(GFP_ATOMIC);
3288 if (page == NULL) { 3724 if (page == NULL) {
@@ -3348,12 +3784,15 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
3348 case 2: /* ts_ddpc */ 3784 case 2: /* ts_ddpc */
3349 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3785 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3350 " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc); 3786 " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc);
3351 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3787 for (sl = 0; sl < 2; sl++) {
3352 " tsb->tsa.ddpc.rcq: "); 3788 len += sprintf(page + len,
3353 for (sl = 0; sl < 16; sl++) { 3789 KERN_ERR PRINTK_HEADER
3790 " tsb->tsa.ddpc.rcq %2d-%2d: ",
3791 (8 * sl), ((8 * sl) + 7));
3792 rcq = tsb->tsa.ddpc.rcq;
3354 for (sct = 0; sct < 8; sct++) { 3793 for (sct = 0; sct < 8; sct++) {
3355 len += sprintf(page + len, " %02x", 3794 len += sprintf(page + len, " %02x",
3356 tsb->tsa.ddpc.rcq[sl]); 3795 rcq[8 * sl + sct]);
3357 } 3796 }
3358 len += sprintf(page + len, "\n"); 3797 len += sprintf(page + len, "\n");
3359 } 3798 }
@@ -3550,6 +3989,7 @@ static struct ccw_driver dasd_eckd_driver = {
3550 .set_offline = dasd_generic_set_offline, 3989 .set_offline = dasd_generic_set_offline,
3551 .set_online = dasd_eckd_set_online, 3990 .set_online = dasd_eckd_set_online,
3552 .notify = dasd_generic_notify, 3991 .notify = dasd_generic_notify,
3992 .path_event = dasd_generic_path_event,
3553 .freeze = dasd_generic_pm_freeze, 3993 .freeze = dasd_generic_pm_freeze,
3554 .thaw = dasd_generic_restore_device, 3994 .thaw = dasd_generic_restore_device,
3555 .restore = dasd_generic_restore_device, 3995 .restore = dasd_generic_restore_device,
@@ -3573,10 +4013,11 @@ static struct dasd_discipline dasd_eckd_discipline = {
3573 .owner = THIS_MODULE, 4013 .owner = THIS_MODULE,
3574 .name = "ECKD", 4014 .name = "ECKD",
3575 .ebcname = "ECKD", 4015 .ebcname = "ECKD",
3576 .max_blocks = 240, 4016 .max_blocks = 190,
3577 .check_device = dasd_eckd_check_characteristics, 4017 .check_device = dasd_eckd_check_characteristics,
3578 .uncheck_device = dasd_eckd_uncheck_device, 4018 .uncheck_device = dasd_eckd_uncheck_device,
3579 .do_analysis = dasd_eckd_do_analysis, 4019 .do_analysis = dasd_eckd_do_analysis,
4020 .verify_path = dasd_eckd_verify_path,
3580 .ready_to_online = dasd_eckd_ready_to_online, 4021 .ready_to_online = dasd_eckd_ready_to_online,
3581 .online_to_ready = dasd_eckd_online_to_ready, 4022 .online_to_ready = dasd_eckd_online_to_ready,
3582 .fill_geometry = dasd_eckd_fill_geometry, 4023 .fill_geometry = dasd_eckd_fill_geometry,
@@ -3586,7 +4027,7 @@ static struct dasd_discipline dasd_eckd_discipline = {
3586 .format_device = dasd_eckd_format_device, 4027 .format_device = dasd_eckd_format_device,
3587 .erp_action = dasd_eckd_erp_action, 4028 .erp_action = dasd_eckd_erp_action,
3588 .erp_postaction = dasd_eckd_erp_postaction, 4029 .erp_postaction = dasd_eckd_erp_postaction,
3589 .handle_unsolicited_interrupt = dasd_eckd_handle_unsolicited_interrupt, 4030 .check_for_device_change = dasd_eckd_check_for_device_change,
3590 .build_cp = dasd_eckd_build_alias_cp, 4031 .build_cp = dasd_eckd_build_alias_cp,
3591 .free_cp = dasd_eckd_free_alias_cp, 4032 .free_cp = dasd_eckd_free_alias_cp,
3592 .dump_sense = dasd_eckd_dump_sense, 4033 .dump_sense = dasd_eckd_dump_sense,
@@ -3609,11 +4050,19 @@ dasd_eckd_init(void)
3609 GFP_KERNEL | GFP_DMA); 4050 GFP_KERNEL | GFP_DMA);
3610 if (!dasd_reserve_req) 4051 if (!dasd_reserve_req)
3611 return -ENOMEM; 4052 return -ENOMEM;
4053 path_verification_worker = kmalloc(sizeof(*path_verification_worker),
4054 GFP_KERNEL | GFP_DMA);
4055 if (!path_verification_worker) {
4056 kfree(dasd_reserve_req);
4057 return -ENOMEM;
4058 }
3612 ret = ccw_driver_register(&dasd_eckd_driver); 4059 ret = ccw_driver_register(&dasd_eckd_driver);
3613 if (!ret) 4060 if (!ret)
3614 wait_for_device_probe(); 4061 wait_for_device_probe();
3615 else 4062 else {
4063 kfree(path_verification_worker);
3616 kfree(dasd_reserve_req); 4064 kfree(dasd_reserve_req);
4065 }
3617 return ret; 4066 return ret;
3618} 4067}
3619 4068
@@ -3621,6 +4070,7 @@ static void __exit
3621dasd_eckd_cleanup(void) 4070dasd_eckd_cleanup(void)
3622{ 4071{
3623 ccw_driver_unregister(&dasd_eckd_driver); 4072 ccw_driver_unregister(&dasd_eckd_driver);
4073 kfree(path_verification_worker);
3624 kfree(dasd_reserve_req); 4074 kfree(dasd_reserve_req);
3625} 4075}
3626 4076
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index 12097c24f2f5..4a688a873a77 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -37,14 +37,17 @@
37#define DASD_ECKD_CCW_WRITE_KD_MT 0x8d 37#define DASD_ECKD_CCW_WRITE_KD_MT 0x8d
38#define DASD_ECKD_CCW_READ_KD_MT 0x8e 38#define DASD_ECKD_CCW_READ_KD_MT 0x8e
39#define DASD_ECKD_CCW_RELEASE 0x94 39#define DASD_ECKD_CCW_RELEASE 0x94
40#define DASD_ECKD_CCW_WRITE_FULL_TRACK 0x95
40#define DASD_ECKD_CCW_READ_CKD_MT 0x9e 41#define DASD_ECKD_CCW_READ_CKD_MT 0x9e
41#define DASD_ECKD_CCW_WRITE_CKD_MT 0x9d 42#define DASD_ECKD_CCW_WRITE_CKD_MT 0x9d
42#define DASD_ECKD_CCW_WRITE_TRACK_DATA 0xA5 43#define DASD_ECKD_CCW_WRITE_TRACK_DATA 0xA5
43#define DASD_ECKD_CCW_READ_TRACK_DATA 0xA6 44#define DASD_ECKD_CCW_READ_TRACK_DATA 0xA6
44#define DASD_ECKD_CCW_RESERVE 0xB4 45#define DASD_ECKD_CCW_RESERVE 0xB4
46#define DASD_ECKD_CCW_READ_TRACK 0xDE
45#define DASD_ECKD_CCW_PFX 0xE7 47#define DASD_ECKD_CCW_PFX 0xE7
46#define DASD_ECKD_CCW_PFX_READ 0xEA 48#define DASD_ECKD_CCW_PFX_READ 0xEA
47#define DASD_ECKD_CCW_RSCK 0xF9 49#define DASD_ECKD_CCW_RSCK 0xF9
50#define DASD_ECKD_CCW_RCD 0xFA
48 51
49/* 52/*
50 * Perform Subsystem Function / Sub-Orders 53 * Perform Subsystem Function / Sub-Orders
@@ -57,6 +60,11 @@
57 */ 60 */
58#define LV_COMPAT_CYL 0xFFFE 61#define LV_COMPAT_CYL 0xFFFE
59 62
63
64#define FCX_MAX_DATA_FACTOR 65536
65#define DASD_ECKD_RCD_DATA_SIZE 256
66
67
60/***************************************************************************** 68/*****************************************************************************
61 * SECTION: Type Definitions 69 * SECTION: Type Definitions
62 ****************************************************************************/ 70 ****************************************************************************/
@@ -331,12 +339,6 @@ struct dasd_gneq {
331 __u8 reserved2[22]; 339 __u8 reserved2[22];
332} __attribute__ ((packed)); 340} __attribute__ ((packed));
333 341
334struct dasd_eckd_path {
335 __u8 opm;
336 __u8 ppm;
337 __u8 npm;
338};
339
340struct dasd_rssd_features { 342struct dasd_rssd_features {
341 char feature[256]; 343 char feature[256];
342} __attribute__((packed)); 344} __attribute__((packed));
@@ -442,7 +444,6 @@ struct dasd_eckd_private {
442 struct vd_sneq *vdsneq; 444 struct vd_sneq *vdsneq;
443 struct dasd_gneq *gneq; 445 struct dasd_gneq *gneq;
444 446
445 struct dasd_eckd_path path_data;
446 struct eckd_count count_area[5]; 447 struct eckd_count count_area[5];
447 int init_cqr_status; 448 int init_cqr_status;
448 int uses_cdl; 449 int uses_cdl;
@@ -455,6 +456,8 @@ struct dasd_eckd_private {
455 struct alias_pav_group *pavgroup; 456 struct alias_pav_group *pavgroup;
456 struct alias_lcu *lcu; 457 struct alias_lcu *lcu;
457 int count; 458 int count;
459
460 u32 fcx_max_data;
458}; 461};
459 462
460 463
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index 83b4615a3b62..77f778b7b070 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -473,6 +473,7 @@ int dasd_eer_enable(struct dasd_device *device)
473 cqr->retries = 255; 473 cqr->retries = 255;
474 cqr->expires = 10 * HZ; 474 cqr->expires = 10 * HZ;
475 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 475 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
476 set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
476 477
477 ccw = cqr->cpaddr; 478 ccw = cqr->cpaddr;
478 ccw->cmd_code = DASD_ECKD_CCW_SNSS; 479 ccw->cmd_code = DASD_ECKD_CCW_SNSS;
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
index 7656384a811d..0eafe2e421e7 100644
--- a/drivers/s390/block/dasd_erp.c
+++ b/drivers/s390/block/dasd_erp.c
@@ -96,7 +96,8 @@ dasd_default_erp_action(struct dasd_ccw_req *cqr)
96 DBF_DEV_EVENT(DBF_DEBUG, device, 96 DBF_DEV_EVENT(DBF_DEBUG, device,
97 "default ERP called (%i retries left)", 97 "default ERP called (%i retries left)",
98 cqr->retries); 98 cqr->retries);
99 cqr->lpm = LPM_ANYPATH; 99 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
100 cqr->lpm = device->path_data.opm;
100 cqr->status = DASD_CQR_FILLED; 101 cqr->status = DASD_CQR_FILLED;
101 } else { 102 } else {
102 pr_err("%s: default ERP has run out of retries and failed\n", 103 pr_err("%s: default ERP has run out of retries and failed\n",
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index bec5486e0e6d..be89b3a893da 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -73,6 +73,7 @@ static struct ccw_driver dasd_fba_driver = {
73 .set_offline = dasd_generic_set_offline, 73 .set_offline = dasd_generic_set_offline,
74 .set_online = dasd_fba_set_online, 74 .set_online = dasd_fba_set_online,
75 .notify = dasd_generic_notify, 75 .notify = dasd_generic_notify,
76 .path_event = dasd_generic_path_event,
76 .freeze = dasd_generic_pm_freeze, 77 .freeze = dasd_generic_pm_freeze,
77 .thaw = dasd_generic_restore_device, 78 .thaw = dasd_generic_restore_device,
78 .restore = dasd_generic_restore_device, 79 .restore = dasd_generic_restore_device,
@@ -164,6 +165,7 @@ dasd_fba_check_characteristics(struct dasd_device *device)
164 } 165 }
165 166
166 device->default_expires = DASD_EXPIRES; 167 device->default_expires = DASD_EXPIRES;
168 device->path_data.opm = LPM_ANYPATH;
167 169
168 readonly = dasd_device_is_ro(device); 170 readonly = dasd_device_is_ro(device);
169 if (readonly) 171 if (readonly)
@@ -231,24 +233,16 @@ dasd_fba_erp_postaction(struct dasd_ccw_req * cqr)
231 return NULL; 233 return NULL;
232} 234}
233 235
234static void dasd_fba_handle_unsolicited_interrupt(struct dasd_device *device, 236static void dasd_fba_check_for_device_change(struct dasd_device *device,
235 struct irb *irb) 237 struct dasd_ccw_req *cqr,
238 struct irb *irb)
236{ 239{
237 char mask; 240 char mask;
238 241
239 /* first of all check for state change pending interrupt */ 242 /* first of all check for state change pending interrupt */
240 mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP; 243 mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
241 if ((irb->scsw.cmd.dstat & mask) == mask) { 244 if ((irb->scsw.cmd.dstat & mask) == mask)
242 dasd_generic_handle_state_change(device); 245 dasd_generic_handle_state_change(device);
243 return;
244 }
245
246 /* check for unsolicited interrupts */
247 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
248 "unsolicited interrupt received");
249 device->discipline->dump_sense_dbf(device, irb, "unsolicited");
250 dasd_schedule_device_bh(device);
251 return;
252}; 246};
253 247
254static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev, 248static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
@@ -596,13 +590,14 @@ static struct dasd_discipline dasd_fba_discipline = {
596 .max_blocks = 96, 590 .max_blocks = 96,
597 .check_device = dasd_fba_check_characteristics, 591 .check_device = dasd_fba_check_characteristics,
598 .do_analysis = dasd_fba_do_analysis, 592 .do_analysis = dasd_fba_do_analysis,
593 .verify_path = dasd_generic_verify_path,
599 .fill_geometry = dasd_fba_fill_geometry, 594 .fill_geometry = dasd_fba_fill_geometry,
600 .start_IO = dasd_start_IO, 595 .start_IO = dasd_start_IO,
601 .term_IO = dasd_term_IO, 596 .term_IO = dasd_term_IO,
602 .handle_terminated_request = dasd_fba_handle_terminated_request, 597 .handle_terminated_request = dasd_fba_handle_terminated_request,
603 .erp_action = dasd_fba_erp_action, 598 .erp_action = dasd_fba_erp_action,
604 .erp_postaction = dasd_fba_erp_postaction, 599 .erp_postaction = dasd_fba_erp_postaction,
605 .handle_unsolicited_interrupt = dasd_fba_handle_unsolicited_interrupt, 600 .check_for_device_change = dasd_fba_check_for_device_change,
606 .build_cp = dasd_fba_build_cp, 601 .build_cp = dasd_fba_build_cp,
607 .free_cp = dasd_fba_free_cp, 602 .free_cp = dasd_fba_free_cp,
608 .dump_sense = dasd_fba_dump_sense, 603 .dump_sense = dasd_fba_dump_sense,
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 500678d7116c..df9f6999411d 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -231,6 +231,11 @@ struct dasd_ccw_req {
231/* per dasd_ccw_req flags */ 231/* per dasd_ccw_req flags */
232#define DASD_CQR_FLAGS_USE_ERP 0 /* use ERP for this request */ 232#define DASD_CQR_FLAGS_USE_ERP 0 /* use ERP for this request */
233#define DASD_CQR_FLAGS_FAILFAST 1 /* FAILFAST */ 233#define DASD_CQR_FLAGS_FAILFAST 1 /* FAILFAST */
234#define DASD_CQR_VERIFY_PATH 2 /* path verification request */
235#define DASD_CQR_ALLOW_SLOCK 3 /* Try this request even when lock was
236 * stolen. Should not be combined with
237 * DASD_CQR_FLAGS_USE_ERP
238 */
234 239
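The flag comment implies a gate somewhere in the request start path; that code is outside this excerpt, so the following is only a plausible sketch of how the two new bits are meant to interact.

	/* Plausible sketch only -- the real check lives in the dasd core,
	 * not in this diff: once DASD_FLAG_LOCK_STOLEN is set on a device,
	 * only requests explicitly marked DASD_CQR_ALLOW_SLOCK may start. */
	static int may_start_example(unsigned long *device_flags,
				     unsigned long *cqr_flags)
	{
		if (test_bit(DASD_FLAG_LOCK_STOLEN, device_flags) &&
		    !test_bit(DASD_CQR_ALLOW_SLOCK, cqr_flags))
			return 0;	/* reject: our reservation was stolen */
		return 1;
	}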
235/* Signature for error recovery functions. */ 240/* Signature for error recovery functions. */
236typedef struct dasd_ccw_req *(*dasd_erp_fn_t) (struct dasd_ccw_req *); 241typedef struct dasd_ccw_req *(*dasd_erp_fn_t) (struct dasd_ccw_req *);
@@ -287,6 +292,14 @@ struct dasd_discipline {
287 int (*do_analysis) (struct dasd_block *); 292 int (*do_analysis) (struct dasd_block *);
288 293
289 /* 294 /*
 295 * This function is called when new paths become available.
 296 * Disciplines may use this callback to do necessary setup work,
 297 * e.g. verify that a new path is compatible with the current
298 * configuration.
299 */
300 int (*verify_path)(struct dasd_device *, __u8);
301
302 /*
290 * Last things to do when a device is set online, and first things 303 * Last things to do when a device is set online, and first things
291 * when it is set offline. 304 * when it is set offline.
292 */ 305 */
@@ -325,9 +338,9 @@ struct dasd_discipline {
325 void (*dump_sense) (struct dasd_device *, struct dasd_ccw_req *, 338 void (*dump_sense) (struct dasd_device *, struct dasd_ccw_req *,
326 struct irb *); 339 struct irb *);
327 void (*dump_sense_dbf) (struct dasd_device *, struct irb *, char *); 340 void (*dump_sense_dbf) (struct dasd_device *, struct irb *, char *);
328 341 void (*check_for_device_change) (struct dasd_device *,
329 void (*handle_unsolicited_interrupt) (struct dasd_device *, 342 struct dasd_ccw_req *,
330 struct irb *); 343 struct irb *);
331 344
332 /* i/o control functions. */ 345 /* i/o control functions. */
333 int (*fill_geometry) (struct dasd_block *, struct hd_geometry *); 346 int (*fill_geometry) (struct dasd_block *, struct hd_geometry *);
@@ -362,6 +375,13 @@ extern struct dasd_discipline *dasd_diag_discipline_pointer;
362#define DASD_EER_STATECHANGE 3 375#define DASD_EER_STATECHANGE 3
363#define DASD_EER_PPRCSUSPEND 4 376#define DASD_EER_PPRCSUSPEND 4
364 377
378struct dasd_path {
379 __u8 opm;
380 __u8 tbvpm;
381 __u8 ppm;
382 __u8 npm;
383};
384
365struct dasd_device { 385struct dasd_device {
366 /* Block device stuff. */ 386 /* Block device stuff. */
367 struct dasd_block *block; 387 struct dasd_block *block;
@@ -377,6 +397,7 @@ struct dasd_device {
377 struct dasd_discipline *discipline; 397 struct dasd_discipline *discipline;
378 struct dasd_discipline *base_discipline; 398 struct dasd_discipline *base_discipline;
379 char *private; 399 char *private;
400 struct dasd_path path_data;
380 401
381 /* Device state and target state. */ 402 /* Device state and target state. */
382 int state, target; 403 int state, target;
@@ -456,6 +477,9 @@ struct dasd_block {
456 * confuse this with the user specified 477 * confuse this with the user specified
457 * read-only feature. 478 * read-only feature.
458 */ 479 */
480#define DASD_FLAG_IS_RESERVED 7 /* The device is reserved */
481#define DASD_FLAG_LOCK_STOLEN 8 /* The device lock was stolen */
482
459 483
460void dasd_put_device_wake(struct dasd_device *); 484void dasd_put_device_wake(struct dasd_device *);
461 485
@@ -620,10 +644,15 @@ void dasd_generic_remove (struct ccw_device *cdev);
620int dasd_generic_set_online(struct ccw_device *, struct dasd_discipline *); 644int dasd_generic_set_online(struct ccw_device *, struct dasd_discipline *);
621int dasd_generic_set_offline (struct ccw_device *cdev); 645int dasd_generic_set_offline (struct ccw_device *cdev);
622int dasd_generic_notify(struct ccw_device *, int); 646int dasd_generic_notify(struct ccw_device *, int);
647int dasd_generic_last_path_gone(struct dasd_device *);
648int dasd_generic_path_operational(struct dasd_device *);
649
623void dasd_generic_handle_state_change(struct dasd_device *); 650void dasd_generic_handle_state_change(struct dasd_device *);
624int dasd_generic_pm_freeze(struct ccw_device *); 651int dasd_generic_pm_freeze(struct ccw_device *);
625int dasd_generic_restore_device(struct ccw_device *); 652int dasd_generic_restore_device(struct ccw_device *);
626enum uc_todo dasd_generic_uc_handler(struct ccw_device *, struct irb *); 653enum uc_todo dasd_generic_uc_handler(struct ccw_device *, struct irb *);
654void dasd_generic_path_event(struct ccw_device *, int *);
655int dasd_generic_verify_path(struct dasd_device *, __u8);
627 656
628int dasd_generic_read_dev_chars(struct dasd_device *, int, void *, int); 657int dasd_generic_read_dev_chars(struct dasd_device *, int, void *, int);
629char *dasd_get_sense(struct irb *); 658char *dasd_get_sense(struct irb *);
diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig
index 40834f18754c..dcee3c5c8954 100644
--- a/drivers/s390/char/Kconfig
+++ b/drivers/s390/char/Kconfig
@@ -2,76 +2,85 @@ comment "S/390 character device drivers"
2 depends on S390 2 depends on S390
3 3
4config TN3270 4config TN3270
5 tristate "Support for locally attached 3270 terminals" 5 def_tristate y
6 prompt "Support for locally attached 3270 terminals"
6 depends on CCW 7 depends on CCW
7 help 8 help
8 Include support for IBM 3270 terminals. 9 Include support for IBM 3270 terminals.
9 10
10config TN3270_TTY 11config TN3270_TTY
11 tristate "Support for tty input/output on 3270 terminals" 12 def_tristate y
13 prompt "Support for tty input/output on 3270 terminals"
12 depends on TN3270 14 depends on TN3270
13 help 15 help
14 Include support for using an IBM 3270 terminal as a Linux tty. 16 Include support for using an IBM 3270 terminal as a Linux tty.
15 17
16config TN3270_FS 18config TN3270_FS
17 tristate "Support for fullscreen applications on 3270 terminals" 19 def_tristate m
20 prompt "Support for fullscreen applications on 3270 terminals"
18 depends on TN3270 21 depends on TN3270
19 help 22 help
20 Include support for fullscreen applications on an IBM 3270 terminal. 23 Include support for fullscreen applications on an IBM 3270 terminal.
21 24
22config TN3270_CONSOLE 25config TN3270_CONSOLE
23 bool "Support for console on 3270 terminal" 26 def_bool y
27 prompt "Support for console on 3270 terminal"
24 depends on TN3270=y && TN3270_TTY=y 28 depends on TN3270=y && TN3270_TTY=y
25 help 29 help
26 Include support for using an IBM 3270 terminal as a Linux system 30 Include support for using an IBM 3270 terminal as a Linux system
27 console. Available only if 3270 support is compiled in statically. 31 console. Available only if 3270 support is compiled in statically.
28 32
29config TN3215 33config TN3215
30 bool "Support for 3215 line mode terminal" 34 def_bool y
35 prompt "Support for 3215 line mode terminal"
31 depends on CCW 36 depends on CCW
32 help 37 help
33 Include support for IBM 3215 line-mode terminals. 38 Include support for IBM 3215 line-mode terminals.
34 39
35config TN3215_CONSOLE 40config TN3215_CONSOLE
36 bool "Support for console on 3215 line mode terminal" 41 def_bool y
42 prompt "Support for console on 3215 line mode terminal"
37 depends on TN3215 43 depends on TN3215
38 help 44 help
39 Include support for using an IBM 3215 line-mode terminal as a 45 Include support for using an IBM 3215 line-mode terminal as a
40 Linux system console. 46 Linux system console.
41 47
42config CCW_CONSOLE 48config CCW_CONSOLE
43 bool 49 def_bool y if TN3215_CONSOLE || TN3270_CONSOLE
44 depends on TN3215_CONSOLE || TN3270_CONSOLE
45 default y
46 50
47config SCLP_TTY 51config SCLP_TTY
48 bool "Support for SCLP line mode terminal" 52 def_bool y
53 prompt "Support for SCLP line mode terminal"
49 depends on S390 54 depends on S390
50 help 55 help
51 Include support for IBM SCLP line-mode terminals. 56 Include support for IBM SCLP line-mode terminals.
52 57
53config SCLP_CONSOLE 58config SCLP_CONSOLE
54 bool "Support for console on SCLP line mode terminal" 59 def_bool y
60 prompt "Support for console on SCLP line mode terminal"
55 depends on SCLP_TTY 61 depends on SCLP_TTY
56 help 62 help
57 Include support for using an IBM HWC line-mode terminal as the Linux 63 Include support for using an IBM HWC line-mode terminal as the Linux
58 system console. 64 system console.
59 65
60config SCLP_VT220_TTY 66config SCLP_VT220_TTY
61 bool "Support for SCLP VT220-compatible terminal" 67 def_bool y
68 prompt "Support for SCLP VT220-compatible terminal"
62 depends on S390 69 depends on S390
63 help 70 help
64 Include support for an IBM SCLP VT220-compatible terminal. 71 Include support for an IBM SCLP VT220-compatible terminal.
65 72
66config SCLP_VT220_CONSOLE 73config SCLP_VT220_CONSOLE
67 bool "Support for console on SCLP VT220-compatible terminal" 74 def_bool y
75 prompt "Support for console on SCLP VT220-compatible terminal"
68 depends on SCLP_VT220_TTY 76 depends on SCLP_VT220_TTY
69 help 77 help
70 Include support for using an IBM SCLP VT220-compatible terminal as a 78 Include support for using an IBM SCLP VT220-compatible terminal as a
71 Linux system console. 79 Linux system console.
72 80
73config SCLP_CPI 81config SCLP_CPI
74 tristate "Control-Program Identification" 82 def_tristate m
83 prompt "Control-Program Identification"
75 depends on S390 84 depends on S390
76 help 85 help
77 This option enables the hardware console interface for system 86 This option enables the hardware console interface for system
@@ -83,7 +92,8 @@ config SCLP_CPI
83 need this feature and intend to run your kernel in LPAR. 92 need this feature and intend to run your kernel in LPAR.
84 93
85config SCLP_ASYNC 94config SCLP_ASYNC
86 tristate "Support for Call Home via Asynchronous SCLP Records" 95 def_tristate m
96 prompt "Support for Call Home via Asynchronous SCLP Records"
87 depends on S390 97 depends on S390
88 help 98 help
89 This option enables the call home function, which is able to inform 99 This option enables the call home function, which is able to inform
@@ -93,7 +103,8 @@ config SCLP_ASYNC
93 need this feature and intend to run your kernel in LPAR. 103 need this feature and intend to run your kernel in LPAR.
94 104
95config S390_TAPE 105config S390_TAPE
96 tristate "S/390 tape device support" 106 def_tristate m
107 prompt "S/390 tape device support"
97 depends on CCW 108 depends on CCW
98 help 109 help
99 Select this option if you want to access channel-attached tape 110 Select this option if you want to access channel-attached tape
@@ -109,7 +120,8 @@ comment "S/390 tape interface support"
109 depends on S390_TAPE 120 depends on S390_TAPE
110 121
111config S390_TAPE_BLOCK 122config S390_TAPE_BLOCK
112 bool "Support for tape block devices" 123 def_bool y
124 prompt "Support for tape block devices"
113 depends on S390_TAPE && BLOCK 125 depends on S390_TAPE && BLOCK
114 help 126 help
115 Select this option if you want to access your channel-attached tape 127 Select this option if you want to access your channel-attached tape
@@ -123,7 +135,8 @@ comment "S/390 tape hardware support"
123 depends on S390_TAPE 135 depends on S390_TAPE
124 136
125config S390_TAPE_34XX 137config S390_TAPE_34XX
126 tristate "Support for 3480/3490 tape hardware" 138 def_tristate m
139 prompt "Support for 3480/3490 tape hardware"
127 depends on S390_TAPE 140 depends on S390_TAPE
128 help 141 help
129 Select this option if you want to access IBM 3480/3490 magnetic 142 Select this option if you want to access IBM 3480/3490 magnetic
@@ -131,7 +144,8 @@ config S390_TAPE_34XX
131 It is safe to say "Y" here. 144 It is safe to say "Y" here.
132 145
133config S390_TAPE_3590 146config S390_TAPE_3590
134 tristate "Support for 3590 tape hardware" 147 def_tristate m
148 prompt "Support for 3590 tape hardware"
135 depends on S390_TAPE 149 depends on S390_TAPE
136 help 150 help
137 Select this option if you want to access IBM 3590 magnetic 151 Select this option if you want to access IBM 3590 magnetic
@@ -139,7 +153,8 @@ config S390_TAPE_3590
139 It is safe to say "Y" here. 153 It is safe to say "Y" here.
140 154
141config VMLOGRDR 155config VMLOGRDR
142 tristate "Support for the z/VM recording system services (VM only)" 156 def_tristate m
157 prompt "Support for the z/VM recording system services (VM only)"
143 depends on IUCV 158 depends on IUCV
144 help 159 help
145 Select this option if you want to be able to receive records collected 160 Select this option if you want to be able to receive records collected
@@ -148,29 +163,31 @@ config VMLOGRDR
148 This driver depends on the IUCV support driver. 163 This driver depends on the IUCV support driver.
149 164
150config VMCP 165config VMCP
151 bool "Support for the z/VM CP interface" 166 def_bool y
167 prompt "Support for the z/VM CP interface"
152 depends on S390 168 depends on S390
153 help 169 help
154 Select this option if you want to be able to interact with the control 170 Select this option if you want to be able to interact with the control
155 program on z/VM 171 program on z/VM
156 172
157config MONREADER 173config MONREADER
158 tristate "API for reading z/VM monitor service records" 174 def_tristate m
175 prompt "API for reading z/VM monitor service records"
159 depends on IUCV 176 depends on IUCV
160 help 177 help
161 Character device driver for reading z/VM monitor service records 178 Character device driver for reading z/VM monitor service records
162 179
163config MONWRITER 180config MONWRITER
164 tristate "API for writing z/VM monitor service records" 181 def_tristate m
182 prompt "API for writing z/VM monitor service records"
165 depends on S390 183 depends on S390
166 default "m"
167 help 184 help
168 Character device driver for writing z/VM monitor service records 185 Character device driver for writing z/VM monitor service records
169 186
170config S390_VMUR 187config S390_VMUR
171 tristate "z/VM unit record device driver" 188 def_tristate m
189 prompt "z/VM unit record device driver"
172 depends on S390 190 depends on S390
173 default "m"
174 help 191 help
175 Character device driver for z/VM reader, puncher and printer. 192 Character device driver for z/VM reader, puncher and printer.
176 193
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 59ec073724bf..3fb4335d491d 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -9,6 +9,7 @@
9 * Dan Morrison, IBM Corporation <dmorriso@cse.buffalo.edu> 9 * Dan Morrison, IBM Corporation <dmorriso@cse.buffalo.edu>
10 */ 10 */
11 11
12#include <linux/kernel_stat.h>
12#include <linux/module.h> 13#include <linux/module.h>
13#include <linux/types.h> 14#include <linux/types.h>
14#include <linux/kdev_t.h> 15#include <linux/kdev_t.h>
@@ -361,6 +362,7 @@ static void raw3215_irq(struct ccw_device *cdev, unsigned long intparm,
361 int cstat, dstat; 362 int cstat, dstat;
362 int count; 363 int count;
363 364
365 kstat_cpu(smp_processor_id()).irqs[IOINT_C15]++;
364 raw = dev_get_drvdata(&cdev->dev); 366 raw = dev_get_drvdata(&cdev->dev);
365 req = (struct raw3215_req *) intparm; 367 req = (struct raw3215_req *) intparm;
366 cstat = irb->scsw.cmd.cstat; 368 cstat = irb->scsw.cmd.cstat;
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 2a4c566456e7..96ba2fd1c8ad 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -7,6 +7,7 @@
7 * Copyright IBM Corp. 2003, 2009 7 * Copyright IBM Corp. 2003, 2009
8 */ 8 */
9 9
10#include <linux/kernel_stat.h>
10#include <linux/module.h> 11#include <linux/module.h>
11#include <linux/err.h> 12#include <linux/err.h>
12#include <linux/init.h> 13#include <linux/init.h>
@@ -329,6 +330,7 @@ raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
329 struct raw3270_request *rq; 330 struct raw3270_request *rq;
330 int rc; 331 int rc;
331 332
333 kstat_cpu(smp_processor_id()).irqs[IOINT_C70]++;
332 rp = dev_get_drvdata(&cdev->dev); 334 rp = dev_get_drvdata(&cdev->dev);
333 if (!rp) 335 if (!rp)
334 return; 336 return;
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 35cc4686b99b..b76c61f82485 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -7,6 +7,7 @@
7 * Martin Schwidefsky <schwidefsky@de.ibm.com> 7 * Martin Schwidefsky <schwidefsky@de.ibm.com>
8 */ 8 */
9 9
10#include <linux/kernel_stat.h>
10#include <linux/module.h> 11#include <linux/module.h>
11#include <linux/err.h> 12#include <linux/err.h>
12#include <linux/spinlock.h> 13#include <linux/spinlock.h>
@@ -18,16 +19,14 @@
18#include <linux/suspend.h> 19#include <linux/suspend.h>
19#include <linux/completion.h> 20#include <linux/completion.h>
20#include <linux/platform_device.h> 21#include <linux/platform_device.h>
21#include <asm/types.h>
22#include <asm/s390_ext.h> 22#include <asm/s390_ext.h>
23#include <asm/types.h>
24#include <asm/irq.h>
23 25
24#include "sclp.h" 26#include "sclp.h"
25 27
26#define SCLP_HEADER "sclp: " 28#define SCLP_HEADER "sclp: "
27 29
28/* Structure for register_early_external_interrupt. */
29static ext_int_info_t ext_int_info_hwc;
30
31/* Lock to protect internal data consistency. */ 30/* Lock to protect internal data consistency. */
32static DEFINE_SPINLOCK(sclp_lock); 31static DEFINE_SPINLOCK(sclp_lock);
33 32
@@ -402,6 +401,7 @@ static void sclp_interrupt_handler(unsigned int ext_int_code,
402 u32 finished_sccb; 401 u32 finished_sccb;
403 u32 evbuf_pending; 402 u32 evbuf_pending;
404 403
404 kstat_cpu(smp_processor_id()).irqs[EXTINT_SCP]++;
405 spin_lock(&sclp_lock); 405 spin_lock(&sclp_lock);
406 finished_sccb = param32 & 0xfffffff8; 406 finished_sccb = param32 & 0xfffffff8;
407 evbuf_pending = param32 & 0x3; 407 evbuf_pending = param32 & 0x3;
@@ -824,6 +824,7 @@ static void sclp_check_handler(unsigned int ext_int_code,
824{ 824{
825 u32 finished_sccb; 825 u32 finished_sccb;
826 826
827 kstat_cpu(smp_processor_id()).irqs[EXTINT_SCP]++;
827 finished_sccb = param32 & 0xfffffff8; 828 finished_sccb = param32 & 0xfffffff8;
828 /* Is this the interrupt we are waiting for? */ 829 /* Is this the interrupt we are waiting for? */
829 if (finished_sccb == 0) 830 if (finished_sccb == 0)
@@ -866,8 +867,7 @@ sclp_check_interface(void)
866 867
867 spin_lock_irqsave(&sclp_lock, flags); 868 spin_lock_irqsave(&sclp_lock, flags);
868 /* Prepare init mask command */ 869 /* Prepare init mask command */
869 rc = register_early_external_interrupt(0x2401, sclp_check_handler, 870 rc = register_external_interrupt(0x2401, sclp_check_handler);
870 &ext_int_info_hwc);
871 if (rc) { 871 if (rc) {
872 spin_unlock_irqrestore(&sclp_lock, flags); 872 spin_unlock_irqrestore(&sclp_lock, flags);
873 return rc; 873 return rc;
@@ -900,8 +900,7 @@ sclp_check_interface(void)
900 } else 900 } else
901 rc = -EBUSY; 901 rc = -EBUSY;
902 } 902 }
903 unregister_early_external_interrupt(0x2401, sclp_check_handler, 903 unregister_external_interrupt(0x2401, sclp_check_handler);
904 &ext_int_info_hwc);
905 spin_unlock_irqrestore(&sclp_lock, flags); 904 spin_unlock_irqrestore(&sclp_lock, flags);
906 return rc; 905 return rc;
907} 906}
@@ -1064,8 +1063,7 @@ sclp_init(void)
1064 if (rc) 1063 if (rc)
1065 goto fail_init_state_uninitialized; 1064 goto fail_init_state_uninitialized;
1066 /* Register interrupt handler */ 1065 /* Register interrupt handler */
1067 rc = register_early_external_interrupt(0x2401, sclp_interrupt_handler, 1066 rc = register_external_interrupt(0x2401, sclp_interrupt_handler);
1068 &ext_int_info_hwc);
1069 if (rc) 1067 if (rc)
1070 goto fail_unregister_reboot_notifier; 1068 goto fail_unregister_reboot_notifier;
1071 sclp_init_state = sclp_init_state_initialized; 1069 sclp_init_state = sclp_init_state_initialized;
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
index b497afe061cc..16e232a99fb7 100644
--- a/drivers/s390/char/sclp_config.c
+++ b/drivers/s390/char/sclp_config.c
@@ -33,6 +33,7 @@ static void sclp_cpu_capability_notify(struct work_struct *work)
33 int cpu; 33 int cpu;
34 struct sys_device *sysdev; 34 struct sys_device *sysdev;
35 35
36 s390_adjust_jiffies();
36 pr_warning("cpu capability changed.\n"); 37 pr_warning("cpu capability changed.\n");
37 get_online_cpus(); 38 get_online_cpus();
38 for_each_online_cpu(cpu) { 39 for_each_online_cpu(cpu) {
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index b3a3e8e8656e..7978a0adeaf3 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -14,6 +14,7 @@
14#define KMSG_COMPONENT "tape" 14#define KMSG_COMPONENT "tape"
15#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 15#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
16 16
17#include <linux/kernel_stat.h>
17#include <linux/module.h> 18#include <linux/module.h>
18#include <linux/init.h> // for kernel parameters 19#include <linux/init.h> // for kernel parameters
19#include <linux/kmod.h> // for requesting modules 20#include <linux/kmod.h> // for requesting modules
@@ -1114,6 +1115,7 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1114 struct tape_request *request; 1115 struct tape_request *request;
1115 int rc; 1116 int rc;
1116 1117
1118 kstat_cpu(smp_processor_id()).irqs[IOINT_TAP]++;
1117 device = dev_get_drvdata(&cdev->dev); 1119 device = dev_get_drvdata(&cdev->dev);
1118 if (device == NULL) { 1120 if (device == NULL) {
1119 return; 1121 return;
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index f7e4ae6bf15a..caef1757341d 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -11,6 +11,7 @@
11#define KMSG_COMPONENT "vmur" 11#define KMSG_COMPONENT "vmur"
12#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 12#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
13 13
14#include <linux/kernel_stat.h>
14#include <linux/cdev.h> 15#include <linux/cdev.h>
15#include <linux/slab.h> 16#include <linux/slab.h>
16 17
@@ -302,6 +303,7 @@ static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
302{ 303{
303 struct urdev *urd; 304 struct urdev *urd;
304 305
306 kstat_cpu(smp_processor_id()).irqs[IOINT_VMR]++;
305 TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n", 307 TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n",
306 intparm, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat, 308 intparm, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
307 irb->scsw.cmd.count); 309 irb->scsw.cmd.count);
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 97b25d68e3e7..2864581d8ecb 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -67,6 +67,27 @@ __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev)
67} 67}
68 68
69/* 69/*
70 * Remove references from ccw devices to ccw group device and from
71 * ccw group device to ccw devices.
72 */
73static void __ccwgroup_remove_cdev_refs(struct ccwgroup_device *gdev)
74{
75 struct ccw_device *cdev;
76 int i;
77
78 for (i = 0; i < gdev->count; i++) {
79 cdev = gdev->cdev[i];
80 if (!cdev)
81 continue;
82 spin_lock_irq(cdev->ccwlock);
83 dev_set_drvdata(&cdev->dev, NULL);
84 spin_unlock_irq(cdev->ccwlock);
85 gdev->cdev[i] = NULL;
86 put_device(&cdev->dev);
87 }
88}
89
90/*
70 * Provide an 'ungroup' attribute so the user can remove group devices no 91 * Provide an 'ungroup' attribute so the user can remove group devices no
 71 * longer needed or accidentally created. Saves memory :) 92 * longer needed or accidentally created. Saves memory :)
72 */ 93 */
@@ -78,6 +99,7 @@ static void ccwgroup_ungroup_callback(struct device *dev)
78 if (device_is_registered(&gdev->dev)) { 99 if (device_is_registered(&gdev->dev)) {
79 __ccwgroup_remove_symlinks(gdev); 100 __ccwgroup_remove_symlinks(gdev);
80 device_unregister(dev); 101 device_unregister(dev);
102 __ccwgroup_remove_cdev_refs(gdev);
81 } 103 }
82 mutex_unlock(&gdev->reg_mutex); 104 mutex_unlock(&gdev->reg_mutex);
83} 105}
@@ -116,21 +138,7 @@ static DEVICE_ATTR(ungroup, 0200, NULL, ccwgroup_ungroup_store);
116static void 138static void
117ccwgroup_release (struct device *dev) 139ccwgroup_release (struct device *dev)
118{ 140{
119 struct ccwgroup_device *gdev; 141 kfree(to_ccwgroupdev(dev));
120 int i;
121
122 gdev = to_ccwgroupdev(dev);
123
124 for (i = 0; i < gdev->count; i++) {
125 if (gdev->cdev[i]) {
126 spin_lock_irq(gdev->cdev[i]->ccwlock);
127 if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev)
128 dev_set_drvdata(&gdev->cdev[i]->dev, NULL);
129 spin_unlock_irq(gdev->cdev[i]->ccwlock);
130 put_device(&gdev->cdev[i]->dev);
131 }
132 }
133 kfree(gdev);
134} 142}
135 143
136static int 144static int
@@ -639,6 +647,7 @@ void ccwgroup_driver_unregister(struct ccwgroup_driver *cdriver)
639 mutex_lock(&gdev->reg_mutex); 647 mutex_lock(&gdev->reg_mutex);
640 __ccwgroup_remove_symlinks(gdev); 648 __ccwgroup_remove_symlinks(gdev);
641 device_unregister(dev); 649 device_unregister(dev);
650 __ccwgroup_remove_cdev_refs(gdev);
642 mutex_unlock(&gdev->reg_mutex); 651 mutex_unlock(&gdev->reg_mutex);
643 put_device(dev); 652 put_device(dev);
644 } 653 }
@@ -660,25 +669,6 @@ int ccwgroup_probe_ccwdev(struct ccw_device *cdev)
660 return 0; 669 return 0;
661} 670}
662 671
663static struct ccwgroup_device *
664__ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev)
665{
666 struct ccwgroup_device *gdev;
667
668 gdev = dev_get_drvdata(&cdev->dev);
669 if (gdev) {
670 if (get_device(&gdev->dev)) {
671 mutex_lock(&gdev->reg_mutex);
672 if (device_is_registered(&gdev->dev))
673 return gdev;
674 mutex_unlock(&gdev->reg_mutex);
675 put_device(&gdev->dev);
676 }
677 return NULL;
678 }
679 return NULL;
680}
681
682/** 672/**
683 * ccwgroup_remove_ccwdev() - remove function for slave devices 673 * ccwgroup_remove_ccwdev() - remove function for slave devices
684 * @cdev: ccw device to be removed 674 * @cdev: ccw device to be removed
@@ -694,13 +684,25 @@ void ccwgroup_remove_ccwdev(struct ccw_device *cdev)
694 /* Ignore offlining errors, device is gone anyway. */ 684 /* Ignore offlining errors, device is gone anyway. */
695 ccw_device_set_offline(cdev); 685 ccw_device_set_offline(cdev);
696 /* If one of its devices is gone, the whole group is done for. */ 686 /* If one of its devices is gone, the whole group is done for. */
697 gdev = __ccwgroup_get_gdev_by_cdev(cdev); 687 spin_lock_irq(cdev->ccwlock);
698 if (gdev) { 688 gdev = dev_get_drvdata(&cdev->dev);
689 if (!gdev) {
690 spin_unlock_irq(cdev->ccwlock);
691 return;
692 }
693 /* Get ccwgroup device reference for local processing. */
694 get_device(&gdev->dev);
695 spin_unlock_irq(cdev->ccwlock);
696 /* Unregister group device. */
697 mutex_lock(&gdev->reg_mutex);
698 if (device_is_registered(&gdev->dev)) {
699 __ccwgroup_remove_symlinks(gdev); 699 __ccwgroup_remove_symlinks(gdev);
700 device_unregister(&gdev->dev); 700 device_unregister(&gdev->dev);
701 mutex_unlock(&gdev->reg_mutex); 701 __ccwgroup_remove_cdev_refs(gdev);
702 put_device(&gdev->dev);
703 } 702 }
703 mutex_unlock(&gdev->reg_mutex);
704 /* Release ccwgroup device reference for local processing. */
705 put_device(&gdev->dev);
704} 706}
705 707
706MODULE_LICENSE("GPL"); 708MODULE_LICENSE("GPL");
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 1aaddea673e0..0689fcf23a11 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -695,6 +695,25 @@ out:
695 return ret; 695 return ret;
696} 696}
697 697
698int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
699 struct channel_path_desc_fmt1 *desc)
700{
701 struct chsc_response_struct *chsc_resp;
702 struct chsc_scpd *scpd_area;
703 int ret;
704
705 spin_lock_irq(&chsc_page_lock);
706 scpd_area = chsc_page;
707 ret = chsc_determine_channel_path_desc(chpid, 0, 0, 1, 0, scpd_area);
708 if (ret)
709 goto out;
710 chsc_resp = (void *)&scpd_area->response;
711 memcpy(desc, &chsc_resp->data, sizeof(*desc));
712out:
713 spin_unlock_irq(&chsc_page_lock);
714 return ret;
715}
716
698static void 717static void
699chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv, 718chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
700 struct cmg_chars *chars) 719 struct cmg_chars *chars)
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 6693f5e3176f..3f15b2aaeaea 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -35,6 +35,22 @@ struct channel_path_desc {
35 u8 chpp; 35 u8 chpp;
36} __attribute__ ((packed)); 36} __attribute__ ((packed));
37 37
38struct channel_path_desc_fmt1 {
39 u8 flags;
40 u8 lsn;
41 u8 desc;
42 u8 chpid;
43 u32:24;
44 u8 chpp;
45 u32 unused[3];
46 u16 mdc;
47 u16:13;
48 u8 r:1;
49 u8 s:1;
50 u8 f:1;
51 u32 zeros[2];
52} __attribute__ ((packed));
53
38struct channel_path; 54struct channel_path;
39 55
40struct css_chsc_char { 56struct css_chsc_char {
@@ -92,6 +108,8 @@ int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
92 int c, int m, void *page); 108 int c, int m, void *page);
93int chsc_determine_base_channel_path_desc(struct chp_id chpid, 109int chsc_determine_base_channel_path_desc(struct chp_id chpid,
94 struct channel_path_desc *desc); 110 struct channel_path_desc *desc);
111int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
112 struct channel_path_desc_fmt1 *desc);
95void chsc_chp_online(struct chp_id chpid); 113void chsc_chp_online(struct chp_id chpid);
96void chsc_chp_offline(struct chp_id chpid); 114void chsc_chp_offline(struct chp_id chpid);
97int chsc_get_channel_measurement_chars(struct channel_path *chp); 115int chsc_get_channel_measurement_chars(struct channel_path *chp);
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 825951b6b83f..24d8e97355b9 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -618,6 +618,7 @@ EXPORT_SYMBOL_GPL(css_schedule_reprobe);
618static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow) 618static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
619{ 619{
620 struct subchannel_id mchk_schid; 620 struct subchannel_id mchk_schid;
621 struct subchannel *sch;
621 622
622 if (overflow) { 623 if (overflow) {
623 css_schedule_eval_all(); 624 css_schedule_eval_all();
@@ -637,6 +638,13 @@ static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
637 if (crw1) 638 if (crw1)
638 mchk_schid.ssid = (crw1->rsid >> 4) & 3; 639 mchk_schid.ssid = (crw1->rsid >> 4) & 3;
639 640
641 if (crw0->erc == CRW_ERC_PMOD) {
642 sch = get_subchannel_by_schid(mchk_schid);
643 if (sch) {
644 css_update_ssd_info(sch);
645 put_device(&sch->dev);
646 }
647 }
640 /* 648 /*
641 * Since we are always presented with IPI in the CRW, we have to 649 * Since we are always presented with IPI in the CRW, we have to
642 * use stsch() to find out if the subchannel in question has come 650 * use stsch() to find out if the subchannel in question has come
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 6da84543dfe9..651976b54af8 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -687,6 +687,46 @@ int ccw_device_tm_start_timeout(struct ccw_device *cdev, struct tcw *tcw,
687EXPORT_SYMBOL(ccw_device_tm_start_timeout); 687EXPORT_SYMBOL(ccw_device_tm_start_timeout);
688 688
689/** 689/**
690 * ccw_device_get_mdc - accumulate max data count
691 * @cdev: ccw device for which the max data count is accumulated
692 * @mask: mask of paths to use
693 *
 694 * Return the number of 64K-byte blocks that all paths at least support
695 * for a transport command. Return values <= 0 indicate failures.
696 */
697int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask)
698{
699 struct subchannel *sch = to_subchannel(cdev->dev.parent);
700 struct channel_path_desc_fmt1 desc;
701 struct chp_id chpid;
702 int mdc = 0, ret, i;
703
 704 /* Adjust the requested path mask to exclude varied-off paths. */
705 if (mask)
706 mask &= sch->lpm;
707 else
708 mask = sch->lpm;
709
710 chp_id_init(&chpid);
711 for (i = 0; i < 8; i++) {
712 if (!(mask & (0x80 >> i)))
713 continue;
714 chpid.id = sch->schib.pmcw.chpid[i];
715 ret = chsc_determine_fmt1_channel_path_desc(chpid, &desc);
716 if (ret)
717 return ret;
718 if (!desc.f)
719 return 0;
720 if (!desc.r)
721 mdc = 1;
722 mdc = mdc ? min(mdc, (int)desc.mdc) : desc.mdc;
723 }
724
725 return mdc;
726}
727EXPORT_SYMBOL(ccw_device_get_mdc);
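A hedged usage sketch: the likely consumer turns the returned 64K-block count into a byte limit. FCX_MAX_DATA_FACTOR (65536) is the define added to dasd_eckd.h earlier in this series; whether the ECKD driver computes its fcx_max_data exactly this way is an assumption, as the call site is not shown in this diff.

	/* Assumed consumer-side helper, not part of this patch. */
	static unsigned int fcx_max_data_from_mdc(int mdc)
	{
		if (mdc <= 0)		/* <= 0 signals failure / no FCX support */
			return 0;
		return mdc * 65536u;	/* FCX_MAX_DATA_FACTOR */
	}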
728
729/**
690 * ccw_device_tm_intrg - perform interrogate function 730 * ccw_device_tm_intrg - perform interrogate function
691 * @cdev: ccw device on which to perform the interrogate function 731 * @cdev: ccw device on which to perform the interrogate function
692 * 732 *
diff --git a/drivers/s390/cio/itcw.c b/drivers/s390/cio/itcw.c
index a0ae29564774..358ee16d10a2 100644
--- a/drivers/s390/cio/itcw.c
+++ b/drivers/s390/cio/itcw.c
@@ -93,6 +93,7 @@ EXPORT_SYMBOL(itcw_get_tcw);
93size_t itcw_calc_size(int intrg, int max_tidaws, int intrg_max_tidaws) 93size_t itcw_calc_size(int intrg, int max_tidaws, int intrg_max_tidaws)
94{ 94{
95 size_t len; 95 size_t len;
96 int cross_count;
96 97
97 /* Main data. */ 98 /* Main data. */
98 len = sizeof(struct itcw); 99 len = sizeof(struct itcw);
@@ -105,12 +106,27 @@ size_t itcw_calc_size(int intrg, int max_tidaws, int intrg_max_tidaws)
105 /* TSB */ sizeof(struct tsb) + 106 /* TSB */ sizeof(struct tsb) +
106 /* TIDAL */ intrg_max_tidaws * sizeof(struct tidaw); 107 /* TIDAL */ intrg_max_tidaws * sizeof(struct tidaw);
107 } 108 }
109
108 /* Maximum required alignment padding. */ 110 /* Maximum required alignment padding. */
109 len += /* Initial TCW */ 63 + /* Interrogate TCCB */ 7; 111 len += /* Initial TCW */ 63 + /* Interrogate TCCB */ 7;
110 /* Maximum padding for structures that may not cross 4k boundary. */ 112
111 if ((max_tidaws > 0) || (intrg_max_tidaws > 0)) 113 /* TIDAW lists may not cross a 4k boundary. To cross a
112 len += max(max_tidaws, intrg_max_tidaws) * 114 * boundary we need to add a TTIC TIDAW. We need to reserve
113 sizeof(struct tidaw) - 1; 115 * one additional TIDAW for a TTIC that we may need to add due
116 * to the placement of the data chunk in memory, and a further
117 * TIDAW for each page boundary that the TIDAW list may cross
118 * due to its own size.
119 */
120 if (max_tidaws) {
121 cross_count = 1 + ((max_tidaws * sizeof(struct tidaw) - 1)
122 >> PAGE_SHIFT);
123 len += cross_count * sizeof(struct tidaw);
124 }
125 if (intrg_max_tidaws) {
126 cross_count = 1 + ((intrg_max_tidaws * sizeof(struct tidaw) - 1)
127 >> PAGE_SHIFT);
128 len += cross_count * sizeof(struct tidaw);
129 }
114 return len; 130 return len;
115} 131}
116EXPORT_SYMBOL(itcw_calc_size); 132EXPORT_SYMBOL(itcw_calc_size);
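The reservation formula in the new code rewards a worked example. The sketch below assumes 4K pages and a 16-byte struct tidaw (the values on s390) and computes the number of TTIC tidaws that itcw_calc_size() now budgets for a list of n data tidaws:

#include <stdio.h>

#define PAGE_SHIFT 12   /* 4K pages, as on s390 */
#define TIDAW_SIZE 16   /* assumed sizeof(struct tidaw) */

/* Worst-case TTIC count for n data tidaws: one for the boundary forced
 * by the placement of the list in memory, plus one for every page
 * boundary the list can cross due to its own size. */
static int ttic_reserve(int n)
{
        return 1 + ((n * TIDAW_SIZE - 1) >> PAGE_SHIFT);
}

int main(void)
{
        printf("%d\n", ttic_reserve(1));   /* 1 */
        printf("%d\n", ttic_reserve(256)); /* 1: 4096 bytes, no extra page */
        printf("%d\n", ttic_reserve(257)); /* 2 */
        return 0;
}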
@@ -165,6 +181,7 @@ struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg,
165 void *chunk; 181 void *chunk;
166 addr_t start; 182 addr_t start;
167 addr_t end; 183 addr_t end;
184 int cross_count;
168 185
169 /* Check for 2G limit. */ 186 /* Check for 2G limit. */
170 start = (addr_t) buffer; 187 start = (addr_t) buffer;
@@ -177,8 +194,17 @@ struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg,
177 if (IS_ERR(chunk)) 194 if (IS_ERR(chunk))
178 return chunk; 195 return chunk;
179 itcw = chunk; 196 itcw = chunk;
180 itcw->max_tidaws = max_tidaws; 197 /* allow for TTIC tidaws that may be needed to cross a page boundary */
181 itcw->intrg_max_tidaws = intrg_max_tidaws; 198 cross_count = 0;
199 if (max_tidaws)
200 cross_count = 1 + ((max_tidaws * sizeof(struct tidaw) - 1)
201 >> PAGE_SHIFT);
202 itcw->max_tidaws = max_tidaws + cross_count;
203 cross_count = 0;
204 if (intrg_max_tidaws)
205 cross_count = 1 + ((intrg_max_tidaws * sizeof(struct tidaw) - 1)
206 >> PAGE_SHIFT);
207 itcw->intrg_max_tidaws = intrg_max_tidaws + cross_count;
182 /* Main TCW. */ 208 /* Main TCW. */
183 chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0); 209 chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0);
184 if (IS_ERR(chunk)) 210 if (IS_ERR(chunk))
@@ -198,7 +224,7 @@ struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg,
198 /* Data TIDAL. */ 224 /* Data TIDAL. */
199 if (max_tidaws > 0) { 225 if (max_tidaws > 0) {
200 chunk = fit_chunk(&start, end, sizeof(struct tidaw) * 226 chunk = fit_chunk(&start, end, sizeof(struct tidaw) *
201 max_tidaws, 16, 1); 227 itcw->max_tidaws, 16, 0);
202 if (IS_ERR(chunk)) 228 if (IS_ERR(chunk))
203 return chunk; 229 return chunk;
204 tcw_set_data(itcw->tcw, chunk, 1); 230 tcw_set_data(itcw->tcw, chunk, 1);
@@ -206,7 +232,7 @@ struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg,
206 /* Interrogate data TIDAL. */ 232 /* Interrogate data TIDAL. */
207 if (intrg && (intrg_max_tidaws > 0)) { 233 if (intrg && (intrg_max_tidaws > 0)) {
208 chunk = fit_chunk(&start, end, sizeof(struct tidaw) * 234 chunk = fit_chunk(&start, end, sizeof(struct tidaw) *
209 intrg_max_tidaws, 16, 1); 235 itcw->intrg_max_tidaws, 16, 0);
210 if (IS_ERR(chunk)) 236 if (IS_ERR(chunk))
211 return chunk; 237 return chunk;
212 tcw_set_data(itcw->intrg_tcw, chunk, 1); 238 tcw_set_data(itcw->intrg_tcw, chunk, 1);
@@ -283,13 +309,29 @@ EXPORT_SYMBOL(itcw_add_dcw);
283 * the new tidaw on success or -%ENOSPC if the new tidaw would exceed the 309 * the new tidaw on success or -%ENOSPC if the new tidaw would exceed the
284 * available space. 310 * available space.
285 * 311 *
286 * Note: the tidaw-list is assumed to be contiguous with no ttics. The 312 * Note: TTIC tidaws are automatically added when needed, so explicitly calling
287 * last-tidaw flag for the last tidaw in the list will be set by itcw_finalize. 313 * this interface with the TTIC flag is not supported. The last-tidaw flag
314 * for the last tidaw in the list will be set by itcw_finalize.
288 */ 315 */
289struct tidaw *itcw_add_tidaw(struct itcw *itcw, u8 flags, void *addr, u32 count) 316struct tidaw *itcw_add_tidaw(struct itcw *itcw, u8 flags, void *addr, u32 count)
290{ 317{
318 struct tidaw *following;
319
291 if (itcw->num_tidaws >= itcw->max_tidaws) 320 if (itcw->num_tidaws >= itcw->max_tidaws)
292 return ERR_PTR(-ENOSPC); 321 return ERR_PTR(-ENOSPC);
322 /*
323 * Does the tidaw that follows the one we are about to fill start on the
324 * next page? Then we have to insert a TTIC tidaw first, pointing to the
325 * tidaw on the new page.
326 */
327 following = ((struct tidaw *) tcw_get_data(itcw->tcw))
328 + itcw->num_tidaws + 1;
329 if (itcw->num_tidaws && !((unsigned long) following & ~PAGE_MASK)) {
330 tcw_add_tidaw(itcw->tcw, itcw->num_tidaws++,
331 TIDAW_FLAGS_TTIC, following, 0);
332 if (itcw->num_tidaws >= itcw->max_tidaws)
333 return ERR_PTR(-ENOSPC);
334 }
293 return tcw_add_tidaw(itcw->tcw, itcw->num_tidaws++, flags, addr, count); 335 return tcw_add_tidaw(itcw->tcw, itcw->num_tidaws++, flags, addr, count);
294} 336}
295EXPORT_SYMBOL(itcw_add_tidaw); 337EXPORT_SYMBOL(itcw_add_tidaw);
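The page-boundary test in the reworked itcw_add_tidaw() is compact enough to check standalone. A sketch with the pointer arithmetic reduced to uintptr_t (page and tidaw sizes assumed as above):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE  4096UL
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define TIDAW_SIZE 16UL /* assumed sizeof(struct tidaw) */

/* Does slot num_tidaws + 1 of a tidaw list based at "base" begin
 * exactly on a page boundary?  If so, the slot being filled now is the
 * last one on its page and a TTIC tidaw pointing to the next page must
 * be chained in first. */
static int needs_ttic(uintptr_t base, unsigned int num_tidaws)
{
        uintptr_t following = base + (num_tidaws + 1) * TIDAW_SIZE;

        return num_tidaws && !(following & ~PAGE_MASK);
}

int main(void)
{
        uintptr_t base = 0x10000; /* page-aligned list start */

        printf("%d\n", needs_ttic(base, 255)); /* 1: slot 256 opens a page */
        printf("%d\n", needs_ttic(base, 10));  /* 0 */
        return 0;
}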
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 0f4ef8769a3d..7bc643f3f5ab 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -91,6 +91,12 @@ enum qdio_irq_states {
91#define AC1_SC_QEBSM_AVAILABLE 0x02 /* available for subchannel */ 91#define AC1_SC_QEBSM_AVAILABLE 0x02 /* available for subchannel */
92#define AC1_SC_QEBSM_ENABLED 0x01 /* enabled for subchannel */ 92#define AC1_SC_QEBSM_ENABLED 0x01 /* enabled for subchannel */
93 93
94/* SIGA flags */
95#define QDIO_SIGA_WRITE 0x00
96#define QDIO_SIGA_READ 0x01
97#define QDIO_SIGA_SYNC 0x02
98#define QDIO_SIGA_QEBSM_FLAG 0x80
99
94#ifdef CONFIG_64BIT 100#ifdef CONFIG_64BIT
95static inline int do_sqbs(u64 token, unsigned char state, int queue, 101static inline int do_sqbs(u64 token, unsigned char state, int queue,
96 int *start, int *count) 102 int *start, int *count)
@@ -142,10 +148,9 @@ struct siga_flag {
142 u8 input:1; 148 u8 input:1;
143 u8 output:1; 149 u8 output:1;
144 u8 sync:1; 150 u8 sync:1;
145 u8 no_sync_ti:1; 151 u8 sync_after_ai:1;
146 u8 no_sync_out_ti:1; 152 u8 sync_out_after_pci:1;
147 u8 no_sync_out_pci:1; 153 u8:3;
148 u8:2;
149} __attribute__ ((packed)); 154} __attribute__ ((packed));
150 155
151struct chsc_ssqd_area { 156struct chsc_ssqd_area {
@@ -202,6 +207,7 @@ struct qdio_dev_perf_stat {
202 unsigned int inbound_queue_full; 207 unsigned int inbound_queue_full;
203 unsigned int outbound_call; 208 unsigned int outbound_call;
204 unsigned int outbound_handler; 209 unsigned int outbound_handler;
210 unsigned int outbound_queue_full;
205 unsigned int fast_requeue; 211 unsigned int fast_requeue;
206 unsigned int target_full; 212 unsigned int target_full;
207 unsigned int eqbs; 213 unsigned int eqbs;
@@ -245,10 +251,10 @@ struct qdio_input_q {
245struct qdio_output_q { 251struct qdio_output_q {
246 /* PCIs are enabled for the queue */ 252 /* PCIs are enabled for the queue */
247 int pci_out_enabled; 253 int pci_out_enabled;
248 /* IQDIO: output multiple buffers (enhanced SIGA) */
249 int use_enh_siga;
250 /* timer to check for more outbound work */ 254 /* timer to check for more outbound work */
251 struct timer_list timer; 255 struct timer_list timer;
256 /* used SBALs before tasklet schedule */
257 int scan_threshold;
252}; 258};
253 259
254/* 260/*
@@ -383,12 +389,13 @@ static inline int multicast_outbound(struct qdio_q *q)
383 (q->irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED) 389 (q->irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)
384#define is_qebsm(q) (q->irq_ptr->sch_token != 0) 390#define is_qebsm(q) (q->irq_ptr->sch_token != 0)
385 391
386#define need_siga_sync_thinint(q) (!q->irq_ptr->siga_flag.no_sync_ti)
387#define need_siga_sync_out_thinint(q) (!q->irq_ptr->siga_flag.no_sync_out_ti)
388#define need_siga_in(q) (q->irq_ptr->siga_flag.input) 392#define need_siga_in(q) (q->irq_ptr->siga_flag.input)
389#define need_siga_out(q) (q->irq_ptr->siga_flag.output) 393#define need_siga_out(q) (q->irq_ptr->siga_flag.output)
390#define need_siga_sync(q) (q->irq_ptr->siga_flag.sync) 394#define need_siga_sync(q) (unlikely(q->irq_ptr->siga_flag.sync))
391#define siga_syncs_out_pci(q) (q->irq_ptr->siga_flag.no_sync_out_pci) 395#define need_siga_sync_after_ai(q) \
396 (unlikely(q->irq_ptr->siga_flag.sync_after_ai))
397#define need_siga_sync_out_after_pci(q) \
398 (unlikely(q->irq_ptr->siga_flag.sync_out_after_pci))
392 399
393#define for_each_input_queue(irq_ptr, q, i) \ 400#define for_each_input_queue(irq_ptr, q, i) \
394 for (i = 0, q = irq_ptr->input_qs[0]; \ 401 for (i = 0, q = irq_ptr->input_qs[0]; \
@@ -423,9 +430,9 @@ struct indicator_t {
423 430
424extern struct indicator_t *q_indicators; 431extern struct indicator_t *q_indicators;
425 432
426static inline int shared_ind(struct qdio_irq *irq_ptr) 433static inline int shared_ind(u32 *dsci)
427{ 434{
428 return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind; 435 return dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
429} 436}
430 437
431/* prototypes for thin interrupt */ 438/* prototypes for thin interrupt */
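A short sketch of how the new QDIO_SIGA_* constants combine, mirroring the pattern the qdio_main.c hunks below adopt for qdio_siga_{sync,input,output}; struct irq_info is a stand-in for the relevant fields of struct qdio_irq, not the kernel's type:

#include <stdio.h>

/* SIGA function codes from the hunk above */
#define QDIO_SIGA_WRITE      0x00
#define QDIO_SIGA_READ       0x01
#define QDIO_SIGA_SYNC       0x02
#define QDIO_SIGA_QEBSM_FLAG 0x80

struct irq_info {
        unsigned int  schid;     /* subchannel id word */
        unsigned long sch_token; /* non-zero iff QEBSM is in use */
};

/* Start from the base function code; under QEBSM replace the
 * subchannel id with the token and set the QEBSM flag bit. */
static void siga_args(const struct irq_info *irq, unsigned int base_fc,
                      unsigned long *schid, unsigned int *fc)
{
        *schid = irq->schid;
        *fc = base_fc;
        if (irq->sch_token) {
                *schid = irq->sch_token;
                *fc |= QDIO_SIGA_QEBSM_FLAG;
        }
}

int main(void)
{
        struct irq_info irq = { .schid = 0x10003, .sch_token = 0 };
        unsigned long schid;
        unsigned int fc;

        siga_args(&irq, QDIO_SIGA_SYNC, &schid, &fc);
        printf("schid=%#lx fc=%#x\n", schid, fc); /* schid=0x10003 fc=0x2 */
        return 0;
}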
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index 28868e7471a5..f8b03a636e49 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -151,6 +151,7 @@ static char *qperf_names[] = {
151 "Inbound queue full", 151 "Inbound queue full",
152 "Outbound calls", 152 "Outbound calls",
153 "Outbound handler", 153 "Outbound handler",
154 "Outbound queue full",
154 "Outbound fast_requeue", 155 "Outbound fast_requeue",
155 "Outbound target_full", 156 "Outbound target_full",
156 "QEBSM eqbs", 157 "QEBSM eqbs",
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 5fcfa7f9e9ef..e9fff2b9bce2 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -14,6 +14,7 @@
14#include <linux/timer.h> 14#include <linux/timer.h>
15#include <linux/delay.h> 15#include <linux/delay.h>
16#include <linux/gfp.h> 16#include <linux/gfp.h>
17#include <linux/kernel_stat.h>
17#include <asm/atomic.h> 18#include <asm/atomic.h>
18#include <asm/debug.h> 19#include <asm/debug.h>
19#include <asm/qdio.h> 20#include <asm/qdio.h>
@@ -29,11 +30,12 @@ MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
29MODULE_DESCRIPTION("QDIO base support"); 30MODULE_DESCRIPTION("QDIO base support");
30MODULE_LICENSE("GPL"); 31MODULE_LICENSE("GPL");
31 32
32static inline int do_siga_sync(struct subchannel_id schid, 33static inline int do_siga_sync(unsigned long schid,
33 unsigned int out_mask, unsigned int in_mask) 34 unsigned int out_mask, unsigned int in_mask,
35 unsigned int fc)
34{ 36{
35 register unsigned long __fc asm ("0") = 2; 37 register unsigned long __fc asm ("0") = fc;
36 register struct subchannel_id __schid asm ("1") = schid; 38 register unsigned long __schid asm ("1") = schid;
37 register unsigned long out asm ("2") = out_mask; 39 register unsigned long out asm ("2") = out_mask;
38 register unsigned long in asm ("3") = in_mask; 40 register unsigned long in asm ("3") = in_mask;
39 int cc; 41 int cc;
@@ -47,10 +49,11 @@ static inline int do_siga_sync(struct subchannel_id schid,
47 return cc; 49 return cc;
48} 50}
49 51
50static inline int do_siga_input(struct subchannel_id schid, unsigned int mask) 52static inline int do_siga_input(unsigned long schid, unsigned int mask,
53 unsigned int fc)
51{ 54{
52 register unsigned long __fc asm ("0") = 1; 55 register unsigned long __fc asm ("0") = fc;
53 register struct subchannel_id __schid asm ("1") = schid; 56 register unsigned long __schid asm ("1") = schid;
54 register unsigned long __mask asm ("2") = mask; 57 register unsigned long __mask asm ("2") = mask;
55 int cc; 58 int cc;
56 59
@@ -279,16 +282,20 @@ void qdio_init_buf_states(struct qdio_irq *irq_ptr)
279static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output, 282static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
280 unsigned int input) 283 unsigned int input)
281{ 284{
285 unsigned long schid = *((u32 *) &q->irq_ptr->schid);
286 unsigned int fc = QDIO_SIGA_SYNC;
282 int cc; 287 int cc;
283 288
284 if (!need_siga_sync(q))
285 return 0;
286
287 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr); 289 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
288 qperf_inc(q, siga_sync); 290 qperf_inc(q, siga_sync);
289 291
290 cc = do_siga_sync(q->irq_ptr->schid, output, input); 292 if (is_qebsm(q)) {
291 if (cc) 293 schid = q->irq_ptr->sch_token;
294 fc |= QDIO_SIGA_QEBSM_FLAG;
295 }
296
297 cc = do_siga_sync(schid, output, input, fc);
298 if (unlikely(cc))
292 DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc); 299 DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
293 return cc; 300 return cc;
294} 301}
@@ -301,38 +308,22 @@ static inline int qdio_siga_sync_q(struct qdio_q *q)
301 return qdio_siga_sync(q, q->mask, 0); 308 return qdio_siga_sync(q, q->mask, 0);
302} 309}
303 310
304static inline int qdio_siga_sync_out(struct qdio_q *q)
305{
306 return qdio_siga_sync(q, ~0U, 0);
307}
308
309static inline int qdio_siga_sync_all(struct qdio_q *q)
310{
311 return qdio_siga_sync(q, ~0U, ~0U);
312}
313
314static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit) 311static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit)
315{ 312{
316 unsigned long schid; 313 unsigned long schid = *((u32 *) &q->irq_ptr->schid);
317 unsigned int fc = 0; 314 unsigned int fc = QDIO_SIGA_WRITE;
318 u64 start_time = 0; 315 u64 start_time = 0;
319 int cc; 316 int cc;
320 317
321 if (q->u.out.use_enh_siga)
322 fc = 3;
323
324 if (is_qebsm(q)) { 318 if (is_qebsm(q)) {
325 schid = q->irq_ptr->sch_token; 319 schid = q->irq_ptr->sch_token;
326 fc |= 0x80; 320 fc |= QDIO_SIGA_QEBSM_FLAG;
327 } 321 }
328 else
329 schid = *((u32 *)&q->irq_ptr->schid);
330
331again: 322again:
332 cc = do_siga_output(schid, q->mask, busy_bit, fc); 323 cc = do_siga_output(schid, q->mask, busy_bit, fc);
333 324
334 /* hipersocket busy condition */ 325 /* hipersocket busy condition */
335 if (*busy_bit) { 326 if (unlikely(*busy_bit)) {
336 WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2); 327 WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);
337 328
338 if (!start_time) { 329 if (!start_time) {
@@ -347,32 +338,41 @@ again:
347 338
348static inline int qdio_siga_input(struct qdio_q *q) 339static inline int qdio_siga_input(struct qdio_q *q)
349{ 340{
341 unsigned long schid = *((u32 *) &q->irq_ptr->schid);
342 unsigned int fc = QDIO_SIGA_READ;
350 int cc; 343 int cc;
351 344
352 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr); 345 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
353 qperf_inc(q, siga_read); 346 qperf_inc(q, siga_read);
354 347
355 cc = do_siga_input(q->irq_ptr->schid, q->mask); 348 if (is_qebsm(q)) {
356 if (cc) 349 schid = q->irq_ptr->sch_token;
350 fc |= QDIO_SIGA_QEBSM_FLAG;
351 }
352
353 cc = do_siga_input(schid, q->mask, fc);
354 if (unlikely(cc))
357 DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc); 355 DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
358 return cc; 356 return cc;
359} 357}
360 358
361static inline void qdio_sync_after_thinint(struct qdio_q *q) 359#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
360#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)
361
362static inline void qdio_sync_queues(struct qdio_q *q)
362{ 363{
363 if (pci_out_supported(q)) { 364 /* PCI capable outbound queues will also be scanned so sync them too */
364 if (need_siga_sync_thinint(q)) 365 if (pci_out_supported(q))
365 qdio_siga_sync_all(q); 366 qdio_siga_sync_all(q);
366 else if (need_siga_sync_out_thinint(q)) 367 else
367 qdio_siga_sync_out(q);
368 } else
369 qdio_siga_sync_q(q); 368 qdio_siga_sync_q(q);
370} 369}
371 370
372int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr, 371int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
373 unsigned char *state) 372 unsigned char *state)
374{ 373{
375 qdio_siga_sync_q(q); 374 if (need_siga_sync(q))
375 qdio_siga_sync_q(q);
376 return get_buf_states(q, bufnr, state, 1, 0); 376 return get_buf_states(q, bufnr, state, 1, 0);
377} 377}
378 378
@@ -549,7 +549,8 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
549 if (!atomic_read(&q->nr_buf_used)) 549 if (!atomic_read(&q->nr_buf_used))
550 return 1; 550 return 1;
551 551
552 qdio_siga_sync_q(q); 552 if (need_siga_sync(q))
553 qdio_siga_sync_q(q);
553 get_buf_state(q, q->first_to_check, &state, 0); 554 get_buf_state(q, q->first_to_check, &state, 0);
554 555
555 if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR) 556 if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
@@ -644,9 +645,12 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
644 int count, stop; 645 int count, stop;
645 unsigned char state; 646 unsigned char state;
646 647
647 if (((queue_type(q) != QDIO_IQDIO_QFMT) && !pci_out_supported(q)) || 648 if (need_siga_sync(q))
648 (queue_type(q) == QDIO_IQDIO_QFMT && multicast_outbound(q))) 649 if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
649 qdio_siga_sync_q(q); 650 !pci_out_supported(q)) ||
651 (queue_type(q) == QDIO_IQDIO_QFMT &&
652 multicast_outbound(q)))
653 qdio_siga_sync_q(q);
650 654
651 /* 655 /*
652 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved 656 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
@@ -818,7 +822,8 @@ static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
818static void __tiqdio_inbound_processing(struct qdio_q *q) 822static void __tiqdio_inbound_processing(struct qdio_q *q)
819{ 823{
820 qperf_inc(q, tasklet_inbound); 824 qperf_inc(q, tasklet_inbound);
821 qdio_sync_after_thinint(q); 825 if (need_siga_sync(q) && need_siga_sync_after_ai(q))
826 qdio_sync_queues(q);
822 827
823 /* 828 /*
824 * The interrupt could be caused by a PCI request. Check the 829 * The interrupt could be caused by a PCI request. Check the
@@ -898,16 +903,14 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
898 tasklet_schedule(&q->tasklet); 903 tasklet_schedule(&q->tasklet);
899 } 904 }
900 905
901 if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)) 906 if (!pci_out_supported(q))
902 return; 907 return;
903 908
904 for_each_output_queue(irq_ptr, q, i) { 909 for_each_output_queue(irq_ptr, q, i) {
905 if (qdio_outbound_q_done(q)) 910 if (qdio_outbound_q_done(q))
906 continue; 911 continue;
907 912 if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
908 if (!siga_syncs_out_pci(q))
909 qdio_siga_sync_q(q); 913 qdio_siga_sync_q(q);
910
911 tasklet_schedule(&q->tasklet); 914 tasklet_schedule(&q->tasklet);
912 } 915 }
913} 916}
@@ -970,6 +973,7 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
970 return; 973 return;
971 } 974 }
972 975
976 kstat_cpu(smp_processor_id()).irqs[IOINT_QDI]++;
973 if (irq_ptr->perf_stat_enabled) 977 if (irq_ptr->perf_stat_enabled)
974 irq_ptr->perf_stat.qdio_int++; 978 irq_ptr->perf_stat.qdio_int++;
975 979
@@ -1273,7 +1277,6 @@ int qdio_establish(struct qdio_initialize *init_data)
1273 } 1277 }
1274 1278
1275 qdio_setup_ssqd_info(irq_ptr); 1279 qdio_setup_ssqd_info(irq_ptr);
1276 DBF_EVENT("qDmmwc:%2x", irq_ptr->ssqd_desc.mmwc);
1277 DBF_EVENT("qib ac:%4x", irq_ptr->qib.ac); 1280 DBF_EVENT("qib ac:%4x", irq_ptr->qib.ac);
1278 1281
1279 /* qebsm is now setup if available, initialize buffer states */ 1282 /* qebsm is now setup if available, initialize buffer states */
@@ -1445,52 +1448,38 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
1445 used = atomic_add_return(count, &q->nr_buf_used); 1448 used = atomic_add_return(count, &q->nr_buf_used);
1446 BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q); 1449 BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);
1447 1450
1451 if (used == QDIO_MAX_BUFFERS_PER_Q)
1452 qperf_inc(q, outbound_queue_full);
1453
1448 if (callflags & QDIO_FLAG_PCI_OUT) { 1454 if (callflags & QDIO_FLAG_PCI_OUT) {
1449 q->u.out.pci_out_enabled = 1; 1455 q->u.out.pci_out_enabled = 1;
1450 qperf_inc(q, pci_request_int); 1456 qperf_inc(q, pci_request_int);
1451 } 1457 } else
1452 else
1453 q->u.out.pci_out_enabled = 0; 1458 q->u.out.pci_out_enabled = 0;
1454 1459
1455 if (queue_type(q) == QDIO_IQDIO_QFMT) { 1460 if (queue_type(q) == QDIO_IQDIO_QFMT) {
1456 if (multicast_outbound(q)) 1461 /* One SIGA-W per buffer required for unicast HiperSockets. */
1462 WARN_ON_ONCE(count > 1 && !multicast_outbound(q));
1463
1464 rc = qdio_kick_outbound_q(q);
1465 } else if (need_siga_sync(q)) {
1466 rc = qdio_siga_sync_q(q);
1467 } else {
1468 /* try to fast requeue buffers */
1469 get_buf_state(q, prev_buf(bufnr), &state, 0);
1470 if (state != SLSB_CU_OUTPUT_PRIMED)
1457 rc = qdio_kick_outbound_q(q); 1471 rc = qdio_kick_outbound_q(q);
1458 else 1472 else
1459 if ((q->irq_ptr->ssqd_desc.mmwc > 1) && 1473 qperf_inc(q, fast_requeue);
1460 (count > 1) &&
1461 (count <= q->irq_ptr->ssqd_desc.mmwc)) {
1462 /* exploit enhanced SIGA */
1463 q->u.out.use_enh_siga = 1;
1464 rc = qdio_kick_outbound_q(q);
1465 } else {
1466 /*
1467 * One siga-w per buffer required for unicast
1468 * HiperSockets.
1469 */
1470 q->u.out.use_enh_siga = 0;
1471 while (count--) {
1472 rc = qdio_kick_outbound_q(q);
1473 if (rc)
1474 goto out;
1475 }
1476 }
1477 goto out;
1478 }
1479
1480 if (need_siga_sync(q)) {
1481 qdio_siga_sync_q(q);
1482 goto out;
1483 } 1474 }
1484 1475
1485 /* try to fast requeue buffers */ 1476 /* in case of SIGA errors we must process the error immediately */
1486 get_buf_state(q, prev_buf(bufnr), &state, 0); 1477 if (used >= q->u.out.scan_threshold || rc)
1487 if (state != SLSB_CU_OUTPUT_PRIMED) 1478 tasklet_schedule(&q->tasklet);
1488 rc = qdio_kick_outbound_q(q);
1489 else 1479 else
1490 qperf_inc(q, fast_requeue); 1480 /* free the SBALs in case of no further traffic */
1491 1481 if (!timer_pending(&q->u.out.timer))
1492out: 1482 mod_timer(&q->u.out.timer, jiffies + HZ);
1493 tasklet_schedule(&q->tasklet);
1494 return rc; 1483 return rc;
1495} 1484}
1496 1485
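The rewritten tail of handle_outbound() replaces the unconditional tasklet_schedule() with a threshold decision: scan immediately when the queue is sufficiently full or a SIGA error must be handled, otherwise leave the SBALs to the one-second timer. A trivial sketch of that predicate (the numbers in main() are made up):

#include <stdio.h>

/* used: SBAL fill level after queueing; rc: SIGA return code
 * (non-zero means an error that must be processed right away). */
static int schedule_now(int used, int scan_threshold, int rc)
{
        return used >= scan_threshold || rc;
}

int main(void)
{
        printf("%d\n", schedule_now(10, 32, 0)); /* 0: wait for the timer */
        printf("%d\n", schedule_now(40, 32, 0)); /* 1: threshold reached */
        printf("%d\n", schedule_now(10, 32, 2)); /* 1: SIGA error */
        return 0;
}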
@@ -1550,7 +1539,7 @@ int qdio_start_irq(struct ccw_device *cdev, int nr)
1550 1539
1551 WARN_ON(queue_irqs_enabled(q)); 1540 WARN_ON(queue_irqs_enabled(q));
1552 1541
1553 if (!shared_ind(q->irq_ptr)) 1542 if (!shared_ind(q->irq_ptr->dsci))
1554 xchg(q->irq_ptr->dsci, 0); 1543 xchg(q->irq_ptr->dsci, 0);
1555 1544
1556 qdio_stop_polling(q); 1545 qdio_stop_polling(q);
@@ -1560,7 +1549,7 @@ int qdio_start_irq(struct ccw_device *cdev, int nr)
1560 * We need to check again to not lose initiative after 1549 * We need to check again to not lose initiative after
1561 * resetting the ACK state. 1550 * resetting the ACK state.
1562 */ 1551 */
1563 if (!shared_ind(q->irq_ptr) && *q->irq_ptr->dsci) 1552 if (!shared_ind(q->irq_ptr->dsci) && *q->irq_ptr->dsci)
1564 goto rescan; 1553 goto rescan;
1565 if (!qdio_inbound_q_done(q)) 1554 if (!qdio_inbound_q_done(q))
1566 goto rescan; 1555 goto rescan;
@@ -1600,12 +1589,14 @@ int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
1600 q = irq_ptr->input_qs[nr]; 1589 q = irq_ptr->input_qs[nr];
1601 WARN_ON(queue_irqs_enabled(q)); 1590 WARN_ON(queue_irqs_enabled(q));
1602 1591
1603 qdio_sync_after_thinint(q);
1604
1605 /* 1592 /*
1606 * The interrupt could be caused by a PCI request. Check the 1593 * Cannot rely on automatic sync after interrupt since queues may
1607 * PCI capable outbound queues. 1594 * also be examined without interrupt.
1608 */ 1595 */
1596 if (need_siga_sync(q))
1597 qdio_sync_queues(q);
1598
1599 /* check the PCI capable outbound queues. */
1609 qdio_check_outbound_after_thinint(q); 1600 qdio_check_outbound_after_thinint(q);
1610 1601
1611 if (!qdio_inbound_q_moved(q)) 1602 if (!qdio_inbound_q_moved(q))
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index a13cf7ec64b2..89107d0938c4 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -178,6 +178,7 @@ static void setup_queues(struct qdio_irq *irq_ptr,
178 setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i); 178 setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i);
179 179
180 q->is_input_q = 0; 180 q->is_input_q = 0;
181 q->u.out.scan_threshold = qdio_init->scan_threshold;
181 setup_storage_lists(q, irq_ptr, output_sbal_array, i); 182 setup_storage_lists(q, irq_ptr, output_sbal_array, i);
182 output_sbal_array += QDIO_MAX_BUFFERS_PER_Q; 183 output_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
183 184
@@ -196,14 +197,10 @@ static void process_ac_flags(struct qdio_irq *irq_ptr, unsigned char qdioac)
196 irq_ptr->siga_flag.output = 1; 197 irq_ptr->siga_flag.output = 1;
197 if (qdioac & AC1_SIGA_SYNC_NEEDED) 198 if (qdioac & AC1_SIGA_SYNC_NEEDED)
198 irq_ptr->siga_flag.sync = 1; 199 irq_ptr->siga_flag.sync = 1;
199 if (qdioac & AC1_AUTOMATIC_SYNC_ON_THININT) 200 if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_THININT))
200 irq_ptr->siga_flag.no_sync_ti = 1; 201 irq_ptr->siga_flag.sync_after_ai = 1;
201 if (qdioac & AC1_AUTOMATIC_SYNC_ON_OUT_PCI) 202 if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_OUT_PCI))
202 irq_ptr->siga_flag.no_sync_out_pci = 1; 203 irq_ptr->siga_flag.sync_out_after_pci = 1;
203
204 if (irq_ptr->siga_flag.no_sync_out_pci &&
205 irq_ptr->siga_flag.no_sync_ti)
206 irq_ptr->siga_flag.no_sync_out_ti = 1;
207} 204}
208 205
209static void check_and_setup_qebsm(struct qdio_irq *irq_ptr, 206static void check_and_setup_qebsm(struct qdio_irq *irq_ptr,
@@ -451,7 +448,7 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
451 char s[80]; 448 char s[80];
452 449
453 snprintf(s, 80, "qdio: %s %s on SC %x using " 450 snprintf(s, 80, "qdio: %s %s on SC %x using "
454 "AI:%d QEBSM:%d PCI:%d TDD:%d SIGA:%s%s%s%s%s%s\n", 451 "AI:%d QEBSM:%d PCI:%d TDD:%d SIGA:%s%s%s%s%s\n",
455 dev_name(&cdev->dev), 452 dev_name(&cdev->dev),
456 (irq_ptr->qib.qfmt == QDIO_QETH_QFMT) ? "OSA" : 453 (irq_ptr->qib.qfmt == QDIO_QETH_QFMT) ? "OSA" :
457 ((irq_ptr->qib.qfmt == QDIO_ZFCP_QFMT) ? "ZFCP" : "HS"), 454 ((irq_ptr->qib.qfmt == QDIO_ZFCP_QFMT) ? "ZFCP" : "HS"),
@@ -463,9 +460,8 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
463 (irq_ptr->siga_flag.input) ? "R" : " ", 460 (irq_ptr->siga_flag.input) ? "R" : " ",
464 (irq_ptr->siga_flag.output) ? "W" : " ", 461 (irq_ptr->siga_flag.output) ? "W" : " ",
465 (irq_ptr->siga_flag.sync) ? "S" : " ", 462 (irq_ptr->siga_flag.sync) ? "S" : " ",
466 (!irq_ptr->siga_flag.no_sync_ti) ? "A" : " ", 463 (irq_ptr->siga_flag.sync_after_ai) ? "A" : " ",
467 (!irq_ptr->siga_flag.no_sync_out_ti) ? "O" : " ", 464 (irq_ptr->siga_flag.sync_out_after_pci) ? "P" : " ");
468 (!irq_ptr->siga_flag.no_sync_out_pci) ? "P" : " ");
469 printk(KERN_INFO "%s", s); 465 printk(KERN_INFO "%s", s);
470} 466}
471 467
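The polarity flip in process_ac_flags() is easy to miss: the driver no longer records which syncs the adapter performs automatically (no_sync_*) but which syncs it still has to issue itself. A sketch of the decode; the AC1 bit values are assumptions, only the inversion relative to the old code matters:

#include <stdio.h>

#define AC1_SIGA_SYNC_NEEDED          0x10 /* assumed bit values */
#define AC1_AUTOMATIC_SYNC_ON_THININT 0x08
#define AC1_AUTOMATIC_SYNC_ON_OUT_PCI 0x04

struct siga_flags { int sync, sync_after_ai, sync_out_after_pci; };

static struct siga_flags decode(unsigned char qdioac)
{
        struct siga_flags f = {0};

        f.sync = !!(qdioac & AC1_SIGA_SYNC_NEEDED);
        /* automatic sync NOT offered -> we must sync ourselves */
        f.sync_after_ai = !(qdioac & AC1_AUTOMATIC_SYNC_ON_THININT);
        f.sync_out_after_pci = !(qdioac & AC1_AUTOMATIC_SYNC_ON_OUT_PCI);
        return f;
}

int main(void)
{
        struct siga_flags f = decode(AC1_SIGA_SYNC_NEEDED);

        printf("S:%d A:%d P:%d\n", f.sync, f.sync_after_ai,
               f.sync_out_after_pci); /* S:1 A:1 P:1 */
        return 0;
}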
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 5d9c66627b6e..5c4e741d8221 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -8,6 +8,7 @@
8 */ 8 */
9#include <linux/io.h> 9#include <linux/io.h>
10#include <linux/slab.h> 10#include <linux/slab.h>
11#include <linux/kernel_stat.h>
11#include <asm/atomic.h> 12#include <asm/atomic.h>
12#include <asm/debug.h> 13#include <asm/debug.h>
13#include <asm/qdio.h> 14#include <asm/qdio.h>
@@ -35,22 +36,8 @@ static u8 *tiqdio_alsi;
35 36
36struct indicator_t *q_indicators; 37struct indicator_t *q_indicators;
37 38
38static int css_qdio_omit_svs;
39
40static u64 last_ai_time; 39static u64 last_ai_time;
41 40
42static inline unsigned long do_clear_global_summary(void)
43{
44 register unsigned long __fn asm("1") = 3;
45 register unsigned long __tmp asm("2");
46 register unsigned long __time asm("3");
47
48 asm volatile(
49 " .insn rre,0xb2650000,2,0"
50 : "+d" (__fn), "=d" (__tmp), "=d" (__time));
51 return __time;
52}
53
54/* returns addr for the device state change indicator */ 41/* returns addr for the device state change indicator */
55static u32 *get_indicator(void) 42static u32 *get_indicator(void)
56{ 43{
@@ -83,10 +70,6 @@ void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
83 struct qdio_q *q; 70 struct qdio_q *q;
84 int i; 71 int i;
85 72
86 /* No TDD facility? If we must use SIGA-s we can also omit SVS. */
87 if (!css_qdio_omit_svs && irq_ptr->siga_flag.sync)
88 css_qdio_omit_svs = 1;
89
90 mutex_lock(&tiq_list_lock); 73 mutex_lock(&tiq_list_lock);
91 for_each_input_queue(irq_ptr, q, i) 74 for_each_input_queue(irq_ptr, q, i)
92 list_add_rcu(&q->entry, &tiq_list); 75 list_add_rcu(&q->entry, &tiq_list);
@@ -112,9 +95,9 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
112 } 95 }
113} 96}
114 97
115static inline int shared_ind_used(void) 98static inline u32 shared_ind_set(void)
116{ 99{
117 return atomic_read(&q_indicators[TIQDIO_SHARED_IND].count); 100 return q_indicators[TIQDIO_SHARED_IND].ind;
118} 101}
119 102
120/** 103/**
@@ -124,20 +107,11 @@ static inline int shared_ind_used(void)
124 */ 107 */
125static void tiqdio_thinint_handler(void *alsi, void *data) 108static void tiqdio_thinint_handler(void *alsi, void *data)
126{ 109{
110 u32 si_used = shared_ind_set();
127 struct qdio_q *q; 111 struct qdio_q *q;
128 112
129 last_ai_time = S390_lowcore.int_clock; 113 last_ai_time = S390_lowcore.int_clock;
130 114 kstat_cpu(smp_processor_id()).irqs[IOINT_QAI]++;
131 /*
132 * SVS only when needed: issue SVS to benefit from iqdio interrupt
133 * avoidance (SVS clears adapter interrupt suppression overwrite).
134 */
135 if (!css_qdio_omit_svs)
136 do_clear_global_summary();
137
138 /* reset local summary indicator */
139 if (shared_ind_used())
140 xchg(tiqdio_alsi, 0);
141 115
142 /* protect tiq_list entries, only changed in activate or shutdown */ 116 /* protect tiq_list entries, only changed in activate or shutdown */
143 rcu_read_lock(); 117 rcu_read_lock();
@@ -146,7 +120,10 @@ static void tiqdio_thinint_handler(void *alsi, void *data)
146 list_for_each_entry_rcu(q, &tiq_list, entry) { 120 list_for_each_entry_rcu(q, &tiq_list, entry) {
147 121
148 /* only process queues from changed sets */ 122 /* only process queues from changed sets */
149 if (!*q->irq_ptr->dsci) 123 if (unlikely(shared_ind(q->irq_ptr->dsci))) {
124 if (!si_used)
125 continue;
126 } else if (!*q->irq_ptr->dsci)
150 continue; 127 continue;
151 128
152 if (q->u.in.queue_start_poll) { 129 if (q->u.in.queue_start_poll) {
@@ -162,7 +139,7 @@ static void tiqdio_thinint_handler(void *alsi, void *data)
162 q->irq_ptr->int_parm); 139 q->irq_ptr->int_parm);
163 } else { 140 } else {
164 /* only clear it if the indicator is non-shared */ 141 /* only clear it if the indicator is non-shared */
165 if (!shared_ind(q->irq_ptr)) 142 if (!shared_ind(q->irq_ptr->dsci))
166 xchg(q->irq_ptr->dsci, 0); 143 xchg(q->irq_ptr->dsci, 0);
167 /* 144 /*
168 * Call inbound processing but not directly 145 * Call inbound processing but not directly
@@ -178,13 +155,8 @@ static void tiqdio_thinint_handler(void *alsi, void *data)
178 * If the shared indicator was used clear it now after all queues 155 * If the shared indicator was used clear it now after all queues
179 * were processed. 156 * were processed.
180 */ 157 */
181 if (shared_ind_used()) { 158 if (si_used && shared_ind_set())
182 xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0); 159 xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);
183
184 /* prevent racing */
185 if (*tiqdio_alsi)
186 xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 1 << 7);
187 }
188} 160}
189 161
190static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset) 162static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
@@ -269,12 +241,6 @@ int qdio_establish_thinint(struct qdio_irq *irq_ptr)
269{ 241{
270 if (!is_thinint_irq(irq_ptr)) 242 if (!is_thinint_irq(irq_ptr))
271 return 0; 243 return 0;
272
273 /* Check for aif time delay disablement. If installed,
274 * omit SVS even under LPAR
275 */
276 if (css_general_characteristics.aif_tdd)
277 css_qdio_omit_svs = 1;
278 return set_subchannel_ind(irq_ptr, 0); 244 return set_subchannel_ind(irq_ptr, 0);
279} 245}
280 246
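The reworked tiqdio_thinint_handler() samples the shared indicator once up front and filters queues against that snapshot. A compressed sketch of the scan; queue processing and the clearing of private indicators are omitted:

#include <stdio.h>

#define NQUEUES 3

static unsigned int shared_dsci;          /* shared indicator word */
static unsigned int private_dsci[NQUEUES];

static void scan(const int is_shared[NQUEUES])
{
        unsigned int si_used = shared_dsci; /* snapshot, as in the patch */
        int i;

        for (i = 0; i < NQUEUES; i++) {
                if (is_shared[i]) {
                        if (!si_used)
                                continue;
                } else if (!private_dsci[i])
                        continue;
                printf("process queue %d\n", i);
        }
        /* clear the shared indicator only after all queues ran */
        if (si_used && shared_dsci)
                shared_dsci = 0;
}

int main(void)
{
        const int is_shared[NQUEUES] = { 1, 0, 0 };

        shared_dsci = 1;
        private_dsci[1] = 1;
        scan(is_shared); /* processes queues 0 and 1, skips 2 */
        return 0;
}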
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 8fd8c62455e9..67302b944ab3 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -27,6 +27,7 @@
27#define KMSG_COMPONENT "ap" 27#define KMSG_COMPONENT "ap"
28#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 28#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
29 29
30#include <linux/kernel_stat.h>
30#include <linux/module.h> 31#include <linux/module.h>
31#include <linux/init.h> 32#include <linux/init.h>
32#include <linux/delay.h> 33#include <linux/delay.h>
@@ -154,7 +155,7 @@ static inline int ap_instructions_available(void)
154 */ 155 */
155static int ap_interrupts_available(void) 156static int ap_interrupts_available(void)
156{ 157{
157 return test_facility(1) && test_facility(2); 158 return test_facility(2) && test_facility(65);
158} 159}
159 160
160/** 161/**
@@ -221,6 +222,69 @@ ap_queue_interruption_control(ap_qid_t qid, void *ind)
221} 222}
222#endif 223#endif
223 224
225static inline struct ap_queue_status __ap_4096_commands_available(ap_qid_t qid,
226 int *support)
227{
228 register unsigned long reg0 asm ("0") = 0UL | qid | (1UL << 23);
229 register struct ap_queue_status reg1 asm ("1");
230 register unsigned long reg2 asm ("2") = 0UL;
231
232 asm volatile(
233 ".long 0xb2af0000\n"
234 "0: la %1,0\n"
235 "1:\n"
236 EX_TABLE(0b, 1b)
237 : "+d" (reg0), "=d" (reg1), "=d" (reg2)
238 :
239 : "cc");
240
241 if (reg2 & 0x6000000000000000ULL)
242 *support = 1;
243 else
244 *support = 0;
245
246 return reg1;
247}
248
249/**
250 * ap_4096_commands_available(): Check for availability of 4096 bit RSA
251 * support.
252 * @qid: The AP queue number
253 *
254 * Returns 1 if 4096 bit RSA keys are supported for the AP, returns 0 if not.
255 */
256int ap_4096_commands_available(ap_qid_t qid)
257{
258 struct ap_queue_status status;
259 int i, support = 0;
260 status = __ap_4096_commands_available(qid, &support);
261
262 for (i = 0; i < AP_MAX_RESET; i++) {
263 switch (status.response_code) {
264 case AP_RESPONSE_NORMAL:
265 return support;
266 case AP_RESPONSE_RESET_IN_PROGRESS:
267 case AP_RESPONSE_BUSY:
268 break;
269 case AP_RESPONSE_Q_NOT_AVAIL:
270 case AP_RESPONSE_DECONFIGURED:
271 case AP_RESPONSE_CHECKSTOPPED:
272 case AP_RESPONSE_INVALID_ADDRESS:
273 return 0;
274 case AP_RESPONSE_OTHERWISE_CHANGED:
275 break;
276 default:
277 break;
278 }
279 if (i < AP_MAX_RESET - 1) {
280 udelay(5);
281 status = __ap_4096_commands_available(qid, &support);
282 }
283 }
284 return support;
285}
286EXPORT_SYMBOL(ap_4096_commands_available);
287
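The capability test above keys off two bits of the facility word returned in reg2. A sketch of just that test; the AP_FAC_4096_MEX/AP_FAC_4096_CRT names and per-bit meanings are illustrative assumptions, only the combined mask 0x6000000000000000 comes from the patch:

#include <stdio.h>

#define AP_FAC_4096_MEX 0x4000000000000000ULL /* assumed meaning */
#define AP_FAC_4096_CRT 0x2000000000000000ULL /* assumed meaning */

static int supports_4096(unsigned long long facility_word)
{
        return (facility_word & (AP_FAC_4096_MEX | AP_FAC_4096_CRT)) != 0;
}

int main(void)
{
        printf("%d\n", supports_4096(0x4000000000000000ULL)); /* 1 */
        printf("%d\n", supports_4096(0));                     /* 0 */
        return 0;
}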
224/** 288/**
225 * ap_queue_enable_interruption(): Enable interruption on an AP. 289 * ap_queue_enable_interruption(): Enable interruption on an AP.
226 * @qid: The AP queue number 290 * @qid: The AP queue number
@@ -1042,6 +1106,7 @@ out:
1042 1106
1043static void ap_interrupt_handler(void *unused1, void *unused2) 1107static void ap_interrupt_handler(void *unused1, void *unused2)
1044{ 1108{
1109 kstat_cpu(smp_processor_id()).irqs[IOINT_APB]++;
1045 tasklet_schedule(&ap_tasklet); 1110 tasklet_schedule(&ap_tasklet);
1046} 1111}
1047 1112
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 4785d07cd447..08b9738285b4 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -196,4 +196,6 @@ void ap_flush_queue(struct ap_device *ap_dev);
196int ap_module_init(void); 196int ap_module_init(void);
197void ap_module_exit(void); 197void ap_module_exit(void);
198 198
199int ap_4096_commands_available(ap_qid_t qid);
200
199#endif /* _AP_BUS_H_ */ 201#endif /* _AP_BUS_H_ */
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 7fca9c10ffcf..8e65447f76b7 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -396,8 +396,15 @@ static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
396 if (copied == 0) { 396 if (copied == 0) {
397 unsigned int len; 397 unsigned int len;
398 spin_unlock_bh(&zcrypt_device_lock); 398 spin_unlock_bh(&zcrypt_device_lock);
399 /* len is max 256 / 2 - 120 = 8 */ 399 /* len is max 256 / 2 - 120 = 8
400 len = crt->inputdatalength / 2 - 120; 400 * For bigger key sizes just assume the length of
401 * the leading 0s is 8, as stated in the requirements for the
402 * ica_rsa_modexpo_crt struct in zcrypt.h.
403 */
404 if (crt->inputdatalength <= 256)
405 len = crt->inputdatalength / 2 - 120;
406 else
407 len = 8;
401 if (len > sizeof(z1)) 408 if (len > sizeof(z1))
402 return -EFAULT; 409 return -EFAULT;
403 z1 = z2 = z3 = 0; 410 z1 = z2 = z3 = 0;
@@ -405,6 +412,7 @@ static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
405 copy_from_user(&z2, crt->bp_key, len) || 412 copy_from_user(&z2, crt->bp_key, len) ||
406 copy_from_user(&z3, crt->u_mult_inv, len)) 413 copy_from_user(&z3, crt->u_mult_inv, len))
407 return -EFAULT; 414 return -EFAULT;
415 z1 = z2 = z3 = 0;
408 copied = 1; 416 copied = 1;
409 /* 417 /*
410 * We have to restart device lookup - 418 * We have to restart device lookup -
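The length computation behind the new comment block checks out in isolation. A sketch, meaningful only for the key sizes the ioctl actually accepts (smaller inputs would underflow the subtraction):

#include <stdio.h>

/* Bytes of leading zeros sampled from np_prime/bp_key/u_mult_inv:
 * derived from the CRT field layout up to 2048-bit keys (at most 8),
 * fixed at the documented 8 for the new larger keys. */
static unsigned int leading_zero_len(unsigned int inputdatalength)
{
        if (inputdatalength <= 256)
                return inputdatalength / 2 - 120;
        return 8;
}

int main(void)
{
        printf("%u\n", leading_zero_len(256)); /* 8 */
        printf("%u\n", leading_zero_len(512)); /* 8 */
        printf("%u\n", leading_zero_len(242)); /* 1 */
        return 0;
}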
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index 8e7ffbf2466c..88ebd114735b 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -109,6 +109,7 @@ struct zcrypt_device {
109 int request_count; /* # current requests. */ 109 int request_count; /* # current requests. */
110 110
111 struct ap_message reply; /* Per-device reply structure. */ 111 struct ap_message reply; /* Per-device reply structure. */
112 int max_exp_bit_length;
112}; 113};
113 114
114struct zcrypt_device *zcrypt_device_alloc(size_t); 115struct zcrypt_device *zcrypt_device_alloc(size_t);
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index 9c409efa1ecf..2176d00b395e 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -41,7 +41,7 @@
41#define CEX2A_MIN_MOD_SIZE 1 /* 8 bits */ 41#define CEX2A_MIN_MOD_SIZE 1 /* 8 bits */
42#define CEX2A_MAX_MOD_SIZE 256 /* 2048 bits */ 42#define CEX2A_MAX_MOD_SIZE 256 /* 2048 bits */
43#define CEX3A_MIN_MOD_SIZE CEX2A_MIN_MOD_SIZE 43#define CEX3A_MIN_MOD_SIZE CEX2A_MIN_MOD_SIZE
44#define CEX3A_MAX_MOD_SIZE CEX2A_MAX_MOD_SIZE 44#define CEX3A_MAX_MOD_SIZE 512 /* 4096 bits */
45 45
46#define CEX2A_SPEED_RATING 970 46#define CEX2A_SPEED_RATING 970
47#define CEX3A_SPEED_RATING 900 /* Fixme: Needs finetuning */ 47#define CEX3A_SPEED_RATING 900 /* Fixme: Needs finetuning */
@@ -49,8 +49,10 @@
49#define CEX2A_MAX_MESSAGE_SIZE 0x390 /* sizeof(struct type50_crb2_msg) */ 49#define CEX2A_MAX_MESSAGE_SIZE 0x390 /* sizeof(struct type50_crb2_msg) */
50#define CEX2A_MAX_RESPONSE_SIZE 0x110 /* max outputdatalength + type80_hdr */ 50#define CEX2A_MAX_RESPONSE_SIZE 0x110 /* max outputdatalength + type80_hdr */
51 51
52#define CEX3A_MAX_MESSAGE_SIZE CEX2A_MAX_MESSAGE_SIZE 52#define CEX3A_MAX_RESPONSE_SIZE 0x210 /* 512 bit modulus
53#define CEX3A_MAX_RESPONSE_SIZE CEX2A_MAX_RESPONSE_SIZE 53 * (max outputdatalength) +
54 * type80_hdr */
55#define CEX3A_MAX_MESSAGE_SIZE sizeof(struct type50_crb3_msg)
54 56
55#define CEX2A_CLEANUP_TIME (15*HZ) 57#define CEX2A_CLEANUP_TIME (15*HZ)
56#define CEX3A_CLEANUP_TIME CEX2A_CLEANUP_TIME 58#define CEX3A_CLEANUP_TIME CEX2A_CLEANUP_TIME
@@ -110,7 +112,7 @@ static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_device *zdev,
110 mod = meb1->modulus + sizeof(meb1->modulus) - mod_len; 112 mod = meb1->modulus + sizeof(meb1->modulus) - mod_len;
111 exp = meb1->exponent + sizeof(meb1->exponent) - mod_len; 113 exp = meb1->exponent + sizeof(meb1->exponent) - mod_len;
112 inp = meb1->message + sizeof(meb1->message) - mod_len; 114 inp = meb1->message + sizeof(meb1->message) - mod_len;
113 } else { 115 } else if (mod_len <= 256) {
114 struct type50_meb2_msg *meb2 = ap_msg->message; 116 struct type50_meb2_msg *meb2 = ap_msg->message;
115 memset(meb2, 0, sizeof(*meb2)); 117 memset(meb2, 0, sizeof(*meb2));
116 ap_msg->length = sizeof(*meb2); 118 ap_msg->length = sizeof(*meb2);
@@ -120,6 +122,17 @@ static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_device *zdev,
120 mod = meb2->modulus + sizeof(meb2->modulus) - mod_len; 122 mod = meb2->modulus + sizeof(meb2->modulus) - mod_len;
121 exp = meb2->exponent + sizeof(meb2->exponent) - mod_len; 123 exp = meb2->exponent + sizeof(meb2->exponent) - mod_len;
122 inp = meb2->message + sizeof(meb2->message) - mod_len; 124 inp = meb2->message + sizeof(meb2->message) - mod_len;
125 } else {
126 /* mod_len > 256 bytes, i.e. a 4096 bit RSA key */
127 struct type50_meb3_msg *meb3 = ap_msg->message;
128 memset(meb3, 0, sizeof(*meb3));
129 ap_msg->length = sizeof(*meb3);
130 meb3->header.msg_type_code = TYPE50_TYPE_CODE;
131 meb3->header.msg_len = sizeof(*meb3);
132 meb3->keyblock_type = TYPE50_MEB3_FMT;
133 mod = meb3->modulus + sizeof(meb3->modulus) - mod_len;
134 exp = meb3->exponent + sizeof(meb3->exponent) - mod_len;
135 inp = meb3->message + sizeof(meb3->message) - mod_len;
123 } 136 }
124 137
125 if (copy_from_user(mod, mex->n_modulus, mod_len) || 138 if (copy_from_user(mod, mex->n_modulus, mod_len) ||
@@ -142,7 +155,7 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev,
142 struct ap_message *ap_msg, 155 struct ap_message *ap_msg,
143 struct ica_rsa_modexpo_crt *crt) 156 struct ica_rsa_modexpo_crt *crt)
144{ 157{
145 int mod_len, short_len, long_len, long_offset; 158 int mod_len, short_len, long_len, long_offset, limit;
146 unsigned char *p, *q, *dp, *dq, *u, *inp; 159 unsigned char *p, *q, *dp, *dq, *u, *inp;
147 160
148 mod_len = crt->inputdatalength; 161 mod_len = crt->inputdatalength;
@@ -152,14 +165,20 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev,
152 /* 165 /*
153 * CEX2A cannot handle p, dp, or U > 128 bytes. 166 * CEX2A cannot handle p, dp, or U > 128 bytes.
154 * If we have one of these, we need to do extra checking. 167 * If we have one of these, we need to do extra checking.
168 * For CEX3A the limit is 256 bytes.
155 */ 169 */
156 if (long_len > 128) { 170 if (zdev->max_mod_size == CEX3A_MAX_MOD_SIZE)
171 limit = 256;
172 else
173 limit = 128;
174
175 if (long_len > limit) {
157 /* 176 /*
158 * zcrypt_rsa_crt already checked for the leading 177 * zcrypt_rsa_crt already checked for the leading
159 * zeroes of np_prime, bp_key and u_mult_inc. 178 * zeroes of np_prime, bp_key and u_mult_inc.
160 */ 179 */
161 long_offset = long_len - 128; 180 long_offset = long_len - limit;
162 long_len = 128; 181 long_len = limit;
163 } else 182 } else
164 long_offset = 0; 183 long_offset = 0;
165 184
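The limit selection above generalizes the old fixed 128-byte trim. A sketch of the offset/length arithmetic for the long CRT operands; the 264-byte example corresponds to a 4096-bit key, where long_len = mod_len / 2 + 8:

#include <stdio.h>

/* Leading zero bytes of p, dp and u beyond the per-card limit
 * (128 bytes on CEX2A, 256 on CEX3A) are skipped via the offset. */
static void trim_long(int long_len, int limit, int *offset, int *len)
{
        if (long_len > limit) {
                *offset = long_len - limit;
                *len = limit;
        } else {
                *offset = 0;
                *len = long_len;
        }
}

int main(void)
{
        int off, len;

        trim_long(264, 256, &off, &len);        /* 4096-bit key, CEX3A */
        printf("offset=%d len=%d\n", off, len); /* offset=8 len=256 */
        return 0;
}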
@@ -180,7 +199,7 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev,
180 dq = crb1->dq + sizeof(crb1->dq) - short_len; 199 dq = crb1->dq + sizeof(crb1->dq) - short_len;
181 u = crb1->u + sizeof(crb1->u) - long_len; 200 u = crb1->u + sizeof(crb1->u) - long_len;
182 inp = crb1->message + sizeof(crb1->message) - mod_len; 201 inp = crb1->message + sizeof(crb1->message) - mod_len;
183 } else { 202 } else if (long_len <= 128) {
184 struct type50_crb2_msg *crb2 = ap_msg->message; 203 struct type50_crb2_msg *crb2 = ap_msg->message;
185 memset(crb2, 0, sizeof(*crb2)); 204 memset(crb2, 0, sizeof(*crb2));
186 ap_msg->length = sizeof(*crb2); 205 ap_msg->length = sizeof(*crb2);
@@ -193,6 +212,20 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev,
193 dq = crb2->dq + sizeof(crb2->dq) - short_len; 212 dq = crb2->dq + sizeof(crb2->dq) - short_len;
194 u = crb2->u + sizeof(crb2->u) - long_len; 213 u = crb2->u + sizeof(crb2->u) - long_len;
195 inp = crb2->message + sizeof(crb2->message) - mod_len; 214 inp = crb2->message + sizeof(crb2->message) - mod_len;
215 } else {
216 /* long_len > 128 */
217 struct type50_crb3_msg *crb3 = ap_msg->message;
218 memset(crb3, 0, sizeof(*crb3));
219 ap_msg->length = sizeof(*crb3);
220 crb3->header.msg_type_code = TYPE50_TYPE_CODE;
221 crb3->header.msg_len = sizeof(*crb3);
222 crb3->keyblock_type = TYPE50_CRB3_FMT;
223 p = crb3->p + sizeof(crb3->p) - long_len;
224 q = crb3->q + sizeof(crb3->q) - short_len;
225 dp = crb3->dp + sizeof(crb3->dp) - long_len;
226 dq = crb3->dq + sizeof(crb3->dq) - short_len;
227 u = crb3->u + sizeof(crb3->u) - long_len;
228 inp = crb3->message + sizeof(crb3->message) - mod_len;
196 } 229 }
197 230
198 if (copy_from_user(p, crt->np_prime + long_offset, long_len) || 231 if (copy_from_user(p, crt->np_prime + long_offset, long_len) ||
@@ -203,7 +236,6 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev,
203 copy_from_user(inp, crt->inputdata, mod_len)) 236 copy_from_user(inp, crt->inputdata, mod_len))
204 return -EFAULT; 237 return -EFAULT;
205 238
206
207 return 0; 239 return 0;
208} 240}
209 241
@@ -230,7 +262,10 @@ static int convert_type80(struct zcrypt_device *zdev,
230 zdev->online = 0; 262 zdev->online = 0;
231 return -EAGAIN; /* repeat the request on a different device. */ 263 return -EAGAIN; /* repeat the request on a different device. */
232 } 264 }
233 BUG_ON(t80h->len > CEX2A_MAX_RESPONSE_SIZE); 265 if (zdev->user_space_type == ZCRYPT_CEX2A)
266 BUG_ON(t80h->len > CEX2A_MAX_RESPONSE_SIZE);
267 else
268 BUG_ON(t80h->len > CEX3A_MAX_RESPONSE_SIZE);
234 data = reply->message + t80h->len - outputdatalength; 269 data = reply->message + t80h->len - outputdatalength;
235 if (copy_to_user(outputdata, data, outputdatalength)) 270 if (copy_to_user(outputdata, data, outputdatalength))
236 return -EFAULT; 271 return -EFAULT;
@@ -282,7 +317,10 @@ static void zcrypt_cex2a_receive(struct ap_device *ap_dev,
282 } 317 }
283 t80h = reply->message; 318 t80h = reply->message;
284 if (t80h->type == TYPE80_RSP_CODE) { 319 if (t80h->type == TYPE80_RSP_CODE) {
285 length = min(CEX2A_MAX_RESPONSE_SIZE, (int) t80h->len); 320 if (ap_dev->device_type == AP_DEVICE_TYPE_CEX2A)
321 length = min(CEX2A_MAX_RESPONSE_SIZE, (int) t80h->len);
322 else
323 length = min(CEX3A_MAX_RESPONSE_SIZE, (int) t80h->len);
286 memcpy(msg->message, reply->message, length); 324 memcpy(msg->message, reply->message, length);
287 } else 325 } else
288 memcpy(msg->message, reply->message, sizeof error_reply); 326 memcpy(msg->message, reply->message, sizeof error_reply);
@@ -307,7 +345,10 @@ static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev,
307 int rc; 345 int rc;
308 346
309 ap_init_message(&ap_msg); 347 ap_init_message(&ap_msg);
310 ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL); 348 if (zdev->user_space_type == ZCRYPT_CEX2A)
349 ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL);
350 else
351 ap_msg.message = kmalloc(CEX3A_MAX_MESSAGE_SIZE, GFP_KERNEL);
311 if (!ap_msg.message) 352 if (!ap_msg.message)
312 return -ENOMEM; 353 return -ENOMEM;
313 ap_msg.psmid = (((unsigned long long) current->pid) << 32) + 354 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
@@ -345,7 +386,10 @@ static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev,
345 int rc; 386 int rc;
346 387
347 ap_init_message(&ap_msg); 388 ap_init_message(&ap_msg);
348 ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL); 389 if (zdev->user_space_type == ZCRYPT_CEX2A)
390 ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL);
391 else
392 ap_msg.message = kmalloc(CEX3A_MAX_MESSAGE_SIZE, GFP_KERNEL);
349 if (!ap_msg.message) 393 if (!ap_msg.message)
350 return -ENOMEM; 394 return -ENOMEM;
351 ap_msg.psmid = (((unsigned long long) current->pid) << 32) + 395 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
@@ -397,6 +441,7 @@ static int zcrypt_cex2a_probe(struct ap_device *ap_dev)
397 zdev->max_mod_size = CEX2A_MAX_MOD_SIZE; 441 zdev->max_mod_size = CEX2A_MAX_MOD_SIZE;
398 zdev->short_crt = 1; 442 zdev->short_crt = 1;
399 zdev->speed_rating = CEX2A_SPEED_RATING; 443 zdev->speed_rating = CEX2A_SPEED_RATING;
444 zdev->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
400 break; 445 break;
401 case AP_DEVICE_TYPE_CEX3A: 446 case AP_DEVICE_TYPE_CEX3A:
402 zdev = zcrypt_device_alloc(CEX3A_MAX_RESPONSE_SIZE); 447 zdev = zcrypt_device_alloc(CEX3A_MAX_RESPONSE_SIZE);
@@ -404,8 +449,13 @@ static int zcrypt_cex2a_probe(struct ap_device *ap_dev)
404 return -ENOMEM; 449 return -ENOMEM;
405 zdev->user_space_type = ZCRYPT_CEX3A; 450 zdev->user_space_type = ZCRYPT_CEX3A;
406 zdev->type_string = "CEX3A"; 451 zdev->type_string = "CEX3A";
407 zdev->min_mod_size = CEX3A_MIN_MOD_SIZE; 452 zdev->min_mod_size = CEX2A_MIN_MOD_SIZE;
408 zdev->max_mod_size = CEX3A_MAX_MOD_SIZE; 453 zdev->max_mod_size = CEX2A_MAX_MOD_SIZE;
454 zdev->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
455 if (ap_4096_commands_available(ap_dev->qid)) {
456 zdev->max_mod_size = CEX3A_MAX_MOD_SIZE;
457 zdev->max_exp_bit_length = CEX3A_MAX_MOD_SIZE;
458 }
409 zdev->short_crt = 1; 459 zdev->short_crt = 1;
410 zdev->speed_rating = CEX3A_SPEED_RATING; 460 zdev->speed_rating = CEX3A_SPEED_RATING;
411 break; 461 break;
diff --git a/drivers/s390/crypto/zcrypt_cex2a.h b/drivers/s390/crypto/zcrypt_cex2a.h
index 8f69d1dacab8..0350665810cf 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.h
+++ b/drivers/s390/crypto/zcrypt_cex2a.h
@@ -51,8 +51,10 @@ struct type50_hdr {
51 51
52#define TYPE50_MEB1_FMT 0x0001 52#define TYPE50_MEB1_FMT 0x0001
53#define TYPE50_MEB2_FMT 0x0002 53#define TYPE50_MEB2_FMT 0x0002
54#define TYPE50_MEB3_FMT 0x0003
54#define TYPE50_CRB1_FMT 0x0011 55#define TYPE50_CRB1_FMT 0x0011
55#define TYPE50_CRB2_FMT 0x0012 56#define TYPE50_CRB2_FMT 0x0012
57#define TYPE50_CRB3_FMT 0x0013
56 58
57/* Mod-Exp, with a small modulus */ 59/* Mod-Exp, with a small modulus */
58struct type50_meb1_msg { 60struct type50_meb1_msg {
@@ -74,6 +76,16 @@ struct type50_meb2_msg {
74 unsigned char message[256]; 76 unsigned char message[256];
75} __attribute__((packed)); 77} __attribute__((packed));
76 78
79/* Mod-Exp, with a larger modulus */
80struct type50_meb3_msg {
81 struct type50_hdr header;
82 unsigned short keyblock_type; /* 0x0003 */
83 unsigned char reserved[6];
84 unsigned char exponent[512];
85 unsigned char modulus[512];
86 unsigned char message[512];
87} __attribute__((packed));
88
77/* CRT, with a small modulus */ 89/* CRT, with a small modulus */
78struct type50_crb1_msg { 90struct type50_crb1_msg {
79 struct type50_hdr header; 91 struct type50_hdr header;
@@ -100,6 +112,19 @@ struct type50_crb2_msg {
100 unsigned char message[256]; 112 unsigned char message[256];
101} __attribute__((packed)); 113} __attribute__((packed));
102 114
115/* CRT, with a larger modulus */
116struct type50_crb3_msg {
117 struct type50_hdr header;
118 unsigned short keyblock_type; /* 0x0013 */
119 unsigned char reserved[6];
120 unsigned char p[256];
121 unsigned char q[256];
122 unsigned char dp[256];
123 unsigned char dq[256];
124 unsigned char u[256];
125 unsigned char message[512];
126} __attribute__((packed));
127
103/** 128/**
104 * The type 80 response family is associated with a CEX2A card. 129 * The type 80 response family is associated with a CEX2A card.
105 * 130 *
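All of the type50 request builders place their operands with the same idiom: right-justify into a zero-filled fixed-size field via dst = field + sizeof(field) - len. A tiny standalone illustration (the 16-byte field is chosen arbitrarily):

#include <stdio.h>
#include <string.h>

int main(void)
{
        unsigned char field[16] = {0};
        const unsigned char op[4] = {0xde, 0xad, 0xbe, 0xef};
        size_t i, len = sizeof(op);

        /* right-justified copy, upper bytes stay zero */
        memcpy(field + sizeof(field) - len, op, len);

        for (i = 0; i < sizeof(field); i++)
                printf("%02x", field[i]);
        printf("\n"); /* 000000000000000000000000deadbeef */
        return 0;
}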
diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c
index 09e934b295a0..1afb69c75fea 100644
--- a/drivers/s390/crypto/zcrypt_pcica.c
+++ b/drivers/s390/crypto/zcrypt_pcica.c
@@ -373,6 +373,7 @@ static int zcrypt_pcica_probe(struct ap_device *ap_dev)
373 zdev->min_mod_size = PCICA_MIN_MOD_SIZE; 373 zdev->min_mod_size = PCICA_MIN_MOD_SIZE;
374 zdev->max_mod_size = PCICA_MAX_MOD_SIZE; 374 zdev->max_mod_size = PCICA_MAX_MOD_SIZE;
375 zdev->speed_rating = PCICA_SPEED_RATING; 375 zdev->speed_rating = PCICA_SPEED_RATING;
376 zdev->max_exp_bit_length = PCICA_MAX_MOD_SIZE;
376 ap_dev->reply = &zdev->reply; 377 ap_dev->reply = &zdev->reply;
377 ap_dev->private = zdev; 378 ap_dev->private = zdev;
378 rc = zcrypt_device_register(zdev); 379 rc = zcrypt_device_register(zdev);
diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c
index 9dec5c77cff4..aa4c050a5694 100644
--- a/drivers/s390/crypto/zcrypt_pcicc.c
+++ b/drivers/s390/crypto/zcrypt_pcicc.c
@@ -579,6 +579,7 @@ static int zcrypt_pcicc_probe(struct ap_device *ap_dev)
579 zdev->min_mod_size = PCICC_MIN_MOD_SIZE; 579 zdev->min_mod_size = PCICC_MIN_MOD_SIZE;
580 zdev->max_mod_size = PCICC_MAX_MOD_SIZE; 580 zdev->max_mod_size = PCICC_MAX_MOD_SIZE;
581 zdev->speed_rating = PCICC_SPEED_RATING; 581 zdev->speed_rating = PCICC_SPEED_RATING;
582 zdev->max_exp_bit_length = PCICC_MAX_MOD_SIZE;
582 ap_dev->reply = &zdev->reply; 583 ap_dev->reply = &zdev->reply;
583 ap_dev->private = zdev; 584 ap_dev->private = zdev;
584 rc = zcrypt_device_register(zdev); 585 rc = zcrypt_device_register(zdev);
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index 510fab4577d4..4f85eb725f4f 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -45,12 +45,12 @@
45#define PCIXCC_MIN_MOD_SIZE_OLD 64 /* 512 bits */ 45#define PCIXCC_MIN_MOD_SIZE_OLD 64 /* 512 bits */
46#define PCIXCC_MAX_MOD_SIZE 256 /* 2048 bits */ 46#define PCIXCC_MAX_MOD_SIZE 256 /* 2048 bits */
47#define CEX3C_MIN_MOD_SIZE PCIXCC_MIN_MOD_SIZE 47#define CEX3C_MIN_MOD_SIZE PCIXCC_MIN_MOD_SIZE
48#define CEX3C_MAX_MOD_SIZE PCIXCC_MAX_MOD_SIZE 48#define CEX3C_MAX_MOD_SIZE 512 /* 4096 bits */
49 49
50#define PCIXCC_MCL2_SPEED_RATING 7870 50#define PCIXCC_MCL2_SPEED_RATING 7870
51#define PCIXCC_MCL3_SPEED_RATING 7870 51#define PCIXCC_MCL3_SPEED_RATING 7870
52#define CEX2C_SPEED_RATING 7000 52#define CEX2C_SPEED_RATING 7000
53#define CEX3C_SPEED_RATING 6500 /* FIXME: needs finetuning */ 53#define CEX3C_SPEED_RATING 6500
54 54
55#define PCIXCC_MAX_ICA_MESSAGE_SIZE 0x77c /* max size type6 v2 crt message */ 55#define PCIXCC_MAX_ICA_MESSAGE_SIZE 0x77c /* max size type6 v2 crt message */
56#define PCIXCC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply */ 56#define PCIXCC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply */
@@ -567,6 +567,15 @@ static int convert_response_ica(struct zcrypt_device *zdev,
567 case TYPE88_RSP_CODE: 567 case TYPE88_RSP_CODE:
568 return convert_error(zdev, reply); 568 return convert_error(zdev, reply);
569 case TYPE86_RSP_CODE: 569 case TYPE86_RSP_CODE:
570 if (msg->cprbx.ccp_rtcode &&
571 (msg->cprbx.ccp_rscode == 0x14f) &&
572 (outputdatalength > 256)) {
573 if (zdev->max_exp_bit_length <= 17) {
574 zdev->max_exp_bit_length = 17;
575 return -EAGAIN;
576 } else
577 return -EINVAL;
578 }
570 if (msg->hdr.reply_code) 579 if (msg->hdr.reply_code)
571 return convert_error(zdev, reply); 580 return convert_error(zdev, reply);
572 if (msg->cprbx.cprb_ver_id == 0x02) 581 if (msg->cprbx.cprb_ver_id == 0x02)
@@ -1052,11 +1061,13 @@ static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
1052 zdev->speed_rating = PCIXCC_MCL2_SPEED_RATING; 1061 zdev->speed_rating = PCIXCC_MCL2_SPEED_RATING;
1053 zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD; 1062 zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD;
1054 zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE; 1063 zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
1064 zdev->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE;
1055 } else { 1065 } else {
1056 zdev->type_string = "PCIXCC_MCL3"; 1066 zdev->type_string = "PCIXCC_MCL3";
1057 zdev->speed_rating = PCIXCC_MCL3_SPEED_RATING; 1067 zdev->speed_rating = PCIXCC_MCL3_SPEED_RATING;
1058 zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE; 1068 zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE;
1059 zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE; 1069 zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
1070 zdev->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE;
1060 } 1071 }
1061 break; 1072 break;
1062 case AP_DEVICE_TYPE_CEX2C: 1073 case AP_DEVICE_TYPE_CEX2C:
@@ -1065,6 +1076,7 @@ static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
1065 zdev->speed_rating = CEX2C_SPEED_RATING; 1076 zdev->speed_rating = CEX2C_SPEED_RATING;
1066 zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE; 1077 zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE;
1067 zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE; 1078 zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
1079 zdev->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE;
1068 break; 1080 break;
1069 case AP_DEVICE_TYPE_CEX3C: 1081 case AP_DEVICE_TYPE_CEX3C:
1070 zdev->user_space_type = ZCRYPT_CEX3C; 1082 zdev->user_space_type = ZCRYPT_CEX3C;
@@ -1072,6 +1084,7 @@ static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
1072 zdev->speed_rating = CEX3C_SPEED_RATING; 1084 zdev->speed_rating = CEX3C_SPEED_RATING;
1073 zdev->min_mod_size = CEX3C_MIN_MOD_SIZE; 1085 zdev->min_mod_size = CEX3C_MIN_MOD_SIZE;
1074 zdev->max_mod_size = CEX3C_MAX_MOD_SIZE; 1086 zdev->max_mod_size = CEX3C_MAX_MOD_SIZE;
1087 zdev->max_exp_bit_length = CEX3C_MAX_MOD_SIZE;
1075 break; 1088 break;
1076 default: 1089 default:
1077 goto out_free; 1090 goto out_free;
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index 375aeeaf9ea5..414427d64a8f 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -10,6 +10,7 @@
  * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
  */
 
+#include <linux/kernel_stat.h>
 #include <linux/init.h>
 #include <linux/bootmem.h>
 #include <linux/err.h>
@@ -25,6 +26,7 @@
 #include <asm/kvm_virtio.h>
 #include <asm/setup.h>
 #include <asm/s390_ext.h>
+#include <asm/irq.h>
 
 #define VIRTIO_SUBCODE_64 0x0D00
 
@@ -379,6 +381,7 @@ static void kvm_extint_handler(unsigned int ext_int_code,
 	u16 subcode;
 	u32 param;
 
+	kstat_cpu(smp_processor_id()).irqs[EXTINT_VRT]++;
 	subcode = ext_int_code >> 16;
 	if ((subcode & 0xff00) != VIRTIO_SUBCODE_64)
 		return;
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index 456b18743397..fa80ba1f0344 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -2,7 +2,8 @@ menu "S/390 network device drivers"
 	depends on NETDEVICES && S390
 
 config LCS
-	tristate "Lan Channel Station Interface"
+	def_tristate m
+	prompt "Lan Channel Station Interface"
 	depends on CCW && NETDEVICES && (NET_ETHERNET || TR || FDDI)
 	help
 	  Select this option if you want to use LCS networking on IBM System z.
@@ -12,7 +13,8 @@ config LCS
 	  If you do not know what it is, it's safe to choose Y.
 
 config CTCM
-	tristate "CTC and MPC SNA device support"
+	def_tristate m
+	prompt "CTC and MPC SNA device support"
 	depends on CCW && NETDEVICES
 	help
 	  Select this option if you want to use channel-to-channel
@@ -26,7 +28,8 @@ config CTCM
 	  If you do not need any channel-to-channel connection, choose N.
 
 config NETIUCV
-	tristate "IUCV network device support (VM only)"
+	def_tristate m
+	prompt "IUCV network device support (VM only)"
 	depends on IUCV && NETDEVICES
 	help
 	  Select this option if you want to use inter-user communication
@@ -37,14 +40,16 @@ config NETIUCV
 	  The module name is netiucv. If unsure, choose Y.
 
 config SMSGIUCV
-	tristate "IUCV special message support (VM only)"
+	def_tristate m
+	prompt "IUCV special message support (VM only)"
 	depends on IUCV
 	help
 	  Select this option if you want to be able to receive SMSG messages
 	  from other VM guest systems.
 
 config SMSGIUCV_EVENT
-	tristate "Deliver IUCV special messages as uevents (VM only)"
+	def_tristate m
+	prompt "Deliver IUCV special messages as uevents (VM only)"
 	depends on SMSGIUCV
 	help
 	  Select this option to deliver CP special messages (SMSGs) as
@@ -54,7 +59,8 @@ config SMSGIUCV_EVENT
 	  To compile as a module, choose M. The module name is "smsgiucv_app".
 
 config CLAW
-	tristate "CLAW device support"
+	def_tristate m
+	prompt "CLAW device support"
 	depends on CCW && NETDEVICES
 	help
 	  This driver supports channel attached CLAW devices.
@@ -64,7 +70,8 @@ config CLAW
 	  To compile into the kernel, choose Y.
 
 config QETH
-	tristate "Gigabit Ethernet device support"
+	def_tristate y
+	prompt "Gigabit Ethernet device support"
 	depends on CCW && NETDEVICES && IP_MULTICAST && QDIO
 	help
 	  This driver supports the IBM System z OSA Express adapters
@@ -78,25 +85,25 @@ config QETH
 	  The module name is qeth.
 
 config QETH_L2
-	tristate "qeth layer 2 device support"
+	def_tristate y
+	prompt "qeth layer 2 device support"
 	depends on QETH
 	help
 	  Select this option to be able to run qeth devices in layer 2 mode.
 	  To compile as a module, choose M. The module name is qeth_l2.
 	  If unsure, choose y.
 
 config QETH_L3
-	tristate "qeth layer 3 device support"
+	def_tristate y
+	prompt "qeth layer 3 device support"
 	depends on QETH
 	help
 	  Select this option to be able to run qeth devices in layer 3 mode.
 	  To compile as a module choose M. The module name is qeth_l3.
 	  If unsure, choose Y.
 
 config QETH_IPV6
-	bool
-	depends on (QETH_L3 = IPV6) || (QETH_L3 && IPV6 = 'y')
-	default y
+	def_bool y if (QETH_L3 = IPV6) || (QETH_L3 && IPV6 = 'y')
 
 config CCWGROUP
 	tristate
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index 8e4153d740f3..ce3a5c13ce0b 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -63,6 +63,7 @@
 
 #define KMSG_COMPONENT "claw"
 
+#include <linux/kernel_stat.h>
 #include <asm/ccwdev.h>
 #include <asm/ccwgroup.h>
 #include <asm/debug.h>
@@ -640,6 +641,7 @@ claw_irq_handler(struct ccw_device *cdev,
 	struct claw_env *p_env;
 	struct chbk *p_ch_r=NULL;
 
+	kstat_cpu(smp_processor_id()).irqs[IOINT_CLW]++;
 	CLAW_DBF_TEXT(4, trace, "clawirq");
 	/* Bypass all 'unsolicited interrupts' */
 	privptr = dev_get_drvdata(&cdev->dev);
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 2c7d2d9be4d0..4c2845985927 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -24,6 +24,7 @@
 #define KMSG_COMPONENT "ctcm"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
+#include <linux/kernel_stat.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -1204,6 +1205,7 @@ static void ctcm_irq_handler(struct ccw_device *cdev,
 	int cstat;
 	int dstat;
 
+	kstat_cpu(smp_processor_id()).irqs[IOINT_CTC]++;
 	CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
 			"Enter %s(%s)", CTCM_FUNTAIL, dev_name(&cdev->dev));
 
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index c9f13b9ea339..09e7a053c844 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -26,6 +26,7 @@
 #define KMSG_COMPONENT "lcs"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
+#include <linux/kernel_stat.h>
 #include <linux/module.h>
 #include <linux/if.h>
 #include <linux/netdevice.h>
@@ -1398,6 +1399,7 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
 	int rc, index;
 	int cstat, dstat;
 
+	kstat_cpu(smp_processor_id()).irqs[IOINT_LCS]++;
 	if (lcs_check_irb_error(cdev, irb))
 		return;
 
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index b7d9dc0adc62..29f848bfc12f 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -3831,6 +3831,8 @@ static int qeth_qdio_establish(struct qeth_card *card)
 	init_data.int_parm = (unsigned long) card;
 	init_data.input_sbal_addr_array = (void **) in_sbal_ptrs;
 	init_data.output_sbal_addr_array = (void **) out_sbal_ptrs;
+	init_data.scan_threshold =
+		(card->info.type == QETH_CARD_TYPE_IQD) ? 8 : 32;
 
 	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
 		QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 2511f92302dd..8da5ed644c2b 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -290,6 +290,8 @@ static void zfcp_qdio_setup_init_data(struct qdio_initialize *id,
 	id->int_parm = (unsigned long) qdio;
 	id->input_sbal_addr_array = (void **) (qdio->res_q);
 	id->output_sbal_addr_array = (void **) (qdio->req_q);
+	id->scan_threshold =
+		QDIO_MAX_BUFFERS_PER_Q - ZFCP_QDIO_MAX_SBALS_PER_REQ * 2;
 }
 
 /**
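
Note: both qdio users now pass an explicit scan_threshold in their qdio_initialize data: qeth asks for 8 buffers on IQD (HiperSockets) devices and 32 otherwise, while zfcp leaves headroom for two maximum-size requests. A sketch of the zfcp arithmetic, with the constants stated as assumptions (QDIO_MAX_BUFFERS_PER_Q is 128 in qdio.h; ZFCP_QDIO_MAX_SBALS_PER_REQ is assumed here to be 36):

	#include <stdio.h>

	/* Assumed values for illustration; the kernel headers are
	 * authoritative. */
	#define QDIO_MAX_BUFFERS_PER_Q      128
	#define ZFCP_QDIO_MAX_SBALS_PER_REQ 36

	int main(void)
	{
		/* Scan once fewer than (queue size - 2 full requests)
		 * buffers remain free. */
		int zfcp_threshold = QDIO_MAX_BUFFERS_PER_Q -
				     ZFCP_QDIO_MAX_SBALS_PER_REQ * 2;

		printf("zfcp scan_threshold = %d\n", zfcp_threshold);
		return 0;
	}
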
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index f7db676de77d..1ee5dab3cfae 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -36,6 +36,7 @@
 #define KMSG_COMPONENT "iucv"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
+#include <linux/kernel_stat.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/spinlock.h>
@@ -1804,6 +1805,7 @@ static void iucv_external_interrupt(unsigned int ext_int_code,
 	struct iucv_irq_data *p;
 	struct iucv_irq_list *work;
 
+	kstat_cpu(smp_processor_id()).irqs[EXTINT_IUC]++;
 	p = iucv_irq_data[smp_processor_id()];
 	if (p->ippathid >= iucv_max_pathid) {
 		WARN_ON(p->ippathid >= iucv_max_pathid);
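
Note: all five handlers patched above (claw, ctcm, lcs, kvm_virtio, iucv) gain the same first statement: bump this CPU's counter for their interrupt source before doing any other work, giving s390 I/O and external interrupts per-source accounting via kstat_cpu(smp_processor_id()).irqs[...]. A self-contained user-space sketch of the bookkeeping pattern (the enum names and array sizes are illustrative, not the kernel's):

	#include <stdio.h>

	/* Illustrative interrupt sources, standing in for IOINT_CLW,
	 * IOINT_CTC, IOINT_LCS, EXTINT_IUC and EXTINT_VRT. */
	enum irq_source { SRC_CLAW, SRC_CTC, SRC_LCS, SRC_IUCV,
			  SRC_VIRTIO, NR_SRC };

	#define NR_CPUS 4

	/* Per-CPU counters, analogous to kstat_cpu(cpu).irqs[]. */
	static unsigned long irq_count[NR_CPUS][NR_SRC];

	/* What each handler now does first: charge the interrupt to
	 * the CPU it arrived on, keyed by source. */
	static void account_irq(int cpu, enum irq_source src)
	{
		irq_count[cpu][src]++;
	}

	int main(void)
	{
		account_irq(0, SRC_LCS);
		account_irq(0, SRC_LCS);
		account_irq(1, SRC_IUCV);
		printf("cpu0 lcs=%lu cpu1 iucv=%lu\n",
		       irq_count[0][SRC_LCS], irq_count[1][SRC_IUCV]);
		return 0;
	}
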
diff --git a/tools/perf/arch/s390/Makefile b/tools/perf/arch/s390/Makefile
new file mode 100644
index 000000000000..15130b50dfe3
--- /dev/null
+++ b/tools/perf/arch/s390/Makefile
@@ -0,0 +1,4 @@
+ifndef NO_DWARF
+PERF_HAVE_DWARF_REGS := 1
+LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/dwarf-regs.o
+endif
diff --git a/tools/perf/arch/s390/util/dwarf-regs.c b/tools/perf/arch/s390/util/dwarf-regs.c
new file mode 100644
index 000000000000..e19653e025fa
--- /dev/null
+++ b/tools/perf/arch/s390/util/dwarf-regs.c
@@ -0,0 +1,22 @@
+/*
+ * Mapping of DWARF debug register numbers into register names.
+ *
+ * Copyright IBM Corp. 2010
+ * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
+ *
+ */
+
+#include <libio.h>
+#include <dwarf-regs.h>
+
+#define NUM_GPRS 16
+
+static const char *gpr_names[NUM_GPRS] = {
+	"%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
+	"%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
+};
+
+const char *get_arch_regstr(unsigned int n)
+{
+	return (n >= NUM_GPRS) ? NULL : gpr_names[n];
+}
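
Note: the new dwarf-regs.c gives perf on s390 its DWARF register-number-to-name mapping: numbers 0-15 resolve to %r0-%r15 and anything else to NULL. A small harness that links against that file can verify the mapping (the extern declaration is repeated here so the harness is self-contained):

	#include <stdio.h>

	/* Provided by tools/perf/arch/s390/util/dwarf-regs.c above. */
	extern const char *get_arch_regstr(unsigned int n);

	int main(void)
	{
		unsigned int n;

		/* DWARF register numbers 0..15 map to %r0..%r15 ... */
		for (n = 0; n < 16; n++)
			printf("dwarf reg %2u -> %s\n", n, get_arch_regstr(n));

		/* ... and anything out of range has no name. */
		printf("dwarf reg 16 -> %s\n",
		       get_arch_regstr(16) ? get_arch_regstr(16) : "(null)");
		return 0;
	}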