author		Linus Torvalds <torvalds@linux-foundation.org>	2011-01-07 17:50:50 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-01-07 17:50:50 -0500
commit		fb5131e1880ea1ba3ba7197cd5cc66c9c288f715 (patch)
tree		f0d9f25f9079727b9ead5a2b4cc85a0fea9b4668 /arch/s390
parent		d074b104cefcb6e8ded55a53e62fed59a246f55d (diff)
parent		8e1023016cf17152972b98bce6c144834a4916d5 (diff)
Merge branch 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6
* 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6: (65 commits)
  [S390] prevent unneccesary loops_per_jiffy recalculation
  [S390] cpuinfo: use get_online_cpus() instead of preempt_disable()
  [S390] smp: remove cpu hotplug messages
  [S390] mutex: enable spinning mutex on s390
  [S390] mutex: Introduce arch_mutex_cpu_relax()
  [S390] cio: fix ccwgroup unregistration race condition
  [S390] perf: add DWARF register lookup for s390
  [S390] cleanup ftrace backend functions
  [S390] ptrace cleanup
  [S390] smp/idle: call init_idle() before starting a new cpu
  [S390] smp: delay idle task creation
  [S390] dasd: Correct retry counter for terminated I/O.
  [S390] dasd: Add support for raw ECKD access.
  [S390] dasd: Prevent deadlock during suspend/resume.
  [S390] dasd: Improve handling of stolen DASD reservation
  [S390] dasd: do path verification for paths added at runtime
  [S390] dasd: add High Performance FICON multitrack support
  [S390] cio: reduce memory consumption of itcw structures
  [S390] nmi: enable machine checks early
  [S390] qeth: buffer count imbalance
  ...
Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/Kconfig                    | 130
-rw-r--r--  arch/s390/Kconfig.debug              |   6
-rw-r--r--  arch/s390/defconfig                  | 152
-rw-r--r--  arch/s390/hypfs/Makefile             |   2
-rw-r--r--  arch/s390/hypfs/hypfs.h              |  33
-rw-r--r--  arch/s390/hypfs/hypfs_dbfs.c         | 116
-rw-r--r--  arch/s390/hypfs/hypfs_diag.c         |  82
-rw-r--r--  arch/s390/hypfs/hypfs_vm.c           |  62
-rw-r--r--  arch/s390/hypfs/inode.c              |  18
-rw-r--r--  arch/s390/include/asm/ccwdev.h       |   2
-rw-r--r--  arch/s390/include/asm/cputime.h      |   2
-rw-r--r--  arch/s390/include/asm/dasd.h         |   3
-rw-r--r--  arch/s390/include/asm/ftrace.h       |  11
-rw-r--r--  arch/s390/include/asm/hardirq.h      |  16
-rw-r--r--  arch/s390/include/asm/irq.h          |  34
-rw-r--r--  arch/s390/include/asm/kprobes.h      |  20
-rw-r--r--  arch/s390/include/asm/processor.h    |   4
-rw-r--r--  arch/s390/include/asm/ptrace.h       |  52
-rw-r--r--  arch/s390/include/asm/qdio.h         |   1
-rw-r--r--  arch/s390/include/asm/s390_ext.h     |  29
-rw-r--r--  arch/s390/include/asm/smp.h          |   3
-rw-r--r--  arch/s390/include/asm/system.h       |   4
-rw-r--r--  arch/s390/include/asm/thread_info.h  |  10
-rw-r--r--  arch/s390/include/asm/timex.h        |  20
-rw-r--r--  arch/s390/kernel/asm-offsets.c       |  14
-rw-r--r--  arch/s390/kernel/compat_ptrace.h     |  53
-rw-r--r--  arch/s390/kernel/entry.S             | 274
-rw-r--r--  arch/s390/kernel/entry.h             |   2
-rw-r--r--  arch/s390/kernel/entry64.S           |  73
-rw-r--r--  arch/s390/kernel/ftrace.c            | 238
-rw-r--r--  arch/s390/kernel/irq.c               |  41
-rw-r--r--  arch/s390/kernel/kprobes.c           | 470
-rw-r--r--  arch/s390/kernel/mcount.S            |  32
-rw-r--r--  arch/s390/kernel/mcount64.S          |  29
-rw-r--r--  arch/s390/kernel/nmi.c               |   3
-rw-r--r--  arch/s390/kernel/process.c           |  21
-rw-r--r--  arch/s390/kernel/processor.c         |  20
-rw-r--r--  arch/s390/kernel/ptrace.c            | 306
-rw-r--r--  arch/s390/kernel/s390_ext.c          | 125
-rw-r--r--  arch/s390/kernel/signal.c            |   2
-rw-r--r--  arch/s390/kernel/smp.c               |  47
-rw-r--r--  arch/s390/kernel/time.c              |   6
-rw-r--r--  arch/s390/kernel/traps.c             |  15
-rw-r--r--  arch/s390/kernel/vtime.c             |   6
-rw-r--r--  arch/s390/kvm/Kconfig                |   7
-rw-r--r--  arch/s390/lib/delay.c                |   2
-rw-r--r--  arch/s390/mm/fault.c                 |  35
47 files changed, 1357 insertions, 1276 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 6c6d7b339aae..ff19efdf6fef 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -1,13 +1,8 @@
-config SCHED_MC
-	def_bool y
-	depends on SMP
-
 config MMU
 	def_bool y
 
 config ZONE_DMA
-	def_bool y
-	depends on 64BIT
+	def_bool y if 64BIT
 
 config LOCKDEP_SUPPORT
 	def_bool y
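Every hunk in this file repeats one mechanical conversion: an explicit bool/tristate type plus a separate "default" (and sometimes "depends on") statement collapses into a single def_bool/def_tristate, with a "prompt" statement added wherever the option should stay user-visible. The two spellings are equivalent Kconfig; a sketch of the pattern with a placeholder option name (FOO is illustrative, not an option from this patch):

# Before: type, prompt and default written as separate statements
config FOO
	bool "Foo support"
	default y

# After: def_bool folds type and default into one line; the explicit
# prompt statement keeps the option selectable in menuconfig
config FOO
	def_bool y
	prompt "Foo support"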
@@ -25,12 +20,10 @@ config RWSEM_XCHGADD_ALGORITHM
 	def_bool y
 
 config ARCH_HAS_ILOG2_U32
-	bool
-	default n
+	def_bool n
 
 config ARCH_HAS_ILOG2_U64
-	bool
-	default n
+	def_bool n
 
 config GENERIC_HWEIGHT
 	def_bool y
@@ -42,9 +35,7 @@ config GENERIC_CLOCKEVENTS
 	def_bool y
 
 config GENERIC_BUG
-	bool
-	depends on BUG
-	default y
+	def_bool y if BUG
 
 config GENERIC_BUG_RELATIVE_POINTERS
 	def_bool y
@@ -59,13 +50,10 @@ config ARCH_DMA_ADDR_T_64BIT
 	def_bool 64BIT
 
 config GENERIC_LOCKBREAK
-	bool
-	default y
-	depends on SMP && PREEMPT
+	def_bool y if SMP && PREEMPT
 
 config PGSTE
-	bool
-	default y if KVM
+	def_bool y if KVM
 
 config VIRT_CPU_ACCOUNTING
 	def_bool y
@@ -85,7 +73,6 @@ config S390
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_REGS_AND_STACK_ACCESS_API
-	select HAVE_DEFAULT_NO_SPIN_MUTEXES
 	select HAVE_OPROFILE
 	select HAVE_KPROBES
 	select HAVE_KRETPROBES
@@ -130,8 +117,7 @@ config S390
 	select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
 
 config SCHED_OMIT_FRAME_POINTER
-	bool
-	default y
+	def_bool y
 
 source "init/Kconfig"
 
@@ -144,20 +130,21 @@ comment "Processor type and features"
 source "kernel/time/Kconfig"
 
 config 64BIT
-	bool "64 bit kernel"
+	def_bool y
+	prompt "64 bit kernel"
 	help
 	  Select this option if you have an IBM z/Architecture machine
 	  and want to use the 64 bit addressing mode.
 
 config 32BIT
-	bool
-	default y if !64BIT
+	def_bool y if !64BIT
 
 config KTIME_SCALAR
 	def_bool 32BIT
 
 config SMP
-	bool "Symmetric multi-processing support"
+	def_bool y
+	prompt "Symmetric multi-processing support"
 	---help---
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, like most personal computers, say N. If
@@ -189,10 +176,10 @@ config NR_CPUS
 	  approximately sixteen kilobytes to the kernel image.
 
 config HOTPLUG_CPU
-	bool "Support for hot-pluggable CPUs"
+	def_bool y
+	prompt "Support for hot-pluggable CPUs"
 	depends on SMP
 	select HOTPLUG
-	default n
 	help
 	  Say Y here to be able to turn CPUs off and on. CPUs
 	  can be controlled through /sys/devices/system/cpu/cpu#.
@@ -208,14 +195,16 @@ config SCHED_MC
 	  increased overhead in some places.
 
 config SCHED_BOOK
-	bool "Book scheduler support"
+	def_bool y
+	prompt "Book scheduler support"
 	depends on SMP && SCHED_MC
 	help
 	  Book scheduler support improves the CPU scheduler's decision making
 	  when dealing with machines that have several books.
 
 config MATHEMU
-	bool "IEEE FPU emulation"
+	def_bool y
+	prompt "IEEE FPU emulation"
 	depends on MARCH_G5
 	help
 	  This option is required for IEEE compliant floating point arithmetic
@@ -223,7 +212,8 @@ config MATHEMU
 	  need this.
 
 config COMPAT
-	bool "Kernel support for 31 bit emulation"
+	def_bool y
+	prompt "Kernel support for 31 bit emulation"
 	depends on 64BIT
 	select COMPAT_BINFMT_ELF
 	help
@@ -233,16 +223,14 @@ config COMPAT
 	  executing 31 bit applications. It is safe to say "Y".
 
 config SYSVIPC_COMPAT
-	bool
-	depends on COMPAT && SYSVIPC
-	default y
+	def_bool y if COMPAT && SYSVIPC
 
 config AUDIT_ARCH
-	bool
-	default y
+	def_bool y
 
 config S390_EXEC_PROTECT
-	bool "Data execute protection"
+	def_bool y
+	prompt "Data execute protection"
 	help
 	  This option allows to enable a buffer overflow protection for user
 	  space programs and it also selects the addressing mode option above.
@@ -302,7 +290,8 @@ config MARCH_Z196
 endchoice
 
 config PACK_STACK
-	bool "Pack kernel stack"
+	def_bool y
+	prompt "Pack kernel stack"
 	help
 	  This option enables the compiler option -mkernel-backchain if it
 	  is available. If the option is available the compiler supports
@@ -315,7 +304,8 @@ config PACK_STACK
 	  Say Y if you are unsure.
 
 config SMALL_STACK
-	bool "Use 8kb for kernel stack instead of 16kb"
+	def_bool n
+	prompt "Use 8kb for kernel stack instead of 16kb"
 	depends on PACK_STACK && 64BIT && !LOCKDEP
 	help
 	  If you say Y here and the compiler supports the -mkernel-backchain
@@ -327,7 +317,8 @@ config SMALL_STACK
 	  Say N if you are unsure.
 
 config CHECK_STACK
-	bool "Detect kernel stack overflow"
+	def_bool y
+	prompt "Detect kernel stack overflow"
 	help
 	  This option enables the compiler option -mstack-guard and
 	  -mstack-size if they are available. If the compiler supports them
@@ -351,7 +342,8 @@ config STACK_GUARD
 	  512 for 64 bit.
 
 config WARN_STACK
-	bool "Emit compiler warnings for function with broken stack usage"
+	def_bool n
+	prompt "Emit compiler warnings for function with broken stack usage"
 	help
 	  This option enables the compiler options -mwarn-framesize and
 	  -mwarn-dynamicstack. If the compiler supports these options it
@@ -386,24 +378,24 @@ config ARCH_SPARSEMEM_DEFAULT
 	def_bool y
 
 config ARCH_SELECT_MEMORY_MODEL
 	def_bool y
 
 config ARCH_ENABLE_MEMORY_HOTPLUG
-	def_bool y
-	depends on SPARSEMEM
+	def_bool y if SPARSEMEM
 
 config ARCH_ENABLE_MEMORY_HOTREMOVE
 	def_bool y
 
 config ARCH_HIBERNATION_POSSIBLE
 	def_bool y if 64BIT
 
 source "mm/Kconfig"
 
 comment "I/O subsystem configuration"
 
 config QDIO
-	tristate "QDIO support"
+	def_tristate y
+	prompt "QDIO support"
 	---help---
 	  This driver provides the Queued Direct I/O base support for
 	  IBM System z.
@@ -414,7 +406,8 @@ config QDIO
 	  If unsure, say Y.
 
 config CHSC_SCH
-	tristate "Support for CHSC subchannels"
+	def_tristate y
+	prompt "Support for CHSC subchannels"
 	help
 	  This driver allows usage of CHSC subchannels. A CHSC subchannel
 	  is usually present on LPAR only.
@@ -432,7 +425,8 @@ config CHSC_SCH
 comment "Misc"
 
 config IPL
-	bool "Builtin IPL record support"
+	def_bool y
+	prompt "Builtin IPL record support"
 	help
 	  If you want to use the produced kernel to IPL directly from a
 	  device, you have to merge a bootsector specific to the device
@@ -464,7 +458,8 @@ config FORCE_MAX_ZONEORDER
 	default "9"
 
 config PFAULT
-	bool "Pseudo page fault support"
+	def_bool y
+	prompt "Pseudo page fault support"
 	help
 	  Select this option, if you want to use PFAULT pseudo page fault
 	  handling under VM. If running native or in LPAR, this option
@@ -476,7 +471,8 @@ config PFAULT
 	  this option.
 
 config SHARED_KERNEL
-	bool "VM shared kernel support"
+	def_bool y
+	prompt "VM shared kernel support"
 	help
 	  Select this option, if you want to share the text segment of the
 	  Linux kernel between different VM guests. This reduces memory
@@ -487,7 +483,8 @@ config SHARED_KERNEL
 	  doing and want to exploit this feature.
 
 config CMM
-	tristate "Cooperative memory management"
+	def_tristate n
+	prompt "Cooperative memory management"
 	help
 	  Select this option, if you want to enable the kernel interface
 	  to reduce the memory size of the system. This is accomplished
@@ -499,14 +496,16 @@ config CMM
 	  option.
 
 config CMM_IUCV
-	bool "IUCV special message interface to cooperative memory management"
+	def_bool y
+	prompt "IUCV special message interface to cooperative memory management"
 	depends on CMM && (SMSGIUCV=y || CMM=SMSGIUCV)
 	help
 	  Select this option to enable the special message interface to
 	  the cooperative memory management.
 
 config APPLDATA_BASE
-	bool "Linux - VM Monitor Stream, base infrastructure"
+	def_bool n
+	prompt "Linux - VM Monitor Stream, base infrastructure"
 	depends on PROC_FS
 	help
 	  This provides a kernel interface for creating and updating z/VM APPLDATA
@@ -521,7 +520,8 @@ config APPLDATA_BASE
 	  The /proc entries can also be read from, showing the current settings.
 
 config APPLDATA_MEM
-	tristate "Monitor memory management statistics"
+	def_tristate m
+	prompt "Monitor memory management statistics"
 	depends on APPLDATA_BASE && VM_EVENT_COUNTERS
 	help
 	  This provides memory management related data to the Linux - VM Monitor
@@ -537,7 +537,8 @@ config APPLDATA_MEM
 	  appldata_mem.o.
 
 config APPLDATA_OS
-	tristate "Monitor OS statistics"
+	def_tristate m
+	prompt "Monitor OS statistics"
 	depends on APPLDATA_BASE
 	help
 	  This provides OS related data to the Linux - VM Monitor Stream, like
@@ -551,7 +552,8 @@ config APPLDATA_OS
 	  appldata_os.o.
 
 config APPLDATA_NET_SUM
-	tristate "Monitor overall network statistics"
+	def_tristate m
+	prompt "Monitor overall network statistics"
 	depends on APPLDATA_BASE && NET
 	help
 	  This provides network related data to the Linux - VM Monitor Stream,
@@ -568,30 +570,32 @@ config APPLDATA_NET_SUM
 source kernel/Kconfig.hz
 
 config S390_HYPFS_FS
-	bool "s390 hypervisor file system support"
+	def_bool y
+	prompt "s390 hypervisor file system support"
 	select SYS_HYPERVISOR
-	default y
 	help
 	  This is a virtual file system intended to provide accounting
 	  information in an s390 hypervisor environment.
 
 config KEXEC
-	bool "kexec system call"
+	def_bool n
+	prompt "kexec system call"
 	help
 	  kexec is a system call that implements the ability to shutdown your
 	  current kernel, and to start another kernel. It is like a reboot
 	  but is independent of hardware/microcode support.
 
 config ZFCPDUMP
-	bool "zfcpdump support"
+	def_bool n
+	prompt "zfcpdump support"
 	select SMP
-	default n
 	help
 	  Select this option if you want to build an zfcpdump enabled kernel.
 	  Refer to <file:Documentation/s390/zfcpdump.txt> for more details on this.
 
 config S390_GUEST
-bool "s390 guest support for KVM (EXPERIMENTAL)"
+	def_bool y
+	prompt "s390 guest support for KVM (EXPERIMENTAL)"
 	depends on 64BIT && EXPERIMENTAL
 	select VIRTIO
 	select VIRTIO_RING
@@ -603,9 +607,9 @@ bool "s390 guest support for KVM (EXPERIMENTAL)"
 	  the default console.
 
 config SECCOMP
-	bool "Enable seccomp to safely compute untrusted bytecode"
+	def_bool y
+	prompt "Enable seccomp to safely compute untrusted bytecode"
 	depends on PROC_FS
-	default y
 	help
 	  This kernel feature is useful for number crunching applications
 	  that may need to compute untrusted bytecode during their
diff --git a/arch/s390/Kconfig.debug b/arch/s390/Kconfig.debug
index 05221b13ffb1..2b380df95606 100644
--- a/arch/s390/Kconfig.debug
+++ b/arch/s390/Kconfig.debug
@@ -1,8 +1,7 @@
 menu "Kernel hacking"
 
 config TRACE_IRQFLAGS_SUPPORT
-	bool
-	default y
+	def_bool y
 
 source "lib/Kconfig.debug"
 
@@ -19,7 +18,8 @@ config STRICT_DEVMEM
 	  If you are unsure, say Y.
 
 config DEBUG_STRICT_USER_COPY_CHECKS
-	bool "Strict user copy size checks"
+	def_bool n
+	prompt "Strict user copy size checks"
 	---help---
 	  Enabling this option turns a certain set of sanity checks for user
 	  copy operations into compile time warnings.
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index e40ac6ee6526..d79697157ac0 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -2,16 +2,12 @@ CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_AUDIT=y
+CONFIG_RCU_TRACE=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
-CONFIG_CGROUPS=y
-CONFIG_CGROUP_NS=y
-CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_UTS_NS=y
-CONFIG_IPC_NS=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-# CONFIG_COMPAT_BRK is not set
+CONFIG_PERF_EVENTS=y
 CONFIG_SLAB=y
 CONFIG_KPROBES=y
 CONFIG_MODULES=y
@@ -20,24 +16,12 @@ CONFIG_MODVERSIONS=y
 CONFIG_DEFAULT_DEADLINE=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
-CONFIG_64BIT=y
-CONFIG_SMP=y
-CONFIG_NR_CPUS=32
-CONFIG_COMPAT=y
-CONFIG_S390_EXEC_PROTECT=y
-CONFIG_PACK_STACK=y
-CONFIG_CHECK_STACK=y
 CONFIG_PREEMPT=y
 CONFIG_MEMORY_HOTPLUG=y
 CONFIG_MEMORY_HOTREMOVE=y
-CONFIG_QDIO=y
-CONFIG_CHSC_SCH=m
-CONFIG_IPL=y
 CONFIG_BINFMT_MISC=m
-CONFIG_PFAULT=y
 CONFIG_HZ_100=y
 CONFIG_KEXEC=y
-CONFIG_S390_GUEST=y
 CONFIG_PM=y
 CONFIG_HIBERNATION=y
 CONFIG_PACKET=y
@@ -46,16 +30,15 @@ CONFIG_NET_KEY=y
 CONFIG_AFIUCV=m
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
+# CONFIG_INET_LRO is not set
 CONFIG_IPV6=y
-CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
-CONFIG_NETFILTER_NETLINK_LOG=m
-CONFIG_NF_CONNTRACK=m
-# CONFIG_NF_CT_PROTO_SCTP is not set
+CONFIG_NET_SCTPPROBE=m
+CONFIG_L2TP=m
+CONFIG_L2TP_DEBUGFS=m
+CONFIG_VLAN_8021Q=y
 CONFIG_NET_SCHED=y
 CONFIG_NET_SCH_CBQ=m
 CONFIG_NET_SCH_PRIO=m
-CONFIG_NET_SCH_MULTIQ=y
 CONFIG_NET_SCH_RED=m
 CONFIG_NET_SCH_SFQ=m
 CONFIG_NET_SCH_TEQL=m
@@ -69,28 +52,14 @@ CONFIG_NET_CLS_U32=m
 CONFIG_CLS_U32_MARK=y
 CONFIG_NET_CLS_RSVP=m
 CONFIG_NET_CLS_RSVP6=m
-CONFIG_NET_CLS_FLOW=m
 CONFIG_NET_CLS_ACT=y
 CONFIG_NET_ACT_POLICE=y
-CONFIG_NET_ACT_NAT=m
-CONFIG_CAN=m
-CONFIG_CAN_RAW=m
-CONFIG_CAN_BCM=m
-CONFIG_CAN_VCAN=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 # CONFIG_FIRMWARE_IN_KERNEL is not set
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_XIP=y
-CONFIG_BLK_DEV_XPRAM=m
-CONFIG_DASD=y
-CONFIG_DASD_PROFILE=y
-CONFIG_DASD_ECKD=y
-CONFIG_DASD_FBA=y
-CONFIG_DASD_DIAG=y
-CONFIG_DASD_EER=y
-CONFIG_VIRTIO_BLK=m
+CONFIG_VIRTIO_BLK=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=y
@@ -102,101 +71,92 @@ CONFIG_SCSI_CONSTANTS=y
 CONFIG_SCSI_LOGGING=y
 CONFIG_SCSI_SCAN_ASYNC=y
 CONFIG_ZFCP=y
-CONFIG_SCSI_DH=m
-CONFIG_SCSI_DH_RDAC=m
-CONFIG_SCSI_DH_HP_SW=m
-CONFIG_SCSI_DH_EMC=m
-CONFIG_SCSI_DH_ALUA=m
-CONFIG_SCSI_OSD_INITIATOR=m
-CONFIG_SCSI_OSD_ULD=m
-CONFIG_MD=y
-CONFIG_BLK_DEV_MD=y
-CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_MULTIPATH=m
-CONFIG_BLK_DEV_DM=y
-CONFIG_DM_CRYPT=y
-CONFIG_DM_SNAPSHOT=y
-CONFIG_DM_MIRROR=y
-CONFIG_DM_ZERO=y
-CONFIG_DM_MULTIPATH=m
+CONFIG_ZFCP_DIF=y
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=m
 CONFIG_BONDING=m
 CONFIG_EQUALIZER=m
 CONFIG_TUN=m
-CONFIG_VETH=m
 CONFIG_NET_ETHERNET=y
-CONFIG_LCS=m
-CONFIG_CTCM=m
-CONFIG_QETH=y
-CONFIG_QETH_L2=y
-CONFIG_QETH_L3=y
-CONFIG_VIRTIO_NET=m
-CONFIG_HW_RANDOM_VIRTIO=m
+CONFIG_VIRTIO_NET=y
 CONFIG_RAW_DRIVER=m
-CONFIG_TN3270=y
-CONFIG_TN3270_TTY=y
-CONFIG_TN3270_FS=m
-CONFIG_TN3270_CONSOLE=y
-CONFIG_TN3215=y
-CONFIG_TN3215_CONSOLE=y
-CONFIG_SCLP_TTY=y
-CONFIG_SCLP_CONSOLE=y
-CONFIG_SCLP_VT220_TTY=y
-CONFIG_SCLP_VT220_CONSOLE=y
-CONFIG_SCLP_CPI=m
-CONFIG_SCLP_ASYNC=m
-CONFIG_S390_TAPE=m
-CONFIG_S390_TAPE_BLOCK=y
-CONFIG_S390_TAPE_34XX=m
-CONFIG_ACCESSIBILITY=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
 CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_TMPFS_POSIX_ACL=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_NFSD=y
-CONFIG_NFSD_V3=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
 CONFIG_DLM=m
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
-# CONFIG_SCHED_DEBUG is not set
-CONFIG_DEBUG_SPINLOCK=y
-CONFIG_DEBUG_MUTEXES=y
+CONFIG_TIMER_STATS=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_PROVE_RCU=y
+CONFIG_LOCK_STAT=y
+CONFIG_DEBUG_LOCKDEP=y
 CONFIG_DEBUG_SPINLOCK_SLEEP=y
+CONFIG_DEBUG_LIST=y
+CONFIG_DEBUG_NOTIFIERS=y
 # CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
+CONFIG_KPROBES_SANITY_TEST=y
+CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
+CONFIG_LATENCYTOP=y
 CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_SAMPLES=y
-CONFIG_CRYPTO_FIPS=y
+CONFIG_DEBUG_PAGEALLOC=y
+# CONFIG_FTRACE is not set
+# CONFIG_STRICT_DEVMEM is not set
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_AUTHENC=m
+CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_CBC=y
 CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_ECB=m
+CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_RMD256=m
 CONFIG_CRYPTO_RMD320=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_DES=m
 CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
 CONFIG_CRYPTO_SALSA20=m
 CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_DEFLATE=m
 CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_ZCRYPT=m
+CONFIG_CRYPTO_SHA1_S390=m
+CONFIG_CRYPTO_SHA256_S390=m
 CONFIG_CRYPTO_SHA512_S390=m
-CONFIG_CRC_T10DIF=y
-CONFIG_CRC32=m
+CONFIG_CRYPTO_DES_S390=m
+CONFIG_CRYPTO_AES_S390=m
 CONFIG_CRC7=m
-CONFIG_KVM=m
-CONFIG_VIRTIO_BALLOON=m
+CONFIG_VIRTIO_BALLOON=y
diff --git a/arch/s390/hypfs/Makefile b/arch/s390/hypfs/Makefile
index b08d2abf6178..2e671d5004ca 100644
--- a/arch/s390/hypfs/Makefile
+++ b/arch/s390/hypfs/Makefile
@@ -4,4 +4,4 @@
 
 obj-$(CONFIG_S390_HYPFS_FS) += s390_hypfs.o
 
-s390_hypfs-objs := inode.o hypfs_diag.o hypfs_vm.o
+s390_hypfs-objs := inode.o hypfs_diag.o hypfs_vm.o hypfs_dbfs.o
diff --git a/arch/s390/hypfs/hypfs.h b/arch/s390/hypfs/hypfs.h
index fa487d4cc08b..80c1526f2af3 100644
--- a/arch/s390/hypfs/hypfs.h
+++ b/arch/s390/hypfs/hypfs.h
@@ -12,6 +12,8 @@
 #include <linux/fs.h>
 #include <linux/types.h>
 #include <linux/debugfs.h>
+#include <linux/workqueue.h>
+#include <linux/kref.h>
 
 #define REG_FILE_MODE    0440
 #define UPDATE_FILE_MODE 0220
@@ -38,6 +40,33 @@ extern int hypfs_vm_init(void);
 extern void hypfs_vm_exit(void);
 extern int hypfs_vm_create_files(struct super_block *sb, struct dentry *root);
 
-/* Directory for debugfs files */
-extern struct dentry *hypfs_dbfs_dir;
+/* debugfs interface */
+struct hypfs_dbfs_file;
+
+struct hypfs_dbfs_data {
+	void			*buf;
+	void			*buf_free_ptr;
+	size_t			size;
+	struct hypfs_dbfs_file	*dbfs_file;
+	struct kref		kref;
+};
+
+struct hypfs_dbfs_file {
+	const char	*name;
+	int		(*data_create)(void **data, void **data_free_ptr,
+				       size_t *size);
+	void		(*data_free)(const void *buf_free_ptr);
+
+	/* Private data for hypfs_dbfs.c */
+	struct hypfs_dbfs_data	*data;
+	struct delayed_work	data_free_work;
+	struct mutex		lock;
+	struct dentry		*dentry;
+};
+
+extern int hypfs_dbfs_init(void);
+extern void hypfs_dbfs_exit(void);
+extern int hypfs_dbfs_create_file(struct hypfs_dbfs_file *df);
+extern void hypfs_dbfs_remove_file(struct hypfs_dbfs_file *df);
+
 #endif /* _HYPFS_H_ */
diff --git a/arch/s390/hypfs/hypfs_dbfs.c b/arch/s390/hypfs/hypfs_dbfs.c
new file mode 100644
index 000000000000..b478013b7fec
--- /dev/null
+++ b/arch/s390/hypfs/hypfs_dbfs.c
@@ -0,0 +1,116 @@
+/*
+ * Hypervisor filesystem for Linux on s390 - debugfs interface
+ *
+ * Copyright (C) IBM Corp. 2010
+ * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
+ */
+
+#include <linux/slab.h>
+#include "hypfs.h"
+
+static struct dentry *dbfs_dir;
+
+static struct hypfs_dbfs_data *hypfs_dbfs_data_alloc(struct hypfs_dbfs_file *f)
+{
+	struct hypfs_dbfs_data *data;
+
+	data = kmalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return NULL;
+	kref_init(&data->kref);
+	data->dbfs_file = f;
+	return data;
+}
+
+static void hypfs_dbfs_data_free(struct kref *kref)
+{
+	struct hypfs_dbfs_data *data;
+
+	data = container_of(kref, struct hypfs_dbfs_data, kref);
+	data->dbfs_file->data_free(data->buf_free_ptr);
+	kfree(data);
+}
+
+static void data_free_delayed(struct work_struct *work)
+{
+	struct hypfs_dbfs_data *data;
+	struct hypfs_dbfs_file *df;
+
+	df = container_of(work, struct hypfs_dbfs_file, data_free_work.work);
+	mutex_lock(&df->lock);
+	data = df->data;
+	df->data = NULL;
+	mutex_unlock(&df->lock);
+	kref_put(&data->kref, hypfs_dbfs_data_free);
+}
+
+static ssize_t dbfs_read(struct file *file, char __user *buf,
+			 size_t size, loff_t *ppos)
+{
+	struct hypfs_dbfs_data *data;
+	struct hypfs_dbfs_file *df;
+	ssize_t rc;
+
+	if (*ppos != 0)
+		return 0;
+
+	df = file->f_path.dentry->d_inode->i_private;
+	mutex_lock(&df->lock);
+	if (!df->data) {
+		data = hypfs_dbfs_data_alloc(df);
+		if (!data) {
+			mutex_unlock(&df->lock);
+			return -ENOMEM;
+		}
+		rc = df->data_create(&data->buf, &data->buf_free_ptr,
+				     &data->size);
+		if (rc) {
+			mutex_unlock(&df->lock);
+			kfree(data);
+			return rc;
+		}
+		df->data = data;
+		schedule_delayed_work(&df->data_free_work, HZ);
+	}
+	data = df->data;
+	kref_get(&data->kref);
+	mutex_unlock(&df->lock);
+
+	rc = simple_read_from_buffer(buf, size, ppos, data->buf, data->size);
+	kref_put(&data->kref, hypfs_dbfs_data_free);
+	return rc;
+}
+
+static const struct file_operations dbfs_ops = {
+	.read		= dbfs_read,
+	.llseek		= no_llseek,
+};
+
+int hypfs_dbfs_create_file(struct hypfs_dbfs_file *df)
+{
+	df->dentry = debugfs_create_file(df->name, 0400, dbfs_dir, df,
+					 &dbfs_ops);
+	if (IS_ERR(df->dentry))
+		return PTR_ERR(df->dentry);
+	mutex_init(&df->lock);
+	INIT_DELAYED_WORK(&df->data_free_work, data_free_delayed);
+	return 0;
+}
+
+void hypfs_dbfs_remove_file(struct hypfs_dbfs_file *df)
+{
+	debugfs_remove(df->dentry);
+}
+
+int hypfs_dbfs_init(void)
+{
+	dbfs_dir = debugfs_create_dir("s390_hypfs", NULL);
+	if (IS_ERR(dbfs_dir))
+		return PTR_ERR(dbfs_dir);
+	return 0;
+}
+
+void hypfs_dbfs_exit(void)
+{
+	debugfs_remove(dbfs_dir);
+}
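With this helper in place, each debugfs blob needs only a name plus a data_create/data_free pair; dbfs_read() above centralizes the locking, the kref-based buffer lifetime, and the one-second (HZ) reuse window before the delayed work drops the cached data. A minimal consumer sketch under assumed names (dbfs_foo_create and "diag_foo" are illustrative, not part of this patch); the real conversions follow in hypfs_diag.c and hypfs_vm.c:

#include <linux/vmalloc.h>
#include "hypfs.h"

/* Hypothetical example: expose one zeroed page through s390_hypfs. */
static int dbfs_foo_create(void **data, void **data_free_ptr, size_t *size)
{
	void *buf = vmalloc(PAGE_SIZE);

	if (!buf)
		return -ENOMEM;
	memset(buf, 0, PAGE_SIZE);	/* fill with the real payload here */
	*data = buf;		/* buffer dbfs_read() copies to user space */
	*data_free_ptr = buf;	/* pointer handed to .data_free later */
	*size = PAGE_SIZE;
	return 0;
}

static struct hypfs_dbfs_file dbfs_file_foo = {
	.name		= "diag_foo",
	.data_create	= dbfs_foo_create,
	.data_free	= vfree,
};

/* Registered once with hypfs_dbfs_create_file(&dbfs_file_foo) at init
 * time and torn down with hypfs_dbfs_remove_file(&dbfs_file_foo). */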
diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
index cd4a81be9cf8..6023c6dc1fb7 100644
--- a/arch/s390/hypfs/hypfs_diag.c
+++ b/arch/s390/hypfs/hypfs_diag.c
@@ -555,81 +555,38 @@ struct dbfs_d204 {
 	char buf[];			/* d204 buffer */
 } __attribute__ ((packed));
 
-struct dbfs_d204_private {
-	struct dbfs_d204 *d204;	/* Aligned d204 data with header */
-	void *base;		/* Base pointer (needed for vfree) */
-};
-
-static int dbfs_d204_open(struct inode *inode, struct file *file)
+static int dbfs_d204_create(void **data, void **data_free_ptr, size_t *size)
 {
-	struct dbfs_d204_private *data;
 	struct dbfs_d204 *d204;
 	int rc, buf_size;
+	void *base;
 
-	data = kzalloc(sizeof(*data), GFP_KERNEL);
-	if (!data)
-		return -ENOMEM;
 	buf_size = PAGE_SIZE * (diag204_buf_pages + 1) + sizeof(d204->hdr);
-	data->base = vmalloc(buf_size);
-	if (!data->base) {
-		rc = -ENOMEM;
-		goto fail_kfree_data;
+	base = vmalloc(buf_size);
+	if (!base)
+		return -ENOMEM;
+	memset(base, 0, buf_size);
+	d204 = page_align_ptr(base + sizeof(d204->hdr)) - sizeof(d204->hdr);
+	rc = diag204_do_store(d204->buf, diag204_buf_pages);
+	if (rc) {
+		vfree(base);
+		return rc;
 	}
-	memset(data->base, 0, buf_size);
-	d204 = page_align_ptr(data->base + sizeof(d204->hdr))
-		- sizeof(d204->hdr);
-	rc = diag204_do_store(&d204->buf, diag204_buf_pages);
-	if (rc)
-		goto fail_vfree_base;
 	d204->hdr.version = DBFS_D204_HDR_VERSION;
 	d204->hdr.len = PAGE_SIZE * diag204_buf_pages;
 	d204->hdr.sc = diag204_store_sc;
-	data->d204 = d204;
-	file->private_data = data;
-	return nonseekable_open(inode, file);
-
-fail_vfree_base:
-	vfree(data->base);
-fail_kfree_data:
-	kfree(data);
-	return rc;
-}
-
-static int dbfs_d204_release(struct inode *inode, struct file *file)
-{
-	struct dbfs_d204_private *data = file->private_data;
-
-	vfree(data->base);
-	kfree(data);
+	*data = d204;
+	*data_free_ptr = base;
+	*size = d204->hdr.len + sizeof(struct dbfs_d204_hdr);
 	return 0;
 }
 
-static ssize_t dbfs_d204_read(struct file *file, char __user *buf,
-			      size_t size, loff_t *ppos)
-{
-	struct dbfs_d204_private *data = file->private_data;
-
-	return simple_read_from_buffer(buf, size, ppos, data->d204,
-				       data->d204->hdr.len +
-				       sizeof(data->d204->hdr));
-}
-
-static const struct file_operations dbfs_d204_ops = {
-	.open		= dbfs_d204_open,
-	.read		= dbfs_d204_read,
-	.release	= dbfs_d204_release,
-	.llseek		= no_llseek,
+static struct hypfs_dbfs_file dbfs_file_d204 = {
+	.name		= "diag_204",
+	.data_create	= dbfs_d204_create,
+	.data_free	= vfree,
 };
 
-static int hypfs_dbfs_init(void)
-{
-	dbfs_d204_file = debugfs_create_file("diag_204", 0400, hypfs_dbfs_dir,
-					     NULL, &dbfs_d204_ops);
-	if (IS_ERR(dbfs_d204_file))
-		return PTR_ERR(dbfs_d204_file);
-	return 0;
-}
-
 __init int hypfs_diag_init(void)
 {
 	int rc;
@@ -639,7 +596,7 @@ __init int hypfs_diag_init(void)
 		return -ENODATA;
 	}
 	if (diag204_info_type == INFO_EXT) {
-		rc = hypfs_dbfs_init();
+		rc = hypfs_dbfs_create_file(&dbfs_file_d204);
 		if (rc)
 			return rc;
 	}
@@ -660,6 +617,7 @@ void hypfs_diag_exit(void)
-	debugfs_remove(dbfs_d204_file);
-	diag224_delete_name_table();
-	diag204_free_buffer();
+	diag224_delete_name_table();
+	diag204_free_buffer();
+
+	hypfs_dbfs_remove_file(&dbfs_file_d204);
 }
 
 /*
diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c
index 26cf177f6a3a..e54796002f61 100644
--- a/arch/s390/hypfs/hypfs_vm.c
+++ b/arch/s390/hypfs/hypfs_vm.c
@@ -20,8 +20,6 @@ static char local_guest[] = " ";
 static char all_guests[] = "* ";
 static char *guest_query;
 
-static struct dentry *dbfs_d2fc_file;
-
 struct diag2fc_data {
 	__u32 version;
 	__u32 flags;
@@ -104,7 +102,7 @@ static void *diag2fc_store(char *query, unsigned int *count, int offset)
 	return data;
 }
 
-static void diag2fc_free(void *data)
+static void diag2fc_free(const void *data)
 {
 	vfree(data);
 }
@@ -239,43 +237,29 @@ struct dbfs_d2fc {
 	char buf[];			/* d2fc buffer */
 } __attribute__ ((packed));
 
-static int dbfs_d2fc_open(struct inode *inode, struct file *file)
+static int dbfs_diag2fc_create(void **data, void **data_free_ptr, size_t *size)
 {
-	struct dbfs_d2fc *data;
+	struct dbfs_d2fc *d2fc;
 	unsigned int count;
 
-	data = diag2fc_store(guest_query, &count, sizeof(data->hdr));
-	if (IS_ERR(data))
-		return PTR_ERR(data);
-	get_clock_ext(data->hdr.tod_ext);
-	data->hdr.len = count * sizeof(struct diag2fc_data);
-	data->hdr.version = DBFS_D2FC_HDR_VERSION;
-	data->hdr.count = count;
-	memset(&data->hdr.reserved, 0, sizeof(data->hdr.reserved));
-	file->private_data = data;
-	return nonseekable_open(inode, file);
-}
-
-static int dbfs_d2fc_release(struct inode *inode, struct file *file)
-{
-	diag2fc_free(file->private_data);
+	d2fc = diag2fc_store(guest_query, &count, sizeof(d2fc->hdr));
+	if (IS_ERR(d2fc))
+		return PTR_ERR(d2fc);
+	get_clock_ext(d2fc->hdr.tod_ext);
+	d2fc->hdr.len = count * sizeof(struct diag2fc_data);
+	d2fc->hdr.version = DBFS_D2FC_HDR_VERSION;
+	d2fc->hdr.count = count;
+	memset(&d2fc->hdr.reserved, 0, sizeof(d2fc->hdr.reserved));
+	*data = d2fc;
+	*data_free_ptr = d2fc;
+	*size = d2fc->hdr.len + sizeof(struct dbfs_d2fc_hdr);
 	return 0;
 }
 
-static ssize_t dbfs_d2fc_read(struct file *file, char __user *buf,
-			      size_t size, loff_t *ppos)
-{
-	struct dbfs_d2fc *data = file->private_data;
-
-	return simple_read_from_buffer(buf, size, ppos, data, data->hdr.len +
-				       sizeof(struct dbfs_d2fc_hdr));
-}
-
-static const struct file_operations dbfs_d2fc_ops = {
-	.open		= dbfs_d2fc_open,
-	.read		= dbfs_d2fc_read,
-	.release	= dbfs_d2fc_release,
-	.llseek		= no_llseek,
+static struct hypfs_dbfs_file dbfs_file_2fc = {
+	.name		= "diag_2fc",
+	.data_create	= dbfs_diag2fc_create,
+	.data_free	= diag2fc_free,
 };
 
 int hypfs_vm_init(void)
@@ -288,18 +272,12 @@ int hypfs_vm_init(void)
 		guest_query = local_guest;
 	else
 		return -EACCES;
-
-	dbfs_d2fc_file = debugfs_create_file("diag_2fc", 0400, hypfs_dbfs_dir,
-					     NULL, &dbfs_d2fc_ops);
-	if (IS_ERR(dbfs_d2fc_file))
-		return PTR_ERR(dbfs_d2fc_file);
-
-	return 0;
+	return hypfs_dbfs_create_file(&dbfs_file_2fc);
 }
 
 void hypfs_vm_exit(void)
 {
 	if (!MACHINE_IS_VM)
 		return;
-	debugfs_remove(dbfs_d2fc_file);
+	hypfs_dbfs_remove_file(&dbfs_file_2fc);
 }
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 47cc446dab8f..6fe874fc5f8e 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -46,8 +46,6 @@ static const struct super_operations hypfs_s_ops;
 /* start of list of all dentries, which have to be deleted on update */
 static struct dentry *hypfs_last_dentry;
 
-struct dentry *hypfs_dbfs_dir;
-
 static void hypfs_update_update(struct super_block *sb)
 {
 	struct hypfs_sb_info *sb_info = sb->s_fs_info;
@@ -471,13 +469,12 @@ static int __init hypfs_init(void)
 {
 	int rc;
 
-	hypfs_dbfs_dir = debugfs_create_dir("s390_hypfs", NULL);
-	if (IS_ERR(hypfs_dbfs_dir))
-		return PTR_ERR(hypfs_dbfs_dir);
-
+	rc = hypfs_dbfs_init();
+	if (rc)
+		return rc;
 	if (hypfs_diag_init()) {
 		rc = -ENODATA;
-		goto fail_debugfs_remove;
+		goto fail_dbfs_exit;
 	}
 	if (hypfs_vm_init()) {
 		rc = -ENODATA;
@@ -499,9 +496,8 @@ fail_hypfs_vm_exit:
 	hypfs_vm_exit();
 fail_hypfs_diag_exit:
 	hypfs_diag_exit();
-fail_debugfs_remove:
-	debugfs_remove(hypfs_dbfs_dir);
-
+fail_dbfs_exit:
+	hypfs_dbfs_exit();
 	pr_err("Initialization of hypfs failed with rc=%i\n", rc);
 	return rc;
 }
@@ -510,7 +506,7 @@ static void __exit hypfs_exit(void)
 {
 	hypfs_diag_exit();
 	hypfs_vm_exit();
-	debugfs_remove(hypfs_dbfs_dir);
+	hypfs_dbfs_exit();
 	unregister_filesystem(&hypfs_type);
 	kobject_put(s390_kobj);
 }
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index e8501115eca8..ff6f62e0ec3e 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -204,6 +204,8 @@ int ccw_device_tm_start_timeout(struct ccw_device *, struct tcw *,
 			       unsigned long, u8, int);
 int ccw_device_tm_intrg(struct ccw_device *cdev);
 
+int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask);
+
 extern int ccw_device_set_online(struct ccw_device *cdev);
 extern int ccw_device_set_offline(struct ccw_device *cdev);
 
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index 40e2ab0fa3f0..081434878296 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -202,7 +202,7 @@ static inline void s390_idle_check(struct pt_regs *regs, __u64 int_clock,
 
 static inline int s390_nohz_delay(int cpu)
 {
-	return per_cpu(s390_idle, cpu).nohz_delay != 0;
+	return __get_cpu_var(s390_idle).nohz_delay != 0;
 }
 
 #define arch_needs_cpu(cpu) s390_nohz_delay(cpu)
diff --git a/arch/s390/include/asm/dasd.h b/arch/s390/include/asm/dasd.h
index b604a9186f8e..0be28efe5b66 100644
--- a/arch/s390/include/asm/dasd.h
+++ b/arch/s390/include/asm/dasd.h
@@ -73,6 +73,7 @@ typedef struct dasd_information2_t {
  * 0x02: use diag discipline (diag)
  * 0x04: set the device initially online (internal use only)
  * 0x08: enable ERP related logging
+ * 0x20: give access to raw eckd data
  */
 #define DASD_FEATURE_DEFAULT	0x00
 #define DASD_FEATURE_READONLY	0x01
@@ -80,6 +81,8 @@ typedef struct dasd_information2_t {
 #define DASD_FEATURE_INITIAL_ONLINE	0x04
 #define DASD_FEATURE_ERPLOG		0x08
 #define DASD_FEATURE_FAILFAST		0x10
+#define DASD_FEATURE_FAILONSLCK		0x20
+#define DASD_FEATURE_USERAW		0x40
 
 #define DASD_PARTN_BITS 2
 
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index 96c14a9102b8..3c29be4836ed 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -4,20 +4,17 @@
 #ifndef __ASSEMBLY__
 
 extern void _mcount(void);
-extern unsigned long ftrace_dyn_func;
 
 struct dyn_arch_ftrace { };
 
 #define MCOUNT_ADDR ((long)_mcount)
 
 #ifdef CONFIG_64BIT
-#define MCOUNT_OFFSET_RET 18
-#define MCOUNT_INSN_SIZE  24
-#define MCOUNT_OFFSET	  14
-#else
-#define MCOUNT_OFFSET_RET 26
-#define MCOUNT_INSN_SIZE  30
+#define MCOUNT_INSN_SIZE  12
 #define MCOUNT_OFFSET	   8
+#else
+#define MCOUNT_INSN_SIZE  20
+#define MCOUNT_OFFSET	   4
 #endif
 
 static inline unsigned long ftrace_call_adjust(unsigned long addr)
diff --git a/arch/s390/include/asm/hardirq.h b/arch/s390/include/asm/hardirq.h
index 881d94590aeb..e4155d3eb2cb 100644
--- a/arch/s390/include/asm/hardirq.h
+++ b/arch/s390/include/asm/hardirq.h
@@ -21,20 +21,4 @@
 
 #define HARDIRQ_BITS	8
 
-void clock_comparator_work(void);
-
-static inline unsigned long long local_tick_disable(void)
-{
-	unsigned long long old;
-
-	old = S390_lowcore.clock_comparator;
-	S390_lowcore.clock_comparator = -1ULL;
-	return old;
-}
-
-static inline void local_tick_enable(unsigned long long comp)
-{
-	S390_lowcore.clock_comparator = comp;
-}
-
 #endif /* __ASM_HARDIRQ_H */
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index 7da991a858f8..db14a311f1d2 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -1,23 +1,33 @@
 #ifndef _ASM_IRQ_H
 #define _ASM_IRQ_H
 
-#ifdef __KERNEL__
 #include <linux/hardirq.h>
 
-/*
- * the definition of irqs has changed in 2.5.46:
- * NR_IRQS is no longer the number of i/o
- * interrupts (65536), but rather the number
- * of interrupt classes (2).
- * Only external and i/o interrupts make much sense here (CH).
- */
-
 enum interruption_class {
 	EXTERNAL_INTERRUPT,
 	IO_INTERRUPT,
-
+	EXTINT_CLK,
+	EXTINT_IPI,
+	EXTINT_TMR,
+	EXTINT_TLA,
+	EXTINT_PFL,
+	EXTINT_DSD,
+	EXTINT_VRT,
+	EXTINT_SCP,
+	EXTINT_IUC,
+	IOINT_QAI,
+	IOINT_QDI,
+	IOINT_DAS,
+	IOINT_C15,
+	IOINT_C70,
+	IOINT_TAP,
+	IOINT_VMR,
+	IOINT_LCS,
+	IOINT_CLW,
+	IOINT_CTC,
+	IOINT_APB,
+	NMI_NMI,
 	NR_IRQS,
 };
 
-#endif /* __KERNEL__ */
-#endif
+#endif /* _ASM_IRQ_H */
diff --git a/arch/s390/include/asm/kprobes.h b/arch/s390/include/asm/kprobes.h
index 330f68caffe4..a231a9439c4b 100644
--- a/arch/s390/include/asm/kprobes.h
+++ b/arch/s390/include/asm/kprobes.h
@@ -31,7 +31,6 @@
 #include <linux/ptrace.h>
 #include <linux/percpu.h>
 
-#define __ARCH_WANT_KPROBES_INSN_SLOT
 struct pt_regs;
 struct kprobe;
 
@@ -58,23 +57,12 @@ typedef u16 kprobe_opcode_t;
 /* Architecture specific copy of original instruction */
 struct arch_specific_insn {
 	/* copy of original instruction */
-	kprobe_opcode_t *insn;
-	int fixup;
-	int ilen;
-	int reg;
+	kprobe_opcode_t insn[MAX_INSN_SIZE];
 };
 
-struct ins_replace_args {
-	kprobe_opcode_t *ptr;
-	kprobe_opcode_t old;
-	kprobe_opcode_t new;
-};
 struct prev_kprobe {
 	struct kprobe *kp;
 	unsigned long status;
-	unsigned long saved_psw;
-	unsigned long kprobe_saved_imask;
-	unsigned long kprobe_saved_ctl[3];
 };
 
 /* per-cpu kprobe control block */
@@ -82,17 +70,13 @@ struct kprobe_ctlblk {
 	unsigned long kprobe_status;
 	unsigned long kprobe_saved_imask;
 	unsigned long kprobe_saved_ctl[3];
-	struct pt_regs jprobe_saved_regs;
-	unsigned long jprobe_saved_r14;
-	unsigned long jprobe_saved_r15;
 	struct prev_kprobe prev_kprobe;
+	struct pt_regs jprobe_saved_regs;
 	kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
 };
 
 void arch_remove_kprobe(struct kprobe *p);
 void kretprobe_trampoline(void);
-int is_prohibited_opcode(kprobe_opcode_t *instruction);
-void get_instruction_type(struct arch_specific_insn *ainsn);
 
 int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
 int kprobe_exceptions_notify(struct notifier_block *self,
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 8d6f87169577..bf3de04170a7 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -32,7 +32,6 @@ static inline void get_cpu_id(struct cpuid *ptr)
 }
 
 extern void s390_adjust_jiffies(void);
-extern void print_cpu_info(void);
 extern int get_cpu_capability(unsigned int *);
 
 /*
@@ -81,7 +80,8 @@ struct thread_struct {
 	mm_segment_t mm_segment;
 	unsigned long prot_addr;	/* address of protection-excep. */
 	unsigned int trap_no;
-	per_struct per_info;
+	struct per_regs per_user;	/* User specified PER registers */
+	struct per_event per_event;	/* Cause of the last PER trap */
 	/* pfault_wait is used to block the process on a pfault event */
 	unsigned long pfault_wait;
 };
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index d9d42b1e46fa..9ad628a8574a 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -331,10 +331,60 @@ struct pt_regs
 	unsigned short ilc;
 	unsigned short svcnr;
 };
+
+/*
+ * Program event recording (PER) register set.
+ */
+struct per_regs {
+	unsigned long control;		/* PER control bits */
+	unsigned long start;		/* PER starting address */
+	unsigned long end;		/* PER ending address */
+};
+
+/*
+ * PER event contains information about the cause of the last PER exception.
+ */
+struct per_event {
+	unsigned short cause;		/* PER code, ATMID and AI */
+	unsigned long address;		/* PER address */
+	unsigned char paid;		/* PER access identification */
+};
+
+/*
+ * Simplified per_info structure used to decode the ptrace user space ABI.
+ */
+struct per_struct_kernel {
+	unsigned long cr9;		/* PER control bits */
+	unsigned long cr10;		/* PER starting address */
+	unsigned long cr11;		/* PER ending address */
+	unsigned long bits;		/* Obsolete software bits */
+	unsigned long starting_addr;	/* User specified start address */
+	unsigned long ending_addr;	/* User specified end address */
+	unsigned short perc_atmid;	/* PER trap ATMID */
+	unsigned long address;		/* PER trap instruction address */
+	unsigned char access_id;	/* PER trap access identification */
+};
+
+#define PER_EVENT_MASK			0xE9000000UL
+
+#define PER_EVENT_BRANCH		0x80000000UL
+#define PER_EVENT_IFETCH		0x40000000UL
+#define PER_EVENT_STORE			0x20000000UL
+#define PER_EVENT_STORE_REAL		0x08000000UL
+#define PER_EVENT_NULLIFICATION		0x01000000UL
+
+#define PER_CONTROL_MASK		0x00a00000UL
+
+#define PER_CONTROL_BRANCH_ADDRESS	0x00800000UL
+#define PER_CONTROL_ALTERATION		0x00200000UL
+
 #endif
 
 /*
- * Now for the program event recording (trace) definitions.
+ * Now for the user space program event recording (trace) definitions.
+ * The following structures are used only for the ptrace interface, don't
+ * touch or even look at it if you don't want to modify the user-space
+ * ptrace interface. In particular stay away from it for in-kernel PER.
  */
 typedef struct
 {
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index 46e96bc1f5a1..350e7ee5952d 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -361,6 +361,7 @@ struct qdio_initialize {
 	qdio_handler_t *input_handler;
 	qdio_handler_t *output_handler;
 	void (*queue_start_poll) (struct ccw_device *, int, unsigned long);
+	int scan_threshold;
 	unsigned long int_parm;
 	void **input_sbal_addr_array;
 	void **output_sbal_addr_array;
diff --git a/arch/s390/include/asm/s390_ext.h b/arch/s390/include/asm/s390_ext.h
index 1a9307e70842..080876d5f196 100644
--- a/arch/s390/include/asm/s390_ext.h
+++ b/arch/s390/include/asm/s390_ext.h
@@ -1,32 +1,17 @@
1#ifndef _S390_EXTINT_H
2#define _S390_EXTINT_H
3
4/* 1/*
5 * include/asm-s390/s390_ext.h 2 * Copyright IBM Corp. 1999,2010
6 * 3 * Author(s): Holger Smolinski <Holger.Smolinski@de.ibm.com>,
7 * S390 version 4 * Martin Schwidefsky <schwidefsky@de.ibm.com>,
8 * Copyright IBM Corp. 1999,2007
9 * Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com),
10 * Martin Schwidefsky (schwidefsky@de.ibm.com)
11 */ 5 */
12 6
7#ifndef _S390_EXTINT_H
8#define _S390_EXTINT_H
9
13#include <linux/types.h> 10#include <linux/types.h>
14 11
15typedef void (*ext_int_handler_t)(unsigned int, unsigned int, unsigned long); 12typedef void (*ext_int_handler_t)(unsigned int, unsigned int, unsigned long);
16 13
17typedef struct ext_int_info_t {
18 struct ext_int_info_t *next;
19 ext_int_handler_t handler;
20 __u16 code;
21} ext_int_info_t;
22
23extern ext_int_info_t *ext_int_hash[];
24
25int register_external_interrupt(__u16 code, ext_int_handler_t handler); 14int register_external_interrupt(__u16 code, ext_int_handler_t handler);
26int register_early_external_interrupt(__u16 code, ext_int_handler_t handler,
27 ext_int_info_t *info);
28int unregister_external_interrupt(__u16 code, ext_int_handler_t handler); 15int unregister_external_interrupt(__u16 code, ext_int_handler_t handler);
29int unregister_early_external_interrupt(__u16 code, ext_int_handler_t handler,
30 ext_int_info_t *info);
31 16
32#endif 17#endif /* _S390_EXTINT_H */
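After this cleanup the header exposes only the handler type and the register/unregister pair. A user-space simulation of that interface, with local stand-ins for the kernel's registration machinery:

#include <stdio.h>

typedef unsigned short __u16;
typedef void (*ext_int_handler_t)(unsigned int, unsigned int, unsigned long);

/* Local stand-ins; the kernel keeps a hash of registered handlers. */
static __u16 registered_code;
static ext_int_handler_t registered_handler;

static int register_external_interrupt(__u16 code, ext_int_handler_t handler)
{
	registered_code = code;
	registered_handler = handler;
	return 0;
}

static int unregister_external_interrupt(__u16 code, ext_int_handler_t handler)
{
	if (registered_code != code || registered_handler != handler)
		return -1;
	registered_handler = NULL;
	return 0;
}

/* Example handler with the ext_int_handler_t signature. */
static void my_ext_handler(unsigned int ext_int_code,
			   unsigned int param32, unsigned long param64)
{
	printf("external interrupt %#x, params %#x/%#lx\n",
	       ext_int_code, param32, param64);
}

int main(void)
{
	register_external_interrupt(0x1234, my_ext_handler);
	if (registered_handler)
		registered_handler(0x1234, 0, 0);	/* simulate delivery */
	return unregister_external_interrupt(0x1234, my_ext_handler);
}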
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index edc03cb9cd79..045e009fc164 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -20,7 +20,6 @@ extern void machine_power_off_smp(void);
20 20
21extern int __cpu_disable (void); 21extern int __cpu_disable (void);
22extern void __cpu_die (unsigned int cpu); 22extern void __cpu_die (unsigned int cpu);
23extern void cpu_die (void) __attribute__ ((noreturn));
24extern int __cpu_up (unsigned int cpu); 23extern int __cpu_up (unsigned int cpu);
25 24
26extern struct mutex smp_cpu_state_mutex; 25extern struct mutex smp_cpu_state_mutex;
@@ -71,8 +70,10 @@ static inline void smp_switch_to_ipl_cpu(void (*func)(void *), void *data)
71 70
72#ifdef CONFIG_HOTPLUG_CPU 71#ifdef CONFIG_HOTPLUG_CPU
73extern int smp_rescan_cpus(void); 72extern int smp_rescan_cpus(void);
73extern void __noreturn cpu_die(void);
74#else 74#else
75static inline int smp_rescan_cpus(void) { return 0; } 75static inline int smp_rescan_cpus(void) { return 0; }
76static inline void cpu_die(void) { }
76#endif 77#endif
77 78
78#endif /* __ASM_SMP_H */ 79#endif /* __ASM_SMP_H */
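cpu_die() now lives under CONFIG_HOTPLUG_CPU with an empty inline fallback, so callers stay free of #ifdefs. The idiom in miniature; the exit() body is a stand-in for the real never-returning halt:

/* Compile with -DCONFIG_HOTPLUG_CPU to get the noreturn variant. */
#include <stdlib.h>

#ifdef CONFIG_HOTPLUG_CPU
static void __attribute__((noreturn)) cpu_die(void)
{
	exit(0);	/* in the kernel this never returns */
}
#else
static inline void cpu_die(void) { }	/* no hotplug: nothing to do */
#endif

int main(void)
{
	cpu_die();	/* caller needs no #ifdef of its own */
	return 0;
}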
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
index 3ad16dbf622e..6710b0eac165 100644
--- a/arch/s390/include/asm/system.h
+++ b/arch/s390/include/asm/system.h
@@ -20,6 +20,7 @@
20struct task_struct; 20struct task_struct;
21 21
22extern struct task_struct *__switch_to(void *, void *); 22extern struct task_struct *__switch_to(void *, void *);
23extern void update_per_regs(struct task_struct *task);
23 24
24static inline void save_fp_regs(s390_fp_regs *fpregs) 25static inline void save_fp_regs(s390_fp_regs *fpregs)
25{ 26{
@@ -93,6 +94,7 @@ static inline void restore_access_regs(unsigned int *acrs)
93 if (next->mm) { \ 94 if (next->mm) { \
94 restore_fp_regs(&next->thread.fp_regs); \ 95 restore_fp_regs(&next->thread.fp_regs); \
95 restore_access_regs(&next->thread.acrs[0]); \ 96 restore_access_regs(&next->thread.acrs[0]); \
97 update_per_regs(next); \
96 } \ 98 } \
97 prev = __switch_to(prev,next); \ 99 prev = __switch_to(prev,next); \
98} while (0) 100} while (0)
@@ -101,11 +103,9 @@ extern void account_vtime(struct task_struct *, struct task_struct *);
101extern void account_tick_vtime(struct task_struct *); 103extern void account_tick_vtime(struct task_struct *);
102 104
103#ifdef CONFIG_PFAULT 105#ifdef CONFIG_PFAULT
104extern void pfault_irq_init(void);
105extern int pfault_init(void); 106extern int pfault_init(void);
106extern void pfault_fini(void); 107extern void pfault_fini(void);
107#else /* CONFIG_PFAULT */ 108#else /* CONFIG_PFAULT */
108#define pfault_irq_init() do { } while (0)
109#define pfault_init() ({-1;}) 109#define pfault_init() ({-1;})
110#define pfault_fini() do { } while (0) 110#define pfault_fini() do { } while (0)
111#endif /* CONFIG_PFAULT */ 111#endif /* CONFIG_PFAULT */
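With pfault_irq_init() gone, the !CONFIG_PFAULT fallbacks are down to two macros. pfault_init() uses a GCC statement expression so it still yields a value; a compact illustration of the pattern:

#include <stdio.h>

#ifdef CONFIG_PFAULT
extern int pfault_init(void);
extern void pfault_fini(void);
#else
/* ({-1;}) is a GCC statement expression evaluating to -1, so
 * "if (pfault_init() < 0)" keeps working without the feature. */
#define pfault_init()	({-1;})
#define pfault_fini()	do { } while (0)
#endif

int main(void)
{
	if (pfault_init() < 0)
		printf("pseudo-page-fault support unavailable\n");
	pfault_fini();
	return 0;
}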
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 5baf0230b29b..ebc77091466f 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -74,7 +74,7 @@ struct thread_info {
74/* how to get the thread information struct from C */ 74/* how to get the thread information struct from C */
75static inline struct thread_info *current_thread_info(void) 75static inline struct thread_info *current_thread_info(void)
76{ 76{
77 return (struct thread_info *)(S390_lowcore.kernel_stack - THREAD_SIZE); 77 return (struct thread_info *) S390_lowcore.thread_info;
78} 78}
79 79
80#define THREAD_SIZE_ORDER THREAD_ORDER 80#define THREAD_SIZE_ORDER THREAD_ORDER
@@ -88,7 +88,7 @@ static inline struct thread_info *current_thread_info(void)
88#define TIF_SIGPENDING 2 /* signal pending */ 88#define TIF_SIGPENDING 2 /* signal pending */
89#define TIF_NEED_RESCHED 3 /* rescheduling necessary */ 89#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
90#define TIF_RESTART_SVC 4 /* restart svc with new svc number */ 90#define TIF_RESTART_SVC 4 /* restart svc with new svc number */
91#define TIF_SINGLE_STEP 6 /* deliver sigtrap on return to user */ 91#define TIF_PER_TRAP 6 /* deliver sigtrap on return to user */
92#define TIF_MCCK_PENDING 7 /* machine check handling is pending */ 92#define TIF_MCCK_PENDING 7 /* machine check handling is pending */
93#define TIF_SYSCALL_TRACE 8 /* syscall trace active */ 93#define TIF_SYSCALL_TRACE 8 /* syscall trace active */
94#define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */ 94#define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */
@@ -99,14 +99,15 @@ static inline struct thread_info *current_thread_info(void)
99#define TIF_31BIT 17 /* 32bit process */ 99#define TIF_31BIT 17 /* 32bit process */
100#define TIF_MEMDIE 18 /* is terminating due to OOM killer */ 100#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
101#define TIF_RESTORE_SIGMASK 19 /* restore signal mask in do_signal() */ 101#define TIF_RESTORE_SIGMASK 19 /* restore signal mask in do_signal() */
102#define TIF_FREEZE 20 /* thread is freezing for suspend */ 102#define TIF_SINGLE_STEP 20 /* This task is single stepped */
103#define TIF_FREEZE 21 /* thread is freezing for suspend */
103 104
104#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) 105#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
105#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) 106#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
106#define _TIF_SIGPENDING (1<<TIF_SIGPENDING) 107#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
107#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) 108#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
108#define _TIF_RESTART_SVC (1<<TIF_RESTART_SVC) 109#define _TIF_RESTART_SVC (1<<TIF_RESTART_SVC)
109#define _TIF_SINGLE_STEP (1<<TIF_SINGLE_STEP) 110#define _TIF_PER_TRAP (1<<TIF_PER_TRAP)
110#define _TIF_MCCK_PENDING (1<<TIF_MCCK_PENDING) 111#define _TIF_MCCK_PENDING (1<<TIF_MCCK_PENDING)
111#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) 112#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
112#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) 113#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
@@ -114,6 +115,7 @@ static inline struct thread_info *current_thread_info(void)
114#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) 115#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
115#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) 116#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
116#define _TIF_31BIT (1<<TIF_31BIT) 117#define _TIF_31BIT (1<<TIF_31BIT)
 118#define _TIF_SINGLE_STEP (1<<TIF_SINGLE_STEP)
117#define _TIF_FREEZE (1<<TIF_FREEZE) 119#define _TIF_FREEZE (1<<TIF_FREEZE)
118 120
119#endif /* __KERNEL__ */ 121#endif /* __KERNEL__ */
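The TIF_* values are bit numbers turned into _TIF_* masks; a small sketch of testing and clearing them the way the entry code does, with the values copied from this hunk and a plain unsigned long standing in for thread_info->flags:

#include <stdio.h>

#define TIF_PER_TRAP	6	/* deliver sigtrap on return to user */
#define TIF_SINGLE_STEP	20	/* this task is single stepped */

#define _TIF_PER_TRAP		(1 << TIF_PER_TRAP)
#define _TIF_SINGLE_STEP	(1 << TIF_SINGLE_STEP)

int main(void)
{
	unsigned long flags = 0;	/* stand-in for thread_info->flags */

	flags |= _TIF_SINGLE_STEP;	/* tracer requested single stepping */
	flags |= _TIF_PER_TRAP;		/* PER event hit, sigtrap pending */

	if (flags & _TIF_PER_TRAP) {
		printf("delivering SIGTRAP on return to user space\n");
		/* clear, like "ni __TI_flags+3(%r12),255-_TIF_PER_TRAP" */
		flags &= ~_TIF_PER_TRAP;
	}
	return flags == _TIF_SINGLE_STEP ? 0 : 1;
}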
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 09d345a701dc..88829a40af6f 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -11,6 +11,8 @@
11#ifndef _ASM_S390_TIMEX_H 11#ifndef _ASM_S390_TIMEX_H
12#define _ASM_S390_TIMEX_H 12#define _ASM_S390_TIMEX_H
13 13
14#include <asm/lowcore.h>
15
14/* The value of the TOD clock for 1.1.1970. */ 16/* The value of the TOD clock for 1.1.1970. */
15#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL 17#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
16 18
@@ -49,6 +51,24 @@ static inline void store_clock_comparator(__u64 *time)
49 asm volatile("stckc %0" : "=Q" (*time)); 51 asm volatile("stckc %0" : "=Q" (*time));
50} 52}
51 53
54void clock_comparator_work(void);
55
56static inline unsigned long long local_tick_disable(void)
57{
58 unsigned long long old;
59
60 old = S390_lowcore.clock_comparator;
61 S390_lowcore.clock_comparator = -1ULL;
62 set_clock_comparator(S390_lowcore.clock_comparator);
63 return old;
64}
65
66static inline void local_tick_enable(unsigned long long comp)
67{
68 S390_lowcore.clock_comparator = comp;
69 set_clock_comparator(S390_lowcore.clock_comparator);
70}
71
52#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */ 72#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
53 73
54typedef unsigned long long cycles_t; 74typedef unsigned long long cycles_t;
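local_tick_disable() parks the clock comparator at -1ULL so it never fires and returns the old value for later restore. A user-space model, with stubs standing in for lowcore and the sckc instruction:

#include <assert.h>

/* Stand-ins for lowcore and the clock comparator hardware. */
static unsigned long long hw_clock_comparator;
static struct { unsigned long long clock_comparator; } S390_lowcore;

static void set_clock_comparator(unsigned long long cc)
{
	hw_clock_comparator = cc;	/* models the sckc instruction */
}

static unsigned long long local_tick_disable(void)
{
	unsigned long long old;

	old = S390_lowcore.clock_comparator;
	S390_lowcore.clock_comparator = -1ULL;	/* comparator never fires */
	set_clock_comparator(S390_lowcore.clock_comparator);
	return old;
}

static void local_tick_enable(unsigned long long comp)
{
	S390_lowcore.clock_comparator = comp;
	set_clock_comparator(S390_lowcore.clock_comparator);
}

int main(void)
{
	unsigned long long saved;

	S390_lowcore.clock_comparator = 42;	/* pretend a tick is armed */
	saved = local_tick_disable();
	/* ... clock-sensitive work with ticks suppressed ... */
	local_tick_enable(saved);
	assert(hw_clock_comparator == 42);
	return 0;
}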
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 33982e7ce04d..fe03c140002a 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -23,14 +23,16 @@ int main(void)
23{ 23{
24 DEFINE(__THREAD_info, offsetof(struct task_struct, stack)); 24 DEFINE(__THREAD_info, offsetof(struct task_struct, stack));
25 DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp)); 25 DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp));
26 DEFINE(__THREAD_per, offsetof(struct task_struct, thread.per_info));
27 DEFINE(__THREAD_mm_segment, offsetof(struct task_struct, thread.mm_segment)); 26 DEFINE(__THREAD_mm_segment, offsetof(struct task_struct, thread.mm_segment));
28 BLANK(); 27 BLANK();
29 DEFINE(__TASK_pid, offsetof(struct task_struct, pid)); 28 DEFINE(__TASK_pid, offsetof(struct task_struct, pid));
30 BLANK(); 29 BLANK();
31 DEFINE(__PER_atmid, offsetof(per_struct, lowcore.words.perc_atmid)); 30 DEFINE(__THREAD_per_cause,
32 DEFINE(__PER_address, offsetof(per_struct, lowcore.words.address)); 31 offsetof(struct task_struct, thread.per_event.cause));
33 DEFINE(__PER_access_id, offsetof(per_struct, lowcore.words.access_id)); 32 DEFINE(__THREAD_per_address,
33 offsetof(struct task_struct, thread.per_event.address));
34 DEFINE(__THREAD_per_paid,
35 offsetof(struct task_struct, thread.per_event.paid));
34 BLANK(); 36 BLANK();
35 DEFINE(__TI_task, offsetof(struct thread_info, task)); 37 DEFINE(__TI_task, offsetof(struct thread_info, task));
36 DEFINE(__TI_domain, offsetof(struct thread_info, exec_domain)); 38 DEFINE(__TI_domain, offsetof(struct thread_info, exec_domain));
@@ -85,9 +87,9 @@ int main(void)
85 DEFINE(__LC_PGM_ILC, offsetof(struct _lowcore, pgm_ilc)); 87 DEFINE(__LC_PGM_ILC, offsetof(struct _lowcore, pgm_ilc));
86 DEFINE(__LC_PGM_INT_CODE, offsetof(struct _lowcore, pgm_code)); 88 DEFINE(__LC_PGM_INT_CODE, offsetof(struct _lowcore, pgm_code));
87 DEFINE(__LC_TRANS_EXC_CODE, offsetof(struct _lowcore, trans_exc_code)); 89 DEFINE(__LC_TRANS_EXC_CODE, offsetof(struct _lowcore, trans_exc_code));
88 DEFINE(__LC_PER_ATMID, offsetof(struct _lowcore, per_perc_atmid)); 90 DEFINE(__LC_PER_CAUSE, offsetof(struct _lowcore, per_perc_atmid));
89 DEFINE(__LC_PER_ADDRESS, offsetof(struct _lowcore, per_address)); 91 DEFINE(__LC_PER_ADDRESS, offsetof(struct _lowcore, per_address));
90 DEFINE(__LC_PER_ACCESS_ID, offsetof(struct _lowcore, per_access_id)); 92 DEFINE(__LC_PER_PAID, offsetof(struct _lowcore, per_access_id));
91 DEFINE(__LC_AR_MODE_ID, offsetof(struct _lowcore, ar_access_id)); 93 DEFINE(__LC_AR_MODE_ID, offsetof(struct _lowcore, ar_access_id));
92 DEFINE(__LC_SUBCHANNEL_ID, offsetof(struct _lowcore, subchannel_id)); 94 DEFINE(__LC_SUBCHANNEL_ID, offsetof(struct _lowcore, subchannel_id));
93 DEFINE(__LC_SUBCHANNEL_NR, offsetof(struct _lowcore, subchannel_nr)); 95 DEFINE(__LC_SUBCHANNEL_NR, offsetof(struct _lowcore, subchannel_nr));
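asm-offsets.c only exists to turn C struct layouts into assembler constants such as __THREAD_per_cause. A toy version of the mechanism; the printf-based DEFINE() is a stand-in for the kernel's kbuild machinery, and the offsets here are within per_event rather than task_struct:

#include <stddef.h>
#include <stdio.h>

/* Toy DEFINE(): emit name/value pairs that a build step could turn
 * into #defines consumed by the assembly in entry.S. */
#define DEFINE(sym, val) \
	printf("#define %s %zu\n", #sym, (size_t)(val))

struct per_event {			/* layout from the ptrace.h hunk */
	unsigned short cause;
	unsigned long address;
	unsigned char paid;
};

int main(void)
{
	DEFINE(__THREAD_per_cause, offsetof(struct per_event, cause));
	DEFINE(__THREAD_per_address, offsetof(struct per_event, address));
	DEFINE(__THREAD_per_paid, offsetof(struct per_event, paid));
	return 0;
}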
diff --git a/arch/s390/kernel/compat_ptrace.h b/arch/s390/kernel/compat_ptrace.h
index 3141025724f4..12b823833510 100644
--- a/arch/s390/kernel/compat_ptrace.h
+++ b/arch/s390/kernel/compat_ptrace.h
@@ -4,40 +4,19 @@
4#include <asm/ptrace.h> /* needed for NUM_CR_WORDS */ 4#include <asm/ptrace.h> /* needed for NUM_CR_WORDS */
5#include "compat_linux.h" /* needed for psw_compat_t */ 5#include "compat_linux.h" /* needed for psw_compat_t */
6 6
7typedef struct { 7struct compat_per_struct_kernel {
8 __u32 cr[NUM_CR_WORDS]; 8 __u32 cr9; /* PER control bits */
9} per_cr_words32; 9 __u32 cr10; /* PER starting address */
10 10 __u32 cr11; /* PER ending address */
11typedef struct { 11 __u32 bits; /* Obsolete software bits */
12 __u16 perc_atmid; /* 0x096 */ 12 __u32 starting_addr; /* User specified start address */
13 __u32 address; /* 0x098 */ 13 __u32 ending_addr; /* User specified end address */
14 __u8 access_id; /* 0x0a1 */ 14 __u16 perc_atmid; /* PER trap ATMID */
15} per_lowcore_words32; 15 __u32 address; /* PER trap instruction address */
16 16 __u8 access_id; /* PER trap access identification */
17typedef struct { 17};
18 union {
19 per_cr_words32 words;
20 } control_regs;
21 /*
22 * Use these flags instead of setting em_instruction_fetch
23 * directly they are used so that single stepping can be
24 * switched on & off while not affecting other tracing
25 */
26 unsigned single_step : 1;
27 unsigned instruction_fetch : 1;
28 unsigned : 30;
29 /*
30 * These addresses are copied into cr10 & cr11 if single
31 * stepping is switched off
32 */
33 __u32 starting_addr;
34 __u32 ending_addr;
35 union {
36 per_lowcore_words32 words;
37 } lowcore;
38} per_struct32;
39 18
40struct user_regs_struct32 19struct compat_user_regs_struct
41{ 20{
42 psw_compat_t psw; 21 psw_compat_t psw;
43 u32 gprs[NUM_GPRS]; 22 u32 gprs[NUM_GPRS];
@@ -50,14 +29,14 @@ struct user_regs_struct32
50 * itself as there is no "official" ptrace interface for hardware 29 * itself as there is no "official" ptrace interface for hardware
51 * watchpoints. This is the way intel does it. 30 * watchpoints. This is the way intel does it.
52 */ 31 */
53 per_struct32 per_info; 32 struct compat_per_struct_kernel per_info;
54 u32 ieee_instruction_pointer; /* obsolete, always 0 */ 33 u32 ieee_instruction_pointer; /* obsolete, always 0 */
55}; 34};
56 35
57struct user32 { 36struct compat_user {
58 /* We start with the registers, to mimic the way that "memory" 37 /* We start with the registers, to mimic the way that "memory"
59 is returned from the ptrace(3,...) function. */ 38 is returned from the ptrace(3,...) function. */
60 struct user_regs_struct32 regs; /* Where the registers are actually stored */ 39 struct compat_user_regs_struct regs;
61 /* The rest of this junk is to help gdb figure out what goes where */ 40 /* The rest of this junk is to help gdb figure out what goes where */
62 u32 u_tsize; /* Text segment size (pages). */ 41 u32 u_tsize; /* Text segment size (pages). */
63 u32 u_dsize; /* Data segment size (pages). */ 42 u32 u_dsize; /* Data segment size (pages). */
@@ -79,6 +58,6 @@ typedef struct
79 __u32 len; 58 __u32 len;
80 __u32 kernel_addr; 59 __u32 kernel_addr;
81 __u32 process_addr; 60 __u32 process_addr;
82} ptrace_area_emu31; 61} compat_ptrace_area;
83 62
84#endif /* _PTRACE32_H */ 63#endif /* _PTRACE32_H */
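compat_per_struct_kernel mirrors the new per_struct_kernel field for field at 32-bit width. A sketch of widening a compat image into the native layout; both layouts are copied from the hunks, the helper itself is hypothetical:

#include <stdint.h>
#include <stdio.h>

struct compat_per_struct_kernel {	/* 31-bit layout from this hunk */
	uint32_t cr9, cr10, cr11, bits;
	uint32_t starting_addr, ending_addr;
	uint16_t perc_atmid;
	uint32_t address;
	uint8_t access_id;
};

struct per_struct_kernel {		/* native layout from ptrace.h */
	unsigned long cr9, cr10, cr11, bits;
	unsigned long starting_addr, ending_addr;
	unsigned short perc_atmid;
	unsigned long address;
	unsigned char access_id;
};

/* Hypothetical helper: widen a compat image to the native struct. */
static void per_from_compat(struct per_struct_kernel *dst,
			    const struct compat_per_struct_kernel *src)
{
	dst->cr9 = src->cr9;
	dst->cr10 = src->cr10;
	dst->cr11 = src->cr11;
	dst->bits = src->bits;
	dst->starting_addr = src->starting_addr;
	dst->ending_addr = src->ending_addr;
	dst->perc_atmid = src->perc_atmid;
	dst->address = src->address;
	dst->access_id = src->access_id;
}

int main(void)
{
	struct compat_per_struct_kernel c = { .starting_addr = 0x1000 };
	struct per_struct_kernel k;

	per_from_compat(&k, &c);
	printf("start %#lx\n", k.starting_addr);
	return 0;
}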
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 1ecc337fb679..648f64239a9d 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -9,7 +9,6 @@
9 * Heiko Carstens <heiko.carstens@de.ibm.com> 9 * Heiko Carstens <heiko.carstens@de.ibm.com>
10 */ 10 */
11 11
12#include <linux/sys.h>
13#include <linux/linkage.h> 12#include <linux/linkage.h>
14#include <linux/init.h> 13#include <linux/init.h>
15#include <asm/cache.h> 14#include <asm/cache.h>
@@ -49,7 +48,7 @@ SP_SVCNR = STACK_FRAME_OVERHEAD + __PT_SVCNR
49SP_SIZE = STACK_FRAME_OVERHEAD + __PT_SIZE 48SP_SIZE = STACK_FRAME_OVERHEAD + __PT_SIZE
50 49
51_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ 50_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
52 _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_SINGLE_STEP ) 51 _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_PER_TRAP )
53_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ 52_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
54 _TIF_MCCK_PENDING) 53 _TIF_MCCK_PENDING)
55_TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \ 54_TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
@@ -110,31 +109,36 @@ STACK_SIZE = 1 << STACK_SHIFT
1101: stm %r10,%r11,\lc_sum 1091: stm %r10,%r11,\lc_sum
111 .endm 110 .endm
112 111
113 .macro SAVE_ALL_BASE savearea 112 .macro SAVE_ALL_SVC psworg,savearea
114 stm %r12,%r15,\savearea 113 stm %r12,%r15,\savearea
115 l %r13,__LC_SVC_NEW_PSW+4 # load &system_call to %r13 114 l %r13,__LC_SVC_NEW_PSW+4 # load &system_call to %r13
115 l %r15,__LC_KERNEL_STACK # problem state -> load ksp
116 s %r15,BASED(.Lc_spsize) # make room for registers & psw
116 .endm 117 .endm
117 118
118 .macro SAVE_ALL_SVC psworg,savearea 119 .macro SAVE_ALL_BASE savearea
119 la %r12,\psworg 120 stm %r12,%r15,\savearea
120 l %r15,__LC_KERNEL_STACK # problem state -> load ksp 121 l %r13,__LC_SVC_NEW_PSW+4 # load &system_call to %r13
121 .endm 122 .endm
122 123
123 .macro SAVE_ALL_SYNC psworg,savearea 124 .macro SAVE_ALL_PGM psworg,savearea
124 la %r12,\psworg
125 tm \psworg+1,0x01 # test problem state bit 125 tm \psworg+1,0x01 # test problem state bit
126 bz BASED(2f) # skip stack setup save
127 l %r15,__LC_KERNEL_STACK # problem state -> load ksp
128#ifdef CONFIG_CHECK_STACK 126#ifdef CONFIG_CHECK_STACK
129 b BASED(3f) 127 bnz BASED(1f)
1302: tml %r15,STACK_SIZE - CONFIG_STACK_GUARD 128 tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
131 bz BASED(stack_overflow) 129 bnz BASED(2f)
1323: 130 la %r12,\psworg
131 b BASED(stack_overflow)
132#else
133 bz BASED(2f)
133#endif 134#endif
1342: 1351: l %r15,__LC_KERNEL_STACK # problem state -> load ksp
1362: s %r15,BASED(.Lc_spsize) # make room for registers & psw
135 .endm 137 .endm
136 138
137 .macro SAVE_ALL_ASYNC psworg,savearea 139 .macro SAVE_ALL_ASYNC psworg,savearea
140 stm %r12,%r15,\savearea
141 l %r13,__LC_SVC_NEW_PSW+4 # load &system_call to %r13
138 la %r12,\psworg 142 la %r12,\psworg
139 tm \psworg+1,0x01 # test problem state bit 143 tm \psworg+1,0x01 # test problem state bit
140 bnz BASED(1f) # from user -> load async stack 144 bnz BASED(1f) # from user -> load async stack
@@ -149,27 +153,23 @@ STACK_SIZE = 1 << STACK_SHIFT
1490: l %r14,__LC_ASYNC_STACK # are we already on the async stack ? 1530: l %r14,__LC_ASYNC_STACK # are we already on the async stack ?
150 slr %r14,%r15 154 slr %r14,%r15
151 sra %r14,STACK_SHIFT 155 sra %r14,STACK_SHIFT
152 be BASED(2f)
1531: l %r15,__LC_ASYNC_STACK
154#ifdef CONFIG_CHECK_STACK 156#ifdef CONFIG_CHECK_STACK
155 b BASED(3f) 157 bnz BASED(1f)
1562: tml %r15,STACK_SIZE - CONFIG_STACK_GUARD 158 tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
157 bz BASED(stack_overflow) 159 bnz BASED(2f)
1583: 160 b BASED(stack_overflow)
161#else
162 bz BASED(2f)
159#endif 163#endif
1602: 1641: l %r15,__LC_ASYNC_STACK
1652: s %r15,BASED(.Lc_spsize) # make room for registers & psw
161 .endm 166 .endm
162 167
163 .macro CREATE_STACK_FRAME psworg,savearea 168 .macro CREATE_STACK_FRAME savearea
164 s %r15,BASED(.Lc_spsize) # make room for registers & psw 169 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
165 mvc SP_PSW(8,%r15),0(%r12) # move user PSW to stack
166 st %r2,SP_ORIG_R2(%r15) # store original content of gpr 2 170 st %r2,SP_ORIG_R2(%r15) # store original content of gpr 2
167 icm %r12,12,__LC_SVC_ILC
168 stm %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack
169 st %r12,SP_ILC(%r15)
170 mvc SP_R12(16,%r15),\savearea # move %r12-%r15 to stack 171 mvc SP_R12(16,%r15),\savearea # move %r12-%r15 to stack
171 la %r12,0 172 stm %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack
172 st %r12,__SF_BACKCHAIN(%r15) # clear back chain
173 .endm 173 .endm
174 174
175 .macro RESTORE_ALL psworg,sync 175 .macro RESTORE_ALL psworg,sync
@@ -188,6 +188,8 @@ STACK_SIZE = 1 << STACK_SHIFT
188 ssm __SF_EMPTY(%r15) 188 ssm __SF_EMPTY(%r15)
189 .endm 189 .endm
190 190
191 .section .kprobes.text, "ax"
192
191/* 193/*
192 * Scheduler resume function, called by switch_to 194 * Scheduler resume function, called by switch_to
193 * gpr2 = (task_struct *) prev 195 * gpr2 = (task_struct *) prev
@@ -198,31 +200,21 @@ STACK_SIZE = 1 << STACK_SHIFT
198 .globl __switch_to 200 .globl __switch_to
199__switch_to: 201__switch_to:
200 basr %r1,0 202 basr %r1,0
201__switch_to_base: 2030: l %r4,__THREAD_info(%r2) # get thread_info of prev
202 tm __THREAD_per(%r3),0xe8 # new process is using per ? 204 l %r5,__THREAD_info(%r3) # get thread_info of next
203 bz __switch_to_noper-__switch_to_base(%r1) # if not we're fine
204 stctl %c9,%c11,__SF_EMPTY(%r15) # We are using per stuff
205 clc __THREAD_per(12,%r3),__SF_EMPTY(%r15)
206 be __switch_to_noper-__switch_to_base(%r1) # we got away w/o bashing TLB's
207 lctl %c9,%c11,__THREAD_per(%r3) # Nope we didn't
208__switch_to_noper:
209 l %r4,__THREAD_info(%r2) # get thread_info of prev
210 tm __TI_flags+3(%r4),_TIF_MCCK_PENDING # machine check pending? 205 tm __TI_flags+3(%r4),_TIF_MCCK_PENDING # machine check pending?
211 bz __switch_to_no_mcck-__switch_to_base(%r1) 206 bz 1f-0b(%r1)
212 ni __TI_flags+3(%r4),255-_TIF_MCCK_PENDING # clear flag in prev 207 ni __TI_flags+3(%r4),255-_TIF_MCCK_PENDING # clear flag in prev
213 l %r4,__THREAD_info(%r3) # get thread_info of next 208 oi __TI_flags+3(%r5),_TIF_MCCK_PENDING # set it in next
214 oi __TI_flags+3(%r4),_TIF_MCCK_PENDING # set it in next 2091: stm %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
215__switch_to_no_mcck: 210 st %r15,__THREAD_ksp(%r2) # store kernel stack of prev
216 stm %r6,%r15,__SF_GPRS(%r15)# store __switch_to registers of prev task 211 l %r15,__THREAD_ksp(%r3) # load kernel stack of next
217 st %r15,__THREAD_ksp(%r2) # store kernel stack to prev->tss.ksp 212 lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
218 l %r15,__THREAD_ksp(%r3) # load kernel stack from next->tss.ksp 213 lm %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
219 lm %r6,%r15,__SF_GPRS(%r15)# load __switch_to registers of next task 214 st %r3,__LC_CURRENT # store task struct of next
220 st %r3,__LC_CURRENT # __LC_CURRENT = current task struct 215 st %r5,__LC_THREAD_INFO # store thread info of next
221 lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4 216 ahi %r5,STACK_SIZE # end of kernel stack of next
222 l %r3,__THREAD_info(%r3) # load thread_info from task struct 217 st %r5,__LC_KERNEL_STACK # store end of kernel stack
223 st %r3,__LC_THREAD_INFO
224 ahi %r3,STACK_SIZE
225 st %r3,__LC_KERNEL_STACK # __LC_KERNEL_STACK = new kernel stack
226 br %r14 218 br %r14
227 219
228__critical_start: 220__critical_start:
@@ -235,10 +227,11 @@ __critical_start:
235system_call: 227system_call:
236 stpt __LC_SYNC_ENTER_TIMER 228 stpt __LC_SYNC_ENTER_TIMER
237sysc_saveall: 229sysc_saveall:
238 SAVE_ALL_BASE __LC_SAVE_AREA
239 SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA 230 SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
240 CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA 231 CREATE_STACK_FRAME __LC_SAVE_AREA
241 lh %r7,0x8a # get svc number from lowcore 232 mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW
233 mvc SP_ILC(4,%r15),__LC_SVC_ILC
234 l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
242sysc_vtime: 235sysc_vtime:
243 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER 236 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
244sysc_stime: 237sysc_stime:
@@ -246,20 +239,20 @@ sysc_stime:
246sysc_update: 239sysc_update:
247 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 240 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
248sysc_do_svc: 241sysc_do_svc:
249 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct 242 xr %r7,%r7
250 ltr %r7,%r7 # test for svc 0 243 icm %r7,3,SP_SVCNR(%r15) # load svc number and test for svc 0
251 bnz BASED(sysc_nr_ok) # svc number > 0 244 bnz BASED(sysc_nr_ok) # svc number > 0
252 # svc 0: system call number in %r1 245 # svc 0: system call number in %r1
253 cl %r1,BASED(.Lnr_syscalls) 246 cl %r1,BASED(.Lnr_syscalls)
254 bnl BASED(sysc_nr_ok) 247 bnl BASED(sysc_nr_ok)
248 sth %r1,SP_SVCNR(%r15)
255 lr %r7,%r1 # copy svc number to %r7 249 lr %r7,%r1 # copy svc number to %r7
256sysc_nr_ok: 250sysc_nr_ok:
257 sth %r7,SP_SVCNR(%r15)
258 sll %r7,2 # svc number *4 251 sll %r7,2 # svc number *4
259 l %r8,BASED(.Lsysc_table) 252 l %r10,BASED(.Lsysc_table)
260 tm __TI_flags+2(%r9),_TIF_SYSCALL 253 tm __TI_flags+2(%r12),_TIF_SYSCALL
261 mvc SP_ARGS(4,%r15),SP_R7(%r15) 254 mvc SP_ARGS(4,%r15),SP_R7(%r15)
262 l %r8,0(%r7,%r8) # get system call addr. 255 l %r8,0(%r7,%r10) # get system call addr.
263 bnz BASED(sysc_tracesys) 256 bnz BASED(sysc_tracesys)
264 basr %r14,%r8 # call sys_xxxx 257 basr %r14,%r8 # call sys_xxxx
265 st %r2,SP_R2(%r15) # store return value (change R2 on stack) 258 st %r2,SP_R2(%r15) # store return value (change R2 on stack)
@@ -267,7 +260,7 @@ sysc_nr_ok:
267sysc_return: 260sysc_return:
268 LOCKDEP_SYS_EXIT 261 LOCKDEP_SYS_EXIT
269sysc_tif: 262sysc_tif:
270 tm __TI_flags+3(%r9),_TIF_WORK_SVC 263 tm __TI_flags+3(%r12),_TIF_WORK_SVC
271 bnz BASED(sysc_work) # there is work to do (signals etc.) 264 bnz BASED(sysc_work) # there is work to do (signals etc.)
272sysc_restore: 265sysc_restore:
273 RESTORE_ALL __LC_RETURN_PSW,1 266 RESTORE_ALL __LC_RETURN_PSW,1
@@ -284,17 +277,17 @@ sysc_work:
284# One of the work bits is on. Find out which one. 277# One of the work bits is on. Find out which one.
285# 278#
286sysc_work_tif: 279sysc_work_tif:
287 tm __TI_flags+3(%r9),_TIF_MCCK_PENDING 280 tm __TI_flags+3(%r12),_TIF_MCCK_PENDING
288 bo BASED(sysc_mcck_pending) 281 bo BASED(sysc_mcck_pending)
289 tm __TI_flags+3(%r9),_TIF_NEED_RESCHED 282 tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
290 bo BASED(sysc_reschedule) 283 bo BASED(sysc_reschedule)
291 tm __TI_flags+3(%r9),_TIF_SIGPENDING 284 tm __TI_flags+3(%r12),_TIF_SIGPENDING
292 bo BASED(sysc_sigpending) 285 bo BASED(sysc_sigpending)
293 tm __TI_flags+3(%r9),_TIF_NOTIFY_RESUME 286 tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME
294 bo BASED(sysc_notify_resume) 287 bo BASED(sysc_notify_resume)
295 tm __TI_flags+3(%r9),_TIF_RESTART_SVC 288 tm __TI_flags+3(%r12),_TIF_RESTART_SVC
296 bo BASED(sysc_restart) 289 bo BASED(sysc_restart)
297 tm __TI_flags+3(%r9),_TIF_SINGLE_STEP 290 tm __TI_flags+3(%r12),_TIF_PER_TRAP
298 bo BASED(sysc_singlestep) 291 bo BASED(sysc_singlestep)
299 b BASED(sysc_return) # beware of critical section cleanup 292 b BASED(sysc_return) # beware of critical section cleanup
300 293
@@ -318,13 +311,13 @@ sysc_mcck_pending:
318# _TIF_SIGPENDING is set, call do_signal 311# _TIF_SIGPENDING is set, call do_signal
319# 312#
320sysc_sigpending: 313sysc_sigpending:
321 ni __TI_flags+3(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP 314 ni __TI_flags+3(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
322 la %r2,SP_PTREGS(%r15) # load pt_regs 315 la %r2,SP_PTREGS(%r15) # load pt_regs
323 l %r1,BASED(.Ldo_signal) 316 l %r1,BASED(.Ldo_signal)
324 basr %r14,%r1 # call do_signal 317 basr %r14,%r1 # call do_signal
325 tm __TI_flags+3(%r9),_TIF_RESTART_SVC 318 tm __TI_flags+3(%r12),_TIF_RESTART_SVC
326 bo BASED(sysc_restart) 319 bo BASED(sysc_restart)
327 tm __TI_flags+3(%r9),_TIF_SINGLE_STEP 320 tm __TI_flags+3(%r12),_TIF_PER_TRAP
328 bo BASED(sysc_singlestep) 321 bo BASED(sysc_singlestep)
329 b BASED(sysc_return) 322 b BASED(sysc_return)
330 323
@@ -342,23 +335,23 @@ sysc_notify_resume:
342# _TIF_RESTART_SVC is set, set up registers and restart svc 335# _TIF_RESTART_SVC is set, set up registers and restart svc
343# 336#
344sysc_restart: 337sysc_restart:
345 ni __TI_flags+3(%r9),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC 338 ni __TI_flags+3(%r12),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC
346 l %r7,SP_R2(%r15) # load new svc number 339 l %r7,SP_R2(%r15) # load new svc number
347 mvc SP_R2(4,%r15),SP_ORIG_R2(%r15) # restore first argument 340 mvc SP_R2(4,%r15),SP_ORIG_R2(%r15) # restore first argument
348 lm %r2,%r6,SP_R2(%r15) # load svc arguments 341 lm %r2,%r6,SP_R2(%r15) # load svc arguments
342 sth %r7,SP_SVCNR(%r15)
349 b BASED(sysc_nr_ok) # restart svc 343 b BASED(sysc_nr_ok) # restart svc
350 344
351# 345#
352# _TIF_SINGLE_STEP is set, call do_single_step 346# _TIF_PER_TRAP is set, call do_per_trap
353# 347#
354sysc_singlestep: 348sysc_singlestep:
355 ni __TI_flags+3(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP 349 ni __TI_flags+3(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
356 mvi SP_SVCNR(%r15),0xff # set trap indication to pgm check 350 xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number
357 mvi SP_SVCNR+1(%r15),0xff
358 la %r2,SP_PTREGS(%r15) # address of register-save area 351 la %r2,SP_PTREGS(%r15) # address of register-save area
359 l %r1,BASED(.Lhandle_per) # load adr. of per handler 352 l %r1,BASED(.Lhandle_per) # load adr. of per handler
360 la %r14,BASED(sysc_return) # load adr. of system return 353 la %r14,BASED(sysc_return) # load adr. of system return
361 br %r1 # branch to do_single_step 354 br %r1 # branch to do_per_trap
362 355
363# 356#
364# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before 357# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
@@ -368,15 +361,15 @@ sysc_tracesys:
368 l %r1,BASED(.Ltrace_entry) 361 l %r1,BASED(.Ltrace_entry)
369 la %r2,SP_PTREGS(%r15) # load pt_regs 362 la %r2,SP_PTREGS(%r15) # load pt_regs
370 la %r3,0 363 la %r3,0
371 srl %r7,2 364 xr %r0,%r0
372 st %r7,SP_R2(%r15) 365 icm %r0,3,SP_SVCNR(%r15)
366 st %r0,SP_R2(%r15)
373 basr %r14,%r1 367 basr %r14,%r1
374 cl %r2,BASED(.Lnr_syscalls) 368 cl %r2,BASED(.Lnr_syscalls)
375 bnl BASED(sysc_tracenogo) 369 bnl BASED(sysc_tracenogo)
376 l %r8,BASED(.Lsysc_table)
377 lr %r7,%r2 370 lr %r7,%r2
378 sll %r7,2 # svc number *4 371 sll %r7,2 # svc number *4
379 l %r8,0(%r7,%r8) 372 l %r8,0(%r7,%r10)
380sysc_tracego: 373sysc_tracego:
381 lm %r3,%r6,SP_R3(%r15) 374 lm %r3,%r6,SP_R3(%r15)
382 mvc SP_ARGS(4,%r15),SP_R7(%r15) 375 mvc SP_ARGS(4,%r15),SP_R7(%r15)
@@ -384,7 +377,7 @@ sysc_tracego:
384 basr %r14,%r8 # call sys_xxx 377 basr %r14,%r8 # call sys_xxx
385 st %r2,SP_R2(%r15) # store return value 378 st %r2,SP_R2(%r15) # store return value
386sysc_tracenogo: 379sysc_tracenogo:
387 tm __TI_flags+2(%r9),_TIF_SYSCALL 380 tm __TI_flags+2(%r12),_TIF_SYSCALL
388 bz BASED(sysc_return) 381 bz BASED(sysc_return)
389 l %r1,BASED(.Ltrace_exit) 382 l %r1,BASED(.Ltrace_exit)
390 la %r2,SP_PTREGS(%r15) # load pt_regs 383 la %r2,SP_PTREGS(%r15) # load pt_regs
@@ -397,7 +390,7 @@ sysc_tracenogo:
397 .globl ret_from_fork 390 .globl ret_from_fork
398ret_from_fork: 391ret_from_fork:
399 l %r13,__LC_SVC_NEW_PSW+4 392 l %r13,__LC_SVC_NEW_PSW+4
400 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct 393 l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
401 tm SP_PSW+1(%r15),0x01 # forking a kernel thread ? 394 tm SP_PSW+1(%r15),0x01 # forking a kernel thread ?
402 bo BASED(0f) 395 bo BASED(0f)
403 st %r15,SP_R15(%r15) # store stack pointer for new kthread 396 st %r15,SP_R15(%r15) # store stack pointer for new kthread
@@ -432,8 +425,8 @@ kernel_execve:
4320: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts 4250: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts
433 l %r15,__LC_KERNEL_STACK # load ksp 426 l %r15,__LC_KERNEL_STACK # load ksp
434 s %r15,BASED(.Lc_spsize) # make room for registers & psw 427 s %r15,BASED(.Lc_spsize) # make room for registers & psw
435 l %r9,__LC_THREAD_INFO
436 mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs 428 mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs
429 l %r12,__LC_THREAD_INFO
437 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) 430 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
438 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts 431 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
439 l %r1,BASED(.Lexecve_tail) 432 l %r1,BASED(.Lexecve_tail)
@@ -463,26 +456,27 @@ pgm_check_handler:
463 SAVE_ALL_BASE __LC_SAVE_AREA 456 SAVE_ALL_BASE __LC_SAVE_AREA
464 tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception 457 tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
465 bnz BASED(pgm_per) # got per exception -> special case 458 bnz BASED(pgm_per) # got per exception -> special case
466 SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA 459 SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA
467 CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA 460 CREATE_STACK_FRAME __LC_SAVE_AREA
461 xc SP_ILC(4,%r15),SP_ILC(%r15)
462 mvc SP_PSW(8,%r15),__LC_PGM_OLD_PSW
463 l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
468 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 464 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
469 bz BASED(pgm_no_vtime) 465 bz BASED(pgm_no_vtime)
470 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER 466 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
471 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 467 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
472 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 468 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
473pgm_no_vtime: 469pgm_no_vtime:
474 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
475 l %r3,__LC_PGM_ILC # load program interruption code 470 l %r3,__LC_PGM_ILC # load program interruption code
476 l %r4,__LC_TRANS_EXC_CODE 471 l %r4,__LC_TRANS_EXC_CODE
477 REENABLE_IRQS 472 REENABLE_IRQS
478 la %r8,0x7f 473 la %r8,0x7f
479 nr %r8,%r3 474 nr %r8,%r3
480pgm_do_call:
481 l %r7,BASED(.Ljump_table)
482 sll %r8,2 475 sll %r8,2
483 l %r7,0(%r8,%r7) # load address of handler routine 476 l %r1,BASED(.Ljump_table)
477 l %r1,0(%r8,%r1) # load address of handler routine
484 la %r2,SP_PTREGS(%r15) # address of register-save area 478 la %r2,SP_PTREGS(%r15) # address of register-save area
485 basr %r14,%r7 # branch to interrupt-handler 479 basr %r14,%r1 # branch to interrupt-handler
486pgm_exit: 480pgm_exit:
487 b BASED(sysc_return) 481 b BASED(sysc_return)
488 482
@@ -503,33 +497,34 @@ pgm_per:
503# Normal per exception 497# Normal per exception
504# 498#
505pgm_per_std: 499pgm_per_std:
506 SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA 500 SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA
507 CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA 501 CREATE_STACK_FRAME __LC_SAVE_AREA
502 mvc SP_PSW(8,%r15),__LC_PGM_OLD_PSW
503 l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
508 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 504 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
509 bz BASED(pgm_no_vtime2) 505 bz BASED(pgm_no_vtime2)
510 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER 506 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
511 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 507 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
512 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 508 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
513pgm_no_vtime2: 509pgm_no_vtime2:
514 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct 510 l %r1,__TI_task(%r12)
515 l %r1,__TI_task(%r9)
516 tm SP_PSW+1(%r15),0x01 # kernel per event ? 511 tm SP_PSW+1(%r15),0x01 # kernel per event ?
517 bz BASED(kernel_per) 512 bz BASED(kernel_per)
518 mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID 513 mvc __THREAD_per_cause(2,%r1),__LC_PER_CAUSE
519 mvc __THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS 514 mvc __THREAD_per_address(4,%r1),__LC_PER_ADDRESS
520 mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID 515 mvc __THREAD_per_paid(1,%r1),__LC_PER_PAID
521 oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP 516 oi __TI_flags+3(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
522 l %r3,__LC_PGM_ILC # load program interruption code 517 l %r3,__LC_PGM_ILC # load program interruption code
523 l %r4,__LC_TRANS_EXC_CODE 518 l %r4,__LC_TRANS_EXC_CODE
524 REENABLE_IRQS 519 REENABLE_IRQS
525 la %r8,0x7f 520 la %r8,0x7f
526 nr %r8,%r3 # clear per-event-bit and ilc 521 nr %r8,%r3 # clear per-event-bit and ilc
527 be BASED(pgm_exit2) # only per or per+check ? 522 be BASED(pgm_exit2) # only per or per+check ?
528 l %r7,BASED(.Ljump_table)
529 sll %r8,2 523 sll %r8,2
530 l %r7,0(%r8,%r7) # load address of handler routine 524 l %r1,BASED(.Ljump_table)
525 l %r1,0(%r8,%r1) # load address of handler routine
531 la %r2,SP_PTREGS(%r15) # address of register-save area 526 la %r2,SP_PTREGS(%r15) # address of register-save area
532 basr %r14,%r7 # branch to interrupt-handler 527 basr %r14,%r1 # branch to interrupt-handler
533pgm_exit2: 528pgm_exit2:
534 b BASED(sysc_return) 529 b BASED(sysc_return)
535 530
@@ -537,18 +532,19 @@ pgm_exit2:
537# it was a single stepped SVC that is causing all the trouble 532# it was a single stepped SVC that is causing all the trouble
538# 533#
539pgm_svcper: 534pgm_svcper:
540 SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA 535 SAVE_ALL_PGM __LC_SVC_OLD_PSW,__LC_SAVE_AREA
541 CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA 536 CREATE_STACK_FRAME __LC_SAVE_AREA
537 mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW
538 mvc SP_ILC(4,%r15),__LC_SVC_ILC
539 l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
542 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER 540 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
543 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 541 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
544 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 542 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
545 lh %r7,0x8a # get svc number from lowcore 543 l %r8,__TI_task(%r12)
546 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct 544 mvc __THREAD_per_cause(2,%r8),__LC_PER_CAUSE
547 l %r8,__TI_task(%r9) 545 mvc __THREAD_per_address(4,%r8),__LC_PER_ADDRESS
548 mvc __THREAD_per+__PER_atmid(2,%r8),__LC_PER_ATMID 546 mvc __THREAD_per_paid(1,%r8),__LC_PER_PAID
549 mvc __THREAD_per+__PER_address(4,%r8),__LC_PER_ADDRESS 547 oi __TI_flags+3(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
550 mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID
551 oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
552 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts 548 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
553 lm %r2,%r6,SP_R2(%r15) # load svc arguments 549 lm %r2,%r6,SP_R2(%r15) # load svc arguments
554 b BASED(sysc_do_svc) 550 b BASED(sysc_do_svc)
@@ -558,8 +554,7 @@ pgm_svcper:
558# 554#
559kernel_per: 555kernel_per:
560 REENABLE_IRQS 556 REENABLE_IRQS
561 mvi SP_SVCNR(%r15),0xff # set trap indication to pgm check 557 xc SP_SVCNR(2,%r15),SP_SVCNR(%r15)
562 mvi SP_SVCNR+1(%r15),0xff
563 la %r2,SP_PTREGS(%r15) # address of register-save area 558 la %r2,SP_PTREGS(%r15) # address of register-save area
564 l %r1,BASED(.Lhandle_per) # load adr. of per handler 559 l %r1,BASED(.Lhandle_per) # load adr. of per handler
565 basr %r14,%r1 # branch to do_single_step 560 basr %r14,%r1 # branch to do_single_step
@@ -573,9 +568,10 @@ kernel_per:
573io_int_handler: 568io_int_handler:
574 stck __LC_INT_CLOCK 569 stck __LC_INT_CLOCK
575 stpt __LC_ASYNC_ENTER_TIMER 570 stpt __LC_ASYNC_ENTER_TIMER
576 SAVE_ALL_BASE __LC_SAVE_AREA+16
577 SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16 571 SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
578 CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+16 572 CREATE_STACK_FRAME __LC_SAVE_AREA+16
573 mvc SP_PSW(8,%r15),0(%r12) # move user PSW to stack
574 l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
579 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 575 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
580 bz BASED(io_no_vtime) 576 bz BASED(io_no_vtime)
581 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER 577 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
@@ -583,7 +579,6 @@ io_int_handler:
583 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER 579 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
584io_no_vtime: 580io_no_vtime:
585 TRACE_IRQS_OFF 581 TRACE_IRQS_OFF
586 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
587 l %r1,BASED(.Ldo_IRQ) # load address of do_IRQ 582 l %r1,BASED(.Ldo_IRQ) # load address of do_IRQ
588 la %r2,SP_PTREGS(%r15) # address of register-save area 583 la %r2,SP_PTREGS(%r15) # address of register-save area
589 basr %r14,%r1 # branch to standard irq handler 584 basr %r14,%r1 # branch to standard irq handler
@@ -591,7 +586,7 @@ io_return:
591 LOCKDEP_SYS_EXIT 586 LOCKDEP_SYS_EXIT
592 TRACE_IRQS_ON 587 TRACE_IRQS_ON
593io_tif: 588io_tif:
594 tm __TI_flags+3(%r9),_TIF_WORK_INT 589 tm __TI_flags+3(%r12),_TIF_WORK_INT
595 bnz BASED(io_work) # there is work to do (signals etc.) 590 bnz BASED(io_work) # there is work to do (signals etc.)
596io_restore: 591io_restore:
597 RESTORE_ALL __LC_RETURN_PSW,0 592 RESTORE_ALL __LC_RETURN_PSW,0
@@ -609,9 +604,9 @@ io_work:
609 bo BASED(io_work_user) # yes -> do resched & signal 604 bo BASED(io_work_user) # yes -> do resched & signal
610#ifdef CONFIG_PREEMPT 605#ifdef CONFIG_PREEMPT
611 # check for preemptive scheduling 606 # check for preemptive scheduling
612 icm %r0,15,__TI_precount(%r9) 607 icm %r0,15,__TI_precount(%r12)
613 bnz BASED(io_restore) # preemption disabled 608 bnz BASED(io_restore) # preemption disabled
614 tm __TI_flags+3(%r9),_TIF_NEED_RESCHED 609 tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
615 bno BASED(io_restore) 610 bno BASED(io_restore)
616 # switch to kernel stack 611 # switch to kernel stack
617 l %r1,SP_R15(%r15) 612 l %r1,SP_R15(%r15)
@@ -645,13 +640,13 @@ io_work_user:
645# and _TIF_MCCK_PENDING 640# and _TIF_MCCK_PENDING
646# 641#
647io_work_tif: 642io_work_tif:
648 tm __TI_flags+3(%r9),_TIF_MCCK_PENDING 643 tm __TI_flags+3(%r12),_TIF_MCCK_PENDING
649 bo BASED(io_mcck_pending) 644 bo BASED(io_mcck_pending)
650 tm __TI_flags+3(%r9),_TIF_NEED_RESCHED 645 tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
651 bo BASED(io_reschedule) 646 bo BASED(io_reschedule)
652 tm __TI_flags+3(%r9),_TIF_SIGPENDING 647 tm __TI_flags+3(%r12),_TIF_SIGPENDING
653 bo BASED(io_sigpending) 648 bo BASED(io_sigpending)
654 tm __TI_flags+3(%r9),_TIF_NOTIFY_RESUME 649 tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME
655 bo BASED(io_notify_resume) 650 bo BASED(io_notify_resume)
656 b BASED(io_return) # beware of critical section cleanup 651 b BASED(io_return) # beware of critical section cleanup
657 652
@@ -711,16 +706,16 @@ io_notify_resume:
711ext_int_handler: 706ext_int_handler:
712 stck __LC_INT_CLOCK 707 stck __LC_INT_CLOCK
713 stpt __LC_ASYNC_ENTER_TIMER 708 stpt __LC_ASYNC_ENTER_TIMER
714 SAVE_ALL_BASE __LC_SAVE_AREA+16
715 SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16 709 SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
716 CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16 710 CREATE_STACK_FRAME __LC_SAVE_AREA+16
711 mvc SP_PSW(8,%r15),0(%r12) # move user PSW to stack
712 l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
717 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 713 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
718 bz BASED(ext_no_vtime) 714 bz BASED(ext_no_vtime)
719 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER 715 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
720 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 716 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
721 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER 717 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
722ext_no_vtime: 718ext_no_vtime:
723 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
724 TRACE_IRQS_OFF 719 TRACE_IRQS_OFF
725 la %r2,SP_PTREGS(%r15) # address of register-save area 720 la %r2,SP_PTREGS(%r15) # address of register-save area
726 l %r3,__LC_CPU_ADDRESS # get cpu address + interruption code 721 l %r3,__LC_CPU_ADDRESS # get cpu address + interruption code
@@ -775,7 +770,10 @@ mcck_int_main:
775 sra %r14,PAGE_SHIFT 770 sra %r14,PAGE_SHIFT
776 be BASED(0f) 771 be BASED(0f)
777 l %r15,__LC_PANIC_STACK # load panic stack 772 l %r15,__LC_PANIC_STACK # load panic stack
7780: CREATE_STACK_FRAME __LC_MCK_OLD_PSW,__LC_SAVE_AREA+32 7730: s %r15,BASED(.Lc_spsize) # make room for registers & psw
774 CREATE_STACK_FRAME __LC_SAVE_AREA+32
775 mvc SP_PSW(8,%r15),0(%r12)
776 l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
779 tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid? 777 tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid?
780 bno BASED(mcck_no_vtime) # no -> skip cleanup critical 778 bno BASED(mcck_no_vtime) # no -> skip cleanup critical
781 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 779 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
@@ -784,7 +782,6 @@ mcck_int_main:
784 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 782 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
785 mvc __LC_LAST_UPDATE_TIMER(8),__LC_MCCK_ENTER_TIMER 783 mvc __LC_LAST_UPDATE_TIMER(8),__LC_MCCK_ENTER_TIMER
786mcck_no_vtime: 784mcck_no_vtime:
787 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
788 la %r2,SP_PTREGS(%r15) # load pt_regs 785 la %r2,SP_PTREGS(%r15) # load pt_regs
789 l %r1,BASED(.Ls390_mcck) 786 l %r1,BASED(.Ls390_mcck)
790 basr %r14,%r1 # call machine check handler 787 basr %r14,%r1 # call machine check handler
@@ -796,7 +793,7 @@ mcck_no_vtime:
796 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain 793 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
797 lr %r15,%r1 794 lr %r15,%r1
798 stosm __SF_EMPTY(%r15),0x04 # turn dat on 795 stosm __SF_EMPTY(%r15),0x04 # turn dat on
799 tm __TI_flags+3(%r9),_TIF_MCCK_PENDING 796 tm __TI_flags+3(%r12),_TIF_MCCK_PENDING
800 bno BASED(mcck_return) 797 bno BASED(mcck_return)
801 TRACE_IRQS_OFF 798 TRACE_IRQS_OFF
802 l %r1,BASED(.Ls390_handle_mcck) 799 l %r1,BASED(.Ls390_handle_mcck)
@@ -861,6 +858,8 @@ restart_crash:
861restart_go: 858restart_go:
862#endif 859#endif
863 860
861 .section .kprobes.text, "ax"
862
864#ifdef CONFIG_CHECK_STACK 863#ifdef CONFIG_CHECK_STACK
865/* 864/*
866 * The synchronous or the asynchronous stack overflowed. We are dead. 865 * The synchronous or the asynchronous stack overflowed. We are dead.
@@ -943,12 +942,13 @@ cleanup_system_call:
943 bh BASED(0f) 942 bh BASED(0f)
944 mvc __LC_SAVE_AREA(16),0(%r12) 943 mvc __LC_SAVE_AREA(16),0(%r12)
9450: st %r13,4(%r12) 9440: st %r13,4(%r12)
946 st %r12,__LC_SAVE_AREA+48 # argh 945 l %r15,__LC_KERNEL_STACK # problem state -> load ksp
947 SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA 946 s %r15,BASED(.Lc_spsize) # make room for registers & psw
948 CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
949 l %r12,__LC_SAVE_AREA+48 # argh
950 st %r15,12(%r12) 947 st %r15,12(%r12)
951 lh %r7,0x8a 948 CREATE_STACK_FRAME __LC_SAVE_AREA
949 mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW
950 mvc SP_ILC(4,%r15),__LC_SVC_ILC
951 mvc 0(4,%r12),__LC_THREAD_INFO
952cleanup_vtime: 952cleanup_vtime:
953 clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+12) 953 clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+12)
954 bhe BASED(cleanup_stime) 954 bhe BASED(cleanup_stime)
@@ -1046,7 +1046,7 @@ cleanup_io_restore_insn:
1046.Ldo_signal: .long do_signal 1046.Ldo_signal: .long do_signal
1047.Ldo_notify_resume: 1047.Ldo_notify_resume:
1048 .long do_notify_resume 1048 .long do_notify_resume
1049.Lhandle_per: .long do_single_step 1049.Lhandle_per: .long do_per_trap
1050.Ldo_execve: .long do_execve 1050.Ldo_execve: .long do_execve
1051.Lexecve_tail: .long execve_tail 1051.Lexecve_tail: .long execve_tail
1052.Ljump_table: .long pgm_check_table 1052.Ljump_table: .long pgm_check_table
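The rewritten __switch_to no longer juggles the PER control registers itself (update_per_regs() handles that from the switch_to macro) and simply hands the MCCK-pending bit from prev to next. A C-level sketch of that flow, for orientation only; all types are stand-ins and the real register and stack switching is the stm/lm/lctl sequence above:

#include <stdio.h>

#define TIF_MCCK_PENDING (1UL << 7)	/* bit number from thread_info.h */

struct thread_info { unsigned long flags; };
struct task { struct thread_info ti; };

static void switch_to_sketch(struct task *prev, struct task *next)
{
	/* Hand a pending machine check over to the task about to run. */
	if (prev->ti.flags & TIF_MCCK_PENDING) {
		prev->ti.flags &= ~TIF_MCCK_PENDING;
		next->ti.flags |= TIF_MCCK_PENDING;
	}
	/* The assembly then saves prev's gprs and kernel stack pointer,
	 * loads next's, puts next's pid into control register 4 and
	 * refreshes lowcore's current / thread_info / kernel stack end. */
}

int main(void)
{
	struct task a = { { TIF_MCCK_PENDING } }, b = { { 0 } };

	switch_to_sketch(&a, &b);
	printf("prev flags %#lx, next flags %#lx\n", a.ti.flags, b.ti.flags);
	return 0;
}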
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index 95c1dfc4ef31..17a6f83a2d67 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -12,7 +12,7 @@ pgm_check_handler_t do_dat_exception;
12 12
13extern int sysctl_userprocess_debug; 13extern int sysctl_userprocess_debug;
14 14
15void do_single_step(struct pt_regs *regs); 15void do_per_trap(struct pt_regs *regs);
16void syscall_trace(struct pt_regs *regs, int entryexit); 16void syscall_trace(struct pt_regs *regs, int entryexit);
17void kernel_stack_overflow(struct pt_regs * regs); 17void kernel_stack_overflow(struct pt_regs * regs);
18void do_signal(struct pt_regs *regs); 18void do_signal(struct pt_regs *regs);
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 8f3e802174db..9d3603d6c511 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -51,7 +51,7 @@ STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
51STACK_SIZE = 1 << STACK_SHIFT 51STACK_SIZE = 1 << STACK_SHIFT
52 52
53_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ 53_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
54 _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_SINGLE_STEP ) 54 _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_PER_TRAP )
55_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ 55_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
56 _TIF_MCCK_PENDING) 56 _TIF_MCCK_PENDING)
57_TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \ 57_TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
@@ -197,6 +197,8 @@ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
197 ssm __SF_EMPTY(%r15) 197 ssm __SF_EMPTY(%r15)
198 .endm 198 .endm
199 199
200 .section .kprobes.text, "ax"
201
200/* 202/*
201 * Scheduler resume function, called by switch_to 203 * Scheduler resume function, called by switch_to
202 * gpr2 = (task_struct *) prev 204 * gpr2 = (task_struct *) prev
@@ -206,30 +208,21 @@ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
206 */ 208 */
207 .globl __switch_to 209 .globl __switch_to
208__switch_to: 210__switch_to:
209 tm __THREAD_per+4(%r3),0xe8 # is the new process using per ? 211 lg %r4,__THREAD_info(%r2) # get thread_info of prev
210 jz __switch_to_noper # if not we're fine 212 lg %r5,__THREAD_info(%r3) # get thread_info of next
211 stctg %c9,%c11,__SF_EMPTY(%r15)# We are using per stuff
212 clc __THREAD_per(24,%r3),__SF_EMPTY(%r15)
213 je __switch_to_noper # we got away without bashing TLB's
214 lctlg %c9,%c11,__THREAD_per(%r3) # Nope we didn't
215__switch_to_noper:
216 lg %r4,__THREAD_info(%r2) # get thread_info of prev
217 tm __TI_flags+7(%r4),_TIF_MCCK_PENDING # machine check pending? 213 tm __TI_flags+7(%r4),_TIF_MCCK_PENDING # machine check pending?
218 jz __switch_to_no_mcck 214 jz 0f
219 ni __TI_flags+7(%r4),255-_TIF_MCCK_PENDING # clear flag in prev 215 ni __TI_flags+7(%r4),255-_TIF_MCCK_PENDING # clear flag in prev
220 lg %r4,__THREAD_info(%r3) # get thread_info of next 216 oi __TI_flags+7(%r5),_TIF_MCCK_PENDING # set it in next
221 oi __TI_flags+7(%r4),_TIF_MCCK_PENDING # set it in next 2170: stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
222__switch_to_no_mcck: 218 stg %r15,__THREAD_ksp(%r2) # store kernel stack of prev
223 stmg %r6,%r15,__SF_GPRS(%r15)# store __switch_to registers of prev task 219 lg %r15,__THREAD_ksp(%r3) # load kernel stack of next
224 stg %r15,__THREAD_ksp(%r2) # store kernel stack to prev->tss.ksp 220 lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
225 lg %r15,__THREAD_ksp(%r3) # load kernel stack from next->tss.ksp 221 lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
226 lmg %r6,%r15,__SF_GPRS(%r15)# load __switch_to registers of next task 222 stg %r3,__LC_CURRENT # store task struct of next
227 stg %r3,__LC_CURRENT # __LC_CURRENT = current task struct 223 stg %r5,__LC_THREAD_INFO # store thread info of next
228 lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4 224 aghi %r5,STACK_SIZE # end of kernel stack of next
229 lg %r3,__THREAD_info(%r3) # load thread_info from task struct 225 stg %r5,__LC_KERNEL_STACK # store end of kernel stack
230 stg %r3,__LC_THREAD_INFO
231 aghi %r3,STACK_SIZE
232 stg %r3,__LC_KERNEL_STACK # __LC_KERNEL_STACK = new kernel stack
233 br %r14 226 br %r14
234 227
235__critical_start: 228__critical_start:
@@ -309,7 +302,7 @@ sysc_work_tif:
309 jo sysc_notify_resume 302 jo sysc_notify_resume
310 tm __TI_flags+7(%r12),_TIF_RESTART_SVC 303 tm __TI_flags+7(%r12),_TIF_RESTART_SVC
311 jo sysc_restart 304 jo sysc_restart
312 tm __TI_flags+7(%r12),_TIF_SINGLE_STEP 305 tm __TI_flags+7(%r12),_TIF_PER_TRAP
313 jo sysc_singlestep 306 jo sysc_singlestep
314 j sysc_return # beware of critical section cleanup 307 j sysc_return # beware of critical section cleanup
315 308
@@ -331,12 +324,12 @@ sysc_mcck_pending:
331# _TIF_SIGPENDING is set, call do_signal 324# _TIF_SIGPENDING is set, call do_signal
332# 325#
333sysc_sigpending: 326sysc_sigpending:
334 ni __TI_flags+7(%r12),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP 327 ni __TI_flags+7(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
335 la %r2,SP_PTREGS(%r15) # load pt_regs 328 la %r2,SP_PTREGS(%r15) # load pt_regs
336 brasl %r14,do_signal # call do_signal 329 brasl %r14,do_signal # call do_signal
337 tm __TI_flags+7(%r12),_TIF_RESTART_SVC 330 tm __TI_flags+7(%r12),_TIF_RESTART_SVC
338 jo sysc_restart 331 jo sysc_restart
339 tm __TI_flags+7(%r12),_TIF_SINGLE_STEP 332 tm __TI_flags+7(%r12),_TIF_PER_TRAP
340 jo sysc_singlestep 333 jo sysc_singlestep
341 j sysc_return 334 j sysc_return
342 335
@@ -361,14 +354,14 @@ sysc_restart:
361 j sysc_nr_ok # restart svc 354 j sysc_nr_ok # restart svc
362 355
363# 356#
364# _TIF_SINGLE_STEP is set, call do_single_step 357# _TIF_PER_TRAP is set, call do_per_trap
365# 358#
366sysc_singlestep: 359sysc_singlestep:
367 ni __TI_flags+7(%r12),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP 360 ni __TI_flags+7(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
368 xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number 361 xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number
369 la %r2,SP_PTREGS(%r15) # address of register-save area 362 la %r2,SP_PTREGS(%r15) # address of register-save area
370 larl %r14,sysc_return # load adr. of system return 363 larl %r14,sysc_return # load adr. of system return
371 jg do_single_step # branch to do_sigtrap 364 jg do_per_trap
372 365
373# 366#
374# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before 367# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
@@ -524,10 +517,10 @@ pgm_no_vtime2:
524 lg %r1,__TI_task(%r12) 517 lg %r1,__TI_task(%r12)
525 tm SP_PSW+1(%r15),0x01 # kernel per event ? 518 tm SP_PSW+1(%r15),0x01 # kernel per event ?
526 jz kernel_per 519 jz kernel_per
527 mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID 520 mvc __THREAD_per_cause(2,%r1),__LC_PER_CAUSE
528 mvc __THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS 521 mvc __THREAD_per_address(8,%r1),__LC_PER_ADDRESS
529 mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID 522 mvc __THREAD_per_paid(1,%r1),__LC_PER_PAID
530 oi __TI_flags+7(%r12),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP 523 oi __TI_flags+7(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
531 lgf %r3,__LC_PGM_ILC # load program interruption code 524 lgf %r3,__LC_PGM_ILC # load program interruption code
532 lg %r4,__LC_TRANS_EXC_CODE 525 lg %r4,__LC_TRANS_EXC_CODE
533 REENABLE_IRQS 526 REENABLE_IRQS
@@ -556,10 +549,10 @@ pgm_svcper:
556 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 549 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
557 LAST_BREAK 550 LAST_BREAK
558 lg %r8,__TI_task(%r12) 551 lg %r8,__TI_task(%r12)
559 mvc __THREAD_per+__PER_atmid(2,%r8),__LC_PER_ATMID 552 mvc __THREAD_per_cause(2,%r8),__LC_PER_CAUSE
560 mvc __THREAD_per+__PER_address(8,%r8),__LC_PER_ADDRESS 553 mvc __THREAD_per_address(8,%r8),__LC_PER_ADDRESS
561 mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID 554 mvc __THREAD_per_paid(1,%r8),__LC_PER_PAID
562 oi __TI_flags+7(%r12),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP 555 oi __TI_flags+7(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
563 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts 556 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
564 lmg %r2,%r6,SP_R2(%r15) # load svc arguments 557 lmg %r2,%r6,SP_R2(%r15) # load svc arguments
565 j sysc_do_svc 558 j sysc_do_svc
@@ -571,7 +564,7 @@ kernel_per:
571 REENABLE_IRQS 564 REENABLE_IRQS
572 xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number 565 xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number
573 la %r2,SP_PTREGS(%r15) # address of register-save area 566 la %r2,SP_PTREGS(%r15) # address of register-save area
574 brasl %r14,do_single_step 567 brasl %r14,do_per_trap
575 j pgm_exit 568 j pgm_exit
576 569
577/* 570/*
@@ -868,6 +861,8 @@ restart_crash:
868restart_go: 861restart_go:
869#endif 862#endif
870 863
864 .section .kprobes.text, "ax"
865
871#ifdef CONFIG_CHECK_STACK 866#ifdef CONFIG_CHECK_STACK
872/* 867/*
873 * The synchronous or the asynchronous stack overflowed. We are dead. 868 * The synchronous or the asynchronous stack overflowed. We are dead.
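Reviewer note: in the pgm_no_vtime2 and pgm_svcper hunks above, the three mvc instructions copy the program-event-recording (PER) data that the hardware leaves in the lowcore into the task's thread structure before _TIF_PER_TRAP is raised. A minimal C sketch of the equivalent copy; the struct layouts here are illustrative stand-ins, not the exact kernel definitions:

    /* Illustrative mirrors of the fields touched by the mvc sequence. */
    struct per_event {
        unsigned short cause;      /* 2 bytes from __LC_PER_CAUSE */
        unsigned long address;     /* 8 bytes from __LC_PER_ADDRESS */
        unsigned char paid;        /* 1 byte from __LC_PER_PAID */
    };

    struct lowcore_per {           /* what the hardware deposited */
        unsigned short per_cause;
        unsigned long per_address;
        unsigned char per_paid;
    };

    /* C equivalent of the three mvc instructions above. */
    static void copy_per_info(struct per_event *dst, const struct lowcore_per *lc)
    {
        dst->cause = lc->per_cause;
        dst->address = lc->per_address;
        dst->paid = lc->per_paid;
    }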
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 6a83d0581317..78bdf0e5dff7 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -4,7 +4,7 @@
4 * Copyright IBM Corp. 2009 4 * Copyright IBM Corp. 2009
5 * 5 *
6 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>, 6 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
7 * 7 * Martin Schwidefsky <schwidefsky@de.ibm.com>
8 */ 8 */
9 9
10#include <linux/hardirq.h> 10#include <linux/hardirq.h>
@@ -12,176 +12,144 @@
12#include <linux/ftrace.h> 12#include <linux/ftrace.h>
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/types.h> 14#include <linux/types.h>
15#include <linux/kprobes.h>
15#include <trace/syscall.h> 16#include <trace/syscall.h>
16#include <asm/asm-offsets.h> 17#include <asm/asm-offsets.h>
17 18
19#ifdef CONFIG_64BIT
20#define MCOUNT_OFFSET_RET 12
21#else
22#define MCOUNT_OFFSET_RET 22
23#endif
24
18#ifdef CONFIG_DYNAMIC_FTRACE 25#ifdef CONFIG_DYNAMIC_FTRACE
19 26
20void ftrace_disable_code(void); 27void ftrace_disable_code(void);
21void ftrace_disable_return(void); 28void ftrace_enable_insn(void);
22void ftrace_call_code(void);
23void ftrace_nop_code(void);
24
25#define FTRACE_INSN_SIZE 4
26 29
27#ifdef CONFIG_64BIT 30#ifdef CONFIG_64BIT
28 31/*
32 * The 64-bit mcount code looks like this:
33 * stg %r14,8(%r15) # offset 0
34 * > larl %r1,<&counter> # offset 6
35 * > brasl %r14,_mcount # offset 12
36 * lg %r14,8(%r15) # offset 18
37 * Total length is 24 bytes. The middle two instructions of the mcount
38 * block get overwritten by ftrace_make_nop / ftrace_make_call.
39 * The 64-bit enabled ftrace code block looks like this:
40 * stg %r14,8(%r15) # offset 0
41 * > lg %r1,__LC_FTRACE_FUNC # offset 6
42 * > lgr %r0,%r0 # offset 12
43 * > basr %r14,%r1 # offset 16
 44 * lg %r14,8(%r15) # offset 18
45 * The return points of the mcount/ftrace function have the same offset 18.
46 * The 64-bit disable ftrace code block looks like this:
47 * stg %r14,8(%r15) # offset 0
48 * > jg .+18 # offset 6
49 * > lgr %r0,%r0 # offset 12
50 * > basr %r14,%r1 # offset 16
 51 * lg %r14,8(%r15) # offset 18
52 * The jg instruction branches to offset 24 to skip as many instructions
53 * as possible.
54 */
29asm( 55asm(
30 " .align 4\n" 56 " .align 4\n"
31 "ftrace_disable_code:\n" 57 "ftrace_disable_code:\n"
32 " j 0f\n" 58 " jg 0f\n"
33 " .word 0x0024\n"
34 " lg %r1,"__stringify(__LC_FTRACE_FUNC)"\n"
35 " basr %r14,%r1\n"
36 "ftrace_disable_return:\n"
37 " lg %r14,8(15)\n"
38 " lgr %r0,%r0\n" 59 " lgr %r0,%r0\n"
39 "0:\n"); 60 " basr %r14,%r1\n"
40 61 "0:\n"
41asm(
42 " .align 4\n" 62 " .align 4\n"
43 "ftrace_nop_code:\n" 63 "ftrace_enable_insn:\n"
44 " j .+"__stringify(MCOUNT_INSN_SIZE)"\n"); 64 " lg %r1,"__stringify(__LC_FTRACE_FUNC)"\n");
45 65
46asm( 66#define FTRACE_INSN_SIZE 6
47 " .align 4\n"
48 "ftrace_call_code:\n"
49 " stg %r14,8(%r15)\n");
50 67
51#else /* CONFIG_64BIT */ 68#else /* CONFIG_64BIT */
52 69/*
70 * The 31-bit mcount code looks like this:
71 * st %r14,4(%r15) # offset 0
72 * > bras %r1,0f # offset 4
73 * > .long _mcount # offset 8
74 * > .long <&counter> # offset 12
75 * > 0: l %r14,0(%r1) # offset 16
76 * > l %r1,4(%r1) # offset 20
77 * basr %r14,%r14 # offset 24
78 * l %r14,4(%r15) # offset 26
79 * Total length is 30 bytes. The twenty bytes starting from offset 4
80 * to offset 24 get overwritten by ftrace_make_nop / ftrace_make_call.
81 * The 31-bit enabled ftrace code block looks like this:
82 * st %r14,4(%r15) # offset 0
83 * > l %r14,__LC_FTRACE_FUNC # offset 4
84 * > j 0f # offset 8
85 * > .fill 12,1,0x07 # offset 12
86 * 0: basr %r14,%r14 # offset 24
 87 * l %r14,4(%r15) # offset 26
88 * The return points of the mcount/ftrace function have the same offset 26.
89 * The 31-bit disabled ftrace code block looks like this:
90 * st %r14,4(%r15) # offset 0
91 * > j .+26 # offset 4
92 * > j 0f # offset 8
93 * > .fill 12,1,0x07 # offset 12
94 * 0: basr %r14,%r14 # offset 24
 95 * l %r14,4(%r15) # offset 26
96 * The j instruction branches to offset 30 to skip as many instructions
97 * as possible.
98 */
53asm( 99asm(
54 " .align 4\n" 100 " .align 4\n"
55 "ftrace_disable_code:\n" 101 "ftrace_disable_code:\n"
102 " j 1f\n"
56 " j 0f\n" 103 " j 0f\n"
57 " l %r1,"__stringify(__LC_FTRACE_FUNC)"\n" 104 " .fill 12,1,0x07\n"
58 " basr %r14,%r1\n" 105 "0: basr %r14,%r14\n"
59 "ftrace_disable_return:\n" 106 "1:\n"
60 " l %r14,4(%r15)\n"
61 " j 0f\n"
62 " bcr 0,%r7\n"
63 " bcr 0,%r7\n"
64 " bcr 0,%r7\n"
65 " bcr 0,%r7\n"
66 " bcr 0,%r7\n"
67 " bcr 0,%r7\n"
68 "0:\n");
69
70asm(
71 " .align 4\n" 107 " .align 4\n"
72 "ftrace_nop_code:\n" 108 "ftrace_enable_insn:\n"
73 " j .+"__stringify(MCOUNT_INSN_SIZE)"\n"); 109 " l %r14,"__stringify(__LC_FTRACE_FUNC)"\n");
74 110
75asm( 111#define FTRACE_INSN_SIZE 4
76 " .align 4\n"
77 "ftrace_call_code:\n"
78 " st %r14,4(%r15)\n");
79 112
80#endif /* CONFIG_64BIT */ 113#endif /* CONFIG_64BIT */
81 114
82static int ftrace_modify_code(unsigned long ip,
83 void *old_code, int old_size,
84 void *new_code, int new_size)
85{
86 unsigned char replaced[MCOUNT_INSN_SIZE];
87
88 /*
89 * Note: Due to modules code can disappear and change.
90 * We need to protect against faulting as well as code
91 * changing. We do this by using the probe_kernel_*
92 * functions.
93 * This however is just a simple sanity check.
94 */
95 if (probe_kernel_read(replaced, (void *)ip, old_size))
96 return -EFAULT;
97 if (memcmp(replaced, old_code, old_size) != 0)
98 return -EINVAL;
99 if (probe_kernel_write((void *)ip, new_code, new_size))
100 return -EPERM;
101 return 0;
102}
103
104static int ftrace_make_initial_nop(struct module *mod, struct dyn_ftrace *rec,
105 unsigned long addr)
106{
107 return ftrace_modify_code(rec->ip,
108 ftrace_call_code, FTRACE_INSN_SIZE,
109 ftrace_disable_code, MCOUNT_INSN_SIZE);
110}
111 115
112int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, 116int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
113 unsigned long addr) 117 unsigned long addr)
114{ 118{
115 if (addr == MCOUNT_ADDR) 119 if (probe_kernel_write((void *) rec->ip, ftrace_disable_code,
116 return ftrace_make_initial_nop(mod, rec, addr); 120 MCOUNT_INSN_SIZE))
117 return ftrace_modify_code(rec->ip, 121 return -EPERM;
118 ftrace_call_code, FTRACE_INSN_SIZE, 122 return 0;
119 ftrace_nop_code, FTRACE_INSN_SIZE);
120} 123}
121 124
122int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) 125int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
123{ 126{
124 return ftrace_modify_code(rec->ip, 127 if (probe_kernel_write((void *) rec->ip, ftrace_enable_insn,
125 ftrace_nop_code, FTRACE_INSN_SIZE, 128 FTRACE_INSN_SIZE))
126 ftrace_call_code, FTRACE_INSN_SIZE); 129 return -EPERM;
130 return 0;
127} 131}
128 132
129int ftrace_update_ftrace_func(ftrace_func_t func) 133int ftrace_update_ftrace_func(ftrace_func_t func)
130{ 134{
131 ftrace_dyn_func = (unsigned long)func;
132 return 0; 135 return 0;
133} 136}
134 137
135int __init ftrace_dyn_arch_init(void *data) 138int __init ftrace_dyn_arch_init(void *data)
136{ 139{
137 *(unsigned long *)data = 0; 140 *(unsigned long *) data = 0;
138 return 0; 141 return 0;
139} 142}
140 143
141#endif /* CONFIG_DYNAMIC_FTRACE */ 144#endif /* CONFIG_DYNAMIC_FTRACE */
142 145
143#ifdef CONFIG_FUNCTION_GRAPH_TRACER 146#ifdef CONFIG_FUNCTION_GRAPH_TRACER
144#ifdef CONFIG_DYNAMIC_FTRACE
145/*
146 * Patch the kernel code at ftrace_graph_caller location:
147 * The instruction there is branch relative on condition. The condition mask
148 * is either all ones (always branch aka disable ftrace_graph_caller) or all
149 * zeroes (nop aka enable ftrace_graph_caller).
150 * Instruction format for brc is a7m4xxxx where m is the condition mask.
151 */
152int ftrace_enable_ftrace_graph_caller(void)
153{
154 unsigned short opcode = 0xa704;
155
156 return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
157}
158
159int ftrace_disable_ftrace_graph_caller(void)
160{
161 unsigned short opcode = 0xa7f4;
162
163 return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
164}
165
166static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
167{
168 return addr - (ftrace_disable_return - ftrace_disable_code);
169}
170
171#else /* CONFIG_DYNAMIC_FTRACE */
172
173static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
174{
175 return addr - MCOUNT_OFFSET_RET;
176}
177
178#endif /* CONFIG_DYNAMIC_FTRACE */
179
180/* 147/*
181 * Hook the return address and push it in the stack of return addresses 148 * Hook the return address and push it in the stack of return addresses
182 * in current thread info. 149 * in current thread info.
183 */ 150 */
184unsigned long prepare_ftrace_return(unsigned long ip, unsigned long parent) 151unsigned long __kprobes prepare_ftrace_return(unsigned long parent,
152 unsigned long ip)
185{ 153{
186 struct ftrace_graph_ent trace; 154 struct ftrace_graph_ent trace;
187 155
@@ -189,14 +157,42 @@ unsigned long prepare_ftrace_return(unsigned long ip, unsigned long parent)
189 goto out; 157 goto out;
190 if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY) 158 if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
191 goto out; 159 goto out;
192 trace.func = ftrace_mcount_call_adjust(ip) & PSW_ADDR_INSN; 160 trace.func = (ip & PSW_ADDR_INSN) - MCOUNT_OFFSET_RET;
193 /* Only trace if the calling function expects to. */ 161 /* Only trace if the calling function expects to. */
194 if (!ftrace_graph_entry(&trace)) { 162 if (!ftrace_graph_entry(&trace)) {
195 current->curr_ret_stack--; 163 current->curr_ret_stack--;
196 goto out; 164 goto out;
197 } 165 }
198 parent = (unsigned long)return_to_handler; 166 parent = (unsigned long) return_to_handler;
199out: 167out:
200 return parent; 168 return parent;
201} 169}
170
171#ifdef CONFIG_DYNAMIC_FTRACE
172/*
173 * Patch the kernel code at ftrace_graph_caller location. The instruction
174 * there is branch relative and save to prepare_ftrace_return. To disable
175 * the call to prepare_ftrace_return we patch the bras offset to point
176 * directly after the instructions. To enable the call we calculate
177 * the original offset to prepare_ftrace_return and put it back.
178 */
179int ftrace_enable_ftrace_graph_caller(void)
180{
181 unsigned short offset;
182
183 offset = ((void *) prepare_ftrace_return -
184 (void *) ftrace_graph_caller) / 2;
185 return probe_kernel_write(ftrace_graph_caller + 2,
186 &offset, sizeof(offset));
187}
188
189int ftrace_disable_ftrace_graph_caller(void)
190{
191 static unsigned short offset = 0x0002;
192
193 return probe_kernel_write(ftrace_graph_caller + 2,
194 &offset, sizeof(offset));
195}
196
197#endif /* CONFIG_DYNAMIC_FTRACE */
202#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 198#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
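Reviewer note: the new ftrace_enable_ftrace_graph_caller / ftrace_disable_ftrace_graph_caller above patch only the 16-bit immediate of the bras instruction: bras encodes its target as a signed halfword count relative to the instruction address, stored at byte offset 2. Disabling writes 0x0002, i.e. 2 halfwords = 4 bytes, which is the address right after the 4-byte bras. A compile-only sketch of the arithmetic; write_insn_halfword() is a hypothetical stand-in for probe_kernel_write():

    #include <stdint.h>

    extern int write_insn_halfword(void *addr, uint16_t val); /* hypothetical */
    extern char ftrace_graph_caller[];        /* address of the bras */
    extern void prepare_ftrace_return(void);

    int graph_caller_enable(void)
    {
        /* signed halfword distance from the bras to its target */
        uint16_t offset = (uint16_t)(((unsigned long) prepare_ftrace_return -
                                      (unsigned long) ftrace_graph_caller) / 2);
        return write_insn_halfword(ftrace_graph_caller + 2, offset);
    }

    int graph_caller_disable(void)
    {
        /* 2 halfwords = 4 bytes: branch to the insn after the bras */
        return write_insn_halfword(ftrace_graph_caller + 2, 0x0002);
    }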
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 026a37a94fc9..ea5099c9709c 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -1,7 +1,5 @@
1/* 1/*
2 * arch/s390/kernel/irq.c 2 * Copyright IBM Corp. 2004,2010
3 *
4 * Copyright IBM Corp. 2004,2007
5 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), 3 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
6 * Thomas Spatzier (tspat@de.ibm.com) 4 * Thomas Spatzier (tspat@de.ibm.com)
7 * 5 *
@@ -17,12 +15,42 @@
17#include <linux/proc_fs.h> 15#include <linux/proc_fs.h>
18#include <linux/profile.h> 16#include <linux/profile.h>
19 17
18struct irq_class {
19 char *name;
20 char *desc;
21};
22
23static const struct irq_class intrclass_names[] = {
24 {.name = "EXT" },
25 {.name = "I/O" },
26 {.name = "CLK", .desc = "[EXT] Clock Comparator" },
27 {.name = "IPI", .desc = "[EXT] Signal Processor" },
28 {.name = "TMR", .desc = "[EXT] CPU Timer" },
29 {.name = "TAL", .desc = "[EXT] Timing Alert" },
30 {.name = "PFL", .desc = "[EXT] Pseudo Page Fault" },
31 {.name = "DSD", .desc = "[EXT] DASD Diag" },
32 {.name = "VRT", .desc = "[EXT] Virtio" },
33 {.name = "SCP", .desc = "[EXT] Service Call" },
34 {.name = "IUC", .desc = "[EXT] IUCV" },
35 {.name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt" },
36 {.name = "QDI", .desc = "[I/O] QDIO Interrupt" },
37 {.name = "DAS", .desc = "[I/O] DASD" },
38 {.name = "C15", .desc = "[I/O] 3215" },
39 {.name = "C70", .desc = "[I/O] 3270" },
40 {.name = "TAP", .desc = "[I/O] Tape" },
41 {.name = "VMR", .desc = "[I/O] Unit Record Devices" },
42 {.name = "LCS", .desc = "[I/O] LCS" },
43 {.name = "CLW", .desc = "[I/O] CLAW" },
44 {.name = "CTC", .desc = "[I/O] CTC" },
45 {.name = "APB", .desc = "[I/O] AP Bus" },
46 {.name = "NMI", .desc = "[NMI] Machine Check" },
47};
48
20/* 49/*
21 * show_interrupts is needed by /proc/interrupts. 50 * show_interrupts is needed by /proc/interrupts.
22 */ 51 */
23int show_interrupts(struct seq_file *p, void *v) 52int show_interrupts(struct seq_file *p, void *v)
24{ 53{
25 static const char *intrclass_names[] = { "EXT", "I/O", };
26 int i = *(loff_t *) v, j; 54 int i = *(loff_t *) v, j;
27 55
28 get_online_cpus(); 56 get_online_cpus();
@@ -34,15 +62,16 @@ int show_interrupts(struct seq_file *p, void *v)
34 } 62 }
35 63
36 if (i < NR_IRQS) { 64 if (i < NR_IRQS) {
37 seq_printf(p, "%s: ", intrclass_names[i]); 65 seq_printf(p, "%s: ", intrclass_names[i].name);
38#ifndef CONFIG_SMP 66#ifndef CONFIG_SMP
39 seq_printf(p, "%10u ", kstat_irqs(i)); 67 seq_printf(p, "%10u ", kstat_irqs(i));
40#else 68#else
41 for_each_online_cpu(j) 69 for_each_online_cpu(j)
42 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); 70 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
43#endif 71#endif
72 if (intrclass_names[i].desc)
73 seq_printf(p, " %s", intrclass_names[i].desc);
44 seq_putc(p, '\n'); 74 seq_putc(p, '\n');
45
46 } 75 }
47 put_online_cpus(); 76 put_online_cpus();
48 return 0; 77 return 0;
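Reviewer note: the rewritten show_interrupts() above drives /proc/interrupts from the intrclass_names[] table, printing the long description only when one was supplied. The same name/optional-desc pattern in a self-contained form, with a made-up counter array in place of kstat:

    #include <stdio.h>

    struct irq_class {
        const char *name;
        const char *desc;    /* optional, NULL means none */
    };

    static const struct irq_class classes[] = {
        { .name = "EXT" },
        { .name = "I/O" },
        { .name = "CLK", .desc = "[EXT] Clock Comparator" },
    };

    int main(void)
    {
        unsigned int fake_count[] = { 12, 34, 56 };    /* stand-in for kstat */
        unsigned int i;

        for (i = 0; i < sizeof(classes) / sizeof(classes[0]); i++) {
            printf("%s: %10u", classes[i].name, fake_count[i]);
            if (classes[i].desc)    /* same test as the new seq_printf code */
                printf("  %s", classes[i].desc);
            putchar('\n');
        }
        return 0;
    }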
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 2564793ec2b6..1d05d669107c 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -32,34 +32,14 @@
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/hardirq.h> 33#include <linux/hardirq.h>
34 34
35DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; 35DEFINE_PER_CPU(struct kprobe *, current_kprobe);
36DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); 36DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
37 37
38struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}}; 38struct kretprobe_blackpoint kretprobe_blacklist[] = { };
39 39
40int __kprobes arch_prepare_kprobe(struct kprobe *p) 40static int __kprobes is_prohibited_opcode(kprobe_opcode_t *insn)
41{
42 /* Make sure the probe isn't going on a difficult instruction */
43 if (is_prohibited_opcode((kprobe_opcode_t *) p->addr))
44 return -EINVAL;
45
46 if ((unsigned long)p->addr & 0x01)
47 return -EINVAL;
48
49 /* Use the get_insn_slot() facility for correctness */
50 if (!(p->ainsn.insn = get_insn_slot()))
51 return -ENOMEM;
52
53 memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
54
55 get_instruction_type(&p->ainsn);
56 p->opcode = *p->addr;
57 return 0;
58}
59
60int __kprobes is_prohibited_opcode(kprobe_opcode_t *instruction)
61{ 41{
62 switch (*(__u8 *) instruction) { 42 switch (insn[0] >> 8) {
63 case 0x0c: /* bassm */ 43 case 0x0c: /* bassm */
64 case 0x0b: /* bsm */ 44 case 0x0b: /* bsm */
65 case 0x83: /* diag */ 45 case 0x83: /* diag */
@@ -68,7 +48,7 @@ int __kprobes is_prohibited_opcode(kprobe_opcode_t *instruction)
68 case 0xad: /* stosm */ 48 case 0xad: /* stosm */
69 return -EINVAL; 49 return -EINVAL;
70 } 50 }
71 switch (*(__u16 *) instruction) { 51 switch (insn[0]) {
72 case 0x0101: /* pr */ 52 case 0x0101: /* pr */
73 case 0xb25a: /* bsa */ 53 case 0xb25a: /* bsa */
74 case 0xb240: /* bakr */ 54 case 0xb240: /* bakr */
@@ -81,93 +61,92 @@ int __kprobes is_prohibited_opcode(kprobe_opcode_t *instruction)
81 return 0; 61 return 0;
82} 62}
83 63
84void __kprobes get_instruction_type(struct arch_specific_insn *ainsn) 64static int __kprobes get_fixup_type(kprobe_opcode_t *insn)
85{ 65{
86 /* default fixup method */ 66 /* default fixup method */
87 ainsn->fixup = FIXUP_PSW_NORMAL; 67 int fixup = FIXUP_PSW_NORMAL;
88
89 /* save r1 operand */
90 ainsn->reg = (*ainsn->insn & 0xf0) >> 4;
91 68
92 /* save the instruction length (pop 5-5) in bytes */ 69 switch (insn[0] >> 8) {
93 switch (*(__u8 *) (ainsn->insn) >> 6) {
94 case 0:
95 ainsn->ilen = 2;
96 break;
97 case 1:
98 case 2:
99 ainsn->ilen = 4;
100 break;
101 case 3:
102 ainsn->ilen = 6;
103 break;
104 }
105
106 switch (*(__u8 *) ainsn->insn) {
107 case 0x05: /* balr */ 70 case 0x05: /* balr */
108 case 0x0d: /* basr */ 71 case 0x0d: /* basr */
109 ainsn->fixup = FIXUP_RETURN_REGISTER; 72 fixup = FIXUP_RETURN_REGISTER;
110 /* if r2 = 0, no branch will be taken */ 73 /* if r2 = 0, no branch will be taken */
111 if ((*ainsn->insn & 0x0f) == 0) 74 if ((insn[0] & 0x0f) == 0)
112 ainsn->fixup |= FIXUP_BRANCH_NOT_TAKEN; 75 fixup |= FIXUP_BRANCH_NOT_TAKEN;
113 break; 76 break;
114 case 0x06: /* bctr */ 77 case 0x06: /* bctr */
115 case 0x07: /* bcr */ 78 case 0x07: /* bcr */
116 ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN; 79 fixup = FIXUP_BRANCH_NOT_TAKEN;
117 break; 80 break;
118 case 0x45: /* bal */ 81 case 0x45: /* bal */
119 case 0x4d: /* bas */ 82 case 0x4d: /* bas */
120 ainsn->fixup = FIXUP_RETURN_REGISTER; 83 fixup = FIXUP_RETURN_REGISTER;
121 break; 84 break;
122 case 0x47: /* bc */ 85 case 0x47: /* bc */
123 case 0x46: /* bct */ 86 case 0x46: /* bct */
124 case 0x86: /* bxh */ 87 case 0x86: /* bxh */
125 case 0x87: /* bxle */ 88 case 0x87: /* bxle */
126 ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN; 89 fixup = FIXUP_BRANCH_NOT_TAKEN;
127 break; 90 break;
128 case 0x82: /* lpsw */ 91 case 0x82: /* lpsw */
129 ainsn->fixup = FIXUP_NOT_REQUIRED; 92 fixup = FIXUP_NOT_REQUIRED;
130 break; 93 break;
131 case 0xb2: /* lpswe */ 94 case 0xb2: /* lpswe */
132 if (*(((__u8 *) ainsn->insn) + 1) == 0xb2) { 95 if ((insn[0] & 0xff) == 0xb2)
133 ainsn->fixup = FIXUP_NOT_REQUIRED; 96 fixup = FIXUP_NOT_REQUIRED;
134 }
135 break; 97 break;
136 case 0xa7: /* bras */ 98 case 0xa7: /* bras */
137 if ((*ainsn->insn & 0x0f) == 0x05) { 99 if ((insn[0] & 0x0f) == 0x05)
138 ainsn->fixup |= FIXUP_RETURN_REGISTER; 100 fixup |= FIXUP_RETURN_REGISTER;
139 }
140 break; 101 break;
141 case 0xc0: 102 case 0xc0:
142 if ((*ainsn->insn & 0x0f) == 0x00 /* larl */ 103 if ((insn[0] & 0x0f) == 0x00 || /* larl */
143 || (*ainsn->insn & 0x0f) == 0x05) /* brasl */ 104 (insn[0] & 0x0f) == 0x05) /* brasl */
144 ainsn->fixup |= FIXUP_RETURN_REGISTER; 105 fixup |= FIXUP_RETURN_REGISTER;
145 break; 106 break;
146 case 0xeb: 107 case 0xeb:
147 if (*(((__u8 *) ainsn->insn) + 5 ) == 0x44 || /* bxhg */ 108 if ((insn[2] & 0xff) == 0x44 || /* bxhg */
148 *(((__u8 *) ainsn->insn) + 5) == 0x45) {/* bxleg */ 109 (insn[2] & 0xff) == 0x45) /* bxleg */
149 ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN; 110 fixup = FIXUP_BRANCH_NOT_TAKEN;
150 }
151 break; 111 break;
152 case 0xe3: /* bctg */ 112 case 0xe3: /* bctg */
153 if (*(((__u8 *) ainsn->insn) + 5) == 0x46) { 113 if ((insn[2] & 0xff) == 0x46)
154 ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN; 114 fixup = FIXUP_BRANCH_NOT_TAKEN;
155 }
156 break; 115 break;
157 } 116 }
117 return fixup;
118}
119
120int __kprobes arch_prepare_kprobe(struct kprobe *p)
121{
122 if ((unsigned long) p->addr & 0x01)
123 return -EINVAL;
124
125 /* Make sure the probe isn't going on a difficult instruction */
126 if (is_prohibited_opcode(p->addr))
127 return -EINVAL;
128
129 p->opcode = *p->addr;
130 memcpy(p->ainsn.insn, p->addr, ((p->opcode >> 14) + 3) & -2);
131
132 return 0;
158} 133}
159 134
135struct ins_replace_args {
136 kprobe_opcode_t *ptr;
137 kprobe_opcode_t opcode;
138};
139
160static int __kprobes swap_instruction(void *aref) 140static int __kprobes swap_instruction(void *aref)
161{ 141{
162 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 142 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
163 unsigned long status = kcb->kprobe_status; 143 unsigned long status = kcb->kprobe_status;
164 struct ins_replace_args *args = aref; 144 struct ins_replace_args *args = aref;
165 int rc;
166 145
167 kcb->kprobe_status = KPROBE_SWAP_INST; 146 kcb->kprobe_status = KPROBE_SWAP_INST;
168 rc = probe_kernel_write(args->ptr, &args->new, sizeof(args->new)); 147 probe_kernel_write(args->ptr, &args->opcode, sizeof(args->opcode));
169 kcb->kprobe_status = status; 148 kcb->kprobe_status = status;
170 return rc; 149 return 0;
171} 150}
172 151
173void __kprobes arch_arm_kprobe(struct kprobe *p) 152void __kprobes arch_arm_kprobe(struct kprobe *p)
@@ -175,8 +154,7 @@ void __kprobes arch_arm_kprobe(struct kprobe *p)
175 struct ins_replace_args args; 154 struct ins_replace_args args;
176 155
177 args.ptr = p->addr; 156 args.ptr = p->addr;
178 args.old = p->opcode; 157 args.opcode = BREAKPOINT_INSTRUCTION;
179 args.new = BREAKPOINT_INSTRUCTION;
180 stop_machine(swap_instruction, &args, NULL); 158 stop_machine(swap_instruction, &args, NULL);
181} 159}
182 160
@@ -185,64 +163,69 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
185 struct ins_replace_args args; 163 struct ins_replace_args args;
186 164
187 args.ptr = p->addr; 165 args.ptr = p->addr;
188 args.old = BREAKPOINT_INSTRUCTION; 166 args.opcode = p->opcode;
189 args.new = p->opcode;
190 stop_machine(swap_instruction, &args, NULL); 167 stop_machine(swap_instruction, &args, NULL);
191} 168}
192 169
193void __kprobes arch_remove_kprobe(struct kprobe *p) 170void __kprobes arch_remove_kprobe(struct kprobe *p)
194{ 171{
195 if (p->ainsn.insn) {
196 free_insn_slot(p->ainsn.insn, 0);
197 p->ainsn.insn = NULL;
198 }
199} 172}
200 173
201static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs) 174static void __kprobes enable_singlestep(struct kprobe_ctlblk *kcb,
175 struct pt_regs *regs,
176 unsigned long ip)
202{ 177{
203 per_cr_bits kprobe_per_regs[1]; 178 struct per_regs per_kprobe;
204 179
205 memset(kprobe_per_regs, 0, sizeof(per_cr_bits)); 180 /* Set up the PER control registers %cr9-%cr11 */
206 regs->psw.addr = (unsigned long)p->ainsn.insn | PSW_ADDR_AMODE; 181 per_kprobe.control = PER_EVENT_IFETCH;
182 per_kprobe.start = ip;
183 per_kprobe.end = ip;
207 184
208 /* Set up the per control reg info, will pass to lctl */ 185 /* Save control regs and psw mask */
209 kprobe_per_regs[0].em_instruction_fetch = 1; 186 __ctl_store(kcb->kprobe_saved_ctl, 9, 11);
210 kprobe_per_regs[0].starting_addr = (unsigned long)p->ainsn.insn; 187 kcb->kprobe_saved_imask = regs->psw.mask &
211 kprobe_per_regs[0].ending_addr = (unsigned long)p->ainsn.insn + 1; 188 (PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT);
212 189
213 /* Set the PER control regs, turns on single step for this address */ 190 /* Set PER control regs, turns on single step for the given address */
214 __ctl_load(kprobe_per_regs, 9, 11); 191 __ctl_load(per_kprobe, 9, 11);
215 regs->psw.mask |= PSW_MASK_PER; 192 regs->psw.mask |= PSW_MASK_PER;
216 regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT); 193 regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
194 regs->psw.addr = ip | PSW_ADDR_AMODE;
217} 195}
218 196
219static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) 197static void __kprobes disable_singlestep(struct kprobe_ctlblk *kcb,
198 struct pt_regs *regs,
199 unsigned long ip)
220{ 200{
221 kcb->prev_kprobe.kp = kprobe_running(); 201 /* Restore control regs and psw mask, set new psw address */
222 kcb->prev_kprobe.status = kcb->kprobe_status; 202 __ctl_load(kcb->kprobe_saved_ctl, 9, 11);
223 kcb->prev_kprobe.kprobe_saved_imask = kcb->kprobe_saved_imask; 203 regs->psw.mask &= ~PSW_MASK_PER;
224 memcpy(kcb->prev_kprobe.kprobe_saved_ctl, kcb->kprobe_saved_ctl, 204 regs->psw.mask |= kcb->kprobe_saved_imask;
225 sizeof(kcb->kprobe_saved_ctl)); 205 regs->psw.addr = ip | PSW_ADDR_AMODE;
226} 206}
227 207
228static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) 208/*
209 * Activate a kprobe by storing its pointer to current_kprobe. The
210 * previous kprobe is stored in kcb->prev_kprobe. A stack of up to
211 * two kprobes can be active, see KPROBE_REENTER.
212 */
213static void __kprobes push_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *p)
229{ 214{
230 __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp; 215 kcb->prev_kprobe.kp = __get_cpu_var(current_kprobe);
231 kcb->kprobe_status = kcb->prev_kprobe.status; 216 kcb->prev_kprobe.status = kcb->kprobe_status;
232 kcb->kprobe_saved_imask = kcb->prev_kprobe.kprobe_saved_imask; 217 __get_cpu_var(current_kprobe) = p;
233 memcpy(kcb->kprobe_saved_ctl, kcb->prev_kprobe.kprobe_saved_ctl,
234 sizeof(kcb->kprobe_saved_ctl));
235} 218}
236 219
237static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs, 220/*
238 struct kprobe_ctlblk *kcb) 221 * Deactivate a kprobe by backing up to the previous state. If the
222 * current state is KPROBE_REENTER prev_kprobe.kp will be non-NULL,
223 * for any other state prev_kprobe.kp will be NULL.
224 */
225static void __kprobes pop_kprobe(struct kprobe_ctlblk *kcb)
239{ 226{
240 __get_cpu_var(current_kprobe) = p; 227 __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
241 /* Save the interrupt and per flags */ 228 kcb->kprobe_status = kcb->prev_kprobe.status;
242 kcb->kprobe_saved_imask = regs->psw.mask &
243 (PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT);
244 /* Save the control regs that govern PER */
245 __ctl_store(kcb->kprobe_saved_ctl, 9, 11);
246} 229}
247 230
248void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, 231void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
@@ -251,79 +234,104 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
251 ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14]; 234 ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14];
252 235
253 /* Replace the return addr with trampoline addr */ 236 /* Replace the return addr with trampoline addr */
254 regs->gprs[14] = (unsigned long)&kretprobe_trampoline; 237 regs->gprs[14] = (unsigned long) &kretprobe_trampoline;
238}
239
240static void __kprobes kprobe_reenter_check(struct kprobe_ctlblk *kcb,
241 struct kprobe *p)
242{
243 switch (kcb->kprobe_status) {
244 case KPROBE_HIT_SSDONE:
245 case KPROBE_HIT_ACTIVE:
246 kprobes_inc_nmissed_count(p);
247 break;
248 case KPROBE_HIT_SS:
249 case KPROBE_REENTER:
250 default:
251 /*
252 * A kprobe on the code path to single step an instruction
253 * is a BUG. The code path resides in the .kprobes.text
254 * section and is executed with interrupts disabled.
255 */
256 printk(KERN_EMERG "Invalid kprobe detected at %p.\n", p->addr);
257 dump_kprobe(p);
258 BUG();
259 }
255} 260}
256 261
257static int __kprobes kprobe_handler(struct pt_regs *regs) 262static int __kprobes kprobe_handler(struct pt_regs *regs)
258{ 263{
259 struct kprobe *p;
260 int ret = 0;
261 unsigned long *addr = (unsigned long *)
262 ((regs->psw.addr & PSW_ADDR_INSN) - 2);
263 struct kprobe_ctlblk *kcb; 264 struct kprobe_ctlblk *kcb;
265 struct kprobe *p;
264 266
265 /* 267 /*
266 * We don't want to be preempted for the entire 268 * We want to disable preemption for the entire duration of kprobe
267 * duration of kprobe processing 269 * processing. That includes the calls to the pre/post handlers
270 * and single stepping the kprobe instruction.
268 */ 271 */
269 preempt_disable(); 272 preempt_disable();
270 kcb = get_kprobe_ctlblk(); 273 kcb = get_kprobe_ctlblk();
274 p = get_kprobe((void *)((regs->psw.addr & PSW_ADDR_INSN) - 2));
271 275
272 /* Check we're not actually recursing */ 276 if (p) {
273 if (kprobe_running()) { 277 if (kprobe_running()) {
274 p = get_kprobe(addr); 278 /*
275 if (p) { 279 * We have hit a kprobe while another is still
276 if (kcb->kprobe_status == KPROBE_HIT_SS && 280 * active. This can happen in the pre and post
277 *p->ainsn.insn == BREAKPOINT_INSTRUCTION) { 281 * handler. Single step the instruction of the
278 regs->psw.mask &= ~PSW_MASK_PER; 282 * new probe but do not call any handler function
279 regs->psw.mask |= kcb->kprobe_saved_imask; 283 * of this secondary kprobe.
280 goto no_kprobe; 284 * push_kprobe and pop_kprobe saves and restores
281 } 285 * the currently active kprobe.
282 /* We have reentered the kprobe_handler(), since
283 * another probe was hit while within the handler.
284 * We here save the original kprobes variables and
285 * just single step on the instruction of the new probe
286 * without calling any user handlers.
287 */ 286 */
288 save_previous_kprobe(kcb); 287 kprobe_reenter_check(kcb, p);
289 set_current_kprobe(p, regs, kcb); 288 push_kprobe(kcb, p);
290 kprobes_inc_nmissed_count(p);
291 prepare_singlestep(p, regs);
292 kcb->kprobe_status = KPROBE_REENTER; 289 kcb->kprobe_status = KPROBE_REENTER;
293 return 1;
294 } else { 290 } else {
295 p = __get_cpu_var(current_kprobe); 291 /*
296 if (p->break_handler && p->break_handler(p, regs)) { 292 * If we have no pre-handler or it returned 0, we
297 goto ss_probe; 293 * continue with single stepping. If we have a
298 } 294 * pre-handler and it returned non-zero, it prepped
295 * for calling the break_handler below on re-entry
296 * for jprobe processing, so get out doing nothing
297 * more here.
298 */
299 push_kprobe(kcb, p);
300 kcb->kprobe_status = KPROBE_HIT_ACTIVE;
301 if (p->pre_handler && p->pre_handler(p, regs))
302 return 1;
303 kcb->kprobe_status = KPROBE_HIT_SS;
299 } 304 }
300 goto no_kprobe; 305 enable_singlestep(kcb, regs, (unsigned long) p->ainsn.insn);
301 }
302
303 p = get_kprobe(addr);
304 if (!p)
305 /*
306 * No kprobe at this address. The fault has not been
307 * caused by a kprobe breakpoint. The race of breakpoint
308 * vs. kprobe remove does not exist because on s390 we
309 * use stop_machine to arm/disarm the breakpoints.
310 */
311 goto no_kprobe;
312
313 kcb->kprobe_status = KPROBE_HIT_ACTIVE;
314 set_current_kprobe(p, regs, kcb);
315 if (p->pre_handler && p->pre_handler(p, regs))
316 /* handler has already set things up, so skip ss setup */
317 return 1; 306 return 1;
318 307 } else if (kprobe_running()) {
319ss_probe: 308 p = __get_cpu_var(current_kprobe);
320 prepare_singlestep(p, regs); 309 if (p->break_handler && p->break_handler(p, regs)) {
321 kcb->kprobe_status = KPROBE_HIT_SS; 310 /*
322 return 1; 311 * Continuation after the jprobe completed and
323 312 * caused the jprobe_return trap. The jprobe
324no_kprobe: 313 * break_handler "returns" to the original
314 * function that still has the kprobe breakpoint
315 * installed. We continue with single stepping.
316 */
317 kcb->kprobe_status = KPROBE_HIT_SS;
318 enable_singlestep(kcb, regs,
319 (unsigned long) p->ainsn.insn);
320 return 1;
321 } /* else:
322 * No kprobe at this address and the current kprobe
323 * has no break handler (no jprobe!). The kernel just
324 * exploded, let the standard trap handler pick up the
325 * pieces.
326 */
327 } /* else:
328 * No kprobe at this address and no active kprobe. The trap has
329 * not been caused by a kprobe breakpoint. The race of breakpoint
 330 * vs. kprobe remove does not exist because on s390 we use
331 * stop_machine to arm/disarm the breakpoints.
332 */
325 preempt_enable_no_resched(); 333 preempt_enable_no_resched();
326 return ret; 334 return 0;
327} 335}
328 336
329/* 337/*
@@ -344,12 +352,12 @@ static void __used kretprobe_trampoline_holder(void)
344static int __kprobes trampoline_probe_handler(struct kprobe *p, 352static int __kprobes trampoline_probe_handler(struct kprobe *p,
345 struct pt_regs *regs) 353 struct pt_regs *regs)
346{ 354{
347 struct kretprobe_instance *ri = NULL; 355 struct kretprobe_instance *ri;
348 struct hlist_head *head, empty_rp; 356 struct hlist_head *head, empty_rp;
349 struct hlist_node *node, *tmp; 357 struct hlist_node *node, *tmp;
350 unsigned long flags, orig_ret_address = 0; 358 unsigned long flags, orig_ret_address;
351 unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline; 359 unsigned long trampoline_address;
352 kprobe_opcode_t *correct_ret_addr = NULL; 360 kprobe_opcode_t *correct_ret_addr;
353 361
354 INIT_HLIST_HEAD(&empty_rp); 362 INIT_HLIST_HEAD(&empty_rp);
355 kretprobe_hash_lock(current, &head, &flags); 363 kretprobe_hash_lock(current, &head, &flags);
@@ -367,12 +375,16 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
367 * real return address, and all the rest will point to 375 * real return address, and all the rest will point to
368 * kretprobe_trampoline 376 * kretprobe_trampoline
369 */ 377 */
378 ri = NULL;
379 orig_ret_address = 0;
380 correct_ret_addr = NULL;
381 trampoline_address = (unsigned long) &kretprobe_trampoline;
370 hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { 382 hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
371 if (ri->task != current) 383 if (ri->task != current)
372 /* another task is sharing our hash bucket */ 384 /* another task is sharing our hash bucket */
373 continue; 385 continue;
374 386
375 orig_ret_address = (unsigned long)ri->ret_addr; 387 orig_ret_address = (unsigned long) ri->ret_addr;
376 388
377 if (orig_ret_address != trampoline_address) 389 if (orig_ret_address != trampoline_address)
378 /* 390 /*
@@ -391,7 +403,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
391 /* another task is sharing our hash bucket */ 403 /* another task is sharing our hash bucket */
392 continue; 404 continue;
393 405
394 orig_ret_address = (unsigned long)ri->ret_addr; 406 orig_ret_address = (unsigned long) ri->ret_addr;
395 407
396 if (ri->rp && ri->rp->handler) { 408 if (ri->rp && ri->rp->handler) {
397 ri->ret_addr = correct_ret_addr; 409 ri->ret_addr = correct_ret_addr;
@@ -400,19 +412,18 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
400 412
401 recycle_rp_inst(ri, &empty_rp); 413 recycle_rp_inst(ri, &empty_rp);
402 414
403 if (orig_ret_address != trampoline_address) { 415 if (orig_ret_address != trampoline_address)
404 /* 416 /*
405 * This is the real return address. Any other 417 * This is the real return address. Any other
406 * instances associated with this task are for 418 * instances associated with this task are for
407 * other calls deeper on the call stack 419 * other calls deeper on the call stack
408 */ 420 */
409 break; 421 break;
410 }
411 } 422 }
412 423
413 regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE; 424 regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE;
414 425
415 reset_current_kprobe(); 426 pop_kprobe(get_kprobe_ctlblk());
416 kretprobe_hash_unlock(current, &flags); 427 kretprobe_hash_unlock(current, &flags);
417 preempt_enable_no_resched(); 428 preempt_enable_no_resched();
418 429
@@ -439,55 +450,42 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
439static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs) 450static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
440{ 451{
441 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 452 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
453 unsigned long ip = regs->psw.addr & PSW_ADDR_INSN;
454 int fixup = get_fixup_type(p->ainsn.insn);
442 455
443 regs->psw.addr &= PSW_ADDR_INSN; 456 if (fixup & FIXUP_PSW_NORMAL)
444 457 ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn;
445 if (p->ainsn.fixup & FIXUP_PSW_NORMAL)
446 regs->psw.addr = (unsigned long)p->addr +
447 ((unsigned long)regs->psw.addr -
448 (unsigned long)p->ainsn.insn);
449 458
450 if (p->ainsn.fixup & FIXUP_BRANCH_NOT_TAKEN) 459 if (fixup & FIXUP_BRANCH_NOT_TAKEN) {
451 if ((unsigned long)regs->psw.addr - 460 int ilen = ((p->ainsn.insn[0] >> 14) + 3) & -2;
452 (unsigned long)p->ainsn.insn == p->ainsn.ilen) 461 if (ip - (unsigned long) p->ainsn.insn == ilen)
453 regs->psw.addr = (unsigned long)p->addr + p->ainsn.ilen; 462 ip = (unsigned long) p->addr + ilen;
463 }
454 464
455 if (p->ainsn.fixup & FIXUP_RETURN_REGISTER) 465 if (fixup & FIXUP_RETURN_REGISTER) {
456 regs->gprs[p->ainsn.reg] = ((unsigned long)p->addr + 466 int reg = (p->ainsn.insn[0] & 0xf0) >> 4;
457 (regs->gprs[p->ainsn.reg] - 467 regs->gprs[reg] += (unsigned long) p->addr -
458 (unsigned long)p->ainsn.insn)) 468 (unsigned long) p->ainsn.insn;
459 | PSW_ADDR_AMODE; 469 }
460 470
461 regs->psw.addr |= PSW_ADDR_AMODE; 471 disable_singlestep(kcb, regs, ip);
462 /* turn off PER mode */
463 regs->psw.mask &= ~PSW_MASK_PER;
464 /* Restore the original per control regs */
465 __ctl_load(kcb->kprobe_saved_ctl, 9, 11);
466 regs->psw.mask |= kcb->kprobe_saved_imask;
467} 472}
468 473
469static int __kprobes post_kprobe_handler(struct pt_regs *regs) 474static int __kprobes post_kprobe_handler(struct pt_regs *regs)
470{ 475{
471 struct kprobe *cur = kprobe_running();
472 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 476 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
477 struct kprobe *p = kprobe_running();
473 478
474 if (!cur) 479 if (!p)
475 return 0; 480 return 0;
476 481
477 if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) { 482 if (kcb->kprobe_status != KPROBE_REENTER && p->post_handler) {
478 kcb->kprobe_status = KPROBE_HIT_SSDONE; 483 kcb->kprobe_status = KPROBE_HIT_SSDONE;
479 cur->post_handler(cur, regs, 0); 484 p->post_handler(p, regs, 0);
480 } 485 }
481 486
482 resume_execution(cur, regs); 487 resume_execution(p, regs);
483 488 pop_kprobe(kcb);
484 /*Restore back the original saved kprobes variables and continue. */
485 if (kcb->kprobe_status == KPROBE_REENTER) {
486 restore_previous_kprobe(kcb);
487 goto out;
488 }
489 reset_current_kprobe();
490out:
491 preempt_enable_no_resched(); 489 preempt_enable_no_resched();
492 490
493 /* 491 /*
@@ -495,17 +493,16 @@ out:
495 * will have PER set, in which case, continue the remaining processing 493 * will have PER set, in which case, continue the remaining processing
496 * of do_single_step, as if this is not a probe hit. 494 * of do_single_step, as if this is not a probe hit.
497 */ 495 */
498 if (regs->psw.mask & PSW_MASK_PER) { 496 if (regs->psw.mask & PSW_MASK_PER)
499 return 0; 497 return 0;
500 }
501 498
502 return 1; 499 return 1;
503} 500}
504 501
505static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr) 502static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr)
506{ 503{
507 struct kprobe *cur = kprobe_running();
508 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 504 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
505 struct kprobe *p = kprobe_running();
509 const struct exception_table_entry *entry; 506 const struct exception_table_entry *entry;
510 507
511 switch(kcb->kprobe_status) { 508 switch(kcb->kprobe_status) {
@@ -521,14 +518,8 @@ static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr)
521 * and allow the page fault handler to continue as a 518 * and allow the page fault handler to continue as a
522 * normal page fault. 519 * normal page fault.
523 */ 520 */
524 regs->psw.addr = (unsigned long)cur->addr | PSW_ADDR_AMODE; 521 disable_singlestep(kcb, regs, (unsigned long) p->addr);
525 regs->psw.mask &= ~PSW_MASK_PER; 522 pop_kprobe(kcb);
526 regs->psw.mask |= kcb->kprobe_saved_imask;
527 if (kcb->kprobe_status == KPROBE_REENTER)
528 restore_previous_kprobe(kcb);
529 else {
530 reset_current_kprobe();
531 }
532 preempt_enable_no_resched(); 523 preempt_enable_no_resched();
533 break; 524 break;
534 case KPROBE_HIT_ACTIVE: 525 case KPROBE_HIT_ACTIVE:
@@ -538,7 +529,7 @@ static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr)
 538 * we can also use npre/npostfault count for accounting 529 * we can also use npre/npostfault count for accounting
539 * these specific fault cases. 530 * these specific fault cases.
540 */ 531 */
541 kprobes_inc_nmissed_count(cur); 532 kprobes_inc_nmissed_count(p);
542 533
543 /* 534 /*
544 * We come here because instructions in the pre/post 535 * We come here because instructions in the pre/post
@@ -547,7 +538,7 @@ static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr)
547 * copy_from_user(), get_user() etc. Let the 538 * copy_from_user(), get_user() etc. Let the
548 * user-specified handler try to fix it first. 539 * user-specified handler try to fix it first.
549 */ 540 */
550 if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) 541 if (p->fault_handler && p->fault_handler(p, regs, trapnr))
551 return 1; 542 return 1;
552 543
553 /* 544 /*
@@ -589,7 +580,7 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
589int __kprobes kprobe_exceptions_notify(struct notifier_block *self, 580int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
590 unsigned long val, void *data) 581 unsigned long val, void *data)
591{ 582{
592 struct die_args *args = (struct die_args *)data; 583 struct die_args *args = (struct die_args *) data;
593 struct pt_regs *regs = args->regs; 584 struct pt_regs *regs = args->regs;
594 int ret = NOTIFY_DONE; 585 int ret = NOTIFY_DONE;
595 586
@@ -598,16 +589,16 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
598 589
599 switch (val) { 590 switch (val) {
600 case DIE_BPT: 591 case DIE_BPT:
601 if (kprobe_handler(args->regs)) 592 if (kprobe_handler(regs))
602 ret = NOTIFY_STOP; 593 ret = NOTIFY_STOP;
603 break; 594 break;
604 case DIE_SSTEP: 595 case DIE_SSTEP:
605 if (post_kprobe_handler(args->regs)) 596 if (post_kprobe_handler(regs))
606 ret = NOTIFY_STOP; 597 ret = NOTIFY_STOP;
607 break; 598 break;
608 case DIE_TRAP: 599 case DIE_TRAP:
609 if (!preemptible() && kprobe_running() && 600 if (!preemptible() && kprobe_running() &&
610 kprobe_trap_handler(args->regs, args->trapnr)) 601 kprobe_trap_handler(regs, args->trapnr))
611 ret = NOTIFY_STOP; 602 ret = NOTIFY_STOP;
612 break; 603 break;
613 default: 604 default:
@@ -623,23 +614,19 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
623int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) 614int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
624{ 615{
625 struct jprobe *jp = container_of(p, struct jprobe, kp); 616 struct jprobe *jp = container_of(p, struct jprobe, kp);
626 unsigned long addr;
627 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 617 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
618 unsigned long stack;
628 619
629 memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs)); 620 memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));
630 621
631 /* setup return addr to the jprobe handler routine */ 622 /* setup return addr to the jprobe handler routine */
632 regs->psw.addr = (unsigned long)(jp->entry) | PSW_ADDR_AMODE; 623 regs->psw.addr = (unsigned long) jp->entry | PSW_ADDR_AMODE;
633 regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT); 624 regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
634 625
635 /* r14 is the function return address */
636 kcb->jprobe_saved_r14 = (unsigned long)regs->gprs[14];
637 /* r15 is the stack pointer */ 626 /* r15 is the stack pointer */
638 kcb->jprobe_saved_r15 = (unsigned long)regs->gprs[15]; 627 stack = (unsigned long) regs->gprs[15];
639 addr = (unsigned long)kcb->jprobe_saved_r15;
640 628
641 memcpy(kcb->jprobes_stack, (kprobe_opcode_t *) addr, 629 memcpy(kcb->jprobes_stack, (void *) stack, MIN_STACK_SIZE(stack));
642 MIN_STACK_SIZE(addr));
643 return 1; 630 return 1;
644} 631}
645 632
@@ -656,30 +643,29 @@ void __kprobes jprobe_return_end(void)
656int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) 643int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
657{ 644{
658 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 645 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
659 unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_r15); 646 unsigned long stack;
647
648 stack = (unsigned long) kcb->jprobe_saved_regs.gprs[15];
660 649
661 /* Put the regs back */ 650 /* Put the regs back */
662 memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs)); 651 memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
663 /* put the stack back */ 652 /* put the stack back */
664 memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack, 653 memcpy((void *) stack, kcb->jprobes_stack, MIN_STACK_SIZE(stack));
665 MIN_STACK_SIZE(stack_addr));
666 preempt_enable_no_resched(); 654 preempt_enable_no_resched();
667 return 1; 655 return 1;
668} 656}
669 657
670static struct kprobe trampoline_p = { 658static struct kprobe trampoline = {
671 .addr = (kprobe_opcode_t *) & kretprobe_trampoline, 659 .addr = (kprobe_opcode_t *) &kretprobe_trampoline,
672 .pre_handler = trampoline_probe_handler 660 .pre_handler = trampoline_probe_handler
673}; 661};
674 662
675int __init arch_init_kprobes(void) 663int __init arch_init_kprobes(void)
676{ 664{
677 return register_kprobe(&trampoline_p); 665 return register_kprobe(&trampoline);
678} 666}
679 667
680int __kprobes arch_trampoline_kprobe(struct kprobe *p) 668int __kprobes arch_trampoline_kprobe(struct kprobe *p)
681{ 669{
682 if (p->addr == (kprobe_opcode_t *) & kretprobe_trampoline) 670 return p->addr == (kprobe_opcode_t *) &kretprobe_trampoline;
683 return 1;
684 return 0;
685} 671}
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index dfe015d7398c..1e6a55795628 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -7,6 +7,8 @@
7 7
8#include <asm/asm-offsets.h> 8#include <asm/asm-offsets.h>
9 9
10 .section .kprobes.text, "ax"
11
10 .globl ftrace_stub 12 .globl ftrace_stub
11ftrace_stub: 13ftrace_stub:
12 br %r14 14 br %r14
@@ -16,22 +18,12 @@ _mcount:
16#ifdef CONFIG_DYNAMIC_FTRACE 18#ifdef CONFIG_DYNAMIC_FTRACE
17 br %r14 19 br %r14
18 20
19 .data
20 .globl ftrace_dyn_func
21ftrace_dyn_func:
22 .long ftrace_stub
23 .previous
24
25 .globl ftrace_caller 21 .globl ftrace_caller
26ftrace_caller: 22ftrace_caller:
27#endif 23#endif
28 stm %r2,%r5,16(%r15) 24 stm %r2,%r5,16(%r15)
29 bras %r1,2f 25 bras %r1,2f
30#ifdef CONFIG_DYNAMIC_FTRACE
310: .long ftrace_dyn_func
32#else
330: .long ftrace_trace_function 260: .long ftrace_trace_function
34#endif
351: .long function_trace_stop 271: .long function_trace_stop
362: l %r2,1b-0b(%r1) 282: l %r2,1b-0b(%r1)
37 icm %r2,0xf,0(%r2) 29 icm %r2,0xf,0(%r2)
@@ -47,21 +39,15 @@ ftrace_caller:
47 l %r14,0(%r14) 39 l %r14,0(%r14)
48 basr %r14,%r14 40 basr %r14,%r14
49#ifdef CONFIG_FUNCTION_GRAPH_TRACER 41#ifdef CONFIG_FUNCTION_GRAPH_TRACER
50#ifdef CONFIG_DYNAMIC_FTRACE 42 l %r2,100(%r15)
43 l %r3,152(%r15)
51 .globl ftrace_graph_caller 44 .globl ftrace_graph_caller
52ftrace_graph_caller: 45ftrace_graph_caller:
53 # This unconditional branch gets runtime patched. Change only if 46# The bras instruction gets runtime patched to call prepare_ftrace_return.
54 # you know what you are doing. See ftrace_enable_graph_caller(). 47# See ftrace_enable_ftrace_graph_caller. The patched instruction is:
55 j 1f 48# bras %r14,prepare_ftrace_return
56#endif 49 bras %r14,0f
57 bras %r1,0f 500: st %r2,100(%r15)
58 .long prepare_ftrace_return
590: l %r2,152(%r15)
60 l %r4,0(%r1)
61 l %r3,100(%r15)
62 basr %r14,%r4
63 st %r2,100(%r15)
641:
65#endif 51#endif
66 ahi %r15,96 52 ahi %r15,96
67 l %r14,56(%r15) 53 l %r14,56(%r15)
diff --git a/arch/s390/kernel/mcount64.S b/arch/s390/kernel/mcount64.S
index c37211c6092b..e73667286ac0 100644
--- a/arch/s390/kernel/mcount64.S
+++ b/arch/s390/kernel/mcount64.S
@@ -7,6 +7,8 @@
7 7
8#include <asm/asm-offsets.h> 8#include <asm/asm-offsets.h>
9 9
10 .section .kprobes.text, "ax"
11
10 .globl ftrace_stub 12 .globl ftrace_stub
11ftrace_stub: 13ftrace_stub:
12 br %r14 14 br %r14
@@ -16,12 +18,6 @@ _mcount:
16#ifdef CONFIG_DYNAMIC_FTRACE 18#ifdef CONFIG_DYNAMIC_FTRACE
17 br %r14 19 br %r14
18 20
19 .data
20 .globl ftrace_dyn_func
21ftrace_dyn_func:
22 .quad ftrace_stub
23 .previous
24
25 .globl ftrace_caller 21 .globl ftrace_caller
26ftrace_caller: 22ftrace_caller:
27#endif 23#endif
@@ -35,26 +31,19 @@ ftrace_caller:
35 stg %r1,__SF_BACKCHAIN(%r15) 31 stg %r1,__SF_BACKCHAIN(%r15)
36 lgr %r2,%r14 32 lgr %r2,%r14
37 lg %r3,168(%r15) 33 lg %r3,168(%r15)
38#ifdef CONFIG_DYNAMIC_FTRACE
39 larl %r14,ftrace_dyn_func
40#else
41 larl %r14,ftrace_trace_function 34 larl %r14,ftrace_trace_function
42#endif
43 lg %r14,0(%r14) 35 lg %r14,0(%r14)
44 basr %r14,%r14 36 basr %r14,%r14
45#ifdef CONFIG_FUNCTION_GRAPH_TRACER 37#ifdef CONFIG_FUNCTION_GRAPH_TRACER
46#ifdef CONFIG_DYNAMIC_FTRACE 38 lg %r2,168(%r15)
39 lg %r3,272(%r15)
47 .globl ftrace_graph_caller 40 .globl ftrace_graph_caller
48ftrace_graph_caller: 41ftrace_graph_caller:
49 # This unconditional branch gets runtime patched. Change only if 42# The bras instruction gets runtime patched to call prepare_ftrace_return.
50 # you know what you are doing. See ftrace_enable_graph_caller(). 43# See ftrace_enable_ftrace_graph_caller. The patched instruction is:
51 j 0f 44# bras %r14,prepare_ftrace_return
52#endif 45 bras %r14,0f
53 lg %r2,272(%r15) 460: stg %r2,168(%r15)
54 lg %r3,168(%r15)
55 brasl %r14,prepare_ftrace_return
56 stg %r2,168(%r15)
570:
58#endif 47#endif
59 aghi %r15,160 48 aghi %r15,160
60 lmg %r2,%r5,32(%r15) 49 lmg %r2,%r5,32(%r15)
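Reviewer note: in both the 31-bit and 64-bit mcount variants above, the graph caller now loads the caller's return address and the saved slot into the argument registers and lets the runtime-patched bras call prepare_ftrace_return(parent, ip); the value that comes back is stored over the saved return address, so the traced function later returns through return_to_handler. The hijack, modelled schematically in C (push_return_trace() is illustrative, not the kernel API):

    extern void return_to_handler(void);
    extern int push_return_trace(unsigned long parent, unsigned long ip); /* illustrative */

    /* Schematic model of prepare_ftrace_return() as called from above. */
    unsigned long hook_return(unsigned long parent, unsigned long ip)
    {
        /* Remember the real return address; on failure change nothing. */
        if (push_return_trace(parent, ip) != 0)
            return parent;

        /* The traced function will now "return" into the tracer. */
        return (unsigned long) return_to_handler;
    }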
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 1995c1712fc8..fab88431a06f 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -8,6 +8,7 @@
8 * Heiko Carstens <heiko.carstens@de.ibm.com>, 8 * Heiko Carstens <heiko.carstens@de.ibm.com>,
9 */ 9 */
10 10
11#include <linux/kernel_stat.h>
11#include <linux/init.h> 12#include <linux/init.h>
12#include <linux/errno.h> 13#include <linux/errno.h>
13#include <linux/hardirq.h> 14#include <linux/hardirq.h>
@@ -255,7 +256,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
255 nmi_enter(); 256 nmi_enter();
256 s390_idle_check(regs, S390_lowcore.mcck_clock, 257 s390_idle_check(regs, S390_lowcore.mcck_clock,
257 S390_lowcore.mcck_enter_timer); 258 S390_lowcore.mcck_enter_timer);
258 259 kstat_cpu(smp_processor_id()).irqs[NMI_NMI]++;
259 mci = (struct mci *) &S390_lowcore.mcck_interruption_code; 260 mci = (struct mci *) &S390_lowcore.mcck_interruption_code;
260 mcck = &__get_cpu_var(cpu_mcck); 261 mcck = &__get_cpu_var(cpu_mcck);
261 umode = user_mode(regs); 262 umode = user_mode(regs);
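Reviewer note: the single added line in s390_do_machine_check() above feeds the new "NMI" row of /proc/interrupts. Schematically it is nothing more than a per-cpu counter bump (the array and enum here are illustrative, not kernel definitions):

    enum irq_row { EXT_ROW, IO_ROW, /* ... */ NMI_NMI, NR_ROWS };

    static unsigned int irq_count[64][NR_ROWS];    /* [cpu][row], illustrative */

    static void account_mcheck(int cpu)
    {
        irq_count[cpu][NMI_NMI]++;    /* shows up as the "NMI" line */
    }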
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index ec2e03b22ead..6ba42222b542 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -32,6 +32,7 @@
32#include <linux/kernel_stat.h> 32#include <linux/kernel_stat.h>
33#include <linux/syscalls.h> 33#include <linux/syscalls.h>
34#include <linux/compat.h> 34#include <linux/compat.h>
35#include <linux/kprobes.h>
35#include <asm/compat.h> 36#include <asm/compat.h>
36#include <asm/uaccess.h> 37#include <asm/uaccess.h>
37#include <asm/pgtable.h> 38#include <asm/pgtable.h>
@@ -41,6 +42,7 @@
41#include <asm/irq.h> 42#include <asm/irq.h>
42#include <asm/timer.h> 43#include <asm/timer.h>
43#include <asm/nmi.h> 44#include <asm/nmi.h>
45#include <asm/smp.h>
44#include "entry.h" 46#include "entry.h"
45 47
46asmlinkage void ret_from_fork(void) asm ("ret_from_fork"); 48asmlinkage void ret_from_fork(void) asm ("ret_from_fork");
@@ -75,13 +77,8 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
75 */ 77 */
76static void default_idle(void) 78static void default_idle(void)
77{ 79{
78 /* CPU is going idle. */ 80 if (cpu_is_offline(smp_processor_id()))
79#ifdef CONFIG_HOTPLUG_CPU
80 if (cpu_is_offline(smp_processor_id())) {
81 preempt_enable_no_resched();
82 cpu_die(); 81 cpu_die();
83 }
84#endif
85 local_irq_disable(); 82 local_irq_disable();
86 if (need_resched()) { 83 if (need_resched()) {
87 local_irq_enable(); 84 local_irq_enable();
@@ -116,15 +113,17 @@ void cpu_idle(void)
116 } 113 }
117} 114}
118 115
119extern void kernel_thread_starter(void); 116extern void __kprobes kernel_thread_starter(void);
120 117
121asm( 118asm(
122 ".align 4\n" 119 ".section .kprobes.text, \"ax\"\n"
120 ".global kernel_thread_starter\n"
123 "kernel_thread_starter:\n" 121 "kernel_thread_starter:\n"
124 " la 2,0(10)\n" 122 " la 2,0(10)\n"
125 " basr 14,9\n" 123 " basr 14,9\n"
126 " la 2,0\n" 124 " la 2,0\n"
127 " br 11\n"); 125 " br 11\n"
126 ".previous\n");
128 127
129int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) 128int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
130{ 129{
@@ -214,8 +213,10 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
214 /* start new process with ar4 pointing to the correct address space */ 213 /* start new process with ar4 pointing to the correct address space */
215 p->thread.mm_segment = get_fs(); 214 p->thread.mm_segment = get_fs();
216 /* Don't copy debug registers */ 215 /* Don't copy debug registers */
217 memset(&p->thread.per_info, 0, sizeof(p->thread.per_info)); 216 memset(&p->thread.per_user, 0, sizeof(p->thread.per_user));
217 memset(&p->thread.per_event, 0, sizeof(p->thread.per_event));
218 clear_tsk_thread_flag(p, TIF_SINGLE_STEP); 218 clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
219 clear_tsk_thread_flag(p, TIF_PER_TRAP);
219 /* Initialize per thread user and system timer values */ 220 /* Initialize per thread user and system timer values */
220 ti = task_thread_info(p); 221 ti = task_thread_info(p);
221 ti->user_timer = 0; 222 ti->user_timer = 0;
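Reviewer note: the process.c hunk above moves kernel_thread_starter into .kprobes.text so that the code involved in starting kernel threads can never itself carry a probe; the file-scope asm now names the section and exports the symbol explicitly. The same .section/.previous pattern in minimal form (my_starter is a made-up symbol, the body is s390 assembly):

    /* Place an asm-defined function into a dedicated section from C. */
    extern void my_starter(void);    /* made-up symbol for illustration */

    asm(
        ".section .kprobes.text, \"ax\"\n"
        ".global my_starter\n"
        "my_starter:\n"
        "   br  %r14\n"              /* s390: just return to the caller */
        ".previous\n");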
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 644548e615c6..311e9d712888 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -13,7 +13,7 @@
13#include <linux/smp.h> 13#include <linux/smp.h>
14#include <linux/seq_file.h> 14#include <linux/seq_file.h>
15#include <linux/delay.h> 15#include <linux/delay.h>
16 16#include <linux/cpu.h>
17#include <asm/elf.h> 17#include <asm/elf.h>
18#include <asm/lowcore.h> 18#include <asm/lowcore.h>
19#include <asm/param.h> 19#include <asm/param.h>
@@ -35,17 +35,6 @@ void __cpuinit cpu_init(void)
35} 35}
36 36
37/* 37/*
38 * print_cpu_info - print basic information about a cpu
39 */
40void __cpuinit print_cpu_info(void)
41{
42 struct cpuid *id = &per_cpu(cpu_id, smp_processor_id());
43
44 pr_info("Processor %d started, address %d, identification %06X\n",
45 S390_lowcore.cpu_nr, stap(), id->ident);
46}
47
48/*
49 * show_cpuinfo - Get information on one CPU for use by procfs. 38 * show_cpuinfo - Get information on one CPU for use by procfs.
50 */ 39 */
51static int show_cpuinfo(struct seq_file *m, void *v) 40static int show_cpuinfo(struct seq_file *m, void *v)
@@ -57,9 +46,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
57 unsigned long n = (unsigned long) v - 1; 46 unsigned long n = (unsigned long) v - 1;
58 int i; 47 int i;
59 48
60 s390_adjust_jiffies();
61 preempt_disable();
62 if (!n) { 49 if (!n) {
50 s390_adjust_jiffies();
63 seq_printf(m, "vendor_id : IBM/S390\n" 51 seq_printf(m, "vendor_id : IBM/S390\n"
64 "# processors : %i\n" 52 "# processors : %i\n"
65 "bogomips per cpu: %lu.%02lu\n", 53 "bogomips per cpu: %lu.%02lu\n",
@@ -71,7 +59,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
71 seq_printf(m, "%s ", hwcap_str[i]); 59 seq_printf(m, "%s ", hwcap_str[i]);
72 seq_puts(m, "\n"); 60 seq_puts(m, "\n");
73 } 61 }
74 62 get_online_cpus();
75 if (cpu_online(n)) { 63 if (cpu_online(n)) {
76 struct cpuid *id = &per_cpu(cpu_id, n); 64 struct cpuid *id = &per_cpu(cpu_id, n);
77 seq_printf(m, "processor %li: " 65 seq_printf(m, "processor %li: "
@@ -80,7 +68,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
80 "machine = %04X\n", 68 "machine = %04X\n",
81 n, id->version, id->ident, id->machine); 69 n, id->version, id->ident, id->machine);
82 } 70 }
83 preempt_enable(); 71 put_online_cpus();
84 return 0; 72 return 0;
85} 73}
86 74
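Reviewer note: show_cpuinfo() above switches from preempt_disable() around the whole function to get_online_cpus()/put_online_cpus() around just the per-cpu lookup, so the seq_printf() calls, which may sleep, are no longer issued in atomic context. The locking shape, schematically (the two helpers marked illustrative are not kernel API):

    extern void get_online_cpus(void);    /* real kernel API */
    extern void put_online_cpus(void);
    extern int cpu_is_online_now(unsigned long n);    /* illustrative */
    extern void print_cpu_line(unsigned long n);      /* illustrative */

    void show_one_cpu(unsigned long n)
    {
        /* Block CPU hotplug only while the online mask and the
         * per-cpu data are inspected; everything else may sleep. */
        get_online_cpus();
        if (cpu_is_online_now(n))
            print_cpu_line(n);
        put_online_cpus();
    }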
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 019bb714db49..ef86ad243986 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -1,25 +1,9 @@
 /*
- * arch/s390/kernel/ptrace.c
+ * Ptrace user space interface.
  *
- * S390 version
- * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
+ * Copyright IBM Corp. 1999,2010
+ * Author(s): Denis Joseph Barrow
  *            Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * Based on PowerPC version
- *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
- *
- * Derived from "arch/m68k/kernel/ptrace.c"
- * Copyright (C) 1994 by Hamish Macdonald
- * Taken from linux/kernel/ptrace.c and modified for M680x0.
- * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
- *
- * Modified by Cort Dougan (cort@cs.nmt.edu)
- *
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License. See the file README.legal in the main directory of
- * this archive for more details.
  */
 
 #include <linux/kernel.h>
@@ -61,76 +45,58 @@ enum s390_regset {
 	REGSET_GENERAL_EXTENDED,
 };
 
-static void
-FixPerRegisters(struct task_struct *task)
+void update_per_regs(struct task_struct *task)
 {
-	struct pt_regs *regs;
-	per_struct *per_info;
-	per_cr_words cr_words;
-
-	regs = task_pt_regs(task);
-	per_info = (per_struct *) &task->thread.per_info;
-	per_info->control_regs.bits.em_instruction_fetch =
-		per_info->single_step | per_info->instruction_fetch;
-
-	if (per_info->single_step) {
-		per_info->control_regs.bits.starting_addr = 0;
-#ifdef CONFIG_COMPAT
-		if (is_compat_task())
-			per_info->control_regs.bits.ending_addr = 0x7fffffffUL;
-		else
-#endif
-			per_info->control_regs.bits.ending_addr = PSW_ADDR_INSN;
-	} else {
-		per_info->control_regs.bits.starting_addr =
-			per_info->starting_addr;
-		per_info->control_regs.bits.ending_addr =
-			per_info->ending_addr;
-	}
-	/*
-	 * if any of the control reg tracing bits are on
-	 * we switch on per in the psw
-	 */
-	if (per_info->control_regs.words.cr[0] & PER_EM_MASK)
-		regs->psw.mask |= PSW_MASK_PER;
-	else
+	static const struct per_regs per_single_step = {
+		.control = PER_EVENT_IFETCH,
+		.start = 0,
+		.end = PSW_ADDR_INSN,
+	};
+	struct pt_regs *regs = task_pt_regs(task);
+	struct thread_struct *thread = &task->thread;
+	const struct per_regs *new;
+	struct per_regs old;
+
+	/* TIF_SINGLE_STEP overrides the user specified PER registers. */
+	new = test_tsk_thread_flag(task, TIF_SINGLE_STEP) ?
+		&per_single_step : &thread->per_user;
+
+	/* Take care of the PER enablement bit in the PSW. */
+	if (!(new->control & PER_EVENT_MASK)) {
 		regs->psw.mask &= ~PSW_MASK_PER;
-
-	if (per_info->control_regs.bits.em_storage_alteration)
-		per_info->control_regs.bits.storage_alt_space_ctl = 1;
-	else
-		per_info->control_regs.bits.storage_alt_space_ctl = 0;
-
-	if (task == current) {
-		__ctl_store(cr_words, 9, 11);
-		if (memcmp(&cr_words, &per_info->control_regs.words,
-			   sizeof(cr_words)) != 0)
-			__ctl_load(per_info->control_regs.words, 9, 11);
-	}
+		return;
+	}
+	regs->psw.mask |= PSW_MASK_PER;
+	__ctl_store(old, 9, 11);
+	if (memcmp(new, &old, sizeof(struct per_regs)) != 0)
+		__ctl_load(*new, 9, 11);
 }
 
 void user_enable_single_step(struct task_struct *task)
 {
-	task->thread.per_info.single_step = 1;
-	FixPerRegisters(task);
+	set_tsk_thread_flag(task, TIF_SINGLE_STEP);
+	if (task == current)
+		update_per_regs(task);
 }
 
 void user_disable_single_step(struct task_struct *task)
 {
-	task->thread.per_info.single_step = 0;
-	FixPerRegisters(task);
+	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
+	if (task == current)
+		update_per_regs(task);
 }
 
 /*
  * Called by kernel/ptrace.c when detaching..
  *
- * Make sure single step bits etc are not set.
+ * Clear all debugging related fields.
  */
-void
-ptrace_disable(struct task_struct *child)
+void ptrace_disable(struct task_struct *task)
 {
-	/* make sure the single step bit is not set. */
-	user_disable_single_step(child);
+	memset(&task->thread.per_user, 0, sizeof(task->thread.per_user));
+	memset(&task->thread.per_event, 0, sizeof(task->thread.per_event));
+	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
+	clear_tsk_thread_flag(task, TIF_PER_TRAP);
 }
 
 #ifndef CONFIG_64BIT
@@ -139,6 +105,47 @@ ptrace_disable(struct task_struct *child)
 # define __ADDR_MASK 7
 #endif
 
+static inline unsigned long __peek_user_per(struct task_struct *child,
+					    addr_t addr)
+{
+	struct per_struct_kernel *dummy = NULL;
+
+	if (addr == (addr_t) &dummy->cr9)
+		/* Control bits of the active per set. */
+		return test_thread_flag(TIF_SINGLE_STEP) ?
+			PER_EVENT_IFETCH : child->thread.per_user.control;
+	else if (addr == (addr_t) &dummy->cr10)
+		/* Start address of the active per set. */
+		return test_thread_flag(TIF_SINGLE_STEP) ?
+			0 : child->thread.per_user.start;
+	else if (addr == (addr_t) &dummy->cr11)
+		/* End address of the active per set. */
+		return test_thread_flag(TIF_SINGLE_STEP) ?
+			PSW_ADDR_INSN : child->thread.per_user.end;
+	else if (addr == (addr_t) &dummy->bits)
+		/* Single-step bit. */
+		return test_thread_flag(TIF_SINGLE_STEP) ?
+			(1UL << (BITS_PER_LONG - 1)) : 0;
+	else if (addr == (addr_t) &dummy->starting_addr)
+		/* Start address of the user specified per set. */
+		return child->thread.per_user.start;
+	else if (addr == (addr_t) &dummy->ending_addr)
+		/* End address of the user specified per set. */
+		return child->thread.per_user.end;
+	else if (addr == (addr_t) &dummy->perc_atmid)
+		/* PER code, ATMID and AI of the last PER trap */
+		return (unsigned long)
+			child->thread.per_event.cause << (BITS_PER_LONG - 16);
+	else if (addr == (addr_t) &dummy->address)
+		/* Address of the last PER trap */
+		return child->thread.per_event.address;
+	else if (addr == (addr_t) &dummy->access_id)
+		/* Access id of the last PER trap */
+		return (unsigned long)
+			child->thread.per_event.paid << (BITS_PER_LONG - 8);
+	return 0;
+}
+
 /*
  * Read the word at offset addr from the user area of a process. The
  * trouble here is that the information is littered over different
@@ -204,10 +211,10 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)
 
 	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
 		/*
-		 * per_info is found in the thread structure
+		 * Handle access to the per_info structure.
 		 */
-		offset = addr - (addr_t) &dummy->regs.per_info;
-		tmp = *(addr_t *)((addr_t) &child->thread.per_info + offset);
+		addr -= (addr_t) &dummy->regs.per_info;
+		tmp = __peek_user_per(child, addr);
 
 	} else
 		tmp = 0;
@@ -237,6 +244,35 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data)
 	return put_user(tmp, (addr_t __user *) data);
 }
 
+static inline void __poke_user_per(struct task_struct *child,
+				   addr_t addr, addr_t data)
+{
+	struct per_struct_kernel *dummy = NULL;
+
+	/*
+	 * There are only three fields in the per_info struct that the
+	 * debugger user can write to.
+	 * 1) cr9: the debugger wants to set a new PER event mask
+	 * 2) starting_addr: the debugger wants to set a new starting
+	 *    address to use with the PER event mask.
+	 * 3) ending_addr: the debugger wants to set a new ending
+	 *    address to use with the PER event mask.
+	 * The user specified PER event mask and the start and end
+	 * addresses are used only if single stepping is not in effect.
+	 * Writes to any other field in per_info are ignored.
+	 */
+	if (addr == (addr_t) &dummy->cr9)
+		/* PER event mask of the user specified per set. */
+		child->thread.per_user.control =
+			data & (PER_EVENT_MASK | PER_CONTROL_MASK);
+	else if (addr == (addr_t) &dummy->starting_addr)
+		/* Starting address of the user specified per set. */
+		child->thread.per_user.start = data;
+	else if (addr == (addr_t) &dummy->ending_addr)
+		/* Ending address of the user specified per set. */
+		child->thread.per_user.end = data;
+}
+
 /*
  * Write a word to the user area of a process at location addr. This
  * operation does have an additional problem compared to peek_user.
@@ -311,19 +347,17 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
 
 	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
 		/*
-		 * per_info is found in the thread structure
+		 * Handle access to the per_info structure.
 		 */
-		offset = addr - (addr_t) &dummy->regs.per_info;
-		*(addr_t *)((addr_t) &child->thread.per_info + offset) = data;
+		addr -= (addr_t) &dummy->regs.per_info;
+		__poke_user_per(child, addr, data);
 
 	}
 
-	FixPerRegisters(child);
 	return 0;
 }
 
-static int
-poke_user(struct task_struct *child, addr_t addr, addr_t data)
+static int poke_user(struct task_struct *child, addr_t addr, addr_t data)
 {
 	addr_t mask;
 
@@ -410,12 +444,53 @@ long arch_ptrace(struct task_struct *child, long request,
  */
 
 /*
+ * Same as peek_user_per but for a 31 bit program.
+ */
+static inline __u32 __peek_user_per_compat(struct task_struct *child,
+					   addr_t addr)
+{
+	struct compat_per_struct_kernel *dummy32 = NULL;
+
+	if (addr == (addr_t) &dummy32->cr9)
+		/* Control bits of the active per set. */
+		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
+			PER_EVENT_IFETCH : child->thread.per_user.control;
+	else if (addr == (addr_t) &dummy32->cr10)
+		/* Start address of the active per set. */
+		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
+			0 : child->thread.per_user.start;
+	else if (addr == (addr_t) &dummy32->cr11)
+		/* End address of the active per set. */
+		return test_thread_flag(TIF_SINGLE_STEP) ?
+			PSW32_ADDR_INSN : child->thread.per_user.end;
+	else if (addr == (addr_t) &dummy32->bits)
+		/* Single-step bit. */
+		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
+			0x80000000 : 0;
+	else if (addr == (addr_t) &dummy32->starting_addr)
+		/* Start address of the user specified per set. */
+		return (__u32) child->thread.per_user.start;
+	else if (addr == (addr_t) &dummy32->ending_addr)
+		/* End address of the user specified per set. */
+		return (__u32) child->thread.per_user.end;
+	else if (addr == (addr_t) &dummy32->perc_atmid)
+		/* PER code, ATMID and AI of the last PER trap */
+		return (__u32) child->thread.per_event.cause << 16;
+	else if (addr == (addr_t) &dummy32->address)
+		/* Address of the last PER trap */
+		return (__u32) child->thread.per_event.address;
+	else if (addr == (addr_t) &dummy32->access_id)
+		/* Access id of the last PER trap */
+		return (__u32) child->thread.per_event.paid << 24;
+	return 0;
+}
+
+/*
  * Same as peek_user but for a 31 bit program.
  */
 static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
 {
-	struct user32 *dummy32 = NULL;
-	per_struct32 *dummy_per32 = NULL;
+	struct compat_user *dummy32 = NULL;
 	addr_t offset;
 	__u32 tmp;
 
@@ -465,19 +540,10 @@ static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
 
 	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
 		/*
-		 * per_info is found in the thread structure
+		 * Handle access to the per_info structure.
 		 */
-		offset = addr - (addr_t) &dummy32->regs.per_info;
-		/* This is magic. See per_struct and per_struct32. */
-		if ((offset >= (addr_t) &dummy_per32->control_regs &&
-		     offset < (addr_t) (&dummy_per32->control_regs + 1)) ||
-		    (offset >= (addr_t) &dummy_per32->starting_addr &&
-		     offset <= (addr_t) &dummy_per32->ending_addr) ||
-		    offset == (addr_t) &dummy_per32->lowcore.words.address)
-			offset = offset*2 + 4;
-		else
-			offset = offset*2;
-		tmp = *(__u32 *)((addr_t) &child->thread.per_info + offset);
+		addr -= (addr_t) &dummy32->regs.per_info;
+		tmp = __peek_user_per_compat(child, addr);
 
 	} else
 		tmp = 0;
@@ -498,13 +564,32 @@ static int peek_user_compat(struct task_struct *child,
 }
 
 /*
+ * Same as poke_user_per but for a 31 bit program.
+ */
+static inline void __poke_user_per_compat(struct task_struct *child,
+					  addr_t addr, __u32 data)
+{
+	struct compat_per_struct_kernel *dummy32 = NULL;
+
+	if (addr == (addr_t) &dummy32->cr9)
+		/* PER event mask of the user specified per set. */
+		child->thread.per_user.control =
+			data & (PER_EVENT_MASK | PER_CONTROL_MASK);
+	else if (addr == (addr_t) &dummy32->starting_addr)
+		/* Starting address of the user specified per set. */
+		child->thread.per_user.start = data;
+	else if (addr == (addr_t) &dummy32->ending_addr)
+		/* Ending address of the user specified per set. */
+		child->thread.per_user.end = data;
+}
+
+/*
  * Same as poke_user but for a 31 bit program.
  */
 static int __poke_user_compat(struct task_struct *child,
 			      addr_t addr, addr_t data)
 {
-	struct user32 *dummy32 = NULL;
-	per_struct32 *dummy_per32 = NULL;
+	struct compat_user *dummy32 = NULL;
 	__u32 tmp = (__u32) data;
 	addr_t offset;
 
@@ -561,37 +646,20 @@ static int __poke_user_compat(struct task_struct *child,
 
 	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
 		/*
-		 * per_info is found in the thread structure.
-		 */
-		offset = addr - (addr_t) &dummy32->regs.per_info;
-		/*
-		 * This is magic. See per_struct and per_struct32.
-		 * By incident the offsets in per_struct are exactly
-		 * twice the offsets in per_struct32 for all fields.
-		 * The 8 byte fields need special handling though,
-		 * because the second half (bytes 4-7) is needed and
-		 * not the first half.
+		 * Handle access to the per_info structure.
 		 */
-		if ((offset >= (addr_t) &dummy_per32->control_regs &&
-		     offset < (addr_t) (&dummy_per32->control_regs + 1)) ||
-		    (offset >= (addr_t) &dummy_per32->starting_addr &&
-		     offset <= (addr_t) &dummy_per32->ending_addr) ||
-		    offset == (addr_t) &dummy_per32->lowcore.words.address)
-			offset = offset*2 + 4;
-		else
-			offset = offset*2;
-		*(__u32 *)((addr_t) &child->thread.per_info + offset) = tmp;
-
+		addr -= (addr_t) &dummy32->regs.per_info;
+		__poke_user_per_compat(child, addr, data);
 	}
 
-	FixPerRegisters(child);
 	return 0;
 }
 
 static int poke_user_compat(struct task_struct *child,
 			    addr_t addr, addr_t data)
 {
-	if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user32) - 3)
+	if (!is_compat_task() || (addr & 3) ||
+	    addr > sizeof(struct compat_user) - 3)
 		return -EIO;
 
 	return __poke_user_compat(child, addr, data);
@@ -602,7 +670,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 {
 	unsigned long addr = caddr;
 	unsigned long data = cdata;
-	ptrace_area_emu31 parea;
+	compat_ptrace_area parea;
 	int copied, ret;
 
 	switch (request) {
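The __peek_user_per()/__poke_user_per() helpers introduced above dispatch on a user-supplied offset by comparing it against field addresses taken from a NULL "dummy" pointer, which is an offsetof() computation in disguise: taking &dummy->field does pointer arithmetic only, never a dereference. A standalone illustration of the idiom, with a made-up struct (the kernel code relies on the same implementation behavior):

#include <stddef.h>
#include <stdio.h>

struct per_regs_demo {		/* illustrative stand-in for per_struct_kernel */
	unsigned long control;	/* like cr9  */
	unsigned long start;	/* like cr10 */
	unsigned long end;	/* like cr11 */
};

int main(void)
{
	struct per_regs_demo *dummy = NULL;
	unsigned long addr = sizeof(unsigned long);	/* offset of .start */

	/* &dummy->start is just 0 + offsetof(..., start), so a user
	 * supplied byte offset can be matched field by field. */
	if (addr == (unsigned long) &dummy->start)
		printf("offset %lu selects the start field\n", addr);
	printf("offsetof gives the same value: %zu\n",
	       offsetof(struct per_regs_demo, start));
	return 0;
}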
diff --git a/arch/s390/kernel/s390_ext.c b/arch/s390/kernel/s390_ext.c
index bd1db508e8af..185029919c4d 100644
--- a/arch/s390/kernel/s390_ext.c
+++ b/arch/s390/kernel/s390_ext.c
@@ -1,33 +1,36 @@
 /*
- * arch/s390/kernel/s390_ext.c
- *
- * S390 version
- * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com),
- *	      Martin Schwidefsky (schwidefsky@de.ibm.com)
+ * Copyright IBM Corp. 1999,2010
+ * Author(s): Holger Smolinski <Holger.Smolinski@de.ibm.com>,
+ *	      Martin Schwidefsky <schwidefsky@de.ibm.com>,
  */
 
+#include <linux/kernel_stat.h>
+#include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
-#include <linux/slab.h>
 #include <linux/ftrace.h>
 #include <linux/errno.h>
-#include <linux/kernel_stat.h>
-#include <linux/interrupt.h>
-#include <asm/cputime.h>
-#include <asm/lowcore.h>
+#include <linux/slab.h>
 #include <asm/s390_ext.h>
 #include <asm/irq_regs.h>
+#include <asm/cputime.h>
+#include <asm/lowcore.h>
 #include <asm/irq.h>
 #include "entry.h"
 
+struct ext_int_info {
+	struct ext_int_info *next;
+	ext_int_handler_t handler;
+	__u16 code;
+};
+
 /*
  * ext_int_hash[index] is the start of the list for all external interrupts
  * that hash to this index. With the current set of external interrupts
  * (0x1202 external call, 0x1004 cpu timer, 0x2401 hwc console, 0x4000
  * iucv and 0x2603 pfault) this is always the first element.
  */
-ext_int_info_t *ext_int_hash[256] = { NULL, };
+static struct ext_int_info *ext_int_hash[256];
 
 static inline int ext_hash(__u16 code)
 {
@@ -36,90 +39,53 @@ static inline int ext_hash(__u16 code)
 
 int register_external_interrupt(__u16 code, ext_int_handler_t handler)
 {
-	ext_int_info_t *p;
+	struct ext_int_info *p;
 	int index;
-
-	p = kmalloc(sizeof(ext_int_info_t), GFP_ATOMIC);
-	if (p == NULL)
-		return -ENOMEM;
-	p->code = code;
-	p->handler = handler;
-	index = ext_hash(code);
-	p->next = ext_int_hash[index];
-	ext_int_hash[index] = p;
-	return 0;
-}
-
-int register_early_external_interrupt(__u16 code, ext_int_handler_t handler,
-				      ext_int_info_t *p)
-{
-	int index;
 
-	if (p == NULL)
-		return -EINVAL;
-	p->code = code;
-	p->handler = handler;
+	p = kmalloc(sizeof(*p), GFP_ATOMIC);
+	if (!p)
+		return -ENOMEM;
+	p->code = code;
+	p->handler = handler;
 	index = ext_hash(code);
 	p->next = ext_int_hash[index];
 	ext_int_hash[index] = p;
 	return 0;
 }
+EXPORT_SYMBOL(register_external_interrupt);
 
 int unregister_external_interrupt(__u16 code, ext_int_handler_t handler)
 {
-	ext_int_info_t *p, *q;
-	int index;
-
-	index = ext_hash(code);
-	q = NULL;
-	p = ext_int_hash[index];
-	while (p != NULL) {
-		if (p->code == code && p->handler == handler)
-			break;
-		q = p;
-		p = p->next;
-	}
-	if (p == NULL)
-		return -ENOENT;
-	if (q != NULL)
-		q->next = p->next;
-	else
-		ext_int_hash[index] = p->next;
-	kfree(p);
-	return 0;
-}
-
-int unregister_early_external_interrupt(__u16 code, ext_int_handler_t handler,
-					ext_int_info_t *p)
-{
-	ext_int_info_t *q;
+	struct ext_int_info *p, *q;
 	int index;
 
-	if (p == NULL || p->code != code || p->handler != handler)
-		return -EINVAL;
 	index = ext_hash(code);
-	q = ext_int_hash[index];
-	if (p != q) {
-		while (q != NULL) {
-			if (q->next == p)
-				break;
-			q = q->next;
-		}
-		if (q == NULL)
-			return -ENOENT;
+	q = NULL;
+	p = ext_int_hash[index];
+	while (p) {
+		if (p->code == code && p->handler == handler)
+			break;
+		q = p;
+		p = p->next;
+	}
+	if (!p)
+		return -ENOENT;
+	if (q)
 		q->next = p->next;
-	} else
+	else
 		ext_int_hash[index] = p->next;
+	kfree(p);
 	return 0;
 }
+EXPORT_SYMBOL(unregister_external_interrupt);
 
 void __irq_entry do_extint(struct pt_regs *regs, unsigned int ext_int_code,
 			   unsigned int param32, unsigned long param64)
 {
 	struct pt_regs *old_regs;
 	unsigned short code;
-	ext_int_info_t *p;
+	struct ext_int_info *p;
 	int index;
 
 	code = (unsigned short) ext_int_code;
 	old_regs = set_irq_regs(regs);
@@ -132,7 +98,7 @@ void __irq_entry do_extint(struct pt_regs *regs, unsigned int ext_int_code,
 	kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++;
 	if (code != 0x1004)
 		__get_cpu_var(s390_idle).nohz_delay = 1;
 	index = ext_hash(code);
 	for (p = ext_int_hash[index]; p; p = p->next) {
 		if (likely(p->code == code))
 			p->handler(ext_int_code, param32, param64);
@@ -140,6 +106,3 @@ void __irq_entry do_extint(struct pt_regs *regs, unsigned int ext_int_code,
 	irq_exit();
 	set_irq_regs(old_regs);
 }
-
-EXPORT_SYMBOL(register_external_interrupt);
-EXPORT_SYMBOL(unregister_external_interrupt);
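With the "early" variants gone, every user goes through the one pair of entry points and the hash-list node is allocated and freed internally. A hedged sketch of what a caller now looks like; the 0x1234 code and handler body are invented, while the registration functions and handler signature are the ones shown above:

#include <linux/init.h>
#include <asm/s390_ext.h>

static void my_ext_handler(unsigned int ext_int_code,
			   unsigned int param32, unsigned long param64)
{
	/* invoked from do_extint() in interrupt context */
}

static int __init my_ext_init(void)
{
	/* the list node is kmalloc'ed internally (GFP_ATOMIC) */
	return register_external_interrupt(0x1234, my_ext_handler);
}

static void __exit my_ext_exit(void)
{
	/* frees the node; no caller-supplied ext_int_info storage anymore */
	unregister_external_interrupt(0x1234, my_ext_handler);
}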
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index ee7ac8b11782..abbb3c3c7aab 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -505,7 +505,7 @@ void do_signal(struct pt_regs *regs)
 		 * Let tracing know that we've done the handler setup.
 		 */
 		tracehook_signal_handler(signr, &info, &ka, regs,
-					 current->thread.per_info.single_step);
+					 test_thread_flag(TIF_SINGLE_STEP));
 	}
 	return;
 }
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 94cf510b8fe1..63a97db83f96 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -23,6 +23,7 @@
 #define KMSG_COMPONENT "cpu"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
+#include <linux/workqueue.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/mm.h>
@@ -161,6 +162,7 @@ static void do_ext_call_interrupt(unsigned int ext_int_code,
 {
 	unsigned long bits;
 
+	kstat_cpu(smp_processor_id()).irqs[EXTINT_IPI]++;
 	/*
 	 * handle bit signal external calls
 	 *
@@ -469,25 +471,25 @@ int __cpuinit start_secondary(void *cpuvoid)
 	ipi_call_unlock();
 	/* Switch on interrupts */
 	local_irq_enable();
-	/* Print info about this processor */
-	print_cpu_info();
 	/* cpu_idle will call schedule for us */
 	cpu_idle();
 	return 0;
 }
 
-static void __init smp_create_idle(unsigned int cpu)
+struct create_idle {
+	struct work_struct work;
+	struct task_struct *idle;
+	struct completion done;
+	int cpu;
+};
+
+static void __cpuinit smp_fork_idle(struct work_struct *work)
 {
-	struct task_struct *p;
+	struct create_idle *c_idle;
 
-	/*
-	 * don't care about the psw and regs settings since we'll never
-	 * reschedule the forked task.
-	 */
-	p = fork_idle(cpu);
-	if (IS_ERR(p))
-		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
-	current_set[cpu] = p;
+	c_idle = container_of(work, struct create_idle, work);
+	c_idle->idle = fork_idle(c_idle->cpu);
+	complete(&c_idle->done);
 }
 
 static int __cpuinit smp_alloc_lowcore(int cpu)
@@ -551,6 +553,7 @@ static void smp_free_lowcore(int cpu)
 int __cpuinit __cpu_up(unsigned int cpu)
 {
 	struct _lowcore *cpu_lowcore;
+	struct create_idle c_idle;
 	struct task_struct *idle;
 	struct stack_frame *sf;
 	u32 lowcore;
@@ -558,6 +561,19 @@ int __cpuinit __cpu_up(unsigned int cpu)
 
 	if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED)
 		return -EIO;
+	idle = current_set[cpu];
+	if (!idle) {
+		c_idle.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done);
+		INIT_WORK_ONSTACK(&c_idle.work, smp_fork_idle);
+		c_idle.cpu = cpu;
+		schedule_work(&c_idle.work);
+		wait_for_completion(&c_idle.done);
+		if (IS_ERR(c_idle.idle))
+			return PTR_ERR(c_idle.idle);
+		idle = c_idle.idle;
+		current_set[cpu] = c_idle.idle;
+	}
+	init_idle(idle, cpu);
 	if (smp_alloc_lowcore(cpu))
 		return -ENOMEM;
 	do {
@@ -572,7 +588,6 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	while (sigp_p(lowcore, cpu, sigp_set_prefix) == sigp_busy)
 		udelay(10);
 
-	idle = current_set[cpu];
 	cpu_lowcore = lowcore_ptr[cpu];
 	cpu_lowcore->kernel_stack = (unsigned long)
 		task_stack_page(idle) + THREAD_SIZE;
@@ -664,7 +679,6 @@ void __cpu_die(unsigned int cpu)
 		udelay(10);
 	smp_free_lowcore(cpu);
 	atomic_dec(&init_mm.context.attach_count);
-	pr_info("Processor %d stopped\n", cpu);
 }
 
 void cpu_die(void)
@@ -684,14 +698,12 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 #endif
 	unsigned long async_stack, panic_stack;
 	struct _lowcore *lowcore;
-	unsigned int cpu;
 
 	smp_detect_cpus();
 
 	/* request the 0x1201 emergency signal external interrupt */
 	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
 		panic("Couldn't request external interrupt 0x1201");
-	print_cpu_info();
 
 	/* Reallocate current lowcore, but keep its contents. */
 	lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
@@ -719,9 +731,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	if (vdso_alloc_per_cpu(smp_processor_id(), &S390_lowcore))
 		BUG();
 #endif
-	for_each_possible_cpu(cpu)
-		if (cpu != smp_processor_id())
-			smp_create_idle(cpu);
 }
 
 void __init smp_prepare_boot_cpu(void)
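The idle task is now forked on demand in __cpu_up() instead of once per possible CPU at boot, and fork_idle() is pushed through a workqueue so it runs in a clean kernel-thread context; the caller parks on a completion until the kworker is done. The skeleton of that on-stack work + completion pattern, with the s390 specifics stripped out (run_in_kworker() and do_thing() are illustrative names):

#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/kernel.h>

struct on_stack_req {
	struct work_struct work;
	struct completion done;
	int result;
};

static void do_thing(struct work_struct *work)
{
	struct on_stack_req *req = container_of(work, struct on_stack_req, work);

	req->result = 42;		/* stand-in for fork_idle(cpu) */
	complete(&req->done);		/* wake the waiting caller */
}

static int run_in_kworker(void)
{
	struct on_stack_req req;

	req.done = COMPLETION_INITIALIZER_ONSTACK(req.done);
	INIT_WORK_ONSTACK(&req.work, do_thing);
	schedule_work(&req.work);	/* runs in the system workqueue thread */
	wait_for_completion(&req.done);	/* stack stays valid until here */
	return req.result;
}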
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index f754a6dc4f94..9e7b039458da 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -15,6 +15,7 @@
 #define KMSG_COMPONENT "time"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
+#include <linux/kernel_stat.h>
 #include <linux/errno.h>
 #include <linux/module.h>
 #include <linux/sched.h>
@@ -37,6 +38,7 @@
 #include <linux/clocksource.h>
 #include <linux/clockchips.h>
 #include <linux/gfp.h>
+#include <linux/kprobes.h>
 #include <asm/uaccess.h>
 #include <asm/delay.h>
 #include <asm/s390_ext.h>
@@ -60,7 +62,7 @@ static DEFINE_PER_CPU(struct clock_event_device, comparators);
 /*
  * Scheduler clock - returns current time in nanosec units.
  */
-unsigned long long notrace sched_clock(void)
+unsigned long long notrace __kprobes sched_clock(void)
 {
 	return (get_clock_monotonic() * 125) >> 9;
 }
@@ -159,6 +161,7 @@ static void clock_comparator_interrupt(unsigned int ext_int_code,
 					unsigned int param32,
 					unsigned long param64)
 {
+	kstat_cpu(smp_processor_id()).irqs[EXTINT_CLK]++;
 	if (S390_lowcore.clock_comparator == -1ULL)
 		set_clock_comparator(S390_lowcore.clock_comparator);
 }
@@ -169,6 +172,7 @@ static void stp_timing_alert(struct stp_irq_parm *);
 static void timing_alert_interrupt(unsigned int ext_int_code,
 				   unsigned int param32, unsigned long param64)
 {
+	kstat_cpu(smp_processor_id()).irqs[EXTINT_TLA]++;
 	if (param32 & 0x00c40000)
 		etr_timing_alert((struct etr_irq_parm *) &param32);
 	if (param32 & 0x00038000)
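sched_clock() converts TOD clock units to nanoseconds with (tod * 125) >> 9: bit 51 of the s390 TOD clock increments once per microsecond, so there are 4096 units per microsecond, and the factor 1000/4096 reduces to 125/512, i.e. multiply by 125 and shift right by 9. A quick standalone check of the arithmetic:

#include <stdio.h>

int main(void)
{
	/* one second of TOD units: 4096 units/us * 1e6 us */
	unsigned long long one_second_in_tod = 4096ULL * 1000000;
	unsigned long long ns = (one_second_in_tod * 125) >> 9;

	printf("%llu ns\n", ns);	/* prints 1000000000 */
	return 0;
}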
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 70640822621a..5eb78dd584ce 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -365,12 +365,10 @@ static inline void __user *get_psw_address(struct pt_regs *regs,
 		((regs->psw.addr - (pgm_int_code >> 16)) & PSW_ADDR_INSN);
 }
 
-void __kprobes do_single_step(struct pt_regs *regs)
+void __kprobes do_per_trap(struct pt_regs *regs)
 {
-	if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0,
-					SIGTRAP) == NOTIFY_STOP){
+	if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, SIGTRAP) == NOTIFY_STOP)
 		return;
-	}
 	if (tracehook_consider_fatal_signal(current, SIGTRAP))
 		force_sig(SIGTRAP, current);
 }
@@ -451,8 +449,8 @@ static inline void do_fp_trap(struct pt_regs *regs, void __user *location,
 		       "floating point exception", regs, &si);
 }
 
-static void illegal_op(struct pt_regs *regs, long pgm_int_code,
-		       unsigned long trans_exc_code)
+static void __kprobes illegal_op(struct pt_regs *regs, long pgm_int_code,
+				 unsigned long trans_exc_code)
 {
 	siginfo_t info;
 	__u8 opcode[6];
@@ -688,7 +686,7 @@ static void space_switch_exception(struct pt_regs *regs, long pgm_int_code,
 	do_trap(pgm_int_code, SIGILL, "space switch event", regs, &info);
 }
 
-asmlinkage void kernel_stack_overflow(struct pt_regs * regs)
+asmlinkage void __kprobes kernel_stack_overflow(struct pt_regs * regs)
 {
 	bust_spinlocks(1);
 	printk("Kernel stack overflow.\n");
@@ -733,5 +731,6 @@ void __init trap_init(void)
 	pgm_check_table[0x15] = &operand_exception;
 	pgm_check_table[0x1C] = &space_switch_exception;
 	pgm_check_table[0x1D] = &hfp_sqrt_exception;
-	pfault_irq_init();
+	/* Enable machine checks early. */
+	local_mcck_enable();
 }
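Several functions on the trap-handling path gain a __kprobes tag above. The annotation places a function in the .kprobes.text section, which the kprobes core declines to probe, so code that runs while a breakpoint is being handled cannot recursively trigger another one. A minimal sketch of the annotation (the helper name is made up):

#include <linux/kprobes.h>

/* __kprobes expands to a section attribute putting the function into
 * .kprobes.text; register_kprobe() rejects addresses in that section. */
static void __kprobes my_trap_helper(void)
{
	/* safe to call from breakpoint/trap context */
}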
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 7eff9b7347c0..1ccdf4d8aa85 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -20,6 +20,7 @@
 #include <linux/rcupdate.h>
 #include <linux/posix-timers.h>
 #include <linux/cpu.h>
+#include <linux/kprobes.h>
 
 #include <asm/s390_ext.h>
 #include <asm/timer.h>
@@ -122,7 +123,7 @@ void account_system_vtime(struct task_struct *tsk)
 }
 EXPORT_SYMBOL_GPL(account_system_vtime);
 
-void vtime_start_cpu(__u64 int_clock, __u64 enter_timer)
+void __kprobes vtime_start_cpu(__u64 int_clock, __u64 enter_timer)
 {
 	struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
 	struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer);
@@ -162,7 +163,7 @@ void vtime_start_cpu(__u64 int_clock, __u64 enter_timer)
 	idle->sequence++;
 }
 
-void vtime_stop_cpu(void)
+void __kprobes vtime_stop_cpu(void)
 {
 	struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
 	struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer);
@@ -323,6 +324,7 @@ static void do_cpu_timer_interrupt(unsigned int ext_int_code,
 	struct list_head cb_list;	/* the callback queue */
 	__u64 elapsed, next;
 
+	kstat_cpu(smp_processor_id()).irqs[EXTINT_TMR]++;
 	INIT_LIST_HEAD(&cb_list);
 	vq = &__get_cpu_var(virt_cpu_timer);
 
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
index a7251580891c..f66a1bdbb61d 100644
--- a/arch/s390/kvm/Kconfig
+++ b/arch/s390/kvm/Kconfig
@@ -4,8 +4,8 @@
 source "virt/kvm/Kconfig"
 
 menuconfig VIRTUALIZATION
-	bool "Virtualization"
-	default y
+	def_bool y
+	prompt "Virtualization"
 	---help---
 	  Say Y here to get to see options for using your Linux host to run other
 	  operating systems inside virtual machines (guests).
@@ -16,7 +16,8 @@ menuconfig VIRTUALIZATION
 if VIRTUALIZATION
 
 config KVM
-	tristate "Kernel-based Virtual Machine (KVM) support"
+	def_tristate y
+	prompt "Kernel-based Virtual Machine (KVM) support"
 	depends on HAVE_KVM && EXPERIMENTAL
 	select PREEMPT_NOTIFIERS
 	select ANON_INODES
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index 7c37ec359ec2..0f53110e1d09 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -47,7 +47,6 @@ static void __udelay_disabled(unsigned long long usecs)
 	lockdep_on();
 	__ctl_load(cr0_saved, 0, 0);
 	local_tick_enable(clock_saved);
-	set_clock_comparator(S390_lowcore.clock_comparator);
 }
 
 static void __udelay_enabled(unsigned long long usecs)
@@ -70,7 +69,6 @@ static void __udelay_enabled(unsigned long long usecs)
 		if (clock_saved)
 			local_tick_enable(clock_saved);
 	} while (get_clock() < end);
-	set_clock_comparator(S390_lowcore.clock_comparator);
 }
 
 /*
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index fe5701e9efbf..2c57806c0858 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -10,6 +10,7 @@
  *    Copyright (C) 1995  Linus Torvalds
  */
 
+#include <linux/kernel_stat.h>
 #include <linux/perf_event.h>
 #include <linux/signal.h>
 #include <linux/sched.h>
@@ -234,13 +235,13 @@ static noinline int signal_return(struct pt_regs *regs, long int_code,
 	rc = __get_user(instruction, (u16 __user *) regs->psw.addr);
 
 	if (!rc && instruction == 0x0a77) {
-		clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
+		clear_tsk_thread_flag(current, TIF_PER_TRAP);
 		if (is_compat_task())
 			sys32_sigreturn();
 		else
 			sys_sigreturn();
 	} else if (!rc && instruction == 0x0aad) {
-		clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
+		clear_tsk_thread_flag(current, TIF_PER_TRAP);
 		if (is_compat_task())
 			sys32_rt_sigreturn();
 		else
@@ -378,7 +379,7 @@ static inline int do_exception(struct pt_regs *regs, int access,
 	 * The instruction that caused the program check will
 	 * be repeated. Don't signal single step via SIGTRAP.
 	 */
-	clear_tsk_thread_flag(tsk, TIF_SINGLE_STEP);
+	clear_tsk_thread_flag(tsk, TIF_PER_TRAP);
 	fault = 0;
 out_up:
 	up_read(&mm->mmap_sem);
@@ -480,8 +481,7 @@ int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
 /*
  * 'pfault' pseudo page faults routines.
  */
-static ext_int_info_t ext_int_pfault;
-static int pfault_disable = 0;
+static int pfault_disable;
 
 static int __init nopfault(char *str)
 {
@@ -543,6 +543,7 @@ static void pfault_interrupt(unsigned int ext_int_code,
 	struct task_struct *tsk;
 	__u16 subcode;
 
+	kstat_cpu(smp_processor_id()).irqs[EXTINT_PFL]++;
 	/*
 	 * Get the external interruption subcode & pfault
 	 * initial/completion signal bit. VM stores this
@@ -592,24 +593,28 @@ static void pfault_interrupt(unsigned int ext_int_code,
 	}
 }
 
-void __init pfault_irq_init(void)
+static int __init pfault_irq_init(void)
 {
-	if (!MACHINE_IS_VM)
-		return;
+	int rc;
 
+	if (!MACHINE_IS_VM)
+		return 0;
 	/*
 	 * Try to get pfault pseudo page faults going.
 	 */
-	if (register_early_external_interrupt(0x2603, pfault_interrupt,
-					      &ext_int_pfault) != 0)
-		panic("Couldn't request external interrupt 0x2603");
-
+	rc = register_external_interrupt(0x2603, pfault_interrupt);
+	if (rc) {
+		pfault_disable = 1;
+		return rc;
+	}
 	if (pfault_init() == 0)
-		return;
+		return 0;
 
 	/* Tough luck, no pfault. */
 	pfault_disable = 1;
-	unregister_early_external_interrupt(0x2603, pfault_interrupt,
-					    &ext_int_pfault);
+	unregister_external_interrupt(0x2603, pfault_interrupt);
+	return 0;
 }
+early_initcall(pfault_irq_init);
+
 #endif
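pfault_irq_init() no longer has to be wired into trap_init(): as an early_initcall() it registers itself during boot and simply disables the pfault feature on failure instead of panicking. The general shape of that conversion, with a made-up initcall name:

#include <linux/init.h>

static int __init my_feature_init(void)
{
	/* return 0 both on success and when the feature is simply
	 * unavailable; a nonzero return is reported as an initcall
	 * failure but does not stop the boot */
	return 0;
}
early_initcall(my_feature_init);	/* runs early in boot, before SMP bring-up */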